repo_name
stringclasses 6
values | pr_number
int64 512
78.9k
| pr_title
stringlengths 3
144
| pr_description
stringlengths 0
30.3k
| author
stringlengths 2
21
| date_created
timestamp[ns, tz=UTC] | date_merged
timestamp[ns, tz=UTC] | previous_commit
stringlengths 40
40
| pr_commit
stringlengths 40
40
| query
stringlengths 17
30.4k
| filepath
stringlengths 9
210
| before_content
stringlengths 0
112M
| after_content
stringlengths 0
112M
| label
int64 -1
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/tests/Interop/PInvoke/NativeCallManagedComVisible/ComVisibleNative.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdio.h>
#include <stdlib.h>
#include <windows.h>
#include <Objbase.h>
#include <xplatform.h>
#include <platformdefines.h>
//
// Standard function to call the managed COM.
//
template <class T> class CCWTestTemplate
{
public:
static HRESULT CallManagedCom(IUnknown* pUnk, int* fooSuccessVal)
{
T *pTargetInterface = NULL;
(*fooSuccessVal) = -1;
HRESULT hr = pUnk->QueryInterface(_uuidof(T), reinterpret_cast<void**>(&pTargetInterface));
if (FAILED(hr))
return hr;
hr = pTargetInterface->Foo(fooSuccessVal);
pTargetInterface->Release();
return hr;
}
};
//
// Non Nested Interface:
//
//
// IInterfaceComImport
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("52E5F852-BD3E-4DF2-8826-E1EC39557943")) IInterfaceComImport : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceComImport(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceComImport>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("8FDE13DC-F917-44FF-AAC8-A638FD27D647")) IInterfaceVisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceVisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleFalse
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("0A2EF649-371D-4480-B0C7-07F455C836D3")) IInterfaceVisibleFalse : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleFalse(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceVisibleFalse>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceWithoutVisible
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("FB504D72-39C4-457F-ACF4-3E5D8A31AAE4")) IInterfaceWithoutVisible : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceWithoutVisible(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceWithoutVisible>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceNotPublic
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("11320010-13FA-4B40-8580-8CF92EE70774")) IInterfaceNotPublic : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceNotPublic(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceNotPublic>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleTrueNoGuid
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("ad50a327-d23a-38a4-9d6e-b32b32acf572")) IInterfaceVisibleTrueNoGuid : IUnknown
{
STDMETHOD(Foo1)(int* fooSuccessVal) = 0;
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
STDMETHOD(Foo2)(int* fooSuccessVal) = 0;
STDMETHOD(Foo3)(int* fooSuccessVal) = 0;
STDMETHOD(Foo4)(int* fooSuccessVal) = 0;
STDMETHOD(Foo5)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleTrueNoGuid(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceVisibleTrueNoGuid>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleTrueNoGuidGenericInterface
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("384f0b5c-28d0-368c-8c7e-5e31a84a5c84")) IInterfaceVisibleTrueNoGuidGenericInterface : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
STDMETHOD(Foo9)(int* fooSuccessVal, int listInt[]) = 0;
STDMETHOD(Foo10)(int* fooSuccessVal, void* intCollection, void* stringCollection) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleTrueNoGuidGenericInterface(IUnknown* pUnk, int* fooSuccessVal)
{
IInterfaceVisibleTrueNoGuidGenericInterface *pTargetInterface = NULL;
(*fooSuccessVal) = -1;
HRESULT hr = pUnk->QueryInterface(_uuidof(IInterfaceVisibleTrueNoGuidGenericInterface), reinterpret_cast<void**>(&pTargetInterface));
if (FAILED(hr))
return hr;
hr = pTargetInterface->Foo(fooSuccessVal);
if (FAILED(hr))
{
pTargetInterface->Release();
return hr;
}
hr = pTargetInterface->Foo9(fooSuccessVal, NULL);
if (FAILED(hr))
pTargetInterface->Release();
else
hr = (HRESULT)(pTargetInterface->Release());
return hr;
}
//
// IInterfaceNotVisibleNoGuid
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("b45587ec-9671-35bc-8b8e-f6bfb18a4d3a")) IInterfaceNotVisibleNoGuid : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceNotVisibleNoGuid(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceNotVisibleNoGuid>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceGenericVisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("BA4B32D4-1D73-4605-AD0A-900A31E75BC3")) IInterfaceGenericVisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceGenericVisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceGenericVisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceComImport_ComImport
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("943759D7-3552-43AD-9C4D-CC2F787CF36E")) IInterfaceComImport_ComImport : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceComImport_ComImport(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceComImport_ComImport>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleTrue_ComImport
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("75DE245B-0CE3-4B07-8761-328906C750B7")) IInterfaceVisibleTrue_ComImport : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleTrue_ComImport(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceVisibleTrue_ComImport>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleFalse_ComImport
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("C73D96C3-B005-42D6-93F5-E30AEE08C66C")) IInterfaceVisibleFalse_ComImport : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleFalse_ComImport(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceVisibleFalse_ComImport>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleTrue_VisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("60B3917B-9CC2-40F2-A975-CD6898DA697F")) IInterfaceVisibleTrue_VisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleTrue_VisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceVisibleTrue_VisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleFalse_VisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("2FC59DDB-B1D0-4678-93AF-6A48E838B705")) IInterfaceVisibleFalse_VisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleFalse_VisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceVisibleFalse_VisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleTrue_VisibleFalse
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("C82C25FC-FBAD-4EA9-BED1-343C887464B5")) IInterfaceVisibleTrue_VisibleFalse : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleTrue_VisibleFalse(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceVisibleTrue_VisibleFalse>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceNotPublic_VisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("8A4C1691-5615-4762-8568-481DC671F9CE")) IInterfaceNotPublic_VisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceNotPublic_VisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceNotPublic_VisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// Nested Interfaces:
//
//
// INestedInterfaceComImport
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("1D927BC5-1530-4B8E-A183-995425CE4A0A")) INestedInterfaceComImport : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceComImport(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceComImport>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceVisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("39209692-2568-4B1E-A6C8-A5C7F141D278")) INestedInterfaceVisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceVisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceVisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceVisibleFalse
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("1CE4B033-4927-447A-9F91-998357B32ADF")) INestedInterfaceVisibleFalse : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceVisibleFalse(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceVisibleFalse>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceWithoutVisible
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("C770422A-C363-49F1-AAA1-3EC81A452816")) INestedInterfaceWithoutVisible : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceWithoutVisible(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceWithoutVisible>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceNotPublic
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("F776FF8A-0673-49C2-957A-33C2576062ED")) INestedInterfaceNotPublic : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceNotPublic(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceNotPublic>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceNestedInClass
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("B31B4EC1-3B59-41C4-B3A0-CF89638CB837")) INestedInterfaceNestedInClass : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceNestedInClass(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceNestedInClass>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceNestedInClassNoGuid
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("486bcec9-904d-3445-871c-e7084a52eb1a")) INestedInterfaceNestedInClassNoGuid : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceNestedInClassNoGuid(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceNestedInClassNoGuid>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceVisibleTrueNoGuid
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("0ea2cb33-db9f-3655-9240-47ef1dea0f1e")) INestedInterfaceVisibleTrueNoGuid : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceVisibleTrueNoGuid(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceVisibleTrueNoGuid>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceGenericVisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("CAFBD2FF-710A-4E83-9229-42FA16963424")) INestedInterfaceGenericVisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceGenericVisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceGenericVisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceComImport_ComImport
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("C57D849A-A1A9-4CDC-A609-789D79F9332C")) INestedInterfaceComImport_ComImport : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceComImport_ComImport(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceComImport_ComImport>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceVisibleTrue_ComImport
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("81F28686-F257-4B7E-A47F-57C9775BE2CE")) INestedInterfaceVisibleTrue_ComImport : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceVisibleTrue_ComImport(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceVisibleTrue_ComImport>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceVisibleFalse_ComImport
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("FAAB7E6C-8548-429F-AD34-0CEC3EBDD7B7")) INestedInterfaceVisibleFalse_ComImport : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceVisibleFalse_ComImport(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceVisibleFalse_ComImport>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceVisibleTrue_VisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("BEFD79A9-D8E6-42E4-8228-1892298460D7")) INestedInterfaceVisibleTrue_VisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceVisibleTrue_VisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceVisibleTrue_VisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceVisibleFalse_VisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("5C497454-EA83-4F79-B990-4EB28505E801")) INestedInterfaceVisibleFalse_VisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceVisibleFalse_VisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceVisibleFalse_VisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceVisibleTrue_VisibleFalse
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("A17CF08F-EEC4-4EA5-B12C-5A603101415D")) INestedInterfaceVisibleTrue_VisibleFalse : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceVisibleTrue_VisibleFalse(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceVisibleTrue_VisibleFalse>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceNotPublic_VisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("40B723E9-E1BE-4F55-99CD-D2590D191A53")) INestedInterfaceNotPublic_VisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceNotPublic_VisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceNotPublic_VisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdio.h>
#include <stdlib.h>
#include <windows.h>
#include <Objbase.h>
#include <xplatform.h>
#include <platformdefines.h>
//
// Standard function to call the managed COM.
//
template <class T> class CCWTestTemplate
{
public:
static HRESULT CallManagedCom(IUnknown* pUnk, int* fooSuccessVal)
{
T *pTargetInterface = NULL;
(*fooSuccessVal) = -1;
HRESULT hr = pUnk->QueryInterface(_uuidof(T), reinterpret_cast<void**>(&pTargetInterface));
if (FAILED(hr))
return hr;
hr = pTargetInterface->Foo(fooSuccessVal);
pTargetInterface->Release();
return hr;
}
};
//
// Non Nested Interface:
//
//
// IInterfaceComImport
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("52E5F852-BD3E-4DF2-8826-E1EC39557943")) IInterfaceComImport : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceComImport(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceComImport>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("8FDE13DC-F917-44FF-AAC8-A638FD27D647")) IInterfaceVisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceVisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleFalse
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("0A2EF649-371D-4480-B0C7-07F455C836D3")) IInterfaceVisibleFalse : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleFalse(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceVisibleFalse>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceWithoutVisible
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("FB504D72-39C4-457F-ACF4-3E5D8A31AAE4")) IInterfaceWithoutVisible : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceWithoutVisible(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceWithoutVisible>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceNotPublic
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("11320010-13FA-4B40-8580-8CF92EE70774")) IInterfaceNotPublic : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceNotPublic(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceNotPublic>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleTrueNoGuid
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("ad50a327-d23a-38a4-9d6e-b32b32acf572")) IInterfaceVisibleTrueNoGuid : IUnknown
{
STDMETHOD(Foo1)(int* fooSuccessVal) = 0;
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
STDMETHOD(Foo2)(int* fooSuccessVal) = 0;
STDMETHOD(Foo3)(int* fooSuccessVal) = 0;
STDMETHOD(Foo4)(int* fooSuccessVal) = 0;
STDMETHOD(Foo5)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleTrueNoGuid(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceVisibleTrueNoGuid>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleTrueNoGuidGenericInterface
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("384f0b5c-28d0-368c-8c7e-5e31a84a5c84")) IInterfaceVisibleTrueNoGuidGenericInterface : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
STDMETHOD(Foo9)(int* fooSuccessVal, int listInt[]) = 0;
STDMETHOD(Foo10)(int* fooSuccessVal, void* intCollection, void* stringCollection) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleTrueNoGuidGenericInterface(IUnknown* pUnk, int* fooSuccessVal)
{
IInterfaceVisibleTrueNoGuidGenericInterface *pTargetInterface = NULL;
(*fooSuccessVal) = -1;
HRESULT hr = pUnk->QueryInterface(_uuidof(IInterfaceVisibleTrueNoGuidGenericInterface), reinterpret_cast<void**>(&pTargetInterface));
if (FAILED(hr))
return hr;
hr = pTargetInterface->Foo(fooSuccessVal);
if (FAILED(hr))
{
pTargetInterface->Release();
return hr;
}
hr = pTargetInterface->Foo9(fooSuccessVal, NULL);
if (FAILED(hr))
pTargetInterface->Release();
else
hr = (HRESULT)(pTargetInterface->Release());
return hr;
}
//
// IInterfaceNotVisibleNoGuid
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("b45587ec-9671-35bc-8b8e-f6bfb18a4d3a")) IInterfaceNotVisibleNoGuid : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceNotVisibleNoGuid(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceNotVisibleNoGuid>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceGenericVisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("BA4B32D4-1D73-4605-AD0A-900A31E75BC3")) IInterfaceGenericVisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceGenericVisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceGenericVisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceComImport_ComImport
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("943759D7-3552-43AD-9C4D-CC2F787CF36E")) IInterfaceComImport_ComImport : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceComImport_ComImport(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceComImport_ComImport>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleTrue_ComImport
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("75DE245B-0CE3-4B07-8761-328906C750B7")) IInterfaceVisibleTrue_ComImport : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleTrue_ComImport(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceVisibleTrue_ComImport>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleFalse_ComImport
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("C73D96C3-B005-42D6-93F5-E30AEE08C66C")) IInterfaceVisibleFalse_ComImport : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleFalse_ComImport(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceVisibleFalse_ComImport>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleTrue_VisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("60B3917B-9CC2-40F2-A975-CD6898DA697F")) IInterfaceVisibleTrue_VisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleTrue_VisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceVisibleTrue_VisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleFalse_VisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("2FC59DDB-B1D0-4678-93AF-6A48E838B705")) IInterfaceVisibleFalse_VisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleFalse_VisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceVisibleFalse_VisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceVisibleTrue_VisibleFalse
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("C82C25FC-FBAD-4EA9-BED1-343C887464B5")) IInterfaceVisibleTrue_VisibleFalse : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceVisibleTrue_VisibleFalse(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceVisibleTrue_VisibleFalse>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// IInterfaceNotPublic_VisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("8A4C1691-5615-4762-8568-481DC671F9CE")) IInterfaceNotPublic_VisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_InterfaceNotPublic_VisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<IInterfaceNotPublic_VisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// Nested Interfaces:
//
//
// INestedInterfaceComImport
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("1D927BC5-1530-4B8E-A183-995425CE4A0A")) INestedInterfaceComImport : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceComImport(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceComImport>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceVisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("39209692-2568-4B1E-A6C8-A5C7F141D278")) INestedInterfaceVisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceVisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceVisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceVisibleFalse
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("1CE4B033-4927-447A-9F91-998357B32ADF")) INestedInterfaceVisibleFalse : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceVisibleFalse(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceVisibleFalse>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceWithoutVisible
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("C770422A-C363-49F1-AAA1-3EC81A452816")) INestedInterfaceWithoutVisible : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceWithoutVisible(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceWithoutVisible>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceNotPublic
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("F776FF8A-0673-49C2-957A-33C2576062ED")) INestedInterfaceNotPublic : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceNotPublic(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceNotPublic>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceNestedInClass
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("B31B4EC1-3B59-41C4-B3A0-CF89638CB837")) INestedInterfaceNestedInClass : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceNestedInClass(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceNestedInClass>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceNestedInClassNoGuid
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("486bcec9-904d-3445-871c-e7084a52eb1a")) INestedInterfaceNestedInClassNoGuid : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceNestedInClassNoGuid(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceNestedInClassNoGuid>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceVisibleTrueNoGuid
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("0ea2cb33-db9f-3655-9240-47ef1dea0f1e")) INestedInterfaceVisibleTrueNoGuid : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceVisibleTrueNoGuid(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceVisibleTrueNoGuid>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceGenericVisibleTrue
// Interface definition and auxiliar function that will call the managed COM.
//
struct __declspec(uuid("CAFBD2FF-710A-4E83-9229-42FA16963424")) INestedInterfaceGenericVisibleTrue : IUnknown
{
STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceGenericVisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
return CCWTestTemplate<INestedInterfaceGenericVisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
// CCW tests for doubly-nested interfaces (outer_inner naming). Each exported
// CCWTest_* function passes the managed object's IUnknown to
// CCWTestTemplate::CallManagedCom, which presumably QI's for the interface
// and invokes Foo -- confirm against the template definition earlier in this file.
//
// INestedInterfaceComImport_ComImport
// Interface definition and auxiliary function that will call the managed COM.
//
struct __declspec(uuid("C57D849A-A1A9-4CDC-A609-789D79F9332C")) INestedInterfaceComImport_ComImport : IUnknown
{
    STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
// Exported test hook; forwards to CCWTestTemplate<INestedInterfaceComImport_ComImport>::CallManagedCom.
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceComImport_ComImport(IUnknown* pUnk, int* fooSuccessVal)
{
    return CCWTestTemplate<INestedInterfaceComImport_ComImport>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceVisibleTrue_ComImport
// Interface definition and auxiliary function that will call the managed COM.
//
struct __declspec(uuid("81F28686-F257-4B7E-A47F-57C9775BE2CE")) INestedInterfaceVisibleTrue_ComImport : IUnknown
{
    STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
// Exported test hook; forwards to CCWTestTemplate<INestedInterfaceVisibleTrue_ComImport>::CallManagedCom.
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceVisibleTrue_ComImport(IUnknown* pUnk, int* fooSuccessVal)
{
    return CCWTestTemplate<INestedInterfaceVisibleTrue_ComImport>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceVisibleFalse_ComImport
// Interface definition and auxiliary function that will call the managed COM.
//
struct __declspec(uuid("FAAB7E6C-8548-429F-AD34-0CEC3EBDD7B7")) INestedInterfaceVisibleFalse_ComImport : IUnknown
{
    STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
// Exported test hook; forwards to CCWTestTemplate<INestedInterfaceVisibleFalse_ComImport>::CallManagedCom.
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceVisibleFalse_ComImport(IUnknown* pUnk, int* fooSuccessVal)
{
    return CCWTestTemplate<INestedInterfaceVisibleFalse_ComImport>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceVisibleTrue_VisibleTrue
// Interface definition and auxiliary function that will call the managed COM.
//
struct __declspec(uuid("BEFD79A9-D8E6-42E4-8228-1892298460D7")) INestedInterfaceVisibleTrue_VisibleTrue : IUnknown
{
    STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
// Exported test hook; forwards to CCWTestTemplate<INestedInterfaceVisibleTrue_VisibleTrue>::CallManagedCom.
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceVisibleTrue_VisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
    return CCWTestTemplate<INestedInterfaceVisibleTrue_VisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
// CCW tests mixing ComVisible(true/false) on outer and inner nested
// interfaces. Each exported CCWTest_* function passes the managed object's
// IUnknown to CCWTestTemplate::CallManagedCom, which presumably QI's for the
// interface and invokes Foo -- confirm against the template definition
// earlier in this file.
//
// INestedInterfaceVisibleFalse_VisibleTrue
// Interface definition and auxiliary function that will call the managed COM.
//
struct __declspec(uuid("5C497454-EA83-4F79-B990-4EB28505E801")) INestedInterfaceVisibleFalse_VisibleTrue : IUnknown
{
    STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
// Exported test hook; forwards to CCWTestTemplate<INestedInterfaceVisibleFalse_VisibleTrue>::CallManagedCom.
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceVisibleFalse_VisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
    return CCWTestTemplate<INestedInterfaceVisibleFalse_VisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceVisibleTrue_VisibleFalse
// Interface definition and auxiliary function that will call the managed COM.
//
struct __declspec(uuid("A17CF08F-EEC4-4EA5-B12C-5A603101415D")) INestedInterfaceVisibleTrue_VisibleFalse : IUnknown
{
    STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
// Exported test hook; forwards to CCWTestTemplate<INestedInterfaceVisibleTrue_VisibleFalse>::CallManagedCom.
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceVisibleTrue_VisibleFalse(IUnknown* pUnk, int* fooSuccessVal)
{
    return CCWTestTemplate<INestedInterfaceVisibleTrue_VisibleFalse>::CallManagedCom(pUnk, fooSuccessVal);
}
//
// INestedInterfaceNotPublic_VisibleTrue
// Interface definition and auxiliary function that will call the managed COM.
//
struct __declspec(uuid("40B723E9-E1BE-4F55-99CD-D2590D191A53")) INestedInterfaceNotPublic_VisibleTrue : IUnknown
{
    STDMETHOD(Foo)(int* fooSuccessVal) = 0;
};
// Exported test hook; forwards to CCWTestTemplate<INestedInterfaceNotPublic_VisibleTrue>::CallManagedCom.
extern "C" DLL_EXPORT HRESULT _stdcall CCWTest_NestedInterfaceNotPublic_VisibleTrue(IUnknown* pUnk, int* fooSuccessVal)
{
    return CCWTestTemplate<INestedInterfaceNotPublic_VisibleTrue>::CallManagedCom(pUnk, fooSuccessVal);
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/tests/palsuite/c_runtime/_snwprintf_s/test16/test16.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test16.c
**
** Purpose: Tests swprintf_s with decimal point format doubles
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../_snwprintf_s.h"
/* memcmp is used to verify the results, so this test is dependent on it. */
/* ditto with wcslen */
// Verifies PAL _snwprintf_s handling of the %f (fixed-point double) conversion,
// covering length modifiers, width, precision, zero-padding, and the '#', '+',
// and ' ' flags. DoDoubleTest (from ../_snwprintf_s.h) formats the value and
// compares against the expected strings -- two expected variants are supplied,
// presumably to tolerate platform formatting differences; confirm in the helper.
PALTEST(c_runtime__snwprintf_s_test16_paltest_snwprintf_test16, "c_runtime/_snwprintf_s/test16/paltest_snwprintf_test16")
{
    double val = 2560.001;  // positive test value
    double neg = -2560.001; // negative test value
    // Standard PAL test prologue.
    if (PAL_Initialize(argc, argv) != 0)
    {
        return FAIL;
    }
    // Length modifiers (l, h, L, I64) must not change double formatting.
    DoDoubleTest(convert("foo %f"), val, convert("foo 2560.001000"),
                 convert("foo 2560.001000"));
    DoDoubleTest(convert("foo %lf"), val, convert("foo 2560.001000"),
                 convert("foo 2560.001000"));
    DoDoubleTest(convert("foo %hf"), val, convert("foo 2560.001000"),
                 convert("foo 2560.001000"));
    DoDoubleTest(convert("foo %Lf"), val, convert("foo 2560.001000"),
                 convert("foo 2560.001000"));
    DoDoubleTest(convert("foo %I64f"), val, convert("foo 2560.001000"),
                 convert("foo 2560.001000"));
    // Field width and left-justification.
    DoDoubleTest(convert("foo %12f"), val, convert("foo 2560.001000"),
                 convert("foo 2560.001000"));
    DoDoubleTest(convert("foo %-12f"), val, convert("foo 2560.001000 "),
                 convert("foo 2560.001000 "));
    // Precision control.
    DoDoubleTest(convert("foo %.1f"), val, convert("foo 2560.0"),
                 convert("foo 2560.0"));
    DoDoubleTest(convert("foo %.8f"), val, convert("foo 2560.00100000"),
                 convert("foo 2560.00100000"));
    // Zero padding and the alternate ('#') form.
    DoDoubleTest(convert("foo %012f"), val, convert("foo 02560.001000"),
                 convert("foo 02560.001000"));
    DoDoubleTest(convert("foo %#f"), val, convert("foo 2560.001000"),
                 convert("foo 2560.001000"));
    // Sign flags ('+' and ' ') on positive and negative values.
    DoDoubleTest(convert("foo %+f"), val, convert("foo +2560.001000"),
                 convert("foo +2560.001000"));
    DoDoubleTest(convert("foo % f"), val, convert("foo 2560.001000"),
                 convert("foo 2560.001000"));
    DoDoubleTest(convert("foo %+f"), neg, convert("foo -2560.001000"),
                 convert("foo -2560.001000"));
    DoDoubleTest(convert("foo % f"), neg, convert("foo -2560.001000"),
                 convert("foo -2560.001000"));
    PAL_Terminate();
    return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test16.c
**
** Purpose: Tests swprintf_s with decimal point format doubles
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../_snwprintf_s.h"
/* memcmp is used to verify the results, so this test is dependent on it. */
/* ditto with wcslen */
// Verifies PAL _snwprintf_s handling of the %f (fixed-point double) conversion,
// covering length modifiers, width, precision, zero-padding, and the '#', '+',
// and ' ' flags. DoDoubleTest (from ../_snwprintf_s.h) formats the value and
// compares against the expected strings -- two expected variants are supplied,
// presumably to tolerate platform formatting differences; confirm in the helper.
PALTEST(c_runtime__snwprintf_s_test16_paltest_snwprintf_test16, "c_runtime/_snwprintf_s/test16/paltest_snwprintf_test16")
{
    double val = 2560.001;  // positive test value
    double neg = -2560.001; // negative test value
    // Standard PAL test prologue.
    if (PAL_Initialize(argc, argv) != 0)
    {
        return FAIL;
    }
    // Length modifiers (l, h, L, I64) must not change double formatting.
    DoDoubleTest(convert("foo %f"), val, convert("foo 2560.001000"),
                 convert("foo 2560.001000"));
    DoDoubleTest(convert("foo %lf"), val, convert("foo 2560.001000"),
                 convert("foo 2560.001000"));
    DoDoubleTest(convert("foo %hf"), val, convert("foo 2560.001000"),
                 convert("foo 2560.001000"));
    DoDoubleTest(convert("foo %Lf"), val, convert("foo 2560.001000"),
                 convert("foo 2560.001000"));
    DoDoubleTest(convert("foo %I64f"), val, convert("foo 2560.001000"),
                 convert("foo 2560.001000"));
    // Field width and left-justification.
    DoDoubleTest(convert("foo %12f"), val, convert("foo 2560.001000"),
                 convert("foo 2560.001000"));
    DoDoubleTest(convert("foo %-12f"), val, convert("foo 2560.001000 "),
                 convert("foo 2560.001000 "));
    // Precision control.
    DoDoubleTest(convert("foo %.1f"), val, convert("foo 2560.0"),
                 convert("foo 2560.0"));
    DoDoubleTest(convert("foo %.8f"), val, convert("foo 2560.00100000"),
                 convert("foo 2560.00100000"));
    // Zero padding and the alternate ('#') form.
    DoDoubleTest(convert("foo %012f"), val, convert("foo 02560.001000"),
                 convert("foo 02560.001000"));
    DoDoubleTest(convert("foo %#f"), val, convert("foo 2560.001000"),
                 convert("foo 2560.001000"));
    // Sign flags ('+' and ' ') on positive and negative values.
    DoDoubleTest(convert("foo %+f"), val, convert("foo +2560.001000"),
                 convert("foo +2560.001000"));
    DoDoubleTest(convert("foo % f"), val, convert("foo 2560.001000"),
                 convert("foo 2560.001000"));
    DoDoubleTest(convert("foo %+f"), neg, convert("foo -2560.001000"),
                 convert("foo -2560.001000"));
    DoDoubleTest(convert("foo % f"), neg, convert("foo -2560.001000"),
                 convert("foo -2560.001000"));
    PAL_Terminate();
    return PASS;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/jit/forwardsub.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
//------------------------------------------------------------------------
// Simple Forward Substitution
//
// This phase tries to reconnect trees that were split early on by
// phases like the importer and inlining. We run it before morph
// to provide more context for morph's tree based optimizations, and
// we run it after the local address visitor because that phase sets
// address exposure for locals and computes (early) ref counts.
//
// The general pattern we look for is
//
// Statement(n):
// GT_ASG(lcl, tree)
// Statement(n+1):
// ... use of lcl ...
//
// where those are the only appearances of lcl and lcl is not address
// exposed.
//
// The "optimization" here transforms this to
//
// ~~Statement(n)~~ (removed)
// Statement(n+1):
// ... use of tree ...
//
// As always our main concerns are throughput, legality, profitability,
// and ensuring downstream phases do not get confused.
//
// For throughput, we try and early out on illegal or unprofitable cases
// before doing the more costly bits of analysis. We only scan a limited
// amount of IR and just give up if we can't find what we are looking for.
//
// If we're successful we will backtrack a bit, to try and catch cases like
//
// Statement(n):
// lcl1 = tree1
// Statement(n+1):
// lcl2 = tree2
// Statement(n+2):
// use ... lcl1 ... use ... lcl2 ...
//
// If we can forward sub tree2, then the def and use of lcl1 become
// adjacent.
//
// For legality we must show that evaluating "tree" at its new position
// can't change any observable behavior. This largely means running an
// interference analysis between tree and the portion of Statement(n+1)
// that will evaluate before "tree". This analysis is complicated by some
// missing flags on trees, in particular modelling the potential uses
// of exposed locals. We run supplementary scans looking for those.
//
// Ideally we'd update the tree with our findings, or better yet ensure
// that upstream phases didn't leave the wrong flags.
//
// For profitability we first try and avoid code growth. We do this
// by only substituting in cases where lcl has exactly one def and one use.
// This info is computed for us by the RCS_Early ref counting done during
// the immediately preceding fgMarkAddressExposedLocals phase.
//
// Because of this, once we've substituted "tree" we know that lcl is dead
// and we can remove the assignment statement.
//
// Even with ref count screening, we don't know for sure where the
// single use of local might be, so we have to search for it.
//
// We also take pains not to create overly large trees as the recursion
// done by morph incorporates a lot of state; deep trees may lead to
// stack overflows.
//
// There are a fair number of ad-hoc restrictions on what can be
// substituted where; these reflect various blemishes or implicit
// contracts in our IR shapes that we should either remove or mandate.
//
// Possible enhancements:
// * Allow fwd sub of "simple, cheap" trees when there's more than one use.
// * Search more widely for the use.
// * Use height/depth to avoid blowing morph's recursion, rather than tree size.
// * Sub across a block boundary if successor block is unique, join-free,
// and in the same EH region.
// * Rerun this later, after we have built SSA, and handle single-def single-use
// from SSA perspective.
// * Fix issue in morph that can unsoundly reorder call args, and remove
// extra effects computation from ForwardSubVisitor.
// * We can be more aggressive with GTF_IND_INVARIANT / GTF_IND_NONFAULTING
// nodes--even though they may be marked GTF_GLOB_REF, they can be freely
// reordered. See if this offers any benefit.
//
//------------------------------------------------------------------------
//------------------------------------------------------------------------
// fgForwardSub: run forward substitution in this method
//
// Returns:
// suitable phase status
//
PhaseStatus Compiler::fgForwardSub()
{
    // Phase is a no-op unless optimizations are enabled.
    if (!opts.OptimizationEnabled())
    {
        return PhaseStatus::MODIFIED_NOTHING;
    }
#if defined(DEBUG)
    // Config knob to disable the phase for debugging/triage.
    if (JitConfig.JitNoForwardSub() > 0)
    {
        return PhaseStatus::MODIFIED_NOTHING;
    }
#endif
    // Visit every block, remembering whether any substitution happened anywhere.
    bool madeChanges = false;
    for (BasicBlock* const block : Blocks())
    {
        JITDUMP("\n\n===> " FMT_BB "\n", block->bbNum);
        if (fgForwardSubBlock(block))
        {
            madeChanges = true;
        }
    }
    return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING;
}
//------------------------------------------------------------------------
// fgForwardSubBlock: run forward substitution in this block
//
// Arguments:
// block -- block to process
//
// Returns:
// true if any IR was modified
//
bool Compiler::fgForwardSubBlock(BasicBlock* block)
{
    // Walk statements in order. The last statement is never a candidate,
    // as there is no following statement to substitute into.
    Statement* stmt = block->firstStmt();
    Statement* lastStmt = block->lastStmt();
    bool changed = false;
    while (stmt != lastStmt)
    {
        // Capture neighbors up front: if we substitute, 'stmt' is removed
        // and unlinked from the statement list below.
        Statement* const prevStmt = stmt->GetPrevStmt();
        Statement* const nextStmt = stmt->GetNextStmt();
        bool const substituted = fgForwardSubStatement(stmt);
        if (substituted)
        {
            // The computation was forwarded into the next statement, so the
            // defining statement is now dead.
            fgRemoveStmt(block, stmt);
            changed = true;
        }
        // Try backtracking if we substituted: if the previous statement is an
        // assignment, its def and (possible) use may now be adjacent, exposing
        // another substitution opportunity.
        //
        if (substituted && (prevStmt != lastStmt) && prevStmt->GetRootNode()->OperIs(GT_ASG))
        {
            // Yep, backtrack.
            //
            stmt = prevStmt;
        }
        else
        {
            // Move on to the next.
            //
            stmt = nextStmt;
        }
    }
    return changed;
}
//------------------------------------------------------------------------
// ForwardSubVisitor: tree visitor to locate uses of a local in a tree
//
// Also computes the set of side effects that happen "before" the use,
// and counts the size of the tree.
//
// Effects accounting is complicated by missing flags and by the need
// to avoid introducing interfering call args.
//
class ForwardSubVisitor final : public GenTreeVisitor<ForwardSubVisitor>
{
public:
    enum
    {
        ComputeStack = true, // needed so m_ancestors is maintained for parent lookups
        DoPostOrder = true,
        UseExecutionOrder = true
    };
    // Tracks the local 'lclNum'; all counters/flags start empty.
    ForwardSubVisitor(Compiler* compiler, unsigned lclNum)
        : GenTreeVisitor<ForwardSubVisitor>(compiler)
        , m_use(nullptr)
        , m_node(nullptr)
        , m_parentNode(nullptr)
        , m_callAncestor(nullptr)
        , m_lclNum(lclNum)
        , m_useCount(0)
        , m_useFlags(GTF_EMPTY)
        , m_accumulatedFlags(GTF_EMPTY)
        , m_treeSize(0)
    {
    }
    // Visits nodes in execution order: counts nodes, records the single "real"
    // use of the tracked local (skipping defs, address-ofs, and indirect-call
    // targets), and snapshots the effects of everything evaluated before it.
    Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user)
    {
        m_treeSize++;
        GenTree* const node = *use;
        if (node->OperIs(GT_LCL_VAR))
        {
            unsigned const lclNum = node->AsLclVarCommon()->GetLclNum();
            if (lclNum == m_lclNum)
            {
                m_useCount++;
                // Screen out contextual "uses"
                //
                GenTree* const parent = m_ancestors.Top(1);
                bool const isDef = parent->OperIs(GT_ASG) && (parent->gtGetOp1() == node);
                bool const isAddr = parent->OperIs(GT_ADDR);
                bool isCallTarget = false;
                // Quirk:
                //
                // fgGetStubAddrArg cannot handle complex trees (it calls gtClone)
                //
                if (parent->IsCall())
                {
                    GenTreeCall* const parentCall = parent->AsCall();
                    isCallTarget = (parentCall->gtCallType == CT_INDIRECT) && (parentCall->gtCallAddr == node);
                }
                if (!isDef && !isAddr && !isCallTarget)
                {
                    // Found the real use: remember it, its parent, and the
                    // effects accumulated so far (i.e. "before" the use).
                    m_node = node;
                    m_use = use;
                    m_useFlags = m_accumulatedFlags;
                    m_parentNode = parent;
                    // If this use contributes to a call arg we need to
                    // remember the call and handle it specially when we
                    // see it later in the postorder walk.
                    //
                    for (int i = 1; i < m_ancestors.Height(); i++)
                    {
                        if (m_ancestors.Top(i)->IsCall())
                        {
                            m_callAncestor = m_ancestors.Top(i)->AsCall();
                            break;
                        }
                    }
                }
            }
        }
        if (node->OperIsLocal())
        {
            unsigned const lclNum = node->AsLclVarCommon()->GetLclNum();
            // Uses of address-exposed locals are modelled as global refs.
            //
            LclVarDsc* const varDsc = m_compiler->lvaGetDesc(lclNum);
            if (varDsc->IsAddressExposed())
            {
                m_accumulatedFlags |= GTF_GLOB_REF;
            }
        }
        // Is this the use's call ancestor?
        //
        if ((m_callAncestor != nullptr) && (node == m_callAncestor))
        {
            // To be conservative and avoid issues with morph
            // reordering call args, we merge in effects of all args
            // to this call.
            //
            // Remove this if/when morph's arg sorting is fixed.
            //
            GenTreeFlags oldUseFlags = m_useFlags;
            if (m_callAncestor->gtCallThisArg != nullptr)
            {
                m_useFlags |= (m_callAncestor->gtCallThisArg->GetNode()->gtFlags & GTF_GLOB_EFFECT);
            }
            for (GenTreeCall::Use& use : m_callAncestor->Args())
            {
                m_useFlags |= (use.GetNode()->gtFlags & GTF_GLOB_EFFECT);
            }
            if (oldUseFlags != m_useFlags)
            {
                JITDUMP(" [added other call arg use flags: 0x%x]", m_useFlags & ~oldUseFlags);
            }
        }
        m_accumulatedFlags |= (node->gtFlags & GTF_GLOB_EFFECT);
        return fgWalkResult::WALK_CONTINUE;
    }
    // Number of appearances of the tracked local (including defs/addr-ofs).
    unsigned GetUseCount() const
    {
        return m_useCount;
    }
    // The GT_LCL_VAR node for the single real use, or nullptr if none found.
    GenTree* GetNode() const
    {
        return m_node;
    }
    // The use edge, suitable for in-place substitution.
    GenTree** GetUse() const
    {
        return m_use;
    }
    // Immediate parent of the use node.
    GenTree* GetParentNode() const
    {
        return m_parentNode;
    }
    // Effects of everything evaluated before the use (plus call-arg merge above).
    GenTreeFlags GetFlags() const
    {
        return m_useFlags;
    }
    // True if the use is directly a call operand.
    bool IsCallArg() const
    {
        return m_parentNode->IsCall();
    }
    // Total number of nodes visited in the walked tree.
    unsigned GetComplexity() const
    {
        return m_treeSize;
    }
private:
    GenTree** m_use;                 // use edge (for substitution)
    GenTree* m_node;                 // the GT_LCL_VAR use
    GenTree* m_parentNode;           // parent of the use
    GenTreeCall* m_callAncestor;     // nearest enclosing call of the use, if any
    unsigned m_lclNum;               // local being searched for
    unsigned m_useCount;             // appearances seen
    GenTreeFlags m_useFlags;         // effects before the use
    GenTreeFlags m_accumulatedFlags; // running effects over the whole walk
    unsigned m_treeSize;             // nodes visited
};
//------------------------------------------------------------------------
// EffectsVisitor: tree visitor to compute missing effects of a tree.
//
class EffectsVisitor final : public GenTreeVisitor<EffectsVisitor>
{
public:
    enum
    {
        DoPostOrder = true,
        UseExecutionOrder = true
    };
    EffectsVisitor(Compiler* compiler) : GenTreeVisitor<EffectsVisitor>(compiler), m_flags(GTF_EMPTY)
    {
    }
    // Accumulates each node's side-effect flags; additionally treats any
    // appearance of an address-exposed local as a global reference, since
    // (per the phase header) GTF_GLOB_REF may be missing on such nodes.
    Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user)
    {
        GenTree* const node = *use;
        m_flags |= node->gtFlags & GTF_ALL_EFFECT;
        if (node->OperIsLocal())
        {
            unsigned const lclNum = node->AsLclVarCommon()->GetLclNum();
            LclVarDsc* const varDsc = m_compiler->lvaGetDesc(lclNum);
            if (varDsc->IsAddressExposed())
            {
                m_flags |= GTF_GLOB_REF;
            }
        }
        return fgWalkResult::WALK_CONTINUE;
    }
    // Union of effect flags over the entire walked tree.
    GenTreeFlags GetFlags()
    {
        return m_flags;
    }
private:
    GenTreeFlags m_flags; // GTF_ALL_EFFECT union (plus GTF_GLOB_REF for AX locals)
};
//------------------------------------------------------------------------
// fgForwardSubStatement: forward substitute this statement's
// computation to the next statement, if legal and profitable
//
// arguments:
// stmt - statement in question
//
// Returns:
// true if statement computation was forwarded.
// caller is responsible for removing the now-dead statement.
//
bool Compiler::fgForwardSubStatement(Statement* stmt)
{
    // Is this tree a def of a single use, unaliased local?
    //
    GenTree* const rootNode = stmt->GetRootNode();
    if (!rootNode->OperIs(GT_ASG))
    {
        return false;
    }
    GenTree* const lhsNode = rootNode->gtGetOp1();
    if (!lhsNode->OperIs(GT_LCL_VAR))
    {
        return false;
    }
    JITDUMP(" [%06u]: ", dspTreeID(rootNode))
    unsigned const lclNum = lhsNode->AsLclVarCommon()->GetLclNum();
    LclVarDsc* const varDsc = lvaGetDesc(lclNum);
    // Leave pinned locals alone.
    // This is just a perf opt -- we shouldn't find any uses.
    //
    if (varDsc->lvPinned)
    {
        JITDUMP(" pinned local\n");
        return false;
    }
    // Only fwd sub if we expect no code duplication.
    // We expect one def and one use.
    //
    if (varDsc->lvRefCnt(RCS_EARLY) != 2)
    {
        JITDUMP(" not asg (single-use lcl)\n");
        return false;
    }
    // And local is unaliased.
    //
    if (varDsc->IsAddressExposed())
    {
        JITDUMP(" not asg (unaliased single-use lcl)\n");
        return false;
    }
    // Could handle this case --perhaps-- but we'd want to update ref counts.
    //
    if (lvaIsImplicitByRefLocal(lclNum))
    {
        JITDUMP(" implicit by-ref local\n");
        return false;
    }
    // Check the tree to substitute.
    //
    // We could just extract the value portion and forward sub that,
    // but cleanup would be more complicated.
    //
    GenTree* const rhsNode = rootNode->gtGetOp2();
    GenTree* fwdSubNode = rhsNode;
    // Can't substitute a qmark (unless the use is RHS of an assign... could check for this)
    // Can't substitute GT_CATCH_ARG.
    // Can't substitute GT_LCLHEAP.
    //
    // Don't substitute a no return call (trips up morph in some cases).
    //
    if (fwdSubNode->OperIs(GT_QMARK, GT_CATCH_ARG, GT_LCLHEAP))
    {
        JITDUMP(" tree to sub is qmark, catch arg, or lcl heap\n");
        return false;
    }
    if (fwdSubNode->IsCall() && fwdSubNode->AsCall()->IsNoReturn())
    {
        JITDUMP(" tree to sub is a 'no return' call\n");
        return false;
    }
    // Bail if sub node has embedded assignment.
    //
    if ((fwdSubNode->gtFlags & GTF_ASG) != 0)
    {
        JITDUMP(" tree to sub has effects\n");
        return false;
    }
    // Bail if sub node has mismatched types.
    // Might be able to tolerate these by retyping.
    //
    if (lhsNode->TypeGet() != fwdSubNode->TypeGet())
    {
        JITDUMP(" mismatched types (assignment)\n");
        return false;
    }
    if (gtGetStructHandleIfPresent(fwdSubNode) != gtGetStructHandleIfPresent(lhsNode))
    {
        JITDUMP(" would change struct handle (assignment)\n");
        return false;
    }
    // If lhs is multi-reg, rhs must be too.
    //
    if (lhsNode->IsMultiRegNode() && !fwdSubNode->IsMultiRegNode())
    {
        JITDUMP(" would change multi-reg (assignment)\n");
        return false;
    }
    // Don't fwd sub overly large trees.
    // Size limit here is ad-hoc. Need to tune.
    //
    // Consider instead using the height of the fwdSubNode.
    //
    unsigned const nodeLimit = 16;
    if (gtComplexityExceeds(&fwdSubNode, nodeLimit))
    {
        JITDUMP(" tree to sub has more than %u nodes\n", nodeLimit);
        return false;
    }
    // Local and tree to substitute seem suitable.
    // See if the next statement contains the one and only use.
    //
    Statement* const nextStmt = stmt->GetNextStmt();
    // We often see stale flags, eg call flags after inlining.
    // Try and clean these up.
    //
    gtUpdateStmtSideEffects(nextStmt);
    gtUpdateStmtSideEffects(stmt);
    // Scan for the (single) use.
    //
    ForwardSubVisitor fsv(this, lclNum);
    fsv.WalkTree(nextStmt->GetRootNodePointer(), nullptr);
    // LclMorph (via RCS_Early) said there was just one use.
    // It had better have gotten this right.
    //
    assert(fsv.GetUseCount() <= 1);
    if ((fsv.GetUseCount() == 0) || (fsv.GetNode() == nullptr))
    {
        JITDUMP(" no next stmt use\n");
        return false;
    }
    JITDUMP(" [%06u] is only use of [%06u] (V%02u) ", dspTreeID(fsv.GetNode()), dspTreeID(lhsNode), lclNum);
    // If next statement already has a large tree, hold off
    // on making it even larger.
    //
    // We use total node count. Consider instead using the depth of the use and the
    // height of the fwdSubNode.
    //
    unsigned const nextTreeLimit = 200;
    if ((fsv.GetComplexity() > nextTreeLimit) && gtComplexityExceeds(&fwdSubNode, 1))
    {
        JITDUMP(" next stmt tree is too large (%u)\n", fsv.GetComplexity());
        return false;
    }
    // Next statement seems suitable.
    // See if we can forward sub without changing semantics.
    //
    // Note: currently unused below.
    GenTree* const nextRootNode = nextStmt->GetRootNode();
    // Bail if types disagree.
    // Might be able to tolerate these by retyping.
    //
    if (fsv.GetNode()->TypeGet() != fwdSubNode->TypeGet())
    {
        JITDUMP(" mismatched types (substitution)\n");
        return false;
    }
    // We can forward sub if
    //
    // the value of the fwdSubNode can't change and its evaluation won't cause side effects,
    //
    // or,
    //
    // if the next tree can't change the value of fwdSubNode or be impacted by fwdSubNode effects
    //
    const bool fwdSubNodeInvariant = ((fwdSubNode->gtFlags & GTF_ALL_EFFECT) == 0);
    const bool nextTreeIsPureUpToUse = ((fsv.GetFlags() & (GTF_EXCEPT | GTF_GLOB_REF | GTF_CALL)) == 0);
    if (!fwdSubNodeInvariant && !nextTreeIsPureUpToUse)
    {
        // Fwd sub may impact global values and or reorder exceptions...
        //
        JITDUMP(" potentially interacting effects\n");
        return false;
    }
    // If we're relying on purity of fwdSubNode for legality of forward sub,
    // do some extra checks for global uses that might not be reflected in the flags.
    //
    // TODO: remove this once we can trust upstream phases and/or gtUpdateStmtSideEffects
    // to set GTF_GLOB_REF properly.
    //
    if (fwdSubNodeInvariant && ((fsv.GetFlags() & (GTF_CALL | GTF_ASG)) != 0))
    {
        EffectsVisitor ev(this);
        ev.WalkTree(&fwdSubNode, nullptr);
        if ((ev.GetFlags() & GTF_GLOB_REF) != 0)
        {
            JITDUMP(" potentially interacting effects (AX locals)\n");
            return false;
        }
    }
    // Finally, profitability checks.
    //
    // These conditions can be checked earlier in the final version to save some throughput.
    // Perhaps allowing for bypass with jit stress.
    //
    // If fwdSubNode is an address-exposed local, forwarding it may lose optimizations.
    // (maybe similar for dner?)
    //
    if (fwdSubNode->IsLocal())
    {
        unsigned const fwdLclNum = fwdSubNode->AsLclVarCommon()->GetLclNum();
        LclVarDsc* const fwdVarDsc = lvaGetDesc(fwdLclNum);
        if (fwdVarDsc->IsAddressExposed())
        {
            JITDUMP(" V%02u is address exposed\n", fwdLclNum);
            return false;
        }
    }
    // Optimization:
    //
    // If we are about to substitute GT_OBJ, see if we can simplify it first.
    // Not doing so can lead to regressions...
    //
    // Hold off on doing this for call args for now (per issue #51569).
    // Hold off on OBJ(GT_LCL_ADDR).
    //
    if (fwdSubNode->OperIs(GT_OBJ) && !fsv.IsCallArg() && fwdSubNode->gtGetOp1()->OperIs(GT_ADDR))
    {
        const bool destroyNodes = false;
        GenTree* const optTree = fgMorphTryFoldObjAsLclVar(fwdSubNode->AsObj(), destroyNodes);
        if (optTree != nullptr)
        {
            JITDUMP(" [folding OBJ(ADDR(LCL...))]");
            fwdSubNode = optTree;
        }
    }
    // Quirks:
    //
    // We may sometimes lose or change a type handle. Avoid substituting if so.
    //
    if (gtGetStructHandleIfPresent(fwdSubNode) != gtGetStructHandleIfPresent(fsv.GetNode()))
    {
        JITDUMP(" would change struct handle (substitution)\n");
        return false;
    }
#ifdef FEATURE_SIMD
    // Don't forward sub a SIMD call under a HW intrinsic node.
    // LowerCallStruct is not prepared for this.
    //
    if (fwdSubNode->IsCall() && varTypeIsSIMD(fwdSubNode->TypeGet()) && fsv.GetParentNode()->OperIs(GT_HWINTRINSIC))
    {
        JITDUMP(" simd returning call; hw intrinsic\n");
        return false;
    }
#endif // FEATURE_SIMD
    // There are implicit assumptions downstream on where/how multi-reg ops
    // can appear.
    //
    // Eg if fwdSubNode is a multi-reg call, parent node must be GT_ASG and the
    // local being defined must be specially marked up.
    //
    if (fwdSubNode->IsMultiRegCall())
    {
        GenTree* const parentNode = fsv.GetParentNode();
        if (!parentNode->OperIs(GT_ASG))
        {
            JITDUMP(" multi-reg call, parent not asg\n");
            return false;
        }
        GenTree* const parentNodeLHS = parentNode->gtGetOp1();
        if (!parentNodeLHS->OperIs(GT_LCL_VAR))
        {
            JITDUMP(" multi-reg call, parent not asg(lcl, ...)\n");
            return false;
        }
#if defined(TARGET_X86) || defined(TARGET_ARM)
        if (fwdSubNode->TypeGet() == TYP_LONG)
        {
            JITDUMP(" TYP_LONG fwd sub node, target is x86/arm\n");
            return false;
        }
#endif // defined(TARGET_X86) || defined(TARGET_ARM)
        // Mark up the destination local so downstream phases treat it as multi-reg.
        GenTreeLclVar* const parentNodeLHSLocal = parentNodeLHS->AsLclVar();
        unsigned const lhsLclNum = parentNodeLHSLocal->GetLclNum();
        LclVarDsc* const lhsVarDsc = lvaGetDesc(lhsLclNum);
        JITDUMP(" [marking V%02u as multi-reg-ret]", lhsLclNum);
        lhsVarDsc->lvIsMultiRegRet = true;
        parentNodeLHSLocal->SetMultiReg();
    }
    // If a method returns a multi-reg type, only forward sub locals,
    // and ensure the local and operand have the required markup.
    //
    // (see eg impFixupStructReturnType)
    //
    if (compMethodReturnsMultiRegRetType() && fsv.GetParentNode()->OperIs(GT_RETURN))
    {
        if (!fwdSubNode->OperIs(GT_LCL_VAR))
        {
            JITDUMP(" parent is return, fwd sub node is not lcl var\n");
            return false;
        }
#if defined(TARGET_X86) || defined(TARGET_ARM)
        if (fwdSubNode->TypeGet() == TYP_LONG)
        {
            JITDUMP(" TYP_LONG fwd sub node, target is x86/arm\n");
            return false;
        }
#endif // defined(TARGET_X86) || defined(TARGET_ARM)
        GenTreeLclVar* const fwdSubNodeLocal = fwdSubNode->AsLclVar();
        unsigned const fwdLclNum = fwdSubNodeLocal->GetLclNum();
        LclVarDsc* const fwdVarDsc = lvaGetDesc(fwdLclNum);
        JITDUMP(" [marking V%02u as multi-reg-ret]", fwdLclNum);
        fwdVarDsc->lvIsMultiRegRet = true;
        fwdSubNodeLocal->SetMultiReg();
        fwdSubNodeLocal->gtFlags |= GTF_DONT_CSE;
    }
    // If the use is a multi-reg arg, don't forward sub non-locals.
    //
    if (fsv.GetNode()->IsMultiRegNode() && !fwdSubNode->IsMultiRegNode())
    {
        JITDUMP(" would change multi-reg (substitution)\n");
        return false;
    }
    // If the initial local has truncate on store semantics, we need to replicate
    // that here with a cast.
    //
    if (varDsc->lvNormalizeOnStore() && fgCastNeeded(fwdSubNode, varDsc->TypeGet()))
    {
        JITDUMP(" [adding cast for normalize on store]");
        fwdSubNode = gtNewCastNode(TYP_INT, fwdSubNode, false, varDsc->TypeGet());
    }
    // Looks good, forward sub!
    //
    // Splice the (possibly adjusted) RHS tree into the use edge found by the visitor.
    GenTree** use = fsv.GetUse();
    *use = fwdSubNode;
    if (!fwdSubNodeInvariant)
    {
        gtUpdateStmtSideEffects(nextStmt);
    }
    JITDUMP(" -- fwd subbing [%06u]; new next stmt is\n", dspTreeID(fwdSubNode));
    DISPSTMT(nextStmt);
    return true;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
//------------------------------------------------------------------------
// Simple Forward Substitution
//
// This phase tries to reconnect trees that were split early on by
// phases like the importer and inlining. We run it before morph
// to provide more context for morph's tree based optimizations, and
// we run it after the local address visitor because that phase sets
// address exposure for locals and computes (early) ref counts.
//
// The general pattern we look for is
//
// Statement(n):
// GT_ASG(lcl, tree)
// Statement(n+1):
// ... use of lcl ...
//
// where those are the only appearances of lcl and lcl is not address
// exposed.
//
// The "optimization" here transforms this to
//
// ~~Statement(n)~~ (removed)
// Statement(n+1):
// ... use of tree ...
//
// As always our main concerns are throughput, legality, profitability,
// and ensuring downstream phases do not get confused.
//
// For throughput, we try and early out on illegal or unprofitable cases
// before doing the more costly bits of analysis. We only scan a limited
// amount of IR and just give up if we can't find what we are looking for.
//
// If we're successful we will backtrack a bit, to try and catch cases like
//
// Statement(n):
// lcl1 = tree1
// Statement(n+1):
// lcl2 = tree2
// Statement(n+2):
// use ... lcl1 ... use ... lcl2 ...
//
// If we can forward sub tree2, then the def and use of lcl1 become
// adjacent.
//
// For legality we must show that evaluating "tree" at its new position
// can't change any observable behavior. This largely means running an
// interference analysis between tree and the portion of Statement(n+1)
// that will evaluate before "tree". This analysis is complicated by some
// missing flags on trees, in particular modelling the potential uses
// of exposed locals. We run supplementary scans looking for those.
//
// Ideally we'd update the tree with our findings, or better yet ensure
// that upstream phases didn't leave the wrong flags.
//
// For profitability we first try and avoid code growth. We do this
// by only substituting in cases where lcl has exactly one def and one use.
// This info is computed for us by the RCS_Early ref counting done during
// the immediately preceding fgMarkAddressExposedLocals phase.
//
// Because of this, once we've substituted "tree" we know that lcl is dead
// and we can remove the assignment statement.
//
// Even with ref count screening, we don't know for sure where the
// single use of local might be, so we have to search for it.
//
// We also take pains not to create overly large trees as the recursion
// done by morph incorporates a lot of state; deep trees may lead to
// stack overflows.
//
// There are a fair number of ad-hoc restrictions on what can be
// substituted where; these reflect various blemishes or implicit
// contracts in our IR shapes that we should either remove or mandate.
//
// Possible enhancements:
// * Allow fwd sub of "simple, cheap" trees when there's more than one use.
// * Search more widely for the use.
// * Use height/depth to avoid blowing morph's recursion, rather than tree size.
// * Sub across a block boundary if successor block is unique, join-free,
// and in the same EH region.
// * Rerun this later, after we have built SSA, and handle single-def single-use
// from SSA perspective.
// * Fix issue in morph that can unsoundly reorder call args, and remove
// extra effects computation from ForwardSubVisitor.
// * We can be more aggressive with GTF_IND_INVARIANT / GTF_IND_NONFAULTING
// nodes--even though they may be marked GTF_GLOB_REF, they can be freely
// reordered. See if this offers any benefit.
//
//------------------------------------------------------------------------
//------------------------------------------------------------------------
// fgForwardSub: run forward substitution in this method
//
// Returns:
// suitable phase status
//
PhaseStatus Compiler::fgForwardSub()
{
    // The phase runs only when we're optimizing, and (in debug builds)
    // only when it hasn't been disabled via config.
    //
    bool runPhase = opts.OptimizationEnabled();

#if defined(DEBUG)
    runPhase &= (JitConfig.JitNoForwardSub() <= 0);
#endif

    if (!runPhase)
    {
        return PhaseStatus::MODIFIED_NOTHING;
    }

    // Visit each block and attempt substitutions within it.
    //
    bool madeChanges = false;

    for (BasicBlock* const block : Blocks())
    {
        JITDUMP("\n\n===> " FMT_BB "\n", block->bbNum);
        madeChanges |= fgForwardSubBlock(block);
    }

    return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING;
}
//------------------------------------------------------------------------
// fgForwardSubBlock: run forward substitution in this block
//
// Arguments:
// block -- block to process
//
// Returns:
// true if any IR was modified
//
bool Compiler::fgForwardSubBlock(BasicBlock* block)
{
    Statement* const lastStmt    = block->lastStmt();
    bool             madeChanges = false;

    for (Statement* stmt = block->firstStmt(); stmt != lastStmt;)
    {
        // Capture the neighbors up front: a successful substitution removes
        // stmt from the list, after which its links are no longer usable.
        //
        Statement* const prevStmt    = stmt->GetPrevStmt();
        Statement* const nextStmt    = stmt->GetNextStmt();
        bool const       substituted = fgForwardSubStatement(stmt);

        if (substituted)
        {
            fgRemoveStmt(block, stmt);
            madeChanges = true;
        }

        // After a successful substitution, revisit the previous statement if
        // it is an assignment -- its def may now be adjacent to its use.
        // (prevStmt == lastStmt means stmt was the block's first statement,
        // since the statement list links circularly.) Otherwise, advance.
        //
        bool const backtrack = substituted && (prevStmt != lastStmt) && prevStmt->GetRootNode()->OperIs(GT_ASG);

        stmt = backtrack ? prevStmt : nextStmt;
    }

    return madeChanges;
}
//------------------------------------------------------------------------
// ForwardSubVisitor: tree visitor to locate uses of a local in a tree
//
// Also computes the set of side effects that happen "before" the use,
// and counts the size of the tree.
//
// Effects accounting is complicated by missing flags and by the need
// to avoid introducing interfering call args.
//
class ForwardSubVisitor final : public GenTreeVisitor<ForwardSubVisitor>
{
public:
    enum
    {
        ComputeStack = true,      // m_ancestors is needed to find the use's parent / enclosing call
        DoPostOrder = true,
        UseExecutionOrder = true  // effects must be accumulated in evaluation order
    };

    // compiler -- compiler instance
    // lclNum   -- local whose single use we are searching for
    ForwardSubVisitor(Compiler* compiler, unsigned lclNum)
        : GenTreeVisitor<ForwardSubVisitor>(compiler)
        , m_use(nullptr)
        , m_node(nullptr)
        , m_parentNode(nullptr)
        , m_callAncestor(nullptr)
        , m_lclNum(lclNum)
        , m_useCount(0)
        , m_useFlags(GTF_EMPTY)
        , m_accumulatedFlags(GTF_EMPTY)
        , m_treeSize(0)
    {
    }

    // PostOrderVisit: look for the use of m_lclNum; accumulate effect flags
    // of everything that executes before it; count nodes as we go.
    Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user)
    {
        m_treeSize++;

        GenTree* const node = *use;

        if (node->OperIs(GT_LCL_VAR))
        {
            unsigned const lclNum = node->AsLclVarCommon()->GetLclNum();

            if (lclNum == m_lclNum)
            {
                m_useCount++;

                // Screen out contextual "uses"
                //
                GenTree* const parent = m_ancestors.Top(1);
                bool const isDef = parent->OperIs(GT_ASG) && (parent->gtGetOp1() == node);
                bool const isAddr = parent->OperIs(GT_ADDR);

                bool isCallTarget = false;

                // Quirk:
                //
                // fgGetStubAddrArg cannot handle complex trees (it calls gtClone)
                //
                if (parent->IsCall())
                {
                    GenTreeCall* const parentCall = parent->AsCall();
                    isCallTarget = (parentCall->gtCallType == CT_INDIRECT) && (parentCall->gtCallAddr == node);
                }

                if (!isDef && !isAddr && !isCallTarget)
                {
                    // Record the use. Note m_useFlags snapshots the effects
                    // accumulated so far, i.e. the effects of everything that
                    // evaluates before this use.
                    m_node = node;
                    m_use = use;
                    m_useFlags = m_accumulatedFlags;
                    m_parentNode = parent;

                    // If this use contributes to a call arg we need to
                    // remember the call and handle it specially when we
                    // see it later in the postorder walk.
                    //
                    for (int i = 1; i < m_ancestors.Height(); i++)
                    {
                        if (m_ancestors.Top(i)->IsCall())
                        {
                            m_callAncestor = m_ancestors.Top(i)->AsCall();
                            break;
                        }
                    }
                }
            }
        }

        if (node->OperIsLocal())
        {
            unsigned const lclNum = node->AsLclVarCommon()->GetLclNum();

            // Uses of address-exposed locals are modelled as global refs.
            //
            LclVarDsc* const varDsc = m_compiler->lvaGetDesc(lclNum);

            if (varDsc->IsAddressExposed())
            {
                m_accumulatedFlags |= GTF_GLOB_REF;
            }
        }

        // Is this the use's call ancestor?
        //
        if ((m_callAncestor != nullptr) && (node == m_callAncestor))
        {
            // To be conservative and avoid issues with morph
            // reordering call args, we merge in effects of all args
            // to this call.
            //
            // Remove this if/when morph's arg sorting is fixed.
            //
            GenTreeFlags oldUseFlags = m_useFlags;

            if (m_callAncestor->gtCallThisArg != nullptr)
            {
                m_useFlags |= (m_callAncestor->gtCallThisArg->GetNode()->gtFlags & GTF_GLOB_EFFECT);
            }

            for (GenTreeCall::Use& use : m_callAncestor->Args())
            {
                m_useFlags |= (use.GetNode()->gtFlags & GTF_GLOB_EFFECT);
            }

            if (oldUseFlags != m_useFlags)
            {
                JITDUMP(" [added other call arg use flags: 0x%x]", m_useFlags & ~oldUseFlags);
            }
        }

        m_accumulatedFlags |= (node->gtFlags & GTF_GLOB_EFFECT);

        return fgWalkResult::WALK_CONTINUE;
    }

    // Number of (non-contextual and contextual) appearances of the local seen.
    unsigned GetUseCount() const
    {
        return m_useCount;
    }

    // The use node found, or nullptr if none.
    GenTree* GetNode() const
    {
        return m_node;
    }

    // Pointer to the use edge, suitable for in-place substitution.
    GenTree** GetUse() const
    {
        return m_use;
    }

    // Parent of the use node.
    GenTree* GetParentNode() const
    {
        return m_parentNode;
    }

    // Effects of everything that evaluates before the use (see PostOrderVisit).
    GenTreeFlags GetFlags() const
    {
        return m_useFlags;
    }

    // True if the use feeds a call.
    bool IsCallArg() const
    {
        return m_parentNode->IsCall();
    }

    // Total number of nodes visited in the statement's tree.
    unsigned GetComplexity() const
    {
        return m_treeSize;
    }

private:
    GenTree**    m_use;              // edge pointing at the use
    GenTree*     m_node;             // the use node itself
    GenTree*     m_parentNode;       // parent of the use
    GenTreeCall* m_callAncestor;     // nearest enclosing call of the use, if any
    unsigned     m_lclNum;           // local being searched for
    unsigned     m_useCount;         // appearances of the local seen so far
    GenTreeFlags m_useFlags;         // effects accumulated before the use
    GenTreeFlags m_accumulatedFlags; // effects accumulated over the whole walk
    unsigned     m_treeSize;         // node count of the tree
};
//------------------------------------------------------------------------
// EffectsVisitor: tree visitor to compute missing effects of a tree.
//
class EffectsVisitor final : public GenTreeVisitor<EffectsVisitor>
{
public:
    enum
    {
        DoPostOrder = true,
        UseExecutionOrder = true
    };

    EffectsVisitor(Compiler* compiler) : GenTreeVisitor<EffectsVisitor>(compiler), m_accumulatedFlags(GTF_EMPTY)
    {
    }

    // PostOrderVisit: fold each visited node's effect flags into the
    // running total. A use of an address-exposed local is additionally
    // modelled as a global reference, since it may be read or written
    // through an alias.
    Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user)
    {
        GenTree* const tree = *use;

        m_accumulatedFlags |= (tree->gtFlags & GTF_ALL_EFFECT);

        if (tree->OperIsLocal())
        {
            LclVarDsc* const lclDsc = m_compiler->lvaGetDesc(tree->AsLclVarCommon()->GetLclNum());

            if (lclDsc->IsAddressExposed())
            {
                m_accumulatedFlags |= GTF_GLOB_REF;
            }
        }

        return fgWalkResult::WALK_CONTINUE;
    }

    // GetFlags: effect flags accumulated over all visited nodes.
    GenTreeFlags GetFlags()
    {
        return m_accumulatedFlags;
    }

private:
    GenTreeFlags m_accumulatedFlags;
};
//------------------------------------------------------------------------
// fgForwardSubStatement: forward substitute this statement's
// computation to the next statement, if legal and profitable
//
// arguments:
// stmt - statement in question
//
// Returns:
// true if statement computation was forwarded.
// caller is responsible for removing the now-dead statement.
//
bool Compiler::fgForwardSubStatement(Statement* stmt)
{
    // Is this tree a def of a single use, unaliased local?
    //
    GenTree* const rootNode = stmt->GetRootNode();

    if (!rootNode->OperIs(GT_ASG))
    {
        return false;
    }

    GenTree* const lhsNode = rootNode->gtGetOp1();

    if (!lhsNode->OperIs(GT_LCL_VAR))
    {
        return false;
    }

    JITDUMP("    [%06u]: ", dspTreeID(rootNode))

    unsigned const   lclNum = lhsNode->AsLclVarCommon()->GetLclNum();
    LclVarDsc* const varDsc = lvaGetDesc(lclNum);

    // Leave pinned locals alone.
    // This is just a perf opt -- we shouldn't find any uses.
    //
    if (varDsc->lvPinned)
    {
        JITDUMP(" pinned local\n");
        return false;
    }

    // Only fwd sub if we expect no code duplication
    // We expect one def and one use.
    //
    if (varDsc->lvRefCnt(RCS_EARLY) != 2)
    {
        JITDUMP(" not asg (single-use lcl)\n");
        return false;
    }

    // And local is unaliased
    //
    if (varDsc->IsAddressExposed())
    {
        JITDUMP(" not asg (unaliased single-use lcl)\n");
        return false;
    }

    // Could handle this case --perhaps-- but we'd want to update ref counts.
    //
    if (lvaIsImplicitByRefLocal(lclNum))
    {
        JITDUMP(" implicit by-ref local\n");
        return false;
    }

    // Check the tree to substitute.
    //
    // We could just extract the value portion and forward sub that,
    // but cleanup would be more complicated.
    //
    GenTree* const rhsNode    = rootNode->gtGetOp2();
    GenTree*       fwdSubNode = rhsNode;

    // Can't substitute a qmark (unless the use is RHS of an assign... could check for this)
    // Can't substitute GT_CATCH_ARG.
    // Can't substitute GT_LCLHEAP.
    //
    // Don't substitute a no return call (trips up morph in some cases).
    //
    if (fwdSubNode->OperIs(GT_QMARK, GT_CATCH_ARG, GT_LCLHEAP))
    {
        JITDUMP(" tree to sub is qmark, catch arg, or lcl heap\n");
        return false;
    }

    if (fwdSubNode->IsCall() && fwdSubNode->AsCall()->IsNoReturn())
    {
        JITDUMP(" tree to sub is a 'no return' call\n");
        return false;
    }

    // Bail if sub node has embedded assignment.
    //
    if ((fwdSubNode->gtFlags & GTF_ASG) != 0)
    {
        JITDUMP(" tree to sub has effects\n");
        return false;
    }

    // Bail if sub node has mismatched types.
    // Might be able to tolerate these by retyping.
    //
    if (lhsNode->TypeGet() != fwdSubNode->TypeGet())
    {
        JITDUMP(" mismatched types (assignment)\n");
        return false;
    }

    if (gtGetStructHandleIfPresent(fwdSubNode) != gtGetStructHandleIfPresent(lhsNode))
    {
        JITDUMP(" would change struct handle (assignment)\n");
        return false;
    }

    // If lhs is multi-reg, rhs must be too.
    //
    if (lhsNode->IsMultiRegNode() && !fwdSubNode->IsMultiRegNode())
    {
        JITDUMP(" would change multi-reg (assignment)\n");
        return false;
    }

    // Don't fwd sub overly large trees.
    // Size limit here is ad-hoc. Need to tune.
    //
    // Consider instead using the height of the fwdSubNode.
    //
    unsigned const nodeLimit = 16;

    if (gtComplexityExceeds(&fwdSubNode, nodeLimit))
    {
        JITDUMP(" tree to sub has more than %u nodes\n", nodeLimit);
        return false;
    }

    // Local and tree to substitute seem suitable.
    // See if the next statement contains the one and only use.
    //
    Statement* const nextStmt = stmt->GetNextStmt();

    // We often see stale flags, eg call flags after inlining.
    // Try and clean these up.
    //
    gtUpdateStmtSideEffects(nextStmt);
    gtUpdateStmtSideEffects(stmt);

    // Scan for the (single) use.
    //
    ForwardSubVisitor fsv(this, lclNum);
    fsv.WalkTree(nextStmt->GetRootNodePointer(), nullptr);

    // LclMorph (via RCS_Early) said there was just one use.
    // It had better have gotten this right.
    //
    assert(fsv.GetUseCount() <= 1);

    if ((fsv.GetUseCount() == 0) || (fsv.GetNode() == nullptr))
    {
        JITDUMP(" no next stmt use\n");
        return false;
    }

    JITDUMP(" [%06u] is only use of [%06u] (V%02u) ", dspTreeID(fsv.GetNode()), dspTreeID(lhsNode), lclNum);

    // If next statement already has a large tree, hold off
    // on making it even larger.
    //
    // We use total node count. Consider instead using the depth of the use and the
    // height of the fwdSubNode.
    //
    unsigned const nextTreeLimit = 200;
    if ((fsv.GetComplexity() > nextTreeLimit) && gtComplexityExceeds(&fwdSubNode, 1))
    {
        JITDUMP(" next stmt tree is too large (%u)\n", fsv.GetComplexity());
        return false;
    }

    // Next statement seems suitable.
    // See if we can forward sub without changing semantics.
    //
    GenTree* const nextRootNode = nextStmt->GetRootNode();

    // Bail if types disagree.
    // Might be able to tolerate these by retyping.
    //
    if (fsv.GetNode()->TypeGet() != fwdSubNode->TypeGet())
    {
        JITDUMP(" mismatched types (substitution)\n");
        return false;
    }

    // We can forward sub if
    //
    // the value of the fwdSubNode can't change and its evaluation won't cause side effects,
    //
    // or,
    //
    // if the next tree can't change the value of fwdSubNode or be impacted by fwdSubNode effects
    //
    const bool fwdSubNodeInvariant   = ((fwdSubNode->gtFlags & GTF_ALL_EFFECT) == 0);
    const bool nextTreeIsPureUpToUse = ((fsv.GetFlags() & (GTF_EXCEPT | GTF_GLOB_REF | GTF_CALL)) == 0);
    if (!fwdSubNodeInvariant && !nextTreeIsPureUpToUse)
    {
        // Fwd sub may impact global values and or reorder exceptions...
        //
        JITDUMP(" potentially interacting effects\n");
        return false;
    }

    // If we're relying on purity of fwdSubNode for legality of forward sub,
    // do some extra checks for global uses that might not be reflected in the flags.
    //
    // TODO: remove this once we can trust upstream phases and/or gtUpdateStmtSideEffects
    // to set GTF_GLOB_REF properly.
    //
    if (fwdSubNodeInvariant && ((fsv.GetFlags() & (GTF_CALL | GTF_ASG)) != 0))
    {
        EffectsVisitor ev(this);
        ev.WalkTree(&fwdSubNode, nullptr);

        if ((ev.GetFlags() & GTF_GLOB_REF) != 0)
        {
            JITDUMP(" potentially interacting effects (AX locals)\n");
            return false;
        }
    }

    // Finally, profitability checks.
    //
    // These conditions can be checked earlier in the final version to save some throughput.
    // Perhaps allowing for bypass with jit stress.
    //
    // If fwdSubNode is an address-exposed local, forwarding it may lose optimizations.
    // (maybe similar for dner?)
    //
    if (fwdSubNode->IsLocal())
    {
        unsigned const   fwdLclNum = fwdSubNode->AsLclVarCommon()->GetLclNum();
        LclVarDsc* const fwdVarDsc = lvaGetDesc(fwdLclNum);

        if (fwdVarDsc->IsAddressExposed())
        {
            JITDUMP(" V%02u is address exposed\n", fwdLclNum);
            return false;
        }
    }

    // Optimization:
    //
    // If we are about to substitute GT_OBJ, see if we can simplify it first.
    // Not doing so can lead to regressions...
    //
    // Hold off on doing this for call args for now (per issue #51569).
    // Hold off on OBJ(GT_LCL_ADDR).
    //
    if (fwdSubNode->OperIs(GT_OBJ) && !fsv.IsCallArg() && fwdSubNode->gtGetOp1()->OperIs(GT_ADDR))
    {
        const bool     destroyNodes = false;
        GenTree* const optTree      = fgMorphTryFoldObjAsLclVar(fwdSubNode->AsObj(), destroyNodes);
        if (optTree != nullptr)
        {
            JITDUMP(" [folding OBJ(ADDR(LCL...))]");
            fwdSubNode = optTree;
        }
    }

    // Quirks:
    //
    // We may sometimes lose or change a type handle. Avoid substituting if so.
    //
    if (gtGetStructHandleIfPresent(fwdSubNode) != gtGetStructHandleIfPresent(fsv.GetNode()))
    {
        JITDUMP(" would change struct handle (substitution)\n");
        return false;
    }

#ifdef FEATURE_SIMD
    // Don't forward sub a SIMD call under a HW intrinsic node.
    // LowerCallStruct is not prepared for this.
    //
    if (fwdSubNode->IsCall() && varTypeIsSIMD(fwdSubNode->TypeGet()) && fsv.GetParentNode()->OperIs(GT_HWINTRINSIC))
    {
        JITDUMP(" simd returning call; hw intrinsic\n");
        return false;
    }
#endif // FEATURE_SIMD

    // There are implicit assumptions downstream on where/how multi-reg ops
    // can appear.
    //
    // Eg if fwdSubNode is a multi-reg call, parent node must be GT_ASG and the
    // local being defined must be specially marked up.
    //
    if (fwdSubNode->IsMultiRegCall())
    {
        GenTree* const parentNode = fsv.GetParentNode();

        if (!parentNode->OperIs(GT_ASG))
        {
            JITDUMP(" multi-reg call, parent not asg\n");
            return false;
        }

        GenTree* const parentNodeLHS = parentNode->gtGetOp1();

        if (!parentNodeLHS->OperIs(GT_LCL_VAR))
        {
            JITDUMP(" multi-reg call, parent not asg(lcl, ...)\n");
            return false;
        }

#if defined(TARGET_X86) || defined(TARGET_ARM)
        if (fwdSubNode->TypeGet() == TYP_LONG)
        {
            JITDUMP(" TYP_LONG fwd sub node, target is x86/arm\n");
            return false;
        }
#endif // defined(TARGET_X86) || defined(TARGET_ARM)

        GenTreeLclVar* const parentNodeLHSLocal = parentNodeLHS->AsLclVar();

        unsigned const   lhsLclNum = parentNodeLHSLocal->GetLclNum();
        LclVarDsc* const lhsVarDsc = lvaGetDesc(lhsLclNum);

        JITDUMP(" [marking V%02u as multi-reg-ret]", lhsLclNum);
        lhsVarDsc->lvIsMultiRegRet = true;
        parentNodeLHSLocal->SetMultiReg();
    }

    // If a method returns a multi-reg type, only forward sub locals,
    // and ensure the local and operand have the required markup.
    //
    // (see eg impFixupStructReturnType)
    //
    if (compMethodReturnsMultiRegRetType() && fsv.GetParentNode()->OperIs(GT_RETURN))
    {
        if (!fwdSubNode->OperIs(GT_LCL_VAR))
        {
            JITDUMP(" parent is return, fwd sub node is not lcl var\n");
            return false;
        }

#if defined(TARGET_X86) || defined(TARGET_ARM)
        if (fwdSubNode->TypeGet() == TYP_LONG)
        {
            JITDUMP(" TYP_LONG fwd sub node, target is x86/arm\n");
            return false;
        }
#endif // defined(TARGET_X86) || defined(TARGET_ARM)

        GenTreeLclVar* const fwdSubNodeLocal = fwdSubNode->AsLclVar();
        unsigned const       fwdLclNum       = fwdSubNodeLocal->GetLclNum();
        LclVarDsc* const     fwdVarDsc       = lvaGetDesc(fwdLclNum);

        JITDUMP(" [marking V%02u as multi-reg-ret]", fwdLclNum);
        fwdVarDsc->lvIsMultiRegRet = true;
        fwdSubNodeLocal->SetMultiReg();
        fwdSubNodeLocal->gtFlags |= GTF_DONT_CSE;
    }

    // If the use is a multi-reg arg, don't forward sub non-locals.
    //
    if (fsv.GetNode()->IsMultiRegNode() && !fwdSubNode->IsMultiRegNode())
    {
        JITDUMP(" would change multi-reg (substitution)\n");
        return false;
    }

    // If the initial has truncate on store semantics, we need to replicate
    // that here with a cast.
    //
    if (varDsc->lvNormalizeOnStore() && fgCastNeeded(fwdSubNode, varDsc->TypeGet()))
    {
        JITDUMP(" [adding cast for normalize on store]");
        fwdSubNode = gtNewCastNode(TYP_INT, fwdSubNode, false, varDsc->TypeGet());
    }

    // Looks good, forward sub!
    //
    GenTree** use = fsv.GetUse();
    *use          = fwdSubNode;

    // If the substituted tree has effects, the next statement's flags
    // must be recomputed to reflect them.
    if (!fwdSubNodeInvariant)
    {
        gtUpdateStmtSideEffects(nextStmt);
    }

    JITDUMP(" -- fwd subbing [%06u]; new next stmt is\n", dspTreeID(fwdSubNode));
    DISPSTMT(nextStmt);

    return true;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/tests/palsuite/composite/wfmo/main.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
** Source Code: main.c and mutex.c
** main.c creates process and waits for all processes to get over
** mutex.c creates a mutex and then calls threads which will contend for the mutex
**
** This test is for WFMO Test case for Mutex
** Algorithm
** o Create PROCESS_COUNT processes.
** o Main Thread of each process creates OBJECT_TYPE Object
**
** Author: ShamitP
**
**
**============================================================
*/
#include <palsuite.h>
#include "resulttime.h"
/* Test Input Variables */
/* Defaults below are overridden from the command line by GetParameters(). */
unsigned int PROCESS_COUNT = 3;  /* number of child processes to create (argv[1]) */
unsigned int THREAD_COUNT = 30;  /* thread count forwarded to each child (argv[2]) */
unsigned int REPEAT_COUNT = 40;  /* repeat count forwarded to each child (argv[3]) */
unsigned int SLEEP_LENGTH = 4;   /* sleep length forwarded to each child (argv[4]) */
unsigned int RELATION_ID = 1001; /* id correlating processes and result files (argv[5]) */
/* One row of results, written as CSV to the main_wfmo_<relationId>_.txt file. */
struct TestStats{
    DWORD        operationTime; /* total wall-clock duration of the run, in milliseconds */
    unsigned int relationId;    /* id correlating this run's processes and result files */
    unsigned int processCount;  /* number of child processes created */
    unsigned int threadCount;   /* threads per child process */
    unsigned int repeatCount;   /* iterations per thread */
    char*        buildNumber;   /* build identifier from getBuildNumber() */
};
/*
 * GetParameters
 *
 * Parse and validate the five command line arguments into the global test
 * parameters (PROCESS_COUNT, THREAD_COUNT, REPEAT_COUNT, SLEEP_LENGTH,
 * RELATION_ID).
 *
 * Returns 0 on success; prints usage or an error and returns -1 on failure.
 */
int GetParameters( int argc, char **argv)
{
    /* Recognize an explicit help request. The original check read
     * "(argc == 1) && !strcmp(argv[1], ...)": with argc == 1 argv[1] is
     * NULL, so that clause could never legally fire -- a lone help flag
     * means argc == 2. The "(argc != 6)" clause below still rejects every
     * other malformed command line. */
    int helpRequested = (argc == 2) &&
                        ((strcmp(argv[1], "/?") == 0) ||
                         (strcmp(argv[1], "/h") == 0) ||
                         (strcmp(argv[1], "/H") == 0));

    if( helpRequested || (argc != 6) )
    {
        printf("PAL -Composite WFMO Test\n");
        printf("Usage:\n");
        printf("main\n\t[PROCESS_COUNT [greater than 0] \n");
        printf("\t[THREAD_COUNT [greater than 0] \n");
        printf("\t[REPEAT_COUNT [greater than 0]\n");
        printf("\t[SLEEP_LENGTH [greater than 0]\n");
        printf("\t[RELATION_ID [greater than 0]\n");
        return -1;
    }

    /* Each parameter is rejected when zero; negative atoi results wrap to
     * huge unsigned values and are caught by the upper-bound checks where
     * present. */
    PROCESS_COUNT = atoi(argv[1]);
    if( (PROCESS_COUNT < 1) || (PROCESS_COUNT > MAXIMUM_WAIT_OBJECTS) )
    {
        printf("\nMain Process:Invalid PROCESS_COUNT number, Pass greater than 1 and less than PROCESS_COUNT %d\n", MAXIMUM_WAIT_OBJECTS);
        return -1;
    }

    THREAD_COUNT = atoi(argv[2]);
    if( (THREAD_COUNT < 1) || (THREAD_COUNT > MAXIMUM_WAIT_OBJECTS) )
    {
        printf("\nInvalid THREAD_COUNT number, Pass greater than 1 and less than %d\n", MAXIMUM_WAIT_OBJECTS);
        return -1;
    }

    REPEAT_COUNT = atoi(argv[3]);
    if( REPEAT_COUNT < 1)
    {
        printf("\nMain Process:Invalid REPEAT_COUNT number, Pass greater than 1\n");
        return -1;
    }

    SLEEP_LENGTH = atoi(argv[4]);
    if( SLEEP_LENGTH < 1)
    {
        printf("\nMain Process:Invalid SLEEP_LENGTH number, Pass greater than 1\n");
        return -1;
    }

    RELATION_ID = atoi(argv[5]);
    if( RELATION_ID < 1)
    {
        printf("\nMain Process:Invalid RELATION_ID number, Pass greater than 1\n");
        return -1;
    }

    return 0;
}
/*
 * Test entry point: spawns PROCESS_COUNT copies of the "mutex" child
 * executable, waits on all of their process handles, aggregates their exit
 * codes, and appends one CSV timing line to main_wfmo_<relationId>_.txt.
 */
PALTEST(composite_wfmo_paltest_composite_wfmo, "composite/wfmo/paltest_composite_wfmo")
{
    unsigned int i = 0;
    HANDLE hProcess[MAXIMUM_WAIT_OBJECTS];
    STARTUPINFO si[MAXIMUM_WAIT_OBJECTS];
    PROCESS_INFORMATION pi[MAXIMUM_WAIT_OBJECTS];
    char lpCommandLine[MAX_PATH] = "";
    int returnCode = 0;
    DWORD processReturnCode = 0;
    int testReturnCode = PASS;
    char fileName[MAX_PATH];
    FILE *pFile = NULL;
    DWORD dwStartTime;
    struct TestStats testStats;

    if(0 != (PAL_Initialize(argc, argv)))
    {
        return ( FAIL );
    }

    /* Populate the PROCESS_COUNT/.../RELATION_ID globals from the command line. */
    if(GetParameters(argc, argv))
    {
        Fail("Error in obtaining the parameters\n");
    }

    /* Register the start time */
    dwStartTime = GetTickCount();
    /* NOTE(review): the zero store below is immediately overwritten by RELATION_ID. */
    testStats.relationId = 0;
    testStats.relationId = RELATION_ID;
    testStats.processCount = PROCESS_COUNT;
    testStats.threadCount = THREAD_COUNT;
    testStats.repeatCount = REPEAT_COUNT;
    testStats.buildNumber = getBuildNumber();

    /* Open the per-relation results file before spawning any children. */
    _snprintf(fileName, MAX_PATH, "main_wfmo_%d_.txt",testStats.relationId);
    pFile = fopen(fileName, "w+");
    if(pFile == NULL)
    {
        Fail("Error in opening main file for write\n");
    }

    /* Launch one child per process slot; each child gets its index plus the
       shared test parameters on its command line. */
    for( i = 0; i < PROCESS_COUNT; i++ )
    {
        ZeroMemory( lpCommandLine, MAX_PATH );
        if ( _snprintf( lpCommandLine, MAX_PATH-1, "mutex %d %d %d %d %d", i, THREAD_COUNT, REPEAT_COUNT, SLEEP_LENGTH, RELATION_ID) < 0 )
        {
            Trace ("Error: Insufficient commandline string length for for iteration [%d]\n", i);
        }

        /* Zero the data structure space */
        ZeroMemory ( &pi[i], sizeof(pi[i]) );
        ZeroMemory ( &si[i], sizeof(si[i]) );

        /* Set the process flags and standard io handles */
        si[i].cb = sizeof(si[i]);

        //Create Process
        if(!CreateProcess( NULL, /* lpApplicationName*/
                          lpCommandLine, /* lpCommandLine */
                          NULL, /* lpProcessAttributes */
                          NULL, /* lpThreadAttributes */
                          TRUE, /* bInheritHandles */
                          0, /* dwCreationFlags, */
                          NULL, /* lpEnvironment */
                          NULL, /* pCurrentDirectory */
                          &si[i], /* lpStartupInfo */
                          &pi[i] /* lpProcessInformation */
                          ))
        {
            /* NOTE(review): GetLastError() is a DWORD; "%u" would be the
               precise conversion here -- confirm before changing output. */
            Fail("Process Not created for [%d], the error code is [%d]\n", i, GetLastError());
        }
        else
        {
            hProcess[i] = pi[i].hProcess;
            // Trace("Process created for [%d]\n", i);
        }
    }

    /* Block until every child process has exited (bWaitAll == TRUE). */
    returnCode = WaitForMultipleObjects( PROCESS_COUNT, hProcess, TRUE, INFINITE);
    if( WAIT_OBJECT_0 != returnCode )
    {
        Trace("Wait for Object(s) @ Main thread for %d processes returned %d, and GetLastError value is %d\n", PROCESS_COUNT, returnCode, GetLastError());
    }

    /* Collect each child's exit code; any FAIL fails the whole test. */
    for( i = 0; i < PROCESS_COUNT; i++ )
    {
        /* check the exit code from the process */
        if( ! GetExitCodeProcess( pi[i].hProcess, &processReturnCode ) )
        {
            Trace( "GetExitCodeProcess call failed for iteration %d with error code %u\n",
                i, GetLastError() );
            testReturnCode = FAIL;
        }

        if(processReturnCode == FAIL)
        {
            Trace( "Process [%d] failed and returned FAIL\n", i);
            testReturnCode = FAIL;
        }

        if(!CloseHandle(pi[i].hThread))
        {
            Trace("Error:%d: CloseHandle failed for Process [%d] hThread\n", GetLastError(), i);
        }

        if(!CloseHandle(pi[i].hProcess) )
        {
            Trace("Error:%d: CloseHandle failed for Process [%d] hProcess\n", GetLastError(), i);
        }
    }

    /* Record elapsed time and emit the CSV result row. */
    testStats.operationTime = GetTimeDiff(dwStartTime);
    fprintf(pFile, "%d,%d,%d,%d,%d,%s\n", testStats.operationTime, testStats.relationId, testStats.processCount, testStats.threadCount, testStats.repeatCount, testStats.buildNumber);

    if(fclose(pFile))
    {
        Trace("Error: fclose failed for pFile\n");
        testReturnCode = FAIL;
    }

    if( testReturnCode == PASS)
    {
        Trace("Test Passed\n");
    }
    else
    {
        Trace("Test Failed\n");
    }

    PAL_Terminate();
    return testReturnCode;
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
** Source Code: main.c and mutex.c
** main.c creates process and waits for all processes to get over
** mutex.c creates a mutex and then calls threads which will contend for the mutex
**
** This test is for WFMO Test case for Mutex
** Algorithm
** o Create PROCESS_COUNT processes.
** o Main Thread of each process creates OBJECT_TYPE Object
**
** Author: ShamitP
**
**
**============================================================
*/
#include <palsuite.h>
#include "resulttime.h"
/* Test Input Variables */
/* Defaults below are overridden from the command line by GetParameters(). */
unsigned int PROCESS_COUNT = 3;  /* number of child processes to create (argv[1]) */
unsigned int THREAD_COUNT = 30;  /* thread count forwarded to each child (argv[2]) */
unsigned int REPEAT_COUNT = 40;  /* repeat count forwarded to each child (argv[3]) */
unsigned int SLEEP_LENGTH = 4;   /* sleep length forwarded to each child (argv[4]) */
unsigned int RELATION_ID = 1001; /* id correlating processes and result files (argv[5]) */
/* One row of results, written as CSV to the main_wfmo_<relationId>_.txt file. */
struct TestStats{
    DWORD        operationTime; /* total wall-clock duration of the run, in milliseconds */
    unsigned int relationId;    /* id correlating this run's processes and result files */
    unsigned int processCount;  /* number of child processes created */
    unsigned int threadCount;   /* threads per child process */
    unsigned int repeatCount;   /* iterations per thread */
    char*        buildNumber;   /* build identifier from getBuildNumber() */
};
/*
 * GetParameters
 *
 * Parse and validate the five command line arguments into the global test
 * parameters (PROCESS_COUNT, THREAD_COUNT, REPEAT_COUNT, SLEEP_LENGTH,
 * RELATION_ID).
 *
 * Returns 0 on success; prints usage or an error and returns -1 on failure.
 */
int GetParameters( int argc, char **argv)
{
    /* Recognize an explicit help request. The original check read
     * "(argc == 1) && !strcmp(argv[1], ...)": with argc == 1 argv[1] is
     * NULL, so that clause could never legally fire -- a lone help flag
     * means argc == 2. The "(argc != 6)" clause below still rejects every
     * other malformed command line. */
    int helpRequested = (argc == 2) &&
                        ((strcmp(argv[1], "/?") == 0) ||
                         (strcmp(argv[1], "/h") == 0) ||
                         (strcmp(argv[1], "/H") == 0));

    if( helpRequested || (argc != 6) )
    {
        printf("PAL -Composite WFMO Test\n");
        printf("Usage:\n");
        printf("main\n\t[PROCESS_COUNT [greater than 0] \n");
        printf("\t[THREAD_COUNT [greater than 0] \n");
        printf("\t[REPEAT_COUNT [greater than 0]\n");
        printf("\t[SLEEP_LENGTH [greater than 0]\n");
        printf("\t[RELATION_ID [greater than 0]\n");
        return -1;
    }

    /* Each parameter is rejected when zero; negative atoi results wrap to
     * huge unsigned values and are caught by the upper-bound checks where
     * present. */
    PROCESS_COUNT = atoi(argv[1]);
    if( (PROCESS_COUNT < 1) || (PROCESS_COUNT > MAXIMUM_WAIT_OBJECTS) )
    {
        printf("\nMain Process:Invalid PROCESS_COUNT number, Pass greater than 1 and less than PROCESS_COUNT %d\n", MAXIMUM_WAIT_OBJECTS);
        return -1;
    }

    THREAD_COUNT = atoi(argv[2]);
    if( (THREAD_COUNT < 1) || (THREAD_COUNT > MAXIMUM_WAIT_OBJECTS) )
    {
        printf("\nInvalid THREAD_COUNT number, Pass greater than 1 and less than %d\n", MAXIMUM_WAIT_OBJECTS);
        return -1;
    }

    REPEAT_COUNT = atoi(argv[3]);
    if( REPEAT_COUNT < 1)
    {
        printf("\nMain Process:Invalid REPEAT_COUNT number, Pass greater than 1\n");
        return -1;
    }

    SLEEP_LENGTH = atoi(argv[4]);
    if( SLEEP_LENGTH < 1)
    {
        printf("\nMain Process:Invalid SLEEP_LENGTH number, Pass greater than 1\n");
        return -1;
    }

    RELATION_ID = atoi(argv[5]);
    if( RELATION_ID < 1)
    {
        printf("\nMain Process:Invalid RELATION_ID number, Pass greater than 1\n");
        return -1;
    }

    return 0;
}
/*
 * Test entry point: spawns PROCESS_COUNT copies of the "mutex" child
 * executable, waits on all of their process handles, aggregates their exit
 * codes, and appends one CSV timing line to main_wfmo_<relationId>_.txt.
 */
PALTEST(composite_wfmo_paltest_composite_wfmo, "composite/wfmo/paltest_composite_wfmo")
{
    unsigned int i = 0;
    HANDLE hProcess[MAXIMUM_WAIT_OBJECTS];
    STARTUPINFO si[MAXIMUM_WAIT_OBJECTS];
    PROCESS_INFORMATION pi[MAXIMUM_WAIT_OBJECTS];
    char lpCommandLine[MAX_PATH] = "";
    int returnCode = 0;
    DWORD processReturnCode = 0;
    int testReturnCode = PASS;
    char fileName[MAX_PATH];
    FILE *pFile = NULL;
    DWORD dwStartTime;
    struct TestStats testStats;

    if(0 != (PAL_Initialize(argc, argv)))
    {
        return ( FAIL );
    }

    /* Populate the PROCESS_COUNT/.../RELATION_ID globals from the command line. */
    if(GetParameters(argc, argv))
    {
        Fail("Error in obtaining the parameters\n");
    }

    /* Register the start time */
    dwStartTime = GetTickCount();
    /* NOTE(review): the zero store below is immediately overwritten by RELATION_ID. */
    testStats.relationId = 0;
    testStats.relationId = RELATION_ID;
    testStats.processCount = PROCESS_COUNT;
    testStats.threadCount = THREAD_COUNT;
    testStats.repeatCount = REPEAT_COUNT;
    testStats.buildNumber = getBuildNumber();

    /* Open the per-relation results file before spawning any children. */
    _snprintf(fileName, MAX_PATH, "main_wfmo_%d_.txt",testStats.relationId);
    pFile = fopen(fileName, "w+");
    if(pFile == NULL)
    {
        Fail("Error in opening main file for write\n");
    }

    /* Launch one child per process slot; each child gets its index plus the
       shared test parameters on its command line. */
    for( i = 0; i < PROCESS_COUNT; i++ )
    {
        ZeroMemory( lpCommandLine, MAX_PATH );
        if ( _snprintf( lpCommandLine, MAX_PATH-1, "mutex %d %d %d %d %d", i, THREAD_COUNT, REPEAT_COUNT, SLEEP_LENGTH, RELATION_ID) < 0 )
        {
            Trace ("Error: Insufficient commandline string length for for iteration [%d]\n", i);
        }

        /* Zero the data structure space */
        ZeroMemory ( &pi[i], sizeof(pi[i]) );
        ZeroMemory ( &si[i], sizeof(si[i]) );

        /* Set the process flags and standard io handles */
        si[i].cb = sizeof(si[i]);

        //Create Process
        if(!CreateProcess( NULL, /* lpApplicationName*/
                          lpCommandLine, /* lpCommandLine */
                          NULL, /* lpProcessAttributes */
                          NULL, /* lpThreadAttributes */
                          TRUE, /* bInheritHandles */
                          0, /* dwCreationFlags, */
                          NULL, /* lpEnvironment */
                          NULL, /* pCurrentDirectory */
                          &si[i], /* lpStartupInfo */
                          &pi[i] /* lpProcessInformation */
                          ))
        {
            /* NOTE(review): GetLastError() is a DWORD; "%u" would be the
               precise conversion here -- confirm before changing output. */
            Fail("Process Not created for [%d], the error code is [%d]\n", i, GetLastError());
        }
        else
        {
            hProcess[i] = pi[i].hProcess;
            // Trace("Process created for [%d]\n", i);
        }
    }

    /* Block until every child process has exited (bWaitAll == TRUE). */
    returnCode = WaitForMultipleObjects( PROCESS_COUNT, hProcess, TRUE, INFINITE);
    if( WAIT_OBJECT_0 != returnCode )
    {
        Trace("Wait for Object(s) @ Main thread for %d processes returned %d, and GetLastError value is %d\n", PROCESS_COUNT, returnCode, GetLastError());
    }

    /* Collect each child's exit code; any FAIL fails the whole test. */
    for( i = 0; i < PROCESS_COUNT; i++ )
    {
        /* check the exit code from the process */
        if( ! GetExitCodeProcess( pi[i].hProcess, &processReturnCode ) )
        {
            Trace( "GetExitCodeProcess call failed for iteration %d with error code %u\n",
                i, GetLastError() );
            testReturnCode = FAIL;
        }

        if(processReturnCode == FAIL)
        {
            Trace( "Process [%d] failed and returned FAIL\n", i);
            testReturnCode = FAIL;
        }

        if(!CloseHandle(pi[i].hThread))
        {
            Trace("Error:%d: CloseHandle failed for Process [%d] hThread\n", GetLastError(), i);
        }

        if(!CloseHandle(pi[i].hProcess) )
        {
            Trace("Error:%d: CloseHandle failed for Process [%d] hProcess\n", GetLastError(), i);
        }
    }

    /* Record elapsed time and emit the CSV result row. */
    testStats.operationTime = GetTimeDiff(dwStartTime);
    fprintf(pFile, "%d,%d,%d,%d,%d,%s\n", testStats.operationTime, testStats.relationId, testStats.processCount, testStats.threadCount, testStats.repeatCount, testStats.buildNumber);

    if(fclose(pFile))
    {
        Trace("Error: fclose failed for pFile\n");
        testReturnCode = FAIL;
    }

    if( testReturnCode == PASS)
    {
        Trace("Test Passed\n");
    }
    else
    {
        Trace("Test Failed\n");
    }

    PAL_Terminate();
    return testReturnCode;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/native/public/mono/utils/mono-jemalloc.h | /**
* \file
*
* Header for jemalloc registration code
*/
#ifndef __MONO_JEMALLOC_H__
#define __MONO_JEMALLOC_H__
#if defined(MONO_JEMALLOC_ENABLED)
#include <jemalloc/jemalloc.h>
/* Jemalloc can be configured in three ways.
* 1. You can use it with library loading hacks at run-time
* 2. You can use it as a global malloc replacement
* 3. You can use it with a prefix. If you use it with a prefix, you have to explicitly name the malloc function.
*
* In order to make this feature able to be toggled at run-time, I chose to use a prefix of mono_je.
* This mapping is captured below in the header, in the spirit of "no magic constants".
*
* The place that configures jemalloc and sets this prefix is in the Makefile in
* mono/jemalloc/Makefile.am
*
*/
#define MONO_JEMALLOC_MALLOC mono_jemalloc
#define MONO_JEMALLOC_REALLOC mono_jerealloc
#define MONO_JEMALLOC_FREE mono_jefree
#define MONO_JEMALLOC_CALLOC mono_jecalloc
void mono_init_jemalloc (void);
#endif
#endif
| /**
* \file
*
* Header for jemalloc registration code
*/
#ifndef __MONO_JEMALLOC_H__
#define __MONO_JEMALLOC_H__
#if defined(MONO_JEMALLOC_ENABLED)
#include <jemalloc/jemalloc.h>
/* Jemalloc can be configured in three ways.
* 1. You can use it with library loading hacks at run-time
* 2. You can use it as a global malloc replacement
* 3. You can use it with a prefix. If you use it with a prefix, you have to explicitly name the malloc function.
*
* In order to make this feature able to be toggled at run-time, I chose to use a prefix of mono_je.
* This mapping is captured below in the header, in the spirit of "no magic constants".
*
* The place that configures jemalloc and sets this prefix is in the Makefile in
* mono/jemalloc/Makefile.am
*
*/
#define MONO_JEMALLOC_MALLOC mono_jemalloc
#define MONO_JEMALLOC_REALLOC mono_jerealloc
#define MONO_JEMALLOC_FREE mono_jefree
#define MONO_JEMALLOC_CALLOC mono_jecalloc
void mono_init_jemalloc (void);
#endif
#endif
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/inc/readytorun.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// readytorun.h
//
//
// Contains definitions for the Ready to Run file format
//
#ifndef __READYTORUN_H__
#define __READYTORUN_H__
#define READYTORUN_SIGNATURE 0x00525452 // 'RTR'
// Keep these in sync with src/coreclr/tools/Common/Internal/Runtime/ModuleHeaders.cs
#define READYTORUN_MAJOR_VERSION 0x0006
#define READYTORUN_MINOR_VERSION 0x0000
#define MINIMUM_READYTORUN_MAJOR_VERSION 0x006
// R2R Version 2.1 adds the InliningInfo section
// R2R Version 2.2 adds the ProfileDataInfo section
// R2R Version 3.0 changes calling conventions to correctly handle explicit structures to spec.
// R2R 3.0 is not backward compatible with 2.x.
// R2R Version 6.0 changes managed layout for sequential types with any unmanaged non-blittable fields.
// R2R 6.0 is not backward compatible with 5.x or earlier.
struct READYTORUN_CORE_HEADER
{
DWORD Flags; // READYTORUN_FLAG_XXX
DWORD NumberOfSections;
// Array of sections follows. The array entries are sorted by Type
// READYTORUN_SECTION Sections[];
};
struct READYTORUN_HEADER
{
DWORD Signature; // READYTORUN_SIGNATURE
USHORT MajorVersion; // READYTORUN_VERSION_XXX
USHORT MinorVersion;
READYTORUN_CORE_HEADER CoreHeader;
};
struct READYTORUN_COMPONENT_ASSEMBLIES_ENTRY
{
IMAGE_DATA_DIRECTORY CorHeader;
IMAGE_DATA_DIRECTORY ReadyToRunCoreHeader;
};
enum ReadyToRunFlag
{
READYTORUN_FLAG_PLATFORM_NEUTRAL_SOURCE = 0x00000001, // Set if the original IL assembly was platform-neutral
READYTORUN_FLAG_SKIP_TYPE_VALIDATION = 0x00000002, // Set of methods with native code was determined using profile data
READYTORUN_FLAG_PARTIAL = 0x00000004,
READYTORUN_FLAG_NONSHARED_PINVOKE_STUBS = 0x00000008, // PInvoke stubs compiled into image are non-shareable (no secret parameter)
READYTORUN_FLAG_EMBEDDED_MSIL = 0x00000010, // MSIL is embedded in the composite R2R executable
READYTORUN_FLAG_COMPONENT = 0x00000020, // This is the header describing a component assembly of composite R2R
};
enum class ReadyToRunSectionType : uint32_t
{
CompilerIdentifier = 100,
ImportSections = 101,
RuntimeFunctions = 102,
MethodDefEntryPoints = 103,
ExceptionInfo = 104,
DebugInfo = 105,
DelayLoadMethodCallThunks = 106,
// 107 used by an older format of AvailableTypes
AvailableTypes = 108,
InstanceMethodEntryPoints = 109,
InliningInfo = 110, // Added in V2.1, deprecated in 4.1
ProfileDataInfo = 111, // Added in V2.2
ManifestMetadata = 112, // Added in V2.3
AttributePresence = 113, // Added in V3.1
InliningInfo2 = 114, // Added in V4.1
ComponentAssemblies = 115, // Added in V4.1
OwnerCompositeExecutable = 116, // Added in V4.1
PgoInstrumentationData = 117, // Added in V5.2
ManifestAssemblyMvids = 118, // Added in V5.3
// If you add a new section consider whether it is a breaking or non-breaking change.
// Usually it is non-breaking, but if it is preferable to have older runtimes fail
// to load the image vs. ignoring the new section it could be marked breaking.
// Increment the READYTORUN_MINOR_VERSION (non-breaking) or READYTORUN_MAJOR_VERSION
// (breaking) as appropriate.
};
struct READYTORUN_SECTION
{
ReadyToRunSectionType Type; // READYTORUN_SECTION_XXX
IMAGE_DATA_DIRECTORY Section;
};
//
// READYTORUN_IMPORT_SECTION describes image range with references to code or runtime data structures
//
// There is number of different types of these ranges: eagerly initialized at image load vs. lazily initialized at method entry
// vs. lazily initialized on first use; handles vs. code pointers, etc.
//
struct READYTORUN_IMPORT_SECTION
{
IMAGE_DATA_DIRECTORY Section; // Section containing values to be fixed up
USHORT Flags; // One or more of ReadyToRunImportSectionFlags
BYTE Type; // One of ReadyToRunImportSectionType
BYTE EntrySize;
DWORD Signatures; // RVA of optional signature descriptors
DWORD AuxiliaryData; // RVA of optional auxiliary data (typically GC info)
};
enum ReadyToRunImportSectionType
{
READYTORUN_IMPORT_SECTION_TYPE_UNKNOWN = 0,
};
enum ReadyToRunImportSectionFlags
{
READYTORUN_IMPORT_SECTION_FLAGS_EAGER = 0x0001,
};
//
// Constants for method and field encoding
//
enum ReadyToRunMethodSigFlags
{
READYTORUN_METHOD_SIG_UnboxingStub = 0x01,
READYTORUN_METHOD_SIG_InstantiatingStub = 0x02,
READYTORUN_METHOD_SIG_MethodInstantiation = 0x04,
READYTORUN_METHOD_SIG_SlotInsteadOfToken = 0x08,
READYTORUN_METHOD_SIG_MemberRefToken = 0x10,
READYTORUN_METHOD_SIG_Constrained = 0x20,
READYTORUN_METHOD_SIG_OwnerType = 0x40,
READYTORUN_METHOD_SIG_UpdateContext = 0x80,
};
enum ReadyToRunFieldSigFlags
{
READYTORUN_FIELD_SIG_IndexInsteadOfToken = 0x08,
READYTORUN_FIELD_SIG_MemberRefToken = 0x10,
READYTORUN_FIELD_SIG_OwnerType = 0x40,
};
enum ReadyToRunTypeLayoutFlags
{
READYTORUN_LAYOUT_HFA = 0x01,
READYTORUN_LAYOUT_Alignment = 0x02,
READYTORUN_LAYOUT_Alignment_Native = 0x04,
READYTORUN_LAYOUT_GCLayout = 0x08,
READYTORUN_LAYOUT_GCLayout_Empty = 0x10,
};
enum ReadyToRunVirtualFunctionOverrideFlags
{
READYTORUN_VIRTUAL_OVERRIDE_None = 0x00,
READYTORUN_VIRTUAL_OVERRIDE_VirtualFunctionOverriden = 0x01,
};
//
// Constants for fixup signature encoding
//
enum ReadyToRunFixupKind
{
READYTORUN_FIXUP_ThisObjDictionaryLookup = 0x07,
READYTORUN_FIXUP_TypeDictionaryLookup = 0x08,
READYTORUN_FIXUP_MethodDictionaryLookup = 0x09,
READYTORUN_FIXUP_TypeHandle = 0x10,
READYTORUN_FIXUP_MethodHandle = 0x11,
READYTORUN_FIXUP_FieldHandle = 0x12,
READYTORUN_FIXUP_MethodEntry = 0x13, /* For calling a method entry point */
READYTORUN_FIXUP_MethodEntry_DefToken = 0x14, /* Smaller version of MethodEntry - method is def token */
READYTORUN_FIXUP_MethodEntry_RefToken = 0x15, /* Smaller version of MethodEntry - method is ref token */
READYTORUN_FIXUP_VirtualEntry = 0x16, /* For invoking a virtual method */
READYTORUN_FIXUP_VirtualEntry_DefToken = 0x17, /* Smaller version of VirtualEntry - method is def token */
READYTORUN_FIXUP_VirtualEntry_RefToken = 0x18, /* Smaller version of VirtualEntry - method is ref token */
READYTORUN_FIXUP_VirtualEntry_Slot = 0x19, /* Smaller version of VirtualEntry - type & slot */
READYTORUN_FIXUP_Helper = 0x1A, /* Helper */
READYTORUN_FIXUP_StringHandle = 0x1B, /* String handle */
READYTORUN_FIXUP_NewObject = 0x1C, /* Dynamically created new helper */
READYTORUN_FIXUP_NewArray = 0x1D,
READYTORUN_FIXUP_IsInstanceOf = 0x1E, /* Dynamically created casting helper */
READYTORUN_FIXUP_ChkCast = 0x1F,
READYTORUN_FIXUP_FieldAddress = 0x20, /* For accessing a cross-module static fields */
READYTORUN_FIXUP_CctorTrigger = 0x21, /* Static constructor trigger */
READYTORUN_FIXUP_StaticBaseNonGC = 0x22, /* Dynamically created static base helpers */
READYTORUN_FIXUP_StaticBaseGC = 0x23,
READYTORUN_FIXUP_ThreadStaticBaseNonGC = 0x24,
READYTORUN_FIXUP_ThreadStaticBaseGC = 0x25,
READYTORUN_FIXUP_FieldBaseOffset = 0x26, /* Field base offset */
READYTORUN_FIXUP_FieldOffset = 0x27, /* Field offset */
READYTORUN_FIXUP_TypeDictionary = 0x28,
READYTORUN_FIXUP_MethodDictionary = 0x29,
READYTORUN_FIXUP_Check_TypeLayout = 0x2A, /* size, alignment, HFA, reference map */
READYTORUN_FIXUP_Check_FieldOffset = 0x2B,
READYTORUN_FIXUP_DelegateCtor = 0x2C, /* optimized delegate ctor */
READYTORUN_FIXUP_DeclaringTypeHandle = 0x2D,
READYTORUN_FIXUP_IndirectPInvokeTarget = 0x2E, /* Target (indirect) of an inlined pinvoke */
READYTORUN_FIXUP_PInvokeTarget = 0x2F, /* Target of an inlined pinvoke */
READYTORUN_FIXUP_Check_InstructionSetSupport= 0x30, /* Define the set of instruction sets that must be supported/unsupported to use the fixup */
READYTORUN_FIXUP_Verify_FieldOffset = 0x31, /* Generate a runtime check to ensure that the field offset matches between compile and runtime. Unlike Check_FieldOffset, this will generate a runtime failure instead of silently dropping the method */
READYTORUN_FIXUP_Verify_TypeLayout = 0x32, /* Generate a runtime check to ensure that the type layout (size, alignment, HFA, reference map) matches between compile and runtime. Unlike Check_TypeLayout, this will generate a runtime failure instead of silently dropping the method */
READYTORUN_FIXUP_Check_VirtualFunctionOverride = 0x33, /* Generate a runtime check to ensure that virtual function resolution has equivalent behavior at runtime as at compile time. If not equivalent, code will not be used */
READYTORUN_FIXUP_Verify_VirtualFunctionOverride = 0x34, /* Generate a runtime check to ensure that virtual function resolution has equivalent behavior at runtime as at compile time. If not equivalent, generate runtime failure. */
};
//
// Intrinsics and helpers
//
enum ReadyToRunHelper
{
READYTORUN_HELPER_Invalid = 0x00,
// Not a real helper - handle to current module passed to delay load helpers.
READYTORUN_HELPER_Module = 0x01,
READYTORUN_HELPER_GSCookie = 0x02,
READYTORUN_HELPER_IndirectTrapThreads = 0x03,
//
// Delay load helpers
//
// All delay load helpers use custom calling convention:
// - scratch register - address of indirection cell. 0 = address is inferred from callsite.
// - stack - section index, module handle
READYTORUN_HELPER_DelayLoad_MethodCall = 0x08,
READYTORUN_HELPER_DelayLoad_Helper = 0x10,
READYTORUN_HELPER_DelayLoad_Helper_Obj = 0x11,
READYTORUN_HELPER_DelayLoad_Helper_ObjObj = 0x12,
// JIT helpers
// Exception handling helpers
READYTORUN_HELPER_Throw = 0x20,
READYTORUN_HELPER_Rethrow = 0x21,
READYTORUN_HELPER_Overflow = 0x22,
READYTORUN_HELPER_RngChkFail = 0x23,
READYTORUN_HELPER_FailFast = 0x24,
READYTORUN_HELPER_ThrowNullRef = 0x25,
READYTORUN_HELPER_ThrowDivZero = 0x26,
// Write barriers
READYTORUN_HELPER_WriteBarrier = 0x30,
READYTORUN_HELPER_CheckedWriteBarrier = 0x31,
READYTORUN_HELPER_ByRefWriteBarrier = 0x32,
// Array helpers
READYTORUN_HELPER_Stelem_Ref = 0x38,
READYTORUN_HELPER_Ldelema_Ref = 0x39,
READYTORUN_HELPER_MemSet = 0x40,
READYTORUN_HELPER_MemCpy = 0x41,
// PInvoke helpers
READYTORUN_HELPER_PInvokeBegin = 0x42,
READYTORUN_HELPER_PInvokeEnd = 0x43,
READYTORUN_HELPER_GCPoll = 0x44,
READYTORUN_HELPER_ReversePInvokeEnter = 0x45,
READYTORUN_HELPER_ReversePInvokeExit = 0x46,
// Get string handle lazily
READYTORUN_HELPER_GetString = 0x50,
// Used by /Tuning for Profile optimizations
READYTORUN_HELPER_LogMethodEnter = 0x51,
// Reflection helpers
READYTORUN_HELPER_GetRuntimeTypeHandle = 0x54,
READYTORUN_HELPER_GetRuntimeMethodHandle = 0x55,
READYTORUN_HELPER_GetRuntimeFieldHandle = 0x56,
READYTORUN_HELPER_Box = 0x58,
READYTORUN_HELPER_Box_Nullable = 0x59,
READYTORUN_HELPER_Unbox = 0x5A,
READYTORUN_HELPER_Unbox_Nullable = 0x5B,
READYTORUN_HELPER_NewMultiDimArr = 0x5C,
// Helpers used with generic handle lookup cases
READYTORUN_HELPER_NewObject = 0x60,
READYTORUN_HELPER_NewArray = 0x61,
READYTORUN_HELPER_CheckCastAny = 0x62,
READYTORUN_HELPER_CheckInstanceAny = 0x63,
READYTORUN_HELPER_GenericGcStaticBase = 0x64,
READYTORUN_HELPER_GenericNonGcStaticBase = 0x65,
READYTORUN_HELPER_GenericGcTlsBase = 0x66,
READYTORUN_HELPER_GenericNonGcTlsBase = 0x67,
READYTORUN_HELPER_VirtualFuncPtr = 0x68,
// Long mul/div/shift ops
READYTORUN_HELPER_LMul = 0xC0,
READYTORUN_HELPER_LMulOfv = 0xC1,
READYTORUN_HELPER_ULMulOvf = 0xC2,
READYTORUN_HELPER_LDiv = 0xC3,
READYTORUN_HELPER_LMod = 0xC4,
READYTORUN_HELPER_ULDiv = 0xC5,
READYTORUN_HELPER_ULMod = 0xC6,
READYTORUN_HELPER_LLsh = 0xC7,
READYTORUN_HELPER_LRsh = 0xC8,
READYTORUN_HELPER_LRsz = 0xC9,
READYTORUN_HELPER_Lng2Dbl = 0xCA,
READYTORUN_HELPER_ULng2Dbl = 0xCB,
// 32-bit division helpers
READYTORUN_HELPER_Div = 0xCC,
READYTORUN_HELPER_Mod = 0xCD,
READYTORUN_HELPER_UDiv = 0xCE,
READYTORUN_HELPER_UMod = 0xCF,
// Floating point conversions
READYTORUN_HELPER_Dbl2Int = 0xD0,
READYTORUN_HELPER_Dbl2IntOvf = 0xD1,
READYTORUN_HELPER_Dbl2Lng = 0xD2,
READYTORUN_HELPER_Dbl2LngOvf = 0xD3,
READYTORUN_HELPER_Dbl2UInt = 0xD4,
READYTORUN_HELPER_Dbl2UIntOvf = 0xD5,
READYTORUN_HELPER_Dbl2ULng = 0xD6,
READYTORUN_HELPER_Dbl2ULngOvf = 0xD7,
// Floating point ops
READYTORUN_HELPER_DblRem = 0xE0,
READYTORUN_HELPER_FltRem = 0xE1,
READYTORUN_HELPER_DblRound = 0xE2,
READYTORUN_HELPER_FltRound = 0xE3,
#ifdef FEATURE_EH_FUNCLETS
// Personality rountines
READYTORUN_HELPER_PersonalityRoutine = 0xF0,
READYTORUN_HELPER_PersonalityRoutineFilterFunclet = 0xF1,
#endif
// Synchronized methods
READYTORUN_HELPER_MonitorEnter = 0xF8,
READYTORUN_HELPER_MonitorExit = 0xF9,
//
// Deprecated/legacy
//
// JIT32 x86-specific write barriers
READYTORUN_HELPER_WriteBarrier_EAX = 0x100,
READYTORUN_HELPER_WriteBarrier_EBX = 0x101,
READYTORUN_HELPER_WriteBarrier_ECX = 0x102,
READYTORUN_HELPER_WriteBarrier_ESI = 0x103,
READYTORUN_HELPER_WriteBarrier_EDI = 0x104,
READYTORUN_HELPER_WriteBarrier_EBP = 0x105,
READYTORUN_HELPER_CheckedWriteBarrier_EAX = 0x106,
READYTORUN_HELPER_CheckedWriteBarrier_EBX = 0x107,
READYTORUN_HELPER_CheckedWriteBarrier_ECX = 0x108,
READYTORUN_HELPER_CheckedWriteBarrier_ESI = 0x109,
READYTORUN_HELPER_CheckedWriteBarrier_EDI = 0x10A,
READYTORUN_HELPER_CheckedWriteBarrier_EBP = 0x10B,
// JIT32 x86-specific exception handling
READYTORUN_HELPER_EndCatch = 0x110,
// Stack probing helper
READYTORUN_HELPER_StackProbe = 0x111,
READYTORUN_HELPER_GetCurrentManagedThreadId = 0x112,
};
#include "readytoruninstructionset.h"
//
// Exception info
//
struct READYTORUN_EXCEPTION_LOOKUP_TABLE_ENTRY
{
DWORD MethodStart;
DWORD ExceptionInfo;
};
struct READYTORUN_EXCEPTION_CLAUSE
{
CorExceptionFlag Flags;
DWORD TryStartPC;
DWORD TryEndPC;
DWORD HandlerStartPC;
DWORD HandlerEndPC;
union {
mdToken ClassToken;
DWORD FilterOffset;
};
};
enum ReadyToRunRuntimeConstants : DWORD
{
READYTORUN_PInvokeTransitionFrameSizeInPointerUnits = 11,
#ifdef TARGET_X86
READYTORUN_ReversePInvokeTransitionFrameSizeInPointerUnits = 5,
#else
READYTORUN_ReversePInvokeTransitionFrameSizeInPointerUnits = 2,
#endif
};
enum ReadyToRunHFAElemType : DWORD
{
READYTORUN_HFA_ELEMTYPE_None = 0,
READYTORUN_HFA_ELEMTYPE_Float32 = 1,
READYTORUN_HFA_ELEMTYPE_Float64 = 2,
READYTORUN_HFA_ELEMTYPE_Vector64 = 3,
READYTORUN_HFA_ELEMTYPE_Vector128 = 4,
};
#endif // __READYTORUN_H__
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// readytorun.h
//
//
// Contains definitions for the Ready to Run file format
//
#ifndef __READYTORUN_H__
#define __READYTORUN_H__
#define READYTORUN_SIGNATURE 0x00525452 // 'RTR'
// Keep these in sync with src/coreclr/tools/Common/Internal/Runtime/ModuleHeaders.cs
#define READYTORUN_MAJOR_VERSION 0x0006
#define READYTORUN_MINOR_VERSION 0x0000
#define MINIMUM_READYTORUN_MAJOR_VERSION 0x006
// R2R Version 2.1 adds the InliningInfo section
// R2R Version 2.2 adds the ProfileDataInfo section
// R2R Version 3.0 changes calling conventions to correctly handle explicit structures to spec.
// R2R 3.0 is not backward compatible with 2.x.
// R2R Version 6.0 changes managed layout for sequential types with any unmanaged non-blittable fields.
// R2R 6.0 is not backward compatible with 5.x or earlier.
struct READYTORUN_CORE_HEADER
{
DWORD Flags; // READYTORUN_FLAG_XXX
DWORD NumberOfSections;
// Array of sections follows. The array entries are sorted by Type
// READYTORUN_SECTION Sections[];
};
struct READYTORUN_HEADER
{
DWORD Signature; // READYTORUN_SIGNATURE
USHORT MajorVersion; // READYTORUN_VERSION_XXX
USHORT MinorVersion;
READYTORUN_CORE_HEADER CoreHeader;
};
struct READYTORUN_COMPONENT_ASSEMBLIES_ENTRY
{
IMAGE_DATA_DIRECTORY CorHeader;
IMAGE_DATA_DIRECTORY ReadyToRunCoreHeader;
};
enum ReadyToRunFlag
{
READYTORUN_FLAG_PLATFORM_NEUTRAL_SOURCE = 0x00000001, // Set if the original IL assembly was platform-neutral
READYTORUN_FLAG_SKIP_TYPE_VALIDATION = 0x00000002, // Set of methods with native code was determined using profile data
READYTORUN_FLAG_PARTIAL = 0x00000004,
READYTORUN_FLAG_NONSHARED_PINVOKE_STUBS = 0x00000008, // PInvoke stubs compiled into image are non-shareable (no secret parameter)
READYTORUN_FLAG_EMBEDDED_MSIL = 0x00000010, // MSIL is embedded in the composite R2R executable
READYTORUN_FLAG_COMPONENT = 0x00000020, // This is the header describing a component assembly of composite R2R
};
enum class ReadyToRunSectionType : uint32_t
{
CompilerIdentifier = 100,
ImportSections = 101,
RuntimeFunctions = 102,
MethodDefEntryPoints = 103,
ExceptionInfo = 104,
DebugInfo = 105,
DelayLoadMethodCallThunks = 106,
// 107 used by an older format of AvailableTypes
AvailableTypes = 108,
InstanceMethodEntryPoints = 109,
InliningInfo = 110, // Added in V2.1, deprecated in 4.1
ProfileDataInfo = 111, // Added in V2.2
ManifestMetadata = 112, // Added in V2.3
AttributePresence = 113, // Added in V3.1
InliningInfo2 = 114, // Added in V4.1
ComponentAssemblies = 115, // Added in V4.1
OwnerCompositeExecutable = 116, // Added in V4.1
PgoInstrumentationData = 117, // Added in V5.2
ManifestAssemblyMvids = 118, // Added in V5.3
// If you add a new section consider whether it is a breaking or non-breaking change.
// Usually it is non-breaking, but if it is preferable to have older runtimes fail
// to load the image vs. ignoring the new section it could be marked breaking.
// Increment the READYTORUN_MINOR_VERSION (non-breaking) or READYTORUN_MAJOR_VERSION
// (breaking) as appropriate.
};
struct READYTORUN_SECTION
{
ReadyToRunSectionType Type; // READYTORUN_SECTION_XXX
IMAGE_DATA_DIRECTORY Section;
};
//
// READYTORUN_IMPORT_SECTION describes image range with references to code or runtime data structures
//
// There is number of different types of these ranges: eagerly initialized at image load vs. lazily initialized at method entry
// vs. lazily initialized on first use; handles vs. code pointers, etc.
//
struct READYTORUN_IMPORT_SECTION
{
IMAGE_DATA_DIRECTORY Section; // Section containing values to be fixed up
USHORT Flags; // One or more of ReadyToRunImportSectionFlags
BYTE Type; // One of ReadyToRunImportSectionType
BYTE EntrySize;
DWORD Signatures; // RVA of optional signature descriptors
DWORD AuxiliaryData; // RVA of optional auxiliary data (typically GC info)
};
enum ReadyToRunImportSectionType
{
READYTORUN_IMPORT_SECTION_TYPE_UNKNOWN = 0,
};
enum ReadyToRunImportSectionFlags
{
READYTORUN_IMPORT_SECTION_FLAGS_EAGER = 0x0001,
};
//
// Constants for method and field encoding
//
enum ReadyToRunMethodSigFlags
{
READYTORUN_METHOD_SIG_UnboxingStub = 0x01,
READYTORUN_METHOD_SIG_InstantiatingStub = 0x02,
READYTORUN_METHOD_SIG_MethodInstantiation = 0x04,
READYTORUN_METHOD_SIG_SlotInsteadOfToken = 0x08,
READYTORUN_METHOD_SIG_MemberRefToken = 0x10,
READYTORUN_METHOD_SIG_Constrained = 0x20,
READYTORUN_METHOD_SIG_OwnerType = 0x40,
READYTORUN_METHOD_SIG_UpdateContext = 0x80,
};
enum ReadyToRunFieldSigFlags
{
READYTORUN_FIELD_SIG_IndexInsteadOfToken = 0x08,
READYTORUN_FIELD_SIG_MemberRefToken = 0x10,
READYTORUN_FIELD_SIG_OwnerType = 0x40,
};
enum ReadyToRunTypeLayoutFlags
{
READYTORUN_LAYOUT_HFA = 0x01,
READYTORUN_LAYOUT_Alignment = 0x02,
READYTORUN_LAYOUT_Alignment_Native = 0x04,
READYTORUN_LAYOUT_GCLayout = 0x08,
READYTORUN_LAYOUT_GCLayout_Empty = 0x10,
};
enum ReadyToRunVirtualFunctionOverrideFlags
{
READYTORUN_VIRTUAL_OVERRIDE_None = 0x00,
READYTORUN_VIRTUAL_OVERRIDE_VirtualFunctionOverriden = 0x01,
};
//
// Constants for fixup signature encoding
//
enum ReadyToRunFixupKind
{
READYTORUN_FIXUP_ThisObjDictionaryLookup = 0x07,
READYTORUN_FIXUP_TypeDictionaryLookup = 0x08,
READYTORUN_FIXUP_MethodDictionaryLookup = 0x09,
READYTORUN_FIXUP_TypeHandle = 0x10,
READYTORUN_FIXUP_MethodHandle = 0x11,
READYTORUN_FIXUP_FieldHandle = 0x12,
READYTORUN_FIXUP_MethodEntry = 0x13, /* For calling a method entry point */
READYTORUN_FIXUP_MethodEntry_DefToken = 0x14, /* Smaller version of MethodEntry - method is def token */
READYTORUN_FIXUP_MethodEntry_RefToken = 0x15, /* Smaller version of MethodEntry - method is ref token */
READYTORUN_FIXUP_VirtualEntry = 0x16, /* For invoking a virtual method */
READYTORUN_FIXUP_VirtualEntry_DefToken = 0x17, /* Smaller version of VirtualEntry - method is def token */
READYTORUN_FIXUP_VirtualEntry_RefToken = 0x18, /* Smaller version of VirtualEntry - method is ref token */
READYTORUN_FIXUP_VirtualEntry_Slot = 0x19, /* Smaller version of VirtualEntry - type & slot */
READYTORUN_FIXUP_Helper = 0x1A, /* Helper */
READYTORUN_FIXUP_StringHandle = 0x1B, /* String handle */
READYTORUN_FIXUP_NewObject = 0x1C, /* Dynamically created new helper */
READYTORUN_FIXUP_NewArray = 0x1D,
READYTORUN_FIXUP_IsInstanceOf = 0x1E, /* Dynamically created casting helper */
READYTORUN_FIXUP_ChkCast = 0x1F,
READYTORUN_FIXUP_FieldAddress = 0x20, /* For accessing a cross-module static fields */
READYTORUN_FIXUP_CctorTrigger = 0x21, /* Static constructor trigger */
READYTORUN_FIXUP_StaticBaseNonGC = 0x22, /* Dynamically created static base helpers */
READYTORUN_FIXUP_StaticBaseGC = 0x23,
READYTORUN_FIXUP_ThreadStaticBaseNonGC = 0x24,
READYTORUN_FIXUP_ThreadStaticBaseGC = 0x25,
READYTORUN_FIXUP_FieldBaseOffset = 0x26, /* Field base offset */
READYTORUN_FIXUP_FieldOffset = 0x27, /* Field offset */
READYTORUN_FIXUP_TypeDictionary = 0x28,
READYTORUN_FIXUP_MethodDictionary = 0x29,
READYTORUN_FIXUP_Check_TypeLayout = 0x2A, /* size, alignment, HFA, reference map */
READYTORUN_FIXUP_Check_FieldOffset = 0x2B,
READYTORUN_FIXUP_DelegateCtor = 0x2C, /* optimized delegate ctor */
READYTORUN_FIXUP_DeclaringTypeHandle = 0x2D,
READYTORUN_FIXUP_IndirectPInvokeTarget = 0x2E, /* Target (indirect) of an inlined pinvoke */
READYTORUN_FIXUP_PInvokeTarget = 0x2F, /* Target of an inlined pinvoke */
READYTORUN_FIXUP_Check_InstructionSetSupport= 0x30, /* Define the set of instruction sets that must be supported/unsupported to use the fixup */
READYTORUN_FIXUP_Verify_FieldOffset = 0x31, /* Generate a runtime check to ensure that the field offset matches between compile and runtime. Unlike Check_FieldOffset, this will generate a runtime failure instead of silently dropping the method */
READYTORUN_FIXUP_Verify_TypeLayout = 0x32, /* Generate a runtime check to ensure that the type layout (size, alignment, HFA, reference map) matches between compile and runtime. Unlike Check_TypeLayout, this will generate a runtime failure instead of silently dropping the method */
READYTORUN_FIXUP_Check_VirtualFunctionOverride = 0x33, /* Generate a runtime check to ensure that virtual function resolution has equivalent behavior at runtime as at compile time. If not equivalent, code will not be used */
READYTORUN_FIXUP_Verify_VirtualFunctionOverride = 0x34, /* Generate a runtime check to ensure that virtual function resolution has equivalent behavior at runtime as at compile time. If not equivalent, generate runtime failure. */
};
//
// Intrinsics and helpers
//
enum ReadyToRunHelper
{
READYTORUN_HELPER_Invalid = 0x00,
// Not a real helper - handle to current module passed to delay load helpers.
READYTORUN_HELPER_Module = 0x01,
READYTORUN_HELPER_GSCookie = 0x02,
READYTORUN_HELPER_IndirectTrapThreads = 0x03,
//
// Delay load helpers
//
// All delay load helpers use custom calling convention:
// - scratch register - address of indirection cell. 0 = address is inferred from callsite.
// - stack - section index, module handle
READYTORUN_HELPER_DelayLoad_MethodCall = 0x08,
READYTORUN_HELPER_DelayLoad_Helper = 0x10,
READYTORUN_HELPER_DelayLoad_Helper_Obj = 0x11,
READYTORUN_HELPER_DelayLoad_Helper_ObjObj = 0x12,
// JIT helpers
// Exception handling helpers
READYTORUN_HELPER_Throw = 0x20,
READYTORUN_HELPER_Rethrow = 0x21,
READYTORUN_HELPER_Overflow = 0x22,
READYTORUN_HELPER_RngChkFail = 0x23,
READYTORUN_HELPER_FailFast = 0x24,
READYTORUN_HELPER_ThrowNullRef = 0x25,
READYTORUN_HELPER_ThrowDivZero = 0x26,
// Write barriers
READYTORUN_HELPER_WriteBarrier = 0x30,
READYTORUN_HELPER_CheckedWriteBarrier = 0x31,
READYTORUN_HELPER_ByRefWriteBarrier = 0x32,
// Array helpers
READYTORUN_HELPER_Stelem_Ref = 0x38,
READYTORUN_HELPER_Ldelema_Ref = 0x39,
READYTORUN_HELPER_MemSet = 0x40,
READYTORUN_HELPER_MemCpy = 0x41,
// PInvoke helpers
READYTORUN_HELPER_PInvokeBegin = 0x42,
READYTORUN_HELPER_PInvokeEnd = 0x43,
READYTORUN_HELPER_GCPoll = 0x44,
READYTORUN_HELPER_ReversePInvokeEnter = 0x45,
READYTORUN_HELPER_ReversePInvokeExit = 0x46,
// Get string handle lazily
READYTORUN_HELPER_GetString = 0x50,
// Used by /Tuning for Profile optimizations
READYTORUN_HELPER_LogMethodEnter = 0x51,
// Reflection helpers
READYTORUN_HELPER_GetRuntimeTypeHandle = 0x54,
READYTORUN_HELPER_GetRuntimeMethodHandle = 0x55,
READYTORUN_HELPER_GetRuntimeFieldHandle = 0x56,
READYTORUN_HELPER_Box = 0x58,
READYTORUN_HELPER_Box_Nullable = 0x59,
READYTORUN_HELPER_Unbox = 0x5A,
READYTORUN_HELPER_Unbox_Nullable = 0x5B,
READYTORUN_HELPER_NewMultiDimArr = 0x5C,
// Helpers used with generic handle lookup cases
READYTORUN_HELPER_NewObject = 0x60,
READYTORUN_HELPER_NewArray = 0x61,
READYTORUN_HELPER_CheckCastAny = 0x62,
READYTORUN_HELPER_CheckInstanceAny = 0x63,
READYTORUN_HELPER_GenericGcStaticBase = 0x64,
READYTORUN_HELPER_GenericNonGcStaticBase = 0x65,
READYTORUN_HELPER_GenericGcTlsBase = 0x66,
READYTORUN_HELPER_GenericNonGcTlsBase = 0x67,
READYTORUN_HELPER_VirtualFuncPtr = 0x68,
// Long mul/div/shift ops
READYTORUN_HELPER_LMul = 0xC0,
READYTORUN_HELPER_LMulOfv = 0xC1,
READYTORUN_HELPER_ULMulOvf = 0xC2,
READYTORUN_HELPER_LDiv = 0xC3,
READYTORUN_HELPER_LMod = 0xC4,
READYTORUN_HELPER_ULDiv = 0xC5,
READYTORUN_HELPER_ULMod = 0xC6,
READYTORUN_HELPER_LLsh = 0xC7,
READYTORUN_HELPER_LRsh = 0xC8,
READYTORUN_HELPER_LRsz = 0xC9,
READYTORUN_HELPER_Lng2Dbl = 0xCA,
READYTORUN_HELPER_ULng2Dbl = 0xCB,
// 32-bit division helpers
READYTORUN_HELPER_Div = 0xCC,
READYTORUN_HELPER_Mod = 0xCD,
READYTORUN_HELPER_UDiv = 0xCE,
READYTORUN_HELPER_UMod = 0xCF,
// Floating point conversions
READYTORUN_HELPER_Dbl2Int = 0xD0,
READYTORUN_HELPER_Dbl2IntOvf = 0xD1,
READYTORUN_HELPER_Dbl2Lng = 0xD2,
READYTORUN_HELPER_Dbl2LngOvf = 0xD3,
READYTORUN_HELPER_Dbl2UInt = 0xD4,
READYTORUN_HELPER_Dbl2UIntOvf = 0xD5,
READYTORUN_HELPER_Dbl2ULng = 0xD6,
READYTORUN_HELPER_Dbl2ULngOvf = 0xD7,
// Floating point ops
READYTORUN_HELPER_DblRem = 0xE0,
READYTORUN_HELPER_FltRem = 0xE1,
READYTORUN_HELPER_DblRound = 0xE2,
READYTORUN_HELPER_FltRound = 0xE3,
#ifdef FEATURE_EH_FUNCLETS
// Personality rountines
READYTORUN_HELPER_PersonalityRoutine = 0xF0,
READYTORUN_HELPER_PersonalityRoutineFilterFunclet = 0xF1,
#endif
// Synchronized methods
READYTORUN_HELPER_MonitorEnter = 0xF8,
READYTORUN_HELPER_MonitorExit = 0xF9,
//
// Deprecated/legacy
//
// JIT32 x86-specific write barriers
READYTORUN_HELPER_WriteBarrier_EAX = 0x100,
READYTORUN_HELPER_WriteBarrier_EBX = 0x101,
READYTORUN_HELPER_WriteBarrier_ECX = 0x102,
READYTORUN_HELPER_WriteBarrier_ESI = 0x103,
READYTORUN_HELPER_WriteBarrier_EDI = 0x104,
READYTORUN_HELPER_WriteBarrier_EBP = 0x105,
READYTORUN_HELPER_CheckedWriteBarrier_EAX = 0x106,
READYTORUN_HELPER_CheckedWriteBarrier_EBX = 0x107,
READYTORUN_HELPER_CheckedWriteBarrier_ECX = 0x108,
READYTORUN_HELPER_CheckedWriteBarrier_ESI = 0x109,
READYTORUN_HELPER_CheckedWriteBarrier_EDI = 0x10A,
READYTORUN_HELPER_CheckedWriteBarrier_EBP = 0x10B,
// JIT32 x86-specific exception handling
READYTORUN_HELPER_EndCatch = 0x110,
// Stack probing helper
READYTORUN_HELPER_StackProbe = 0x111,
READYTORUN_HELPER_GetCurrentManagedThreadId = 0x112,
};
#include "readytoruninstructionset.h"
//
// Exception info
//
struct READYTORUN_EXCEPTION_LOOKUP_TABLE_ENTRY
{
DWORD MethodStart;
DWORD ExceptionInfo;
};
struct READYTORUN_EXCEPTION_CLAUSE
{
CorExceptionFlag Flags;
DWORD TryStartPC;
DWORD TryEndPC;
DWORD HandlerStartPC;
DWORD HandlerEndPC;
union {
mdToken ClassToken;
DWORD FilterOffset;
};
};
enum ReadyToRunRuntimeConstants : DWORD
{
READYTORUN_PInvokeTransitionFrameSizeInPointerUnits = 11,
#ifdef TARGET_X86
READYTORUN_ReversePInvokeTransitionFrameSizeInPointerUnits = 5,
#else
READYTORUN_ReversePInvokeTransitionFrameSizeInPointerUnits = 2,
#endif
};
enum ReadyToRunHFAElemType : DWORD
{
READYTORUN_HFA_ELEMTYPE_None = 0,
READYTORUN_HFA_ELEMTYPE_Float32 = 1,
READYTORUN_HFA_ELEMTYPE_Float64 = 2,
READYTORUN_HFA_ELEMTYPE_Vector64 = 3,
READYTORUN_HFA_ELEMTYPE_Vector128 = 4,
};
#endif // __READYTORUN_H__
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/mono/mono/utils/memfuncs.c | /**
* \file
* Our own bzero/memmove.
*
* Copyright (C) 2013-2015 Xamarin Inc
*
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
/*
* SGen cannot deal with invalid pointers on the heap or in registered roots. Sometimes we
* need to copy or zero out memory in code that might be interrupted by collections. To
* guarantee that those operations will not result in invalid pointers, we must do it
* word-atomically.
*
* libc's bzero() and memcpy()/memmove() functions do not guarantee word-atomicity, even in
* cases where one would assume so. For instance, some implementations (like Darwin's on
* x86) have variants of memcpy() using vector instructions. Those may copy bytewise for
* the region preceding the first vector-aligned address. That region could be
* word-aligned, but it would still be copied byte-wise.
*
* All our memory writes here are to "volatile" locations. This is so that C compilers
* don't "optimize" our code back to calls to bzero()/memmove(). LLVM, specifically, will
* do that.
*/
#include <config.h>
#include <glib.h>
#include <string.h>
#if defined (__APPLE__)
#include <mach/message.h>
#include <mach/mach_host.h>
#include <mach/host_info.h>
#include <sys/sysctl.h>
#endif
#if defined (__NetBSD__)
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#endif
#if defined(TARGET_WIN32)
#include <windows.h>
#endif
#include "memfuncs.h"
#define ptr_mask ((sizeof (void*) - 1))
#define _toi(ptr) ((size_t)ptr)
#define unaligned_bytes(ptr) (_toi(ptr) & ptr_mask)
#define align_down(ptr) ((void*)(_toi(ptr) & ~ptr_mask))
#define align_up(ptr) ((void*) ((_toi(ptr) + ptr_mask) & ~ptr_mask))
#if SIZEOF_VOID_P == 4
#define bytes_to_words(n) ((size_t)(n) >> 2)
#elif SIZEOF_VOID_P == 8
#define bytes_to_words(n) ((size_t)(n) >> 3)
#else
#error We only support 32 and 64 bit architectures.
#endif
#define BZERO_WORDS(dest,words) do { \
void * volatile *__d = (void* volatile*)(dest); \
size_t __n = (words); \
size_t __i; \
for (__i = 0; __i < __n; ++__i) \
__d [__i] = NULL; \
} while (0)
/**
* mono_gc_bzero_aligned:
* \param dest address to start to clear
* \param size size of the region to clear
*
* Zero \p size bytes starting at \p dest.
* The address of \p dest MUST be aligned to word boundaries
*
* FIXME borrow faster code from some BSD libc or bionic
*/
void
mono_gc_bzero_aligned (void *dest, size_t size)
{
volatile char *d = (char*)dest;
size_t tail_bytes, word_bytes;
g_assert (unaligned_bytes (dest) == 0);
/* copy all words with memmove */
word_bytes = (size_t)align_down (size);
switch (word_bytes) {
case sizeof (void*) * 1:
BZERO_WORDS (d, 1);
break;
case sizeof (void*) * 2:
BZERO_WORDS (d, 2);
break;
case sizeof (void*) * 3:
BZERO_WORDS (d, 3);
break;
case sizeof (void*) * 4:
BZERO_WORDS (d, 4);
break;
default:
BZERO_WORDS (d, bytes_to_words (word_bytes));
}
tail_bytes = unaligned_bytes (size);
if (tail_bytes) {
d += word_bytes;
do {
*d++ = 0;
} while (--tail_bytes);
}
}
/**
* mono_gc_bzero_atomic:
* \param dest address to start to clear
* \param size size of the region to clear
*
* Zero \p size bytes starting at \p dest.
*
* Use this to zero memory without word tearing when \p dest is aligned.
*/
void
mono_gc_bzero_atomic (void *dest, size_t size)
{
if (unaligned_bytes (dest))
memset (dest, 0, size);
else
mono_gc_bzero_aligned (dest, size);
}
#define MEMMOVE_WORDS_UPWARD(dest,src,words) do { \
void * volatile *__d = (void* volatile*)(dest); \
void **__s = (void**)(src); \
size_t __n = (words); \
size_t __i; \
for (__i = 0; __i < __n; ++__i) \
__d [__i] = __s [__i]; \
} while (0)
#define MEMMOVE_WORDS_DOWNWARD(dest,src,words) do { \
void * volatile *__d = (void* volatile*)(dest); \
void **__s = (void**)(src); \
size_t __n = (words); \
size_t __i; \
for (__i = __n; __i-- > 0;) \
__d [__i] = __s [__i]; \
} while (0)
/**
* mono_gc_memmove_aligned:
* \param dest destination of the move
* \param src source
* \param size size of the block to move
*
* Move \p size bytes from \p src to \p dest.
*
* Use this to copy memory without word tearing when both pointers are aligned
*/
void
mono_gc_memmove_aligned (void *dest, const void *src, size_t size)
{
g_assert (unaligned_bytes (dest) == 0);
g_assert (unaligned_bytes (src) == 0);
/*
If we're copying less than a word we don't need to worry about word tearing
so we bailout to memmove early.
*/
if (size < sizeof(void*)) {
memmove (dest, src, size);
return;
}
/*
* A bit of explanation on why we align only dest before doing word copies.
* Pointers to managed objects must always be stored in word aligned addresses, so
* even if dest is misaligned, src will be by the same amount - this ensure proper atomicity of reads.
*
* We don't need to case when source and destination have different alignments since we only do word stores
* using memmove, which must handle it.
*/
if (dest > src && ((size_t)((char*)dest - (char*)src) < size)) { /*backward copy*/
volatile char *p = (char*)dest + size;
char *s = (char*)src + size;
char *start = (char*)dest;
char *align_end = MAX((char*)dest, (char*)align_down (p));
char *word_start;
size_t bytes_to_memmove;
while (p > align_end)
*--p = *--s;
word_start = (char *)align_up (start);
bytes_to_memmove = p - word_start;
p -= bytes_to_memmove;
s -= bytes_to_memmove;
MEMMOVE_WORDS_DOWNWARD (p, s, bytes_to_words (bytes_to_memmove));
} else {
volatile char *d = (char*)dest;
const char *s = (const char*)src;
size_t tail_bytes;
/* copy all words with memmove */
MEMMOVE_WORDS_UPWARD (d, s, bytes_to_words (align_down (size)));
tail_bytes = unaligned_bytes (size);
if (tail_bytes) {
d += (size_t)align_down (size);
s += (size_t)align_down (size);
do {
*d++ = *s++;
} while (--tail_bytes);
}
}
}
/**
* mono_gc_memmove_atomic:
* \param dest destination of the move
* \param src source
* \param size size of the block to move
*
* Move \p size bytes from \p src to \p dest.
*
* Use this to copy memory without word tearing when both pointers are aligned
*/
void
mono_gc_memmove_atomic (void *dest, const void *src, size_t size)
{
if (unaligned_bytes (_toi (dest) | _toi (src)))
memmove (dest, src, size);
else
mono_gc_memmove_aligned (dest, src, size);
}
#define _DEFAULT_MEM_SIZE 134217728
guint64
mono_determine_physical_ram_size (void)
{
#if defined (TARGET_WIN32)
MEMORYSTATUSEX memstat;
memstat.dwLength = sizeof (memstat);
GlobalMemoryStatusEx (&memstat);
return (guint64)memstat.ullTotalPhys;
#elif defined (__NetBSD__) || defined (__APPLE__)
#ifdef __NetBSD__
unsigned long value;
#else
guint64 value;
#endif
int mib[2] = {
CTL_HW,
#ifdef __NetBSD__
HW_PHYSMEM64
#else
HW_MEMSIZE
#endif
};
size_t size_sys = sizeof (value);
sysctl (mib, 2, &value, &size_sys, NULL, 0);
if (value == 0)
return _DEFAULT_MEM_SIZE;
return (guint64)value;
#elif defined (HAVE_SYSCONF)
gint64 page_size = -1, num_pages = -1;
/* sysconf works on most *NIX operating systems, if your system doesn't have it or if it
* reports invalid values, please add your OS specific code below. */
#ifdef _SC_PAGESIZE
page_size = (gint64)sysconf (_SC_PAGESIZE);
#endif
#ifdef _SC_PHYS_PAGES
num_pages = (gint64)sysconf (_SC_PHYS_PAGES);
#endif
if (page_size == -1 || num_pages == -1) {
g_warning ("Your operating system's sysconf (3) function doesn't correctly report physical memory size!");
return _DEFAULT_MEM_SIZE;
}
return (guint64)page_size * (guint64)num_pages;
#else
return _DEFAULT_MEM_SIZE;
#endif
}
guint64
mono_determine_physical_ram_available_size (void)
{
#if defined (TARGET_WIN32)
MEMORYSTATUSEX memstat;
memstat.dwLength = sizeof (memstat);
GlobalMemoryStatusEx (&memstat);
return (guint64)memstat.ullAvailPhys;
#elif defined (__NetBSD__)
struct vmtotal vm_total;
guint64 page_size;
int mib[2];
size_t len;
mib[0] = CTL_VM;
mib[1] = VM_METER;
len = sizeof (vm_total);
sysctl (mib, 2, &vm_total, &len, NULL, 0);
mib[0] = CTL_HW;
mib[1] = HW_PAGESIZE;
len = sizeof (page_size);
sysctl (mib, 2, &page_size, &len, NULL, 0);
return ((guint64) vm_total.t_free * page_size) / 1024;
#elif defined (__APPLE__)
mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
mach_port_t host = mach_host_self ();
vm_size_t page_size;
vm_statistics_data_t vmstat;
kern_return_t ret;
do {
ret = host_statistics (host, HOST_VM_INFO, (host_info_t)&vmstat, &count);
} while (ret == KERN_ABORTED);
if (ret != KERN_SUCCESS) {
g_warning ("Mono was unable to retrieve memory usage!");
return 0;
}
host_page_size (host, &page_size);
return (guint64) vmstat.free_count * page_size;
#elif defined (HAVE_SYSCONF)
gint64 page_size = -1, num_pages = -1;
/* sysconf works on most *NIX operating systems, if your system doesn't have it or if it
* reports invalid values, please add your OS specific code below. */
#ifdef _SC_PAGESIZE
page_size = (gint64)sysconf (_SC_PAGESIZE);
#endif
#ifdef _SC_AVPHYS_PAGES
num_pages = (gint64)sysconf (_SC_AVPHYS_PAGES);
#endif
if (page_size == -1 || num_pages == -1) {
g_warning ("Your operating system's sysconf (3) function doesn't correctly report physical memory size!");
return _DEFAULT_MEM_SIZE;
}
return (guint64)page_size * (guint64)num_pages;
#else
return _DEFAULT_MEM_SIZE;
#endif
}
| /**
* \file
* Our own bzero/memmove.
*
* Copyright (C) 2013-2015 Xamarin Inc
*
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
/*
* SGen cannot deal with invalid pointers on the heap or in registered roots. Sometimes we
* need to copy or zero out memory in code that might be interrupted by collections. To
* guarantee that those operations will not result in invalid pointers, we must do it
* word-atomically.
*
* libc's bzero() and memcpy()/memmove() functions do not guarantee word-atomicity, even in
* cases where one would assume so. For instance, some implementations (like Darwin's on
* x86) have variants of memcpy() using vector instructions. Those may copy bytewise for
* the region preceding the first vector-aligned address. That region could be
* word-aligned, but it would still be copied byte-wise.
*
* All our memory writes here are to "volatile" locations. This is so that C compilers
* don't "optimize" our code back to calls to bzero()/memmove(). LLVM, specifically, will
* do that.
*/
#include <config.h>
#include <glib.h>
#include <string.h>
#if defined (__APPLE__)
#include <mach/message.h>
#include <mach/mach_host.h>
#include <mach/host_info.h>
#include <sys/sysctl.h>
#endif
#if defined (__NetBSD__)
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#endif
#if defined(TARGET_WIN32)
#include <windows.h>
#endif
#include "memfuncs.h"
#define ptr_mask ((sizeof (void*) - 1))
#define _toi(ptr) ((size_t)ptr)
#define unaligned_bytes(ptr) (_toi(ptr) & ptr_mask)
#define align_down(ptr) ((void*)(_toi(ptr) & ~ptr_mask))
#define align_up(ptr) ((void*) ((_toi(ptr) + ptr_mask) & ~ptr_mask))
#if SIZEOF_VOID_P == 4
#define bytes_to_words(n) ((size_t)(n) >> 2)
#elif SIZEOF_VOID_P == 8
#define bytes_to_words(n) ((size_t)(n) >> 3)
#else
#error We only support 32 and 64 bit architectures.
#endif
#define BZERO_WORDS(dest,words) do { \
void * volatile *__d = (void* volatile*)(dest); \
size_t __n = (words); \
size_t __i; \
for (__i = 0; __i < __n; ++__i) \
__d [__i] = NULL; \
} while (0)
/**
* mono_gc_bzero_aligned:
* \param dest address to start to clear
* \param size size of the region to clear
*
* Zero \p size bytes starting at \p dest.
* The address of \p dest MUST be aligned to word boundaries
*
* FIXME borrow faster code from some BSD libc or bionic
*/
void
mono_gc_bzero_aligned (void *dest, size_t size)
{
volatile char *d = (char*)dest;
size_t tail_bytes, word_bytes;
g_assert (unaligned_bytes (dest) == 0);
/* copy all words with memmove */
word_bytes = (size_t)align_down (size);
switch (word_bytes) {
case sizeof (void*) * 1:
BZERO_WORDS (d, 1);
break;
case sizeof (void*) * 2:
BZERO_WORDS (d, 2);
break;
case sizeof (void*) * 3:
BZERO_WORDS (d, 3);
break;
case sizeof (void*) * 4:
BZERO_WORDS (d, 4);
break;
default:
BZERO_WORDS (d, bytes_to_words (word_bytes));
}
tail_bytes = unaligned_bytes (size);
if (tail_bytes) {
d += word_bytes;
do {
*d++ = 0;
} while (--tail_bytes);
}
}
/**
* mono_gc_bzero_atomic:
* \param dest address to start to clear
* \param size size of the region to clear
*
* Zero \p size bytes starting at \p dest.
*
* Use this to zero memory without word tearing when \p dest is aligned.
*/
void
mono_gc_bzero_atomic (void *dest, size_t size)
{
if (unaligned_bytes (dest))
memset (dest, 0, size);
else
mono_gc_bzero_aligned (dest, size);
}
#define MEMMOVE_WORDS_UPWARD(dest,src,words) do { \
void * volatile *__d = (void* volatile*)(dest); \
void **__s = (void**)(src); \
size_t __n = (words); \
size_t __i; \
for (__i = 0; __i < __n; ++__i) \
__d [__i] = __s [__i]; \
} while (0)
#define MEMMOVE_WORDS_DOWNWARD(dest,src,words) do { \
void * volatile *__d = (void* volatile*)(dest); \
void **__s = (void**)(src); \
size_t __n = (words); \
size_t __i; \
for (__i = __n; __i-- > 0;) \
__d [__i] = __s [__i]; \
} while (0)
/**
* mono_gc_memmove_aligned:
* \param dest destination of the move
* \param src source
* \param size size of the block to move
*
* Move \p size bytes from \p src to \p dest.
*
* Use this to copy memory without word tearing when both pointers are aligned
*/
void
mono_gc_memmove_aligned (void *dest, const void *src, size_t size)
{
g_assert (unaligned_bytes (dest) == 0);
g_assert (unaligned_bytes (src) == 0);
/*
If we're copying less than a word we don't need to worry about word tearing
so we bailout to memmove early.
*/
if (size < sizeof(void*)) {
memmove (dest, src, size);
return;
}
/*
* A bit of explanation on why we align only dest before doing word copies.
* Pointers to managed objects must always be stored in word aligned addresses, so
* even if dest is misaligned, src will be by the same amount - this ensure proper atomicity of reads.
*
* We don't need to case when source and destination have different alignments since we only do word stores
* using memmove, which must handle it.
*/
if (dest > src && ((size_t)((char*)dest - (char*)src) < size)) { /*backward copy*/
volatile char *p = (char*)dest + size;
char *s = (char*)src + size;
char *start = (char*)dest;
char *align_end = MAX((char*)dest, (char*)align_down (p));
char *word_start;
size_t bytes_to_memmove;
while (p > align_end)
*--p = *--s;
word_start = (char *)align_up (start);
bytes_to_memmove = p - word_start;
p -= bytes_to_memmove;
s -= bytes_to_memmove;
MEMMOVE_WORDS_DOWNWARD (p, s, bytes_to_words (bytes_to_memmove));
} else {
volatile char *d = (char*)dest;
const char *s = (const char*)src;
size_t tail_bytes;
/* copy all words with memmove */
MEMMOVE_WORDS_UPWARD (d, s, bytes_to_words (align_down (size)));
tail_bytes = unaligned_bytes (size);
if (tail_bytes) {
d += (size_t)align_down (size);
s += (size_t)align_down (size);
do {
*d++ = *s++;
} while (--tail_bytes);
}
}
}
/**
* mono_gc_memmove_atomic:
* \param dest destination of the move
* \param src source
* \param size size of the block to move
*
* Move \p size bytes from \p src to \p dest.
*
* Use this to copy memory without word tearing when both pointers are aligned
*/
void
mono_gc_memmove_atomic (void *dest, const void *src, size_t size)
{
if (unaligned_bytes (_toi (dest) | _toi (src)))
memmove (dest, src, size);
else
mono_gc_memmove_aligned (dest, src, size);
}
#define _DEFAULT_MEM_SIZE 134217728
guint64
mono_determine_physical_ram_size (void)
{
#if defined (TARGET_WIN32)
MEMORYSTATUSEX memstat;
memstat.dwLength = sizeof (memstat);
GlobalMemoryStatusEx (&memstat);
return (guint64)memstat.ullTotalPhys;
#elif defined (__NetBSD__) || defined (__APPLE__)
#ifdef __NetBSD__
unsigned long value;
#else
guint64 value;
#endif
int mib[2] = {
CTL_HW,
#ifdef __NetBSD__
HW_PHYSMEM64
#else
HW_MEMSIZE
#endif
};
size_t size_sys = sizeof (value);
sysctl (mib, 2, &value, &size_sys, NULL, 0);
if (value == 0)
return _DEFAULT_MEM_SIZE;
return (guint64)value;
#elif defined (HAVE_SYSCONF)
gint64 page_size = -1, num_pages = -1;
/* sysconf works on most *NIX operating systems, if your system doesn't have it or if it
* reports invalid values, please add your OS specific code below. */
#ifdef _SC_PAGESIZE
page_size = (gint64)sysconf (_SC_PAGESIZE);
#endif
#ifdef _SC_PHYS_PAGES
num_pages = (gint64)sysconf (_SC_PHYS_PAGES);
#endif
if (page_size == -1 || num_pages == -1) {
g_warning ("Your operating system's sysconf (3) function doesn't correctly report physical memory size!");
return _DEFAULT_MEM_SIZE;
}
return (guint64)page_size * (guint64)num_pages;
#else
return _DEFAULT_MEM_SIZE;
#endif
}
guint64
mono_determine_physical_ram_available_size (void)
{
#if defined (TARGET_WIN32)
MEMORYSTATUSEX memstat;
memstat.dwLength = sizeof (memstat);
GlobalMemoryStatusEx (&memstat);
return (guint64)memstat.ullAvailPhys;
#elif defined (__NetBSD__)
struct vmtotal vm_total;
guint64 page_size;
int mib[2];
size_t len;
mib[0] = CTL_VM;
mib[1] = VM_METER;
len = sizeof (vm_total);
sysctl (mib, 2, &vm_total, &len, NULL, 0);
mib[0] = CTL_HW;
mib[1] = HW_PAGESIZE;
len = sizeof (page_size);
sysctl (mib, 2, &page_size, &len, NULL, 0);
return ((guint64) vm_total.t_free * page_size) / 1024;
#elif defined (__APPLE__)
mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
mach_port_t host = mach_host_self ();
vm_size_t page_size;
vm_statistics_data_t vmstat;
kern_return_t ret;
do {
ret = host_statistics (host, HOST_VM_INFO, (host_info_t)&vmstat, &count);
} while (ret == KERN_ABORTED);
if (ret != KERN_SUCCESS) {
g_warning ("Mono was unable to retrieve memory usage!");
return 0;
}
host_page_size (host, &page_size);
return (guint64) vmstat.free_count * page_size;
#elif defined (HAVE_SYSCONF)
gint64 page_size = -1, num_pages = -1;
/* sysconf works on most *NIX operating systems, if your system doesn't have it or if it
* reports invalid values, please add your OS specific code below. */
#ifdef _SC_PAGESIZE
page_size = (gint64)sysconf (_SC_PAGESIZE);
#endif
#ifdef _SC_AVPHYS_PAGES
num_pages = (gint64)sysconf (_SC_AVPHYS_PAGES);
#endif
if (page_size == -1 || num_pages == -1) {
g_warning ("Your operating system's sysconf (3) function doesn't correctly report physical memory size!");
return _DEFAULT_MEM_SIZE;
}
return (guint64)page_size * (guint64)num_pages;
#else
return _DEFAULT_MEM_SIZE;
#endif
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/palrt/common.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// common.h
//
//
// Common include file for the palrt code.
//*****************************************************************************
#ifndef _COMMON_H_
#define _COMMON_H_
#include <switches.h>
#include <winwrap.h>
#include "shlwapip.h"
#include <minipal/utils.h>
#endif // _COMMON_H_
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// common.h
//
//
// Common include file for the palrt code.
//*****************************************************************************
#ifndef _COMMON_H_
#define _COMMON_H_
#include <switches.h>
#include <winwrap.h>
#include "shlwapip.h"
#include <minipal/utils.h>
#endif // _COMMON_H_
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/native/libs/System.Native/entrypoints.c | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <minipal/entrypoints.h>
// Include System.Native headers
#include "pal_autoreleasepool.h"
#include "pal_console.h"
#include "pal_datetime.h"
#include "pal_dynamicload.h"
#include "pal_environment.h"
#include "pal_errno.h"
#include "pal_interfaceaddresses.h"
#include "pal_io.h"
#include "pal_iossupportversion.h"
#include "pal_log.h"
#include "pal_memory.h"
#include "pal_mount.h"
#include "pal_networkchange.h"
#include "pal_networking.h"
#include "pal_networkstatistics.h"
#include "pal_process.h"
#include "pal_random.h"
#include "pal_runtimeinformation.h"
#include "pal_searchpath.h"
#include "pal_signal.h"
#include "pal_string.h"
#include "pal_sysctl.h"
#include "pal_tcpstate.h"
#include "pal_threading.h"
#include "pal_time.h"
#include "pal_uid.h"
static const Entry s_sysNative[] =
{
DllImportEntry(SystemNative_FStat)
DllImportEntry(SystemNative_GetWindowSize)
DllImportEntry(SystemNative_IsATty)
DllImportEntry(SystemNative_InitializeTerminalAndSignalHandling)
DllImportEntry(SystemNative_SetKeypadXmit)
DllImportEntry(SystemNative_GetControlCharacters)
DllImportEntry(SystemNative_StdinReady)
DllImportEntry(SystemNative_InitializeConsoleBeforeRead)
DllImportEntry(SystemNative_UninitializeConsoleAfterRead)
DllImportEntry(SystemNative_ConfigureTerminalForChildProcess)
DllImportEntry(SystemNative_ReadStdin)
DllImportEntry(SystemNative_GetSignalForBreak)
DllImportEntry(SystemNative_SetSignalForBreak)
DllImportEntry(SystemNative_GetSystemTimeAsTicks)
DllImportEntry(SystemNative_ConvertErrorPlatformToPal)
DllImportEntry(SystemNative_ConvertErrorPalToPlatform)
DllImportEntry(SystemNative_StrErrorR)
DllImportEntry(SystemNative_EnumerateInterfaceAddresses)
DllImportEntry(SystemNative_GetNetworkInterfaces)
DllImportEntry(SystemNative_EnumerateGatewayAddressesForInterface)
DllImportEntry(SystemNative_Stat)
DllImportEntry(SystemNative_LStat)
DllImportEntry(SystemNative_Open)
DllImportEntry(SystemNative_Close)
DllImportEntry(SystemNative_Dup)
DllImportEntry(SystemNative_Unlink)
DllImportEntry(SystemNative_ShmOpen)
DllImportEntry(SystemNative_ShmUnlink)
DllImportEntry(SystemNative_GetReadDirRBufferSize)
DllImportEntry(SystemNative_ReadDirR)
DllImportEntry(SystemNative_OpenDir)
DllImportEntry(SystemNative_CloseDir)
DllImportEntry(SystemNative_Pipe)
DllImportEntry(SystemNative_FcntlSetFD)
DllImportEntry(SystemNative_FcntlGetFD)
DllImportEntry(SystemNative_FcntlCanGetSetPipeSz)
DllImportEntry(SystemNative_FcntlGetPipeSz)
DllImportEntry(SystemNative_FcntlSetPipeSz)
DllImportEntry(SystemNative_FcntlSetIsNonBlocking)
DllImportEntry(SystemNative_FcntlGetIsNonBlocking)
DllImportEntry(SystemNative_MkDir)
DllImportEntry(SystemNative_ChMod)
DllImportEntry(SystemNative_FChMod)
DllImportEntry(SystemNative_FSync)
DllImportEntry(SystemNative_FLock)
DllImportEntry(SystemNative_ChDir)
DllImportEntry(SystemNative_Access)
DllImportEntry(SystemNative_LSeek)
DllImportEntry(SystemNative_Link)
DllImportEntry(SystemNative_SymLink)
DllImportEntry(SystemNative_MksTemps)
DllImportEntry(SystemNative_MMap)
DllImportEntry(SystemNative_MUnmap)
DllImportEntry(SystemNative_MAdvise)
DllImportEntry(SystemNative_MSync)
DllImportEntry(SystemNative_SysConf)
DllImportEntry(SystemNative_FTruncate)
DllImportEntry(SystemNative_Poll)
DllImportEntry(SystemNative_PosixFAdvise)
DllImportEntry(SystemNative_FAllocate)
DllImportEntry(SystemNative_Read)
DllImportEntry(SystemNative_ReadLink)
DllImportEntry(SystemNative_Rename)
DllImportEntry(SystemNative_RmDir)
DllImportEntry(SystemNative_Sync)
DllImportEntry(SystemNative_Write)
DllImportEntry(SystemNative_CopyFile)
DllImportEntry(SystemNative_INotifyInit)
DllImportEntry(SystemNative_INotifyAddWatch)
DllImportEntry(SystemNative_INotifyRemoveWatch)
DllImportEntry(SystemNative_RealPath)
DllImportEntry(SystemNative_GetPeerID)
DllImportEntry(SystemNative_GetFileSystemType)
DllImportEntry(SystemNative_LockFileRegion)
DllImportEntry(SystemNative_LChflags)
DllImportEntry(SystemNative_LChflagsCanSetHiddenFlag)
DllImportEntry(SystemNative_CanGetHiddenFlag)
DllImportEntry(SystemNative_ReadProcessStatusInfo)
DllImportEntry(SystemNative_Log)
DllImportEntry(SystemNative_LogError)
DllImportEntry(SystemNative_AlignedAlloc)
DllImportEntry(SystemNative_AlignedFree)
DllImportEntry(SystemNative_AlignedRealloc)
DllImportEntry(SystemNative_Calloc)
DllImportEntry(SystemNative_Free)
DllImportEntry(SystemNative_Malloc)
DllImportEntry(SystemNative_MemSet)
DllImportEntry(SystemNative_Realloc)
DllImportEntry(SystemNative_GetSpaceInfoForMountPoint)
DllImportEntry(SystemNative_GetFormatInfoForMountPoint)
DllImportEntry(SystemNative_GetAllMountPoints)
DllImportEntry(SystemNative_ReadEvents)
DllImportEntry(SystemNative_CreateNetworkChangeListenerSocket)
DllImportEntry(SystemNative_GetHostEntryForName)
DllImportEntry(SystemNative_FreeHostEntry)
DllImportEntry(SystemNative_GetNameInfo)
DllImportEntry(SystemNative_GetDomainName)
DllImportEntry(SystemNative_GetHostName)
DllImportEntry(SystemNative_GetIPSocketAddressSizes)
DllImportEntry(SystemNative_GetAddressFamily)
DllImportEntry(SystemNative_SetAddressFamily)
DllImportEntry(SystemNative_GetPort)
DllImportEntry(SystemNative_SetPort)
DllImportEntry(SystemNative_GetIPv4Address)
DllImportEntry(SystemNative_SetIPv4Address)
DllImportEntry(SystemNative_GetIPv6Address)
DllImportEntry(SystemNative_SetIPv6Address)
DllImportEntry(SystemNative_GetControlMessageBufferSize)
DllImportEntry(SystemNative_TryGetIPPacketInformation)
DllImportEntry(SystemNative_GetIPv4MulticastOption)
DllImportEntry(SystemNative_SetIPv4MulticastOption)
DllImportEntry(SystemNative_GetIPv6MulticastOption)
DllImportEntry(SystemNative_SetIPv6MulticastOption)
DllImportEntry(SystemNative_GetLingerOption)
DllImportEntry(SystemNative_SetLingerOption)
DllImportEntry(SystemNative_SetReceiveTimeout)
DllImportEntry(SystemNative_SetSendTimeout)
DllImportEntry(SystemNative_Receive)
DllImportEntry(SystemNative_ReceiveMessage)
DllImportEntry(SystemNative_Send)
DllImportEntry(SystemNative_SendMessage)
DllImportEntry(SystemNative_Accept)
DllImportEntry(SystemNative_Bind)
DllImportEntry(SystemNative_Connect)
DllImportEntry(SystemNative_GetPeerName)
DllImportEntry(SystemNative_GetSockName)
DllImportEntry(SystemNative_Listen)
DllImportEntry(SystemNative_Shutdown)
DllImportEntry(SystemNative_GetSocketErrorOption)
DllImportEntry(SystemNative_GetSockOpt)
DllImportEntry(SystemNative_GetRawSockOpt)
DllImportEntry(SystemNative_SetSockOpt)
DllImportEntry(SystemNative_SetRawSockOpt)
DllImportEntry(SystemNative_Socket)
DllImportEntry(SystemNative_GetSocketType)
DllImportEntry(SystemNative_GetAtOutOfBandMark)
DllImportEntry(SystemNative_GetBytesAvailable)
DllImportEntry(SystemNative_CreateSocketEventPort)
DllImportEntry(SystemNative_CloseSocketEventPort)
DllImportEntry(SystemNative_CreateSocketEventBuffer)
DllImportEntry(SystemNative_FreeSocketEventBuffer)
DllImportEntry(SystemNative_TryChangeSocketEventRegistration)
DllImportEntry(SystemNative_WaitForSocketEvents)
DllImportEntry(SystemNative_PlatformSupportsDualModeIPv4PacketInfo)
DllImportEntry(SystemNative_GetPeerUserName)
DllImportEntry(SystemNative_GetDomainSocketSizes)
DllImportEntry(SystemNative_GetMaximumAddressSize)
DllImportEntry(SystemNative_SendFile)
DllImportEntry(SystemNative_Disconnect)
DllImportEntry(SystemNative_InterfaceNameToIndex)
DllImportEntry(SystemNative_GetTcpGlobalStatistics)
DllImportEntry(SystemNative_GetIPv4GlobalStatistics)
DllImportEntry(SystemNative_GetUdpGlobalStatistics)
DllImportEntry(SystemNative_GetIcmpv4GlobalStatistics)
DllImportEntry(SystemNative_GetIcmpv6GlobalStatistics)
DllImportEntry(SystemNative_GetEstimatedTcpConnectionCount)
DllImportEntry(SystemNative_GetActiveTcpConnectionInfos)
DllImportEntry(SystemNative_GetEstimatedUdpListenerCount)
DllImportEntry(SystemNative_GetActiveUdpListeners)
DllImportEntry(SystemNative_GetNativeIPInterfaceStatistics)
DllImportEntry(SystemNative_GetNumRoutes)
DllImportEntry(SystemNative_ForkAndExecProcess)
DllImportEntry(SystemNative_GetRLimit)
DllImportEntry(SystemNative_SetRLimit)
DllImportEntry(SystemNative_Kill)
DllImportEntry(SystemNative_GetPid)
DllImportEntry(SystemNative_GetSid)
DllImportEntry(SystemNative_SysLog)
DllImportEntry(SystemNative_WaitIdAnyExitedNoHangNoWait)
DllImportEntry(SystemNative_WaitPidExitedNoHang)
DllImportEntry(SystemNative_PathConf)
DllImportEntry(SystemNative_GetPriority)
DllImportEntry(SystemNative_SetPriority)
DllImportEntry(SystemNative_GetCwd)
DllImportEntry(SystemNative_SchedSetAffinity)
DllImportEntry(SystemNative_SchedGetAffinity)
DllImportEntry(SystemNative_GetProcessPath)
DllImportEntry(SystemNative_GetNonCryptographicallySecureRandomBytes)
DllImportEntry(SystemNative_GetCryptographicallySecureRandomBytes)
DllImportEntry(SystemNative_GetUnixName)
DllImportEntry(SystemNative_GetUnixRelease)
DllImportEntry(SystemNative_GetUnixVersion)
DllImportEntry(SystemNative_GetOSArchitecture)
DllImportEntry(SystemNative_SearchPath)
DllImportEntry(SystemNative_SearchPath_TempDirectory)
DllImportEntry(SystemNative_RegisterForSigChld)
DllImportEntry(SystemNative_SetDelayedSigChildConsoleConfigurationHandler)
DllImportEntry(SystemNative_SetTerminalInvalidationHandler)
DllImportEntry(SystemNative_SNPrintF)
DllImportEntry(SystemNative_Sysctl)
DllImportEntry(SystemNative_MapTcpState)
DllImportEntry(SystemNative_LowLevelMonitor_Create)
DllImportEntry(SystemNative_LowLevelMonitor_Destroy)
DllImportEntry(SystemNative_LowLevelMonitor_Acquire)
DllImportEntry(SystemNative_LowLevelMonitor_Release)
DllImportEntry(SystemNative_LowLevelMonitor_Wait)
DllImportEntry(SystemNative_LowLevelMonitor_TimedWait)
DllImportEntry(SystemNative_LowLevelMonitor_Signal_Release)
DllImportEntry(SystemNative_LoadLibrary)
DllImportEntry(SystemNative_GetProcAddress)
DllImportEntry(SystemNative_FreeLibrary)
DllImportEntry(SystemNative_SchedGetCpu)
DllImportEntry(SystemNative_Exit)
DllImportEntry(SystemNative_Abort)
DllImportEntry(SystemNative_UTimensat)
DllImportEntry(SystemNative_GetTimestamp)
DllImportEntry(SystemNative_GetCpuUtilization)
DllImportEntry(SystemNative_GetPwUidR)
DllImportEntry(SystemNative_GetPwNamR)
DllImportEntry(SystemNative_GetEUid)
DllImportEntry(SystemNative_GetEGid)
DllImportEntry(SystemNative_SetEUid)
DllImportEntry(SystemNative_GetGroupList)
DllImportEntry(SystemNative_CreateAutoreleasePool)
DllImportEntry(SystemNative_DrainAutoreleasePool)
DllImportEntry(SystemNative_iOSSupportVersion)
DllImportEntry(SystemNative_GetErrNo)
DllImportEntry(SystemNative_SetErrNo)
DllImportEntry(SystemNative_PRead)
DllImportEntry(SystemNative_PWrite)
DllImportEntry(SystemNative_PReadV)
DllImportEntry(SystemNative_PWriteV)
DllImportEntry(SystemNative_CreateThread)
DllImportEntry(SystemNative_EnablePosixSignalHandling)
DllImportEntry(SystemNative_DisablePosixSignalHandling)
DllImportEntry(SystemNative_HandleNonCanceledPosixSignal)
DllImportEntry(SystemNative_SetPosixSignalHandler)
DllImportEntry(SystemNative_GetPlatformSignalNumber)
DllImportEntry(SystemNative_GetGroups)
DllImportEntry(SystemNative_GetEnv)
DllImportEntry(SystemNative_GetEnviron)
DllImportEntry(SystemNative_FreeEnviron)
};
EXTERN_C const void* SystemResolveDllImport(const char* name);
EXTERN_C const void* SystemResolveDllImport(const char* name)
{
return minipal_resolve_dllimport(s_sysNative, ARRAY_SIZE(s_sysNative), name);
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <minipal/entrypoints.h>
// Include System.Native headers
#include "pal_autoreleasepool.h"
#include "pal_console.h"
#include "pal_datetime.h"
#include "pal_dynamicload.h"
#include "pal_environment.h"
#include "pal_errno.h"
#include "pal_interfaceaddresses.h"
#include "pal_io.h"
#include "pal_iossupportversion.h"
#include "pal_log.h"
#include "pal_memory.h"
#include "pal_mount.h"
#include "pal_networkchange.h"
#include "pal_networking.h"
#include "pal_networkstatistics.h"
#include "pal_process.h"
#include "pal_random.h"
#include "pal_runtimeinformation.h"
#include "pal_searchpath.h"
#include "pal_signal.h"
#include "pal_string.h"
#include "pal_sysctl.h"
#include "pal_tcpstate.h"
#include "pal_threading.h"
#include "pal_time.h"
#include "pal_uid.h"
// Dispatch table for the System.Native p/invokes: one DllImportEntry per
// exported entry point.  Searched by name in SystemResolveDllImport below.
// DllImportEntry is defined in minipal/entrypoints.h; presumably it expands to
// a { name, function-pointer } pair -- confirm there before relying on layout.
static const Entry s_sysNative[] =
{
    DllImportEntry(SystemNative_FStat)
    DllImportEntry(SystemNative_GetWindowSize)
    DllImportEntry(SystemNative_IsATty)
    DllImportEntry(SystemNative_InitializeTerminalAndSignalHandling)
    DllImportEntry(SystemNative_SetKeypadXmit)
    DllImportEntry(SystemNative_GetControlCharacters)
    DllImportEntry(SystemNative_StdinReady)
    DllImportEntry(SystemNative_InitializeConsoleBeforeRead)
    DllImportEntry(SystemNative_UninitializeConsoleAfterRead)
    DllImportEntry(SystemNative_ConfigureTerminalForChildProcess)
    DllImportEntry(SystemNative_ReadStdin)
    DllImportEntry(SystemNative_GetSignalForBreak)
    DllImportEntry(SystemNative_SetSignalForBreak)
    DllImportEntry(SystemNative_GetSystemTimeAsTicks)
    DllImportEntry(SystemNative_ConvertErrorPlatformToPal)
    DllImportEntry(SystemNative_ConvertErrorPalToPlatform)
    DllImportEntry(SystemNative_StrErrorR)
    DllImportEntry(SystemNative_EnumerateInterfaceAddresses)
    DllImportEntry(SystemNative_GetNetworkInterfaces)
    DllImportEntry(SystemNative_EnumerateGatewayAddressesForInterface)
    DllImportEntry(SystemNative_Stat)
    DllImportEntry(SystemNative_LStat)
    DllImportEntry(SystemNative_Open)
    DllImportEntry(SystemNative_Close)
    DllImportEntry(SystemNative_Dup)
    DllImportEntry(SystemNative_Unlink)
    DllImportEntry(SystemNative_ShmOpen)
    DllImportEntry(SystemNative_ShmUnlink)
    DllImportEntry(SystemNative_GetReadDirRBufferSize)
    DllImportEntry(SystemNative_ReadDirR)
    DllImportEntry(SystemNative_OpenDir)
    DllImportEntry(SystemNative_CloseDir)
    DllImportEntry(SystemNative_Pipe)
    DllImportEntry(SystemNative_FcntlSetFD)
    DllImportEntry(SystemNative_FcntlGetFD)
    DllImportEntry(SystemNative_FcntlCanGetSetPipeSz)
    DllImportEntry(SystemNative_FcntlGetPipeSz)
    DllImportEntry(SystemNative_FcntlSetPipeSz)
    DllImportEntry(SystemNative_FcntlSetIsNonBlocking)
    DllImportEntry(SystemNative_FcntlGetIsNonBlocking)
    DllImportEntry(SystemNative_MkDir)
    DllImportEntry(SystemNative_ChMod)
    DllImportEntry(SystemNative_FChMod)
    DllImportEntry(SystemNative_FSync)
    DllImportEntry(SystemNative_FLock)
    DllImportEntry(SystemNative_ChDir)
    DllImportEntry(SystemNative_Access)
    DllImportEntry(SystemNative_LSeek)
    DllImportEntry(SystemNative_Link)
    DllImportEntry(SystemNative_SymLink)
    DllImportEntry(SystemNative_MksTemps)
    DllImportEntry(SystemNative_MMap)
    DllImportEntry(SystemNative_MUnmap)
    DllImportEntry(SystemNative_MAdvise)
    DllImportEntry(SystemNative_MSync)
    DllImportEntry(SystemNative_SysConf)
    DllImportEntry(SystemNative_FTruncate)
    DllImportEntry(SystemNative_Poll)
    DllImportEntry(SystemNative_PosixFAdvise)
    DllImportEntry(SystemNative_FAllocate)
    DllImportEntry(SystemNative_Read)
    DllImportEntry(SystemNative_ReadLink)
    DllImportEntry(SystemNative_Rename)
    DllImportEntry(SystemNative_RmDir)
    DllImportEntry(SystemNative_Sync)
    DllImportEntry(SystemNative_Write)
    DllImportEntry(SystemNative_CopyFile)
    DllImportEntry(SystemNative_INotifyInit)
    DllImportEntry(SystemNative_INotifyAddWatch)
    DllImportEntry(SystemNative_INotifyRemoveWatch)
    DllImportEntry(SystemNative_RealPath)
    DllImportEntry(SystemNative_GetPeerID)
    DllImportEntry(SystemNative_GetFileSystemType)
    DllImportEntry(SystemNative_LockFileRegion)
    DllImportEntry(SystemNative_LChflags)
    DllImportEntry(SystemNative_LChflagsCanSetHiddenFlag)
    DllImportEntry(SystemNative_CanGetHiddenFlag)
    DllImportEntry(SystemNative_ReadProcessStatusInfo)
    DllImportEntry(SystemNative_Log)
    DllImportEntry(SystemNative_LogError)
    DllImportEntry(SystemNative_AlignedAlloc)
    DllImportEntry(SystemNative_AlignedFree)
    DllImportEntry(SystemNative_AlignedRealloc)
    DllImportEntry(SystemNative_Calloc)
    DllImportEntry(SystemNative_Free)
    DllImportEntry(SystemNative_Malloc)
    DllImportEntry(SystemNative_MemSet)
    DllImportEntry(SystemNative_Realloc)
    DllImportEntry(SystemNative_GetSpaceInfoForMountPoint)
    DllImportEntry(SystemNative_GetFormatInfoForMountPoint)
    DllImportEntry(SystemNative_GetAllMountPoints)
    DllImportEntry(SystemNative_ReadEvents)
    DllImportEntry(SystemNative_CreateNetworkChangeListenerSocket)
    DllImportEntry(SystemNative_GetHostEntryForName)
    DllImportEntry(SystemNative_FreeHostEntry)
    DllImportEntry(SystemNative_GetNameInfo)
    DllImportEntry(SystemNative_GetDomainName)
    DllImportEntry(SystemNative_GetHostName)
    DllImportEntry(SystemNative_GetIPSocketAddressSizes)
    DllImportEntry(SystemNative_GetAddressFamily)
    DllImportEntry(SystemNative_SetAddressFamily)
    DllImportEntry(SystemNative_GetPort)
    DllImportEntry(SystemNative_SetPort)
    DllImportEntry(SystemNative_GetIPv4Address)
    DllImportEntry(SystemNative_SetIPv4Address)
    DllImportEntry(SystemNative_GetIPv6Address)
    DllImportEntry(SystemNative_SetIPv6Address)
    DllImportEntry(SystemNative_GetControlMessageBufferSize)
    DllImportEntry(SystemNative_TryGetIPPacketInformation)
    DllImportEntry(SystemNative_GetIPv4MulticastOption)
    DllImportEntry(SystemNative_SetIPv4MulticastOption)
    DllImportEntry(SystemNative_GetIPv6MulticastOption)
    DllImportEntry(SystemNative_SetIPv6MulticastOption)
    DllImportEntry(SystemNative_GetLingerOption)
    DllImportEntry(SystemNative_SetLingerOption)
    DllImportEntry(SystemNative_SetReceiveTimeout)
    DllImportEntry(SystemNative_SetSendTimeout)
    DllImportEntry(SystemNative_Receive)
    DllImportEntry(SystemNative_ReceiveMessage)
    DllImportEntry(SystemNative_Send)
    DllImportEntry(SystemNative_SendMessage)
    DllImportEntry(SystemNative_Accept)
    DllImportEntry(SystemNative_Bind)
    DllImportEntry(SystemNative_Connect)
    DllImportEntry(SystemNative_GetPeerName)
    DllImportEntry(SystemNative_GetSockName)
    DllImportEntry(SystemNative_Listen)
    DllImportEntry(SystemNative_Shutdown)
    DllImportEntry(SystemNative_GetSocketErrorOption)
    DllImportEntry(SystemNative_GetSockOpt)
    DllImportEntry(SystemNative_GetRawSockOpt)
    DllImportEntry(SystemNative_SetSockOpt)
    DllImportEntry(SystemNative_SetRawSockOpt)
    DllImportEntry(SystemNative_Socket)
    DllImportEntry(SystemNative_GetSocketType)
    DllImportEntry(SystemNative_GetAtOutOfBandMark)
    DllImportEntry(SystemNative_GetBytesAvailable)
    DllImportEntry(SystemNative_CreateSocketEventPort)
    DllImportEntry(SystemNative_CloseSocketEventPort)
    DllImportEntry(SystemNative_CreateSocketEventBuffer)
    DllImportEntry(SystemNative_FreeSocketEventBuffer)
    DllImportEntry(SystemNative_TryChangeSocketEventRegistration)
    DllImportEntry(SystemNative_WaitForSocketEvents)
    DllImportEntry(SystemNative_PlatformSupportsDualModeIPv4PacketInfo)
    DllImportEntry(SystemNative_GetPeerUserName)
    DllImportEntry(SystemNative_GetDomainSocketSizes)
    DllImportEntry(SystemNative_GetMaximumAddressSize)
    DllImportEntry(SystemNative_SendFile)
    DllImportEntry(SystemNative_Disconnect)
    DllImportEntry(SystemNative_InterfaceNameToIndex)
    DllImportEntry(SystemNative_GetTcpGlobalStatistics)
    DllImportEntry(SystemNative_GetIPv4GlobalStatistics)
    DllImportEntry(SystemNative_GetUdpGlobalStatistics)
    DllImportEntry(SystemNative_GetIcmpv4GlobalStatistics)
    DllImportEntry(SystemNative_GetIcmpv6GlobalStatistics)
    DllImportEntry(SystemNative_GetEstimatedTcpConnectionCount)
    DllImportEntry(SystemNative_GetActiveTcpConnectionInfos)
    DllImportEntry(SystemNative_GetEstimatedUdpListenerCount)
    DllImportEntry(SystemNative_GetActiveUdpListeners)
    DllImportEntry(SystemNative_GetNativeIPInterfaceStatistics)
    DllImportEntry(SystemNative_GetNumRoutes)
    DllImportEntry(SystemNative_ForkAndExecProcess)
    DllImportEntry(SystemNative_GetRLimit)
    DllImportEntry(SystemNative_SetRLimit)
    DllImportEntry(SystemNative_Kill)
    DllImportEntry(SystemNative_GetPid)
    DllImportEntry(SystemNative_GetSid)
    DllImportEntry(SystemNative_SysLog)
    DllImportEntry(SystemNative_WaitIdAnyExitedNoHangNoWait)
    DllImportEntry(SystemNative_WaitPidExitedNoHang)
    DllImportEntry(SystemNative_PathConf)
    DllImportEntry(SystemNative_GetPriority)
    DllImportEntry(SystemNative_SetPriority)
    DllImportEntry(SystemNative_GetCwd)
    DllImportEntry(SystemNative_SchedSetAffinity)
    DllImportEntry(SystemNative_SchedGetAffinity)
    DllImportEntry(SystemNative_GetProcessPath)
    DllImportEntry(SystemNative_GetNonCryptographicallySecureRandomBytes)
    DllImportEntry(SystemNative_GetCryptographicallySecureRandomBytes)
    DllImportEntry(SystemNative_GetUnixName)
    DllImportEntry(SystemNative_GetUnixRelease)
    DllImportEntry(SystemNative_GetUnixVersion)
    DllImportEntry(SystemNative_GetOSArchitecture)
    DllImportEntry(SystemNative_SearchPath)
    DllImportEntry(SystemNative_SearchPath_TempDirectory)
    DllImportEntry(SystemNative_RegisterForSigChld)
    DllImportEntry(SystemNative_SetDelayedSigChildConsoleConfigurationHandler)
    DllImportEntry(SystemNative_SetTerminalInvalidationHandler)
    DllImportEntry(SystemNative_SNPrintF)
    DllImportEntry(SystemNative_Sysctl)
    DllImportEntry(SystemNative_MapTcpState)
    DllImportEntry(SystemNative_LowLevelMonitor_Create)
    DllImportEntry(SystemNative_LowLevelMonitor_Destroy)
    DllImportEntry(SystemNative_LowLevelMonitor_Acquire)
    DllImportEntry(SystemNative_LowLevelMonitor_Release)
    DllImportEntry(SystemNative_LowLevelMonitor_Wait)
    DllImportEntry(SystemNative_LowLevelMonitor_TimedWait)
    DllImportEntry(SystemNative_LowLevelMonitor_Signal_Release)
    DllImportEntry(SystemNative_LoadLibrary)
    DllImportEntry(SystemNative_GetProcAddress)
    DllImportEntry(SystemNative_FreeLibrary)
    DllImportEntry(SystemNative_SchedGetCpu)
    DllImportEntry(SystemNative_Exit)
    DllImportEntry(SystemNative_Abort)
    DllImportEntry(SystemNative_UTimensat)
    DllImportEntry(SystemNative_GetTimestamp)
    DllImportEntry(SystemNative_GetCpuUtilization)
    DllImportEntry(SystemNative_GetPwUidR)
    DllImportEntry(SystemNative_GetPwNamR)
    DllImportEntry(SystemNative_GetEUid)
    DllImportEntry(SystemNative_GetEGid)
    DllImportEntry(SystemNative_SetEUid)
    DllImportEntry(SystemNative_GetGroupList)
    DllImportEntry(SystemNative_CreateAutoreleasePool)
    DllImportEntry(SystemNative_DrainAutoreleasePool)
    DllImportEntry(SystemNative_iOSSupportVersion)
    DllImportEntry(SystemNative_GetErrNo)
    DllImportEntry(SystemNative_SetErrNo)
    DllImportEntry(SystemNative_PRead)
    DllImportEntry(SystemNative_PWrite)
    DllImportEntry(SystemNative_PReadV)
    DllImportEntry(SystemNative_PWriteV)
    DllImportEntry(SystemNative_CreateThread)
    DllImportEntry(SystemNative_EnablePosixSignalHandling)
    DllImportEntry(SystemNative_DisablePosixSignalHandling)
    DllImportEntry(SystemNative_HandleNonCanceledPosixSignal)
    DllImportEntry(SystemNative_SetPosixSignalHandler)
    DllImportEntry(SystemNative_GetPlatformSignalNumber)
    DllImportEntry(SystemNative_GetGroups)
    DllImportEntry(SystemNative_GetEnv)
    DllImportEntry(SystemNative_GetEnviron)
    DllImportEntry(SystemNative_FreeEnviron)
};
// Looks up the native implementation for a System.Native p/invoke by name.
EXTERN_C const void* SystemResolveDllImport(const char* name);
EXTERN_C const void* SystemResolveDllImport(const char* name)
{
    const void* pEntryPoint = minipal_resolve_dllimport(s_sysNative, ARRAY_SIZE(s_sysNative), name);
    return pEntryPoint;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/debug/daccess/dacdbiimpl.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: DacDbiImpl.cpp
//
//
// Implement DAC/DBI interface
//
//*****************************************************************************
#include "stdafx.h"
#include "dacdbiinterface.h"
#include "typestring.h"
#include "holder.h"
#include "debuginfostore.h"
#include "peimagelayout.inl"
#include "encee.h"
#include "switches.h"
#include "generics.h"
#include "stackwalk.h"
#include "virtualcallstub.h"
#include "dacdbiimpl.h"
#ifdef FEATURE_COMINTEROP
#include "runtimecallablewrapper.h"
#include "comcallablewrapper.h"
#endif // FEATURE_COMINTEROP
#include "request_common.h"
//-----------------------------------------------------------------------------
// Have standard enter and leave macros at the DacDbi boundary to enforce
// standard behavior.
// 1. catch exceptions and convert them at the boundary.
// 2. provide a space to hook logging and transitions.
// 3. provide a hook to verify return values.
//
// Usage notes:
// - use this at the DacDbi boundary; but not at internal functions
// - it's ok to Return from the middle.
//
// Expected usage is:
// Foo()
// {
// DD_ENTER_MAY_THROW
// ...
// if (...) { ThrowHr(E_SOME_FAILURE); }
// ...
// if (...) { return; } // early success case
// ...
// }
//-----------------------------------------------------------------------------
// Global allocator for DD. Access is protected under the g_dacCritSec lock.
IDacDbiInterface::IAllocator * g_pAllocator = NULL;
//---------------------------------------------------------------------------------------
//
// Extra sugar for wrapping IAllocator under friendly New/Delete operators.
//
// Sample usage:
// void Foo(TestClass ** ppOut)
// {
// *ppOut = NULL;
// TestClass * p = new (forDbi) TestClass();
// ...
// if (ok)
// {
// *ppOut = p;
// return; // DBI will then free this memory.
// }
// ...
// DeleteDbiMemory(p); // DeleteDbiMemory(p, len); if it was an array allocation.
// }
//
// Be very careful when using this on classes since Dbi and DAC may be in
// separate dlls. This is best used when operating on blittable data-structures.
// (no ctor/dtor, plain data fields) to guarantee the proper DLL isolation.
// You don't want to call the ctor in DAC's context and the dtor in DBI's context
// unless you really know what you're doing and that it's safe.
//
// Need a class to serve as a tag that we can use to overload New/Delete.
forDbiWorker forDbi;
// Placement 'new (forDbi)': allocates lenBytes via the DBI-supplied allocator
// (g_pAllocator) so DBI can later free the memory.  Throws on allocation failure.
void * operator new(size_t lenBytes, const forDbiWorker &)
{
    _ASSERTE(g_pAllocator != NULL);
    void * pMem = g_pAllocator->Alloc(lenBytes);
    if (pMem == NULL)
    {
        ThrowOutOfMemory();
    }
    return pMem;
}
// Array form of 'new (forDbi)': same contract as the scalar overload above --
// memory comes from g_pAllocator and OOM is reported by throwing.
void * operator new[](size_t lenBytes, const forDbiWorker &)
{
    _ASSERTE(g_pAllocator != NULL);
    void * pMem = g_pAllocator->Alloc(lenBytes);
    if (pMem == NULL)
    {
        ThrowOutOfMemory();
    }
    return pMem;
}
// Note: there is no C++ syntax for manually invoking this, but if a constructor throws an exception I understand that
// this delete operator will be invoked automatically to destroy the object.
// Matching placement delete for 'new (forDbi)'.  Invoked automatically if a
// constructor throws; returns the raw bytes to g_pAllocator.  NULL is a no-op.
void operator delete(void *p, const forDbiWorker &)
{
    if (p != NULL)
    {
        _ASSERTE(g_pAllocator != NULL);
        g_pAllocator->Free((BYTE*) p);
    }
}
// Note: there is no C++ syntax for manually invoking this, but if a constructor throws an exception I understand that
// this delete operator will be invoked automatically to destroy the object.
// Matching placement delete for the array form of 'new (forDbi)'.
// NULL is a no-op; otherwise the raw bytes go back to g_pAllocator.
void operator delete[](void *p, const forDbiWorker &)
{
    if (p != NULL)
    {
        _ASSERTE(g_pAllocator != NULL);
        g_pAllocator->Free((BYTE*) p);
    }
}
// @dbgtodo dac support: determine how to handle an array of class instances to ensure the dtors get
// called correctly or document that they won't
// Delete memory and invoke dtor for memory allocated with 'operator (forDbi) new'
// Runs ~T() and frees memory that was allocated with 'operator (forDbi) new'.
// Safe to call with NULL (does nothing).
template<class T> void DeleteDbiMemory(T *p)
{
    if (p != NULL)
    {
        p->~T();
        _ASSERTE(g_pAllocator != NULL);
        g_pAllocator->Free((BYTE*) p);
    }
}
// Delete memory and invoke dtor for memory allocated with 'operator (forDbi) new[]'
// There's an inherent risk here - where each element's destructor will get called within
// the context of the DAC. If the destructor tries to use the CRT allocator logic expecting
// to hit the DBI's, we could be in trouble. Those objects need to use an export allocator like this.
// Destroys 'count' elements and frees an array allocated with
// 'operator (forDbi) new[]'.  Each destructor runs in DAC's context, so element
// types must not assume DBI's CRT allocator (see comment above).  NULL is a no-op.
template<class T> void DeleteDbiArrayMemory(T *p, int count)
{
    if (p == NULL)
    {
        return;
    }
    for (int i = 0; i < count; i++)
    {
        p[i].~T();
    }
    _ASSERTE(g_pAllocator != NULL);
    g_pAllocator->Free((BYTE*) p);
}
//---------------------------------------------------------------------------------------
// Creates the DacDbiInterface object, used by Dbi.
//
// Arguments:
// pTarget - pointer to a Data-Target
// baseAddress - non-zero base address of mscorwks in target to debug.
// pAllocator - pointer to client allocator object. This lets DD allocate objects and
// pass them out back to the client, which can then delete them.
// DD takes a weak ref to this, so client must keep it alive until it
// calls Destroy.
// pMetadataLookup - callback interface to do internal metadata lookup. This is because
// metadata is not dac-ized.
// ppInterface - mandatory out-parameter
//
// Return Value:
// S_OK on success.
//
//
// Notes:
// On Windows, this is public function that can be retrieved by GetProcAddress.
// On Mac, this is used internally by DacDbiMarshalStubInstance below
// This will yield an IDacDbiInterface to provide structured access to the
// data-target.
//
// Must call Destroy to on interface to free its resources.
//
//---------------------------------------------------------------------------------------
STDAPI
DLLEXPORT
DacDbiInterfaceInstance(
    ICorDebugDataTarget * pTarget,
    CORDB_ADDRESS baseAddress,
    IDacDbiInterface::IAllocator * pAllocator,
    IDacDbiInterface::IMetaDataLookup * pMetaDataLookup,
    IDacDbiInterface ** ppInterface)
{
    // No marshalling is done by the instantiationf function - we just need to setup the infrastructure.
    // We don't want to warn if this involves creating and accessing undacized data structures,
    // because it's for the infrastructure, not DACized code itself.
    SUPPORTS_DAC_HOST_ONLY;
    // Since this is public (reachable via GetProcAddress), verify the arguments
    // instead of asserting.
    if ((ppInterface == NULL) || (pTarget == NULL) || (baseAddress == 0))
    {
        return E_INVALIDARG;
    }
    // Ensure the out-parameter is NULL on every failure path below.
    *ppInterface = NULL;
    //
    // Actually allocate the real object and initialize it.
    //
    // nothrow: report OOM as an HRESULT rather than letting a C++ exception
    // escape this public entry point.
    DacDbiInterfaceImpl * pDac = new (nothrow) DacDbiInterfaceImpl(pTarget, baseAddress, pAllocator, pMetaDataLookup);
    if (!pDac)
    {
        return E_OUTOFMEMORY;
    }
    HRESULT hrStatus = pDac->Initialize();
    if (SUCCEEDED(hrStatus))
    {
        *ppInterface = pDac;
    }
    else
    {
        // Initialization failed; don't leak the partially-constructed object.
        delete pDac;
    }
    return hrStatus;
}
//---------------------------------------------------------------------------------------
// Constructor. Instantiates a DAC/DBI interface around a DataTarget.
//
// Arguments:
// pTarget - pointer to a Data-Target
// baseAddress - non-zero base address of mscorwks in target to debug.
// pAllocator - pointer to client allocator object. This lets DD allocate objects and
// pass them out back to the client, which can then delete them.
// DD takes a weak ref to this, so client must keep it alive until it
// calls Destroy.
// pMetadataLookup - callback interface to do internal metadata lookup. This is because
// metadata is not dac-ized.
//
// Notes:
// pAllocator is a weak reference.
//---------------------------------------------------------------------------------------
DacDbiInterfaceImpl::DacDbiInterfaceImpl(
    ICorDebugDataTarget* pTarget,
    CORDB_ADDRESS baseAddress,
    IAllocator * pAllocator,
    IMetaDataLookup * pMetaDataLookup
    ) : ClrDataAccess(pTarget),
    m_pAllocator(pAllocator),
    m_pMetaDataLookup(pMetaDataLookup),
    m_pCachedPEAssembly(VMPTR_PEAssembly::NullPtr()),
    m_pCachedImporter(NULL),
    m_isCachedHijackFunctionValid(FALSE)
{
    _ASSERTE(baseAddress != NULL);
    // Remember where the runtime module is loaded in the target.
    m_globalBase = CORDB_ADDRESS_TO_TADDR(baseAddress);
    // pAllocator and pMetaDataLookup are weak references; per the header comment
    // above, the client must keep them alive until Destroy() is called.
    _ASSERTE(pMetaDataLookup != NULL);
    _ASSERTE(pAllocator != NULL);
    _ASSERTE(pTarget != NULL);
#ifdef _DEBUG
    // Enable verification asserts in ICorDebug scenarios.  ICorDebug never guesses at the DAC path, so any
    // mismatch should be fatal, and so always of interest to the user.
    // This overrides the assignment in the base class ctor (which runs first).
    m_fEnableDllVerificationAsserts = true;
#endif
}
//-----------------------------------------------------------------------------
// Destructor.
//
// Notes:
// This gets invoked after Destroy().
//-----------------------------------------------------------------------------
DacDbiInterfaceImpl::~DacDbiInterfaceImpl()
{
    // Runs only on the host side; invoked after Destroy() drops the last reference.
    SUPPORTS_DAC_HOST_ONLY;
    // This will automatically chain to the base class (ClrDataAccess) dtor.
}
//-----------------------------------------------------------------------------
// Called from DAC-ized code to get a IMDInternalImport
//
// Arguments:
// pPEAssembly - PE file for which to get importer for
// fThrowEx - if true, throw instead of returning NULL.
//
// Returns:
// an Internal importer object for this file.
// May return NULL or throw (depending on fThrowEx).
// May throw in exceptional circumstances (eg, corrupt debuggee).
//
// Assumptions:
// This is called from DAC-ized code within the VM, which
// was in turn called from some DD primitive. The returned importer will
// be used by the DAC-ized code in the callstack, but it won't be cached.
//
// Notes:
// This is an Internal importer, not a public Metadata importer.
//
interface IMDInternalImport* DacDbiInterfaceImpl::GetMDImport(
    const PEAssembly* pPEAssembly,
    const ReflectionModule * pReflectionModule,
    bool fThrowEx)
{
    // Since this is called from an existing DAC-primitive, we already hold the g_dacCritSec lock.
    // The lock conveniently protects our cache (m_pCachedPEAssembly / m_pCachedImporter).
    SUPPORTS_DAC;
    IDacDbiInterface::IMetaDataLookup * pLookup = m_pMetaDataLookup;
    _ASSERTE(pLookup != NULL);
    // Build the key for the lookup: the target-side PEAssembly.  Taken directly
    // from pPEAssembly when given, otherwise from the ReflectionModule.
    VMPTR_PEAssembly vmPEAssembly = VMPTR_PEAssembly::NullPtr();
    if (pPEAssembly != NULL)
    {
        vmPEAssembly.SetHostPtr(pPEAssembly);
    }
    else if (pReflectionModule != NULL)
    {
        // SOS and ClrDataAccess rely on special logic to find the metadata for methods in dynamic modules.
        // We don't need to.  The RS has already taken care of the special logic for us.
        // So here we just grab the PEAssembly off of the ReflectionModule and continue down the normal
        // code path.  See code:ClrDataAccess::GetMDImport for comparison.
        vmPEAssembly.SetHostPtr(pReflectionModule->GetPEAssembly());
    }
    // Optimize for the case where the VM queries the same Importer many times in a row
    // (single-entry cache).
    if (m_pCachedPEAssembly == vmPEAssembly)
    {
        return m_pCachedImporter;
    }
    // Go to DBI to find the metadata.
    IMDInternalImport * pInternal = NULL;
    bool isILMetaDataForNI = false;
    EX_TRY
    {
        // If test needs it in the future, prop isILMetaDataForNI back up to
        // ClrDataAccess.m_mdImports.Add() call.
        // example in code:ClrDataAccess::GetMDImport
        // CordbModule::GetMetaDataInterface also looks up MetaData and would need attention.

        // This is the new codepath that uses ICorDebugMetaDataLookup.
        // To get the old codepath that uses the v2 metadata lookup methods,
        // you'd have to load DAC only and then you'll get ClrDataAccess's implementation
        // of this function.
        pInternal = pLookup->LookupMetaData(vmPEAssembly, isILMetaDataForNI);
    }
    EX_CATCH
    {
        // Any expected error we should ignore (lookup simply yields NULL below).
        if ((GET_EXCEPTION()->GetHR() != HRESULT_FROM_WIN32(ERROR_PARTIAL_COPY)) &&
            (GET_EXCEPTION()->GetHR() != CORDBG_E_READVIRTUAL_FAILURE) &&
            (GET_EXCEPTION()->GetHR() != CORDBG_E_SYMBOLS_NOT_AVAILABLE) &&
            (GET_EXCEPTION()->GetHR() != CORDBG_E_MODULE_LOADED_FROM_DISK))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)
    if (pInternal == NULL)
    {
        SIMPLIFYING_ASSUMPTION(!"MD lookup failed");
        if (fThrowEx)
        {
            ThrowHR(E_FAIL);
        }
        return NULL;
    }
    else
    {
        // Cache it such that if we look for the exact same Importer again, we'll return it.
        m_pCachedPEAssembly = vmPEAssembly;
        m_pCachedImporter = pInternal;
    }
    return pInternal;
}
//-----------------------------------------------------------------------------
// Implementation of IDacDbiInterface
// See DacDbiInterface.h for full descriptions of all of these functions
//-----------------------------------------------------------------------------
// Destroy the connection, freeing up any resources.
void DacDbiInterfaceImpl::Destroy()
{
    // Drop the weak reference to the client allocator before releasing ourselves.
    m_pAllocator = NULL;
    this->Release();
    // Memory is deleted, don't access this object any more.
}
// Check whether the version of the DBI matches the version of the runtime.
// See code:CordbProcess::CordbProcess#DBIVersionChecking for more information regarding version checking.
HRESULT DacDbiInterfaceImpl::CheckDbiVersion(const DbiVersion * pVersion)
{
    DD_ENTER_MAY_THROW;
    // The DBI is compatible only when its format, breaking-change counter, and
    // reserved field all match what this build of the DAC expects.
    bool fCompatible =
        (pVersion->m_dwFormat == kCurrentDbiVersionFormat) &&
        (pVersion->m_dwProtocolBreakingChangeCounter == kCurrentDacDbiProtocolBreakingChangeCounter) &&
        (pVersion->m_dwReservedMustBeZero1 == 0);
    return fCompatible ? S_OK : CORDBG_E_INCOMPATIBLE_PROTOCOL;
}
// Flush the DAC cache. This should be called when target memory changes.
HRESULT DacDbiInterfaceImpl::FlushCache()
{
    // Non-reentrant. We don't want to flush cached instances from a callback.
    // That would remove host DAC instances while they're being used.
    DD_NON_REENTRANT_MAY_THROW;
    // Invalidate our own single-entry metadata cache and hijack cache...
    m_pCachedPEAssembly = VMPTR_PEAssembly::NullPtr();
    m_pCachedImporter = NULL;
    m_isCachedHijackFunctionValid = FALSE;
    // ...then flush the base ClrDataAccess instance cache.
    HRESULT hr = ClrDataAccess::Flush();
    // Current impl of Flush() should always succeed. If it ever fails, we want to know.
    _ASSERTE(SUCCEEDED(hr));
    return hr;
}
// enable or disable DAC target consistency checks
void DacDbiInterfaceImpl::DacSetTargetConsistencyChecks(bool fEnableAsserts)
{
    // Forward on to our ClrDataAccess base class; no state is kept here.
    ClrDataAccess::SetTargetConsistencyChecks(fEnableAsserts);
}
// Query if Left-side is started up?
BOOL DacDbiInterfaceImpl::IsLeftSideInitialized()
{
    DD_ENTER_MAY_THROW;
    if (g_pDebugger != NULL)
    {
        // This check is "safe".
        // The initialize order in the left-side is:
        // 1) g_pDebugger is an RVA based global initialized to NULL when the module is loaded.
        // 2) Allocate a "Debugger" object.
        // 3) run the ctor, which will set m_fLeftSideInitialized = FALSE.
        // 4) assign the object to g_pDebugger.
        // 5) later, LS initialization code will assign g_pDebugger->m_fLeftSideInitialized = TRUE.
        //
        // The memory write in #5 is atomic. There is no window where we're reading unitialized data.
        return (g_pDebugger->m_fLeftSideInitialized != 0);
    }
    // No Debugger object yet => the left side cannot be initialized.
    return FALSE;
}
// Determines if a given address is a CLR stub.
BOOL DacDbiInterfaceImpl::IsTransitionStub(CORDB_ADDRESS address)
{
    DD_ENTER_MAY_THROW;
    BOOL fIsStub = FALSE;
#if defined(TARGET_UNIX)
    // Currently IsIPInModule() is not implemented in the PAL.  Rather than skipping the check, we should
    // either E_NOTIMPL this API or implement IsIPInModule() in the PAL.  Since ICDProcess::IsTransitionStub()
    // is only called by VS in mixed-mode debugging scenarios, and mixed-mode debugging is not supported on
    // POSIX systems, there is really no incentive to implement this API at this point.
    ThrowHR(E_NOTIMPL);
#else // !TARGET_UNIX
    TADDR ip = (TADDR)address;
    if (ip == NULL)
    {
        // A NULL address is never a stub.
        fIsStub = FALSE;
    }
    else
    {
        fIsStub = StubManager::IsStub(ip);
    }
    // If it's in Mscorwks (the runtime module itself), count that as a stub too.
    if (fIsStub == FALSE)
    {
        fIsStub = IsIPInModule(m_globalBase, ip);
    }
#endif // TARGET_UNIX
    return fIsStub;
}
// Gets the type of 'address'.
IDacDbiInterface::AddressType DacDbiInterfaceImpl::GetAddressType(CORDB_ADDRESS address)
{
    DD_ENTER_MAY_THROW;
    TADDR addrToCheck = CORDB_ADDRESS_TO_TADDR(address);
    // Anything that can't even be a code address is left unclassified.
    if (IsPossibleCodeAddress(addrToCheck) != S_OK)
    {
        return kAddressUnrecognized;
    }
    // Managed code takes precedence over runtime stubs.
    if (ExecutionManager::IsManagedCode(addrToCheck))
    {
        return kAddressManagedMethod;
    }
    if (StubManager::IsStub(addrToCheck))
    {
        return kAddressRuntimeUnmanagedStub;
    }
    return kAddressUnrecognized;
}
// Get a VM appdomain pointer that matches the appdomain ID
VMPTR_AppDomain DacDbiInterfaceImpl::GetAppDomainFromId(ULONG appdomainId)
{
    DD_ENTER_MAY_THROW;
    VMPTR_AppDomain vmAppDomain;
    // @dbgtodo  dac support - We would like to wean ourselves off the IXClrData interfaces.
    IXCLRDataProcess * pDAC = this;
    ReleaseHolder<IXCLRDataAppDomain> pDacAppDomain;
    HRESULT hrStatus = pDAC->GetAppDomainByUniqueID(appdomainId, &pDacAppDomain);
    IfFailThrow(hrStatus);
    // Dig the raw AppDomain out of the IXCLRData wrapper.
    IXCLRDataAppDomain * pIAppDomain = pDacAppDomain;
    AppDomain * pAppDomain = (static_cast<ClrDataAppDomain *> (pIAppDomain))->GetAppDomain();
    SIMPLIFYING_ASSUMPTION(pAppDomain != NULL);
    if (pAppDomain == NULL)
    {
        ThrowHR(E_FAIL); // corrupted left-side?
    }
    // Return the AppDomain as a target-address-based VMPTR.
    TADDR addrAppDomain = PTR_HOST_TO_TADDR(pAppDomain);
    vmAppDomain.SetDacTargetPtr(addrAppDomain);
    return vmAppDomain;
}
// Get the AppDomain ID for an AppDomain.
//
// Arguments:
//    vmAppDomain - the AppDomain to query; may be NULL.
//
// Returns:
//    0 if vmAppDomain is NULL; otherwise DefaultADID.
ULONG DacDbiInterfaceImpl::GetAppDomainId(VMPTR_AppDomain vmAppDomain)
{
    DD_ENTER_MAY_THROW;
    if (vmAppDomain.IsNull())
    {
        return 0;
    }
    else
    {
        // Every non-NULL AppDomain reports the default ID; the previous
        // marshalling of the AppDomain pointer into an unused local has been
        // removed (it only triggered an unused-variable compiler warning).
        return DefaultADID;
    }
}
// Get the managed AppDomain object for an AppDomain.
VMPTR_OBJECTHANDLE DacDbiInterfaceImpl::GetAppDomainObject(VMPTR_AppDomain vmAppDomain)
{
    DD_ENTER_MAY_THROW;
    AppDomain* pAppDomain = vmAppDomain.GetDacPtr();
    // Handle to the managed System.AppDomain object exposed to the debugger.
    OBJECTHANDLE hAppDomainManagedObject = pAppDomain->GetRawExposedObjectHandleForDebugger();
    // Wrap the raw handle in a VMPTR for the caller.
    VMPTR_OBJECTHANDLE vmObj = VMPTR_OBJECTHANDLE::NullPtr();
    vmObj.SetDacTargetPtr(hAppDomainManagedObject);
    return vmObj;
}
// Get the full AD friendly name for the given EE AppDomain.
void DacDbiInterfaceImpl::GetAppDomainFullName(
    VMPTR_AppDomain vmAppDomain,
    IStringHolder * pStrName )
{
    DD_ENTER_MAY_THROW;
    AppDomain * pAppDomain = vmAppDomain.GetDacPtr();
    // Get the AppDomain name from the VM without changing anything.
    // We might be able to simplify this, eg. by returning an SString.
    bool fIsUtf8;
    PVOID pRawName = pAppDomain->GetFriendlyNameNoSet(&fIsUtf8);
    if (!pRawName)
    {
        ThrowHR(E_NOINTERFACE);
    }
    HRESULT hrStatus = S_OK;
    if (fIsUtf8)
    {
        // We have to allocate a temporary string to convert UTF-8 -> UTF-16.
        // We could avoid this by adding a version of IStringHolder::AssignCopy that takes a UTF8 string.
        // We should also probably check to see when fIsUtf8 is ever true (it looks like it should normally be false).
        ULONG32 dwNameLen = 0;
        // First call computes the required buffer length (in characters).
        hrStatus = ConvertUtf8((LPCUTF8)pRawName, 0, &dwNameLen, NULL);
        if (SUCCEEDED( hrStatus ))
        {
            NewArrayHolder<WCHAR> pwszName(new WCHAR[dwNameLen]);
            hrStatus = ConvertUtf8((LPCUTF8)pRawName, dwNameLen, &dwNameLen, pwszName );
            IfFailThrow(hrStatus);
            hrStatus = pStrName->AssignCopy(pwszName);
        }
    }
    else
    {
        // Name is already UTF-16; hand it straight to the holder.
        hrStatus = pStrName->AssignCopy(static_cast<PCWSTR>(pRawName));
    }
    // Very important that this either sets pStrName or throws.
    // Don't set it and then throw.
    IfFailThrow(hrStatus);
}
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// JIT Compiler Flags
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// Get the values of the JIT Optimization and EnC flags.
// Reads the JIT-optimization and Edit-and-Continue settings for the module
// backing the given DomainAssembly.
// Arguments:
//    vmDomainAssembly - target DomainAssembly to query.
//    pfAllowJITOpts   - out: TRUE if JIT optimizations are allowed for the module.
//    pfEnableEnC      - out: TRUE if EnC is enabled on the module.
// Throws E_FAIL if vmDomainAssembly does not resolve to a host pointer.
void DacDbiInterfaceImpl::GetCompilerFlags (
    VMPTR_DomainAssembly vmDomainAssembly,
    BOOL *pfAllowJITOpts,
    BOOL *pfEnableEnC)
{
    DD_ENTER_MAY_THROW;
    DomainAssembly * pDomainAssembly = vmDomainAssembly.GetDacPtr();
    if (pDomainAssembly == NULL)
    {
        ThrowHR(E_FAIL);
    }
    // Get the underlying module - none of this is AppDomain specific
    Module * pModule = pDomainAssembly->GetModule();
    DWORD dwBits = pModule->GetDebuggerInfoBits();
    *pfAllowJITOpts = !CORDisableJITOptimizations(dwBits);
    *pfEnableEnC = pModule->IsEditAndContinueEnabled();
} //GetCompilerFlags
//-----------------------------------------------------------------------------
// Helper function for SetCompilerFlags to set EnC status.
// Arguments:
// Input:
// pModule - The runtime module for which flags are being set.
//
// Return value:
// true if the Enc bits can be set on this module
//-----------------------------------------------------------------------------
// Returns true iff Edit-and-Continue can be enabled for this module:
// the build must support EnC, the module must be EnC-capable, no profiler
// may be attached (when profiling data support is compiled in), and the
// runtime must be ignoring PDB sequence points (DACF_IGNORE_PDBS set).
bool DacDbiInterfaceImpl::CanSetEnCBits(Module * pModule)
{
_ASSERTE(pModule != NULL);
#ifdef EnC_SUPPORTED
// If we're using explicit sequence points (from the PDB), then we can't do EnC
// because EnC won't get updated pdbs and so the sequence points will be wrong.
// (fIgnorePdbs is true when the DACF_IGNORE_PDBS bit is set, i.e. PDB
// sequence points are NOT being used.)
bool fIgnorePdbs = ((pModule->GetDebuggerInfoBits() & DACF_IGNORE_PDBS) != 0);
// NOTE: the #ifdef below sits in the middle of a single && expression;
// when PROFILING_SUPPORTED_DATA is undefined the profiler check drops out.
bool fAllowEnc = pModule->IsEditAndContinueCapable() &&
#ifdef PROFILING_SUPPORTED_DATA
!CORProfilerPresent() && // this queries target
#endif
fIgnorePdbs;
#else // ! EnC_SUPPORTED
// Enc not supported on any other platforms.
bool fAllowEnc = false;
#endif
return fAllowEnc;
} // DacDbiInterfaceImpl::CanSetEnCBits
// Set the values of the JIT optimization and EnC flags.
// vmDomainAssembly - assembly whose module's debugger info bits are updated
// fAllowJitOpts    - whether JIT optimizations should be allowed
// fEnableEnC       - whether Edit-and-Continue should be enabled
// Returns S_OK, or the success code CORDBG_S_NOT_ALL_BITS_SET when EnC was
// requested but cannot be enabled for this module.
HRESULT DacDbiInterfaceImpl::SetCompilerFlags(VMPTR_DomainAssembly vmDomainAssembly,
BOOL fAllowJitOpts,
BOOL fEnableEnC)
{
DD_ENTER_MAY_THROW;
DWORD dwBits = 0;
DomainAssembly * pDomainAssembly = vmDomainAssembly.GetDacPtr();
Module * pModule = pDomainAssembly->GetModule();
HRESULT hr = S_OK;
_ASSERTE(pModule != NULL);
// Initialize dwBits: start from the current bits with the JIT-opt and EnC
// flags cleared, restricted to the debugger-controllable flags.
dwBits = (pModule->GetDebuggerInfoBits() & ~(DACF_ALLOW_JIT_OPTS | DACF_ENC_ENABLED));
dwBits &= DACF_CONTROL_FLAGS_MASK;
if (fAllowJitOpts)
{
dwBits |= DACF_ALLOW_JIT_OPTS;
}
if (fEnableEnC)
{
if (CanSetEnCBits(pModule))
{
dwBits |= DACF_ENC_ENABLED;
}
else
{
// EnC requested but not possible; report partial success.
hr = CORDBG_S_NOT_ALL_BITS_SET;
}
}
// Settings from the debugger take precedence over all other settings.
dwBits |= DACF_USER_OVERRIDE;
// set flags. This will write back to the target
pModule->SetDebuggerInfoBits((DebuggerAssemblyControlFlags)dwBits);
LOG((LF_CORDB, LL_INFO100, "D::HIPCE, Changed Jit-Debug-Info: fOpt=%d, fEnableEnC=%d, new bits=0x%08x\n",
(dwBits & DACF_ALLOW_JIT_OPTS) != 0,
(dwBits & DACF_ENC_ENABLED) != 0,
dwBits));
// CORDBG_S_NOT_ALL_BITS_SET is a success code, so this holds either way.
_ASSERTE(SUCCEEDED(hr));
return hr;
} // DacDbiInterfaceImpl::SetCompilerFlags
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// sequence points and var info
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// Initialize the native/IL sequence points and native var info for a function.
// Initialize the native/IL sequence points and native var info for a function.
// vmMethodDesc   - the method being inspected (must be non-null)
// startAddr      - start address of the native code blob (distinguishes EnC versions)
// fCodeAvailable - must be TRUE; native code has to exist for this to be called
// pNativeVarData - out: variable/argument location table
// pSequencePoints - out: IL<->native sequence point map
void DacDbiInterfaceImpl::GetNativeCodeSequencePointsAndVarInfo(VMPTR_MethodDesc vmMethodDesc,
                                                                CORDB_ADDRESS startAddr,
                                                                BOOL fCodeAvailable,
                                                                NativeVarData * pNativeVarData,
                                                                SequencePoints * pSequencePoints)
{
    DD_ENTER_MAY_THROW;

    _ASSERTE(!vmMethodDesc.IsNull());
    _ASSERTE(fCodeAvailable != 0);

    MethodDesc * pMethod = vmMethodDesc.GetDacPtr();

    // First the locations of arguments and local variables...
    GetNativeVarData(pMethod, startAddr, GetArgCount(pMethod), pNativeVarData);

    // ...then the sequence point mapping.
    GetSequencePoints(pMethod, startAddr, pSequencePoints);
} // GetNativeCodeSequencePointsAndVarInfo
//-----------------------------------------------------------------------------
// Get the number of fixed arguments to a function, i.e., the explicit args and the "this" pointer.
// This does not include other implicit arguments or varargs. This is used to compute a variable ID
// (see comment in CordbJITILFrame::ILVariableToNative for more detail)
// Arguments:
// input: pMD pointer to the method desc for the function
// output: none
// Return value:
// the number of fixed arguments to the function
//-----------------------------------------------------------------------------
// Returns the number of fixed arguments to the method: the explicit
// parameters plus one for 'this' on instance methods. Implicit arguments
// and varargs are not counted. Returns 0 on a corrupted (null) signature.
SIZE_T DacDbiInterfaceImpl::GetArgCount(MethodDesc * pMD)
{
    // Grab the raw signature; parsing it with MetaSig is easier than
    // picking the bytes apart ourselves.
    PCCOR_SIGNATURE pSig;
    DWORD cbSigSize;
    pMD->GetSig(&pSig, &cbSigSize);

    if (pSig == NULL)
    {
        // A null sig should only happen with a corrupted image (even for
        // lightweight-codegen). The jit+verifier is expected to catch this
        // before we ever land here, but be defensive anyway.
        CONSISTENCY_CHECK_MSGF(false, ("Corrupted image, null sig.(%s::%s)",
            pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
        return 0;
    }

    MetaSig msig(pSig, cbSigSize, pMD->GetModule(), NULL, MetaSig::sigMember);

    // Explicit argument count, plus one for 'this' on instance methods.
    UINT32 cArgs = msig.NumFixedArgs();
    if (!pMD->IsStatic())
    {
        cArgs++;
    }
    return cArgs;
} //GetArgCount
// Allocator callback handed to the debug-info stores
// (DebugInfoManager::GetBoundariesAndVars).
// pData  - caller-supplied allocator context (unused by this allocator)
// cBytes - number of bytes to allocate
// Returns a raw buffer the caller takes ownership of (callers here wrap it
// in holders that release it).
BYTE* InfoStoreNew(void * pData, size_t cBytes)
{
return new BYTE[cBytes];
}
//-----------------------------------------------------------------------------
// Get locations and code offsets for local variables and arguments in a function
// This information is used to find the location of a value at a given IP.
// Arguments:
// input:
// pMethodDesc pointer to the method desc for the function
// startAddr starting address of the function--used to differentiate
// EnC versions
// fixedArgCount number of fixed arguments to the function
// output:
// pVarInfo data structure containing a list of variable and
// argument locations by range of IP offsets
// Note: this function may throw
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetNativeVarData(MethodDesc * pMethodDesc,
CORDB_ADDRESS startAddr,
SIZE_T fixedArgCount,
NativeVarData * pVarInfo)
{
// make sure we haven't done this already
if (pVarInfo->IsInitialized())
{
return;
}
// Holder for the variable-location table the debug info store allocates via
// InfoStoreNew; ownership transfers to pVarInfo at the end.
NewHolder<ICorDebugInfo::NativeVarInfo> nativeVars(NULL);
// Identify the code blob by method desc plus starting address (the address
// distinguishes EnC versions of the same method).
DebugInfoRequest request;
request.InitFromStartingAddr(pMethodDesc, CORDB_ADDRESS_TO_TADDR(startAddr));
ULONG32 entryCount;
// Request only the variable info; the boundary out-params are passed as NULL.
BOOL success = DebugInfoManager::GetBoundariesAndVars(request,
InfoStoreNew, NULL, // allocator
NULL, NULL,
&entryCount, &nativeVars);
if (!success)
ThrowHR(E_FAIL);
// set key fields of pVarInfo
pVarInfo->InitVarDataList(nativeVars, (int)fixedArgCount, (int)entryCount);
} // GetNativeVarData
//-----------------------------------------------------------------------------
// Given an instrumented IL map from the profiler that maps:
//   Original offset IL_A -> Instrumented offset IL_B
// And a native mapping from the JIT that maps:
// Instrumented offset IL_B -> native offset Native_C
// This function merges the two maps and stores the result back into the nativeMap.
// The nativeMap now maps:
// Original offset IL_A -> native offset Native_C
// pEntryCount is the number of valid entries in nativeMap, and it may be adjusted downwards
// as part of the composition.
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::ComposeMapping(const InstrumentedILOffsetMapping * pProfilerILMap, ICorDebugInfo::OffsetMapping nativeMap[], ULONG32* pEntryCount)
{
// Translate the IL offset if the profiler has provided us with a mapping.
// The ICD public API should always expose the original IL offsets, but GetBoundaries()
// directly accesses the debug info, which stores the instrumented IL offsets.
ULONG32 entryCount = *pEntryCount;
// The map pointer could be NULL or there could be no entries in the map, in either case no work to do
if (pProfilerILMap && !pProfilerILMap->IsNull())
{
// If we did instrument, then we can't have any sequence points that
// are "in-between" the old-->new map that the profiler gave us.
// Ex, if map is:
// (6 old -> 36 new)
// (8 old -> 50 new)
// And the jit gives us an entry for 44 new, that will map back to 6 old.
// Since the map can only have one entry for 6 old, we remove 44 new.
// First Pass: invalidate all the duplicate entries by setting their IL offset to MAX_ILNUM
ULONG32 cDuplicate = 0;
ULONG32 prevILOffset = (ULONG32)(ICorDebugInfo::MAX_ILNUM);
for (ULONG32 i = 0; i < entryCount; i++)
{
ULONG32 origILOffset = TranslateInstrumentedILOffsetToOriginal(nativeMap[i].ilOffset, pProfilerILMap);
if (origILOffset == prevILOffset)
{
// mark this sequence point as invalid; refer to the comment above
nativeMap[i].ilOffset = (ULONG32)(ICorDebugInfo::MAX_ILNUM);
cDuplicate += 1;
}
else
{
// overwrite the instrumented IL offset with the original IL offset
nativeMap[i].ilOffset = origILOffset;
prevILOffset = origILOffset;
}
}
// Second Pass: move all the valid entries up front, preserving order
ULONG32 realIndex = 0;
for (ULONG32 curIndex = 0; curIndex < entryCount; curIndex++)
{
if (nativeMap[curIndex].ilOffset != (ULONG32)(ICorDebugInfo::MAX_ILNUM))
{
// This is a valid entry. Move it up front.
nativeMap[realIndex] = nativeMap[curIndex];
realIndex += 1;
}
}
// make sure we have done the bookkeeping correctly
_ASSERTE((realIndex + cDuplicate) == entryCount);
// Final Pass: decrement entryCount by the number of invalidated entries
entryCount -= cDuplicate;
*pEntryCount = entryCount;
}
}
//-----------------------------------------------------------------------------
// Get the native/IL sequence points for a function
// Arguments:
// input:
// pMethodDesc pointer to the method desc for the function
// startAddr starting address of the function--used to differentiate
// output:
// pNativeMap data structure containing a list of sequence points
// Note: this function may throw
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetSequencePoints(MethodDesc * pMethodDesc,
CORDB_ADDRESS startAddr,
SequencePoints * pSeqPoints)
{
// make sure we haven't done this already
if (pSeqPoints->IsInitialized())
{
return;
}
// Use the DebugInfoStore to get IL->Native maps.
// It doesn't matter whether we're jitted, ngenned etc.
DebugInfoRequest request;
request.InitFromStartingAddr(pMethodDesc, CORDB_ADDRESS_TO_TADDR(startAddr));
// Bounds info.
NewArrayHolder<ICorDebugInfo::OffsetMapping> mapCopy(NULL);
ULONG32 entryCount;
// Request only the boundary info; the variable-info out-params are NULL.
BOOL success = DebugInfoManager::GetBoundariesAndVars(request,
InfoStoreNew, NULL, // allocator
&entryCount, &mapCopy,
NULL, NULL);
if (!success)
ThrowHR(E_FAIL);
#ifdef FEATURE_REJIT
// Determine which IL code version produced this native code blob.
CodeVersionManager * pCodeVersionManager = pMethodDesc->GetCodeVersionManager();
ILCodeVersion ilVersion;
NativeCodeVersion nativeCodeVersion = pCodeVersionManager->GetNativeCodeVersion(dac_cast<PTR_MethodDesc>(pMethodDesc), (PCODE)startAddr);
if (!nativeCodeVersion.IsNull())
{
ilVersion = nativeCodeVersion.GetILCodeVersion();
}
// if there is a rejit IL map for this function, apply that in preference to load-time mapping
if (!ilVersion.IsNull() && !ilVersion.IsDefaultVersion())
{
const InstrumentedILOffsetMapping * pRejitMapping = ilVersion.GetInstrumentedILMap();
ComposeMapping(pRejitMapping, mapCopy, &entryCount);
}
else
{
#endif
// if there is a profiler load-time mapping and not a rejit mapping, apply that instead
InstrumentedILOffsetMapping loadTimeMapping =
pMethodDesc->GetModule()->GetInstrumentedILOffsetMapping(pMethodDesc->GetMemberDef());
ComposeMapping(&loadTimeMapping, mapCopy, &entryCount);
#ifdef FEATURE_REJIT
}
#endif
pSeqPoints->InitSequencePoints(entryCount);
// mapCopy and pSeqPoints have elements of different types. Thus, we
// need to copy the individual members from the elements of mapCopy to the
// elements of pSeqPoints. Once we're done, we can release mapCopy
pSeqPoints->CopyAndSortSequencePoints(mapCopy);
} // GetSequencePoints
// ----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TranslateInstrumentedILOffsetToOriginal
//
// Description:
// Helper function to convert an instrumented IL offset to the corresponding original IL offset.
//
// Arguments:
// * ilOffset - offset to be translated
// * pMapping - the profiler-provided mapping between original IL offsets and instrumented IL offsets
//
// Return Value:
// Return the translated offset.
//
// Convert an instrumented IL offset to the corresponding original IL offset
// using the profiler-provided mapping. Special (negative-valued) offsets such
// as prolog/epilog markers, and offsets with no mapping, are returned as-is.
ULONG DacDbiInterfaceImpl::TranslateInstrumentedILOffsetToOriginal(ULONG ilOffset,
                                                                   const InstrumentedILOffsetMapping * pMapping)
{
    SIZE_T cEntries = pMapping->GetCount();
    ARRAY_PTR_COR_IL_MAP rgEntries = pMapping->GetOffsets();
    _ASSERTE((cEntries == 0) == (rgEntries == NULL));

    // Nothing to translate: no mapping at all, or a special IL offset
    // (prolog, epilog, etc. are encoded as negative values).
    if ((cEntries == 0) || ((int)ilOffset < 0))
    {
        return ilOffset;
    }

    // Find the first entry whose instrumented ("new") offset exceeds
    // ilOffset; the entry just before it carries the original offset.
    SIZE_T idx;
    for (idx = 1; idx < cEntries; idx++)
    {
        if (ilOffset < rgEntries[idx].newOffset)
        {
            break;
        }
    }
    return rgEntries[idx - 1].oldOffset;
}
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// Function Data
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// GetILCodeAndSig returns the function's ILCode and SigToken given
// a module and a token. The info will come from a MethodDesc, if
// one exists or from metadata.
//
// GetILCodeAndSig returns the function's IL code location and local variable
// signature token, given a module and a method token. The info comes from the
// MethodDesc if one exists, otherwise from metadata.
// vmDomainAssembly - assembly containing the method
// functionToken    - MethodDef token of the method
// pCodeInfo        - out: target address and size of the IL blob
// pLocalSigToken   - out: local var signature token (mdSignatureNil if none)
// Throws CORDBG_E_FUNCTION_NOT_IL if the method has no IL body.
void DacDbiInterfaceImpl::GetILCodeAndSig(VMPTR_DomainAssembly vmDomainAssembly,
                                          mdToken functionToken,
                                          TargetBuffer * pCodeInfo,
                                          mdToken * pLocalSigToken)
{
    DD_ENTER_MAY_THROW;

    DomainAssembly * pDomainAssembly = vmDomainAssembly.GetDacPtr();
    Module * pModule = pDomainAssembly->GetModule();
    RVA methodRVA = 0;
    DWORD implFlags;

    // preinitialize out params
    pCodeInfo->Clear();
    *pLocalSigToken = mdSignatureNil;

    // Get the RVA and impl flags for this method.
    IfFailThrow(pModule->GetMDImport()->GetMethodImplProps(functionToken,
                                                           &methodRVA,
                                                           &implFlags));

    MethodDesc* pMethodDesc =
        FindLoadedMethodRefOrDef(pModule, functionToken);

    // If the RVA is 0 or it's native, then the method is not IL
    if (methodRVA == 0)
    {
        LOG((LF_CORDB,LL_INFO100000, "DDI::GICAS: Function is not IL - methodRVA == NULL!\n"));
        // Sanity check this....
        if(!pMethodDesc || !pMethodDesc->IsIL())
        {
            LOG((LF_CORDB,LL_INFO100000, "DDI::GICAS: And the MD agrees..\n"));
            ThrowHR(CORDBG_E_FUNCTION_NOT_IL);
        }
        else
        {
            LOG((LF_CORDB,LL_INFO100000, "DDI::GICAS: But the MD says it's IL..\n"));
        }

        if (pMethodDesc != NULL && pMethodDesc->GetRVA() == 0)
        {
            LOG((LF_CORDB,LL_INFO100000, "DDI::GICAS: Actually, MD says RVA is 0 too - keep going...!\n"));
        }
    }
    if (IsMiNative(implFlags))
    {
        LOG((LF_CORDB,LL_INFO100000, "DDI::GICAS: Function is not IL - IsMiNative!\n"));
        ThrowHR(CORDBG_E_FUNCTION_NOT_IL);
    }

    *pLocalSigToken = GetILCodeAndSigHelper(pModule, pMethodDesc, functionToken, methodRVA, pCodeInfo);

    // NOTE: a "#ifdef LOGGING ... else { LOG(...GetMethodImplProps failed...) }"
    // block used to follow the assignment above. It was a leftover from when
    // GetMethodImplProps was checked with an if/else rather than IfFailThrow;
    // since the 'else' no longer had a matching 'if', it could not compile with
    // LOGGING defined and has been removed. Failure of GetMethodImplProps is
    // now reported by the IfFailThrow above.
} // GetILCodeAndSig
//---------------------------------------------------------------------------------------
//
// This is just a worker function for GetILCodeAndSig. It returns the function's ILCode and SigToken
// given a module, a token, and the RVA. If a MethodDesc is provided, it has to be consistent with
// the token and the RVA.
//
// Arguments:
// pModule - the Module containing the specified method
// pMD - the specified method; can be NULL
// mdMethodToken - the MethodDef token of the specified method
// methodRVA - the RVA of the IL for the specified method
// pIL - out parameter; return the target address and size of the IL of the specified method
//
// Return Value:
// Return the local variable signature token of the specified method. Can be mdSignatureNil.
//
// Worker for GetILCodeAndSig. Returns the function's IL code location and
// local variable signature token given a module, a token, and the RVA. If a
// MethodDesc is provided, it must be consistent with the token and the RVA.
//
// Arguments:
//    pModule       - the Module containing the specified method
//    pMD           - the specified method; can be NULL
//    mdMethodToken - the MethodDef token of the specified method
//    methodRVA     - the RVA of the IL for the specified method
//    pIL           - out: target address and size of the method's IL
//
// Return Value:
//    The local variable signature token of the specified method.
//    Can be mdSignatureNil.
mdSignature DacDbiInterfaceImpl::GetILCodeAndSigHelper(Module * pModule,
                                                       MethodDesc * pMD,
                                                       mdMethodDef mdMethodToken,
                                                       RVA methodRVA,
                                                       TargetBuffer * pIL)
{
    _ASSERTE(pModule != NULL);

    // If a MethodDesc is provided, it has to be consistent with the MethodDef token and the RVA.
    _ASSERTE((pMD == NULL) || ((pMD->GetMemberDef() == mdMethodToken) && (pMD->GetRVA() == methodRVA)));

    TADDR pTargetIL; // target address of start of IL blob

    // This works for methods in dynamic modules, and methods overriden by a profiler.
    pTargetIL = pModule->GetDynamicIL(mdMethodToken, TRUE);

    // Method not overriden - get the original copy of the IL by going to the PE file/RVA.
    // If this is in a dynamic module then don't even attempt this since
    // ReflectionModule::GetIL isn't implemented for DAC.
    if (pTargetIL == 0 && !pModule->IsReflection())
    {
        pTargetIL = (TADDR)pModule->GetIL(methodRVA);
    }

    mdSignature mdSig = mdSignatureNil;
    if (pTargetIL == 0)
    {
        // Currently this should only happen for LCG methods (including IL stubs).
        // LCG methods have a 0 RVA, and so we don't currently have any way to get the IL here.
        _ASSERTE(pMD->IsDynamicMethod());
        _ASSERTE(pMD->AsDynamicMethodDesc()->IsLCGMethod()||
                 pMD->AsDynamicMethodDesc()->IsILStub());

        // Clear the buffer.
        pIL->Clear();
    }
    else
    {
        // Now we have the target address of the IL blob, we need to bring it over to the host.
        // DacGetILMethod will copy the COR_ILMETHOD information that we need
        COR_ILMETHOD * pHostIL = DacGetIlMethod(pTargetIL); // host address of start of IL blob
        COR_ILMETHOD_DECODER header(pHostIL);               // host address of header

        // Get the IL code info. We need the address of the IL itself, which will be beyond the header
        // at the beginning of the blob. We ultimately need the target address. To get this, we take
        // target address of the target IL blob and add the offset from the beginning of the host IL blob
        // (the header) to the beginning of the IL itself (we get this information from the header).
        pIL->pAddress = pTargetIL + ((SIZE_T)(header.Code) - (SIZE_T)pHostIL);
        pIL->cbSize = header.GetCodeSize();

        // Now we get the signature token. LocalVarSigTok is an integer token
        // value, so compare against 0 rather than NULL (the NULL comparison
        // triggers int/pointer-comparison compiler warnings). A tiny-format
        // method with no locals reports a token of 0.
        if (header.LocalVarSigTok != 0)
        {
            mdSig = header.GetLocalVarSigTok();
        }
        else
        {
            mdSig = mdSignatureNil;
        }
    }

    return mdSig;
}
// Retrieve metadata file information (timestamp, size, path, NGEN-ness) for
// the given PE assembly by delegating to ClrDataAccess.
// Returns false if the assembly pointer is null or the lookup fails;
// pStrFilename is always assigned (possibly an empty path).
bool DacDbiInterfaceImpl::GetMetaDataFileInfoFromPEFile(VMPTR_PEAssembly vmPEAssembly,
                                                        DWORD &dwTimeStamp,
                                                        DWORD &dwSize,
                                                        bool &isNGEN,
                                                        IStringHolder* pStrFilename)
{
    DD_ENTER_MAY_THROW;

    PEAssembly * pAssembly = vmPEAssembly.GetDacPtr();
    _ASSERTE(pAssembly != NULL);
    if (pAssembly == NULL)
    {
        return false;
    }

    // Scratch outputs ClrDataAccess fills in but we don't surface to the caller.
    DWORD dwDataSize;
    DWORD dwRvaHint;

    WCHAR wszPath[MAX_LONGPATH] = {0};
    DWORD cchPath = MAX_LONGPATH;
    bool fSucceeded = ClrDataAccess::GetMetaDataFileInfoFromPEFile(pAssembly,
                                                                   dwTimeStamp,
                                                                   dwSize,
                                                                   dwDataSize,
                                                                   dwRvaHint,
                                                                   isNGEN,
                                                                   wszPath,
                                                                   cchPath);
    pStrFilename->AssignCopy(wszPath);
    return fSucceeded;
}
// Deliberate stub: NGEN IL-image info is not provided through this path, so
// this always reports failure. The out parameters are left untouched.
bool DacDbiInterfaceImpl::GetILImageInfoFromNgenPEFile(VMPTR_PEAssembly vmPEAssembly,
DWORD &dwTimeStamp,
DWORD &dwSize,
IStringHolder* pStrFilename)
{
return false;
}
// Get start addresses and sizes for hot and cold regions for a native code blob.
// Arguments:
// Input:
// pMethodDesc - method desc for the function we are inspecting
// Output (required):
// pCodeInfo - initializes the m_rgCodeRegions field of this structure
// if the native code is available. Otherwise,
// pCodeInfo->IsValid() is false.
void DacDbiInterfaceImpl::GetMethodRegionInfo(MethodDesc * pMethodDesc,
NativeCodeFunctionData * pCodeInfo)
{
CONTRACTL
{
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pCodeInfo));
}
CONTRACTL_END;
IJitManager::MethodRegionInfo methodRegionInfo = {NULL, 0, NULL, 0};
PCODE functionAddress = pMethodDesc->GetNativeCode();
// get the start address of the hot region and initialize the jit manager
pCodeInfo->m_rgCodeRegions[kHot].pAddress = CORDB_ADDRESS(PCODEToPINSTR(functionAddress));
// if the start address is NULL, the code isn't available yet, so just return
if (functionAddress != NULL)
{
EECodeInfo codeInfo(functionAddress);
_ASSERTE(codeInfo.IsValid());
codeInfo.GetMethodRegionInfo(&methodRegionInfo);
// now get the rest of the region information: hot region size, then the
// cold region's start address and size
pCodeInfo->m_rgCodeRegions[kHot].cbSize = (ULONG)methodRegionInfo.hotSize;
pCodeInfo->m_rgCodeRegions[kCold].Init(PCODEToPINSTR(methodRegionInfo.coldStartAddress),
(ULONG)methodRegionInfo.coldSize);
_ASSERTE(pCodeInfo->IsValid());
}
else
{
// Native code not yet available: the hot region address stays 0 and
// pCodeInfo->IsValid() reports false, which callers check.
_ASSERTE(!pCodeInfo->IsValid());
}
} // GetMethodRegionInfo
// Gets the following information about a native code blob:
// - its method desc
// - whether it's an instantiated generic
// - its EnC version number
// - hot and cold region information.
// If the hot region start address is NULL at the end, it means the native code
// isn't currently available. In this case, all values in pCodeInfo will be
// cleared.
void DacDbiInterfaceImpl::GetNativeCodeInfo(VMPTR_DomainAssembly vmDomainAssembly,
mdToken functionToken,
NativeCodeFunctionData * pCodeInfo)
{
DD_ENTER_MAY_THROW;
_ASSERTE(pCodeInfo != NULL);
// pre-initialize:
pCodeInfo->Clear();
DomainAssembly * pDomainAssembly = vmDomainAssembly.GetDacPtr();
Module * pModule = pDomainAssembly->GetModule();
MethodDesc* pMethodDesc = FindLoadedMethodRefOrDef(pModule, functionToken);
pCodeInfo->vmNativeCodeMethodDescToken.SetHostPtr(pMethodDesc);
// if we are loading a module and trying to bind a previously set breakpoint, we may not have
// a method desc yet, so check for that situation
if(pMethodDesc != NULL)
{
GetMethodRegionInfo(pMethodDesc, pCodeInfo);
// A zero hot-region address means the native code isn't available yet;
// in that case pCodeInfo stays cleared apart from the method desc token.
if (pCodeInfo->m_rgCodeRegions[kHot].pAddress != NULL)
{
pCodeInfo->isInstantiatedGeneric = pMethodDesc->HasClassOrMethodInstantiation();
// Fill in the EnC version number for this code blob.
LookupEnCVersions(pModule,
pCodeInfo->vmNativeCodeMethodDescToken,
functionToken,
pCodeInfo->m_rgCodeRegions[kHot].pAddress,
&(pCodeInfo->encVersion));
}
}
} // GetNativeCodeInfo
// Gets the following information about a native code blob:
// - its method desc
// - whether it's an instantiated generic
// - its EnC version number
// - hot and cold region information.
void DacDbiInterfaceImpl::GetNativeCodeInfoForAddr(VMPTR_MethodDesc vmMethodDesc,
CORDB_ADDRESS hotCodeStartAddr,
NativeCodeFunctionData * pCodeInfo)
{
DD_ENTER_MAY_THROW;
_ASSERTE(pCodeInfo != NULL);
if (hotCodeStartAddr == NULL)
{
// if the start address is NULL, the code isn't available yet, so just return
_ASSERTE(!pCodeInfo->IsValid());
return;
}
IJitManager::MethodRegionInfo methodRegionInfo = {NULL, 0, NULL, 0};
TADDR codeAddr = CORDB_ADDRESS_TO_TADDR(hotCodeStartAddr);
#ifdef TARGET_ARM
// TADDR should not have the thumb code bit set.
_ASSERTE((codeAddr & THUMB_CODE) == 0);
codeAddr &= ~THUMB_CODE;
#endif
EECodeInfo codeInfo(codeAddr);
_ASSERTE(codeInfo.IsValid());
// We may not have the memory for the cold code region in a minidump.
// Do not fail stackwalking because of this.
EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY
{
codeInfo.GetMethodRegionInfo(&methodRegionInfo);
}
EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY;
// Even if GetMethodRegionInfo() fails to retrieve the cold code region info,
// we should still be able to get the hot code region info. We are counting on this for
// stackwalking to work in dump debugging scenarios.
_ASSERTE(methodRegionInfo.hotStartAddress == codeAddr);
// now get the rest of the region information
pCodeInfo->m_rgCodeRegions[kHot].Init(PCODEToPINSTR(methodRegionInfo.hotStartAddress),
(ULONG)methodRegionInfo.hotSize);
pCodeInfo->m_rgCodeRegions[kCold].Init(PCODEToPINSTR(methodRegionInfo.coldStartAddress),
(ULONG)methodRegionInfo.coldSize);
_ASSERTE(pCodeInfo->IsValid());
MethodDesc* pMethodDesc = vmMethodDesc.GetDacPtr();
pCodeInfo->isInstantiatedGeneric = pMethodDesc->HasClassOrMethodInstantiation();
pCodeInfo->vmNativeCodeMethodDescToken = vmMethodDesc;
// Fill in the EnC version of this code blob; the latest version is not
// needed by the caller, so it goes into a throwaway local.
SIZE_T unusedLatestEncVersion;
Module * pModule = pMethodDesc->GetModule();
_ASSERTE(pModule != NULL);
LookupEnCVersions(pModule,
vmMethodDesc,
pMethodDesc->GetMemberDef(),
codeAddr,
&unusedLatestEncVersion, //unused by caller
&(pCodeInfo->encVersion));
} // GetNativeCodeInfoForAddr
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
//
// Functions to get Type and Class information
//
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
//-----------------------------------------------------------------------------
//DacDbiInterfaceImpl::GetTypeHandles
// Get the approximate and exact type handles for a type
// Arguments:
// input:
// vmThExact - VMPTR of the exact type handle. If this method is called
// to get information for a new generic instantiation, this will already
// be initialized. If it's called to get type information for an arbitrary
// type (i.e., called to initialize an instance of CordbClass), it will be NULL
// vmThApprox - VMPTR of the approximate type handle. If this method is called
// to get information for a new generic instantiation, this will already
// be initialized. If it's called to get type information for an arbitrary
// type (i.e., called to initialize an instance of CordbClass), it will be NULL
// output:
// pThExact - handle for exact type information for a generic instantiation
// pThApprox - handle for type information
// Notes:
// pThExact and pTHApprox must be pointers to existing memory.
//-----------------------------------------------------------------------------
// Convert the exact and approximate VMPTR type handles into TypeHandles,
// validating that the approximate type is actually loaded.
// Throws CORDBG_E_CLASS_NOT_LOADED when the class can't be used yet.
// pThExact / pThApprox must point to existing TypeHandle storage.
void DacDbiInterfaceImpl::GetTypeHandles(VMPTR_TypeHandle vmThExact,
                                         VMPTR_TypeHandle vmThApprox,
                                         TypeHandle * pThExact,
                                         TypeHandle * pThApprox)
{
    _ASSERTE((pThExact != NULL) && (pThApprox != NULL));

    *pThExact = TypeHandle::FromPtr(vmThExact.GetDacPtr());
    *pThApprox = TypeHandle::FromPtr(vmThApprox.GetDacPtr());

    // Report the class as not loaded if we can't find it, or if it is a
    // non-value class that hasn't been restored yet. Unrestored value
    // classes are allowed through; access to their parent is special-cased
    // elsewhere.
    bool fUsable = !pThApprox->IsNull() &&
                   (pThApprox->IsValueType() || pThApprox->IsRestored());
    if (!fUsable)
    {
        LOG((LF_CORDB, LL_INFO10000, "D::GASCI: class isn't loaded.\n"));
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }

    // An unrestored exact type handle is of no use - drop it.
    if (!pThExact->IsNull() && !pThExact->IsRestored())
    {
        *pThExact = TypeHandle();
    }
} // DacDbiInterfaceImpl::GetTypeHandles
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetTotalFieldCount
// Gets the total number of fields for a type.
// Input Argument: thApprox - type handle used to determine the number of fields
// Return Value: count of the total fields of the type.
//-----------------------------------------------------------------------------
unsigned int DacDbiInterfaceImpl::GetTotalFieldCount(TypeHandle thApprox)
{
MethodTable *pMT = thApprox.GetMethodTable();
// Count the instance and static fields for this class (not including parent).
// This will not include any newly added EnC fields.
unsigned int IFCount = pMT->GetNumIntroducedInstanceFields();
unsigned int SFCount = pMT->GetNumStaticFields();
#ifdef EnC_SUPPORTED
PTR_Module pModule = pMT->GetModule();
// Stats above don't include EnC fields. So add them now.
if (pModule->IsEditAndContinueEnabled())
{
PTR_EnCEEClassData pEncData =
(dac_cast<PTR_EditAndContinueModule>(pModule))->GetEnCEEClassData(pMT, TRUE);
if (pEncData != NULL)
{
_ASSERTE(pEncData->GetMethodTable() == pMT);
// EnC only adds fields, never removes them.
IFCount += pEncData->GetAddedInstanceFields();
SFCount += pEncData->GetAddedStaticFields();
}
}
#endif
// Total = instance fields + static fields (both including EnC additions).
return IFCount + SFCount;
} // DacDbiInterfaceImpl::GetTotalFieldCount
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::InitClassData
// initializes various values of the ClassInfo data structure, including the
// field count, generic args count, size and value class flag
// Arguments:
// input: thApprox - used to get access to all the necessary values
// fIsInstantiatedType - used to determine how to compute the size
// output: pData - contains fields to be initialized
//-----------------------------------------------------------------------------
// Initialize the field list and object size of a ClassInfo.
// thApprox           - supplies the method table and field counts
// fIsInstantiatedType - whether this is a concrete generic instantiation
// pData              - out: the ClassInfo being populated
void DacDbiInterfaceImpl::InitClassData(TypeHandle thApprox,
                                        BOOL fIsInstantiatedType,
                                        ClassInfo * pData)
{
    // Reserve space for every field of the type (including EnC additions).
    pData->m_fieldList.Alloc(GetTotalFieldCount(thApprox));

    // For generic classes the object size is only meaningful for a concrete
    // instantiation, which the type handle routes us to; an open generic
    // type reports size 0.
    pData->m_objectSize = 0;
    bool fSizeKnown = (thApprox.GetNumGenericArgs() == 0) || fIsInstantiatedType;
    if (fSizeKnown)
    {
        pData->m_objectSize = thApprox.GetMethodTable()->GetNumInstanceFieldBytes();
    }
} // DacDbiInterfaceImpl::InitClassData
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetStaticsBases
// Gets the base table addresses for both GC and non-GC statics
// Arguments:
// input: thExact - exact type handle for the class
// pAppDomain - AppDomain in which the class is loaded
// output: ppGCStaticsBase - base pointer for GC statics
// ppNonGCStaticsBase - base pointer for non GC statics
// Notes:
// If this is a non-generic type, or an instantiated type, then we'll be able to get the static var bases
// If the typeHandle represents a generic type constructor (i.e. an uninstantiated generic class), then
// the static bases will be null (since statics are per-instantiation).
//-----------------------------------------------------------------------------
// Fetch the base table addresses for GC and non-GC statics of the class.
// If the type is an uninstantiated generic (statics are per-instantiation)
// the bases are left untouched; callers pre-initialize them to NULL.
// pAppDomain is accepted for interface symmetry but not consulted here.
void DacDbiInterfaceImpl::GetStaticsBases(TypeHandle thExact,
                                          AppDomain * pAppDomain,
                                          PTR_BYTE * ppGCStaticsBase,
                                          PTR_BYTE * ppNonGCStaticsBase)
{
    MethodTable * pMT = thExact.GetMethodTable();

    Module * pStaticsModule = pMT->GetModuleForStatics();
    if (pStaticsModule == NULL)
    {
        return;
    }

    PTR_DomainLocalModule pLocalModule = pStaticsModule->GetDomainLocalModule();
    if (pLocalModule != NULL)
    {
        *ppGCStaticsBase = pLocalModule->GetGCStaticsBasePointer(pMT);
        *ppNonGCStaticsBase = pLocalModule->GetNonGCStaticsBasePointer(pMT);
    }
} // DacDbiInterfaceImpl::GetStaticsBases
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::ComputeFieldData
// Computes the field info for pFD and stores it in pcurrentFieldData
// Arguments:
// input: pFD - FieldDesc used to get necessary information
// pGCStaticsBase - base table address for GC statics
// pNonGCStaticsBase - base table address for non-GC statics
// output: pCurrentFieldData - contains fields to be initialized
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::ComputeFieldData(PTR_FieldDesc pFD,
PTR_BYTE pGCStaticsBase,
PTR_BYTE pNonGCStaticsBase,
FieldData * pCurrentFieldData)
{
pCurrentFieldData->Initialize(pFD->IsStatic(), pFD->IsPrimitive(), pFD->GetMemberDef());
#ifdef EnC_SUPPORTED
// If the field was newly introduced via EnC, and hasn't yet
// been fixed up, then we'll send back a marker indicating
// that it isn't yet available.
if (pFD->IsEnCNew())
{
// @dbgtodo Microsoft inspection: eliminate the debugger token when ICDClass and ICDType are
// completely DACized
pCurrentFieldData->m_vmFieldDesc.SetHostPtr(pFD);
pCurrentFieldData->m_fFldStorageAvailable = FALSE;
pCurrentFieldData->m_fFldIsTLS = FALSE;
pCurrentFieldData->m_fFldIsRVA = FALSE;
pCurrentFieldData->m_fFldIsCollectibleStatic = FALSE;
}
else
#endif // EnC_SUPPORTED
{
// Otherwise, we'll compute the info & send it back.
pCurrentFieldData->m_fFldStorageAvailable = TRUE;
// @dbgtodo Microsoft inspection: eliminate the debugger token when ICDClass and ICDType are
// completely DACized
pCurrentFieldData->m_vmFieldDesc.SetHostPtr(pFD);
// Classify the field: thread-local static, RVA static, or a static on a
// collectible type - each is addressed differently below.
pCurrentFieldData->m_fFldIsTLS = (pFD->IsThreadStatic() == TRUE);
pCurrentFieldData->m_fFldIsRVA = (pFD->IsRVA() == TRUE);
pCurrentFieldData->m_fFldIsCollectibleStatic = (pFD->IsStatic() == TRUE &&
pFD->GetEnclosingMethodTable()->Collectible());
// Compute the address of the field
if (pFD->IsStatic())
{
// statics are addressed using an absolute address.
if (pFD->IsRVA())
{
// RVA statics are relative to a base module address
DWORD offset = pFD->GetOffset();
PTR_VOID addr = pFD->GetModule()->GetRvaField(offset);
if (pCurrentFieldData->OkToGetOrSetStaticAddress())
{
pCurrentFieldData->SetStaticAddress(PTR_TO_TADDR(addr));
}
}
else if (pFD->IsThreadStatic() ||
pCurrentFieldData->m_fFldIsCollectibleStatic)
{
// this is a special type of static that must be queried using DB_IPCE_GET_SPECIAL_STATIC,
// so no address is recorded here
}
else
{
// This is a normal static variable in the GC or Non-GC static base table.
// Primitive (non-GC-reference) statics live in the non-GC table.
PTR_BYTE base = pFD->IsPrimitive() ? pNonGCStaticsBase : pGCStaticsBase;
if (base == NULL)
{
// static var not available. This may be an open generic class (not an instantiated type),
// or we might only have approximate type information because the type hasn't been
// initialized yet.
if (pCurrentFieldData->OkToGetOrSetStaticAddress())
{
pCurrentFieldData->SetStaticAddress(NULL);
}
}
else
{
if (pCurrentFieldData->OkToGetOrSetStaticAddress())
{
// calculate the absolute address using the base and the offset from the base
pCurrentFieldData->SetStaticAddress(PTR_TO_TADDR(base) + pFD->GetOffset());
}
}
}
}
else
{
// instance variables are addressed using an offset within the instance
if (pCurrentFieldData->OkToGetOrSetInstanceOffset())
{
pCurrentFieldData->SetInstanceOffset(pFD->GetOffset());
}
}
}
} // DacDbiInterfaceImpl::ComputeFieldData
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::CollectFields
// Gets information for all the fields for a given type
// Arguments:
// input: thExact - used to determine whether we need to get statics base tables
// thApprox - used to get the field desc iterator
// pAppDomain - used to get statics base tables
// output:
// pFieldList - contains fields to be initialized
// Note: the caller must ensure that *ppFields is NULL (i.e., any previously allocated memory
// must have been deallocated.
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::CollectFields(TypeHandle thExact,
                                        TypeHandle thApprox,
                                        AppDomain * pAppDomain,
                                        DacDbiArrayList<FieldData> * pFieldList)
{
    // Statics base tables; these stay NULL when no exact handle is available
    // or the type is collectible, in which case ComputeFieldData reports the
    // corresponding statics as unavailable.
    PTR_BYTE pGCStaticsBase = NULL;
    PTR_BYTE pNonGCStaticsBase = NULL;
    if (!thExact.IsNull() && !thExact.GetMethodTable()->Collectible())
    {
        // get base tables for static fields
        GetStaticsBases(thExact, pAppDomain, &pGCStaticsBase, &pNonGCStaticsBase);
    }
    unsigned int fieldCount = 0;
    // <TODO> we are losing exact type information for static fields in generic types. We have
    // field desc iterators only for approximate types, but statics are per instantiation, so we
    // need an exact type to be able to handle these correctly. We need to use
    // FieldDesc::GetExactDeclaringType to get at the correct field. This requires the exact
    // TypeHandle. </TODO>
    EncApproxFieldDescIterator fdIterator(thApprox.GetMethodTable(),
                                          ApproxFieldDescIterator::ALL_FIELDS,
                                          FALSE); // don't fixup EnC (we can't, we're stopped)
    PTR_FieldDesc pCurrentFD;
    unsigned int index = 0;
    // Walk every field desc, filling one FieldData entry per field. Note the
    // evaluation order in the condition: Next() is called before the index
    // bound is checked, so the iterator is advanced once more on the final
    // (failing) test and then abandoned.
    while (((pCurrentFD = fdIterator.Next()) != NULL) && (index < pFieldList->Count()))
    {
        // fill in the pCurrentEntry structure
        ComputeFieldData(pCurrentFD, pGCStaticsBase, pNonGCStaticsBase, &((*pFieldList)[index]));
        // Bump our counts and pointers.
        fieldCount++;
        index++;
    }
    // The caller sized pFieldList up front; the iterator is expected to yield
    // exactly that many fields.
    _ASSERTE(fieldCount == (unsigned int)pFieldList->Count());
} // DacDbiInterfaceImpl::CollectFields
// DacDbi API: determine whether the given type is a ValueType
BOOL DacDbiInterfaceImpl::IsValueType (VMPTR_TypeHandle vmTypeHandle)
{
    DD_ENTER_MAY_THROW;

    // Marshal the target-side handle into a host TypeHandle and query it.
    TypeHandle thType = TypeHandle::FromPtr(vmTypeHandle.GetDacPtr());
    return thType.IsValueType();
}
// DacDbi API: determine whether the given type still contains unbound
// generic type variables (e.g. List<T> rather than List<int>)
BOOL DacDbiInterfaceImpl::HasTypeParams (VMPTR_TypeHandle vmTypeHandle)
{
    DD_ENTER_MAY_THROW;

    // Marshal the target-side handle into a host TypeHandle and query it.
    TypeHandle thType = TypeHandle::FromPtr(vmTypeHandle.GetDacPtr());
    return thType.ContainsGenericVariables();
}
// DacDbi API: Get type information for a class
//
// Arguments:
//    input:  vmAppDomain - AppDomain the class is loaded in; if NULL the field
//                          list is left unpopulated (statics bases are per-domain)
//            vmThExact   - type handle for the class
//    output: pData       - receives field counts, size, value-class flag and
//                          (when vmAppDomain is non-NULL) the field list
void DacDbiInterfaceImpl::GetClassInfo(VMPTR_AppDomain vmAppDomain,
                                       VMPTR_TypeHandle vmThExact,
                                       ClassInfo * pData)
{
    DD_ENTER_MAY_THROW;
    AppDomain * pAppDomain = vmAppDomain.GetDacPtr();
    TypeHandle thExact;
    TypeHandle thApprox;
    // NOTE(review): vmThExact is deliberately passed for both parameters here
    // (this API has no separate approximate handle); GetTypeHandles presumably
    // derives the approximate handle from the exact one -- confirm against
    // GetTypeHandles' implementation.
    GetTypeHandles(vmThExact, vmThExact, &thExact, &thApprox);
    // initialize field count, generic args count, size and value class flag
    InitClassData(thApprox, false, pData);
    if (pAppDomain != NULL)
        CollectFields(thExact, thApprox, pAppDomain, &(pData->m_fieldList));
} // DacDbiInterfaceImpl::GetClassInfo
// DacDbi API: Get field information and object size for an instantiated generic type
//
// Arguments:
//    input:  vmDomainAssembly - domain assembly; its AppDomain is used when
//                               computing statics base tables
//            vmThExact        - exact type handle for the instantiation
//            vmThApprox       - approximate type handle used to walk field descs
//    output: pFieldList       - allocated here and filled with one entry per field
//            pObjectSize      - number of instance field bytes for the type
void DacDbiInterfaceImpl::GetInstantiationFieldInfo (VMPTR_DomainAssembly vmDomainAssembly,
                                                     VMPTR_TypeHandle vmThExact,
                                                     VMPTR_TypeHandle vmThApprox,
                                                     DacDbiArrayList<FieldData> * pFieldList,
                                                     SIZE_T * pObjectSize)
{
    DD_ENTER_MAY_THROW;
    DomainAssembly * pDomainAssembly = vmDomainAssembly.GetDacPtr();
    _ASSERTE(pDomainAssembly != NULL);
    AppDomain * pAppDomain = pDomainAssembly->GetAppDomain();
    TypeHandle thExact;
    TypeHandle thApprox;
    GetTypeHandles(vmThExact, vmThApprox, &thExact, &thApprox);
    // Instance size comes from the (approximate) method table.
    *pObjectSize = thApprox.GetMethodTable()->GetNumInstanceFieldBytes();
    // Size the output list up front, then populate it.
    pFieldList->Alloc(GetTotalFieldCount(thApprox));
    CollectFields(thExact, thApprox, pAppDomain, pFieldList);
} // DacDbiInterfaceImpl::GetInstantiationFieldInfo
//-----------------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk member functions
//-----------------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// TypeDataWalk constructor--initialize the buffer and number of remaining items from input data
// Arguments: pData - pointer to a list of records containing information about type parameters for an
// instantiated type
// nData - number of entries in pData
//-----------------------------------------------------------------------------
// Position the walk cursor at the first node of the flattened type-argument
// list and remember how many nodes remain to be consumed.
DacDbiInterfaceImpl::TypeDataWalk::TypeDataWalk(DebuggerIPCE_TypeArgData * pData, unsigned int nData)
    : m_pCurrentData(pData),
      m_nRemaining(nData)
{
} // DacDbiInterfaceImpl::TypeDataWalk::TypeDataWalk
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::ReadOne
// read and return a single node from the list of type parameters
// Arguments: none (uses internal state)
// Return value: information about the next type parameter in m_pCurrentData
//-----------------------------------------------------------------------------
// Hand out the next node of the type-argument list, or NULL once the list is
// exhausted. Advances the cursor and decrements the remaining count.
DebuggerIPCE_TypeArgData * DacDbiInterfaceImpl::TypeDataWalk::ReadOne()
{
    LIMITED_METHOD_CONTRACT;

    // Nothing left to hand out.
    if (m_nRemaining == 0)
    {
        return NULL;
    }

    // Consume one node and advance past it.
    m_nRemaining--;
    return m_pCurrentData++;
} // DacDbiInterfaceImpl::TypeDataWalk::ReadOne
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::Skip
// Skip a single node from the list of type handles along with any children it might have
// Arguments: none (uses internal state)
// Return value: none (updates internal state)
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::TypeDataWalk::Skip()
{
LIMITED_METHOD_CONTRACT;
DebuggerIPCE_TypeArgData * pData = ReadOne();
if (pData)
{
for (unsigned int i = 0; i < pData->numTypeArgs; i++)
{
Skip();
}
}
} // DacDbiInterfaceImpl::TypeDataWalk::Skip
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeArg
// Read a type handle when it is used in the position of a generic argument or
// argument of an array or address type. Take into account generic code sharing if we
// have been requested to find the canonical representation amongst a set of shared-
// code generic types. That is, if generics code sharing is enabled then return "Object"
// for all reference types, and canonicalize underneath value types, e.g. V<string> --> V<object>.
// Return TypeHandle() if any of the type handles are not loaded.
//
// Arguments: retrieveWhich - indicates whether to retrieve a canonical representation or
// an exact representation
// Return value: the type handle for the type parameter
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeArg(TypeHandleReadType retrieveWhich)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;
    // Without generic code sharing there is nothing to canonicalize: the exact
    // and canonical representations coincide, so always read the exact handle.
#if !defined(FEATURE_SHARE_GENERIC_CODE)
    return ReadLoadedTypeHandle(kGetExact);
#else
    if (retrieveWhich == kGetExact)
        return ReadLoadedTypeHandle(kGetExact);
    // This nasty bit of code works out what the "canonicalization" of a
    // parameter to a generic is once we take into account generics code sharing.
    //
    // This logic is somewhat a duplication of logic in vm\typehandle.cpp, though
    // that logic operates on a TypeHandle format, i.e. assumes we're finding the
    // canonical form of a type that has already been loaded. Here we are finding
    // the canonical form of a type that may not have been loaded (but where we expect
    // its canonical form to have been loaded).
    //
    // Ideally this logic would not be duplicated in this way, but it is difficult
    // to arrange for that.
    DebuggerIPCE_TypeArgData * pData = ReadOne();
    if (!pData)
        return TypeHandle();
    // If we have code sharing then the process of canonicalizing is trickier.
    // unfortunately we have to include the exact specification of compatibility at
    // this point.
    CorElementType elementType = pData->data.elementType;
    switch (elementType)
    {
    // Pointers canonicalize underneath: recurse on the single referent arg.
    case ELEMENT_TYPE_PTR:
        _ASSERTE(pData->numTypeArgs == 1);
        return PtrOrByRefTypeArg(pData, retrieveWhich);
        break;
    // Value types keep a (canonicalized) instantiation; reference classes
    // collapse to the canonical method table -- see ClassTypeArg.
    case ELEMENT_TYPE_CLASS:
    case ELEMENT_TYPE_VALUETYPE:
        return ClassTypeArg(pData, retrieveWhich);
        break;
    case ELEMENT_TYPE_FNPTR:
        return FnPtrTypeArg(pData, retrieveWhich);
        break;
    // Everything else: object references collapse to the canonical type,
    // primitives map to their own element type handle.
    default:
        return ObjRefOrPrimitiveTypeArg(pData, elementType);
        break;
    }
#endif // FEATURE_SHARE_GENERIC_CODE
} // DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeArg
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeHandles
// Iterate through the type argument data, creating type handles as we go.
//
// Arguments:
// input: retrieveWhich - indicates whether we can return a canonical type handle
// or we must return an exact type handle
// nTypeArgs - number of type arguments to be read
// output: ppResults - pointer to a list of TypeHandles that will hold the type handles
// for each type parameter
//
// Return Value: FALSE iff any of the type handles are not loaded.
//-----------------------------------------------------------------------------
// Read nTypeArgs type handles from the walk into ppResults. Every slot is
// filled even if an earlier lookup fails (so the walk position stays correct);
// the return value reports whether all of them resolved to loaded handles.
BOOL DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeHandles(TypeHandleReadType retrieveWhich,
                                                              unsigned int nTypeArgs,
                                                              TypeHandle * ppResults)
{
    WRAPPER_NO_CONTRACT;

    BOOL fAllLoaded = TRUE;
    for (unsigned int idx = 0; idx < nTypeArgs; idx++)
    {
        ppResults[idx] = ReadLoadedTypeArg(retrieveWhich);
        if (ppResults[idx].IsNull())
        {
            fAllLoaded = FALSE;
        }
    }
    return fAllLoaded;
} // DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeHandles
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedInstantiation
// Read an instantiation of a generic type if it has already been created.
//
// Arguments:
// input: retrieveWhich - indicates whether we can return a canonical type handle
// or we must return an exact type handle
// pModule - module in which the instantiated type is loaded
// mdToken - metadata token for the type
// nTypeArgs - number of type arguments to be read
// Return value: the type handle for the instantiated type
//-----------------------------------------------------------------------------
// Read an instantiation of a generic type if it has already been created.
//
// Arguments:
//    input: retrieveWhich - whether a canonical representation is acceptable
//                           or an exact one is required
//           pModule       - module in which the instantiated type is loaded
//           mdToken       - metadata token for the type
//           nTypeArgs     - number of type arguments to read from the walk
// Return value: the loaded instantiated type, or a null TypeHandle if any of
//               the type arguments is not loaded.
TypeHandle DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedInstantiation(TypeHandleReadType retrieveWhich,
                                                                      Module *           pModule,
                                                                      mdTypeDef          mdToken,
                                                                      unsigned int       nTypeArgs)
{
    WRAPPER_NO_CONTRACT;

    // Fix: the buffer is created with new[], so it must be released with
    // delete[]. NewHolder performs a scalar delete, which is undefined
    // behavior for an array allocation; NewArrayHolder (as already used by
    // FnPtrTypeArg) pairs the allocation with delete[].
    NewArrayHolder<TypeHandle> pInst(new TypeHandle[nTypeArgs]);

    // get the type handle for each of the type parameters
    if (!ReadLoadedTypeHandles(retrieveWhich, nTypeArgs, pInst))
    {
        return TypeHandle();
    }

    // get the type handle for the particular instantiation that corresponds to
    // the given type parameters
    return FindLoadedInstantiation(pModule, mdToken, nTypeArgs, pInst);
} // DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedInstantiation
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeHandle
//
// Compute the type handle for a given type.
// This is the top-level function that will return the type handle for an
// arbitrary type. It uses mutual recursion with ReadLoadedTypeArg to get
// the type handle for a (possibly parameterized) type. Note that the referent of
// address types or the element type of an array type are viewed as type parameters.
//
// For example, assume that we are retrieving only exact types, and we have as our
// top level type an array defined as int [][].
// We start by noting that the type is an array type, so we call ReadLoadedTypeArg to
// get the element type. We find that the element type is also an array:int [].
// ReadLoadedTypeArg will call ReadLoadedTypeHandle with this type information.
// Again, we determine that the top-level type is an array, so we call ReadLoadedTypeArg
// to get the element type, int. ReadLoadedTypeArg will again call ReadLoadedTypeHandle
// which will find that this time, the top-level type is a primitive type. It will request
// the loaded type handle from the loader and return it. On return, we get the type handle
// for an array of int from the loader. We return again and request the type handle for an
// array of arrays of int. This is the type handle we will return.
//
// Arguments:
// input: retrieveWhich - determines whether we can return the type handle for
// a canonical type or only for an exact type
// we use the list of type data stored in the TypeDataWalk data members
// for other input information
// Return value: type handle for the current type.
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeHandle(TypeHandleReadType retrieveWhich)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;
    // get the type information at the head of the list m_pCurrentData
    DebuggerIPCE_TypeArgData * pData = ReadOne();
    if (!pData)
        return TypeHandle();
    // get the type handle that corresponds to its elementType
    TypeHandle typeHandle;
    switch (pData->data.elementType)
    {
    // Array types: recurse (via the helper) for the element type, then look
    // up the loaded array type of the right kind and rank.
    case ELEMENT_TYPE_ARRAY:
    case ELEMENT_TYPE_SZARRAY:
        typeHandle = ArrayTypeArg(pData, retrieveWhich);
        break;
    // Address types: recurse for the referent, then look up the PTR/BYREF type.
    case ELEMENT_TYPE_PTR:
    case ELEMENT_TYPE_BYREF:
        typeHandle = PtrOrByRefTypeArg(pData, retrieveWhich);
        break;
    // Class/value types: read the (possibly empty) instantiation and find the
    // corresponding loaded type in its module.
    case ELEMENT_TYPE_CLASS:
    case ELEMENT_TYPE_VALUETYPE:
        {
            Module * pModule = pData->data.ClassTypeData.vmModule.GetDacPtr();
            typeHandle = ReadLoadedInstantiation(retrieveWhich,
                                                 pModule,
                                                 pData->data.ClassTypeData.metadataToken,
                                                 pData->numTypeArgs);
        }
        break;
    case ELEMENT_TYPE_FNPTR:
        {
            typeHandle = FnPtrTypeArg(pData, retrieveWhich);
        }
        break;
    // Primitive types: no recursion needed, map directly to a loaded handle.
    default:
        typeHandle = FindLoadedElementType(pData->data.elementType);
        break;
    }
    return typeHandle;
} // DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeHandle
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::ArrayTypeArg
// get a loaded type handle for an array type (E_T_ARRAY or E_T_SZARRAY)
//
// Arguments:
// input: pArrayTypeInfo - type information for an array type
// Although this is in fact a pointer (in)to a list, we treat it here
// simply as a pointer to a single instance of DebuggerIPCE_TypeArgData
// which holds type information for an array.
// This is the most recent type node (for an array type) retrieved
// by TypeDataWalk::ReadOne(). The call to ReadLoadedTypeArg will
// result in call(s) to ReadOne to retrieve one or more type nodes
// that are needed to compute the type handle for the
// element type of the array. When we return from that call, we pass
// pArrayTypeInfo along with arrayElementTypeArg to FindLoadedArrayType
// to get the type handle for this particular array type.
// Note:
// On entry, we know that pArrayTypeInfo is the same as m_pCurrentData - 1,
// but by the time we need to use it, this is no longer true. Because
// we can't predict how many nodes will be consumed by the call to
// ReadLoadedTypeArg, we can't compute this value from the member fields
// of TypeDataWalk and therefore pass it as a parameter.
// retrieveWhich - determines whether we can return the type handle for
// a canonical type or only for an exact type
// Return value: the type handle corresponding to the array type
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::TypeDataWalk::ArrayTypeArg(DebuggerIPCE_TypeArgData * pArrayTypeInfo,
                                                           TypeHandleReadType retrieveWhich)
{
    // First recover the element type; this consumes one or more nodes from
    // the walk. Without a loaded element type no array type can be built.
    TypeHandle thElement = ReadLoadedTypeArg(retrieveWhich);
    if (thElement.IsNull())
    {
        return TypeHandle();
    }

    // Combine the element type with the array kind (E_T_ARRAY/E_T_SZARRAY)
    // and rank recorded on pArrayTypeInfo to find the loaded array type.
    return FindLoadedArrayType(pArrayTypeInfo->data.elementType,
                               thElement,
                               pArrayTypeInfo->data.ArrayTypeData.arrayRank);
} // DacDbiInterfaceImpl::TypeDataWalk::ArrayTypeArg
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::PtrOrByRefTypeArg
// get a loaded type handle for an address type (E_T_PTR or E_T_BYREF)
//
// Arguments:
// input: pPtrOrByRefTypeInfo - type information for a pointer or byref type
// Although this is in fact a pointer (in)to a list, we treat it here
// simply as a pointer to a single instance of DebuggerIPCE_TypeArgData
// which holds type information for a pointer or byref type.
// This is the most recent type node (for a pointer or byref type) retrieved
// by TypeDataWalk::ReadOne(). The call to ReadLoadedTypeArg will
// result in call(s) to ReadOne to retrieve one or more type nodes
// that are needed to compute the type handle for the
// referent type of the pointer. When we return from that call, we pass
// pPtrOrByRefTypeInfo along with referentTypeArg to FindLoadedPointerOrByrefType
// to get the type handle for this particular pointer or byref type.
// Note:
// On entry, we know that pPtrOrByRefTypeInfo is the same as m_pCurrentData - 1,
// but by the time we need to use it, this is no longer true. Because
// we can't predict how many nodes will be consumed by the call to
// ReadLoadedTypeArg, we can't compute this value from the member fields
// of TypeDataWalk and therefore pass it as a parameter.
// retrieveWhich - determines whether we can return the type handle for
// a canonical type or only for an exact type
// Return value: the type handle corresponding to the address type
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::TypeDataWalk::PtrOrByRefTypeArg(DebuggerIPCE_TypeArgData * pPtrOrByRefTypeInfo,
                                                                TypeHandleReadType retrieveWhich)
{
    // Recover the referent first; it is encoded as the type argument of the
    // pointer/byref node and may itself span several nodes of the walk.
    TypeHandle thReferent = ReadLoadedTypeArg(retrieveWhich);
    if (thReferent.IsNull())
    {
        return TypeHandle();
    }

    // Map (E_T_PTR or E_T_BYREF, referent) onto the loaded address type.
    return FindLoadedPointerOrByrefType(pPtrOrByRefTypeInfo->data.elementType, thReferent);
} // DacDbiInterfaceImpl::TypeDataWalk::PtrOrByRefTypeArg
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::ClassTypeArg
// get a loaded type handle for a class type (E_T_CLASS or E_T_VALUETYPE)
//
// Arguments:
// input: pClassTypeInfo - type information for a class type
// Although this is in fact a pointer (in)to a list, we treat it here
// simply as a pointer to a single instance of DebuggerIPCE_TypeArgData
// which holds type information for a pointer or byref type.
// This is the most recent type node (for a pointer or byref type) retrieved
// by TypeDataWalk::ReadOne(). The call to ReadLoadedInstantiation will
// result in call(s) to ReadOne to retrieve one or more type nodes
// that are needed to compute the type handle for the type parameters
// for the class. If we can't find an exact loaded type for the class, we will
// instead return a canonical method table. In this case, we need to skip
// the type parameter information for each actual parameter to the class.
// This is necessary because we may be getting a type handle for a class which is
// in turn an argument to a parent type. If the parent type has more arguments, we
// need to be at the right place in the list when we return. We use
// pClassTypeInfo to get the number of type arguments that we need to skip.
// retrieveWhich - determines whether we can return the type handle for
// a canonical type or only for an exact type
// Return value: the type handle corresponding to the class type
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::TypeDataWalk::ClassTypeArg(DebuggerIPCE_TypeArgData * pClassTypeInfo,
                                                           TypeHandleReadType retrieveWhich)
{
    Module * pModule = pClassTypeInfo->data.ClassTypeData.vmModule.GetDacPtr();
    // Look up the (uninstantiated) type definition so we can tell value types
    // from reference types; the lookup may fail (null handle), in which case
    // we fall back on the element type recorded in the node.
    TypeHandle typeDef = ClassLoader::LookupTypeDefOrRefInModule(pModule,
                                                                 pClassTypeInfo->data.ClassTypeData.metadataToken);
    // Value types canonicalize *underneath* (V<string> --> V<object>), so we
    // must still read the full instantiation.
    if ((!typeDef.IsNull() && typeDef.IsValueType()) || (pClassTypeInfo->data.elementType == ELEMENT_TYPE_VALUETYPE))
    {
        return ReadLoadedInstantiation(retrieveWhich,
                                       pModule,
                                       pClassTypeInfo->data.ClassTypeData.metadataToken,
                                       pClassTypeInfo->numTypeArgs);
    }
    else
    {
        // Reference types collapse entirely to the canonical method table; the
        // instantiation still has to be consumed so the walk position stays
        // correct for any following sibling arguments.
        _ASSERTE(retrieveWhich == kGetCanonical);
        // skip the instantiation - no need to look at it since the type canonicalizes to "Object"
        for (unsigned int i = 0; i < pClassTypeInfo->numTypeArgs; i++)
        {
            Skip();
        }
        return TypeHandle(g_pCanonMethodTableClass);
    }
}// DacDbiInterfaceImpl::TypeDataWalk::ClassTypeArg
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::FnPtrTypeArg
// get a loaded type handle for a function pointer type (E_T_FNPTR)
//
// Arguments:
// input: pFnPtrTypeInfo - type information for a pointer or byref type
// Although this is in fact a pointer (in)to a list, we treat it here
// simply as a pointer to a single instance of DebuggerIPCE_TypeArgData
// which holds type information for a function pointer type.
// This is the most recent type node (for a function pointer type) retrieved
// by TypeDataWalk::ReadOne(). The call to ReadLoadedTypeHandles will
// result in call(s) to ReadOne to retrieve one or more type nodes
// that are needed to compute the type handle for the return type and
// parameter types of the function. When we return from that call, we pass
// pFnPtrTypeInfo along with pInst to FindLoadedFnptrType
// to get the type handle for this particular function pointer type.
// retrieveWhich - determines whether we can return the type handle for
// a canonical type or only for an exact type
// Return value: the type handle corresponding to the function pointer type
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::TypeDataWalk::FnPtrTypeArg(DebuggerIPCE_TypeArgData * pFnPtrTypeInfo,
                                                           TypeHandleReadType retrieveWhich)
{
    // Allocate one TypeHandle per type argument: the return type plus one for
    // each parameter type of the function the FNPTR refers to.
    // Fix: the element count is numTypeArgs. The previous code allocated
    // sizeof(TypeHandle) * numTypeArgs *elements* -- new[] already scales the
    // element count by the element size, so that over-allocated the buffer by
    // a factor of sizeof(TypeHandle).
    NewArrayHolder<TypeHandle> pInst(new TypeHandle[pFnPtrTypeInfo->numTypeArgs]);

    // Read a loaded handle for each slot; if all resolve, look up the
    // corresponding loaded function pointer type.
    if (ReadLoadedTypeHandles(retrieveWhich, pFnPtrTypeInfo->numTypeArgs, pInst))
    {
        return FindLoadedFnptrType(pFnPtrTypeInfo->numTypeArgs, pInst);
    }
    return TypeHandle();
} // DacDbiInterfaceImpl::TypeDataWalk::FnPtrTypeArg
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::ObjRefOrPrimitiveTypeArg
// get a loaded type handle for a primitive type or ObjRef
//
// Arguments:
// input: pArgInfo - type information for an objref or primitive type.
// This is called only when the objref or primitive type
// is a type argument for a parent type. In this case,
// we treat all objrefs the same, that is, we don't care
// about type parameters for the referent. Instead, we will
// simply return the canonical object type handle as the type
// of the referent. <@dbgtodo Microsoft: why is this?>
// If this is a primitive type, we'll simply get the
// type handle for that type.
// elementType - type of the argument
// Return value: the type handle corresponding to the elementType
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::TypeDataWalk::ObjRefOrPrimitiveTypeArg(DebuggerIPCE_TypeArgData * pArgInfo,
                                                                       CorElementType elementType)
{
    // Any nested type arguments (e.g. for arrays) are irrelevant here -- as a
    // generic argument the thing is a reference type regardless -- but they
    // must still be consumed so the walk position stays correct.
    unsigned int cArgs = pArgInfo->numTypeArgs;
    while (cArgs-- > 0)
    {
        Skip();
    }

    // Object references all collapse to the canonical method table; primitives
    // resolve to their own element type handle.
    return CorTypeInfo::IsObjRef_NoThrow(elementType)
               ? TypeHandle(g_pCanonMethodTableClass)
               : FindLoadedElementType(elementType);
} // DacDbiInterfaceImpl::TypeDataWalk::ObjRefOrPrimitiveTypeArg
//-------------------------------------------------------------------------
// end of TypeDataWalk implementations
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
// functions to use loader to get type handles
// ------------------------------------------------------------------------
// Note, in these functions, the use of ClassLoader::DontLoadTypes was chosen
// instead of FailIfNotLoaded because, although we may want to debug unrestored
// VCs, we can't do it because the debug API is not set up to handle them.
//
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::FindLoadedArrayType
// Use ClassLoader to find a loaded type handle for an array type (E_T_ARRAY or E_T_SZARRAY)
// Arguments:
// input: arrayType - type of the array
// TypeArg - type handle for the base type
// rank - array rank
// Return Value: type handle for the array type
//-----------------------------------------------------------------------------
// static
TypeHandle DacDbiInterfaceImpl::FindLoadedArrayType(CorElementType arrayType,
                                                    TypeHandle typeArg,
                                                    unsigned rank)
{
    // Lookup operations run the class loader in non-load mode.
    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

    // Without a valid element type there is nothing to look up.
    if (!typeArg.IsNull())
    {
        // DontLoadTypes: only an already-loaded array type will be returned.
        return ClassLoader::LoadArrayTypeThrowing(typeArg,
                                                  arrayType,
                                                  rank,
                                                  ClassLoader::DontLoadTypes );
    }
    return TypeHandle();
} // DacDbiInterfaceImpl::FindLoadedArrayType;
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::FindLoadedPointerOrByrefType
// Use ClassLoader to find a loaded type handle for an address type (E_T_PTR or E_T_BYREF)
// Arguments:
// input: addressType - type of the address type
// TypeArg - type handle for the base type
// Return Value: type handle for the address type
//-----------------------------------------------------------------------------
// static
TypeHandle DacDbiInterfaceImpl::FindLoadedPointerOrByrefType(CorElementType addressType, TypeHandle typeArg)
{
    // Lookup operations run the class loader in non-load mode.
    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

    // DontLoadTypes: resolve (E_T_PTR/E_T_BYREF, referent) only to an
    // already-loaded handle; the loader is never driven to load anything.
    TypeHandle thResult = ClassLoader::LoadPointerOrByrefTypeThrowing(addressType,
                                                                     typeArg,
                                                                     ClassLoader::DontLoadTypes);
    return thResult;
} // DacDbiInterfaceImpl::FindLoadedPointerOrByrefType
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::FindLoadedFnptrType
// Use ClassLoader to find a loaded type handle for a function pointer type (E_T_FNPTR)
// Arguments:
// input: pInst - type handles of the function's return value and arguments
// numTypeArgs - number of type handles in pInst
// Return Value: type handle for the function pointer type
//-----------------------------------------------------------------------------
// static
TypeHandle DacDbiInterfaceImpl::FindLoadedFnptrType(DWORD numTypeArgs, TypeHandle * pInst)
{
    // Lookup operations run the class loader in non-load mode.
    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

    // @dbgtodo : Do we need to worry about calling convention here?
    // pInst holds the return type followed by the argument types, while
    // LoadFnptrTypeThrowing wants only the argument count -- hence the -1.
    DWORD cFnArgs = numTypeArgs - 1;
    return ClassLoader::LoadFnptrTypeThrowing(0,
                                              cFnArgs,
                                              pInst,
                                              ClassLoader::DontLoadTypes);
} // DacDbiInterfaceImpl::FindLoadedFnptrType
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::FindLoadedInstantiation
// Use ClassLoader to find a loaded type handle for a particular instantiation of a
// class type (E_T_CLASS or E_T_VALUECLASS)
//
// Arguments:
// input: pModule - module in which the type is loaded
// mdToken - metadata token for the type
// nTypeArgs - number of type arguments in pInst
// pInst - list of type handles for the type parameters
// Return value: type handle for the instantiated class type
//-----------------------------------------------------------------------------
// static
TypeHandle DacDbiInterfaceImpl::FindLoadedInstantiation(Module * pModule,
                                                        mdTypeDef mdToken,
                                                        DWORD nTypeArgs,
                                                        TypeHandle * pInst)
{
    // Lookup operations run the class loader in non-load mode.
    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

    // Bundle the type arguments into an Instantiation and ask the loader for
    // the already-created instantiated type (DontLoadTypes: never load here).
    Instantiation inst(pInst, nTypeArgs);
    return ClassLoader::LoadGenericInstantiationThrowing(pModule,
                                                         mdToken,
                                                         inst,
                                                         ClassLoader::DontLoadTypes);
} // DacDbiInterfaceImpl::FindLoadedInstantiation
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::FindLoadedElementType
// Get the type handle for a primitive type
// Arguments:
// input: elementType - type of the primitive type
// Return Value: Type handle for the primitive type
//-----------------------------------------------------------------------------
// static
TypeHandle DacDbiInterfaceImpl::FindLoadedElementType(CorElementType elementType)
{
    // Lookup operations run the class loader in non-load mode.
    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

    // Primitive types live in the core library: map the element type straight
    // to its method table and wrap that in a TypeHandle.
    return TypeHandle((&g_CoreLib)->GetElementType(elementType));
} // DacDbiInterfaceImpl::FindLoadedElementType
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetArrayTypeInfo
// Gets additional information to convert a type handle to an instance of CordbType if the type is E_T_ARRAY.
// Specifically, we get the rank and the type of the array elements
//
// Arguments:
// input: typeHandle - type handle for the array type
// pAppDomain - AppDomain into which the type is loaded
// output: pTypeInfo - information for the array rank and element type
//
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetArrayTypeInfo(TypeHandle typeHandle,
                                           DebuggerIPCE_ExpandedTypeData * pTypeInfo,
                                           AppDomain * pAppDomain)
{
    _ASSERTE(typeHandle.IsArray());

    // Record the rank, then describe the element type in basic form.
    pTypeInfo->ArrayTypeData.arrayRank = typeHandle.GetRank();
    TypeHandle thElement = typeHandle.GetArrayElementTypeHandle();
    TypeHandleToBasicTypeInfo(thElement,
                              &(pTypeInfo->ArrayTypeData.arrayTypeArg),
                              pAppDomain);
} // DacDbiInterfaceImpl::GetArrayTypeInfo
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetPtrTypeInfo
// Gets additional information to convert a type handle to an instance of CordbType if the type is
// E_T_PTR or E_T_BYREF. Specifically, we get the type for the referent of the address type
//
// Arguments:
// input: boxed - indicates what, if anything, is boxed (see code:AreValueTypesBoxed for
// more specific information)
// typeHandle - type handle for the address type
// pAppDomain - AppDomain into which the type is loaded
// output: pTypeInfo - information for the referent type
//
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetPtrTypeInfo(AreValueTypesBoxed boxed,
                                         TypeHandle typeHandle,
                                         DebuggerIPCE_ExpandedTypeData * pTypeInfo,
                                         AppDomain * pAppDomain)
{
    // Fully boxed view: describe the pointer like a class type instead.
    if (boxed == AllBoxed)
    {
        GetClassTypeInfo(typeHandle, pTypeInfo, pAppDomain);
        return;
    }

    // Otherwise describe the referent: address types are TypeDescs whose
    // single type parameter is the pointed-to type.
    _ASSERTE(typeHandle.IsTypeDesc());
    TypeHandleToBasicTypeInfo(typeHandle.AsTypeDesc()->GetTypeParam(),
                              &(pTypeInfo->UnaryTypeData.unaryTypeArg),
                              pAppDomain);
} // DacDbiInterfaceImpl::GetPtrTypeInfo
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetFnPtrTypeInfo
// Gets additional information to convert a type handle to an instance of CordbType if the type is
// E_T_FNPTR, specifically the typehandle for the referent.
//
// Arguments
// input: boxed - indicates what, if anything, is boxed (see code:AreValueTypesBoxed for
// more specific information)
// typeHandle - type handle for the address type
// pAppDomain - AppDomain into which the type is loaded
// output: pTypeInfo - information for the referent type
//
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetFnPtrTypeInfo(AreValueTypesBoxed boxed,
                                           TypeHandle typeHandle,
                                           DebuggerIPCE_ExpandedTypeData * pTypeInfo,
                                           AppDomain * pAppDomain)
{
    if (boxed != AllBoxed)
    {
        // Hand the type handle itself across; the RS expands it on demand.
        pTypeInfo->NaryTypeData.typeHandle.SetDacTargetPtr(typeHandle.AsTAddr());
    }
    else
    {
        // Everything boxed: report the function pointer type as a class.
        GetClassTypeInfo(typeHandle, pTypeInfo, pAppDomain);
    }
} // DacDbiInterfaceImpl::GetFnPtrTypeInfo
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetClassTypeInfo
// Gets additional information to convert a type handle to an instance of CordbType if the type is
// E_T_CLASS or E_T_VALUETYPE
//
// Arguments
// input: typeHandle - type handle for the address type
// pAppDomain - AppDomain into which the type is loaded
// output: pTypeInfo - information for the referent type
//
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetClassTypeInfo(TypeHandle typeHandle,
                                           DebuggerIPCE_ExpandedTypeData * pTypeInfo,
                                           AppDomain * pAppDomain)
{
    Module * pModule = typeHandle.GetModule();
    _ASSERTE(pModule);

    // Only generic instantiations need to carry the exact type handle across;
    // non-generic types are fully identified by metadata token + module below.
    pTypeInfo->ClassTypeData.typeHandle = VMPTR_TypeHandle::NullPtr();
    if (typeHandle.HasInstantiation())
    {
        pTypeInfo->ClassTypeData.typeHandle.SetDacTargetPtr(typeHandle.AsTAddr());
    }

    pTypeInfo->ClassTypeData.metadataToken = typeHandle.GetCl();
    pTypeInfo->ClassTypeData.vmModule.SetDacTargetPtr(PTR_HOST_TO_TADDR(pModule));

    // The domain assembly is only meaningful when an AppDomain was supplied.
    pTypeInfo->ClassTypeData.vmDomainAssembly = VMPTR_DomainAssembly::NullPtr();
    if (pAppDomain)
    {
        pTypeInfo->ClassTypeData.vmDomainAssembly.SetDacTargetPtr(PTR_HOST_TO_TADDR(pModule->GetDomainAssembly()));
    }
} // DacDbiInterfaceImpl::GetClassTypeInfo
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetElementType
// Gets the correct CorElementType value from a type handle
//
// Arguments
// input: typeHandle - type handle for the address type
// Return Value: the CorElementType enum value for the type handle
//-----------------------------------------------------------------------------
CorElementType DacDbiInterfaceImpl::GetElementType (TypeHandle typeHandle)
{
    // A null handle is reported as void.
    if (typeHandle.IsNull())
    {
        return ELEMENT_TYPE_VOID;
    }

    // Object and String must be special-cased because
    // GetSignatureCorElementType reports ELEMENT_TYPE_CLASS for both.
    if (typeHandle.GetMethodTable() == g_pObjectClass)
    {
        return ELEMENT_TYPE_OBJECT;
    }
    if (typeHandle.GetMethodTable() == g_pStringClass)
    {
        return ELEMENT_TYPE_STRING;
    }

    return typeHandle.GetSignatureCorElementType();
} // DacDbiInterfaceImpl::GetElementType
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeHandleToBasicTypeInfo
// Gets additional information to convert a type handle to an instance of CordbType for the referent of an
// E_T_BYREF or E_T_PTR or for the element type of an E_T_ARRAY or E_T_SZARRAY
//
// Arguments:
// input: typeHandle - type handle for the address type
// pAppDomain - AppDomain into which the type is loaded
// output: pTypeInfo - information for the referent type
//
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::TypeHandleToBasicTypeInfo(TypeHandle typeHandle,
                                                    DebuggerIPCE_BasicTypeData * pTypeInfo,
                                                    AppDomain * pAppDomain)
{
    pTypeInfo->elementType = GetElementType(typeHandle);
    switch (pTypeInfo->elementType)
    {
        // Constructed types: the type handle alone fully describes the type,
        // so no metadata token or domain assembly is needed.
        case ELEMENT_TYPE_ARRAY:
        case ELEMENT_TYPE_SZARRAY:
        case ELEMENT_TYPE_FNPTR:
        case ELEMENT_TYPE_PTR:
        case ELEMENT_TYPE_BYREF:
            pTypeInfo->vmTypeHandle.SetDacTargetPtr(typeHandle.AsTAddr());
            pTypeInfo->metadataToken = mdTokenNil;
            pTypeInfo->vmDomainAssembly = VMPTR_DomainAssembly::NullPtr();
            break;

        // Classes and value types: identified by metadata token + module
        // (plus the exact type handle when the type is a generic instantiation).
        case ELEMENT_TYPE_CLASS:
        case ELEMENT_TYPE_VALUETYPE:
        {
            Module * pModule = typeHandle.GetModule();
            if (typeHandle.HasInstantiation()) // only set if instantiated
            {
                pTypeInfo->vmTypeHandle.SetDacTargetPtr(typeHandle.AsTAddr());
            }
            else
            {
                pTypeInfo->vmTypeHandle = VMPTR_TypeHandle::NullPtr();
            }
            pTypeInfo->metadataToken = typeHandle.GetCl();
            _ASSERTE(pModule);
            pTypeInfo->vmModule.SetDacTargetPtr(PTR_HOST_TO_TADDR(pModule));
            // The domain assembly can only be filled in when an AppDomain was supplied.
            if (pAppDomain)
            {
                pTypeInfo->vmDomainAssembly.SetDacTargetPtr(PTR_HOST_TO_TADDR(pModule->GetDomainAssembly()));
            }
            else
            {
                pTypeInfo->vmDomainAssembly = VMPTR_DomainAssembly::NullPtr();
            }
            break;
        }

        // Primitive / simple types: the element type value alone suffices.
        default:
            pTypeInfo->vmTypeHandle = VMPTR_TypeHandle::NullPtr();
            pTypeInfo->metadataToken = mdTokenNil;
            pTypeInfo->vmDomainAssembly = VMPTR_DomainAssembly::NullPtr();
            break;
    }
    return;
} // DacDbiInterfaceImpl::TypeHandleToBasicTypeInfo
void DacDbiInterfaceImpl::GetObjectExpandedTypeInfoFromID(AreValueTypesBoxed boxed,
                                                          VMPTR_AppDomain vmAppDomain,
                                                          COR_TYPEID id,
                                                          DebuggerIPCE_ExpandedTypeData *pTypeInfo)
{
    DD_ENTER_MAY_THROW;

    // token1 of a COR_TYPEID is the target address of the type handle.
    TypeHandle th = TypeHandle::FromPtr(TO_TADDR(id.token1));
    TypeHandleToExpandedTypeInfoImpl(boxed, vmAppDomain, th, pTypeInfo);
}
void DacDbiInterfaceImpl::GetObjectExpandedTypeInfo(AreValueTypesBoxed boxed,
                                                    VMPTR_AppDomain vmAppDomain,
                                                    CORDB_ADDRESS addr,
                                                    DebuggerIPCE_ExpandedTypeData *pTypeInfo)
{
    DD_ENTER_MAY_THROW;

    // Interpret the address as an object and describe its GC-safe type.
    PTR_Object pObj(TO_TADDR(addr));
    TypeHandle th = pObj->GetGCSafeTypeHandle();
    TypeHandleToExpandedTypeInfoImpl(boxed, vmAppDomain, th, pTypeInfo);
}
// DacDbi API: use a type handle to get the information needed to create the corresponding RS CordbType instance
void DacDbiInterfaceImpl::TypeHandleToExpandedTypeInfo(AreValueTypesBoxed boxed,
                                                       VMPTR_AppDomain vmAppDomain,
                                                       VMPTR_TypeHandle vmTypeHandle,
                                                       DebuggerIPCE_ExpandedTypeData * pTypeInfo)
{
    DD_ENTER_MAY_THROW;

    // Unwrap the VMPTR and forward to the shared implementation.
    TypeHandle th = TypeHandle::FromPtr(vmTypeHandle.GetDacPtr());
    TypeHandleToExpandedTypeInfoImpl(boxed, vmAppDomain, th, pTypeInfo);
}
void DacDbiInterfaceImpl::TypeHandleToExpandedTypeInfoImpl(AreValueTypesBoxed boxed,
                                                           VMPTR_AppDomain vmAppDomain,
                                                           TypeHandle typeHandle,
                                                           DebuggerIPCE_ExpandedTypeData * pTypeInfo)
{
    AppDomain * pAppDomain = vmAppDomain.GetDacPtr();
    pTypeInfo->elementType = GetElementType(typeHandle);
    switch (pTypeInfo->elementType)
    {
        case ELEMENT_TYPE_ARRAY:
        case ELEMENT_TYPE_SZARRAY:
            GetArrayTypeInfo(typeHandle, pTypeInfo, pAppDomain);
            break;

        case ELEMENT_TYPE_PTR:
        case ELEMENT_TYPE_BYREF:
            GetPtrTypeInfo(boxed, typeHandle, pTypeInfo, pAppDomain);
            break;

        case ELEMENT_TYPE_VALUETYPE:
            // A boxed value type is reported to the RS as a class.
            if (boxed == OnlyPrimitivesUnboxed || boxed == AllBoxed)
            {
                pTypeInfo->elementType = ELEMENT_TYPE_CLASS;
            }
            GetClassTypeInfo(typeHandle, pTypeInfo, pAppDomain);
            break;

        case ELEMENT_TYPE_CLASS:
            GetClassTypeInfo(typeHandle, pTypeInfo, pAppDomain);
            break;

        case ELEMENT_TYPE_FNPTR:
            GetFnPtrTypeInfo(boxed, typeHandle, pTypeInfo, pAppDomain);
            break;

        // Primitive types: when everything is boxed, report as a class;
        // otherwise the element type alone is sufficient.
        default:
            if (boxed == AllBoxed)
            {
                pTypeInfo->elementType = ELEMENT_TYPE_CLASS;
                GetClassTypeInfo(typeHandle, pTypeInfo, pAppDomain);
            }
            // else the element type is sufficient
            break;
    }
    LOG((LF_CORDB, LL_INFO10000, "D::THTETI: converted left-side type handle to expanded right-side type info, pTypeInfo->ClassTypeData.typeHandle = 0x%08x.\n", pTypeInfo->ClassTypeData.typeHandle.GetRawPtr()));
    return;
} // DacDbiInterfaceImpl::TypeHandleToExpandedTypeInfoImpl
// Get type handle for a TypeDef token, if one exists. For generics this returns the open type.
VMPTR_TypeHandle DacDbiInterfaceImpl::GetTypeHandle(VMPTR_Module vmModule,
                                                    mdTypeDef metadataToken)
{
    DD_ENTER_MAY_THROW;

    Module * pModule = vmModule.GetDacPtr();

    // Resolve the token in the module; for generics this yields the open type.
    TypeHandle th = ClassLoader::LookupTypeDefOrRefInModule(pModule, metadataToken);
    if (th.IsNull())
    {
        LOG((LF_CORDB, LL_INFO10000, "D::GTH: class isn't loaded.\n"));
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }

    VMPTR_TypeHandle vmTypeHandle = VMPTR_TypeHandle::NullPtr();
    vmTypeHandle.SetDacTargetPtr(th.AsTAddr());
    return vmTypeHandle;
}
// DacDbi API: GetAndSendApproxTypeHandle finds the type handle for the layout of the instance fields of an
// instantiated type if it is available.
VMPTR_TypeHandle DacDbiInterfaceImpl::GetApproxTypeHandle(TypeInfoList * pTypeData)
{
    DD_ENTER_MAY_THROW;

    LOG((LF_CORDB, LL_INFO10000, "D::GATH: getting info.\n"));

    // Walk the type data asking for the canonical type handle, which describes
    // the layout of the instance fields if the type is loaded.
    TypeDataWalk walk(&((*pTypeData)[0]), pTypeData->Count());
    TypeHandle typeHandle = walk.ReadLoadedTypeHandle(TypeDataWalk::kGetCanonical);

    if (typeHandle.IsNull())
    {
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }

    // Fix: previously SetDacTargetPtr was called unconditionally before the
    // null check and then a second time inside the non-null branch; set the
    // target pointer exactly once, after we know the handle is valid.
    VMPTR_TypeHandle vmTypeHandle = VMPTR_TypeHandle::NullPtr();
    vmTypeHandle.SetDacTargetPtr(typeHandle.AsTAddr());

    LOG((LF_CORDB, LL_INFO10000,
        "D::GATH: sending result, result = 0x%0x8\n",
        typeHandle));
    return vmTypeHandle;
} // DacDbiInterfaceImpl::GetApproxTypeHandle
// DacDbiInterface API: Get the exact type handle from type data
HRESULT DacDbiInterfaceImpl::GetExactTypeHandle(DebuggerIPCE_ExpandedTypeData * pTypeData,
                                                ArgInfoList * pArgInfo,
                                                VMPTR_TypeHandle& vmTypeHandle)
{
    DD_ENTER_MAY_THROW;

    LOG((LF_CORDB, LL_INFO10000, "D::GETH: getting info.\n"));
    HRESULT hr = S_OK;
    // Any exception thrown by the conversion (e.g. CORDBG_E_CLASS_NOT_LOADED)
    // is converted to a failure HRESULT rather than propagated to the caller.
    EX_TRY
    {
        // vmTypeHandle stays null on failure.
        vmTypeHandle = vmTypeHandle.NullPtr();
        // convert the type information to a type handle
        TypeHandle typeHandle = ExpandedTypeInfoToTypeHandle(pTypeData, pArgInfo);
        _ASSERTE(!typeHandle.IsNull());
        vmTypeHandle.SetDacTargetPtr(typeHandle.AsTAddr());
    }
    EX_CATCH_HRESULT(hr);
    return hr;
} // DacDbiInterfaceImpl::GetExactTypeHandle
// Retrieve the generic type params for a given MethodDesc. This function is specifically
// for stackwalking because it requires the generic type token on the stack.
void DacDbiInterfaceImpl::GetMethodDescParams(
    VMPTR_AppDomain vmAppDomain,
    VMPTR_MethodDesc vmMethodDesc,
    GENERICS_TYPE_TOKEN genericsToken,
    UINT32 * pcGenericClassTypeParams,
    TypeParamsList * pGenericTypeParams)
{
    DD_ENTER_MAY_THROW;

    if (vmAppDomain.IsNull() || vmMethodDesc.IsNull())
    {
        ThrowHR(E_INVALIDARG);
    }
    _ASSERTE((pcGenericClassTypeParams != NULL) && (pGenericTypeParams != NULL));

    MethodDesc * pMD = vmMethodDesc.GetDacPtr();

    // Retrieve the number of type parameters for the class and
    // the number of type parameters for the method itself.
    // For example, the method Foo<T, U>::Bar<V>() has 2 class type parameters and 1 method type parameters.
    UINT32 cGenericClassTypeParams = pMD->GetNumGenericClassArgs();
    UINT32 cGenericMethodTypeParams = pMD->GetNumGenericMethodArgs();
    UINT32 cTotalGenericTypeParams = cGenericClassTypeParams + cGenericMethodTypeParams;

    // Set the out parameter.
    *pcGenericClassTypeParams = cGenericClassTypeParams;

    TypeHandle thSpecificClass;
    MethodDesc * pSpecificMethod;

    // Try to retrieve a more specific MethodDesc and TypeHandle via the generics type token.
    // The generics token is not always guaranteed to be available.
    // For example, it may be unavailable in prologs and epilogs.
    // In dumps, not available can also mean a thrown exception for missing memory.
    BOOL fExact = FALSE;
    ALLOW_DATATARGET_MISSING_MEMORY(
        fExact = Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation(
            pMD,
            PTR_VOID((TADDR)genericsToken),
            &thSpecificClass,
            &pSpecificMethod);
    );
    // NOTE(review): SanityCheck presumably guards against a stale/garbage token
    // producing bogus MethodTables -- confirm its exact guarantees before relying on it.
    if (!fExact ||
        !thSpecificClass.GetMethodTable()->SanityCheck() ||
        !pSpecificMethod->GetMethodTable()->SanityCheck())
    {
        // Use the canonical MethodTable and MethodDesc if the exact generics token is not available.
        thSpecificClass = TypeHandle(pMD->GetMethodTable());
        pSpecificMethod = pMD;
    }

    // Retrieve the array of class type parameters and the array of method type parameters.
    Instantiation classInst = pSpecificMethod->GetExactClassInstantiation(thSpecificClass);
    Instantiation methodInst = pSpecificMethod->GetMethodInstantiation();
    _ASSERTE((classInst.IsEmpty()) == (cGenericClassTypeParams == 0));
    _ASSERTE((methodInst.IsEmpty()) == (cGenericMethodTypeParams == 0));

    // allocate memory for the return array
    pGenericTypeParams->Alloc(cTotalGenericTypeParams);

    for (UINT32 i = 0; i < cTotalGenericTypeParams; i++)
    {
        // Retrieve the current type parameter depending on the index:
        // class type parameters are listed first, followed by method type parameters.
        TypeHandle thCurrent;
        if (i < cGenericClassTypeParams)
        {
            thCurrent = classInst[i];
        }
        else
        {
            thCurrent = methodInst[i - cGenericClassTypeParams];
        }

        // There is the possiblity that we'll get this far with a dump and not fail, but still
        // not be able to get full info for a particular param.
        EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY_WITH_HANDLER
        {
            // Fill in the struct using the TypeHandle of the current type parameter if we can.
            VMPTR_TypeHandle vmTypeHandle = VMPTR_TypeHandle::NullPtr();
            vmTypeHandle.SetDacTargetPtr(thCurrent.AsTAddr());
            TypeHandleToExpandedTypeInfo(NoValueTypeBoxing,
                                         vmAppDomain,
                                         vmTypeHandle,
                                         &((*pGenericTypeParams)[i]));
        }
        EX_CATCH_ALLOW_DATATARGET_MISSING_MEMORY_WITH_HANDLER
        {
            // On failure for a particular type, default it back to System.__Canon.
            VMPTR_TypeHandle vmTHCanon = VMPTR_TypeHandle::NullPtr();
            TypeHandle thCanon = TypeHandle(g_pCanonMethodTableClass);
            vmTHCanon.SetDacTargetPtr(thCanon.AsTAddr());
            TypeHandleToExpandedTypeInfo(NoValueTypeBoxing,
                                         vmAppDomain,
                                         vmTHCanon,
                                         &((*pGenericTypeParams)[i]));
        }
        EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY_WITH_HANDLER
    }
}
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetClassOrValueTypeHandle
// get a typehandle for a class or valuetype from basic type data (metadata token
// and domain file).
// Arguments:
// input: pData - contains the metadata token and domain file
// Return value: the type handle for the corresponding type
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::GetClassOrValueTypeHandle(DebuggerIPCE_BasicTypeData * pData)
{
    // Fast path: the RS already supplied the type handle.
    if (!pData->vmTypeHandle.IsNull())
    {
        return TypeHandle::FromPtr(pData->vmTypeHandle.GetDacPtr());
    }

    // Slow path: resolve via metadata token + module through the class loader.
    DomainAssembly * pDomainAssembly = pData->vmDomainAssembly.GetDacPtr();
    Module * pModule = pDomainAssembly->GetModule();
    TypeHandle typeHandle = ClassLoader::LookupTypeDefOrRefInModule(pModule, pData->metadataToken);
    if (typeHandle.IsNull())
    {
        LOG((LF_CORDB, LL_INFO10000, "D::BTITTH: class isn't loaded.\n"));
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }

    // This path is only used for non-generic types.
    _ASSERTE(typeHandle.GetNumGenericArgs() == 0);
    return typeHandle;
} // DacDbiInterfaceImpl::GetClassOrValueTypeHandle
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetExactArrayTypeHandle
// get an exact type handle for an array type
// Arguments:
// input: pTopLevelTypeData - type information for a top-level array type
// pArgInfo - contains the following information:
// m_genericArgsCount - number of generic parameters for the element type--this should be 1
// m_pGenericArgs - pointer to the generic parameter for the element type--this is
// effectively a one-element list. These are the actual parameters
// Return Value: the exact type handle for the type
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::GetExactArrayTypeHandle(DebuggerIPCE_ExpandedTypeData * pTopLevelTypeData,
                                                        ArgInfoList * pArgInfo)
{
    // An array type has exactly one type argument: its element type.
    _ASSERTE(pArgInfo->Count() == 1);
    TypeHandle thElement = BasicTypeInfoToTypeHandle(&((*pArgInfo)[0]));

    // Ask the loader for the already-loaded array type with this element type and rank.
    return FindLoadedArrayType(pTopLevelTypeData->elementType,
                               thElement,
                               pTopLevelTypeData->ArrayTypeData.arrayRank);
} // DacDbiInterfaceImpl::GetExactArrayTypeHandle
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetExactPtrOrByRefTypeHandle
// get an exact type handle for a PTR or BYREF type
// Arguments:
// input: pTopLevelTypeData - type information for the PTR or BYREF type
// pArgInfo - contains the following information:
// m_genericArgsCount - number of generic parameters for the element type--this should be 1
// m_pGenericArgs - pointer to the generic parameter for the element type--this is
// effectively a one-element list. These are the actual parameters
// Return Value: the exact type handle for the type
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::GetExactPtrOrByRefTypeHandle(DebuggerIPCE_ExpandedTypeData * pTopLevelTypeData,
                                                             ArgInfoList * pArgInfo)
{
    // A PTR/BYREF type has exactly one type argument: its referent.
    _ASSERTE(pArgInfo->Count() == 1);
    TypeHandle thReferent = BasicTypeInfoToTypeHandle(&((*pArgInfo)[0]));

    // Ask the loader for the already-loaded pointer or byref type.
    return FindLoadedPointerOrByrefType(pTopLevelTypeData->elementType, thReferent);
} // DacDbiInterfaceImpl::GetExactPtrOrByRefTypeHandle
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetExactClassTypeHandle
// get an exact type handle for a CLASS or VALUETYPE type
// Arguments:
// input: pTopLevelTypeData - type information for the CLASS or VALUETYPE type
// pArgInfo - contains the following information:
// m_genericArgsCount - number of generic parameters for the class
// m_pGenericArgs - list of generic parameters for the class--these
// are the actual parameters
// Return Value: the exact type handle for the type
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::GetExactClassTypeHandle(DebuggerIPCE_ExpandedTypeData * pTopLevelTypeData,
                                                        ArgInfoList * pArgInfo)
{
    Module * pModule = pTopLevelTypeData->ClassTypeData.vmModule.GetDacPtr();
    int argCount = pArgInfo->Count();
    TypeHandle typeConstructor =
        ClassLoader::LookupTypeDefOrRefInModule(pModule, pTopLevelTypeData->ClassTypeData.metadataToken);

    // If we can't find the class, throw the appropriate HR. Note: if the class is not a value class and
    // the class is also not restored, then we must pretend that the class is still not loaded. We are gonna let
    // unrestored value classes slide, though, and special case access to the class's parent below.
    if (typeConstructor.IsNull())
    {
        LOG((LF_CORDB, LL_INFO10000, "D::ETITTH: class isn't loaded.\n"));
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }

    // if there are no generic parameters, we already have the correct type handle
    if (argCount == 0)
    {
        return typeConstructor;
    }

    // we have generic parameters--first validate we have a number consistent with the list
    // of parameters we received
    if ((unsigned int)argCount != typeConstructor.GetNumGenericArgs())
    {
        LOG((LF_CORDB, LL_INFO10000,
            "D::ETITTH: wrong number of type parameters, %d given, %d expected\n",
            argCount, typeConstructor.GetNumGenericArgs()));
        _ASSERTE((unsigned int)argCount == typeConstructor.GetNumGenericArgs());
        ThrowHR(E_FAIL);
    }

    // Validate that the total byte size of the argument array doesn't overflow.
    // Fix: the original passed the *byte* count (argCount * sizeof(TypeHandle))
    // to new[], over-allocating by a factor of sizeof(TypeHandle); operator
    // new[] takes an element count, so allocate exactly argCount elements.
    S_UINT32 allocSize = S_UINT32(argCount) * S_UINT32(sizeof(TypeHandle));
    if (allocSize.IsOverflow())
    {
        ThrowHR(E_OUTOFMEMORY);
    }
    NewArrayHolder<TypeHandle> pInst(new TypeHandle[argCount]);

    // convert the type information for each parameter to its corresponding type handle
    // and store it in the list
    for (unsigned int i = 0; i < (unsigned int)argCount; i++)
    {
        pInst[i] = BasicTypeInfoToTypeHandle(&((*pArgInfo)[i]));
    }

    // Finally, we find the type handle corresponding to this particular instantiation
    return FindLoadedInstantiation(typeConstructor.GetModule(),
                                   typeConstructor.GetCl(),
                                   argCount,
                                   pInst);
} // DacDbiInterfaceImpl::GetExactClassTypeHandle
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetExactFnPtrTypeHandle
// get an exact type handle for a FNPTR type
// Arguments:
// input: pArgInfo - Contains the following information:
// m_genericArgsCount - number of generic parameters for the referent
// m_pGenericArgs - list of generic parameters for the referent--these
// are the actual parameters for the function signature
// Return Value: the exact type handle for the type
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::GetExactFnPtrTypeHandle(ArgInfoList * pArgInfo)
{
    // Validate that the total byte size of the argument array doesn't overflow.
    // Fix: the original passed the *byte* count to new[], over-allocating by a
    // factor of sizeof(TypeHandle); operator new[] takes an element count, so
    // allocate exactly Count() elements.
    S_UINT32 allocSize = S_UINT32(pArgInfo->Count()) * S_UINT32(sizeof(TypeHandle));
    if (allocSize.IsOverflow())
    {
        ThrowHR(E_OUTOFMEMORY);
    }
    NewArrayHolder<TypeHandle> pInst(new TypeHandle[pArgInfo->Count()]);

    // convert the type information for each parameter to its corresponding type handle
    // and store it in the list
    for (unsigned int i = 0; i < pArgInfo->Count(); i++)
    {
        pInst[i] = BasicTypeInfoToTypeHandle(&((*pArgInfo)[i]));
    }

    // find the type handle corresponding to this particular FNPTR
    return FindLoadedFnptrType(pArgInfo->Count(), pInst);
} // DacDbiInterfaceImpl::GetExactFnPtrTypeHandle
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::BasicTypeInfoToTypeHandle
// Convert basic type info for a type parameter that came from a top-level type to
// the corresponding type handle. If the type parameter is an array or pointer
// type, we simply extract the LS type handle from the VMPTR_TypeHandle that is
// part of the type information. If the type parameter is a class or value type,
// we use the metadata token and domain file in the type info to look up the
// appropriate type handle. If the type parameter is any other types, we get the
// type handle by having the loader look up the type handle for the element type.
// Arguments:
// input: pArgTypeData - basic type information for the type.
// Return Value: the type handle for the type.
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::BasicTypeInfoToTypeHandle(DebuggerIPCE_BasicTypeData * pArgTypeData)
{
    LOG((LF_CORDB, LL_INFO10000,
        "D::BTITTH: expanding basic right-side type to left-side type, ELEMENT_TYPE: %d.\n",
        pArgTypeData->elementType));
    TypeHandle typeHandle = TypeHandle();
    switch (pArgTypeData->elementType)
    {
        // Constructed types: the LS type handle came across directly.
        case ELEMENT_TYPE_ARRAY:
        case ELEMENT_TYPE_SZARRAY:
        case ELEMENT_TYPE_PTR:
        case ELEMENT_TYPE_BYREF:
        case ELEMENT_TYPE_FNPTR:
            _ASSERTE(!pArgTypeData->vmTypeHandle.IsNull());
            typeHandle = TypeHandle::FromPtr(pArgTypeData->vmTypeHandle.GetDacPtr());
            break;

        // Classes and value types: resolved from metadata token + domain file.
        case ELEMENT_TYPE_CLASS:
        case ELEMENT_TYPE_VALUETYPE:
            typeHandle = GetClassOrValueTypeHandle(pArgTypeData);
            break;

        // Primitive types: look up the type handle for the element type.
        default:
            typeHandle = FindLoadedElementType(pArgTypeData->elementType);
            break;
    }
    // A type may be referenced without being loaded; report that explicitly
    // rather than returning a null handle.
    if (typeHandle.IsNull())
    {
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }
    return typeHandle;
} // DacDbiInterfaceImpl::BasicTypeInfoToTypeHandle
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::ExpandedTypeInfoToTypeHandle
// Convert type information for a top-level type to an exact type handle. This
// information includes information about the element type if the top-level type is
// an array type, the referent if the top-level type is a pointer type, or actual
// parameters if the top-level type is a generic class or value type.
// Arguments:
// input: pTopLevelTypeData - type information for the top-level type
// pArgInfo - contains the following information:
// m_genericArtsCount - number of parameters
// m_pGenericArgs - list of actual parameters
// Return Value: the exact type handle corresponding to the type represented by
// pTopLevelTypeData
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::ExpandedTypeInfoToTypeHandle(DebuggerIPCE_ExpandedTypeData * pTopLevelTypeData,
                                                             ArgInfoList * pArgInfo)
{
    WRAPPER_NO_CONTRACT;

    // Fix: this LOG previously referenced 'pData', an identifier that does not
    // exist in this function -- a compile error in builds where LOG is enabled.
    LOG((LF_CORDB, LL_INFO10000,
        "D::ETITTH: expanding right-side type to left-side type, ELEMENT_TYPE: %d.\n",
        pTopLevelTypeData->elementType));

    TypeHandle typeHandle = TypeHandle();

    // depending on the top-level type, get the type handle incorporating information about any type arguments
    switch (pTopLevelTypeData->elementType)
    {
        case ELEMENT_TYPE_ARRAY:
        case ELEMENT_TYPE_SZARRAY:
            typeHandle = GetExactArrayTypeHandle(pTopLevelTypeData, pArgInfo);
            break;

        case ELEMENT_TYPE_PTR:
        case ELEMENT_TYPE_BYREF:
            typeHandle = GetExactPtrOrByRefTypeHandle(pTopLevelTypeData, pArgInfo);
            break;

        case ELEMENT_TYPE_CLASS:
        case ELEMENT_TYPE_VALUETYPE:
            typeHandle = GetExactClassTypeHandle(pTopLevelTypeData, pArgInfo);
            break;

        case ELEMENT_TYPE_FNPTR:
            typeHandle = GetExactFnPtrTypeHandle(pArgInfo);
            break;

        default:
            typeHandle = FindLoadedElementType(pTopLevelTypeData->elementType);
            break;
    } // end switch (pTopLevelTypeData->elementType)

    if (typeHandle.IsNull())
    {
        // This may fail because there are cases when a type can be used (and so visible to the
        // debugger), but not yet loaded to the point of being available in the EETypeHashTable.
        // For example, generic value types (without explicit constructors) may not need their
        // exact instantiation type to be loaded in order to be used as a field of an object
        // created on the heap
        LOG((LF_CORDB, LL_INFO10000, "D::ETITTH: type isn't loaded.\n"));
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }
    return typeHandle;
} // DacDbiInterfaceImpl::ExpandedTypeInfoToTypeHandle
// ----------------------------------------------------------------------------
// DacDbi API: GetThreadStaticAddress
// Get the target field address of a thread local static.
//
// Notes:
// The address is constant and could be cached.
//
// This can commonly fail, in which case, it will return NULL.
// ----------------------------------------------------------------------------
CORDB_ADDRESS DacDbiInterfaceImpl::GetThreadStaticAddress(VMPTR_FieldDesc vmField,
                                                          VMPTR_Thread vmRuntimeThread)
{
    DD_ENTER_MAY_THROW;

    Thread * pRuntimeThread = vmRuntimeThread.GetDacPtr();
    _ASSERTE(pRuntimeThread != NULL);

    PTR_FieldDesc pFieldDesc = vmField.GetDacPtr();

    // Only thread-local statics are handled here. Anything else signals a new
    // special case that this function needs to be updated for.
    if (!pFieldDesc->IsThreadStatic())
    {
        ThrowHR(E_NOTIMPL);
    }

    // May be NULL if the static has not been created yet for this thread.
    TADDR fieldAddress = pRuntimeThread->GetStaticFieldAddrNoCreate(pFieldDesc);
    return fieldAddress;
} // DacDbiInterfaceImpl::GetThreadStaticAddress
// Get the target field address of a collectible types static.
CORDB_ADDRESS DacDbiInterfaceImpl::GetCollectibleTypeStaticAddress(VMPTR_FieldDesc vmField,
                                                                   VMPTR_AppDomain vmAppDomain)
{
    DD_ENTER_MAY_THROW;

    AppDomain * pAppDomain = vmAppDomain.GetDacPtr();
    PTR_FieldDesc pFieldDesc = vmField.GetDacPtr();
    _ASSERTE(pAppDomain != NULL);

    //
    // Verify this field is of the right type
    // (debug-only check; release builds fall through)
    //
    if(!pFieldDesc->IsStatic() ||
        pFieldDesc->IsSpecialStatic())
    {
        _ASSERTE(!"BUG: Unsupported static field type for collectible types");
    }

    //
    // Check that the data is available
    //
    /* TODO: Ideally we should be checking if the class is allocated first, however
             we don't appear to be doing this even for non-collectible statics and
             we have never seen an issue.
    */

    //
    // Get the address
    //
    // GetBase returns null when the statics storage has not been allocated yet,
    // in which case we report a null address to the caller.
    PTR_VOID base = pFieldDesc->GetBase();
    if (base == PTR_NULL)
    {
        return PTR_HOST_TO_TADDR(NULL);
    }

    //
    // Store the result and return
    //
    PTR_VOID addr = pFieldDesc->GetStaticAddressHandle(base);
    return PTR_TO_TADDR(addr);
} // DacDbiInterfaceImpl::GetCollectibleTypeStaticAddress
// DacDbi API: GetTypeHandleParams
// - gets the necessary data for a type handle, i.e. its type parameters, e.g. "String" and "List<int>" from the type handle
// for "Dict<String,List<int>>", and sends it back to the right side.
// - pParams is allocated and initialized by this function
// - This should not fail except for OOM
void DacDbiInterfaceImpl::GetTypeHandleParams(VMPTR_AppDomain vmAppDomain,
                                              VMPTR_TypeHandle vmTypeHandle,
                                              TypeParamsList * pParams)
{
    DD_ENTER_MAY_THROW;  // fix: was missing the trailing semicolon used by every other entry point

    TypeHandle typeHandle = TypeHandle::FromPtr(vmTypeHandle.GetDacPtr());
    LOG((LF_CORDB, LL_INFO10000, "D::GTHP: getting type parameters for 0x%08x 0x%0x8.\n",
        vmAppDomain.GetDacPtr(), typeHandle.AsPtr()));

    // pParams is allocated and initialized by this function; the caller must
    // pass it in empty.
    _ASSERTE(pParams->IsEmpty());
    pParams->Alloc(typeHandle.GetNumGenericArgs());

    // collect expanded type information for each type parameter
    for (unsigned int i = 0; i < pParams->Count(); ++i)
    {
        VMPTR_TypeHandle thInst = VMPTR_TypeHandle::NullPtr();
        thInst.SetDacTargetPtr(typeHandle.GetInstantiation()[i].AsTAddr());
        TypeHandleToExpandedTypeInfo(NoValueTypeBoxing,
                                     vmAppDomain,
                                     thInst,
                                     &((*pParams)[i]));
    }
    LOG((LF_CORDB, LL_INFO10000, "D::GTHP: sending result"));
} // DacDbiInterfaceImpl::GetTypeHandleParams
//-----------------------------------------------------------------------------
// DacDbi API: GetSimpleType
// gets the metadata token and domain file corresponding to a simple type
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetSimpleType(VMPTR_AppDomain vmAppDomain,
                                        CorElementType simpleType,
                                        mdTypeDef *pMetadataToken,
                                        VMPTR_Module *pVmModule,
                                        VMPTR_DomainAssembly *pVmDomainAssembly)
{
    DD_ENTER_MAY_THROW;

    AppDomain *pAppDomain = vmAppDomain.GetDacPtr();

    // Preinitialize the domain-assembly out-param so the caller sees NULL on
    // every failure path (type or module unavailable).
    _ASSERTE(pVmDomainAssembly != NULL);
    *pVmDomainAssembly = VMPTR_DomainAssembly::NullPtr();

    // FindLoadedElementType returns a null handle if the type hasn't been loaded yet.
    TypeHandle typeHandle = FindLoadedElementType(simpleType);
    if (typeHandle.IsNull())
    {
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }

    _ASSERTE(pMetadataToken != NULL);
    *pMetadataToken = typeHandle.GetCl();

    Module * pModule = typeHandle.GetModule();
    if (pModule == NULL)
    {
        ThrowHR(CORDBG_E_TARGET_INCONSISTENT);
    }
    pVmModule->SetHostPtr(pModule);

    if (pAppDomain)
    {
        pVmDomainAssembly->SetHostPtr(pModule->GetDomainAssembly());
        if (pVmDomainAssembly->IsNull())
        {
            ThrowHR(CORDBG_E_TARGET_INCONSISTENT);
        }
    }

    LOG((LF_CORDB, LL_INFO10000, "D::STI: sending result.\n"));
} // DacDbiInterfaceImpl::GetSimpleType
BOOL DacDbiInterfaceImpl::IsExceptionObject(VMPTR_Object vmObject)
{
    DD_ENTER_MAY_THROW;

    // Delegate to the MethodTable-based overload.
    Object* pObj = vmObject.GetDacPtr();
    return IsExceptionObject(pObj->GetMethodTable());
}
BOOL DacDbiInterfaceImpl::IsExceptionObject(MethodTable* pMT)
{
    // Compare against System.Exception's method table up the parent chain.
    PTR_MethodTable pExMT = g_pExceptionClass;
    TADDR exceptionMT = dac_cast<TADDR>(pExMT);

    while (pMT != NULL)
    {
        if (dac_cast<TADDR>(pMT) == exceptionMT)
        {
            return TRUE;
        }
        pMT = pMT->GetParentMethodTable();
    }
    return FALSE;
}
HRESULT DacDbiInterfaceImpl::GetMethodDescPtrFromIpEx(TADDR funcIp, VMPTR_MethodDesc* ppMD)
{
    DD_ENTER_MAY_THROW;

    // Fast path: the code is jitted and the code manager can map IP -> MethodDesc.
    CLRDATA_ADDRESS mdAddr;
    HRESULT hr = g_dacImpl->GetMethodDescPtrFromIP(TO_CDADDR(funcIp), &mdAddr);
    if (hr == S_OK)
    {
        ppMD->SetDacTargetPtr(CLRDATA_ADDRESS_TO_TADDR(mdAddr));
        return hr;
    }

    // Slow path: the method may not be jitted; walk the code stubs instead.
    MethodDesc* pMD = MethodTable::GetMethodDescForSlotAddress(PINSTRToPCODE(funcIp));
    if (pMD == NULL)
    {
        return E_INVALIDARG;
    }

    ppMD->SetDacTargetPtr(PTR_HOST_TO_TADDR(pMD));
    return S_OK;
}
BOOL DacDbiInterfaceImpl::IsDelegate(VMPTR_Object vmObject)
{
    DD_ENTER_MAY_THROW;

    // A null reference is trivially not a delegate.
    if (vmObject.IsNull())
    {
        return FALSE;
    }

    Object *pObj = vmObject.GetDacPtr();
    return pObj->GetGCSafeMethodTable()->IsDelegate();
}
//-----------------------------------------------------------------------------
// DacDbi API: GetDelegateType
// Given a delegate pointer, compute the type of delegate according to the data held in it.
//
// Arguments:
//    input:  delegateObject - non-null VMPTR to a delegate object
//    output: delegateType   - receives the classification; set to
//                             kUnknownDelegateType if the shape isn't recognized
// Return Value: S_OK on a recognized shape, CORDBG_E_UNSUPPORTED_DELEGATE otherwise
//-----------------------------------------------------------------------------
HRESULT DacDbiInterfaceImpl::GetDelegateType(VMPTR_Object delegateObject, DelegateType *delegateType)
{
    DD_ENTER_MAY_THROW;

    _ASSERTE(!delegateObject.IsNull());
    _ASSERTE(delegateType != NULL);

#ifdef _DEBUG
    // ensure we have a Delegate object
    IsDelegate(delegateObject);
#endif

    // Ideally, we would share the implementation of this method with the runtime, or get the same information
    // we are getting from here from other EE methods. Nonetheless, currently the implementation is sharded across
    // several pieces of logic so this replicates the logic mostly due to time constraints. The Mainly from:
    // - System.Private.CoreLib!System.Delegate.GetMethodImpl and System.Private.CoreLib!System.MulticastDelegate.GetMethodImpl
    // - System.Private.CoreLib!System.Delegate.GetTarget and System.Private.CoreLib!System.MulticastDelegate.GetTarget
    // - coreclr!COMDelegate::GetMethodDesc and coreclr!COMDelegate::FindMethodHandle
    // - coreclr!COMDelegate::DelegateConstruct and the delegate type table in
    // - DELEGATE KINDS TABLE in comdelegate.cpp

    *delegateType = DelegateType::kUnknownDelegateType;
    PTR_DelegateObject pDelObj = dac_cast<PTR_DelegateObject>(delegateObject.GetDacPtr());

    // The (invocation count, invocation list) pair determines the delegate kind,
    // mirroring the DELEGATE KINDS TABLE in comdelegate.cpp.
    INT_PTR invocationCount = pDelObj->GetInvocationCount();
    if (invocationCount == -1)
    {
        // We could get a native code for this case from _methodPtr, but not a methodDef as we'll need.
        // We can also get the shuffling thunk. However, this doesn't have a token and there's
        // no easy way to expose through the DBI now.
        *delegateType = kUnmanagedFunctionDelegate;
        return S_OK;
    }

    PTR_Object pInvocationList = OBJECTREFToObject(pDelObj->GetInvocationList());
    if (invocationCount == NULL)
    {
        if (pInvocationList == NULL)
        {
            // If this delegate points to a static function or this is a open virtual delegate, this should be non-null
            // Special case: This might fail in a VSD delegate (instance open virtual)...
            // TODO: There is the special signatures cases missing.
            TADDR targetMethodPtr = PCODEToPINSTR(pDelObj->GetMethodPtrAux());
            if (targetMethodPtr == NULL)
            {
                // Static extension methods, other closed static delegates, and instance delegates fall into this category.
                *delegateType = kClosedDelegate;
            }
            else {
                *delegateType = kOpenDelegate;
            }
            return S_OK;
        }
    }
    else
    {
        if (pInvocationList != NULL)
        {
            // Classify by the type of the invocation list: an array means a real
            // multicast delegate, a nested delegate means a wrapper delegate.
            PTR_MethodTable invocationListMT = pInvocationList->GetGCSafeMethodTable();
            if (invocationListMT->IsArray())
                *delegateType = kTrueMulticastDelegate;
            if (invocationListMT->IsDelegate())
                *delegateType = kWrapperDelegate;
            // Cases missing: Loader allocator, or dynamic resolver.
            return S_OK;
        }
        // According to the table in comdelegates.cpp, a non-zero invocation count with a
        // null invocation list shouldn't occur; fall through to the failure path below.
    }

    // Unrecognized combination - report as unsupported.
    _ASSERT(FALSE);
    *delegateType = kUnknownDelegateType;
    return CORDBG_E_UNSUPPORTED_DELEGATE;
}
//-----------------------------------------------------------------------------
// For a closed or open delegate, retrieve the target function's identity:
// the DomainAssembly that declares it and its methodDef token.
// Arguments:
//    input:  delegateType   - kind previously computed by GetDelegateType;
//                             only kClosedDelegate/kOpenDelegate are supported
//            delegateObject - the delegate instance to inspect
//    output: ppFunctionDomainAssembly - assembly declaring the target method
//            pMethodDef               - metadata token of the target method
// Return Value: S_OK on success, E_FAIL for unsupported delegate kinds, or the
//               failure from GetMethodDescPtrFromIpEx.
//-----------------------------------------------------------------------------
HRESULT DacDbiInterfaceImpl::GetDelegateFunctionData(
    DelegateType delegateType,
    VMPTR_Object delegateObject,
    OUT VMPTR_DomainAssembly *ppFunctionDomainAssembly,
    OUT mdMethodDef *pMethodDef)
{
    DD_ENTER_MAY_THROW;

#ifdef _DEBUG
    // ensure we have a Delegate object
    IsDelegate(delegateObject);
#endif

    HRESULT hr = S_OK;
    PTR_DelegateObject pDelObj = dac_cast<PTR_DelegateObject>(delegateObject.GetDacPtr());
    TADDR targetMethodPtr = NULL;
    VMPTR_MethodDesc pMD;

    // The field holding the target code address differs by delegate kind:
    // closed delegates use _methodPtr, open ones use _methodPtrAux.
    switch (delegateType)
    {
    case kClosedDelegate:
        targetMethodPtr = PCODEToPINSTR(pDelObj->GetMethodPtr());
        break;
    case kOpenDelegate:
        targetMethodPtr = PCODEToPINSTR(pDelObj->GetMethodPtrAux());
        break;
    default:
        return E_FAIL;
    }

    // Map the code address back to its MethodDesc to recover module + token.
    hr = GetMethodDescPtrFromIpEx(targetMethodPtr, &pMD);
    if (hr != S_OK)
        return hr;

    ppFunctionDomainAssembly->SetDacTargetPtr(dac_cast<TADDR>(pMD.GetDacPtr()->GetModule()->GetDomainAssembly()));
    *pMethodDef = pMD.GetDacPtr()->GetMemberDef();

    return hr;
}
//-----------------------------------------------------------------------------
// Retrieve the "this" object a delegate is bound to, plus its app domain.
// For non-closed delegates there is no target object, so a null object is
// returned together with the delegate's own app domain.
// Arguments:
//    input:  delegateType   - kind previously computed by GetDelegateType
//            delegateObject - the delegate instance to inspect
//    output: ppTargetObj       - target object, or null if none
//            ppTargetAppDomain - app domain of the target (or of the delegate)
// Return Value: S_OK (the switch cannot currently fail)
//-----------------------------------------------------------------------------
HRESULT DacDbiInterfaceImpl::GetDelegateTargetObject(
    DelegateType delegateType,
    VMPTR_Object delegateObject,
    OUT VMPTR_Object *ppTargetObj,
    OUT VMPTR_AppDomain *ppTargetAppDomain)
{
    DD_ENTER_MAY_THROW;

#ifdef _DEBUG
    // ensure we have a Delegate object
    IsDelegate(delegateObject);
#endif

    HRESULT hr = S_OK;
    PTR_DelegateObject pDelObj = dac_cast<PTR_DelegateObject>(delegateObject.GetDacPtr());

    switch (delegateType)
    {
        case kClosedDelegate:
        {
            // Closed delegates capture a target object; report it and the
            // app domain derived from the target's method table.
            PTR_Object pRemoteTargetObj = OBJECTREFToObject(pDelObj->GetTarget());
            ppTargetObj->SetDacTargetPtr(pRemoteTargetObj.GetAddr());
            ppTargetAppDomain->SetDacTargetPtr(dac_cast<TADDR>(pRemoteTargetObj->GetGCSafeMethodTable()->GetDomain()->AsAppDomain()));
            break;
        }

        default:
            // No target object; fall back to the delegate's own app domain.
            ppTargetObj->SetDacTargetPtr(NULL);
            ppTargetAppDomain->SetDacTargetPtr(dac_cast<TADDR>(pDelObj->GetGCSafeMethodTable()->GetDomain()->AsAppDomain()));
            break;
    }

    return hr;
}
//-----------------------------------------------------------------------------
// Callback for LoaderHeap::EnumPageRegions: records each reserved page region
// as a COR_MEMORY_RANGE in a debugger-side collection.
// Arguments:
//    pvArgs           - really a CQuickArrayList<COR_MEMORY_RANGE>* smuggled
//                       through the PTR_VOID callback signature
//    pvAllocationBase - start of the reserved region
//    cbReserved       - size in bytes of the reserved region
// Return Value: always false, so enumeration continues over all regions.
//-----------------------------------------------------------------------------
static bool TrackMemoryRangeHelper(PTR_VOID pvArgs, PTR_VOID pvAllocationBase, SIZE_T cbReserved)
{
    // The pvArgs is really pointing to a debugger-side container. Sadly the callback only takes a PTR_VOID.
    CQuickArrayList<COR_MEMORY_RANGE> *rangeCollection =
        (CQuickArrayList<COR_MEMORY_RANGE>*)(dac_cast<TADDR>(pvArgs));
    TADDR rangeStart = dac_cast<TADDR>(pvAllocationBase);
    TADDR rangeEnd = rangeStart + cbReserved;
    rangeCollection->Push({rangeStart, rangeEnd});

    // This is a tracking function, not a search callback. Pretend we never found what we were looking for
    // to get all possible ranges.
    return false;
}
//-----------------------------------------------------------------------------
// Collect the reserved memory ranges of all heaps owned by a LoaderAllocator
// (low/high frequency, stub, and - if present - the virtual call stub heaps)
// into rangeAcummulator.
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::EnumerateMemRangesForLoaderAllocator(PTR_LoaderAllocator pLoaderAllocator, CQuickArrayList<COR_MEMORY_RANGE> *rangeAcummulator)
{
    CQuickArrayList<PTR_LoaderHeap> heapsToEnumerate;

    // We always expect to see these three heaps
    _ASSERTE(pLoaderAllocator->GetLowFrequencyHeap() != NULL);
    heapsToEnumerate.Push(pLoaderAllocator->GetLowFrequencyHeap());

    _ASSERTE(pLoaderAllocator->GetHighFrequencyHeap() != NULL);
    heapsToEnumerate.Push(pLoaderAllocator->GetHighFrequencyHeap());

    _ASSERTE(pLoaderAllocator->GetStubHeap() != NULL);
    heapsToEnumerate.Push(pLoaderAllocator->GetStubHeap());

    // GetVirtualCallStubManager returns VirtualCallStubManager*, but it's really an address to target as
    // pLoaderAllocator is DACized. Cast it so we don't try to to a Host to Target translation.
    VirtualCallStubManager *pVcsMgr = PTR_VirtualCallStubManager(TO_TADDR(pLoaderAllocator->GetVirtualCallStubManager()));
    LOG((LF_CORDB, LL_INFO10000, "DDBII::EMRFLA: VirtualCallStubManager 0x%x\n", PTR_HOST_TO_TADDR(pVcsMgr)));
    if (pVcsMgr)
    {
        // The VCS heaps are optional - only enumerate the ones that exist.
        if (pVcsMgr->indcell_heap != NULL) heapsToEnumerate.Push(pVcsMgr->indcell_heap);
        if (pVcsMgr->lookup_heap != NULL) heapsToEnumerate.Push(pVcsMgr->lookup_heap);
        if (pVcsMgr->resolve_heap != NULL) heapsToEnumerate.Push(pVcsMgr->resolve_heap);
        if (pVcsMgr->dispatch_heap != NULL) heapsToEnumerate.Push(pVcsMgr->dispatch_heap);
        if (pVcsMgr->cache_entry_heap != NULL) heapsToEnumerate.Push(pVcsMgr->cache_entry_heap);
    }

    // Walk each heap's page regions; TrackMemoryRangeHelper pushes every region
    // into the accumulator (passed through the PTR_VOID callback argument).
    TADDR rangeAccumAsTaddr = TO_TADDR(rangeAcummulator);
    for (uint32_t i = 0; i < (uint32_t)heapsToEnumerate.Size(); i++)
    {
        LOG((LF_CORDB, LL_INFO10000, "DDBII::EMRFLA: LoaderHeap 0x%x\n", heapsToEnumerate[i].GetAddr()));
        heapsToEnumerate[i]->EnumPageRegions(TrackMemoryRangeHelper, rangeAccumAsTaddr);
    }
}
//-----------------------------------------------------------------------------
// Collect the memory ranges of all JIT code heaps into rangeAcummulator.
// Loader-backed code heaps are walked via EnumPageRegions; host code heaps
// contribute their [baseAddr, currentAddr) span directly.
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::EnumerateMemRangesForJitCodeHeaps(CQuickArrayList<COR_MEMORY_RANGE> *rangeAcummulator)
{
    // We should always have a valid EEJitManager with at least one code heap.
    EEJitManager *pEM = ExecutionManager::GetEEJitManager();
    _ASSERTE(pEM != NULL && pEM->m_pCodeHeap.IsValid());

    PTR_HeapList pHeapList = pEM->m_pCodeHeap;
    while (pHeapList != NULL)
    {
        CodeHeap *pHeap = pHeapList->pHeap;
        DacpJitCodeHeapInfo jitCodeHeapInfo = DACGetHeapInfoForCodeHeap(pHeap);

        switch (jitCodeHeapInfo.codeHeapType)
        {
            case CODEHEAP_LOADER:
            {
                // Loader-backed heap: enumerate its reserved page regions.
                TADDR targetLoaderHeap = CLRDATA_ADDRESS_TO_TADDR(jitCodeHeapInfo.LoaderHeap);
                LOG((LF_CORDB, LL_INFO10000,
                    "DDBII::EMRFJCH: LoaderCodeHeap 0x%x with LoaderHeap at 0x%x\n",
                    PTR_HOST_TO_TADDR(pHeap), targetLoaderHeap));
                PTR_ExplicitControlLoaderHeap pLoaderHeap = PTR_ExplicitControlLoaderHeap(targetLoaderHeap);
                pLoaderHeap->EnumPageRegions(TrackMemoryRangeHelper, TO_TADDR(rangeAcummulator));
                break;
            }

            case CODEHEAP_HOST:
            {
                // Host heap: record the currently used span directly.
                LOG((LF_CORDB, LL_INFO10000,
                    "DDBII::EMRFJCH: HostCodeHeap 0x%x\n",
                    PTR_HOST_TO_TADDR(pHeap)));
                rangeAcummulator->Push({
                    CLRDATA_ADDRESS_TO_TADDR(jitCodeHeapInfo.HostData.baseAddr),
                    CLRDATA_ADDRESS_TO_TADDR(jitCodeHeapInfo.HostData.currentAddr)
                });
                break;
            }

            default:
            {
                LOG((LF_CORDB, LL_INFO10000, "DDBII::EMRFJCH: unknown heap type at 0x%x\n\n", pHeap));
                // BUGFIX: the assert previously passed the string literal itself
                // (always true, so it could never fire); negate it so an unknown
                // heap type actually trips the assert in debug builds.
                _ASSERTE(!"Unknown heap type enumerating code ranges.");
                break;
            }
        }

        pHeapList = pHeapList->GetNext();
    }
}
//-----------------------------------------------------------------------------
// DacDbi API: GetLoaderHeapMemoryRanges
// Fill pRanges with the memory ranges of the global loader allocator's heaps
// and of the JIT code heaps.
// Return Value: S_OK on success, or the HRESULT of any caught exception.
//-----------------------------------------------------------------------------
HRESULT DacDbiInterfaceImpl::GetLoaderHeapMemoryRanges(DacDbiArrayList<COR_MEMORY_RANGE> *pRanges)
{
    LOG((LF_CORDB, LL_INFO10000, "DDBII::GLHMR\n"));
    DD_ENTER_MAY_THROW;

    HRESULT hr = S_OK;

    EX_TRY
    {
        CQuickArrayList<COR_MEMORY_RANGE> memoryRanges;

        // Anything that's loaded in the SystemDomain or into the main AppDomain's default context in .NET Core
        // and after uses only one global allocator. Enumerating that one is enough for most purposes.
        // This doesn't consider any uses of AssemblyLoadingContexts (Unloadable or not). Each context has
        // its own LoaderAllocator, but there's no easy way of getting a hand at them other than going through
        // the heap, getting a managed LoaderAllocator, from there getting a Scout, and from there getting a native
        // pointer to the LoaderAllocator to enumerate.
        PTR_LoaderAllocator pGlobalAllocator = SystemDomain::System()->GetLoaderAllocator();
        _ASSERTE(pGlobalAllocator);

        EnumerateMemRangesForLoaderAllocator(pGlobalAllocator, &memoryRanges);
        EnumerateMemRangesForJitCodeHeaps(&memoryRanges);

        // This code doesn't enumerate module thunk heaps to support IJW.
        // It's a fairly rare scenario and requires to enumerate all modules.
        // The return for such added time is minimal.

        _ASSERTE(memoryRanges.Size() < INT_MAX);
        pRanges->Init(memoryRanges.Ptr(), (UINT) memoryRanges.Size());
    }
    EX_CATCH_HRESULT(hr);

    return hr;
}
//-----------------------------------------------------------------------------
// DacDbi API: GetStackFramesFromException
// Extract the stack trace recorded in a managed Exception object into
// dacStackFrames (one DacExceptionCallStackData per frame: app domain,
// domain assembly, IP, methodDef and foreign-frame marker).
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetStackFramesFromException(VMPTR_Object vmObject, DacDbiArrayList<DacExceptionCallStackData>& dacStackFrames)
{
    DD_ENTER_MAY_THROW;

    PTR_Object objPtr = vmObject.GetDacPtr();

#ifdef _DEBUG
    // ensure we have an Exception object
    MethodTable* pMT = objPtr->GetMethodTable();
    _ASSERTE(IsExceptionObject(pMT));
#endif

    OBJECTREF objRef = ObjectToOBJECTREF(objPtr);

    // NumFramesRequested = 0 requests all available frames.
    DebugStackTrace::GetStackFramesData stackFramesData;
    stackFramesData.pDomain = NULL;
    stackFramesData.skip = 0;
    stackFramesData.NumFramesRequested = 0;

    DebugStackTrace::GetStackFramesFromException(&objRef, &stackFramesData);

    INT32 dacStackFramesLength = stackFramesData.cElements;
    if (dacStackFramesLength > 0)
    {
        dacStackFrames.Alloc(dacStackFramesLength);

        // Translate each runtime stack trace element into DBI's representation.
        for (INT32 index = 0; index < dacStackFramesLength; ++index)
        {
            DebugStackTrace::DebugStackTraceElement const& currentElement = stackFramesData.pElements[index];
            DacExceptionCallStackData& currentFrame = dacStackFrames[index];

            Module* pModule = currentElement.pFunc->GetModule();
            BaseDomain* pBaseDomain = currentElement.pFunc->GetAssembly()->GetDomain();

            AppDomain* pDomain = NULL;
            DomainAssembly* pDomainAssembly = NULL;

            pDomain = pBaseDomain->AsAppDomain();
            _ASSERTE(pDomain != NULL);

            pDomainAssembly = pModule->GetDomainAssembly();
            _ASSERTE(pDomainAssembly != NULL);

            currentFrame.vmAppDomain.SetHostPtr(pDomain);
            currentFrame.vmDomainAssembly.SetHostPtr(pDomainAssembly);
            currentFrame.ip = currentElement.ip;
            currentFrame.methodDef = currentElement.pFunc->GetMemberDef();
            currentFrame.isLastForeignExceptionFrame = (currentElement.flags & STEF_LAST_FRAME_FROM_FOREIGN_STACK_TRACE) != 0;
        }
    }
}
#ifdef FEATURE_COMINTEROP
//-----------------------------------------------------------------------------
// Helper: retrieve the RCW (Runtime Callable Wrapper) associated with an
// object, if any. Returns NULL when the object has no sync block, no interop
// info, or no RCW - i.e. when the object does not wrap a COM object.
//-----------------------------------------------------------------------------
PTR_RCW GetRcwFromVmptrObject(VMPTR_Object vmObject)
{
    PTR_RCW pRCW = NULL;

    Object* objPtr = vmObject.GetDacPtr();

    // PassiveGetSyncBlock never allocates - safe for DAC inspection.
    PTR_SyncBlock pSyncBlock = NULL;
    pSyncBlock = objPtr->PassiveGetSyncBlock();
    if (pSyncBlock == NULL)
        return pRCW;

    PTR_InteropSyncBlockInfo pInfo = NULL;
    pInfo = pSyncBlock->GetInteropInfoNoCreate();
    if (pInfo == NULL)
        return pRCW;

    pRCW = dac_cast<PTR_RCW>(pInfo->DacGetRawRCW());

    return pRCW;
}
#endif
// DacDbi API: returns TRUE if the object has an associated RCW, i.e. it wraps
// a COM object. Always FALSE when COM interop is compiled out.
BOOL DacDbiInterfaceImpl::IsRcw(VMPTR_Object vmObject)
{
#ifdef FEATURE_COMINTEROP
    DD_ENTER_MAY_THROW;
    return GetRcwFromVmptrObject(vmObject) != NULL;
#else
    return FALSE;
#endif // FEATURE_COMINTEROP
}
// DacDbi API: legacy WinRT-only query; WinRT support has been removed, so this
// always returns an empty list regardless of the arguments.
void DacDbiInterfaceImpl::GetRcwCachedInterfaceTypes(
                        VMPTR_Object vmObject,
                        VMPTR_AppDomain vmAppDomain,
                        BOOL bIInspectableOnly,
                        DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pDacInterfaces)
{
    // Legacy WinRT API.
    pDacInterfaces->Alloc(0);
}
//-----------------------------------------------------------------------------
// DacDbi API: GetRcwCachedInterfacePointers
// Return the interface pointers cached on an object's RCW. If the object has
// no RCW (or COM interop is compiled out), an empty list is returned.
// Arguments:
//    input:  vmObject          - the object whose RCW to query
//            bIInspectableOnly - restrict the results to IInspectable pointers
//    output: pDacItfPtrs       - receives the cached interface pointers
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetRcwCachedInterfacePointers(
    VMPTR_Object vmObject,
    BOOL bIInspectableOnly,
    DacDbiArrayList<CORDB_ADDRESS> * pDacItfPtrs)
{
#ifdef FEATURE_COMINTEROP
    DD_ENTER_MAY_THROW;

    // CLEANUP: removed an unused local Object* (vmObject.GetDacPtr() result was
    // never referenced), which triggered an unused-variable compiler warning.
    InlineSArray<TADDR, INTERFACE_ENTRY_CACHE_SIZE> rgUnks;

    PTR_RCW pRCW = GetRcwFromVmptrObject(vmObject);
    if (pRCW != NULL)
    {
        pRCW->GetCachedInterfacePointers(bIInspectableOnly, &rgUnks);

        pDacItfPtrs->Alloc(rgUnks.GetCount());

        for (COUNT_T i = 0; i < rgUnks.GetCount(); ++i)
        {
            (*pDacItfPtrs)[i] = (CORDB_ADDRESS)(rgUnks[i]);
        }
    }
    else
#endif // FEATURE_COMINTEROP
    {
        pDacItfPtrs->Alloc(0);
    }
}
// DacDbi API: legacy WinRT-only query; WinRT support has been removed, so this
// always returns an empty list regardless of the arguments.
void DacDbiInterfaceImpl::GetCachedWinRTTypesForIIDs(
                        VMPTR_AppDomain vmAppDomain,
                        DacDbiArrayList<GUID> & iids,
    					OUT DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pTypes)
{
    pTypes->Alloc(0);
}
// DacDbi API: legacy WinRT-only query; WinRT support has been removed, so this
// always returns an empty type list (pGuids is intentionally left untouched).
void DacDbiInterfaceImpl::GetCachedWinRTTypes(
                        VMPTR_AppDomain vmAppDomain,
                        OUT DacDbiArrayList<GUID> * pGuids,
                        OUT DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pTypes)
{
    pTypes->Alloc(0);
}
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::FindField
// Finds information for a particular class field
// Arguments:
//     input:  thApprox - type handle for the type to which the field belongs
//             fldToken - metadata token for the field
// Return Value: FieldDesc containing information for the field if found or NULL otherwise
//-----------------------------------------------------------------------------
PTR_FieldDesc  DacDbiInterfaceImpl::FindField(TypeHandle thApprox, mdFieldDef fldToken)
{
    // Walk every field (including EnC-added ones) without applying EnC fixups,
    // since the debuggee is stopped.
    EncApproxFieldDescIterator fdIterator(thApprox.GetMethodTable(),
                                          ApproxFieldDescIterator::ALL_FIELDS,
                                          FALSE);  // don't fixup EnC (we can't, we're stopped)

    for (PTR_FieldDesc pCandidate = fdIterator.Next(); pCandidate != NULL; pCandidate = fdIterator.Next())
    {
        // Stop at the field whose metadata token matches the one requested.
        if (pCandidate->GetMemberDef() == fldToken)
        {
            return pCandidate;
        }
    }

    // No field with this token exists on the type.
    return NULL;
} // DacDbiInterfaceImpl::FindField
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetEnCFieldDesc
// Get the FieldDesc corresponding to a particular EnC field token
// Arguments:
//     input:  pEnCFieldInfo - identifies the object's type (module + typedef
//                             token) and the field token to resolve
// Return Value: pointer to the FieldDesc that corresponds to the EnC field
// Throws: CORDBG_E_CLASS_NOT_LOADED if the type isn't loaded,
//         CORDBG_E_ENC_HANGING_FIELD if the FieldDesc isn't available yet.
//-----------------------------------------------------------------------------
FieldDesc * DacDbiInterfaceImpl::GetEnCFieldDesc(const EnCHangingFieldInfo * pEnCFieldInfo)
{
        FieldDesc * pFD = NULL;

        DomainAssembly * pDomainAssembly = pEnCFieldInfo->GetObjectTypeData().vmDomainAssembly.GetDacPtr();
        Module     * pModule     = pDomainAssembly->GetModule();

        // get the type handle for the object
        TypeHandle typeHandle = ClassLoader::LookupTypeDefOrRefInModule(pModule,
                                                          pEnCFieldInfo->GetObjectTypeData().metadataToken);
        if (typeHandle == NULL)
        {
            ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
        }

        // and find the field desc
        pFD = FindField(typeHandle, pEnCFieldInfo->GetFieldToken());
        if (pFD == NULL)
        {
            // FieldDesc is not yet available, so can't get EnC field info
            ThrowHR(CORDBG_E_ENC_HANGING_FIELD);
        }
        return pFD;

} // DacDbiInterfaceImpl::GetEnCFieldDesc
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetPtrToEnCField
// Get the address of a field added with EnC.
// Arguments:
//     input:  pFD           - field desc for the added field
//             pEnCFieldInfo - information about the new field
// Return Value: The field address if the field is available (i.e., it has been accessed)
//               or NULL otherwise
// Throws: CORDBG_E_ENC_HANGING_FIELD if the field needs fixup or hasn't been
//         materialized yet (EnC fields are allocated lazily on first access,
//         and the DAC cannot allocate).
//-----------------------------------------------------------------------------
PTR_CBYTE DacDbiInterfaceImpl::GetPtrToEnCField(FieldDesc * pFD, const EnCHangingFieldInfo * pEnCFieldInfo)
{
#ifndef EnC_SUPPORTED
    _ASSERTE(!"Trying to get the address of an EnC field where EnC is not supported! ");
    return NULL;
#else
    PTR_EditAndContinueModule pEnCModule;
    DomainAssembly * pDomainAssembly = pEnCFieldInfo->GetObjectTypeData().vmDomainAssembly.GetDacPtr();
    Module     * pModule     = pDomainAssembly->GetModule();

    // make sure we actually have an EditAndContinueModule
    _ASSERTE(pModule->IsEditAndContinueCapable());
    pEnCModule = dac_cast<PTR_EditAndContinueModule>(pModule);

    // we should also have an EnCFieldDesc
    _ASSERTE(pFD->IsEnCNew());
    EnCFieldDesc * pEnCFieldDesc;
    pEnCFieldDesc = dac_cast<PTR_EnCFieldDesc>(pFD);

    // If it hasn't been fixed up yet, then we can't return the pointer.
    if (pEnCFieldDesc->NeedsFixup())
    {
        ThrowHR(CORDBG_E_ENC_HANGING_FIELD);
    }

    // Get a pointer to the field
    PTR_CBYTE pORField = NULL;

    PTR_Object pObject = pEnCFieldInfo->GetVmObject().GetDacPtr();
    pORField = pEnCModule->ResolveField(ObjectToOBJECTREF(pObject),
                                        pEnCFieldDesc);

    // The field could be absent because the code hasn't accessed it yet. If so, we're not going to add it
    // since we can't allocate anyway.
    if (pORField == NULL)
    {
        ThrowHR(CORDBG_E_ENC_HANGING_FIELD);
    }
    return pORField;
#endif // EnC_SUPPORTED
} // DacDbiInterfaceImpl::GetPtrToEnCField
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::InitFieldData
// Initialize information about a field added with EnC
// Arguments :
//     input:
//         pFD           - provides information about whether the field is static,
//                         the metadata token, etc.
//         pORField      - provides the field address or offset
//         pEnCFieldData - provides the offset to the fields of the object
//     output: pFieldData - initialized in accordance with the input information
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::InitFieldData(const FieldDesc *           pFD,
                                        const PTR_CBYTE             pORField,
                                        const EnCHangingFieldInfo * pEnCFieldData,
                                        FieldData *           pFieldData)
{

    pFieldData->ClearFields();

    pFieldData->m_fFldIsStatic = (pFD->IsStatic() != 0);
    pFieldData->m_vmFieldDesc.SetHostPtr(pFD);
    pFieldData->m_fFldIsTLS = (pFD->IsThreadStatic() == TRUE);
    pFieldData->m_fldMetadataToken = pFD->GetMemberDef();
    pFieldData->m_fFldIsRVA = (pFD->IsRVA() == TRUE);
    pFieldData->m_fFldIsCollectibleStatic = FALSE;
    // pORField is non-null by contract (callers obtain it via GetPtrToEnCField,
    // which throws rather than return NULL), so storage is always available.
    pFieldData->m_fFldStorageAvailable = true;

    if (pFieldData->m_fFldIsStatic)
    {
        //EnC is only supported on regular static fields
        _ASSERTE(!pFieldData->m_fFldIsTLS);
        _ASSERTE(!pFieldData->m_fFldIsRVA);

        // pORField contains the absolute address
        pFieldData->SetStaticAddress(PTR_TO_TADDR(pORField));
    }
    else
    {
        // fldInstanceOffset is computed to work correctly with GetFieldValue
        // which computes:
        // addr of pORField = object + pEnCFieldInfo->m_offsetToVars + offsetToFld
        pFieldData->SetInstanceOffset(PTR_TO_TADDR(pORField) -
                                      (PTR_TO_TADDR(pEnCFieldData->GetVmObject().GetDacPtr()) +
                                                    pEnCFieldData->GetOffsetToVars()));
    }
} // DacDbiInterfaceImpl::InitFieldData
// ----------------------------------------------------------------------------
// DacDbi API: GetEnCHangingFieldInfo
// After a class has been loaded, if a field has been added via EnC we'll have to jump through
// some hoops to get at it (it hangs off the sync block or FieldDesc).
//
// Arguments:
//     input:  pEnCFieldInfo - identifies the object, its type, and the EnC field token
//     output: pFieldData    - receives the field's location information
//             pfStatic      - receives whether the field is static
// Throws: CORDBG_E_CLASS_NOT_LOADED / CORDBG_E_ENC_HANGING_FIELD via the helpers.
//
// GENERICS: TODO: this method will need to be modified if we ever support EnC on
// generic classes.
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetEnCHangingFieldInfo(const EnCHangingFieldInfo * pEnCFieldInfo,
                                                 FieldData *           pFieldData,
                                                 BOOL *                pfStatic)
{
    DD_ENTER_MAY_THROW;

    LOG((LF_CORDB, LL_INFO100000, "DDI::IEnCHFI: Obj:0x%x, objType"
        ":0x%x, offset:0x%x\n", pEnCFieldInfo->m_pObject, pEnCFieldInfo->m_objectTypeData.elementType,
        pEnCFieldInfo->m_offsetToVars));

    FieldDesc *  pFD       = NULL;
    PTR_CBYTE    pORField  = NULL;

    pFD = GetEnCFieldDesc(pEnCFieldInfo);
    _ASSERTE(pFD->IsEnCNew()); // We shouldn't be here if it wasn't added to an
                               // already loaded class.

#ifdef EnC_SUPPORTED
    pORField = GetPtrToEnCField(pFD, pEnCFieldInfo);
#else
    _ASSERTE(!"We shouldn't be here: EnC not supported");
#endif // EnC_SUPPORTED

    InitFieldData(pFD, pORField, pEnCFieldInfo, pFieldData);
    *pfStatic = (pFD->IsStatic() != 0);

} // DacDbiInterfaceImpl::GetEnCHangingFieldInfo
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// DacDbi API: resolve a DomainAssembly to the Assembly it wraps.
void DacDbiInterfaceImpl::GetAssemblyFromDomainAssembly(VMPTR_DomainAssembly vmDomainAssembly, VMPTR_Assembly *vmAssembly)
{
    DD_ENTER_MAY_THROW;

    _ASSERTE(vmAssembly != NULL);

    vmAssembly->SetHostPtr(vmDomainAssembly.GetDacPtr()->GetAssembly());
}
// Determines whether the runtime security system has assigned full-trust to this assembly.
// Code Access Security was removed from the runtime, so every assembly is
// considered fully trusted; this unconditionally returns TRUE.
BOOL DacDbiInterfaceImpl::IsAssemblyFullyTrusted(VMPTR_DomainAssembly vmDomainAssembly)
{
    DD_ENTER_MAY_THROW;

    return TRUE;
}
// Get the full path and file name to the assembly's manifest module.
// Arguments:
//     input:  vmAssembly   - the assembly to query
//     output: pStrFilename - receives the path (empty string if none)
// Return Value: TRUE if the assembly has a file name, FALSE otherwise
//               (FALSE is not a failure - in-memory assemblies have no path).
BOOL DacDbiInterfaceImpl::GetAssemblyPath(
    VMPTR_Assembly  vmAssembly,
    IStringHolder * pStrFilename)
{
    DD_ENTER_MAY_THROW;

    // Get the manifest module for this assembly
    Assembly * pAssembly = vmAssembly.GetDacPtr();
    Module * pManifestModule = pAssembly->GetModule();

    // Get the path for the manifest module.
    // since we no longer support Win9x, we assume all paths will be in unicode format already
    const WCHAR * szPath = pManifestModule->GetPath().DacGetRawUnicode();
    HRESULT hrStatus = pStrFilename->AssignCopy(szPath);
    IfFailThrow(hrStatus);

    if(szPath == NULL || *szPath=='\0')
    {
        // The assembly has no (and will never have a) file name, but we didn't really fail
        return FALSE;
    }

    return TRUE;
}
// DAC/DBI API
// Get a resolved type def from a type ref. The type ref may come from a module other than the
// referencing module.
// Arguments:
//     input:  pTypeRefInfo   - the referencing module and typeRef token
//     output: pTargetRefInfo - the module and typeDef token of the resolved type
// Throws: CORDBG_E_CLASS_NOT_LOADED when the reference cannot be resolved
//         (typically because the target assembly isn't loaded).
void DacDbiInterfaceImpl::ResolveTypeReference(const TypeRefData * pTypeRefInfo,
                                               TypeRefData *       pTargetRefInfo)
{
    DD_ENTER_MAY_THROW;
    DomainAssembly * pDomainAssembly = pTypeRefInfo->vmDomainAssembly.GetDacPtr();
    Module *     pReferencingModule = pDomainAssembly->GetModule();
    BOOL         fSuccess = FALSE;

    // Resolve the type ref
    // g_pEEInterface->FindLoadedClass is almost what we want, but it isn't guaranteed to work if
    // the typeRef was originally loaded from a different assembly.  Also, we need to ensure that
    // we can resolve even unloaded types in fully loaded assemblies, so APIs such as
    // LoadTypeDefOrRefThrowing aren't acceptable.

    Module * pTargetModule = NULL;
    mdTypeDef targetTypeDef = mdTokenNil;

    // The loader won't need to trigger a GC or throw because we've told it not to load anything
    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

    fSuccess = ClassLoader::ResolveTokenToTypeDefThrowing(pReferencingModule,
                                                          pTypeRefInfo->typeToken,
                                                          &pTargetModule,
                                                          &targetTypeDef,
                                                          Loader::SafeLookup   //don't load, no locks/allocations
                                                          );
    if (fSuccess)
    {
        _ASSERTE(pTargetModule != NULL);
        _ASSERTE( TypeFromToken(targetTypeDef) == mdtTypeDef );

        // CLEANUP: removed an unused local AppDomain* (it was fetched but never
        // referenced), which triggered an unused-variable compiler warning.
        pTargetRefInfo->vmDomainAssembly.SetDacTargetPtr(PTR_HOST_TO_TADDR(pTargetModule->GetDomainAssembly()));
        pTargetRefInfo->typeToken = targetTypeDef;
    }
    else
    {
        // failed - presumably because the target assembly isn't loaded
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }
} // DacDbiInterfaceImpl::ResolveTypeReference
// Get the full path and file name to the module (if any).
// Arguments:
//     input:  vmModule     - the module to query
//     output: pStrFilename - receives the on-disk path, or "" if none
// Return Value: TRUE when a path was found, FALSE for in-memory/dynamic modules.
BOOL DacDbiInterfaceImpl::GetModulePath(VMPTR_Module vmModule,
                                        IStringHolder *  pStrFilename)
{
    DD_ENTER_MAY_THROW;

    Module * pModule = vmModule.GetDacPtr();
    PEAssembly * pPEAssembly = pModule->GetPEAssembly();
    if (pPEAssembly != NULL)
    {
        if( !pPEAssembly->GetPath().IsEmpty() )
        {
            // Module has an on-disk path
            const WCHAR * szPath = pPEAssembly->GetPath().DacGetRawUnicode();
            if (szPath == NULL)
            {
                // Path string exists but couldn't be materialized by the DAC;
                // fall back to the file-name hint before giving up.
                szPath = pPEAssembly->GetModuleFileNameHint().DacGetRawUnicode();
                if (szPath == NULL)
                {
                    goto NoFileName;
                }
            }
            IfFailThrow(pStrFilename->AssignCopy(szPath));
            return TRUE;
        }
    }

NoFileName:
    // no filename
    IfFailThrow(pStrFilename->AssignCopy(W("")));
    return FALSE;
}
// Get the full path and file name to the ngen image for the module (if any).
// NGen support has been removed, so this always reports "no ngen image":
// the filename is set to the empty string and FALSE is returned.
BOOL DacDbiInterfaceImpl::GetModuleNGenPath(VMPTR_Module vmModule,
                                            IStringHolder *  pStrFilename)
{
    DD_ENTER_MAY_THROW;
    // no ngen filename
    IfFailThrow(pStrFilename->AssignCopy(W("")));
    return FALSE;
}
// Implementation of IDacDbiInterface::GetModuleSimpleName
// Copies the module's simple name (converted from UTF-8 to unicode) into pStrFilename.
void DacDbiInterfaceImpl::GetModuleSimpleName(VMPTR_Module vmModule, IStringHolder * pStrFilename)
{
    DD_ENTER_MAY_THROW;

    _ASSERTE(pStrFilename != NULL);

    // The runtime stores the simple name as UTF-8; convert before handing it out.
    LPCUTF8 pUtf8Name = vmModule.GetDacPtr()->GetSimpleName();
    SString wideName(SString::Utf8, pUtf8Name);
    IfFailThrow(pStrFilename->AssignCopy(wideName.GetUnicode()));
}
// DacDbi API: determine whether a module's PE image is memory-mapped.
// Arguments:
//     input:  pModule        - the module to query
//     output: isModuleMapped - set only when S_OK is returned
// Return Value: S_OK when the loaded layout was inspected, S_FALSE when the
//               module has no loaded PE image, or a caught exception's HRESULT.
HRESULT DacDbiInterfaceImpl::IsModuleMapped(VMPTR_Module pModule, OUT BOOL *isModuleMapped)
{
    LOG((LF_CORDB, LL_INFO10000, "DDBII::IMM - TADDR 0x%x\n", pModule));
    DD_ENTER_MAY_THROW;

    HRESULT hr = S_FALSE;
    PTR_Module pTargetModule = pModule.GetDacPtr();

    EX_TRY
    {
        PTR_PEAssembly pPEAssembly = pTargetModule->GetPEAssembly();
        _ASSERTE(pPEAssembly != NULL);

        if (pPEAssembly->HasLoadedPEImage())
        {
            *isModuleMapped = pPEAssembly->GetLoadedLayout()->IsMapped();
            hr = S_OK;
        }
    }
    EX_CATCH_HRESULT(hr);

    return hr;
}
// DacDbi API: reports whether any EnC/hot-reload metadata updates have been
// applied in the debuggee. Always false when EnC is compiled out.
bool DacDbiInterfaceImpl::MetadataUpdatesApplied()
{
    DD_ENTER_MAY_THROW;
#ifdef EnC_SUPPORTED
    return g_metadataUpdatesApplied;
#else
    return false;
#endif
}
// Helper to initialize a TargetBuffer from a MemoryRange
//
// Arguments:
//    memoryRange - memory range.
//    pTargetBuffer - required out parameter to be initialized to value of memory range.
//
// Notes:
//    MemoryRange and TargetBuffer both conceptually describe a single contiguous buffer of memory in the
//    target. MemoryRange is a VM structure, which can't bleed across the DacDbi boundary. TargetBuffer is
//    a DacDbi structure, which can cross the DacDbi boundary.
void InitTargetBufferFromMemoryRange(const MemoryRange memoryRange, TargetBuffer * pTargetBuffer)
{
    SUPPORTS_DAC;

    _ASSERTE(pTargetBuffer != NULL);
    PTR_CVOID p = memoryRange.StartAddress();
    CORDB_ADDRESS addr = PTR_TO_CORDB_ADDRESS(PTR_TO_TADDR(p));
    // TargetBuffer sizes are 32-bit; assert the range fits before narrowing.
    _ASSERTE(memoryRange.Size() <= 0xffffffff);
    pTargetBuffer->Init(addr, (ULONG)memoryRange.Size());
}
// Helper to initialize a TargetBuffer (host representation of target) from an SBuffer (target)
//
// Arguments:
//    pBuffer - target pointer to a SBuffer structure. If pBuffer is NULL, then target buffer will be empty.
//    pTargetBuffer - required out pointer to hold buffer description.
//
// Notes:
//    PTR_SBuffer and TargetBuffer are both semantically equivalent structures. They both are a pointer and length
//    describing a buffer in the target address space. (SBufer also has ownership semantics, but for DAC's
//    read-only nature, that doesn't matter).
//    Neither of these will actually copy the target buffer into the host without explicit action.
//    The important difference is that TargetBuffer is a host datastructure and so easier to manipulate.
//
void InitTargetBufferFromTargetSBuffer(PTR_SBuffer pBuffer, TargetBuffer * pTargetBuffer)
{
    SUPPORTS_DAC;

    _ASSERTE(pTargetBuffer != NULL);

    SBuffer * pBufferHost = pBuffer;
    if (pBufferHost == NULL)
    {
        // A null SBuffer means "no data" - represent it as an empty TargetBuffer.
        pTargetBuffer->Clear();
        return;
    }

    MemoryRange m = pBufferHost->DacGetRawBuffer();
    InitTargetBufferFromMemoryRange(m, pTargetBuffer);
}
// Implementation of IDacDbiInterface::GetMetadata
// Locate the raw metadata blob for a module in the target address space.
// For reflection (dynamic) modules the metadata is an eagerly-serialized
// buffer; for all other modules it lives inside the PE image.
// Throws: CORDBG_E_MISSING_METADATA if no metadata could be located.
void DacDbiInterfaceImpl::GetMetadata(VMPTR_Module vmModule, TargetBuffer * pTargetBuffer)
{
    DD_ENTER_MAY_THROW;

    pTargetBuffer->Clear();

    Module * pModule = vmModule.GetDacPtr();

    // Target should only be asking about modules that are visible to debugger.
    _ASSERTE(pModule->IsVisibleToDebugger());

    // For dynamic modules, metadata is stored as an eagerly-serialized buffer hanging off the Reflection Module.
    if (pModule->IsReflection())
    {
        // Here is the fetch.
        ReflectionModule * pReflectionModule = pModule->GetReflectionModule();
        InitTargetBufferFromTargetSBuffer(pReflectionModule->GetDynamicMetadataBuffer(), pTargetBuffer);
    }
    else
    {
        PEAssembly * pPEAssembly = pModule->GetPEAssembly();

        // For non-dynamic modules, metadata is in the pe-image.
        COUNT_T size;
        CORDB_ADDRESS address = PTR_TO_CORDB_ADDRESS(dac_cast<TADDR>(pPEAssembly->GetLoadedMetadata(&size)));

        pTargetBuffer->Init(address, (ULONG) size);
    }

    if (pTargetBuffer->IsEmpty())
    {
        // We never expect this to happen in a well-behaved scenario. But just in case.
        ThrowHR(CORDBG_E_MISSING_METADATA);
    }
}
// Implementation of IDacDbiInterface::GetSymbolsBuffer
// Locate a module's in-memory symbol (PDB) stream, if one exists.
// On return, pTargetBuffer describes the stream (cleared if absent) and
// *pSymbolFormat is kSymbolFormatPDB or kSymbolFormatNone accordingly.
void DacDbiInterfaceImpl::GetSymbolsBuffer(VMPTR_Module vmModule, TargetBuffer * pTargetBuffer, SymbolFormat * pSymbolFormat)
{
    DD_ENTER_MAY_THROW;

    pTargetBuffer->Clear();
    *pSymbolFormat = kSymbolFormatNone;

    Module * pModule = vmModule.GetDacPtr();

    // Target should only be asking about modules that are visible to debugger.
    _ASSERTE(pModule->IsVisibleToDebugger());

    PTR_CGrowableStream pStream = pModule->GetInMemorySymbolStream();
    if (pStream == NULL)
    {
        // Common case is to not have PDBs in-memory.
        return;
    }

    const MemoryRange m = pStream->GetRawBuffer();
    if (m.Size() == 0)
    {
        // We may be prepared to store symbols (in some particular format) but none are there yet.
        // We treat this the same as not having any symbols above.
        return;
    }

    InitTargetBufferFromMemoryRange(m, pTargetBuffer);
    *pSymbolFormat = kSymbolFormatPDB;
}
// DacDbi API: resolve a DomainAssembly to the Module it wraps.
void DacDbiInterfaceImpl::GetModuleForDomainAssembly(VMPTR_DomainAssembly vmDomainAssembly, OUT VMPTR_Module * pModule)
{
    DD_ENTER_MAY_THROW;

    _ASSERTE(pModule != NULL);

    pModule->SetHostPtr(vmDomainAssembly.GetDacPtr()->GetModule());
}
// Implement IDacDbiInterface::GetDomainAssemblyData
// Fill pData with VMPTRs to the domain assembly and its owning app domain.
void DacDbiInterfaceImpl::GetDomainAssemblyData(VMPTR_DomainAssembly vmDomainAssembly, DomainAssemblyInfo * pData)
{
    DD_ENTER_MAY_THROW;

    _ASSERTE(pData != NULL);
    ZeroMemory(pData, sizeof(*pData));

    DomainAssembly * pDomainAssembly  = vmDomainAssembly.GetDacPtr();
    AppDomain  * pAppDomain  = pDomainAssembly->GetAppDomain();

    // @dbgtodo - is this efficient DAC usage (perhaps a dac-cop rule)? Are we round-tripping the pointer?
    pData->vmDomainAssembly.SetHostPtr(pDomainAssembly);
    pData->vmAppDomain.SetHostPtr(pAppDomain);
}
// Implement IDacDbiInterface::GetModuleData
// Fill pData with a module's identity and PE layout information:
// owning PEAssembly/Assembly, whether it is dynamic, the PE image base
// address and size (0/0 for dynamic modules), and whether it is in-memory.
void DacDbiInterfaceImpl::GetModuleData(VMPTR_Module vmModule, ModuleInfo * pData)
{
    DD_ENTER_MAY_THROW;

    _ASSERTE(pData != NULL);
    ZeroMemory(pData, sizeof(*pData));

    Module     * pModule      = vmModule.GetDacPtr();
    PEAssembly * pPEAssembly  = pModule->GetPEAssembly();

    pData->vmPEAssembly.SetHostPtr(pPEAssembly);
    pData->vmAssembly.SetHostPtr(pModule->GetAssembly());

    // Is it dynamic?
    BOOL fIsDynamic = pModule->IsReflection();
    pData->fIsDynamic = fIsDynamic;

    // Get PE BaseAddress and Size
    // For dynamic modules, these are 0. Else,
    pData->pPEBaseAddress = NULL;
    pData->nPESize = 0;

    if (!fIsDynamic)
    {
        COUNT_T size = 0;
        pData->pPEBaseAddress = PTR_TO_TADDR(pPEAssembly->GetDebuggerContents(&size));
        pData->nPESize = (ULONG) size;
    }

    // In-memory is determined by whether the module has a filename.
    pData->fInMemory = FALSE;
    if (pPEAssembly != NULL)
    {
        pData->fInMemory = pPEAssembly->GetPath().IsEmpty();
    }
}
// Enumerate all AppDomains in the process.
// Arguments:
//     fpCallback - invoked once per active app domain with a VMPTR_AppDomain
//     pUserData  - opaque state passed through to fpCallback
void DacDbiInterfaceImpl::EnumerateAppDomains(
    FP_APPDOMAIN_ENUMERATION_CALLBACK fpCallback,
    void * pUserData)
{
    DD_ENTER_MAY_THROW;

    _ASSERTE(fpCallback != NULL);

    // Only include active appdomains in the enumeration.
    // This includes appdomains sent before the AD load event,
    // and does not include appdomains that are in shutdown after the AD exit event.
    const BOOL bOnlyActive = TRUE;
    AppDomainIterator iterator(bOnlyActive);

    while(iterator.Next())
    {
        // It's critical that we don't yield appdomains after the unload event has been sent.
        // See code:IDacDbiInterface#Enumeration for details.
        AppDomain * pAppDomain = iterator.GetDomain();

        VMPTR_AppDomain vmAppDomain = VMPTR_AppDomain::NullPtr();
        vmAppDomain.SetHostPtr(pAppDomain);

        fpCallback(vmAppDomain, pUserData);
    }
}
// Enumerate all Assemblies in an appdomain.
// Arguments:
//     vmAppDomain - domain whose assemblies to walk
//     fpCallback  - invoked once per debugger-visible assembly
//     pUserData   - opaque state passed through to fpCallback
void  DacDbiInterfaceImpl::EnumerateAssembliesInAppDomain(
    VMPTR_AppDomain vmAppDomain,
    FP_ASSEMBLY_ENUMERATION_CALLBACK fpCallback,
    void * pUserData
)
{
    DD_ENTER_MAY_THROW;

    _ASSERTE(fpCallback != NULL);

    // Iterate through all Assemblies (including shared) in the appdomain.
    AppDomain::AssemblyIterator iterator;

    // If the containing appdomain is unloading, then don't enumerate any assemblies
    // in the domain. This is to enforce rules at code:IDacDbiInterface#Enumeration.
    // See comment in code:DacDbiInterfaceImpl::EnumerateModulesInAssembly code for details.
    AppDomain * pAppDomain = vmAppDomain.GetDacPtr();

    // Pass the magical flags to the loader enumerator to get all Execution-only assemblies.
    iterator = pAppDomain->IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoading | kIncludeLoaded | kIncludeExecution));
    CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;

    while (iterator.Next(pDomainAssembly.This()))
    {
        // Skip assemblies the debugger was never notified about (e.g. inspection-only).
        if (!pDomainAssembly->IsVisibleToDebugger())
        {
            continue;
        }

        VMPTR_DomainAssembly vmDomainAssembly = VMPTR_DomainAssembly::NullPtr();
        vmDomainAssembly.SetHostPtr(pDomainAssembly);

        fpCallback(vmDomainAssembly, pUserData);
    }
}
// Implementation of IDacDbiInterface::EnumerateModulesInAssembly.
// Invoke fpCallback for the assembly's module if it is visible to the debugger.
void DacDbiInterfaceImpl::EnumerateModulesInAssembly(
    VMPTR_DomainAssembly vmAssembly,
    FP_MODULE_ENUMERATION_CALLBACK fpCallback,
    void * pUserData)
{
    DD_ENTER_MAY_THROW;

    _ASSERTE(fpCallback != NULL);

    DomainAssembly * pDomAsm = vmAssembly.GetDacPtr();

    // The debugger isn't notified of Resource / Inspection-only modules.
    if (!pDomAsm->GetModule()->IsVisibleToDebugger())
    {
        return;
    }

    _ASSERTE(pDomAsm->IsLoaded());

    VMPTR_DomainAssembly vmDomAsm = VMPTR_DomainAssembly::NullPtr();
    vmDomAsm.SetHostPtr(pDomAsm);
    fpCallback(vmDomAsm, pUserData);
}
// Implementation of IDacDbiInterface::ResolveAssembly
// Resolve an AssemblyRef token in the scope of the given DomainAssembly's
// module to the DomainAssembly it refers to.
//
// Arguments:
//    vmScope       - DomainAssembly whose module's AssemblyRef table is consulted
//    tkAssemblyRef - mdAssemblyRef token to resolve
//
// Returns:
//    VMPTR to the resolved DomainAssembly, or NULL if the reference has not
//    been resolved/loaded yet.
VMPTR_DomainAssembly DacDbiInterfaceImpl::ResolveAssembly(
    VMPTR_DomainAssembly vmScope,
    mdToken tkAssemblyRef)
{
    DD_ENTER_MAY_THROW;

    // Fix over the original code: removed an unused AppDomain local and an
    // inner DomainAssembly local that shadowed this one.
    DomainAssembly * pDomainAssembly = vmScope.GetDacPtr();
    Module * pModule = pDomainAssembly->GetModule();

    VMPTR_DomainAssembly vmDomainAssembly = VMPTR_DomainAssembly::NullPtr();

    // LookupAssemblyRef returns NULL if the ref has not been resolved yet.
    Assembly * pAssembly = pModule->LookupAssemblyRef(tkAssemblyRef);
    if (pAssembly != NULL)
    {
        vmDomainAssembly.SetHostPtr(pAssembly->GetDomainAssembly());
    }
    return vmDomainAssembly;
}
// When stopped at an event, request a synchronization.
// See DacDbiInterface.h for full comments
void DacDbiInterfaceImpl::RequestSyncAtEvent()
{
DD_ENTER_MAY_THROW;
// To request a sync, we just need to set g_pDebugger->m_RSRequestedSync high.
if (g_pDebugger != NULL)
{
TADDR addr = PTR_HOST_MEMBER_TADDR(Debugger, g_pDebugger, m_RSRequestedSync);
BOOL fTrue = TRUE;
SafeWriteStructOrThrow<BOOL>(addr, &fTrue);
}
}
// Toggle whether the left side sends exception notifications for code outside
// of just-my-code, by writing the debugger's flag into the target process.
//
// Arguments:
//    sendExceptionsOutsideOfJMC - new value for Debugger::m_sendExceptionsOutsideOfJMC
//
// Returns:
//    S_OK on success (also when g_pDebugger is not yet available, in which case
//    this is a no-op); a failure HRESULT if the target write fails.
HRESULT DacDbiInterfaceImpl::SetSendExceptionsOutsideOfJMC(BOOL sendExceptionsOutsideOfJMC)
{
    DD_ENTER_MAY_THROW;  // semicolon added for consistency with the other entry points

    HRESULT hr = S_OK;
    EX_TRY
    {
        if (g_pDebugger != NULL)
        {
            // Write the flag directly into the Debugger object in the target.
            TADDR addr = PTR_HOST_MEMBER_TADDR(Debugger, g_pDebugger, m_sendExceptionsOutsideOfJMC);
            SafeWriteStructOrThrow<BOOL>(addr, &sendExceptionsOutsideOfJMC);
        }
    }
    EX_CATCH_HRESULT(hr);

    return hr;
}
// Notify the debuggee that a debugger attach is pending.
// See DacDbiInterface.h for full comments
//
// Sets the DBCF_PENDING_ATTACH bit in the target's g_CORDebuggerControlFlags.
//
// Throws:
//    CORDBG_E_NOTREADY if the left side (g_pDebugger) is not loaded yet.
void DacDbiInterfaceImpl::MarkDebuggerAttachPending()
{
    DD_ENTER_MAY_THROW;

    if (g_pDebugger != NULL)
    {
        // Read-modify-write of the control flags with the pending-attach bit added.
        DWORD flags = g_CORDebuggerControlFlags;
        flags |= DBCF_PENDING_ATTACH;

        // Uses special DAC writing. PTR_TO_TADDR doesn't fetch for globals.
        // @dbgtodo dac support - the exact mechanism of writing to the target needs to be flushed out,
        // especially as it relates to DAC cop and enforcing undac-ized writes.
        g_CORDebuggerControlFlags = flags;
    }
    else
    {
        // Caller should have guaranteed that the LS is loaded.
        // If we're detaching, then don't throw because we don't care.
        ThrowHR(CORDBG_E_NOTREADY);
    }
}
// Notify the debuggee that a debugger is attached (or, with fAttached == FALSE,
// that it has detached).
// See DacDbiInterface.h for full comments
//
// Arguments:
//    fAttached - TRUE to set DBCF_ATTACHED; FALSE to clear both DBCF_ATTACHED
//                and DBCF_PENDING_ATTACH.
//
// Throws:
//    CORDBG_E_NOTREADY when attaching while the left side (g_pDebugger) is not
//    loaded; detach in that state is silently ignored.
void DacDbiInterfaceImpl::MarkDebuggerAttached(BOOL fAttached)
{
    DD_ENTER_MAY_THROW;

    if (g_pDebugger != NULL)
    {
        // To be attached, we need to set the following
        //     g_CORDebuggerControlFlags |= DBCF_ATTACHED;
        // To detach (if !fAttached), we need to do the opposite.
        DWORD flags = g_CORDebuggerControlFlags;
        if (fAttached)
        {
            flags |= DBCF_ATTACHED;
        }
        else
        {
            flags &= ~ (DBCF_ATTACHED | DBCF_PENDING_ATTACH);
        }

        // Uses special DAC writing. PTR_TO_TADDR doesn't fetch for globals.
        // @dbgtodo dac support - the exact mechanism of writing to the target needs to be flushed out,
        // especially as it relates to DAC cop and enforcing undac-ized writes.
        g_CORDebuggerControlFlags = flags;
    }
    else if (fAttached)
    {
        // Caller should have guaranteed that the LS is loaded.
        // If we're detaching, then don't throw because we don't care.
        ThrowHR(CORDBG_E_NOTREADY);
    }
}
// Enumerate all threads in the process, invoking fpCallback for each one that
// is ready to be inspected.
void DacDbiInterfaceImpl::EnumerateThreads(FP_THREAD_ENUMERATION_CALLBACK fpCallback, void * pUserData)
{
    DD_ENTER_MAY_THROW;

    if (ThreadStore::s_pThreadStore == NULL)
    {
        return;
    }

    for (Thread * pCur = ThreadStore::GetThreadList(NULL);
         pCur != NULL;
         pCur = ThreadStore::GetThreadList(pCur))
    {
        // Don't want to publish threads via enumeration before they're ready
        // to be inspected. Skip dead and unstarted threads - the same window
        // that we used in whidbey.
        Thread::ThreadState curState = pCur->GetSnapshotState();
        if (IsThreadMarkedDeadWorker(pCur) || (curState & Thread::TS_Unstarted))
        {
            continue;
        }

        VMPTR_Thread vmCur = VMPTR_Thread::NullPtr();
        vmCur.SetHostPtr(pCur);
        fpCallback(vmCur, pUserData);
    }
}
// Public implementation of IsThreadMarkedDead; validates entry then defers to
// the worker.
bool DacDbiInterfaceImpl::IsThreadMarkedDead(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;
    return IsThreadMarkedDeadWorker(vmThread.GetDacPtr());
}
// Private worker for IsThreadMarkedDead
//
// Arguments:
//    pThread - valid thread to check if dead
//
// Returns:
//    true iff thread is marked as dead.
//
// Notes:
//    This is an internal method that skips public validation.
//    See code:IDacDbiInterface::#IsThreadMarkedDead for purpose.
bool DacDbiInterfaceImpl::IsThreadMarkedDeadWorker(Thread * pThread)
{
    _ASSERTE(pThread != NULL);

    // A thread is dead exactly when TS_Dead is set in its snapshot state.
    return (pThread->GetSnapshotState() & Thread::TS_Dead) != 0;
}
// Return the OS handle of the specified thread.
HANDLE DacDbiInterfaceImpl::GetThreadHandle(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;
    return vmThread.GetDacPtr()->GetThreadHandle();
}
// Return the object handle for the managed Thread object corresponding to the
// specified thread. Throws CORDBG_E_BAD_THREAD_STATE for threads that are
// dead, unstarted, or detached, or while the process is detaching.
VMPTR_OBJECTHANDLE DacDbiInterfaceImpl::GetThreadObject(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;

    Thread * pThread = vmThread.GetDacPtr();
    Thread::ThreadState state = pThread->GetSnapshotState();

    const bool fBadState =
        (state & (Thread::TS_Dead | Thread::TS_Unstarted | Thread::TS_Detached)) != 0;
    if (fBadState || g_fProcessDetach)
    {
        ThrowHR(CORDBG_E_BAD_THREAD_STATE);
    }

    VMPTR_OBJECTHANDLE vmObjHandle = VMPTR_OBJECTHANDLE::NullPtr();
    vmObjHandle.SetDacTargetPtr(pThread->GetExposedObjectHandleForDebugger());
    return vmObjHandle;
}
// Fill in per-thread GC allocation statistics from the thread's allocation context.
//
// Arguments:
//    vmThread        - thread to inspect
//    threadAllocInfo - out parameter receiving the byte counts
void DacDbiInterfaceImpl::GetThreadAllocInfo(VMPTR_Thread vmThread,
                                             DacThreadAllocInfo* threadAllocInfo)
{
    DD_ENTER_MAY_THROW;

    Thread * pThread = vmThread.GetDacPtr();
    gc_alloc_context* allocContext = pThread->GetAllocContext();

    // SOH bytes = total allocated minus the unused tail of the current
    // allocation context (alloc_limit - alloc_ptr is the portion of the
    // context not yet handed out).
    threadAllocInfo->m_allocBytesSOH = allocContext->alloc_bytes - (allocContext->alloc_limit - allocContext->alloc_ptr);
    threadAllocInfo->m_allocBytesUOH = allocContext->alloc_bytes_uoh;
}
// Set and reset the TSNC_DebuggerUserSuspend bit on the state of the specified thread
// according to the CorDebugThreadState.
//
// Arguments:
//    vmThread   - thread whose debugger-suspend bit is updated
//    debugState - THREAD_SUSPEND to set the bit, THREAD_RUN to clear it
//
// Throws:
//    E_INVALIDARG for any other CorDebugThreadState value.
void DacDbiInterfaceImpl::SetDebugState(VMPTR_Thread vmThread,
                                        CorDebugThreadState debugState)
{
    DD_ENTER_MAY_THROW;

    Thread * pThread = vmThread.GetDacPtr();

    // update the field on the host copy
    if (debugState == THREAD_SUSPEND)
    {
        pThread->SetThreadStateNC(Thread::TSNC_DebuggerUserSuspend);
    }
    else if (debugState == THREAD_RUN)
    {
        pThread->ResetThreadStateNC(Thread::TSNC_DebuggerUserSuspend);
    }
    else
    {
        ThrowHR(E_INVALIDARG);
    }

    // update the field on the target copy: write the host-side m_StateNC
    // (already updated above) back into the target process.
    TADDR taThreadState = PTR_HOST_MEMBER_TADDR(Thread, pThread, m_StateNC);
    SafeWriteStructOrThrow<Thread::ThreadStateNoConcurrency>(taThreadState, &(pThread->m_StateNC));
}
// Gets the debugger unhandled exception threadstate flag
BOOL DacDbiInterfaceImpl::HasUnhandledException(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;

    Thread * pThread = vmThread.GetDacPtr();

    // Some managed exceptions don't have any underlying native exception
    // processing going on. They just consist of a managed throwable that we
    // have stashed away, followed by a debugger notification and some form of
    // failfast. Everything that comes through EEFatalError is in this category.
    if (pThread->IsLastThrownObjectUnhandled())
    {
        return TRUE;
    }

    // Most managed exceptions are just a throwable bound to a native
    // exception; in that case this handle is non-null.
    if (pThread->GetThrowableAsHandle() == NULL)
    {
        return FALSE;
    }

    // The unhandled bit is set during the UEF; if it is set the exception was
    // unhandled. However, if the exception has intercept info then we consider
    // it handled again.
    return pThread->GetExceptionState()->GetFlags()->IsUnhandled() &&
           !(pThread->GetExceptionState()->GetFlags()->DebuggerInterceptInfo());
}
// Return the full user state of the specified thread: the partial state plus
// the (stackwalk-derived) unsafe-point bit.
CorDebugUserState DacDbiInterfaceImpl::GetUserState(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;

    UINT stateBits = GetPartialUserState(vmThread);

    if (!IsThreadAtGCSafePlace(vmThread))
    {
        stateBits |= USER_UNSAFE_POINT;
    }

    return (CorDebugUserState)stateBits;
}
// Return the connection ID of the specified thread.
// Always reports INVALID_CONNECTION_ID.
CONNID DacDbiInterfaceImpl::GetConnectionID(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;
    return INVALID_CONNECTION_ID;
}
// Return the task ID of the specified thread.
// Always reports INVALID_TASK_ID.
TASKID DacDbiInterfaceImpl::GetTaskID(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;
    return INVALID_TASK_ID;
}
// Return the OS thread ID of the specified thread, or 0 if the thread is
// switched out and has no OS tid.
DWORD DacDbiInterfaceImpl::TryGetVolatileOSThreadID(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;

    Thread * pThread = vmThread.GetDacPtr();
    _ASSERTE(pThread != NULL);

    DWORD dwTid = pThread->GetOSThreadIdForDebugger();

    // A switched-out thread reports the magic SWITCHED_OUT_FIBER_OSID cookie
    // instead of a real OS tid; the DD contract is to return 0 (a much more
    // sane value) in that case.
    return (dwTid == SWITCHED_OUT_FIBER_OSID) ? 0 : dwTid;
}
// Return the unique thread ID of the specified thread.
DWORD DacDbiInterfaceImpl::GetUniqueThreadID(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;

    Thread * pTargetThread = vmThread.GetDacPtr();
    _ASSERTE(pTargetThread != NULL);

    return pTargetThread->GetOSThreadId();
}
// Return the object handle to the managed Exception object of the current
// exception on the specified thread. The result may be NULL if there is no
// current exception.
VMPTR_OBJECTHANDLE DacDbiInterfaceImpl::GetCurrentException(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;

    Thread * pThread = vmThread.GetDacPtr();

    // OBJECTHANDLEs are really just TADDRs. Prefer the current throwable;
    // when there is none, fall back to the last thrown object if it went
    // unhandled.
    OBJECTHANDLE hException = pThread->GetThrowableAsHandle();   // may be NULL
    if ((hException == NULL) && pThread->IsLastThrownObjectUnhandled())
    {
        hException = pThread->LastThrownObjectHandle();
    }

    VMPTR_OBJECTHANDLE vmHandle;
    vmHandle.SetDacTargetPtr(hException);
    return vmHandle;
}
// Return the object handle to the managed object for a given CCW pointer.
//
// Arguments:
//    ccwPtr - target address of a COM callable wrapper
//
// Returns:
//    VMPTR wrapping the object handle; the handle is NULL if the address does
//    not correspond to a known CCW.
//
// Notes:
//    When both FEATURE_COMWRAPPERS and FEATURE_COMINTEROP are defined, the
//    ComWrappers lookup is tried first and the classic CCW lookup acts as the
//    fallback; the interleaved #ifdef blocks below encode that ordering.
VMPTR_OBJECTHANDLE DacDbiInterfaceImpl::GetObjectForCCW(CORDB_ADDRESS ccwPtr)
{
    DD_ENTER_MAY_THROW;

    OBJECTHANDLE ohCCW = NULL;

#ifdef FEATURE_COMWRAPPERS
    if (DACTryGetComWrappersHandleFromCCW(ccwPtr, &ohCCW) != S_OK)
    {
#endif
#ifdef FEATURE_COMINTEROP
    ComCallWrapper *pCCW = DACGetCCWFromAddress(ccwPtr);
    if (pCCW)
    {
        ohCCW = pCCW->GetObjectHandle();
    }
#endif
#ifdef FEATURE_COMWRAPPERS
    }
#endif

    VMPTR_OBJECTHANDLE vmObjHandle;
    vmObjHandle.SetDacTargetPtr(ohCCW);
    return vmObjHandle;
}
// Return the object handle to the managed CustomNotification object of the current notification
// on the specified thread. The return value could be NULL if there is no current notification.
// Arguments:
//    input: vmThread - the thread on which the notification occurred
// Return value: object handle for the current notification (if any) on the thread. This will
//    be non-null if and only if we are currently inside a CustomNotification callback (or a
//    dump was generated while in this callback).
//
VMPTR_OBJECTHANDLE DacDbiInterfaceImpl::GetCurrentCustomDebuggerNotification(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;

    // OBJECTHANDLEs are really just TADDRs; the handle may be NULL.
    VMPTR_OBJECTHANDLE vmNotificationHandle;
    vmNotificationHandle.SetDacTargetPtr(vmThread.GetDacPtr()->GetThreadCurrNotification());
    return vmNotificationHandle;
}
// Return the current appdomain the specified thread is in; throws E_FAIL if
// the thread has no domain.
VMPTR_AppDomain DacDbiInterfaceImpl::GetCurrentAppDomain(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;

    AppDomain * pDomain = vmThread.GetDacPtr()->GetDomain();
    if (pDomain == NULL)
    {
        ThrowHR(E_FAIL);
    }

    VMPTR_AppDomain vmDomain = VMPTR_AppDomain::NullPtr();
    vmDomain.SetDacTargetPtr(PTR_HOST_TO_TADDR(pDomain));
    return vmDomain;
}
// Returns a bitfield reflecting the managed debugging state at the time of
// the jit attach.
CLR_DEBUGGING_PROCESS_FLAGS DacDbiInterfaceImpl::GetAttachStateFlags()
{
    DD_ENTER_MAY_THROW;

    // When launching the process under a managed debugger we request these
    // flags as soon as the CLR is loaded - before g_pDebugger has had a chance
    // to be initialized. In that case simply report no flags.
    if (g_pDebugger == NULL)
    {
        return (CLR_DEBUGGING_PROCESS_FLAGS)0;
    }

    return g_pDebugger->GetAttachStateFlags();
}
//---------------------------------------------------------------------------------------
// Helper to get the address of the 2nd-chance hijack function Or throw
//
// Returns:
//    Non-null Target Address of hijack function.
//    Throws CORDBG_E_NOTREADY if g_pDebugger is not loaded or the hijack stub
//    has no start address yet.
TADDR DacDbiInterfaceImpl::GetHijackAddress()
{
    TADDR addr = NULL;
    if (g_pDebugger != NULL)
    {
        // Get the start address of the redirect function for unhandled exceptions.
        addr = dac_cast<TADDR>(g_pDebugger->m_rgHijackFunction[Debugger::kUnhandledException].StartAddress());
    }
    if (addr == NULL)
    {
        ThrowHR(CORDBG_E_NOTREADY);
    }
    return addr;
}
//---------------------------------------------------------------------------------------
// Helper to determine whether a control PC is in any native stub which the runtime knows how to unwind.
//
// Arguments:
//    targetControlPC - control PC to be checked
//
// Returns:
//    Returns true if the control PC is in a runtime unwindable stub.
//
// Notes:
//    Currently this function only recognizes the ExceptionHijack() stub,
//    which is used for unhandled exceptions.
//
bool DacDbiInterfaceImpl::IsRuntimeUnwindableStub(PCODE targetControlPC)
{
    TADDR controlPC = PCODEToPINSTR(targetControlPC);

    // we call this function a lot while walking the stack and the values here will never change
    // Getting the g_pDebugger and each entry in the m_rgHijackFunction is potentially ~7 DAC
    // accesses per frame. Caching the data into a single local array is much faster. This optimization
    // recovered a few % of DAC stackwalking time
    if(!m_isCachedHijackFunctionValid)
    {
        Debugger* pDebugger = g_pDebugger;
        if ((pDebugger == NULL) || (pDebugger->m_rgHijackFunction == NULL))
        {
            // The in-process debugging infrastructure hasn't been fully initialized, which means that we could
            // NOT have hijacked anything yet.
            return false;
        }

        // PERF NOTE: if needed this array copy could probably be made more efficient
        // hitting the DAC only once for a single memory block, or even better
        // put the array inline in the Debugger object so that we only do 1 DAC
        // access for this entire thing
        for (int i = 0; i < Debugger::kMaxHijackFunctions; i++)
        {
            InitTargetBufferFromMemoryRange(pDebugger->m_rgHijackFunction[i], &m_pCachedHijackFunction[i] );
        }
        // Only flip the valid flag after every entry has been populated.
        m_isCachedHijackFunctionValid = TRUE;
    }

    // Check whether the control PC is in any of the thread redirection functions.
    for (int i = 0; i < Debugger::kMaxHijackFunctions; i++)
    {
        CORDB_ADDRESS start = m_pCachedHijackFunction[i].pAddress;
        CORDB_ADDRESS end = start + m_pCachedHijackFunction[i].cbSize;
        if ((start <= controlPC) && (controlPC < end))
        {
            return true;
        }
    }
    return false;
}
//---------------------------------------------------------------------------------------
// Align a stack pointer for the given architecture
//
// Arguments:
//    pEsp - in/out: pointer to stack pointer.
//
void DacDbiInterfaceImpl::AlignStackPointer(CORDB_ADDRESS * pEsp)
{
    SUPPORTS_DAC;

#if defined(HOST_64BIT)
    // On 64-bit the stack pointer must be 16-byte aligned. Stacks grow down,
    // so clearing the low four bits rounds down to an aligned address.
    *pEsp &= ~((CORDB_ADDRESS) 0xF);
#else
    // Nop on x86.
#endif
}
//---------------------------------------------------------------------------------------
// Emulate pushing something on a thread's stack.
//
// Arguments:
//    pEsp - in/out: pointer to stack pointer to push object at. On output,
//           updated stack pointer.
//    pData - object to push on the stack.
//    fAlignStack - whether to align the stack pointer before and after the push.
//                  Callers which specify FALSE must be very careful and know exactly
//                  what they are doing.
//
// Return:
//    address of pushed object. Throws on error.
template <class T>
CORDB_ADDRESS DacDbiInterfaceImpl::PushHelper(CORDB_ADDRESS * pEsp,
                                              const T * pData,
                                              BOOL fAlignStack)
{
    SUPPORTS_DAC;

    // Align before reserving space so the reservation starts from an aligned
    // stack pointer.
    if (fAlignStack == TRUE)
    {
        AlignStackPointer(pEsp);
    }

    // Stacks grow down: reserve sizeof(T) bytes below the current pointer.
    *pEsp -= sizeof(T);

    // Re-align after the subtraction, in case sizeof(T) is not a multiple of
    // the alignment requirement.
    if (fAlignStack == TRUE)
    {
        AlignStackPointer(pEsp);
    }

    // Write the object into the target process at the new stack pointer.
    SafeWriteStructOrThrow(*pEsp, pData);
    return *pEsp;
}
//---------------------------------------------------------------------------------------
// Write an EXCEPTION_RECORD structure to the remote target at the specified address while taking
// into account the number of exception parameters. On 64-bit OS and on the WOW64, the OS always
// pushes the entire EXCEPTION_RECORD onto the stack. However, on native x86 OS, the OS only pushes
// enough of the EXCEPTION_RECORD to cover the specified number of exception parameters. Thus we
// need to be extra careful when we overwrite an EXCEPTION_RECORD on the stack.
//
// Arguments:
//    pRemotePtr   - address of the EXCEPTION_RECORD in the remote target
//    pExcepRecord - EXCEPTION_RECORD to be written
//
// Notes:
//    This function is only used by the code which hijacks a thread when there's an unhandled
//    exception. It only works when we are actually debugging a live process, not a dump.
//
void DacDbiInterfaceImpl::WriteExceptionRecordHelper(CORDB_ADDRESS pRemotePtr,
                                                     const EXCEPTION_RECORD * pExcepRecord)
{
    // Only write the fixed-size prefix plus the exception parameters actually
    // present, instead of blindly writing sizeof(EXCEPTION_RECORD).
    ULONG32 cbToWrite = offsetof(EXCEPTION_RECORD, ExceptionInformation);
    cbToWrite += pExcepRecord->NumberParameters * sizeof(pExcepRecord->ExceptionInformation[0]);

    // Write through the mutable data target; this assumes a live process since
    // this helper is only called by the unhandled-exception hijacking code.
    HRESULT hrWrite = m_pMutableTarget->WriteVirtual(pRemotePtr,
                                                     reinterpret_cast<const BYTE *>(pExcepRecord),
                                                     cbToWrite);
    if (FAILED(hrWrite))
    {
        ThrowHR(hrWrite);
    }
}
// Implement IDacDbiInterface::Hijack
//
// Redirect a thread in the target process into the in-process hijack stub:
// capture the thread's context, place the CONTEXT / EXCEPTION_RECORD and the
// stub's arguments on (or reuse space already on) the thread's stack, then
// rewrite the thread's SP/IP so it enters the hijack function when resumed.
//
// Arguments:
//    vmThread           - thread to hijack; may be NULL for threads that might not be managed
//    dwThreadId         - OS thread id of the thread being hijacked
//    pRecord            - exception record; NULL if not hijacking at an exception
//    pOriginalContext   - optional out buffer receiving the pre-hijack context
//    cbSizeContext      - size of pOriginalContext (0 iff pOriginalContext is NULL)
//    reason             - why the hijack is being performed
//    pUserData          - opaque argument passed through to the hijack stub
//    pRemoteContextAddr - optional out parameter receiving the target address of the CONTEXT
void DacDbiInterfaceImpl::Hijack(
    VMPTR_Thread                 vmThread,
    ULONG32                      dwThreadId,
    const EXCEPTION_RECORD *     pRecord,
    T_CONTEXT *                  pOriginalContext,
    ULONG32                      cbSizeContext,
    EHijackReason::EHijackReason reason,
    void *                       pUserData,
    CORDB_ADDRESS *              pRemoteContextAddr)
{
    DD_ENTER_MAY_THROW;

    //
    // Validate parameters
    //

    // pRecord may be NULL if we're not hijacking at an exception
    // pOriginalContext may be NULL if caller doesn't want a copy of the context.
    // (The hijack function already has the context)
    _ASSERTE((pOriginalContext == NULL) == (cbSizeContext == 0));
    _ASSERTE(EHijackReason::IsValid(reason));
#ifdef TARGET_UNIX
    _ASSERTE(!"Not supported on this platform");
#endif

    //
    // If we hijack a thread which might not be managed we can set vmThread = NULL
    // The only side-effect in this case is that we can't reuse CONTEXT and
    // EXCEPTION_RECORD space on the stack by an already underway in-process exception
    // filter. If you depend on those being used and updated you must provide the vmThread
    //
    Thread* pThread = NULL;
    if(!vmThread.IsNull())
    {
        pThread = vmThread.GetDacPtr();
        _ASSERTE(pThread->GetOSThreadIdForDebugger() == dwThreadId);
    }

    TADDR pfnHijackFunction = GetHijackAddress();

    //
    // Setup context for hijack
    //
    T_CONTEXT ctx;
    HRESULT hr = m_pTarget->GetThreadContext(
        dwThreadId,
        CONTEXT_FULL,
        sizeof(ctx),
        (BYTE*) &ctx);
    IfFailThrow(hr);

    // If caller requested, copy back the original context that we're hijacking from.
    if (pOriginalContext != NULL)
    {
        // Since Dac + DBI are tightly coupled, context sizes should be the same.
        if (cbSizeContext != sizeof(T_CONTEXT))
        {
            ThrowHR(E_INVALIDARG);
        }

        memcpy(pOriginalContext, &ctx, cbSizeContext);
    }

    // Make sure the trace flag isn't on. This can happen if we were single stepping the thread when we faulted. This
    // will ensure that we don't try to single step through the OS's exception logic, which greatly confuses our second
    // chance hijack logic. This also mimics what the OS does for us automaically when single stepping in process, i.e.,
    // when you turn the trace flag on in-process and go, if there is a fault, the fault is reported and the trace flag
    // is automatically turned off.
    //
    // The debugger could always re-enable the single-step flag if it wants to.
#ifndef FEATURE_EMULATE_SINGLESTEP
    UnsetSSFlag(reinterpret_cast<DT_CONTEXT *>(&ctx));
#endif

    // Push pointers
    void* espContext = NULL;
    void* espRecord = NULL;
    const void* pData = pUserData;

    // @dbgtodo  cross-plat - this is not cross plat
    CORDB_ADDRESS esp = GetSP(&ctx);

    //
    // Find out where the OS exception dispatcher has pushed the EXCEPTION_RECORD and CONTEXT. The ExInfo and
    // ExceptionTracker have pointers to these data structures, but when we get the unhandled exception
    // notification, the OS exception dispatcher is no longer on the stack, so these pointers are no longer
    // valid. We need to either update these pointers in the ExInfo/ExcepionTracker, or reuse the stack
    // space used by the OS exception dispatcher. We are using the latter approach here.
    //
    CORDB_ADDRESS espOSContext = NULL;
    CORDB_ADDRESS espOSRecord  = NULL;
    if (pThread != NULL && pThread->IsExceptionInProgress())
    {
        espOSContext = (CORDB_ADDRESS)PTR_TO_TADDR(pThread->GetExceptionState()->GetContextRecord());
        espOSRecord  = (CORDB_ADDRESS)PTR_TO_TADDR(pThread->GetExceptionState()->GetExceptionRecord());

        // The managed exception may not be related to the unhandled exception for which we are trying to
        // hijack. An example would be when a thread hits a managed exception, VS tries to do func eval on
        // the thread, but the func eval causes an unhandled exception (e.g. AV in mscorwks.dll). In this
        // case, the pointers stored on the ExInfo/ExceptionTracker are closer to the root than the current
        // SP of the thread. The check below makes sure we don't reuse the pointers in this case.
        if (espOSContext < esp)
        {
            SafeWriteStructOrThrow(espOSContext, &ctx);
            espContext = CORDB_ADDRESS_TO_PTR(espOSContext);

            // We should have an EXCEPTION_RECORD if we are hijacked at an exception.
            // We need to be careful when we overwrite the exception record. On x86, the OS doesn't
            // always push the full record onto the stack, and so we can't blindly use sizeof(EXCEPTION_RECORD).
            // Instead, we have to look at the number of exception parameters and calculate the size.
            _ASSERTE(pRecord != NULL);
            WriteExceptionRecordHelper(espOSRecord, pRecord);
            espRecord = CORDB_ADDRESS_TO_PTR(espOSRecord);

            esp = min(espOSContext, espOSRecord);
        }
    }

    // If we haven't reused the pointers, then push everything at the leaf of the stack.
    if (espContext == NULL)
    {
        _ASSERTE(espRecord == NULL);

        // Push on full Context and ExceptionRecord structures. We'll then push pointers to these,
        // and those pointers will serve as the actual args to the function.
        espContext = CORDB_ADDRESS_TO_PTR(PushHelper(&esp, &ctx, TRUE));

        // If caller didn't pass an exception-record, then we're not being hijacked at an exception.
        // We'll just pass NULL for the exception-record to the Hijack function.
        if (pRecord != NULL)
        {
            espRecord = CORDB_ADDRESS_TO_PTR(PushHelper(&esp, pRecord, TRUE));
        }
    }

    if(pRemoteContextAddr != NULL)
    {
        *pRemoteContextAddr = PTR_TO_CORDB_ADDRESS(espContext);
    }

    //
    // Push args onto the stack to be able to call the hijack function
    //

    // Prototype of hijack is:
    //     void __stdcall ExceptionHijackWorker(CONTEXT * pContext, EXCEPTION_RECORD * pRecord, EHijackReason, void * pData)
    // Set up everything so that the hijack stub can just do a "call" instruction.
    //
    // Regarding stack overflow: We could do an explicit check against the thread's stack base limit.
    // However, we don't need an explicit overflow check because if the stack does overflow,
    // the hijack will just hit a regular stack-overflow exception.
#if defined(TARGET_X86)  // TARGET
    // X86 calling convention is to push args on the stack in reverse order.
    // If we fail here, the stack is written, but esp hasn't been committed yet so it shouldn't matter.
    PushHelper(&esp, &pData, TRUE);
    PushHelper(&esp, &reason, TRUE);
    PushHelper(&esp, &espRecord, TRUE);
    PushHelper(&esp, &espContext, TRUE);
#elif defined (TARGET_AMD64)  // TARGET
    // AMD64 calling convention is to place first 4 parameters in: rcx, rdx, r8 and r9
    ctx.Rcx = (DWORD64) espContext;
    ctx.Rdx = (DWORD64) espRecord;
    ctx.R8  = (DWORD64) reason;
    ctx.R9  = (DWORD64) pData;

    // Caller must allocate stack space to spill for args.
    // Push the arguments onto the outgoing argument homes.
    // Make sure we push pointer-sized values to keep the stack aligned.
    PushHelper(&esp, reinterpret_cast<SIZE_T *>(&(ctx.R9)), FALSE);
    PushHelper(&esp, reinterpret_cast<SIZE_T *>(&(ctx.R8)), FALSE);
    PushHelper(&esp, reinterpret_cast<SIZE_T *>(&(ctx.Rdx)), FALSE);
    PushHelper(&esp, reinterpret_cast<SIZE_T *>(&(ctx.Rcx)), FALSE);
#elif defined(TARGET_ARM)
    ctx.R0 = (DWORD)espContext;
    ctx.R1 = (DWORD)espRecord;
    ctx.R2 = (DWORD)reason;
    ctx.R3 = (DWORD)pData;
#elif defined(TARGET_ARM64)
    ctx.X0 = (DWORD64)espContext;
    ctx.X1 = (DWORD64)espRecord;
    ctx.X2 = (DWORD64)reason;
    ctx.X3 = (DWORD64)pData;
#else
    PORTABILITY_ASSERT("CordbThread::HijackForUnhandledException is not implemented on this platform.");
#endif
    SetSP(&ctx, CORDB_ADDRESS_TO_TADDR(esp));

    // @dbgtodo  cross-plat - not cross-platform safe
    SetIP(&ctx, pfnHijackFunction);

    //
    // Commit the context.
    //
    hr = m_pMutableTarget->SetThreadContext(dwThreadId, sizeof(ctx), reinterpret_cast<BYTE*> (&ctx));
    IfFailThrow(hr);
}
// Return the filter CONTEXT on the LS.
//
// Arguments:
//    vmThread - thread whose stopped context is requested
//
// Returns:
//    VMPTR to the thread's filter context (or its redirected context as a
//    fallback), or NULL if neither is available or the thread was hijacked
//    for interop debugging.
VMPTR_CONTEXT DacDbiInterfaceImpl::GetManagedStoppedContext(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;

    VMPTR_CONTEXT vmContext = VMPTR_CONTEXT::NullPtr();

    Thread * pThread = vmThread.GetDacPtr();
    if (pThread->GetInteropDebuggingHijacked())
    {
        _ASSERTE(!ISREDIRECTEDTHREAD(pThread));
        vmContext = VMPTR_CONTEXT::NullPtr();
    }
    else
    {
        // Prefer the filter context if the thread has one.
        DT_CONTEXT * pLSContext = reinterpret_cast<DT_CONTEXT *>(pThread->GetFilterContext());
        if (pLSContext != NULL)
        {
            _ASSERTE(!ISREDIRECTEDTHREAD(pThread));
            vmContext.SetHostPtr(pLSContext);
        }
        else if (ISREDIRECTEDTHREAD(pThread))
        {
            // Fall back to the redirected context for a redirected thread.
            pLSContext = reinterpret_cast<DT_CONTEXT *>(GETREDIRECTEDCONTEXT(pThread));
            _ASSERTE(pLSContext != NULL);

            if (pLSContext != NULL)
            {
                vmContext.SetHostPtr(pLSContext);
            }
        }
    }

    return vmContext;
}
// Return a TargetBuffer for the raw vararg signature.
//
// Arguments:
//    VASigCookieAddr - target address holding a pointer to a VASigCookie
//    pArgBase        - out parameter: target address of the first vararg argument
//
// Returns:
//    TargetBuffer describing the raw signature blob in the target process.
TargetBuffer DacDbiInterfaceImpl::GetVarArgSig(CORDB_ADDRESS VASigCookieAddr,
                                               CORDB_ADDRESS * pArgBase)
{
    DD_ENTER_MAY_THROW;

    _ASSERTE(pArgBase != NULL);
    *pArgBase = NULL;

    // First, read the VASigCookie pointer.
    TADDR taVASigCookie = NULL;
    SafeReadStructOrThrow(VASigCookieAddr, &taVASigCookie);

    // Now create a DAC copy of VASigCookie.
    VASigCookie * pVACookie = PTR_VASigCookie(taVASigCookie);

    // Figure out where the first argument is; direction is platform-specific.
#if defined(TARGET_X86) // (STACK_GROWS_DOWN_ON_ARGS_WALK)
    *pArgBase = VASigCookieAddr + pVACookie->sizeOfArgs;
#else // !TARGET_X86 (STACK_GROWS_UP_ON_ARGS_WALK)
    *pArgBase = VASigCookieAddr + sizeof(VASigCookie *);
#endif // !TARGET_X86 (STACK_GROWS_UP_ON_ARGS_WALK)

    return TargetBuffer(PTR_TO_CORDB_ADDRESS(pVACookie->signature.GetRawSig()),
                        pVACookie->signature.GetRawSigLen());
}
// Returns TRUE if the type requires 8-byte alignment; throws E_NOTIMPL on
// platforms without FEATURE_64BIT_ALIGNMENT.
BOOL DacDbiInterfaceImpl::RequiresAlign8(VMPTR_TypeHandle thExact)
{
    DD_ENTER_MAY_THROW;

#ifdef FEATURE_64BIT_ALIGNMENT
    // Ask the type's method table whether its instances need 8-byte alignment.
    TypeHandle typeHandle = TypeHandle::FromPtr(thExact.GetDacPtr());
    return typeHandle.AsMethodTable()->RequiresAlign8();
#else
    ThrowHR(E_NOTIMPL);
#endif
}
// Resolve the raw generics token to the real generics type token. The resolution is based on the
// given index.
//
// Arguments:
//    dwExactGenericArgsTokenIndex - 0 means the raw token is the 'this' object pointer;
//                                   ICorDebugInfo::TYPECTXT_ILNUM means the raw token is
//                                   already the real token.
//    rawToken                     - raw token to resolve
//
// Returns:
//    the resolved GENERICS_TYPE_TOKEN; throws CORDBG_E_TARGET_INCONSISTENT for
//    any other index value.
GENERICS_TYPE_TOKEN DacDbiInterfaceImpl::ResolveExactGenericArgsToken(DWORD dwExactGenericArgsTokenIndex,
                                                                      GENERICS_TYPE_TOKEN rawToken)
{
    DD_ENTER_MAY_THROW;

    if (dwExactGenericArgsTokenIndex == 0)
    {
        // In a rare case of VS4Mac debugging VS4Mac ARM64 optimized code we get a null generics argument token. We aren't sure
        // why the token is null, it may be a bug or it may be by design in the runtime. In the interest of time we are working
        // around the issue rather than investigating the root cause. This workaround should only cause us to degrade generic
        // types from exact type parameters to approximate or canonical type parameters. In the future if we discover this issue
        // is happening more frequently than we expect or the workaround is more impactful than we expect we may need to remove
        // this workaround and resolve the underlying issue.
        if (rawToken == 0)
        {
            return rawToken;
        }

        // In this case the real generics type token is the MethodTable of the "this" object.
        // Note that we want the target address here.

        // Incoming rawToken is actually a PTR_Object for the 'this' pointer.
        // Need to do some casting to convert GENERICS_TYPE_TOKEN --> PTR_Object
        TADDR addrObjThis = CORDB_ADDRESS_TO_TADDR(rawToken);
        PTR_Object pObjThis = dac_cast<PTR_Object>(addrObjThis);

        PTR_MethodTable pMT = pObjThis->GetMethodTable();

        // Now package up the PTR_MethodTable back into a GENERICS_TYPE_TOKEN
        TADDR addrMT = dac_cast<TADDR>(pMT);
        GENERICS_TYPE_TOKEN realToken = (GENERICS_TYPE_TOKEN) addrMT;
        return realToken;
    }
    else if (dwExactGenericArgsTokenIndex == (DWORD)ICorDebugInfo::TYPECTXT_ILNUM)
    {
        // rawToken is already initialized correctly.  Nothing to do here.
        return rawToken;
    }

    // The index of the generics type token should not be anything else.
    // This is indeed an error condition, and so we throw here.
    _ASSERTE(!"DDII::REGAT - Unexpected generics type token index.");
    ThrowHR(CORDBG_E_TARGET_INCONSISTENT);
}
// Check if the given method is an IL stub or an LCG method; plain methods
// report kNone.
IDacDbiInterface::DynamicMethodType DacDbiInterfaceImpl::IsILStubOrLCGMethod(VMPTR_MethodDesc vmMethodDesc)
{
    DD_ENTER_MAY_THROW;

    MethodDesc * pMD = vmMethodDesc.GetDacPtr();

    if (pMD->IsILStub())
    {
        return kILStub;
    }
    if (pMD->IsLCGMethod())
    {
        return kLCGMethod;
    }
    return kNone;
}
//---------------------------------------------------------------------------------------
//
// Determine whether the specified thread is at a GC safe place.
//
// Arguments:
//    vmThread   - the thread to be examined
//
// Return Value:
//    Return TRUE if the thread is at a GC safe place.
//    and under what conditions
//
// Notes:
//    This function basically does a one-frame stackwalk.
//    The logic is adopted from Debugger::IsThreadAtSafePlace().
//
BOOL DacDbiInterfaceImpl::IsThreadAtGCSafePlace(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;

    BOOL fIsGCSafe = FALSE;
    Thread * pThread = vmThread.GetDacPtr();

    // Check if the runtime has entered "Shutdown for Finalizer" mode.
    // If so, everything is treated as GC safe.
    if ((g_fEEShutDown & ShutDown_Finalize2) != 0)
    {
        fIsGCSafe = TRUE;
    }
    else
    {
        // Set up a stackwalk over just the leaf-most frame.
        T_CONTEXT ctx;
        REGDISPLAY rd;
        SetUpRegdisplayForStackWalk(pThread, &ctx, &rd);

        ULONG32 flags = (QUICKUNWIND | HANDLESKIPPEDFRAMES | DISABLE_MISSING_FRAME_DETECTION);

        StackFrameIterator iter;
        iter.Init(pThread, pThread->GetFrame(), &rd, flags);

        CrawlFrame * pCF = &(iter.m_crawl);

        // Only a frameless (JITted) active function can be GC safe here.
        if (pCF->IsFrameless() && pCF->IsActiveFunc())
        {
            if (pCF->IsGcSafe())
            {
                fIsGCSafe = TRUE;
            }
        }
    }

    return fIsGCSafe;
}
//---------------------------------------------------------------------------------------
//
// Return a partial user state of the specified thread. The returned user state doesn't contain
// information about USER_UNSAFE_POINT. The caller needs to call IsThreadAtGCSafePlace() to get
// the full user state.
//
// Arguments:
// vmThread - the specified thread
//
// Return Value:
// Return the partial user state except for USER_UNSAFE_POINT
//
// Compute the user state flags for the given thread. The result excludes
// USER_UNSAFE_POINT; callers combine this with IsThreadAtGCSafePlace() to
// obtain the complete user state.
CorDebugUserState DacDbiInterfaceImpl::GetPartialUserState(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;

    Thread * pThread = vmThread.GetDacPtr();
    Thread::ThreadState snapshot = pThread->GetSnapshotState();

    UINT state = 0;

    if ((snapshot & Thread::TS_Background) != 0)
    {
        state |= USER_BACKGROUND;
    }

    if ((snapshot & Thread::TS_Unstarted) != 0)
    {
        state |= USER_UNSTARTED;
    }

    // Don't report a StopRequested if the thread has actually stopped.
    if ((snapshot & Thread::TS_Dead) != 0)
    {
        state |= USER_STOPPED;
    }

    // The interruptible flag is unreliable (see issue 699245).
    // The Debugger_SleepWaitJoin is always accurate when it is present, but it
    // is still just a band-aid fix to cover some of the race conditions that
    // the interruptible flag has.
    if (((snapshot & Thread::TS_Interruptible) != 0) ||
        pThread->HasThreadStateNC(Thread::TSNC_DebuggerSleepWaitJoin))
    {
        state |= USER_WAIT_SLEEP_JOIN;
    }

    if (pThread->IsThreadPoolThread())
    {
        state |= USER_THREADPOOL;
    }

    return (CorDebugUserState)state;
}
//---------------------------------------------------------------------------------------
//
// Look up the EnC version number of a particular jitted instance of a managed method.
//
// Arguments:
// pModule - the module containing the managed method
// vmMethodDesc - the MethodDesc of the managed method
// mdMethod - the MethodDef metadata token of the managed method
// pNativeStartAddress - the native start address of the jitted code
// pJittedInstanceEnCVersion - out parameter; the version number of the version
// corresponding to the specified native start address
// pLatestEnCVersion - out parameter; the version number of the latest version
//
// Assumptions:
// vmMethodDesc and mdMethod must match (see below).
//
// Notes:
// mdMethod is not strictly necessary, since we can always get that from vmMethodDesc.
// It is just a perf optimization since the caller has the metadata token around already.
//
// Today, there is no way to retrieve the EnC version number from the RS data structures.
// This primitive uses DAC to retrieve it from the LS data structures. This function may
// very well be ripped out in the future if we DACize this information, but the current
// thinking is that some of the RS data structures will remain, most likely in a reduced form.
//
void DacDbiInterfaceImpl::LookupEnCVersions(Module* pModule,
                                            VMPTR_MethodDesc vmMethodDesc,
                                            mdMethodDef mdMethod,
                                            CORDB_ADDRESS pNativeStartAddress,
                                            SIZE_T * pLatestEnCVersion,
                                            SIZE_T * pJittedInstanceEnCVersion /* = NULL */)
{
    MethodDesc * pMD = vmMethodDesc.GetDacPtr();

    // make sure the vmMethodDesc and mdMethod match
    _ASSERTE(pMD->GetMemberDef() == mdMethod);
    _ASSERTE(pLatestEnCVersion != NULL);

    // @dbgtodo inspection - once we do EnC, stop using DMIs.
    // If the method wasn't EnCed, DMIs may not exist. And since this is DAC, we can't create them.
    // We may not have the memory for the DebuggerMethodInfos in a minidump.
    // When dump debugging EnC information isn't very useful so just fallback
    // to default version.
    DebuggerMethodInfo * pDMI = NULL;
    DebuggerJitInfo * pDJI = NULL;

    // Tolerate missing target memory: on a minidump these lookups may fail,
    // in which case pDMI/pDJI simply stay NULL and we use the defaults below.
    EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY
    {
        pDMI = g_pDebugger->GetOrCreateMethodInfo(pModule, mdMethod);
        if (pDMI != NULL)
        {
            // Find the jitted instance whose code starts at pNativeStartAddress.
            pDJI = pDMI->FindJitInfo(pMD, CORDB_ADDRESS_TO_TADDR(pNativeStartAddress));
        }
    }
    EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY;

    if (pDJI != NULL)
    {
        if (pJittedInstanceEnCVersion != NULL)
        {
            *pJittedInstanceEnCVersion = pDJI->m_encVersion;
        }
        *pLatestEnCVersion = pDMI->GetCurrentEnCVersion();
    }
    else
    {
        // If we have no DMI/DJI, then we must never have EnCed. So we can use default EnC info
        // Several cases where we don't have a DMI/DJI:
        // - LCG methods
        // - method was never "touched" by debugger. (DJIs are created lazily).
        if (pJittedInstanceEnCVersion != NULL)
        {
            *pJittedInstanceEnCVersion = CorDB_DEFAULT_ENC_FUNCTION_VERSION;
        }
        *pLatestEnCVersion = CorDB_DEFAULT_ENC_FUNCTION_VERSION;
    }
}
// Get the address of the Debugger control block on the helper thread
// Arguments: none
// Return Value: The remote address of the Debugger control block allocated on the helper thread
// if it has been successfully allocated or NULL otherwise.
// Returns the remote address of the Debugger Control Block allocated on the
// helper thread, or NULL if the debugger/RC thread has not been set up yet.
CORDB_ADDRESS DacDbiInterfaceImpl::GetDebuggerControlBlockAddress()
{
    DD_ENTER_MAY_THROW;

    // Either the debugger or its RC thread may not exist yet.
    if ((g_pDebugger == NULL) || (g_pDebugger->m_pRCThread == NULL))
    {
        return NULL;
    }

    return CORDB_ADDRESS(dac_cast<TADDR>(g_pDebugger->m_pRCThread->GetDCB()));
}
// DacDbi API: Get the context for a particular thread of the target process
void DacDbiInterfaceImpl::GetContext(VMPTR_Thread vmThread, DT_CONTEXT * pContextBuffer)
{
    DD_ENTER_MAY_THROW

    _ASSERTE(pContextBuffer != NULL);

    Thread *  pThread  = vmThread.GetDacPtr();

    // @dbgtodo  Once the filter context is removed, then we should always
    // start with the leaf CONTEXT.
    DT_CONTEXT * pFilterContext = reinterpret_cast<DT_CONTEXT *>(pThread->GetFilterContext());

    if (pFilterContext == NULL)
    {
        // If the filter context is NULL, then we use the true context of the thread.
        pContextBuffer->ContextFlags = DT_CONTEXT_ALL;
        HRESULT hr = m_pTarget->GetThreadContext(pThread->GetOSThreadId(),
                                                 pContextBuffer->ContextFlags,
                                                 sizeof(*pContextBuffer),
                                                 reinterpret_cast<BYTE *>(pContextBuffer));
        if (hr == E_NOTIMPL)
        {
            // GetThreadContext is not implemented on this data target.
            // That's why we have to make do with context we can obtain from Frames explicitly stored in Thread object.
            // It suffices for managed debugging stackwalk.
            REGDISPLAY tmpRd = {};
            T_CONTEXT tmpContext = {};
            FillRegDisplay(&tmpRd, &tmpContext);

            // Going through thread Frames and looking for first (deepest one) one that
            // that has context available for stackwalking (SP and PC)
            // For example: RedirectedThreadFrame, InlinedCallFrame, HelperMethodFrame, ComPlusMethodFrame
            Frame *frame = pThread->GetFrame();
            while (frame != NULL && frame != FRAME_TOP)
            {
                frame->UpdateRegDisplay(&tmpRd);
                if (GetRegdisplaySP(&tmpRd) != 0 && GetControlPC(&tmpRd) != 0)
                {
                    // Found a frame with a usable SP/PC pair; report only the
                    // control portion of the context since that is all we have.
                    UpdateContextFromRegDisp(&tmpRd, &tmpContext);
                    CopyMemory(pContextBuffer, &tmpContext, sizeof(*pContextBuffer));
                    pContextBuffer->ContextFlags = DT_CONTEXT_CONTROL;
                    return;
                }
                frame = frame->Next();
            }

            // It looks like this thread is not running managed code.
            ZeroMemory(pContextBuffer, sizeof(*pContextBuffer));
        }
        else
        {
            IfFailThrow(hr);
        }
    }
    else
    {
        // A filter context is present (e.g. the thread was hijacked/redirected);
        // it is the authoritative context to report.
        *pContextBuffer = *pFilterContext;
    }
} // DacDbiInterfaceImpl::GetContext
// Create a VMPTR_Object from a target object address
// @dbgtodo validate the VMPTR_Object is in fact a object, possibly by DACizing
// Object::Validate
// Wrap a raw target object address in a VMPTR_Object.
// @dbgtodo validate the VMPTR_Object is in fact an object, possibly by
// DACizing Object::Validate
VMPTR_Object DacDbiInterfaceImpl::GetObject(CORDB_ADDRESS ptr)
{
    DD_ENTER_MAY_THROW;

    TADDR objAddr = CORDB_ADDRESS_TO_TADDR(ptr);

    VMPTR_Object result = VMPTR_Object::NullPtr();
    result.SetDacTargetPtr(objAddr);
    return result;
}
// NGEN policies are not supported by this runtime; always E_NOTIMPL.
HRESULT DacDbiInterfaceImpl::EnableNGENPolicy(CorDebugNGENPolicy ePolicy)
{
    return E_NOTIMPL;
}
// NGEN is not supported by this runtime; setting compiler flags always fails.
HRESULT DacDbiInterfaceImpl::SetNGENCompilerFlags(DWORD dwFlags)
{
    DD_ENTER_MAY_THROW;

    return CORDBG_E_NGEN_NOT_SUPPORTED;
}
// NGEN is not supported by this runtime; querying compiler flags always fails.
HRESULT DacDbiInterfaceImpl::GetNGENCompilerFlags(DWORD *pdwFlags)
{
    DD_ENTER_MAY_THROW;

    return CORDBG_E_NGEN_NOT_SUPPORTED;
}
typedef DPTR(OBJECTREF) PTR_ObjectRef;
// Create a VMPTR_Object from an address which points to a reference to an object
// @dbgtodo validate the VMPTR_Object is in fact a object, possibly by DACizing
// Object::Validate
// Create a VMPTR_Object from the address of an object *reference* slot
// (i.e. dereference the slot to find the actual object).
// @dbgtodo validate the VMPTR_Object is in fact an object, possibly by
// DACizing Object::Validate
VMPTR_Object DacDbiInterfaceImpl::GetObjectFromRefPtr(CORDB_ADDRESS ptr)
{
    DD_ENTER_MAY_THROW;

    // ptr addresses an OBJECTREF slot; read the slot to get the object itself.
    PTR_ObjectRef pRef = PTR_ObjectRef(CORDB_ADDRESS_TO_TADDR(ptr));

    VMPTR_Object result = VMPTR_Object::NullPtr();
    result.SetDacTargetPtr(PTR_TO_TADDR(*pRef));
    return result;
}
// Create a VMPTR_OBJECTHANDLE from a handle
// Wrap a raw handle address in a VMPTR_OBJECTHANDLE.
VMPTR_OBJECTHANDLE DacDbiInterfaceImpl::GetVmObjectHandle(CORDB_ADDRESS handleAddress)
{
    DD_ENTER_MAY_THROW;

    TADDR handleAddr = CORDB_ADDRESS_TO_TADDR(handleAddress);

    VMPTR_OBJECTHANDLE result = VMPTR_OBJECTHANDLE::NullPtr();
    result.SetDacTargetPtr(handleAddr);
    return result;
}
// Validate that the VMPTR_OBJECTHANDLE refers to a legitimate managed object
BOOL DacDbiInterfaceImpl::IsVmObjectHandleValid(VMPTR_OBJECTHANDLE vmHandle)
{
    DD_ENTER_MAY_THROW;

    BOOL ret = FALSE;

    // this may cause unallocated debuggee memory to be read
    // SEH exceptions will be caught
    EX_TRY
    {
        OBJECTREF objRef = ObjectFromHandle((OBJECTHANDLE)vmHandle.GetDacPtr());

        // NULL is certainly valid...
        if (objRef != NULL)
        {
            // Best-effort sanity check; may AV on a bad handle, which the
            // surrounding EX_CATCH swallows (leaving ret == FALSE).
            if (objRef->ValidateObjectWithPossibleAV())
            {
                ret = TRUE;
            }
        }
    }
    EX_CATCH
    {
        // Any failure (including an AV reading target memory) means the
        // handle is treated as invalid.
    }
    EX_END_CATCH(SwallowAllExceptions);

    return ret;
}
// determines if the specified module is a WinRT module
// Determines if the specified module is a WinRT module.
// WinRT is not supported here, so the answer is always FALSE.
HRESULT DacDbiInterfaceImpl::IsWinRTModule(VMPTR_Module vmModule, BOOL& isWinRT)
{
    DD_ENTER_MAY_THROW;

    isWinRT = FALSE;
    return S_OK;
}
// Determines the app domain id for the object refered to by a given VMPTR_OBJECTHANDLE
// Determines the app domain id for the object referred to by the given
// VMPTR_OBJECTHANDLE. There is a single AppDomain, so this always reports
// the default id.
ULONG DacDbiInterfaceImpl::GetAppDomainIdFromVmObjectHandle(VMPTR_OBJECTHANDLE vmHandle)
{
    DD_ENTER_MAY_THROW;

    return DefaultADID;
}
// Get the target address from a VMPTR_OBJECTHANDLE, i.e., the handle address
// Get the target address from a VMPTR_OBJECTHANDLE, i.e. the handle address itself.
CORDB_ADDRESS DacDbiInterfaceImpl::GetHandleAddressFromVmHandle(VMPTR_OBJECTHANDLE vmHandle)
{
    DD_ENTER_MAY_THROW;

    return vmHandle.GetDacPtr();
}
// Create a TargetBuffer which describes the location of the object
// Describe the location and size of the object as a TargetBuffer.
TargetBuffer DacDbiInterfaceImpl::GetObjectContents(VMPTR_Object vmObj)
{
    DD_ENTER_MAY_THROW;

    PTR_Object pObj = vmObj.GetDacPtr();

    // TargetBuffer lengths are 32-bit; object sizes must fit.
    SIZE_T cbObject = pObj->GetSize();
    _ASSERTE(cbObject <= 0xffffffff);

    return TargetBuffer(PTR_TO_TADDR(pObj), (ULONG)cbObject);
}
// ============================================================================
// functions to get information about objects referenced via an instance of CordbReferenceValue or
// CordbHandleValue
// ============================================================================
// DacDbiInterfaceImpl::FastSanityCheckObject
// Helper function for CheckRef. Sanity check an object.
// We use a fast and easy check to improve confidence that objPtr points to a valid object.
// We can't tell cheaply if this is really a valid object (that would require walking the GC heap), but at
// least we can check if we get an EEClass from the supposed method table and then get the method table from
// the class. If we can, we have improved the probability that the object is valid.
// Arguments:
// input: objPtr - address of the object we are checking
// Return Value: E_INVALIDARG or S_OK.
HRESULT DacDbiInterfaceImpl::FastSanityCheckObject(PTR_Object objPtr)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    HRESULT hr = S_OK;

    EX_TRY
    {
        // NULL is certainly valid...
        if (objPtr != NULL)
        {
            // Cheap check: follow MethodTable -> EEClass -> MethodTable.
            // May AV on garbage memory; the EX_CATCH below handles that.
            if (!objPtr->ValidateObjectWithPossibleAV())
            {
                LOG((LF_CORDB, LL_INFO10000, "GOI: object methodtable-class invariant doesn't hold.\n"));
                hr = E_INVALIDARG;
            }
        }
    }
    EX_CATCH
    {
        // Any exception (e.g. AV while reading target memory) means the
        // reference is bad.
        LOG((LF_CORDB, LL_INFO10000, "GOI: exception indicated ref is bad.\n"));
        hr = E_INVALIDARG;
    }
    EX_END_CATCH(SwallowAllExceptions);

    return hr;
}   // DacDbiInterfaceImpl::FastSanityCheckObject
// Perform a sanity check on an object address to determine if this _could be_ a valid object.
// We can't tell this for certain without walking the GC heap, but we do some fast tests to rule
// out clearly invalid object addresses. See code:DacDbiInterfaceImpl::FastSanityCheckObject for more
// details.
// Arguments:
// input: objPtr - address of the object we are checking
// Return Value:
// objRefBad - true iff we have determined the address cannot be pointing to a valid object.
// Note that a value of false doesn't necessarily guarantee the object is really
// valid
// Perform a fast plausibility check on an object reference.
// Returns true iff the address definitely cannot be a valid object; false
// does NOT guarantee validity (a full GC-heap walk would be required for that).
bool DacDbiInterfaceImpl::CheckRef(PTR_Object objPtr)
{
    // Shortcut null references now...
    if (objPtr == NULL)
    {
        LOG((LF_CORDB, LL_INFO10000, "D::GOI: ref is NULL.\n"));
        return true;
    }

    // Try to verify the integrity of the object. This is not fool proof.
    // @todo - this whole idea of expecting AVs is broken, but it does rule
    // out a fair bit of rubbish. Find another way to test if the object
    // is valid?
    if (FAILED(FastSanityCheckObject(objPtr)))
    {
        LOG((LF_CORDB, LL_INFO10000, "D::GOI: address is not a valid object.\n"));
        return true;
    }

    return false;
}   // DacDbiInterfaceImpl::CheckRef
// DacDbiInterfaceImpl::InitObjectData
// Initialize basic object information: type handle, object size, offset to fields and expanded type
// information.
// Arguments:
// input: objPtr - address of object of interest
// vmAppDomain - AppDomain for the type f the object
// output: pObjectData - object information
// Note: It is assumed that pObjectData is non-null.
void DacDbiInterfaceImpl::InitObjectData(PTR_Object objPtr,
                                         VMPTR_AppDomain vmAppDomain,
                                         DebuggerIPCE_ObjectData * pObjectData)
{
    _ASSERTE(pObjectData != NULL);

    // @todo - this is still dangerous because the object may still be invalid.
    VMPTR_TypeHandle vmTypeHandle = VMPTR_TypeHandle::NullPtr();
    vmTypeHandle.SetDacTargetPtr(objPtr->GetGCSafeTypeHandle().AsTAddr());

    // Save basic object info: total size and the offset from the object
    // header to the first instance field.
    pObjectData->objSize = objPtr->GetSize();
    pObjectData->objOffsetToVars = dac_cast<TADDR>((objPtr)->GetData()) - dac_cast<TADDR>(objPtr);
    TypeHandleToExpandedTypeInfo(AllBoxed, vmAppDomain, vmTypeHandle, &(pObjectData->objTypeData));

    // If this is a string object, set the type to ELEMENT_TYPE_STRING.
    if (objPtr->GetGCSafeMethodTable() == g_pStringClass)
    {
        pObjectData->objTypeData.elementType = ELEMENT_TYPE_STRING;

        // Pad undersized strings up to the minimum object size.
        if(pObjectData->objSize < MIN_OBJECT_SIZE)
        {
            pObjectData->objSize = PtrAlign(pObjectData->objSize);
        }
    }
} // DacDbiInterfaceImpl::InitObjectData
// DAC/DBI API
// Get object information for a TypedByRef object (System.TypedReference).
// These are objects that contain a managed pointer to a location and the type of the value at that location.
// They are most commonly used for varargs but also may be used for parameters and locals. They are
// stack-allocated. They provide a means for adding dynamic type information to a value type, whereas boxing
// provides only static type information. This means they can be passed as reference parameters to
// polymorphic methods that don't statically restrict the type of arguments they can receive.
// Although they are represented simply as an address, unlike other object references, they don't point
// directly to the object. Instead, there is an extra level of indirection. The reference points to a struct
// that contains the address of the object, so we need to treat them differently. They have their own
// CorElementType (ELEMENT_TYPE_TYPEDBYREF) which makes it possible to identify this special case.
// Example:
// static int AddABunchOfInts (__arglist)
// {
// int result = 0;
//
// System.ArgIterator iter = new System.ArgIterator (__arglist);
// int argCount = iter.GetRemainingCount();
//
// for (int i = 0; i < argCount; i++)
// {
// System.TypedReference typedRef = iter.GetNextArg();
// result += (int)TypedReference.ToObject(typedRef);
// }
//
// return result;
// }
//
// static int Main (string[] args)
// {
// int result = AddABunchOfInts (__arglist (2, 3, 4));
// Console.WriteLine ("Answer: {0}", result);
//
// if (result != 9)
// return 1;
//
// return 0;
// }
// Initializes the objRef and typedByRefType fields of pObjectData (type info for the referent).
void DacDbiInterfaceImpl::GetTypedByRefInfo(CORDB_ADDRESS pTypedByRef,
                                            VMPTR_AppDomain vmAppDomain,
                                            DebuggerIPCE_ObjectData * pObjectData)
{
    DD_ENTER_MAY_THROW;

    // pTypedByRef is really the address of a TypedByRef struct rather than of a normal object.
    // The data field of the TypedByRef struct is the actual object ref.
    PTR_TypedByRef refAddr = PTR_TypedByRef(TADDR(pTypedByRef));

    _ASSERTE(refAddr != NULL);
    _ASSERTE(pObjectData != NULL);

    // The type of the referent is in the type field of the TypedByRef. We need to initialize the object
    // data type information.
    TypeHandleToBasicTypeInfo(refAddr->type,
                              &(pObjectData->typedByrefInfo.typedByrefType),
                              vmAppDomain.GetDacPtr());

    // The reference to the object is in the data field of the TypedByRef.
    CORDB_ADDRESS tempRef = dac_cast<TADDR>(refAddr->data);
    pObjectData->objRef = CORDB_ADDRESS_TO_PTR(tempRef);

    LOG((LF_CORDB, LL_INFO10000, "D::GASOI: sending REFANY result: "
         "ref=0x%08x, cls=0x%08x, mod=0x%p\n",
         pObjectData->objRef,
         pObjectData->typedByrefType.metadataToken,
         pObjectData->typedByrefType.vmDomainAssembly.GetDacPtr()));
} // DacDbiInterfaceImpl::GetTypedByRefInfo
// Get the string data associated withn obj and put it into the pointers
// DAC/DBI API
// Get the string length and offset to string base for a string object
void DacDbiInterfaceImpl::GetStringData(CORDB_ADDRESS objectAddress, DebuggerIPCE_ObjectData * pObjectData)
{
    DD_ENTER_MAY_THROW;

    PTR_Object objPtr = PTR_Object(TADDR(objectAddress));

    LOG((LF_CORDB, LL_INFO10000, "D::GOI: The referent is a string.\n"));

    // The caller claims this is a string; verify against the actual method table.
    if (objPtr->GetGCSafeMethodTable() != g_pStringClass)
    {
        ThrowHR(CORDBG_E_TARGET_INCONSISTENT);
    }

    PTR_StringObject pString = dac_cast<PTR_StringObject>(objPtr);
    _ASSERTE(pString != NULL);

    // Report the character count and the offset from the object start to the
    // first character of the buffer.
    pObjectData->stringInfo.length = pString->GetStringLength();
    pObjectData->stringInfo.offsetToStringBase = (UINT_PTR) pString->GetBufferOffset();
} // DacDbiInterfaceImpl::GetStringData
// DAC/DBI API
// Get information for an array type referent of an objRef, including rank, upper and lower
// bounds, element size and type, and the number of elements.
void DacDbiInterfaceImpl::GetArrayData(CORDB_ADDRESS objectAddress, DebuggerIPCE_ObjectData * pObjectData)
{
    DD_ENTER_MAY_THROW;

    PTR_Object objPtr = PTR_Object(TADDR(objectAddress));
    PTR_MethodTable pMT = objPtr->GetGCSafeMethodTable();

    if (!objPtr->GetGCSafeTypeHandle().IsArray())
    {
        LOG((LF_CORDB, LL_INFO10000,
             "D::GASOI: object should be an array.\n"));

        pObjectData->objRefBad = true;
    }
    else
    {
        PTR_ArrayBase arrPtr = dac_cast<PTR_ArrayBase>(objPtr);

        // this is also returned in the type information for the array - we return both for sanity checking...
        pObjectData->arrayInfo.rank = arrPtr->GetRank();
        pObjectData->arrayInfo.componentCount = arrPtr->GetNumComponents();
        pObjectData->arrayInfo.offsetToArrayBase = arrPtr->GetDataPtrOffset(pMT);

        // Only multi-dimensional arrays store bounds; for SZ arrays the
        // bounds offsets are reported as 0.
        if (arrPtr->IsMultiDimArray())
        {
            pObjectData->arrayInfo.offsetToUpperBounds = SIZE_T(arrPtr->GetBoundsOffset(pMT));
            pObjectData->arrayInfo.offsetToLowerBounds = SIZE_T(arrPtr->GetLowerBoundsOffset(pMT));
        }
        else
        {
            pObjectData->arrayInfo.offsetToUpperBounds = 0;
            pObjectData->arrayInfo.offsetToLowerBounds = 0;
        }

        pObjectData->arrayInfo.elementSize = arrPtr->GetComponentSize();

        LOG((LF_CORDB, LL_INFO10000, "D::GOI: array info: "
             "baseOff=%d, lowerOff=%d, upperOff=%d, cnt=%d, rank=%d, rank (2) = %d,"
             "eleSize=%d, eleType=0x%02x\n",
             pObjectData->arrayInfo.offsetToArrayBase,
             pObjectData->arrayInfo.offsetToLowerBounds,
             pObjectData->arrayInfo.offsetToUpperBounds,
             pObjectData->arrayInfo.componentCount,
             pObjectData->arrayInfo.rank,
             pObjectData->objTypeData.ArrayTypeData.arrayRank,
             pObjectData->arrayInfo.elementSize,
             pObjectData->objTypeData.ArrayTypeData.arrayTypeArg.elementType));
    }
} // DacDbiInterfaceImpl::GetArrayData
// DAC/DBI API: Get information about an object for which we have a reference, including the object size and
// type information.
// DAC/DBI API: Get information about an object for which we have a reference,
// including the object size and type information.
void DacDbiInterfaceImpl::GetBasicObjectInfo(CORDB_ADDRESS objectAddress,
                                             CorElementType type,
                                             VMPTR_AppDomain vmAppDomain,
                                             DebuggerIPCE_ObjectData * pObjectData)
{
    DD_ENTER_MAY_THROW;

    PTR_Object objPtr = PTR_Object(TADDR(objectAddress));

    // First decide whether the reference plausibly points at a valid object.
    pObjectData->objRefBad = CheckRef(objPtr);
    if (!pObjectData->objRefBad)
    {
        // Initialize object type, size and offset information. Note: We may have
        // a different element type after this. For example, we may start with
        // E_T_CLASS but return with something more specific.
        InitObjectData(objPtr, vmAppDomain, pObjectData);
    }
} // DacDbiInterfaceImpl::GetBasicObjectInfo
// This is the data passed to EnumerateBlockingObjectsCallback below
struct BlockingObjectUserDataWrapper
{
    CALLBACK_DATA pUserData;     // opaque data to hand back to the user's callback
    IDacDbiInterface::FP_BLOCKINGOBJECT_ENUMERATION_CALLBACK fpCallback;  // the user's callback
};
// The callback helper used by EnumerateBlockingObjects below, this
// callback in turn invokes the user's callback with the right arguments
void EnumerateBlockingObjectsCallback(PTR_DebugBlockingItem obj, VOID* pUserData)
{
    BlockingObjectUserDataWrapper* wrapper = (BlockingObjectUserDataWrapper*)pUserData;
    DacBlockingObject dacObj;

    // init to an arbitrary value to avoid mac compiler error about unintialized use
    // it will be correctly set in the switch and is never used with only this init here
    dacObj.blockingReason = DacBlockReason_MonitorCriticalSection;

    // Translate the LS DebugBlockingItem into the DAC-facing DacBlockingObject.
    dacObj.vmBlockingObject.SetDacTargetPtr(dac_cast<TADDR>(OBJECTREFToObject(obj->pMonitor->GetOwningObject())));
    dacObj.dwTimeout = obj->dwTimeout;
    dacObj.vmAppDomain.SetDacTargetPtr(dac_cast<TADDR>(obj->pAppDomain));

    switch(obj->type)
    {
        case DebugBlock_MonitorCriticalSection:
            dacObj.blockingReason = DacBlockReason_MonitorCriticalSection;
            break;
        case DebugBlock_MonitorEvent:
            dacObj.blockingReason = DacBlockReason_MonitorEvent;
            break;
        default:
            // Unknown blocking type: don't forward a bogus entry to the user.
            _ASSERTE(!"obj->type has an invalid value");
            return;
    }

    wrapper->fpCallback(dacObj, wrapper->pUserData);
}
// DAC/DBI API:
// Enumerate all monitors blocking a thread
// DAC/DBI API:
// Enumerate all monitors blocking the given thread, invoking fpCallback once
// per blocking item.
void DacDbiInterfaceImpl::EnumerateBlockingObjects(VMPTR_Thread vmThread,
                                                   FP_BLOCKINGOBJECT_ENUMERATION_CALLBACK fpCallback,
                                                   CALLBACK_DATA pUserData)
{
    DD_ENTER_MAY_THROW;

    Thread * pThread = vmThread.GetDacPtr();
    _ASSERTE(pThread != NULL);

    // Package the user's callback and data so the static visitor can forward
    // each blocking item to it.
    BlockingObjectUserDataWrapper wrapper = { pUserData, fpCallback };

    pThread->DebugBlockingInfo.VisitBlockingItems((DebugBlockingItemVisitor)EnumerateBlockingObjectsCallback,
                                                  (VOID*)&wrapper);
}
// DAC/DBI API:
// Returns the thread which owns the monitor lock on an object and the acquisition count
// DAC/DBI API:
// Returns the thread which owns the monitor lock on an object, plus the
// acquisition count. A null lockOwner / zero count means the lock is unowned.
MonitorLockInfo DacDbiInterfaceImpl::GetThreadOwningMonitorLock(VMPTR_Object vmObject)
{
    DD_ENTER_MAY_THROW;

    MonitorLockInfo info;
    info.lockOwner = VMPTR_Thread::NullPtr();
    info.acquisitionCount = 0;

    Object* pObj = vmObject.GetDacPtr();

    // Ask the object header which thread id (if any) owns the monitor.
    DWORD threadId = 0;
    DWORD acquisitionCount = 0;
    if (!pObj->GetHeader()->GetThreadOwningMonitorLock(&threadId, &acquisitionCount))
    {
        // Unowned: report the null/zero defaults.
        return info;
    }

    // Map the owning thread id back to a Thread object via the thread store.
    for (Thread *pThread = ThreadStore::GetThreadList(NULL);
         pThread != NULL;
         pThread = ThreadStore::GetThreadList(pThread))
    {
        if (pThread->GetThreadId() == threadId)
        {
            info.lockOwner.SetDacTargetPtr(PTR_HOST_TO_TADDR(pThread));
            info.acquisitionCount = acquisitionCount;
            return info;
        }
    }

    _ASSERTE(!"A thread should have been found");
    return info;
}
// The data passed to EnumerateThreadsCallback below
struct ThreadUserDataWrapper
{
    CALLBACK_DATA pUserData;     // opaque data to hand back to the user's callback
    IDacDbiInterface::FP_THREAD_ENUMERATION_CALLBACK fpCallback;  // the user's callback
};
// The callback helper used for EnumerateMonitorEventWaitList below. This callback
// invokes the user's callback with the correct arguments.
// Visitor helper for EnumerateMonitorEventWaitList: wraps the raw thread
// pointer in a VMPTR_Thread and forwards it to the user's callback.
void EnumerateThreadsCallback(PTR_Thread pThread, VOID* pUserData)
{
    ThreadUserDataWrapper* pWrapper = (ThreadUserDataWrapper*)pUserData;

    VMPTR_Thread vmThread = VMPTR_Thread::NullPtr();
    vmThread.SetDacTargetPtr(dac_cast<TADDR>(pThread));

    pWrapper->fpCallback(vmThread, pWrapper->pUserData);
}
// DAC/DBI API:
// Enumerate all threads waiting on the monitor event for an object
// DAC/DBI API:
// Enumerate all threads waiting on the monitor event for an object.
void DacDbiInterfaceImpl::EnumerateMonitorEventWaitList(VMPTR_Object vmObject,
                                                        FP_THREAD_ENUMERATION_CALLBACK fpCallback,
                                                        CALLBACK_DATA pUserData)
{
    DD_ENTER_MAY_THROW;

    Object* pObj = vmObject.GetDacPtr();
    SyncBlock* pSyncBlock = pObj->PassiveGetSyncBlock();

    // An object without a sync block cannot have a monitor wait list.
    if (pSyncBlock == NULL)
    {
        return;
    }

    ThreadUserDataWrapper wrapper = { pUserData, fpCallback };

    ThreadQueue::EnumerateThreads(pSyncBlock, (FP_TQ_THREAD_ENUMERATION_CALLBACK)EnumerateThreadsCallback, (VOID*) &wrapper);
}
// Report whether the GC data structures are in a walkable state.
// This implementation always reports true.
bool DacDbiInterfaceImpl::AreGCStructuresValid()
{
    return true;
}
// Zero-initialize all heap bookkeeping. The Segments array is allocated and
// filled later (see InitHeapDataWks / the SVR equivalent).
HeapData::HeapData()
    : YoungestGenPtr(0), YoungestGenLimit(0), Gen0Start(0), Gen0End(0), SegmentCount(0), Segments(0)
{
}
HeapData::~HeapData()
{
    // delete[] on a null pointer is a no-op, so no guard is needed.
    delete [] Segments;
}
// Allocate a single-page read cache sized to the OS page size.
// Note: mPage may be NULL on allocation failure (new(nothrow)); users of the
// cache must tolerate that.
LinearReadCache::LinearReadCache()
    : mCurrPageStart(0), mPageSize(0), mCurrPageSize(0), mPage(0)
{
    SYSTEM_INFO si;
    GetSystemInfo(&si);

    mPageSize = si.dwPageSize;
    mPage = new (nothrow) BYTE[mPageSize];
}
LinearReadCache::~LinearReadCache()
{
    // delete[] on a null pointer is a no-op, so no guard is needed.
    delete [] mPage;
}
// Fill the cache with the page containing addr. On failure the cache is left
// empty (so stale data is never served) and false is returned.
bool LinearReadCache::MoveToPage(CORDB_ADDRESS addr)
{
    // Round down to the containing page boundary.
    mCurrPageStart = addr - (addr % mPageSize);

    HRESULT hr = g_dacImpl->m_pTarget->ReadVirtual(mCurrPageStart, mPage, mPageSize, &mCurrPageSize);
    if (hr == S_OK)
    {
        return true;
    }

    // Read failed; invalidate the cache.
    mCurrPageStart = 0;
    mCurrPageSize = 0;
    return false;
}
// Default walk range: the entire address space.
CORDB_ADDRESS DacHeapWalker::HeapStart = 0;
CORDB_ADDRESS DacHeapWalker::HeapEnd = ~0;
// Construct an empty walker covering the default range [HeapStart, HeapEnd].
// Init() allocates the heap/segment tables and positions the cursor.
DacHeapWalker::DacHeapWalker()
    : mThreadCount(0), mAllocInfo(0), mHeapCount(0), mHeaps(0),
      mCurrObj(0), mCurrSize(0), mCurrMT(0),
      mCurrHeap(0), mCurrSeg(0), mStart((TADDR)HeapStart), mEnd((TADDR)HeapEnd)
{
}
DacHeapWalker::~DacHeapWalker()
{
    // delete[] on a null pointer is a no-op, so no guards are needed.
    delete [] mAllocInfo;
    delete [] mHeaps;
}
// Find the segment whose [Start, End] range contains obj, or NULL if none does.
SegmentData *DacHeapWalker::FindSegment(CORDB_ADDRESS obj)
{
    for (size_t h = 0; h < mHeapCount; ++h)
    {
        HeapData &heap = mHeaps[h];
        for (size_t s = 0; s < heap.SegmentCount; ++s)
        {
            SegmentData &seg = heap.Segments[s];
            if (seg.Start <= obj && obj <= seg.End)
            {
                return &seg;
            }
        }
    }

    return NULL;
}
// Report the current object (address, method table, size) through whichever
// out parameters were supplied, then advance to the next object.
HRESULT DacHeapWalker::Next(CORDB_ADDRESS *pValue, CORDB_ADDRESS *pMT, ULONG64 *pSize)
{
    if (!HasMoreObjects())
        return E_FAIL;

    if (pValue != NULL)
        *pValue = mCurrObj;

    if (pMT != NULL)
        *pMT = (CORDB_ADDRESS)mCurrMT;

    if (pSize != NULL)
        *pSize = (ULONG64)mCurrSize;

    // Advance the cursor; a failed advance is reported to the caller, but
    // S_FALSE (end of walk) is mapped to S_OK.
    HRESULT hr = MoveToNextObject();
    return FAILED(hr) ? hr : S_OK;
}
HRESULT DacHeapWalker::MoveToNextObject()
{
    do
    {
        // Move to the next object
        mCurrObj += mCurrSize;

        // Check to see if we are in the correct bounds.
        // With regions enabled, gen0 is identified by the segment's generation
        // number; otherwise by the heap's gen0 address range.
        bool isGen0 = IsRegionGCEnabled() ? (mHeaps[mCurrHeap].Segments[mCurrSeg].Generation == 0) :
                      (mHeaps[mCurrHeap].Gen0Start <= mCurrObj && mHeaps[mCurrHeap].Gen0End > mCurrObj);

        if (isGen0)
            CheckAllocAndSegmentRange();

        // Check to see if we've moved off the end of a segment
        if (mCurrObj >= mHeaps[mCurrHeap].Segments[mCurrSeg].End || mCurrObj > mEnd)
        {
            HRESULT hr = NextSegment();
            if (FAILED(hr) || hr == S_FALSE)
                return hr;
        }

        // Get the method table pointer
        if (!mCache.ReadMT(mCurrObj, &mCurrMT))
            return E_FAIL;

        if (!GetSize(mCurrMT, mCurrSize))
            return E_FAIL;
    } while (mCurrObj < mStart);    // keep advancing until we reach the requested range

    _ASSERTE(mStart <= mCurrObj && mCurrObj <= mEnd);
    return S_OK;
}
bool DacHeapWalker::GetSize(TADDR tMT, size_t &size)
{
    // With heap corruption, it's entirely possible that the MethodTable
    // we get is bad. This could cause exceptions, which we will catch
    // and return false. This causes the heapwalker to move to the next
    // segment.
    bool ret = true;
    EX_TRY
    {
        MethodTable *mt = PTR_MethodTable(tMT);
        size_t cs = mt->GetComponentSize();

        // Variable-sized objects (arrays/strings) store their component count
        // just after the method table pointer; fold it into the size.
        if (cs)
        {
            DWORD tmp = 0;
            if (mCache.Read(mCurrObj+sizeof(TADDR), &tmp))
                cs *= tmp;
            else
                ret = false;
        }

        size = mt->GetBaseSize() + cs;

        // The size is not guaranteed to be aligned, we have to
        // do that ourself. Generations 3/4 (LOH/POH) use large-object
        // alignment; everything else uses normal alignment.
        if (mHeaps[mCurrHeap].Segments[mCurrSeg].Generation == 3
            || mHeaps[mCurrHeap].Segments[mCurrSeg].Generation == 4)
            size = AlignLarge(size);
        else
            size = Align(size);
    }
    EX_CATCH
    {
        ret = false;
    }
    EX_END_CATCH(SwallowAllExceptions)

    return ret;
}
HRESULT DacHeapWalker::NextSegment()
{
    // Reset the cursor before advancing.
    mCurrObj = 0;
    mCurrMT = 0;
    mCurrSize = 0;

    do
    {
        // Advance to the next non-empty segment, rolling over to the next
        // heap when the current heap's segments are exhausted.
        do
        {
            mCurrSeg++;
            while (mCurrSeg >= mHeaps[mCurrHeap].SegmentCount)
            {
                mCurrSeg = 0;
                mCurrHeap++;

                if (mCurrHeap >= mHeapCount)
                {
                    // No more segments anywhere: end of the walk.
                    return S_FALSE;
                }
            }
        } while (mHeaps[mCurrHeap].Segments[mCurrSeg].Start >= mHeaps[mCurrHeap].Segments[mCurrSeg].End);

        mCurrObj = mHeaps[mCurrHeap].Segments[mCurrSeg].Start;

        // Skip allocation-context holes when the new position is in gen0.
        bool isGen0 = IsRegionGCEnabled() ? (mHeaps[mCurrHeap].Segments[mCurrSeg].Generation == 0) :
                      (mHeaps[mCurrHeap].Gen0Start <= mCurrObj && mHeaps[mCurrHeap].Gen0End > mCurrObj);

        if (isGen0)
            CheckAllocAndSegmentRange();

        if (!mCache.ReadMT(mCurrObj, &mCurrMT))
        {
            return E_FAIL;
        }

        if (!GetSize(mCurrMT, mCurrSize))
        {
            return E_FAIL;
        }
        // Keep advancing until the segment overlaps the requested [mStart, mEnd] range.
    } while((mHeaps[mCurrHeap].Segments[mCurrSeg].Start > mEnd) || (mHeaps[mCurrHeap].Segments[mCurrSeg].End < mStart));

    return S_OK;
}
// If the cursor lands on the start of an active allocation context, skip past
// the context's limit (plus a minimal free-object gap) so we don't walk into
// unparseable free space.
void DacHeapWalker::CheckAllocAndSegmentRange()
{
    const size_t MinObjSize = sizeof(TADDR)*3;

    // Per-thread allocation contexts collected by Init().
    for (int i = 0; i < mThreadCount; ++i)
    {
        if (mCurrObj == mAllocInfo[i].Ptr)
        {
            mCurrObj = mAllocInfo[i].Limit + Align(MinObjSize);
            break;
        }
    }

    // The heap's youngest-generation allocation context.
    if (mCurrObj == mHeaps[mCurrHeap].YoungestGenPtr)
    {
        mCurrObj = mHeaps[mCurrHeap].YoungestGenLimit + Align(MinObjSize);
    }
}
HRESULT DacHeapWalker::Init(CORDB_ADDRESS start, CORDB_ADDRESS end)
{
    // Collect information about the allocation contexts in the process.
    ThreadStore* threadStore = ThreadStore::s_pThreadStore;
    if (threadStore != NULL)
    {
        int count = (int)threadStore->ThreadCountInEE();
        mAllocInfo = new (nothrow) AllocInfo[count];
        if (mAllocInfo == NULL)
            return E_OUTOFMEMORY;

        Thread *thread = NULL;
        int j = 0;
        for (int i = 0; i < count; ++i)
        {
            // The thread or allocation context being null is troubling, but not fatal.
            // We may have stopped the process where the thread list or thread's alloc
            // context was in an inconsistent state. We will simply skip over affected
            // segments during the heap walk if we encounter problems due to this.
            thread = ThreadStore::GetThreadList(thread);
            if (thread == NULL)
                continue;

            gc_alloc_context *ctx = thread->GetAllocContext();
            if (ctx == NULL)
                continue;

            // Only record contexts with a live allocation pointer; j counts
            // the entries actually kept.
            if ((CORDB_ADDRESS)ctx->alloc_ptr != NULL)
            {
                mAllocInfo[j].Ptr = (CORDB_ADDRESS)ctx->alloc_ptr;
                mAllocInfo[j].Limit = (CORDB_ADDRESS)ctx->alloc_limit;
                j++;
            }
        }

        mThreadCount = j;
    }

    // Collect information about GC heaps (server GC has one heap per core;
    // workstation GC has a single heap).
#ifdef FEATURE_SVR_GC
    HRESULT hr = GCHeapUtilities::IsServerHeap() ? InitHeapDataSvr(mHeaps, mHeapCount) : InitHeapDataWks(mHeaps, mHeapCount);
#else
    HRESULT hr = InitHeapDataWks(mHeaps, mHeapCount);
#endif

    // Set up mCurrObj/mCurrMT.
    if (SUCCEEDED(hr))
        hr = Reset(start, end);

    return hr;
}
// Reposition the walker at the first object within [start, end].
// Requires Init() to have populated the heap/segment tables.
HRESULT DacHeapWalker::Reset(CORDB_ADDRESS start, CORDB_ADDRESS end)
{
    _ASSERTE(mHeaps);
    _ASSERTE(mHeapCount > 0);
    _ASSERTE(mHeaps[0].Segments);
    _ASSERTE(mHeaps[0].SegmentCount > 0);

    mStart = start;
    mEnd = end;

    // Set up first object
    mCurrObj = mHeaps[0].Segments[0].Start;
    mCurrMT = 0;
    mCurrSize = 0;
    mCurrHeap = 0;
    mCurrSeg = 0;

    HRESULT hr = S_OK;

    // it's possible the first segment is empty
    if (mCurrObj >= mHeaps[0].Segments[0].End)
        hr = MoveToNextObject();

    if (!mCache.ReadMT(mCurrObj, &mCurrMT))
        return E_FAIL;

    if (!GetSize(mCurrMT, mCurrSize))
        return E_FAIL;

    // Skip forward if the first object falls outside the requested range.
    if (mCurrObj < mStart || mCurrObj > mEnd)
        hr = MoveToNextObject();

    return hr;
}
// Locate the object containing (or nearest to) obj within its segment.
// On success reports the previous object, the containing object, and the next
// object (0 if obj's object is the last one in the segment).
// Fails if obj is not within any known segment or no containing object exists.
HRESULT DacHeapWalker::ListNearObjects(CORDB_ADDRESS obj, CORDB_ADDRESS *pPrev, CORDB_ADDRESS *pContaining, CORDB_ADDRESS *pNext)
{
    SegmentData *seg = FindSegment(obj);
    if (seg == NULL)
        return E_FAIL;

    // Walk the whole segment, tracking the previous object, until we find the
    // object whose [curr, curr+size) range contains obj.
    HRESULT hr = Reset(seg->Start, seg->End);
    if (SUCCEEDED(hr))
    {
        CORDB_ADDRESS prev = 0;
        CORDB_ADDRESS curr = 0;
        ULONG64 size = 0;
        bool found = false;

        while (!found && HasMoreObjects())
        {
            prev = curr;
            hr = Next(&curr, NULL, &size);
            if (FAILED(hr))
                break;

            if (obj >= curr && obj < curr + size)
                found = true;
        }

        if (found)
        {
            if (pPrev)
                *pPrev = prev;

            if (pContaining)
                *pContaining = curr;

            if (pNext)
            {
                // The walker already advanced past the containing object, so
                // the next Next() call yields the following object (if any).
                if (HasMoreObjects())
                {
                    hr = Next(&curr, NULL, NULL);
                    if (SUCCEEDED(hr))
                        *pNext = curr;
                }
                else
                {
                    *pNext = 0;
                }
            }

            hr = S_OK;
        }
        else if (SUCCEEDED(hr))
        {
            // Walked the whole segment without finding a containing object.
            hr = E_FAIL;
        }
    }

    return hr;
}
// Collects workstation-GC heap data: generation boundaries, the ephemeral
// allocation context, and one SegmentData entry per GC segment (SOH, LOH, POH,
// and in regions mode per-generation regions).
//   pHeaps - out: newly allocated array of HeapData (always length 1 for WKS GC)
//   pCount - out: number of entries in pHeaps (always 1 here)
// Caller owns the returned array. Returns E_OUTOFMEMORY on allocation failure.
HRESULT DacHeapWalker::InitHeapDataWks(HeapData *&pHeaps, size_t &pCount)
{
    bool regions = IsRegionGCEnabled();

    // Scrape basic heap details
    pCount = 1;
    pHeaps = new (nothrow) HeapData[1];
    if (pHeaps == NULL)
        return E_OUTOFMEMORY;

    dac_generation gen0 = GenerationTableIndex(g_gcDacGlobals->generation_table, 0);
    dac_generation gen1 = GenerationTableIndex(g_gcDacGlobals->generation_table, 1);
    dac_generation gen2 = GenerationTableIndex(g_gcDacGlobals->generation_table, 2);
    dac_generation loh  = GenerationTableIndex(g_gcDacGlobals->generation_table, 3);
    dac_generation poh  = GenerationTableIndex(g_gcDacGlobals->generation_table, 4);

    pHeaps[0].YoungestGenPtr = (CORDB_ADDRESS)gen0.allocation_context.alloc_ptr;
    pHeaps[0].YoungestGenLimit = (CORDB_ADDRESS)gen0.allocation_context.alloc_limit;

    if (!regions)
    {
        // In segments mode gen0/gen1 live at fixed offsets inside the
        // ephemeral segment; record their boundaries directly.
        pHeaps[0].Gen0Start = (CORDB_ADDRESS)gen0.allocation_start;
        pHeaps[0].Gen0End = (CORDB_ADDRESS)*g_gcDacGlobals->alloc_allocated;
        pHeaps[0].Gen1Start = (CORDB_ADDRESS)gen1.allocation_start;
    }

    // Segments
    int count = GetSegmentCount(loh.start_segment);
    count += GetSegmentCount(poh.start_segment);
    count += GetSegmentCount(gen2.start_segment);

    if (regions)
    {
        // Regions mode keeps separate region lists for gen0/gen1 as well.
        count += GetSegmentCount(gen1.start_segment);
        count += GetSegmentCount(gen0.start_segment);
    }

    pHeaps[0].SegmentCount = count;
    pHeaps[0].Segments = new (nothrow) SegmentData[count];
    if (pHeaps[0].Segments == NULL)
        return E_OUTOFMEMORY;

    DPTR(dac_heap_segment) seg;
    int i = 0;

    // Small object heap segments
    if (regions)
    {
        seg = gen2.start_segment;
        for (; seg && (i < count); ++i)
        {
            pHeaps[0].Segments[i].Generation = 2;
            pHeaps[0].Segments[i].Start = (CORDB_ADDRESS)seg->mem;
            pHeaps[0].Segments[i].End = (CORDB_ADDRESS)seg->allocated;
            seg = seg->next;
        }
        seg = gen1.start_segment;
        for (; seg && (i < count); ++i)
        {
            pHeaps[0].Segments[i].Generation = 1;
            pHeaps[0].Segments[i].Start = (CORDB_ADDRESS)seg->mem;
            pHeaps[0].Segments[i].End = (CORDB_ADDRESS)seg->allocated;
            seg = seg->next;
        }
        seg = gen0.start_segment;
        for (; seg && (i < count); ++i)
        {
            pHeaps[0].Segments[i].Start = (CORDB_ADDRESS)seg->mem;
            // The ephemeral region's end is the current allocation pointer, not
            // seg->allocated, since allocation is still in progress there.
            if (seg.GetAddr() == (TADDR)*g_gcDacGlobals->ephemeral_heap_segment)
            {
                pHeaps[0].Segments[i].End = (CORDB_ADDRESS)*g_gcDacGlobals->alloc_allocated;
                pHeaps[0].EphemeralSegment = i;
            }
            else
            {
                pHeaps[0].Segments[i].End = (CORDB_ADDRESS)seg->allocated;
            }
            pHeaps[0].Segments[i].Generation = 0;
            seg = seg->next;
        }
    }
    else
    {
        DPTR(dac_heap_segment) seg = gen2.start_segment;
        for (; seg && (i < count); ++i)
        {
            pHeaps[0].Segments[i].Start = (CORDB_ADDRESS)seg->mem;
            if (seg.GetAddr() == (TADDR)*g_gcDacGlobals->ephemeral_heap_segment)
            {
                // Ephemeral segment: tag it Generation = 1 so later consumers
                // know it holds gen0+gen1 (and possibly part of gen2).
                pHeaps[0].Segments[i].End = (CORDB_ADDRESS)*g_gcDacGlobals->alloc_allocated;
                pHeaps[0].Segments[i].Generation = 1;
                pHeaps[0].EphemeralSegment = i;
            }
            else
            {
                pHeaps[0].Segments[i].End = (CORDB_ADDRESS)seg->allocated;
                pHeaps[0].Segments[i].Generation = 2;
            }

            seg = seg->next;
        }
    }

    // Large object heap segments
    seg = loh.start_segment;
    for (; seg && (i < count); ++i)
    {
        pHeaps[0].Segments[i].Generation = 3;
        pHeaps[0].Segments[i].Start = (CORDB_ADDRESS)seg->mem;
        pHeaps[0].Segments[i].End = (CORDB_ADDRESS)seg->allocated;

        seg = seg->next;
    }

    // Pinned object heap segments
    seg = poh.start_segment;
    for (; seg && (i < count); ++i)
    {
        pHeaps[0].Segments[i].Generation = 4;
        pHeaps[0].Segments[i].Start = (CORDB_ADDRESS)seg->mem;
        pHeaps[0].Segments[i].End = (CORDB_ADDRESS)seg->allocated;

        seg = seg->next;
    }

    // Every counted segment should have been filled in exactly once.
    _ASSERTE(count == i);

    return S_OK;
}
// Allocates and initializes a heap walker, handing it back to DBI as an opaque
// handle. The handle must later be released with DeleteHeapWalk.
HRESULT DacDbiInterfaceImpl::CreateHeapWalk(IDacDbiInterface::HeapWalkHandle *pHandle)
{
    DD_ENTER_MAY_THROW;

    DacHeapWalker *pWalker = new (nothrow) DacHeapWalker;
    if (pWalker == NULL)
        return E_OUTOFMEMORY;

    HRESULT hr = pWalker->Init();
    if (FAILED(hr))
    {
        // Don't leak the walker if initialization failed.
        delete pWalker;
        return hr;
    }

    *pHandle = reinterpret_cast<HeapWalkHandle>(pWalker);
    return hr;
}
// Releases a heap walker previously created by CreateHeapWalk.
void DacDbiInterfaceImpl::DeleteHeapWalk(HeapWalkHandle handle)
{
    DD_ENTER_MAY_THROW;

    // delete on a null pointer is a no-op, so no explicit check is needed.
    delete reinterpret_cast<DacHeapWalker*>(handle);
}
// Fetches up to 'count' live objects from an in-progress heap walk, skipping
// GC free-list objects.
//   handle  - heap walk handle from CreateHeapWalk
//   count   - capacity of 'objects'
//   objects - out: filled with object address/type/size triples
//   fetched - out: number of entries written
// Returns S_OK if the buffer was filled, S_FALSE if the walk ended first.
HRESULT DacDbiInterfaceImpl::WalkHeap(HeapWalkHandle handle,
                                      ULONG count,
                                      OUT COR_HEAPOBJECT * objects,
                                      OUT ULONG *fetched)
{
    DD_ENTER_MAY_THROW;

    if (fetched == NULL)
        return E_INVALIDARG;

    DacHeapWalker *walk = reinterpret_cast<DacHeapWalker*>(handle);

    *fetched = 0;
    if (!walk->HasMoreObjects())
        return S_FALSE;

    // "Free" objects use a dedicated method table; filter them out below.
    CORDB_ADDRESS freeMT = (CORDB_ADDRESS)g_pFreeObjectMethodTable.GetAddr();

    HRESULT hr = S_OK;
    CORDB_ADDRESS addr, mt;
    ULONG64 size;
    ULONG i = 0;
    while (i < count && walk->HasMoreObjects())
    {
        hr = walk->Next(&addr, &mt, &size);

        if (FAILED(hr))
            break;

        if (mt != freeMT)
        {
            objects[i].address = addr;
            objects[i].type.token1 = mt;
            // token2 is unused for method-table based type ids.
            objects[i].type.token2 = NULL;
            objects[i].size = size;
            i++;
        }
    }

    if (SUCCEEDED(hr))
        hr = (i < count) ? S_FALSE : S_OK;

    *fetched = i;
    return hr;
}
// Enumerates all GC heap segments (including synthesized gen0/gen1 entries in
// segments mode) into pSegments.
//   pSegments - out: list allocated and filled with one COR_SEGMENT per
//               generation range per heap.
// Returns the HRESULT from heap-data initialization on failure.
HRESULT DacDbiInterfaceImpl::GetHeapSegments(OUT DacDbiArrayList<COR_SEGMENT> *pSegments)
{
    DD_ENTER_MAY_THROW;

    size_t heapCount = 0;
    HeapData *heaps = 0;
    bool region = IsRegionGCEnabled();

#ifdef FEATURE_SVR_GC
    HRESULT hr = GCHeapUtilities::IsServerHeap() ? DacHeapWalker::InitHeapDataSvr(heaps, heapCount) : DacHeapWalker::InitHeapDataWks(heaps, heapCount);
#else
    HRESULT hr = DacHeapWalker::InitHeapDataWks(heaps, heapCount);
#endif

    NewArrayHolder<HeapData> _heapHolder = heaps;

    // Fix: InitHeapData* can fail (e.g. E_OUTOFMEMORY) while 'heaps' is NULL
    // and 'heapCount' has already been set non-zero; proceeding would
    // dereference a null pointer in the loops below.
    if (FAILED(hr))
        return hr;

    // Count the number of segments to know how much to allocate.
    int total = 0;
    for (size_t i = 0; i < heapCount; ++i)
    {
        total += (int)heaps[i].SegmentCount;

        if (!region)
        {
            // SegmentCount is +1 due to the ephemeral segment containing more than one
            // generation (Gen1 + Gen0, and sometimes part of Gen2).
            total++;

            // It's possible that part of Gen2 lives on the ephemeral segment. If so,
            // we need to add one more to the output.
            const size_t eph = heaps[i].EphemeralSegment;
            _ASSERTE(eph < heaps[i].SegmentCount);
            if (heaps[i].Segments[eph].Start != heaps[i].Gen1Start)
                total++;
        }
    }

    pSegments->Alloc(total);

    // Now walk all segments and write them to the array.
    int curr = 0;
    for (size_t i = 0; i < heapCount; ++i)
    {
        _ASSERTE(curr < total);
        if (!region)
        {
            // Generation 0 is not in the segment list.
            COR_SEGMENT &seg = (*pSegments)[curr++];
            seg.start = heaps[i].Gen0Start;
            seg.end = heaps[i].Gen0End;
            seg.type = CorDebug_Gen0;
            seg.heap = (ULONG)i;
        }

        for (size_t j = 0; j < heaps[i].SegmentCount; ++j)
        {
            if (region)
            {
                // Regions mode: each segment already carries its generation.
                _ASSERTE(curr < total);
                COR_SEGMENT &seg = (*pSegments)[curr++];
                seg.start = heaps[i].Segments[j].Start;
                seg.end = heaps[i].Segments[j].End;
                seg.type = (CorDebugGenerationTypes)heaps[i].Segments[j].Generation;
                seg.heap = (ULONG)i;
            }
            else if (heaps[i].Segments[j].Generation == 1)
            {
                // This is the ephemeral segment. We have already written Gen0,
                // now write Gen1.
                _ASSERTE(heaps[i].Segments[j].Start <= heaps[i].Gen1Start);
                _ASSERTE(heaps[i].Segments[j].End > heaps[i].Gen1Start);

                {
                    _ASSERTE(curr < total);
                    COR_SEGMENT &seg = (*pSegments)[curr++];
                    seg.start = heaps[i].Gen1Start;
                    seg.end = heaps[i].Gen0Start;
                    seg.type = CorDebug_Gen1;
                    seg.heap = (ULONG)i;
                }

                // It's possible for Gen2 to take up a portion of the ephemeral segment.
                // We test for that here.
                if (heaps[i].Segments[j].Start != heaps[i].Gen1Start)
                {
                    _ASSERTE(curr < total);
                    COR_SEGMENT &seg = (*pSegments)[curr++];
                    seg.start = heaps[i].Segments[j].Start;
                    seg.end = heaps[i].Gen1Start;
                    seg.type = CorDebug_Gen2;
                    seg.heap = (ULONG)i;
                }
            }
            else
            {
                // Otherwise, we have a gen2 or gen3 (LOH) segment
                _ASSERTE(curr < total);
                COR_SEGMENT &seg = (*pSegments)[curr++];
                seg.start = heaps[i].Segments[j].Start;
                seg.end = heaps[i].Segments[j].End;

                _ASSERTE(heaps[i].Segments[j].Generation <= CorDebug_POH);
                seg.type = (CorDebugGenerationTypes)heaps[i].Segments[j].Generation;
                seg.heap = (ULONG)i;
            }
        }
    }

    _ASSERTE(total == curr);
    return hr;
}
// Heuristically verifies that 'addr' points to a plausible managed object by
// checking that its method table and EEClass agree with each other. Any
// exception during the probe (e.g. unreadable memory) means "not valid".
bool DacDbiInterfaceImpl::IsValidObject(CORDB_ADDRESS addr)
{
    DD_ENTER_MAY_THROW;

    bool isValid = false;

    if (addr != 0 && addr != (CORDB_ADDRESS)-1)
    {
        EX_TRY
        {
            PTR_Object obj(TO_TADDR(addr));

            PTR_MethodTable mt = obj->GetMethodTable();
            PTR_EEClass cls = mt->GetClass();

            // For a canonical MT the class points straight back at it; for a
            // non-canonical MT, round-trip through the canonical MT's class.
            if (mt == cls->GetMethodTable())
                isValid = true;
            else if (!mt->IsCanonicalMethodTable())
                isValid = cls->GetMethodTable()->GetClass() == cls;
        }
        EX_CATCH
        {
            isValid = false;
        }
        EX_END_CATCH(SwallowAllExceptions)
    }

    return isValid;
}
// Resolves the AppDomain, Module, and DomainAssembly that own the object at
// 'addr'. Returns false for a null/invalid address or when the object's
// assembly is not hosted in an AppDomain.
bool DacDbiInterfaceImpl::GetAppDomainForObject(CORDB_ADDRESS addr, OUT VMPTR_AppDomain * pAppDomain,
                                                OUT VMPTR_Module *pModule, OUT VMPTR_DomainAssembly *pDomainAssembly)
{
    DD_ENTER_MAY_THROW;

    if (addr == 0 || addr == (CORDB_ADDRESS)-1)
    {
        return false;
    }

    // Walk object -> method table -> module -> assembly -> domain.
    PTR_Object obj(TO_TADDR(addr));
    MethodTable *mt = obj->GetMethodTable();
    PTR_Module module = mt->GetModule();
    PTR_Assembly assembly = module->GetAssembly();
    BaseDomain *baseDomain = assembly->GetDomain();

    if (baseDomain->IsAppDomain())
    {
        pAppDomain->SetDacTargetPtr(PTR_HOST_TO_TADDR(baseDomain->AsAppDomain()));
        pModule->SetDacTargetPtr(PTR_HOST_TO_TADDR(module));
        pDomainAssembly->SetDacTargetPtr(PTR_HOST_TO_TADDR(module->GetDomainAssembly()));
    }
    else
    {
        return false;
    }

    return true;
}
// Creates a GC-reference walker covering thread stacks, the finalizer queue,
// and/or handle tables, per the flags. The handle must be released with
// DeleteRefWalk.
HRESULT DacDbiInterfaceImpl::CreateRefWalk(OUT RefWalkHandle * pHandle, BOOL walkStacks, BOOL walkFQ, UINT32 handleWalkMask)
{
    DD_ENTER_MAY_THROW;

    DacRefWalker *pRefWalker = new (nothrow) DacRefWalker(this, walkStacks, walkFQ, handleWalkMask);
    if (pRefWalker == NULL)
        return E_OUTOFMEMORY;

    HRESULT hr = pRefWalker->Init();
    if (SUCCEEDED(hr))
    {
        *pHandle = reinterpret_cast<RefWalkHandle>(pRefWalker);
        return hr;
    }

    // Initialization failed; reclaim the walker instead of handing it out.
    delete pRefWalker;
    return hr;
}
// Releases a reference walker previously created by CreateRefWalk.
void DacDbiInterfaceImpl::DeleteRefWalk(IN RefWalkHandle handle)
{
    DD_ENTER_MAY_THROW;

    // delete on a null pointer is a no-op, so no explicit check is needed.
    delete reinterpret_cast<DacRefWalker*>(handle);
}
// Fetches the next batch of GC references from an in-progress reference walk.
HRESULT DacDbiInterfaceImpl::WalkRefs(RefWalkHandle handle, ULONG count, OUT DacGcReference * objects, OUT ULONG *pFetched)
{
    if (objects == NULL)
        return E_POINTER;
    if (pFetched == NULL)
        return E_POINTER;

    DD_ENTER_MAY_THROW;

    DacRefWalker *pRefWalker = reinterpret_cast<DacRefWalker*>(handle);
    if (pRefWalker == NULL)
        return E_INVALIDARG;

    // Delegate to the walker; it tracks its own position.
    return pRefWalker->Next(count, objects, pFetched);
}
// Reads the method-table pointer out of the object header at 'dbgObj' and
// returns it as a COR_TYPEID.
HRESULT DacDbiInterfaceImpl::GetTypeID(CORDB_ADDRESS dbgObj, COR_TYPEID *pID)
{
    DD_ENTER_MAY_THROW;

    // Read the first few pointer-sized slots of the object; only the first
    // (the MT pointer) is used below.
    TADDR obj[3];
    ULONG32 read = 0;
    HRESULT hr = g_dacImpl->m_pTarget->ReadVirtual(dbgObj, (BYTE*)obj, sizeof(obj), &read);
    if (FAILED(hr))
        return hr;

    // Mask off the low mark bit the GC may set on the MT pointer.
    pID->token1 = (UINT64)(obj[0] & ~1);
    pID->token2 = 0;

    return hr;
}
// Converts a VM type handle into a COR_TYPEID keyed on its method table.
HRESULT DacDbiInterfaceImpl::GetTypeIDForType(VMPTR_TypeHandle vmTypeHandle, COR_TYPEID *pID)
{
    DD_ENTER_MAY_THROW;

    _ASSERTE(pID != NULL);
    _ASSERTE(!vmTypeHandle.IsNull());

    // token1 is the target address of the type's method table; token2 is unused.
    PTR_MethodTable pMT = TypeHandle::FromPtr(vmTypeHandle.GetDacPtr()).GetMethodTable();
    pID->token1 = pMT.GetAddr();
    _ASSERTE(pID->token1 != 0);
    pID->token2 = 0;

    return S_OK;
}
// Describes the instance fields of the type identified by 'id'.
//   id           - COR_TYPEID whose token1 is a method table address
//   celt         - capacity of 'layout'
//   layout       - out: per-field token/offset/type info; may be NULL to
//                  query only the field count
//   pceltFetched - out: number of entries written (or total count when
//                  layout == NULL)
// Returns S_FALSE when the buffer was too small or only the count was queried.
HRESULT DacDbiInterfaceImpl::GetObjectFields(COR_TYPEID id, ULONG32 celt, COR_FIELD *layout, ULONG32 *pceltFetched)
{
    // Fix: the old guard also rejected layout == NULL with E_POINTER, which
    // made the documented "query the count only" path below unreachable.
    if (pceltFetched == NULL)
        return E_POINTER;

    if (id.token1 == 0)
        return CORDBG_E_CLASS_NOT_LOADED;

    DD_ENTER_MAY_THROW;

    HRESULT hr = S_OK;

    TypeHandle typeHandle = TypeHandle::FromPtr(TO_TADDR(id.token1));

    if (typeHandle.IsTypeDesc())
        return E_INVALIDARG;

    ApproxFieldDescIterator fieldDescIterator(typeHandle.AsMethodTable(), ApproxFieldDescIterator::INSTANCE_FIELDS);

    ULONG32 cFields = fieldDescIterator.Count();

    // Handle case where user only wanted to know the number of fields.
    if (layout == NULL)
    {
        *pceltFetched = cFields;
        return S_FALSE;
    }

    if (celt < cFields)
    {
        cFields = celt;

        // we are returning less than the total
        hr = S_FALSE;
    }

    // Fix: report the number of entries actually written (cFields), not the
    // caller's capacity 'celt', which over-reported when celt > cFields.
    *pceltFetched = cFields;

    CorElementType componentType = typeHandle.AsMethodTable()->GetInternalCorElementType();
    BOOL fReferenceType = CorTypeInfo::IsObjRef_NoThrow(componentType);
    for (ULONG32 i = 0; i < cFields; ++i)
    {
        FieldDesc *pField = fieldDescIterator.Next();
        COR_FIELD* corField = layout + i;

        corField->token = pField->GetMemberDef();
        // Reference types report offsets relative to the object start, so add
        // the header size for them.
        corField->offset = (ULONG32)pField->GetOffset() + (fReferenceType ? Object::GetOffsetOfFirstField() : 0);

        TypeHandle fieldHandle = pField->LookupFieldTypeHandle();

        if (fieldHandle.IsNull())
        {
            // Field type not loaded yet; report an empty id.
            corField->id = {};
            corField->fieldType = (CorElementType)0;
        }
        else if (fieldHandle.IsByRef())
        {
            corField->fieldType = ELEMENT_TYPE_BYREF;
            // All ByRefs intentionally return IntPtr's MethodTable.
            corField->id.token1 = CoreLibBinder::GetElementType(ELEMENT_TYPE_I).GetAddr();
            corField->id.token2 = 0;
        }
        else
        {
            // Note that pointer types are handled in this path.
            // IntPtr's MethodTable is set for all pointer types and is expected.
            PTR_MethodTable mt = fieldHandle.GetMethodTable();

            corField->fieldType = mt->GetInternalCorElementType();
            corField->id.token1 = (ULONG64)mt.GetAddr();
            corField->id.token2 = 0;
        }
    }

    return hr;
}
// Fills in COR_TYPE_LAYOUT (parent type, base size, field count, element type,
// and boxing offset) for the method table identified by 'id'.
HRESULT DacDbiInterfaceImpl::GetTypeLayout(COR_TYPEID id, COR_TYPE_LAYOUT *pLayout)
{
    if (pLayout == NULL)
        return E_POINTER;

    if (id.token1 == 0)
        return CORDBG_E_CLASS_NOT_LOADED;

    DD_ENTER_MAY_THROW;

    PTR_MethodTable mt = PTR_MethodTable(TO_TADDR(id.token1));
    PTR_MethodTable parentMT = mt->GetParentMethodTable();

    COR_TYPEID parent = {parentMT.GetAddr(), 0};
    pLayout->parentID = parent;

    DWORD size = mt->GetBaseSize();
    ApproxFieldDescIterator fieldDescIterator(mt, ApproxFieldDescIterator::INSTANCE_FIELDS);

    pLayout->objectSize = size;
    pLayout->numFields = fieldDescIterator.Count();

    // Get type
    CorElementType componentType = mt->IsString() ? ELEMENT_TYPE_STRING : mt->GetInternalCorElementType();
    pLayout->type = componentType;

    // Value types are boxed behind a method-table pointer; reference types
    // need no extra offset.
    pLayout->boxOffset = CorTypeInfo::IsObjRef_NoThrow(componentType) ? 0 : sizeof(TADDR);

    return S_OK;
}
// Fills in COR_ARRAY_LAYOUT for an array or string type: component type/id,
// rank info, and the offsets of the length, bounds, and first element.
HRESULT DacDbiInterfaceImpl::GetArrayLayout(COR_TYPEID id, COR_ARRAY_LAYOUT *pLayout)
{
    if (pLayout == NULL)
        return E_POINTER;

    if (id.token1 == 0)
        return CORDBG_E_CLASS_NOT_LOADED;

    DD_ENTER_MAY_THROW;

    PTR_MethodTable mt = PTR_MethodTable(TO_TADDR(id.token1));

    if (!mt->IsStringOrArray())
        return E_INVALIDARG;

    if (mt->IsString())
    {
        // Strings are modeled as a rank-1 char array: MT pointer, then a
        // 4-byte length, then the UTF-16 characters.
        COR_TYPEID token;
        token.token1 = CoreLibBinder::GetElementType(ELEMENT_TYPE_CHAR).GetAddr();
        token.token2 = 0;
        pLayout->componentID = token;

        pLayout->rankSize = 4;
        pLayout->numRanks = 1;
        pLayout->rankOffset = sizeof(TADDR);
        pLayout->firstElementOffset = sizeof(TADDR) + 4;
        pLayout->countOffset = sizeof(TADDR);
        pLayout->componentType = ELEMENT_TYPE_CHAR;
        pLayout->elementSize = 2;
    }
    else
    {
        DWORD ranks = mt->GetRank();
        pLayout->rankSize = 4;
        pLayout->numRanks = ranks;

        // Multi-dimensional arrays carry a bounds block after the length.
        bool multiDim = (ranks > 1);
        pLayout->rankOffset = multiDim ? sizeof(TADDR)*2 : sizeof(TADDR);
        pLayout->countOffset = sizeof(TADDR);
        pLayout->firstElementOffset = ArrayBase::GetDataPtrOffset(mt);

        TypeHandle hnd = mt->GetArrayElementTypeHandle();
        PTR_MethodTable cmt = hnd.GetMethodTable();
        CorElementType componentType = cmt->GetInternalCorElementType();
        // Report string components specially rather than as plain object refs.
        if ((UINT64)cmt.GetAddr() == (UINT64)g_pStringClass.GetAddr())
            componentType = ELEMENT_TYPE_STRING;

        COR_TYPEID token;
        token.token1 = cmt.GetAddr();   // This could be type handle
        token.token2 = 0;
        pLayout->componentID = token;

        pLayout->componentType = componentType;
        // Element size: pointer for refs, primitive size table for primitives,
        // otherwise the value type's instance-field size.
        if (CorTypeInfo::IsObjRef_NoThrow(componentType))
            pLayout->elementSize = sizeof(TADDR);
        else if (CorIsPrimitiveType(componentType))
            pLayout->elementSize = gElementTypeInfo[componentType].m_cbSize;
        else
            pLayout->elementSize = cmt->GetNumInstanceFieldBytes();
    }

    return S_OK;
}
// Fills in basic GC configuration: whether GC data structures are currently
// in a consistent state, server vs. workstation mode, heap count, pointer
// size, and whether concurrent GC is enabled.
void DacDbiInterfaceImpl::GetGCHeapInformation(COR_HEAPINFO * pHeapInfo)
{
    DD_ENTER_MAY_THROW;

    // Fix: removed an unused local ('size_t heapCount = 0;') that was never read.

    // Non-zero while the GC is rearranging the heap; walks are invalid then.
    pHeapInfo->areGCStructuresValid = *g_gcDacGlobals->gc_structures_invalid_cnt == 0;

#ifdef FEATURE_SVR_GC
    if (GCHeapUtilities::IsServerHeap())
    {
        pHeapInfo->gcType = CorDebugServerGC;
        pHeapInfo->numHeaps = DacGetNumHeaps();
    }
    else
#endif
    {
        pHeapInfo->gcType = CorDebugWorkstationGC;
        pHeapInfo->numHeaps = 1;
    }

    pHeapInfo->pointerSize = sizeof(TADDR);
    pHeapInfo->concurrent = g_pConfig->GetGCconcurrent() ? TRUE : FALSE;
}
// Returns the target address of the given PEAssembly's read-write metadata
// internal interface.
HRESULT DacDbiInterfaceImpl::GetPEFileMDInternalRW(VMPTR_PEAssembly vmPEAssembly, OUT TADDR* pAddrMDInternalRW)
{
    DD_ENTER_MAY_THROW;

    if (pAddrMDInternalRW == NULL)
        return E_INVALIDARG;

    PEAssembly * pAssembly = vmPEAssembly.GetDacPtr();
    *pAddrMDInternalRW = pAssembly->GetMDInternalRWAddress();

    return S_OK;
}
// Deprecated stub kept for interface compatibility; the replacement is
// GetActiveRejitILCodeVersionNode. Asserts in debug builds if ever called.
HRESULT DacDbiInterfaceImpl::GetReJitInfo(VMPTR_Module vmModule, mdMethodDef methodTk, OUT VMPTR_ReJitInfo* pvmReJitInfo)
{
    DD_ENTER_MAY_THROW;
    _ASSERTE(!"You shouldn't be calling this - use GetActiveRejitILCodeVersionNode instead");
    return S_OK;
}
// Returns the 'rejit active' IL code version node for the given method, or a
// null VMPTR when there is none (default version, or the profiler hasn't
// supplied the new IL yet).
HRESULT DacDbiInterfaceImpl::GetActiveRejitILCodeVersionNode(VMPTR_Module vmModule, mdMethodDef methodTk, OUT VMPTR_ILCodeVersionNode* pVmILCodeVersionNode)
{
    DD_ENTER_MAY_THROW;

    if (pVmILCodeVersionNode == NULL)
        return E_INVALIDARG;
#ifdef FEATURE_REJIT
    PTR_Module pModule = vmModule.GetDacPtr();
    CodeVersionManager * pCodeVersionManager = pModule->GetCodeVersionManager();
    // Be careful, there are two different definitions of 'active' being used here
    // For the CodeVersionManager, the active IL version is whatever one should be used in the next invocation of the method
    // 'rejit active' narrows that to only include rejit IL bodies where the profiler has already provided the definition
    // for the new IL (ilCodeVersion.GetRejitState()==ILCodeVersion::kStateActive). It is possible that the code version
    // manager's active IL version hasn't yet asked the profiler for the IL body to use, in which case we want to filter it
    // out from the return in this method.
    ILCodeVersion activeILVersion = pCodeVersionManager->GetActiveILCodeVersion(pModule, methodTk);
    if (activeILVersion.IsNull() || activeILVersion.IsDefaultVersion() || activeILVersion.GetRejitState() != ILCodeVersion::kStateActive)
    {
        pVmILCodeVersionNode->SetDacTargetPtr(0);
    }
    else
    {
        pVmILCodeVersionNode->SetDacTargetPtr(PTR_TO_TADDR(activeILVersion.AsNode()));
    }
#else
    _ASSERTE(!"You shouldn't be calling this - rejit is not supported in this build");
    pVmILCodeVersionNode->SetDacTargetPtr(0);
#endif
    return S_OK;
}
// Deprecated stub kept for interface compatibility; the replacement is
// GetNativeCodeVersionNode. Asserts in debug builds if ever called.
HRESULT DacDbiInterfaceImpl::GetReJitInfo(VMPTR_MethodDesc vmMethod, CORDB_ADDRESS codeStartAddress, OUT VMPTR_ReJitInfo* pvmReJitInfo)
{
    DD_ENTER_MAY_THROW;
    _ASSERTE(!"You shouldn't be calling this - use GetNativeCodeVersionNode instead");
    return S_OK;
}
// Finds the native code version node for the method's code starting at
// 'codeStartAddress'. Returns a null VMPTR in builds without rejit support.
HRESULT DacDbiInterfaceImpl::GetNativeCodeVersionNode(VMPTR_MethodDesc vmMethod, CORDB_ADDRESS codeStartAddress, OUT VMPTR_NativeCodeVersionNode* pVmNativeCodeVersionNode)
{
    DD_ENTER_MAY_THROW;
    if (pVmNativeCodeVersionNode == NULL)
        return E_INVALIDARG;
#ifdef FEATURE_REJIT
    PTR_MethodDesc pMD = vmMethod.GetDacPtr();
    CodeVersionManager * pCodeVersionManager = pMD->GetCodeVersionManager();
    NativeCodeVersion codeVersion = pCodeVersionManager->GetNativeCodeVersion(pMD, (PCODE)codeStartAddress);
    pVmNativeCodeVersionNode->SetDacTargetPtr(PTR_TO_TADDR(codeVersion.AsNode()));
#else
    pVmNativeCodeVersionNode->SetDacTargetPtr(0);
#endif
    return S_OK;
}
// Deprecated stub kept for interface compatibility; the replacement is
// GetILCodeVersionNode. Asserts in debug builds if ever called.
HRESULT DacDbiInterfaceImpl::GetSharedReJitInfo(VMPTR_ReJitInfo vmReJitInfo, OUT VMPTR_SharedReJitInfo* pvmSharedReJitInfo)
{
    DD_ENTER_MAY_THROW;
    // Fix: the assert message previously misspelled the replacement API as
    // "GetLCodeVersionNode".
    _ASSERTE(!"You shouldn't be calling this - use GetILCodeVersionNode instead");
    return S_OK;
}
// Maps a native code version node to its owning IL code version node, or a
// null VMPTR when the IL version is the default one (which has no node).
HRESULT DacDbiInterfaceImpl::GetILCodeVersionNode(VMPTR_NativeCodeVersionNode vmNativeCodeVersionNode, VMPTR_ILCodeVersionNode* pVmILCodeVersionNode)
{
    DD_ENTER_MAY_THROW;
    if (pVmILCodeVersionNode == NULL)
        return E_INVALIDARG;
#ifdef FEATURE_REJIT
    NativeCodeVersionNode* pNativeCodeVersionNode = vmNativeCodeVersionNode.GetDacPtr();
    ILCodeVersion ilCodeVersion = pNativeCodeVersionNode->GetILCodeVersion();
    if (ilCodeVersion.IsDefaultVersion())
    {
        pVmILCodeVersionNode->SetDacTargetPtr(0);
    }
    else
    {
        pVmILCodeVersionNode->SetDacTargetPtr(PTR_TO_TADDR(ilCodeVersion.AsNode()));
    }
#else
    _ASSERTE(!"You shouldn't be calling this - rejit is not supported in this build");
    pVmILCodeVersionNode->SetDacTargetPtr(0);
#endif
    return S_OK;
}
// Deprecated stub kept for interface compatibility; the replacement is
// GetILCodeVersionNodeData. Asserts in debug builds if ever called.
HRESULT DacDbiInterfaceImpl::GetSharedReJitInfoData(VMPTR_SharedReJitInfo vmSharedReJitInfo, DacSharedReJitInfo* pData)
{
    DD_ENTER_MAY_THROW;
    _ASSERTE(!"You shouldn't be calling this - use GetILCodeVersionNodeData instead");
    return S_OK;
}
// Copies rejit IL version details (state, IL body address, codegen flags, and
// any instrumented IL map) out of an IL code version node into 'pData'.
HRESULT DacDbiInterfaceImpl::GetILCodeVersionNodeData(VMPTR_ILCodeVersionNode vmILCodeVersionNode, DacSharedReJitInfo* pData)
{
    DD_ENTER_MAY_THROW;
#ifdef FEATURE_REJIT
    ILCodeVersion ilCode(vmILCodeVersionNode.GetDacPtr());
    pData->m_state = ilCode.GetRejitState();
    pData->m_pbIL = PTR_TO_CORDB_ADDRESS(dac_cast<ULONG_PTR>(ilCode.GetIL()));
    pData->m_dwCodegenFlags = ilCode.GetJitFlags();
    const InstrumentedILOffsetMapping* pMapping = ilCode.GetInstrumentedILMap();
    if (pMapping)
    {
        pData->m_cInstrumentedMapEntries = (ULONG)pMapping->GetCount();
        pData->m_rgInstrumentedMapEntries = PTR_TO_CORDB_ADDRESS(dac_cast<ULONG_PTR>(pMapping->GetOffsets()));
    }
    else
    {
        // No instrumented map: report an empty table.
        pData->m_cInstrumentedMapEntries = 0;
        pData->m_rgInstrumentedMapEntries = 0;
    }
#else
    _ASSERTE(!"You shouldn't be calling this - rejit isn't supported in this build");
#endif
    return S_OK;
}
// Returns the debugger's compile-time defines bit field (used to verify that
// DBI and the runtime were built with compatible options).
HRESULT DacDbiInterfaceImpl::GetDefinesBitField(ULONG32 *pDefines)
{
    DD_ENTER_MAY_THROW;

    if (pDefines != NULL)
    {
        *pDefines = g_pDebugger->m_defines;
        return S_OK;
    }

    return E_INVALIDARG;
}
// Returns the version number of the debugger's metadata data structures
// (used to detect layout mismatches between DBI and the runtime).
HRESULT DacDbiInterfaceImpl::GetMDStructuresVersion(ULONG32* pMDStructuresVersion)
{
    DD_ENTER_MAY_THROW;

    if (pMDStructuresVersion != NULL)
    {
        *pMDStructuresVersion = g_pDebugger->m_mdDataStructureVersion;
        return S_OK;
    }

    return E_INVALIDARG;
}
// Toggles garbage-collection notification events by writing the flag directly
// into the target process's Debugger instance.
HRESULT DacDbiInterfaceImpl::EnableGCNotificationEvents(BOOL fEnable)
{
    DD_ENTER_MAY_THROW

    HRESULT hr = S_OK;
    EX_TRY
    {
        if (g_pDebugger != NULL)
        {
            // Write into the debuggee's Debugger::m_isGarbageCollectionEventsEnabled.
            TADDR addr = PTR_HOST_MEMBER_TADDR(Debugger, g_pDebugger, m_isGarbageCollectionEventsEnabled);
            SafeWriteStructOrThrow<BOOL>(addr, &fEnable);
        }
    }
    EX_CATCH_HRESULT(hr);
    return hr;
}
// Constructs a reference walker; actual resources are acquired in Init().
//   dac        - DAC instance used for stack walking
//   walkStacks - include GC roots on thread stacks
//   walkFQ     - include the finalizer queue
//   handleMask - CorGCReferenceType mask of handle kinds to include
DacRefWalker::DacRefWalker(ClrDataAccess *dac, BOOL walkStacks, BOOL walkFQ, UINT32 handleMask)
    : mDac(dac), mWalkStacks(walkStacks), mWalkFQ(walkFQ), mHandleMask(handleMask), mStackWalker(NULL),
      mHandleWalker(NULL), mFQStart(PTR_NULL), mFQEnd(PTR_NULL), mFQCurr(PTR_NULL)
{
}
// Releases any owned handle/stack walkers.
DacRefWalker::~DacRefWalker()
{
    Clear();
}
// Lazily creates the handle walker (if a handle mask was requested) and seeds
// the stack walk with the first thread (if stack walking was requested).
HRESULT DacRefWalker::Init()
{
    HRESULT status = S_OK;

    if (mHandleMask != 0)
    {
        // Will throw on OOM, which is fine.
        mHandleWalker = new DacHandleWalker();
        status = mHandleWalker->Init(GetHandleWalkerMask());
    }

    if (SUCCEEDED(status) && mWalkStacks)
        status = NextThread();

    return status;
}
// Frees the owned handle and stack walkers; safe to call repeatedly.
void DacRefWalker::Clear()
{
    // delete on null is a no-op, so no guards are needed.
    delete mHandleWalker;
    mHandleWalker = NULL;

    delete mStackWalker;
    mStackWalker = NULL;
}
// Translates the public CorGCReferenceType mask (mHandleMask) into the
// internal handle-table mask of (1 << HNDTYPE_*) bits consumed by
// DacHandleWalker::Init.
UINT32 DacRefWalker::GetHandleWalkerMask()
{
    UINT32 result = 0;
    if (mHandleMask & CorHandleStrong)
        result |= (1 << HNDTYPE_STRONG);
    if (mHandleMask & CorHandleStrongPinning)
        result |= (1 << HNDTYPE_PINNED);
    if (mHandleMask & CorHandleWeakShort)
        result |= (1 << HNDTYPE_WEAK_SHORT);
    if (mHandleMask & CorHandleWeakLong)
        result |= (1 << HNDTYPE_WEAK_LONG);

#if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS) || defined(FEATURE_OBJCMARSHAL)
    // Weak and strong ref-counted handles share a single internal handle type.
    if ((mHandleMask & CorHandleWeakRefCount) || (mHandleMask & CorHandleStrongRefCount))
        result |= (1 << HNDTYPE_REFCOUNTED);
#endif // FEATURE_COMINTEROP || FEATURE_COMWRAPPERS || FEATURE_OBJCMARSHAL

#if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS)
    if (mHandleMask & CorHandleWeakNativeCom)
        result |= (1 << HNDTYPE_WEAK_NATIVE_COM);
#endif // FEATURE_COMINTEROP || FEATURE_COMWRAPPERS

    if (mHandleMask & CorHandleStrongDependent)
        result |= (1 << HNDTYPE_DEPENDENT);
    if (mHandleMask & CorHandleStrongAsyncPinned)
        result |= (1 << HNDTYPE_ASYNCPINNED);
    if (mHandleMask & CorHandleStrongSizedByref)
        result |= (1 << HNDTYPE_SIZEDREF);

    return result;
}
// Produces up to 'celt' GC roots, draining sources in order: handle table,
// then the finalizer queue, then thread stacks (advancing thread by thread).
// Returns S_FALSE when all sources are exhausted before filling the buffer.
HRESULT DacRefWalker::Next(ULONG celt, DacGcReference roots[], ULONG *pceltFetched)
{
    if (roots == NULL || pceltFetched == NULL)
        return E_POINTER;

    ULONG total = 0;
    HRESULT hr = S_OK;

    if (mHandleWalker)
    {
        hr = mHandleWalker->Next(celt, roots, &total);
        // The handle walker is torn down as soon as it finishes (S_FALSE)
        // or fails, so later calls fall through to the other sources.
        if (hr == S_FALSE || FAILED(hr))
        {
            delete mHandleWalker;
            mHandleWalker = NULL;

            if (FAILED(hr))
                return hr;
        }
    }

    if (total < celt)
    {
        // Drain finalizer-queue entries directly from the FQ range.
        while (total < celt && mFQCurr < mFQEnd)
        {
            DacGcReference &ref = roots[total++];

            ref.vmDomain = VMPTR_AppDomain::NullPtr();
            ref.objHnd.SetDacTargetPtr(mFQCurr.GetAddr());
            ref.dwType = (DWORD)CorReferenceFinalizer;
            ref.i64ExtraData = 0;

            mFQCurr++;
        }
    }

    // Walk stacks, moving to the next thread whenever the current one is done.
    while (total < celt && mStackWalker)
    {
        ULONG fetched = 0;
        hr = mStackWalker->Next(celt-total, roots+total, &fetched);
        if (FAILED(hr))
            return hr;

        if (hr == S_FALSE)
        {
            hr = NextThread();
            if (FAILED(hr))
                return hr;
        }

        total += fetched;
    }

    *pceltFetched = total;

    return total < celt ? S_FALSE : S_OK;
}
// Advances the stack walk to the next thread in the thread store, replacing
// the current per-thread stack walker. Returns S_FALSE when there are no
// more threads (mStackWalker left NULL).
HRESULT DacRefWalker::NextThread()
{
    Thread *pThread = NULL;
    if (mStackWalker)
    {
        // Remember where we were so GetThreadList resumes after it.
        pThread = mStackWalker->GetThread();
        delete mStackWalker;
        mStackWalker = NULL;
    }

    pThread = ThreadStore::GetThreadList(pThread);
    if (!pThread)
        return S_FALSE;

    // Throws on OOM, which is acceptable here.
    mStackWalker = new DacStackReferenceWalker(mDac, pThread->GetOSThreadId());
    return mStackWalker->Init();
}
// Fetches up to 'celt' handle-table roots as DacGcReference entries via the
// shared handle-walk template, using EnumCallbackDac to fill each slot.
HRESULT DacHandleWalker::Next(ULONG celt, DacGcReference roots[], ULONG *pceltFetched)
{
    SUPPORTS_DAC;

    if (roots == NULL || pceltFetched == NULL)
        return E_POINTER;

    return DoHandleWalk<DacGcReference, ULONG, DacHandleWalker::EnumCallbackDac>(celt, roots, pceltFetched);
}
// Per-handle callback for the DAC handle walk. Appends one DacGcReference for
// 'handle' to the chunk list carried in param1 (a DacHandleWalkerParam),
// growing the list on demand and recording the handle kind and extra data.
void CALLBACK DacHandleWalker::EnumCallbackDac(PTR_UNCHECKED_OBJECTREF handle, uintptr_t *pExtraInfo, uintptr_t param1, uintptr_t param2)
{
    SUPPORTS_DAC;

    DacHandleWalkerParam *param = (DacHandleWalkerParam *)param1;
    HandleChunkHead *curr = param->Curr;

    // If we failed on a previous call (OOM) don't keep trying to allocate, it's not going to work.
    if (FAILED(param->Result))
        return;

    // We've moved past the size of the current chunk. We'll allocate a new chunk
    // and stuff the handles there. These are cleaned up by the destructor
    if (curr->Count >= (curr->Size/sizeof(DacGcReference)))
    {
        if (curr->Next == NULL)
        {
            HandleChunk *next = new (nothrow) HandleChunk;
            if (next != NULL)
            {
                curr->Next = next;
            }
            else
            {
                param->Result = E_OUTOFMEMORY;
                return;
            }
        }

        curr = param->Curr = param->Curr->Next;
    }

    // Fill the current handle.
    DacGcReference *dataArray = (DacGcReference*)curr->pData;
    DacGcReference &data = dataArray[curr->Count++];
    data.objHnd.SetDacTargetPtr(handle.GetAddr());
    data.vmDomain.SetDacTargetPtr(TO_TADDR(param->AppDomain));
    data.i64ExtraData = 0;

    unsigned int refCnt = 0;
    switch (param->Type)
    {
        case HNDTYPE_STRONG:
            data.dwType = (DWORD)CorHandleStrong;
            break;

        case HNDTYPE_PINNED:
            data.dwType = (DWORD)CorHandleStrongPinning;
            break;

        case HNDTYPE_WEAK_SHORT:
            data.dwType = (DWORD)CorHandleWeakShort;
            break;

        case HNDTYPE_WEAK_LONG:
            data.dwType = (DWORD)CorHandleWeakLong;
            break;

#if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS) || defined(FEATURE_OBJCMARSHAL)
        case HNDTYPE_REFCOUNTED:
            // Fix: the strong/weak classification previously tested
            // data.i64ExtraData BEFORE the ref count was fetched (it was
            // always 0 at that point), so every ref-counted handle was
            // reported as CorHandleWeakRefCount. Fetch the count first.
            GetRefCountedHandleInfo((OBJECTREF)*handle, param->Type, &refCnt, NULL, NULL, NULL);
            data.i64ExtraData = refCnt;
            data.dwType = (DWORD)(data.i64ExtraData ? CorHandleStrongRefCount : CorHandleWeakRefCount);
            break;
#endif // FEATURE_COMINTEROP || FEATURE_COMWRAPPERS || FEATURE_OBJCMARSHAL

#if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS)
        case HNDTYPE_WEAK_NATIVE_COM:
            data.dwType = (DWORD)CorHandleWeakNativeCom;
            break;
#endif // FEATURE_COMINTEROP || FEATURE_COMWRAPPERS

        case HNDTYPE_DEPENDENT:
            data.dwType = (DWORD)CorHandleStrongDependent;
            // For dependent handles the secondary object is the extra data.
            data.i64ExtraData = GetDependentHandleSecondary(handle.GetAddr()).GetAddr();
            break;

        case HNDTYPE_ASYNCPINNED:
            data.dwType = (DWORD)CorHandleStrongAsyncPinned;
            break;

        case HNDTYPE_SIZEDREF:
            data.dwType = (DWORD)CorHandleStrongSizedByref;
            break;
    }
}
// GC promotion callback used during the DAC stack walk. Records one stack
// root into the walker's output buffer, resolving interior pointers to the
// containing object's start address first.
void DacStackReferenceWalker::GCEnumCallbackDac(LPVOID hCallback, OBJECTREF *pObject, uint32_t flags, DacSlotLocation loc)
{
    GCCONTEXT *gcctx = (GCCONTEXT *)hCallback;
    DacScanContext *dsc = (DacScanContext*)gcctx->sc;

    CORDB_ADDRESS obj = 0;

    if (flags & GC_CALL_INTERIOR)
    {
        // 'pObject' is either a target address holding the pointer, or the
        // pointer value itself, depending on the slot location.
        if (loc.targetPtr)
            obj = (CORDB_ADDRESS)(*PTR_PTR_Object((TADDR)pObject)).GetAddr();
        else
            obj = (CORDB_ADDRESS)TO_TADDR(pObject);

        HRESULT hr = dsc->pWalker->mHeap.ListNearObjects(obj, NULL, &obj, NULL);

        // If we failed don't add this instance to the list. ICorDebug doesn't handle invalid pointers
        // very well, and the only way the heap walker's ListNearObjects will fail is if we have heap
        // corruption...which ICorDebug doesn't deal with anyway.
        if (FAILED(hr))
            return;
    }

    DacGcReference *data = dsc->pWalker->GetNextObject<DacGcReference>(dsc);
    if (data != NULL)
    {
        data->vmDomain.SetDacTargetPtr(AppDomain::GetCurrentDomain().GetAddr());
        // The low bit tags pObject as a resolved object address rather than
        // a handle/slot address.
        if (obj)
            data->pObject = obj | 1;
        else if (loc.targetPtr)
            data->objHnd.SetDacTargetPtr(TO_TADDR(pObject));
        else
            data->pObject = pObject->GetAddr() | 1;

        data->dwType = CorReferenceStack;
        data->i64ExtraData = 0;
    }
}
// GC report callback used during the DAC stack walk. Records one stack root
// (given as a pointer-to-object-slot) into the walker's output buffer,
// resolving interior pointers to the containing object's start first.
void DacStackReferenceWalker::GCReportCallbackDac(PTR_PTR_Object ppObj, ScanContext *sc, uint32_t flags)
{
    DacScanContext *dsc = (DacScanContext*)sc;

    TADDR obj = ppObj.GetAddr();
    if (flags & GC_CALL_INTERIOR)
    {
        CORDB_ADDRESS fixed_addr = 0;
        HRESULT hr = dsc->pWalker->mHeap.ListNearObjects((CORDB_ADDRESS)obj, NULL, &fixed_addr, NULL);

        // If we failed don't add this instance to the list. ICorDebug doesn't handle invalid pointers
        // very well, and the only way the heap walker's ListNearObjects will fail is if we have heap
        // corruption...which ICorDebug doesn't deal with anyway.
        if (FAILED(hr))
            return;

        obj = TO_TADDR(fixed_addr);
    }

    DacGcReference *data = dsc->pWalker->GetNextObject<DacGcReference>(dsc);
    if (data != NULL)
    {
        data->vmDomain.SetDacTargetPtr(AppDomain::GetCurrentDomain().GetAddr());
        data->objHnd.SetDacTargetPtr(obj);
        data->dwType = CorReferenceStack;
        data->i64ExtraData = 0;
    }
}
// Fetches up to 'count' stack roots for this walker's thread via the shared
// stack-walk template, using the two callbacks above to fill entries.
HRESULT DacStackReferenceWalker::Next(ULONG count, DacGcReference stackRefs[], ULONG *pFetched)
{
    if (stackRefs == NULL || pFetched == NULL)
        return E_POINTER;

    HRESULT hr = DoStackWalk<ULONG, DacGcReference,
                             DacStackReferenceWalker::GCReportCallbackDac,
                             DacStackReferenceWalker::GCEnumCallbackDac>
                            (count, stackRefs, pFetched);

    return hr;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: DacDbiImpl.cpp
//
//
// Implement DAC/DBI interface
//
//*****************************************************************************
#include "stdafx.h"
#include "dacdbiinterface.h"
#include "typestring.h"
#include "holder.h"
#include "debuginfostore.h"
#include "peimagelayout.inl"
#include "encee.h"
#include "switches.h"
#include "generics.h"
#include "stackwalk.h"
#include "virtualcallstub.h"
#include "dacdbiimpl.h"
#ifdef FEATURE_COMINTEROP
#include "runtimecallablewrapper.h"
#include "comcallablewrapper.h"
#endif // FEATURE_COMINTEROP
#include "request_common.h"
//-----------------------------------------------------------------------------
// Have standard enter and leave macros at the DacDbi boundary to enforce
// standard behavior.
// 1. catch exceptions and convert them at the boundary.
// 2. provide a space to hook logging and transitions.
// 3. provide a hook to verify return values.
//
// Usage notes:
// - use this at the DacDbi boundary; but not at internal functions
// - it's ok to Return from the middle.
//
// Expected usage is:
// Foo()
// {
// DD_ENTER_MAY_THROW
// ...
// if (...) { ThrowHr(E_SOME_FAILURE); }
// ...
// if (...) { return; } // early success case
// ...
// }
//-----------------------------------------------------------------------------
// Global allocator for DD. Access is protected under the g_dacCritSec lock.
// Set by DBI when the interface is created; all (forDbi) allocations route
// through it so DBI can later free the memory on its side of the boundary.
IDacDbiInterface::IAllocator * g_pAllocator = NULL;
//---------------------------------------------------------------------------------------
//
// Extra sugar for wrapping IAllocator under friendly New/Delete operators.
//
// Sample usage:
// void Foo(TestClass ** ppOut)
// {
// *ppOut = NULL;
// TestClass * p = new (forDbi) TestClass();
// ...
// if (ok)
// {
// *ppOut = p;
// return; // DBI will then free this memory.
// }
// ...
// DeleteDbiMemory(p); // DeleteDbiMemory(p, len); if it was an array allocation.
// }
//
// Be very careful when using this on classes since Dbi and DAC may be in
// separate dlls. This is best used when operating on blittable data-structures.
// (no ctor/dtor, plain data fields) to guarantee the proper DLL isolation.
// You don't want to call the ctor in DAC's context and the dtor in DBI's context
// unless you really know what you're doing and that it's safe.
//
// Need a class to serve as a tag that we can use to overload New/Delete.
forDbiWorker forDbi;

// Placement-style operator new that routes allocation through the client
// (DBI) allocator so DBI can free the memory later. Throws on OOM.
void * operator new(size_t lenBytes, const forDbiWorker &)
{
    _ASSERTE(g_pAllocator != NULL);
    void *result = g_pAllocator->Alloc(lenBytes);
    if (result == NULL)
    {
        ThrowOutOfMemory();
    }
    return result;
}
// Array form of the DBI-allocator operator new. Throws on OOM.
void * operator new[](size_t lenBytes, const forDbiWorker &)
{
    _ASSERTE(g_pAllocator != NULL);
    void *result = g_pAllocator->Alloc(lenBytes);
    if (result == NULL)
    {
        ThrowOutOfMemory();
    }
    return result;
}
// Note: there is no C++ syntax for manually invoking this, but if a constructor throws an exception
// during 'new (forDbi) T(...)', this matching delete operator is invoked automatically to free the
// storage through the DBI allocator.
void operator delete(void *p, const forDbiWorker &)
{
    if (p == NULL)
    {
        return;
    }

    _ASSERTE(g_pAllocator != NULL);
    g_pAllocator->Free((BYTE*) p);
}
// Note: there is no C++ syntax for manually invoking this, but if a constructor throws an exception
// during 'new (forDbi) T[...]', this matching delete operator is invoked automatically to free the
// storage through the DBI allocator.
void operator delete[](void *p, const forDbiWorker &)
{
    if (p == NULL)
    {
        return;
    }

    _ASSERTE(g_pAllocator != NULL);
    g_pAllocator->Free((BYTE*) p);
}
// @dbgtodo dac support: determine how to handle an array of class instances to ensure the dtors get
// called correctly or document that they won't
// Delete memory and invoke dtor for memory allocated with 'operator (forDbi) new'
// Run T's destructor and return the memory to the client allocator; the
// counterpart of 'new (forDbi) T'. Passing NULL is a legal no-op.
template<class T> void DeleteDbiMemory(T *p)
{
    if (p != NULL)
    {
        // Destroy first, then release the raw storage through the allocator
        // the object was created from.
        p->~T();

        _ASSERTE(g_pAllocator != NULL);
        g_pAllocator->Free((BYTE*) p);
    }
}
// Delete memory and invoke dtor for memory allocated with 'operator (forDbi) new[]'
// There's an inherent risk here - where each element's destructor will get called within
// the context of the DAC. If the destructor tries to use the CRT allocator logic expecting
// to hit the DBI's, we could be in trouble. Those objects need to use an export allocator like this.
// Destroy 'count' elements allocated with 'operator (forDbi) new[]' and free
// the block through the client allocator. Note: each element's dtor runs in
// the DAC's context, so element types must not assume the DBI's CRT allocator.
template<class T> void DeleteDbiArrayMemory(T *p, int count)
{
    if (p == NULL)
    {
        return;
    }

    // Invoke every element's destructor in order before releasing the block.
    for (int i = 0; i < count; i++)
    {
        p[i].~T();
    }

    _ASSERTE(g_pAllocator != NULL);
    g_pAllocator->Free((BYTE*) p);
}
//---------------------------------------------------------------------------------------
// Creates the DacDbiInterface object, used by Dbi.
//
// Arguments:
// pTarget - pointer to a Data-Target
// baseAddress - non-zero base address of mscorwks in target to debug.
// pAllocator - pointer to client allocator object. This lets DD allocate objects and
// pass them out back to the client, which can then delete them.
// DD takes a weak ref to this, so client must keep it alive until it
// calls Destroy.
// pMetadataLookup - callback interface to do internal metadata lookup. This is because
// metadata is not dac-ized.
// ppInterface - mandatory out-parameter
//
// Return Value:
// S_OK on success.
//
//
// Notes:
// On Windows, this is public function that can be retrieved by GetProcAddress.
// On Mac, this is used internally by DacDbiMarshalStubInstance below
// This will yield an IDacDbiInterface to provide structured access to the
// data-target.
//
// Must call Destroy to on interface to free its resources.
//
//---------------------------------------------------------------------------------------
STDAPI
DLLEXPORT
DacDbiInterfaceInstance(
    ICorDebugDataTarget * pTarget,
    CORDB_ADDRESS baseAddress,
    IDacDbiInterface::IAllocator * pAllocator,
    IDacDbiInterface::IMetaDataLookup * pMetaDataLookup,
    IDacDbiInterface ** ppInterface)
{
    // No marshalling is done by the instantiation function - we just need to setup the infrastructure.
    // We don't want to warn if this involves creating and accessing undacized data structures,
    // because it's for the infrastructure, not DACized code itself.
    SUPPORTS_DAC_HOST_ONLY;

    // Since this is public, verify it.
    if ((ppInterface == NULL) || (pTarget == NULL) || (baseAddress == 0))
    {
        return E_INVALIDARG;
    }

    *ppInterface = NULL;

    //
    // Actually allocate the real object and initialize it.
    //
    DacDbiInterfaceImpl * pDac = new (nothrow) DacDbiInterfaceImpl(pTarget, baseAddress, pAllocator, pMetaDataLookup);
    if (!pDac)
    {
        return E_OUTOFMEMORY;
    }

    HRESULT hrStatus = pDac->Initialize();

    if (SUCCEEDED(hrStatus))
    {
        // Ownership transfers to the caller, who must call Destroy() to free it.
        *ppInterface = pDac;
    }
    else
    {
        // Initialization failed; free the partially-constructed object rather than leak it.
        delete pDac;
    }
    return hrStatus;
}
//---------------------------------------------------------------------------------------
// Constructor. Instantiates a DAC/DBI interface around a DataTarget.
//
// Arguments:
// pTarget - pointer to a Data-Target
// baseAddress - non-zero base address of mscorwks in target to debug.
// pAllocator - pointer to client allocator object. This lets DD allocate objects and
// pass them out back to the client, which can then delete them.
// DD takes a weak ref to this, so client must keep it alive until it
// calls Destroy.
// pMetadataLookup - callback interface to do internal metadata lookup. This is because
// metadata is not dac-ized.
//
// Notes:
// pAllocator is a weak reference.
//---------------------------------------------------------------------------------------
// Constructor. Wraps the data-target; holds only weak references to the
// client-supplied allocator and metadata-lookup callbacks (the client must
// keep them alive until Destroy is called).
DacDbiInterfaceImpl::DacDbiInterfaceImpl(
    ICorDebugDataTarget* pTarget,
    CORDB_ADDRESS baseAddress,
    IAllocator * pAllocator,
    IMetaDataLookup * pMetaDataLookup
    ) : ClrDataAccess(pTarget),
    m_pAllocator(pAllocator),
    m_pMetaDataLookup(pMetaDataLookup),
    m_pCachedPEAssembly(VMPTR_PEAssembly::NullPtr()),
    m_pCachedImporter(NULL),
    m_isCachedHijackFunctionValid(FALSE)
{
    _ASSERTE(baseAddress != NULL);
    m_globalBase = CORDB_ADDRESS_TO_TADDR(baseAddress);

    _ASSERTE(pMetaDataLookup != NULL);
    _ASSERTE(pAllocator != NULL);
    _ASSERTE(pTarget != NULL);

#ifdef _DEBUG
    // Enable verification asserts in ICorDebug scenarios. ICorDebug never guesses at the DAC path, so any
    // mismatch should be fatal, and so always of interest to the user.
    // This overrides the assignment in the base class ctor (which runs first).
    m_fEnableDllVerificationAsserts = true;
#endif
}
//-----------------------------------------------------------------------------
// Destructor.
//
// Notes:
// This gets invoked after Destroy().
//-----------------------------------------------------------------------------
// Destructor. Invoked (via Release) after Destroy(); no explicit cleanup is
// needed here beyond what the base class does.
DacDbiInterfaceImpl::~DacDbiInterfaceImpl()
{
    SUPPORTS_DAC_HOST_ONLY;

    // This will automatically chain to the base class dtor
}
//-----------------------------------------------------------------------------
// Called from DAC-ized code to get a IMDInternalImport
//
// Arguments:
// pPEAssembly - PE file for which to get importer for
// fThrowEx - if true, throw instead of returning NULL.
//
// Returns:
// an Internal importer object for this file.
// May return NULL or throw (depending on fThrowEx).
// May throw in exceptional circumstances (eg, corrupt debuggee).
//
// Assumptions:
// This is called from DAC-ized code within the VM, which
// was in turn called from some DD primitive. The returned importer will
// be used by the DAC-ized code in the callstack, but it won't be cached.
//
// Notes:
// This is an Internal importer, not a public Metadata importer.
//
// Called from DAC-ized code to get an internal metadata importer for a PE file
// (or the PE file underlying a reflection module). Results are cached one-deep;
// may return NULL or throw depending on fThrowEx.
interface IMDInternalImport* DacDbiInterfaceImpl::GetMDImport(
    const PEAssembly* pPEAssembly,
    const ReflectionModule * pReflectionModule,
    bool fThrowEx)
{
    // Since this is called from an existing DAC-primitive, we already hold the g_dacCritSec lock.
    // The lock conveniently protects our cache.
    SUPPORTS_DAC;

    IDacDbiInterface::IMetaDataLookup * pLookup = m_pMetaDataLookup;
    _ASSERTE(pLookup != NULL);

    VMPTR_PEAssembly vmPEAssembly = VMPTR_PEAssembly::NullPtr();

    if (pPEAssembly != NULL)
    {
        vmPEAssembly.SetHostPtr(pPEAssembly);
    }
    else if (pReflectionModule != NULL)
    {
        // SOS and ClrDataAccess rely on special logic to find the metadata for methods in dynamic modules.
        // We don't need to. The RS has already taken care of the special logic for us.
        // So here we just grab the PEAssembly off of the ReflectionModule and continue down the normal
        // code path. See code:ClrDataAccess::GetMDImport for comparison.
        vmPEAssembly.SetHostPtr(pReflectionModule->GetPEAssembly());
    }

    // Optimize for the case where the VM queries the same Importer many times in a row.
    if (m_pCachedPEAssembly == vmPEAssembly)
    {
        return m_pCachedImporter;
    }

    // Go to DBI to find the metadata.
    IMDInternalImport * pInternal = NULL;
    bool isILMetaDataForNI = false;
    EX_TRY
    {
        // If test needs it in the future, prop isILMetaDataForNI back up to
        // ClrDataAccess.m_mdImports.Add() call.
        // example in code:ClrDataAccess::GetMDImport
        // CordbModule::GetMetaDataInterface also looks up MetaData and would need attention.

        // This is the new codepath that uses ICorDebugMetaDataLookup.
        // To get the old codepath that uses the v2 metadata lookup methods,
        // you'd have to load DAC only and then you'll get ClrDataAccess's implementation
        // of this function.
        pInternal = pLookup->LookupMetaData(vmPEAssembly, isILMetaDataForNI);
    }
    EX_CATCH
    {
        // Any expected error we should ignore.
        // These failures leave pInternal == NULL, which is reported below.
        if ((GET_EXCEPTION()->GetHR() != HRESULT_FROM_WIN32(ERROR_PARTIAL_COPY)) &&
            (GET_EXCEPTION()->GetHR() != CORDBG_E_READVIRTUAL_FAILURE) &&
            (GET_EXCEPTION()->GetHR() != CORDBG_E_SYMBOLS_NOT_AVAILABLE) &&
            (GET_EXCEPTION()->GetHR() != CORDBG_E_MODULE_LOADED_FROM_DISK))
        {
            EX_RETHROW;
        }
    }
    EX_END_CATCH(SwallowAllExceptions)

    if (pInternal == NULL)
    {
        SIMPLIFYING_ASSUMPTION(!"MD lookup failed");
        if (fThrowEx)
        {
            ThrowHR(E_FAIL);
        }
        return NULL;
    }
    else
    {
        // Cache it such that it we look for the exact same Importer again, we'll return it.
        m_pCachedPEAssembly = vmPEAssembly;
        m_pCachedImporter = pInternal;
    }

    return pInternal;
}
//-----------------------------------------------------------------------------
// Implementation of IDacDbiInterface
// See DacDbiInterface.h for full descriptions of all of these functions
//-----------------------------------------------------------------------------
// Destroy the connection, freeing up any resources.
// Destroy the connection, freeing up any resources.
void DacDbiInterfaceImpl::Destroy()
{
    // Drop the weak reference to the client allocator before releasing; once
    // Release() runs, this object may be deleted, so no member may be touched
    // after that call.
    m_pAllocator = NULL;
    this->Release();

    // Memory is deleted, don't access this object any more
}
// Check whether the version of the DBI matches the version of the runtime.
// See code:CordbProcess::CordbProcess#DBIVersionChecking for more information regarding version checking.
// Check whether the version of the DBI matches the version of the runtime.
// See code:CordbProcess::CordbProcess#DBIVersionChecking for more information
// regarding version checking. Returns CORDBG_E_INCOMPATIBLE_PROTOCOL on any
// mismatch, S_OK otherwise.
HRESULT DacDbiInterfaceImpl::CheckDbiVersion(const DbiVersion * pVersion)
{
    DD_ENTER_MAY_THROW;

    const bool fFormatMismatch   = (pVersion->m_dwFormat != kCurrentDbiVersionFormat);
    const bool fProtocolMismatch = (pVersion->m_dwProtocolBreakingChangeCounter != kCurrentDacDbiProtocolBreakingChangeCounter);
    const bool fReservedNonZero  = (pVersion->m_dwReservedMustBeZero1 != 0);

    if (fFormatMismatch || fProtocolMismatch || fReservedNonZero)
    {
        return CORDBG_E_INCOMPATIBLE_PROTOCOL;
    }

    return S_OK;
}
// Flush the DAC cache. This should be called when target memory changes.
// Flush the DAC cache. This should be called when target memory changes.
HRESULT DacDbiInterfaceImpl::FlushCache()
{
    // Non-reentrant. We don't want to flush cached instances from a callback.
    // That would remove host DAC instances while they're being used.
    DD_NON_REENTRANT_MAY_THROW;

    // Invalidate our one-deep importer cache and the hijack-function cache
    // before flushing the underlying DAC instance cache.
    m_pCachedPEAssembly = VMPTR_PEAssembly::NullPtr();
    m_pCachedImporter = NULL;
    m_isCachedHijackFunctionValid = FALSE;

    HRESULT hr = ClrDataAccess::Flush();

    // Current impl of Flush() should always succeed. If it ever fails, we want to know.
    _ASSERTE(SUCCEEDED(hr));
    return hr;
}
// enable or disable DAC target consistency checks
// enable or disable DAC target consistency checks
void DacDbiInterfaceImpl::DacSetTargetConsistencyChecks(bool fEnableAsserts)
{
    // forward on to our ClrDataAccess base class
    ClrDataAccess::SetTargetConsistencyChecks(fEnableAsserts);
}
// Query if Left-side is started up?
// Query if Left-side is started up?
BOOL DacDbiInterfaceImpl::IsLeftSideInitialized()
{
    DD_ENTER_MAY_THROW;

    if (g_pDebugger == NULL)
    {
        return FALSE;
    }

    // Reading this flag is "safe" because of the left-side initialization order:
    //   1) g_pDebugger is an RVA based global initialized to NULL when the module is loaded.
    //   2) A "Debugger" object is allocated.
    //   3) Its ctor runs, which sets m_fLeftSideInitialized = FALSE.
    //   4) The object is assigned to g_pDebugger.
    //   5) Later, LS initialization code sets g_pDebugger->m_fLeftSideInitialized = TRUE.
    // The memory write in #5 is atomic, so there is no window where we would
    // read uninitialized data.
    return (g_pDebugger->m_fLeftSideInitialized != 0);
}
// Determines if a given address is a CLR stub.
// Determines if a given address is a CLR stub. An address inside the runtime
// module itself also counts as a stub. Not implemented on Unix (mixed-mode
// debugging is Windows-only).
BOOL DacDbiInterfaceImpl::IsTransitionStub(CORDB_ADDRESS address)
{
    DD_ENTER_MAY_THROW;

    BOOL fIsStub = FALSE;

#if defined(TARGET_UNIX)
    // Currently IsIPInModule() is not implemented in the PAL. Rather than skipping the check, we should
    // either E_NOTIMPL this API or implement IsIPInModule() in the PAL. Since ICDProcess::IsTransitionStub()
    // is only called by VS in mixed-mode debugging scenarios, and mixed-mode debugging is not supported on
    // POSIX systems, there is really no incentive to implement this API at this point.
    ThrowHR(E_NOTIMPL);
#else // !TARGET_UNIX
    TADDR ip = (TADDR)address;

    if (ip == NULL)
    {
        fIsStub = FALSE;
    }
    else
    {
        fIsStub = StubManager::IsStub(ip);
    }

    // If it's in Mscorwks, count that as a stub too.
    if (fIsStub == FALSE)
    {
        fIsStub = IsIPInModule(m_globalBase, ip);
    }
#endif // TARGET_UNIX

    return fIsStub;
}
// Gets the type of 'address'.
IDacDbiInterface::AddressType DacDbiInterfaceImpl::GetAddressType(CORDB_ADDRESS address)
{
    DD_ENTER_MAY_THROW;
    TADDR taAddr = CORDB_ADDRESS_TO_TADDR(address);

    // Anything that is not even a plausible code address is immediately unrecognized.
    if (IsPossibleCodeAddress(taAddr) != S_OK)
    {
        return kAddressUnrecognized;
    }

    // Managed code takes precedence over runtime stubs.
    if (ExecutionManager::IsManagedCode(taAddr))
    {
        return kAddressManagedMethod;
    }

    if (StubManager::IsStub(taAddr))
    {
        return kAddressRuntimeUnmanagedStub;
    }

    return kAddressUnrecognized;
}
// Get a VM appdomain pointer that matches the appdomain ID
// Get a VM appdomain pointer that matches the appdomain ID. Throws if the
// lookup fails or if the left side hands back a NULL AppDomain.
VMPTR_AppDomain DacDbiInterfaceImpl::GetAppDomainFromId(ULONG appdomainId)
{
    DD_ENTER_MAY_THROW;

    VMPTR_AppDomain vmAppDomain;

    // @dbgtodo dac support - We would like to wean ourselves off the IXClrData interfaces.
    IXCLRDataProcess * pDAC = this;
    ReleaseHolder<IXCLRDataAppDomain> pDacAppDomain;
    HRESULT hrStatus = pDAC->GetAppDomainByUniqueID(appdomainId, &pDacAppDomain);
    IfFailThrow(hrStatus);

    IXCLRDataAppDomain * pIAppDomain = pDacAppDomain;
    AppDomain * pAppDomain = (static_cast<ClrDataAppDomain *> (pIAppDomain))->GetAppDomain();
    SIMPLIFYING_ASSUMPTION(pAppDomain != NULL);
    if (pAppDomain == NULL)
    {
        ThrowHR(E_FAIL); // corrupted left-side?
    }

    TADDR addrAppDomain = PTR_HOST_TO_TADDR(pAppDomain);
    vmAppDomain.SetDacTargetPtr(addrAppDomain);

    return vmAppDomain;
}
// Get the AppDomain ID for an AppDomain.
// Get the AppDomain ID for an AppDomain.
//
// Arguments:
//    vmAppDomain - VM pointer to the AppDomain; may be NULL.
//
// Return Value:
//    0 if vmAppDomain is NULL; otherwise the default AppDomain ID.
ULONG DacDbiInterfaceImpl::GetAppDomainId(VMPTR_AppDomain vmAppDomain)
{
    DD_ENTER_MAY_THROW;

    if (vmAppDomain.IsNull())
    {
        return 0;
    }

    // Every non-null AppDomain maps to the default ID. (The previous code also
    // fetched the DAC pointer into a local that was never read, which only
    // produced an unused-variable compiler warning.)
    return DefaultADID;
}
// Get the managed AppDomain object for an AppDomain.
// Get the managed AppDomain object for an AppDomain, as an object handle
// exposed for the debugger.
VMPTR_OBJECTHANDLE DacDbiInterfaceImpl::GetAppDomainObject(VMPTR_AppDomain vmAppDomain)
{
    DD_ENTER_MAY_THROW;

    AppDomain* pAppDomain = vmAppDomain.GetDacPtr();
    OBJECTHANDLE hAppDomainManagedObject = pAppDomain->GetRawExposedObjectHandleForDebugger();
    VMPTR_OBJECTHANDLE vmObj = VMPTR_OBJECTHANDLE::NullPtr();
    vmObj.SetDacTargetPtr(hAppDomainManagedObject);
    return vmObj;
}
// Get the full AD friendly name for the given EE AppDomain.
// Get the full AD friendly name for the given EE AppDomain, copying it into
// the caller's string holder. Throws on failure; never leaves pStrName set
// partially.
void DacDbiInterfaceImpl::GetAppDomainFullName(
    VMPTR_AppDomain vmAppDomain,
    IStringHolder * pStrName )
{
    DD_ENTER_MAY_THROW;
    AppDomain * pAppDomain = vmAppDomain.GetDacPtr();

    // Get the AppDomain name from the VM without changing anything
    // We might be able to simplify this, eg. by returning an SString.
    bool fIsUtf8;
    PVOID pRawName = pAppDomain->GetFriendlyNameNoSet(&fIsUtf8);

    if (!pRawName)
    {
        ThrowHR(E_NOINTERFACE);
    }

    HRESULT hrStatus = S_OK;
    if (fIsUtf8)
    {
        // we have to allocate a temporary string
        // we could avoid this by adding a version of IStringHolder::AssignCopy that takes a UTF8 string
        // We should also probably check to see when fIsUtf8 is ever true (it looks like it should normally be false).
        // First call computes the required wide-char length; second call converts.
        ULONG32 dwNameLen = 0;
        hrStatus = ConvertUtf8((LPCUTF8)pRawName, 0, &dwNameLen, NULL);
        if (SUCCEEDED( hrStatus ))
        {
            NewArrayHolder<WCHAR> pwszName(new WCHAR[dwNameLen]);
            hrStatus = ConvertUtf8((LPCUTF8)pRawName, dwNameLen, &dwNameLen, pwszName );
            IfFailThrow(hrStatus);

            hrStatus = pStrName->AssignCopy(pwszName);
        }
    }
    else
    {
        hrStatus = pStrName->AssignCopy(static_cast<PCWSTR>(pRawName));
    }

    // Very important that this either sets pStrName or Throws.
    // Don't set it and then throw.
    IfFailThrow(hrStatus);
}
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// JIT Compiler Flags
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// Get the values of the JIT Optimization and EnC flags.
// Get the values of the JIT Optimization and EnC flags for the module backing
// the given DomainAssembly. Throws E_FAIL if the DomainAssembly is null.
void DacDbiInterfaceImpl::GetCompilerFlags (
    VMPTR_DomainAssembly vmDomainAssembly,
    BOOL *pfAllowJITOpts,
    BOOL *pfEnableEnC)
{
    DD_ENTER_MAY_THROW;

    DomainAssembly * pDomainAssembly = vmDomainAssembly.GetDacPtr();
    if (pDomainAssembly == NULL)
    {
        ThrowHR(E_FAIL);
    }

    // Get the underlying module - none of this is AppDomain specific
    Module * pModule = pDomainAssembly->GetModule();
    DWORD dwBits = pModule->GetDebuggerInfoBits();
    *pfAllowJITOpts = !CORDisableJITOptimizations(dwBits);
    *pfEnableEnC = pModule->IsEditAndContinueEnabled();
} //GetCompilerFlags
//-----------------------------------------------------------------------------
// Helper function for SetCompilerFlags to set EnC status.
// Arguments:
// Input:
// pModule - The runtime module for which flags are being set.
//
// Return value:
// true if the Enc bits can be set on this module
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Helper function for SetCompilerFlags to decide whether the EnC bits may be
// set on the given module.
// Arguments:
//    Input:
//        pModule - The runtime module for which flags are being set.
// Return value:
//    true if the EnC bits can be set on this module
//-----------------------------------------------------------------------------
bool DacDbiInterfaceImpl::CanSetEnCBits(Module * pModule)
{
    _ASSERTE(pModule != NULL);
#ifdef EnC_SUPPORTED
    // If we're using explicit sequence points (from the PDB), then we can't do EnC
    // because EnC won't get updated pdbs and so the sequence points will be wrong.
    bool fIgnorePdbs = ((pModule->GetDebuggerInfoBits() & DACF_IGNORE_PDBS) != 0);

    bool fAllowEnc = pModule->IsEditAndContinueCapable() &&

#ifdef PROFILING_SUPPORTED_DATA
        !CORProfilerPresent() && // this queries target
#endif
        fIgnorePdbs;
#else  // ! EnC_SUPPORTED
    // Enc not supported on any other platforms.
    bool fAllowEnc = false;
#endif

    return fAllowEnc;
} // DacDbiInterfaceImpl::CanSetEnCBits
// Set the values of the JIT optimization and EnC flags.
// Set the values of the JIT optimization and EnC flags. Writes the updated
// debugger info bits back to the target. Returns CORDBG_S_NOT_ALL_BITS_SET
// if EnC was requested but cannot be enabled on this module.
HRESULT DacDbiInterfaceImpl::SetCompilerFlags(VMPTR_DomainAssembly vmDomainAssembly,
                                              BOOL fAllowJitOpts,
                                              BOOL fEnableEnC)
{
    DD_ENTER_MAY_THROW;

    DWORD dwBits = 0;
    DomainAssembly * pDomainAssembly = vmDomainAssembly.GetDacPtr();
    Module * pModule = pDomainAssembly->GetModule();
    HRESULT hr = S_OK;

    _ASSERTE(pModule != NULL);

    // Initialize dwBits: start from the current bits with the two flags we
    // control stripped out.
    dwBits = (pModule->GetDebuggerInfoBits() & ~(DACF_ALLOW_JIT_OPTS | DACF_ENC_ENABLED));
    dwBits &= DACF_CONTROL_FLAGS_MASK;

    if (fAllowJitOpts)
    {
        dwBits |= DACF_ALLOW_JIT_OPTS;
    }
    if (fEnableEnC)
    {
        if (CanSetEnCBits(pModule))
        {
            dwBits |= DACF_ENC_ENABLED;
        }
        else
        {
            hr = CORDBG_S_NOT_ALL_BITS_SET;
        }
    }

    // Settings from the debugger take precedence over all other settings.
    dwBits |= DACF_USER_OVERRIDE;

    // set flags. This will write back to the target
    pModule->SetDebuggerInfoBits((DebuggerAssemblyControlFlags)dwBits);

    LOG((LF_CORDB, LL_INFO100, "D::HIPCE, Changed Jit-Debug-Info: fOpt=%d, fEnableEnC=%d, new bits=0x%08x\n",
         (dwBits & DACF_ALLOW_JIT_OPTS) != 0,
         (dwBits & DACF_ENC_ENABLED) != 0,
         dwBits));

    _ASSERTE(SUCCEEDED(hr));
    return hr;
} // DacDbiInterfaceImpl::SetCompilerFlags
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// sequence points and var info
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// Initialize the native/IL sequence points and native var info for a function.
// Initialize the native/IL sequence points and native var info for a function
// identified by its MethodDesc and native code start address. Both out
// structures are filled in; may throw.
void DacDbiInterfaceImpl::GetNativeCodeSequencePointsAndVarInfo(VMPTR_MethodDesc vmMethodDesc,
                                                               CORDB_ADDRESS startAddr,
                                                               BOOL fCodeAvailable,
                                                               NativeVarData * pNativeVarData,
                                                               SequencePoints * pSequencePoints)
{
    DD_ENTER_MAY_THROW;

    _ASSERTE(!vmMethodDesc.IsNull());
    MethodDesc * pMD = vmMethodDesc.GetDacPtr();
    _ASSERTE(fCodeAvailable != 0);

    // get information about the locations of arguments and local variables
    GetNativeVarData(pMD, startAddr, GetArgCount(pMD), pNativeVarData);

    // get the sequence points
    GetSequencePoints(pMD, startAddr, pSequencePoints);
} // GetNativeCodeSequencePointsAndVarInfo
//-----------------------------------------------------------------------------
// Get the number of fixed arguments to a function, i.e., the explicit args and the "this" pointer.
// This does not include other implicit arguments or varargs. This is used to compute a variable ID
// (see comment in CordbJITILFrame::ILVariableToNative for more detail)
// Arguments:
// input: pMD pointer to the method desc for the function
// output: none
// Return value:
// the number of fixed arguments to the function
//-----------------------------------------------------------------------------
// Get the number of fixed arguments to a function: the explicit args plus the
// 'this' pointer for instance methods. Returns 0 if the signature cannot be
// read (corrupted image).
SIZE_T DacDbiInterfaceImpl::GetArgCount(MethodDesc * pMD)
{
    // Create a MetaSig for the given method's sig. (Easier than
    // picking the sig apart ourselves.)
    PCCOR_SIGNATURE pCallSig;
    DWORD cbCallSigSize;

    pMD->GetSig(&pCallSig, &cbCallSigSize);

    if (pCallSig == NULL)
    {
        // Sig should only be null if the image is corrupted. (Even for lightweight-codegen)
        // We expect the jit+verifier to catch this, so that we never land here.
        // But just in case ...
        CONSISTENCY_CHECK_MSGF(false, ("Corrupted image, null sig.(%s::%s)",
                               pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
        return 0;
    }

    MetaSig msig(pCallSig, cbCallSigSize, pMD->GetModule(), NULL, MetaSig::sigMember);

    // Get the arg count.
    UINT32 NumArguments = msig.NumFixedArgs();

    // Account for the 'this' argument.
    if (!pMD->IsStatic())
    {
        NumArguments++;
    }
    /*
    SigParser sigParser(pCallSig, cbCallSigSize);
    sigParser.SkipMethodHeaderSignature(&m_allArgsCount);
    */
    return NumArguments;
} //GetArgCount
// Allocator to pass to the debug-info-stores...
// Allocation callback handed to the debug-info store; the pData cookie is
// unused here. Memory is owned by the caller (freed via the holders that wrap
// the returned buffers).
BYTE* InfoStoreNew(void * pData, size_t cBytes)
{
    return new BYTE[cBytes];
}
//-----------------------------------------------------------------------------
// Get locations and code offsets for local variables and arguments in a function
// This information is used to find the location of a value at a given IP.
// Arguments:
// input:
// pMethodDesc pointer to the method desc for the function
// startAddr starting address of the function--used to differentiate
// EnC versions
// fixedArgCount number of fixed arguments to the function
// output:
// pVarInfo data structure containing a list of variable and
// argument locations by range of IP offsets
// Note: this function may throw
//-----------------------------------------------------------------------------
// Get locations and code offsets for local variables and arguments in a
// function, used to find the location of a value at a given IP. No-op if
// pVarInfo was already initialized; throws E_FAIL if the debug info lookup
// fails.
void DacDbiInterfaceImpl::GetNativeVarData(MethodDesc * pMethodDesc,
                                           CORDB_ADDRESS startAddr,
                                           SIZE_T fixedArgCount,
                                           NativeVarData * pVarInfo)
{
    // make sure we haven't done this already
    if (pVarInfo->IsInitialized())
    {
        return;
    }

    NewHolder<ICorDebugInfo::NativeVarInfo> nativeVars(NULL);

    DebugInfoRequest request;
    request.InitFromStartingAddr(pMethodDesc, CORDB_ADDRESS_TO_TADDR(startAddr));

    ULONG32 entryCount;
    // Only the vars side of the debug info is requested (boundary out-params are NULL).
    BOOL success = DebugInfoManager::GetBoundariesAndVars(request,
                                                          InfoStoreNew, NULL, // allocator
                                                          NULL, NULL,
                                                          &entryCount, &nativeVars);

    if (!success)
        ThrowHR(E_FAIL);

    // set key fields of pVarInfo
    pVarInfo->InitVarDataList(nativeVars, (int)fixedArgCount, (int)entryCount);
} // GetNativeVarData
//-----------------------------------------------------------------------------
// Given a instrumented IL map from the profiler that maps:
// Original offset IL_A -> Instrumentend offset IL_B
// And a native mapping from the JIT that maps:
// Instrumented offset IL_B -> native offset Native_C
// This function merges the two maps and stores the result back into the nativeMap.
// The nativeMap now maps:
// Original offset IL_A -> native offset Native_C
// pEntryCount is the number of valid entries in nativeMap, and it may be adjusted downwards
// as part of the composition.
//-----------------------------------------------------------------------------
// Compose a profiler's original->instrumented IL map with the JIT's
// instrumented-IL->native map, in place, so nativeMap ends up mapping
// original IL offsets to native offsets. *pEntryCount may shrink when
// duplicate original offsets are removed.
void DacDbiInterfaceImpl::ComposeMapping(const InstrumentedILOffsetMapping * pProfilerILMap, ICorDebugInfo::OffsetMapping nativeMap[], ULONG32* pEntryCount)
{
    // Translate the IL offset if the profiler has provided us with a mapping.
    // The ICD public API should always expose the original IL offsets, but GetBoundaries()
    // directly accesses the debug info, which stores the instrumented IL offsets.
    ULONG32 entryCount = *pEntryCount;
    // The map pointer could be NULL or there could be no entries in the map, in either case no work to do
    if (pProfilerILMap && !pProfilerILMap->IsNull())
    {
        // If we did instrument, then we can't have any sequence points that
        // are "in-between" the old-->new map that the profiler gave us.
        // Ex, if map is:
        // (6 old -> 36 new)
        // (8 old -> 50 new)
        // And the jit gives us an entry for 44 new, that will map back to 6 old.
        // Since the map can only have one entry for 6 old, we remove 44 new.

        // First Pass: invalidate all the duplicate entries by setting their IL offset to MAX_ILNUM
        ULONG32 cDuplicate = 0;
        ULONG32 prevILOffset = (ULONG32)(ICorDebugInfo::MAX_ILNUM);
        for (ULONG32 i = 0; i < entryCount; i++)
        {
            ULONG32 origILOffset = TranslateInstrumentedILOffsetToOriginal(nativeMap[i].ilOffset, pProfilerILMap);

            if (origILOffset == prevILOffset)
            {
                // mark this sequence point as invalid; refer to the comment above
                nativeMap[i].ilOffset = (ULONG32)(ICorDebugInfo::MAX_ILNUM);
                cDuplicate += 1;
            }
            else
            {
                // overwrite the instrumented IL offset with the original IL offset
                nativeMap[i].ilOffset = origILOffset;
                prevILOffset = origILOffset;
            }
        }

        // Second Pass: move all the valid entries up front
        ULONG32 realIndex = 0;
        for (ULONG32 curIndex = 0; curIndex < entryCount; curIndex++)
        {
            if (nativeMap[curIndex].ilOffset != (ULONG32)(ICorDebugInfo::MAX_ILNUM))
            {
                // This is a valid entry. Move it up front.
                nativeMap[realIndex] = nativeMap[curIndex];
                realIndex += 1;
            }
        }

        // make sure we have done the bookkeeping correctly
        _ASSERTE((realIndex + cDuplicate) == entryCount);

        // Final Pass: decrement entryCount by the number of removed duplicates
        entryCount -= cDuplicate;
        *pEntryCount = entryCount;
    }
}
//-----------------------------------------------------------------------------
// Get the native/IL sequence points for a function
// Arguments:
// input:
// pMethodDesc pointer to the method desc for the function
// startAddr starting address of the function--used to differentiate
// output:
// pNativeMap data structure containing a list of sequence points
// Note: this function may throw
//-----------------------------------------------------------------------------
// Get the native/IL sequence points for a function, applying any ReJIT or
// profiler load-time IL-offset mapping before handing the results to
// pSeqPoints. No-op if pSeqPoints was already initialized; may throw.
void DacDbiInterfaceImpl::GetSequencePoints(MethodDesc * pMethodDesc,
                                            CORDB_ADDRESS startAddr,
                                            SequencePoints * pSeqPoints)
{
    // make sure we haven't done this already
    if (pSeqPoints->IsInitialized())
    {
        return;
    }

    // Use the DebugInfoStore to get IL->Native maps.
    // It doesn't matter whether we're jitted, ngenned etc.
    DebugInfoRequest request;
    request.InitFromStartingAddr(pMethodDesc, CORDB_ADDRESS_TO_TADDR(startAddr));

    // Bounds info.
    NewArrayHolder<ICorDebugInfo::OffsetMapping> mapCopy(NULL);

    ULONG32 entryCount;
    BOOL success = DebugInfoManager::GetBoundariesAndVars(request,
                                                          InfoStoreNew, NULL, // allocator
                                                          &entryCount, &mapCopy,
                                                          NULL, NULL);

    if (!success)
        ThrowHR(E_FAIL);

#ifdef FEATURE_REJIT
    CodeVersionManager * pCodeVersionManager = pMethodDesc->GetCodeVersionManager();
    ILCodeVersion ilVersion;
    NativeCodeVersion nativeCodeVersion = pCodeVersionManager->GetNativeCodeVersion(dac_cast<PTR_MethodDesc>(pMethodDesc), (PCODE)startAddr);
    if (!nativeCodeVersion.IsNull())
    {
        ilVersion = nativeCodeVersion.GetILCodeVersion();
    }

    // if there is a rejit IL map for this function, apply that in preference to load-time mapping
    if (!ilVersion.IsNull() && !ilVersion.IsDefaultVersion())
    {
        const InstrumentedILOffsetMapping * pRejitMapping = ilVersion.GetInstrumentedILMap();
        ComposeMapping(pRejitMapping, mapCopy, &entryCount);
    }
    else
    {
#endif
        // if there is a profiler load-time mapping and not a rejit mapping, apply that instead
        InstrumentedILOffsetMapping loadTimeMapping =
            pMethodDesc->GetModule()->GetInstrumentedILOffsetMapping(pMethodDesc->GetMemberDef());
        ComposeMapping(&loadTimeMapping, mapCopy, &entryCount);
#ifdef FEATURE_REJIT
    }
#endif

    pSeqPoints->InitSequencePoints(entryCount);

    // mapCopy and pSeqPoints have elements of different types. Thus, we
    // need to copy the individual members from the elements of mapCopy to the
    // elements of pSeqPoints. Once we're done, we can release mapCopy
    pSeqPoints->CopyAndSortSequencePoints(mapCopy);
} // GetSequencePoints
// ----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TranslateInstrumentedILOffsetToOriginal
//
// Description:
// Helper function to convert an instrumented IL offset to the corresponding original IL offset.
//
// Arguments:
// * ilOffset - offset to be translated
// * pMapping - the profiler-provided mapping between original IL offsets and instrumented IL offsets
//
// Return Value:
// Return the translated offset.
//
// Convert an instrumented IL offset to the corresponding original IL offset
// using the profiler-provided mapping. Special (negative) offsets and the
// no-mapping case pass through unchanged.
ULONG DacDbiInterfaceImpl::TranslateInstrumentedILOffsetToOriginal(ULONG ilOffset,
                                                                   const InstrumentedILOffsetMapping * pMapping)
{
    SIZE_T cMap = pMapping->GetCount();
    ARRAY_PTR_COR_IL_MAP rgMap = pMapping->GetOffsets();
    _ASSERTE((cMap == 0) == (rgMap == NULL));

    // No translation when there is no map, or when ilOffset is one of the
    // special negative markers (prolog, epilog, etc.).
    if ((cMap == 0) || ((int)ilOffset < 0))
    {
        return ilOffset;
    }

    // Find the first entry whose instrumented ("new") offset lies beyond
    // ilOffset; the entry just before it supplies the original offset.
    for (SIZE_T idx = 1; idx < cMap; idx++)
    {
        if (ilOffset < rgMap[idx].newOffset)
        {
            return rgMap[idx - 1].oldOffset;
        }
    }

    // ilOffset is at or past the last entry's instrumented offset.
    return rgMap[cMap - 1].oldOffset;
}
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// Function Data
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// GetILCodeAndSig returns the function's ILCode and SigToken given
// a module and a token. The info will come from a MethodDesc, if
// one exists or from metadata.
//
// GetILCodeAndSig returns the function's IL code (target buffer) and local
// variable signature token, given a module and a method token. The info comes
// from a MethodDesc if one exists, otherwise from metadata. Throws
// CORDBG_E_FUNCTION_NOT_IL for non-IL methods.
void DacDbiInterfaceImpl::GetILCodeAndSig(VMPTR_DomainAssembly vmDomainAssembly,
                                          mdToken functionToken,
                                          TargetBuffer * pCodeInfo,
                                          mdToken * pLocalSigToken)
{
    DD_ENTER_MAY_THROW;

    DomainAssembly * pDomainAssembly = vmDomainAssembly.GetDacPtr();
    Module * pModule = pDomainAssembly->GetModule();
    RVA methodRVA = 0;
    DWORD implFlags;

    // preinitialize out params
    pCodeInfo->Clear();
    *pLocalSigToken = mdSignatureNil;

    // Get the RVA and impl flags for this method.
    IfFailThrow(pModule->GetMDImport()->GetMethodImplProps(functionToken,
                                                           &methodRVA,
                                                           &implFlags));

    MethodDesc* pMethodDesc =
        FindLoadedMethodRefOrDef(pModule, functionToken);

    // If the RVA is 0 or it's native, then the method is not IL
    if (methodRVA == 0)
    {
        LOG((LF_CORDB,LL_INFO100000, "DDI::GICAS: Function is not IL - methodRVA == NULL!\n"));
        // return (CORDBG_E_FUNCTION_NOT_IL);
        // Sanity check this....
        if(!pMethodDesc || !pMethodDesc->IsIL())
        {
            LOG((LF_CORDB,LL_INFO100000, "DDI::GICAS: And the MD agrees..\n"));
            ThrowHR(CORDBG_E_FUNCTION_NOT_IL);
        }
        else
        {
            LOG((LF_CORDB,LL_INFO100000, "DDI::GICAS: But the MD says it's IL..\n"));
        }

        if (pMethodDesc != NULL && pMethodDesc->GetRVA() == 0)
        {
            LOG((LF_CORDB,LL_INFO100000, "DDI::GICAS: Actually, MD says RVA is 0 too - keep going...!\n"));
        }
    }
    if (IsMiNative(implFlags))
    {
        LOG((LF_CORDB,LL_INFO100000, "DDI::GICAS: Function is not IL - IsMiNative!\n"));
        ThrowHR(CORDBG_E_FUNCTION_NOT_IL);
    }

    *pLocalSigToken = GetILCodeAndSigHelper(pModule, pMethodDesc, functionToken, methodRVA, pCodeInfo);

    // NOTE(review): the 'else' below has no matching 'if' and would not compile
    // when LOGGING is defined; it looks like leftover dead code from an earlier
    // error-handling shape — confirm before enabling LOGGING builds.
#ifdef LOGGING
    else
    {
        LOG((LF_CORDB,LL_INFO100000, "DDI::GICAS: GetMethodImplProps failed!\n"));
    }
#endif
} // GetILCodeAndSig
//---------------------------------------------------------------------------------------
//
// This is just a worker function for GetILCodeAndSig. It returns the function's ILCode and SigToken
// given a module, a token, and the RVA. If a MethodDesc is provided, it has to be consistent with
// the token and the RVA.
//
// Arguments:
// pModule - the Module containing the specified method
// pMD - the specified method; can be NULL
// mdMethodToken - the MethodDef token of the specified method
// methodRVA - the RVA of the IL for the specified method
// pIL - out parameter; return the target address and size of the IL of the specified method
//
// Return Value:
// Return the local variable signature token of the specified method. Can be mdSignatureNil.
//
// Worker for GetILCodeAndSig: fills pIL with the target address/size of the
// method's IL and returns its local variable signature token (possibly
// mdSignatureNil). If a MethodDesc is supplied it must be consistent with the
// token and RVA.
mdSignature DacDbiInterfaceImpl::GetILCodeAndSigHelper(Module * pModule,
                                                       MethodDesc * pMD,
                                                       mdMethodDef mdMethodToken,
                                                       RVA methodRVA,
                                                       TargetBuffer * pIL)
{
    _ASSERTE(pModule != NULL);

    // If a MethodDesc is provided, it has to be consistent with the MethodDef token and the RVA.
    _ASSERTE((pMD == NULL) || ((pMD->GetMemberDef() == mdMethodToken) && (pMD->GetRVA() == methodRVA)));

    TADDR pTargetIL; // target address of start of IL blob

    // This works for methods in dynamic modules, and methods overriden by a profiler.
    pTargetIL = pModule->GetDynamicIL(mdMethodToken, TRUE);

    // Method not overriden - get the original copy of the IL by going to the PE file/RVA
    // If this is in a dynamic module then don't even attempt this since ReflectionModule::GetIL isn't
    // implemented for DAC.
    if (pTargetIL == 0 && !pModule->IsReflection())
    {
        pTargetIL = (TADDR)pModule->GetIL(methodRVA);
    }

    mdSignature mdSig = mdSignatureNil;
    if (pTargetIL == 0)
    {
        // Currently this should only happen for LCG methods (including IL stubs).
        // LCG methods have a 0 RVA, and so we don't currently have any way to get the IL here.
        _ASSERTE(pMD->IsDynamicMethod());
        _ASSERTE(pMD->AsDynamicMethodDesc()->IsLCGMethod()||
                 pMD->AsDynamicMethodDesc()->IsILStub());

        // Clear the buffer.
        pIL->Clear();
    }
    else
    {
        // Now we have the target address of the IL blob, we need to bring it over to the host.
        // DacGetILMethod will copy the COR_ILMETHOD information that we need
        COR_ILMETHOD * pHostIL = DacGetIlMethod(pTargetIL); // host address of start of IL blob
        COR_ILMETHOD_DECODER header(pHostIL); // host address of header

        // Get the IL code info. We need the address of the IL itself, which will be beyond the header
        // at the beginning of the blob. We ultimately need the target address. To get this, we take
        // target address of the target IL blob and add the offset from the beginning of the host IL blob
        // (the header) to the beginning of the IL itself (we get this information from the header).
        pIL->pAddress = pTargetIL + ((SIZE_T)(header.Code) - (SIZE_T)pHostIL);
        pIL->cbSize = header.GetCodeSize();

        // Now we get the signature token
        if (header.LocalVarSigTok != NULL)
        {
            mdSig = header.GetLocalVarSigTok();
        }
        else
        {
            mdSig = mdSignatureNil;
        }
    }

    return mdSig;
}
// DacDbi API: retrieve metadata file information (timestamp, size, NGEN flag, path)
// for the given PEAssembly by delegating to ClrDataAccess::GetMetaDataFileInfoFromPEFile.
// Returns false if the PEAssembly pointer is NULL or the lookup fails.
bool DacDbiInterfaceImpl::GetMetaDataFileInfoFromPEFile(VMPTR_PEAssembly vmPEAssembly,
                                                        DWORD &dwTimeStamp,
                                                        DWORD &dwSize,
                                                        bool &isNGEN,
                                                        IStringHolder* pStrFilename)
{
    DD_ENTER_MAY_THROW;
    // dwDataSize and dwRvaHint are produced by the helper but not surfaced to the caller.
    DWORD dwDataSize;
    DWORD dwRvaHint;
    PEAssembly * pPEAssembly = vmPEAssembly.GetDacPtr();
    _ASSERTE(pPEAssembly != NULL);
    if (pPEAssembly == NULL)
        return false;
    WCHAR wszFilePath[MAX_LONGPATH] = {0};
    DWORD cchFilePath = MAX_LONGPATH;
    bool ret = ClrDataAccess::GetMetaDataFileInfoFromPEFile(pPEAssembly,
                                                            dwTimeStamp,
                                                            dwSize,
                                                            dwDataSize,
                                                            dwRvaHint,
                                                            isNGEN,
                                                            wszFilePath,
                                                            cchFilePath);
    // Copy the file path into the caller-supplied string holder regardless of ret;
    // on failure this is the (zero-initialized) empty string.
    pStrFilename->AssignCopy(wszFilePath);
    return ret;
}
// DacDbi API: intentionally unimplemented here -- always reports failure without
// touching the out parameters. (Presumably NGEN image lookup is unsupported in this
// build configuration -- confirm against the interface contract.)
bool DacDbiInterfaceImpl::GetILImageInfoFromNgenPEFile(VMPTR_PEAssembly vmPEAssembly,
                                                       DWORD &dwTimeStamp,
                                                       DWORD &dwSize,
                                                       IStringHolder* pStrFilename)
{
    return false;
}
// Get start addresses and sizes for hot and cold regions for a native code blob.
// Arguments:
// Input:
// pMethodDesc - method desc for the function we are inspecting
// Output (required):
// pCodeInfo - initializes the m_rgCodeRegions field of this structure
// if the native code is available. Otherwise,
// pCodeInfo->IsValid() is false.
void DacDbiInterfaceImpl::GetMethodRegionInfo(MethodDesc * pMethodDesc,
                                              NativeCodeFunctionData * pCodeInfo)
{
    CONTRACTL
    {
        GC_NOTRIGGER;
        PRECONDITION(CheckPointer(pCodeInfo));
    }
    CONTRACTL_END;
    IJitManager::MethodRegionInfo methodRegionInfo = {NULL, 0, NULL, 0};
    PCODE functionAddress = pMethodDesc->GetNativeCode();
    // get the start address of the hot region and initialize the jit manager
    pCodeInfo->m_rgCodeRegions[kHot].pAddress = CORDB_ADDRESS(PCODEToPINSTR(functionAddress));
    // if the start address is NULL, the code isn't available yet, so just return
    if (functionAddress != NULL)
    {
        EECodeInfo codeInfo(functionAddress);
        _ASSERTE(codeInfo.IsValid());
        codeInfo.GetMethodRegionInfo(&methodRegionInfo);
        // now get the rest of the region information: hot region size plus the
        // cold region's start address and size (cold region may be empty).
        pCodeInfo->m_rgCodeRegions[kHot].cbSize = (ULONG)methodRegionInfo.hotSize;
        pCodeInfo->m_rgCodeRegions[kCold].Init(PCODEToPINSTR(methodRegionInfo.coldStartAddress),
                                               (ULONG)methodRegionInfo.coldSize);
        _ASSERTE(pCodeInfo->IsValid());
    }
    else
    {
        // No native code yet; pCodeInfo stays in its invalid (cleared) state.
        _ASSERTE(!pCodeInfo->IsValid());
    }
} // GetMethodRegionInfo
// Gets the following information about a native code blob:
// - its method desc
// - whether it's an instantiated generic
// - its EnC version number
// - hot and cold region information.
// If the hot region start address is NULL at the end, it means the native code
// isn't currently available. In this case, all values in pCodeInfo will be
// cleared.
void DacDbiInterfaceImpl::GetNativeCodeInfo(VMPTR_DomainAssembly vmDomainAssembly,
                                            mdToken functionToken,
                                            NativeCodeFunctionData * pCodeInfo)
{
    DD_ENTER_MAY_THROW;
    _ASSERTE(pCodeInfo != NULL);
    // pre-initialize: clear all fields so a missing method/code leaves pCodeInfo invalid.
    pCodeInfo->Clear();
    DomainAssembly * pDomainAssembly = vmDomainAssembly.GetDacPtr();
    Module * pModule = pDomainAssembly->GetModule();
    MethodDesc* pMethodDesc = FindLoadedMethodRefOrDef(pModule, functionToken);
    pCodeInfo->vmNativeCodeMethodDescToken.SetHostPtr(pMethodDesc);
    // if we are loading a module and trying to bind a previously set breakpoint, we may not have
    // a method desc yet, so check for that situation
    if(pMethodDesc != NULL)
    {
        GetMethodRegionInfo(pMethodDesc, pCodeInfo);
        // Only look up EnC versions when native code actually exists.
        if (pCodeInfo->m_rgCodeRegions[kHot].pAddress != NULL)
        {
            pCodeInfo->isInstantiatedGeneric = pMethodDesc->HasClassOrMethodInstantiation();
            LookupEnCVersions(pModule,
                              pCodeInfo->vmNativeCodeMethodDescToken,
                              functionToken,
                              pCodeInfo->m_rgCodeRegions[kHot].pAddress,
                              &(pCodeInfo->encVersion));
        }
    }
} // GetNativeCodeInfo
// Gets the following information about a native code blob:
// - its method desc
// - whether it's an instantiated generic
// - its EnC version number
// - hot and cold region information.
void DacDbiInterfaceImpl::GetNativeCodeInfoForAddr(VMPTR_MethodDesc vmMethodDesc,
                                                   CORDB_ADDRESS hotCodeStartAddr,
                                                   NativeCodeFunctionData * pCodeInfo)
{
    DD_ENTER_MAY_THROW;
    _ASSERTE(pCodeInfo != NULL);
    if (hotCodeStartAddr == NULL)
    {
        // if the start address is NULL, the code isn't available yet, so just return
        _ASSERTE(!pCodeInfo->IsValid());
        return;
    }
    IJitManager::MethodRegionInfo methodRegionInfo = {NULL, 0, NULL, 0};
    TADDR codeAddr = CORDB_ADDRESS_TO_TADDR(hotCodeStartAddr);
#ifdef TARGET_ARM
    // TADDR should not have the thumb code bit set.
    _ASSERTE((codeAddr & THUMB_CODE) == 0);
    codeAddr &= ~THUMB_CODE;
#endif
    EECodeInfo codeInfo(codeAddr);
    _ASSERTE(codeInfo.IsValid());
    // We may not have the memory for the cold code region in a minidump.
    // Do not fail stackwalking because of this.
    EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY
    {
        codeInfo.GetMethodRegionInfo(&methodRegionInfo);
    }
    EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY;
    // Even if GetMethodRegionInfo() fails to retrieve the cold code region info,
    // we should still be able to get the hot code region info. We are counting on this for
    // stackwalking to work in dump debugging scenarios.
    _ASSERTE(methodRegionInfo.hotStartAddress == codeAddr);
    // now get the rest of the region information
    pCodeInfo->m_rgCodeRegions[kHot].Init(PCODEToPINSTR(methodRegionInfo.hotStartAddress),
                                          (ULONG)methodRegionInfo.hotSize);
    pCodeInfo->m_rgCodeRegions[kCold].Init(PCODEToPINSTR(methodRegionInfo.coldStartAddress),
                                           (ULONG)methodRegionInfo.coldSize);
    _ASSERTE(pCodeInfo->IsValid());
    // Fill in the remaining method identity: MethodDesc, generic-ness, and EnC version.
    MethodDesc* pMethodDesc = vmMethodDesc.GetDacPtr();
    pCodeInfo->isInstantiatedGeneric = pMethodDesc->HasClassOrMethodInstantiation();
    pCodeInfo->vmNativeCodeMethodDescToken = vmMethodDesc;
    SIZE_T unusedLatestEncVersion;
    Module * pModule = pMethodDesc->GetModule();
    _ASSERTE(pModule != NULL);
    LookupEnCVersions(pModule,
                      vmMethodDesc,
                      pMethodDesc->GetMemberDef(),
                      codeAddr,
                      &unusedLatestEncVersion, //unused by caller
                      &(pCodeInfo->encVersion));
} // GetNativeCodeInfoForAddr
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
//
// Functions to get Type and Class information
//
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
//-----------------------------------------------------------------------------
//DacDbiInterfaceImpl::GetTypeHandles
// Get the approximate and exact type handles for a type
// Arguments:
// input:
// vmThExact - VMPTR of the exact type handle. If this method is called
// to get information for a new generic instantiation, this will already
// be initialized. If it's called to get type information for an arbitrary
// type (i.e., called to initialize an instance of CordbClass), it will be NULL
// vmThApprox - VMPTR of the approximate type handle. If this method is called
// to get information for a new generic instantiation, this will already
// be initialized. If it's called to get type information for an arbitrary
// type (i.e., called to initialize an instance of CordbClass), it will be NULL
// output:
// pThExact - handle for exact type information for a generic instantiation
// pThApprox - handle for type information
// Notes:
// pThExact and pTHApprox must be pointers to existing memory.
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetTypeHandles(VMPTR_TypeHandle vmThExact,
                                         VMPTR_TypeHandle vmThApprox,
                                         TypeHandle * pThExact,
                                         TypeHandle * pThApprox)
{
    _ASSERTE((pThExact != NULL) && (pThApprox != NULL));
    // Rehydrate the target-side VMPTRs into host TypeHandles.
    *pThExact = TypeHandle::FromPtr(vmThExact.GetDacPtr());
    *pThApprox = TypeHandle::FromPtr(vmThApprox.GetDacPtr());
    // If we can't find the class, return the proper HR to the right side. Note: if the class is not a value class and
    // the class is also not restored, then we must pretend that the class is still not loaded. We are gonna let
    // unrestored value classes slide, though, and special case access to the class's parent below.
    if ((pThApprox->IsNull()) || ((!pThApprox->IsValueType()) && (!pThApprox->IsRestored())))
    {
        LOG((LF_CORDB, LL_INFO10000, "D::GASCI: class isn't loaded.\n"));
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }
    // If the exact type handle is not restored ignore it (leave *pThExact as a null handle).
    if (!pThExact->IsNull() && !pThExact->IsRestored())
    {
        *pThExact = TypeHandle();
    }
} // DacDbiInterfaceImpl::GetTypeHandles
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetTotalFieldCount
// Gets the total number of fields for a type.
// Input Argument: thApprox - type handle used to determine the number of fields
// Return Value: count of the total fields of the type.
//-----------------------------------------------------------------------------
unsigned int DacDbiInterfaceImpl::GetTotalFieldCount(TypeHandle thApprox)
{
    MethodTable *pMT = thApprox.GetMethodTable();
    // Count the instance and static fields for this class (not including parent).
    // This will not include any newly added EnC fields.
    unsigned int IFCount = pMT->GetNumIntroducedInstanceFields();
    unsigned int SFCount = pMT->GetNumStaticFields();
#ifdef EnC_SUPPORTED
    PTR_Module pModule = pMT->GetModule();
    // Stats above don't include EnC fields. So add them now.
    if (pModule->IsEditAndContinueEnabled())
    {
        PTR_EnCEEClassData pEncData =
            (dac_cast<PTR_EditAndContinueModule>(pModule))->GetEnCEEClassData(pMT, TRUE);
        if (pEncData != NULL)
        {
            _ASSERTE(pEncData->GetMethodTable() == pMT);
            // EnC only adds fields, never removes them.
            IFCount += pEncData->GetAddedInstanceFields();
            SFCount += pEncData->GetAddedStaticFields();
        }
    }
#endif
    // Total = instance fields + static fields (including any EnC-added fields above).
    return IFCount + SFCount;
} // DacDbiInterfaceImpl::GetTotalFieldCount
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::InitClassData
// initializes various values of the ClassInfo data structure, including the
// field count, generic args count, size and value class flag
// Arguments:
// input: thApprox - used to get access to all the necessary values
// fIsInstantiatedType - used to determine how to compute the size
// output: pData - contains fields to be initialized
//-----------------------------------------------------------------------------
// Initialize the ClassInfo for a type: reserve one field-list slot per field
// (instance + static, including EnC additions) and compute the instance size.
// The size is only meaningful for non-generic types or fully instantiated
// generics; open generic types report an object size of zero.
void DacDbiInterfaceImpl::InitClassData(TypeHandle thApprox,
                                        BOOL fIsInstantiatedType,
                                        ClassInfo * pData)
{
    pData->m_fieldList.Alloc(GetTotalFieldCount(thApprox));

    // An uninstantiated (open) generic type has no well-defined instance size.
    BOOL fOpenGeneric = (thApprox.GetNumGenericArgs() != 0) && !fIsInstantiatedType;

    pData->m_objectSize = 0;
    if (!fOpenGeneric)
    {
        // For generic classes the type handle routes us to the size for this
        // particular instantiation.
        pData->m_objectSize = thApprox.GetMethodTable()->GetNumInstanceFieldBytes();
    }
} // DacDbiInterfaceImpl::InitClassData
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetStaticsBases
// Gets the base table addresses for both GC and non-GC statics
// Arguments:
// input: thExact - exact type handle for the class
// pAppDomain - AppDomain in which the class is loaded
// output: ppGCStaticsBase - base pointer for GC statics
// ppNonGCStaticsBase - base pointer for non GC statics
// Notes:
// If this is a non-generic type, or an instantiated type, then we'll be able to get the static var bases
// If the typeHandle represents a generic type constructor (i.e. an uninstantiated generic class), then
// the static bases will be null (since statics are per-instantiation).
//-----------------------------------------------------------------------------
// Look up the GC and non-GC static base pointers for the given exact type.
// If the type has no statics module or no domain-local module (e.g. an
// uninstantiated generic), the out parameters are left untouched.
void DacDbiInterfaceImpl::GetStaticsBases(TypeHandle thExact,
                                          AppDomain * pAppDomain,
                                          PTR_BYTE * ppGCStaticsBase,
                                          PTR_BYTE * ppNonGCStaticsBase)
{
    MethodTable * pMT = thExact.GetMethodTable();

    Module * pStaticsModule = pMT->GetModuleForStatics();
    if (pStaticsModule == NULL)
        return;

    PTR_DomainLocalModule pLocalModule = pStaticsModule->GetDomainLocalModule();
    if (pLocalModule == NULL)
        return;

    *ppGCStaticsBase = pLocalModule->GetGCStaticsBasePointer(pMT);
    *ppNonGCStaticsBase = pLocalModule->GetNonGCStaticsBasePointer(pMT);
} // DacDbiInterfaceImpl::GetStaticsBases
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::ComputeFieldData
// Computes the field info for pFD and stores it in pcurrentFieldData
// Arguments:
// input: pFD - FieldDesc used to get necessary information
// pGCStaticsBase - base table address for GC statics
// pNonGCStaticsBase - base table address for non-GC statics
// output: pCurrentFieldData - contains fields to be initialized
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::ComputeFieldData(PTR_FieldDesc pFD,
                                           PTR_BYTE pGCStaticsBase,
                                           PTR_BYTE pNonGCStaticsBase,
                                           FieldData * pCurrentFieldData)
{
    // Record the basic identity of the field: static-ness, primitive-ness, and token.
    pCurrentFieldData->Initialize(pFD->IsStatic(), pFD->IsPrimitive(), pFD->GetMemberDef());
#ifdef EnC_SUPPORTED
    // If the field was newly introduced via EnC, and hasn't yet
    // been fixed up, then we'll send back a marker indicating
    // that it isn't yet available.
    if (pFD->IsEnCNew())
    {
        // @dbgtodo Microsoft inspection: eliminate the debugger token when ICDClass and ICDType are
        // completely DACized
        pCurrentFieldData->m_vmFieldDesc.SetHostPtr(pFD);
        pCurrentFieldData->m_fFldStorageAvailable = FALSE;
        pCurrentFieldData->m_fFldIsTLS = FALSE;
        pCurrentFieldData->m_fFldIsRVA = FALSE;
        pCurrentFieldData->m_fFldIsCollectibleStatic = FALSE;
    }
    else
#endif // EnC_SUPPORTED
    {
        // Otherwise, we'll compute the info & send it back.
        pCurrentFieldData->m_fFldStorageAvailable = TRUE;
        // @dbgtodo Microsoft inspection: eliminate the debugger token when ICDClass and ICDType are
        // completely DACized
        pCurrentFieldData->m_vmFieldDesc.SetHostPtr(pFD);
        pCurrentFieldData->m_fFldIsTLS = (pFD->IsThreadStatic() == TRUE);
        pCurrentFieldData->m_fFldIsRVA = (pFD->IsRVA() == TRUE);
        pCurrentFieldData->m_fFldIsCollectibleStatic = (pFD->IsStatic() == TRUE &&
            pFD->GetEnclosingMethodTable()->Collectible());
        // Compute the address of the field
        if (pFD->IsStatic())
        {
            // statics are addressed using an absolute address.
            if (pFD->IsRVA())
            {
                // RVA statics are relative to a base module address
                DWORD offset = pFD->GetOffset();
                PTR_VOID addr = pFD->GetModule()->GetRvaField(offset);
                if (pCurrentFieldData->OkToGetOrSetStaticAddress())
                {
                    pCurrentFieldData->SetStaticAddress(PTR_TO_TADDR(addr));
                }
            }
            else if (pFD->IsThreadStatic() ||
                pCurrentFieldData->m_fFldIsCollectibleStatic)
            {
                // this is a special type of static that must be queried using DB_IPCE_GET_SPECIAL_STATIC
                // (no address is recorded here; the debugger makes a separate request).
            }
            else
            {
                // This is a normal static variable in the GC or Non-GC static base table.
                // Primitive (non-GC-tracked) fields live in the non-GC table.
                PTR_BYTE base = pFD->IsPrimitive() ? pNonGCStaticsBase : pGCStaticsBase;
                if (base == NULL)
                {
                    // static var not available. This may be an open generic class (not an instantiated type),
                    // or we might only have approximate type information because the type hasn't been
                    // initialized yet.
                    if (pCurrentFieldData->OkToGetOrSetStaticAddress())
                    {
                        pCurrentFieldData->SetStaticAddress(NULL);
                    }
                }
                else
                {
                    if (pCurrentFieldData->OkToGetOrSetStaticAddress())
                    {
                        // calculate the absolute address using the base and the offset from the base
                        pCurrentFieldData->SetStaticAddress(PTR_TO_TADDR(base) + pFD->GetOffset());
                    }
                }
            }
        }
        else
        {
            // instance variables are addressed using an offset within the instance
            if (pCurrentFieldData->OkToGetOrSetInstanceOffset())
            {
                pCurrentFieldData->SetInstanceOffset(pFD->GetOffset());
            }
        }
    }
} // DacDbiInterfaceImpl::ComputeFieldData
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::CollectFields
// Gets information for all the fields for a given type
// Arguments:
// input: thExact - used to determine whether we need to get statics base tables
// thApprox - used to get the field desc iterator
// pAppDomain - used to get statics base tables
// output:
// pFieldList - contains fields to be initialized
// Note: the caller must ensure that *ppFields is NULL (i.e., any previously allocated memory
// must have been deallocated.
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::CollectFields(TypeHandle thExact,
                                        TypeHandle thApprox,
                                        AppDomain * pAppDomain,
                                        DacDbiArrayList<FieldData> * pFieldList)
{
    PTR_BYTE pGCStaticsBase = NULL;
    PTR_BYTE pNonGCStaticsBase = NULL;
    // Statics bases only exist for exact, non-collectible types; collectible
    // statics are resolved through a separate request (see ComputeFieldData).
    if (!thExact.IsNull() && !thExact.GetMethodTable()->Collectible())
    {
        // get base tables for static fields
        GetStaticsBases(thExact, pAppDomain, &pGCStaticsBase, &pNonGCStaticsBase);
    }
    unsigned int fieldCount = 0;
    // <TODO> we are losing exact type information for static fields in generic types. We have
    // field desc iterators only for approximate types, but statics are per instantiation, so we
    // need an exact type to be able to handle these correctly. We need to use
    // FieldDesc::GetExactDeclaringType to get at the correct field. This requires the exact
    // TypeHandle. </TODO>
    EncApproxFieldDescIterator fdIterator(thApprox.GetMethodTable(),
                                          ApproxFieldDescIterator::ALL_FIELDS,
                                          FALSE);  // don't fixup EnC (we can't, we're stopped)
    PTR_FieldDesc pCurrentFD;
    unsigned int index = 0;
    // Walk every field desc, filling one pre-allocated FieldData slot per field.
    // The index bound guards against writing past the list the caller allocated.
    while (((pCurrentFD = fdIterator.Next()) != NULL) && (index < pFieldList->Count()))
    {
        // fill in the pCurrentEntry structure
        ComputeFieldData(pCurrentFD, pGCStaticsBase, pNonGCStaticsBase, &((*pFieldList)[index]));
        // Bump our counts and pointers.
        fieldCount++;
        index++;
    }
    // The iterator should have produced exactly as many fields as were pre-counted.
    _ASSERTE(fieldCount == (unsigned int)pFieldList->Count());
} // DacDbiInterfaceImpl::CollectFields
// Determine if a type is a ValueType
// Determine if a type is a ValueType
BOOL DacDbiInterfaceImpl::IsValueType (VMPTR_TypeHandle vmTypeHandle)
{
    DD_ENTER_MAY_THROW;
    // Rehydrate the target-side handle and query it directly.
    return TypeHandle::FromPtr(vmTypeHandle.GetDacPtr()).IsValueType();
}
// Determine if a type has generic parameters
// Determine if a type has generic parameters
BOOL DacDbiInterfaceImpl::HasTypeParams (VMPTR_TypeHandle vmTypeHandle)
{
    DD_ENTER_MAY_THROW;
    // Rehydrate the target-side handle and ask whether it still contains
    // unbound generic type variables.
    return TypeHandle::FromPtr(vmTypeHandle.GetDacPtr()).ContainsGenericVariables();
}
// DacDbi API: Get type information for a class
// DacDbi API: Get type information for a class
void DacDbiInterfaceImpl::GetClassInfo(VMPTR_AppDomain vmAppDomain,
                                       VMPTR_TypeHandle vmThExact,
                                       ClassInfo * pData)
{
    DD_ENTER_MAY_THROW;
    AppDomain * pAppDomain = vmAppDomain.GetDacPtr();
    TypeHandle  thExact;
    TypeHandle  thApprox;
    // NOTE(review): vmThExact is deliberately passed for BOTH the exact and approximate
    // slots -- the exact handle serves as its own approximation here. Confirm against
    // GetTypeHandles' contract before changing.
    GetTypeHandles(vmThExact, vmThExact, &thExact, &thApprox);
    // initialize field count, generic args count, size and value class flag
    InitClassData(thApprox, false, pData);
    // Field collection needs an AppDomain for statics lookup; skip it if none.
    if (pAppDomain != NULL)
        CollectFields(thExact, thApprox, pAppDomain, &(pData->m_fieldList));
} // DacDbiInterfaceImpl::GetClassInfo
// DacDbi API: Get field information and object size for an instantiated generic type
// DacDbi API: Get field information and object size for an instantiated generic type
void DacDbiInterfaceImpl::GetInstantiationFieldInfo (VMPTR_DomainAssembly vmDomainAssembly,
                                                     VMPTR_TypeHandle vmThExact,
                                                     VMPTR_TypeHandle vmThApprox,
                                                     DacDbiArrayList<FieldData> * pFieldList,
                                                     SIZE_T * pObjectSize)
{
    DD_ENTER_MAY_THROW;
    DomainAssembly * pDomainAssembly = vmDomainAssembly.GetDacPtr();
    _ASSERTE(pDomainAssembly != NULL);
    AppDomain * pAppDomain = pDomainAssembly->GetAppDomain();
    TypeHandle thExact;
    TypeHandle thApprox;
    // Resolve both handles; throws CORDBG_E_CLASS_NOT_LOADED if the type isn't loaded.
    GetTypeHandles(vmThExact, vmThApprox, &thExact, &thApprox);
    // Instance size comes from the approximate handle's method table.
    *pObjectSize = thApprox.GetMethodTable()->GetNumInstanceFieldBytes();
    // Reserve one slot per field, then fill each slot.
    pFieldList->Alloc(GetTotalFieldCount(thApprox));
    CollectFields(thExact, thApprox, pAppDomain, pFieldList);
} // DacDbiInterfaceImpl::GetInstantiationFieldInfo
//-----------------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk member functions
//-----------------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// TypeDataWalk constructor--initialize the buffer and number of remaining items from input data
// Arguments: pData - pointer to a list of records containing information about type parameters for an
// instantiated type
// nData - number of entries in pData
//-----------------------------------------------------------------------------
// Construct a walker over a flat list of type-argument records. The cursor
// starts at the first record and m_nRemaining counts down as nodes are
// consumed by ReadOne()/Skip().
DacDbiInterfaceImpl::TypeDataWalk::TypeDataWalk(DebuggerIPCE_TypeArgData * pData, unsigned int nData)
{
    m_nRemaining   = nData;
    m_pCurrentData = pData;
} // DacDbiInterfaceImpl::TypeDataWalk::TypeDataWalk
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::ReadOne
// read and return a single node from the list of type parameters
// Arguments: none (uses internal state)
// Return value: information about the next type parameter in m_pCurrentData
//-----------------------------------------------------------------------------
// Consume and return the next type-parameter node from the list, or NULL once
// the list is exhausted.
DebuggerIPCE_TypeArgData * DacDbiInterfaceImpl::TypeDataWalk::ReadOne()
{
    LIMITED_METHOD_CONTRACT;
    // Nothing left to consume?
    if (m_nRemaining == 0)
    {
        return NULL;
    }
    // Hand back the current node and advance the cursor past it.
    --m_nRemaining;
    return m_pCurrentData++;
} // DacDbiInterfaceImpl::TypeDataWalk::ReadOne
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::Skip
// Skip a single node from the list of type handles along with any children it might have
// Arguments: none (uses internal state)
// Return value: none (updates internal state)
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::TypeDataWalk::Skip()
{
LIMITED_METHOD_CONTRACT;
DebuggerIPCE_TypeArgData * pData = ReadOne();
if (pData)
{
for (unsigned int i = 0; i < pData->numTypeArgs; i++)
{
Skip();
}
}
} // DacDbiInterfaceImpl::TypeDataWalk::Skip
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeArg
// Read a type handle when it is used in the position of a generic argument or
// argument of an array or address type. Take into account generic code sharing if we
// have been requested to find the canonical representation amongst a set of shared-
// code generic types. That is, if generics code sharing is enabled then return "Object"
// for all reference types, and canonicalize underneath value types, e.g. V<string> --> V<object>.
// Return TypeHandle() if any of the type handles are not loaded.
//
// Arguments: retrieveWhich - indicates whether to retrieve a canonical representation or
// an exact representation
// Return value: the type handle for the type parameter
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeArg(TypeHandleReadType retrieveWhich)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;
#if !defined(FEATURE_SHARE_GENERIC_CODE)
    // Without generics code sharing there is no canonical form distinct from
    // the exact form, so always read the exact type handle.
    return ReadLoadedTypeHandle(kGetExact);
#else
    if (retrieveWhich == kGetExact)
        return ReadLoadedTypeHandle(kGetExact);
    // This nasty bit of code works out what the "canonicalization" of a
    // parameter to a generic is once we take into account generics code sharing.
    //
    // This logic is somewhat a duplication of logic in vm\typehandle.cpp, though
    // that logic operates on a TypeHandle format, i.e. assumes we're finding the
    // canonical form of a type that has already been loaded. Here we are finding
    // the canonical form of a type that may not have been loaded (but where we expect
    // its canonical form to have been loaded).
    //
    // Ideally this logic would not be duplicated in this way, but it is difficult
    // to arrange for that.
    DebuggerIPCE_TypeArgData * pData = ReadOne();
    if (!pData)
        return TypeHandle();
    // If we have code sharing then the process of canonicalizing is trickier.
    // unfortunately we have to include the exact specification of compatibility at
    // this point.
    CorElementType elementType = pData->data.elementType;
    // Dispatch on the element type: each helper consumes any child nodes it needs.
    switch (elementType)
    {
    case ELEMENT_TYPE_PTR:
        _ASSERTE(pData->numTypeArgs == 1);
        return PtrOrByRefTypeArg(pData, retrieveWhich);
        break;
    case ELEMENT_TYPE_CLASS:
    case ELEMENT_TYPE_VALUETYPE:
        return ClassTypeArg(pData, retrieveWhich);
        break;
    case ELEMENT_TYPE_FNPTR:
        return FnPtrTypeArg(pData, retrieveWhich);
        break;
    default:
        return ObjRefOrPrimitiveTypeArg(pData, elementType);
        break;
    }
#endif // FEATURE_SHARE_GENERIC_CODE
} // DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeArg
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeHandles
// Iterate through the type argument data, creating type handles as we go.
//
// Arguments:
// input: retrieveWhich - indicates whether we can return a canonical type handle
// or we must return an exact type handle
// nTypeArgs - number of type arguments to be read
// output: ppResults - pointer to a list of TypeHandles that will hold the type handles
// for each type parameter
//
// Return Value: FALSE iff any of the type handles are not loaded.
//-----------------------------------------------------------------------------
// Read nTypeArgs type arguments from the walk, storing one TypeHandle per
// argument into ppResults. Always consumes all nTypeArgs arguments, even
// after a failure, so the walk stays in sync. Returns FALSE iff any of the
// resulting handles is null (i.e. not loaded).
BOOL DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeHandles(TypeHandleReadType retrieveWhich,
                                                              unsigned int nTypeArgs,
                                                              TypeHandle * ppResults)
{
    WRAPPER_NO_CONTRACT;
    BOOL fAllLoaded = TRUE;
    for (unsigned int idx = 0; idx < nTypeArgs; idx++)
    {
        ppResults[idx] = ReadLoadedTypeArg(retrieveWhich);
        if (ppResults[idx].IsNull())
        {
            fAllLoaded = FALSE;
        }
    }
    return fAllLoaded;
} // DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeHandles
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedInstantiation
// Read an instantiation of a generic type if it has already been created.
//
// Arguments:
// input: retrieveWhich - indicates whether we can return a canonical type handle
// or we must return an exact type handle
// pModule - module in which the instantiated type is loaded
// mdToken - metadata token for the type
// nTypeArgs - number of type arguments to be read
// Return value: the type handle for the instantiated type
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedInstantiation(TypeHandleReadType retrieveWhich,
                                                                      Module * pModule,
                                                                      mdTypeDef mdToken,
                                                                      unsigned int nTypeArgs)
{
    WRAPPER_NO_CONTRACT;
    // Holder frees the temporary TypeHandle array on all exit paths.
    NewHolder<TypeHandle> pInst(new TypeHandle[nTypeArgs]);
    // get the type handle for each of the type parameters
    if (!ReadLoadedTypeHandles(retrieveWhich, nTypeArgs, pInst))
    {
        // At least one type argument isn't loaded; report a null handle.
        return TypeHandle();
    }
    // get the type handle for the particular instantiation that corresponds to
    // the given type parameters
    return FindLoadedInstantiation(pModule, mdToken, nTypeArgs, pInst);
} // DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedInstantiation
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeHandle
//
// Compute the type handle for a given type.
// This is the top-level function that will return the type handle for an
// arbitrary type. It uses mutual recursion with ReadLoadedTypeArg to get
// the type handle for a (possibly parameterized) type. Note that the referent of
// address types or the element type of an array type are viewed as type parameters.
//
// For example, assume that we are retrieving only exact types, and we have as our
// top level type an array defined as int [][].
// We start by noting that the type is an array type, so we call ReadLoadedTypeArg to
// get the element type. We find that the element type is also an array:int [].
// ReadLoadedTypeArg will call ReadLoadedTypeHandle with this type information.
// Again, we determine that the top-level type is an array, so we call ReadLoadedTypeArg
// to get the element type, int. ReadLoadedTypeArg will again call ReadLoadedTypeHandle
// which will find that this time, the top-level type is a primitive type. It will request
// the loaded type handle from the loader and return it. On return, we get the type handle
// for an array of int from the loader. We return again and request the type handle for an
// array of arrays of int. This is the type handle we will return.
//
// Arguments:
// input: retrieveWhich - determines whether we can return the type handle for
// a canonical type or only for an exact type
// we use the list of type data stored in the TypeDataWalk data members
// for other input information
// Return value: type handle for the current type.
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeHandle(TypeHandleReadType retrieveWhich)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;
    // get the type information at the head of the list m_pCurrentData
    DebuggerIPCE_TypeArgData * pData = ReadOne();
    if (!pData)
        return TypeHandle();
    // get the type handle that corresponds to its elementType; each case below
    // may recursively consume further nodes for element/referent/argument types.
    TypeHandle typeHandle;
    switch (pData->data.elementType)
    {
    case ELEMENT_TYPE_ARRAY:
    case ELEMENT_TYPE_SZARRAY:
        typeHandle = ArrayTypeArg(pData, retrieveWhich);
        break;
    case ELEMENT_TYPE_PTR:
    case ELEMENT_TYPE_BYREF:
        typeHandle = PtrOrByRefTypeArg(pData, retrieveWhich);
        break;
    case ELEMENT_TYPE_CLASS:
    case ELEMENT_TYPE_VALUETYPE:
        {
            // Possibly-generic class/struct: resolve the (instantiated) type
            // from its defining module, token, and type arguments.
            Module * pModule = pData->data.ClassTypeData.vmModule.GetDacPtr();
            typeHandle = ReadLoadedInstantiation(retrieveWhich,
                                                 pModule,
                                                 pData->data.ClassTypeData.metadataToken,
                                                 pData->numTypeArgs);
        }
        break;
    case ELEMENT_TYPE_FNPTR:
        {
            typeHandle = FnPtrTypeArg(pData, retrieveWhich);
        }
        break;
    default:
        // Primitive / simple element types map straight to a loaded type handle.
        typeHandle = FindLoadedElementType(pData->data.elementType);
        break;
    }
    return typeHandle;
} // DacDbiInterfaceImpl::TypeDataWalk::ReadLoadedTypeHandle
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::ArrayTypeArg
// get a loaded type handle for an array type (E_T_ARRAY or E_T_SZARRAY)
//
// Arguments:
// input: pArrayTypeInfo - type information for an array type
// Although this is in fact a pointer (in)to a list, we treat it here
// simply as a pointer to a single instance of DebuggerIPCE_TypeArgData
// which holds type information for an array.
// This is the most recent type node (for an array type) retrieved
// by TypeDataWalk::ReadOne(). The call to ReadLoadedTypeArg will
// result in call(s) to ReadOne to retrieve one or more type nodes
// that are needed to compute the type handle for the
// element type of the array. When we return from that call, we pass
// pArrayTypeInfo along with arrayElementTypeArg to FindLoadedArrayType
// to get the type handle for this particular array type.
// Note:
// On entry, we know that pArrayTypeInfo is the same as m_pCurrentData - 1,
// but by the time we need to use it, this is no longer true. Because
// we can't predict how many nodes will be consumed by the call to
// ReadLoadedTypeArg, we can't compute this value from the member fields
// of TypeDataWalk and therefore pass it as a parameter.
// retrieveWhich - determines whether we can return the type handle for
// a canonical type or only for an exact type
// Return value: the type handle corresponding to the array type
//-----------------------------------------------------------------------------
// Resolve a loaded type handle for an array node (E_T_ARRAY / E_T_SZARRAY).
// Consumes the node(s) describing the element type from the walk, then looks
// up the array type for that element and the node's rank. Returns a null
// handle if the element type isn't loaded.
TypeHandle DacDbiInterfaceImpl::TypeDataWalk::ArrayTypeArg(DebuggerIPCE_TypeArgData * pArrayTypeInfo,
                                                           TypeHandleReadType retrieveWhich)
{
    TypeHandle thElement = ReadLoadedTypeArg(retrieveWhich);
    if (thElement.IsNull())
    {
        // Element type isn't loaded, so the array type can't be resolved either.
        return TypeHandle();
    }
    return FindLoadedArrayType(pArrayTypeInfo->data.elementType,
                               thElement,
                               pArrayTypeInfo->data.ArrayTypeData.arrayRank);
} // DacDbiInterfaceImpl::TypeDataWalk::ArrayTypeArg
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::PtrOrByRefTypeArg
// Get a loaded type handle for an address type (E_T_PTR or E_T_BYREF).
//
// The call to ReadLoadedTypeArg consumes however many type nodes describe the
// referent type, so pPtrOrByRefTypeInfo (the pointer/byref node itself,
// captured by the caller before the walk advanced past it) must be passed in
// explicitly; it can no longer be recomputed from m_pCurrentData afterwards.
//
// Arguments:
//    input: pPtrOrByRefTypeInfo - type node for the pointer or byref type
//           retrieveWhich       - determines whether we can return the type
//                                 handle for a canonical type or only for an
//                                 exact type
// Return value: the type handle for the address type, or a null TypeHandle if
//    the referent type could not be resolved.
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::TypeDataWalk::PtrOrByRefTypeArg(DebuggerIPCE_TypeArgData * pPtrOrByRefTypeInfo,
                                                                TypeHandleReadType retrieveWhich)
{
    // Resolve the referent type first; this consumes node(s) from the walk.
    TypeHandle thReferent = ReadLoadedTypeArg(retrieveWhich);
    if (thReferent.IsNull())
    {
        return TypeHandle();
    }

    return FindLoadedPointerOrByrefType(pPtrOrByRefTypeInfo->data.elementType, thReferent);
} // DacDbiInterfaceImpl::TypeDataWalk::PtrOrByRefTypeArg
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::ClassTypeArg
// Get a loaded type handle for a class type (E_T_CLASS or E_T_VALUETYPE).
//
// For value types we must resolve the exact (or canonical) instantiation via
// ReadLoadedInstantiation, which consumes the argument nodes from the walk.
// For reference types the canonical representation is simply "Object", so the
// actual type arguments are irrelevant -- but they must still be skipped so
// the walk stays positioned correctly for any enclosing type's remaining
// arguments.
//
// Arguments:
//    input: pClassTypeInfo - type node for the class (module, token, arg count)
//           retrieveWhich  - determines whether we can return the type handle
//                            for a canonical type or only for an exact type
// Return value: the type handle corresponding to the class type
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::TypeDataWalk::ClassTypeArg(DebuggerIPCE_TypeArgData * pClassTypeInfo,
                                                           TypeHandleReadType retrieveWhich)
{
    Module *   pModule = pClassTypeInfo->data.ClassTypeData.vmModule.GetDacPtr();
    mdTypeDef  token   = pClassTypeInfo->data.ClassTypeData.metadataToken;

    TypeHandle typeDef = ClassLoader::LookupTypeDefOrRefInModule(pModule, token);

    bool fValueType = (pClassTypeInfo->data.elementType == ELEMENT_TYPE_VALUETYPE) ||
                      (!typeDef.IsNull() && typeDef.IsValueType());
    if (!fValueType)
    {
        _ASSERTE(retrieveWhich == kGetCanonical);
        // skip the instantiation - no need to look at it since the type canonicalizes to "Object"
        for (unsigned int ix = 0; ix < pClassTypeInfo->numTypeArgs; ix++)
        {
            Skip();
        }
        return TypeHandle(g_pCanonMethodTableClass);
    }

    return ReadLoadedInstantiation(retrieveWhich,
                                   pModule,
                                   token,
                                   pClassTypeInfo->numTypeArgs);
} // DacDbiInterfaceImpl::TypeDataWalk::ClassTypeArg
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::FnPtrTypeArg
// Get a loaded type handle for a function pointer type (E_T_FNPTR).
//
// The call to ReadLoadedTypeHandles consumes the type node(s) for the return
// type and each parameter type of the function, so pFnPtrTypeInfo (the FnPtr
// node itself, captured by the caller before the walk advanced past it) must
// be passed in explicitly.
//
// Arguments:
//    input: pFnPtrTypeInfo - type node for the function pointer type
//                            (numTypeArgs = return type + parameter count)
//           retrieveWhich  - determines whether we can return the type handle
//                            for a canonical type or only for an exact type
// Return value: the type handle corresponding to the function pointer type,
//    or a null TypeHandle if the component types could not be resolved.
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::TypeDataWalk::FnPtrTypeArg(DebuggerIPCE_TypeArgData * pFnPtrTypeInfo,
                                                           TypeHandleReadType retrieveWhich)
{
    // allocate space to store a list of type handles, one for the return type and one for each
    // of the parameter types of the function to which the FnPtr type refers.
    // Note: operator new[] takes an element count, not a byte count. The previous
    // code multiplied numTypeArgs by sizeof(TypeHandle), over-allocating the array
    // by a factor of sizeof(TypeHandle); only numTypeArgs elements are needed.
    NewArrayHolder<TypeHandle> pInst(new TypeHandle[pFnPtrTypeInfo->numTypeArgs]);
    if (ReadLoadedTypeHandles(retrieveWhich, pFnPtrTypeInfo->numTypeArgs, pInst))
    {
        return FindLoadedFnptrType(pFnPtrTypeInfo->numTypeArgs, pInst);
    }
    return TypeHandle();
} // DacDbiInterfaceImpl::TypeDataWalk::FnPtrTypeArg
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeDataWalk::ObjRefOrPrimitiveTypeArg
// Get a loaded type handle for a primitive type or ObjRef appearing as a type
// argument of a parent type.
//
// For an ObjRef we do not care about the referent's own type parameters --
// every object reference argument is represented by the canonical object type
// handle. For a primitive type we simply look up its type handle.
//
// Arguments:
//    input: pArgInfo    - type node for the objref or primitive type
//           elementType - element type of the argument
// Return value: the type handle corresponding to elementType
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::TypeDataWalk::ObjRefOrPrimitiveTypeArg(DebuggerIPCE_TypeArgData * pArgInfo,
                                                                       CorElementType elementType)
{
    // Any type args carried along (e.g. for arrays) are irrelevant here --
    // consume them so the walk stays positioned correctly.
    unsigned int cRemaining = pArgInfo->numTypeArgs;
    while (cRemaining-- > 0)
    {
        Skip();
    }

    // for an ObjRef, just return the CLASS____CANON type handle
    return CorTypeInfo::IsObjRef_NoThrow(elementType) ? TypeHandle(g_pCanonMethodTableClass)
                                                      : FindLoadedElementType(elementType);
} // DacDbiInterfaceImpl::TypeDataWalk::ObjRefOrPrimitiveTypeArg
//-------------------------------------------------------------------------
// end of TypeDataWalk implementations
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
// functions to use loader to get type handles
// ------------------------------------------------------------------------
// Note, in these functions, the use of ClassLoader::DontLoadTypes was chosen
// instead of FailIfNotLoaded because, although we may want to debug unrestored
// VCs, we can't do it because the debug API is not set up to handle them.
//
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::FindLoadedArrayType
// Use the class loader (in non-load mode, so nothing new gets loaded) to find
// an already-loaded type handle for an array type (E_T_ARRAY or E_T_SZARRAY).
// Arguments:
//    input: arrayType - element type constant identifying the array kind
//           typeArg   - type handle for the element type
//           rank      - array rank
// Return Value: type handle for the array type, or a null TypeHandle if the
//    element type handle is null.
//-----------------------------------------------------------------------------
// static
TypeHandle DacDbiInterfaceImpl::FindLoadedArrayType(CorElementType arrayType,
                                                    TypeHandle typeArg,
                                                    unsigned rank)
{
    // Lookup operations run the class loader in non-load mode.
    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

    if (typeArg.IsNull())
    {
        return TypeHandle();
    }

    return ClassLoader::LoadArrayTypeThrowing(typeArg,
                                              arrayType,
                                              rank,
                                              ClassLoader::DontLoadTypes);
} // DacDbiInterfaceImpl::FindLoadedArrayType
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::FindLoadedPointerOrByrefType
// Use ClassLoader to find a loaded type handle for an address type (E_T_PTR or E_T_BYREF)
// Arguments:
//     input: addressType - type of the address type
//            TypeArg     - type handle for the base (referent) type
// Return Value: type handle for the address type
// Note(review): unlike FindLoadedArrayType, there is no null check on typeArg
// here -- callers appear to validate it first; confirm before relying on this.
//-----------------------------------------------------------------------------
// static
TypeHandle DacDbiInterfaceImpl::FindLoadedPointerOrByrefType(CorElementType addressType, TypeHandle typeArg)
{
    // Lookup operations run the class loader in non-load mode, so this only
    // finds types that are already loaded (DontLoadTypes below).
    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

    return ClassLoader::LoadPointerOrByrefTypeThrowing(addressType,
                                                       typeArg,
                                                       ClassLoader::DontLoadTypes);
} // DacDbiInterfaceImpl::FindLoadedPointerOrByrefType
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::FindLoadedFnptrType
// Use ClassLoader to find a loaded type handle for a function pointer type (E_T_FNPTR)
// Arguments:
//     input: pInst       - type handles of the function's return value and arguments
//                          (return value first)
//            numTypeArgs - number of type handles in pInst, including the return value
// Return Value: type handle for the function pointer type
//-----------------------------------------------------------------------------
// static
TypeHandle DacDbiInterfaceImpl::FindLoadedFnptrType(DWORD numTypeArgs, TypeHandle * pInst)
{
    // Lookup operations run the class loader in non-load mode.
    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

    // @dbgtodo : Do we need to worry about calling convention here?
    // LoadFnptrTypeThrowing expects the count of arguments, not
    // including return value, so we subtract 1 from numTypeArgs.
    // (The first argument, 0, is presumably the calling convention -- see the
    // @dbgtodo above.)
    return ClassLoader::LoadFnptrTypeThrowing(0,
                                              numTypeArgs - 1,
                                              pInst,
                                              ClassLoader::DontLoadTypes);
} // DacDbiInterfaceImpl::FindLoadedFnptrType
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::FindLoadedInstantiation
// Use ClassLoader to find a loaded type handle for a particular instantiation of a
// class type (E_T_CLASS or E_T_VALUECLASS)
//
// Arguments:
//     input: pModule   - module in which the type is loaded
//            mdToken   - metadata token for the type
//            nTypeArgs - number of type arguments in pInst
//            pInst     - list of type handles for the type parameters
// Return value: type handle for the instantiated class type
//-----------------------------------------------------------------------------
// static
TypeHandle DacDbiInterfaceImpl::FindLoadedInstantiation(Module *     pModule,
                                                        mdTypeDef    mdToken,
                                                        DWORD        nTypeArgs,
                                                        TypeHandle * pInst)
{
    // Lookup operations run the class loader in non-load mode, so only an
    // already-loaded instantiation will be found (DontLoadTypes below).
    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

    return ClassLoader::LoadGenericInstantiationThrowing(pModule,
                                                         mdToken,
                                                         Instantiation(pInst,nTypeArgs),
                                                         ClassLoader::DontLoadTypes);
} // DacDbiInterfaceImpl::FindLoadedInstantiation
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::FindLoadedElementType
// Get the type handle for a primitive type.
// Arguments:
//    input: elementType - the primitive element type
// Return Value: type handle built from CoreLib's method table for that
//    element type
//-----------------------------------------------------------------------------
// static
TypeHandle DacDbiInterfaceImpl::FindLoadedElementType(CorElementType elementType)
{
    // Lookup operations run the class loader in non-load mode.
    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

    return TypeHandle((&g_CoreLib)->GetElementType(elementType));
} // DacDbiInterfaceImpl::FindLoadedElementType
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetArrayTypeInfo
// Gets additional information to convert a type handle to an instance of CordbType if the type is E_T_ARRAY.
// Specifically, we get the rank and the type of the array elements
//
// Arguments:
//     input:  typeHandle - type handle for the array type (must be an array)
//             pAppDomain - AppDomain into which the type is loaded
//     output: pTypeInfo  - information for the array rank and element type
//
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetArrayTypeInfo(TypeHandle typeHandle,
DebuggerIPCE_ExpandedTypeData * pTypeInfo,
AppDomain * pAppDomain)
{
    _ASSERTE(typeHandle.IsArray());
    pTypeInfo->ArrayTypeData.arrayRank = typeHandle.GetRank();
    // Fill in the basic type information (handle/token/module) for the
    // element type of the array.
    TypeHandleToBasicTypeInfo(typeHandle.GetArrayElementTypeHandle(),
                              &(pTypeInfo->ArrayTypeData.arrayTypeArg),
                              pAppDomain);
} // DacDbiInterfaceImpl::GetArrayTypeInfo
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetPtrTypeInfo
// Gets additional information to convert a type handle to an instance of
// CordbType if the type is E_T_PTR or E_T_BYREF -- specifically, the type of
// the referent of the address type.
//
// Arguments:
//    input:  boxed      - indicates what, if anything, is boxed (see
//                         code:AreValueTypesBoxed for details)
//            typeHandle - type handle for the address type
//            pAppDomain - AppDomain into which the type is loaded
//    output: pTypeInfo  - information for the referent type
//
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetPtrTypeInfo(AreValueTypesBoxed boxed,
                                         TypeHandle typeHandle,
                                         DebuggerIPCE_ExpandedTypeData * pTypeInfo,
                                         AppDomain * pAppDomain)
{
    if (boxed != AllBoxed)
    {
        // Describe the referent type of the pointer/byref.
        _ASSERTE(typeHandle.IsTypeDesc());
        TypeHandleToBasicTypeInfo(typeHandle.AsTypeDesc()->GetTypeParam(),
                                  &(pTypeInfo->UnaryTypeData.unaryTypeArg),
                                  pAppDomain);
        return;
    }

    // Everything boxed: report the type as a class instead.
    GetClassTypeInfo(typeHandle, pTypeInfo, pAppDomain);
} // DacDbiInterfaceImpl::GetPtrTypeInfo
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetFnPtrTypeInfo
// Gets additional information to convert a type handle to an instance of
// CordbType if the type is E_T_FNPTR -- specifically, the type handle itself.
//
// Arguments:
//    input:  boxed      - indicates what, if anything, is boxed (see
//                         code:AreValueTypesBoxed for details)
//            typeHandle - type handle for the function pointer type
//            pAppDomain - AppDomain into which the type is loaded
//    output: pTypeInfo  - receives the type handle (or class info if boxed)
//
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetFnPtrTypeInfo(AreValueTypesBoxed boxed,
                                           TypeHandle typeHandle,
                                           DebuggerIPCE_ExpandedTypeData * pTypeInfo,
                                           AppDomain * pAppDomain)
{
    if (boxed != AllBoxed)
    {
        // Pass the type handle straight through; the RS reconstructs the
        // signature from it.
        pTypeInfo->NaryTypeData.typeHandle.SetDacTargetPtr(typeHandle.AsTAddr());
        return;
    }

    // Everything boxed: report the type as a class instead.
    GetClassTypeInfo(typeHandle, pTypeInfo, pAppDomain);
} // DacDbiInterfaceImpl::GetFnPtrTypeInfo
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetClassTypeInfo
// Gets additional information to convert a type handle to an instance of
// CordbType if the type is E_T_CLASS or E_T_VALUETYPE.
//
// Arguments:
//    input:  typeHandle - type handle for the class or value type
//            pAppDomain - AppDomain into which the type is loaded (may be NULL)
//    output: pTypeInfo  - receives type handle (generics only), metadata
//                         token, module and domain file for the type
//
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetClassTypeInfo(TypeHandle typeHandle,
                                           DebuggerIPCE_ExpandedTypeData * pTypeInfo,
                                           AppDomain * pAppDomain)
{
    Module * pModule = typeHandle.GetModule();
    _ASSERTE(pModule);

    // Only a generic instantiation carries its exact type handle across;
    // a non-generic type is fully identified by token + module below.
    if (typeHandle.HasInstantiation())
    {
        pTypeInfo->ClassTypeData.typeHandle.SetDacTargetPtr(typeHandle.AsTAddr());
    }
    else
    {
        pTypeInfo->ClassTypeData.typeHandle = VMPTR_TypeHandle::NullPtr();
    }

    pTypeInfo->ClassTypeData.metadataToken = typeHandle.GetCl();
    pTypeInfo->ClassTypeData.vmModule.SetDacTargetPtr(PTR_HOST_TO_TADDR(pModule));

    if (pAppDomain == NULL)
    {
        pTypeInfo->ClassTypeData.vmDomainAssembly = VMPTR_DomainAssembly::NullPtr();
    }
    else
    {
        pTypeInfo->ClassTypeData.vmDomainAssembly.SetDacTargetPtr(PTR_HOST_TO_TADDR(pModule->GetDomainAssembly()));
    }
} // DacDbiInterfaceImpl::GetClassTypeInfo
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetElementType
// Gets the correct CorElementType value from a type handle.
//
// Arguments:
//    input: typeHandle - the type handle to classify (may be null)
// Return Value: the CorElementType enum value for the type handle
//-----------------------------------------------------------------------------
CorElementType DacDbiInterfaceImpl::GetElementType (TypeHandle typeHandle)
{
    if (typeHandle.IsNull())
    {
        return ELEMENT_TYPE_VOID;
    }

    // Object and String need explicit checks up front:
    // GetSignatureCorElementType returns E_T_CLASS for E_T_STRING... :-(
    MethodTable * pMT = typeHandle.GetMethodTable();
    if (pMT == g_pObjectClass)
    {
        return ELEMENT_TYPE_OBJECT;
    }
    if (pMT == g_pStringClass)
    {
        return ELEMENT_TYPE_STRING;
    }

    return typeHandle.GetSignatureCorElementType();
} // DacDbiInterfaceImpl::GetElementType
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeHandleToBasicTypeInfo
// Gets additional information to convert a type handle to an instance of CordbType for the referent of an
// E_T_BYREF or E_T_PTR or for the element type of an E_T_ARRAY or E_T_SZARRAY
//
// Arguments:
//     input:  typeHandle - type handle for the referent/element type
//             pAppDomain - AppDomain into which the type is loaded (may be NULL)
//     output: pTypeInfo  - information for the referent type. Exactly one of
//                          {vmTypeHandle, metadataToken+vmModule} is filled in
//                          depending on the element type (see the switch below).
//
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::TypeHandleToBasicTypeInfo(TypeHandle typeHandle,
                                                    DebuggerIPCE_BasicTypeData * pTypeInfo,
                                                    AppDomain * pAppDomain)
{
    pTypeInfo->elementType = GetElementType(typeHandle);

    switch (pTypeInfo->elementType)
    {
        // Constructed types: the type handle alone fully describes the type,
        // so no metadata token or domain file is supplied.
        case ELEMENT_TYPE_ARRAY:
        case ELEMENT_TYPE_SZARRAY:
        case ELEMENT_TYPE_FNPTR:
        case ELEMENT_TYPE_PTR:
        case ELEMENT_TYPE_BYREF:
            pTypeInfo->vmTypeHandle.SetDacTargetPtr(typeHandle.AsTAddr());
            pTypeInfo->metadataToken = mdTokenNil;
            pTypeInfo->vmDomainAssembly = VMPTR_DomainAssembly::NullPtr();
            break;

        case ELEMENT_TYPE_CLASS:
        case ELEMENT_TYPE_VALUETYPE:
        {
            Module * pModule = typeHandle.GetModule();

            if (typeHandle.HasInstantiation())   // only set if instantiated
            {
                pTypeInfo->vmTypeHandle.SetDacTargetPtr(typeHandle.AsTAddr());
            }
            else
            {
                pTypeInfo->vmTypeHandle = VMPTR_TypeHandle::NullPtr();
            }

            pTypeInfo->metadataToken = typeHandle.GetCl();
            _ASSERTE(pModule);

            pTypeInfo->vmModule.SetDacTargetPtr(PTR_HOST_TO_TADDR(pModule));
            if (pAppDomain)
            {
                pTypeInfo->vmDomainAssembly.SetDacTargetPtr(PTR_HOST_TO_TADDR(pModule->GetDomainAssembly()));
            }
            else
            {
                pTypeInfo->vmDomainAssembly = VMPTR_DomainAssembly::NullPtr();
            }
            break;
        }

        // Primitive types: the element type value itself is sufficient.
        default:
            pTypeInfo->vmTypeHandle = VMPTR_TypeHandle::NullPtr();
            pTypeInfo->metadataToken = mdTokenNil;
            pTypeInfo->vmDomainAssembly = VMPTR_DomainAssembly::NullPtr();
            break;
    }

    return;
} // DacDbiInterfaceImpl::TypeHandleToBasicTypeInfo
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetObjectExpandedTypeInfoFromID
// DacDbi API: get expanded type information from a COR_TYPEID, whose token1
// field wraps the target address of a type handle.
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetObjectExpandedTypeInfoFromID(AreValueTypesBoxed boxed,
                                                          VMPTR_AppDomain vmAppDomain,
                                                          COR_TYPEID id,
                                                          DebuggerIPCE_ExpandedTypeData *pTypeInfo)
{
    DD_ENTER_MAY_THROW;

    // id.token1 is the target address of the type handle.
    TypeHandleToExpandedTypeInfoImpl(boxed, vmAppDomain, TypeHandle::FromPtr(TO_TADDR(id.token1)), pTypeInfo);
}
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetObjectExpandedTypeInfo
// DacDbi API: get expanded type information for the object at the given
// target address, using the object's GC-safe type handle.
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetObjectExpandedTypeInfo(AreValueTypesBoxed boxed,
                                                    VMPTR_AppDomain vmAppDomain,
                                                    CORDB_ADDRESS addr,
                                                    DebuggerIPCE_ExpandedTypeData *pTypeInfo)
{
    DD_ENTER_MAY_THROW;

    PTR_Object obj(TO_TADDR(addr));
    TypeHandleToExpandedTypeInfoImpl(boxed, vmAppDomain, obj->GetGCSafeTypeHandle(), pTypeInfo);
}
// DacDbi API: use a type handle to get the information needed to create the corresponding RS CordbType instance
void DacDbiInterfaceImpl::TypeHandleToExpandedTypeInfo(AreValueTypesBoxed boxed,
                                                       VMPTR_AppDomain vmAppDomain,
                                                       VMPTR_TypeHandle vmTypeHandle,
                                                       DebuggerIPCE_ExpandedTypeData * pTypeInfo)
{
    DD_ENTER_MAY_THROW;

    // Unwrap the VMPTR and delegate to the common worker.
    TypeHandle typeHandle = TypeHandle::FromPtr(vmTypeHandle.GetDacPtr());
    TypeHandleToExpandedTypeInfoImpl(boxed, vmAppDomain, typeHandle, pTypeInfo);
}
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::TypeHandleToExpandedTypeInfoImpl
// Worker: convert a type handle into the expanded type information the RS
// needs to construct a CordbType instance, dispatching on the element type.
//
// Arguments:
//     input:  boxed       - indicates what, if anything, is boxed
//                           (see code:AreValueTypesBoxed)
//             vmAppDomain - AppDomain into which the type is loaded
//             typeHandle  - the type handle to convert
//     output: pTypeInfo   - filled in with the expanded type information
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::TypeHandleToExpandedTypeInfoImpl(AreValueTypesBoxed boxed,
                                                           VMPTR_AppDomain vmAppDomain,
                                                           TypeHandle typeHandle,
                                                           DebuggerIPCE_ExpandedTypeData * pTypeInfo)
{
    AppDomain * pAppDomain = vmAppDomain.GetDacPtr();

    pTypeInfo->elementType = GetElementType(typeHandle);

    switch (pTypeInfo->elementType)
    {
        case ELEMENT_TYPE_ARRAY:
        case ELEMENT_TYPE_SZARRAY:
            GetArrayTypeInfo(typeHandle, pTypeInfo, pAppDomain);
            break;

        case ELEMENT_TYPE_PTR:
        case ELEMENT_TYPE_BYREF:
            GetPtrTypeInfo(boxed, typeHandle, pTypeInfo, pAppDomain);
            break;

        case ELEMENT_TYPE_VALUETYPE:
            // A boxed value type is reported to the RS as a class.
            if (boxed == OnlyPrimitivesUnboxed || boxed == AllBoxed)
            {
                pTypeInfo->elementType = ELEMENT_TYPE_CLASS;
            }
            GetClassTypeInfo(typeHandle, pTypeInfo, pAppDomain);
            break;

        case ELEMENT_TYPE_CLASS:
            GetClassTypeInfo(typeHandle, pTypeInfo, pAppDomain);
            break;

        case ELEMENT_TYPE_FNPTR:
            GetFnPtrTypeInfo(boxed, typeHandle, pTypeInfo, pAppDomain);
            break;

        default:
            // Primitive type: report as a class only when everything is boxed.
            if (boxed == AllBoxed)
            {
                pTypeInfo->elementType = ELEMENT_TYPE_CLASS;
                GetClassTypeInfo(typeHandle, pTypeInfo, pAppDomain);
            }
            // else the element type is sufficient
            break;
    }
    LOG((LF_CORDB, LL_INFO10000, "D::THTETI: converted left-side type handle to expanded right-side type info, pTypeInfo->ClassTypeData.typeHandle = 0x%08x.\n", pTypeInfo->ClassTypeData.typeHandle.GetRawPtr()));
    return;
} // DacDbiInterfaceImpl::TypeHandleToExpandedTypeInfo
// Get type handle for a TypeDef token, if one exists. For generics this returns the open type.
// Throws CORDBG_E_CLASS_NOT_LOADED if the type has not been loaded.
VMPTR_TypeHandle DacDbiInterfaceImpl::GetTypeHandle(VMPTR_Module vmModule,
                                                    mdTypeDef metadataToken)
{
    DD_ENTER_MAY_THROW;

    Module * pModule = vmModule.GetDacPtr();

    TypeHandle th = ClassLoader::LookupTypeDefOrRefInModule(pModule, metadataToken);
    if (th.IsNull())
    {
        LOG((LF_CORDB, LL_INFO10000, "D::GTH: class isn't loaded.\n"));
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }

    VMPTR_TypeHandle vmTypeHandle = VMPTR_TypeHandle::NullPtr();
    vmTypeHandle.SetDacTargetPtr(th.AsTAddr());
    return vmTypeHandle;
}
// DacDbi API: GetApproxTypeHandle finds the type handle for the layout of the instance fields of an
// instantiated type if it is available.
//
// Arguments:
//    input: pTypeData - list of type nodes describing the type
// Return value: VMPTR wrapping the (possibly canonical) type handle
// Throws: CORDBG_E_CLASS_NOT_LOADED if no loaded type handle could be found.
VMPTR_TypeHandle DacDbiInterfaceImpl::GetApproxTypeHandle(TypeInfoList * pTypeData)
{
    DD_ENTER_MAY_THROW;

    LOG((LF_CORDB, LL_INFO10000, "D::GATH: getting info.\n"));
    TypeDataWalk walk(&((*pTypeData)[0]), pTypeData->Count());
    TypeHandle typeHandle = walk.ReadLoadedTypeHandle(TypeDataWalk::kGetCanonical);
    VMPTR_TypeHandle vmTypeHandle = VMPTR_TypeHandle::NullPtr();

    // Fix: validate the handle before wrapping it. The previous code also
    // called SetDacTargetPtr unconditionally ahead of the null check, which
    // was redundant and wrapped a potentially-null type handle.
    if (typeHandle.IsNull())
    {
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }
    vmTypeHandle.SetDacTargetPtr(typeHandle.AsTAddr());

    LOG((LF_CORDB, LL_INFO10000,
        "D::GATH: sending result, result = 0x%0x8\n",
        typeHandle));
    return vmTypeHandle;
} // DacDbiInterfaceImpl::GetApproxTypeHandle
// DacDbiInterface API: Get the exact type handle from type data
//
// Arguments:
//     input:  pTypeData    - expanded type information for the top-level type
//             pArgInfo     - actual generic arguments, if any
//     output: vmTypeHandle - receives the exact type handle
// Return value: S_OK on success; on failure, the HR from the thrown exception
//     (e.g. CORDBG_E_CLASS_NOT_LOADED propagated from the lookup).
HRESULT DacDbiInterfaceImpl::GetExactTypeHandle(DebuggerIPCE_ExpandedTypeData * pTypeData,
                                                ArgInfoList *   pArgInfo,
                                                VMPTR_TypeHandle& vmTypeHandle)
{
    DD_ENTER_MAY_THROW;

    LOG((LF_CORDB, LL_INFO10000, "D::GETH: getting info.\n"));
    HRESULT hr = S_OK;

    // Any exception from the conversion is converted to an HRESULT rather
    // than propagated, since this API reports failure via its return value.
    EX_TRY
    {
        vmTypeHandle = vmTypeHandle.NullPtr();

        // convert the type information to a type handle
        TypeHandle typeHandle = ExpandedTypeInfoToTypeHandle(pTypeData, pArgInfo);
        _ASSERTE(!typeHandle.IsNull());
        vmTypeHandle.SetDacTargetPtr(typeHandle.AsTAddr());
    }
    EX_CATCH_HRESULT(hr);

    return hr;
} // DacDbiInterfaceImpl::GetExactTypeHandle
// Retrieve the generic type params for a given MethodDesc. This function is specifically
// for stackwalking because it requires the generic type token on the stack.
//
// Arguments:
//     input:  vmAppDomain   - AppDomain in which the method runs
//             vmMethodDesc  - the method whose type parameters are requested
//             genericsToken - generics type token captured from the stack;
//                             may be unavailable (e.g. in prologs/epilogs)
//     output: pcGenericClassTypeParams - receives the number of *class* type
//                             parameters (the rest of the output array holds
//                             method type parameters)
//             pGenericTypeParams - receives expanded type info for all class
//                             type parameters followed by all method type
//                             parameters
// Throws: E_INVALIDARG for null vmAppDomain/vmMethodDesc.
void DacDbiInterfaceImpl::GetMethodDescParams(
    VMPTR_AppDomain     vmAppDomain,
    VMPTR_MethodDesc    vmMethodDesc,
    GENERICS_TYPE_TOKEN genericsToken,
    UINT32 *            pcGenericClassTypeParams,
    TypeParamsList *    pGenericTypeParams)
{
    DD_ENTER_MAY_THROW;

    if (vmAppDomain.IsNull() || vmMethodDesc.IsNull())
    {
        ThrowHR(E_INVALIDARG);
    }

    _ASSERTE((pcGenericClassTypeParams != NULL) && (pGenericTypeParams != NULL));

    MethodDesc * pMD = vmMethodDesc.GetDacPtr();

    // Retrieve the number of type parameters for the class and
    // the number of type parameters for the method itself.
    // For example, the method Foo<T, U>::Bar<V>() has 2 class type parameters and 1 method type parameters.
    UINT32 cGenericClassTypeParams  = pMD->GetNumGenericClassArgs();
    UINT32 cGenericMethodTypeParams = pMD->GetNumGenericMethodArgs();
    UINT32 cTotalGenericTypeParams  = cGenericClassTypeParams + cGenericMethodTypeParams;

    // Set the out parameter.
    *pcGenericClassTypeParams = cGenericClassTypeParams;

    TypeHandle   thSpecificClass;
    MethodDesc * pSpecificMethod;

    // Try to retrieve a more specific MethodDesc and TypeHandle via the generics type token.
    // The generics token is not always guaranteed to be available.
    // For example, it may be unavailable in prologs and epilogs.
    // In dumps, not available can also mean a thrown exception for missing memory.
    BOOL fExact = FALSE;
    ALLOW_DATATARGET_MISSING_MEMORY(
        fExact = Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation(
            pMD,
            PTR_VOID((TADDR)genericsToken),
            &thSpecificClass,
            &pSpecificMethod);
    );
    if (!fExact ||
        !thSpecificClass.GetMethodTable()->SanityCheck() ||
        !pSpecificMethod->GetMethodTable()->SanityCheck())
    {
        // Use the canonical MethodTable and MethodDesc if the exact generics token is not available.
        thSpecificClass = TypeHandle(pMD->GetMethodTable());
        pSpecificMethod = pMD;
    }

    // Retrieve the array of class type parameters and the array of method type parameters.
    Instantiation classInst  = pSpecificMethod->GetExactClassInstantiation(thSpecificClass);
    Instantiation methodInst = pSpecificMethod->GetMethodInstantiation();
    _ASSERTE((classInst.IsEmpty())  == (cGenericClassTypeParams  == 0));
    _ASSERTE((methodInst.IsEmpty()) == (cGenericMethodTypeParams == 0));

    // allocate memory for the return array
    pGenericTypeParams->Alloc(cTotalGenericTypeParams);

    for (UINT32 i = 0; i < cTotalGenericTypeParams; i++)
    {
        // Retrieve the current type parameter depending on the index:
        // class type parameters come first, then method type parameters.
        TypeHandle thCurrent;
        if (i < cGenericClassTypeParams)
        {
            thCurrent = classInst[i];
        }
        else
        {
            thCurrent = methodInst[i - cGenericClassTypeParams];
        }

        // There is the possibility that we'll get this far with a dump and not fail, but still
        // not be able to get full info for a particular param.
        EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY_WITH_HANDLER
        {
            // Fill in the struct using the TypeHandle of the current type parameter if we can.
            VMPTR_TypeHandle vmTypeHandle = VMPTR_TypeHandle::NullPtr();
            vmTypeHandle.SetDacTargetPtr(thCurrent.AsTAddr());
            TypeHandleToExpandedTypeInfo(NoValueTypeBoxing,
                                         vmAppDomain,
                                         vmTypeHandle,
                                         &((*pGenericTypeParams)[i]));
        }
        EX_CATCH_ALLOW_DATATARGET_MISSING_MEMORY_WITH_HANDLER
        {
            // On failure for a particular type, default it back to System.__Canon.
            VMPTR_TypeHandle vmTHCanon = VMPTR_TypeHandle::NullPtr();
            TypeHandle thCanon = TypeHandle(g_pCanonMethodTableClass);
            vmTHCanon.SetDacTargetPtr(thCanon.AsTAddr());
            TypeHandleToExpandedTypeInfo(NoValueTypeBoxing,
                                         vmAppDomain,
                                         vmTHCanon,
                                         &((*pGenericTypeParams)[i]));
        }
        EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY_WITH_HANDLER
    }
}
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetClassOrValueTypeHandle
// Get a type handle for a class or value type from basic type data
// (metadata token and domain file).
// Arguments:
//    input: pData - contains either a pre-resolved type handle, or the
//                   metadata token and domain file to look one up with
// Return value: the type handle for the corresponding type
// Throws: CORDBG_E_CLASS_NOT_LOADED if the lookup fails.
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::GetClassOrValueTypeHandle(DebuggerIPCE_BasicTypeData * pData)
{
    // If a type handle was already supplied, simply unwrap it.
    if (!pData->vmTypeHandle.IsNull())
    {
        return TypeHandle::FromPtr(pData->vmTypeHandle.GetDacPtr());
    }

    // Otherwise have the loader look it up via metadata token and domain file.
    DomainAssembly * pDomainAssembly = pData->vmDomainAssembly.GetDacPtr();
    Module *         pModule         = pDomainAssembly->GetModule();

    TypeHandle typeHandle = ClassLoader::LookupTypeDefOrRefInModule(pModule, pData->metadataToken);
    if (typeHandle.IsNull())
    {
        LOG((LF_CORDB, LL_INFO10000, "D::BTITTH: class isn't loaded.\n"));
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }

    _ASSERTE(typeHandle.GetNumGenericArgs() == 0);
    return typeHandle;
} // DacDbiInterfaceImpl::GetClassOrValueTypeHandle
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetExactArrayTypeHandle
// Get an exact type handle for an array type.
// Arguments:
//    input: pTopLevelTypeData - type information for a top-level array type
//           pArgInfo - single-element list holding the actual type argument
//                      (the element type) for the array
// Return Value: the exact type handle for the array type
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::GetExactArrayTypeHandle(DebuggerIPCE_ExpandedTypeData * pTopLevelTypeData,
                                                        ArgInfoList *                   pArgInfo)
{
    _ASSERTE(pArgInfo->Count() == 1);

    // Resolve the element type, then build the array type from it plus the rank.
    TypeHandle thElement = BasicTypeInfoToTypeHandle(&((*pArgInfo)[0]));

    return FindLoadedArrayType(pTopLevelTypeData->elementType,
                               thElement,
                               pTopLevelTypeData->ArrayTypeData.arrayRank);
} // DacDbiInterfaceImpl::GetExactArrayTypeHandle
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetExactPtrOrByRefTypeHandle
// Get an exact type handle for a PTR or BYREF type.
// Arguments:
//    input: pTopLevelTypeData - type information for the PTR or BYREF type
//           pArgInfo - single-element list holding the actual type argument
//                      (the referent type)
// Return Value: the exact type handle for the address type
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::GetExactPtrOrByRefTypeHandle(DebuggerIPCE_ExpandedTypeData * pTopLevelTypeData,
                                                             ArgInfoList *                   pArgInfo)
{
    _ASSERTE(pArgInfo->Count() == 1);

    // Resolve the referent, then build the PTR/BYREF type from it.
    TypeHandle thReferent = BasicTypeInfoToTypeHandle(&((*pArgInfo)[0]));

    return FindLoadedPointerOrByrefType(pTopLevelTypeData->elementType, thReferent);
} // DacDbiInterfaceImpl::GetExactPtrOrByRefTypeHandle
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetExactClassTypeHandle
// get an exact type handle for a CLASS or VALUETYPE type
// Arguments:
// input: pTopLevelTypeData - type information for the CLASS or VALUETYPE type
// pArgInfo - contains the following information:
// m_genericArgsCount - number of generic parameters for the class
// m_pGenericArgs - list of generic parameters for the class--these
// are the actual parameters
// Return Value: the exact type handle for the type
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::GetExactClassTypeHandle(DebuggerIPCE_ExpandedTypeData * pTopLevelTypeData,
                                                        ArgInfoList *                   pArgInfo)
{
    Module *     pModule = pTopLevelTypeData->ClassTypeData.vmModule.GetDacPtr();
    int          argCount = pArgInfo->Count();
    TypeHandle   typeConstructor =
        ClassLoader::LookupTypeDefOrRefInModule(pModule, pTopLevelTypeData->ClassTypeData.metadataToken);

    // If we can't find the class, throw the appropriate HR. Note: if the class is not a value class and
    // the class is also not restored, then we must pretend that the class is still not loaded. We are gonna let
    // unrestored value classes slide, though, and special case access to the class's parent below.
    if (typeConstructor.IsNull())
    {
        LOG((LF_CORDB, LL_INFO10000, "D::ETITTH: class isn't loaded.\n"));
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }

    // if there are no generic parameters, we already have the correct type handle
    if (argCount == 0)
    {
        return typeConstructor;
    }

    // we have generic parameters--first validate we have a number consistent with the list
    // of parameters we received
    if ((unsigned int)argCount != typeConstructor.GetNumGenericArgs())
    {
        LOG((LF_CORDB, LL_INFO10000,
            "D::ETITTH: wrong number of type parameters, %d given, %d expected\n",
            argCount, typeConstructor.GetNumGenericArgs()));
        _ASSERTE((unsigned int)argCount == typeConstructor.GetNumGenericArgs());
        ThrowHR(E_FAIL);
    }

    // Guard against overflow of the instantiation's total byte size before allocating.
    S_UINT32 allocSize = S_UINT32(argCount) * S_UINT32(sizeof(TypeHandle));
    if (allocSize.IsOverflow())
    {
        ThrowHR(E_OUTOFMEMORY);
    }

    // Allocate exactly one TypeHandle per generic argument. (Fix: the previous code
    // passed allocSize.Value() as the *element count* to new[], allocating
    // argCount * sizeof(TypeHandle) TypeHandles--an over-allocation by a factor
    // of sizeof(TypeHandle).)
    NewArrayHolder<TypeHandle> pInst(new TypeHandle[argCount]);

    // convert the type information for each parameter to its corresponding type handle
    // and store it in the list
    for (unsigned int i = 0; i < (unsigned int)argCount; i++)
    {
        pInst[i] = BasicTypeInfoToTypeHandle(&((*pArgInfo)[i]));
    }

    // Finally, we find the type handle corresponding to this particular instantiation
    return FindLoadedInstantiation(typeConstructor.GetModule(),
                                   typeConstructor.GetCl(),
                                   argCount,
                                   pInst);
} // DacDbiInterfaceImpl::GetExactClassTypeHandle
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetExactFnPtrTypeHandle
// get an exact type handle for a FNPTR type
// Arguments:
// input: pArgInfo - Contains the following information:
// m_genericArgsCount - number of generic parameters for the referent
// m_pGenericArgs - list of generic parameters for the referent--these
// are the actual parameters for the function signature
// Return Value: the exact type handle for the type
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::GetExactFnPtrTypeHandle(ArgInfoList * pArgInfo)
{
    // Guard against overflow of the signature's total byte size before allocating.
    S_UINT32 allocSize = S_UINT32(pArgInfo->Count()) * S_UINT32(sizeof(TypeHandle));
    if( allocSize.IsOverflow() )
    {
        ThrowHR(E_OUTOFMEMORY);
    }

    // Allocate exactly one TypeHandle per signature element. (Fix: the previous code
    // passed allocSize.Value() as the *element count* to new[], allocating
    // Count() * sizeof(TypeHandle) TypeHandles--an over-allocation by a factor
    // of sizeof(TypeHandle).)
    NewArrayHolder<TypeHandle> pInst(new TypeHandle[pArgInfo->Count()]);

    // convert the type information for each parameter to its corresponding type handle
    // and store it in the list
    for (unsigned int i = 0; i < pArgInfo->Count(); i++)
    {
        pInst[i] = BasicTypeInfoToTypeHandle(&((*pArgInfo)[i]));
    }

    // find the type handle corresponding to this particular FNPTR
    return FindLoadedFnptrType(pArgInfo->Count(), pInst);
} // DacDbiInterfaceImpl::GetExactFnPtrTypeHandle
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::BasicTypeInfoToTypeHandle
// Convert basic type info for a type parameter that came from a top-level type to
// the corresponding type handle. If the type parameter is an array or pointer
// type, we simply extract the LS type handle from the VMPTR_TypeHandle that is
// part of the type information. If the type parameter is a class or value type,
// we use the metadata token and domain file in the type info to look up the
// appropriate type handle. If the type parameter is any other types, we get the
// type handle by having the loader look up the type handle for the element type.
// Arguments:
// input: pArgTypeData - basic type information for the type.
// Return Value: the type handle for the type.
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::BasicTypeInfoToTypeHandle(DebuggerIPCE_BasicTypeData * pArgTypeData)
{
    LOG((LF_CORDB, LL_INFO10000,
        "D::BTITTH: expanding basic right-side type to left-side type, ELEMENT_TYPE: %d.\n",
        pArgTypeData->elementType));
    TypeHandle typeHandle = TypeHandle();

    switch (pArgTypeData->elementType)
    {
        // Constructed types (arrays, pointers, byrefs, function pointers):
        // the RS already carries the LS type handle, so just unwrap it.
        case ELEMENT_TYPE_ARRAY:
        case ELEMENT_TYPE_SZARRAY:
        case ELEMENT_TYPE_PTR:
        case ELEMENT_TYPE_BYREF:
        case ELEMENT_TYPE_FNPTR:
            _ASSERTE(!pArgTypeData->vmTypeHandle.IsNull());
            typeHandle = TypeHandle::FromPtr(pArgTypeData->vmTypeHandle.GetDacPtr());
            break;

        // Classes and value types: look the type up from the metadata token
        // and module carried in the type data.
        case ELEMENT_TYPE_CLASS:
        case ELEMENT_TYPE_VALUETYPE:
            typeHandle = GetClassOrValueTypeHandle(pArgTypeData);
            break;

        // Primitive/simple element types: ask the loader for the (already
        // loaded) canonical type.
        default:
            typeHandle = FindLoadedElementType(pArgTypeData->elementType);
            break;
    }
    // Any lookup failure above leaves the handle null; surface that as
    // "class not loaded" so the RS can retry later.
    if (typeHandle.IsNull())
    {
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }
    return typeHandle;
} // DacDbiInterfaceImpl::BasicTypeInfoToTypeHandle
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::ExpandedTypeInfoToTypeHandle
// Convert type information for a top-level type to an exact type handle. This
// information includes information about the element type if the top-level type is
// an array type, the referent if the top-level type is a pointer type, or actual
// parameters if the top-level type is a generic class or value type.
// Arguments:
// input: pTopLevelTypeData - type information for the top-level type
// pArgInfo - contains the following information:
// m_genericArtsCount - number of parameters
// m_pGenericArgs - list of actual parameters
// Return Value: the exact type handle corresponding to the type represented by
// pTopLevelTypeData
//-----------------------------------------------------------------------------
TypeHandle DacDbiInterfaceImpl::ExpandedTypeInfoToTypeHandle(DebuggerIPCE_ExpandedTypeData * pTopLevelTypeData,
                                                             ArgInfoList *                   pArgInfo)
{
    WRAPPER_NO_CONTRACT;

    // Fix: the LOG statement referenced the undeclared identifier 'pData'
    // (the parameter is named pTopLevelTypeData), which fails to compile in
    // builds where LOG is enabled.
    LOG((LF_CORDB, LL_INFO10000,
        "D::ETITTH: expanding right-side type to left-side type, ELEMENT_TYPE: %d.\n",
        pTopLevelTypeData->elementType));
    TypeHandle typeHandle = TypeHandle();

    // depending on the top-level type, get the type handle incorporating information about any type arguments
    switch (pTopLevelTypeData->elementType)
    {
    case ELEMENT_TYPE_ARRAY:
    case ELEMENT_TYPE_SZARRAY:
        typeHandle = GetExactArrayTypeHandle(pTopLevelTypeData, pArgInfo);
        break;

    case ELEMENT_TYPE_PTR:
    case ELEMENT_TYPE_BYREF:
        typeHandle = GetExactPtrOrByRefTypeHandle(pTopLevelTypeData, pArgInfo);
        break;

    case ELEMENT_TYPE_CLASS:
    case ELEMENT_TYPE_VALUETYPE:
        typeHandle = GetExactClassTypeHandle(pTopLevelTypeData, pArgInfo);
        break;

    case ELEMENT_TYPE_FNPTR:
        typeHandle = GetExactFnPtrTypeHandle(pArgInfo);
        break;

    default:
        typeHandle = FindLoadedElementType(pTopLevelTypeData->elementType);
        break;
    } // end switch (pTopLevelTypeData->elementType)

    if (typeHandle.IsNull())
    {
        // This may fail because there are cases when a type can be used (and so visible to the
        // debugger), but not yet loaded to the point of being available in the EETypeHashTable.
        // For example, generic value types (without explicit constructors) may not need their
        // exact instantiation type to be loaded in order to be used as a field of an object
        // created on the heap
        LOG((LF_CORDB, LL_INFO10000, "D::ETITTH: type isn't loaded.\n"));
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }
    return typeHandle;
} // DacDbiInterfaceImpl::ExpandedTypeInfoToTypeHandle
// ----------------------------------------------------------------------------
// DacDbi API: GetThreadStaticAddress
// Get the target field address of a thread local static.
//
// Notes:
// The address is constant and could be cached.
//
// This can commonly fail, in which case, it will return NULL.
// ----------------------------------------------------------------------------
CORDB_ADDRESS DacDbiInterfaceImpl::GetThreadStaticAddress(VMPTR_FieldDesc vmField,
                                                          VMPTR_Thread    vmRuntimeThread)
{
    DD_ENTER_MAY_THROW;

    Thread * pRuntimeThread = vmRuntimeThread.GetDacPtr();
    PTR_FieldDesc pFieldDesc = vmField.GetDacPtr();
    // NULL is the documented "storage not available" result (see function header).
    TADDR fieldAddress = NULL;

    _ASSERTE(pRuntimeThread != NULL);

    // Find out whether the field is thread local and get its address.
    if (pFieldDesc->IsThreadStatic())
    {
        // "NoCreate": never forces allocation of the thread-local storage;
        // returns NULL if the field's storage hasn't been set up yet.
        fieldAddress = pRuntimeThread->GetStaticFieldAddrNoCreate(pFieldDesc);
    }
    else
    {
        // In case we have more special cases added later, this will allow us to notice the need to
        // update this function.
        ThrowHR(E_NOTIMPL);
    }
    return fieldAddress;
} // DacDbiInterfaceImpl::GetThreadStaticAddress
// Get the target field address of a collectible types static.
CORDB_ADDRESS DacDbiInterfaceImpl::GetCollectibleTypeStaticAddress(VMPTR_FieldDesc vmField,
                                                                   VMPTR_AppDomain vmAppDomain)
{
    DD_ENTER_MAY_THROW;

    AppDomain  * pAppDomain = vmAppDomain.GetDacPtr();
    PTR_FieldDesc pFieldDesc = vmField.GetDacPtr();
    _ASSERTE(pAppDomain != NULL);

    //
    // Verify this field is of the right type: only plain (non-special) statics
    // are supported here.
    //
    if(!pFieldDesc->IsStatic() ||
        pFieldDesc->IsSpecialStatic())
    {
        _ASSERTE(!"BUG: Unsupported static field type for collectible types");
    }

    //
    // Check that the data is available
    //
    /* TODO: Ideally we should be checking if the class is allocated first, however
             we don't appear to be doing this even for non-collectible statics and
             we have never seen an issue.
    */

    //
    // Get the address. GetBase() yields PTR_NULL when the statics storage for
    // the (collectible) type hasn't been allocated; report that as a NULL address.
    //
    PTR_VOID base = pFieldDesc->GetBase();
    if (base == PTR_NULL)
    {
        return PTR_HOST_TO_TADDR(NULL);
    }

    //
    // Store the result and return
    //
    PTR_VOID addr = pFieldDesc->GetStaticAddressHandle(base);
    return PTR_TO_TADDR(addr);
} // DacDbiInterfaceImpl::GetCollectibleTypeStaticAddress
// DacDbi API: GetTypeHandleParams
// - gets the necessary data for a type handle, i.e. its type parameters, e.g. "String" and "List<int>" from the type handle
// for "Dict<String,List<int>>", and sends it back to the right side.
// - pParams is allocated and initialized by this function
// - This should not fail except for OOM
// DacDbi API: GetTypeHandleParams
// Gets the type parameters for a type handle (e.g. "String" and "List<int>" from
// the handle for "Dict<String,List<int>>") and returns them via pParams.
// pParams is allocated and initialized here; this should not fail except for OOM.
void DacDbiInterfaceImpl::GetTypeHandleParams(VMPTR_AppDomain  vmAppDomain,
                                              VMPTR_TypeHandle vmTypeHandle,
                                              TypeParamsList * pParams)
{
    // Fix: terminate the DD_ENTER_MAY_THROW macro with a semicolon, consistent
    // with every other DAC entry point in this file.
    DD_ENTER_MAY_THROW;

    TypeHandle typeHandle = TypeHandle::FromPtr(vmTypeHandle.GetDacPtr());
    LOG((LF_CORDB, LL_INFO10000, "D::GTHP: getting type parameters for 0x%08x 0x%0x8.\n",
        vmAppDomain.GetDacPtr(), typeHandle.AsPtr()));

    // Find the class given its type handle.
    _ASSERTE(pParams->IsEmpty());
    pParams->Alloc(typeHandle.GetNumGenericArgs());

    // collect type information for each type parameter
    for (unsigned int i = 0; i < pParams->Count(); ++i)
    {
        VMPTR_TypeHandle thInst = VMPTR_TypeHandle::NullPtr();
        thInst.SetDacTargetPtr(typeHandle.GetInstantiation()[i].AsTAddr());

        TypeHandleToExpandedTypeInfo(NoValueTypeBoxing,
                                     vmAppDomain,
                                     thInst,
                                     &((*pParams)[i]));
    }

    LOG((LF_CORDB, LL_INFO10000, "D::GTHP: sending result"));
} // DacDbiInterfaceImpl::GetTypeHandleParams
//-----------------------------------------------------------------------------
// DacDbi API: GetSimpleType
// gets the metadata token and domain file corresponding to a simple type
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetSimpleType(VMPTR_AppDomain      vmAppDomain,
                                        CorElementType       simpleType,
                                        mdTypeDef           *pMetadataToken,
                                        VMPTR_Module        *pVmModule,
                                        VMPTR_DomainAssembly *pVmDomainAssembly)
{
    DD_ENTER_MAY_THROW;

    AppDomain *pAppDomain = vmAppDomain.GetDacPtr();

    // if we fail to get either a valid type handle or module, we will want to send back
    // a NULL domain file too, so we'll to preinitialize this here.
    _ASSERTE(pVmDomainAssembly != NULL);
    *pVmDomainAssembly = VMPTR_DomainAssembly::NullPtr();

    // FindLoadedElementType will return NULL if the type hasn't been loaded yet.
    TypeHandle typeHandle = FindLoadedElementType(simpleType);

    if (typeHandle.IsNull())
    {
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }
    else
    {
        // Fill in the token and module for the loaded type.
        _ASSERTE(pMetadataToken != NULL);
        *pMetadataToken = typeHandle.GetCl();

        Module * pModule = typeHandle.GetModule();
        if (pModule == NULL)
            ThrowHR(CORDBG_E_TARGET_INCONSISTENT);

        pVmModule->SetHostPtr(pModule);

        // The domain assembly is only reported when a specific AppDomain was given.
        if (pAppDomain)
        {
            pVmDomainAssembly->SetHostPtr(pModule->GetDomainAssembly());
            if (pVmDomainAssembly->IsNull())
                ThrowHR(CORDBG_E_TARGET_INCONSISTENT);
        }
    }
    LOG((LF_CORDB, LL_INFO10000, "D::STI: sending result.\n"));
} // DacDbiInterfaceImpl::GetSimpleType
BOOL DacDbiInterfaceImpl::IsExceptionObject(VMPTR_Object vmObject)
{
    DD_ENTER_MAY_THROW;

    // Resolve the target object, then defer to the MethodTable-based overload.
    Object * pTargetObj = vmObject.GetDacPtr();
    return IsExceptionObject(pTargetObj->GetMethodTable());
}
BOOL DacDbiInterfaceImpl::IsExceptionObject(MethodTable* pMT)
{
    // Compare target addresses so the check works on DAC-marshalled pointers.
    PTR_MethodTable pExceptionMT = g_pExceptionClass;
    TADDR exceptionClassAddr = dac_cast<TADDR>(pExceptionMT);

    // Walk the inheritance chain; a match anywhere means "is an Exception".
    // The candidate address is tested before advancing, mirroring a do/while.
    TADDR candidateAddr = dac_cast<TADDR>(pMT);
    for (;;)
    {
        if (candidateAddr == exceptionClassAddr)
            return TRUE;

        pMT = pMT->GetParentMethodTable();
        candidateAddr = dac_cast<TADDR>(pMT);
        if (!pMT)
            break;
    }

    return FALSE;
}
// Resolve a code address to its MethodDesc, handling both jitted and
// not-yet-jitted (stub) code. Returns E_INVALIDARG if no MethodDesc is found.
HRESULT DacDbiInterfaceImpl::GetMethodDescPtrFromIpEx(TADDR funcIp, VMPTR_MethodDesc* ppMD)
{
    DD_ENTER_MAY_THROW;

    // The fast path is check if the code is jitted and the code manager has it available.
    CLRDATA_ADDRESS mdAddr;
    HRESULT hr = g_dacImpl->GetMethodDescPtrFromIP(TO_CDADDR(funcIp), &mdAddr);
    if (S_OK == hr)
    {
        ppMD->SetDacTargetPtr(CLRDATA_ADDRESS_TO_TADDR(mdAddr));
        return hr;
    }

    // Otherwise try to see if a method desc is available for the method that isn't jitted by walking the code stubs.
    MethodDesc* pMD = MethodTable::GetMethodDescForSlotAddress(PINSTRToPCODE(funcIp));

    if (pMD == NULL)
        return E_INVALIDARG;

    ppMD->SetDacTargetPtr(PTR_HOST_TO_TADDR(pMD));
    return S_OK;
}
BOOL DacDbiInterfaceImpl::IsDelegate(VMPTR_Object vmObject)
{
    DD_ENTER_MAY_THROW;

    // A null VMPTR can never refer to a delegate.
    if (vmObject.IsNull())
        return FALSE;

    // Ask the (GC-safe) method table whether the object is a delegate type.
    Object * pTargetObj = vmObject.GetDacPtr();
    return pTargetObj->GetGCSafeMethodTable()->IsDelegate();
}
//-----------------------------------------------------------------------------
// DacDbi API: GetDelegateType
// Given a delegate pointer, compute the type of delegate according to the data held in it.
//-----------------------------------------------------------------------------
HRESULT DacDbiInterfaceImpl::GetDelegateType(VMPTR_Object delegateObject, DelegateType *delegateType)
{
    DD_ENTER_MAY_THROW;

    _ASSERTE(!delegateObject.IsNull());
    _ASSERTE(delegateType != NULL);

#ifdef _DEBUG
    // ensure we have a Delegate object
    IsDelegate(delegateObject);
#endif

    // Ideally, we would share the implementation of this method with the runtime, or get the same information
    // we are getting from here from other EE methods. Nonetheless, currently the implementation is sharded across
    // several pieces of logic so this replicates the logic mostly due to time constraints. The Mainly from:
    // - System.Private.CoreLib!System.Delegate.GetMethodImpl and System.Private.CoreLib!System.MulticastDelegate.GetMethodImpl
    // - System.Private.CoreLib!System.Delegate.GetTarget and System.Private.CoreLib!System.MulticastDelegate.GetTarget
    // - coreclr!COMDelegate::GetMethodDesc and coreclr!COMDelegate::FindMethodHandle
    // - coreclr!COMDelegate::DelegateConstruct and the delegate type table in
    // - DELEGATE KINDS TABLE in comdelegate.cpp

    *delegateType = DelegateType::kUnknownDelegateType;
    PTR_DelegateObject pDelObj = dac_cast<PTR_DelegateObject>(delegateObject.GetDacPtr());
    INT_PTR invocationCount = pDelObj->GetInvocationCount();

    if (invocationCount == -1)
    {
        // We could get a native code for this case from _methodPtr, but not a methodDef as we'll need.
        // We can also get the shuffling thunk. However, this doesn't have a token and there's
        // no easy way to expose through the DBI now.
        *delegateType = kUnmanagedFunctionDelegate;
        return S_OK;
    }

    PTR_Object pInvocationList = OBJECTREFToObject(pDelObj->GetInvocationList());

    // Fix: invocationCount is an INT_PTR (integer), so compare against 0 rather
    // than NULL to avoid pointer/integer conversion warnings.
    if (invocationCount == 0)
    {
        if (pInvocationList == NULL)
        {
            // If this delegate points to a static function or this is a open virtual delegate, this should be non-null
            // Special case: This might fail in a VSD delegate (instance open virtual)...
            // TODO: There is the special signatures cases missing.
            TADDR targetMethodPtr = PCODEToPINSTR(pDelObj->GetMethodPtrAux());
            // Fix: TADDR is an integer type; compare against 0, not NULL.
            if (targetMethodPtr == 0)
            {
                // Static extension methods, other closed static delegates, and instance delegates fall into this category.
                *delegateType = kClosedDelegate;
            }
            else {
                *delegateType = kOpenDelegate;
            }

            return S_OK;
        }
    }
    else
    {
        if (pInvocationList != NULL)
        {
            PTR_MethodTable invocationListMT = pInvocationList->GetGCSafeMethodTable();
            if (invocationListMT->IsArray())
                *delegateType = kTrueMulticastDelegate;

            if (invocationListMT->IsDelegate())
                *delegateType = kWrapperDelegate;

            // Cases missing: Loader allocator, or dynamic resolver.
            return S_OK;
        }

        // According to the table in comdelegates.cpp, there shouldn't be a case where .
        // Multicast falls outside of the table, so not
    }

    // Fix: use _ASSERTE (the convention throughout this file) instead of _ASSERT.
    _ASSERTE(FALSE);
    *delegateType = kUnknownDelegateType;
    return CORDBG_E_UNSUPPORTED_DELEGATE;
}
// Retrieve the domain assembly and methodDef token of the function a delegate
// points at. Only closed and open delegates are supported; anything else
// returns E_FAIL.
HRESULT DacDbiInterfaceImpl::GetDelegateFunctionData(
    DelegateType delegateType,
    VMPTR_Object delegateObject,
    OUT VMPTR_DomainAssembly *ppFunctionDomainAssembly,
    OUT mdMethodDef *pMethodDef)
{
    DD_ENTER_MAY_THROW;

#ifdef _DEBUG
    // ensure we have a Delegate object
    IsDelegate(delegateObject);
#endif

    HRESULT hr = S_OK;
    PTR_DelegateObject pDelObj = dac_cast<PTR_DelegateObject>(delegateObject.GetDacPtr());
    // Fix: TADDR is an integer type; initialize with 0 rather than NULL to
    // avoid pointer/integer conversion warnings.
    TADDR targetMethodPtr = 0;
    VMPTR_MethodDesc pMD;

    // Pick the code pointer field that corresponds to the delegate kind.
    switch (delegateType)
    {
    case kClosedDelegate:
        targetMethodPtr = PCODEToPINSTR(pDelObj->GetMethodPtr());
        break;
    case kOpenDelegate:
        targetMethodPtr = PCODEToPINSTR(pDelObj->GetMethodPtrAux());
        break;
    default:
        return E_FAIL;
    }

    // Map the code address back to a MethodDesc (works for jitted and stub code).
    hr = GetMethodDescPtrFromIpEx(targetMethodPtr, &pMD);
    if (hr != S_OK)
        return hr;

    ppFunctionDomainAssembly->SetDacTargetPtr(dac_cast<TADDR>(pMD.GetDacPtr()->GetModule()->GetDomainAssembly()));
    *pMethodDef = pMD.GetDacPtr()->GetMemberDef();

    return hr;
}
// Retrieve the target ("this") object of a delegate, and the AppDomain it
// lives in. For non-closed delegates the target object is reported as NULL
// and the AppDomain is taken from the delegate object itself.
HRESULT DacDbiInterfaceImpl::GetDelegateTargetObject(
    DelegateType delegateType,
    VMPTR_Object delegateObject,
    OUT VMPTR_Object *ppTargetObj,
    OUT VMPTR_AppDomain *ppTargetAppDomain)
{
    DD_ENTER_MAY_THROW;

#ifdef _DEBUG
    // ensure we have a Delegate object
    IsDelegate(delegateObject);
#endif

    HRESULT hr = S_OK;
    PTR_DelegateObject pDelObj = dac_cast<PTR_DelegateObject>(delegateObject.GetDacPtr());

    switch (delegateType)
    {
        case kClosedDelegate:
        {
            // Closed delegates capture a "this" object; report it and the
            // AppDomain derived from its method table.
            PTR_Object pRemoteTargetObj = OBJECTREFToObject(pDelObj->GetTarget());
            ppTargetObj->SetDacTargetPtr(pRemoteTargetObj.GetAddr());
            ppTargetAppDomain->SetDacTargetPtr(dac_cast<TADDR>(pRemoteTargetObj->GetGCSafeMethodTable()->GetDomain()->AsAppDomain()));
            break;
        }

        default:
            // No target object; fall back to the delegate's own AppDomain.
            ppTargetObj->SetDacTargetPtr(NULL);
            ppTargetAppDomain->SetDacTargetPtr(dac_cast<TADDR>(pDelObj->GetGCSafeMethodTable()->GetDomain()->AsAppDomain()));
            break;
    }

    return hr;
}
// Callback used while enumerating loader-heap page regions: records every
// region into the accumulator smuggled through pvArgs.
static bool TrackMemoryRangeHelper(PTR_VOID pvArgs, PTR_VOID pvAllocationBase, SIZE_T cbReserved)
{
    // The pvArgs is really pointing to a debugger-side container. Sadly the callback only takes a PTR_VOID.
    CQuickArrayList<COR_MEMORY_RANGE> *pAccumulator =
        (CQuickArrayList<COR_MEMORY_RANGE>*)(dac_cast<TADDR>(pvArgs));

    TADDR regionBegin = dac_cast<TADDR>(pvAllocationBase);
    pAccumulator->Push({regionBegin, regionBegin + cbReserved});

    // This is a tracking function, not a search callback. Pretend we never found what we were looking for
    // to get all possible ranges.
    return false;
}
// Collect the reserved memory ranges of all heaps owned by a LoaderAllocator
// (the three standard loader heaps plus any virtual-call-stub heaps) into
// rangeAcummulator.
void DacDbiInterfaceImpl::EnumerateMemRangesForLoaderAllocator(PTR_LoaderAllocator pLoaderAllocator, CQuickArrayList<COR_MEMORY_RANGE> *rangeAcummulator)
{
    CQuickArrayList<PTR_LoaderHeap> heapsToEnumerate;

    // We always expect to see these three heaps
    _ASSERTE(pLoaderAllocator->GetLowFrequencyHeap() != NULL);
    heapsToEnumerate.Push(pLoaderAllocator->GetLowFrequencyHeap());

    _ASSERTE(pLoaderAllocator->GetHighFrequencyHeap() != NULL);
    heapsToEnumerate.Push(pLoaderAllocator->GetHighFrequencyHeap());

    _ASSERTE(pLoaderAllocator->GetStubHeap() != NULL);
    heapsToEnumerate.Push(pLoaderAllocator->GetStubHeap());

    // GetVirtualCallStubManager returns VirtualCallStubManager*, but it's really an address to target as
    // pLoaderAllocator is DACized. Cast it so we don't try to to a Host to Target translation.
    VirtualCallStubManager *pVcsMgr = PTR_VirtualCallStubManager(TO_TADDR(pLoaderAllocator->GetVirtualCallStubManager()));
    LOG((LF_CORDB, LL_INFO10000, "DDBII::EMRFLA: VirtualCallStubManager 0x%x\n", PTR_HOST_TO_TADDR(pVcsMgr)));
    if (pVcsMgr)
    {
        // Any of the stub-manager heaps may be absent; only enumerate the ones
        // that exist.
        if (pVcsMgr->indcell_heap != NULL) heapsToEnumerate.Push(pVcsMgr->indcell_heap);
        if (pVcsMgr->lookup_heap != NULL) heapsToEnumerate.Push(pVcsMgr->lookup_heap);
        if (pVcsMgr->resolve_heap != NULL) heapsToEnumerate.Push(pVcsMgr->resolve_heap);
        if (pVcsMgr->dispatch_heap != NULL) heapsToEnumerate.Push(pVcsMgr->dispatch_heap);
        if (pVcsMgr->cache_entry_heap != NULL) heapsToEnumerate.Push(pVcsMgr->cache_entry_heap);
    }

    // EnumPageRegions only passes a PTR_VOID through to the callback, so the
    // accumulator pointer is smuggled as a TADDR (see TrackMemoryRangeHelper).
    TADDR rangeAccumAsTaddr = TO_TADDR(rangeAcummulator);
    for (uint32_t i = 0; i < (uint32_t)heapsToEnumerate.Size(); i++)
    {
        LOG((LF_CORDB, LL_INFO10000, "DDBII::EMRFLA: LoaderHeap 0x%x\n", heapsToEnumerate[i].GetAddr()));
        heapsToEnumerate[i]->EnumPageRegions(TrackMemoryRangeHelper, rangeAccumAsTaddr);
    }
}
// Collect the memory ranges of every JIT code heap (loader-backed and
// host-backed) into rangeAcummulator.
void DacDbiInterfaceImpl::EnumerateMemRangesForJitCodeHeaps(CQuickArrayList<COR_MEMORY_RANGE> *rangeAcummulator)
{
    // We should always have a valid EEJitManager with at least one code heap.
    EEJitManager *pEM = ExecutionManager::GetEEJitManager();
    _ASSERTE(pEM != NULL && pEM->m_pCodeHeap.IsValid());

    PTR_HeapList pHeapList = pEM->m_pCodeHeap;
    while (pHeapList != NULL)
    {
        CodeHeap *pHeap = pHeapList->pHeap;
        DacpJitCodeHeapInfo jitCodeHeapInfo = DACGetHeapInfoForCodeHeap(pHeap);

        switch (jitCodeHeapInfo.codeHeapType)
        {
            case CODEHEAP_LOADER:
            {
                // Loader-backed code heap: enumerate the underlying loader
                // heap's page regions.
                TADDR targetLoaderHeap = CLRDATA_ADDRESS_TO_TADDR(jitCodeHeapInfo.LoaderHeap);
                LOG((LF_CORDB, LL_INFO10000,
                    "DDBII::EMRFJCH: LoaderCodeHeap 0x%x with LoaderHeap at 0x%x\n",
                    PTR_HOST_TO_TADDR(pHeap), targetLoaderHeap));
                PTR_ExplicitControlLoaderHeap pLoaderHeap = PTR_ExplicitControlLoaderHeap(targetLoaderHeap);
                pLoaderHeap->EnumPageRegions(TrackMemoryRangeHelper, TO_TADDR(rangeAcummulator));
                break;
            }

            case CODEHEAP_HOST:
            {
                // Host code heap: the heap info already carries its bounds.
                LOG((LF_CORDB, LL_INFO10000,
                    "DDBII::EMRFJCH: HostCodeHeap 0x%x\n",
                    PTR_HOST_TO_TADDR(pHeap)));
                rangeAcummulator->Push({
                    CLRDATA_ADDRESS_TO_TADDR(jitCodeHeapInfo.HostData.baseAddr),
                    CLRDATA_ADDRESS_TO_TADDR(jitCodeHeapInfo.HostData.currentAddr)
                });
                break;
            }

            default:
            {
                LOG((LF_CORDB, LL_INFO10000, "DDBII::EMRFJCH: unknown heap type at 0x%x\n\n", pHeap));
                // Fix: asserting a string literal is always true, so the old
                // _ASSERTE("...") could never fire. Negate it so an unknown
                // heap type actually trips the assert in debug builds.
                _ASSERTE(!"Unknown heap type enumerating code ranges.");
                break;
            }
        }

        pHeapList = pHeapList->GetNext();
    }
}
// DacDbi API: GetLoaderHeapMemoryRanges
// Report the memory ranges of the global loader allocator's heaps and of the
// JIT code heaps via pRanges. Exceptions are converted into a failure HRESULT.
HRESULT DacDbiInterfaceImpl::GetLoaderHeapMemoryRanges(DacDbiArrayList<COR_MEMORY_RANGE> *pRanges)
{
    LOG((LF_CORDB, LL_INFO10000, "DDBII::GLHMR\n"));
    DD_ENTER_MAY_THROW;

    HRESULT hr = S_OK;

    EX_TRY
    {
        CQuickArrayList<COR_MEMORY_RANGE> memoryRanges;

        // Anything that's loaded in the SystemDomain or into the main AppDomain's default context in .NET Core
        // and after uses only one global allocator. Enumerating that one is enough for most purposes.
        // This doesn't consider any uses of AssemblyLoadingContexts (Unloadable or not). Each context has
        // it's own LoaderAllocator, but there's no easy way of getting a hand at them other than going through
        // the heap, getting a managed LoaderAllocators, from there getting a Scout, and from there getting a native
        // pointer to the LoaderAllocator tos enumerate.
        PTR_LoaderAllocator pGlobalAllocator = SystemDomain::System()->GetLoaderAllocator();
        _ASSERTE(pGlobalAllocator);
        EnumerateMemRangesForLoaderAllocator(pGlobalAllocator, &memoryRanges);

        EnumerateMemRangesForJitCodeHeaps(&memoryRanges);

        // This code doesn't enumerate module thunk heaps to support IJW.
        // It's a fairly rare scenario and requires to enumerate all modules.
        // The return for such added time is minimal.

        // Copy the accumulated ranges into the caller's output list.
        _ASSERTE(memoryRanges.Size() < INT_MAX);
        pRanges->Init(memoryRanges.Ptr(), (UINT) memoryRanges.Size());
    }
    EX_CATCH_HRESULT(hr);

    return hr;
}
// Extract the saved stack trace from a managed Exception object and marshal
// each frame (AppDomain, DomainAssembly, IP, methodDef, foreign-frame flag)
// into dacStackFrames. The output list stays unallocated if there are no frames.
void DacDbiInterfaceImpl::GetStackFramesFromException(VMPTR_Object vmObject, DacDbiArrayList<DacExceptionCallStackData>& dacStackFrames)
{
    DD_ENTER_MAY_THROW;

    PTR_Object objPtr = vmObject.GetDacPtr();

#ifdef _DEBUG
    // ensure we have an Exception object
    MethodTable* pMT = objPtr->GetMethodTable();
    _ASSERTE(IsExceptionObject(pMT));
#endif

    OBJECTREF objRef = ObjectToOBJECTREF(objPtr);

    DebugStackTrace::GetStackFramesData stackFramesData;

    // skip = 0 / NumFramesRequested = 0 => retrieve every stored frame.
    stackFramesData.pDomain = NULL;
    stackFramesData.skip = 0;
    stackFramesData.NumFramesRequested = 0;

    DebugStackTrace::GetStackFramesFromException(&objRef, &stackFramesData);

    INT32 dacStackFramesLength = stackFramesData.cElements;

    if (dacStackFramesLength > 0)
    {
        dacStackFrames.Alloc(dacStackFramesLength);

        for (INT32 index = 0; index < dacStackFramesLength; ++index)
        {
            DebugStackTrace::DebugStackTraceElement const& currentElement = stackFramesData.pElements[index];
            DacExceptionCallStackData& currentFrame = dacStackFrames[index];

            // Resolve the frame's method to its module/assembly/domain so the
            // RS can identify it.
            Module* pModule = currentElement.pFunc->GetModule();
            BaseDomain* pBaseDomain = currentElement.pFunc->GetAssembly()->GetDomain();

            AppDomain* pDomain = NULL;
            DomainAssembly* pDomainAssembly = NULL;

            pDomain = pBaseDomain->AsAppDomain();

            _ASSERTE(pDomain != NULL);

            pDomainAssembly = pModule->GetDomainAssembly();
            _ASSERTE(pDomainAssembly != NULL);

            currentFrame.vmAppDomain.SetHostPtr(pDomain);
            currentFrame.vmDomainAssembly.SetHostPtr(pDomainAssembly);
            currentFrame.ip = currentElement.ip;
            currentFrame.methodDef = currentElement.pFunc->GetMemberDef();
            currentFrame.isLastForeignExceptionFrame = (currentElement.flags & STEF_LAST_FRAME_FROM_FOREIGN_STACK_TRACE) != 0;
        }
    }
}
#ifdef FEATURE_COMINTEROP
// Walk object -> sync block -> interop info -> RCW, returning NULL at the
// first missing link (object has no sync block, no interop info, or no RCW).
PTR_RCW GetRcwFromVmptrObject(VMPTR_Object vmObject)
{
    Object * pTargetObj = vmObject.GetDacPtr();

    PTR_SyncBlock pSyncBlock = pTargetObj->PassiveGetSyncBlock();
    if (pSyncBlock == NULL)
        return NULL;

    PTR_InteropSyncBlockInfo pInteropInfo = pSyncBlock->GetInteropInfoNoCreate();
    if (pInteropInfo == NULL)
        return NULL;

    return dac_cast<PTR_RCW>(pInteropInfo->DacGetRawRCW());
}
#endif
// Returns TRUE if the object has an associated Runtime Callable Wrapper.
// Always FALSE when COM interop is compiled out.
BOOL DacDbiInterfaceImpl::IsRcw(VMPTR_Object vmObject)
{
#ifdef FEATURE_COMINTEROP
    DD_ENTER_MAY_THROW;
    return GetRcwFromVmptrObject(vmObject) != NULL;
#else
    return FALSE;
#endif // FEATURE_COMINTEROP
}
// Legacy WinRT API: intentionally returns an empty list (WinRT support was removed).
void DacDbiInterfaceImpl::GetRcwCachedInterfaceTypes(
                        VMPTR_Object vmObject,
                        VMPTR_AppDomain vmAppDomain,
                        BOOL bIInspectableOnly,
                        DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pDacInterfaces)
{
    // Legacy WinRT API.
    pDacInterfaces->Alloc(0);
}
// Report the COM interface pointers cached on an object's RCW (optionally
// only IInspectable-derived ones). Produces an empty list when the object
// has no RCW or COM interop is compiled out.
void DacDbiInterfaceImpl::GetRcwCachedInterfacePointers(
                    VMPTR_Object vmObject,
                    BOOL bIInspectableOnly,
                    DacDbiArrayList<CORDB_ADDRESS> * pDacItfPtrs)
{
#ifdef FEATURE_COMINTEROP
    DD_ENTER_MAY_THROW;

    // Fix: removed the unused local 'Object* objPtr = vmObject.GetDacPtr();'
    // (its value was never read; GetRcwFromVmptrObject resolves the object itself),
    // which triggered an unused-variable warning.
    InlineSArray<TADDR, INTERFACE_ENTRY_CACHE_SIZE> rgUnks;

    PTR_RCW pRCW = GetRcwFromVmptrObject(vmObject);
    if (pRCW != NULL)
    {
        // Copy the cached interface pointers out of the RCW into the output list.
        pRCW->GetCachedInterfacePointers(bIInspectableOnly, &rgUnks);

        pDacItfPtrs->Alloc(rgUnks.GetCount());

        for (COUNT_T i = 0; i < rgUnks.GetCount(); ++i)
        {
            (*pDacItfPtrs)[i] = (CORDB_ADDRESS)(rgUnks[i]);
        }
    }
    else
#endif // FEATURE_COMINTEROP
    {
        pDacItfPtrs->Alloc(0);
    }
}
// Legacy WinRT API: intentionally returns an empty list (WinRT support was removed).
void DacDbiInterfaceImpl::GetCachedWinRTTypesForIIDs(
                    VMPTR_AppDomain vmAppDomain,
                    DacDbiArrayList<GUID> & iids,
    OUT DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pTypes)
{
    pTypes->Alloc(0);
}
// Legacy WinRT API: intentionally returns an empty list (WinRT support was removed).
// NOTE: pGuids is left untouched.
void DacDbiInterfaceImpl::GetCachedWinRTTypes(
                    VMPTR_AppDomain vmAppDomain,
    OUT DacDbiArrayList<GUID> * pGuids,
    OUT DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pTypes)
{
    pTypes->Alloc(0);
}
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::FindField
// Finds information for a particular class field
// Arguments:
// input: thApprox - type handle for the type to which the field belongs
// fldToken - metadata token for the field
// Return Value: FieldDesc containing information for the field if found or NULL otherwise
//-----------------------------------------------------------------------------
PTR_FieldDesc DacDbiInterfaceImpl::FindField(TypeHandle thApprox, mdFieldDef fldToken)
{
    // Iterate every field of the type (including EnC-added ones), without
    // fixing up EnC data--we can't, since the debuggee is stopped.
    EncApproxFieldDescIterator fieldIter(thApprox.GetMethodTable(),
                                         ApproxFieldDescIterator::ALL_FIELDS,
                                         FALSE);

    // Scan for the field whose metadata token matches.
    for (PTR_FieldDesc pCandidate = fieldIter.Next();
         pCandidate != NULL;
         pCandidate = fieldIter.Next())
    {
        if (pCandidate->GetMemberDef() == fldToken)
        {
            return pCandidate;
        }
    }

    // No field on this type carries the requested token.
    return NULL;
} // DacDbiInterfaceImpl::FindField
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetEnCFieldDesc
// Get the FieldDesc corresponding to a particular EnC field token
// Arguments:
// input: pEnCFieldInfo
// Return Value: pointer to the FieldDesc that corresponds to the EnC field
// Note: this function may throw
//-----------------------------------------------------------------------------
FieldDesc * DacDbiInterfaceImpl::GetEnCFieldDesc(const EnCHangingFieldInfo * pEnCFieldInfo)
{
        FieldDesc * pFD = NULL;

        DomainAssembly * pDomainAssembly = pEnCFieldInfo->GetObjectTypeData().vmDomainAssembly.GetDacPtr();
        Module     * pModule     = pDomainAssembly->GetModule();

        // get the type handle for the object
        TypeHandle typeHandle = ClassLoader::LookupTypeDefOrRefInModule(pModule,
                                                                pEnCFieldInfo->GetObjectTypeData().metadataToken);
        // Fix: test the TypeHandle with IsNull(), consistent with every other
        // TypeHandle null-check in this file, instead of comparing against NULL.
        if (typeHandle.IsNull())
        {
            ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
        }
        // and find the field desc
        pFD = FindField(typeHandle, pEnCFieldInfo->GetFieldToken());
        if (pFD == NULL)
        {
            // FieldDesc is not yet available, so can't get EnC field info
            ThrowHR(CORDBG_E_ENC_HANGING_FIELD);
        }
        return pFD;
} // DacDbiInterfaceImpl::GetEnCFieldDesc
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::GetPtrToEnCField
// Get the address of a field added with EnC.
// Arguments:
// input: pFD - field desc for the added field
// pEnCFieldInfo - information about the new field
// Return Value: The field address if the field is available (i.e., it has been accessed)
// or NULL otherwise
// Note: this function may throw
//-----------------------------------------------------------------------------
PTR_CBYTE DacDbiInterfaceImpl::GetPtrToEnCField(FieldDesc * pFD, const EnCHangingFieldInfo * pEnCFieldInfo)
{
#ifndef EnC_SUPPORTED
    // EnC-added fields can't exist on platforms without EnC support.
    _ASSERTE(!"Trying to get the address of an EnC field where EnC is not supported! ");
    return NULL;
#else
    PTR_EditAndContinueModule pEnCModule;
    DomainAssembly * pDomainAssembly = pEnCFieldInfo->GetObjectTypeData().vmDomainAssembly.GetDacPtr();
    Module     * pModule     = pDomainAssembly->GetModule();

    // make sure we actually have an EditAndContinueModule
    _ASSERTE(pModule->IsEditAndContinueCapable());
    pEnCModule = dac_cast<PTR_EditAndContinueModule>(pModule);

    // we should also have an EnCFieldDesc
    _ASSERTE(pFD->IsEnCNew());
    EnCFieldDesc * pEnCFieldDesc;
    pEnCFieldDesc = dac_cast<PTR_EnCFieldDesc>(pFD);

    // If it hasn't been fixed up yet, then we can't return the pointer.
    if (pEnCFieldDesc->NeedsFixup())
    {
        ThrowHR(CORDBG_E_ENC_HANGING_FIELD);
    }

    // Get a pointer to the field
    PTR_CBYTE pORField = NULL;

    PTR_Object pObject = pEnCFieldInfo->GetVmObject().GetDacPtr();
    pORField = pEnCModule->ResolveField(ObjectToOBJECTREF(pObject),
                                        pEnCFieldDesc);

    // The field could be absent because the code hasn't accessed it yet. If so, we're not going to add it
    // since we can't allocate anyway.
    if (pORField == NULL)
    {
        ThrowHR(CORDBG_E_ENC_HANGING_FIELD);
    }
    return pORField;
#endif // EnC_SUPPORTED
} // DacDbiInterfaceImpl::GetPtrToEnCField
//-----------------------------------------------------------------------------
// DacDbiInterfaceImpl::InitFieldData
// Initialize information about a field added with EnC
// Arguments :
// input:
// pFD - provides information about whether the field is static,
// the metadata token, etc.
// pORField - provides the field address or offset
// pEnCFieldData - provides the offset to the fields of the object
// output: pFieldData - initialized in accordance with the input information
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::InitFieldData(const FieldDesc *           pFD,
                                        const PTR_CBYTE             pORField,
                                        const EnCHangingFieldInfo * pEnCFieldData,
                                        FieldData *                 pFieldData)
{
    pFieldData->ClearFields();
    pFieldData->m_fFldIsStatic = (pFD->IsStatic() != 0);
    pFieldData->m_vmFieldDesc.SetHostPtr(pFD);
    pFieldData->m_fFldIsTLS = (pFD->IsThreadStatic() == TRUE);
    pFieldData->m_fldMetadataToken = pFD->GetMemberDef();
    pFieldData->m_fFldIsRVA = (pFD->IsRVA() == TRUE);
    // EnC-added fields are never collectible statics; storage is always available
    // by the time we get here (pORField has already been resolved).
    pFieldData->m_fFldIsCollectibleStatic = FALSE;
    pFieldData->m_fFldStorageAvailable = true;
    if (pFieldData->m_fFldIsStatic)
    {
        //EnC is only supported on regular static fields
        _ASSERTE(!pFieldData->m_fFldIsTLS);
        _ASSERTE(!pFieldData->m_fFldIsRVA);
        // pORField contains the absolute address
        pFieldData->SetStaticAddress(PTR_TO_TADDR(pORField));
    }
    else
    {
        // fldInstanceOffset is computed to work correctly with GetFieldValue
        // which computes:
        // addr of pORField = object + pEnCFieldInfo->m_offsetToVars + offsetToFld
        pFieldData->SetInstanceOffset(PTR_TO_TADDR(pORField) -
                                      (PTR_TO_TADDR(pEnCFieldData->GetVmObject().GetDacPtr()) +
                                       pEnCFieldData->GetOffsetToVars()));
    }
} // DacDbiInterfaceImpl::InitFieldData
// ----------------------------------------------------------------------------
// DacDbi API: GetEnCHangingFieldInfo
// After a class has been loaded, if a field has been added via EnC we'll have to jump through
// some hoops to get at it (it hangs off the sync block or FieldDesc).
//
// GENERICS: TODO: this method will need to be modified if we ever support EnC on
// generic classes.
//-----------------------------------------------------------------------------
void DacDbiInterfaceImpl::GetEnCHangingFieldInfo(const EnCHangingFieldInfo * pEnCFieldInfo,
                                                 FieldData *           pFieldData,
                                                 BOOL *                pfStatic)
{
    DD_ENTER_MAY_THROW;
    LOG((LF_CORDB, LL_INFO100000, "DDI::IEnCHFI: Obj:0x%x, objType"
        ":0x%x, offset:0x%x\n", pEnCFieldInfo->m_pObject, pEnCFieldInfo->m_objectTypeData.elementType,
        pEnCFieldInfo->m_offsetToVars));
    FieldDesc *  pFD      = NULL;
    PTR_CBYTE    pORField = NULL;
    // GetEnCFieldDesc throws CORDBG_E_ENC_HANGING_FIELD if the FieldDesc isn't available yet.
    pFD = GetEnCFieldDesc(pEnCFieldInfo);
    _ASSERTE(pFD->IsEnCNew()); // We shouldn't be here if it wasn't added to an
                               // already loaded class.
#ifdef EnC_SUPPORTED
    // Resolve the actual storage; also throws CORDBG_E_ENC_HANGING_FIELD if not yet materialized.
    pORField = GetPtrToEnCField(pFD, pEnCFieldInfo);
#else
    _ASSERTE(!"We shouldn't be here: EnC not supported");
#endif // EnC_SUPPORTED
    // Fill in pFieldData from the FieldDesc and the resolved storage location.
    InitFieldData(pFD, pORField, pEnCFieldInfo, pFieldData);
    *pfStatic = (pFD->IsStatic() != 0);
} // DacDbiInterfaceImpl::GetEnCHangingFieldInfo
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// Resolve a DomainAssembly in the target and return the VM pointer to its Assembly.
// Arguments:
//    input:  vmDomainAssembly - the DomainAssembly of interest (must be non-NULL)
//    output: vmAssembly       - receives the Assembly for that DomainAssembly
void DacDbiInterfaceImpl::GetAssemblyFromDomainAssembly(VMPTR_DomainAssembly vmDomainAssembly, VMPTR_Assembly *vmAssembly)
{
    DD_ENTER_MAY_THROW;
    _ASSERTE(vmAssembly != NULL);
    DomainAssembly * pDomainAssembly = vmDomainAssembly.GetDacPtr();
    vmAssembly->SetHostPtr(pDomainAssembly->GetAssembly());
}
// Determines whether the runtime security system has assigned full-trust to this assembly.
BOOL DacDbiInterfaceImpl::IsAssemblyFullyTrusted(VMPTR_DomainAssembly vmDomainAssembly)
{
    DD_ENTER_MAY_THROW;
    // CAS-style security levels no longer exist in the runtime, so every
    // assembly is reported as fully trusted.
    return TRUE;
}
// Get the full path and file name to the assembly's manifest module.
// Get the full path and file name to the assembly's manifest module.
// Returns TRUE if the assembly has a file name, FALSE otherwise (the holder
// still receives a valid — possibly empty — string either way).
BOOL DacDbiInterfaceImpl::GetAssemblyPath(
    VMPTR_Assembly  vmAssembly,
    IStringHolder * pStrFilename)
{
    DD_ENTER_MAY_THROW;
    // Get the manifest module for this assembly
    Assembly * pAssembly = vmAssembly.GetDacPtr();
    Module * pManifestModule = pAssembly->GetModule();
    // Get the path for the manifest module.
    // since we no longer support Win9x, we assume all paths will be in unicode format already
    const WCHAR * szPath = pManifestModule->GetPath().DacGetRawUnicode();
    // Copy the path (possibly NULL/empty) into the caller's holder before deciding the return value.
    HRESULT hrStatus = pStrFilename->AssignCopy(szPath);
    IfFailThrow(hrStatus);
    if(szPath == NULL || *szPath=='\0')
    {
        // The assembly has no (and will never have a) file name, but we didn't really fail
        return FALSE;
    }
    return TRUE;
}
// DAC/DBI API
// Get a resolved type def from a type ref. The type ref may come from a module other than the
// referencing module.
// Get a resolved type def from a type ref. The type ref may come from a module other than the
// referencing module.
// Arguments:
//    input:  pTypeRefInfo   - module scope and token of the type ref to resolve
//    output: pTargetRefInfo - module scope and typedef token of the resolved type
// Throws CORDBG_E_CLASS_NOT_LOADED if the target type can't be resolved without loading.
void DacDbiInterfaceImpl::ResolveTypeReference(const TypeRefData * pTypeRefInfo,
                                               TypeRefData *       pTargetRefInfo)
{
    DD_ENTER_MAY_THROW;
    DomainAssembly * pDomainAssembly = pTypeRefInfo->vmDomainAssembly.GetDacPtr();
    Module * pReferencingModule = pDomainAssembly->GetModule();
    BOOL fSuccess = FALSE;
    // Resolve the type ref
    // g_pEEInterface->FindLoadedClass is almost what we want, but it isn't guaranteed to work if
    // the typeRef was originally loaded from a different assembly. Also, we need to ensure that
    // we can resolve even unloaded types in fully loaded assemblies, so APIs such as
    // LoadTypeDefOrRefThrowing aren't acceptable.
    Module * pTargetModule = NULL;
    mdTypeDef targetTypeDef = mdTokenNil;
    // The loader won't need to trigger a GC or throw because we've told it not to load anything
    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
    fSuccess = ClassLoader::ResolveTokenToTypeDefThrowing(pReferencingModule,
                                                          pTypeRefInfo->typeToken,
                                                          &pTargetModule,
                                                          &targetTypeDef,
                                                          Loader::SafeLookup //don't load, no locks/allocations
                                                          );
    if (fSuccess)
    {
        _ASSERTE(pTargetModule != NULL);
        _ASSERTE( TypeFromToken(targetTypeDef) == mdtTypeDef );
        // NOTE: the previously-declared AppDomain local here was never used and
        // produced an unused-variable warning; it has been removed.
        pTargetRefInfo->vmDomainAssembly.SetDacTargetPtr(PTR_HOST_TO_TADDR(pTargetModule->GetDomainAssembly()));
        pTargetRefInfo->typeToken = targetTypeDef;
    }
    else
    {
        // failed - presumably because the target assembly isn't loaded
        ThrowHR(CORDBG_E_CLASS_NOT_LOADED);
    }
} // DacDbiInterfaceImpl::ResolveTypeReference
// Get the full path and file name to the module (if any).
// Get the full path and file name to the module (if any).
// Returns TRUE and fills pStrFilename with the path when the module is backed
// by a file; returns FALSE and assigns an empty string otherwise.
BOOL DacDbiInterfaceImpl::GetModulePath(VMPTR_Module vmModule,
                                        IStringHolder *  pStrFilename)
{
    DD_ENTER_MAY_THROW;

    Module * pModule = vmModule.GetDacPtr();
    PEAssembly * pPEAssembly = pModule->GetPEAssembly();

    // A module only has a path when it is backed by a PEAssembly with a
    // non-empty on-disk location.
    if ((pPEAssembly != NULL) && !pPEAssembly->GetPath().IsEmpty())
    {
        // Prefer the real path; fall back to the file-name hint if the raw
        // unicode string isn't available.
        const WCHAR * szPath = pPEAssembly->GetPath().DacGetRawUnicode();
        if (szPath == NULL)
        {
            szPath = pPEAssembly->GetModuleFileNameHint().DacGetRawUnicode();
        }

        if (szPath != NULL)
        {
            IfFailThrow(pStrFilename->AssignCopy(szPath));
            return TRUE;
        }
    }

    // no filename
    IfFailThrow(pStrFilename->AssignCopy(W("")));
    return FALSE;
}
// Get the full path and file name to the ngen image for the module (if any).
// Get the full path and file name to the ngen image for the module (if any).
// NGEN no longer exists, so this always assigns an empty string and returns FALSE.
BOOL DacDbiInterfaceImpl::GetModuleNGenPath(VMPTR_Module vmModule,
                                            IStringHolder *  pStrFilename)
{
    DD_ENTER_MAY_THROW;
    // no ngen filename
    IfFailThrow(pStrFilename->AssignCopy(W("")));
    return FALSE;
}
// Implementation of IDacDbiInterface::GetModuleSimpleName
// Implementation of IDacDbiInterface::GetModuleSimpleName
// Copies the module's simple (metadata) name, converted from UTF-8 to unicode,
// into the caller-supplied string holder.
void DacDbiInterfaceImpl::GetModuleSimpleName(VMPTR_Module vmModule, IStringHolder * pStrFilename)
{
    DD_ENTER_MAY_THROW;
    _ASSERTE(pStrFilename != NULL);
    Module * pModule = vmModule.GetDacPtr();
    LPCUTF8 szNameUtf8 = pModule->GetSimpleName();
    // Convert from UTF-8 to unicode before handing the name back.
    SString convert(SString::Utf8, szNameUtf8);
    IfFailThrow(pStrFilename->AssignCopy(convert.GetUnicode()));
}
// Determine whether the module's PE image is memory-mapped in the target.
// Returns S_OK with *isModuleMapped set when the loaded layout is available;
// returns S_FALSE (leaving *isModuleMapped untouched) if no PE image is loaded.
HRESULT DacDbiInterfaceImpl::IsModuleMapped(VMPTR_Module pModule, OUT BOOL *isModuleMapped)
{
    LOG((LF_CORDB, LL_INFO10000, "DDBII::IMM - TADDR 0x%x\n", pModule));
    DD_ENTER_MAY_THROW;
    HRESULT hr = S_FALSE;
    PTR_Module pTargetModule = pModule.GetDacPtr();
    EX_TRY
    {
        PTR_PEAssembly pPEAssembly = pTargetModule->GetPEAssembly();
        _ASSERTE(pPEAssembly != NULL);
        if (pPEAssembly->HasLoadedPEImage())
        {
            *isModuleMapped = pPEAssembly->GetLoadedLayout()->IsMapped();
            hr = S_OK;
        }
    }
    EX_CATCH_HRESULT(hr);
    return hr;
}
// Report whether any Edit-and-Continue metadata updates have been applied in
// the target process. Always false when EnC is compiled out.
bool DacDbiInterfaceImpl::MetadataUpdatesApplied()
{
    DD_ENTER_MAY_THROW;
#ifdef EnC_SUPPORTED
    return g_metadataUpdatesApplied;
#else
    return false;
#endif
}
// Helper to initialize a TargetBuffer from a MemoryRange
//
// Arguments:
// memoryRange - memory range.
// pTargetBuffer - required out parameter to be initialized to value of memory range.
//
// Notes:
// MemoryRange and TargetBuffer both conceptually describe a single contiguous buffer of memory in the
// target. MemoryRange is a VM structure, which can't bleed across the DacDbi boundary. TargetBuffer is
// a DacDbi structure, which can cross the DacDbi boundary.
void InitTargetBufferFromMemoryRange(const MemoryRange memoryRange, TargetBuffer * pTargetBuffer)
{
    SUPPORTS_DAC;
    _ASSERTE(pTargetBuffer != NULL);
    PTR_CVOID p = memoryRange.StartAddress();
    CORDB_ADDRESS addr = PTR_TO_CORDB_ADDRESS(PTR_TO_TADDR(p));
    // TargetBuffer's size field is a ULONG; a MemoryRange larger than 4GB would truncate.
    _ASSERTE(memoryRange.Size() <= 0xffffffff);
    pTargetBuffer->Init(addr, (ULONG)memoryRange.Size());
}
// Helper to initialize a TargetBuffer (host representation of target) from an SBuffer (target)
//
// Arguments:
// pBuffer - target pointer to a SBuffer structure. If pBuffer is NULL, then target buffer will be empty.
// pTargetBuffer - required out pointer to hold buffer description.
//
// Notes:
// PTR_SBuffer and TargetBuffer are both semantically equivalent structures. They both are a pointer and length
// describing a buffer in the target address space. (SBufer also has ownership semantics, but for DAC's
// read-only nature, that doesn't matter).
// Neither of these will actually copy the target buffer into the host without explicit action.
// The important difference is that TargetBuffer is a host datastructure and so easier to manipulate.
//
void InitTargetBufferFromTargetSBuffer(PTR_SBuffer pBuffer, TargetBuffer * pTargetBuffer)
{
    SUPPORTS_DAC;
    _ASSERTE(pTargetBuffer != NULL);
    // Marshal the SBuffer header into the host so we can read its raw range.
    SBuffer * pBufferHost = pBuffer;
    if (pBufferHost == NULL)
    {
        // NULL target pointer means "no buffer": report an empty TargetBuffer.
        pTargetBuffer->Clear();
        return;
    }
    MemoryRange m = pBufferHost->DacGetRawBuffer();
    InitTargetBufferFromMemoryRange(m, pTargetBuffer);
}
// Implementation of IDacDbiInterface::GetMetadata
// Implementation of IDacDbiInterface::GetMetadata
// Returns the target-address range holding the module's metadata blob.
// Throws CORDBG_E_MISSING_METADATA if no metadata can be located.
void DacDbiInterfaceImpl::GetMetadata(VMPTR_Module vmModule, TargetBuffer * pTargetBuffer)
{
    DD_ENTER_MAY_THROW;
    pTargetBuffer->Clear();
    Module * pModule = vmModule.GetDacPtr();
    // Target should only be asking about modules that are visible to debugger.
    _ASSERTE(pModule->IsVisibleToDebugger());
    // For dynamic modules, metadata is stored as an eagerly-serialized buffer hanging off the Reflection Module.
    if (pModule->IsReflection())
    {
        // Here is the fetch.
        ReflectionModule * pReflectionModule = pModule->GetReflectionModule();
        InitTargetBufferFromTargetSBuffer(pReflectionModule->GetDynamicMetadataBuffer(), pTargetBuffer);
    }
    else
    {
        PEAssembly * pPEAssembly = pModule->GetPEAssembly();
        // For non-dynamic modules, metadata is in the pe-image.
        COUNT_T size;
        CORDB_ADDRESS address = PTR_TO_CORDB_ADDRESS(dac_cast<TADDR>(pPEAssembly->GetLoadedMetadata(&size)));
        pTargetBuffer->Init(address, (ULONG) size);
    }
    if (pTargetBuffer->IsEmpty())
    {
        // We never expect this to happen in a well-behaved scenario. But just in case.
        ThrowHR(CORDBG_E_MISSING_METADATA);
    }
}
// Implementation of IDacDbiInterface::GetSymbolsBuffer
// Implementation of IDacDbiInterface::GetSymbolsBuffer
// Returns the target range holding the module's in-memory symbol stream (if any)
// plus the symbol format. Leaves the buffer empty and format kSymbolFormatNone
// when no in-memory symbols exist.
void DacDbiInterfaceImpl::GetSymbolsBuffer(VMPTR_Module vmModule, TargetBuffer * pTargetBuffer, SymbolFormat * pSymbolFormat)
{
    DD_ENTER_MAY_THROW;
    pTargetBuffer->Clear();
    *pSymbolFormat = kSymbolFormatNone;
    Module * pModule = vmModule.GetDacPtr();
    // Target should only be asking about modules that are visible to debugger.
    _ASSERTE(pModule->IsVisibleToDebugger());
    PTR_CGrowableStream pStream = pModule->GetInMemorySymbolStream();
    if (pStream == NULL)
    {
        // Common case is to not have PDBs in-memory.
        return;
    }
    const MemoryRange m = pStream->GetRawBuffer();
    if (m.Size() == 0)
    {
        // We may be prepared to store symbols (in some particular format) but none are there yet.
        // We treat this the same as not having any symbols above.
        return;
    }
    InitTargetBufferFromMemoryRange(m, pTargetBuffer);
    *pSymbolFormat = kSymbolFormatPDB;
}
// Return (via pModule) the Module underlying the given DomainAssembly.
void DacDbiInterfaceImpl::GetModuleForDomainAssembly(VMPTR_DomainAssembly vmDomainAssembly, OUT VMPTR_Module * pModule)
{
    DD_ENTER_MAY_THROW;
    _ASSERTE(pModule != NULL);
    DomainAssembly * pDomainAssembly = vmDomainAssembly.GetDacPtr();
    pModule->SetHostPtr(pDomainAssembly->GetModule());
}
// Implement IDacDbiInterface::GetDomainAssemblyData
void DacDbiInterfaceImpl::GetDomainAssemblyData(VMPTR_DomainAssembly vmDomainAssembly, DomainAssemblyInfo * pData)
{
DD_ENTER_MAY_THROW;
_ASSERTE(pData != NULL);
ZeroMemory(pData, sizeof(*pData));
DomainAssembly * pDomainAssembly = vmDomainAssembly.GetDacPtr();
AppDomain * pAppDomain = pDomainAssembly->GetAppDomain();
// @dbgtodo - is this efficient DAC usage (perhaps a dac-cop rule)? Are we round-tripping the pointer?
pData->vmDomainAssembly.SetHostPtr(pDomainAssembly);
pData->vmAppDomain.SetHostPtr(pAppDomain);
}
// Implement IDacDbiInterface::GetModuleData
void DacDbiInterfaceImpl::GetModuleData(VMPTR_Module vmModule, ModuleInfo * pData)
{
DD_ENTER_MAY_THROW;
_ASSERTE(pData != NULL);
ZeroMemory(pData, sizeof(*pData));
Module * pModule = vmModule.GetDacPtr();
PEAssembly * pPEAssembly = pModule->GetPEAssembly();
pData->vmPEAssembly.SetHostPtr(pPEAssembly);
pData->vmAssembly.SetHostPtr(pModule->GetAssembly());
// Is it dynamic?
BOOL fIsDynamic = pModule->IsReflection();
pData->fIsDynamic = fIsDynamic;
// Get PE BaseAddress and Size
// For dynamic modules, these are 0. Else,
pData->pPEBaseAddress = NULL;
pData->nPESize = 0;
if (!fIsDynamic)
{
COUNT_T size = 0;
pData->pPEBaseAddress = PTR_TO_TADDR(pPEAssembly->GetDebuggerContents(&size));
pData->nPESize = (ULONG) size;
}
// In-memory is determined by whether the module has a filename.
pData->fInMemory = FALSE;
if (pPEAssembly != NULL)
{
pData->fInMemory = pPEAssembly->GetPath().IsEmpty();
}
}
// Enumerate all AppDomains in the process.
// Enumerate all AppDomains in the process, invoking fpCallback once per domain.
void DacDbiInterfaceImpl::EnumerateAppDomains(
    FP_APPDOMAIN_ENUMERATION_CALLBACK fpCallback,
    void * pUserData)
{
    DD_ENTER_MAY_THROW;
    _ASSERTE(fpCallback != NULL);
    // Only include active appdomains in the enumeration.
    // This includes appdomains sent before the AD load event,
    // and does not include appdomains that are in shutdown after the AD exit event.
    const BOOL bOnlyActive = TRUE;
    AppDomainIterator iterator(bOnlyActive);
    while(iterator.Next())
    {
        // It's critical that we don't yield appdomains after the unload event has been sent.
        // See code:IDacDbiInterface#Enumeration for details.
        AppDomain * pAppDomain = iterator.GetDomain();
        VMPTR_AppDomain vmAppDomain = VMPTR_AppDomain::NullPtr();
        vmAppDomain.SetHostPtr(pAppDomain);
        fpCallback(vmAppDomain, pUserData);
    }
}
// Enumerate all Assemblies in an appdomain.
// Enumerate all Assemblies in an appdomain, invoking fpCallback once per
// debugger-visible assembly.
void DacDbiInterfaceImpl::EnumerateAssembliesInAppDomain(
    VMPTR_AppDomain vmAppDomain,
    FP_ASSEMBLY_ENUMERATION_CALLBACK fpCallback,
    void * pUserData
)
{
    DD_ENTER_MAY_THROW;
    _ASSERTE(fpCallback != NULL);
    // Iterate through all Assemblies (including shared) in the appdomain.
    AppDomain::AssemblyIterator iterator;
    // If the containing appdomain is unloading, then don't enumerate any assemblies
    // in the domain. This is to enforce rules at code:IDacDbiInterface#Enumeration.
    // See comment in code:DacDbiInterfaceImpl::EnumerateModulesInAssembly code for details.
    AppDomain * pAppDomain = vmAppDomain.GetDacPtr();
    // Pass the magical flags to the loader enumerator to get all Execution-only assemblies.
    iterator = pAppDomain->IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoading | kIncludeLoaded | kIncludeExecution));
    CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
    while (iterator.Next(pDomainAssembly.This()))
    {
        // Skip assemblies the debugger was never notified about (e.g. inspection-only).
        if (!pDomainAssembly->IsVisibleToDebugger())
        {
            continue;
        }
        VMPTR_DomainAssembly vmDomainAssembly = VMPTR_DomainAssembly::NullPtr();
        vmDomainAssembly.SetHostPtr(pDomainAssembly);
        fpCallback(vmDomainAssembly, pUserData);
    }
}
// Implementation of IDacDbiInterface::EnumerateModulesInAssembly,
// Enumerate all the modules (non-resource) in an assembly.
// Implementation of IDacDbiInterface::EnumerateModulesInAssembly,
// Enumerate all the modules (non-resource) in an assembly.
// Single-module assemblies mean at most one callback is made here.
void DacDbiInterfaceImpl::EnumerateModulesInAssembly(
    VMPTR_DomainAssembly vmAssembly,
    FP_MODULE_ENUMERATION_CALLBACK fpCallback,
    void * pUserData)
{
    DD_ENTER_MAY_THROW;
    _ASSERTE(fpCallback != NULL);
    DomainAssembly * pDomainAssembly = vmAssembly.GetDacPtr();
    // Debugger isn't notified of Resource / Inspection-only modules.
    if (pDomainAssembly->GetModule()->IsVisibleToDebugger())
    {
        _ASSERTE(pDomainAssembly->IsLoaded());
        VMPTR_DomainAssembly vmDomainAssembly = VMPTR_DomainAssembly::NullPtr();
        vmDomainAssembly.SetHostPtr(pDomainAssembly);
        fpCallback(vmDomainAssembly, pUserData);
    }
}
// Implementation of IDacDbiInterface::ResolveAssembly
// Returns NULL if not found.
// Implementation of IDacDbiInterface::ResolveAssembly
// Resolve an assembly-ref token relative to the scope module's ref cache.
// Only succeeds if the referenced assembly is already loaded; returns NULL
// (a null VMPTR) if not found.
VMPTR_DomainAssembly DacDbiInterfaceImpl::ResolveAssembly(
    VMPTR_DomainAssembly vmScope,
    mdToken tkAssemblyRef)
{
    DD_ENTER_MAY_THROW;

    DomainAssembly * pDomainAssembly = vmScope.GetDacPtr();
    Module * pModule = pDomainAssembly->GetModule();

    VMPTR_DomainAssembly vmDomainAssembly = VMPTR_DomainAssembly::NullPtr();

    Assembly * pAssembly = pModule->LookupAssemblyRef(tkAssemblyRef);
    if (pAssembly != NULL)
    {
        // Use a distinct name so we don't shadow the outer pDomainAssembly
        // (the previous inner declaration triggered a shadowing warning).
        // Also removed: an AppDomain local that was never used.
        DomainAssembly * pResolvedAssembly = pAssembly->GetDomainAssembly();
        vmDomainAssembly.SetHostPtr(pResolvedAssembly);
    }
    return vmDomainAssembly;
}
// When stopped at an event, request a synchronization.
// See DacDbiInterface.h for full comments
// When stopped at an event, request a synchronization.
// See DacDbiInterface.h for full comments
void DacDbiInterfaceImpl::RequestSyncAtEvent()
{
    DD_ENTER_MAY_THROW;
    // To request a sync, we just need to set g_pDebugger->m_RSRequestedSync high.
    // If the debugger hasn't initialized yet (g_pDebugger == NULL) this is a no-op.
    if (g_pDebugger != NULL)
    {
        // Write the flag directly into the target process's Debugger object.
        TADDR addr = PTR_HOST_MEMBER_TADDR(Debugger, g_pDebugger, m_RSRequestedSync);
        BOOL fTrue = TRUE;
        SafeWriteStructOrThrow<BOOL>(addr, &fTrue);
    }
}
// Toggle whether the target debugger sends exception events for code outside
// of Just-My-Code. Writes the flag directly into the target's Debugger object.
// Returns S_OK on success (or when g_pDebugger is not yet available), otherwise
// the HRESULT of the failed target write.
HRESULT DacDbiInterfaceImpl::SetSendExceptionsOutsideOfJMC(BOOL sendExceptionsOutsideOfJMC)
{
    DD_ENTER_MAY_THROW
    HRESULT hr = S_OK;
    EX_TRY
    {
        if (g_pDebugger != NULL)
        {
            TADDR addr = PTR_HOST_MEMBER_TADDR(Debugger, g_pDebugger, m_sendExceptionsOutsideOfJMC);
            SafeWriteStructOrThrow<BOOL>(addr, &sendExceptionsOutsideOfJMC);
        }
    }
    EX_CATCH_HRESULT(hr);
    return hr;
}
// Notify the debuggee that a debugger attach is pending.
// See DacDbiInterface.h for full comments
// Notify the debuggee that a debugger attach is pending.
// See DacDbiInterface.h for full comments
void DacDbiInterfaceImpl::MarkDebuggerAttachPending()
{
    DD_ENTER_MAY_THROW;
    if (g_pDebugger != NULL)
    {
        // Set the pending-attach bit in the target's control flags.
        DWORD flags = g_CORDebuggerControlFlags;
        flags |= DBCF_PENDING_ATTACH;
        // Uses special DAC writing. PTR_TO_TADDR doesn't fetch for globals.
        // @dbgtodo dac support - the exact mechanism of writing to the target needs to be flushed out,
        // especially as it relates to DAC cop and enforcing undac-ized writes.
        g_CORDebuggerControlFlags = flags;
    }
    else
    {
        // Caller should have guaranteed that the LS is loaded.
        // If we're detaching, then don't throw because we don't care.
        ThrowHR(CORDBG_E_NOTREADY);
    }
}
// Notify the debuggee that a debugger is attached.
// See DacDbiInterface.h for full comments
// Notify the debuggee that a debugger is attached.
// See DacDbiInterface.h for full comments
void DacDbiInterfaceImpl::MarkDebuggerAttached(BOOL fAttached)
{
    DD_ENTER_MAY_THROW;
    if (g_pDebugger != NULL)
    {
        // To be attached, we need to set the following
        //   g_CORDebuggerControlFlags |= DBCF_ATTACHED;
        // To detach (if !fAttached), we need to do the opposite.
        DWORD flags = g_CORDebuggerControlFlags;
        if (fAttached)
        {
            flags |= DBCF_ATTACHED;
        }
        else
        {
            // Detach clears both the attached and any pending-attach bits.
            flags &= ~ (DBCF_ATTACHED | DBCF_PENDING_ATTACH);
        }
        // Uses special DAC writing. PTR_TO_TADDR doesn't fetch for globals.
        // @dbgtodo dac support - the exact mechanism of writing to the target needs to be flushed out,
        // especially as it relates to DAC cop and enforcing undac-ized writes.
        g_CORDebuggerControlFlags = flags;
    }
    else if (fAttached)
    {
        // Caller should have guaranteed that the LS is loaded.
        // If we're detaching, then don't throw because we don't care.
        ThrowHR(CORDBG_E_NOTREADY);
    }
}
// Enumerate all threads in the process.
// Enumerate all threads in the process, invoking fpCallback for each thread
// that is ready to be inspected by the debugger.
void DacDbiInterfaceImpl::EnumerateThreads(FP_THREAD_ENUMERATION_CALLBACK fpCallback, void * pUserData)
{
    DD_ENTER_MAY_THROW;

    // No thread store means nothing to enumerate (e.g. very early startup).
    if (ThreadStore::s_pThreadStore == NULL)
    {
        return;
    }

    // Walk the thread store. Don't publish threads via enumeration before
    // they're ready to be inspected — skip dead and unstarted threads, which
    // is the same window that we used in whidbey.
    for (Thread * pCurrent = ThreadStore::GetThreadList(NULL);
         pCurrent != NULL;
         pCurrent = ThreadStore::GetThreadList(pCurrent))
    {
        Thread::ThreadState snapshot = pCurrent->GetSnapshotState();
        bool fSkip = IsThreadMarkedDeadWorker(pCurrent) || ((snapshot & Thread::TS_Unstarted) != 0);
        if (!fSkip)
        {
            VMPTR_Thread vmThread = VMPTR_Thread::NullPtr();
            vmThread.SetHostPtr(pCurrent);
            fpCallback(vmThread, pUserData);
        }
    }
}
// public implementation of IsThreadMarkedDead
// public implementation of IsThreadMarkedDead
// Marshals the VM thread pointer and delegates to the internal worker.
bool DacDbiInterfaceImpl::IsThreadMarkedDead(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;
    Thread * pThread = vmThread.GetDacPtr();
    return IsThreadMarkedDeadWorker(pThread);
}
// Private worker for IsThreadMarkedDead
//
// Arguments:
// pThread - valid thread to check if dead
//
// Returns:
// true iff thread is marked as dead.
//
// Notes:
// This is an internal method that skips public validation.
// See code:IDacDbiInterface::#IsThreadMarkedDead for purpose.
// Internal worker for IsThreadMarkedDead; skips public validation.
// A thread counts as dead once TS_Dead appears in its snapshot state.
bool DacDbiInterfaceImpl::IsThreadMarkedDeadWorker(Thread * pThread)
{
    _ASSERTE(pThread != NULL);
    return (pThread->GetSnapshotState() & Thread::TS_Dead) != 0;
}
// Return the handle of the specified thread.
// Return the OS handle of the specified thread.
HANDLE DacDbiInterfaceImpl::GetThreadHandle(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;
    Thread * pThread = vmThread.GetDacPtr();
    return pThread->GetThreadHandle();
}
// Return the object handle for the managed Thread object corresponding to the specified thread.
// Return the object handle for the managed Thread object corresponding to the specified thread.
// Throws CORDBG_E_BAD_THREAD_STATE when the thread is dead/unstarted/detached or
// the process is shutting down.
VMPTR_OBJECTHANDLE DacDbiInterfaceImpl::GetThreadObject(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;
    Thread * pThread = vmThread.GetDacPtr();
    Thread::ThreadState threadState = pThread->GetSnapshotState();
    if ( (threadState & Thread::TS_Dead)      ||
         (threadState & Thread::TS_Unstarted) ||
         (threadState & Thread::TS_Detached)  ||
         g_fProcessDetach )
    {
        ThrowHR(CORDBG_E_BAD_THREAD_STATE);
    }
    else
    {
        VMPTR_OBJECTHANDLE vmObjHandle = VMPTR_OBJECTHANDLE::NullPtr();
        vmObjHandle.SetDacTargetPtr(pThread->GetExposedObjectHandleForDebugger());
        return vmObjHandle;
    }
}
// Report per-thread GC allocation statistics.
// SOH bytes are computed by subtracting the unused part of the current
// allocation context (limit - ptr) from the running byte total.
void DacDbiInterfaceImpl::GetThreadAllocInfo(VMPTR_Thread        vmThread,
                                             DacThreadAllocInfo* threadAllocInfo)
{
    DD_ENTER_MAY_THROW;
    Thread * pThread = vmThread.GetDacPtr();
    gc_alloc_context* allocContext = pThread->GetAllocContext();
    threadAllocInfo->m_allocBytesSOH = allocContext->alloc_bytes - (allocContext->alloc_limit - allocContext->alloc_ptr);
    threadAllocInfo->m_allocBytesUOH = allocContext->alloc_bytes_uoh;
}
// Set and reset the TSNC_DebuggerUserSuspend bit on the state of the specified thread
// according to the CorDebugThreadState.
void DacDbiInterfaceImpl::SetDebugState(VMPTR_Thread vmThread,
CorDebugThreadState debugState)
{
DD_ENTER_MAY_THROW;
Thread * pThread = vmThread.GetDacPtr();
// update the field on the host copy
if (debugState == THREAD_SUSPEND)
{
pThread->SetThreadStateNC(Thread::TSNC_DebuggerUserSuspend);
}
else if (debugState == THREAD_RUN)
{
pThread->ResetThreadStateNC(Thread::TSNC_DebuggerUserSuspend);
}
else
{
ThrowHR(E_INVALIDARG);
}
// update the field on the target copy
TADDR taThreadState = PTR_HOST_MEMBER_TADDR(Thread, pThread, m_StateNC);
SafeWriteStructOrThrow<Thread::ThreadStateNoConcurrency>(taThreadState, &(pThread->m_StateNC));
}
// Gets the debugger unhandled exception threadstate flag
// Gets the debugger unhandled exception threadstate flag
BOOL DacDbiInterfaceImpl::HasUnhandledException(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;
    Thread * pThread = vmThread.GetDacPtr();
    // some managed exceptions don't have any underlying
    // native exception processing going on. They just consist
    // of a managed throwable that we have stashed away followed
    // by a debugger notification and some form of failfast.
    // Everything that comes through EEFatalError is in this category
    if(pThread->IsLastThrownObjectUnhandled())
    {
        return TRUE;
    }
    // most managed exceptions are just a throwable bound to a
    // native exception. In that case this handle will be non-null
    OBJECTHANDLE ohException = pThread->GetThrowableAsHandle();
    if (ohException != NULL)
    {
        // during the UEF we set the unhandled bit, if it is set the exception
        // was unhandled
        // however if the exception has intercept info then we consider it handled
        // again
        return pThread->GetExceptionState()->GetFlags()->IsUnhandled() &&
               !(pThread->GetExceptionState()->GetFlags()->DebuggerInterceptInfo());
    }
    // No throwable at all on this thread => nothing unhandled.
    return FALSE;
}
// Return the user state of the specified thread.
// Return the user state of the specified thread: the partial user state with
// the GC-safety bit folded in.
CorDebugUserState DacDbiInterfaceImpl::GetUserState(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;

    // Start from the partial state, then add USER_UNSAFE_POINT if the thread
    // is not currently at a GC safe place.
    UINT state = GetPartialUserState(vmThread);
    if (!IsThreadAtGCSafePlace(vmThread))
    {
        state |= USER_UNSAFE_POINT;
    }
    return (CorDebugUserState)state;
}
// Return the connection ID of the specified thread.
// Return the connection ID of the specified thread.
// Host connections are no longer supported, so this is always invalid.
CONNID DacDbiInterfaceImpl::GetConnectionID(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;
    return INVALID_CONNECTION_ID;
}
// Return the task ID of the specified thread.
// Return the task ID of the specified thread.
// Host tasks are no longer supported, so this is always invalid.
TASKID DacDbiInterfaceImpl::GetTaskID(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;
    return INVALID_TASK_ID;
}
// Return the OS thread ID of the specified thread
// Return the OS thread ID of the specified thread, or 0 when the thread is a
// switched-out fiber and therefore has no real OS tid.
DWORD DacDbiInterfaceImpl::TryGetVolatileOSThreadID(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;

    Thread * pThread = vmThread.GetDacPtr();
    _ASSERTE(pThread != NULL);

    // If the thread ID is the magical SWITCHED_OUT_FIBER_OSID cookie, this is
    // really a switched-out thread with no OS tid; the DD contract is to
    // return 0 (a much more sane value) in that case.
    DWORD dwTid = pThread->GetOSThreadIdForDebugger();
    return (dwTid == SWITCHED_OUT_FIBER_OSID) ? 0 : dwTid;
}
// Return the unique thread ID of the specified thread.
// Return the unique thread ID of the specified thread.
// Unlike TryGetVolatileOSThreadID, no fiber-cookie translation is applied here.
DWORD DacDbiInterfaceImpl::GetUniqueThreadID(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;
    Thread * pThread = vmThread.GetDacPtr();
    _ASSERTE(pThread != NULL);
    return pThread->GetOSThreadId();
}
// Return the object handle to the managed Exception object of the current exception
// on the specified thread. The return value could be NULL if there is no current exception.
// Return the object handle to the managed Exception object of the current
// exception on the specified thread. The handle may be NULL if there is no
// current exception.
VMPTR_OBJECTHANDLE DacDbiInterfaceImpl::GetCurrentException(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;

    Thread * pThread = vmThread.GetDacPtr();

    // Prefer the throwable bound to the in-flight exception; if there is none
    // but the last-thrown object went unhandled, report that instead.
    // OBJECTHANDLEs are really just TADDRs.
    OBJECTHANDLE ohException = pThread->GetThrowableAsHandle();
    if ((ohException == NULL) && pThread->IsLastThrownObjectUnhandled())
    {
        ohException = pThread->LastThrownObjectHandle();
    }

    VMPTR_OBJECTHANDLE vmObjHandle;
    vmObjHandle.SetDacTargetPtr(ohException);
    return vmObjHandle;
}
// Return the object handle to the managed object for a given CCW pointer.
// Return the object handle to the managed object for a given CCW pointer.
// Tries the ComWrappers (managed COM interop) lookup first; if that fails,
// falls back to the built-in COM interop CCW lookup. Returns a NULL handle
// when neither interop system recognizes the address.
VMPTR_OBJECTHANDLE DacDbiInterfaceImpl::GetObjectForCCW(CORDB_ADDRESS ccwPtr)
{
    DD_ENTER_MAY_THROW;
    OBJECTHANDLE ohCCW = NULL;
#ifdef FEATURE_COMWRAPPERS
    if (DACTryGetComWrappersHandleFromCCW(ccwPtr, &ohCCW) != S_OK)
    {
#endif
#ifdef FEATURE_COMINTEROP
    // NOTE: deliberately nested inside the ComWrappers "not found" branch when
    // both features are enabled — built-in interop is the fallback path.
    ComCallWrapper *pCCW = DACGetCCWFromAddress(ccwPtr);
    if (pCCW)
    {
        ohCCW = pCCW->GetObjectHandle();
    }
#endif
#ifdef FEATURE_COMWRAPPERS
    }
#endif
    VMPTR_OBJECTHANDLE vmObjHandle;
    vmObjHandle.SetDacTargetPtr(ohCCW);
    return vmObjHandle;
}
// Return the object handle to the managed CustomNotification object of the current notification
// on the specified thread. The return value could be NULL if there is no current notification.
// Arguments:
// input: vmThread - the thread on which the notification occurred
// Return value: object handle for the current notification (if any) on the thread. This will return non-null
// if and only if we are currently inside a CustomNotification Callback (or a dump was generated while in this
// callback)
//
VMPTR_OBJECTHANDLE DacDbiInterfaceImpl::GetCurrentCustomDebuggerNotification(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;
    Thread * pThread = vmThread.GetDacPtr();
    // OBJECTHANDLEs are really just TADDRs.
    // Non-NULL only while inside a CustomNotification callback (see header comment).
    OBJECTHANDLE ohNotification = pThread->GetThreadCurrNotification(); // ohNotification can be NULL
    VMPTR_OBJECTHANDLE vmObjHandle;
    vmObjHandle.SetDacTargetPtr(ohNotification);
    return vmObjHandle;
}
// Return the current appdomain the specified thread is in.
// Return the current appdomain the specified thread is in.
// Throws E_FAIL if the thread has no domain (shouldn't happen for a live thread).
VMPTR_AppDomain DacDbiInterfaceImpl::GetCurrentAppDomain(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;
    Thread * pThread = vmThread.GetDacPtr();
    AppDomain * pAppDomain = pThread->GetDomain();
    if (pAppDomain == NULL)
    {
        ThrowHR(E_FAIL);
    }
    VMPTR_AppDomain vmAppDomain = VMPTR_AppDomain::NullPtr();
    vmAppDomain.SetDacTargetPtr(PTR_HOST_TO_TADDR(pAppDomain));
    return vmAppDomain;
}
// Returns a bitfield reflecting the managed debugging state at the time of
// the jit attach.
// Returns a bitfield reflecting the managed debugging state at the time of
// the jit attach.
CLR_DEBUGGING_PROCESS_FLAGS DacDbiInterfaceImpl::GetAttachStateFlags()
{
    DD_ENTER_MAY_THROW;
    CLR_DEBUGGING_PROCESS_FLAGS res = (CLR_DEBUGGING_PROCESS_FLAGS)0;
    if (g_pDebugger != NULL)
    {
        res = g_pDebugger->GetAttachStateFlags();
    }
    else
    {
        // When launching the process under a managed debugger we
        // request these flags when CLR is loaded (before g_pDebugger
        // had a chance to be initialized). In these cases simply
        // return 0 (res already holds that value).
    }
    return res;
}
//---------------------------------------------------------------------------------------
// Helper to get the address of the 2nd-chance hijack function Or throw
//
// Returns:
// Non-null Target Address of hijack function.
// Helper to get the address of the 2nd-chance hijack function Or throw.
// Returns the non-NULL target address of the unhandled-exception hijack;
// throws CORDBG_E_NOTREADY if the debugger infrastructure isn't initialized.
TADDR DacDbiInterfaceImpl::GetHijackAddress()
{
    TADDR addr = NULL;
    if (g_pDebugger != NULL)
    {
        // Get the start address of the redirect function for unhandled exceptions.
        addr = dac_cast<TADDR>(g_pDebugger->m_rgHijackFunction[Debugger::kUnhandledException].StartAddress());
    }
    if (addr == NULL)
    {
        ThrowHR(CORDBG_E_NOTREADY);
    }
    return addr;
}
//---------------------------------------------------------------------------------------
// Helper to determine whether a control PC is in any native stub which the runtime knows how to unwind.
//
// Arguments:
// taControlPC - control PC to be checked
//
// Returns:
// Returns true if the control PC is in a runtime unwindable stub.
//
// Notes:
// Currently this function only recognizes the ExceptionHijack() stub,
// which is used for unhandled exceptions.
//
bool DacDbiInterfaceImpl::IsRuntimeUnwindableStub(PCODE targetControlPC)
{
    TADDR controlPC = PCODEToPINSTR(targetControlPC);
    // we call this function a lot while walking the stack and the values here will never change
    // Getting the g_pDebugger and each entry in the m_rgHijackFunction is potentially ~7 DAC
    // accesses per frame. Caching the data into a single local array is much faster. This optimization
    // recovered a few % of DAC stackwalking time
    if(!m_isCachedHijackFunctionValid)
    {
        Debugger* pDebugger = g_pDebugger;
        if ((pDebugger == NULL) || (pDebugger->m_rgHijackFunction == NULL))
        {
            // The in-process debugging infrastructure hasn't been fully initialized, which means that we could
            // NOT have hijacked anything yet.
            // NOTE: we deliberately don't set m_isCachedHijackFunctionValid here, so a later
            // call retries once the debugger is initialized.
            return false;
        }
        // PERF NOTE: if needed this array copy could probably be made more efficient
        // hitting the DAC only once for a single memory block, or even better
        // put the array inline in the Debugger object so that we only do 1 DAC
        // access for this entire thing
        for (int i = 0; i < Debugger::kMaxHijackFunctions; i++)
        {
            InitTargetBufferFromMemoryRange(pDebugger->m_rgHijackFunction[i], &m_pCachedHijackFunction[i] );
        }
        m_isCachedHijackFunctionValid = TRUE;
    }
    // Check whether the control PC is in any of the thread redirection functions.
    for (int i = 0; i < Debugger::kMaxHijackFunctions; i++)
    {
        CORDB_ADDRESS start = m_pCachedHijackFunction[i].pAddress;
        CORDB_ADDRESS end = start + m_pCachedHijackFunction[i].cbSize;
        if ((start <= controlPC) && (controlPC < end))
        {
            return true;
        }
    }
    return false;
}
//---------------------------------------------------------------------------------------
// Align a stack pointer for the given architecture
//
// Arguments:
// pEsp - in/out: pointer to stack pointer.
//
void DacDbiInterfaceImpl::AlignStackPointer(CORDB_ADDRESS * pEsp)
{
    SUPPORTS_DAC;

#if defined(HOST_64BIT)
    // 64-bit platforms require a 16-byte aligned stack pointer. Stacks grow
    // down, so rounding down to the nearest 16-byte boundary stays within the
    // already-committed stack region.
    const CORDB_ADDRESS alignmentMask = (CORDB_ADDRESS)0xF;
    *pEsp &= ~alignmentMask;
#else
    // No extra alignment requirement on x86; this is a no-op.
#endif
}
//---------------------------------------------------------------------------------------
// Emulate pushing something on a thread's stack.
//
// Arguments:
// pEsp - in/out: pointer to stack pointer to push object at. On output,
// updated stack pointer.
// pData - object to push on the stack.
// fAlignStack - whether to align the stack pointer before and after the push.
// Callers which specify FALSE must be very careful and know exactly
// what they are doing.
//
// Return:
// address of pushed object. Throws on error.
//
// Emulate pushing a value of type T onto a target thread's stack: optionally
// align, reserve space, optionally re-align, then write the value into the
// target process. Returns the address of the pushed object; throws on error.
//
template <class T>
CORDB_ADDRESS DacDbiInterfaceImpl::PushHelper(CORDB_ADDRESS * pEsp,
                                              const T * pData,
                                              BOOL fAlignStack)
{
    SUPPORTS_DAC;

    const bool fAlign = (fAlignStack == TRUE);

    // Align both before and after reserving space so that the slot holding the
    // value also satisfies the platform's stack alignment rules.
    if (fAlign)
    {
        AlignStackPointer(pEsp);
    }
    *pEsp -= sizeof(T);
    if (fAlign)
    {
        AlignStackPointer(pEsp);
    }

    // Copy the value into the target process at the new stack top; throws on failure.
    SafeWriteStructOrThrow(*pEsp, pData);
    return *pEsp;
}
//---------------------------------------------------------------------------------------
// Write an EXCEPTION_RECORD structure to the remote target at the specified address while taking
// into account the number of exception parameters. On 64-bit OS and on the WOW64, the OS always
// pushes the entire EXCEPTION_RECORD onto the stack. However, on native x86 OS, the OS only pushes
// enough of the EXCEPTION_RECORD to cover the specified number of exception parameters. Thus we
// need to be extra careful when we overwrite an EXCEPTION_RECORD on the stack.
//
// Arguments:
// pRemotePtr - address of the EXCEPTION_RECORD in the remote target
// pExcepRecord - EXCEPTION_RECORD to be written
//
// Notes:
//     This function is only used by the code which hijacks a thread when there's an unhandled exception.
// It only works when we are actually debugging a live process, not a dump.
//
void DacDbiInterfaceImpl::WriteExceptionRecordHelper(CORDB_ADDRESS pRemotePtr,
                                                     const EXCEPTION_RECORD * pExcepRecord)
{
    // Write only the prefix of the EXCEPTION_RECORD that covers the actual
    // number of exception parameters. On native x86 the OS pushes only that
    // much onto the stack, so writing sizeof(EXCEPTION_RECORD) could clobber
    // adjacent stack memory.
    ULONG32 cbToWrite = offsetof(EXCEPTION_RECORD, ExceptionInformation);
    cbToWrite += pExcepRecord->NumberParameters * sizeof(pExcepRecord->ExceptionInformation[0]);

    // This helper is only used while hijacking a live process for an unhandled
    // exception, so the mutable data target is available for writes.
    HRESULT hr = m_pMutableTarget->WriteVirtual(pRemotePtr,
                                                reinterpret_cast<const BYTE *>(pExcepRecord),
                                                cbToWrite);
    if (FAILED(hr))
    {
        ThrowHR(hr);
    }
}
// Implement IDacDbiInterface::Hijack
//
// Redirect a target thread to the debugger's hijack stub by rewriting its
// register context. The stub expects four arguments (CONTEXT*, EXCEPTION_RECORD*,
// hijack reason, user data); this routine materializes them on the target
// thread's stack or in registers according to the platform ABI, then points
// the thread's IP at the stub and commits the modified context.
void DacDbiInterfaceImpl::Hijack(
    VMPTR_Thread vmThread,
    ULONG32 dwThreadId,
    const EXCEPTION_RECORD * pRecord,
    T_CONTEXT * pOriginalContext,
    ULONG32 cbSizeContext,
    EHijackReason::EHijackReason reason,
    void * pUserData,
    CORDB_ADDRESS * pRemoteContextAddr)
{
    DD_ENTER_MAY_THROW;
    //
    // Validate parameters
    //
    // pRecord may be NULL if we're not hijacking at an exception
    // pOriginalContext may be NULL if caller doesn't want a copy of the context.
    // (The hijack function already has the context)
    _ASSERTE((pOriginalContext == NULL) == (cbSizeContext == 0));
    _ASSERTE(EHijackReason::IsValid(reason));
#ifdef TARGET_UNIX
    _ASSERTE(!"Not supported on this platform");
#endif
    //
    // If we hijack a thread which might not be managed we can set vmThread = NULL
    // The only side-effect in this case is that we can't reuse CONTEXT and
    // EXCEPTION_RECORD space on the stack by an already underway in-process exception
    // filter. If you depend on those being used and updated you must provide the vmThread
    //
    Thread* pThread = NULL;
    if(!vmThread.IsNull())
    {
        pThread = vmThread.GetDacPtr();
        _ASSERTE(pThread->GetOSThreadIdForDebugger() == dwThreadId);
    }
    TADDR pfnHijackFunction = GetHijackAddress();
    //
    // Setup context for hijack
    //
    T_CONTEXT ctx;
    HRESULT hr = m_pTarget->GetThreadContext(
        dwThreadId,
        CONTEXT_FULL,
        sizeof(ctx),
        (BYTE*) &ctx);
    IfFailThrow(hr);
    // If caller requested, copy back the original context that we're hijacking from.
    if (pOriginalContext != NULL)
    {
        // Since Dac + DBI are tightly coupled, context sizes should be the same.
        if (cbSizeContext != sizeof(T_CONTEXT))
        {
            ThrowHR(E_INVALIDARG);
        }
        memcpy(pOriginalContext, &ctx, cbSizeContext);
    }
    // Make sure the trace flag isn't on. This can happen if we were single stepping the thread when we faulted. This
    // will ensure that we don't try to single step through the OS's exception logic, which greatly confuses our second
    // chance hijack logic. This also mimics what the OS does for us automatically when single stepping in process, i.e.,
    // when you turn the trace flag on in-process and go, if there is a fault, the fault is reported and the trace flag
    // is automatically turned off.
    //
    // The debugger could always re-enable the single-step flag if it wants to.
#ifndef FEATURE_EMULATE_SINGLESTEP
    UnsetSSFlag(reinterpret_cast<DT_CONTEXT *>(&ctx));
#endif
    // Push pointers
    // espContext/espRecord will hold the remote addresses of the CONTEXT and
    // EXCEPTION_RECORD once those have been placed on the target stack.
    void* espContext = NULL;
    void* espRecord = NULL;
    const void* pData = pUserData;
    // @dbgtodo cross-plat - this is not cross plat
    CORDB_ADDRESS esp = GetSP(&ctx);
    //
    // Find out where the OS exception dispatcher has pushed the EXCEPTION_RECORD and CONTEXT. The ExInfo and
    // ExceptionTracker have pointers to these data structures, but when we get the unhandled exception
    // notification, the OS exception dispatcher is no longer on the stack, so these pointers are no longer
    // valid. We need to either update these pointers in the ExInfo/ExcepionTracker, or reuse the stack
    // space used by the OS exception dispatcher. We are using the latter approach here.
    //
    CORDB_ADDRESS espOSContext = NULL;
    CORDB_ADDRESS espOSRecord = NULL;
    if (pThread != NULL && pThread->IsExceptionInProgress())
    {
        espOSContext = (CORDB_ADDRESS)PTR_TO_TADDR(pThread->GetExceptionState()->GetContextRecord());
        espOSRecord = (CORDB_ADDRESS)PTR_TO_TADDR(pThread->GetExceptionState()->GetExceptionRecord());
        // The managed exception may not be related to the unhandled exception for which we are trying to
        // hijack. An example would be when a thread hits a managed exception, VS tries to do func eval on
        // the thread, but the func eval causes an unhandled exception (e.g. AV in mscorwks.dll). In this
        // case, the pointers stored on the ExInfo/ExceptionTracker are closer to the root than the current
        // SP of the thread. The check below makes sure we don't reuse the pointers in this case.
        if (espOSContext < esp)
        {
            SafeWriteStructOrThrow(espOSContext, &ctx);
            espContext = CORDB_ADDRESS_TO_PTR(espOSContext);
            // We should have an EXCEPTION_RECORD if we are hijacked at an exception.
            // We need to be careful when we overwrite the exception record. On x86, the OS doesn't
            // always push the full record onto the stack, and so we can't blindly use sizeof(EXCEPTION_RECORD).
            // Instead, we have to look at the number of exception parameters and calculate the size.
            _ASSERTE(pRecord != NULL);
            WriteExceptionRecordHelper(espOSRecord, pRecord);
            espRecord = CORDB_ADDRESS_TO_PTR(espOSRecord);
            // Continue building the frame below whichever of the two blocks is deeper.
            esp = min(espOSContext, espOSRecord);
        }
    }
    // If we haven't reused the pointers, then push everything at the leaf of the stack.
    if (espContext == NULL)
    {
        _ASSERTE(espRecord == NULL);
        // Push on full Context and ExceptionRecord structures. We'll then push pointers to these,
        // and those pointers will serve as the actual args to the function.
        espContext = CORDB_ADDRESS_TO_PTR(PushHelper(&esp, &ctx, TRUE));
        // If caller didn't pass an exception-record, then we're not being hijacked at an exception.
        // We'll just pass NULL for the exception-record to the Hijack function.
        if (pRecord != NULL)
        {
            espRecord = CORDB_ADDRESS_TO_PTR(PushHelper(&esp, pRecord, TRUE));
        }
    }
    if(pRemoteContextAddr != NULL)
    {
        *pRemoteContextAddr = PTR_TO_CORDB_ADDRESS(espContext);
    }
    //
    // Push args onto the stack to be able to call the hijack function
    //
    // Prototype of hijack is:
    //     void __stdcall ExceptionHijackWorker(CONTEXT * pContext, EXCEPTION_RECORD * pRecord, EHijackReason, void * pData)
    // Set up everything so that the hijack stub can just do a "call" instruction.
    //
    // Regarding stack overflow: We could do an explicit check against the thread's stack base limit.
    // However, we don't need an explicit overflow check because if the stack does overflow,
    // the hijack will just hit a regular stack-overflow exception.
#if defined(TARGET_X86) // TARGET
    // X86 calling convention is to push args on the stack in reverse order.
    // If we fail here, the stack is written, but esp hasn't been committed yet so it shouldn't matter.
    PushHelper(&esp, &pData, TRUE);
    PushHelper(&esp, &reason, TRUE);
    PushHelper(&esp, &espRecord, TRUE);
    PushHelper(&esp, &espContext, TRUE);
#elif defined (TARGET_AMD64) // TARGET
    // AMD64 calling convention is to place first 4 parameters in: rcx, rdx, r8 and r9
    ctx.Rcx = (DWORD64) espContext;
    ctx.Rdx = (DWORD64) espRecord;
    ctx.R8 = (DWORD64) reason;
    ctx.R9 = (DWORD64) pData;
    // Caller must allocate stack space to spill for args.
    // Push the arguments onto the outgoing argument homes.
    // Make sure we push pointer-sized values to keep the stack aligned.
    PushHelper(&esp, reinterpret_cast<SIZE_T *>(&(ctx.R9)), FALSE);
    PushHelper(&esp, reinterpret_cast<SIZE_T *>(&(ctx.R8)), FALSE);
    PushHelper(&esp, reinterpret_cast<SIZE_T *>(&(ctx.Rdx)), FALSE);
    PushHelper(&esp, reinterpret_cast<SIZE_T *>(&(ctx.Rcx)), FALSE);
#elif defined(TARGET_ARM)
    // ARM passes the first four arguments in r0-r3.
    ctx.R0 = (DWORD)espContext;
    ctx.R1 = (DWORD)espRecord;
    ctx.R2 = (DWORD)reason;
    ctx.R3 = (DWORD)pData;
#elif defined(TARGET_ARM64)
    // ARM64 passes the first arguments in x0-x3.
    ctx.X0 = (DWORD64)espContext;
    ctx.X1 = (DWORD64)espRecord;
    ctx.X2 = (DWORD64)reason;
    ctx.X3 = (DWORD64)pData;
#else
    PORTABILITY_ASSERT("CordbThread::HijackForUnhandledException is not implemented on this platform.");
#endif
    // Redirect the thread: new SP (with our frame) and IP at the hijack stub.
    SetSP(&ctx, CORDB_ADDRESS_TO_TADDR(esp));
    // @dbgtodo cross-plat - not cross-platform safe
    SetIP(&ctx, pfnHijackFunction);
    //
    // Commit the context.
    //
    hr = m_pMutableTarget->SetThreadContext(dwThreadId, sizeof(ctx), reinterpret_cast<BYTE*> (&ctx));
    IfFailThrow(hr);
}
// Return the filter CONTEXT on the LS.
// Return the filter CONTEXT stored on the left-side thread, or NULL if there
// isn't one. Checks, in order: interop-debugging hijack (no context available),
// the thread's filter context, and finally a redirected-thread context.
VMPTR_CONTEXT DacDbiInterfaceImpl::GetManagedStoppedContext(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;
    VMPTR_CONTEXT vmContext = VMPTR_CONTEXT::NullPtr();
    Thread * pThread = vmThread.GetDacPtr();
    if (pThread->GetInteropDebuggingHijacked())
    {
        // A thread hijacked for interop debugging has no usable stopped context.
        _ASSERTE(!ISREDIRECTEDTHREAD(pThread));
        vmContext = VMPTR_CONTEXT::NullPtr();
    }
    else
    {
        // Prefer the filter context set by the left side, if present.
        DT_CONTEXT * pLSContext = reinterpret_cast<DT_CONTEXT *>(pThread->GetFilterContext());
        if (pLSContext != NULL)
        {
            _ASSERTE(!ISREDIRECTEDTHREAD(pThread));
            vmContext.SetHostPtr(pLSContext);
        }
        else if (ISREDIRECTEDTHREAD(pThread))
        {
            // Fall back to the context captured when the thread was redirected.
            pLSContext = reinterpret_cast<DT_CONTEXT *>(GETREDIRECTEDCONTEXT(pThread));
            _ASSERTE(pLSContext != NULL);
            if (pLSContext != NULL)
            {
                vmContext.SetHostPtr(pLSContext);
            }
        }
    }
    return vmContext;
}
// Return a TargetBuffer for the raw vararg signature.
// Return a TargetBuffer describing the raw vararg signature referenced by the
// VASigCookie pointer at VASigCookieAddr, and report (via pArgBase) the target
// address where the first vararg argument lives.
TargetBuffer DacDbiInterfaceImpl::GetVarArgSig(CORDB_ADDRESS VASigCookieAddr,
                                               CORDB_ADDRESS * pArgBase)
{
    DD_ENTER_MAY_THROW;
    _ASSERTE(pArgBase != NULL);
    *pArgBase = NULL;
    // First, read the VASigCookie pointer.
    // VASigCookieAddr points at a pointer-sized slot holding the cookie's address.
    TADDR taVASigCookie = NULL;
    SafeReadStructOrThrow(VASigCookieAddr, &taVASigCookie);
    // Now create a DAC copy of VASigCookie.
    VASigCookie * pVACookie = PTR_VASigCookie(taVASigCookie);
    // Figure out where the first argument is.
    // The offset direction depends on which way arguments are walked on this platform.
#if defined(TARGET_X86) // (STACK_GROWS_DOWN_ON_ARGS_WALK)
    *pArgBase = VASigCookieAddr + pVACookie->sizeOfArgs;
#else // !TARGET_X86 (STACK_GROWS_UP_ON_ARGS_WALK)
    *pArgBase = VASigCookieAddr + sizeof(VASigCookie *);
#endif // !TARGET_X86 (STACK_GROWS_UP_ON_ARGS_WALK)
    // Hand back the location and length of the raw signature blob in the target.
    return TargetBuffer(PTR_TO_CORDB_ADDRESS(pVACookie->signature.GetRawSig()),
                        pVACookie->signature.GetRawSigLen());
}
// returns TRUE if the type requires 8-byte alignment
// Returns TRUE if instances of the given exact type require 8-byte alignment.
// Only meaningful on platforms built with FEATURE_64BIT_ALIGNMENT; elsewhere
// this throws E_NOTIMPL.
BOOL DacDbiInterfaceImpl::RequiresAlign8(VMPTR_TypeHandle thExact)
{
    DD_ENTER_MAY_THROW;

#ifdef FEATURE_64BIT_ALIGNMENT
    // The method table carries the alignment requirement for the type.
    TypeHandle typeHandle = TypeHandle::FromPtr(thExact.GetDacPtr());
    return typeHandle.AsMethodTable()->RequiresAlign8();
#else
    ThrowHR(E_NOTIMPL);
#endif
}
// Resolve the raw generics token to the real generics type token. The resolution is based on the
// given index.
GENERICS_TYPE_TOKEN DacDbiInterfaceImpl::ResolveExactGenericArgsToken(DWORD dwExactGenericArgsTokenIndex,
                                                                      GENERICS_TYPE_TOKEN rawToken)
{
    DD_ENTER_MAY_THROW;
    // Index 0 means the raw token is the 'this' object pointer; resolve it to
    // the object's MethodTable.
    if (dwExactGenericArgsTokenIndex == 0)
    {
        // In a rare case of VS4Mac debugging VS4Mac ARM64 optimized code we get a null generics argument token. We aren't sure
        // why the token is null, it may be a bug or it may be by design in the runtime. In the interest of time we are working
        // around the issue rather than investigating the root cause. This workaround should only cause us to degrade generic
        // types from exact type parameters to approximate or canonical type parameters. In the future if we discover this issue
        // is happening more frequently than we expect or the workaround is more impactful than we expect we may need to remove
        // this workaround and resolve the underlying issue.
        if (rawToken == 0)
        {
            return rawToken;
        }
        // In this case the real generics type token is the MethodTable of the "this" object.
        // Note that we want the target address here.
        // Incoming rawToken is actually a PTR_Object for the 'this' pointer.
        // Need to do some casting to convert GENERICS_TYPE_TOKEN --> PTR_Object
        TADDR addrObjThis = CORDB_ADDRESS_TO_TADDR(rawToken);
        PTR_Object pObjThis = dac_cast<PTR_Object>(addrObjThis);
        PTR_MethodTable pMT = pObjThis->GetMethodTable();
        // Now package up the PTR_MethodTable back into a GENERICS_TYPE_TOKEN
        TADDR addrMT = dac_cast<TADDR>(pMT);
        GENERICS_TYPE_TOKEN realToken = (GENERICS_TYPE_TOKEN) addrMT;
        return realToken;
    }
    else if (dwExactGenericArgsTokenIndex == (DWORD)ICorDebugInfo::TYPECTXT_ILNUM)
    {
        // rawToken is already initialized correctly. Nothing to do here.
        return rawToken;
    }
    // The index of the generics type token should not be anything else.
    // This is indeed an error condition, and so we throw here.
    _ASSERTE(!"DDII::REGAT - Unexpected generics type token index.");
    ThrowHR(CORDBG_E_TARGET_INCONSISTENT);
}
// Check if the given method is an IL stub or an LCD method.
// Classify the given method: an IL stub, an LCG (dynamic) method, or neither.
IDacDbiInterface::DynamicMethodType DacDbiInterfaceImpl::IsILStubOrLCGMethod(VMPTR_MethodDesc vmMethodDesc)
{
    DD_ENTER_MAY_THROW;

    MethodDesc * pMD = vmMethodDesc.GetDacPtr();
    if (pMD->IsILStub())
    {
        return kILStub;
    }
    if (pMD->IsLCGMethod())
    {
        return kLCGMethod;
    }
    return kNone;
}
//---------------------------------------------------------------------------------------
//
// Determine whether the specified thread is at a GC safe place.
//
// Arguments:
// vmThread - the thread to be examined
//
// Return Value:
// Return TRUE if the thread is at a GC safe place.
// and under what conditions
//
// Notes:
// This function basically does a one-frame stackwalk.
// The logic is adopted from Debugger::IsThreadAtSafePlace().
//
BOOL DacDbiInterfaceImpl::IsThreadAtGCSafePlace(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;
    BOOL fIsGCSafe = FALSE;
    Thread * pThread = vmThread.GetDacPtr();
    // Check if the runtime has entered "Shutdown for Finalizer" mode.
    // During that phase every thread is treated as GC-safe.
    if ((g_fEEShutDown & ShutDown_Finalize2) != 0)
    {
        fIsGCSafe = TRUE;
    }
    else
    {
        // Perform a one-frame stackwalk: examine only the leaf crawl frame.
        T_CONTEXT ctx;
        REGDISPLAY rd;
        SetUpRegdisplayForStackWalk(pThread, &ctx, &rd);
        ULONG32 flags = (QUICKUNWIND | HANDLESKIPPEDFRAMES | DISABLE_MISSING_FRAME_DETECTION);
        StackFrameIterator iter;
        iter.Init(pThread, pThread->GetFrame(), &rd, flags);
        CrawlFrame * pCF = &(iter.m_crawl);
        // Only a frameless, currently-executing managed method can be GC-safe here.
        if (pCF->IsFrameless() && pCF->IsActiveFunc())
        {
            if (pCF->IsGcSafe())
            {
                fIsGCSafe = TRUE;
            }
        }
    }
    return fIsGCSafe;
}
//---------------------------------------------------------------------------------------
//
// Return a partial user state of the specified thread. The returned user state doesn't contain
// information about USER_UNSAFE_POINT. The caller needs to call IsThreadAtGCSafePlace() to get
// the full user state.
//
// Arguments:
// vmThread - the specified thread
//
// Return Value:
// Return the partial user state except for USER_UNSAFE_POINT
//
// Return the partial CorDebugUserState for a thread; USER_UNSAFE_POINT is not
// computed here (callers combine this with IsThreadAtGCSafePlace for that bit).
CorDebugUserState DacDbiInterfaceImpl::GetPartialUserState(VMPTR_Thread vmThread)
{
    DD_ENTER_MAY_THROW;

    Thread * pThread = vmThread.GetDacPtr();
    Thread::ThreadState snapshot = pThread->GetSnapshotState();
    UINT state = 0;

    // Translate each relevant runtime thread-state bit into a user-state flag.
    if ((snapshot & Thread::TS_Background) != 0)
    {
        state |= USER_BACKGROUND;
    }
    if ((snapshot & Thread::TS_Unstarted) != 0)
    {
        state |= USER_UNSTARTED;
    }
    // A dead thread reports USER_STOPPED (no StopRequested once it has stopped).
    if ((snapshot & Thread::TS_Dead) != 0)
    {
        state |= USER_STOPPED;
    }
    // The interruptible flag is unreliable (see issue 699245).
    // TSNC_DebuggerSleepWaitJoin is accurate whenever it is present, but it is
    // still just a band-aid over some of the races TS_Interruptible has.
    if (((snapshot & Thread::TS_Interruptible) != 0) ||
        pThread->HasThreadStateNC(Thread::TSNC_DebuggerSleepWaitJoin))
    {
        state |= USER_WAIT_SLEEP_JOIN;
    }
    if (pThread->IsThreadPoolThread())
    {
        state |= USER_THREADPOOL;
    }

    return (CorDebugUserState)state;
}
//---------------------------------------------------------------------------------------
//
// Look up the EnC version number of a particular jitted instance of a managed method.
//
// Arguments:
// pModule - the module containing the managed method
// vmMethodDesc - the MethodDesc of the managed method
// mdMethod - the MethodDef metadata token of the managed method
// pNativeStartAddress - the native start address of the jitted code
// pJittedInstanceEnCVersion - out parameter; the version number of the version
// corresponding to the specified native start address
// pLatestEnCVersion - out parameter; the version number of the latest version
//
// Assumptions:
// vmMethodDesc and mdMethod must match (see below).
//
// Notes:
// mdMethod is not strictly necessary, since we can always get that from vmMethodDesc.
// It is just a perf optimization since the caller has the metadata token around already.
//
// Today, there is no way to retrieve the EnC version number from the RS data structures.
// This primitive uses DAC to retrieve it from the LS data structures. This function may
// very well be ripped out in the future if we DACize this information, but the current
// thinking is that some of the RS data structures will remain, most likely in a reduced form.
//
void DacDbiInterfaceImpl::LookupEnCVersions(Module* pModule,
                                            VMPTR_MethodDesc vmMethodDesc,
                                            mdMethodDef mdMethod,
                                            CORDB_ADDRESS pNativeStartAddress,
                                            SIZE_T * pLatestEnCVersion,
                                            SIZE_T * pJittedInstanceEnCVersion /* = NULL */)
{
    MethodDesc * pMD = vmMethodDesc.GetDacPtr();
    // make sure the vmMethodDesc and mdMethod match
    _ASSERTE(pMD->GetMemberDef() == mdMethod);
    _ASSERTE(pLatestEnCVersion != NULL);
    // @dbgtodo inspection - once we do EnC, stop using DMIs.
    // If the method wasn't EnCed, DMIs may not exist. And since this is DAC, we can't create them.
    // We may not have the memory for the DebuggerMethodInfos in a minidump.
    // When dump debugging EnC information isn't very useful so just fallback
    // to default version.
    DebuggerMethodInfo * pDMI = NULL;
    DebuggerJitInfo * pDJI = NULL;
    // Tolerate missing memory here: a minidump may not contain the DMI/DJI data.
    EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY
    {
        pDMI = g_pDebugger->GetOrCreateMethodInfo(pModule, mdMethod);
        if (pDMI != NULL)
        {
            // Find the jit info for the specific jitted instance at the given start address.
            pDJI = pDMI->FindJitInfo(pMD, CORDB_ADDRESS_TO_TADDR(pNativeStartAddress));
        }
    }
    EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY;
    if (pDJI != NULL)
    {
        if (pJittedInstanceEnCVersion != NULL)
        {
            *pJittedInstanceEnCVersion = pDJI->m_encVersion;
        }
        *pLatestEnCVersion = pDMI->GetCurrentEnCVersion();
    }
    else
    {
        // If we have no DMI/DJI, then we must never have EnCed. So we can use default EnC info
        // Several cases where we don't have a DMI/DJI:
        // - LCG methods
        // - method was never "touched" by debugger. (DJIs are created lazily).
        if (pJittedInstanceEnCVersion != NULL)
        {
            *pJittedInstanceEnCVersion = CorDB_DEFAULT_ENC_FUNCTION_VERSION;
        }
        *pLatestEnCVersion = CorDB_DEFAULT_ENC_FUNCTION_VERSION;
    }
}
// Get the address of the Debugger control block on the helper thread
// Arguments: none
// Return Value: The remote address of the Debugger control block allocated on the helper thread
// if it has been successfully allocated or NULL otherwise.
// Return the remote address of the Debugger control block allocated on the
// helper thread, or NULL if it has not been allocated yet.
CORDB_ADDRESS DacDbiInterfaceImpl::GetDebuggerControlBlockAddress()
{
    DD_ENTER_MAY_THROW;

    // The DCB lives on the helper thread; either piece may not exist yet.
    if ((g_pDebugger == NULL) || (g_pDebugger->m_pRCThread == NULL))
    {
        return NULL;
    }
    return CORDB_ADDRESS(dac_cast<TADDR>(g_pDebugger->m_pRCThread->GetDCB()));
}
// DacDbi API: Get the context for a particular thread of the target process
// DacDbi API: Get the context for a particular thread of the target process.
// Prefers the left-side filter context; otherwise asks the data target, and if
// the data target cannot supply thread contexts, reconstructs one from the
// thread's explicit Frame chain (sufficient for managed stackwalks).
void DacDbiInterfaceImpl::GetContext(VMPTR_Thread vmThread, DT_CONTEXT * pContextBuffer)
{
    DD_ENTER_MAY_THROW
    _ASSERTE(pContextBuffer != NULL);
    Thread * pThread = vmThread.GetDacPtr();
    // @dbgtodo Once the filter context is removed, then we should always
    // start with the leaf CONTEXT.
    DT_CONTEXT * pFilterContext = reinterpret_cast<DT_CONTEXT *>(pThread->GetFilterContext());
    if (pFilterContext == NULL)
    {
        // If the filter context is NULL, then we use the true context of the thread.
        pContextBuffer->ContextFlags = DT_CONTEXT_ALL;
        HRESULT hr = m_pTarget->GetThreadContext(pThread->GetOSThreadId(),
                                                 pContextBuffer->ContextFlags,
                                                 sizeof(*pContextBuffer),
                                                 reinterpret_cast<BYTE *>(pContextBuffer));
        if (hr == E_NOTIMPL)
        {
            // GetThreadContext is not implemented on this data target.
            // That's why we have to make do with context we can obtain from Frames explicitly stored in Thread object.
            // It suffices for managed debugging stackwalk.
            REGDISPLAY tmpRd = {};
            T_CONTEXT tmpContext = {};
            FillRegDisplay(&tmpRd, &tmpContext);
            // Going through thread Frames and looking for first (deepest one) one that
            // that has context available for stackwalking (SP and PC)
            // For example: RedirectedThreadFrame, InlinedCallFrame, HelperMethodFrame, ComPlusMethodFrame
            Frame *frame = pThread->GetFrame();
            while (frame != NULL && frame != FRAME_TOP)
            {
                frame->UpdateRegDisplay(&tmpRd);
                // A frame is usable once it yields both a stack pointer and a PC.
                if (GetRegdisplaySP(&tmpRd) != 0 && GetControlPC(&tmpRd) != 0)
                {
                    UpdateContextFromRegDisp(&tmpRd, &tmpContext);
                    CopyMemory(pContextBuffer, &tmpContext, sizeof(*pContextBuffer));
                    // Only control registers were reconstructed, so advertise just those.
                    pContextBuffer->ContextFlags = DT_CONTEXT_CONTROL;
                    return;
                }
                frame = frame->Next();
            }
            // It looks like this thread is not running managed code.
            ZeroMemory(pContextBuffer, sizeof(*pContextBuffer));
        }
        else
        {
            IfFailThrow(hr);
        }
    }
    else
    {
        *pContextBuffer = *pFilterContext;
    }
} // DacDbiInterfaceImpl::GetContext
// Create a VMPTR_Object from a target object address
// @dbgtodo validate the VMPTR_Object is in fact a object, possibly by DACizing
// Object::Validate
// Wrap a raw target object address in a VMPTR_Object.
// @dbgtodo validate the VMPTR_Object is in fact a object, possibly by DACizing
// Object::Validate
VMPTR_Object DacDbiInterfaceImpl::GetObject(CORDB_ADDRESS ptr)
{
    DD_ENTER_MAY_THROW;

    VMPTR_Object vmObject = VMPTR_Object::NullPtr();
    vmObject.SetDacTargetPtr(CORDB_ADDRESS_TO_TADDR(ptr));
    return vmObject;
}
// Setting an NGEN policy is not supported by this implementation.
HRESULT DacDbiInterfaceImpl::EnableNGENPolicy(CorDebugNGENPolicy ePolicy)
{
    return E_NOTIMPL;
}
// NGEN is not supported by this runtime; always fails.
HRESULT DacDbiInterfaceImpl::SetNGENCompilerFlags(DWORD dwFlags)
{
    DD_ENTER_MAY_THROW;
    return CORDBG_E_NGEN_NOT_SUPPORTED;
}
// NGEN is not supported by this runtime; always fails without touching *pdwFlags.
HRESULT DacDbiInterfaceImpl::GetNGENCompilerFlags(DWORD *pdwFlags)
{
    DD_ENTER_MAY_THROW;
    return CORDBG_E_NGEN_NOT_SUPPORTED;
}
// DAC pointer to an OBJECTREF in the target process; used to follow a
// reference-to-an-object indirection (see GetObjectFromRefPtr).
typedef DPTR(OBJECTREF) PTR_ObjectRef;
// Create a VMPTR_Object from an address which points to a reference to an object
// @dbgtodo validate the VMPTR_Object is in fact a object, possibly by DACizing
// Object::Validate
// Create a VMPTR_Object from an address that points to a *reference* to an
// object (one extra level of indirection vs. GetObject).
// @dbgtodo validate the VMPTR_Object is in fact a object, possibly by DACizing
// Object::Validate
VMPTR_Object DacDbiInterfaceImpl::GetObjectFromRefPtr(CORDB_ADDRESS ptr)
{
    DD_ENTER_MAY_THROW;

    // Dereference the target-side object reference, then wrap the referent.
    PTR_ObjectRef pRef = PTR_ObjectRef(CORDB_ADDRESS_TO_TADDR(ptr));
    VMPTR_Object vmObject = VMPTR_Object::NullPtr();
    vmObject.SetDacTargetPtr(PTR_TO_TADDR(*pRef));
    return vmObject;
}
// Create a VMPTR_OBJECTHANDLE from a handle
// Wrap a raw handle address in a VMPTR_OBJECTHANDLE.
VMPTR_OBJECTHANDLE DacDbiInterfaceImpl::GetVmObjectHandle(CORDB_ADDRESS handleAddress)
{
    DD_ENTER_MAY_THROW;

    VMPTR_OBJECTHANDLE vmHandle = VMPTR_OBJECTHANDLE::NullPtr();
    vmHandle.SetDacTargetPtr(CORDB_ADDRESS_TO_TADDR(handleAddress));
    return vmHandle;
}
// Validate that the VMPTR_OBJECTHANDLE refers to a legitimate managed object
// Validate that the VMPTR_OBJECTHANDLE refers to a legitimate managed object.
// Returns TRUE for a NULL referent or one passing the object sanity check;
// any access violation while probing is swallowed and reported as FALSE.
BOOL DacDbiInterfaceImpl::IsVmObjectHandleValid(VMPTR_OBJECTHANDLE vmHandle)
{
    DD_ENTER_MAY_THROW;
    BOOL ret = FALSE;
    // this may cause unallocated debuggee memory to be read
    // SEH exceptions will be caught
    EX_TRY
    {
        OBJECTREF objRef = ObjectFromHandle((OBJECTHANDLE)vmHandle.GetDacPtr());
        // NULL is certainly valid...
        if (objRef != NULL)
        {
            // Best-effort validation; may AV on garbage, which the catch absorbs.
            if (objRef->ValidateObjectWithPossibleAV())
            {
                ret = TRUE;
            }
        }
    }
    EX_CATCH
    {
        // Any failure while probing means the handle is not usable; leave ret FALSE.
    }
    EX_END_CATCH(SwallowAllExceptions);
    return ret;
}
// determines if the specified module is a WinRT module
// Determines if the specified module is a WinRT module. This runtime never
// reports WinRT modules, so the answer is always FALSE.
HRESULT DacDbiInterfaceImpl::IsWinRTModule(VMPTR_Module vmModule, BOOL& isWinRT)
{
    DD_ENTER_MAY_THROW;

    isWinRT = FALSE;
    return S_OK;
}
// Determines the app domain id for the object referred to by a given VMPTR_OBJECTHANDLE
ULONG DacDbiInterfaceImpl::GetAppDomainIdFromVmObjectHandle(VMPTR_OBJECTHANDLE vmHandle)
{
    DD_ENTER_MAY_THROW;
    // Every handle maps to the single default AppDomain.
    return DefaultADID;
}
// Get the target address from a VMPTR_OBJECTHANDLE, i.e., the handle address
// Get the target address from a VMPTR_OBJECTHANDLE — the handle address itself.
CORDB_ADDRESS DacDbiInterfaceImpl::GetHandleAddressFromVmHandle(VMPTR_OBJECTHANDLE vmHandle)
{
    DD_ENTER_MAY_THROW;

    // An object handle's target address is simply the handle value.
    return vmHandle.GetDacPtr();
}
// Create a TargetBuffer which describes the location of the object
// Describe the location and size of the object's contents in the target.
TargetBuffer DacDbiInterfaceImpl::GetObjectContents(VMPTR_Object vmObj)
{
    DD_ENTER_MAY_THROW;

    PTR_Object pObj = vmObj.GetDacPtr();
    // TargetBuffer carries a 32-bit length, so the object must fit in one.
    _ASSERTE(pObj->GetSize() <= 0xffffffff);
    return TargetBuffer(PTR_TO_TADDR(pObj), (ULONG)pObj->GetSize());
}
// ============================================================================
// functions to get information about objects referenced via an instance of CordbReferenceValue or
// CordbHandleValue
// ============================================================================
// DacDbiInterfaceImpl::FastSanityCheckObject
// Helper function for CheckRef. Sanity check an object.
// We use a fast and easy check to improve confidence that objPtr points to a valid object.
// We can't tell cheaply if this is really a valid object (that would require walking the GC heap), but at
// least we can check if we get an EEClass from the supposed method table and then get the method table from
// the class. If we can, we have improved the probability that the object is valid.
// Arguments:
// input: objPtr - address of the object we are checking
// Return Value: E_INVALIDARG or S_OK.
HRESULT DacDbiInterfaceImpl::FastSanityCheckObject(PTR_Object objPtr)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;
    HRESULT hr = S_OK;
    // Probing potentially-bogus memory may AV; the EX_CATCH below treats any
    // such failure as "not a valid object".
    EX_TRY
    {
        // NULL is certainly valid...
        if (objPtr != NULL)
        {
            if (!objPtr->ValidateObjectWithPossibleAV())
            {
                LOG((LF_CORDB, LL_INFO10000, "GOI: object methodtable-class invariant doesn't hold.\n"));
                hr = E_INVALIDARG;
            }
        }
    }
    EX_CATCH
    {
        LOG((LF_CORDB, LL_INFO10000, "GOI: exception indicated ref is bad.\n"));
        hr = E_INVALIDARG;
    }
    EX_END_CATCH(SwallowAllExceptions);
    return hr;
} // DacDbiInterfaceImpl::FastSanityCheckObject
// Perform a sanity check on an object address to determine if this _could be_ a valid object.
// We can't tell this for certain without walking the GC heap, but we do some fast tests to rule
// out clearly invalid object addresses. See code:DacDbiInterfaceImpl::FastSanityCheckObject for more
// details.
// Arguments:
// input: objPtr - address of the object we are checking
// Return Value:
// objRefBad - true iff we have determined the address cannot be pointing to a valid object.
// Note that a value of false doesn't necessarily guarantee the object is really
// valid
// Sanity-check an object address. Returns true iff the address is definitely
// NOT a valid object; false means "plausibly valid" (not a guarantee — a full
// answer would require walking the GC heap).
bool DacDbiInterfaceImpl::CheckRef(PTR_Object objPtr)
{
    // Shortcut null references now...
    if (objPtr == NULL)
    {
        LOG((LF_CORDB, LL_INFO10000, "D::GOI: ref is NULL.\n"));
        return true;
    }

    // Try to verify the integrity of the object. This is not fool proof.
    // @todo - this whole idea of expecting AVs is broken, but it does rule
    // out a fair bit of rubbish. Find another
    // way to test if the object is valid?
    if (FAILED(FastSanityCheckObject(objPtr)))
    {
        LOG((LF_CORDB, LL_INFO10000, "D::GOI: address is not a valid object.\n"));
        return true;
    }

    return false;
} // DacDbiInterfaceImpl::CheckRef
// DacDbiInterfaceImpl::InitObjectData
// Initialize basic object information: type handle, object size, offset to fields and expanded type
// information.
// Arguments:
// input: objPtr - address of object of interest
// vmAppDomain - AppDomain for the type f the object
// output: pObjectData - object information
// Note: It is assumed that pObjectData is non-null.
void DacDbiInterfaceImpl::InitObjectData(PTR_Object objPtr,
                                         VMPTR_AppDomain vmAppDomain,
                                         DebuggerIPCE_ObjectData * pObjectData)
{
    _ASSERTE(pObjectData != NULL);
    // @todo - this is still dangerous because the object may still be invalid.
    VMPTR_TypeHandle vmTypeHandle = VMPTR_TypeHandle::NullPtr();
    vmTypeHandle.SetDacTargetPtr(objPtr->GetGCSafeTypeHandle().AsTAddr());
    // Save basic object info.
    pObjectData->objSize = objPtr->GetSize();
    // Offset from the object's start to its instance fields.
    pObjectData->objOffsetToVars = dac_cast<TADDR>((objPtr)->GetData()) - dac_cast<TADDR>(objPtr);
    // Expand the type handle into the full type description the RS expects.
    TypeHandleToExpandedTypeInfo(AllBoxed, vmAppDomain, vmTypeHandle, &(pObjectData->objTypeData));
    // If this is a string object, set the type to ELEMENT_TYPE_STRING.
    if (objPtr->GetGCSafeMethodTable() == g_pStringClass)
    {
        pObjectData->objTypeData.elementType = ELEMENT_TYPE_STRING;
        // Round very small strings up to the minimum object size.
        if(pObjectData->objSize < MIN_OBJECT_SIZE)
        {
            pObjectData->objSize = PtrAlign(pObjectData->objSize);
        }
    }
} // DacDbiInterfaceImpl::InitObjectData
// DAC/DBI API
// Get object information for a TypedByRef object (System.TypedReference).
// These are objects that contain a managed pointer to a location and the type of the value at that location.
// They are most commonly used for varargs but also may be used for parameters and locals. They are
// stack-allocated. They provide a means for adding dynamic type information to a value type, whereas boxing
// provides only static type information. This means they can be passed as reference parameters to
// polymorphic methods that don't statically restrict the type of arguments they can receive.
// Although they are represented simply as an address, unlike other object references, they don't point
// directly to the object. Instead, there is an extra level of indirection. The reference points to a struct
// that contains the address of the object, so we need to treat them differently. They have their own
// CorElementType (ELEMENT_TYPE_TYPEDBYREF) which makes it possible to identify this special case.
// Example:
// static int AddABunchOfInts (__arglist)
// {
// int result = 0;
//
// System.ArgIterator iter = new System.ArgIterator (__arglist);
// int argCount = iter.GetRemainingCount();
//
// for (int i = 0; i < argCount; i++)
// {
// System.TypedReference typedRef = iter.GetNextArg();
// result += (int)TypedReference.ToObject(typedRef);
// }
//
// return result;
// }
//
// static int Main (string[] args)
// {
// int result = AddABunchOfInts (__arglist (2, 3, 4));
// Console.WriteLine ("Answer: {0}", result);
//
// if (result != 9)
// return 1;
//
// return 0;
// }
// Initializes the objRef and typedByRefType fields of pObjectData (type info for the referent).
void DacDbiInterfaceImpl::GetTypedByRefInfo(CORDB_ADDRESS pTypedByRef,
                                            VMPTR_AppDomain vmAppDomain,
                                            DebuggerIPCE_ObjectData * pObjectData)
{
    DD_ENTER_MAY_THROW;
    // pTypedByRef is really the address of a TypedByRef struct rather than of a normal object.
    // The data field of the TypedByRef struct is the actual object ref.
    PTR_TypedByRef refAddr = PTR_TypedByRef(TADDR(pTypedByRef));
    _ASSERTE(refAddr != NULL);
    _ASSERTE(pObjectData != NULL);
    // The type of the referent is in the type field of the TypedByRef. We need to initialize the object
    // data type information.
    TypeHandleToBasicTypeInfo(refAddr->type,
                              &(pObjectData->typedByrefInfo.typedByrefType),
                              vmAppDomain.GetDacPtr());
    // The reference to the object is in the data field of the TypedByRef.
    CORDB_ADDRESS tempRef = dac_cast<TADDR>(refAddr->data);
    pObjectData->objRef = CORDB_ADDRESS_TO_PTR(tempRef);
    LOG((LF_CORDB, LL_INFO10000, "D::GASOI: sending REFANY result: "
         "ref=0x%08x, cls=0x%08x, mod=0x%p\n",
         pObjectData->objRef,
         pObjectData->typedByrefType.metadataToken,
         pObjectData->typedByrefType.vmDomainAssembly.GetDacPtr()));
} // DacDbiInterfaceImpl::GetTypedByRefInfo
// Get the string data associated with obj and put it into the pointers
// DAC/DBI API
// Get the string length and offset to string base for a string object.
// Throws CORDBG_E_TARGET_INCONSISTENT if the object is not actually a string.
void DacDbiInterfaceImpl::GetStringData(CORDB_ADDRESS objectAddress, DebuggerIPCE_ObjectData * pObjectData)
{
    DD_ENTER_MAY_THROW;

    PTR_Object objPtr = PTR_Object(TADDR(objectAddress));

    LOG((LF_CORDB, LL_INFO10000, "D::GOI: The referent is a string.\n"));

    // Sanity-check against the canonical String method table; a mismatch means
    // the caller's claim that this is a string is inconsistent with the target.
    if (objPtr->GetGCSafeMethodTable() != g_pStringClass)
    {
        ThrowHR(CORDBG_E_TARGET_INCONSISTENT);
    }

    PTR_StringObject pStrObj = dac_cast<PTR_StringObject>(objPtr);
    _ASSERTE(pStrObj != NULL);

    pObjectData->stringInfo.length = pStrObj->GetStringLength();
    pObjectData->stringInfo.offsetToStringBase = (UINT_PTR) pStrObj->GetBufferOffset();
} // DacDbiInterfaceImpl::GetStringData
// DAC/DBI API
// Get information for an array type referent of an objRef, including rank, upper and lower
// bounds, element size and type, and the number of elements.
// If the object is not actually an array, pObjectData->objRefBad is set instead of throwing.
void DacDbiInterfaceImpl::GetArrayData(CORDB_ADDRESS objectAddress, DebuggerIPCE_ObjectData * pObjectData)
{
    DD_ENTER_MAY_THROW;

    PTR_Object objPtr = PTR_Object(TADDR(objectAddress));
    PTR_MethodTable pMT = objPtr->GetGCSafeMethodTable();

    if (!objPtr->GetGCSafeTypeHandle().IsArray())
    {
        LOG((LF_CORDB, LL_INFO10000,
             "D::GASOI: object should be an array.\n"));

        pObjectData->objRefBad = true;
    }
    else
    {
        PTR_ArrayBase arrPtr = dac_cast<PTR_ArrayBase>(objPtr);

        // this is also returned in the type information for the array - we return both for sanity checking...
        pObjectData->arrayInfo.rank = arrPtr->GetRank();
        pObjectData->arrayInfo.componentCount = arrPtr->GetNumComponents();
        pObjectData->arrayInfo.offsetToArrayBase = arrPtr->GetDataPtrOffset(pMT);

        // Only multi-dimensional arrays carry explicit bounds arrays; for SZ arrays
        // the offsets are reported as 0.
        if (arrPtr->IsMultiDimArray())
        {
            pObjectData->arrayInfo.offsetToUpperBounds = SIZE_T(arrPtr->GetBoundsOffset(pMT));
            pObjectData->arrayInfo.offsetToLowerBounds = SIZE_T(arrPtr->GetLowerBoundsOffset(pMT));
        }
        else
        {
            pObjectData->arrayInfo.offsetToUpperBounds = 0;
            pObjectData->arrayInfo.offsetToLowerBounds = 0;
        }

        pObjectData->arrayInfo.elementSize = arrPtr->GetComponentSize();

        LOG((LF_CORDB, LL_INFO10000, "D::GOI: array info: "
             "baseOff=%d, lowerOff=%d, upperOff=%d, cnt=%d, rank=%d, rank (2) = %d,"
             "eleSize=%d, eleType=0x%02x\n",
             pObjectData->arrayInfo.offsetToArrayBase,
             pObjectData->arrayInfo.offsetToLowerBounds,
             pObjectData->arrayInfo.offsetToUpperBounds,
             pObjectData->arrayInfo.componentCount,
             pObjectData->arrayInfo.rank,
             pObjectData->objTypeData.ArrayTypeData.arrayRank,
             pObjectData->arrayInfo.elementSize,
             pObjectData->objTypeData.ArrayTypeData.arrayTypeArg.elementType));
    }
} // DacDbiInterfaceImpl::GetArrayData
// DAC/DBI API: Get information about an object for which we have a reference, including the object size and
// type information.
// Arguments:
//    objectAddress - target address of the object
//    type          - expected element type of the object (may be refined by InitObjectData)
//    vmAppDomain   - app domain used when resolving type information
//    pObjectData   - [out] receives validity flag, size and type info for the object
void DacDbiInterfaceImpl::GetBasicObjectInfo(CORDB_ADDRESS objectAddress,
                                             CorElementType type,
                                             VMPTR_AppDomain vmAppDomain,
                                             DebuggerIPCE_ObjectData * pObjectData)
{
    DD_ENTER_MAY_THROW;

    PTR_Object objPtr = PTR_Object(TADDR(objectAddress));
    // Validate the reference first; a bad ref short-circuits the rest.
    pObjectData->objRefBad = CheckRef(objPtr);
    if (pObjectData->objRefBad != true)
    {
        // initialize object type, size, offset information. Note: We may have a different element type
        // after this. For example, we may start with E_T_CLASS but return with something more specific.
        InitObjectData (objPtr, vmAppDomain, pObjectData);
    }
} // DacDbiInterfaceImpl::GetBasicObjectInfo
// This is the data passed to EnumerateBlockingObjectsCallback below.
// It pairs the user's callback function with the user's opaque data so both
// can be threaded through VisitBlockingItems' single user-data pointer.
struct BlockingObjectUserDataWrapper
{
    CALLBACK_DATA pUserData;   // opaque data to hand back to the user's callback
    IDacDbiInterface::FP_BLOCKINGOBJECT_ENUMERATION_CALLBACK fpCallback;   // the user's callback
};
// The callback helper used by EnumerateBlockingObjects below, this
// callback in turn invokes the user's callback with the right arguments.
// Converts a runtime DebugBlockingItem into the DAC-facing DacBlockingObject shape.
void EnumerateBlockingObjectsCallback(PTR_DebugBlockingItem obj, VOID* pUserData)
{
    BlockingObjectUserDataWrapper* wrapper = (BlockingObjectUserDataWrapper*)pUserData;
    DacBlockingObject dacObj;

    // init to an arbitrary value to avoid mac compiler error about unintialized use
    // it will be correctly set in the switch and is never used with only this init here
    dacObj.blockingReason = DacBlockReason_MonitorCriticalSection;

    dacObj.vmBlockingObject.SetDacTargetPtr(dac_cast<TADDR>(OBJECTREFToObject(obj->pMonitor->GetOwningObject())));
    dacObj.dwTimeout = obj->dwTimeout;
    dacObj.vmAppDomain.SetDacTargetPtr(dac_cast<TADDR>(obj->pAppDomain));

    // Map the runtime's blocking-item type onto the DAC enum.
    switch(obj->type)
    {
        case DebugBlock_MonitorCriticalSection:
            dacObj.blockingReason = DacBlockReason_MonitorCriticalSection;
            break;
        case DebugBlock_MonitorEvent:
            dacObj.blockingReason = DacBlockReason_MonitorEvent;
            break;
        default:
            _ASSERTE(!"obj->type has an invalid value");
            return;
    }

    wrapper->fpCallback(dacObj, wrapper->pUserData);
}
// DAC/DBI API:
// Enumerate all monitors blocking a thread.
// Arguments:
//    vmThread   - the thread whose blocking items are visited
//    fpCallback - invoked once per blocking object
//    pUserData  - opaque data passed through to fpCallback
void DacDbiInterfaceImpl::EnumerateBlockingObjects(VMPTR_Thread vmThread,
                                                   FP_BLOCKINGOBJECT_ENUMERATION_CALLBACK fpCallback,
                                                   CALLBACK_DATA pUserData)
{
    DD_ENTER_MAY_THROW;

    Thread * pThread = vmThread.GetDacPtr();
    _ASSERTE(pThread != NULL);

    // Bundle the user's callback+data so the runtime's visitor (which takes one
    // user-data pointer) can forward to it via EnumerateBlockingObjectsCallback.
    BlockingObjectUserDataWrapper wrapper;
    wrapper.fpCallback = fpCallback;
    wrapper.pUserData = pUserData;

    pThread->DebugBlockingInfo.VisitBlockingItems((DebugBlockingItemVisitor)EnumerateBlockingObjectsCallback,
                                                  (VOID*)&wrapper);
}
// DAC/DBI API:
// Returns the thread which owns the monitor lock on an object and the acquisition count.
// If the object is unlocked, lockOwner is the null VMPTR and acquisitionCount is 0.
MonitorLockInfo DacDbiInterfaceImpl::GetThreadOwningMonitorLock(VMPTR_Object vmObject)
{
    DD_ENTER_MAY_THROW;
    MonitorLockInfo info;
    info.lockOwner = VMPTR_Thread::NullPtr();
    info.acquisitionCount = 0;

    Object* pObj = vmObject.GetDacPtr();
    DWORD threadId;
    DWORD acquisitionCount;
    // The object header only records the owning thread's managed thread id, not
    // the Thread*; if nothing owns the lock, return the null/zero defaults.
    if(!pObj->GetHeader()->GetThreadOwningMonitorLock(&threadId, &acquisitionCount))
    {
        return info;
    }

    // Linear walk of the thread store to translate the thread id back to a Thread*.
    Thread *pThread = ThreadStore::GetThreadList(NULL);
    while (pThread != NULL)
    {
        if(pThread->GetThreadId() == threadId)
        {
            info.lockOwner.SetDacTargetPtr(PTR_HOST_TO_TADDR(pThread));
            info.acquisitionCount = acquisitionCount;
            return info;
        }
        pThread = ThreadStore::GetThreadList(pThread);
    }
    _ASSERTE(!"A thread should have been found");
    return info;
}
// The data passed to EnumerateThreadsCallback below.
// Pairs the user's callback with the user's opaque data so both fit through the
// enumeration's single user-data pointer.
struct ThreadUserDataWrapper
{
    CALLBACK_DATA pUserData;   // opaque data to hand back to the user's callback
    IDacDbiInterface::FP_THREAD_ENUMERATION_CALLBACK fpCallback;   // the user's callback
};
// The callback helper used for EnumerateMonitorEventWaitList below. This callback
// invokes the user's callback with the correct arguments, wrapping the raw
// Thread pointer in a VMPTR_Thread first.
void EnumerateThreadsCallback(PTR_Thread pThread, VOID* pUserData)
{
    ThreadUserDataWrapper* wrapper = (ThreadUserDataWrapper*)pUserData;
    VMPTR_Thread vmThread = VMPTR_Thread::NullPtr();
    vmThread.SetDacTargetPtr(dac_cast<TADDR>(pThread));
    wrapper->fpCallback(vmThread, wrapper->pUserData);
}
// DAC/DBI API:
// Enumerate all threads waiting on the monitor event for an object.
// If the object has no sync block there can be no waiters, so this returns
// without invoking the callback.
void DacDbiInterfaceImpl::EnumerateMonitorEventWaitList(VMPTR_Object vmObject,
                                                        FP_THREAD_ENUMERATION_CALLBACK fpCallback,
                                                        CALLBACK_DATA pUserData)
{
    DD_ENTER_MAY_THROW;

    Object* pObj = vmObject.GetDacPtr();
    SyncBlock* psb = pObj->PassiveGetSyncBlock();

    // no sync block means no wait list
    if(psb == NULL)
        return;

    // Bundle the user's callback+data for the thread-queue visitor.
    ThreadUserDataWrapper wrapper;
    wrapper.fpCallback = fpCallback;
    wrapper.pUserData = pUserData;
    ThreadQueue::EnumerateThreads(psb, (FP_TQ_THREAD_ENUMERATION_CALLBACK)EnumerateThreadsCallback, (VOID*) &wrapper);
}
// DAC/DBI API
// Reports whether the GC data structures can be relied upon. This implementation
// unconditionally reports true; callers that need the real validity state use
// GetGCHeapInformation's areGCStructuresValid instead.
bool DacDbiInterfaceImpl::AreGCStructuresValid()
{
    return true;
}
// Zero-initialize the heap description. The init list covers the members the
// original code initialized; Gen1Start and EphemeralSegment are additionally
// zeroed in the body because not every population path writes them (e.g. the
// regions GC path never sets Gen1Start, and EphemeralSegment is only written
// when the ephemeral segment is located), yet later code reads them.
// Assigning in the body avoids depending on member declaration order.
HeapData::HeapData()
    : YoungestGenPtr(0), YoungestGenLimit(0), Gen0Start(0), Gen0End(0), SegmentCount(0), Segments(0)
{
    Gen1Start = 0;
    EphemeralSegment = 0;
}
// Releases the segment array. delete[] on a null pointer is a no-op, so no
// explicit guard is required.
HeapData::~HeapData()
{
    delete [] Segments;
}
// Constructs the page-granular read cache. Queries the OS page size and
// allocates one page-sized buffer; mPage may be NULL on allocation failure
// (callers must tolerate a NULL page buffer — TODO confirm against ReadMT/Read).
LinearReadCache::LinearReadCache()
    : mCurrPageStart(0), mPageSize(0), mCurrPageSize(0), mPage(0)
{
    SYSTEM_INFO si;
    GetSystemInfo(&si);

    mPageSize = si.dwPageSize;
    mPage = new (nothrow) BYTE[mPageSize];
}
// Releases the cached page buffer. delete[] handles NULL safely, so the
// allocation-failure case needs no special treatment.
LinearReadCache::~LinearReadCache()
{
    delete [] mPage;
}
// Refills the cache with the target page containing addr.
// Returns true on success; on failure the cache is left empty (start/size 0)
// so subsequent lookups miss cleanly.
bool LinearReadCache::MoveToPage(CORDB_ADDRESS addr)
{
    // Round down to the page boundary containing addr.
    mCurrPageStart = addr - (addr % mPageSize);
    HRESULT hr = g_dacImpl->m_pTarget->ReadVirtual(mCurrPageStart, mPage, mPageSize, &mCurrPageSize);

    if (hr != S_OK)
    {
        mCurrPageStart = 0;
        mCurrPageSize = 0;
        return false;
    }

    return true;
}
// Default walk bounds: a DacHeapWalker constructed without explicit limits
// covers the entire address space.
CORDB_ADDRESS DacHeapWalker::HeapStart = 0;
CORDB_ADDRESS DacHeapWalker::HeapEnd = ~0;
// Constructs a heap walker with no heap data yet (Init() populates it) and the
// default full-address-space walk range.
DacHeapWalker::DacHeapWalker()
    : mThreadCount(0), mAllocInfo(0), mHeapCount(0), mHeaps(0),
      mCurrObj(0), mCurrSize(0), mCurrMT(0),
      mCurrHeap(0), mCurrSeg(0), mStart((TADDR)HeapStart), mEnd((TADDR)HeapEnd)
{
}
// Releases the allocation-context and heap arrays built by Init().
// delete[] on NULL is a no-op, so no guards are needed.
DacHeapWalker::~DacHeapWalker()
{
    delete [] mAllocInfo;
    delete [] mHeaps;
}
// Linearly scans every segment of every heap for the one whose [Start, End]
// range contains obj. Returns NULL if no segment contains the address.
SegmentData *DacHeapWalker::FindSegment(CORDB_ADDRESS obj)
{
    for (size_t heapIdx = 0; heapIdx < mHeapCount; ++heapIdx)
    {
        HeapData &heap = mHeaps[heapIdx];
        for (size_t segIdx = 0; segIdx < heap.SegmentCount; ++segIdx)
        {
            SegmentData &seg = heap.Segments[segIdx];
            if (seg.Start <= obj && obj <= seg.End)
                return &seg;
        }
    }

    return NULL;
}
// Reports the current object (address, method table, size) through whichever
// out-params are non-NULL, then advances the walker to the next object.
// Returns E_FAIL when the walk is exhausted; advance failures propagate, and
// non-failure advance statuses collapse to S_OK.
HRESULT DacHeapWalker::Next(CORDB_ADDRESS *pValue, CORDB_ADDRESS *pMT, ULONG64 *pSize)
{
    if (!HasMoreObjects())
        return E_FAIL;

    if (pValue != NULL)
        *pValue = mCurrObj;

    if (pMT != NULL)
        *pMT = (CORDB_ADDRESS)mCurrMT;

    if (pSize != NULL)
        *pSize = (ULONG64)mCurrSize;

    HRESULT hrMove = MoveToNextObject();
    if (FAILED(hrMove))
        return hrMove;

    return S_OK;
}
// Advances mCurrObj/mCurrMT/mCurrSize to the next object in the walk.
// Skips over per-thread allocation contexts in gen0 (which contain free space,
// not objects), moves to the next segment when the current one is exhausted,
// and loops until an object at or beyond mStart is reached.
// Returns S_OK on success, S_FALSE when the walk is exhausted (via
// NextSegment), or E_FAIL if target memory can't be read.
HRESULT DacHeapWalker::MoveToNextObject()
{
    do
    {
        // Move to the next object
        mCurrObj += mCurrSize;

        // Check to see if we are in the correct bounds.
        // Under regions the segment's generation tells us whether we're in gen0;
        // otherwise compare against the heap's recorded gen0 address range.
        bool isGen0 = IsRegionGCEnabled() ? (mHeaps[mCurrHeap].Segments[mCurrSeg].Generation == 0) :
                        (mHeaps[mCurrHeap].Gen0Start <= mCurrObj && mHeaps[mCurrHeap].Gen0End > mCurrObj);

        if (isGen0)
            CheckAllocAndSegmentRange();

        // Check to see if we've moved off the end of a segment
        if (mCurrObj >= mHeaps[mCurrHeap].Segments[mCurrSeg].End || mCurrObj > mEnd)
        {
            HRESULT hr = NextSegment();
            if (FAILED(hr) || hr == S_FALSE)
                return hr;
        }

        // Get the method table pointer
        if (!mCache.ReadMT(mCurrObj, &mCurrMT))
            return E_FAIL;

        if (!GetSize(mCurrMT, mCurrSize))
            return E_FAIL;
    } while (mCurrObj < mStart);

    _ASSERTE(mStart <= mCurrObj && mCurrObj <= mEnd);
    return S_OK;
}
// Computes the allocated size of the object at mCurrObj given its method table
// address, including component data for arrays/strings and generation-specific
// alignment. Returns false (instead of throwing) if the method table or target
// memory is unreadable, so the walker can skip to the next segment.
bool DacHeapWalker::GetSize(TADDR tMT, size_t &size)
{
    // With heap corruption, it's entierly possible that the MethodTable
    // we get is bad. This could cause exceptions, which we will catch
    // and return false. This causes the heapwalker to move to the next
    // segment.
    bool ret = true;
    EX_TRY
    {
        MethodTable *mt = PTR_MethodTable(tMT);
        size_t cs = mt->GetComponentSize();

        // Nonzero component size means a variable-length object (array/string):
        // multiply by the component count stored just after the MT pointer.
        if (cs)
        {
            DWORD tmp = 0;
            if (mCache.Read(mCurrObj+sizeof(TADDR), &tmp))
                cs *= tmp;
            else
                ret = false;
        }

        size = mt->GetBaseSize() + cs;

        // The size is not guaranteed to be aligned, we have to
        // do that ourself.
        // Generations 3 (LOH) and 4 (POH) use large-object alignment.
        if (mHeaps[mCurrHeap].Segments[mCurrSeg].Generation == 3
            || mHeaps[mCurrHeap].Segments[mCurrSeg].Generation == 4)
            size = AlignLarge(size);
        else
            size = Align(size);
    }
    EX_CATCH
    {
        ret = false;
    }
    EX_END_CATCH(SwallowAllExceptions)

    return ret;
}
// Moves the walk to the first object of the next non-empty segment (advancing
// across heaps as needed), skipping segments wholly outside [mStart, mEnd].
// Returns S_OK on success, S_FALSE when all segments are exhausted, and E_FAIL
// if the first object of a segment can't be read.
HRESULT DacHeapWalker::NextSegment()
{
    mCurrObj = 0;
    mCurrMT = 0;
    mCurrSize = 0;

    do
    {
        // Inner loop: advance the (heap, segment) cursor past empty segments.
        do
        {
            mCurrSeg++;
            while (mCurrSeg >= mHeaps[mCurrHeap].SegmentCount)
            {
                mCurrSeg = 0;
                mCurrHeap++;

                if (mCurrHeap >= mHeapCount)
                {
                    return S_FALSE;
                }
            }
        } while (mHeaps[mCurrHeap].Segments[mCurrSeg].Start >= mHeaps[mCurrHeap].Segments[mCurrSeg].End);

        mCurrObj = mHeaps[mCurrHeap].Segments[mCurrSeg].Start;

        // As in MoveToNextObject: skip allocation-context free space in gen0.
        bool isGen0 = IsRegionGCEnabled() ? (mHeaps[mCurrHeap].Segments[mCurrSeg].Generation == 0) :
                        (mHeaps[mCurrHeap].Gen0Start <= mCurrObj && mHeaps[mCurrHeap].Gen0End > mCurrObj);

        if (isGen0)
            CheckAllocAndSegmentRange();

        if (!mCache.ReadMT(mCurrObj, &mCurrMT))
        {
            return E_FAIL;
        }

        if (!GetSize(mCurrMT, mCurrSize))
        {
            return E_FAIL;
        }
    } while((mHeaps[mCurrHeap].Segments[mCurrSeg].Start > mEnd) || (mHeaps[mCurrHeap].Segments[mCurrSeg].End < mStart));

    return S_OK;
}
// If mCurrObj sits at the start of a thread allocation context (or the
// youngest generation's allocation context), skip past the context's free
// space: jump to its limit plus one minimum-sized object (the GC plants a
// free object of at least that size at the end of an alloc context).
void DacHeapWalker::CheckAllocAndSegmentRange()
{
    // Minimum object size: MT pointer + two pointer-sized fields.
    const size_t MinObjSize = sizeof(TADDR)*3;

    for (int i = 0; i < mThreadCount; ++i)
        if (mCurrObj == mAllocInfo[i].Ptr)
        {
            mCurrObj = mAllocInfo[i].Limit + Align(MinObjSize);
            break;
        }

    if (mCurrObj == mHeaps[mCurrHeap].YoungestGenPtr)
    {
        mCurrObj = mHeaps[mCurrHeap].YoungestGenLimit + Align(MinObjSize);
    }
}
// Initializes the walker: snapshots each thread's allocation context (so the
// walk can skip their free space), collects per-heap segment data (server or
// workstation flavor), and positions the cursor via Reset(start, end).
// Returns E_OUTOFMEMORY on allocation failure or the HRESULT of the heap-data
// collection / reset.
HRESULT DacHeapWalker::Init(CORDB_ADDRESS start, CORDB_ADDRESS end)
{
    // Collect information about the allocation contexts in the process.
    ThreadStore* threadStore = ThreadStore::s_pThreadStore;
    if (threadStore != NULL)
    {
        int count = (int)threadStore->ThreadCountInEE();
        mAllocInfo = new (nothrow) AllocInfo[count];
        if (mAllocInfo == NULL)
            return E_OUTOFMEMORY;

        Thread *thread = NULL;
        int j = 0;
        for (int i = 0; i < count; ++i)
        {
            // The thread or allocation context being null is troubling, but not fatal.
            // We may have stopped the process where the thread list or thread's alloc
            // context was in an inconsistent state. We will simply skip over affected
            // segments during the heap walk if we encounter problems due to this.
            thread = ThreadStore::GetThreadList(thread);
            if (thread == NULL)
                continue;

            gc_alloc_context *ctx = thread->GetAllocContext();
            if (ctx == NULL)
                continue;

            // Only record contexts that are actually in use (non-null alloc_ptr).
            if ((CORDB_ADDRESS)ctx->alloc_ptr != NULL)
            {
                mAllocInfo[j].Ptr = (CORDB_ADDRESS)ctx->alloc_ptr;
                mAllocInfo[j].Limit = (CORDB_ADDRESS)ctx->alloc_limit;
                j++;
            }
        }

        mThreadCount = j;
    }

#ifdef FEATURE_SVR_GC
    HRESULT hr = GCHeapUtilities::IsServerHeap() ? InitHeapDataSvr(mHeaps, mHeapCount) : InitHeapDataWks(mHeaps, mHeapCount);
#else
    HRESULT hr = InitHeapDataWks(mHeaps, mHeapCount);
#endif

    // Set up mCurrObj/mCurrMT.
    if (SUCCEEDED(hr))
        hr = Reset(start, end);

    // Collect information about GC heaps
    return hr;
}
// Repositions the walk on the first object within [start, end], starting from
// the first segment of the first heap. Requires that heap data has already
// been collected (mHeaps populated). Returns E_FAIL if the first object can't
// be read; otherwise the status of any advance needed to reach the range.
HRESULT DacHeapWalker::Reset(CORDB_ADDRESS start, CORDB_ADDRESS end)
{
    _ASSERTE(mHeaps);
    _ASSERTE(mHeapCount > 0);
    _ASSERTE(mHeaps[0].Segments);
    _ASSERTE(mHeaps[0].SegmentCount > 0);

    mStart = start;
    mEnd = end;

    // Set up first object
    mCurrObj = mHeaps[0].Segments[0].Start;
    mCurrMT = 0;
    mCurrSize = 0;
    mCurrHeap = 0;
    mCurrSeg = 0;

    HRESULT hr = S_OK;

    // it's possible the first segment is empty
    if (mCurrObj >= mHeaps[0].Segments[0].End)
        hr = MoveToNextObject();

    if (!mCache.ReadMT(mCurrObj, &mCurrMT))
        return E_FAIL;

    if (!GetSize(mCurrMT, mCurrSize))
        return E_FAIL;

    // Advance until the cursor lands inside the requested range.
    if (mCurrObj < mStart || mCurrObj > mEnd)
        hr = MoveToNextObject();

    return hr;
}
// Given an arbitrary address, walks the containing segment to locate the
// object containing it, along with its predecessor and successor.
// Arguments:
//    obj         - address to locate
//    pPrev       - [out, optional] previous object's address (0 if none)
//    pContaining - [out, optional] address of the object containing obj
//    pNext       - [out, optional] next object's address (0 if at segment end)
// Returns E_FAIL if obj isn't in any known segment or no containing object is found.
HRESULT DacHeapWalker::ListNearObjects(CORDB_ADDRESS obj, CORDB_ADDRESS *pPrev, CORDB_ADDRESS *pContaining, CORDB_ADDRESS *pNext)
{
    SegmentData *seg = FindSegment(obj);

    if (seg == NULL)
        return E_FAIL;

    // Restrict the walk to the segment holding obj and scan forward.
    HRESULT hr = Reset(seg->Start, seg->End);

    if (SUCCEEDED(hr))
    {
        CORDB_ADDRESS prev = 0;
        CORDB_ADDRESS curr = 0;
        ULONG64 size = 0;
        bool found = false;

        // Walk until we hit the object whose [curr, curr+size) contains obj.
        while (!found && HasMoreObjects())
        {
            prev = curr;
            hr = Next(&curr, NULL, &size);
            if (FAILED(hr))
                break;

            if (obj >= curr && obj < curr + size)
                found = true;
        }

        if (found)
        {
            if (pPrev)
                *pPrev = prev;

            if (pContaining)
                *pContaining = curr;

            if (pNext)
            {
                // Note: the walker has already advanced past the containing
                // object, so the walker's current object is the "next" one.
                if (HasMoreObjects())
                {
                    hr = Next(&curr, NULL, NULL);
                    if (SUCCEEDED(hr))
                        *pNext = curr;
                }
                else
                {
                    *pNext = 0;
                }
            }

            hr = S_OK;
        }
        else if (SUCCEEDED(hr))
        {
            hr = E_FAIL;
        }
    }

    return hr;
}
// Collects workstation-GC heap data from the DAC globals into a single
// HeapData entry: youngest-generation allocation context, gen0/gen1 bounds
// (non-regions only), and one SegmentData per GC segment across gen0-2, LOH
// and POH. Under regions each generation has its own segment list; under the
// classic layout gen0/1/2 share the ephemeral segment.
// Returns E_OUTOFMEMORY on allocation failure, otherwise S_OK.
HRESULT DacHeapWalker::InitHeapDataWks(HeapData *&pHeaps, size_t &pCount)
{
    bool regions = IsRegionGCEnabled();

    // Scrape basic heap details
    pCount = 1;
    pHeaps = new (nothrow) HeapData[1];
    if (pHeaps == NULL)
        return E_OUTOFMEMORY;

    dac_generation gen0 = GenerationTableIndex(g_gcDacGlobals->generation_table, 0);
    dac_generation gen1 = GenerationTableIndex(g_gcDacGlobals->generation_table, 1);
    dac_generation gen2 = GenerationTableIndex(g_gcDacGlobals->generation_table, 2);
    dac_generation loh  = GenerationTableIndex(g_gcDacGlobals->generation_table, 3);
    dac_generation poh  = GenerationTableIndex(g_gcDacGlobals->generation_table, 4);
    pHeaps[0].YoungestGenPtr = (CORDB_ADDRESS)gen0.allocation_context.alloc_ptr;
    pHeaps[0].YoungestGenLimit = (CORDB_ADDRESS)gen0.allocation_context.alloc_limit;

    // Address-range generation bounds only exist in the non-regions layout.
    if (!regions)
    {
        pHeaps[0].Gen0Start = (CORDB_ADDRESS)gen0.allocation_start;
        pHeaps[0].Gen0End = (CORDB_ADDRESS)*g_gcDacGlobals->alloc_allocated;
        pHeaps[0].Gen1Start = (CORDB_ADDRESS)gen1.allocation_start;
    }

    // Segments
    int count = GetSegmentCount(loh.start_segment);
    count += GetSegmentCount(poh.start_segment);
    count += GetSegmentCount(gen2.start_segment);
    if (regions)
    {
        // With regions, gen0 and gen1 have their own segment (region) lists too.
        count += GetSegmentCount(gen1.start_segment);
        count += GetSegmentCount(gen0.start_segment);
    }

    pHeaps[0].SegmentCount = count;
    pHeaps[0].Segments = new (nothrow) SegmentData[count];
    if (pHeaps[0].Segments == NULL)
        return E_OUTOFMEMORY;

    DPTR(dac_heap_segment) seg;
    int i = 0;

    // Small object heap segments
    if (regions)
    {
        // Regions layout: each generation's regions are tagged with their
        // generation number; the ephemeral region of gen0 ends at alloc_allocated.
        seg = gen2.start_segment;
        for (; seg && (i < count); ++i)
        {
            pHeaps[0].Segments[i].Generation = 2;
            pHeaps[0].Segments[i].Start = (CORDB_ADDRESS)seg->mem;
            pHeaps[0].Segments[i].End = (CORDB_ADDRESS)seg->allocated;

            seg = seg->next;
        }
        seg = gen1.start_segment;
        for (; seg && (i < count); ++i)
        {
            pHeaps[0].Segments[i].Generation = 1;
            pHeaps[0].Segments[i].Start = (CORDB_ADDRESS)seg->mem;
            pHeaps[0].Segments[i].End = (CORDB_ADDRESS)seg->allocated;

            seg = seg->next;
        }
        seg = gen0.start_segment;
        for (; seg && (i < count); ++i)
        {
            pHeaps[0].Segments[i].Start = (CORDB_ADDRESS)seg->mem;
            if (seg.GetAddr() == (TADDR)*g_gcDacGlobals->ephemeral_heap_segment)
            {
                pHeaps[0].Segments[i].End = (CORDB_ADDRESS)*g_gcDacGlobals->alloc_allocated;
                pHeaps[0].EphemeralSegment = i;
            }
            else
            {
                pHeaps[0].Segments[i].End = (CORDB_ADDRESS)seg->allocated;
            }

            pHeaps[0].Segments[i].Generation = 0;
            seg = seg->next;
        }
    }
    else
    {
        // Classic layout: all SOH segments hang off gen2's list. The ephemeral
        // segment (which holds gen0/gen1 and possibly some gen2) is marked
        // Generation = 1, the rest Generation = 2.
        DPTR(dac_heap_segment) seg = gen2.start_segment;
        for (; seg && (i < count); ++i)
        {
            pHeaps[0].Segments[i].Start = (CORDB_ADDRESS)seg->mem;
            if (seg.GetAddr() == (TADDR)*g_gcDacGlobals->ephemeral_heap_segment)
            {
                pHeaps[0].Segments[i].End = (CORDB_ADDRESS)*g_gcDacGlobals->alloc_allocated;
                pHeaps[0].Segments[i].Generation = 1;
                pHeaps[0].EphemeralSegment = i;
            }
            else
            {
                pHeaps[0].Segments[i].End = (CORDB_ADDRESS)seg->allocated;
                pHeaps[0].Segments[i].Generation = 2;
            }

            seg = seg->next;
        }
    }

    // Large object heap segments
    seg = loh.start_segment;
    for (; seg && (i < count); ++i)
    {
        pHeaps[0].Segments[i].Generation = 3;
        pHeaps[0].Segments[i].Start = (CORDB_ADDRESS)seg->mem;
        pHeaps[0].Segments[i].End = (CORDB_ADDRESS)seg->allocated;

        seg = seg->next;
    }

    // Pinned object heap segments
    seg = poh.start_segment;
    for (; seg && (i < count); ++i)
    {
        pHeaps[0].Segments[i].Generation = 4;
        pHeaps[0].Segments[i].Start = (CORDB_ADDRESS)seg->mem;
        pHeaps[0].Segments[i].End = (CORDB_ADDRESS)seg->allocated;

        seg = seg->next;
    }

    _ASSERTE(count == i);

    return S_OK;
}
// DAC/DBI API
// Allocates and initializes a DacHeapWalker and hands it back to the caller
// as an opaque HeapWalkHandle. On initialization failure the walker is freed
// and the failure HRESULT is returned.
HRESULT DacDbiInterfaceImpl::CreateHeapWalk(IDacDbiInterface::HeapWalkHandle *pHandle)
{
    DD_ENTER_MAY_THROW;

    DacHeapWalker *pWalker = new (nothrow) DacHeapWalker;
    if (pWalker == NULL)
        return E_OUTOFMEMORY;

    HRESULT hr = pWalker->Init();
    if (FAILED(hr))
    {
        delete pWalker;
        return hr;
    }

    *pHandle = reinterpret_cast<HeapWalkHandle>(pWalker);
    return hr;
}
// DAC/DBI API
// Destroys a heap walker previously created by CreateHeapWalk.
// delete of a null pointer is harmless, so no explicit check is needed.
void DacDbiInterfaceImpl::DeleteHeapWalk(HeapWalkHandle handle)
{
    DD_ENTER_MAY_THROW;

    delete reinterpret_cast<DacHeapWalker*>(handle);
}
// DAC/DBI API
// Fetches up to 'count' objects from an in-progress heap walk, skipping free
// objects (those whose method table is the free-object MT).
// Arguments:
//    handle  - heap walk created by CreateHeapWalk
//    count   - capacity of 'objects'
//    objects - [out] receives address/type/size per live object
//    fetched - [out] number of entries written
// Returns S_OK if the buffer was filled, S_FALSE if the walk ended before
// filling it, or a failure HRESULT.
HRESULT DacDbiInterfaceImpl::WalkHeap(HeapWalkHandle handle,
                                      ULONG count,
                                      OUT COR_HEAPOBJECT * objects,
                                      OUT ULONG *fetched)
{
    DD_ENTER_MAY_THROW;

    if (fetched == NULL)
        return E_INVALIDARG;

    DacHeapWalker *walk = reinterpret_cast<DacHeapWalker*>(handle);

    *fetched = 0;

    if (!walk->HasMoreObjects())
        return S_FALSE;

    // Free-object method table: entries with this MT are free space, not objects.
    CORDB_ADDRESS freeMT = (CORDB_ADDRESS)g_pFreeObjectMethodTable.GetAddr();

    HRESULT hr = S_OK;
    CORDB_ADDRESS addr, mt;
    ULONG64 size;

    ULONG i = 0;
    while (i < count && walk->HasMoreObjects())
    {
        hr = walk->Next(&addr, &mt, &size);

        if (FAILED(hr))
            break;

        if (mt != freeMT)
        {
            objects[i].address = addr;
            objects[i].type.token1 = mt;
            objects[i].type.token2 = NULL;
            objects[i].size = size;
            i++;
        }
    }

    if (SUCCEEDED(hr))
        hr = (i < count) ? S_FALSE : S_OK;

    *fetched = i;
    return hr;
}
// DAC/DBI API
// Produces one COR_SEGMENT per (generation, address range) pair across all GC
// heaps. Under regions this is a direct copy of the collected segments; under
// the classic layout the ephemeral segment is split into its gen0, gen1 and
// (when present) gen2 portions.
HRESULT DacDbiInterfaceImpl::GetHeapSegments(OUT DacDbiArrayList<COR_SEGMENT> *pSegments)
{
    DD_ENTER_MAY_THROW;

    size_t heapCount = 0;
    HeapData *heaps = 0;

    bool region = IsRegionGCEnabled();

#ifdef FEATURE_SVR_GC
    HRESULT hr = GCHeapUtilities::IsServerHeap() ? DacHeapWalker::InitHeapDataSvr(heaps, heapCount) : DacHeapWalker::InitHeapDataWks(heaps, heapCount);
#else
    HRESULT hr = DacHeapWalker::InitHeapDataWks(heaps, heapCount);
#endif

    NewArrayHolder<HeapData> _heapHolder = heaps;

    // Count the number of segments to know how much to allocate.
    int total = 0;
    for (size_t i = 0; i < heapCount; ++i)
    {
        total += (int)heaps[i].SegmentCount;

        if (!region)
        {
            // SegmentCount is +1 due to the ephemeral segment containing more than one
            // generation (Gen1 + Gen0, and sometimes part of Gen2).
            total++;

            // It's possible that part of Gen2 lives on the ephemeral segment.  If so,
            // we need to add one more to the output.
            const size_t eph = heaps[i].EphemeralSegment;
            _ASSERTE(eph < heaps[i].SegmentCount);
            if (heaps[i].Segments[eph].Start != heaps[i].Gen1Start)
                total++;
        }
    }

    pSegments->Alloc(total);

    // Now walk all segments and write them to the array.
    int curr = 0;
    for (size_t i = 0; i < heapCount; ++i)
    {
        _ASSERTE(curr < total);
        if (!region)
        {
            // Generation 0 is not in the segment list.
            COR_SEGMENT &seg = (*pSegments)[curr++];
            seg.start = heaps[i].Gen0Start;
            seg.end = heaps[i].Gen0End;
            seg.type = CorDebug_Gen0;
            seg.heap = (ULONG)i;
        }

        for (size_t j = 0; j < heaps[i].SegmentCount; ++j)
        {
            if (region)
            {
                // Regions: segments already carry their generation; copy directly.
                _ASSERTE(curr < total);
                COR_SEGMENT &seg = (*pSegments)[curr++];
                seg.start = heaps[i].Segments[j].Start;
                seg.end = heaps[i].Segments[j].End;
                seg.type = (CorDebugGenerationTypes)heaps[i].Segments[j].Generation;
                seg.heap = (ULONG)i;
            }
            else if (heaps[i].Segments[j].Generation == 1)
            {
                // This is the ephemeral segment.  We have already written Gen0,
                // now write Gen1.
                _ASSERTE(heaps[i].Segments[j].Start <= heaps[i].Gen1Start);
                _ASSERTE(heaps[i].Segments[j].End > heaps[i].Gen1Start);

                {
                    _ASSERTE(curr < total);
                    COR_SEGMENT &seg = (*pSegments)[curr++];
                    seg.start = heaps[i].Gen1Start;
                    seg.end = heaps[i].Gen0Start;
                    seg.type = CorDebug_Gen1;
                    seg.heap = (ULONG)i;
                }

                // It's possible for Gen2 to take up a portion of the ephemeral segment.
                // We test for that here.
                if (heaps[i].Segments[j].Start != heaps[i].Gen1Start)
                {
                    _ASSERTE(curr < total);
                    COR_SEGMENT &seg = (*pSegments)[curr++];
                    seg.start = heaps[i].Segments[j].Start;
                    seg.end = heaps[i].Gen1Start;
                    seg.type = CorDebug_Gen2;
                    seg.heap = (ULONG)i;
                }
            }
            else
            {
                // Otherwise, we have a gen2 or gen3 (LOH) segment
                _ASSERTE(curr < total);
                COR_SEGMENT &seg = (*pSegments)[curr++];
                seg.start = heaps[i].Segments[j].Start;
                seg.end = heaps[i].Segments[j].End;

                _ASSERTE(heaps[i].Segments[j].Generation <= CorDebug_POH);
                seg.type = (CorDebugGenerationTypes)heaps[i].Segments[j].Generation;
                seg.heap = (ULONG)i;
            }
        }
    }

    _ASSERTE(total == curr);
    return hr;
}
// DAC/DBI API
// Heuristically validates that 'addr' points at a plausible managed object by
// checking the MethodTable <-> EEClass round-trip. Any exception while reading
// target memory is treated as "not valid".
bool DacDbiInterfaceImpl::IsValidObject(CORDB_ADDRESS addr)
{
    DD_ENTER_MAY_THROW;

    bool isValid = false;

    if (addr != 0 && addr != (CORDB_ADDRESS)-1)
    {
        EX_TRY
        {
            PTR_Object obj(TO_TADDR(addr));

            PTR_MethodTable mt = obj->GetMethodTable();
            PTR_EEClass cls = mt->GetClass();

            // Canonical MT: the class should point straight back at it.
            // Non-canonical MT: the class's canonical MT must agree on the class.
            if (mt == cls->GetMethodTable())
                isValid = true;
            else if (!mt->IsCanonicalMethodTable())
                isValid = cls->GetMethodTable()->GetClass() == cls;
        }
        EX_CATCH
        {
            isValid = false;
        }
        EX_END_CATCH(SwallowAllExceptions)
    }

    return isValid;
}
// DAC/DBI API
// Resolves the app domain, module and domain assembly that define the type of
// the object at 'addr'. Returns false for null/invalid addresses or when the
// type's domain is not an AppDomain; out-params are only written on success.
bool DacDbiInterfaceImpl::GetAppDomainForObject(CORDB_ADDRESS addr, OUT VMPTR_AppDomain * pAppDomain,
                                                OUT VMPTR_Module *pModule, OUT VMPTR_DomainAssembly *pDomainAssembly)
{
    DD_ENTER_MAY_THROW;

    if (addr == 0 || addr == (CORDB_ADDRESS)-1)
    {
        return false;
    }

    // Walk object -> MethodTable -> Module -> Assembly -> owning domain.
    PTR_Object obj(TO_TADDR(addr));
    MethodTable *mt = obj->GetMethodTable();

    PTR_Module module = mt->GetModule();
    PTR_Assembly assembly = module->GetAssembly();
    BaseDomain *baseDomain = assembly->GetDomain();

    if (baseDomain->IsAppDomain())
    {
        pAppDomain->SetDacTargetPtr(PTR_HOST_TO_TADDR(baseDomain->AsAppDomain()));
        pModule->SetDacTargetPtr(PTR_HOST_TO_TADDR(module));
        pDomainAssembly->SetDacTargetPtr(PTR_HOST_TO_TADDR(module->GetDomainAssembly()));
    }
    else
    {
        return false;
    }

    return true;
}
// DAC/DBI API
// Allocates and initializes a GC-reference walker covering stack roots, the
// finalizer queue and/or handle tables as requested, returning it as an
// opaque RefWalkHandle. On initialization failure the walker is freed.
HRESULT DacDbiInterfaceImpl::CreateRefWalk(OUT RefWalkHandle * pHandle, BOOL walkStacks, BOOL walkFQ, UINT32 handleWalkMask)
{
    DD_ENTER_MAY_THROW;

    DacRefWalker *pWalker = new (nothrow) DacRefWalker(this, walkStacks, walkFQ, handleWalkMask);
    if (pWalker == NULL)
        return E_OUTOFMEMORY;

    HRESULT hr = pWalker->Init();
    if (SUCCEEDED(hr))
    {
        *pHandle = reinterpret_cast<RefWalkHandle>(pWalker);
        return hr;
    }

    delete pWalker;
    return hr;
}
// DAC/DBI API
// Destroys a reference walker previously created by CreateRefWalk.
// delete of a null pointer is harmless, so no explicit check is needed.
void DacDbiInterfaceImpl::DeleteRefWalk(IN RefWalkHandle handle)
{
    DD_ENTER_MAY_THROW;

    delete reinterpret_cast<DacRefWalker*>(handle);
}
// DAC/DBI API
// Fetches the next batch of GC references from an in-progress reference walk.
// Validates the out-params before entering the DAC, then delegates to the
// walker's Next.
HRESULT DacDbiInterfaceImpl::WalkRefs(RefWalkHandle handle, ULONG count, OUT DacGcReference * objects, OUT ULONG *pFetched)
{
    if (objects == NULL || pFetched == NULL)
        return E_POINTER;

    DD_ENTER_MAY_THROW;

    DacRefWalker *pWalker = reinterpret_cast<DacRefWalker*>(handle);
    return (pWalker == NULL) ? E_INVALIDARG : pWalker->Next(count, objects, pFetched);
}
// DAC/DBI API
// Produces a COR_TYPEID for the object at dbgObj by reading its MethodTable
// pointer (the first pointer-sized field of the object) from the target.
HRESULT DacDbiInterfaceImpl::GetTypeID(CORDB_ADDRESS dbgObj, COR_TYPEID *pID)
{
    DD_ENTER_MAY_THROW;

    TADDR obj[3];
    ULONG32 read = 0;
    HRESULT hr = g_dacImpl->m_pTarget->ReadVirtual(dbgObj, (BYTE*)obj, sizeof(obj), &read);
    if (FAILED(hr))
        return hr;

    // Mask off the low bit of the MT pointer — presumably a GC/marking flag
    // bit that may be set in the object header word; verify against the GC's
    // object header layout.
    pID->token1 = (UINT64)(obj[0] & ~1);
    pID->token2 = 0;

    return hr;
}
// DAC/DBI API
// Produces a COR_TYPEID for a type handle. A COR_TYPEID is simply the target
// address of the type's MethodTable in token1, with token2 unused.
HRESULT DacDbiInterfaceImpl::GetTypeIDForType(VMPTR_TypeHandle vmTypeHandle, COR_TYPEID *pID)
{
    DD_ENTER_MAY_THROW;

    _ASSERTE(pID != NULL);
    _ASSERTE(!vmTypeHandle.IsNull());

    TypeHandle typeHandle = TypeHandle::FromPtr(vmTypeHandle.GetDacPtr());
    PTR_MethodTable pMethodTable = typeHandle.GetMethodTable();

    pID->token1 = pMethodTable.GetAddr();
    _ASSERTE(pID->token1 != 0);
    pID->token2 = 0;

    return S_OK;
}
// DAC/DBI API
// Fills 'layout' with per-instance-field descriptions (metadata token, offset,
// element type, and type id) for the type identified by 'id'.
// Arguments:
//    id           - COR_TYPEID whose token1 is the target address of the type's MethodTable
//    celt         - capacity of the 'layout' buffer, in elements
//    layout       - [out] receives up to 'celt' COR_FIELD entries
//    pceltFetched - [out] receives the number of entries actually written
// Return Value: S_OK if every field was returned, S_FALSE if the buffer was
//    too small (only 'celt' entries written), or a failure HRESULT.
HRESULT DacDbiInterfaceImpl::GetObjectFields(COR_TYPEID id, ULONG32 celt, COR_FIELD *layout, ULONG32 *pceltFetched)
{
    if (layout == NULL || pceltFetched == NULL)
        return E_POINTER;

    if (id.token1 == 0)
        return CORDBG_E_CLASS_NOT_LOADED;

    DD_ENTER_MAY_THROW;
    HRESULT hr = S_OK;

    TypeHandle typeHandle = TypeHandle::FromPtr(TO_TADDR(id.token1));

    if (typeHandle.IsTypeDesc())
        return E_INVALIDARG;

    ApproxFieldDescIterator fieldDescIterator(typeHandle.AsMethodTable(), ApproxFieldDescIterator::INSTANCE_FIELDS);

    ULONG32 cFields = fieldDescIterator.Count();

    // Note: a previous version special-cased layout == NULL here to return only
    // the field count, but layout is validated as non-null at the top of this
    // function, so that branch was unreachable and has been removed.

    if (celt < cFields)
    {
        cFields = celt;

        // we are returning less than the total
        hr = S_FALSE;
    }

    // Report the number of entries actually written (cFields), not the caller's
    // buffer capacity; the two differ when celt exceeds the real field count.
    // This must be non-null due to check at beginning of function.
    *pceltFetched = cFields;

    CorElementType componentType = typeHandle.AsMethodTable()->GetInternalCorElementType();
    BOOL fReferenceType = CorTypeInfo::IsObjRef_NoThrow(componentType);
    for (ULONG32 i = 0; i < cFields; ++i)
    {
        FieldDesc *pField = fieldDescIterator.Next();
        COR_FIELD* corField = layout + i;

        corField->token = pField->GetMemberDef();
        // For reference types, offsets are relative to the object start (past
        // the MT pointer); for value types they are relative to the fields.
        corField->offset = (ULONG32)pField->GetOffset() + (fReferenceType ? Object::GetOffsetOfFirstField() : 0);

        TypeHandle fieldHandle = pField->LookupFieldTypeHandle();
        if (fieldHandle.IsNull())
        {
            // Field type not loaded: report an empty id and element type 0.
            corField->id = {};
            corField->fieldType = (CorElementType)0;
        }
        else if (fieldHandle.IsByRef())
        {
            corField->fieldType = ELEMENT_TYPE_BYREF;
            // All ByRefs intentionally return IntPtr's MethodTable.
            corField->id.token1 = CoreLibBinder::GetElementType(ELEMENT_TYPE_I).GetAddr();
            corField->id.token2 = 0;
        }
        else
        {
            // Note that pointer types are handled in this path.
            // IntPtr's MethodTable is set for all pointer types and is expected.
            PTR_MethodTable mt = fieldHandle.GetMethodTable();
            corField->fieldType = mt->GetInternalCorElementType();
            corField->id.token1 = (ULONG64)mt.GetAddr();
            corField->id.token2 = 0;
        }
    }

    return hr;
}
// DAC/DBI API
// Fills pLayout with the layout of the type identified by 'id': parent type,
// base object size, instance field count, element type, and the unboxing
// offset (0 for reference types, one pointer for boxed value types).
HRESULT DacDbiInterfaceImpl::GetTypeLayout(COR_TYPEID id, COR_TYPE_LAYOUT *pLayout)
{
    if (pLayout == NULL)
        return E_POINTER;

    if (id.token1 == 0)
        return CORDBG_E_CLASS_NOT_LOADED;

    DD_ENTER_MAY_THROW;

    PTR_MethodTable mt = PTR_MethodTable(TO_TADDR(id.token1));
    PTR_MethodTable parentMT = mt->GetParentMethodTable();

    // Parent id is the parent's MT address (0 for System.Object / no parent).
    COR_TYPEID parent = {parentMT.GetAddr(), 0};
    pLayout->parentID = parent;

    DWORD size = mt->GetBaseSize();
    ApproxFieldDescIterator fieldDescIterator(mt, ApproxFieldDescIterator::INSTANCE_FIELDS);

    pLayout->objectSize = size;
    pLayout->numFields = fieldDescIterator.Count();

    // Get type
    CorElementType componentType = mt->IsString() ? ELEMENT_TYPE_STRING : mt->GetInternalCorElementType();
    pLayout->type = componentType;

    // Boxed value types store their payload after the MT pointer; reference
    // types have no box offset.
    pLayout->boxOffset = CorTypeInfo::IsObjRef_NoThrow(componentType) ? 0 : sizeof(TADDR);

    return S_OK;
}
// DAC/DBI API
// Fills pLayout with the in-memory layout of an array (or string, which is
// treated as a char array): component type/id, rank info, and the offsets of
// the element count, bounds and first element. Returns E_INVALIDARG if the
// type is neither a string nor an array.
HRESULT DacDbiInterfaceImpl::GetArrayLayout(COR_TYPEID id, COR_ARRAY_LAYOUT *pLayout)
{
    if (pLayout == NULL)
        return E_POINTER;

    if (id.token1 == 0)
        return CORDBG_E_CLASS_NOT_LOADED;

    DD_ENTER_MAY_THROW;

    PTR_MethodTable mt = PTR_MethodTable(TO_TADDR(id.token1));

    if (!mt->IsStringOrArray())
        return E_INVALIDARG;

    if (mt->IsString())
    {
        // Strings look like single-rank char arrays: a 4-byte length after the
        // MT pointer, then the character data.
        COR_TYPEID token;
        token.token1 = CoreLibBinder::GetElementType(ELEMENT_TYPE_CHAR).GetAddr();
        token.token2 = 0;
        pLayout->componentID = token;

        pLayout->rankSize = 4;
        pLayout->numRanks = 1;
        pLayout->rankOffset = sizeof(TADDR);
        pLayout->firstElementOffset = sizeof(TADDR) + 4;
        pLayout->countOffset = sizeof(TADDR);
        pLayout->componentType = ELEMENT_TYPE_CHAR;
        pLayout->elementSize = 2;
    }
    else
    {
        DWORD ranks = mt->GetRank();
        pLayout->rankSize = 4;
        pLayout->numRanks = ranks;

        // Multi-dim arrays carry bounds after the count; SZ arrays keep the
        // rank data right after the MT pointer.
        bool multiDim = (ranks > 1);
        pLayout->rankOffset = multiDim ? sizeof(TADDR)*2 : sizeof(TADDR);
        pLayout->countOffset = sizeof(TADDR);
        pLayout->firstElementOffset = ArrayBase::GetDataPtrOffset(mt);

        TypeHandle hnd = mt->GetArrayElementTypeHandle();
        PTR_MethodTable cmt = hnd.GetMethodTable();

        CorElementType componentType = cmt->GetInternalCorElementType();
        if ((UINT64)cmt.GetAddr() == (UINT64)g_pStringClass.GetAddr())
            componentType = ELEMENT_TYPE_STRING;

        COR_TYPEID token;
        token.token1 = cmt.GetAddr();  // This could be type handle
        token.token2 = 0;
        pLayout->componentID = token;

        pLayout->componentType = componentType;
        // Element size: pointer for references, primitive size for primitives,
        // otherwise the value type's instance-field size.
        if (CorTypeInfo::IsObjRef_NoThrow(componentType))
            pLayout->elementSize = sizeof(TADDR);
        else if (CorIsPrimitiveType(componentType))
            pLayout->elementSize = gElementTypeInfo[componentType].m_cbSize;
        else
            pLayout->elementSize = cmt->GetNumInstanceFieldBytes();
    }

    return S_OK;
}
// DAC/DBI API
// Fills pHeapInfo with general GC information: whether the GC data structures
// are currently in a walkable state, the GC flavor (workstation vs server),
// heap count, pointer size, and whether concurrent GC is enabled.
// (An unused local 'heapCount' was removed; nothing here enumerates heaps.)
void DacDbiInterfaceImpl::GetGCHeapInformation(COR_HEAPINFO * pHeapInfo)
{
    DD_ENTER_MAY_THROW;

    // A nonzero invalid count means a GC is in flight and the heap structures
    // cannot be trusted for walking.
    pHeapInfo->areGCStructuresValid = *g_gcDacGlobals->gc_structures_invalid_cnt == 0;

#ifdef FEATURE_SVR_GC
    if (GCHeapUtilities::IsServerHeap())
    {
        pHeapInfo->gcType = CorDebugServerGC;
        pHeapInfo->numHeaps = DacGetNumHeaps();
    }
    else
#endif
    {
        pHeapInfo->gcType = CorDebugWorkstationGC;
        pHeapInfo->numHeaps = 1;
    }

    pHeapInfo->pointerSize = sizeof(TADDR);
    pHeapInfo->concurrent = g_pConfig->GetGCconcurrent() ? TRUE : FALSE;
}
// DAC/DBI API
// Returns the target address of a PEAssembly's read/write metadata interface.
HRESULT DacDbiInterfaceImpl::GetPEFileMDInternalRW(VMPTR_PEAssembly vmPEAssembly, OUT TADDR* pAddrMDInternalRW)
{
    DD_ENTER_MAY_THROW;

    if (pAddrMDInternalRW == NULL)
        return E_INVALIDARG;

    *pAddrMDInternalRW = vmPEAssembly.GetDacPtr()->GetMDInternalRWAddress();
    return S_OK;
}
// DAC/DBI API (deprecated overload)
// Kept only for interface compatibility; asserts in checked builds because
// callers should use GetActiveRejitILCodeVersionNode instead.
HRESULT DacDbiInterfaceImpl::GetReJitInfo(VMPTR_Module vmModule, mdMethodDef methodTk, OUT VMPTR_ReJitInfo* pvmReJitInfo)
{
    DD_ENTER_MAY_THROW;
    _ASSERTE(!"You shouldn't be calling this - use GetActiveRejitILCodeVersionNode instead");
    return S_OK;
}
// DAC/DBI API
// Returns the active, profiler-supplied rejit IL code version node for a
// method, or the null VMPTR when there is no rejit-active IL body (including
// when rejit is compiled out).
HRESULT DacDbiInterfaceImpl::GetActiveRejitILCodeVersionNode(VMPTR_Module vmModule, mdMethodDef methodTk, OUT VMPTR_ILCodeVersionNode* pVmILCodeVersionNode)
{
    DD_ENTER_MAY_THROW;
    if (pVmILCodeVersionNode == NULL)
        return E_INVALIDARG;
#ifdef FEATURE_REJIT
    PTR_Module pModule = vmModule.GetDacPtr();
    CodeVersionManager * pCodeVersionManager = pModule->GetCodeVersionManager();
    // Be careful, there are two different definitions of 'active' being used here
    // For the CodeVersionManager, the active IL version is whatever one should be used in the next invocation of the method
    // 'rejit active' narrows that to only include rejit IL bodies where the profiler has already provided the definition
    // for the new IL (ilCodeVersion.GetRejitState()==ILCodeVersion::kStateActive). It is possible that the code version
    // manager's active IL version hasn't yet asked the profiler for the IL body to use, in which case we want to filter it
    // out from the return in this method.
    ILCodeVersion activeILVersion = pCodeVersionManager->GetActiveILCodeVersion(pModule, methodTk);
    if (activeILVersion.IsNull() || activeILVersion.IsDefaultVersion() || activeILVersion.GetRejitState() != ILCodeVersion::kStateActive)
    {
        // No profiler-supplied active rejit body; report the null pointer.
        pVmILCodeVersionNode->SetDacTargetPtr(0);
    }
    else
    {
        pVmILCodeVersionNode->SetDacTargetPtr(PTR_TO_TADDR(activeILVersion.AsNode()));
    }
#else
    _ASSERTE(!"You shouldn't be calling this - rejit is not supported in this build");
    pVmILCodeVersionNode->SetDacTargetPtr(0);
#endif
    return S_OK;
}
// Deprecated DacDbi entry point. The ReJIT inspection surface moved to the
// code-version APIs; callers should use GetNativeCodeVersionNode.
HRESULT DacDbiInterfaceImpl::GetReJitInfo(VMPTR_MethodDesc vmMethod, CORDB_ADDRESS codeStartAddress, OUT VMPTR_ReJitInfo* pvmReJitInfo)
{
    DD_ENTER_MAY_THROW;
    // Intentionally a no-op: assert in debug builds, succeed quietly in release.
    _ASSERTE(!"You shouldn't be calling this - use GetNativeCodeVersionNode instead");
    return S_OK;
}
// Returns (via pVmNativeCodeVersionNode) the NativeCodeVersionNode describing the
// native code of the given method that starts at codeStartAddress, or a null VMPTR
// when ReJIT support is compiled out.
//
// Return Value:
//    S_OK on success; E_INVALIDARG if the out-parameter is NULL.
HRESULT DacDbiInterfaceImpl::GetNativeCodeVersionNode(VMPTR_MethodDesc vmMethod, CORDB_ADDRESS codeStartAddress, OUT VMPTR_NativeCodeVersionNode* pVmNativeCodeVersionNode)
{
    DD_ENTER_MAY_THROW;

    if (pVmNativeCodeVersionNode == NULL)
    {
        return E_INVALIDARG;
    }

#ifdef FEATURE_REJIT
    // Look up the native code version by its start address through the method's
    // code version manager, then hand back the underlying node's target address.
    PTR_MethodDesc pMethodDesc = vmMethod.GetDacPtr();
    NativeCodeVersion version =
        pMethodDesc->GetCodeVersionManager()->GetNativeCodeVersion(pMethodDesc, (PCODE)codeStartAddress);
    pVmNativeCodeVersionNode->SetDacTargetPtr(PTR_TO_TADDR(version.AsNode()));
#else
    pVmNativeCodeVersionNode->SetDacTargetPtr(0);
#endif

    return S_OK;
}
// Deprecated DacDbi entry point. The ReJIT inspection surface moved to the
// ILCodeVersionNode-based APIs; callers should use GetILCodeVersionNode.
HRESULT DacDbiInterfaceImpl::GetSharedReJitInfo(VMPTR_ReJitInfo vmReJitInfo, OUT VMPTR_SharedReJitInfo* pvmSharedReJitInfo)
{
    DD_ENTER_MAY_THROW;
    // Intentionally a no-op: assert in debug builds, succeed quietly in release.
    // (Fixed the message: the replacement API is GetILCodeVersionNode; the old
    // text referenced a nonexistent "GetLCodeVersionNode".)
    _ASSERTE(!"You shouldn't be calling this - use GetILCodeVersionNode instead");
    return S_OK;
}
// Given a NativeCodeVersionNode, returns (via pVmILCodeVersionNode) the node for
// its corresponding IL code version, or a null VMPTR when that IL version is the
// default one (the default version has no explicit node).
//
// Return Value:
//    S_OK on success; E_INVALIDARG if the out-parameter is NULL.
HRESULT DacDbiInterfaceImpl::GetILCodeVersionNode(VMPTR_NativeCodeVersionNode vmNativeCodeVersionNode, VMPTR_ILCodeVersionNode* pVmILCodeVersionNode)
{
    DD_ENTER_MAY_THROW;

    if (pVmILCodeVersionNode == NULL)
    {
        return E_INVALIDARG;
    }

#ifdef FEATURE_REJIT
    // The default IL version is not backed by a node, so report it as null.
    ILCodeVersion ilVersion = vmNativeCodeVersionNode.GetDacPtr()->GetILCodeVersion();
    TADDR nodeAddr = ilVersion.IsDefaultVersion() ? 0 : PTR_TO_TADDR(ilVersion.AsNode());
    pVmILCodeVersionNode->SetDacTargetPtr(nodeAddr);
#else
    _ASSERTE(!"You shouldn't be calling this - rejit is not supported in this build");
    pVmILCodeVersionNode->SetDacTargetPtr(0);
#endif

    return S_OK;
}
// Deprecated DacDbi entry point. The ReJIT inspection surface moved to the
// ILCodeVersionNode-based APIs; callers should use GetILCodeVersionNodeData.
HRESULT DacDbiInterfaceImpl::GetSharedReJitInfoData(VMPTR_SharedReJitInfo vmSharedReJitInfo, DacSharedReJitInfo* pData)
{
    DD_ENTER_MAY_THROW;
    // Intentionally a no-op: assert in debug builds, succeed quietly in release.
    _ASSERTE(!"You shouldn't be calling this - use GetILCodeVersionNodeData instead");
    return S_OK;
}
// Extracts the debugger-relevant fields of an ILCodeVersionNode into a
// DacSharedReJitInfo: rejit state, IL body address, codegen (JIT) flags, and the
// instrumented IL offset map if one exists.
//
// Return Value:
//    S_OK on success; E_INVALIDARG if pData is NULL.
HRESULT DacDbiInterfaceImpl::GetILCodeVersionNodeData(VMPTR_ILCodeVersionNode vmILCodeVersionNode, DacSharedReJitInfo* pData)
{
    DD_ENTER_MAY_THROW;

    // Validate the out-parameter, for consistency with the other DacDbi getters
    // (e.g. GetActiveRejitILCodeVersionNode, GetNativeCodeVersionNode); previously
    // a NULL pData would have been dereferenced below.
    if (pData == NULL)
        return E_INVALIDARG;

#ifdef FEATURE_REJIT
    ILCodeVersion ilCode(vmILCodeVersionNode.GetDacPtr());
    pData->m_state = ilCode.GetRejitState();
    pData->m_pbIL = PTR_TO_CORDB_ADDRESS(dac_cast<ULONG_PTR>(ilCode.GetIL()));
    pData->m_dwCodegenFlags = ilCode.GetJitFlags();
    const InstrumentedILOffsetMapping* pMapping = ilCode.GetInstrumentedILMap();
    if (pMapping)
    {
        pData->m_cInstrumentedMapEntries = (ULONG)pMapping->GetCount();
        pData->m_rgInstrumentedMapEntries = PTR_TO_CORDB_ADDRESS(dac_cast<ULONG_PTR>(pMapping->GetOffsets()));
    }
    else
    {
        // No instrumented IL map: report an empty mapping.
        pData->m_cInstrumentedMapEntries = 0;
        pData->m_rgInstrumentedMapEntries = 0;
    }
#else
    _ASSERTE(!"You shouldn't be calling this - rejit isn't supported in this build");
#endif
    return S_OK;
}
// Returns (via pDefines) the bitfield of conditional-compilation defines recorded
// by the debuggee's runtime (Debugger::m_defines), so the debugger can detect
// layout-affecting build options.
//
// Return Value:
//    S_OK on success; E_INVALIDARG if the out-parameter is NULL.
HRESULT DacDbiInterfaceImpl::GetDefinesBitField(ULONG32 *pDefines)
{
    DD_ENTER_MAY_THROW;

    if (pDefines != NULL)
    {
        *pDefines = g_pDebugger->m_defines;
        return S_OK;
    }

    return E_INVALIDARG;
}
// Returns (via pMDStructuresVersion) the version number of the MethodDesc data
// structures recorded by the debuggee (Debugger::m_mdDataStructureVersion).
//
// Return Value:
//    S_OK on success; E_INVALIDARG if the out-parameter is NULL.
HRESULT DacDbiInterfaceImpl::GetMDStructuresVersion(ULONG32* pMDStructuresVersion)
{
    DD_ENTER_MAY_THROW;

    if (pMDStructuresVersion != NULL)
    {
        *pMDStructuresVersion = g_pDebugger->m_mdDataStructureVersion;
        return S_OK;
    }

    return E_INVALIDARG;
}
// Toggles the debugger's garbage-collection notification events by writing the
// Debugger::m_isGarbageCollectionEventsEnabled flag directly into the target
// process.  A missing g_pDebugger is treated as success (nothing to write).
//
// Arguments:
//    fEnable - TRUE to enable GC notification events, FALSE to disable them.
//
// Return Value:
//    S_OK on success; the failure HRESULT if the remote write throws.
HRESULT DacDbiInterfaceImpl::EnableGCNotificationEvents(BOOL fEnable)
{
    DD_ENTER_MAY_THROW;  // trailing ';' added for consistency with every other entry point

    HRESULT hr = S_OK;
    EX_TRY
    {
        if (g_pDebugger != NULL)
        {
            // Compute the target-process address of the flag, then write the new value.
            TADDR addr = PTR_HOST_MEMBER_TADDR(Debugger, g_pDebugger, m_isGarbageCollectionEventsEnabled);
            SafeWriteStructOrThrow<BOOL>(addr, &fEnable);
        }
    }
    EX_CATCH_HRESULT(hr);
    return hr;
}
// Constructs a combined GC-root walker.  The flags select which sources Next()
// will draw roots from: thread stacks (walkStacks), the finalizer queue (walkFQ),
// and handle-table handles whose kinds match handleMask (CorGCReferenceType bits).
// Per-source walkers start out NULL; call Init() before the first Next().
DacRefWalker::DacRefWalker(ClrDataAccess *dac, BOOL walkStacks, BOOL walkFQ, UINT32 handleMask)
  : mDac(dac), mWalkStacks(walkStacks), mWalkFQ(walkFQ), mHandleMask(handleMask), mStackWalker(NULL),
    mHandleWalker(NULL), mFQStart(PTR_NULL), mFQEnd(PTR_NULL), mFQCurr(PTR_NULL)
{
}
// Destructor - releases any in-flight per-source walkers via Clear().
DacRefWalker::~DacRefWalker()
{
    Clear();
}
// Prepares the walker: creates the handle walker when any handle kinds were
// requested, then positions the stack walker on the first thread when stack
// walking was requested.  Must be called before Next().
//
// Return Value:
//    S_OK / S_FALSE from the sub-walker initialization, or a failure HRESULT.
HRESULT DacRefWalker::Init()
{
    HRESULT status = S_OK;

    if (mHandleMask != 0)
    {
        // The allocation will throw on OOM, which is fine; it propagates out.
        mHandleWalker = new DacHandleWalker();
        status = mHandleWalker->Init(GetHandleWalkerMask());
    }

    if (FAILED(status) || !mWalkStacks)
        return status;

    return NextThread();
}
// Tears down any per-source walkers.  Safe to call repeatedly; members are reset
// to NULL so a later Init() starts fresh.  (delete on NULL is a no-op, so no
// explicit checks are required.)
void DacRefWalker::Clear()
{
    delete mStackWalker;
    mStackWalker = NULL;

    delete mHandleWalker;
    mHandleWalker = NULL;
}
// Translates the public CorGCReferenceType flag mask (mHandleMask) into the
// handle-table type mask consumed by DacHandleWalker::Init: one bit per HNDTYPE_*
// value, i.e. (1 << HNDTYPE_X) for each requested handle kind.
UINT32 DacRefWalker::GetHandleWalkerMask()
{
    UINT32 result = 0;
    if (mHandleMask & CorHandleStrong)
        result |= (1 << HNDTYPE_STRONG);
    if (mHandleMask & CorHandleStrongPinning)
        result |= (1 << HNDTYPE_PINNED);
    if (mHandleMask & CorHandleWeakShort)
        result |= (1 << HNDTYPE_WEAK_SHORT);
    if (mHandleMask & CorHandleWeakLong)
        result |= (1 << HNDTYPE_WEAK_LONG);
// Ref-counted and COM weak handle kinds only exist in builds with COM-style interop.
#if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS) || defined(FEATURE_OBJCMARSHAL)
    // Both ref-count flavors map onto the single HNDTYPE_REFCOUNTED table type.
    if ((mHandleMask & CorHandleWeakRefCount) || (mHandleMask & CorHandleStrongRefCount))
        result |= (1 << HNDTYPE_REFCOUNTED);
#endif // FEATURE_COMINTEROP || FEATURE_COMWRAPPERS || FEATURE_OBJCMARSHAL
#if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS)
    if (mHandleMask & CorHandleWeakNativeCom)
        result |= (1 << HNDTYPE_WEAK_NATIVE_COM);
#endif // FEATURE_COMINTEROP || FEATURE_COMWRAPPERS
    if (mHandleMask & CorHandleStrongDependent)
        result |= (1 << HNDTYPE_DEPENDENT);
    if (mHandleMask & CorHandleStrongAsyncPinned)
        result |= (1 << HNDTYPE_ASYNCPINNED);
    if (mHandleMask & CorHandleStrongSizedByref)
        result |= (1 << HNDTYPE_SIZEDREF);
    return result;
}
// Fetches the next batch of GC roots into roots[], drawing from the sources in
// order: handle table, finalizer queue, thread stacks.  *pceltFetched receives
// the number of entries written.
//
// Return Value:
//    S_OK when roots[] was filled completely; S_FALSE when fewer than celt roots
//    remained (enumeration complete); E_POINTER or other failure HRESULT otherwise.
HRESULT DacRefWalker::Next(ULONG celt, DacGcReference roots[], ULONG *pceltFetched)
{
    if (roots == NULL || pceltFetched == NULL)
        return E_POINTER;
    ULONG total = 0;
    HRESULT hr = S_OK;
    if (mHandleWalker)
    {
        hr = mHandleWalker->Next(celt, roots, &total);
        // S_FALSE means the handle walk is finished; tear the walker down so the
        // next call falls through to the remaining sources.  On failure we also
        // tear down, then propagate the error.
        if (hr == S_FALSE || FAILED(hr))
        {
            delete mHandleWalker;
            mHandleWalker = NULL;
            if (FAILED(hr))
                return hr;
        }
    }
    if (total < celt)
    {
        // Drain finalizer-queue entries.  NOTE(review): mFQCurr/mFQEnd start as
        // PTR_NULL and are not assigned in the code visible here - presumably
        // initialized elsewhere when mWalkFQ is set; confirm against the header.
        while (total < celt && mFQCurr < mFQEnd)
        {
            DacGcReference &ref = roots[total++];
            ref.vmDomain = VMPTR_AppDomain::NullPtr();
            ref.objHnd.SetDacTargetPtr(mFQCurr.GetAddr());
            ref.dwType = (DWORD)CorReferenceFinalizer;
            ref.i64ExtraData = 0;
            mFQCurr++;
        }
    }
    // Walk stacks one thread at a time until the buffer fills or threads run out.
    while (total < celt && mStackWalker)
    {
        ULONG fetched = 0;
        hr = mStackWalker->Next(celt-total, roots+total, &fetched);
        if (FAILED(hr))
            return hr;
        if (hr == S_FALSE)
        {
            // Current thread exhausted; advance.  NextThread leaves mStackWalker
            // NULL (and returns S_FALSE) when no threads remain, ending the loop.
            hr = NextThread();
            if (FAILED(hr))
                return hr;
        }
        total += fetched;
    }
    *pceltFetched = total;
    return total < celt ? S_FALSE : S_OK;
}
// Advances the stack walk to the next thread in the thread store, tearing down
// the walker for the thread just completed.
//
// Return Value:
//    S_FALSE when no threads remain (mStackWalker is left NULL); otherwise the
//    result of initializing the new thread's stack walker.
HRESULT DacRefWalker::NextThread()
{
    // Remember which thread was walked last (NULL means start of the list) and
    // dispose of its walker before moving on.
    Thread *pCurr = NULL;
    if (mStackWalker != NULL)
    {
        pCurr = mStackWalker->GetThread();
        delete mStackWalker;
        mStackWalker = NULL;
    }

    Thread *pNext = ThreadStore::GetThreadList(pCurr);
    if (pNext == NULL)
        return S_FALSE;  // enumeration complete

    mStackWalker = new DacStackReferenceWalker(mDac, pNext->GetOSThreadId());
    return mStackWalker->Init();
}
// Fetches up to celt handle-based roots into roots[], setting *pceltFetched to the
// count written.  Delegates to the shared DoHandleWalk template with the
// DAC-flavored per-handle callback (EnumCallbackDac).
HRESULT DacHandleWalker::Next(ULONG celt, DacGcReference roots[], ULONG *pceltFetched)
{
    SUPPORTS_DAC;
    if (roots == NULL || pceltFetched == NULL)
        return E_POINTER;
    return DoHandleWalk<DacGcReference, ULONG, DacHandleWalker::EnumCallbackDac>(celt, roots, pceltFetched);
}
// Per-handle callback invoked during a DAC handle-table walk.  Translates each
// handle into a DacGcReference record appended to the walk's chunk list.
//
// Arguments:
//    handle     - target address of the (unchecked) object handle
//    pExtraInfo - handle-type-specific extra data from the handle table (unused)
//    param1     - really a DacHandleWalkerParam* carrying the walk state
//    param2     - unused
void CALLBACK DacHandleWalker::EnumCallbackDac(PTR_UNCHECKED_OBJECTREF handle, uintptr_t *pExtraInfo, uintptr_t param1, uintptr_t param2)
{
    SUPPORTS_DAC;

    DacHandleWalkerParam *param = (DacHandleWalkerParam *)param1;
    HandleChunkHead *curr = param->Curr;

    // If we failed on a previous call (OOM) don't keep trying to allocate, it's not going to work.
    if (FAILED(param->Result))
        return;

    // We've moved past the size of the current chunk. We'll allocate a new chunk
    // and stuff the handles there. These are cleaned up by the destructor
    if (curr->Count >= (curr->Size/sizeof(DacGcReference)))
    {
        if (curr->Next == NULL)
        {
            HandleChunk *next = new (nothrow) HandleChunk;
            if (next != NULL)
            {
                curr->Next = next;
            }
            else
            {
                // Record the failure so subsequent callbacks bail out immediately.
                param->Result = E_OUTOFMEMORY;
                return;
            }
        }

        curr = param->Curr = param->Curr->Next;
    }

    // Fill the current handle.
    DacGcReference *dataArray = (DacGcReference*)curr->pData;
    DacGcReference &data = dataArray[curr->Count++];
    data.objHnd.SetDacTargetPtr(handle.GetAddr());
    data.vmDomain.SetDacTargetPtr(TO_TADDR(param->AppDomain));
    data.i64ExtraData = 0;

    unsigned int refCnt = 0;
    switch (param->Type)
    {
    case HNDTYPE_STRONG:
        data.dwType = (DWORD)CorHandleStrong;
        break;

    case HNDTYPE_PINNED:
        data.dwType = (DWORD)CorHandleStrongPinning;
        break;

    case HNDTYPE_WEAK_SHORT:
        data.dwType = (DWORD)CorHandleWeakShort;
        break;

    case HNDTYPE_WEAK_LONG:
        data.dwType = (DWORD)CorHandleWeakLong;
        break;

#if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS) || defined(FEATURE_OBJCMARSHAL)
    case HNDTYPE_REFCOUNTED:
        // Query the handle's reference count first, then classify: a ref-counted
        // handle acts as strong while its count is nonzero.  (Bug fix: the old
        // code tested data.i64ExtraData, which was unconditionally zeroed above,
        // so these handles were always reported as CorHandleWeakRefCount.)
        GetRefCountedHandleInfo((OBJECTREF)*handle, param->Type, &refCnt, NULL, NULL, NULL);
        data.i64ExtraData = refCnt;
        data.dwType = (DWORD)(refCnt != 0 ? CorHandleStrongRefCount : CorHandleWeakRefCount);
        break;
#endif // FEATURE_COMINTEROP || FEATURE_COMWRAPPERS || FEATURE_OBJCMARSHAL

#if defined(FEATURE_COMINTEROP) || defined(FEATURE_COMWRAPPERS)
    case HNDTYPE_WEAK_NATIVE_COM:
        data.dwType = (DWORD)CorHandleWeakNativeCom;
        break;
#endif // FEATURE_COMINTEROP || FEATURE_COMWRAPPERS

    case HNDTYPE_DEPENDENT:
        data.dwType = (DWORD)CorHandleStrongDependent;
        // For dependent handles the secondary object is the interesting payload.
        data.i64ExtraData = GetDependentHandleSecondary(handle.GetAddr()).GetAddr();
        break;

    case HNDTYPE_ASYNCPINNED:
        data.dwType = (DWORD)CorHandleStrongAsyncPinned;
        break;

    case HNDTYPE_SIZEDREF:
        data.dwType = (DWORD)CorHandleStrongSizedByref;
        break;
    }
}
// GC promote-style callback used while scanning a thread's stack in the DAC.
// Records each reported slot as a DacGcReference on the owning walker.
//
// Arguments:
//    hCallback - really a GCCONTEXT*, whose sc member is our DacScanContext
//    pObject   - the reported slot; loc.targetPtr distinguishes whether this is a
//                target-process address or a host-side pointer (see branches below)
//    flags     - GC_CALL_* flags; GC_CALL_INTERIOR marks an interior pointer
//    loc       - location of the slot in the target (register/stack)
void DacStackReferenceWalker::GCEnumCallbackDac(LPVOID hCallback, OBJECTREF *pObject, uint32_t flags, DacSlotLocation loc)
{
    GCCONTEXT *gcctx = (GCCONTEXT *)hCallback;
    DacScanContext *dsc = (DacScanContext*)gcctx->sc;
    CORDB_ADDRESS obj = 0;
    if (flags & GC_CALL_INTERIOR)
    {
        // Interior pointers don't reference an object header, so resolve them to
        // the containing object's start address via the heap walker.
        if (loc.targetPtr)
            obj = (CORDB_ADDRESS)(*PTR_PTR_Object((TADDR)pObject)).GetAddr();
        else
            obj = (CORDB_ADDRESS)TO_TADDR(pObject);
        HRESULT hr = dsc->pWalker->mHeap.ListNearObjects(obj, NULL, &obj, NULL);
        // If we failed don't add this instance to the list. ICorDebug doesn't handle invalid pointers
        // very well, and the only way the heap walker's ListNearObjects will fail is if we have heap
        // corruption...which ICorDebug doesn't deal with anyway.
        if (FAILED(hr))
            return;
    }
    // NOTE(review): GetNextObject appears to return NULL when the output buffer is
    // full, silently dropping this reference for the batch - confirm.
    DacGcReference *data = dsc->pWalker->GetNextObject<DacGcReference>(dsc);
    if (data != NULL)
    {
        data->vmDomain.SetDacTargetPtr(AppDomain::GetCurrentDomain().GetAddr());
        // NOTE(review): the low bit set on pObject below looks like a tag meaning
        // "this is an object address, not a handle" - confirm against consumers.
        if (obj)
            data->pObject = obj | 1;
        else if (loc.targetPtr)
            data->objHnd.SetDacTargetPtr(TO_TADDR(pObject));
        else
            data->pObject = pObject->GetAddr() | 1;
        data->dwType = CorReferenceStack;
        data->i64ExtraData = 0;
    }
}
// GC report-style callback used while scanning a thread's stack in the DAC.
// Records the reported object as a DacGcReference on the owning walker.
//
// Arguments:
//    ppObj - DAC pointer whose target address is the reported reference
//    sc    - really our DacScanContext
//    flags - GC_CALL_* flags; GC_CALL_INTERIOR marks an interior pointer
void DacStackReferenceWalker::GCReportCallbackDac(PTR_PTR_Object ppObj, ScanContext *sc, uint32_t flags)
{
    DacScanContext *dsc = (DacScanContext*)sc;
    // Target-process address represented by the reported slot.
    TADDR obj = ppObj.GetAddr();
    if (flags & GC_CALL_INTERIOR)
    {
        CORDB_ADDRESS fixed_addr = 0;
        // Resolve the interior pointer to the start of its containing object.
        HRESULT hr = dsc->pWalker->mHeap.ListNearObjects((CORDB_ADDRESS)obj, NULL, &fixed_addr, NULL);
        // If we failed don't add this instance to the list. ICorDebug doesn't handle invalid pointers
        // very well, and the only way the heap walker's ListNearObjects will fail is if we have heap
        // corruption...which ICorDebug doesn't deal with anyway.
        if (FAILED(hr))
            return;
        obj = TO_TADDR(fixed_addr);
    }
    // NOTE(review): GetNextObject appears to return NULL when the output buffer is
    // full, silently dropping this reference for the batch - confirm.
    DacGcReference *data = dsc->pWalker->GetNextObject<DacGcReference>(dsc);
    if (data != NULL)
    {
        data->vmDomain.SetDacTargetPtr(AppDomain::GetCurrentDomain().GetAddr());
        data->objHnd.SetDacTargetPtr(obj);
        data->dwType = CorReferenceStack;
        data->i64ExtraData = 0;
    }
}
// Fetches up to count stack-rooted references into stackRefs[], setting *pFetched
// to the number written.  Delegates to the shared DoStackWalk template with the
// DAC-flavored report/promote callbacks.
HRESULT DacStackReferenceWalker::Next(ULONG count, DacGcReference stackRefs[], ULONG *pFetched)
{
    // Validate out-parameters before doing any work.
    if (stackRefs == NULL || pFetched == NULL)
        return E_POINTER;

    return DoStackWalk<ULONG,
                       DacGcReference,
                       DacStackReferenceWalker::GCReportCallbackDac,
                       DacStackReferenceWalker::GCEnumCallbackDac>(count, stackRefs, pFetched);
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/vm/profilinghelper.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// ProfilingHelper.cpp
//
//
// Implementation of helper classes used for miscellaneous purposes within the profiling
// API
//
// ======================================================================================
//
// #LoadUnloadCallbackSynchronization
//
// There is synchronization around loading profilers, unloading profilers, and issuing
// callbacks to profilers, to ensure that we know when it's safe to detach profilers or
// to call into profilers. The synchronization scheme is intentionally lockless on the
// mainline path (issuing callbacks into the profiler), with heavy locking on the
// non-mainline path (loading / unloading profilers).
//
// PROTECTED DATA
//
// The synchronization protects the following data:
//
// * ProfilingAPIDetach::s_profilerDetachInfo
// * (volatile) g_profControlBlock.curProfStatus.m_profStatus
// * (volatile) g_profControlBlock.pProfInterface
// * latter implies the profiler DLL's load status is protected as well, as
// pProfInterface changes between non-NULL and NULL as a profiler DLL is
// loaded and unloaded, respectively.
//
// SYNCHRONIZATION COMPONENTS
//
// * Simple Crst: code:ProfilingAPIUtility::s_csStatus
// * Lockless, volatile per-thread counters: code:EvacuationCounterHolder
// * Profiler status transition invariants and CPU buffer flushing:
// code:CurrentProfilerStatus::Set
//
// WRITERS
//
// The above data is considered to be "written to" when a profiler is loaded or unloaded,
// or the status changes (see code:ProfilerStatus), or a request to detach the profiler
// is received (see code:ProfilingAPIDetach::RequestProfilerDetach), or the DetachThread
// consumes or modifies the contents of code:ProfilingAPIDetach::s_profilerDetachInfo.
// All these cases are serialized with each other by the simple Crst:
// code:ProfilingAPIUtility::s_csStatus
//
// READERS
//
// Readers are the mainline case and are lockless. A "reader" is anyone who wants to
// issue a profiler callback. Readers are scattered throughout the runtime, and have the
// following format:
// {
// BEGIN_PROFILER_CALLBACK(CORProfilerTrackAppDomainLoads());
// g_profControlBlock.pProfInterface->AppDomainCreationStarted(MyAppDomainID);
// END_PROFILER_CALLBACK();
// }
// The BEGIN / END macros do the following:
// * Evaluate the expression argument (e.g., CORProfilerTrackAppDomainLoads()). This is a
// "dirty read" as the profiler could be detached at any moment during or after that
// evaluation.
// * If true, push a code:EvacuationCounterHolder on the stack, which increments the
// per-thread evacuation counter (not interlocked).
// * Re-evaluate the expression argument. This time, it's a "clean read" (see below for
// why).
// * If still true, execute the statements inside the BEGIN/END block. Inside that block,
// the profiler is guaranteed to remain loaded, because the evacuation counter
// remains nonzero (again, see below).
// * Once the BEGIN/END block is exited, the evacuation counter is decremented, and the
// profiler is unpinned and allowed to detach.
//
// READER / WRITER COORDINATION
//
// The above ensures that a reader never touches g_profControlBlock.pProfInterface and
// all it embodies (including the profiler DLL code and callback implementations) unless
// the reader was able to increment its thread's evacuation counter AND re-verify that
// the profiler's status is still active (the status check is included in the macro's
// expression argument, such as CORProfilerTrackAppDomainLoads()).
//
// At the same time, a profiler DLL is never unloaded (nor
// g_profControlBlock.pProfInterface deleted and NULLed out) UNLESS the writer performs
// these actions:
// * (a) Set the profiler's status to a non-active state like kProfStatusDetaching or
// kProfStatusNone
// * (b) Call FlushProcessWriteBuffers()
// * (c) Grab thread store lock, iterate through all threads, and verify each per-thread
// evacuation counter is zero.
//
// The above steps are why it's considered a "clean read" if a reader first increments
// its evacuation counter and then checks the profiler status. Once the writer flushes
// the CPU buffers (b), the reader will see the updated status (from a) and know not to
// use g_profControlBlock.pProfInterface. And if the reader clean-reads the status before
// the buffers were flushed, then the reader will have incremented its evacuation counter
// first, which the writer will be sure to see in (c). For more details about how the
// evacuation counters work, see code:ProfilingAPIUtility::IsProfilerEvacuated.
//
#include "common.h"
#ifdef PROFILING_SUPPORTED
#include "eeprofinterfaces.h"
#include "eetoprofinterfaceimpl.h"
#include "eetoprofinterfaceimpl.inl"
#include "corprof.h"
#include "proftoeeinterfaceimpl.h"
#include "proftoeeinterfaceimpl.inl"
#include "profilinghelper.h"
#include "profilinghelper.inl"
#include "eemessagebox.h"
#ifdef FEATURE_PROFAPI_ATTACH_DETACH
#include "profdetach.h"
#endif // FEATURE_PROFAPI_ATTACH_DETACH
#include "utilcode.h"
#ifndef TARGET_UNIX
#include "securitywrapper.h"
#endif // !TARGET_UNIX
// ----------------------------------------------------------------------------
// CurrentProfilerStatus methods
//---------------------------------------------------------------------------------------
//
// Updates the value indicating the profiler's current status
//
// Arguments:
// profStatus - New value (from enum ProfilerStatus) to set.
//
// Notes:
// Sets the status under a lock, and performs a debug-only check to verify that the
// status transition is a legal one. Also performs a FlushStoreBuffers() after
// changing the status when necessary.
//
// Transitions the profiler's status to newProfStatus.  The transition is made
// under the status Crst, debug-checked against the legal state machine, and
// followed by a cross-CPU store-buffer flush for the states readers key off of.
// See #LoadUnloadCallbackSynchronization at the top of this file.
void CurrentProfilerStatus::Set(ProfilerStatus newProfStatus)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;
    _ASSERTE(ProfilingAPIUtility::GetStatusCrst() != NULL);
    {
        // Need to serialize attempts to transition the profiler status. For example, a
        // profiler in one thread could request a detach, while the CLR in another
        // thread is transitioning the profiler from kProfStatusInitializing* to
        // kProfStatusActive
        CRITSEC_Holder csh(ProfilingAPIUtility::GetStatusCrst());
        // Based on what the old status is, verify the new status is a legal transition.
        switch(m_profStatus)
        {
        default:
            _ASSERTE(!"Unknown ProfilerStatus");
            break;
        case kProfStatusNone:
            _ASSERTE((newProfStatus == kProfStatusPreInitialize) ||
                (newProfStatus == kProfStatusInitializingForStartupLoad) ||
                (newProfStatus == kProfStatusInitializingForAttachLoad));
            break;
        case kProfStatusDetaching:
            _ASSERTE(newProfStatus == kProfStatusNone);
            break;
        case kProfStatusInitializingForStartupLoad:
        case kProfStatusInitializingForAttachLoad:
            _ASSERTE((newProfStatus == kProfStatusActive) ||
                (newProfStatus == kProfStatusNone));
            break;
        case kProfStatusActive:
            _ASSERTE((newProfStatus == kProfStatusNone) ||
                (newProfStatus == kProfStatusDetaching));
            break;
        case kProfStatusPreInitialize:
            _ASSERTE((newProfStatus == kProfStatusNone) ||
                (newProfStatus == kProfStatusInitializingForStartupLoad) ||
                (newProfStatus == kProfStatusInitializingForAttachLoad));
            break;
        }
        // Commit the transition while still holding the status Crst.
        m_profStatus = newProfStatus;
    }
#if !defined(DACCESS_COMPILE)
    if (((newProfStatus == kProfStatusNone) ||
        (newProfStatus == kProfStatusDetaching) ||
        (newProfStatus == kProfStatusActive)))
    {
        // Flush the store buffers on all CPUs, to ensure other threads see that
        // g_profControlBlock.curProfStatus has changed. The important status changes to
        // flush are:
        //     * to kProfStatusNone or kProfStatusDetaching so other threads know to stop
        //         making calls into the profiler
        //     * to kProfStatusActive, to ensure callbacks can be issued by the time an
        //         attaching profiler receives ProfilerAttachComplete(), so the profiler
        //         can safely perform catchup at that time (see
        //         code:#ProfCatchUpSynchronization).
        //
        ::FlushProcessWriteBuffers();
    }
#endif // !defined(DACCESS_COMPILE)
}
//---------------------------------------------------------------------------------------
// ProfilingAPIUtility members
// See code:#LoadUnloadCallbackSynchronization.
// Definition of the status Crst cookie that serializes profiler load/unload and
// status transitions (see #LoadUnloadCallbackSynchronization above).
CRITSEC_COOKIE ProfilingAPIUtility::s_csStatus = NULL;
// ----------------------------------------------------------------------------
// ProfilingAPIUtility::AppendSupplementaryInformation
//
// Description:
// Helper to the event logging functions to append the process ID and string
// resource ID to the end of the message.
//
// Arguments:
// * iStringResource - [in] String resource ID to append to message.
// * pString - [in/out] On input, the string to log so far. On output, the original
// string with the process ID info appended.
//
// static
// Appends the standard "supplementary information" suffix (process ID and the
// originating string resource ID, formatted via IDS_PROF_SUPPLEMENTARY_INFO) to a
// message about to be logged.  Silently does nothing if the resource is missing.
void ProfilingAPIUtility::AppendSupplementaryInformation(int iStringResource, SString * pString)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        // This loads resource strings, which takes locks.
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;
    StackSString supplementaryInformation;
    if (!supplementaryInformation.LoadResource(
        CCompRC::Debugging,
        IDS_PROF_SUPPLEMENTARY_INFO
        ))
    {
        // Resource not found; should never happen.
        return;
    }
    pString->Append(W(" "));
    // Expand the suffix format string with the process ID and resource ID.
    pString->AppendPrintf(
        supplementaryInformation,
        GetCurrentProcessId(),
        iStringResource);
}
//---------------------------------------------------------------------------------------
//
// Helper function to log publicly-viewable errors about profiler loading and
// initialization.
//
//
// Arguments:
// * iStringResourceID - resource ID of string containing message to log
// * wEventType - same constant used in win32 to specify the type of event:
// usually EVENTLOG_ERROR_TYPE, EVENTLOG_WARNING_TYPE, or
// EVENTLOG_INFORMATION_TYPE
// * insertionArgs - 0 or more values to be inserted into the string to be logged
// (>0 only if iStringResourceID contains format arguments (%)).
//
// static
// Common worker for LogProfError/LogProfInfo: loads the resource string, expands
// it with the caller's insertion args, appends the supplementary suffix, then
// emits the message via ETW/EventPipe and OutputDebugString.
void ProfilingAPIUtility::LogProfEventVA(
    int iStringResourceID,
    WORD wEventType,
    va_list insertionArgs)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        // This loads resource strings, which takes locks.
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;
    StackSString messageFromResource;
    StackSString messageToLog;
    if (!messageFromResource.LoadResource(
        CCompRC::Debugging,
        iStringResourceID
        ))
    {
        // Resource not found; should never happen.
        return;
    }
    // Expand the loaded format string with the caller-supplied insertion args.
    messageToLog.VPrintf(messageFromResource, insertionArgs);
    AppendSupplementaryInformation(iStringResourceID, &messageToLog);
    // Write to ETW and EventPipe with the message
    FireEtwProfilerMessage(GetClrInstanceId(), messageToLog.GetUnicode());
    // Output debug strings for diagnostic messages.
    WszOutputDebugString(messageToLog);
}
// See code:ProfilingAPIUtility.LogProfEventVA for description of arguments.
// static
void ProfilingAPIUtility::LogProfError(int iStringResourceID, ...)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
// This loads resource strings, which takes locks.
CAN_TAKE_LOCK;
}
CONTRACTL_END;
va_list insertionArgs;
va_start(insertionArgs, iStringResourceID);
LogProfEventVA(
iStringResourceID,
EVENTLOG_ERROR_TYPE,
insertionArgs);
va_end(insertionArgs);
}
// See code:ProfilingAPIUtility.LogProfEventVA for description of arguments.
// static
void ProfilingAPIUtility::LogProfInfo(int iStringResourceID, ...)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
// This loads resource strings, which takes locks.
CAN_TAKE_LOCK;
}
CONTRACTL_END;
va_list insertionArgs;
va_start(insertionArgs, iStringResourceID);
LogProfEventVA(
iStringResourceID,
EVENTLOG_INFORMATION_TYPE,
insertionArgs);
va_end(insertionArgs);
}
#ifdef PROF_TEST_ONLY_FORCE_ELT
// Special forward-declarations of the profiling API's slow-path enter/leave/tailcall
// hooks. These need to be forward-declared here so that they may be referenced in
// InitializeProfiling() below solely for the debug-only, test-only code to allow
// enter/leave/tailcall to be turned on at startup without a profiler. See
// code:ProfControlBlock#TestOnlyELT
EXTERN_C void STDMETHODCALLTYPE ProfileEnterNaked(UINT_PTR clientData);
EXTERN_C void STDMETHODCALLTYPE ProfileLeaveNaked(UINT_PTR clientData);
EXTERN_C void STDMETHODCALLTYPE ProfileTailcallNaked(UINT_PTR clientData);
#endif //PROF_TEST_ONLY_FORCE_ELT
// ----------------------------------------------------------------------------
// ProfilingAPIUtility::InitializeProfiling
//
// This is the top-most level of profiling API initialization, and is called directly by
// EEStartupHelper() (in ceemain.cpp). This initializes internal structures relating to the
// Profiling API. This also orchestrates loading the profiler and initializing it (if
// its GUID is specified in the environment).
//
// Return Value:
// HRESULT indicating success or failure. This is generally very lenient about internal
// failures, as we don't want them to prevent the startup of the app:
// S_OK = Environment didn't request a profiler, or
// Environment did request a profiler, and it was loaded successfully
// S_FALSE = There was a problem loading the profiler, but that shouldn't prevent the app
// from starting up
// else (failure) = There was a serious problem that should be dealt with by the caller
//
// Notes:
// This function (or one of its callees) will log an error to the event log
// if there is a failure
//
// Assumptions:
// InitializeProfiling is called during startup, AFTER the host has initialized its
// settings and the config variables have been read, but BEFORE the finalizer thread
// has entered its first wait state. ASSERTs are placed in
// code:ProfilingAPIAttachDetach::Initialize (which is called by this function, and
// which depends on these assumptions) to verify.
// static
// Top-level Profiling API initialization, called from EEStartupHelper.  Sets up
// the global profapi state and attempts the various profiler-load mechanisms.
// Deliberately lenient: a failed profiler load never blocks app startup.
HRESULT ProfilingAPIUtility::InitializeProfiling()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        // This causes events to be logged, which loads resource strings,
        // which takes locks.
        CAN_TAKE_LOCK;
        MODE_PREEMPTIVE;
    }
    CONTRACTL_END;
    InitializeLogging();
    // NULL out / initialize members of the global profapi structure
    g_profControlBlock.Init();
    // Try each profiler-load mechanism in turn: the classic startup environment
    // config, profilers whose startup load was deferred, and the profiler list.
    AttemptLoadProfilerForStartup();
    AttemptLoadDelayedStartupProfilers();
    AttemptLoadProfilerList();
    // For now, the return value from AttemptLoadProfilerForStartup is of no use to us.
    // Any event has been logged already by AttemptLoadProfilerForStartup, and
    // regardless of whether a profiler got loaded, we still need to continue.
#ifdef PROF_TEST_ONLY_FORCE_ELT
    // Test-only, debug-only code to enable ELT on startup regardless of whether a
    // startup profiler is loaded. See code:ProfControlBlock#TestOnlyELT.
    DWORD dwEnableSlowELTHooks = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TestOnlyEnableSlowELTHooks);
    if (dwEnableSlowELTHooks != 0)
    {
        (&g_profControlBlock)->fTestOnlyForceEnterLeave = TRUE;
        SetJitHelperFunction(CORINFO_HELP_PROF_FCN_ENTER, (void *) ProfileEnterNaked);
        SetJitHelperFunction(CORINFO_HELP_PROF_FCN_LEAVE, (void *) ProfileLeaveNaked);
        SetJitHelperFunction(CORINFO_HELP_PROF_FCN_TAILCALL, (void *) ProfileTailcallNaked);
        LOG((LF_CORPROF, LL_INFO10, "**PROF: Enabled test-only slow ELT hooks.\n"));
    }
#endif //PROF_TEST_ONLY_FORCE_ELT
#ifdef PROF_TEST_ONLY_FORCE_OBJECT_ALLOCATED
    // Test-only, debug-only code to enable ObjectAllocated callbacks on startup regardless of whether a
    // startup profiler is loaded. See code:ProfControlBlock#TestOnlyObjectAllocated.
    DWORD dwEnableObjectAllocated = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TestOnlyEnableObjectAllocatedHook);
    if (dwEnableObjectAllocated != 0)
    {
        (&g_profControlBlock)->fTestOnlyForceObjectAllocated = TRUE;
        LOG((LF_CORPROF, LL_INFO10, "**PROF: Enabled test-only object ObjectAllocated hooks.\n"));
    }
#endif //PROF_TEST_ONLY_FORCE_OBJECT_ALLOCATED
#ifdef _DEBUG
    // Test-only, debug-only code to allow attaching profilers to call the ICorProfilerInfo interface,
    // which would otherwise be disallowed for attaching profilers
    DWORD dwTestOnlyEnableICorProfilerInfo = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TestOnlyEnableICorProfilerInfo);
    if (dwTestOnlyEnableICorProfilerInfo != 0)
    {
        (&g_profControlBlock)->fTestOnlyEnableICorProfilerInfo = TRUE;
    }
#endif // _DEBUG
    return S_OK;
}
// ----------------------------------------------------------------------------
// ProfilingAPIUtility::ProfilerCLSIDFromString
//
// Description:
// Takes a string form of a CLSID (or progid, believe it or not), and returns the
// corresponding CLSID structure.
//
// Arguments:
// * wszClsid - [in / out] CLSID string to convert. This may also be a progid. This
// ensures our behavior is backward-compatible with previous CLR versions. I don't
// know why previous versions allowed the user to set a progid in the environment,
// but well whatever. On [out], this string is normalized in-place (e.g.,
// double-quotes around progid are removed).
// * pClsid - [out] CLSID structure corresponding to wszClsid
//
// Return Value:
// HRESULT indicating success or failure.
//
// Notes:
// * An event is logged if there is a failure.
//
// static
// Converts the string form of a CLSID (or, for backward compatibility, a ProgID)
// into a CLSID structure.  wszClsid is normalized in place (double-quotes are
// stripped from a ProgID).  Logs an event on failure.
HRESULT ProfilingAPIUtility::ProfilerCLSIDFromString(
    __inout_z LPWSTR wszClsid,
    CLSID * pClsid)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        // This causes events to be logged, which loads resource strings,
        // which takes locks.
        CAN_TAKE_LOCK;
        MODE_PREEMPTIVE;
    }
    CONTRACTL_END;
    _ASSERTE(wszClsid != NULL);
    _ASSERTE(pClsid != NULL);
    HRESULT hr;
    // Translate the string into a CLSID
    if (*wszClsid == W('{'))
    {
        // A leading brace means a stringized GUID, e.g. "{xxxxxxxx-...}".
        hr = IIDFromString(wszClsid, pClsid);
    }
    else
    {
#ifndef TARGET_UNIX
        WCHAR *szFrom, *szTo;
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable:26000) // "espX thinks there is an overflow here, but there isn't any"
#endif
        // Treat the string as a ProgID: strip any double-quote characters in
        // place (compacting the string), then look up the CLSID from the ProgID.
        for (szFrom=szTo=wszClsid; *szFrom; )
        {
            if (*szFrom == W('"'))
            {
                ++szFrom;
                continue;
            }
            *szTo++ = *szFrom++;
        }
        *szTo = 0;
        hr = CLSIDFromProgID(wszClsid, pClsid);
#ifdef _PREFAST_
#pragma warning(pop)
#endif /*_PREFAST_*/
#else // !TARGET_UNIX
        // ProgID not supported on TARGET_UNIX
        hr = E_INVALIDARG;
#endif // !TARGET_UNIX
    }
    if (FAILED(hr))
    {
        LOG((
            LF_CORPROF,
            LL_INFO10,
            "**PROF: Invalid CLSID or ProgID (%S). hr=0x%x.\n",
            wszClsid,
            hr));
        ProfilingAPIUtility::LogProfError(IDS_E_PROF_BAD_CLSID, wszClsid, hr);
        return hr;
    }
    return S_OK;
}
// ----------------------------------------------------------------------------
// ProfilingAPIUtility::AttemptLoadProfilerForStartup
//
// Description:
// Checks environment or registry to see if the app is configured to run with a
// profiler loaded on startup. If so, this calls LoadProfiler() to load it up.
//
// Arguments:
//
// Return Value:
// * S_OK: Startup-profiler has been loaded
// * S_FALSE: No profiler is configured for startup load
// * else, HRESULT indicating failure that occurred
//
// Assumptions:
// * This should be called on startup, after g_profControlBlock is initialized, but
// before any attach infrastructure is initialized. This ensures we don't receive
// an attach request while startup-loading a profiler.
//
// Notes:
// * This or its callees will ensure an event is logged on failure (though will be
// silent if no profiler is configured for startup load (which causes S_FALSE to
// be returned)
//
// static
HRESULT ProfilingAPIUtility::AttemptLoadProfilerForStartup()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        // This causes events to be logged, which loads resource strings,
        // which takes locks.
        CAN_TAKE_LOCK;
        MODE_PREEMPTIVE;
    }
    CONTRACTL_END;
    HRESULT hr;
    // Find out if profiling is enabled
    DWORD fProfEnabled = 0;
    fProfEnabled = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_ENABLE_PROFILING);
    NewArrayHolder<WCHAR> wszClsid(NULL);
    NewArrayHolder<WCHAR> wszProfilerDLL(NULL);
    CLSID *pClsid;
    CLSID clsid;
    if (fProfEnabled == 0)
    {
        LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiling not enabled.\n"));
        return S_FALSE;
    }
    LOG((LF_CORPROF, LL_INFO10, "**PROF: Initializing Profiling Services.\n"));
    IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER, &wszClsid));
    // Probe the DLL path variables in priority order: architecture-specific
    // (ARM64/ARM32) first, then bitness-specific (64/32), then the generic
    // CORECLR_PROFILER_PATH.
#if defined(TARGET_ARM64)
    IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER_PATH_ARM64, &wszProfilerDLL));
#elif defined(TARGET_ARM)
    IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER_PATH_ARM32, &wszProfilerDLL));
#endif
    if(wszProfilerDLL == NULL)
    {
#ifdef TARGET_64BIT
        IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER_PATH_64, &wszProfilerDLL));
#else
        IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER_PATH_32, &wszProfilerDLL));
#endif
        if(wszProfilerDLL == NULL)
        {
            IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER_PATH, &wszProfilerDLL));
        }
    }
    // If the environment variable doesn't exist, profiling is not enabled.
    if (wszClsid == NULL)
    {
        LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiling flag set, but required "
             "environment variable does not exist.\n"));
        LogProfError(IDS_E_PROF_NO_CLSID);
        return S_FALSE;
    }
    // Paths at or beyond MAX_LONGPATH are rejected up front.
    if ((wszProfilerDLL != NULL) && (wcslen(wszProfilerDLL) >= MAX_LONGPATH))
    {
        LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiling flag set, but COR_PROFILER_PATH was not set properly.\n"));
        LogProfError(IDS_E_PROF_BAD_PATH);
        return S_FALSE;
    }
#ifdef TARGET_UNIX
    // If the environment variable doesn't exist, profiling is not enabled.
    // (On Unix there is no registry lookup by CLSID, so the explicit DLL
    // path is mandatory.)
    if (wszProfilerDLL == NULL)
    {
        LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiling flag set, but required "
             "environment variable does not exist.\n"));
        LogProfError(IDS_E_PROF_BAD_PATH);
        return S_FALSE;
    }
#endif // TARGET_UNIX
    hr = ProfilingAPIUtility::ProfilerCLSIDFromString(wszClsid, &clsid);
    if (FAILED(hr))
    {
        // ProfilerCLSIDFromString already logged an event if there was a failure
        return hr;
    }
    pClsid = &clsid;
    hr = LoadProfiler(
        kStartupLoad,
        pClsid,
        wszClsid,
        wszProfilerDLL,
        NULL,   // No client data for startup load
        0);     // No client data for startup load
    if (FAILED(hr))
    {
        // A failure in either the CLR or the profiler prevented it from
        // loading. Event has been logged. Propagate hr
        return hr;
    }
    return S_OK;
}
//static
// Loads any profilers whose GUID/path pairs were stashed in
// g_profControlBlock.storedProfilers (delivered over the IPC channel before
// the runtime was ready to load them). Every entry is attempted even if an
// earlier one fails; returns the HRESULT of the last failure, else S_OK.
HRESULT ProfilingAPIUtility::AttemptLoadDelayedStartupProfilers()
{
    if (g_profControlBlock.storedProfilers.IsEmpty())
    {
        return S_OK;
    }
    HRESULT storedHr = S_OK;
    STOREDPROFILERLIST *profilers = &g_profControlBlock.storedProfilers;
    for (StoredProfilerNode* item = profilers->GetHead(); item != NULL; item = STOREDPROFILERLIST::GetNext(item))
    {
        LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiler loading from GUID/Path stored from the IPC channel."));
        CLSID *pClsid = &(item->guid);
        // Convert to string for logging
        // 39 = 38 chars of "{xxxxxxxx-...-xxxxxxxxxxxx}" + NUL terminator,
        // the fixed size StringFromGUID2 requires.
        constexpr size_t guidStringSize = 39;
        NewArrayHolder<WCHAR> wszClsid(new (nothrow) WCHAR[guidStringSize]);
        // GUIDs should always be the same number of characters...
        _ASSERTE(wszClsid != NULL);
        if (wszClsid != NULL)
        {
            StringFromGUID2(*pClsid, wszClsid, guidStringSize);
        }
        // Note: wszClsid may be NULL here on OOM; it is used only for event
        // log messages inside LoadProfiler, not for the actual load.
        HRESULT hr = LoadProfiler(
            kStartupLoad,
            pClsid,
            wszClsid,
            item->path.GetUnicode(),
            NULL,   // No client data for startup load
            0);     // No client data for startup load
        if (FAILED(hr))
        {
            // LoadProfiler logs if there is an error
            storedHr = hr;
        }
    }
    return storedHr;
}
// static
// Loads the list of notification-only profilers configured via
// CORECLR_ENABLE_NOTIFICATION_PROFILERS / CORECLR_NOTIFICATION_PROFILERS.
// The list format is "path={clsid};path={clsid};...". Every entry is
// attempted even if an earlier one fails; the HRESULT of the last failing
// entry (or S_OK) is returned. Returns S_OK when the feature is disabled or
// no list is configured.
HRESULT ProfilingAPIUtility::AttemptLoadProfilerList()
{
    HRESULT hr = S_OK;
    DWORD dwEnabled = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_ENABLE_NOTIFICATION_PROFILERS);
    if (dwEnabled == 0)
    {
        // Profiler list explicitly disabled, bail
        LogProfInfo(IDS_E_PROF_NOTIFICATION_DISABLED);
        return S_OK;
    }
    NewArrayHolder<WCHAR> wszProfilerList(NULL);
    // Probe in priority order: architecture-specific variable first, then
    // bitness-specific, then the generic one.
#if defined(TARGET_ARM64)
    CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_NOTIFICATION_PROFILERS_ARM64, &wszProfilerList);
#elif defined(TARGET_ARM)
    CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_NOTIFICATION_PROFILERS_ARM32, &wszProfilerList);
#endif
    if (wszProfilerList == NULL)
    {
#ifdef TARGET_64BIT
        CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_NOTIFICATION_PROFILERS_64, &wszProfilerList);
#else
        CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_NOTIFICATION_PROFILERS_32, &wszProfilerList);
#endif
        if (wszProfilerList == NULL)
        {
            CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_NOTIFICATION_PROFILERS, &wszProfilerList);
            if (wszProfilerList == NULL)
            {
                // No profiler list specified, bail
                return S_OK;
            }
        }
    }
    WCHAR *pOuter = NULL;
    WCHAR *pInner = NULL;
    WCHAR *currentSection = NULL;
    WCHAR *currentPath = NULL;
    WCHAR *currentGuid = NULL;
    HRESULT storedHr = S_OK;
    // Get each semicolon delimited config
    currentSection = wcstok_s(wszProfilerList, W(";"), &pOuter);
    while (currentSection != NULL)
    {
        // Parse this config "path={guid}"
        currentPath = wcstok_s(currentSection, W("="), &pInner);
        currentGuid = wcstok_s(NULL, W("="), &pInner);
        CLSID clsid;
        // Guard against a malformed entry (e.g. "path" with no "={guid}"):
        // wcstok_s returns NULL for the missing token, and
        // ProfilerCLSIDFromString asserts non-NULL and dereferences its
        // argument, so skip the entry instead of crashing.
        if ((currentPath == NULL) || (currentGuid == NULL))
        {
            LogProfError(IDS_E_PROF_NO_CLSID);
            storedHr = E_INVALIDARG;
            goto NextSection;
        }
        hr = ProfilingAPIUtility::ProfilerCLSIDFromString(currentGuid, &clsid);
        if (FAILED(hr))
        {
            // ProfilerCLSIDFromString already logged an event if there was a failure
            storedHr = hr;
            goto NextSection;
        }
        hr = LoadProfiler(
            kStartupLoad,
            &clsid,
            currentGuid,
            currentPath,
            NULL,   // No client data for startup load
            0);     // No client data for startup load
        if (FAILED(hr))
        {
            // LoadProfiler already logged if there was an error
            storedHr = hr;
            goto NextSection;
        }
NextSection:
        // Get next config
        currentSection = wcstok_s(NULL, W(";"), &pOuter);
    }
    return storedHr;
}
//---------------------------------------------------------------------------------------
//
// Performs lazy initialization that need not occur on startup, but does need to occur
// before trying to load a profiler.
//
// Return Value:
// HRESULT indicating success or failure.
//
HRESULT ProfilingAPIUtility::PerformDeferredInit()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        CAN_TAKE_LOCK;
        MODE_ANY;
    }
    CONTRACTL_END;
#ifdef FEATURE_PROFAPI_ATTACH_DETACH
    // Initialize internal resources for detaching
    HRESULT hr = ProfilingAPIDetach::Initialize();
    if (FAILED(hr))
    {
        LOG((
            LF_CORPROF,
            LL_ERROR,
            "**PROF: Unable to initialize resources for detaching. hr=0x%x.\n",
            hr));
        return hr;
    }
#endif // FEATURE_PROFAPI_ATTACH_DETACH
    // Lazily create the critical section that guards profiler status. It is
    // reused across attach / detach cycles (TerminateProfiling intentionally
    // never destroys it), so only create it the first time through.
    if (s_csStatus == NULL)
    {
        s_csStatus = ClrCreateCriticalSection(
            CrstProfilingAPIStatus,
            (CrstFlags) (CRST_REENTRANCY | CRST_TAKEN_DURING_SHUTDOWN));
        if (s_csStatus == NULL)
        {
            return E_OUTOFMEMORY;
        }
    }
    return S_OK;
}
// static
// Shared pre-initialization for startup and attach loads: honors the
// ProfAPI_ProfilerCompatibilitySetting knob (startup only), allocates and
// initializes the ProfToEEInterfaceImpl, creates the detach thread (when
// attach/detach is enabled), and has pEEProf load the profiler DLL itself
// (without yet calling its Initialize() callback). On success, ownership of
// the ProfToEEInterfaceImpl has been transferred into pEEProf.
HRESULT ProfilingAPIUtility::DoPreInitialization(
    EEToProfInterfaceImpl *pEEProf,
    const CLSID *pClsid,
    LPCWSTR wszClsid,
    LPCWSTR wszProfilerDLL,
    LoadType loadType,
    DWORD dwConcurrentGCWaitTimeoutInMs)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        // This causes events to be logged, which loads resource strings,
        // which takes locks.
        CAN_TAKE_LOCK;
        MODE_ANY;
        PRECONDITION(pEEProf != NULL);
        PRECONDITION(pClsid != NULL);
        PRECONDITION(wszClsid != NULL);
    }
    CONTRACTL_END;
    _ASSERTE(s_csStatus != NULL);
    // Default: V2-only profilers are rejected unless the user opts in below.
    ProfilerCompatibilityFlag profilerCompatibilityFlag = kDisableV2Profiler;
    NewArrayHolder<WCHAR> wszProfilerCompatibilitySetting(NULL);
    if (loadType == kStartupLoad)
    {
        CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ProfAPI_ProfilerCompatibilitySetting, &wszProfilerCompatibilitySetting);
        if (wszProfilerCompatibilitySetting != NULL)
        {
            if (SString::_wcsicmp(wszProfilerCompatibilitySetting, W("EnableV2Profiler")) == 0)
            {
                profilerCompatibilityFlag = kEnableV2Profiler;
            }
            else if (SString::_wcsicmp(wszProfilerCompatibilitySetting, W("PreventLoad")) == 0)
            {
                profilerCompatibilityFlag = kPreventLoad;
            }
        }
        if (profilerCompatibilityFlag == kPreventLoad)
        {
            LOG((LF_CORPROF, LL_INFO10, "**PROF: COMPlus_ProfAPI_ProfilerCompatibilitySetting is set to PreventLoad. "
                 "Profiler will not be loaded.\n"));
            LogProfInfo(IDS_PROF_PROFILER_DISABLED,
                        CLRConfig::EXTERNAL_ProfAPI_ProfilerCompatibilitySetting.name,
                        wszProfilerCompatibilitySetting.GetValue(),
                        wszClsid);
            // Deliberately S_OK: the user asked us not to load, which is not
            // an error.
            return S_OK;
        }
    }
    HRESULT hr = S_OK;
    NewHolder<ProfToEEInterfaceImpl> pProfEE(new (nothrow) ProfToEEInterfaceImpl());
    if (pProfEE == NULL)
    {
        LOG((LF_CORPROF, LL_ERROR, "**PROF: Unable to allocate ProfToEEInterfaceImpl.\n"));
        LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, E_OUTOFMEMORY);
        return E_OUTOFMEMORY;
    }
    // Initialize the interface
    hr = pProfEE->Init();
    if (FAILED(hr))
    {
        LOG((LF_CORPROF, LL_ERROR, "**PROF: ProfToEEInterface::Init failed.\n"));
        LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, hr);
        return hr;
    }
    // Provide the newly created and inited interface
    LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiling code being provided with EE interface.\n"));
#ifdef FEATURE_PROFAPI_ATTACH_DETACH
    // We're about to load the profiler, so first make sure we successfully create the
    // DetachThread and abort the load of the profiler if we can't. This ensures we don't
    // load a profiler unless we're prepared to detach it later.
    hr = ProfilingAPIDetach::CreateDetachThread();
    if (FAILED(hr))
    {
        LOG((
            LF_CORPROF,
            LL_ERROR,
            "**PROF: Unable to create DetachThread. hr=0x%x.\n",
            hr));
        ProfilingAPIUtility::LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, hr);
        return hr;
    }
#endif // FEATURE_PROFAPI_ATTACH_DETACH
    // Initialize internal state of our EEToProfInterfaceImpl. This also loads the
    // profiler itself, but does not yet call its Initialize() callback
    hr = pEEProf->Init(pProfEE, pClsid, wszClsid, wszProfilerDLL, (loadType == kAttachLoad), dwConcurrentGCWaitTimeoutInMs);
    if (FAILED(hr))
    {
        LOG((LF_CORPROF, LL_ERROR, "**PROF: EEToProfInterfaceImpl::Init failed.\n"));
        // EEToProfInterfaceImpl::Init logs an event log error on failure
        return hr;
    }
    // EEToProfInterfaceImpl::Init takes over the ownership of pProfEE when Init succeeds, and
    // EEToProfInterfaceImpl::~EEToProfInterfaceImpl is responsible for releasing the resource pointed
    // by pProfEE. Calling SuppressRelease here is necessary to avoid double release that
    // the resource pointed by pProfEE are released by both pProfEE and pEEProf's destructor.
    pProfEE.SuppressRelease();
    pProfEE = NULL;
    if (loadType == kAttachLoad)  // V4 profiler from attach
    {
        // Profiler must support ICorProfilerCallback3 to be attachable
        if (!pEEProf->IsCallback3Supported())
        {
            LogProfError(IDS_E_PROF_NOT_ATTACHABLE, wszClsid);
            return CORPROF_E_PROFILER_NOT_ATTACHABLE;
        }
    }
    else if (!pEEProf->IsCallback3Supported()) // V2 profiler from startup
    {
        if (profilerCompatibilityFlag == kDisableV2Profiler)
        {
            LOG((LF_CORPROF, LL_INFO10, "**PROF: COMPlus_ProfAPI_ProfilerCompatibilitySetting is set to DisableV2Profiler (the default). "
                 "V2 profilers are not allowed, so that the configured V2 profiler is going to be unloaded.\n"));
            LogProfInfo(IDS_PROF_V2PROFILER_DISABLED, wszClsid);
            // S_OK by design: the profiler was rejected, but the process
            // continues without it.
            return S_OK;
        }
        _ASSERTE(profilerCompatibilityFlag == kEnableV2Profiler);
        LOG((LF_CORPROF, LL_INFO10, "**PROF: COMPlus_ProfAPI_ProfilerCompatibilitySetting is set to EnableV2Profiler. "
             "The configured V2 profiler is going to be initialized.\n"));
        LogProfInfo(IDS_PROF_V2PROFILER_ENABLED,
                    CLRConfig::EXTERNAL_ProfAPI_ProfilerCompatibilitySetting.name,
                    wszProfilerCompatibilitySetting.GetValue(),
                    wszClsid);
    }
    return hr;
}
// ----------------------------------------------------------------------------
// ProfilingAPIUtility::LoadProfiler
//
// Description:
// Outermost common code for loading the profiler DLL. Both startup and attach code
// paths use this.
//
// Arguments:
// * loadType - Startup load or attach load?
// * pClsid - Profiler's CLSID
// * wszClsid - Profiler's CLSID (or progid) in string form, for event log messages
// * wszProfilerDLL - Profiler's DLL path
// * pvClientData - For attach loads, this is the client data the trigger wants to
// pass to the profiler DLL
// * cbClientData - For attach loads, size of client data in bytes
// * dwConcurrentGCWaitTimeoutInMs - Time out for wait operation on concurrent GC. Attach scenario only
//
// Return Value:
// HRESULT indicating success or failure of the load
//
// Notes:
// * On failure, this function or a callee will have logged an event
//
// static
HRESULT ProfilingAPIUtility::LoadProfiler(
    LoadType loadType,
    const CLSID * pClsid,
    LPCWSTR wszClsid,
    LPCWSTR wszProfilerDLL,
    LPVOID pvClientData,
    UINT cbClientData,
    DWORD dwConcurrentGCWaitTimeoutInMs)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        // This causes events to be logged, which loads resource strings,
        // which takes locks.
        CAN_TAKE_LOCK;
        MODE_ANY;
    }
    CONTRACTL_END;
    if (g_fEEShutDown)
    {
        return CORPROF_E_RUNTIME_UNINITIALIZED;
    }
    // Valid loadType?
    _ASSERTE((loadType == kStartupLoad) || (loadType == kAttachLoad));
    // If a nonzero client data size is reported, there'd better be client data!
    _ASSERTE((cbClientData == 0) || (pvClientData != NULL));
    // Client data is currently only specified on attach
    _ASSERTE((pvClientData == NULL) || (loadType == kAttachLoad));
    ProfilerInfo profilerInfo;
    profilerInfo.Init();
    profilerInfo.inUse = TRUE;
    HRESULT hr = PerformDeferredInit();
    if (FAILED(hr))
    {
        LOG((
            LF_CORPROF,
            LL_ERROR,
            "**PROF: ProfilingAPIUtility::PerformDeferredInit failed. hr=0x%x.\n",
            hr));
        LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, hr);
        return hr;
    }
    {
        // Usually we need to take the lock when modifying profiler status, but at this
        // point no one else could have a pointer to this ProfilerInfo so we don't
        // need to synchronize. Once we store it in g_profControlBlock we need to.
        profilerInfo.curProfStatus.Set(kProfStatusPreInitialize);
    }
    NewHolder<EEToProfInterfaceImpl> pEEProf(new (nothrow) EEToProfInterfaceImpl());
    if (pEEProf == NULL)
    {
        LOG((LF_CORPROF, LL_ERROR, "**PROF: Unable to allocate EEToProfInterfaceImpl.\n"));
        LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, E_OUTOFMEMORY);
        return E_OUTOFMEMORY;
    }
    // Create the ProfToEE interface to provide to the profiling services
    hr = DoPreInitialization(pEEProf, pClsid, wszClsid, wszProfilerDLL, loadType, dwConcurrentGCWaitTimeoutInMs);
    if (FAILED(hr))
    {
        // DoPreInitialization has already logged the failure
        return hr;
    }
    {
        // Usually we need to take the lock when modifying profiler status, but at this
        // point no one else could have a pointer to this ProfilerInfo so we don't
        // need to synchronize. Once we store it in g_profControlBlock we need to.
        // We've successfully allocated and initialized the callback wrapper object and the
        // Info interface implementation objects. The profiler DLL is therefore also
        // successfully loaded (but not yet Initialized). Transfer ownership of the
        // callback wrapper object to globals (thus suppress a release when the local
        // vars go out of scope).
        //
        // Setting this state now enables us to call into the profiler's Initialize()
        // callback (which we do immediately below), and have it successfully call
        // back into us via the Info interface (ProfToEEInterfaceImpl) to perform its
        // initialization.
        profilerInfo.pProfInterface = pEEProf.GetValue();
        pEEProf.SuppressRelease();
        pEEProf = NULL;
        // Set global status to reflect the proper type of Init we're doing (attach vs
        // startup)
        profilerInfo.curProfStatus.Set(
            (loadType == kStartupLoad) ?
                kProfStatusInitializingForStartupLoad :
                kProfStatusInitializingForAttachLoad);
    }
    ProfilerInfo *pProfilerInfo = NULL;
    {
        // Now we register the profiler, from this point on we need to worry about
        // synchronization
        CRITSEC_Holder csh(s_csStatus);
        // Check if this profiler is notification only and load as appropriate
        BOOL notificationOnly = FALSE;
        {
            // NOTE: original code had a mis-encoded "&notificationOnly"
            // argument here ("&not" corrupted into a logical-not glyph);
            // fixed to pass the address of the local.
            HRESULT callHr = profilerInfo.pProfInterface->LoadAsNotificationOnly(&notificationOnly);
            if (FAILED(callHr))
            {
                notificationOnly = FALSE;
            }
        }
        if (notificationOnly)
        {
            pProfilerInfo = g_profControlBlock.FindNextFreeProfilerInfoSlot();
            if (pProfilerInfo == NULL)
            {
                LogProfError(IDS_E_PROF_NOTIFICATION_LIMIT_EXCEEDED);
                return CORPROF_E_PROFILER_ALREADY_ACTIVE;
            }
        }
        else
        {
            // "main" profiler, there can only be one
            if (g_profControlBlock.mainProfilerInfo.curProfStatus.Get() != kProfStatusNone)
            {
                LogProfError(IDS_PROF_ALREADY_LOADED);
                return CORPROF_E_PROFILER_ALREADY_ACTIVE;
            }
            // This profiler cannot be a notification only profiler
            pProfilerInfo = &(g_profControlBlock.mainProfilerInfo);
        }
        // Publish the locally-built state into the chosen global slot.
        pProfilerInfo->curProfStatus.Set(profilerInfo.curProfStatus.Get());
        pProfilerInfo->pProfInterface = profilerInfo.pProfInterface;
        pProfilerInfo->pProfInterface->SetProfilerInfo(pProfilerInfo);
        pProfilerInfo->inUse = TRUE;
    }
    // Now that the profiler is officially loaded and in Init status, call into the
    // profiler's appropriate Initialize() callback. Note that if the profiler fails this
    // call, we should abort the rest of the profiler loading, and reset our state so we
    // appear as if we never attempted to load the profiler.
    if (loadType == kStartupLoad)
    {
        // This EvacuationCounterHolder is just to make asserts in EEToProfInterfaceImpl happy.
        // Using it like this without the dirty read/evac counter increment/clean read pattern
        // is not safe generally, but in this specific case we can skip all that since we haven't
        // published it yet, so we are the only thread that can access it.
        EvacuationCounterHolder holder(pProfilerInfo);
        hr = pProfilerInfo->pProfInterface->Initialize();
    }
    else
    {
        // This EvacuationCounterHolder is just to make asserts in EEToProfInterfaceImpl happy.
        // Using it like this without the dirty read/evac counter increment/clean read pattern
        // is not safe generally, but in this specific case we can skip all that since we haven't
        // published it yet, so we are the only thread that can access it.
        EvacuationCounterHolder holder(pProfilerInfo);
        _ASSERTE(loadType == kAttachLoad);
        _ASSERTE(pProfilerInfo->pProfInterface->IsCallback3Supported());
        hr = pProfilerInfo->pProfInterface->InitializeForAttach(pvClientData, cbClientData);
    }
    if (FAILED(hr))
    {
        LOG((
            LF_CORPROF,
            LL_INFO10,
            "**PROF: Profiler failed its Initialize callback. hr=0x%x.\n",
            hr));
        // If we timed out due to waiting on concurrent GC to finish, it is very likely this is
        // the reason InitializeForAttach callback failed even though we cannot be sure and we
        // cannot assume hr is going to be CORPROF_E_TIMEOUT_WAITING_FOR_CONCURRENT_GC.
        // The best we can do in this case is to report this failure anyway.
        if (pProfilerInfo->pProfInterface->HasTimedOutWaitingForConcurrentGC())
        {
            ProfilingAPIUtility::LogProfError(IDS_E_PROF_TIMEOUT_WAITING_FOR_CONCURRENT_GC, dwConcurrentGCWaitTimeoutInMs, wszClsid);
        }
        // Check for known failure types, to customize the event we log
        if ((loadType == kAttachLoad) &&
            ((hr == CORPROF_E_PROFILER_NOT_ATTACHABLE) || (hr == E_NOTIMPL)))
        {
            _ASSERTE(pProfilerInfo->pProfInterface->IsCallback3Supported());
            // Profiler supports ICorProfilerCallback3, but explicitly doesn't support
            // Attach loading. So log specialized event
            LogProfError(IDS_E_PROF_NOT_ATTACHABLE, wszClsid);
            // Normalize (CORPROF_E_PROFILER_NOT_ATTACHABLE || E_NOTIMPL) down to
            // CORPROF_E_PROFILER_NOT_ATTACHABLE
            hr = CORPROF_E_PROFILER_NOT_ATTACHABLE;
        }
        else if (hr == CORPROF_E_PROFILER_CANCEL_ACTIVATION)
        {
            // Profiler didn't encounter a bad error, but is voluntarily choosing not to
            // profile this runtime. Profilers that need to set system environment
            // variables to be able to profile services may use this HRESULT to avoid
            // profiling all the other managed apps on the box.
            LogProfInfo(IDS_PROF_CANCEL_ACTIVATION, wszClsid);
        }
        else
        {
            LogProfError(IDS_E_PROF_INIT_CALLBACK_FAILED, wszClsid, hr);
        }
        // Profiler failed; reset everything. This will automatically reset
        // g_profControlBlock and will unload the profiler's DLL.
        TerminateProfiling(pProfilerInfo);
        return hr;
    }
#ifdef FEATURE_MULTICOREJIT
    // Disable multicore JIT when profiling is enabled
    if (pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_MONITOR_JIT_COMPILATION))
    {
        MulticoreJitManager::DisableMulticoreJit();
    }
#endif
    // Indicate that profiling is properly initialized. On an attach-load, this will
    // force a FlushStoreBuffers(), which is important for catch-up synchronization (see
    // code:#ProfCatchUpSynchronization)
    pProfilerInfo->curProfStatus.Set(kProfStatusActive);
    LOG((
        LF_CORPROF,
        LL_INFO10,
        "**PROF: Profiler successfully loaded and initialized.\n"));
    LogProfInfo(IDS_PROF_LOAD_COMPLETE, wszClsid);
    LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiler created and enabled.\n"));
    if (loadType == kStartupLoad)
    {
        // For startup profilers only: If the profiler is interested in tracking GC
        // events, then we must disable concurrent GC since concurrent GC can allocate
        // and kill objects without relocating and thus not doing a heap walk.
        if (pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_MONITOR_GC))
        {
            LOG((LF_CORPROF, LL_INFO10, "**PROF: Turning off concurrent GC at startup.\n"));
            // Previously we would use SetGCConcurrent(0) to indicate to the GC that it shouldn't even
            // attempt to use concurrent GC. The standalone GC feature create a cycle during startup,
            // where the profiler couldn't set startup flags for the GC. To overcome this, we call
            // TemporaryDisableConcurrentGC and never enable it again. This has a perf cost, since the
            // GC will create concurrent GC data structures, but it is acceptable in the context of
            // this kind of profiling.
            GCHeapUtilities::GetGCHeap()->TemporaryDisableConcurrentGC();
            LOG((LF_CORPROF, LL_INFO10, "**PROF: Concurrent GC has been turned off at startup.\n"));
        }
    }
    if (loadType == kAttachLoad)
    {
        // #ProfCatchUpSynchronization
        //
        // Now that callbacks are enabled (and all threads are aware), tell an attaching
        // profiler that it's safe to request catchup information.
        //
        // There's a race we're preventing that's worthwhile to spell out. An attaching
        // profiler should be able to get a COMPLETE set of data through the use of
        // callbacks unioned with the use of catch-up enumeration Info functions. To
        // achieve this, we must ensure that there is no "hole"--any new data the
        // profiler seeks must be available from a callback or a catch-up info function
        // (or both, as dupes are ok). That means that:
        //
        // * callbacks must be enabled on other threads NO LATER THAN the profiler begins
        //     requesting catch-up information on this thread
        //     * Abbreviate: callbacks <= catch-up.
        //
        // Otherwise, if catch-up < callbacks, then it would be possible to have this:
        //
        // * catch-up < new data arrives < callbacks.
        //
        // In this nightmare scenario, the new data would not be accessible from the
        // catch-up calls made by the profiler (cuz the profiler made the calls too
        // early) or the callbacks made into the profiler (cuz the callbacks were enabled
        // too late). That's a hole, and that's bad. So we ensure callbacks <= catch-up
        // by the following order of operations:
        //
        // * This thread:
        //     * a: Set (volatile) currentProfStatus = kProfStatusActive (done above) and
        //         event mask bits (profiler did this in Initialize() callback above,
        //         when it called SetEventMask)
        //     * b: Flush CPU buffers (done automatically when we set status to
        //         kProfStatusActive)
        //     * c: CLR->Profiler call: ProfilerAttachComplete() (below). Inside this
        //         call:
        //         * Profiler->CLR calls: Catch-up Info functions
        // * Other threads:
        //     * a: New data (thread, JIT info, etc.) is created
        //     * b: This new data is now available to a catch-up Info call
        //     * c: currentProfStatus & event mask bits are accurately visible to thread
        //         in determining whether to make a callback
        //     * d: Read currentProfStatus & event mask bits and make callback
        //         (CLR->Profiler) if necessary
        //
        // So as long as OtherThreads.c <= ThisThread.c we're ok. This means other
        // threads must be able to get a clean read of the (volatile) currentProfStatus &
        // event mask bits BEFORE this thread calls ProfilerAttachComplete(). Use of the
        // "volatile" keyword ensures that compiler optimizations and (w/ VC2005+
        // compilers) the CPU's instruction reordering optimizations at runtime are
        // disabled enough such that they do not hinder the order above. Use of
        // FlushStoreBuffers() ensures that multiple caches on multiple CPUs do not
        // hinder the order above (by causing other threads to get stale reads of the
        // volatiles).
        //
        // For more information about catch-up enumerations and exactly which entities,
        // and which stage of loading, are permitted to appear in the enumerations, see
        // code:ProfilerFunctionEnum::Init#ProfilerEnumGeneral
        {
            // This EvacuationCounterHolder is just to make asserts in EEToProfInterfaceImpl happy.
            // Using it like this without the dirty read/evac counter increment/clean read pattern
            // is not safe generally, but in this specific case we can skip all that since we haven't
            // published it yet, so we are the only thread that can access it.
            EvacuationCounterHolder holder(pProfilerInfo);
            pProfilerInfo->pProfInterface->ProfilerAttachComplete();
        }
    }
    return S_OK;
}
//---------------------------------------------------------------------------------------
//
// Performs the evacuation checks by grabbing the thread store lock, iterating through
// all EE Threads, and querying each one's evacuation counter. If they're all 0, the
// profiler is ready to be unloaded.
//
// Return Value:
// Nonzero iff the profiler is fully evacuated and ready to be unloaded.
//
// static
BOOL ProfilingAPIUtility::IsProfilerEvacuated(ProfilerInfo *pProfilerInfo)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;
    // Only meaningful once the profiler has been put into detaching state;
    // otherwise counters could still legitimately climb.
    _ASSERTE(pProfilerInfo->curProfStatus.Get() == kProfStatusDetaching);
    // Note that threads are still in motion as we check its evacuation counter.
    // This is ok, because we've already changed the profiler status to
    // kProfStatusDetaching and flushed CPU buffers. So at this point the counter
    // will typically only go down to 0 (and not increment anymore), with one
    // small exception (below). So if we get a read of 0 below, the counter will
    // typically stay there. Specifically:
    //     * Profiler is most likely not about to increment its evacuation counter
    //         from 0 to 1 because pThread sees that the status is
    //         kProfStatusDetaching.
    //     * Note that there is a small race where pThread might actually
    //         increment its evac counter from 0 to 1 (if it dirty-read the
    //         profiler status a tad too early), but that implies that when
    //         pThread rechecks the profiler status (clean read) then pThread
    //         will immediately decrement the evac counter back to 0 and avoid
    //         calling into the EEToProfInterfaceImpl pointer.
    //
    // (see
    // code:ProfilingAPIUtility::InitializeProfiling#LoadUnloadCallbackSynchronization
    // for details)Doing this under the thread store lock not only ensures we can
    // iterate through the Thread objects safely, but also forces us to serialize with
    // the GC. The latter is important, as server GC enters the profiler on non-EE
    // Threads, and so no evacuation counters might be incremented during server GC even
    // though control could be entering the profiler.
    {
        ThreadStoreLockHolder TSLockHolder;
        Thread * pThread = ThreadStore::GetAllThreadList(
            NULL,   // cursor thread; always NULL to begin with
            0,      // mask to AND with Thread::m_State to filter returned threads
            0);     // bits to match the result of the above AND.  (m_State & 0 == 0,
                    // so we won't filter out any threads)
        // Note that, by not filtering out any of the threads, we're intentionally including
        // stuff like TS_Dead or TS_Unstarted.  But that keeps us on the safe
        // side.  If an EE Thread object exists, we want to check its counters to be
        // absolutely certain it isn't executing in a profiler.
        while (pThread != NULL)
        {
            // Note that pThread is still in motion as we check its evacuation counter.
            // This is ok, because we've already changed the profiler status to
            // kProfStatusDetaching and flushed CPU buffers. So at this point the counter
            // will typically only go down to 0 (and not increment anymore), with one
            // small exception (below). So if we get a read of 0 below, the counter will
            // typically stay there. Specifically:
            //     * pThread is most likely not about to increment its evacuation counter
            //         from 0 to 1 because pThread sees that the status is
            //         kProfStatusDetaching.
            //     * Note that there is a small race where pThread might actually
            //         increment its evac counter from 0 to 1 (if it dirty-read the
            //         profiler status a tad too early), but that implies that when
            //         pThread rechecks the profiler status (clean read) then pThread
            //         will immediately decrement the evac counter back to 0 and avoid
            //         calling into the EEToProfInterfaceImpl pointer.
            //
            // (see
            // code:ProfilingAPIUtility::InitializeProfiling#LoadUnloadCallbackSynchronization
            // for details)
            DWORD dwEvacCounter = pThread->GetProfilerEvacuationCounter(pProfilerInfo->slot);
            if (dwEvacCounter != 0)
            {
                // Any nonzero counter means some thread may still be executing
                // inside this profiler's callbacks -- not evacuated yet.
                LOG((
                    LF_CORPROF,
                    LL_INFO100,
                    "**PROF: Profiler not yet evacuated because OS Thread ID 0x%x has evac counter of %d (decimal).\n",
                    pThread->GetOSThreadId(),
                    dwEvacCounter));
                return FALSE;
            }
            pThread = ThreadStore::GetAllThreadList(pThread, 0, 0);
        }
    }
    // FUTURE: When rejit feature crew complete, add code to verify all rejitted
    // functions are fully reverted and off of all stacks.  If this is very easy to
    // verify (e.g., checking a single value), consider putting it above the loop
    // above so we can early-out quicker if rejitted code is still around.
    // We got this far without returning, so the profiler is fully evacuated
    return TRUE;
}
//---------------------------------------------------------------------------------------
//
// This is the top-most level of profiling API teardown, and is called directly by
// EEShutDownHelper() (in ceemain.cpp). This cleans up internal structures relating to
// the Profiling API. If we're not in process teardown, then this also releases the
// profiler COM object and frees the profiler DLL
//
// static
void ProfilingAPIUtility::TerminateProfiling(ProfilerInfo *pProfilerInfo)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;

    if (IsAtProcessExit())
    {
        // We're tearing down the process so don't bother trying to clean everything up.
        // There's no reliable way to verify other threads won't be trying to re-enter
        // the profiler anyway, so cleaning up here could cause AVs.
        return;
    }

    // s_csStatus is created lazily by PerformDeferredInit() before any profiler can be
    // loaded, so it must exist by the time there is a profiler to terminate.
    _ASSERTE(s_csStatus != NULL);
    {
        // We're modifying status and possibly unloading the profiler DLL below, so
        // serialize this code with any other loading / unloading / detaching code.
        CRITSEC_Holder csh(s_csStatus);

#ifdef FEATURE_PROFAPI_ATTACH_DETACH
        if (pProfilerInfo->curProfStatus.Get() == kProfStatusDetaching && pProfilerInfo->pProfInterface.Load() != NULL)
        {
            // The profiler is still being referenced by
            // ProfilingAPIDetach::s_profilerDetachInfo, so don't try to release and
            // unload it. This can happen if Shutdown and Detach race, and Shutdown wins.
            // For example, we could be called as part of Shutdown, but the profiler
            // called RequestProfilerDetach near shutdown time as well (or even earlier
            // but remains un-evacuated as shutdown begins). Whatever the cause, just
            // don't unload the profiler here (as part of shutdown), and let the Detach
            // Thread deal with it (if it gets the chance).
            //
            // Note: Since this check occurs inside s_csStatus, we don't have to worry
            // that ProfilingAPIDetach::GetEEToProfPtr() will suddenly change during the
            // code below.
            //
            // FUTURE: For reattach-with-neutered-profilers feature crew, change the
            // above to scan through list of detaching profilers to make sure none of
            // them give a GetEEToProfPtr() equal to g_profControlBlock.pProfInterface.
            return;
        }
#endif // FEATURE_PROFAPI_ATTACH_DETACH

        if (pProfilerInfo->curProfStatus.Get() == kProfStatusActive)
        {
            pProfilerInfo->curProfStatus.Set(kProfStatusDetaching);

            // Profiler was active when TerminateProfiling() was called, so we're unloading
            // it due to shutdown. But other threads may still be trying to enter profiler
            // callbacks (e.g., ClassUnloadStarted() can get called during shutdown). Now
            // that the status has been changed to kProfStatusDetaching, no new threads will
            // attempt to enter the profiler. But use the detach evacuation counters to see
            // if other threads already began to enter the profiler.
            if (!ProfilingAPIUtility::IsProfilerEvacuated(pProfilerInfo))
            {
                // Other threads might be entering the profiler, so just skip cleanup
                // (leaving the status as kProfStatusDetaching).
                return;
            }
        }

        // If we have a profiler callback wrapper and / or info implementation
        // active, then terminate them.
        if (pProfilerInfo->pProfInterface.Load() != NULL)
        {
            // This destructor takes care of releasing the profiler's ICorProfilerCallback*
            // interface, and unloading the DLL when we're not in process teardown.
            delete pProfilerInfo->pProfInterface;
            pProfilerInfo->pProfInterface.Store(NULL);
        }

        // NOTE: Intentionally not destroying / NULLing s_csStatus. If
        // s_csStatus is already initialized, we can reuse it each time we do another
        // attach / detach, so no need to destroy it.

        // Attach/Load/Detach are all synchronized with the Status Crst, don't need to worry about races
        // If we disabled concurrent GC and somehow failed later during the initialization
        if (g_profControlBlock.fConcurrentGCDisabledForAttach.Load() && g_profControlBlock.IsMainProfiler(pProfilerInfo->pProfInterface))
        {
            g_profControlBlock.fConcurrentGCDisabledForAttach = FALSE;

            // We know for sure GC has been fully initialized as we've turned off concurrent GC before
            _ASSERTE(IsGarbageCollectorFullyInitialized());
            GCHeapUtilities::GetGCHeap()->TemporaryEnableConcurrentGC();
        }

        // #ProfileResetSessionStatus Reset all the status variables that are for the current
        // profiling attach session.
        // When you are adding new status in g_profControlBlock, you need to think about whether
        // your new status is per-session, or consistent across sessions
        pProfilerInfo->ResetPerSessionStatus();

        pProfilerInfo->curProfStatus.Set(kProfStatusNone);

        // Remove this slot from the global control block so it can be reused by a
        // future attach, and recompute the global event mask without this profiler.
        g_profControlBlock.DeRegisterProfilerInfo(pProfilerInfo);

        g_profControlBlock.UpdateGlobalEventMask();
    }
}
#endif // PROFILING_SUPPORTED
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// ProfilingHelper.cpp
//
//
// Implementation of helper classes used for miscellaneous purposes within the profiling
// API
//
// ======================================================================================
//
// #LoadUnloadCallbackSynchronization
//
// There is synchronization around loading profilers, unloading profilers, and issuing
// callbacks to profilers, to ensure that we know when it's safe to detach profilers or
// to call into profilers. The synchronization scheme is intentionally lockless on the
// mainline path (issuing callbacks into the profiler), with heavy locking on the
// non-mainline path (loading / unloading profilers).
//
// PROTECTED DATA
//
// The synchronization protects the following data:
//
// * ProfilingAPIDetach::s_profilerDetachInfo
// * (volatile) g_profControlBlock.curProfStatus.m_profStatus
// * (volatile) g_profControlBlock.pProfInterface
// * latter implies the profiler DLL's load status is protected as well, as
// pProfInterface changes between non-NULL and NULL as a profiler DLL is
// loaded and unloaded, respectively.
//
// SYNCHRONIZATION COMPONENTS
//
// * Simple Crst: code:ProfilingAPIUtility::s_csStatus
// * Lockless, volatile per-thread counters: code:EvacuationCounterHolder
// * Profiler status transition invariants and CPU buffer flushing:
// code:CurrentProfilerStatus::Set
//
// WRITERS
//
// The above data is considered to be "written to" when a profiler is loaded or unloaded,
// or the status changes (see code:ProfilerStatus), or a request to detach the profiler
// is received (see code:ProfilingAPIDetach::RequestProfilerDetach), or the DetachThread
// consumes or modifies the contents of code:ProfilingAPIDetach::s_profilerDetachInfo.
// All these cases are serialized with each other by the simple Crst:
// code:ProfilingAPIUtility::s_csStatus
//
// READERS
//
// Readers are the mainline case and are lockless. A "reader" is anyone who wants to
// issue a profiler callback. Readers are scattered throughout the runtime, and have the
// following format:
// {
// BEGIN_PROFILER_CALLBACK(CORProfilerTrackAppDomainLoads());
// g_profControlBlock.pProfInterface->AppDomainCreationStarted(MyAppDomainID);
// END_PROFILER_CALLBACK();
// }
// The BEGIN / END macros do the following:
// * Evaluate the expression argument (e.g., CORProfilerTrackAppDomainLoads()). This is a
// "dirty read" as the profiler could be detached at any moment during or after that
// evaluation.
// * If true, push a code:EvacuationCounterHolder on the stack, which increments the
// per-thread evacuation counter (not interlocked).
// * Re-evaluate the expression argument. This time, it's a "clean read" (see below for
// why).
// * If still true, execute the statements inside the BEGIN/END block. Inside that block,
// the profiler is guaranteed to remain loaded, because the evacuation counter
// remains nonzero (again, see below).
// * Once the BEGIN/END block is exited, the evacuation counter is decremented, and the
// profiler is unpinned and allowed to detach.
//
// READER / WRITER COORDINATION
//
// The above ensures that a reader never touches g_profControlBlock.pProfInterface and
// all it embodies (including the profiler DLL code and callback implementations) unless
// the reader was able to increment its thread's evacuation counter AND re-verify that
// the profiler's status is still active (the status check is included in the macro's
// expression argument, such as CORProfilerTrackAppDomainLoads()).
//
// At the same time, a profiler DLL is never unloaded (nor
// g_profControlBlock.pProfInterface deleted and NULLed out) UNLESS the writer performs
// these actions:
// * (a) Set the profiler's status to a non-active state like kProfStatusDetaching or
// kProfStatusNone
// * (b) Call FlushProcessWriteBuffers()
// * (c) Grab thread store lock, iterate through all threads, and verify each per-thread
// evacuation counter is zero.
//
// The above steps are why it's considered a "clean read" if a reader first increments
// its evacuation counter and then checks the profiler status. Once the writer flushes
// the CPU buffers (b), the reader will see the updated status (from a) and know not to
// use g_profControlBlock.pProfInterface. And if the reader clean-reads the status before
// the buffers were flushed, then the reader will have incremented its evacuation counter
// first, which the writer will be sure to see in (c). For more details about how the
// evacuation counters work, see code:ProfilingAPIUtility::IsProfilerEvacuated.
//
#include "common.h"
#ifdef PROFILING_SUPPORTED
#include "eeprofinterfaces.h"
#include "eetoprofinterfaceimpl.h"
#include "eetoprofinterfaceimpl.inl"
#include "corprof.h"
#include "proftoeeinterfaceimpl.h"
#include "proftoeeinterfaceimpl.inl"
#include "profilinghelper.h"
#include "profilinghelper.inl"
#include "eemessagebox.h"
#ifdef FEATURE_PROFAPI_ATTACH_DETACH
#include "profdetach.h"
#endif // FEATURE_PROFAPI_ATTACH_DETACH
#include "utilcode.h"
#ifndef TARGET_UNIX
#include "securitywrapper.h"
#endif // !TARGET_UNIX
// ----------------------------------------------------------------------------
// CurrentProfilerStatus methods
//---------------------------------------------------------------------------------------
//
// Updates the value indicating the profiler's current status
//
// Arguments:
// profStatus - New value (from enum ProfilerStatus) to set.
//
// Notes:
// Sets the status under a lock, and performs a debug-only check to verify that the
// status transition is a legal one. Also performs a FlushStoreBuffers() after
// changing the status when necessary.
//
void CurrentProfilerStatus::Set(ProfilerStatus newProfStatus)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;

    _ASSERTE(ProfilingAPIUtility::GetStatusCrst() != NULL);

    {
        // Need to serialize attempts to transition the profiler status.  For example, a
        // profiler in one thread could request a detach, while the CLR in another
        // thread is transitioning the profiler from kProfStatusInitializing* to
        // kProfStatusActive
        CRITSEC_Holder csh(ProfilingAPIUtility::GetStatusCrst());

        // Based on what the old status is, verify the new status is a legal transition.
        // (Debug-only check: the switch below only asserts; the assignment happens
        // unconditionally under the lock.)
        switch(m_profStatus)
        {
        default:
            _ASSERTE(!"Unknown ProfilerStatus");
            break;

        case kProfStatusNone:
            _ASSERTE((newProfStatus == kProfStatusPreInitialize) ||
                (newProfStatus == kProfStatusInitializingForStartupLoad) ||
                (newProfStatus == kProfStatusInitializingForAttachLoad));
            break;

        case kProfStatusDetaching:
            _ASSERTE(newProfStatus == kProfStatusNone);
            break;

        case kProfStatusInitializingForStartupLoad:
        case kProfStatusInitializingForAttachLoad:
            _ASSERTE((newProfStatus == kProfStatusActive) ||
                (newProfStatus == kProfStatusNone));
            break;

        case kProfStatusActive:
            _ASSERTE((newProfStatus == kProfStatusNone) ||
                (newProfStatus == kProfStatusDetaching));
            break;

        case kProfStatusPreInitialize:
            _ASSERTE((newProfStatus == kProfStatusNone) ||
                (newProfStatus == kProfStatusInitializingForStartupLoad) ||
                (newProfStatus == kProfStatusInitializingForAttachLoad));
            break;
        }

        m_profStatus = newProfStatus;
    }

#if !defined(DACCESS_COMPILE)
    if (((newProfStatus == kProfStatusNone) ||
         (newProfStatus == kProfStatusDetaching) ||
         (newProfStatus == kProfStatusActive)))
    {
        // Flush the store buffers on all CPUs, to ensure other threads see that
        // g_profControlBlock.curProfStatus has changed. The important status changes to
        // flush are:
        //     * to kProfStatusNone or kProfStatusDetaching so other threads know to stop
        //         making calls into the profiler
        //     * to kProfStatusActive, to ensure callbacks can be issued by the time an
        //         attaching profiler receives ProfilerAttachComplete(), so the profiler
        //         can safely perform catchup at that time (see
        //         code:#ProfCatchUpSynchronization).
        //
        ::FlushProcessWriteBuffers();
    }
#endif // !defined(DACCESS_COMPILE)
}
//---------------------------------------------------------------------------------------
// ProfilingAPIUtility members
// See code:#LoadUnloadCallbackSynchronization.
// Lazily-created critical section (see PerformDeferredInit) that serializes
// profiler status transitions and load / unload / attach / detach operations.
// Intentionally never destroyed so it can be reused across attach sessions.
CRITSEC_COOKIE ProfilingAPIUtility::s_csStatus = NULL;
// ----------------------------------------------------------------------------
// ProfilingAPIUtility::AppendSupplementaryInformation
//
// Description:
// Helper to the event logging functions to append the process ID and string
// resource ID to the end of the message.
//
// Arguments:
// * iStringResource - [in] String resource ID to append to message.
// * pString - [in/out] On input, the string to log so far. On output, the original
// string with the process ID info appended.
//
// static
void ProfilingAPIUtility::AppendSupplementaryInformation(int iStringResource, SString * pString)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;

        // This loads resource strings, which takes locks.
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;

    StackSString supplementaryInformation;

    if (!supplementaryInformation.LoadResource(
        CCompRC::Debugging,
        IDS_PROF_SUPPLEMENTARY_INFO
        ))
    {
        // Resource not found; should never happen.  Leave the message unmodified.
        return;
    }

    pString->Append(W("  "));
    // The supplementary-info resource string is a format string taking the
    // process ID and the original message's resource ID as insertion arguments.
    pString->AppendPrintf(
        supplementaryInformation,
        GetCurrentProcessId(),
        iStringResource);
}
//---------------------------------------------------------------------------------------
//
// Helper function to log publicly-viewable errors about profiler loading and
// initialization.
//
//
// Arguments:
// * iStringResourceID - resource ID of string containing message to log
// * wEventType - same constant used in win32 to specify the type of event:
// usually EVENTLOG_ERROR_TYPE, EVENTLOG_WARNING_TYPE, or
// EVENTLOG_INFORMATION_TYPE
// * insertionArgs - 0 or more values to be inserted into the string to be logged
// (>0 only if iStringResourceID contains format arguments (%)).
//
// static
void ProfilingAPIUtility::LogProfEventVA(
    int iStringResourceID,
    WORD wEventType,
    va_list insertionArgs)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;

        // This loads resource strings, which takes locks.
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;

    // NOTE(review): wEventType is not referenced in this body; it appears to be
    // retained for interface compatibility with the LogProfError / LogProfInfo
    // callers -- confirm before removing.
    StackSString messageFromResource;
    StackSString messageToLog;

    if (!messageFromResource.LoadResource(
        CCompRC::Debugging,
        iStringResourceID
        ))
    {
        // Resource not found; should never happen.
        return;
    }

    messageToLog.VPrintf(messageFromResource, insertionArgs);

    // Tack on the process ID / resource ID suffix so the message is self-identifying.
    AppendSupplementaryInformation(iStringResourceID, &messageToLog);

    // Write to ETW and EventPipe with the message
    FireEtwProfilerMessage(GetClrInstanceId(), messageToLog.GetUnicode());

    // Output debug strings for diagnostic messages.
    WszOutputDebugString(messageToLog);
}
// See code:ProfilingAPIUtility.LogProfEventVA for description of arguments.
// static
void ProfilingAPIUtility::LogProfError(int iStringResourceID, ...)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
// This loads resource strings, which takes locks.
CAN_TAKE_LOCK;
}
CONTRACTL_END;
va_list insertionArgs;
va_start(insertionArgs, iStringResourceID);
LogProfEventVA(
iStringResourceID,
EVENTLOG_ERROR_TYPE,
insertionArgs);
va_end(insertionArgs);
}
// See code:ProfilingAPIUtility.LogProfEventVA for description of arguments.
// static
void ProfilingAPIUtility::LogProfInfo(int iStringResourceID, ...)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
// This loads resource strings, which takes locks.
CAN_TAKE_LOCK;
}
CONTRACTL_END;
va_list insertionArgs;
va_start(insertionArgs, iStringResourceID);
LogProfEventVA(
iStringResourceID,
EVENTLOG_INFORMATION_TYPE,
insertionArgs);
va_end(insertionArgs);
}
#ifdef PROF_TEST_ONLY_FORCE_ELT
// Special forward-declarations of the profiling API's slow-path enter/leave/tailcall
// hooks. These need to be forward-declared here so that they may be referenced in
// InitializeProfiling() below solely for the debug-only, test-only code to allow
// enter/leave/tailcall to be turned on at startup without a profiler. See
// code:ProfControlBlock#TestOnlyELT
EXTERN_C void STDMETHODCALLTYPE ProfileEnterNaked(UINT_PTR clientData);
EXTERN_C void STDMETHODCALLTYPE ProfileLeaveNaked(UINT_PTR clientData);
EXTERN_C void STDMETHODCALLTYPE ProfileTailcallNaked(UINT_PTR clientData);
#endif //PROF_TEST_ONLY_FORCE_ELT
// ----------------------------------------------------------------------------
// ProfilingAPIUtility::InitializeProfiling
//
// This is the top-most level of profiling API initialization, and is called directly by
// EEStartupHelper() (in ceemain.cpp). This initializes internal structures relating to the
// Profiling API. This also orchestrates loading the profiler and initializing it (if
// its GUID is specified in the environment).
//
// Return Value:
// HRESULT indicating success or failure. This is generally very lenient about internal
// failures, as we don't want them to prevent the startup of the app:
// S_OK = Environment didn't request a profiler, or
// Environment did request a profiler, and it was loaded successfully
// S_FALSE = There was a problem loading the profiler, but that shouldn't prevent the app
// from starting up
// else (failure) = There was a serious problem that should be dealt with by the caller
//
// Notes:
// This function (or one of its callees) will log an error to the event log
// if there is a failure
//
// Assumptions:
// InitializeProfiling is called during startup, AFTER the host has initialized its
// settings and the config variables have been read, but BEFORE the finalizer thread
// has entered its first wait state. ASSERTs are placed in
// code:ProfilingAPIAttachDetach::Initialize (which is called by this function, and
// which depends on these assumptions) to verify.
// static
HRESULT ProfilingAPIUtility::InitializeProfiling()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        // This causes events to be logged, which loads resource strings,
        // which takes locks.
        CAN_TAKE_LOCK;
        MODE_PREEMPTIVE;
    }
    CONTRACTL_END;

    InitializeLogging();

    // NULL out / initialize members of the global profapi structure
    g_profControlBlock.Init();

    // Attempt each of the three profiler-load sources in order: the classic
    // COR_PROFILER environment configuration, profilers stored for delayed startup
    // (via the IPC channel), and the notification-profiler list.
    AttemptLoadProfilerForStartup();
    AttemptLoadDelayedStartupProfilers();
    AttemptLoadProfilerList();

    // For now, the return value from AttemptLoadProfilerForStartup is of no use to us.
    // Any event has been logged already by AttemptLoadProfilerForStartup, and
    // regardless of whether a profiler got loaded, we still need to continue.

#ifdef PROF_TEST_ONLY_FORCE_ELT
    // Test-only, debug-only code to enable ELT on startup regardless of whether a
    // startup profiler is loaded.  See code:ProfControlBlock#TestOnlyELT.
    DWORD dwEnableSlowELTHooks = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TestOnlyEnableSlowELTHooks);
    if (dwEnableSlowELTHooks != 0)
    {
        (&g_profControlBlock)->fTestOnlyForceEnterLeave = TRUE;
        SetJitHelperFunction(CORINFO_HELP_PROF_FCN_ENTER, (void *) ProfileEnterNaked);
        SetJitHelperFunction(CORINFO_HELP_PROF_FCN_LEAVE, (void *) ProfileLeaveNaked);
        SetJitHelperFunction(CORINFO_HELP_PROF_FCN_TAILCALL, (void *) ProfileTailcallNaked);
        LOG((LF_CORPROF, LL_INFO10, "**PROF: Enabled test-only slow ELT hooks.\n"));
    }
#endif //PROF_TEST_ONLY_FORCE_ELT

#ifdef PROF_TEST_ONLY_FORCE_OBJECT_ALLOCATED
    // Test-only, debug-only code to enable ObjectAllocated callbacks on startup regardless of whether a
    // startup profiler is loaded.  See code:ProfControlBlock#TestOnlyObjectAllocated.
    DWORD dwEnableObjectAllocated = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TestOnlyEnableObjectAllocatedHook);
    if (dwEnableObjectAllocated != 0)
    {
        (&g_profControlBlock)->fTestOnlyForceObjectAllocated = TRUE;
        LOG((LF_CORPROF, LL_INFO10, "**PROF: Enabled test-only object ObjectAllocated hooks.\n"));
    }
#endif //PROF_TEST_ONLY_FORCE_ELT

#ifdef _DEBUG
    // Test-only, debug-only code to allow attaching profilers to call ICorProfilerInfo interface,
    // which would otherwise be disallowed for attaching profilers
    DWORD dwTestOnlyEnableICorProfilerInfo = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TestOnlyEnableICorProfilerInfo);
    if (dwTestOnlyEnableICorProfilerInfo != 0)
    {
        (&g_profControlBlock)->fTestOnlyEnableICorProfilerInfo = TRUE;
    }
#endif // _DEBUG

    return S_OK;
}
// ----------------------------------------------------------------------------
// ProfilingAPIUtility::ProfilerCLSIDFromString
//
// Description:
// Takes a string form of a CLSID (or progid, believe it or not), and returns the
// corresponding CLSID structure.
//
// Arguments:
// * wszClsid - [in / out] CLSID string to convert. This may also be a progid. This
//        ensures our behavior is backward-compatible with previous CLR versions, which
//        also accepted a progid (not just a CLSID) from the environment. On [out], this
//        string is normalized in-place (e.g., double-quotes around a progid are removed).
// * pClsid - [out] CLSID structure corresponding to wszClsid
//
// Return Value:
// HRESULT indicating success or failure.
//
// Notes:
// * An event is logged if there is a failure.
//
// static
HRESULT ProfilingAPIUtility::ProfilerCLSIDFromString(
    __inout_z LPWSTR wszClsid,
    CLSID * pClsid)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        // This causes events to be logged, which loads resource strings,
        // which takes locks.
        CAN_TAKE_LOCK;
        MODE_PREEMPTIVE;
    }
    CONTRACTL_END;

    _ASSERTE(wszClsid != NULL);
    _ASSERTE(pClsid != NULL);

    HRESULT hr;

    // Translate the string into a CLSID.  A leading '{' means the string is a
    // literal GUID; anything else is treated as a progid.
    if (*wszClsid == W('{'))
    {
        hr = IIDFromString(wszClsid, pClsid);
    }
    else
    {
#ifndef TARGET_UNIX
        WCHAR *szFrom, *szTo;

#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable:26000) // "espX thinks there is an overflow here, but there isn't any"
#endif
        // Strip double-quote characters from the progid in place (szTo trails
        // szFrom, compacting the string), then NUL-terminate the result.
        for (szFrom=szTo=wszClsid;  *szFrom;  )
        {
            if (*szFrom == W('"'))
            {
                ++szFrom;
                continue;
            }
            *szTo++ = *szFrom++;
        }
        *szTo = 0;
        hr = CLSIDFromProgID(wszClsid, pClsid);
#ifdef _PREFAST_
#pragma warning(pop)
#endif /*_PREFAST_*/

#else // !TARGET_UNIX
        // ProgID not supported on TARGET_UNIX
        hr = E_INVALIDARG;
#endif // !TARGET_UNIX
    }

    if (FAILED(hr))
    {
        LOG((
            LF_CORPROF,
            LL_INFO10,
            "**PROF: Invalid CLSID or ProgID (%S).  hr=0x%x.\n",
            wszClsid,
            hr));
        ProfilingAPIUtility::LogProfError(IDS_E_PROF_BAD_CLSID, wszClsid, hr);
        return hr;
    }

    return S_OK;
}
// ----------------------------------------------------------------------------
// ProfilingAPIUtility::AttemptLoadProfilerForStartup
//
// Description:
// Checks environment or registry to see if the app is configured to run with a
// profiler loaded on startup. If so, this calls LoadProfiler() to load it up.
//
// Arguments:
//
// Return Value:
// * S_OK: Startup-profiler has been loaded
// * S_FALSE: No profiler is configured for startup load
// * else, HRESULT indicating failure that occurred
//
// Assumptions:
// * This should be called on startup, after g_profControlBlock is initialized, but
// before any attach infrastructure is initialized. This ensures we don't receive
// an attach request while startup-loading a profiler.
//
// Notes:
// * This or its callees will ensure an event is logged on failure (though will be
// silent if no profiler is configured for startup load (which causes S_FALSE to
// be returned)
//
// static
HRESULT ProfilingAPIUtility::AttemptLoadProfilerForStartup()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        // This causes events to be logged, which loads resource strings,
        // which takes locks.
        CAN_TAKE_LOCK;
        MODE_PREEMPTIVE;
    }
    CONTRACTL_END;

    HRESULT hr;

    // Find out if profiling is enabled
    DWORD fProfEnabled = 0;

    fProfEnabled = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_ENABLE_PROFILING);

    NewArrayHolder<WCHAR> wszClsid(NULL);
    NewArrayHolder<WCHAR> wszProfilerDLL(NULL);
    CLSID *pClsid;
    CLSID clsid;

    if (fProfEnabled == 0)
    {
        LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiling not enabled.\n"));
        return S_FALSE;
    }

    LOG((LF_CORPROF, LL_INFO10, "**PROF: Initializing Profiling Services.\n"));

    IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER, &wszClsid));

    // Prefer the architecture-specific profiler path variable, then fall back to
    // the bitness-specific one, and finally to the generic COR_PROFILER_PATH.
#if defined(TARGET_ARM64)
    IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER_PATH_ARM64, &wszProfilerDLL));
#elif defined(TARGET_ARM)
    IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER_PATH_ARM32, &wszProfilerDLL));
#endif
    if(wszProfilerDLL == NULL)
    {
#ifdef TARGET_64BIT
        IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER_PATH_64, &wszProfilerDLL));
#else
        IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER_PATH_32, &wszProfilerDLL));
#endif
        if(wszProfilerDLL == NULL)
        {
            IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER_PATH, &wszProfilerDLL));
        }
    }

    // If the environment variable doesn't exist, profiling is not enabled.
    if (wszClsid == NULL)
    {
        LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiling flag set, but required "
             "environment variable does not exist.\n"));

        LogProfError(IDS_E_PROF_NO_CLSID);

        return S_FALSE;
    }

    if ((wszProfilerDLL != NULL) && (wcslen(wszProfilerDLL) >= MAX_LONGPATH))
    {
        LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiling flag set, but COR_PROFILER_PATH was not set properly.\n"));

        LogProfError(IDS_E_PROF_BAD_PATH);

        return S_FALSE;
    }

#ifdef TARGET_UNIX
    // If the environment variable doesn't exist, profiling is not enabled.
    // (On Unix there is no registry fallback, so a path is mandatory.)
    if (wszProfilerDLL == NULL)
    {
        LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiling flag set, but required "
             "environment variable does not exist.\n"));

        LogProfError(IDS_E_PROF_BAD_PATH);

        return S_FALSE;
    }
#endif // TARGET_UNIX

    hr = ProfilingAPIUtility::ProfilerCLSIDFromString(wszClsid, &clsid);
    if (FAILED(hr))
    {
        // ProfilerCLSIDFromString already logged an event if there was a failure
        return hr;
    }

    pClsid = &clsid;

    hr = LoadProfiler(
        kStartupLoad,
        pClsid,
        wszClsid,
        wszProfilerDLL,
        NULL,               // No client data for startup load
        0);                 // No client data for startup load
    if (FAILED(hr))
    {
        // A failure in either the CLR or the profiler prevented it from
        // loading.  Event has been logged.  Propagate hr
        return hr;
    }

    return S_OK;
}
//static
// Loads every profiler that was stashed in g_profControlBlock.storedProfilers
// (populated via the IPC channel) as a startup-load profiler.  Returns S_OK, or
// the HRESULT of the last failing load (earlier failures do not stop the loop).
HRESULT ProfilingAPIUtility::AttemptLoadDelayedStartupProfilers()
{
    if (g_profControlBlock.storedProfilers.IsEmpty())
    {
        return S_OK;
    }

    HRESULT storedHr = S_OK;
    STOREDPROFILERLIST *profilers = &g_profControlBlock.storedProfilers;
    for (StoredProfilerNode* item = profilers->GetHead(); item != NULL; item = STOREDPROFILERLIST::GetNext(item))
    {
        LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiler loading from GUID/Path stored from the IPC channel."));
        CLSID *pClsid = &(item->guid);

        // Convert to string for logging
        constexpr size_t guidStringSize = 39;
        NewArrayHolder<WCHAR> wszClsid(new (nothrow) WCHAR[guidStringSize]);
        // GUIDs should always be the same number of characters...
        _ASSERTE(wszClsid != NULL);
        if (wszClsid != NULL)
        {
            StringFromGUID2(*pClsid, wszClsid, guidStringSize);
        }

        // Note: if the allocation above failed, wszClsid is NULL and LoadProfiler
        // is still attempted (the string is only used for logging).
        HRESULT hr = LoadProfiler(
            kStartupLoad,
            pClsid,
            wszClsid,
            item->path.GetUnicode(),
            NULL,               // No client data for startup load
            0);                 // No client data for startup load
        if (FAILED(hr))
        {
            // LoadProfiler logs if there is an error
            storedHr = hr;
        }
    }

    return storedHr;
}
// static
// Loads the semicolon-delimited list of notification profilers configured via the
// CORECLR_NOTIFICATION_PROFILERS family of config values.  Each entry has the form
// "path={guid}".  Returns S_OK, or the HRESULT of the last failing entry (the loop
// continues past failures so one bad entry doesn't block the rest).
HRESULT ProfilingAPIUtility::AttemptLoadProfilerList()
{
    HRESULT hr = S_OK;
    DWORD dwEnabled = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_ENABLE_NOTIFICATION_PROFILERS);
    if (dwEnabled == 0)
    {
        // Profiler list explicitly disabled, bail
        LogProfInfo(IDS_E_PROF_NOTIFICATION_DISABLED);
        return S_OK;
    }

    NewArrayHolder<WCHAR> wszProfilerList(NULL);

    // Prefer the architecture-specific list variable, then the bitness-specific
    // one, then the generic variable.
#if defined(TARGET_ARM64)
    CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_NOTIFICATION_PROFILERS_ARM64, &wszProfilerList);
#elif defined(TARGET_ARM)
    CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_NOTIFICATION_PROFILERS_ARM32, &wszProfilerList);
#endif
    if (wszProfilerList == NULL)
    {
#ifdef TARGET_64BIT
        CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_NOTIFICATION_PROFILERS_64, &wszProfilerList);
#else
        CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_NOTIFICATION_PROFILERS_32, &wszProfilerList);
#endif
        if (wszProfilerList == NULL)
        {
            CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_NOTIFICATION_PROFILERS, &wszProfilerList);
            if (wszProfilerList == NULL)
            {
                // No profiler list specified, bail
                return S_OK;
            }
        }
    }

    WCHAR *pOuter = NULL;
    WCHAR *pInner = NULL;
    WCHAR *currentSection = NULL;
    WCHAR *currentPath = NULL;
    WCHAR *currentGuid = NULL;

    HRESULT storedHr = S_OK;
    // Get each semicolon delimited config
    currentSection = wcstok_s(wszProfilerList, W(";"), &pOuter);
    while (currentSection != NULL)
    {
        // Parse this config "path={guid}"
        currentPath = wcstok_s(currentSection, W("="), &pInner);
        currentGuid = wcstok_s(NULL, W("="), &pInner);

        // Guard against a malformed entry with no "={guid}" portion: wcstok_s
        // returns NULL for currentGuid in that case, and passing NULL on to
        // ProfilerCLSIDFromString would dereference it.  Log and skip instead.
        if (currentGuid == NULL)
        {
            LogProfError(IDS_E_PROF_BAD_CLSID, currentPath, E_INVALIDARG);
            storedHr = E_INVALIDARG;
            goto NextSection;
        }

        CLSID clsid;
        hr = ProfilingAPIUtility::ProfilerCLSIDFromString(currentGuid, &clsid);
        if (FAILED(hr))
        {
            // ProfilerCLSIDFromString already logged an event if there was a failure
            storedHr = hr;
            goto NextSection;
        }

        hr = LoadProfiler(
            kStartupLoad,
            &clsid,
            currentGuid,
            currentPath,
            NULL,               // No client data for startup load
            0);                 // No client data for startup load
        if (FAILED(hr))
        {
            // LoadProfiler already logged if there was an error
            storedHr = hr;
            goto NextSection;
        }

NextSection:
        // Get next config
        currentSection = wcstok_s(NULL, W(";"), &pOuter);
    }

    return storedHr;
}
//---------------------------------------------------------------------------------------
//
// Performs lazy initialization that need not occur on startup, but does need to occur
// before trying to load a profiler.
//
// Return Value:
// HRESULT indicating success or failure.
//
HRESULT ProfilingAPIUtility::PerformDeferredInit()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        CAN_TAKE_LOCK;
        MODE_ANY;
    }
    CONTRACTL_END;

#ifdef FEATURE_PROFAPI_ATTACH_DETACH
    // Initialize internal resources for detaching
    HRESULT hr = ProfilingAPIDetach::Initialize();
    if (FAILED(hr))
    {
        LOG((
            LF_CORPROF,
            LL_ERROR,
            "**PROF: Unable to initialize resources for detaching. hr=0x%x.\n",
            hr));
        return hr;
    }
#endif // FEATURE_PROFAPI_ATTACH_DETACH

    // Create the status Crst on first use; it is deliberately reused (never
    // destroyed) across subsequent attach / detach cycles.
    if (s_csStatus == NULL)
    {
        s_csStatus = ClrCreateCriticalSection(
            CrstProfilingAPIStatus,
            (CrstFlags) (CRST_REENTRANCY | CRST_TAKEN_DURING_SHUTDOWN));
        if (s_csStatus == NULL)
        {
            return E_OUTOFMEMORY;
        }
    }

    return S_OK;
}
// ----------------------------------------------------------------------------
// ProfilingAPIUtility::DoPreInitialization
//
// Description:
//    Common pre-initialization work shared by startup and attach loads: honors the
//    ProfilerCompatibilitySetting, allocates and initializes the ProfToEE interface,
//    creates the DetachThread (when attach/detach is supported), and loads the
//    profiler DLL via EEToProfInterfaceImpl::Init -- without yet calling the
//    profiler's Initialize() callback.  On failure an event has been logged here
//    or by a callee.
//
// static
HRESULT ProfilingAPIUtility::DoPreInitialization(
        EEToProfInterfaceImpl *pEEProf,
        const CLSID *pClsid,
        LPCWSTR wszClsid,
        LPCWSTR wszProfilerDLL,
        LoadType loadType,
        DWORD dwConcurrentGCWaitTimeoutInMs)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        // This causes events to be logged, which loads resource strings,
        // which takes locks.
        CAN_TAKE_LOCK;
        MODE_ANY;
        PRECONDITION(pEEProf != NULL);
        PRECONDITION(pClsid != NULL);
        PRECONDITION(wszClsid != NULL);
    }
    CONTRACTL_END;

    _ASSERTE(s_csStatus != NULL);

    // The compatibility setting only applies to startup loads; attach loads
    // always require a V4+ (ICorProfilerCallback3-capable) profiler below.
    ProfilerCompatibilityFlag profilerCompatibilityFlag = kDisableV2Profiler;
    NewArrayHolder<WCHAR> wszProfilerCompatibilitySetting(NULL);

    if (loadType == kStartupLoad)
    {
        CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ProfAPI_ProfilerCompatibilitySetting, &wszProfilerCompatibilitySetting);
        if (wszProfilerCompatibilitySetting != NULL)
        {
            if (SString::_wcsicmp(wszProfilerCompatibilitySetting, W("EnableV2Profiler")) == 0)
            {
                profilerCompatibilityFlag = kEnableV2Profiler;
            }
            else if (SString::_wcsicmp(wszProfilerCompatibilitySetting, W("PreventLoad")) == 0)
            {
                profilerCompatibilityFlag = kPreventLoad;
            }
        }

        if (profilerCompatibilityFlag == kPreventLoad)
        {
            LOG((LF_CORPROF, LL_INFO10, "**PROF: COMPlus_ProfAPI_ProfilerCompatibilitySetting is set to PreventLoad. "
                 "Profiler will not be loaded.\n"));

            LogProfInfo(IDS_PROF_PROFILER_DISABLED,
                        CLRConfig::EXTERNAL_ProfAPI_ProfilerCompatibilitySetting.name,
                        wszProfilerCompatibilitySetting.GetValue(),
                        wszClsid);

            return S_OK;
        }
    }

    HRESULT hr = S_OK;

    NewHolder<ProfToEEInterfaceImpl> pProfEE(new (nothrow) ProfToEEInterfaceImpl());
    if (pProfEE == NULL)
    {
        LOG((LF_CORPROF, LL_ERROR, "**PROF: Unable to allocate ProfToEEInterfaceImpl.\n"));
        LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, E_OUTOFMEMORY);
        return E_OUTOFMEMORY;
    }

    // Initialize the interface
    hr = pProfEE->Init();
    if (FAILED(hr))
    {
        LOG((LF_CORPROF, LL_ERROR, "**PROF: ProfToEEInterface::Init failed.\n"));
        LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, hr);
        return hr;
    }

    // Provide the newly created and inited interface
    LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiling code being provided with EE interface.\n"));

#ifdef FEATURE_PROFAPI_ATTACH_DETACH
    // We're about to load the profiler, so first make sure we successfully create the
    // DetachThread and abort the load of the profiler if we can't. This ensures we don't
    // load a profiler unless we're prepared to detach it later.
    hr = ProfilingAPIDetach::CreateDetachThread();
    if (FAILED(hr))
    {
        LOG((
            LF_CORPROF,
            LL_ERROR,
            "**PROF: Unable to create DetachThread. hr=0x%x.\n",
            hr));
        ProfilingAPIUtility::LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, hr);
        return hr;
    }
#endif // FEATURE_PROFAPI_ATTACH_DETACH

    // Initialize internal state of our EEToProfInterfaceImpl. This also loads the
    // profiler itself, but does not yet call its Initialize() callback
    hr = pEEProf->Init(pProfEE, pClsid, wszClsid, wszProfilerDLL, (loadType == kAttachLoad), dwConcurrentGCWaitTimeoutInMs);
    if (FAILED(hr))
    {
        LOG((LF_CORPROF, LL_ERROR, "**PROF: EEToProfInterfaceImpl::Init failed.\n"));
        // EEToProfInterfaceImpl::Init logs an event log error on failure
        return hr;
    }

    // EEToProfInterfaceImpl::Init takes over the ownership of pProfEE when Init succeeds, and
    // EEToProfInterfaceImpl::~EEToProfInterfaceImpl is responsible for releasing the resource pointed
    // by pProfEE.  Calling SuppressRelease here is necessary to avoid double release that
    // the resource pointed by pProfEE are released by both pProfEE and pEEProf's destructor.
    pProfEE.SuppressRelease();
    pProfEE = NULL;

    if (loadType == kAttachLoad)  // V4 profiler from attach
    {
        // Profiler must support ICorProfilerCallback3 to be attachable
        if (!pEEProf->IsCallback3Supported())
        {
            LogProfError(IDS_E_PROF_NOT_ATTACHABLE, wszClsid);
            return CORPROF_E_PROFILER_NOT_ATTACHABLE;
        }
    }
    else if (!pEEProf->IsCallback3Supported()) // V2 profiler from startup
    {
        if (profilerCompatibilityFlag == kDisableV2Profiler)
        {
            LOG((LF_CORPROF, LL_INFO10, "**PROF: COMPlus_ProfAPI_ProfilerCompatibilitySetting is set to DisableV2Profiler (the default). "
                 "V2 profilers are not allowed, so that the configured V2 profiler is going to be unloaded.\n"));

            LogProfInfo(IDS_PROF_V2PROFILER_DISABLED, wszClsid);
            return S_OK;
        }

        _ASSERTE(profilerCompatibilityFlag == kEnableV2Profiler);

        LOG((LF_CORPROF, LL_INFO10, "**PROF: COMPlus_ProfAPI_ProfilerCompatibilitySetting is set to EnableV2Profiler. "
             "The configured V2 profiler is going to be initialized.\n"));

        LogProfInfo(IDS_PROF_V2PROFILER_ENABLED,
                    CLRConfig::EXTERNAL_ProfAPI_ProfilerCompatibilitySetting.name,
                    wszProfilerCompatibilitySetting.GetValue(),
                    wszClsid);
    }

    return hr;
}
// ----------------------------------------------------------------------------
// ProfilingAPIUtility::LoadProfiler
//
// Description:
// Outermost common code for loading the profiler DLL. Both startup and attach code
// paths use this.
//
// Arguments:
// * loadType - Startup load or attach load?
// * pClsid - Profiler's CLSID
// * wszClsid - Profiler's CLSID (or progid) in string form, for event log messages
// * wszProfilerDLL - Profiler's DLL path
// * pvClientData - For attach loads, this is the client data the trigger wants to
// pass to the profiler DLL
// * cbClientData - For attach loads, size of client data in bytes
// * dwConcurrentGCWaitTimeoutInMs - Time out for wait operation on concurrent GC. Attach scenario only
//
// Return Value:
// HRESULT indicating success or failure of the load
//
// Notes:
// * On failure, this function or a callee will have logged an event
//
// static
HRESULT ProfilingAPIUtility::LoadProfiler(
LoadType loadType,
const CLSID * pClsid,
LPCWSTR wszClsid,
LPCWSTR wszProfilerDLL,
LPVOID pvClientData,
UINT cbClientData,
DWORD dwConcurrentGCWaitTimeoutInMs)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
// This causes events to be logged, which loads resource strings,
// which takes locks.
CAN_TAKE_LOCK;
MODE_ANY;
}
CONTRACTL_END;
if (g_fEEShutDown)
{
return CORPROF_E_RUNTIME_UNINITIALIZED;
}
// Valid loadType?
_ASSERTE((loadType == kStartupLoad) || (loadType == kAttachLoad));
// If a nonzero client data size is reported, there'd better be client data!
_ASSERTE((cbClientData == 0) || (pvClientData != NULL));
// Client data is currently only specified on attach
_ASSERTE((pvClientData == NULL) || (loadType == kAttachLoad));
ProfilerInfo profilerInfo;
profilerInfo.Init();
profilerInfo.inUse = TRUE;
HRESULT hr = PerformDeferredInit();
if (FAILED(hr))
{
LOG((
LF_CORPROF,
LL_ERROR,
"**PROF: ProfilingAPIUtility::PerformDeferredInit failed. hr=0x%x.\n",
hr));
LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, hr);
return hr;
}
{
// Usually we need to take the lock when modifying profiler status, but at this
// point no one else could have a pointer to this ProfilerInfo so we don't
// need to synchronize. Once we store it in g_profControlBlock we need to.
profilerInfo.curProfStatus.Set(kProfStatusPreInitialize);
}
NewHolder<EEToProfInterfaceImpl> pEEProf(new (nothrow) EEToProfInterfaceImpl());
if (pEEProf == NULL)
{
LOG((LF_CORPROF, LL_ERROR, "**PROF: Unable to allocate EEToProfInterfaceImpl.\n"));
LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, E_OUTOFMEMORY);
return E_OUTOFMEMORY;
}
// Create the ProfToEE interface to provide to the profiling services
hr = DoPreInitialization(pEEProf, pClsid, wszClsid, wszProfilerDLL, loadType, dwConcurrentGCWaitTimeoutInMs);
if (FAILED(hr))
{
return hr;
}
{
// Usually we need to take the lock when modifying profiler status, but at this
// point no one else could have a pointer to this ProfilerInfo so we don't
// need to synchronize. Once we store it in g_profControlBlock we need to.
// We've successfully allocated and initialized the callback wrapper object and the
// Info interface implementation objects. The profiler DLL is therefore also
// successfully loaded (but not yet Initialized). Transfer ownership of the
// callback wrapper object to globals (thus suppress a release when the local
// vars go out of scope).
//
// Setting this state now enables us to call into the profiler's Initialize()
// callback (which we do immediately below), and have it successfully call
// back into us via the Info interface (ProfToEEInterfaceImpl) to perform its
// initialization.
profilerInfo.pProfInterface = pEEProf.GetValue();
pEEProf.SuppressRelease();
pEEProf = NULL;
// Set global status to reflect the proper type of Init we're doing (attach vs
// startup)
profilerInfo.curProfStatus.Set(
(loadType == kStartupLoad) ?
kProfStatusInitializingForStartupLoad :
kProfStatusInitializingForAttachLoad);
}
ProfilerInfo *pProfilerInfo = NULL;
{
// Now we register the profiler, from this point on we need to worry about
// synchronization
CRITSEC_Holder csh(s_csStatus);
// Check if this profiler is notification only and load as appropriate
BOOL notificationOnly = FALSE;
{
HRESULT callHr = profilerInfo.pProfInterface->LoadAsNotificationOnly(¬ificationOnly);
if (FAILED(callHr))
{
notificationOnly = FALSE;
}
}
if (notificationOnly)
{
pProfilerInfo = g_profControlBlock.FindNextFreeProfilerInfoSlot();
if (pProfilerInfo == NULL)
{
LogProfError(IDS_E_PROF_NOTIFICATION_LIMIT_EXCEEDED);
return CORPROF_E_PROFILER_ALREADY_ACTIVE;
}
}
else
{
// "main" profiler, there can only be one
if (g_profControlBlock.mainProfilerInfo.curProfStatus.Get() != kProfStatusNone)
{
LogProfError(IDS_PROF_ALREADY_LOADED);
return CORPROF_E_PROFILER_ALREADY_ACTIVE;
}
// This profiler cannot be a notification only profiler
pProfilerInfo = &(g_profControlBlock.mainProfilerInfo);
}
pProfilerInfo->curProfStatus.Set(profilerInfo.curProfStatus.Get());
pProfilerInfo->pProfInterface = profilerInfo.pProfInterface;
pProfilerInfo->pProfInterface->SetProfilerInfo(pProfilerInfo);
pProfilerInfo->inUse = TRUE;
}
// Now that the profiler is officially loaded and in Init status, call into the
// profiler's appropriate Initialize() callback. Note that if the profiler fails this
// call, we should abort the rest of the profiler loading, and reset our state so we
// appear as if we never attempted to load the profiler.
if (loadType == kStartupLoad)
{
// This EvacuationCounterHolder is just to make asserts in EEToProfInterfaceImpl happy.
// Using it like this without the dirty read/evac counter increment/clean read pattern
// is not safe generally, but in this specific case we can skip all that since we haven't
// published it yet, so we are the only thread that can access it.
EvacuationCounterHolder holder(pProfilerInfo);
hr = pProfilerInfo->pProfInterface->Initialize();
}
else
{
// This EvacuationCounterHolder is just to make asserts in EEToProfInterfaceImpl happy.
// Using it like this without the dirty read/evac counter increment/clean read pattern
// is not safe generally, but in this specific case we can skip all that since we haven't
// published it yet, so we are the only thread that can access it.
EvacuationCounterHolder holder(pProfilerInfo);
_ASSERTE(loadType == kAttachLoad);
_ASSERTE(pProfilerInfo->pProfInterface->IsCallback3Supported());
hr = pProfilerInfo->pProfInterface->InitializeForAttach(pvClientData, cbClientData);
}
if (FAILED(hr))
{
LOG((
LF_CORPROF,
LL_INFO10,
"**PROF: Profiler failed its Initialize callback. hr=0x%x.\n",
hr));
// If we timed out due to waiting on concurrent GC to finish, it is very likely this is
// the reason InitializeForAttach callback failed even though we cannot be sure and we cannot
// cannot assume hr is going to be CORPROF_E_TIMEOUT_WAITING_FOR_CONCURRENT_GC.
// The best we can do in this case is to report this failure anyway.
if (pProfilerInfo->pProfInterface->HasTimedOutWaitingForConcurrentGC())
{
ProfilingAPIUtility::LogProfError(IDS_E_PROF_TIMEOUT_WAITING_FOR_CONCURRENT_GC, dwConcurrentGCWaitTimeoutInMs, wszClsid);
}
// Check for known failure types, to customize the event we log
if ((loadType == kAttachLoad) &&
((hr == CORPROF_E_PROFILER_NOT_ATTACHABLE) || (hr == E_NOTIMPL)))
{
_ASSERTE(pProfilerInfo->pProfInterface->IsCallback3Supported());
// Profiler supports ICorProfilerCallback3, but explicitly doesn't support
// Attach loading. So log specialized event
LogProfError(IDS_E_PROF_NOT_ATTACHABLE, wszClsid);
// Normalize (CORPROF_E_PROFILER_NOT_ATTACHABLE || E_NOTIMPL) down to
// CORPROF_E_PROFILER_NOT_ATTACHABLE
hr = CORPROF_E_PROFILER_NOT_ATTACHABLE;
}
else if (hr == CORPROF_E_PROFILER_CANCEL_ACTIVATION)
{
// Profiler didn't encounter a bad error, but is voluntarily choosing not to
// profile this runtime. Profilers that need to set system environment
// variables to be able to profile services may use this HRESULT to avoid
// profiling all the other managed apps on the box.
LogProfInfo(IDS_PROF_CANCEL_ACTIVATION, wszClsid);
}
else
{
LogProfError(IDS_E_PROF_INIT_CALLBACK_FAILED, wszClsid, hr);
}
// Profiler failed; reset everything. This will automatically reset
// g_profControlBlock and will unload the profiler's DLL.
TerminateProfiling(pProfilerInfo);
return hr;
}
#ifdef FEATURE_MULTICOREJIT
// Disable multicore JIT when profiling is enabled
if (pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_MONITOR_JIT_COMPILATION))
{
MulticoreJitManager::DisableMulticoreJit();
}
#endif
// Indicate that profiling is properly initialized. On an attach-load, this will
// force a FlushStoreBuffers(), which is important for catch-up synchronization (see
// code:#ProfCatchUpSynchronization)
pProfilerInfo->curProfStatus.Set(kProfStatusActive);
LOG((
LF_CORPROF,
LL_INFO10,
"**PROF: Profiler successfully loaded and initialized.\n"));
LogProfInfo(IDS_PROF_LOAD_COMPLETE, wszClsid);
LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiler created and enabled.\n"));
if (loadType == kStartupLoad)
{
// For startup profilers only: If the profiler is interested in tracking GC
// events, then we must disable concurrent GC since concurrent GC can allocate
// and kill objects without relocating and thus not doing a heap walk.
if (pProfilerInfo->eventMask.IsEventMaskSet(COR_PRF_MONITOR_GC))
{
LOG((LF_CORPROF, LL_INFO10, "**PROF: Turning off concurrent GC at startup.\n"));
// Previously we would use SetGCConcurrent(0) to indicate to the GC that it shouldn't even
// attempt to use concurrent GC. The standalone GC feature create a cycle during startup,
// where the profiler couldn't set startup flags for the GC. To overcome this, we call
// TempraryDisableConcurrentGC and never enable it again. This has a perf cost, since the
// GC will create concurrent GC data structures, but it is acceptable in the context of
// this kind of profiling.
GCHeapUtilities::GetGCHeap()->TemporaryDisableConcurrentGC();
LOG((LF_CORPROF, LL_INFO10, "**PROF: Concurrent GC has been turned off at startup.\n"));
}
}
if (loadType == kAttachLoad)
{
// #ProfCatchUpSynchronization
//
// Now that callbacks are enabled (and all threads are aware), tell an attaching
// profiler that it's safe to request catchup information.
//
// There's a race we're preventing that's worthwhile to spell out. An attaching
// profiler should be able to get a COMPLETE set of data through the use of
// callbacks unioned with the use of catch-up enumeration Info functions. To
// achieve this, we must ensure that there is no "hole"--any new data the
// profiler seeks must be available from a callback or a catch-up info function
// (or both, as dupes are ok). That means that:
//
// * callbacks must be enabled on other threads NO LATER THAN the profiler begins
// requesting catch-up information on this thread
// * Abbreviate: callbacks <= catch-up.
//
// Otherwise, if catch-up < callbacks, then it would be possible to have this:
//
// * catch-up < new data arrives < callbacks.
//
// In this nightmare scenario, the new data would not be accessible from the
// catch-up calls made by the profiler (cuz the profiler made the calls too
// early) or the callbacks made into the profiler (cuz the callbacks were enabled
// too late). That's a hole, and that's bad. So we ensure callbacks <= catch-up
// by the following order of operations:
//
// * This thread:
// * a: Set (volatile) currentProfStatus = kProfStatusActive (done above) and
// event mask bits (profiler did this in Initialize() callback above,
// when it called SetEventMask)
// * b: Flush CPU buffers (done automatically when we set status to
// kProfStatusActive)
// * c: CLR->Profiler call: ProfilerAttachComplete() (below). Inside this
// call:
// * Profiler->CLR calls: Catch-up Info functions
// * Other threads:
// * a: New data (thread, JIT info, etc.) is created
// * b: This new data is now available to a catch-up Info call
// * c: currentProfStatus & event mask bits are accurately visible to thread
// in determining whether to make a callback
// * d: Read currentProfStatus & event mask bits and make callback
// (CLR->Profiler) if necessary
//
// So as long as OtherThreads.c <= ThisThread.c we're ok. This means other
// threads must be able to get a clean read of the (volatile) currentProfStatus &
// event mask bits BEFORE this thread calls ProfilerAttachComplete(). Use of the
// "volatile" keyword ensures that compiler optimizations and (w/ VC2005+
// compilers) the CPU's instruction reordering optimizations at runtime are
// disabled enough such that they do not hinder the order above. Use of
// FlushStoreBuffers() ensures that multiple caches on multiple CPUs do not
// hinder the order above (by causing other threads to get stale reads of the
// volatiles).
//
// For more information about catch-up enumerations and exactly which entities,
// and which stage of loading, are permitted to appear in the enumerations, see
// code:ProfilerFunctionEnum::Init#ProfilerEnumGeneral
{
// This EvacuationCounterHolder is just to make asserts in EEToProfInterfaceImpl happy.
// Using it like this without the dirty read/evac counter increment/clean read pattern
// is not safe generally, but in this specific case we can skip all that since we haven't
// published it yet, so we are the only thread that can access it.
EvacuationCounterHolder holder(pProfilerInfo);
pProfilerInfo->pProfInterface->ProfilerAttachComplete();
}
}
return S_OK;
}
//---------------------------------------------------------------------------------------
//
// Performs the evacuation checks by grabbing the thread store lock, iterating through
// all EE Threads, and querying each one's evacuation counter. If they're all 0, the
// profiler is ready to be unloaded.
//
// Return Value:
// Nonzero iff the profiler is fully evacuated and ready to be unloaded.
//
// static
BOOL ProfilingAPIUtility::IsProfilerEvacuated(ProfilerInfo *pProfilerInfo)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
CAN_TAKE_LOCK;
}
CONTRACTL_END;
_ASSERTE(pProfilerInfo->curProfStatus.Get() == kProfStatusDetaching);
// Note that threads are still in motion as we check its evacuation counter.
// This is ok, because we've already changed the profiler status to
// kProfStatusDetaching and flushed CPU buffers. So at this point the counter
// will typically only go down to 0 (and not increment anymore), with one
// small exception (below). So if we get a read of 0 below, the counter will
// typically stay there. Specifically:
// * Profiler is most likely not about to increment its evacuation counter
// from 0 to 1 because pThread sees that the status is
// kProfStatusDetaching.
// * Note that there is a small race where pThread might actually
// increment its evac counter from 0 to 1 (if it dirty-read the
// profiler status a tad too early), but that implies that when
// pThread rechecks the profiler status (clean read) then pThread
// will immediately decrement the evac counter back to 0 and avoid
// calling into the EEToProfInterfaceImpl pointer.
//
// (see
// code:ProfilingAPIUtility::InitializeProfiling#LoadUnloadCallbackSynchronization
// for details)Doing this under the thread store lock not only ensures we can
// iterate through the Thread objects safely, but also forces us to serialize with
// the GC. The latter is important, as server GC enters the profiler on non-EE
// Threads, and so no evacuation counters might be incremented during server GC even
// though control could be entering the profiler.
{
ThreadStoreLockHolder TSLockHolder;
Thread * pThread = ThreadStore::GetAllThreadList(
NULL, // cursor thread; always NULL to begin with
0, // mask to AND with Thread::m_State to filter returned threads
0); // bits to match the result of the above AND. (m_State & 0 == 0,
// so we won't filter out any threads)
// Note that, by not filtering out any of the threads, we're intentionally including
// stuff like TS_Dead or TS_Unstarted. But that keeps us on the safe
// side. If an EE Thread object exists, we want to check its counters to be
// absolutely certain it isn't executing in a profiler.
while (pThread != NULL)
{
// Note that pThread is still in motion as we check its evacuation counter.
// This is ok, because we've already changed the profiler status to
// kProfStatusDetaching and flushed CPU buffers. So at this point the counter
// will typically only go down to 0 (and not increment anymore), with one
// small exception (below). So if we get a read of 0 below, the counter will
// typically stay there. Specifically:
// * pThread is most likely not about to increment its evacuation counter
// from 0 to 1 because pThread sees that the status is
// kProfStatusDetaching.
// * Note that there is a small race where pThread might actually
// increment its evac counter from 0 to 1 (if it dirty-read the
// profiler status a tad too early), but that implies that when
// pThread rechecks the profiler status (clean read) then pThread
// will immediately decrement the evac counter back to 0 and avoid
// calling into the EEToProfInterfaceImpl pointer.
//
// (see
// code:ProfilingAPIUtility::InitializeProfiling#LoadUnloadCallbackSynchronization
// for details)
DWORD dwEvacCounter = pThread->GetProfilerEvacuationCounter(pProfilerInfo->slot);
if (dwEvacCounter != 0)
{
LOG((
LF_CORPROF,
LL_INFO100,
"**PROF: Profiler not yet evacuated because OS Thread ID 0x%x has evac counter of %d (decimal).\n",
pThread->GetOSThreadId(),
dwEvacCounter));
return FALSE;
}
pThread = ThreadStore::GetAllThreadList(pThread, 0, 0);
}
}
// FUTURE: When rejit feature crew complete, add code to verify all rejitted
// functions are fully reverted and off of all stacks. If this is very easy to
// verify (e.g., checking a single value), consider putting it above the loop
// above so we can early-out quicker if rejitted code is still around.
// We got this far without returning, so the profiler is fully evacuated
return TRUE;
}
//---------------------------------------------------------------------------------------
//
// This is the top-most level of profiling API teardown, and is called directly by
// EEShutDownHelper() (in ceemain.cpp). This cleans up internal structures relating to
// the Profiling API. If we're not in process teardown, then this also releases the
// profiler COM object and frees the profiler DLL
//
// static
void ProfilingAPIUtility::TerminateProfiling(ProfilerInfo *pProfilerInfo)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
CAN_TAKE_LOCK;
}
CONTRACTL_END;
if (IsAtProcessExit())
{
// We're tearing down the process so don't bother trying to clean everything up.
// There's no reliable way to verify other threads won't be trying to re-enter
// the profiler anyway, so cleaning up here could cause AVs.
return;
}
_ASSERTE(s_csStatus != NULL);
{
// We're modifying status and possibly unloading the profiler DLL below, so
// serialize this code with any other loading / unloading / detaching code.
CRITSEC_Holder csh(s_csStatus);
#ifdef FEATURE_PROFAPI_ATTACH_DETACH
if (pProfilerInfo->curProfStatus.Get() == kProfStatusDetaching && pProfilerInfo->pProfInterface.Load() != NULL)
{
// The profiler is still being referenced by
// ProfilingAPIDetach::s_profilerDetachInfo, so don't try to release and
// unload it. This can happen if Shutdown and Detach race, and Shutdown wins.
// For example, we could be called as part of Shutdown, but the profiler
// called RequestProfilerDetach near shutdown time as well (or even earlier
// but remains un-evacuated as shutdown begins). Whatever the cause, just
// don't unload the profiler here (as part of shutdown), and let the Detach
// Thread deal with it (if it gets the chance).
//
// Note: Since this check occurs inside s_csStatus, we don't have to worry
// that ProfilingAPIDetach::GetEEToProfPtr() will suddenly change during the
// code below.
//
// FUTURE: For reattach-with-neutered-profilers feature crew, change the
// above to scan through list of detaching profilers to make sure none of
// them give a GetEEToProfPtr() equal to g_profControlBlock.pProfInterface.
return;
}
#endif // FEATURE_PROFAPI_ATTACH_DETACH
if (pProfilerInfo->curProfStatus.Get() == kProfStatusActive)
{
pProfilerInfo->curProfStatus.Set(kProfStatusDetaching);
// Profiler was active when TerminateProfiling() was called, so we're unloading
// it due to shutdown. But other threads may still be trying to enter profiler
// callbacks (e.g., ClassUnloadStarted() can get called during shutdown). Now
// that the status has been changed to kProfStatusDetaching, no new threads will
// attempt to enter the profiler. But use the detach evacuation counters to see
// if other threads already began to enter the profiler.
if (!ProfilingAPIUtility::IsProfilerEvacuated(pProfilerInfo))
{
// Other threads might be entering the profiler, so just skip cleanup
return;
}
}
// If we have a profiler callback wrapper and / or info implementation
// active, then terminate them.
if (pProfilerInfo->pProfInterface.Load() != NULL)
{
// This destructor takes care of releasing the profiler's ICorProfilerCallback*
// interface, and unloading the DLL when we're not in process teardown.
delete pProfilerInfo->pProfInterface;
pProfilerInfo->pProfInterface.Store(NULL);
}
// NOTE: Intentionally not destroying / NULLing s_csStatus. If
// s_csStatus is already initialized, we can reuse it each time we do another
// attach / detach, so no need to destroy it.
// Attach/Load/Detach are all synchronized with the Status Crst, don't need to worry about races
// If we disabled concurrent GC and somehow failed later during the initialization
if (g_profControlBlock.fConcurrentGCDisabledForAttach.Load() && g_profControlBlock.IsMainProfiler(pProfilerInfo->pProfInterface))
{
g_profControlBlock.fConcurrentGCDisabledForAttach = FALSE;
// We know for sure GC has been fully initialized as we've turned off concurrent GC before
_ASSERTE(IsGarbageCollectorFullyInitialized());
GCHeapUtilities::GetGCHeap()->TemporaryEnableConcurrentGC();
}
// #ProfileResetSessionStatus Reset all the status variables that are for the current
// profiling attach session.
// When you are adding new status in g_profControlBlock, you need to think about whether
// your new status is per-session, or consistent across sessions
pProfilerInfo->ResetPerSessionStatus();
pProfilerInfo->curProfStatus.Set(kProfStatusNone);
g_profControlBlock.DeRegisterProfilerInfo(pProfilerInfo);
g_profControlBlock.UpdateGlobalEventMask();
}
}
#endif // PROFILING_SUPPORTED
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/mono/mono/metadata/memory-manager.c | #include <mono/metadata/loader-internals.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/reflection-cache.h>
#include <mono/metadata/mono-hash-internals.h>
#include <mono/metadata/debug-internals.h>
#include <mono/utils/unlocked.h>
static LockFreeMempool*
lock_free_mempool_new (void)
{
return g_new0 (LockFreeMempool, 1);
}
static void
lock_free_mempool_free (LockFreeMempool *mp)
{
LockFreeMempoolChunk *chunk, *next;
chunk = mp->chunks;
while (chunk) {
next = (LockFreeMempoolChunk *)chunk->prev;
mono_vfree (chunk, mono_pagesize (), MONO_MEM_ACCOUNT_MEM_MANAGER);
chunk = next;
}
g_free (mp);
}
/*
* This is async safe
*/
static LockFreeMempoolChunk*
lock_free_mempool_chunk_new (LockFreeMempool *mp, int len)
{
LockFreeMempoolChunk *chunk, *prev;
int size;
size = mono_pagesize ();
while (size - sizeof (LockFreeMempoolChunk) < len)
size += mono_pagesize ();
chunk = (LockFreeMempoolChunk *)mono_valloc (0, size, MONO_MMAP_READ|MONO_MMAP_WRITE, MONO_MEM_ACCOUNT_MEM_MANAGER);
g_assert (chunk);
chunk->mem = (guint8 *)ALIGN_PTR_TO ((char*)chunk + sizeof (LockFreeMempoolChunk), 16);
chunk->size = ((char*)chunk + size) - (char*)chunk->mem;
chunk->pos = 0;
/* Add to list of chunks lock-free */
while (TRUE) {
prev = mp->chunks;
if (mono_atomic_cas_ptr ((volatile gpointer*)&mp->chunks, chunk, prev) == prev)
break;
}
chunk->prev = prev;
return chunk;
}
/*
* This is async safe
*/
static gpointer
lock_free_mempool_alloc0 (LockFreeMempool *mp, guint size)
{
LockFreeMempoolChunk *chunk;
gpointer res;
int oldpos;
// FIXME: Free the allocator
size = ALIGN_TO (size, 8);
chunk = mp->current;
if (!chunk) {
chunk = lock_free_mempool_chunk_new (mp, size);
mono_memory_barrier ();
/* Publish */
mp->current = chunk;
}
/* The code below is lock-free, 'chunk' is shared state */
oldpos = mono_atomic_fetch_add_i32 (&chunk->pos, size);
if (oldpos + size > chunk->size) {
chunk = lock_free_mempool_chunk_new (mp, size);
g_assert (chunk->pos + size <= chunk->size);
res = chunk->mem;
chunk->pos += size;
mono_memory_barrier ();
mp->current = chunk;
} else {
res = (char*)chunk->mem + oldpos;
}
return res;
}
MonoMemoryManager*
mono_mem_manager_new (MonoAssemblyLoadContext **alcs, int nalcs, gboolean collectible)
{
MonoDomain *domain = mono_get_root_domain ();
MonoMemoryManager *memory_manager;
memory_manager = g_new0 (MonoMemoryManager, 1);
memory_manager->collectible = collectible;
memory_manager->n_alcs = nalcs;
mono_coop_mutex_init_recursive (&memory_manager->lock);
mono_os_mutex_init (&memory_manager->mp_mutex);
memory_manager->_mp = mono_mempool_new ();
if (mono_runtime_get_no_exec()) {
memory_manager->code_mp = mono_code_manager_new_aot ();
} else {
memory_manager->code_mp = mono_code_manager_new ();
}
memory_manager->lock_free_mp = lock_free_mempool_new ();
memory_manager->alcs = mono_mempool_alloc0 (memory_manager->_mp, sizeof (MonoAssemblyLoadContext *) * nalcs);
memcpy (memory_manager->alcs, alcs, sizeof (MonoAssemblyLoadContext *) * nalcs);
memory_manager->class_vtable_array = g_ptr_array_new ();
// TODO: make these not linked to the domain for debugging
memory_manager->type_hash = mono_g_hash_table_new_type_internal ((GHashFunc)mono_metadata_type_hash, (GCompareFunc)mono_metadata_type_equal, MONO_HASH_VALUE_GC, MONO_ROOT_SOURCE_DOMAIN, domain, "Domain Reflection Type Table");
memory_manager->refobject_hash = mono_conc_g_hash_table_new_type (mono_reflected_hash, mono_reflected_equal, MONO_HASH_VALUE_GC, MONO_ROOT_SOURCE_DOMAIN, domain, "Domain Reflection Object Table");
memory_manager->type_init_exception_hash = mono_g_hash_table_new_type_internal (mono_aligned_addr_hash, NULL, MONO_HASH_VALUE_GC, MONO_ROOT_SOURCE_DOMAIN, domain, "Domain Type Initialization Exception Table");
if (mono_get_runtime_callbacks ()->init_mem_manager)
mono_get_runtime_callbacks ()->init_mem_manager (memory_manager);
return memory_manager;
}
static void
cleanup_refobject_hash (gpointer key, gpointer value, gpointer user_data)
{
free_reflected_entry ((ReflectedEntry *)key);
}
static void
unregister_vtable_reflection_type (MonoVTable *vtable)
{
MonoObject *type = (MonoObject *)vtable->type;
if (type->vtable->klass != mono_defaults.runtimetype_class)
MONO_GC_UNREGISTER_ROOT_IF_MOVING (vtable->type);
}
// First phase of deletion
static void
memory_manager_delete_objects (MonoMemoryManager *memory_manager)
{
memory_manager->freeing = TRUE;
// Must be done before type_hash is freed
for (int i = 0; i < memory_manager->class_vtable_array->len; i++)
unregister_vtable_reflection_type ((MonoVTable *)g_ptr_array_index (memory_manager->class_vtable_array, i));
g_ptr_array_free (memory_manager->class_vtable_array, TRUE);
memory_manager->class_vtable_array = NULL;
mono_g_hash_table_destroy (memory_manager->type_hash);
memory_manager->type_hash = NULL;
mono_conc_g_hash_table_foreach (memory_manager->refobject_hash, cleanup_refobject_hash, NULL);
mono_conc_g_hash_table_destroy (memory_manager->refobject_hash);
memory_manager->refobject_hash = NULL;
mono_g_hash_table_destroy (memory_manager->type_init_exception_hash);
memory_manager->type_init_exception_hash = NULL;
}
// Full deletion
static void
memory_manager_delete (MonoMemoryManager *memory_manager, gboolean debug_unload)
{
// Scan here to assert no lingering references in vtables?
if (mono_get_runtime_callbacks ()->free_mem_manager)
mono_get_runtime_callbacks ()->free_mem_manager (memory_manager);
if (memory_manager->debug_info) {
mono_mem_manager_free_debug_info (memory_manager);
memory_manager->debug_info = NULL;
}
if (!memory_manager->freeing)
memory_manager_delete_objects (memory_manager);
mono_coop_mutex_destroy (&memory_manager->lock);
// FIXME: Free generics caches
if (debug_unload) {
mono_mempool_invalidate (memory_manager->_mp);
mono_code_manager_invalidate (memory_manager->code_mp);
} else {
#ifndef DISABLE_PERFCOUNTERS
/* FIXME: use an explicit subtraction method as soon as it's available */
mono_atomic_fetch_add_i32 (&mono_perfcounters->loader_bytes, -1 * mono_mempool_get_allocated (memory_manager->_mp));
#endif
mono_mempool_destroy (memory_manager->_mp);
memory_manager->_mp = NULL;
mono_code_manager_destroy (memory_manager->code_mp);
memory_manager->code_mp = NULL;
}
}
void
mono_mem_manager_free_objects (MonoMemoryManager *memory_manager)
{
g_assert (!memory_manager->freeing);
memory_manager_delete_objects (memory_manager);
}
void
mono_mem_manager_free (MonoMemoryManager *memory_manager, gboolean debug_unload)
{
g_assert (!memory_manager->is_generic);
memory_manager_delete (memory_manager, debug_unload);
g_free (memory_manager);
}
void
mono_mem_manager_lock (MonoMemoryManager *memory_manager)
{
mono_locks_coop_acquire (&memory_manager->lock, MemoryManagerLock);
}
void
mono_mem_manager_unlock (MonoMemoryManager *memory_manager)
{
mono_locks_coop_release (&memory_manager->lock, MemoryManagerLock);
}
static inline void
alloc_lock (MonoMemoryManager *memory_manager)
{
mono_os_mutex_lock (&memory_manager->mp_mutex);
}
static inline void
alloc_unlock (MonoMemoryManager *memory_manager)
{
mono_os_mutex_unlock (&memory_manager->mp_mutex);
}
void *
mono_mem_manager_alloc (MonoMemoryManager *memory_manager, guint size)
{
void *res;
alloc_lock (memory_manager);
#ifndef DISABLE_PERFCOUNTERS
mono_atomic_fetch_add_i32 (&mono_perfcounters->loader_bytes, size);
#endif
res = mono_mempool_alloc (memory_manager->_mp, size);
alloc_unlock (memory_manager);
return res;
}
void *
mono_mem_manager_alloc0 (MonoMemoryManager *memory_manager, guint size)
{
void *res;
alloc_lock (memory_manager);
#ifndef DISABLE_PERFCOUNTERS
mono_atomic_fetch_add_i32 (&mono_perfcounters->loader_bytes, size);
#endif
res = mono_mempool_alloc0 (memory_manager->_mp, size);
alloc_unlock (memory_manager);
return res;
}
char*
mono_mem_manager_strdup (MonoMemoryManager *memory_manager, const char *s)
{
char *res;
alloc_lock (memory_manager);
res = mono_mempool_strdup (memory_manager->_mp, s);
alloc_unlock (memory_manager);
return res;
}
gboolean
mono_mem_manager_mp_contains_addr (MonoMemoryManager *memory_manager, gpointer addr)
{
gboolean res;
alloc_lock (memory_manager);
res = mono_mempool_contains_addr (memory_manager->_mp, addr);
alloc_unlock (memory_manager);
return res;
}
void *
(mono_mem_manager_code_reserve) (MonoMemoryManager *memory_manager, int size)
{
void *res;
mono_mem_manager_lock (memory_manager);
res = mono_code_manager_reserve (memory_manager->code_mp, size);
mono_mem_manager_unlock (memory_manager);
return res;
}
void *
mono_mem_manager_code_reserve_align (MonoMemoryManager *memory_manager, int size, int alignment)
{
void *res;
mono_mem_manager_lock (memory_manager);
res = mono_code_manager_reserve_align (memory_manager->code_mp, size, alignment);
mono_mem_manager_unlock (memory_manager);
return res;
}
void
mono_mem_manager_code_commit (MonoMemoryManager *memory_manager, void *data, int size, int newsize)
{
mono_mem_manager_lock (memory_manager);
mono_code_manager_commit (memory_manager->code_mp, data, size, newsize);
mono_mem_manager_unlock (memory_manager);
}
/*
* mono_mem_manager_code_foreach:
* Iterate over the code thunks of the code manager of @memory_manager.
*
* The @func callback MUST not take any locks. If it really needs to, it must respect
* the locking rules of the runtime: http://www.mono-project.com/Mono:Runtime:Documentation:ThreadSafety
* LOCKING: Acquires the memory manager lock.
*/
void
mono_mem_manager_code_foreach (MonoMemoryManager *memory_manager, MonoCodeManagerFunc func, void *user_data)
{
mono_mem_manager_lock (memory_manager);
mono_code_manager_foreach (memory_manager->code_mp, func, user_data);
mono_mem_manager_unlock (memory_manager);
}
gpointer
(mono_mem_manager_alloc0_lock_free) (MonoMemoryManager *memory_manager, guint size)
{
return lock_free_mempool_alloc0 (memory_manager->lock_free_mp, size);
}
//107, 131, 163
#define HASH_TABLE_SIZE 163
static MonoMemoryManager *mem_manager_cache [HASH_TABLE_SIZE];
static gint32 mem_manager_cache_hit, mem_manager_cache_miss;
static guint32
mix_hash (uintptr_t source)
{
unsigned int hash = source;
// Actual hash
hash = (((hash * 215497) >> 16) ^ ((hash * 1823231) + hash));
// Mix in highest bits on 64-bit systems only
if (sizeof (source) > 4)
hash = hash ^ ((source >> 31) >> 1);
return hash;
}
static guint32
hash_alcs (MonoAssemblyLoadContext **alcs, int nalcs)
{
guint32 res = 0;
int i;
for (i = 0; i < nalcs; ++i)
res += mix_hash ((size_t)alcs [i]);
return res;
}
static gboolean
match_mem_manager (MonoMemoryManager *mm, MonoAssemblyLoadContext **alcs, int nalcs)
{
int j, k;
if (mm->n_alcs != nalcs)
return FALSE;
/* The order might differ so check all pairs */
for (j = 0; j < nalcs; ++j) {
for (k = 0; k < nalcs; ++k)
if (mm->alcs [k] == alcs [j])
break;
if (k == nalcs)
/* Not found */
break;
}
return j == nalcs;
}
static MonoMemoryManager*
mem_manager_cache_get (MonoAssemblyLoadContext **alcs, int nalcs)
{
guint32 hash_code = hash_alcs (alcs, nalcs);
int index = hash_code % HASH_TABLE_SIZE;
MonoMemoryManager *mm = mem_manager_cache [index];
if (!mm || !match_mem_manager (mm, alcs, nalcs)) {
UnlockedIncrement (&mem_manager_cache_miss);
return NULL;
}
UnlockedIncrement (&mem_manager_cache_hit);
return mm;
}
static void
mem_manager_cache_add (MonoMemoryManager *mem_manager)
{
guint32 hash_code = hash_alcs (mem_manager->alcs, mem_manager->n_alcs);
int index = hash_code % HASH_TABLE_SIZE;
mem_manager_cache [index] = mem_manager;
}
static MonoMemoryManager*
get_mem_manager_for_alcs (MonoAssemblyLoadContext **alcs, int nalcs)
{
MonoAssemblyLoadContext *alc;
GPtrArray *mem_managers;
MonoMemoryManager *res;
gboolean collectible;
/* Can happen for dynamic images */
if (nalcs == 0)
return mono_alc_get_default ()->memory_manager;
/* Common case */
if (nalcs == 1)
return alcs [0]->memory_manager;
collectible = FALSE;
for (int i = 0; i < nalcs; ++i)
collectible |= alcs [i]->collectible;
if (!collectible)
/* Can use the default alc */
return mono_alc_get_default ()->memory_manager;
// Check in a lock free cache
res = mem_manager_cache_get (alcs, nalcs);
if (res)
return res;
/*
* Find an existing mem manager for these ALCs.
* This can exist even if the cache lookup fails since the cache is very simple.
*/
/* We can search any ALC in the list, use the first one for now */
alc = alcs [0];
mono_alc_memory_managers_lock (alc);
mem_managers = alc->generic_memory_managers;
res = NULL;
for (int mindex = 0; mindex < mem_managers->len; ++mindex) {
MonoMemoryManager *mm = (MonoMemoryManager*)g_ptr_array_index (mem_managers, mindex);
if (match_mem_manager (mm, alcs, nalcs)) {
res = mm;
break;
}
}
mono_alc_memory_managers_unlock (alc);
if (res)
return res;
/* Create new mem manager */
res = mono_mem_manager_new (alcs, nalcs, collectible);
res->is_generic = TRUE;
/* The hashes are lazily inited in metadata.c */
/* Register it into its ALCs */
for (int i = 0; i < nalcs; ++i) {
mono_alc_memory_managers_lock (alcs [i]);
g_ptr_array_add (alcs [i]->generic_memory_managers, res);
mono_alc_memory_managers_unlock (alcs [i]);
}
mono_memory_barrier ();
mem_manager_cache_add (res);
return res;
}
/*
* mono_mem_manager_get_generic:
*
* Return a memory manager for allocating memory owned by the set of IMAGES.
*/
MonoMemoryManager*
mono_mem_manager_get_generic (MonoImage **images, int nimages)
{
MonoAssemblyLoadContext **alcs = g_newa (MonoAssemblyLoadContext*, nimages);
int nalcs, j;
/* Collect the set of ALCs owning the images */
nalcs = 0;
for (int i = 0; i < nimages; ++i) {
MonoAssemblyLoadContext *alc = mono_image_get_alc (images [i]);
if (!alc)
continue;
/* O(n^2), but shouldn't be a problem in practice */
for (j = 0; j < nalcs; ++j)
if (alcs [j] == alc)
break;
if (j == nalcs)
alcs [nalcs ++] = alc;
}
return get_mem_manager_for_alcs (alcs, nalcs);
}
/*
* mono_mem_manager_merge:
*
* Return a mem manager which depends on the ALCs of MM1/MM2.
*/
MonoMemoryManager*
mono_mem_manager_merge (MonoMemoryManager *mm1, MonoMemoryManager *mm2)
{
MonoAssemblyLoadContext **alcs;
// Common case
if (mm1 == mm2)
return mm1;
alcs = g_newa (MonoAssemblyLoadContext*, mm1->n_alcs + mm2->n_alcs);
memcpy (alcs, mm1->alcs, sizeof (MonoAssemblyLoadContext*) * mm1->n_alcs);
int nalcs = mm1->n_alcs;
/* O(n^2), but shouldn't be a problem in practice */
for (int i = 0; i < mm2->n_alcs; ++i) {
int j;
for (j = 0; j < mm1->n_alcs; ++j) {
if (mm2->alcs [i] == mm1->alcs [j])
break;
}
if (j == mm1->n_alcs)
alcs [nalcs ++] = mm2->alcs [i];
}
return get_mem_manager_for_alcs (alcs, nalcs);
}
| #include <mono/metadata/loader-internals.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/reflection-cache.h>
#include <mono/metadata/mono-hash-internals.h>
#include <mono/metadata/debug-internals.h>
#include <mono/utils/unlocked.h>
static LockFreeMempool*
lock_free_mempool_new (void)
{
return g_new0 (LockFreeMempool, 1);
}
static void
lock_free_mempool_free (LockFreeMempool *mp)
{
LockFreeMempoolChunk *chunk, *next;
chunk = mp->chunks;
while (chunk) {
next = (LockFreeMempoolChunk *)chunk->prev;
mono_vfree (chunk, mono_pagesize (), MONO_MEM_ACCOUNT_MEM_MANAGER);
chunk = next;
}
g_free (mp);
}
/*
* This is async safe
*/
static LockFreeMempoolChunk*
lock_free_mempool_chunk_new (LockFreeMempool *mp, int len)
{
LockFreeMempoolChunk *chunk, *prev;
int size;
size = mono_pagesize ();
while (size - sizeof (LockFreeMempoolChunk) < len)
size += mono_pagesize ();
chunk = (LockFreeMempoolChunk *)mono_valloc (0, size, MONO_MMAP_READ|MONO_MMAP_WRITE, MONO_MEM_ACCOUNT_MEM_MANAGER);
g_assert (chunk);
chunk->mem = (guint8 *)ALIGN_PTR_TO ((char*)chunk + sizeof (LockFreeMempoolChunk), 16);
chunk->size = ((char*)chunk + size) - (char*)chunk->mem;
chunk->pos = 0;
/* Add to list of chunks lock-free */
while (TRUE) {
prev = mp->chunks;
if (mono_atomic_cas_ptr ((volatile gpointer*)&mp->chunks, chunk, prev) == prev)
break;
}
chunk->prev = prev;
return chunk;
}
/*
* This is async safe
*/
static gpointer
lock_free_mempool_alloc0 (LockFreeMempool *mp, guint size)
{
LockFreeMempoolChunk *chunk;
gpointer res;
int oldpos;
// FIXME: Free the allocator
size = ALIGN_TO (size, 8);
chunk = mp->current;
if (!chunk) {
chunk = lock_free_mempool_chunk_new (mp, size);
mono_memory_barrier ();
/* Publish */
mp->current = chunk;
}
/* The code below is lock-free, 'chunk' is shared state */
oldpos = mono_atomic_fetch_add_i32 (&chunk->pos, size);
if (oldpos + size > chunk->size) {
chunk = lock_free_mempool_chunk_new (mp, size);
g_assert (chunk->pos + size <= chunk->size);
res = chunk->mem;
chunk->pos += size;
mono_memory_barrier ();
mp->current = chunk;
} else {
res = (char*)chunk->mem + oldpos;
}
return res;
}
MonoMemoryManager*
mono_mem_manager_new (MonoAssemblyLoadContext **alcs, int nalcs, gboolean collectible)
{
MonoDomain *domain = mono_get_root_domain ();
MonoMemoryManager *memory_manager;
memory_manager = g_new0 (MonoMemoryManager, 1);
memory_manager->collectible = collectible;
memory_manager->n_alcs = nalcs;
mono_coop_mutex_init_recursive (&memory_manager->lock);
mono_os_mutex_init (&memory_manager->mp_mutex);
memory_manager->_mp = mono_mempool_new ();
if (mono_runtime_get_no_exec()) {
memory_manager->code_mp = mono_code_manager_new_aot ();
} else {
memory_manager->code_mp = mono_code_manager_new ();
}
memory_manager->lock_free_mp = lock_free_mempool_new ();
memory_manager->alcs = mono_mempool_alloc0 (memory_manager->_mp, sizeof (MonoAssemblyLoadContext *) * nalcs);
memcpy (memory_manager->alcs, alcs, sizeof (MonoAssemblyLoadContext *) * nalcs);
memory_manager->class_vtable_array = g_ptr_array_new ();
// TODO: make these not linked to the domain for debugging
memory_manager->type_hash = mono_g_hash_table_new_type_internal ((GHashFunc)mono_metadata_type_hash, (GCompareFunc)mono_metadata_type_equal, MONO_HASH_VALUE_GC, MONO_ROOT_SOURCE_DOMAIN, domain, "Domain Reflection Type Table");
memory_manager->refobject_hash = mono_conc_g_hash_table_new_type (mono_reflected_hash, mono_reflected_equal, MONO_HASH_VALUE_GC, MONO_ROOT_SOURCE_DOMAIN, domain, "Domain Reflection Object Table");
memory_manager->type_init_exception_hash = mono_g_hash_table_new_type_internal (mono_aligned_addr_hash, NULL, MONO_HASH_VALUE_GC, MONO_ROOT_SOURCE_DOMAIN, domain, "Domain Type Initialization Exception Table");
if (mono_get_runtime_callbacks ()->init_mem_manager)
mono_get_runtime_callbacks ()->init_mem_manager (memory_manager);
return memory_manager;
}
static void
cleanup_refobject_hash (gpointer key, gpointer value, gpointer user_data)
{
free_reflected_entry ((ReflectedEntry *)key);
}
static void
unregister_vtable_reflection_type (MonoVTable *vtable)
{
MonoObject *type = (MonoObject *)vtable->type;
if (type->vtable->klass != mono_defaults.runtimetype_class)
MONO_GC_UNREGISTER_ROOT_IF_MOVING (vtable->type);
}
// First phase of deletion
static void
memory_manager_delete_objects (MonoMemoryManager *memory_manager)
{
memory_manager->freeing = TRUE;
// Must be done before type_hash is freed
for (int i = 0; i < memory_manager->class_vtable_array->len; i++)
unregister_vtable_reflection_type ((MonoVTable *)g_ptr_array_index (memory_manager->class_vtable_array, i));
g_ptr_array_free (memory_manager->class_vtable_array, TRUE);
memory_manager->class_vtable_array = NULL;
mono_g_hash_table_destroy (memory_manager->type_hash);
memory_manager->type_hash = NULL;
mono_conc_g_hash_table_foreach (memory_manager->refobject_hash, cleanup_refobject_hash, NULL);
mono_conc_g_hash_table_destroy (memory_manager->refobject_hash);
memory_manager->refobject_hash = NULL;
mono_g_hash_table_destroy (memory_manager->type_init_exception_hash);
memory_manager->type_init_exception_hash = NULL;
}
// Full deletion
static void
memory_manager_delete (MonoMemoryManager *memory_manager, gboolean debug_unload)
{
// Scan here to assert no lingering references in vtables?
if (mono_get_runtime_callbacks ()->free_mem_manager)
mono_get_runtime_callbacks ()->free_mem_manager (memory_manager);
if (memory_manager->debug_info) {
mono_mem_manager_free_debug_info (memory_manager);
memory_manager->debug_info = NULL;
}
if (!memory_manager->freeing)
memory_manager_delete_objects (memory_manager);
mono_coop_mutex_destroy (&memory_manager->lock);
// FIXME: Free generics caches
if (debug_unload) {
mono_mempool_invalidate (memory_manager->_mp);
mono_code_manager_invalidate (memory_manager->code_mp);
} else {
#ifndef DISABLE_PERFCOUNTERS
/* FIXME: use an explicit subtraction method as soon as it's available */
mono_atomic_fetch_add_i32 (&mono_perfcounters->loader_bytes, -1 * mono_mempool_get_allocated (memory_manager->_mp));
#endif
mono_mempool_destroy (memory_manager->_mp);
memory_manager->_mp = NULL;
mono_code_manager_destroy (memory_manager->code_mp);
memory_manager->code_mp = NULL;
}
}
void
mono_mem_manager_free_objects (MonoMemoryManager *memory_manager)
{
g_assert (!memory_manager->freeing);
memory_manager_delete_objects (memory_manager);
}
void
mono_mem_manager_free (MonoMemoryManager *memory_manager, gboolean debug_unload)
{
g_assert (!memory_manager->is_generic);
memory_manager_delete (memory_manager, debug_unload);
g_free (memory_manager);
}
void
mono_mem_manager_lock (MonoMemoryManager *memory_manager)
{
mono_locks_coop_acquire (&memory_manager->lock, MemoryManagerLock);
}
void
mono_mem_manager_unlock (MonoMemoryManager *memory_manager)
{
mono_locks_coop_release (&memory_manager->lock, MemoryManagerLock);
}
static inline void
alloc_lock (MonoMemoryManager *memory_manager)
{
mono_os_mutex_lock (&memory_manager->mp_mutex);
}
static inline void
alloc_unlock (MonoMemoryManager *memory_manager)
{
mono_os_mutex_unlock (&memory_manager->mp_mutex);
}
void *
mono_mem_manager_alloc (MonoMemoryManager *memory_manager, guint size)
{
void *res;
alloc_lock (memory_manager);
#ifndef DISABLE_PERFCOUNTERS
mono_atomic_fetch_add_i32 (&mono_perfcounters->loader_bytes, size);
#endif
res = mono_mempool_alloc (memory_manager->_mp, size);
alloc_unlock (memory_manager);
return res;
}
void *
mono_mem_manager_alloc0 (MonoMemoryManager *memory_manager, guint size)
{
void *res;
alloc_lock (memory_manager);
#ifndef DISABLE_PERFCOUNTERS
mono_atomic_fetch_add_i32 (&mono_perfcounters->loader_bytes, size);
#endif
res = mono_mempool_alloc0 (memory_manager->_mp, size);
alloc_unlock (memory_manager);
return res;
}
char*
mono_mem_manager_strdup (MonoMemoryManager *memory_manager, const char *s)
{
char *res;
alloc_lock (memory_manager);
res = mono_mempool_strdup (memory_manager->_mp, s);
alloc_unlock (memory_manager);
return res;
}
gboolean
mono_mem_manager_mp_contains_addr (MonoMemoryManager *memory_manager, gpointer addr)
{
gboolean res;
alloc_lock (memory_manager);
res = mono_mempool_contains_addr (memory_manager->_mp, addr);
alloc_unlock (memory_manager);
return res;
}
void *
(mono_mem_manager_code_reserve) (MonoMemoryManager *memory_manager, int size)
{
void *res;
mono_mem_manager_lock (memory_manager);
res = mono_code_manager_reserve (memory_manager->code_mp, size);
mono_mem_manager_unlock (memory_manager);
return res;
}
void *
mono_mem_manager_code_reserve_align (MonoMemoryManager *memory_manager, int size, int alignment)
{
void *res;
mono_mem_manager_lock (memory_manager);
res = mono_code_manager_reserve_align (memory_manager->code_mp, size, alignment);
mono_mem_manager_unlock (memory_manager);
return res;
}
void
mono_mem_manager_code_commit (MonoMemoryManager *memory_manager, void *data, int size, int newsize)
{
mono_mem_manager_lock (memory_manager);
mono_code_manager_commit (memory_manager->code_mp, data, size, newsize);
mono_mem_manager_unlock (memory_manager);
}
/*
* mono_mem_manager_code_foreach:
* Iterate over the code thunks of the code manager of @memory_manager.
*
* The @func callback MUST not take any locks. If it really needs to, it must respect
* the locking rules of the runtime: http://www.mono-project.com/Mono:Runtime:Documentation:ThreadSafety
* LOCKING: Acquires the memory manager lock.
*/
void
mono_mem_manager_code_foreach (MonoMemoryManager *memory_manager, MonoCodeManagerFunc func, void *user_data)
{
mono_mem_manager_lock (memory_manager);
mono_code_manager_foreach (memory_manager->code_mp, func, user_data);
mono_mem_manager_unlock (memory_manager);
}
gpointer
(mono_mem_manager_alloc0_lock_free) (MonoMemoryManager *memory_manager, guint size)
{
return lock_free_mempool_alloc0 (memory_manager->lock_free_mp, size);
}
//107, 131, 163
#define HASH_TABLE_SIZE 163
static MonoMemoryManager *mem_manager_cache [HASH_TABLE_SIZE];
static gint32 mem_manager_cache_hit, mem_manager_cache_miss;
static guint32
mix_hash (uintptr_t source)
{
unsigned int hash = source;
// Actual hash
hash = (((hash * 215497) >> 16) ^ ((hash * 1823231) + hash));
// Mix in highest bits on 64-bit systems only
if (sizeof (source) > 4)
hash = hash ^ ((source >> 31) >> 1);
return hash;
}
static guint32
hash_alcs (MonoAssemblyLoadContext **alcs, int nalcs)
{
guint32 res = 0;
int i;
for (i = 0; i < nalcs; ++i)
res += mix_hash ((size_t)alcs [i]);
return res;
}
static gboolean
match_mem_manager (MonoMemoryManager *mm, MonoAssemblyLoadContext **alcs, int nalcs)
{
int j, k;
if (mm->n_alcs != nalcs)
return FALSE;
/* The order might differ so check all pairs */
for (j = 0; j < nalcs; ++j) {
for (k = 0; k < nalcs; ++k)
if (mm->alcs [k] == alcs [j])
break;
if (k == nalcs)
/* Not found */
break;
}
return j == nalcs;
}
static MonoMemoryManager*
mem_manager_cache_get (MonoAssemblyLoadContext **alcs, int nalcs)
{
guint32 hash_code = hash_alcs (alcs, nalcs);
int index = hash_code % HASH_TABLE_SIZE;
MonoMemoryManager *mm = mem_manager_cache [index];
if (!mm || !match_mem_manager (mm, alcs, nalcs)) {
UnlockedIncrement (&mem_manager_cache_miss);
return NULL;
}
UnlockedIncrement (&mem_manager_cache_hit);
return mm;
}
static void
mem_manager_cache_add (MonoMemoryManager *mem_manager)
{
guint32 hash_code = hash_alcs (mem_manager->alcs, mem_manager->n_alcs);
int index = hash_code % HASH_TABLE_SIZE;
mem_manager_cache [index] = mem_manager;
}
static MonoMemoryManager*
get_mem_manager_for_alcs (MonoAssemblyLoadContext **alcs, int nalcs)
{
MonoAssemblyLoadContext *alc;
GPtrArray *mem_managers;
MonoMemoryManager *res;
gboolean collectible;
/* Can happen for dynamic images */
if (nalcs == 0)
return mono_alc_get_default ()->memory_manager;
/* Common case */
if (nalcs == 1)
return alcs [0]->memory_manager;
collectible = FALSE;
for (int i = 0; i < nalcs; ++i)
collectible |= alcs [i]->collectible;
if (!collectible)
/* Can use the default alc */
return mono_alc_get_default ()->memory_manager;
// Check in a lock free cache
res = mem_manager_cache_get (alcs, nalcs);
if (res)
return res;
/*
* Find an existing mem manager for these ALCs.
* This can exist even if the cache lookup fails since the cache is very simple.
*/
/* We can search any ALC in the list, use the first one for now */
alc = alcs [0];
mono_alc_memory_managers_lock (alc);
mem_managers = alc->generic_memory_managers;
res = NULL;
for (int mindex = 0; mindex < mem_managers->len; ++mindex) {
MonoMemoryManager *mm = (MonoMemoryManager*)g_ptr_array_index (mem_managers, mindex);
if (match_mem_manager (mm, alcs, nalcs)) {
res = mm;
break;
}
}
mono_alc_memory_managers_unlock (alc);
if (res)
return res;
/* Create new mem manager */
res = mono_mem_manager_new (alcs, nalcs, collectible);
res->is_generic = TRUE;
/* The hashes are lazily inited in metadata.c */
/* Register it into its ALCs */
for (int i = 0; i < nalcs; ++i) {
mono_alc_memory_managers_lock (alcs [i]);
g_ptr_array_add (alcs [i]->generic_memory_managers, res);
mono_alc_memory_managers_unlock (alcs [i]);
}
mono_memory_barrier ();
mem_manager_cache_add (res);
return res;
}
/*
* mono_mem_manager_get_generic:
*
* Return a memory manager for allocating memory owned by the set of IMAGES.
*/
MonoMemoryManager*
mono_mem_manager_get_generic (MonoImage **images, int nimages)
{
MonoAssemblyLoadContext **alcs = g_newa (MonoAssemblyLoadContext*, nimages);
int nalcs, j;
/* Collect the set of ALCs owning the images */
nalcs = 0;
for (int i = 0; i < nimages; ++i) {
MonoAssemblyLoadContext *alc = mono_image_get_alc (images [i]);
if (!alc)
continue;
/* O(n^2), but shouldn't be a problem in practice */
for (j = 0; j < nalcs; ++j)
if (alcs [j] == alc)
break;
if (j == nalcs)
alcs [nalcs ++] = alc;
}
return get_mem_manager_for_alcs (alcs, nalcs);
}
/*
* mono_mem_manager_merge:
*
* Return a mem manager which depends on the ALCs of MM1/MM2.
*/
MonoMemoryManager*
mono_mem_manager_merge (MonoMemoryManager *mm1, MonoMemoryManager *mm2)
{
MonoAssemblyLoadContext **alcs;
// Common case
if (mm1 == mm2)
return mm1;
alcs = g_newa (MonoAssemblyLoadContext*, mm1->n_alcs + mm2->n_alcs);
memcpy (alcs, mm1->alcs, sizeof (MonoAssemblyLoadContext*) * mm1->n_alcs);
int nalcs = mm1->n_alcs;
/* O(n^2), but shouldn't be a problem in practice */
for (int i = 0; i < mm2->n_alcs; ++i) {
int j;
for (j = 0; j < mm1->n_alcs; ++j) {
if (mm2->alcs [i] == mm1->alcs [j])
break;
}
if (j == mm1->n_alcs)
alcs [nalcs ++] = mm2->alcs [i];
}
return get_mem_manager_for_alcs (alcs, nalcs);
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/src/libunwind/src/s390x/Lglobal.c | #define UNW_LOCAL_ONLY
#include "config.h"
#include <libunwind.h>
#if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY)
#include "Gglobal.c"
#endif
| #define UNW_LOCAL_ONLY
#include "config.h"
#include <libunwind.h>
#if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY)
#include "Gglobal.c"
#endif
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/tests/Interop/COM/NativeServer/InspectableTesting.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#pragma once
#include "Servers.h"
class InspectableTesting : public UnknownImpl, public IInspectableTesting, public IInspectableTesting2
{
public: // IInspectableTesting2
DEF_FUNC(Add)(
/*[in]*/ int a,
/*[in]*/ int b,
/*[out] [retval] */ int* retVal)
{
*retVal = a + b;
return S_OK;
}
public: // IInspectable
STDMETHOD(GetIids)(
/* [out] */ ULONG *iidCount,
/* [size_is][size_is][out] */ IID **iids)
{
return E_NOTIMPL;
}
STDMETHOD(GetRuntimeClassName)(
/* [out] */ HSTRING *className)
{
className = nullptr;
return S_OK;
}
STDMETHOD(GetTrustLevel)(
/* [out] */ TrustLevel *trustLevel)
{
*trustLevel = TrustLevel::FullTrust;
return S_OK;
}
public: // IUnknown
STDMETHOD(QueryInterface)(
/* [in] */ REFIID riid,
/* [iid_is][out] */ _COM_Outptr_ void __RPC_FAR *__RPC_FAR *ppvObject)
{
return DoQueryInterface(riid, ppvObject, static_cast<IInspectableTesting *>(this), static_cast<IInspectableTesting2 *>(this), static_cast<IInspectable*>(this));
}
DEFINE_REF_COUNTING();
};
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#pragma once
#include "Servers.h"
class InspectableTesting : public UnknownImpl, public IInspectableTesting, public IInspectableTesting2
{
public: // IInspectableTesting2
DEF_FUNC(Add)(
/*[in]*/ int a,
/*[in]*/ int b,
/*[out] [retval] */ int* retVal)
{
*retVal = a + b;
return S_OK;
}
public: // IInspectable
STDMETHOD(GetIids)(
/* [out] */ ULONG *iidCount,
/* [size_is][size_is][out] */ IID **iids)
{
return E_NOTIMPL;
}
STDMETHOD(GetRuntimeClassName)(
/* [out] */ HSTRING *className)
{
className = nullptr;
return S_OK;
}
STDMETHOD(GetTrustLevel)(
/* [out] */ TrustLevel *trustLevel)
{
*trustLevel = TrustLevel::FullTrust;
return S_OK;
}
public: // IUnknown
STDMETHOD(QueryInterface)(
/* [in] */ REFIID riid,
/* [iid_is][out] */ _COM_Outptr_ void __RPC_FAR *__RPC_FAR *ppvObject)
{
return DoQueryInterface(riid, ppvObject, static_cast<IInspectableTesting *>(this), static_cast<IInspectableTesting2 *>(this), static_cast<IInspectable*>(this));
}
DEFINE_REF_COUNTING();
};
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/inc/posterror.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// UtilCode.h
//
// Utility functions implemented in UtilCode.lib.
//
//*****************************************************************************
#ifndef __PostError_h__
#define __PostError_h__
#include "switches.h"
//*****************************************************************************
// This function will post an error for the client. If the LOWORD(hrRpt) can
// be found as a valid error message, then it is loaded and formatted with
// the arguments passed in. If it cannot be found, then the error is checked
// against FormatMessage to see if it is a system error. System errors are
// not formatted so no add'l parameters are required. If any errors in this
// process occur, hrRpt is returned for the client with no error posted.
//*****************************************************************************
extern "C"
HRESULT __cdecl PostError( // Returned error.
HRESULT hrRpt, // Reported error.
...); // Error arguments.
extern "C"
HRESULT __cdecl PostErrorVA( // Returned error.
HRESULT hrRpt, // Reported error.
va_list marker); // Error arguments.
//*****************************************************************************
// This function formats an error message, but doesn't fill the IErrorInfo.
//*****************************************************************************
HRESULT __cdecl FormatRuntimeErrorVa(
_Out_writes_(cchMsg) WCHAR *rcMsg, // Buffer into which to format.
ULONG cchMsg, // Size of buffer, characters.
HRESULT hrRpt, // The HR to report.
va_list marker); // Optional args.
HRESULT __cdecl FormatRuntimeError(
_Out_writes_(cchMsg) WCHAR *rcMsg, // Buffer into which to format.
ULONG cchMsg, // Size of buffer, characters.
HRESULT hrRpt, // The HR to report.
...); // Optional args.
#endif // __PostError_h__
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// UtilCode.h
//
// Utility functions implemented in UtilCode.lib.
//
//*****************************************************************************
#ifndef __PostError_h__
#define __PostError_h__
#include "switches.h"
//*****************************************************************************
// This function will post an error for the client. If the LOWORD(hrRpt) can
// be found as a valid error message, then it is loaded and formatted with
// the arguments passed in. If it cannot be found, then the error is checked
// against FormatMessage to see if it is a system error. System errors are
// not formatted so no add'l parameters are required. If any errors in this
// process occur, hrRpt is returned for the client with no error posted.
//*****************************************************************************
extern "C"
HRESULT __cdecl PostError( // Returned error.
HRESULT hrRpt, // Reported error.
...); // Error arguments.
extern "C"
HRESULT __cdecl PostErrorVA( // Returned error.
HRESULT hrRpt, // Reported error.
va_list marker); // Error arguments.
//*****************************************************************************
// This function formats an error message, but doesn't fill the IErrorInfo.
//*****************************************************************************
HRESULT __cdecl FormatRuntimeErrorVa(
_Out_writes_(cchMsg) WCHAR *rcMsg, // Buffer into which to format.
ULONG cchMsg, // Size of buffer, characters.
HRESULT hrRpt, // The HR to report.
va_list marker); // Optional args.
HRESULT __cdecl FormatRuntimeError(
_Out_writes_(cchMsg) WCHAR *rcMsg, // Buffer into which to format.
ULONG cchMsg, // Size of buffer, characters.
HRESULT hrRpt, // The HR to report.
...); // Optional args.
#endif // __PostError_h__
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/vm/onstackreplacement.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// ===========================================================================
// File: onstackreplacement.h
//
// ===========================================================================
#ifndef ON_STACK_REPLACEMENT_H
#define ON_STACK_REPLACEMENT_H
#ifdef FEATURE_ON_STACK_REPLACEMENT
#include "daccess.h"
#include "eehash.h"
// PerPatchpointInfo is the runtime state tracked for each active patchpoint.
//
// A patchpoint becomes active when the JIT_HELP_PATCHPOINT helper is invoked
// by jitted code.
//
struct PerPatchpointInfo
{
PerPatchpointInfo() :
m_osrMethodCode(0),
m_patchpointCount(0),
m_flags(0)
#if _DEBUG
, m_patchpointId(0)
#endif
{
}
// Flag bits
enum
{
patchpoint_triggered = 0x1,
patchpoint_invalid = 0x2
};
// The OSR method entry point for this patchpoint.
// NULL if no method has yet been jitted, or jitting failed.
PCODE m_osrMethodCode;
// Number of times jitted code has called the helper at this patchpoint.
LONG m_patchpointCount;
// Status of this patchpoint
LONG m_flags;
#if _DEBUG
int m_patchpointId;
#endif
};
typedef DPTR(PerPatchpointInfo) PTR_PerPatchpointInfo;
typedef EEPtrHashTable JitPatchpointTable;
// OnStackReplacementManager keeps track of mapping from patchpoint id to
// per patchpoint info.
//
// Patchpoint identity is currently the return address of the helper call
// in the jitted code.
//
class OnStackReplacementManager
{
#if DACCESS_COMPILE
public:
OnStackReplacementManager(LoaderAllocator *) {};
#else
public:
static void StaticInitialize();
public:
OnStackReplacementManager(LoaderAllocator * loaderHeaAllocator);
public:
PerPatchpointInfo* GetPerPatchpointInfo(PCODE ip);
#endif // DACCESS_COMPILE
private:
enum
{
INITIAL_TABLE_SIZE = 10
};
static CrstStatic s_lock;
#if _DEBUG
static int s_patchpointId;
#endif
private:
PTR_LoaderAllocator m_allocator;
JitPatchpointTable m_jitPatchpointTable;
};
#else // FEATURE_TIERED_COMPILATION
class OnStackReplacementManager
{
public:
static void StaticInitialize() {}
public:
OnStackReplacementManager(LoaderAllocator *) {}
};
#endif // FEATURE_TIERED_COMPILATION
typedef DPTR(OnStackReplacementManager) PTR_OnStackReplacementManager;
#endif // ON_STACK_REPLACEMENT_H
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// ===========================================================================
// File: onstackreplacement.h
//
// ===========================================================================
#ifndef ON_STACK_REPLACEMENT_H
#define ON_STACK_REPLACEMENT_H
#ifdef FEATURE_ON_STACK_REPLACEMENT
#include "daccess.h"
#include "eehash.h"
// PerPatchpointInfo is the runtime state tracked for each active patchpoint.
//
// A patchpoint becomes active when the JIT_HELP_PATCHPOINT helper is invoked
// by jitted code.
//
struct PerPatchpointInfo
{
PerPatchpointInfo() :
m_osrMethodCode(0),
m_patchpointCount(0),
m_flags(0)
#if _DEBUG
, m_patchpointId(0)
#endif
{
}
// Flag bits
enum
{
patchpoint_triggered = 0x1,
patchpoint_invalid = 0x2
};
// The OSR method entry point for this patchpoint.
// NULL if no method has yet been jitted, or jitting failed.
PCODE m_osrMethodCode;
// Number of times jitted code has called the helper at this patchpoint.
LONG m_patchpointCount;
// Status of this patchpoint
LONG m_flags;
#if _DEBUG
int m_patchpointId;
#endif
};
typedef DPTR(PerPatchpointInfo) PTR_PerPatchpointInfo;
typedef EEPtrHashTable JitPatchpointTable;
// OnStackReplacementManager keeps track of mapping from patchpoint id to
// per patchpoint info.
//
// Patchpoint identity is currently the return address of the helper call
// in the jitted code.
//
class OnStackReplacementManager
{
#if DACCESS_COMPILE
public:
OnStackReplacementManager(LoaderAllocator *) {};
#else
public:
static void StaticInitialize();
public:
OnStackReplacementManager(LoaderAllocator * loaderHeaAllocator);
public:
PerPatchpointInfo* GetPerPatchpointInfo(PCODE ip);
#endif // DACCESS_COMPILE
private:
enum
{
INITIAL_TABLE_SIZE = 10
};
static CrstStatic s_lock;
#if _DEBUG
static int s_patchpointId;
#endif
private:
PTR_LoaderAllocator m_allocator;
JitPatchpointTable m_jitPatchpointTable;
};
#else // FEATURE_TIERED_COMPILATION
class OnStackReplacementManager
{
public:
static void StaticInitialize() {}
public:
OnStackReplacementManager(LoaderAllocator *) {}
};
#endif // FEATURE_TIERED_COMPILATION
typedef DPTR(OnStackReplacementManager) PTR_OnStackReplacementManager;
#endif // ON_STACK_REPLACEMENT_H
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/oft14.txt | <?xml version="1.0" encoding="utf-8"?>Hello, world! | <?xml version="1.0" encoding="utf-8"?>Hello, world! | -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/src/libunwind/include/remote.h | #ifndef REMOTE_H
#define REMOTE_H
/* Helper functions for accessing (remote) memory. These functions
assume that all addresses are naturally aligned (e.g., 32-bit
quantity is stored at a 32-bit-aligned address. */
#ifdef UNW_LOCAL_ONLY
static inline int
fetch8 (unw_addr_space_t as, unw_accessors_t *a,
unw_word_t *addr, int8_t *valp, void *arg)
{
*valp = *(int8_t *) (uintptr_t) *addr;
*addr += 1;
return 0;
}
static inline int
fetch16 (unw_addr_space_t as, unw_accessors_t *a,
unw_word_t *addr, int16_t *valp, void *arg)
{
*valp = *(int16_t *) (uintptr_t) *addr;
*addr += 2;
return 0;
}
static inline int
fetch32 (unw_addr_space_t as, unw_accessors_t *a,
unw_word_t *addr, int32_t *valp, void *arg)
{
*valp = *(int32_t *) (uintptr_t) *addr;
*addr += 4;
return 0;
}
static inline int
fetchw (unw_addr_space_t as, unw_accessors_t *a,
unw_word_t *addr, unw_word_t *valp, void *arg)
{
*valp = *(unw_word_t *) (uintptr_t) *addr;
*addr += sizeof (unw_word_t);
return 0;
}
#else /* !UNW_LOCAL_ONLY */
#define WSIZE (sizeof (unw_word_t))
static inline int
fetch8 (unw_addr_space_t as, unw_accessors_t *a,
unw_word_t *addr, int8_t *valp, void *arg)
{
unw_word_t val, aligned_addr = *addr & -WSIZE, off = *addr - aligned_addr;
int ret;
*addr += 1;
ret = (*a->access_mem) (as, aligned_addr, &val, 0, arg);
#if UNW_BYTE_ORDER == UNW_LITTLE_ENDIAN
val >>= 8*off;
#else
val >>= 8*(WSIZE - 1 - off);
#endif
*valp = val & 0xff;
return ret;
}
static inline int
fetch16 (unw_addr_space_t as, unw_accessors_t *a,
unw_word_t *addr, int16_t *valp, void *arg)
{
unw_word_t val, aligned_addr = *addr & -WSIZE, off = *addr - aligned_addr;
int ret;
if ((off & 0x1) != 0)
return -UNW_EINVAL;
*addr += 2;
ret = (*a->access_mem) (as, aligned_addr, &val, 0, arg);
#if UNW_BYTE_ORDER == UNW_LITTLE_ENDIAN
val >>= 8*off;
#else
val >>= 8*(WSIZE - 2 - off);
#endif
*valp = val & 0xffff;
return ret;
}
static inline int
fetch32 (unw_addr_space_t as, unw_accessors_t *a,
unw_word_t *addr, int32_t *valp, void *arg)
{
unw_word_t val, aligned_addr = *addr & -WSIZE, off = *addr - aligned_addr;
int ret;
if ((off & 0x3) != 0)
return -UNW_EINVAL;
*addr += 4;
ret = (*a->access_mem) (as, aligned_addr, &val, 0, arg);
#if UNW_BYTE_ORDER == UNW_LITTLE_ENDIAN
val >>= 8*off;
#else
val >>= 8*(WSIZE - 4 - off);
#endif
*valp = val & 0xffffffff;
return ret;
}
static inline int
fetchw (unw_addr_space_t as, unw_accessors_t *a,
unw_word_t *addr, unw_word_t *valp, void *arg)
{
int ret;
ret = (*a->access_mem) (as, *addr, valp, 0, arg);
*addr += WSIZE;
return ret;
}
#endif /* !UNW_LOCAL_ONLY */
#endif /* REMOTE_H */
| #ifndef REMOTE_H
#define REMOTE_H
/* Helper functions for accessing (remote) memory. These functions
assume that all addresses are naturally aligned (e.g., 32-bit
quantity is stored at a 32-bit-aligned address. */
#ifdef UNW_LOCAL_ONLY
static inline int
fetch8 (unw_addr_space_t as, unw_accessors_t *a,
unw_word_t *addr, int8_t *valp, void *arg)
{
*valp = *(int8_t *) (uintptr_t) *addr;
*addr += 1;
return 0;
}
static inline int
fetch16 (unw_addr_space_t as, unw_accessors_t *a,
unw_word_t *addr, int16_t *valp, void *arg)
{
*valp = *(int16_t *) (uintptr_t) *addr;
*addr += 2;
return 0;
}
static inline int
fetch32 (unw_addr_space_t as, unw_accessors_t *a,
unw_word_t *addr, int32_t *valp, void *arg)
{
*valp = *(int32_t *) (uintptr_t) *addr;
*addr += 4;
return 0;
}
static inline int
fetchw (unw_addr_space_t as, unw_accessors_t *a,
unw_word_t *addr, unw_word_t *valp, void *arg)
{
*valp = *(unw_word_t *) (uintptr_t) *addr;
*addr += sizeof (unw_word_t);
return 0;
}
#else /* !UNW_LOCAL_ONLY */
#define WSIZE (sizeof (unw_word_t))
static inline int
fetch8 (unw_addr_space_t as, unw_accessors_t *a,
unw_word_t *addr, int8_t *valp, void *arg)
{
unw_word_t val, aligned_addr = *addr & -WSIZE, off = *addr - aligned_addr;
int ret;
*addr += 1;
ret = (*a->access_mem) (as, aligned_addr, &val, 0, arg);
#if UNW_BYTE_ORDER == UNW_LITTLE_ENDIAN
val >>= 8*off;
#else
val >>= 8*(WSIZE - 1 - off);
#endif
*valp = val & 0xff;
return ret;
}
static inline int
fetch16 (unw_addr_space_t as, unw_accessors_t *a,
unw_word_t *addr, int16_t *valp, void *arg)
{
unw_word_t val, aligned_addr = *addr & -WSIZE, off = *addr - aligned_addr;
int ret;
if ((off & 0x1) != 0)
return -UNW_EINVAL;
*addr += 2;
ret = (*a->access_mem) (as, aligned_addr, &val, 0, arg);
#if UNW_BYTE_ORDER == UNW_LITTLE_ENDIAN
val >>= 8*off;
#else
val >>= 8*(WSIZE - 2 - off);
#endif
*valp = val & 0xffff;
return ret;
}
static inline int
fetch32 (unw_addr_space_t as, unw_accessors_t *a,
unw_word_t *addr, int32_t *valp, void *arg)
{
unw_word_t val, aligned_addr = *addr & -WSIZE, off = *addr - aligned_addr;
int ret;
if ((off & 0x3) != 0)
return -UNW_EINVAL;
*addr += 4;
ret = (*a->access_mem) (as, aligned_addr, &val, 0, arg);
#if UNW_BYTE_ORDER == UNW_LITTLE_ENDIAN
val >>= 8*off;
#else
val >>= 8*(WSIZE - 4 - off);
#endif
*valp = val & 0xffffffff;
return ret;
}
static inline int
fetchw (unw_addr_space_t as, unw_accessors_t *a,
unw_word_t *addr, unw_word_t *valp, void *arg)
{
int ret;
ret = (*a->access_mem) (as, *addr, valp, 0, arg);
*addr += WSIZE;
return ret;
}
#endif /* !UNW_LOCAL_ONLY */
#endif /* REMOTE_H */
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/tests/palsuite/miscellaneous/GlobalMemoryStatusEx/test1/test.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Source : test.c
**
** Purpose: Test for GlobalMemoryStatusEx() function
**
**
**=========================================================*/
#include <palsuite.h>
PALTEST(miscellaneous_GlobalMemoryStatusEx_test1_paltest_globalmemorystatusex_test1, "miscellaneous/GlobalMemoryStatusEx/test1/paltest_globalmemorystatusex_test1")
{
MEMORYSTATUSEX memoryStatus;
/*
* Initialize the PAL and return FAILURE if this fails
*/
if(0 != (PAL_Initialize(argc, argv)))
{
return FAIL;
}
if (!GlobalMemoryStatusEx(&memoryStatus))
{
Fail("ERROR: GlobalMemoryStatusEx failed.");
}
printf("GlobalMemoryStatusEx:\n");
printf(" ullTotalPhys: %llu\n", memoryStatus.ullTotalPhys);
printf(" ullAvailPhys: %llu\n", memoryStatus.ullAvailPhys);
printf(" ullTotalVirtual: %llu\n", memoryStatus.ullTotalVirtual);
printf(" ullAvailVirtual: %llu\n", memoryStatus.ullAvailVirtual);
printf(" ullTotalPageFile: %llu\n", memoryStatus.ullTotalPageFile);
printf(" ullAvailPageFile: %llu\n", memoryStatus.ullAvailPageFile);
printf(" ullAvailExtendedVirtual: %llu\n", memoryStatus.ullAvailExtendedVirtual);
printf(" dwMemoryLoad: %u\n", memoryStatus.dwMemoryLoad);
if (memoryStatus.ullTotalPhys == 0 ||
memoryStatus.ullAvailPhys == 0 ||
memoryStatus.ullTotalVirtual == 0 ||
memoryStatus.ullAvailVirtual == 0
)
{
Fail("ERROR: GlobalMemoryStatusEx succeeded, but returned zero physical of virtual memory sizes.");
}
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Source : test.c
**
** Purpose: Test for GlobalMemoryStatusEx() function
**
**
**=========================================================*/
#include <palsuite.h>
PALTEST(miscellaneous_GlobalMemoryStatusEx_test1_paltest_globalmemorystatusex_test1, "miscellaneous/GlobalMemoryStatusEx/test1/paltest_globalmemorystatusex_test1")
{
MEMORYSTATUSEX memoryStatus;
/*
* Initialize the PAL and return FAILURE if this fails
*/
if(0 != (PAL_Initialize(argc, argv)))
{
return FAIL;
}
if (!GlobalMemoryStatusEx(&memoryStatus))
{
Fail("ERROR: GlobalMemoryStatusEx failed.");
}
printf("GlobalMemoryStatusEx:\n");
printf(" ullTotalPhys: %llu\n", memoryStatus.ullTotalPhys);
printf(" ullAvailPhys: %llu\n", memoryStatus.ullAvailPhys);
printf(" ullTotalVirtual: %llu\n", memoryStatus.ullTotalVirtual);
printf(" ullAvailVirtual: %llu\n", memoryStatus.ullAvailVirtual);
printf(" ullTotalPageFile: %llu\n", memoryStatus.ullTotalPageFile);
printf(" ullAvailPageFile: %llu\n", memoryStatus.ullAvailPageFile);
printf(" ullAvailExtendedVirtual: %llu\n", memoryStatus.ullAvailExtendedVirtual);
printf(" dwMemoryLoad: %u\n", memoryStatus.dwMemoryLoad);
if (memoryStatus.ullTotalPhys == 0 ||
memoryStatus.ullAvailPhys == 0 ||
memoryStatus.ullTotalVirtual == 0 ||
memoryStatus.ullAvailVirtual == 0
)
{
Fail("ERROR: GlobalMemoryStatusEx succeeded, but returned zero physical of virtual memory sizes.");
}
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/native/libs/System.Globalization.Native/pal_timeZoneInfo.c | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include "pal_errors_internal.h"
#include "pal_locale_internal.h"
#include "pal_timeZoneInfo.h"
#define DISPLAY_NAME_LENGTH 256 // arbitrarily large, to be safe
#define TZID_LENGTH 64 // arbitrarily large, to be safe
// For descriptions of the following patterns, see https://unicode-org.github.io/icu/userguide/format_parse/datetime/#date-field-symbol-table
static const UChar GENERIC_PATTERN_UCHAR[] = {'v', 'v', 'v', 'v', '\0'}; // u"vvvv"
static const UChar GENERIC_LOCATION_PATTERN_UCHAR[] = {'V', 'V', 'V', 'V', '\0'}; // u"VVVV"
static const UChar EXEMPLAR_CITY_PATTERN_UCHAR[] = {'V', 'V', 'V', '\0'}; // u"VVV"
/*
Convert Windows Time Zone Id to IANA Id
*/
int32_t GlobalizationNative_WindowsIdToIanaId(const UChar* windowsId, const char* region, UChar* ianaId, int32_t ianaIdLength)
{
UErrorCode status = U_ZERO_ERROR;
if (ucal_getTimeZoneIDForWindowsID_ptr != NULL)
{
int32_t ianaIdFilledLength = ucal_getTimeZoneIDForWindowsID(windowsId, -1, region, ianaId, ianaIdLength, &status);
if (U_SUCCESS(status))
{
return ianaIdFilledLength;
}
}
// Failed
return 0;
}
/*
Convert IANA Time Zone Id to Windows Id
*/
int32_t GlobalizationNative_IanaIdToWindowsId(const UChar* ianaId, UChar* windowsId, int32_t windowsIdLength)
{
UErrorCode status = U_ZERO_ERROR;
if (ucal_getWindowsTimeZoneID_ptr != NULL)
{
int32_t windowsIdFilledLength = ucal_getWindowsTimeZoneID(ianaId, -1, windowsId, windowsIdLength, &status);
if (U_SUCCESS(status))
{
return windowsIdFilledLength;
}
}
// Failed
return 0;
}
/*
Private function to get the standard and daylight names from the ICU Calendar API.
*/
static void GetTimeZoneDisplayName_FromCalendar(const char* locale, const UChar* timeZoneId, const UDate timestamp, UCalendarDisplayNameType type, UChar* result, int32_t resultLength, UErrorCode* err)
{
// Examples: "Pacific Standard Time" (standard)
// "Pacific Daylight Time" (daylight)
// (-1 == timeZoneId is null terminated)
UCalendar* calendar = ucal_open(timeZoneId, -1, locale, UCAL_DEFAULT, err);
if (U_SUCCESS(*err))
{
ucal_setMillis(calendar, timestamp, err);
if (U_SUCCESS(*err))
{
ucal_getTimeZoneDisplayName(calendar, type, locale, result, resultLength, err);
}
ucal_close(calendar);
}
}
/*
Private function to get the various forms of generic time zone names using patterns with the ICU Date Formatting API.
*/
static void GetTimeZoneDisplayName_FromPattern(const char* locale, const UChar* timeZoneId, const UDate timestamp, const UChar* pattern, UChar* result, int32_t resultLength, UErrorCode* err)
{
// (-1 == timeZoneId and pattern are null terminated)
UDateFormat* dateFormatter = udat_open(UDAT_PATTERN, UDAT_PATTERN, locale, timeZoneId, -1, pattern, -1, err);
if (U_SUCCESS(*err))
{
udat_format(dateFormatter, timestamp, result, resultLength, NULL, err);
udat_close(dateFormatter);
}
}
/*
Private function to modify the generic display name to better suit our needs.
*/
static void FixupTimeZoneGenericDisplayName(const char* locale, const UChar* timeZoneId, const UDate timestamp, UChar* genericName, UErrorCode* err)
{
// By default, some time zones will still give a standard name instead of the generic
// non-location name.
//
// For example, given the following zones and their English results:
// America/Denver => "Mountain Time"
// America/Phoenix => "Mountain Standard Time"
//
// We prefer that all time zones in the same metazone have the same generic name,
// such that they are grouped together when combined with their base offset, location
// and sorted alphabetically. For example:
//
// (UTC-07:00) Mountain Time (Denver)
// (UTC-07:00) Mountain Time (Phoenix)
//
// Without modification, they would show as:
//
// (UTC-07:00) Mountain Standard Time (Phoenix)
// (UTC-07:00) Mountain Time (Denver)
//
// When combined with the rest of the time zones, having them not grouped together
// makes it harder to locate the correct time zone from a list.
//
// The reason we get the standard name is because TR35 (LDML) defines a rule that
// states that metazone generic names should use standard names if there is no DST
// transition within a +/- 184 day range near the timestamp being translated.
//
// See the "Type Fallback" section in:
// https://www.unicode.org/reports/tr35/tr35-dates.html#Using_Time_Zone_Names
//
// This might make sense when attached to an exact timestamp, but doesn't work well
// when using the generic name to pick a time zone from a list.
// Note that this test only happens when the generic name comes from a metazone.
//
// ICU implements this test in TZGNCore::formatGenericNonLocationName in
// https://github.com/unicode-org/icu/blob/master/icu4c/source/i18n/tzgnames.cpp
// (Note the kDstCheckRange 184-day constant.)
//
// The rest of the code below is a workaround for this issue. When the generic
// name and standard name match, we search through the other time zones for one
// having the same base offset and standard name but a shorter generic name.
// That will at least keep them grouped together, though note that if there aren't
// any found that means all of them are using the standard name.
//
// If ICU ever adds an API to get a generic name that doesn't perform the
// 184-day check on metazone names, then test for the existence of that new API
// and use that instead of this workaround. Keep the workaround for when the
// new API is not available.
// Get the standard name for this time zone. (-1 == timeZoneId is null terminated)
// Note that we leave the calendar open and close it later so we can also get the base offset.
UChar standardName[DISPLAY_NAME_LENGTH];
UCalendar* calendar = ucal_open(timeZoneId, -1, locale, UCAL_DEFAULT, err);
if (U_FAILURE(*err))
{
return;
}
ucal_setMillis(calendar, timestamp, err);
if (U_FAILURE(*err))
{
ucal_close(calendar);
return;
}
ucal_getTimeZoneDisplayName(calendar, UCAL_STANDARD, locale, standardName, DISPLAY_NAME_LENGTH, err);
if (U_FAILURE(*err))
{
ucal_close(calendar);
return;
}
// Ensure the generic name is the same as the standard name.
if (u_strcmp(genericName, standardName) != 0)
{
ucal_close(calendar);
return;
}
// Get some details for later comparison.
const int32_t originalGenericNameActualLength = u_strlen(genericName);
const int32_t baseOffset = ucal_get(calendar, UCAL_ZONE_OFFSET, err);
if (U_FAILURE(*err))
{
ucal_close(calendar);
return;
}
// Allocate some additional strings for test values.
UChar testTimeZoneId[TZID_LENGTH];
UChar testDisplayName[DISPLAY_NAME_LENGTH];
UChar testDisplayName2[DISPLAY_NAME_LENGTH];
// Enumerate over all the time zones having the same base offset.
UEnumeration* pEnum = ucal_openTimeZoneIDEnumeration(UCAL_ZONE_TYPE_CANONICAL_LOCATION, NULL, &baseOffset, err);
if (U_FAILURE(*err))
{
uenum_close(pEnum);
ucal_close(calendar);
return;
}
int count = uenum_count(pEnum, err);
if (U_FAILURE(*err))
{
uenum_close(pEnum);
ucal_close(calendar);
return;
}
for (int i = 0; i < count; i++)
{
// Get a time zone id from the enumeration to test with.
int32_t testIdLength;
const char* testId = uenum_next(pEnum, &testIdLength, err);
if (U_FAILURE(*err))
{
// There shouldn't be a failure in enumeration, but if there was then exit.
uenum_close(pEnum);
ucal_close(calendar);
return;
}
// Make a UChar[] version of the test time zone id for use in the API calls.
u_uastrncpy(testTimeZoneId, testId, TZID_LENGTH);
// Get the standard name from the test time zone.
GetTimeZoneDisplayName_FromCalendar(locale, testTimeZoneId, timestamp, UCAL_STANDARD, testDisplayName, DISPLAY_NAME_LENGTH, err);
if (U_FAILURE(*err))
{
// Failed, but keep trying through the rest of the loop in case the failure is specific to this test zone.
continue;
}
// See if the test time zone has a different standard name.
if (u_strcmp(testDisplayName, standardName) != 0)
{
// It has a different standard name. We can't use it.
continue;
}
// Get the generic name from the test time zone.
GetTimeZoneDisplayName_FromPattern(locale, testTimeZoneId, timestamp, GENERIC_PATTERN_UCHAR, testDisplayName, DISPLAY_NAME_LENGTH, err);
if (U_FAILURE(*err))
{
// Failed, but keep trying through the rest of the loop in case the failure is specific to this test zone.
continue;
}
// See if the test time zone has a longer (or same size) generic name.
if (u_strlen(testDisplayName) >= originalGenericNameActualLength)
{
// The test time zone's generic name isn't any shorter than the one we already have.
continue;
}
// We probably have found a better generic name. But just to be safe, make sure the test zone isn't
// using a generic name that is specific to a particular location. For example, "Antarctica/Troll"
// uses "Troll Time" as a generic name, but "Greenwich Mean Time" as a standard name. We don't
// want other zones that use "Greenwich Mean Time" to be labeled as "Troll Time".
GetTimeZoneDisplayName_FromPattern(locale, testTimeZoneId, timestamp, GENERIC_LOCATION_PATTERN_UCHAR, testDisplayName2, DISPLAY_NAME_LENGTH, err);
if (U_FAILURE(*err))
{
// Failed, but keep trying through the rest of the loop in case the failure is specific to this test zone.
continue;
}
if (u_strcmp(testDisplayName, testDisplayName2) != 0)
{
// We have found a better generic name. Use it.
u_strcpy(genericName, testDisplayName);
break;
}
}
uenum_close(pEnum);
ucal_close(calendar);
}
/*
Gets the localized display name that is currently in effect for the specified time zone.
*/
ResultCode GlobalizationNative_GetTimeZoneDisplayName(const UChar* localeName, const UChar* timeZoneId, TimeZoneDisplayNameType type, UChar* result, int32_t resultLength)
{
UErrorCode err = U_ZERO_ERROR;
char locale[ULOC_FULLNAME_CAPACITY];
GetLocale(localeName, locale, ULOC_FULLNAME_CAPACITY, false, &err);
if (U_FAILURE(err))
{
return GetResultCode(err);
}
// Note: Due to how CLDR Metazones work, a past or future timestamp might use a different set of display names
// than are currently in effect.
//
// See https://github.com/unicode-org/cldr/blob/master/common/supplemental/metaZones.xml
//
// Example: As of writing this, Africa/Algiers is in the Europe_Central metazone,
// which has a standard-time name of "Central European Standard Time" (in English).
// However, in some previous dates, it used the Europe_Western metazone,
// having the standard-time name of "Western European Standard Time" (in English).
// Only the *current* name will be returned.
//
// TODO: Add a parameter for the timestamp that is used when getting the display names instead of
// getting "now" on the following line. Everything else should be using this timestamp.
// For now, since TimeZoneInfo presently uses only a single set of display names, we will
// use the names associated with the *current* date and time.
UDate timestamp = ucal_getNow();
switch (type)
{
case TimeZoneDisplayName_Standard:
GetTimeZoneDisplayName_FromCalendar(locale, timeZoneId, timestamp, UCAL_STANDARD, result, resultLength, &err);
break;
case TimeZoneDisplayName_DaylightSavings:
GetTimeZoneDisplayName_FromCalendar(locale, timeZoneId, timestamp, UCAL_DST, result, resultLength, &err);
break;
case TimeZoneDisplayName_Generic:
GetTimeZoneDisplayName_FromPattern(locale, timeZoneId, timestamp, GENERIC_PATTERN_UCHAR, result, resultLength, &err);
if (U_SUCCESS(err))
{
FixupTimeZoneGenericDisplayName(locale, timeZoneId, timestamp, result, &err);
}
break;
case TimeZoneDisplayName_GenericLocation:
GetTimeZoneDisplayName_FromPattern(locale, timeZoneId, timestamp, GENERIC_LOCATION_PATTERN_UCHAR, result, resultLength, &err);
break;
case TimeZoneDisplayName_ExemplarCity:
GetTimeZoneDisplayName_FromPattern(locale, timeZoneId, timestamp, EXEMPLAR_CITY_PATTERN_UCHAR, result, resultLength, &err);
break;
default:
return UnknownError;
}
return GetResultCode(err);
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include "pal_errors_internal.h"
#include "pal_locale_internal.h"
#include "pal_timeZoneInfo.h"
#define DISPLAY_NAME_LENGTH 256 // arbitrarily large, to be safe
#define TZID_LENGTH 64 // arbitrarily large, to be safe
// For descriptions of the following patterns, see https://unicode-org.github.io/icu/userguide/format_parse/datetime/#date-field-symbol-table
static const UChar GENERIC_PATTERN_UCHAR[] = {'v', 'v', 'v', 'v', '\0'}; // u"vvvv"
static const UChar GENERIC_LOCATION_PATTERN_UCHAR[] = {'V', 'V', 'V', 'V', '\0'}; // u"VVVV"
static const UChar EXEMPLAR_CITY_PATTERN_UCHAR[] = {'V', 'V', 'V', '\0'}; // u"VVV"
/*
Convert Windows Time Zone Id to IANA Id
*/
int32_t GlobalizationNative_WindowsIdToIanaId(const UChar* windowsId, const char* region, UChar* ianaId, int32_t ianaIdLength)
{
UErrorCode status = U_ZERO_ERROR;
if (ucal_getTimeZoneIDForWindowsID_ptr != NULL)
{
int32_t ianaIdFilledLength = ucal_getTimeZoneIDForWindowsID(windowsId, -1, region, ianaId, ianaIdLength, &status);
if (U_SUCCESS(status))
{
return ianaIdFilledLength;
}
}
// Failed
return 0;
}
/*
Convert IANA Time Zone Id to Windows Id
*/
int32_t GlobalizationNative_IanaIdToWindowsId(const UChar* ianaId, UChar* windowsId, int32_t windowsIdLength)
{
UErrorCode status = U_ZERO_ERROR;
if (ucal_getWindowsTimeZoneID_ptr != NULL)
{
int32_t windowsIdFilledLength = ucal_getWindowsTimeZoneID(ianaId, -1, windowsId, windowsIdLength, &status);
if (U_SUCCESS(status))
{
return windowsIdFilledLength;
}
}
// Failed
return 0;
}
/*
Private function to get the standard and daylight names from the ICU Calendar API.
*/
static void GetTimeZoneDisplayName_FromCalendar(const char* locale, const UChar* timeZoneId, const UDate timestamp, UCalendarDisplayNameType type, UChar* result, int32_t resultLength, UErrorCode* err)
{
// Examples: "Pacific Standard Time" (standard)
// "Pacific Daylight Time" (daylight)
// (-1 == timeZoneId is null terminated)
UCalendar* calendar = ucal_open(timeZoneId, -1, locale, UCAL_DEFAULT, err);
if (U_SUCCESS(*err))
{
ucal_setMillis(calendar, timestamp, err);
if (U_SUCCESS(*err))
{
ucal_getTimeZoneDisplayName(calendar, type, locale, result, resultLength, err);
}
ucal_close(calendar);
}
}
/*
Private function to get the various forms of generic time zone names using patterns with the ICU Date Formatting API.
*/
static void GetTimeZoneDisplayName_FromPattern(const char* locale, const UChar* timeZoneId, const UDate timestamp, const UChar* pattern, UChar* result, int32_t resultLength, UErrorCode* err)
{
// (-1 == timeZoneId and pattern are null terminated)
UDateFormat* dateFormatter = udat_open(UDAT_PATTERN, UDAT_PATTERN, locale, timeZoneId, -1, pattern, -1, err);
if (U_SUCCESS(*err))
{
udat_format(dateFormatter, timestamp, result, resultLength, NULL, err);
udat_close(dateFormatter);
}
}
/*
Private function to modify the generic display name to better suit our needs.
*/
static void FixupTimeZoneGenericDisplayName(const char* locale, const UChar* timeZoneId, const UDate timestamp, UChar* genericName, UErrorCode* err)
{
// By default, some time zones will still give a standard name instead of the generic
// non-location name.
//
// For example, given the following zones and their English results:
// America/Denver => "Mountain Time"
// America/Phoenix => "Mountain Standard Time"
//
// We prefer that all time zones in the same metazone have the same generic name,
// such that they are grouped together when combined with their base offset, location
// and sorted alphabetically. For example:
//
// (UTC-07:00) Mountain Time (Denver)
// (UTC-07:00) Mountain Time (Phoenix)
//
// Without modification, they would show as:
//
// (UTC-07:00) Mountain Standard Time (Phoenix)
// (UTC-07:00) Mountain Time (Denver)
//
// When combined with the rest of the time zones, having them not grouped together
// makes it harder to locate the correct time zone from a list.
//
// The reason we get the standard name is because TR35 (LDML) defines a rule that
// states that metazone generic names should use standard names if there is no DST
// transition within a +/- 184 day range near the timestamp being translated.
//
// See the "Type Fallback" section in:
// https://www.unicode.org/reports/tr35/tr35-dates.html#Using_Time_Zone_Names
//
// This might make sense when attached to an exact timestamp, but doesn't work well
// when using the generic name to pick a time zone from a list.
// Note that this test only happens when the generic name comes from a metazone.
//
// ICU implements this test in TZGNCore::formatGenericNonLocationName in
// https://github.com/unicode-org/icu/blob/master/icu4c/source/i18n/tzgnames.cpp
// (Note the kDstCheckRange 184-day constant.)
//
// The rest of the code below is a workaround for this issue. When the generic
// name and standard name match, we search through the other time zones for one
// having the same base offset and standard name but a shorter generic name.
// That will at least keep them grouped together, though note that if there aren't
// any found that means all of them are using the standard name.
//
// If ICU ever adds an API to get a generic name that doesn't perform the
// 184-day check on metazone names, then test for the existence of that new API
// and use that instead of this workaround. Keep the workaround for when the
// new API is not available.
// Get the standard name for this time zone. (-1 == timeZoneId is null terminated)
// Note that we leave the calendar open and close it later so we can also get the base offset.
UChar standardName[DISPLAY_NAME_LENGTH];
UCalendar* calendar = ucal_open(timeZoneId, -1, locale, UCAL_DEFAULT, err);
if (U_FAILURE(*err))
{
return;
}
ucal_setMillis(calendar, timestamp, err);
if (U_FAILURE(*err))
{
ucal_close(calendar);
return;
}
ucal_getTimeZoneDisplayName(calendar, UCAL_STANDARD, locale, standardName, DISPLAY_NAME_LENGTH, err);
if (U_FAILURE(*err))
{
ucal_close(calendar);
return;
}
// Ensure the generic name is the same as the standard name.
if (u_strcmp(genericName, standardName) != 0)
{
ucal_close(calendar);
return;
}
// Get some details for later comparison.
const int32_t originalGenericNameActualLength = u_strlen(genericName);
const int32_t baseOffset = ucal_get(calendar, UCAL_ZONE_OFFSET, err);
if (U_FAILURE(*err))
{
ucal_close(calendar);
return;
}
// Allocate some additional strings for test values.
UChar testTimeZoneId[TZID_LENGTH];
UChar testDisplayName[DISPLAY_NAME_LENGTH];
UChar testDisplayName2[DISPLAY_NAME_LENGTH];
// Enumerate over all the time zones having the same base offset.
UEnumeration* pEnum = ucal_openTimeZoneIDEnumeration(UCAL_ZONE_TYPE_CANONICAL_LOCATION, NULL, &baseOffset, err);
if (U_FAILURE(*err))
{
uenum_close(pEnum);
ucal_close(calendar);
return;
}
int count = uenum_count(pEnum, err);
if (U_FAILURE(*err))
{
uenum_close(pEnum);
ucal_close(calendar);
return;
}
for (int i = 0; i < count; i++)
{
// Get a time zone id from the enumeration to test with.
int32_t testIdLength;
const char* testId = uenum_next(pEnum, &testIdLength, err);
if (U_FAILURE(*err))
{
// There shouldn't be a failure in enumeration, but if there was then exit.
uenum_close(pEnum);
ucal_close(calendar);
return;
}
// Make a UChar[] version of the test time zone id for use in the API calls.
u_uastrncpy(testTimeZoneId, testId, TZID_LENGTH);
// Get the standard name from the test time zone.
GetTimeZoneDisplayName_FromCalendar(locale, testTimeZoneId, timestamp, UCAL_STANDARD, testDisplayName, DISPLAY_NAME_LENGTH, err);
if (U_FAILURE(*err))
{
// Failed, but keep trying through the rest of the loop in case the failure is specific to this test zone.
continue;
}
// See if the test time zone has a different standard name.
if (u_strcmp(testDisplayName, standardName) != 0)
{
// It has a different standard name. We can't use it.
continue;
}
// Get the generic name from the test time zone.
GetTimeZoneDisplayName_FromPattern(locale, testTimeZoneId, timestamp, GENERIC_PATTERN_UCHAR, testDisplayName, DISPLAY_NAME_LENGTH, err);
if (U_FAILURE(*err))
{
// Failed, but keep trying through the rest of the loop in case the failure is specific to this test zone.
continue;
}
// See if the test time zone has a longer (or same size) generic name.
if (u_strlen(testDisplayName) >= originalGenericNameActualLength)
{
// The test time zone's generic name isn't any shorter than the one we already have.
continue;
}
// We probably have found a better generic name. But just to be safe, make sure the test zone isn't
// using a generic name that is specific to a particular location. For example, "Antarctica/Troll"
// uses "Troll Time" as a generic name, but "Greenwich Mean Time" as a standard name. We don't
// want other zones that use "Greenwich Mean Time" to be labeled as "Troll Time".
GetTimeZoneDisplayName_FromPattern(locale, testTimeZoneId, timestamp, GENERIC_LOCATION_PATTERN_UCHAR, testDisplayName2, DISPLAY_NAME_LENGTH, err);
if (U_FAILURE(*err))
{
// Failed, but keep trying through the rest of the loop in case the failure is specific to this test zone.
continue;
}
if (u_strcmp(testDisplayName, testDisplayName2) != 0)
{
// We have found a better generic name. Use it.
u_strcpy(genericName, testDisplayName);
break;
}
}
uenum_close(pEnum);
ucal_close(calendar);
}
/*
Gets the localized display name that is currently in effect for the specified time zone.
*/
ResultCode GlobalizationNative_GetTimeZoneDisplayName(const UChar* localeName, const UChar* timeZoneId, TimeZoneDisplayNameType type, UChar* result, int32_t resultLength)
{
UErrorCode err = U_ZERO_ERROR;
char locale[ULOC_FULLNAME_CAPACITY];
GetLocale(localeName, locale, ULOC_FULLNAME_CAPACITY, false, &err);
if (U_FAILURE(err))
{
return GetResultCode(err);
}
// Note: Due to how CLDR Metazones work, a past or future timestamp might use a different set of display names
// than are currently in effect.
//
// See https://github.com/unicode-org/cldr/blob/master/common/supplemental/metaZones.xml
//
// Example: As of writing this, Africa/Algiers is in the Europe_Central metazone,
// which has a standard-time name of "Central European Standard Time" (in English).
// However, in some previous dates, it used the Europe_Western metazone,
// having the standard-time name of "Western European Standard Time" (in English).
// Only the *current* name will be returned.
//
// TODO: Add a parameter for the timestamp that is used when getting the display names instead of
// getting "now" on the following line. Everything else should be using this timestamp.
// For now, since TimeZoneInfo presently uses only a single set of display names, we will
// use the names associated with the *current* date and time.
UDate timestamp = ucal_getNow();
switch (type)
{
case TimeZoneDisplayName_Standard:
GetTimeZoneDisplayName_FromCalendar(locale, timeZoneId, timestamp, UCAL_STANDARD, result, resultLength, &err);
break;
case TimeZoneDisplayName_DaylightSavings:
GetTimeZoneDisplayName_FromCalendar(locale, timeZoneId, timestamp, UCAL_DST, result, resultLength, &err);
break;
case TimeZoneDisplayName_Generic:
GetTimeZoneDisplayName_FromPattern(locale, timeZoneId, timestamp, GENERIC_PATTERN_UCHAR, result, resultLength, &err);
if (U_SUCCESS(err))
{
FixupTimeZoneGenericDisplayName(locale, timeZoneId, timestamp, result, &err);
}
break;
case TimeZoneDisplayName_GenericLocation:
GetTimeZoneDisplayName_FromPattern(locale, timeZoneId, timestamp, GENERIC_LOCATION_PATTERN_UCHAR, result, resultLength, &err);
break;
case TimeZoneDisplayName_ExemplarCity:
GetTimeZoneDisplayName_FromPattern(locale, timeZoneId, timestamp, EXEMPLAR_CITY_PATTERN_UCHAR, result, resultLength, &err);
break;
default:
return UnknownError;
}
return GetResultCode(err);
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/tools/superpmi/mcs/verbprintjiteeversion.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "standardpch.h"
#include "verbprintjiteeversion.h"
#include "runtimedetails.h"
// Print the GUID in format a5eec3a4-4176-43a7-8c2b-a05b551d4f49
//
// This is useful for tools that want to determine which MCH file to use for a
// particular JIT: if the JIT and MCS are built from the same source tree, then
// use this function to print out the JITEEVersion, and use that to determine
// which MCH files to use.
//
int verbPrintJITEEVersion::DoWork()
{
const GUID& g = JITEEVersionIdentifier;
printf("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",
g.Data1,
g.Data2,
g.Data3,
g.Data4[0],
g.Data4[1],
g.Data4[2],
g.Data4[3],
g.Data4[4],
g.Data4[5],
g.Data4[6],
g.Data4[7]);
return 0;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "standardpch.h"
#include "verbprintjiteeversion.h"
#include "runtimedetails.h"
// Print the GUID in format a5eec3a4-4176-43a7-8c2b-a05b551d4f49
//
// This is useful for tools that want to determine which MCH file to use for a
// particular JIT: if the JIT and MCS are built from the same source tree, then
// use this function to print out the JITEEVersion, and use that to determine
// which MCH files to use.
//
int verbPrintJITEEVersion::DoWork()
{
const GUID& g = JITEEVersionIdentifier;
printf("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",
g.Data1,
g.Data2,
g.Data3,
g.Data4[0],
g.Data4[1],
g.Data4[2],
g.Data4[3],
g.Data4[4],
g.Data4[5],
g.Data4[6],
g.Data4[7]);
return 0;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/src/libunwind/src/hppa/tables.c | #include "unwind_i.h"
static inline int
is_local_addr_space (unw_addr_space_t as)
{
extern unw_addr_space_t _ULhppa_local_addr_space;
return (as == _Uhppa_local_addr_space
#ifndef UNW_REMOTE_ONLY
|| as == _ULhppa_local_addr_space
#endif
);
}
HIDDEN int
tdep_find_proc_info (unw_addr_space_t as, unw_word_t ip,
unw_proc_info_t *pi, int need_unwind_info, void *arg)
{
printf ("%s: begging to get implemented...\n", __FUNCTION__);
return 0;
}
HIDDEN int
tdep_search_unwind_table (unw_addr_space_t as, unw_word_t ip,
unw_dyn_info_t *di,
unw_proc_info_t *pi, int need_unwind_info, void *arg)
{
printf ("%s: the biggest beggar of them all...\n", __FUNCTION__);
return 0;
}
HIDDEN void
tdep_put_unwind_info (unw_addr_space_t as, unw_proc_info_t *pi, void *arg)
{
if (!pi->unwind_info)
return;
if (!is_local_addr_space (as))
{
free (pi->unwind_info);
pi->unwind_info = NULL;
}
}
| #include "unwind_i.h"
static inline int
is_local_addr_space (unw_addr_space_t as)
{
extern unw_addr_space_t _ULhppa_local_addr_space;
return (as == _Uhppa_local_addr_space
#ifndef UNW_REMOTE_ONLY
|| as == _ULhppa_local_addr_space
#endif
);
}
HIDDEN int
tdep_find_proc_info (unw_addr_space_t as, unw_word_t ip,
unw_proc_info_t *pi, int need_unwind_info, void *arg)
{
printf ("%s: begging to get implemented...\n", __FUNCTION__);
return 0;
}
HIDDEN int
tdep_search_unwind_table (unw_addr_space_t as, unw_word_t ip,
unw_dyn_info_t *di,
unw_proc_info_t *pi, int need_unwind_info, void *arg)
{
printf ("%s: the biggest beggar of them all...\n", __FUNCTION__);
return 0;
}
HIDDEN void
tdep_put_unwind_info (unw_addr_space_t as, unw_proc_info_t *pi, void *arg)
{
if (!pi->unwind_info)
return;
if (!is_local_addr_space (as))
{
free (pi->unwind_info);
pi->unwind_info = NULL;
}
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/mono/mono/utils/mono-hwcap-s390x.c | /**
* \file
* S/390x hardware feature detection
*
* Authors:
* Alex Rønne Petersen ([email protected])
* Elijah Taylor ([email protected])
* Miguel de Icaza ([email protected])
* Neale Ferguson ([email protected])
* Paolo Molaro ([email protected])
* Rodrigo Kumpera ([email protected])
* Sebastien Pouliot ([email protected])
* Zoltan Varga ([email protected])
*
* Copyright 2003 Ximian, Inc.
* Copyright 2003-2011 Novell, Inc
* Copyright 2006 Broadcom
* Copyright 2007-2008 Andreas Faerber
* Copyright 2011-2013 Xamarin Inc
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "mono/utils/mono-hwcap.h"
#include <signal.h>
typedef struct {
uint8_t n3:1; // 000 - N3 instructions
uint8_t zi:1; // 001 - z/Arch installed
uint8_t za:1; // 002 - z/Arch active
uint8_t date:1; // 003 - DAT-enhancement
uint8_t idtes:1; // 004 - IDTE-segment tables
uint8_t idter:1; // 005 - IDTE-region tables
uint8_t asnlx:1; // 006 - ASN-LX reuse
uint8_t stfle:1; // 007 - STFLE
uint8_t edat1:1; // 008 - EDAT 1
uint8_t srs:1; // 009 - Sense-Running-Status
uint8_t csske:1; // 010 - Conditional SSKE
uint8_t ctf:1; // 011 - Configuration-topology
uint8_t ibm01:1; // 012 - Assigned to IBM
uint8_t ipter:1; // 013 - IPTE-range
uint8_t nqks:1; // 014 - Nonquiescing key-setting
uint8_t ibm02:1; // 015 - Assigned to IBM
uint8_t etf2:1; // 016 - Extended translation 2
uint8_t msa:1; // 017 - Message security assist 1
uint8_t ld:1; // 018 - Long displacement
uint8_t ldh:1; // 019 - Long displacement high perf
uint8_t mas:1; // 020 - HFP multiply-add-subtract
uint8_t eif:1; // 021 - Extended immediate
uint8_t etf3:1; // 022 - Extended translation 3
uint8_t hux:1; // 023 - HFP unnormalized extension
uint8_t etf2e:1; // 024 - Extended translation enhanced 2
uint8_t stckf:1; // 025 - Store clock fast
uint8_t pe:1; // 026 - Parsing enhancement
uint8_t mvcos:1; // 027 - Move with optional specs
uint8_t tods:1; // 028 - TOD steering
uint8_t x000:1; // 029 - Undefined
uint8_t etf3e:1; // 030 - ETF3 enhancement
uint8_t ecput:1; // 031 - Extract CPU time
uint8_t csst:1; // 032 - Compare swap and store
uint8_t csst2:1; // 033 - Compare swap and store 2
uint8_t gie:1; // 034 - General instructions extension
uint8_t ee:1; // 035 - Execute extensions
uint8_t em:1; // 036 - Enhanced monitor
uint8_t fpe:1; // 037 - Floating point extension
uint8_t opcf:1; // 038 - Order-preserving-compression facility
uint8_t ibm03:1; // 039 - Assigned to IBM
uint8_t spp:1; // 040 - Set program parameters
uint8_t fpse:1; // 041 - FP support enhancement
uint8_t dfp:1; // 042 - DFP
uint8_t dfph:1; // 043 - DFP high performance
uint8_t pfpo:1; // 044 - PFPO instruction
uint8_t multi:1; // 045 - Multiple inc load/store on CC 1
uint8_t ibm04:1; // 046 - Assigned to IBM
uint8_t cmpsce:1; // 047 - CMPSC enhancement
uint8_t dfpzc:1; // 048 - DFP zoned conversion
uint8_t misc:1; // 049 - Multiple inc load and trap
uint8_t ctx:1; // 050 - Constrained transactional-execution
uint8_t ltlb:1; // 051 - Local TLB clearing
uint8_t ia:1; // 052 - Interlocked access
uint8_t lsoc2:1; // 053 - Load/store on CC 2
uint8_t eecf:1; // 054 - Entropy-encoding compression facility
uint8_t ibm05:1; // 055 - Assigned to IBM
uint8_t x003:1; // 056 - Undefined
uint8_t msa5:1; // 057 - Message security assist 5
uint8_t mie2:1; // 058 - Miscellaneous execution facility 2
uint8_t x005:1; // 059 - Undefined
uint8_t x006:1; // 060 - Undefined
uint8_t mie3:1; // 061 - Miscellaneous execution facility 3
uint8_t ibm06:1; // 062 - Assigned to IBM
uint8_t x008:1; // 063 - Undefined
uint8_t x009:1; // 064 - Undefined
uint8_t ibm07:1; // 065 - Assigned to IBM
uint8_t rrbm:1; // 066 - Reset reference bits multiple
uint8_t cmc:1; // 067 - CPU measurement counter
uint8_t cms:1; // 068 - CPU Measurement sampling
uint8_t ibm08:1; // 069 - Assigned to IBM
uint8_t ibm09:1; // 070 - Assigned to IBM
uint8_t ibm10:1; // 071 - Assigned to IBM
uint8_t ibm11:1; // 072 - Assigned to IBM
uint8_t txe:1; // 073 - Transactional execution
uint8_t sthy:1; // 074 - Store hypervisor information
uint8_t aefsi:1; // 075 - Access exception fetch/store indication
uint8_t msa3:1; // 076 - Message security assist 3
uint8_t msa4:1; // 077 - Message security assist 4
uint8_t edat2:1; // 078 - Enhanced DAT 2
uint8_t x010:1; // 079 - Undefined
uint8_t dfppc:1; // 080 - DFP packed conversion
uint8_t ppaf:1; // 081 - PPA in order facility
uint8_t x011:6; // 082-87 - Undefined
uint8_t x012[5]; // 088-127 - Undefined
uint8_t ibm12:1; // 128 - Assigned to IBM
uint8_t vec:1; // 129 - Vector facility
uint8_t iep:1; // 130 - Instruction Execution Protection Facility
uint8_t sea:1; // 131 - Side-effect-access Faility
uint8_t x013:1; // 132 - Undefined
uint8_t gs:1; // 133 - Guarded Storage Facility
uint8_t vpd:1; // 134 - Vector Packed Decimal Facility
uint8_t ve1:1; // 135 - Vector Enhancements Facilityty
uint8_t x014:2; // 136-137 - Undefined
uint8_t cazm:1; // 138 - Configuration-z/Architecture-arcitectural -mode Faciliy
uint8_t mef:1; // 139 - Multiple-epoch Facility ture-arcitectural -mode Faciliy
uint8_t ibm13:2; // 140-141 - Assigned to IBM
uint8_t sccm:1; // 142 - Store CPU counter multiple
uint8_t x015:1; // 143 - Assigned to IBM
uint8_t tpei:1; // 144 - Test Pending External Interrption Facility
uint8_t irbm:1; // 145 - Insert Reference Bits Multiple Facility
uint8_t mse8:1; // 146 - Message Security Assist Extension 8
uint8_t ibm14:1; // 147 - Reserved for IBM use
uint8_t vef2:1; // 148 - Vector Execution Facility 2
uint8_t mpsk:1; // 149 - Move Page and Set Key Facility
uint8_t x016:1; // 150 - Undefined
uint8_t dfcf:1; // 151 - Deflate Conversion Facility
uint8_t vpde:1; // 152 - Vector Packed Decimal Enhancement Facility
uint8_t x017:2; // 153-154 - Undefined
uint8_t msa9:1; // 155 - Message Security Assist Facility 9
uint8_t x018:4; // 156-159 - Undefined
uint8_t x019; // 160-167 - Undefined
uint8_t esac:1; // 168 - ESA/390 Compatibility Mode Facility
uint8_t x020:7; // 169-175 - Undefined
uint8_t x021[10]; // 176-256 Undefined
} __attribute__ ((__packed__)) __attribute__ ((__aligned__(8))) facilityList_t;
void
mono_hwcap_arch_init (void)
{
facilityList_t facs;
int lFacs = sizeof (facs) / 8;
__asm__ __volatile__ (
"lgfr\t%%r0,%1\n\t"
".insn\ts,0xb2b00000,%0\n\t"
: "=m" (facs)
: "r" (lFacs)
: "0", "cc"
);
mono_hwcap_s390x_has_fpe = facs.fpe;
mono_hwcap_s390x_has_vec = facs.vec;
mono_hwcap_s390x_has_mlt = facs.multi;
mono_hwcap_s390x_has_ia = facs.ia;
mono_hwcap_s390x_has_gie = facs.gie;
mono_hwcap_s390x_has_mie2 = facs.mie2;
mono_hwcap_s390x_has_mie3 = facs.mie3;
mono_hwcap_s390x_has_gs = facs.gs;
mono_hwcap_s390x_has_vef2 = facs.vef2;
mono_hwcap_s390x_has_eif = facs.eif;
}
| /**
* \file
* S/390x hardware feature detection
*
* Authors:
* Alex Rønne Petersen ([email protected])
* Elijah Taylor ([email protected])
* Miguel de Icaza ([email protected])
* Neale Ferguson ([email protected])
* Paolo Molaro ([email protected])
* Rodrigo Kumpera ([email protected])
* Sebastien Pouliot ([email protected])
* Zoltan Varga ([email protected])
*
* Copyright 2003 Ximian, Inc.
* Copyright 2003-2011 Novell, Inc
* Copyright 2006 Broadcom
* Copyright 2007-2008 Andreas Faerber
* Copyright 2011-2013 Xamarin Inc
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "mono/utils/mono-hwcap.h"
#include <signal.h>
typedef struct {
uint8_t n3:1; // 000 - N3 instructions
uint8_t zi:1; // 001 - z/Arch installed
uint8_t za:1; // 002 - z/Arch active
uint8_t date:1; // 003 - DAT-enhancement
uint8_t idtes:1; // 004 - IDTE-segment tables
uint8_t idter:1; // 005 - IDTE-region tables
uint8_t asnlx:1; // 006 - ASN-LX reuse
uint8_t stfle:1; // 007 - STFLE
uint8_t edat1:1; // 008 - EDAT 1
uint8_t srs:1; // 009 - Sense-Running-Status
uint8_t csske:1; // 010 - Conditional SSKE
uint8_t ctf:1; // 011 - Configuration-topology
uint8_t ibm01:1; // 012 - Assigned to IBM
uint8_t ipter:1; // 013 - IPTE-range
uint8_t nqks:1; // 014 - Nonquiescing key-setting
uint8_t ibm02:1; // 015 - Assigned to IBM
uint8_t etf2:1; // 016 - Extended translation 2
uint8_t msa:1; // 017 - Message security assist 1
uint8_t ld:1; // 018 - Long displacement
uint8_t ldh:1; // 019 - Long displacement high perf
uint8_t mas:1; // 020 - HFP multiply-add-subtract
uint8_t eif:1; // 021 - Extended immediate
uint8_t etf3:1; // 022 - Extended translation 3
uint8_t hux:1; // 023 - HFP unnormalized extension
uint8_t etf2e:1; // 024 - Extended translation enhanced 2
uint8_t stckf:1; // 025 - Store clock fast
uint8_t pe:1; // 026 - Parsing enhancement
uint8_t mvcos:1; // 027 - Move with optional specs
uint8_t tods:1; // 028 - TOD steering
uint8_t x000:1; // 029 - Undefined
uint8_t etf3e:1; // 030 - ETF3 enhancement
uint8_t ecput:1; // 031 - Extract CPU time
uint8_t csst:1; // 032 - Compare swap and store
uint8_t csst2:1; // 033 - Compare swap and store 2
uint8_t gie:1; // 034 - General instructions extension
uint8_t ee:1; // 035 - Execute extensions
uint8_t em:1; // 036 - Enhanced monitor
uint8_t fpe:1; // 037 - Floating point extension
uint8_t opcf:1; // 038 - Order-preserving-compression facility
uint8_t ibm03:1; // 039 - Assigned to IBM
uint8_t spp:1; // 040 - Set program parameters
uint8_t fpse:1; // 041 - FP support enhancement
uint8_t dfp:1; // 042 - DFP
uint8_t dfph:1; // 043 - DFP high performance
uint8_t pfpo:1; // 044 - PFPO instruction
uint8_t multi:1; // 045 - Multiple inc load/store on CC 1
uint8_t ibm04:1; // 046 - Assigned to IBM
uint8_t cmpsce:1; // 047 - CMPSC enhancement
uint8_t dfpzc:1; // 048 - DFP zoned conversion
uint8_t misc:1; // 049 - Multiple inc load and trap
uint8_t ctx:1; // 050 - Constrained transactional-execution
uint8_t ltlb:1; // 051 - Local TLB clearing
uint8_t ia:1; // 052 - Interlocked access
uint8_t lsoc2:1; // 053 - Load/store on CC 2
uint8_t eecf:1; // 054 - Entropy-encoding compression facility
uint8_t ibm05:1; // 055 - Assigned to IBM
uint8_t x003:1; // 056 - Undefined
uint8_t msa5:1; // 057 - Message security assist 5
uint8_t mie2:1; // 058 - Miscellaneous execution facility 2
uint8_t x005:1; // 059 - Undefined
uint8_t x006:1; // 060 - Undefined
uint8_t mie3:1; // 061 - Miscellaneous execution facility 3
uint8_t ibm06:1; // 062 - Assigned to IBM
uint8_t x008:1; // 063 - Undefined
uint8_t x009:1; // 064 - Undefined
uint8_t ibm07:1; // 065 - Assigned to IBM
uint8_t rrbm:1; // 066 - Reset reference bits multiple
uint8_t cmc:1; // 067 - CPU measurement counter
uint8_t cms:1; // 068 - CPU Measurement sampling
uint8_t ibm08:1; // 069 - Assigned to IBM
uint8_t ibm09:1; // 070 - Assigned to IBM
uint8_t ibm10:1; // 071 - Assigned to IBM
uint8_t ibm11:1; // 072 - Assigned to IBM
uint8_t txe:1; // 073 - Transactional execution
uint8_t sthy:1; // 074 - Store hypervisor information
uint8_t aefsi:1; // 075 - Access exception fetch/store indication
uint8_t msa3:1; // 076 - Message security assist 3
uint8_t msa4:1; // 077 - Message security assist 4
uint8_t edat2:1; // 078 - Enhanced DAT 2
uint8_t x010:1; // 079 - Undefined
uint8_t dfppc:1; // 080 - DFP packed conversion
uint8_t ppaf:1; // 081 - PPA in order facility
uint8_t x011:6; // 082-87 - Undefined
uint8_t x012[5]; // 088-127 - Undefined
uint8_t ibm12:1; // 128 - Assigned to IBM
uint8_t vec:1; // 129 - Vector facility
uint8_t iep:1; // 130 - Instruction Execution Protection Facility
uint8_t sea:1; // 131 - Side-effect-access Faility
uint8_t x013:1; // 132 - Undefined
uint8_t gs:1; // 133 - Guarded Storage Facility
uint8_t vpd:1; // 134 - Vector Packed Decimal Facility
uint8_t ve1:1; // 135 - Vector Enhancements Facilityty
uint8_t x014:2; // 136-137 - Undefined
uint8_t cazm:1; // 138 - Configuration-z/Architecture-arcitectural -mode Faciliy
uint8_t mef:1; // 139 - Multiple-epoch Facility ture-arcitectural -mode Faciliy
uint8_t ibm13:2; // 140-141 - Assigned to IBM
uint8_t sccm:1; // 142 - Store CPU counter multiple
uint8_t x015:1; // 143 - Assigned to IBM
uint8_t tpei:1; // 144 - Test Pending External Interrption Facility
uint8_t irbm:1; // 145 - Insert Reference Bits Multiple Facility
uint8_t mse8:1; // 146 - Message Security Assist Extension 8
uint8_t ibm14:1; // 147 - Reserved for IBM use
uint8_t vef2:1; // 148 - Vector Execution Facility 2
uint8_t mpsk:1; // 149 - Move Page and Set Key Facility
uint8_t x016:1; // 150 - Undefined
uint8_t dfcf:1; // 151 - Deflate Conversion Facility
uint8_t vpde:1; // 152 - Vector Packed Decimal Enhancement Facility
uint8_t x017:2; // 153-154 - Undefined
uint8_t msa9:1; // 155 - Message Security Assist Facility 9
uint8_t x018:4; // 156-159 - Undefined
uint8_t x019; // 160-167 - Undefined
uint8_t esac:1; // 168 - ESA/390 Compatibility Mode Facility
uint8_t x020:7; // 169-175 - Undefined
uint8_t x021[10]; // 176-256 Undefined
} __attribute__ ((__packed__)) __attribute__ ((__aligned__(8))) facilityList_t;
void
mono_hwcap_arch_init (void)
{
facilityList_t facs;
int lFacs = sizeof (facs) / 8;
__asm__ __volatile__ (
"lgfr\t%%r0,%1\n\t"
".insn\ts,0xb2b00000,%0\n\t"
: "=m" (facs)
: "r" (lFacs)
: "0", "cc"
);
mono_hwcap_s390x_has_fpe = facs.fpe;
mono_hwcap_s390x_has_vec = facs.vec;
mono_hwcap_s390x_has_mlt = facs.multi;
mono_hwcap_s390x_has_ia = facs.ia;
mono_hwcap_s390x_has_gie = facs.gie;
mono_hwcap_s390x_has_mie2 = facs.mie2;
mono_hwcap_s390x_has_mie3 = facs.mie3;
mono_hwcap_s390x_has_gs = facs.gs;
mono_hwcap_s390x_has_vef2 = facs.vef2;
mono_hwcap_s390x_has_eif = facs.eif;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/inc/eventtrace.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: eventtrace.h
// Abstract: This module implements Event Tracing support. This includes
// eventtracebase.h, and adds VM-specific ETW helpers to support features like type
// logging, allocation logging, and gc heap walk logging.
//
//
//
//
// #EventTracing
// Windows
// ETW (Event Tracing for Windows) is a high-performance, low overhead and highly scalable
// tracing facility provided by the Windows Operating System. ETW is available on Win2K and above. There are
// four main types of components in ETW: event providers, controllers, consumers, and event trace sessions.
// An event provider is a logical entity that writes events to ETW sessions. The event provider must register
// a provider ID with ETW through the registration API. A provider first registers with ETW and writes events
// from various points in the code by invoking the ETW logging API. When a provider is enabled dynamically by
// the ETW controller application, calls to the logging API sends events to a specific trace session
// designated by the controller. Each event sent by the event provider to the trace session consists of a
// fixed header that includes event metadata and additional variable user-context data. CLR is an event
// provider.
// Mac
// DTrace is similar to ETW and has been made to look like ETW at most of the places.
// For convenience, it is called ETM (Event Tracing for Mac) and exists only on the Mac Leopard OS
// ============================================================================
#ifndef _VMEVENTTRACE_H_
#define _VMEVENTTRACE_H_
#include "eventtracebase.h"
#include "gcinterface.h"
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// Scan context handed to the GC when heap roots/objects are walked on behalf
// of the profiling API and/or ETW.  Extends the GC's ScanContext with slots
// the profiler/ETW callbacks use to carry per-walk state.
struct ProfilingScanContext : ScanContext
{
    BOOL fProfilerPinned;   // set from the ctor param; TRUE when the profiler initiated this scan
    void * pvEtwContext;    // opaque per-walk slot; starts NULL (name suggests ETW heap-walk state -- verify against callers)
    void *pHeapId;          // opaque heap-identifier slot; starts NULL, presumably filled in during the walk

    ProfilingScanContext(BOOL fProfilerPinnedParam) : ScanContext()
    {
        LIMITED_METHOD_CONTRACT;

        pHeapId = NULL;
        fProfilerPinned = fProfilerPinnedParam;
        pvEtwContext = NULL;
#ifdef FEATURE_CONSERVATIVE_GC
        // To not confuse GCScan::GcScanRoots
        promotion = g_pConfig->GetGCConservative();
#endif
    }
};
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
#ifndef FEATURE_REDHAWK
namespace ETW
{
class LoggedTypesFromModule;
// We keep a hash of these to keep track of:
// * Which types have been logged through ETW (so we can avoid logging dupe Type
// events), and
// * GCSampledObjectAllocation stats to help with "smart sampling" which
// dynamically adjusts sampling rate of objects by type.
// See code:LoggedTypesFromModuleTraits
// Per-type bookkeeping used by TypeSystemLog: remembers that a type has been
// logged to ETW and holds the "smart sampling" statistics used to throttle
// GCSampledObjectAllocation events for that type.
struct TypeLoggingInfo
{
public:
    // Construct bookkeeping for the given type.
    TypeLoggingInfo(TypeHandle thParam)
    {
        Init(thParam);
    }

    // Default-construct with an empty TypeHandle.
    TypeLoggingInfo()
    {
        Init(TypeHandle());
    }

    // Reset all counters and associate this entry with `thParam`.
    // (Fix: removed a stray semicolon that previously followed this body,
    // which trips -Wextra-semi style warnings.)
    void Init(TypeHandle thParam)
    {
        th = thParam;
        dwTickOfCurrentTimeBucket = 0;
        dwAllocCountInCurrentBucket = 0;
        flAllocPerMSec = 0;
        dwAllocsToSkipPerSample = 0;
        dwAllocsSkippedForSample = 0;
        cbIgnoredSizeForSample = 0;
    }

    // The type this TypeLoggingInfo represents
    TypeHandle th;

    // Smart sampling

    // These bucket values remember stats of a particular time slice that are used to
    // help adjust the sampling rate
    DWORD dwTickOfCurrentTimeBucket;
    DWORD dwAllocCountInCurrentBucket;
    float flAllocPerMSec;

    // The number of data points to ignore before taking a "sample" (i.e., logging a
    // GCSampledObjectAllocation ETW event for this type)
    DWORD dwAllocsToSkipPerSample;

    // The current number of data points actually ignored for the current sample
    DWORD dwAllocsSkippedForSample;

    // The current count of bytes of objects of this type actually allocated (and
    // ignored) for the current sample
    SIZE_T cbIgnoredSizeForSample;
};
// Class to wrap all type system logic for ETW.  Tracks which types have been
// logged (so duplicate Type events are avoided) and implements the "smart
// sampling" throttle for GCSampledObjectAllocation events.
class TypeSystemLog
{
private:
    // Global type hash
    static AllLoggedTypes *s_pAllLoggedTypes;

    // An unsigned value that gets incremented whenever a global change is made.
    // When this occurs, threads must synchronize themselves with the global state.
    // Examples include unloading of modules and disabling of allocation sampling.
    static unsigned int s_nEpoch;

    // See code:ETW::TypeSystemLog::PostRegistrationInit
    static BOOL s_fHeapAllocEventEnabledOnStartup;
    static BOOL s_fHeapAllocHighEventEnabledNow;
    static BOOL s_fHeapAllocLowEventEnabledNow;

    // If COMPLUS_UNSUPPORTED_ETW_ObjectAllocationEventsPerTypePerSec is set, then
    // this is used to determine the event frequency, overriding
    // s_nDefaultMsBetweenEvents above (regardless of which
    // GCSampledObjectAllocation*Keyword was used)
    static int s_nCustomMsBetweenEvents;

public:
    // This customizes the type logging behavior in LogTypeAndParametersIfNecessary
    enum TypeLogBehavior
    {
        // Take lock, and consult hash table to see if this is the first time we've
        // encountered the type, in which case, log it
        kTypeLogBehaviorTakeLockAndLogIfFirstTime,

        // Don't take lock, don't consult hash table. Just log the type. (This is
        // used in cases when checking for dupe type logging isn't worth it, such as
        // when logging the finalization of an object.)
        kTypeLogBehaviorAlwaysLog,

        // When logging the type for GCSampledObjectAllocation events,
        // we already know we need to log the type (since we already
        // looked it up in the hash). But we would still need to consult the hash
        // for any type parameters, so kTypeLogBehaviorAlwaysLog isn't appropriate,
        // and this is used instead.
        kTypeLogBehaviorAlwaysLogTopLevelType,
    };

    // Initialization is split around ETW provider registration.
    static HRESULT PreRegistrationInit();
    static void PostRegistrationInit();

    static BOOL IsHeapAllocEventEnabled();
    static void SendObjectAllocatedEvent(Object * pObject);
    static CrstBase * GetHashCrst();
    static VOID LogTypeAndParametersIfNecessary(BulkTypeEventLogger * pBulkTypeEventLogger, ULONGLONG thAsAddr, TypeLogBehavior typeLogBehavior);
    static VOID OnModuleUnload(Module * pModule);
    static void OnKeywordsChanged();
    static void Cleanup();
    static VOID DeleteTypeHashNoLock(AllLoggedTypes **ppAllLoggedTypes);
    static VOID FlushObjectAllocationEvents();
    static UINT32 TypeLoadBegin();
    static VOID TypeLoadEnd(UINT32 typeLoad, TypeHandle th, UINT16 loadLevel);

private:
    static BOOL ShouldLogType(TypeHandle th);
    static TypeLoggingInfo LookupOrCreateTypeLoggingInfo(TypeHandle th, BOOL * pfCreatedNew, LoggedTypesFromModule ** ppLoggedTypesFromModule = NULL);
    static BOOL AddTypeToGlobalCacheIfNotExists(TypeHandle th, BOOL * pfCreatedNew);
    static BOOL AddOrReplaceTypeLoggingInfo(ETW::LoggedTypesFromModule * pLoggedTypesFromModule, const ETW::TypeLoggingInfo * pTypeLoggingInfo);
    static int GetDefaultMsBetweenEvents();
    static VOID OnTypesKeywordTurnedOff();
};
#endif // FEATURE_REDHAWK
// Class to wrap all GC logic for ETW.  Provides helpers to fire GC-related
// events and the callbacks used while the GC heap is walked for heap-dump
// events.
class GCLog
{
private:
    // When WPA triggers a GC, it gives us this unique number to append to our
    // GCStart event so WPA can correlate the CLR's GC with the JScript GC they
    // triggered at the same time.
    //
    // We set this value when the GC is triggered, and then retrieve the value on the
    // first subsequent FireGcStart() method call for a full, induced GC, assuming
    // that that's the GC that WPA triggered. This is imperfect, and if we were in
    // the act of beginning another full, induced GC (for some other reason), then
    // we'll attach this sequence number to that GC instead of to the WPA-induced GC,
    // but who cares? When parsing ETW logs later on, it's indistinguishable if both
    // GCs really were induced at around the same time.
#ifdef FEATURE_REDHAWK
    static volatile LONGLONG s_l64LastClientSequenceNumber;
#else // FEATURE_REDHAWK
    static Volatile<LONGLONG> s_l64LastClientSequenceNumber;
#endif // FEATURE_REDHAWK

public:
    // Event payloads; only the member matching the event being fired is
    // meaningful for any given use of the union.
    typedef union st_GCEventInfo {
        // These values are gotten from the gc_reason
        // in gcimpl.h
        typedef enum _GC_REASON {
            GC_ALLOC_SOH = 0,
            GC_INDUCED = 1,
            GC_LOWMEMORY = 2,
            GC_EMPTY = 3,
            GC_ALLOC_LOH = 4,
            GC_OOS_SOH = 5,
            GC_OOS_LOH = 6,
            GC_INDUCED_NOFORCE = 7,
            GC_GCSTRESS = 8,
            GC_LOWMEMORY_BLOCKING = 9,
            GC_INDUCED_COMPACTING = 10,
            GC_LOWMEMORY_HOST = 11
        } GC_REASON;

        typedef enum _GC_TYPE {
            GC_NGC = 0,   // blocking (non-concurrent) GC
            GC_BGC = 1,   // background GC
            GC_FGC = 2    // foreground GC performed during a background GC
        } GC_TYPE;

        // Payload for the GCStart event.
        struct {
            ULONG Count;
            ULONG Depth;
            GC_REASON Reason;
            GC_TYPE Type;
        } GCStart;

        // Payload for the SuspendEE event.
        struct {
            ULONG Reason;
            // This is only valid when SuspendEE is called by GC (ie, Reason is either
            // SUSPEND_FOR_GC or SUSPEND_FOR_GC_PREP.
            ULONG GcCount;
        } SuspendEE;

        // Payload for the GCSettings event.
        struct {
            ULONGLONG SegmentSize;
            ULONGLONG LargeObjectSegmentSize;
            BOOL ServerGC; // TRUE means it's server GC; FALSE means it's workstation.
        } GCSettings;
    } ETW_GC_INFO, *PETW_GC_INFO;

#ifdef FEATURE_EVENT_TRACE
    static VOID GCSettingsEvent();
#else
    // No-op when event tracing is compiled out.
    static VOID GCSettingsEvent() {};
#endif // FEATURE_EVENT_TRACE

    // Which parts of a heap dump are requested by the currently-enabled ETW
    // sessions/keywords.
    static BOOL ShouldWalkHeapObjectsForEtw();
    static BOOL ShouldWalkHeapRootsForEtw();
    static BOOL ShouldTrackMovementForEtw();

    // GC induction on behalf of diagnostics / ETW controllers.
    static HRESULT ForceGCForDiagnostics();
    static VOID ForceGC(LONGLONG l64ClientSequenceNumber);

    static VOID FireGcStart(ETW_GC_INFO * pGcInfo);

    // Per-root / per-object callbacks invoked while walking the heap.
    static VOID RootReference(
        LPVOID pvHandle,
        Object * pRootedNode,
        Object * pSecondaryNodeForDependentHandle,
        BOOL fDependentHandle,
        ProfilingScanContext * profilingScanContext,
        DWORD dwGCFlags,
        DWORD rootFlags);
    static VOID ObjectReference(
        ProfilerWalkHeapContext * profilerWalkHeapContext,
        Object * pObjReferenceSource,
        ULONGLONG typeID,
        ULONGLONG cRefs,
        Object ** rgObjReferenceTargets);
    static BOOL ShouldWalkStaticsAndCOMForEtw();
    static VOID WalkStaticsAndCOMForETW();
    static VOID EndHeapDump(ProfilerWalkHeapContext * profilerWalkHeapContext);

#ifdef FEATURE_EVENT_TRACE
    // Object-relocation notifications fired while the GC moves memory blocks.
    static VOID BeginMovedReferences(size_t * pProfilingContext);
    static VOID MovedReference(BYTE * pbMemBlockStart, BYTE * pbMemBlockEnd, ptrdiff_t cbRelocDistance, size_t profilingContext, BOOL fCompacting, BOOL fAllowProfApiNotification = TRUE);
    static VOID EndMovedReferences(size_t profilingContext, BOOL fAllowProfApiNotification = TRUE);
#else
    // TODO: Need to be implemented for PROFILING_SUPPORTED.
    static VOID BeginMovedReferences(size_t * pProfilingContext) {};
    static VOID MovedReference(BYTE * pbMemBlockStart, BYTE * pbMemBlockEnd, ptrdiff_t cbRelocDistance, size_t profilingContext, BOOL fCompacting, BOOL fAllowProfApiNotification = TRUE) {};
    static VOID EndMovedReferences(size_t profilingContext, BOOL fAllowProfApiNotification = TRUE) {};
#endif // FEATURE_EVENT_TRACE

    static VOID SendFinalizeObjectEvent(MethodTable * pMT, Object * pObj);
};
};
#endif //_VMEVENTTRACE_H_
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: eventtrace.h
// Abstract: This module implements Event Tracing support. This includes
// eventtracebase.h, and adds VM-specific ETW helpers to support features like type
// logging, allocation logging, and gc heap walk logging.
//
//
//
//
// #EventTracing
// Windows
// ETW (Event Tracing for Windows) is a high-performance, low overhead and highly scalable
// tracing facility provided by the Windows Operating System. ETW is available on Win2K and above. There are
// four main types of components in ETW: event providers, controllers, consumers, and event trace sessions.
// An event provider is a logical entity that writes events to ETW sessions. The event provider must register
// a provider ID with ETW through the registration API. A provider first registers with ETW and writes events
// from various points in the code by invoking the ETW logging API. When a provider is enabled dynamically by
// the ETW controller application, calls to the logging API sends events to a specific trace session
// designated by the controller. Each event sent by the event provider to the trace session consists of a
// fixed header that includes event metadata and additional variable user-context data. CLR is an event
// provider.
// Mac
// DTrace is similar to ETW and has been made to look like ETW at most of the places.
// For convenience, it is called ETM (Event Tracing for Mac) and exists only on the Mac Leopard OS
// ============================================================================
#ifndef _VMEVENTTRACE_H_
#define _VMEVENTTRACE_H_
#include "eventtracebase.h"
#include "gcinterface.h"
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// Scan context handed to the GC when heap roots/objects are walked on behalf
// of the profiling API and/or ETW.  Extends the GC's ScanContext with slots
// the profiler/ETW callbacks use to carry per-walk state.
struct ProfilingScanContext : ScanContext
{
    BOOL fProfilerPinned;   // set from the ctor param; TRUE when the profiler initiated this scan
    void * pvEtwContext;    // opaque per-walk slot; starts NULL (name suggests ETW heap-walk state -- verify against callers)
    void *pHeapId;          // opaque heap-identifier slot; starts NULL, presumably filled in during the walk

    ProfilingScanContext(BOOL fProfilerPinnedParam) : ScanContext()
    {
        LIMITED_METHOD_CONTRACT;

        pHeapId = NULL;
        fProfilerPinned = fProfilerPinnedParam;
        pvEtwContext = NULL;
#ifdef FEATURE_CONSERVATIVE_GC
        // To not confuse GCScan::GcScanRoots
        promotion = g_pConfig->GetGCConservative();
#endif
    }
};
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
#ifndef FEATURE_REDHAWK
namespace ETW
{
class LoggedTypesFromModule;
// We keep a hash of these to keep track of:
// * Which types have been logged through ETW (so we can avoid logging dupe Type
// events), and
// * GCSampledObjectAllocation stats to help with "smart sampling" which
// dynamically adjusts sampling rate of objects by type.
// See code:LoggedTypesFromModuleTraits
// Per-type bookkeeping used by TypeSystemLog: remembers that a type has been
// logged to ETW and holds the "smart sampling" statistics used to throttle
// GCSampledObjectAllocation events for that type.
struct TypeLoggingInfo
{
public:
    // Construct bookkeeping for the given type.
    TypeLoggingInfo(TypeHandle thParam)
    {
        Init(thParam);
    }

    // Default-construct with an empty TypeHandle.
    TypeLoggingInfo()
    {
        Init(TypeHandle());
    }

    // Reset all counters and associate this entry with `thParam`.
    // (Fix: removed a stray semicolon that previously followed this body,
    // which trips -Wextra-semi style warnings.)
    void Init(TypeHandle thParam)
    {
        th = thParam;
        dwTickOfCurrentTimeBucket = 0;
        dwAllocCountInCurrentBucket = 0;
        flAllocPerMSec = 0;
        dwAllocsToSkipPerSample = 0;
        dwAllocsSkippedForSample = 0;
        cbIgnoredSizeForSample = 0;
    }

    // The type this TypeLoggingInfo represents
    TypeHandle th;

    // Smart sampling

    // These bucket values remember stats of a particular time slice that are used to
    // help adjust the sampling rate
    DWORD dwTickOfCurrentTimeBucket;
    DWORD dwAllocCountInCurrentBucket;
    float flAllocPerMSec;

    // The number of data points to ignore before taking a "sample" (i.e., logging a
    // GCSampledObjectAllocation ETW event for this type)
    DWORD dwAllocsToSkipPerSample;

    // The current number of data points actually ignored for the current sample
    DWORD dwAllocsSkippedForSample;

    // The current count of bytes of objects of this type actually allocated (and
    // ignored) for the current sample
    SIZE_T cbIgnoredSizeForSample;
};
// Class to wrap all type system logic for ETW.  Tracks which types have been
// logged (so duplicate Type events are avoided) and implements the "smart
// sampling" throttle for GCSampledObjectAllocation events.
class TypeSystemLog
{
private:
    // Global type hash
    static AllLoggedTypes *s_pAllLoggedTypes;

    // An unsigned value that gets incremented whenever a global change is made.
    // When this occurs, threads must synchronize themselves with the global state.
    // Examples include unloading of modules and disabling of allocation sampling.
    static unsigned int s_nEpoch;

    // See code:ETW::TypeSystemLog::PostRegistrationInit
    static BOOL s_fHeapAllocEventEnabledOnStartup;
    static BOOL s_fHeapAllocHighEventEnabledNow;
    static BOOL s_fHeapAllocLowEventEnabledNow;

    // If COMPLUS_UNSUPPORTED_ETW_ObjectAllocationEventsPerTypePerSec is set, then
    // this is used to determine the event frequency, overriding
    // s_nDefaultMsBetweenEvents above (regardless of which
    // GCSampledObjectAllocation*Keyword was used)
    static int s_nCustomMsBetweenEvents;

public:
    // This customizes the type logging behavior in LogTypeAndParametersIfNecessary
    enum TypeLogBehavior
    {
        // Take lock, and consult hash table to see if this is the first time we've
        // encountered the type, in which case, log it
        kTypeLogBehaviorTakeLockAndLogIfFirstTime,

        // Don't take lock, don't consult hash table. Just log the type. (This is
        // used in cases when checking for dupe type logging isn't worth it, such as
        // when logging the finalization of an object.)
        kTypeLogBehaviorAlwaysLog,

        // When logging the type for GCSampledObjectAllocation events,
        // we already know we need to log the type (since we already
        // looked it up in the hash). But we would still need to consult the hash
        // for any type parameters, so kTypeLogBehaviorAlwaysLog isn't appropriate,
        // and this is used instead.
        kTypeLogBehaviorAlwaysLogTopLevelType,
    };

    // Initialization is split around ETW provider registration.
    static HRESULT PreRegistrationInit();
    static void PostRegistrationInit();

    static BOOL IsHeapAllocEventEnabled();
    static void SendObjectAllocatedEvent(Object * pObject);
    static CrstBase * GetHashCrst();
    static VOID LogTypeAndParametersIfNecessary(BulkTypeEventLogger * pBulkTypeEventLogger, ULONGLONG thAsAddr, TypeLogBehavior typeLogBehavior);
    static VOID OnModuleUnload(Module * pModule);
    static void OnKeywordsChanged();
    static void Cleanup();
    static VOID DeleteTypeHashNoLock(AllLoggedTypes **ppAllLoggedTypes);
    static VOID FlushObjectAllocationEvents();
    static UINT32 TypeLoadBegin();
    static VOID TypeLoadEnd(UINT32 typeLoad, TypeHandle th, UINT16 loadLevel);

private:
    static BOOL ShouldLogType(TypeHandle th);
    static TypeLoggingInfo LookupOrCreateTypeLoggingInfo(TypeHandle th, BOOL * pfCreatedNew, LoggedTypesFromModule ** ppLoggedTypesFromModule = NULL);
    static BOOL AddTypeToGlobalCacheIfNotExists(TypeHandle th, BOOL * pfCreatedNew);
    static BOOL AddOrReplaceTypeLoggingInfo(ETW::LoggedTypesFromModule * pLoggedTypesFromModule, const ETW::TypeLoggingInfo * pTypeLoggingInfo);
    static int GetDefaultMsBetweenEvents();
    static VOID OnTypesKeywordTurnedOff();
};
#endif // FEATURE_REDHAWK
// Class to wrap all GC logic for ETW.  Provides helpers to fire GC-related
// events and the callbacks used while the GC heap is walked for heap-dump
// events.
class GCLog
{
private:
    // When WPA triggers a GC, it gives us this unique number to append to our
    // GCStart event so WPA can correlate the CLR's GC with the JScript GC they
    // triggered at the same time.
    //
    // We set this value when the GC is triggered, and then retrieve the value on the
    // first subsequent FireGcStart() method call for a full, induced GC, assuming
    // that that's the GC that WPA triggered. This is imperfect, and if we were in
    // the act of beginning another full, induced GC (for some other reason), then
    // we'll attach this sequence number to that GC instead of to the WPA-induced GC,
    // but who cares? When parsing ETW logs later on, it's indistinguishable if both
    // GCs really were induced at around the same time.
#ifdef FEATURE_REDHAWK
    static volatile LONGLONG s_l64LastClientSequenceNumber;
#else // FEATURE_REDHAWK
    static Volatile<LONGLONG> s_l64LastClientSequenceNumber;
#endif // FEATURE_REDHAWK

public:
    // Event payloads; only the member matching the event being fired is
    // meaningful for any given use of the union.
    typedef union st_GCEventInfo {
        // These values are gotten from the gc_reason
        // in gcimpl.h
        typedef enum _GC_REASON {
            GC_ALLOC_SOH = 0,
            GC_INDUCED = 1,
            GC_LOWMEMORY = 2,
            GC_EMPTY = 3,
            GC_ALLOC_LOH = 4,
            GC_OOS_SOH = 5,
            GC_OOS_LOH = 6,
            GC_INDUCED_NOFORCE = 7,
            GC_GCSTRESS = 8,
            GC_LOWMEMORY_BLOCKING = 9,
            GC_INDUCED_COMPACTING = 10,
            GC_LOWMEMORY_HOST = 11
        } GC_REASON;

        typedef enum _GC_TYPE {
            GC_NGC = 0,   // blocking (non-concurrent) GC
            GC_BGC = 1,   // background GC
            GC_FGC = 2    // foreground GC performed during a background GC
        } GC_TYPE;

        // Payload for the GCStart event.
        struct {
            ULONG Count;
            ULONG Depth;
            GC_REASON Reason;
            GC_TYPE Type;
        } GCStart;

        // Payload for the SuspendEE event.
        struct {
            ULONG Reason;
            // This is only valid when SuspendEE is called by GC (ie, Reason is either
            // SUSPEND_FOR_GC or SUSPEND_FOR_GC_PREP.
            ULONG GcCount;
        } SuspendEE;

        // Payload for the GCSettings event.
        struct {
            ULONGLONG SegmentSize;
            ULONGLONG LargeObjectSegmentSize;
            BOOL ServerGC; // TRUE means it's server GC; FALSE means it's workstation.
        } GCSettings;
    } ETW_GC_INFO, *PETW_GC_INFO;

#ifdef FEATURE_EVENT_TRACE
    static VOID GCSettingsEvent();
#else
    // No-op when event tracing is compiled out.
    static VOID GCSettingsEvent() {};
#endif // FEATURE_EVENT_TRACE

    // Which parts of a heap dump are requested by the currently-enabled ETW
    // sessions/keywords.
    static BOOL ShouldWalkHeapObjectsForEtw();
    static BOOL ShouldWalkHeapRootsForEtw();
    static BOOL ShouldTrackMovementForEtw();

    // GC induction on behalf of diagnostics / ETW controllers.
    static HRESULT ForceGCForDiagnostics();
    static VOID ForceGC(LONGLONG l64ClientSequenceNumber);

    static VOID FireGcStart(ETW_GC_INFO * pGcInfo);

    // Per-root / per-object callbacks invoked while walking the heap.
    static VOID RootReference(
        LPVOID pvHandle,
        Object * pRootedNode,
        Object * pSecondaryNodeForDependentHandle,
        BOOL fDependentHandle,
        ProfilingScanContext * profilingScanContext,
        DWORD dwGCFlags,
        DWORD rootFlags);
    static VOID ObjectReference(
        ProfilerWalkHeapContext * profilerWalkHeapContext,
        Object * pObjReferenceSource,
        ULONGLONG typeID,
        ULONGLONG cRefs,
        Object ** rgObjReferenceTargets);
    static BOOL ShouldWalkStaticsAndCOMForEtw();
    static VOID WalkStaticsAndCOMForETW();
    static VOID EndHeapDump(ProfilerWalkHeapContext * profilerWalkHeapContext);

#ifdef FEATURE_EVENT_TRACE
    // Object-relocation notifications fired while the GC moves memory blocks.
    static VOID BeginMovedReferences(size_t * pProfilingContext);
    static VOID MovedReference(BYTE * pbMemBlockStart, BYTE * pbMemBlockEnd, ptrdiff_t cbRelocDistance, size_t profilingContext, BOOL fCompacting, BOOL fAllowProfApiNotification = TRUE);
    static VOID EndMovedReferences(size_t profilingContext, BOOL fAllowProfApiNotification = TRUE);
#else
    // TODO: Need to be implemented for PROFILING_SUPPORTED.
    static VOID BeginMovedReferences(size_t * pProfilingContext) {};
    static VOID MovedReference(BYTE * pbMemBlockStart, BYTE * pbMemBlockEnd, ptrdiff_t cbRelocDistance, size_t profilingContext, BOOL fCompacting, BOOL fAllowProfApiNotification = TRUE) {};
    static VOID EndMovedReferences(size_t profilingContext, BOOL fAllowProfApiNotification = TRUE) {};
#endif // FEATURE_EVENT_TRACE

    static VOID SendFinalizeObjectEvent(MethodTable * pMT, Object * pObj);
};
};
#endif //_VMEVENTTRACE_H_
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/debug/ee/amd64/debuggerregdisplayhelper.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/* ------------------------------------------------------------------------- *
* DebuggerRegDisplayHelper.cpp -- implementation of the platform-dependent
//
* methods for transferring information between
* REGDISPLAY and DebuggerREGDISPLAY
* ------------------------------------------------------------------------- */
#include "stdafx.h"
// Deep-copies a REGDISPLAY.  A raw byte copy would leave pDst's context
// pointers referring into pSrc's embedded members, so after the memcpy the
// four internal pointers are re-aimed at pDst's own ctxPtrsOne/Two and
// ctxOne/Two, mirroring whichever assignment the source used.
void CopyREGDISPLAY(REGDISPLAY* pDst, REGDISPLAY* pSrc)
{
    memcpy((BYTE*)pDst, (BYTE*)pSrc, sizeof(REGDISPLAY));

    // The CONTEXT itself lives outside the REGDISPLAY, so it is shared as-is.
    pDst->pContext = pSrc->pContext;

    // Mirror the current/caller roles of the embedded context-pointer structs.
    bool srcCurrentIsPtrsOne = (pSrc->pCurrentContextPointers == &(pSrc->ctxPtrsOne));
    pDst->pCurrentContextPointers = srcCurrentIsPtrsOne ? &(pDst->ctxPtrsOne) : &(pDst->ctxPtrsTwo);
    pDst->pCallerContextPointers  = srcCurrentIsPtrsOne ? &(pDst->ctxPtrsTwo) : &(pDst->ctxPtrsOne);

    // Same for the embedded context structures.
    bool srcCurrentIsCtxOne = (pSrc->pCurrentContext == &(pSrc->ctxOne));
    pDst->pCurrentContext = srcCurrentIsCtxOne ? &(pDst->ctxOne) : &(pDst->ctxTwo);
    pDst->pCallerContext  = srcCurrentIsCtxOne ? &(pDst->ctxTwo) : &(pDst->ctxOne);
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/* ------------------------------------------------------------------------- *
* DebuggerRegDisplayHelper.cpp -- implementation of the platform-dependent
//
* methods for transferring information between
* REGDISPLAY and DebuggerREGDISPLAY
* ------------------------------------------------------------------------- */
#include "stdafx.h"
// Deep-copies a REGDISPLAY.  A raw byte copy would leave pDst's context
// pointers referring into pSrc's embedded members, so after the memcpy the
// four internal pointers are re-aimed at pDst's own ctxPtrsOne/Two and
// ctxOne/Two, mirroring whichever assignment the source used.
void CopyREGDISPLAY(REGDISPLAY* pDst, REGDISPLAY* pSrc)
{
    memcpy((BYTE*)pDst, (BYTE*)pSrc, sizeof(REGDISPLAY));

    // The CONTEXT itself lives outside the REGDISPLAY, so it is shared as-is.
    pDst->pContext = pSrc->pContext;

    // Mirror the current/caller roles of the embedded context-pointer structs.
    bool srcCurrentIsPtrsOne = (pSrc->pCurrentContextPointers == &(pSrc->ctxPtrsOne));
    pDst->pCurrentContextPointers = srcCurrentIsPtrsOne ? &(pDst->ctxPtrsOne) : &(pDst->ctxPtrsTwo);
    pDst->pCallerContextPointers  = srcCurrentIsPtrsOne ? &(pDst->ctxPtrsTwo) : &(pDst->ctxPtrsOne);

    // Same for the embedded context structures.
    bool srcCurrentIsCtxOne = (pSrc->pCurrentContext == &(pSrc->ctxOne));
    pDst->pCurrentContext = srcCurrentIsCtxOne ? &(pDst->ctxOne) : &(pDst->ctxTwo);
    pDst->pCallerContext  = srcCurrentIsCtxOne ? &(pDst->ctxTwo) : &(pDst->ctxOne);
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/tests/palsuite/c_runtime/logf/test1/test1.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: test1.c
**
** Purpose: Tests logf with a normal set of values.
**
**===================================================================*/
#include <palsuite.h>
// binary32 (float) has a machine epsilon of 2^-23 (approx. 1.19e-07). However, this
// is slightly too accurate when writing tests meant to run against libm implementations
// for various platforms. 2^-21 (approx. 4.76e-07) seems to be as accurate as we can get.
//
// The tests themselves will take PAL_EPSILON and adjust it according to the expected result
// so that the delta used for comparison will compare the most significant digits and ignore
// any digits that are outside the double precision range (6-9 digits).
// For example, a test with an expect result in the format of 0.xxxxxxxxx will use PAL_EPSILON
// for the variance, while an expected result in the format of 0.0xxxxxxxxx will use
// PAL_EPSILON / 10 and and expected result in the format of x.xxxxxx will use PAL_EPSILON * 10.
#define PAL_EPSILON 4.76837158e-07
#define PAL_NAN sqrtf(-1.0f)
#define PAL_POSINF -logf(0.0f)
#define PAL_NEGINF logf(0.0f)
/**
* Helper test structure
*/
// One table entry for the logf test loop below.
struct test
{
    float value;    /* value to test the function with */
    float expected; /* expected result of logf(value) */
    float variance; /* maximum delta between the expected and actual result */
};
/**
* logf_test1_validate
*
* test validation function
*/
/**
 * logf_test1_validate
 *
 * Verifies that logf(value) lands within `variance` of `expected`,
 * failing the test run otherwise.
 */
void __cdecl logf_test1_validate(float value, float expected, float variance)
{
    float actual = logf(value);

    /* The test passes iff |actual - expected| <= variance. */
    if (fabsf(actual - expected) > variance)
    {
        Fail("logf(%g) returned %10.9g when it should have returned %10.9g",
             value, actual, expected);
    }
}
/**
 * logf_test1_validate_isnan
 *
 * test validation function for values returning NaN
 * (header previously misnamed this function as logf_test1_validate)
 */
void __cdecl logf_test1_validate_isnan(float value)
{
    float result = logf(value);

    /* For these inputs logf must produce NaN (e.g. logf of NaN or -inf). */
    if (!_isnanf(result))
    {
        Fail("logf(%g) returned %10.9g when it should have returned %10.9g",
             value, result, PAL_NAN);
    }
}
/**
* main
*
* executable entry point
*/
/**
 * main
 *
 * Executable entry point: runs logf over a table of known inputs and a pair
 * of NaN-producing inputs.
 */
PALTEST(c_runtime_logf_test1_paltest_logf_test1, "c_runtime/logf/test1/paltest_logf_test1")
{
    struct test tests[] =
    {
        /* value            expected         variance */
        { 0,                PAL_NEGINF,      0 },
        { 0.0432139183f,   -3.14159265f,     PAL_EPSILON * 10 },   // expected: -(pi)
        { 0.0659880358f,   -2.71828183f,     PAL_EPSILON * 10 },   // expected: -(e)
        { 0.1f,            -2.30258509f,     PAL_EPSILON * 10 },   // expected: -(ln(10))
        { 0.207879576f,    -1.57079633f,     PAL_EPSILON * 10 },   // expected: -(pi / 2)
        { 0.236290088f,    -1.44269504f,     PAL_EPSILON * 10 },   // expected: -(logf2(e))
        { 0.243116734f,    -1.41421356f,     PAL_EPSILON * 10 },   // expected: -(sqrtf(2))
        { 0.323557264f,    -1.12837917f,     PAL_EPSILON * 10 },   // expected: -(2 / sqrtf(pi))
        { 0.367879441f,    -1,               PAL_EPSILON * 10 },   // expected: -(1)
        { 0.455938128f,    -0.785398163f,    PAL_EPSILON },        // expected: -(pi / 4)
        { 0.493068691f,    -0.707106781f,    PAL_EPSILON },        // expected: -(1 / sqrtf(2))
        { 0.5f,            -0.693147181f,    PAL_EPSILON },        // expected: -(ln(2))
        { 0.529077808f,    -0.636619772f,    PAL_EPSILON },        // expected: -(2 / pi)
        { 0.647721485f,    -0.434294482f,    PAL_EPSILON },        // expected: -(log10f(e))
        { 0.727377349f,    -0.318309886f,    PAL_EPSILON },        // expected: -(1 / pi)
        { 1,                0,               PAL_EPSILON },
        { 1.37480223f,      0.318309886f,    PAL_EPSILON },        // expected: 1 / pi
        { 1.54387344f,      0.434294482f,    PAL_EPSILON },        // expected: log10f(e)
        { 1.89008116f,      0.636619772f,    PAL_EPSILON },        // expected: 2 / pi
        { 2,                0.693147181f,    PAL_EPSILON },        // expected: ln(2)
        { 2.02811498f,      0.707106781f,    PAL_EPSILON },        // expected: 1 / sqrtf(2)
        { 2.19328005f,      0.785398163f,    PAL_EPSILON },        // expected: pi / 4
        { 2.71828183f,      1,               PAL_EPSILON * 10 },   // value: e
        { 3.09064302f,      1.12837917f,     PAL_EPSILON * 10 },   // expected: 2 / sqrtf(pi)
        { 4.11325038f,      1.41421356f,     PAL_EPSILON * 10 },   // expected: sqrtf(2)
        { 4.23208611f,      1.44269504f,     PAL_EPSILON * 10 },   // expected: logf2(e)
        { 4.81047738f,      1.57079633f,     PAL_EPSILON * 10 },   // expected: pi / 2
        { 10,               2.30258509f,     PAL_EPSILON * 10 },   // expected: ln(10)
        { 15.1542622f,      2.71828183f,     PAL_EPSILON * 10 },   // expected: e
        { 23.1406926f,      3.14159265f,     PAL_EPSILON * 10 },   // expected: pi
        { PAL_POSINF,       PAL_POSINF,      0 },
    };

    if (PAL_Initialize(argc, argv) != 0)
    {
        return FAIL;
    }

    // size_t index: sizeof yields size_t, so an int index here is a
    // signed/unsigned comparison (MSVC C4018 / gcc -Wsign-compare).
    for (size_t i = 0; i < (sizeof(tests) / sizeof(struct test)); i++)
    {
        logf_test1_validate(tests[i].value, tests[i].expected, tests[i].variance);
    }

    logf_test1_validate_isnan(PAL_NEGINF);
    logf_test1_validate_isnan(PAL_NAN);

    PAL_Terminate();
    return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: test1.c
**
** Purpose: Tests logf with a normal set of values.
**
**===================================================================*/
#include <palsuite.h>
// binary32 (float) has a machine epsilon of 2^-23 (approx. 1.19e-07). However, this
// is slightly too accurate when writing tests meant to run against libm implementations
// for various platforms. 2^-21 (approx. 4.76e-07) seems to be as accurate as we can get.
//
// The tests themselves will take PAL_EPSILON and adjust it according to the expected result
// so that the delta used for comparison will compare the most significant digits and ignore
// any digits that are outside the double precision range (6-9 digits).
// For example, a test with an expect result in the format of 0.xxxxxxxxx will use PAL_EPSILON
// for the variance, while an expected result in the format of 0.0xxxxxxxxx will use
// PAL_EPSILON / 10 and and expected result in the format of x.xxxxxx will use PAL_EPSILON * 10.
#define PAL_EPSILON 4.76837158e-07
#define PAL_NAN sqrtf(-1.0f)
#define PAL_POSINF -logf(0.0f)
#define PAL_NEGINF logf(0.0f)
/**
* Helper test structure
*/
// One table entry for the logf test loop below.
struct test
{
    float value;    /* value to test the function with */
    float expected; /* expected result of logf(value) */
    float variance; /* maximum delta between the expected and actual result */
};
/**
* logf_test1_validate
*
* test validation function
*/
/**
 * logf_test1_validate
 *
 * Verifies that logf(value) lands within `variance` of `expected`,
 * failing the test run otherwise.
 */
void __cdecl logf_test1_validate(float value, float expected, float variance)
{
    float actual = logf(value);

    /* The test passes iff |actual - expected| <= variance. */
    if (fabsf(actual - expected) > variance)
    {
        Fail("logf(%g) returned %10.9g when it should have returned %10.9g",
             value, actual, expected);
    }
}
/**
* logf_test1_validate
*
* test validation function for values returning NaN
*/
/**
 * logf_test1_validate_isnan
 *
 * Validates that logf(value) produces NaN for the given input.
 */
void __cdecl logf_test1_validate_isnan(float value)
{
    float actual = logf(value);

    /* NaN never compares equal to anything, so classify via _isnanf. */
    if (!_isnanf(actual))
    {
        Fail("logf(%g) returned %10.9g when it should have returned %10.9g",
             value, actual, PAL_NAN);
    }
}
/**
* main
*
* executable entry point
*/
/**
 * main
 *
 * Executable entry point: runs logf against a table of well-known values
 * (powers/ratios of pi, e, sqrt(2), ln(2), ...) and checks each result
 * against the expected value with a per-entry tolerance, then verifies
 * that logf of a negative infinity and of NaN both yield NaN.
 */
PALTEST(c_runtime_logf_test1_paltest_logf_test1, "c_runtime/logf/test1/paltest_logf_test1")
{
    struct test tests[] = 
    {
        /* value            expected         variance */
        {  0,               PAL_NEGINF,      0 },
        {  0.0432139183f,  -3.14159265f,     PAL_EPSILON * 10 },   // expected: -(pi)
        {  0.0659880358f,  -2.71828183f,     PAL_EPSILON * 10 },   // expected: -(e)
        {  0.1f,           -2.30258509f,     PAL_EPSILON * 10 },   // expected: -(ln(10))
        {  0.207879576f,   -1.57079633f,     PAL_EPSILON * 10 },   // expected: -(pi / 2)
        {  0.236290088f,   -1.44269504f,     PAL_EPSILON * 10 },   // expected: -(logf2(e))
        {  0.243116734f,   -1.41421356f,     PAL_EPSILON * 10 },   // expected: -(sqrtf(2))
        {  0.323557264f,   -1.12837917f,     PAL_EPSILON * 10 },   // expected: -(2 / sqrtf(pi))
        {  0.367879441f,   -1,               PAL_EPSILON * 10 },   // expected: -(1)
        {  0.455938128f,   -0.785398163f,    PAL_EPSILON },        // expected: -(pi / 4)
        {  0.493068691f,   -0.707106781f,    PAL_EPSILON },        // expected: -(1 / sqrtf(2))
        {  0.5f,           -0.693147181f,    PAL_EPSILON },        // expected: -(ln(2))
        {  0.529077808f,   -0.636619772f,    PAL_EPSILON },        // expected: -(2 / pi)
        {  0.647721485f,   -0.434294482f,    PAL_EPSILON },        // expected: -(log10f(e))
        {  0.727377349f,   -0.318309886f,    PAL_EPSILON },        // expected: -(1 / pi)
        {  1,               0,               PAL_EPSILON },
        {  1.37480223f,     0.318309886f,    PAL_EPSILON },        // expected:  1 / pi
        {  1.54387344f,     0.434294482f,    PAL_EPSILON },        // expected:  log10f(e)
        {  1.89008116f,     0.636619772f,    PAL_EPSILON },        // expected:  2 / pi
        {  2,               0.693147181f,    PAL_EPSILON },        // expected:  ln(2)
        {  2.02811498f,     0.707106781f,    PAL_EPSILON },        // expected:  1 / sqrtf(2)
        {  2.19328005f,     0.785398163f,    PAL_EPSILON },        // expected:  pi / 4
        {  2.71828183f,     1,               PAL_EPSILON * 10 },   // value:  e
        {  3.09064302f,     1.12837917f,     PAL_EPSILON * 10 },   // expected:  2 / sqrtf(pi)
        {  4.11325038f,     1.41421356f,     PAL_EPSILON * 10 },   // expected:  sqrtf(2)
        {  4.23208611f,     1.44269504f,     PAL_EPSILON * 10 },   // expected:  logf2(e)
        {  4.81047738f,     1.57079633f,     PAL_EPSILON * 10 },   // expected:  pi / 2
        {  10,              2.30258509f,     PAL_EPSILON * 10 },   // expected:  ln(10)
        {  15.1542622f,     2.71828183f,     PAL_EPSILON * 10 },   // expected:  e
        {  23.1406926f,     3.14159265f,     PAL_EPSILON * 10 },   // expected:  pi
        {  PAL_POSINF,      PAL_POSINF,      0 },
    };

    if (PAL_Initialize(argc, argv) != 0)
    {
        return FAIL;
    }

    // Cast the element count to int: 'sizeof(tests) / sizeof(struct test)' is an
    // unsigned size_t, and comparing the signed index 'i' against it directly
    // raises a signed/unsigned mismatch warning (MSVC C4018 / -Wsign-compare).
    for (int i = 0; i < (int)(sizeof(tests) / sizeof(struct test)); i++)
    {
        logf_test1_validate(tests[i].value, tests[i].expected, tests[i].variance);
    }
    
    logf_test1_validate_isnan(PAL_NEGINF);
    logf_test1_validate_isnan(PAL_NAN);

    PAL_Terminate();
    return PASS;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/src/libunwind/src/mips/regname.c | /* libunwind - a platform-independent unwind library
Copyright (C) 2008 CodeSourcery
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "unwind_i.h"
static const char *regname[] =
{
/* 0. */
"$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7",
/* 8. */
"$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15",
/* 16. */
"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23",
/* 24. */
"$24", "$25", "$26", "$27", "$28", "$29", "$30", "$31",
};
const char *
unw_regname (unw_regnum_t reg)
{
if (reg < (unw_regnum_t) ARRAY_SIZE (regname))
return regname[reg];
else if (reg == UNW_MIPS_PC)
return "pc";
else
return "???";
}
| /* libunwind - a platform-independent unwind library
Copyright (C) 2008 CodeSourcery
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "unwind_i.h"
static const char *regname[] =
{
/* 0. */
"$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7",
/* 8. */
"$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15",
/* 16. */
"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23",
/* 24. */
"$24", "$25", "$26", "$27", "$28", "$29", "$30", "$31",
};
const char *
unw_regname (unw_regnum_t reg)
{
if (reg < (unw_regnum_t) ARRAY_SIZE (regname))
return regname[reg];
else if (reg == UNW_MIPS_PC)
return "pc";
else
return "???";
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/bft35.txt | Microsoft (R) XSLT Compiler version 2.0.61016
for Microsoft (R) Windows (R) 2005 Framework version 2.0.50727
Copyright (C) Microsoft Corporation 2007. All rights reserved.
fatal error : Source file 'nul.xsl' could not be found. ---> Win32 device names are not allowed.
| Microsoft (R) XSLT Compiler version 2.0.61016
for Microsoft (R) Windows (R) 2005 Framework version 2.0.50727
Copyright (C) Microsoft Corporation 2007. All rights reserved.
fatal error : Source file 'nul.xsl' could not be found. ---> Win32 device names are not allowed.
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/vm/exinfo.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
#ifndef __ExInfo_h__
#define __ExInfo_h__
#if !defined(FEATURE_EH_FUNCLETS)
#include "exstatecommon.h"
typedef DPTR(class ExInfo) PTR_ExInfo;
// Per-exception bookkeeping for the non-funclet (x86 SEH-style) exception
// handling path. One ExInfo describes a single in-flight managed exception;
// nested exceptions chain through m_pPrevNestedInfo.
class ExInfo
{
    friend class ThreadExceptionState;
    friend class ClrDataExceptionState;

public:
    // True when this record does not live at its own stack address,
    // i.e. it was heap-allocated rather than embedded on the stack.
    BOOL IsHeapAllocated()
    {
        LIMITED_METHOD_CONTRACT;
        return m_StackAddress != (void *) this;
    }

    // Takes over the state of 'from' and resets 'from' to a clean state.
    void CopyAndClearSource(ExInfo *from);

    // Pops nested ExInfo records whose stack addresses are below 'limit'.
    void UnwindExInfo(VOID* limit);

    // Q: Why does this thing take an EXCEPTION_RECORD rather than an ExceptionCode?
    // A: Because m_ExceptionCode and Ex_WasThrownByUs have to be kept
    // in sync and this function needs the exception parms inside the record to figure
    // out the "IsTagged" part.
    void SetExceptionCode(const EXCEPTION_RECORD *pCER);

    DWORD GetExceptionCode()
    {
        LIMITED_METHOD_CONTRACT;
        return m_ExceptionCode;
    }

public:  // @TODO: make more of these private!
    // Note: the debugger assumes that m_pThrowable is a strong
    // reference so it can check it for NULL with preemptive GC
    // enabled.
    OBJECTHANDLE m_hThrowable;          // thrown exception
    PTR_Frame m_pSearchBoundary;        // topmost frame for current managed frame group

private:
    DWORD m_ExceptionCode;              // After a catch of a COM+ exception, pointers/context are trashed.

public:
    PTR_EXCEPTION_REGISTRATION_RECORD m_pBottomMostHandler;     // most recent EH record registered

    // Reference to the topmost handler we saw during an SO that goes past us
    PTR_EXCEPTION_REGISTRATION_RECORD m_pTopMostHandlerDuringSO;

    LPVOID m_dEsp;                      // Esp when fault occurred, OR esp to restore on endcatch

    StackTraceInfo m_StackTraceInfo;

    PTR_ExInfo m_pPrevNestedInfo;       // pointer to nested info if are handling nested exception

    size_t* m_pShadowSP;                // Zero this after endcatch

    PTR_EXCEPTION_RECORD m_pExceptionRecord;
    PTR_EXCEPTION_POINTERS m_pExceptionPointers;
    PTR_CONTEXT m_pContext;

    // We have a rare case where (re-entry to the EE from an unmanaged filter) where we
    // need to create a new ExInfo ... but don't have a nested handler for it. The handlers
    // use stack addresses to figure out their correct lifetimes. This stack location is
    // used for that. For most records, it will be the stack address of the ExInfo ... but
    // for some records, it will be a pseudo stack location -- the place where we think
    // the record should have been (except for the re-entry case).
    //
    //
    //
    void* m_StackAddress;               // A pseudo or real stack location for this record.

#ifndef TARGET_UNIX
private:
    // Tracks Watson bucketing data (Windows error-reporting) for this exception.
    EHWatsonBucketTracker m_WatsonBucketTracker;
public:
    inline PTR_EHWatsonBucketTracker GetWatsonBucketTracker()
    {
        LIMITED_METHOD_CONTRACT;
        return PTR_EHWatsonBucketTracker(PTR_HOST_MEMBER_TADDR(ExInfo, this, m_WatsonBucketTracker));
    }
#endif

private:
    // Whether the first-chance exception notification has already been raised
    // for this exception (so it is not delivered twice).
    BOOL m_fDeliveredFirstChanceNotification;

public:
    inline BOOL DeliveredFirstChanceNotification()
    {
        LIMITED_METHOD_CONTRACT;
        return m_fDeliveredFirstChanceNotification;
    }

    inline void SetFirstChanceNotificationStatus(BOOL fDelivered)
    {
        LIMITED_METHOD_CONTRACT;
        m_fDeliveredFirstChanceNotification = fDelivered;
    }

    // Returns the exception tracker previous to the current
    inline PTR_ExInfo GetPreviousExceptionTracker()
    {
        LIMITED_METHOD_CONTRACT;
        return m_pPrevNestedInfo;
    }

    // Returns the throwable associated with the tracker
    inline OBJECTREF GetThrowable()
    {
        LIMITED_METHOD_CONTRACT;
        return (m_hThrowable != NULL)?ObjectFromHandle(m_hThrowable):NULL;
    }

    // Returns the throwble associated with the tracker as handle
    inline OBJECTHANDLE GetThrowableAsHandle()
    {
        LIMITED_METHOD_CONTRACT;
        return m_hThrowable;
    }

public:
    DebuggerExState m_DebuggerExState;
    EHClauseInfo m_EHClauseInfo;
    ExceptionFlags m_ExceptionFlags;

#if defined(TARGET_X86) && defined(DEBUGGING_SUPPORTED)
    // Register context used when the debugger intercepts this exception.
    EHContext m_InterceptionContext;
    BOOL m_ValidInterceptionContext;
#endif

#ifdef DACCESS_COMPILE
    void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
#endif

    void Init();
    ExInfo() DAC_EMPTY();

    void DestroyExceptionHandle();

private:
    // Don't allow this
    ExInfo& operator=(const ExInfo &from);
};
#if defined(TARGET_X86)
PTR_ExInfo GetEHTrackerForPreallocatedException(OBJECTREF oPreAllocThrowable, PTR_ExInfo pStartingEHTracker);
#endif // TARGET_X86
#endif // !FEATURE_EH_FUNCLETS
#endif // __ExInfo_h__
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
#ifndef __ExInfo_h__
#define __ExInfo_h__
#if !defined(FEATURE_EH_FUNCLETS)
#include "exstatecommon.h"
typedef DPTR(class ExInfo) PTR_ExInfo;
class ExInfo
{
friend class ThreadExceptionState;
friend class ClrDataExceptionState;
public:
BOOL IsHeapAllocated()
{
LIMITED_METHOD_CONTRACT;
return m_StackAddress != (void *) this;
}
void CopyAndClearSource(ExInfo *from);
void UnwindExInfo(VOID* limit);
// Q: Why does this thing take an EXCEPTION_RECORD rather than an ExceptionCode?
// A: Because m_ExceptionCode and Ex_WasThrownByUs have to be kept
// in sync and this function needs the exception parms inside the record to figure
// out the "IsTagged" part.
void SetExceptionCode(const EXCEPTION_RECORD *pCER);
DWORD GetExceptionCode()
{
LIMITED_METHOD_CONTRACT;
return m_ExceptionCode;
}
public: // @TODO: make more of these private!
// Note: the debugger assumes that m_pThrowable is a strong
// reference so it can check it for NULL with preemptive GC
// enabled.
OBJECTHANDLE m_hThrowable; // thrown exception
PTR_Frame m_pSearchBoundary; // topmost frame for current managed frame group
private:
DWORD m_ExceptionCode; // After a catch of a COM+ exception, pointers/context are trashed.
public:
PTR_EXCEPTION_REGISTRATION_RECORD m_pBottomMostHandler; // most recent EH record registered
// Reference to the topmost handler we saw during an SO that goes past us
PTR_EXCEPTION_REGISTRATION_RECORD m_pTopMostHandlerDuringSO;
LPVOID m_dEsp; // Esp when fault occurred, OR esp to restore on endcatch
StackTraceInfo m_StackTraceInfo;
PTR_ExInfo m_pPrevNestedInfo; // pointer to nested info if are handling nested exception
size_t* m_pShadowSP; // Zero this after endcatch
PTR_EXCEPTION_RECORD m_pExceptionRecord;
PTR_EXCEPTION_POINTERS m_pExceptionPointers;
PTR_CONTEXT m_pContext;
// We have a rare case where (re-entry to the EE from an unmanaged filter) where we
// need to create a new ExInfo ... but don't have a nested handler for it. The handlers
// use stack addresses to figure out their correct lifetimes. This stack location is
// used for that. For most records, it will be the stack address of the ExInfo ... but
// for some records, it will be a pseudo stack location -- the place where we think
// the record should have been (except for the re-entry case).
//
//
//
void* m_StackAddress; // A pseudo or real stack location for this record.
#ifndef TARGET_UNIX
private:
EHWatsonBucketTracker m_WatsonBucketTracker;
public:
inline PTR_EHWatsonBucketTracker GetWatsonBucketTracker()
{
LIMITED_METHOD_CONTRACT;
return PTR_EHWatsonBucketTracker(PTR_HOST_MEMBER_TADDR(ExInfo, this, m_WatsonBucketTracker));
}
#endif
private:
BOOL m_fDeliveredFirstChanceNotification;
public:
inline BOOL DeliveredFirstChanceNotification()
{
LIMITED_METHOD_CONTRACT;
return m_fDeliveredFirstChanceNotification;
}
inline void SetFirstChanceNotificationStatus(BOOL fDelivered)
{
LIMITED_METHOD_CONTRACT;
m_fDeliveredFirstChanceNotification = fDelivered;
}
// Returns the exception tracker previous to the current
inline PTR_ExInfo GetPreviousExceptionTracker()
{
LIMITED_METHOD_CONTRACT;
return m_pPrevNestedInfo;
}
// Returns the throwable associated with the tracker
inline OBJECTREF GetThrowable()
{
LIMITED_METHOD_CONTRACT;
return (m_hThrowable != NULL)?ObjectFromHandle(m_hThrowable):NULL;
}
// Returns the throwble associated with the tracker as handle
inline OBJECTHANDLE GetThrowableAsHandle()
{
LIMITED_METHOD_CONTRACT;
return m_hThrowable;
}
public:
DebuggerExState m_DebuggerExState;
EHClauseInfo m_EHClauseInfo;
ExceptionFlags m_ExceptionFlags;
#if defined(TARGET_X86) && defined(DEBUGGING_SUPPORTED)
EHContext m_InterceptionContext;
BOOL m_ValidInterceptionContext;
#endif
#ifdef DACCESS_COMPILE
void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
#endif
void Init();
ExInfo() DAC_EMPTY();
void DestroyExceptionHandle();
private:
// Don't allow this
ExInfo& operator=(const ExInfo &from);
};
#if defined(TARGET_X86)
PTR_ExInfo GetEHTrackerForPreallocatedException(OBJECTREF oPreAllocThrowable, PTR_ExInfo pStartingEHTracker);
#endif // TARGET_X86
#endif // !FEATURE_EH_FUNCLETS
#endif // __ExInfo_h__
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/md/tables/external.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: external.h
//
//
// External types used in MetaData\Storage subcomponent classes.
// This file is used for precompiled headers, so it has to be included at the beginning of every .cpp in
// this directory.
//
// ======================================================================================
#pragma once
#include "../external.h"
#include "../export.h"
#include "../inc/recordpool.h"
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: external.h
//
//
// External types used in MetaData\Storage subcomponent classes.
// This file is used for precompiled headers, so it has to be included at the beginning of every .cpp in
// this directory.
//
// ======================================================================================
#pragma once
#include "../external.h"
#include "../export.h"
#include "../inc/recordpool.h"
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/mono/mono/mini/mini-windows-dllmain.c | /**
* \file
* DllMain entry point.
*
* (C) 2002-2003 Ximian, Inc.
* (C) 2003-2006 Novell, Inc.
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "mini-runtime.h"
#ifdef HOST_WIN32
#include "mini-windows.h"
#include <windows.h>
MONO_EXTERN_C
BOOL APIENTRY DllMain (HMODULE module_handle, DWORD reason, LPVOID reserved);
MONO_EXTERN_C
BOOL APIENTRY DllMain (HMODULE module_handle, DWORD reason, LPVOID reserved)
{
	// Forward process/thread attach-detach notifications to the shared
	// Win32 TLS callback handler, identifying this module as a DLL host.
	return mono_win32_runtime_tls_callback (module_handle, reason, reserved, MONO_WIN32_TLS_CALLBACK_TYPE_DLL);
}
#endif
| /**
* \file
* DllMain entry point.
*
* (C) 2002-2003 Ximian, Inc.
* (C) 2003-2006 Novell, Inc.
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "mini-runtime.h"
#ifdef HOST_WIN32
#include "mini-windows.h"
#include <windows.h>
MONO_EXTERN_C
BOOL APIENTRY DllMain (HMODULE module_handle, DWORD reason, LPVOID reserved);
MONO_EXTERN_C
BOOL APIENTRY DllMain (HMODULE module_handle, DWORD reason, LPVOID reserved)
{
return mono_win32_runtime_tls_callback (module_handle, reason, reserved, MONO_WIN32_TLS_CALLBACK_TYPE_DLL);
}
#endif
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/tests/palsuite/exception_handling/PAL_TRY_EXCEPT_EX/test1/PAL_TRY_EXCEPT_EX.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: PAL_TRY_EXCEPT.c (test 1)
**
** Purpose: Tests the PAL implementation of the PAL_TRY and
** PAL_EXCEPT functions. Exceptions are forced to ensure
** the exception blocks are hit.
**
**
**===================================================================*/
#include <palsuite.h>
PALTEST(exception_handling_PAL_TRY_EXCEPT_EX_test1_paltest_pal_try_except_ex_test1, "exception_handling/PAL_TRY_EXCEPT_EX/test1/paltest_pal_try_except_ex_test1")
{
    int* p = 0x00000000;   /* NULL pointer */
    BOOL bTry = FALSE;     /* set when a PAL_TRY block is entered */
    BOOL bExcept = FALSE;  /* set when a PAL_EXCEPT block is entered */
    BOOL bTestA = TRUE;    /* TRUE while running the first try/except pair */

    if (0 != PAL_Initialize(argc, argv))
    {
        return FAIL;
    }

    /*
    ** test to make sure we get into the exception block
    */
    PAL_TRY 
    {
        if (!bTestA)
        {
            Fail("PAL_TRY_EXCEPT: ERROR ->"
                " It appears the first try block was hit a second time.\n");
        }
        bTry = TRUE;    /* indicate we hit the PAL_TRY block */
        /* intentionally dereference the NULL pointer to raise an access
           violation that the PAL_EXCEPT block must catch */
        *p = 13;        /* causes an access violation exception */

        Fail("PAL_TRY_EXCEPT: ERROR -> code was executed after the "
             "access violation.\n");
    }
    PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        if (!bTestA)
        {
            Fail("PAL_TRY_EXCEPT: ERROR ->"
                " It appears the first except block was hit a second time.\n");
        }
        bExcept = TRUE; /* indicate we hit the PAL_EXCEPT block */
    }
    PAL_ENDTRY;

    if (!bTry)
    {
        Trace("PAL_TRY_EXCEPT: ERROR -> It appears the code in the PAL_TRY"
              " block was not executed.\n");
    }

    if (!bExcept)
    {
        Trace("PAL_TRY_EXCEPT: ERROR -> It appears the code in the first"
              " PAL_EXCEPT block was not executed.\n");
    }

    /* did we hit all the code blocks? */
    if(!bTry || !bExcept)
    {
        Fail("");
    }

    /*
    ** test to make sure we get into the second exception block
    */
    /* reset the tracking flags and repeat with a second, independent
       try/except pair to verify the machinery works more than once */
    bTry = FALSE;
    bExcept = FALSE;
    bTestA = FALSE; /* we are now going into the second block test */

    PAL_TRY 
    {
        if (bTestA)
        {
            Fail("PAL_TRY_EXCEPT: ERROR -> It appears"
                " the second try block was hit too early.\n");
        }
        bTry = TRUE;    /* indicate we hit the PAL_TRY block */
        *p = 13;        /* causes an access violation exception */

        Fail("PAL_TRY_EXCEPT: ERROR -> code was executed after the "
             "access violation.\n");
    }
    PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        if (bTestA)
        {
            Fail("PAL_TRY_EXCEPT: ERROR -> It appears"
                " the second except block was hit too early.\n");
        }
        bExcept = TRUE; /* indicate we hit the PAL_EXCEPT block */
    }
    PAL_ENDTRY;

    if (!bTry)
    {
        Trace("PAL_TRY_EXCEPT: ERROR -> It appears the code in the second"
              " PAL_TRY block was not executed.");
    }

    if (!bExcept)
    {
        Trace("PAL_TRY_EXCEPT: ERROR -> It appears the code in the PAL_EXCEPT"
              " block was not executed.");
    }

    /* did we hit all the code blocks? */
    if(!bTry || !bExcept)
    {
        Fail("\n");
    }

    PAL_Terminate();
    return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: PAL_TRY_EXCEPT.c (test 1)
**
** Purpose: Tests the PAL implementation of the PAL_TRY and
** PAL_EXCEPT functions. Exceptions are forced to ensure
** the exception blocks are hit.
**
**
**===================================================================*/
#include <palsuite.h>
PALTEST(exception_handling_PAL_TRY_EXCEPT_EX_test1_paltest_pal_try_except_ex_test1, "exception_handling/PAL_TRY_EXCEPT_EX/test1/paltest_pal_try_except_ex_test1")
{
int* p = 0x00000000; /* NULL pointer */
BOOL bTry = FALSE;
BOOL bExcept = FALSE;
BOOL bTestA = TRUE;
if (0 != PAL_Initialize(argc, argv))
{
return FAIL;
}
/*
** test to make sure we get into the exception block
*/
PAL_TRY
{
if (!bTestA)
{
Fail("PAL_TRY_EXCEPT: ERROR ->"
" It appears the first try block was hit a second time.\n");
}
bTry = TRUE; /* indicate we hit the PAL_TRY block */
*p = 13; /* causes an access violation exception */
Fail("PAL_TRY_EXCEPT: ERROR -> code was executed after the "
"access violation.\n");
}
PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
if (!bTestA)
{
Fail("PAL_TRY_EXCEPT: ERROR ->"
" It appears the first except block was hit a second time.\n");
}
bExcept = TRUE; /* indicate we hit the PAL_EXCEPT block */
}
PAL_ENDTRY;
if (!bTry)
{
Trace("PAL_TRY_EXCEPT: ERROR -> It appears the code in the PAL_TRY"
" block was not executed.\n");
}
if (!bExcept)
{
Trace("PAL_TRY_EXCEPT: ERROR -> It appears the code in the first"
" PAL_EXCEPT block was not executed.\n");
}
/* did we hit all the code blocks? */
if(!bTry || !bExcept)
{
Fail("");
}
/*
** test to make sure we get into the second exception block
*/
bTry = FALSE;
bExcept = FALSE;
bTestA = FALSE; /* we are now going into the second block test */
PAL_TRY
{
if (bTestA)
{
Fail("PAL_TRY_EXCEPT: ERROR -> It appears"
" the second try block was hit too early.\n");
}
bTry = TRUE; /* indicate we hit the PAL_TRY block */
*p = 13; /* causes an access violation exception */
Fail("PAL_TRY_EXCEPT: ERROR -> code was executed after the "
"access violation.\n");
}
PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
if (bTestA)
{
Fail("PAL_TRY_EXCEPT: ERROR -> It appears"
" the second except block was hit too early.\n");
}
bExcept = TRUE; /* indicate we hit the PAL_EXCEPT block */
}
PAL_ENDTRY;
if (!bTry)
{
Trace("PAL_TRY_EXCEPT: ERROR -> It appears the code in the second"
" PAL_TRY block was not executed.");
}
if (!bExcept)
{
Trace("PAL_TRY_EXCEPT: ERROR -> It appears the code in the PAL_EXCEPT"
" block was not executed.");
}
/* did we hit all the code blocks? */
if(!bTry || !bExcept)
{
Fail("\n");
}
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/tools/superpmi/superpmi-shared/errorhandling.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// ErrorHandling.h - Helpers & whatnot for using SEH for errors
//----------------------------------------------------------
#ifndef _ErrorHandling
#define _ErrorHandling
#include "logging.h"
// EXCEPTIONCODE_DebugBreakorAV is just the base exception number; calls to DebugBreakorAV()
// pass a unique number to add to this. EXCEPTIONCODE_DebugBreakorAV_MAX is the maximum number
// of this exception range.
#define EXCEPTIONCODE_DebugBreakorAV 0xe0421000
#define EXCEPTIONCODE_DebugBreakorAV_MAX 0xe0422000
#define EXCEPTIONCODE_MC 0xe0422000
#define EXCEPTIONCODE_LWM 0xe0423000
#define EXCEPTIONCODE_CALLUTILS 0xe0426000
#define EXCEPTIONCODE_TYPEUTILS 0xe0427000
#define EXCEPTIONCODE_ASSERT 0xe0440000
// RaiseException wrappers
void MSC_ONLY(__declspec(noreturn)) ThrowException(DWORD exceptionCode);
void MSC_ONLY(__declspec(noreturn)) ThrowException(DWORD exceptionCode, const char* message, ...);
// Assert stuff
#define AssertCodeMsg(expr, exCode, msg, ...) \
do \
{ \
if (!(expr)) \
LogException(exCode, "SuperPMI assertion '%s' failed (" #msg ")", #expr, ##__VA_ARGS__); \
} while (0)
#define AssertCode(expr, exCode) \
do \
{ \
if (!(expr)) \
LogException(exCode, "SuperPMI assertion '%s' failed", #expr); \
} while (0)
#define AssertMapExists(map, keymsg, ...) \
do \
{ \
if (map == nullptr) \
LogException(EXCEPTIONCODE_MC, "SuperPMI assertion failed (missing map " #map ")" keymsg, ##__VA_ARGS__); \
} while (0)
#define AssertKeyExists(map, key, keymsg, ...) \
do \
{ \
if (map->GetIndex(key) == -1) \
LogException(EXCEPTIONCODE_MC, "SuperPMI assertion failed (missing key \"" #key "\" in map " #map ")" keymsg, ##__VA_ARGS__); \
} while (0)
#define AssertMapAndKeyExist(map, key, keymsg, ...) \
do \
{ \
if (map == nullptr) \
LogException(EXCEPTIONCODE_MC, "SuperPMI assertion failed (missing map " #map ")" keymsg, ##__VA_ARGS__); \
if (map->GetIndex(key) == -1) \
LogException(EXCEPTIONCODE_MC, "SuperPMI assertion failed (missing key \"" #key "\" in map " #map ")" keymsg, ##__VA_ARGS__); \
} while (0)
// clang doesn't allow for an empty __VA_ARGS__, so we need to pass something non-empty to `LogException`, below.
#define AssertMapExistsNoMessage(map) \
do \
{ \
if (map == nullptr) \
LogException(EXCEPTIONCODE_MC, "SuperPMI assertion failed (missing map " #map ")", ""); \
} while (0)
#define AssertKeyExistsNoMessage(map, key) \
do \
{ \
if (map->GetIndex(key) == -1) \
LogException(EXCEPTIONCODE_MC, "SuperPMI assertion failed (missing key \"" #key "\" in map " #map ")", "");\
} while (0)
#define AssertMapAndKeyExistNoMessage(map, key) \
do \
{ \
if (map == nullptr) \
LogException(EXCEPTIONCODE_MC, "SuperPMI assertion failed (missing map " #map ")", ""); \
if (map->GetIndex(key) == -1) \
LogException(EXCEPTIONCODE_MC, "SuperPMI assertion failed (missing key \"" #key "\" in map " #map ")", "");\
} while (0)
#define AssertMsg(expr, msg, ...) AssertCodeMsg(expr, EXCEPTIONCODE_ASSERT, msg, ##__VA_ARGS__)
#define Assert(expr) AssertCode(expr, EXCEPTIONCODE_ASSERT)
//
// Functions and types used by PAL_TRY-related macros.
//
extern LONG FilterSuperPMIExceptions_CatchMC(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam);
struct FilterSuperPMIExceptionsParam_CaptureException
{
DWORD exceptionCode;
char* exceptionMessage; // 'new' memory passed from ThrowException()
FilterSuperPMIExceptionsParam_CaptureException()
: exceptionCode(0)
, exceptionMessage(nullptr)
{
}
// Note: this is called during an SEH filter; the data pointed to by PEXCEPTION_POINTERS is not valid after
// calling this function, so anything we want to safe must be copied.
// The exception message string is 'new' memory, allocated in the ThrowException() function.
void Initialize(PEXCEPTION_POINTERS pExceptionPointers)
{
exceptionCode = pExceptionPointers->ExceptionRecord->ExceptionCode;
exceptionMessage = (pExceptionPointers->ExceptionRecord->NumberParameters != 1) ? nullptr : (char*)pExceptionPointers->ExceptionRecord->ExceptionInformation[0];
}
};
extern LONG FilterSuperPMIExceptions_CaptureExceptionAndStop(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam);
extern bool RunWithErrorTrap(void (*function)(void*), void* param);
extern bool RunWithSPMIErrorTrap(void (*function)(void*), void* param);
extern void RunWithErrorExceptionCodeCaptureAndContinueImp(void* param, void (*function)(void*), void (*finallyFunction)(void*, DWORD));
template <typename LambdaType>
class LambdaExecutor
{
public:
LambdaType& _lambda;
LambdaExecutor(LambdaType& lambda) : _lambda(lambda) {}
};
template <typename LambdaTry, typename LambdaFinally>
void RunWithErrorExceptionCodeCaptureAndContinue(LambdaTry function, LambdaFinally finally)
{
struct LambdaArguments
{
LambdaExecutor<LambdaTry> *pTryLambda;
LambdaExecutor<LambdaFinally> *pFinallyLambda;
} lambdaArgs;
LambdaExecutor<LambdaTry> tryStorage(function);
LambdaExecutor<LambdaFinally> finallyStorage(finally);
lambdaArgs.pTryLambda = &tryStorage;
lambdaArgs.pFinallyLambda = &finallyStorage;
RunWithErrorExceptionCodeCaptureAndContinueImp(&lambdaArgs,
[](void* pParam)
{
((LambdaArguments*)pParam)->pTryLambda->_lambda();
},
[](void* pParam, DWORD exceptionCode)
{
((LambdaArguments*)pParam)->pFinallyLambda->_lambda(exceptionCode);
});
}
class SpmiException
{
private:
DWORD exCode;
char* exMessage;
public:
SpmiException(FilterSuperPMIExceptionsParam_CaptureException* e);
#if 0
~SpmiException();
#endif
char* GetExceptionMessage();
DWORD GetCode();
void ShowAndDeleteMessage();
void DeleteMessage();
};
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// ErrorHandling.h - Helpers & whatnot for using SEH for errors
//----------------------------------------------------------
#ifndef _ErrorHandling
#define _ErrorHandling
#include "logging.h"
// EXCEPTIONCODE_DebugBreakorAV is just the base exception number; calls to DebugBreakorAV()
// pass a unique number to add to this. EXCEPTIONCODE_DebugBreakorAV_MAX is the maximum number
// of this exception range.
#define EXCEPTIONCODE_DebugBreakorAV 0xe0421000
#define EXCEPTIONCODE_DebugBreakorAV_MAX 0xe0422000
#define EXCEPTIONCODE_MC 0xe0422000
#define EXCEPTIONCODE_LWM 0xe0423000
#define EXCEPTIONCODE_CALLUTILS 0xe0426000
#define EXCEPTIONCODE_TYPEUTILS 0xe0427000
#define EXCEPTIONCODE_ASSERT 0xe0440000
// RaiseException wrappers
void MSC_ONLY(__declspec(noreturn)) ThrowException(DWORD exceptionCode);
void MSC_ONLY(__declspec(noreturn)) ThrowException(DWORD exceptionCode, const char* message, ...);
// Assert stuff
#define AssertCodeMsg(expr, exCode, msg, ...) \
do \
{ \
if (!(expr)) \
LogException(exCode, "SuperPMI assertion '%s' failed (" #msg ")", #expr, ##__VA_ARGS__); \
} while (0)
#define AssertCode(expr, exCode) \
do \
{ \
if (!(expr)) \
LogException(exCode, "SuperPMI assertion '%s' failed", #expr); \
} while (0)
#define AssertMapExists(map, keymsg, ...) \
do \
{ \
if (map == nullptr) \
LogException(EXCEPTIONCODE_MC, "SuperPMI assertion failed (missing map " #map ")" keymsg, ##__VA_ARGS__); \
} while (0)
#define AssertKeyExists(map, key, keymsg, ...) \
do \
{ \
if (map->GetIndex(key) == -1) \
LogException(EXCEPTIONCODE_MC, "SuperPMI assertion failed (missing key \"" #key "\" in map " #map ")" keymsg, ##__VA_ARGS__); \
} while (0)
#define AssertMapAndKeyExist(map, key, keymsg, ...) \
do \
{ \
if (map == nullptr) \
LogException(EXCEPTIONCODE_MC, "SuperPMI assertion failed (missing map " #map ")" keymsg, ##__VA_ARGS__); \
if (map->GetIndex(key) == -1) \
LogException(EXCEPTIONCODE_MC, "SuperPMI assertion failed (missing key \"" #key "\" in map " #map ")" keymsg, ##__VA_ARGS__); \
} while (0)
// clang doesn't allow for an empty __VA_ARGS__, so we need to pass something non-empty to `LogException`, below.
#define AssertMapExistsNoMessage(map) \
do \
{ \
if (map == nullptr) \
LogException(EXCEPTIONCODE_MC, "SuperPMI assertion failed (missing map " #map ")", ""); \
} while (0)
#define AssertKeyExistsNoMessage(map, key) \
do \
{ \
if (map->GetIndex(key) == -1) \
LogException(EXCEPTIONCODE_MC, "SuperPMI assertion failed (missing key \"" #key "\" in map " #map ")", "");\
} while (0)
#define AssertMapAndKeyExistNoMessage(map, key) \
do \
{ \
if (map == nullptr) \
LogException(EXCEPTIONCODE_MC, "SuperPMI assertion failed (missing map " #map ")", ""); \
if (map->GetIndex(key) == -1) \
LogException(EXCEPTIONCODE_MC, "SuperPMI assertion failed (missing key \"" #key "\" in map " #map ")", "");\
} while (0)
#define AssertMsg(expr, msg, ...) AssertCodeMsg(expr, EXCEPTIONCODE_ASSERT, msg, ##__VA_ARGS__)
#define Assert(expr) AssertCode(expr, EXCEPTIONCODE_ASSERT)
//
// Functions and types used by PAL_TRY-related macros.
//
extern LONG FilterSuperPMIExceptions_CatchMC(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam);
struct FilterSuperPMIExceptionsParam_CaptureException
{
DWORD exceptionCode;
char* exceptionMessage; // 'new' memory passed from ThrowException()
FilterSuperPMIExceptionsParam_CaptureException()
: exceptionCode(0)
, exceptionMessage(nullptr)
{
}
// Note: this is called during an SEH filter; the data pointed to by PEXCEPTION_POINTERS is not valid after
// calling this function, so anything we want to safe must be copied.
// The exception message string is 'new' memory, allocated in the ThrowException() function.
void Initialize(PEXCEPTION_POINTERS pExceptionPointers)
{
exceptionCode = pExceptionPointers->ExceptionRecord->ExceptionCode;
exceptionMessage = (pExceptionPointers->ExceptionRecord->NumberParameters != 1) ? nullptr : (char*)pExceptionPointers->ExceptionRecord->ExceptionInformation[0];
}
};
extern LONG FilterSuperPMIExceptions_CaptureExceptionAndStop(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam);
extern bool RunWithErrorTrap(void (*function)(void*), void* param);
extern bool RunWithSPMIErrorTrap(void (*function)(void*), void* param);
extern void RunWithErrorExceptionCodeCaptureAndContinueImp(void* param, void (*function)(void*), void (*finallyFunction)(void*, DWORD));
template <typename LambdaType>
class LambdaExecutor
{
public:
LambdaType& _lambda;
LambdaExecutor(LambdaType& lambda) : _lambda(lambda) {}
};
template <typename LambdaTry, typename LambdaFinally>
void RunWithErrorExceptionCodeCaptureAndContinue(LambdaTry function, LambdaFinally finally)
{
struct LambdaArguments
{
LambdaExecutor<LambdaTry> *pTryLambda;
LambdaExecutor<LambdaFinally> *pFinallyLambda;
} lambdaArgs;
LambdaExecutor<LambdaTry> tryStorage(function);
LambdaExecutor<LambdaFinally> finallyStorage(finally);
lambdaArgs.pTryLambda = &tryStorage;
lambdaArgs.pFinallyLambda = &finallyStorage;
RunWithErrorExceptionCodeCaptureAndContinueImp(&lambdaArgs,
[](void* pParam)
{
((LambdaArguments*)pParam)->pTryLambda->_lambda();
},
[](void* pParam, DWORD exceptionCode)
{
((LambdaArguments*)pParam)->pFinallyLambda->_lambda(exceptionCode);
});
}
class SpmiException
{
private:
DWORD exCode;
char* exMessage;
public:
SpmiException(FilterSuperPMIExceptionsParam_CaptureException* e);
#if 0
~SpmiException();
#endif
char* GetExceptionMessage();
DWORD GetCode();
void ShowAndDeleteMessage();
void DeleteMessage();
};
#endif
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/mono/mono/sgen/sgen-memory-governor.h | /**
* \file
* Copyright 2001-2003 Ximian, Inc
* Copyright 2003-2010 Novell, Inc.
*
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_SGEN_MEMORY_GOVERNOR_H__
#define __MONO_SGEN_MEMORY_GOVERNOR_H__
/* Heap limits */
void sgen_memgov_init (size_t max_heap, size_t soft_limit, gboolean debug_allowance, double min_allowance_ratio, double save_target);
void sgen_memgov_release_space (mword size, int space);
gboolean sgen_memgov_try_alloc_space (mword size, int space);
/* GC trigger heuristics */
void sgen_memgov_minor_collection_start (void);
void sgen_memgov_minor_collection_end (const char *reason, gboolean is_overflow);
void sgen_memgov_major_pre_sweep (void);
void sgen_memgov_major_post_sweep (mword used_slots_size);
void sgen_memgov_major_collection_start (gboolean concurrent, const char *reason);
void sgen_memgov_major_collection_end (gboolean forced, gboolean concurrent, const char *reason, gboolean is_overflow);
void sgen_memgov_collection_start (int generation);
void sgen_memgov_collection_end (int generation, gint64 stw);
gboolean sgen_need_major_collection (mword space_needed, gboolean *forced);
typedef enum {
SGEN_ALLOC_INTERNAL = 0,
SGEN_ALLOC_HEAP = 1,
SGEN_ALLOC_ACTIVATE = 2
} SgenAllocFlags;
typedef enum {
SGEN_LOG_NURSERY,
SGEN_LOG_MAJOR_SERIAL,
SGEN_LOG_MAJOR_CONC_START,
SGEN_LOG_MAJOR_CONC_FINISH,
SGEN_LOG_MAJOR_SWEEP_FINISH
} SgenLogType;
typedef struct {
SgenLogType type;
const char *reason;
gboolean is_overflow;
gint64 time;
mword promoted_size;
mword major_size;
mword major_size_in_use;
mword los_size;
mword los_size_in_use;
} SgenLogEntry;
/* OS memory allocation */
void* sgen_alloc_os_memory (size_t size, SgenAllocFlags flags, const char *assert_description, MonoMemAccountType type);
void* sgen_alloc_os_memory_aligned (size_t size, mword alignment, SgenAllocFlags flags, const char *assert_description, MonoMemAccountType type);
void sgen_free_os_memory (void *addr, size_t size, SgenAllocFlags flags, MonoMemAccountType type);
/* Error handling */
void sgen_assert_memory_alloc (void *ptr, size_t requested_size, const char *assert_description);
#endif
| /**
* \file
* Copyright 2001-2003 Ximian, Inc
* Copyright 2003-2010 Novell, Inc.
*
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_SGEN_MEMORY_GOVERNOR_H__
#define __MONO_SGEN_MEMORY_GOVERNOR_H__
/* Heap limits */
void sgen_memgov_init (size_t max_heap, size_t soft_limit, gboolean debug_allowance, double min_allowance_ratio, double save_target);
void sgen_memgov_release_space (mword size, int space);
gboolean sgen_memgov_try_alloc_space (mword size, int space);
/* GC trigger heuristics */
void sgen_memgov_minor_collection_start (void);
void sgen_memgov_minor_collection_end (const char *reason, gboolean is_overflow);
void sgen_memgov_major_pre_sweep (void);
void sgen_memgov_major_post_sweep (mword used_slots_size);
void sgen_memgov_major_collection_start (gboolean concurrent, const char *reason);
void sgen_memgov_major_collection_end (gboolean forced, gboolean concurrent, const char *reason, gboolean is_overflow);
void sgen_memgov_collection_start (int generation);
void sgen_memgov_collection_end (int generation, gint64 stw);
gboolean sgen_need_major_collection (mword space_needed, gboolean *forced);
typedef enum {
SGEN_ALLOC_INTERNAL = 0,
SGEN_ALLOC_HEAP = 1,
SGEN_ALLOC_ACTIVATE = 2
} SgenAllocFlags;
typedef enum {
SGEN_LOG_NURSERY,
SGEN_LOG_MAJOR_SERIAL,
SGEN_LOG_MAJOR_CONC_START,
SGEN_LOG_MAJOR_CONC_FINISH,
SGEN_LOG_MAJOR_SWEEP_FINISH
} SgenLogType;
typedef struct {
SgenLogType type;
const char *reason;
gboolean is_overflow;
gint64 time;
mword promoted_size;
mword major_size;
mword major_size_in_use;
mword los_size;
mword los_size_in_use;
} SgenLogEntry;
/* OS memory allocation */
void* sgen_alloc_os_memory (size_t size, SgenAllocFlags flags, const char *assert_description, MonoMemAccountType type);
void* sgen_alloc_os_memory_aligned (size_t size, mword alignment, SgenAllocFlags flags, const char *assert_description, MonoMemAccountType type);
void sgen_free_os_memory (void *addr, size_t size, SgenAllocFlags flags, MonoMemAccountType type);
/* Error handling */
void sgen_assert_memory_alloc (void *ptr, size_t requested_size, const char *assert_description);
#endif
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/classlibnative/bcltype/stringnative.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: StringNative.cpp
//
//
// Purpose: The implementation of the String class.
//
//
#include "common.h"
#include "object.h"
#include "utilcode.h"
#include "excep.h"
#include "frames.h"
#include "field.h"
#include "vars.hpp"
#include "stringnative.h"
#include "comutilnative.h"
#include "metasig.h"
#include "excep.h"
// Compile the string functionality with these pragma flags (equivalent of the command line /Ox flag)
// Compiling this functionality differently gives us significant throughout gain in some cases.
#if defined(_MSC_VER) && defined(TARGET_X86)
#pragma optimize("tgy", on)
#endif
FCIMPL2(FC_BOOL_RET, COMString::FCTryGetTrailByte, StringObject* thisRefUNSAFE, UINT8 *pbData)
{
FCALL_CONTRACT;
STRINGREF thisRef = ObjectToSTRINGREF(thisRefUNSAFE);
FC_RETURN_BOOL(thisRef->GetTrailByte(pbData));
}
FCIMPLEND
FCIMPL2(VOID, COMString::FCSetTrailByte, StringObject* thisRefUNSAFE, UINT8 bData)
{
FCALL_CONTRACT;
STRINGREF thisRef = ObjectToSTRINGREF(thisRefUNSAFE);
HELPER_METHOD_FRAME_BEGIN_1(thisRef);
thisRef->SetTrailByte(bData);
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
// Revert to command line compilation flags
#if defined(_MSC_VER) && defined(TARGET_X86)
#pragma optimize ("", on)
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: StringNative.cpp
//
//
// Purpose: The implementation of the String class.
//
//
#include "common.h"
#include "object.h"
#include "utilcode.h"
#include "excep.h"
#include "frames.h"
#include "field.h"
#include "vars.hpp"
#include "stringnative.h"
#include "comutilnative.h"
#include "metasig.h"
#include "excep.h"
// Compile the string functionality with these pragma flags (equivalent of the command line /Ox flag)
// Compiling this functionality differently gives us significant throughout gain in some cases.
#if defined(_MSC_VER) && defined(TARGET_X86)
#pragma optimize("tgy", on)
#endif
FCIMPL2(FC_BOOL_RET, COMString::FCTryGetTrailByte, StringObject* thisRefUNSAFE, UINT8 *pbData)
{
FCALL_CONTRACT;
STRINGREF thisRef = ObjectToSTRINGREF(thisRefUNSAFE);
FC_RETURN_BOOL(thisRef->GetTrailByte(pbData));
}
FCIMPLEND
FCIMPL2(VOID, COMString::FCSetTrailByte, StringObject* thisRefUNSAFE, UINT8 bData)
{
FCALL_CONTRACT;
STRINGREF thisRef = ObjectToSTRINGREF(thisRefUNSAFE);
HELPER_METHOD_FRAME_BEGIN_1(thisRef);
thisRef->SetTrailByte(bData);
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
// Revert to command line compilation flags
#if defined(_MSC_VER) && defined(TARGET_X86)
#pragma optimize ("", on)
#endif
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/tests/palsuite/c_runtime/wcspbrk/test1/test1.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose:
** Tests that wcspbrk returns a pointer to the first element in the first
** string that matches a character in the second (or NULL).
**
**
**==========================================================================*/
#include <palsuite.h>
PALTEST(c_runtime_wcspbrk_test1_paltest_wcspbrk_test1, "c_runtime/wcspbrk/test1/paltest_wcspbrk_test1")
{
WCHAR *string;
WCHAR *key1;
WCHAR *key2;
WCHAR key3[] = {0};
WCHAR *result;
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
string = convert("foo bar baz bar");
key1 = convert("z ");
key2 = convert("Q");
result = wcspbrk(string, key1);
if (result != string + 3)
{
Fail("ERROR: Got incorrect result in scanning \"%s\" with the set \"%s\".\n"
"Expected to get pointer to %#p, got %#p\n", convertC(string),
convertC(key1), string + 3, result);
}
result = wcspbrk(string, key2);
if (result != NULL)
{
Fail("ERROR: Got incorrect result in scanning \"%s\" with the set \"%s\".\n"
"Expected to get pointer to %#p, got %#p\n", convertC(string),
convertC(key2), NULL, result);
}
result = wcspbrk(string, key3);
if (result != NULL)
{
Fail("ERROR: Got incorrect result in scanning \"%s\" with the set \"%s\".\n"
"Expected to get pointer to %#p, got %#p\n", convertC(string),
convertC(key3), NULL, result);
}
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose:
** Tests that wcspbrk returns a pointer to the first element in the first
** string that matches a character in the second (or NULL).
**
**
**==========================================================================*/
#include <palsuite.h>
PALTEST(c_runtime_wcspbrk_test1_paltest_wcspbrk_test1, "c_runtime/wcspbrk/test1/paltest_wcspbrk_test1")
{
WCHAR *string;
WCHAR *key1;
WCHAR *key2;
WCHAR key3[] = {0};
WCHAR *result;
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
string = convert("foo bar baz bar");
key1 = convert("z ");
key2 = convert("Q");
result = wcspbrk(string, key1);
if (result != string + 3)
{
Fail("ERROR: Got incorrect result in scanning \"%s\" with the set \"%s\".\n"
"Expected to get pointer to %#p, got %#p\n", convertC(string),
convertC(key1), string + 3, result);
}
result = wcspbrk(string, key2);
if (result != NULL)
{
Fail("ERROR: Got incorrect result in scanning \"%s\" with the set \"%s\".\n"
"Expected to get pointer to %#p, got %#p\n", convertC(string),
convertC(key2), NULL, result);
}
result = wcspbrk(string, key3);
if (result != NULL)
{
Fail("ERROR: Got incorrect result in scanning \"%s\" with the set \"%s\".\n"
"Expected to get pointer to %#p, got %#p\n", convertC(string),
convertC(key3), NULL, result);
}
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/jit/compphases.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: CompPhases.h
//
//
// Names of JIT phases, in order. Assumes that the caller defines CompPhaseNameMacro
// in a useful way before including this file, e.g., to define the phase enumeration and the
// corresponding array of string names of those phases. This include file undefines CompPhaseNameMacro
// after the last use.
// The arguments are:
// CompPhaseNameMacro(enumName, stringName, shortName, hasChildren, parent, measureIR)
// "enumName" is an Enumeration-style all-caps name.
// "stringName" is a self-explanatory.
// "shortName" is an abbreviated form for stringName
// "hasChildren" is true if this phase is broken out into subphases.
// (We should never do EndPhase on a phase that has children, only on 'leaf phases.')
// "parent" is -1 for leaf phases, otherwise it is the "enumName" of the parent phase.
// "measureIR" is true for phases that generate a count of IR nodes during EndPhase when JitConfig.MeasureIR is
// true.
// clang-format off
// enumName stringName shortName hasChildren measureIR
// parent
CompPhaseNameMacro(PHASE_PRE_IMPORT, "Pre-import", "PRE-IMP", false, -1, false)
CompPhaseNameMacro(PHASE_IMPORTATION, "Importation", "IMPORT", false, -1, true)
CompPhaseNameMacro(PHASE_INDXCALL, "Indirect call transform", "INDXCALL", false, -1, true)
CompPhaseNameMacro(PHASE_PATCHPOINTS, "Expand patchpoints", "PPOINT", false, -1, true)
CompPhaseNameMacro(PHASE_POST_IMPORT, "Post-import", "POST-IMP", false, -1, false)
CompPhaseNameMacro(PHASE_IBCPREP, "Profile instrumentation prep", "IBCPREP", false, -1, false)
CompPhaseNameMacro(PHASE_IBCINSTR, "Profile instrumentation", "IBCINSTR", false, -1, false)
CompPhaseNameMacro(PHASE_INCPROFILE, "Profile incorporation", "INCPROF", false, -1, false)
CompPhaseNameMacro(PHASE_MORPH_INIT, "Morph - Init", "MOR-INIT", false, -1, false)
CompPhaseNameMacro(PHASE_MORPH_INLINE, "Morph - Inlining", "MOR-INL", false, -1, true)
CompPhaseNameMacro(PHASE_MORPH_ADD_INTERNAL, "Morph - Add internal blocks", "MOR-ADD", false, -1, true)
CompPhaseNameMacro(PHASE_ALLOCATE_OBJECTS, "Allocate Objects", "ALLOC-OBJ", false, -1, false)
CompPhaseNameMacro(PHASE_EMPTY_TRY, "Remove empty try", "EMPTYTRY", false, -1, false)
CompPhaseNameMacro(PHASE_EMPTY_FINALLY, "Remove empty finally", "EMPTYFIN", false, -1, false)
CompPhaseNameMacro(PHASE_MERGE_FINALLY_CHAINS, "Merge callfinally chains", "MRGCFCHN", false, -1, false)
CompPhaseNameMacro(PHASE_CLONE_FINALLY, "Clone finally", "CLONEFIN", false, -1, false)
CompPhaseNameMacro(PHASE_UPDATE_FINALLY_FLAGS, "Update finally target flags", "UPD-FTF", false, -1, false)
CompPhaseNameMacro(PHASE_COMPUTE_PREDS, "Compute preds", "PREDS", false, -1, false)
CompPhaseNameMacro(PHASE_EARLY_UPDATE_FLOW_GRAPH,"Update flow graph early pass", "UPD-FG-E", false, -1, false)
CompPhaseNameMacro(PHASE_STR_ADRLCL, "Morph - Structs/AddrExp", "MOR-STRAL",false, -1, false)
CompPhaseNameMacro(PHASE_FWD_SUB, "Forward Substitution", "FWD-SUB", false, -1, false)
CompPhaseNameMacro(PHASE_MORPH_IMPBYREF, "Morph - ByRefs", "MOR-BYREF",false, -1, false)
CompPhaseNameMacro(PHASE_PROMOTE_STRUCTS, "Morph - Promote Structs", "PROMOTER" ,false, -1, false)
CompPhaseNameMacro(PHASE_MORPH_GLOBAL, "Morph - Global", "MOR-GLOB", false, -1, false)
CompPhaseNameMacro(PHASE_MORPH_END, "Morph - Finish", "MOR-END", false, -1, true)
CompPhaseNameMacro(PHASE_GS_COOKIE, "GS Cookie", "GS-COOK", false, -1, false)
CompPhaseNameMacro(PHASE_COMPUTE_EDGE_WEIGHTS, "Compute edge weights (1, false)","EDG-WGT", false, -1, false)
#if defined(FEATURE_EH_FUNCLETS)
CompPhaseNameMacro(PHASE_CREATE_FUNCLETS, "Create EH funclets", "EH-FUNC", false, -1, false)
#endif // FEATURE_EH_FUNCLETS
CompPhaseNameMacro(PHASE_MERGE_THROWS, "Merge throw blocks", "MRGTHROW", false, -1, false)
CompPhaseNameMacro(PHASE_INVERT_LOOPS, "Invert loops", "LOOP-INV", false, -1, false)
CompPhaseNameMacro(PHASE_OPTIMIZE_LAYOUT, "Optimize layout", "LAYOUT", false, -1, false)
CompPhaseNameMacro(PHASE_COMPUTE_REACHABILITY, "Compute blocks reachability", "BL_REACH", false, -1, false)
CompPhaseNameMacro(PHASE_SET_BLOCK_WEIGHTS, "Set block weights", "BL-WEIGHTS", false, -1, false)
CompPhaseNameMacro(PHASE_ZERO_INITS, "Redundant zero Inits", "ZERO-INIT", false, -1, false)
CompPhaseNameMacro(PHASE_FIND_LOOPS, "Find loops", "LOOP-FND", false, -1, false)
CompPhaseNameMacro(PHASE_CLONE_LOOPS, "Clone loops", "LP-CLONE", false, -1, false)
CompPhaseNameMacro(PHASE_UNROLL_LOOPS, "Unroll loops", "UNROLL", false, -1, false)
CompPhaseNameMacro(PHASE_CLEAR_LOOP_INFO, "Clear loop info", "LP-CLEAR", false, -1, false)
CompPhaseNameMacro(PHASE_HOIST_LOOP_CODE, "Hoist loop code", "LP-HOIST", false, -1, false)
CompPhaseNameMacro(PHASE_MARK_LOCAL_VARS, "Mark local vars", "MARK-LCL", false, -1, false)
CompPhaseNameMacro(PHASE_OPTIMIZE_BOOLS, "Optimize bools", "OPT-BOOL", false, -1, false)
CompPhaseNameMacro(PHASE_FIND_OPER_ORDER, "Find oper order", "OPER-ORD", false, -1, false)
CompPhaseNameMacro(PHASE_SET_BLOCK_ORDER, "Set block order", "BLK-ORD", false, -1, true)
CompPhaseNameMacro(PHASE_BUILD_SSA, "Build SSA representation", "SSA", true, -1, false)
CompPhaseNameMacro(PHASE_BUILD_SSA_TOPOSORT, "SSA: topological sort", "SSA-SORT", false, PHASE_BUILD_SSA, false)
CompPhaseNameMacro(PHASE_BUILD_SSA_DOMS, "SSA: Doms1", "SSA-DOMS", false, PHASE_BUILD_SSA, false)
CompPhaseNameMacro(PHASE_BUILD_SSA_LIVENESS, "SSA: liveness", "SSA-LIVE", false, PHASE_BUILD_SSA, false)
CompPhaseNameMacro(PHASE_BUILD_SSA_DF, "SSA: DF", "SSA-DF", false, PHASE_BUILD_SSA, false)
CompPhaseNameMacro(PHASE_BUILD_SSA_INSERT_PHIS, "SSA: insert phis", "SSA-PHI", false, PHASE_BUILD_SSA, false)
CompPhaseNameMacro(PHASE_BUILD_SSA_RENAME, "SSA: rename", "SSA-REN", false, PHASE_BUILD_SSA, false)
CompPhaseNameMacro(PHASE_EARLY_PROP, "Early Value Propagation", "ERL-PROP", false, -1, false)
CompPhaseNameMacro(PHASE_VALUE_NUMBER, "Do value numbering", "VAL-NUM", false, -1, false)
CompPhaseNameMacro(PHASE_OPTIMIZE_INDEX_CHECKS, "Optimize index checks", "OPT-CHK", false, -1, false)
CompPhaseNameMacro(PHASE_OPTIMIZE_VALNUM_CSES, "Optimize Valnum CSEs", "OPT-CSE", false, -1, false)
CompPhaseNameMacro(PHASE_VN_COPY_PROP, "VN based copy prop", "CP-PROP", false, -1, false)
CompPhaseNameMacro(PHASE_OPTIMIZE_BRANCHES, "Redundant branch opts", "OPT-BR", false, -1, false)
CompPhaseNameMacro(PHASE_ASSERTION_PROP_MAIN, "Assertion prop", "AST-PROP", false, -1, false)
CompPhaseNameMacro(PHASE_OPT_UPDATE_FLOW_GRAPH, "Update flow graph opt pass", "UPD-FG-O", false, -1, false)
CompPhaseNameMacro(PHASE_COMPUTE_EDGE_WEIGHTS2, "Compute edge weights (2, false)","EDG-WGT2", false, -1, false)
CompPhaseNameMacro(PHASE_INSERT_GC_POLLS, "Insert GC Polls", "GC-POLLS", false, -1, true)
CompPhaseNameMacro(PHASE_DETERMINE_FIRST_COLD_BLOCK, "Determine first cold block", "COLD-BLK", false, -1, true)
CompPhaseNameMacro(PHASE_RATIONALIZE, "Rationalize IR", "RAT", false, -1, false)
CompPhaseNameMacro(PHASE_SIMPLE_LOWERING, "Do 'simple' lowering", "SMP-LWR", false, -1, false)
CompPhaseNameMacro(PHASE_LCLVARLIVENESS, "Local var liveness", "LIVENESS", true, -1, false)
CompPhaseNameMacro(PHASE_LCLVARLIVENESS_INIT, "Local var liveness init", "LIV-INIT", false, PHASE_LCLVARLIVENESS, false)
CompPhaseNameMacro(PHASE_LCLVARLIVENESS_PERBLOCK,"Per block local var liveness", "LIV-BLK", false, PHASE_LCLVARLIVENESS, false)
CompPhaseNameMacro(PHASE_LCLVARLIVENESS_INTERBLOCK, "Global local var liveness", "LIV-GLBL", false, PHASE_LCLVARLIVENESS, false)
CompPhaseNameMacro(PHASE_LOWERING_DECOMP, "Lowering decomposition", "LWR-DEC", false, -1, false)
CompPhaseNameMacro(PHASE_LOWERING, "Lowering nodeinfo", "LWR-INFO", false, -1, true)
CompPhaseNameMacro(PHASE_STACK_LEVEL_SETTER, "Calculate stack level slots", "STK-SET", false, -1, false)
CompPhaseNameMacro(PHASE_LINEAR_SCAN, "Linear scan register alloc", "LSRA", true, -1, true)
CompPhaseNameMacro(PHASE_LINEAR_SCAN_BUILD, "LSRA build intervals", "LSRA-BLD", false, PHASE_LINEAR_SCAN, false)
CompPhaseNameMacro(PHASE_LINEAR_SCAN_ALLOC, "LSRA allocate", "LSRA-ALL", false, PHASE_LINEAR_SCAN, false)
CompPhaseNameMacro(PHASE_LINEAR_SCAN_RESOLVE, "LSRA resolve", "LSRA-RES", false, PHASE_LINEAR_SCAN, false)
CompPhaseNameMacro(PHASE_ALIGN_LOOPS, "Place 'align' instructions", "LOOP-ALIGN", false, -1, false)
CompPhaseNameMacro(PHASE_GENERATE_CODE, "Generate code", "CODEGEN", false, -1, false)
CompPhaseNameMacro(PHASE_EMIT_CODE, "Emit code", "EMIT", false, -1, false)
CompPhaseNameMacro(PHASE_EMIT_GCEH, "Emit GC+EH tables", "EMT-GCEH", false, -1, false)
CompPhaseNameMacro(PHASE_POST_EMIT, "Post-Emit", "POST-EMIT", false, -1, false)
#if MEASURE_CLRAPI_CALLS
// The following is a "pseudo-phase" - it aggregates timing info
// for calls through ICorJitInfo across all "real" phases.
CompPhaseNameMacro(PHASE_CLR_API, "CLR API calls", "CLR-API", false, -1, false)
#endif
// clang-format on
#undef CompPhaseNameMacro
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: CompPhases.h
//
//
// Names of JIT phases, in order. Assumes that the caller defines CompPhaseNameMacro
// in a useful way before including this file, e.g., to define the phase enumeration and the
// corresponding array of string names of those phases. This include file undefines CompPhaseNameMacro
// after the last use.
// The arguments are:
// CompPhaseNameMacro(enumName, stringName, shortName, hasChildren, parent, measureIR)
// "enumName" is an Enumeration-style all-caps name.
// "stringName" is a self-explanatory.
// "shortName" is an abbreviated form for stringName
// "hasChildren" is true if this phase is broken out into subphases.
// (We should never do EndPhase on a phase that has children, only on 'leaf phases.')
// "parent" is -1 for leaf phases, otherwise it is the "enumName" of the parent phase.
// "measureIR" is true for phases that generate a count of IR nodes during EndPhase when JitConfig.MeasureIR is
// true.
// clang-format off
// enumName stringName shortName hasChildren measureIR
// parent
CompPhaseNameMacro(PHASE_PRE_IMPORT, "Pre-import", "PRE-IMP", false, -1, false)
CompPhaseNameMacro(PHASE_IMPORTATION, "Importation", "IMPORT", false, -1, true)
CompPhaseNameMacro(PHASE_INDXCALL, "Indirect call transform", "INDXCALL", false, -1, true)
CompPhaseNameMacro(PHASE_PATCHPOINTS, "Expand patchpoints", "PPOINT", false, -1, true)
CompPhaseNameMacro(PHASE_POST_IMPORT, "Post-import", "POST-IMP", false, -1, false)
CompPhaseNameMacro(PHASE_IBCPREP, "Profile instrumentation prep", "IBCPREP", false, -1, false)
CompPhaseNameMacro(PHASE_IBCINSTR, "Profile instrumentation", "IBCINSTR", false, -1, false)
CompPhaseNameMacro(PHASE_INCPROFILE, "Profile incorporation", "INCPROF", false, -1, false)
CompPhaseNameMacro(PHASE_MORPH_INIT, "Morph - Init", "MOR-INIT", false, -1, false)
CompPhaseNameMacro(PHASE_MORPH_INLINE, "Morph - Inlining", "MOR-INL", false, -1, true)
CompPhaseNameMacro(PHASE_MORPH_ADD_INTERNAL, "Morph - Add internal blocks", "MOR-ADD", false, -1, true)
CompPhaseNameMacro(PHASE_ALLOCATE_OBJECTS, "Allocate Objects", "ALLOC-OBJ", false, -1, false)
CompPhaseNameMacro(PHASE_EMPTY_TRY, "Remove empty try", "EMPTYTRY", false, -1, false)
CompPhaseNameMacro(PHASE_EMPTY_FINALLY, "Remove empty finally", "EMPTYFIN", false, -1, false)
CompPhaseNameMacro(PHASE_MERGE_FINALLY_CHAINS, "Merge callfinally chains", "MRGCFCHN", false, -1, false)
CompPhaseNameMacro(PHASE_CLONE_FINALLY, "Clone finally", "CLONEFIN", false, -1, false)
CompPhaseNameMacro(PHASE_UPDATE_FINALLY_FLAGS, "Update finally target flags", "UPD-FTF", false, -1, false)
CompPhaseNameMacro(PHASE_COMPUTE_PREDS, "Compute preds", "PREDS", false, -1, false)
CompPhaseNameMacro(PHASE_EARLY_UPDATE_FLOW_GRAPH,"Update flow graph early pass", "UPD-FG-E", false, -1, false)
CompPhaseNameMacro(PHASE_STR_ADRLCL, "Morph - Structs/AddrExp", "MOR-STRAL",false, -1, false)
CompPhaseNameMacro(PHASE_FWD_SUB, "Forward Substitution", "FWD-SUB", false, -1, false)
CompPhaseNameMacro(PHASE_MORPH_IMPBYREF, "Morph - ByRefs", "MOR-BYREF",false, -1, false)
CompPhaseNameMacro(PHASE_PROMOTE_STRUCTS, "Morph - Promote Structs", "PROMOTER" ,false, -1, false)
CompPhaseNameMacro(PHASE_MORPH_GLOBAL, "Morph - Global", "MOR-GLOB", false, -1, false)
CompPhaseNameMacro(PHASE_MORPH_END, "Morph - Finish", "MOR-END", false, -1, true)
CompPhaseNameMacro(PHASE_GS_COOKIE, "GS Cookie", "GS-COOK", false, -1, false)
CompPhaseNameMacro(PHASE_COMPUTE_EDGE_WEIGHTS, "Compute edge weights (1, false)","EDG-WGT", false, -1, false)
#if defined(FEATURE_EH_FUNCLETS)
CompPhaseNameMacro(PHASE_CREATE_FUNCLETS, "Create EH funclets", "EH-FUNC", false, -1, false)
#endif // FEATURE_EH_FUNCLETS
CompPhaseNameMacro(PHASE_MERGE_THROWS, "Merge throw blocks", "MRGTHROW", false, -1, false)
CompPhaseNameMacro(PHASE_INVERT_LOOPS, "Invert loops", "LOOP-INV", false, -1, false)
CompPhaseNameMacro(PHASE_OPTIMIZE_LAYOUT, "Optimize layout", "LAYOUT", false, -1, false)
CompPhaseNameMacro(PHASE_COMPUTE_REACHABILITY, "Compute blocks reachability", "BL_REACH", false, -1, false)
CompPhaseNameMacro(PHASE_SET_BLOCK_WEIGHTS, "Set block weights", "BL-WEIGHTS", false, -1, false)
CompPhaseNameMacro(PHASE_ZERO_INITS, "Redundant zero Inits", "ZERO-INIT", false, -1, false)
CompPhaseNameMacro(PHASE_FIND_LOOPS, "Find loops", "LOOP-FND", false, -1, false)
CompPhaseNameMacro(PHASE_CLONE_LOOPS, "Clone loops", "LP-CLONE", false, -1, false)
CompPhaseNameMacro(PHASE_UNROLL_LOOPS, "Unroll loops", "UNROLL", false, -1, false)
CompPhaseNameMacro(PHASE_CLEAR_LOOP_INFO, "Clear loop info", "LP-CLEAR", false, -1, false)
CompPhaseNameMacro(PHASE_HOIST_LOOP_CODE, "Hoist loop code", "LP-HOIST", false, -1, false)
CompPhaseNameMacro(PHASE_MARK_LOCAL_VARS, "Mark local vars", "MARK-LCL", false, -1, false)
CompPhaseNameMacro(PHASE_OPTIMIZE_BOOLS, "Optimize bools", "OPT-BOOL", false, -1, false)
CompPhaseNameMacro(PHASE_FIND_OPER_ORDER, "Find oper order", "OPER-ORD", false, -1, false)
CompPhaseNameMacro(PHASE_SET_BLOCK_ORDER, "Set block order", "BLK-ORD", false, -1, true)
CompPhaseNameMacro(PHASE_BUILD_SSA, "Build SSA representation", "SSA", true, -1, false)
CompPhaseNameMacro(PHASE_BUILD_SSA_TOPOSORT, "SSA: topological sort", "SSA-SORT", false, PHASE_BUILD_SSA, false)
CompPhaseNameMacro(PHASE_BUILD_SSA_DOMS, "SSA: Doms1", "SSA-DOMS", false, PHASE_BUILD_SSA, false)
CompPhaseNameMacro(PHASE_BUILD_SSA_LIVENESS, "SSA: liveness", "SSA-LIVE", false, PHASE_BUILD_SSA, false)
CompPhaseNameMacro(PHASE_BUILD_SSA_DF, "SSA: DF", "SSA-DF", false, PHASE_BUILD_SSA, false)
CompPhaseNameMacro(PHASE_BUILD_SSA_INSERT_PHIS, "SSA: insert phis", "SSA-PHI", false, PHASE_BUILD_SSA, false)
CompPhaseNameMacro(PHASE_BUILD_SSA_RENAME, "SSA: rename", "SSA-REN", false, PHASE_BUILD_SSA, false)
CompPhaseNameMacro(PHASE_EARLY_PROP, "Early Value Propagation", "ERL-PROP", false, -1, false)
CompPhaseNameMacro(PHASE_VALUE_NUMBER, "Do value numbering", "VAL-NUM", false, -1, false)
CompPhaseNameMacro(PHASE_OPTIMIZE_INDEX_CHECKS, "Optimize index checks", "OPT-CHK", false, -1, false)
CompPhaseNameMacro(PHASE_OPTIMIZE_VALNUM_CSES, "Optimize Valnum CSEs", "OPT-CSE", false, -1, false)
CompPhaseNameMacro(PHASE_VN_COPY_PROP, "VN based copy prop", "CP-PROP", false, -1, false)
CompPhaseNameMacro(PHASE_OPTIMIZE_BRANCHES, "Redundant branch opts", "OPT-BR", false, -1, false)
CompPhaseNameMacro(PHASE_ASSERTION_PROP_MAIN, "Assertion prop", "AST-PROP", false, -1, false)
CompPhaseNameMacro(PHASE_OPT_UPDATE_FLOW_GRAPH, "Update flow graph opt pass", "UPD-FG-O", false, -1, false)
CompPhaseNameMacro(PHASE_COMPUTE_EDGE_WEIGHTS2, "Compute edge weights (2, false)","EDG-WGT2", false, -1, false)
CompPhaseNameMacro(PHASE_INSERT_GC_POLLS, "Insert GC Polls", "GC-POLLS", false, -1, true)
CompPhaseNameMacro(PHASE_DETERMINE_FIRST_COLD_BLOCK, "Determine first cold block", "COLD-BLK", false, -1, true)
CompPhaseNameMacro(PHASE_RATIONALIZE, "Rationalize IR", "RAT", false, -1, false)
CompPhaseNameMacro(PHASE_SIMPLE_LOWERING, "Do 'simple' lowering", "SMP-LWR", false, -1, false)
CompPhaseNameMacro(PHASE_LCLVARLIVENESS, "Local var liveness", "LIVENESS", true, -1, false)
CompPhaseNameMacro(PHASE_LCLVARLIVENESS_INIT, "Local var liveness init", "LIV-INIT", false, PHASE_LCLVARLIVENESS, false)
CompPhaseNameMacro(PHASE_LCLVARLIVENESS_PERBLOCK,"Per block local var liveness", "LIV-BLK", false, PHASE_LCLVARLIVENESS, false)
CompPhaseNameMacro(PHASE_LCLVARLIVENESS_INTERBLOCK, "Global local var liveness", "LIV-GLBL", false, PHASE_LCLVARLIVENESS, false)
CompPhaseNameMacro(PHASE_LOWERING_DECOMP, "Lowering decomposition", "LWR-DEC", false, -1, false)
CompPhaseNameMacro(PHASE_LOWERING, "Lowering nodeinfo", "LWR-INFO", false, -1, true)
CompPhaseNameMacro(PHASE_STACK_LEVEL_SETTER, "Calculate stack level slots", "STK-SET", false, -1, false)
CompPhaseNameMacro(PHASE_LINEAR_SCAN, "Linear scan register alloc", "LSRA", true, -1, true)
CompPhaseNameMacro(PHASE_LINEAR_SCAN_BUILD, "LSRA build intervals", "LSRA-BLD", false, PHASE_LINEAR_SCAN, false)
CompPhaseNameMacro(PHASE_LINEAR_SCAN_ALLOC, "LSRA allocate", "LSRA-ALL", false, PHASE_LINEAR_SCAN, false)
CompPhaseNameMacro(PHASE_LINEAR_SCAN_RESOLVE, "LSRA resolve", "LSRA-RES", false, PHASE_LINEAR_SCAN, false)
CompPhaseNameMacro(PHASE_ALIGN_LOOPS, "Place 'align' instructions", "LOOP-ALIGN", false, -1, false)
CompPhaseNameMacro(PHASE_GENERATE_CODE, "Generate code", "CODEGEN", false, -1, false)
CompPhaseNameMacro(PHASE_EMIT_CODE, "Emit code", "EMIT", false, -1, false)
CompPhaseNameMacro(PHASE_EMIT_GCEH, "Emit GC+EH tables", "EMT-GCEH", false, -1, false)
CompPhaseNameMacro(PHASE_POST_EMIT, "Post-Emit", "POST-EMIT", false, -1, false)
#if MEASURE_CLRAPI_CALLS
// The following is a "pseudo-phase" - it aggregates timing info
// for calls through ICorJitInfo across all "real" phases.
CompPhaseNameMacro(PHASE_CLR_API, "CLR API calls", "CLR-API", false, -1, false)
#endif
// clang-format on
#undef CompPhaseNameMacro
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/native/libs/System.Security.Cryptography.Native.Android/pal_ecdsa.c | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "pal_ecdsa.h"
#include "pal_bignum.h"
#include "pal_signature.h"
#include "pal_utilities.h"
ARGS_NON_NULL_ALL static jobject GetEcDsaSignatureObject(JNIEnv* env)
{
jstring algorithmName = make_java_string(env, "NONEwithECDSA");
jobject signatureObject =
(*env)->CallStaticObjectMethod(env, g_SignatureClass, g_SignatureGetInstance, algorithmName);
(*env)->DeleteLocalRef(env, algorithmName);
if (CheckJNIExceptions(env))
return NULL;
return signatureObject;
}
int32_t AndroidCryptoNative_EcDsaSign(const uint8_t* dgst, int32_t dgstlen, uint8_t* sig, int32_t* siglen, EC_KEY* key)
{
abort_if_invalid_pointer_argument (dgst);
abort_if_invalid_pointer_argument (sig);
abort_if_invalid_pointer_argument (key);
abort_if_invalid_pointer_argument (siglen);
JNIEnv* env = GetJNIEnv();
jobject signatureObject = GetEcDsaSignatureObject(env);
if (!signatureObject)
{
return FAIL;
}
jobject privateKey = (*env)->CallObjectMethod(env, key->keyPair, g_keyPairGetPrivateMethod);
if (!privateKey)
{
ReleaseLRef(env, signatureObject);
return FAIL;
}
int32_t returnValue = AndroidCryptoNative_SignWithSignatureObject(env, signatureObject, privateKey, dgst, dgstlen, sig, siglen);
ReleaseLRef(env, privateKey);
ReleaseLRef(env, signatureObject);
return returnValue;
}
int32_t AndroidCryptoNative_EcDsaVerify(const uint8_t* dgst, int32_t dgstlen, const uint8_t* sig, int32_t siglen, EC_KEY* key)
{
abort_if_invalid_pointer_argument (dgst);
abort_if_invalid_pointer_argument (sig);
abort_if_invalid_pointer_argument (key);
JNIEnv* env = GetJNIEnv();
jobject signatureObject = GetEcDsaSignatureObject(env);
if (!signatureObject)
{
return FAIL;
}
jobject publicKey = (*env)->CallObjectMethod(env, key->keyPair, g_keyPairGetPublicMethod);
int32_t returnValue = AndroidCryptoNative_VerifyWithSignatureObject(env, signatureObject, publicKey, dgst, dgstlen, sig, siglen);
ReleaseLRef(env, publicKey);
ReleaseLRef(env, signatureObject);
return returnValue;
}
int32_t AndroidCryptoNative_EcDsaSize(const EC_KEY* key)
{
abort_if_invalid_pointer_argument (key);
// The maximum size of a signature for the provided key is 2* bitlength of the order + extra bytes for the DER
// encoding. The DER encoding is as follows (with R and S being the components of the signature and all lengths
// being one byte width):
// - SEQUENCE <length of sequence> INTEGER <length of R> <R> INTEGER <length of S> <S>
// As a result, we see that there are 6 additional bytes in the DER encoding than the lengths of R and S combined.
// As the ECDSA algorithm is defined, the maximum length of R and S each is the bitlength of the order, so as a
// result we get the maximum size as 2 * bitlength of the order + 6.
// With some additional padding bytes for the bigintegers to keep them positive, we get a current max of 7.
const int derEncodingBytes = 7;
JNIEnv* env = GetJNIEnv();
jobject order = (*env)->CallObjectMethod(env, key->curveParameters, g_ECParameterSpecGetOrder);
int byteLength = AndroidCryptoNative_GetBigNumBytesIncludingPaddingByteForSign(order);
ReleaseLRef(env, order);
return 2 * byteLength + derEncodingBytes;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "pal_ecdsa.h"
#include "pal_bignum.h"
#include "pal_signature.h"
#include "pal_utilities.h"
ARGS_NON_NULL_ALL static jobject GetEcDsaSignatureObject(JNIEnv* env)
{
jstring algorithmName = make_java_string(env, "NONEwithECDSA");
jobject signatureObject =
(*env)->CallStaticObjectMethod(env, g_SignatureClass, g_SignatureGetInstance, algorithmName);
(*env)->DeleteLocalRef(env, algorithmName);
if (CheckJNIExceptions(env))
return NULL;
return signatureObject;
}
int32_t AndroidCryptoNative_EcDsaSign(const uint8_t* dgst, int32_t dgstlen, uint8_t* sig, int32_t* siglen, EC_KEY* key)
{
abort_if_invalid_pointer_argument (dgst);
abort_if_invalid_pointer_argument (sig);
abort_if_invalid_pointer_argument (key);
abort_if_invalid_pointer_argument (siglen);
JNIEnv* env = GetJNIEnv();
jobject signatureObject = GetEcDsaSignatureObject(env);
if (!signatureObject)
{
return FAIL;
}
jobject privateKey = (*env)->CallObjectMethod(env, key->keyPair, g_keyPairGetPrivateMethod);
if (!privateKey)
{
ReleaseLRef(env, signatureObject);
return FAIL;
}
int32_t returnValue = AndroidCryptoNative_SignWithSignatureObject(env, signatureObject, privateKey, dgst, dgstlen, sig, siglen);
ReleaseLRef(env, privateKey);
ReleaseLRef(env, signatureObject);
return returnValue;
}
int32_t AndroidCryptoNative_EcDsaVerify(const uint8_t* dgst, int32_t dgstlen, const uint8_t* sig, int32_t siglen, EC_KEY* key)
{
abort_if_invalid_pointer_argument (dgst);
abort_if_invalid_pointer_argument (sig);
abort_if_invalid_pointer_argument (key);
JNIEnv* env = GetJNIEnv();
jobject signatureObject = GetEcDsaSignatureObject(env);
if (!signatureObject)
{
return FAIL;
}
jobject publicKey = (*env)->CallObjectMethod(env, key->keyPair, g_keyPairGetPublicMethod);
int32_t returnValue = AndroidCryptoNative_VerifyWithSignatureObject(env, signatureObject, publicKey, dgst, dgstlen, sig, siglen);
ReleaseLRef(env, publicKey);
ReleaseLRef(env, signatureObject);
return returnValue;
}
int32_t AndroidCryptoNative_EcDsaSize(const EC_KEY* key)
{
abort_if_invalid_pointer_argument (key);
// The maximum size of a signature for the provided key is 2* bitlength of the order + extra bytes for the DER
// encoding. The DER encoding is as follows (with R and S being the components of the signature and all lengths
// being one byte width):
// - SEQUENCE <length of sequence> INTEGER <length of R> <R> INTEGER <length of S> <S>
// As a result, we see that there are 6 additional bytes in the DER encoding than the lengths of R and S combined.
// As the ECDSA algorithm is defined, the maximum length of R and S each is the bitlength of the order, so as a
// result we get the maximum size as 2 * bitlength of the order + 6.
// With some additional padding bytes for the bigintegers to keep them positive, we get a current max of 7.
const int derEncodingBytes = 7;
JNIEnv* env = GetJNIEnv();
jobject order = (*env)->CallObjectMethod(env, key->curveParameters, g_ECParameterSpecGetOrder);
int byteLength = AndroidCryptoNative_GetBigNumBytesIncludingPaddingByteForSign(order);
ReleaseLRef(env, order);
return 2 * byteLength + derEncodingBytes;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/tests/palsuite/locale_info/WideCharToMultiByte/test2/test2.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test2.c
**
** Purpose: Tests that WideCharToMultiByte respects the length of the wide
** character string.
**
**
**==========================================================================*/
#include <palsuite.h>
PALTEST(locale_info_WideCharToMultiByte_test2_paltest_widechartomultibyte_test2, "locale_info/WideCharToMultiByte/test2/paltest_widechartomultibyte_test2")
{
char mbStr[128];
WCHAR wideStr[128];
int ret;
int i;
int k;
BOOL bRet=TRUE;
/* These codepages are currently supported by the PAL */
int codePages[] ={
CP_ACP,
CP_UTF8
};
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
/* Go through all of the code pages */
for(i=0; i<(sizeof(codePages)/sizeof(int)); i++)
{
/* Filling the arrays */
for (k=0; k<128; k++)
{
wideStr[k] = 'a';
mbStr[i] = 0;
}
wideStr[127] = 0;
/* Passing a buffer that is too small */
ret = WideCharToMultiByte(codePages[i], 0, wideStr, 10,
mbStr, 0, NULL, NULL);
if (ret != 10)
{
Trace("WideCharToMultiByte did not return correct string length!\n"
"Got %d, expected %d for %d with error %u.\n", ret, 10,
codePages[i], GetLastError());
bRet = FALSE;
}
/* Passing a sufficiently large buffer */
mbStr[10] = 'b';
ret = WideCharToMultiByte(codePages[i], 0, wideStr, 10,
mbStr, 128, NULL, NULL);
if (ret != 10)
{
Trace("WideCharToMultiByte did not return correct string length!\n"
"Got %d, expected %d for code page %d with error %u.\n",
ret, 10, codePages[i], GetLastError());
bRet = FALSE;
}
/* Verifying overflow of the destination string did not occur */
if (mbStr[10] != 'b')
{
Trace("WideCharToMultiByte overflowed the destination buffer for "
"code page %d.\n", codePages[i]);
bRet = FALSE;
}
}
int result = bRet ? PASS : FAIL;
PAL_TerminateEx(result);
return result;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test2.c
**
** Purpose: Tests that WideCharToMultiByte respects the length of the wide
** character string.
**
**
**==========================================================================*/
#include <palsuite.h>
PALTEST(locale_info_WideCharToMultiByte_test2_paltest_widechartomultibyte_test2, "locale_info/WideCharToMultiByte/test2/paltest_widechartomultibyte_test2")
{
char mbStr[128];
WCHAR wideStr[128];
int ret;
int i;
int k;
BOOL bRet=TRUE;
/* These codepages are currently supported by the PAL */
int codePages[] ={
CP_ACP,
CP_UTF8
};
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
/* Go through all of the code pages */
for(i=0; i<(sizeof(codePages)/sizeof(int)); i++)
{
/* Filling the arrays */
for (k=0; k<128; k++)
{
wideStr[k] = 'a';
mbStr[i] = 0;
}
wideStr[127] = 0;
/* Passing a buffer that is too small */
ret = WideCharToMultiByte(codePages[i], 0, wideStr, 10,
mbStr, 0, NULL, NULL);
if (ret != 10)
{
Trace("WideCharToMultiByte did not return correct string length!\n"
"Got %d, expected %d for %d with error %u.\n", ret, 10,
codePages[i], GetLastError());
bRet = FALSE;
}
/* Passing a sufficiently large buffer */
mbStr[10] = 'b';
ret = WideCharToMultiByte(codePages[i], 0, wideStr, 10,
mbStr, 128, NULL, NULL);
if (ret != 10)
{
Trace("WideCharToMultiByte did not return correct string length!\n"
"Got %d, expected %d for code page %d with error %u.\n",
ret, 10, codePages[i], GetLastError());
bRet = FALSE;
}
/* Verifying overflow of the destination string did not occur */
if (mbStr[10] != 'b')
{
Trace("WideCharToMultiByte overflowed the destination buffer for "
"code page %d.\n", codePages[i]);
bRet = FALSE;
}
}
int result = bRet ? PASS : FAIL;
PAL_TerminateEx(result);
return result;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/src/libunwind/src/loongarch64/Gglobal.c | /* libunwind - a platform-independent unwind library
Copyright (C) 2008 CodeSourcery
Copyright (C) 2021 Loongson Technology Corporation Limited
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "unwind_i.h"
#include "dwarf_i.h"
HIDDEN define_lock (loongarch64_lock);
HIDDEN atomic_bool tdep_init_done;
HIDDEN void
tdep_init (void)
{
intrmask_t saved_mask;
sigfillset (&unwi_full_mask);
lock_acquire (&loongarch64_lock, saved_mask);
{
if (tdep_init_done)
/* another thread else beat us to it... */
goto out;
mi_init ();
dwarf_init ();
tdep_init_mem_validate ();
#ifndef UNW_REMOTE_ONLY
loongarch64_local_addr_space_init ();
#endif
tdep_init_done = 1; /* signal that we're initialized... */
}
out:
lock_release (&loongarch64_lock, saved_mask);
}
| /* libunwind - a platform-independent unwind library
Copyright (C) 2008 CodeSourcery
Copyright (C) 2021 Loongson Technology Corporation Limited
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "unwind_i.h"
#include "dwarf_i.h"
HIDDEN define_lock (loongarch64_lock);
HIDDEN atomic_bool tdep_init_done;
HIDDEN void
tdep_init (void)
{
intrmask_t saved_mask;
sigfillset (&unwi_full_mask);
lock_acquire (&loongarch64_lock, saved_mask);
{
if (tdep_init_done)
/* another thread else beat us to it... */
goto out;
mi_init ();
dwarf_init ();
tdep_init_mem_validate ();
#ifndef UNW_REMOTE_ONLY
loongarch64_local_addr_space_init ();
#endif
tdep_init_done = 1; /* signal that we're initialized... */
}
out:
lock_release (&loongarch64_lock, saved_mask);
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/tests/Interop/StringMarshalling/LPTSTR/LPTStrTestNative.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "../Native/StringMarshalingNative.h"
using StringType = LPWSTR;
using Tests = StringMarshalingTests<StringType, TP_slen>;
#define FUNCTION_NAME __FUNCTIONW__
#include "../Native/StringTestEntrypoints.inl"
// Verify that we append extra null terminators to our StringBuilder native buffers.
// Although this is a hidden implementation detail, it would be breaking behavior to stop doing this
// so we have a test for it. In particular, this detail prevents us from optimizing marshalling StringBuilders by pinning.
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE Verify_NullTerminators_PastEnd(LPCWSTR buffer, int length)
{
return buffer[length+1] == W('\0');
}
struct ByValStringInStructAnsi
{
char str[20];
};
struct ByValStringInStructUnicode
{
WCHAR str[20];
};
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE MatchFuncNameAnsi(ByValStringInStructAnsi str)
{
return StringMarshalingTests<char*, default_callconv_strlen>::Compare(__func__, str.str);
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE MatchFuncNameUni(ByValStringInStructUnicode str)
{
return StringMarshalingTests<LPWSTR, TP_slen>::Compare(__FUNCTIONW__, str.str);
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE ReverseByValStringAnsi(ByValStringInStructAnsi* str)
{
StringMarshalingTests<char*, default_callconv_strlen>::ReverseInplace(str->str);
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE ReverseByValStringUni(ByValStringInStructUnicode* str)
{
StringMarshalingTests<LPWSTR, TP_slen>::ReverseInplace(str->str);
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE ReverseCopyByValStringAnsi(ByValStringInStructAnsi str, ByValStringInStructAnsi* out)
{
*out = str;
StringMarshalingTests<char*, default_callconv_strlen>::ReverseInplace(out->str);
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE ReverseCopyByValStringUni(ByValStringInStructUnicode str, ByValStringInStructUnicode* out)
{
*out = str;
StringMarshalingTests<LPWSTR, TP_slen>::ReverseInplace(out->str);
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "../Native/StringMarshalingNative.h"
using StringType = LPWSTR;
using Tests = StringMarshalingTests<StringType, TP_slen>;
#define FUNCTION_NAME __FUNCTIONW__
#include "../Native/StringTestEntrypoints.inl"
// Verify that we append extra null terminators to our StringBuilder native buffers.
// Although this is a hidden implementation detail, it would be breaking behavior to stop doing this
// so we have a test for it. In particular, this detail prevents us from optimizing marshalling StringBuilders by pinning.
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE Verify_NullTerminators_PastEnd(LPCWSTR buffer, int length)
{
return buffer[length+1] == W('\0');
}
struct ByValStringInStructAnsi
{
char str[20];
};
struct ByValStringInStructUnicode
{
WCHAR str[20];
};
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE MatchFuncNameAnsi(ByValStringInStructAnsi str)
{
return StringMarshalingTests<char*, default_callconv_strlen>::Compare(__func__, str.str);
}
extern "C" DLL_EXPORT BOOL STDMETHODCALLTYPE MatchFuncNameUni(ByValStringInStructUnicode str)
{
return StringMarshalingTests<LPWSTR, TP_slen>::Compare(__FUNCTIONW__, str.str);
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE ReverseByValStringAnsi(ByValStringInStructAnsi* str)
{
StringMarshalingTests<char*, default_callconv_strlen>::ReverseInplace(str->str);
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE ReverseByValStringUni(ByValStringInStructUnicode* str)
{
StringMarshalingTests<LPWSTR, TP_slen>::ReverseInplace(str->str);
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE ReverseCopyByValStringAnsi(ByValStringInStructAnsi str, ByValStringInStructAnsi* out)
{
*out = str;
StringMarshalingTests<char*, default_callconv_strlen>::ReverseInplace(out->str);
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE ReverseCopyByValStringUni(ByValStringInStructUnicode str, ByValStringInStructUnicode* out)
{
*out = str;
StringMarshalingTests<LPWSTR, TP_slen>::ReverseInplace(out->str);
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/vm/gctoclreventsink.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "common.h"
#include "gctoclreventsink.h"
#include "eventtrace.h"
GCToCLREventSink g_gcToClrEventSink;
void GCToCLREventSink::FireDynamicEvent(const char* eventName, void* payload, uint32_t payloadSize)
{
LIMITED_METHOD_CONTRACT;
const size_t EventNameMaxSize = 255;
WCHAR wideEventName[EventNameMaxSize];
if (MultiByteToWideChar(CP_ACP, 0, eventName, -1, wideEventName, EventNameMaxSize) == 0)
{
return;
}
FireEtwGCDynamicEvent(wideEventName, payloadSize, (const BYTE*)payload, GetClrInstanceId());
}
void GCToCLREventSink::FireGCStart_V2(uint32_t count, uint32_t depth, uint32_t reason, uint32_t type)
{
#ifdef FEATURE_EVENT_TRACE
LIMITED_METHOD_CONTRACT;
ETW::GCLog::ETW_GC_INFO gcStartInfo;
gcStartInfo.GCStart.Count = count;
gcStartInfo.GCStart.Depth = depth;
gcStartInfo.GCStart.Reason = static_cast<ETW::GCLog::ETW_GC_INFO::GC_REASON>(reason);
gcStartInfo.GCStart.Type = static_cast<ETW::GCLog::ETW_GC_INFO::GC_TYPE>(type);
ETW::GCLog::FireGcStart(&gcStartInfo);
#endif
}
void GCToCLREventSink::FireGCGenerationRange(uint8_t generation, void* rangeStart, uint64_t rangeUsedLength, uint64_t rangeReservedLength)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCGenerationRange(generation, rangeStart, rangeUsedLength, rangeReservedLength, GetClrInstanceId());
}
void GCToCLREventSink::FireGCEnd_V1(uint32_t count, uint32_t depth)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCEnd_V1(count, depth, GetClrInstanceId());
}
void GCToCLREventSink::FireGCHeapStats_V2(
uint64_t generationSize0,
uint64_t totalPromotedSize0,
uint64_t generationSize1,
uint64_t totalPromotedSize1,
uint64_t generationSize2,
uint64_t totalPromotedSize2,
uint64_t generationSize3,
uint64_t totalPromotedSize3,
uint64_t generationSize4,
uint64_t totalPromotedSize4,
uint64_t finalizationPromotedSize,
uint64_t finalizationPromotedCount,
uint32_t pinnedObjectCount,
uint32_t sinkBlockCount,
uint32_t gcHandleCount)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCHeapStats_V2(generationSize0, totalPromotedSize0, generationSize1, totalPromotedSize1,
generationSize2, totalPromotedSize2, generationSize3, totalPromotedSize3,
finalizationPromotedSize, finalizationPromotedCount, pinnedObjectCount,
sinkBlockCount, gcHandleCount, GetClrInstanceId(),
generationSize4, totalPromotedSize4);
}
void GCToCLREventSink::FireGCCreateSegment_V1(void* address, size_t size, uint32_t type)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCCreateSegment_V1((uint64_t)address, static_cast<uint64_t>(size), type, GetClrInstanceId());
}
void GCToCLREventSink::FireGCFreeSegment_V1(void* address)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCFreeSegment_V1((uint64_t)address, GetClrInstanceId());
}
void GCToCLREventSink::FireGCCreateConcurrentThread_V1()
{
LIMITED_METHOD_CONTRACT;
FireEtwGCCreateConcurrentThread_V1(GetClrInstanceId());
}
void GCToCLREventSink::FireGCTerminateConcurrentThread_V1()
{
LIMITED_METHOD_CONTRACT;
FireEtwGCTerminateConcurrentThread_V1(GetClrInstanceId());
}
void GCToCLREventSink::FireGCTriggered(uint32_t reason)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCTriggered(reason, GetClrInstanceId());
}
void GCToCLREventSink::FireGCMarkWithType(uint32_t heapNum, uint32_t type, uint64_t bytes)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCMarkWithType(heapNum, GetClrInstanceId(), type, bytes);
}
void GCToCLREventSink::FireGCJoin_V2(uint32_t heap, uint32_t joinTime, uint32_t joinType, uint32_t joinId)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCJoin_V2(heap, joinTime, joinType, GetClrInstanceId(), joinId);
}
void GCToCLREventSink::FireGCGlobalHeapHistory_V4(uint64_t finalYoungestDesired,
int32_t numHeaps,
uint32_t condemnedGeneration,
uint32_t gen0reductionCount,
uint32_t reason,
uint32_t globalMechanisms,
uint32_t pauseMode,
uint32_t memoryPressure,
uint32_t condemnReasons0,
uint32_t condemnReasons1,
uint32_t count,
uint32_t valuesLen,
void *values)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCGlobalHeapHistory_V4(finalYoungestDesired, numHeaps, condemnedGeneration, gen0reductionCount, reason,
globalMechanisms, GetClrInstanceId(), pauseMode, memoryPressure, condemnReasons0, condemnReasons1,
count, valuesLen, values);
}
void GCToCLREventSink::FireGCAllocationTick_V1(uint32_t allocationAmount, uint32_t allocationKind)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCAllocationTick_V1(allocationAmount, allocationKind, GetClrInstanceId());
}
void GCToCLREventSink::FireGCAllocationTick_V4(uint64_t allocationAmount,
uint32_t allocationKind,
uint32_t heapIndex,
void* objectAddress,
uint64_t objectSize)
{
LIMITED_METHOD_CONTRACT;
void * typeId = nullptr;
const WCHAR * name = nullptr;
InlineSString<MAX_CLASSNAME_LENGTH> strTypeName;
EX_TRY
{
TypeHandle th = GetThread()->GetTHAllocContextObj();
if (th != 0)
{
th.GetName(strTypeName);
name = strTypeName.GetUnicode();
typeId = th.GetMethodTable();
}
}
EX_CATCH {}
EX_END_CATCH(SwallowAllExceptions)
if (typeId != nullptr)
{
FireEtwGCAllocationTick_V4((uint32_t)allocationAmount,
allocationKind,
GetClrInstanceId(),
allocationAmount,
typeId,
name,
heapIndex,
objectAddress,
objectSize);
}
}
void GCToCLREventSink::FirePinObjectAtGCTime(void* object, uint8_t** ppObject)
{
LIMITED_METHOD_CONTRACT;
Object* obj = (Object*)object;
InlineSString<MAX_CLASSNAME_LENGTH> strTypeName;
EX_TRY
{
FAULT_NOT_FATAL();
TypeHandle th = obj->GetGCSafeTypeHandleIfPossible();
if(th != NULL)
{
th.GetName(strTypeName);
}
FireEtwPinObjectAtGCTime(ppObject,
object,
obj->GetSize(),
strTypeName.GetUnicode(),
GetClrInstanceId());
}
EX_CATCH {}
EX_END_CATCH(SwallowAllExceptions)
}
void GCToCLREventSink::FirePinPlugAtGCTime(uint8_t* plugStart, uint8_t* plugEnd, uint8_t* gapBeforeSize)
{
LIMITED_METHOD_CONTRACT;
FireEtwPinPlugAtGCTime(plugStart, plugEnd, gapBeforeSize, GetClrInstanceId());
}
void GCToCLREventSink::FireGCPerHeapHistory_V3(void *freeListAllocated,
void *freeListRejected,
void *endOfSegAllocated,
void *condemnedAllocated,
void *pinnedAllocated,
void *pinnedAllocatedAdvance,
uint32_t runningFreeListEfficiency,
uint32_t condemnReasons0,
uint32_t condemnReasons1,
uint32_t compactMechanisms,
uint32_t expandMechanisms,
uint32_t heapIndex,
void *extraGen0Commit,
uint32_t count,
uint32_t valuesLen,
void *values)
{
FireEtwGCPerHeapHistory_V3(GetClrInstanceId(),
freeListAllocated,
freeListRejected,
endOfSegAllocated,
condemnedAllocated,
pinnedAllocated,
pinnedAllocatedAdvance,
runningFreeListEfficiency,
condemnReasons0,
condemnReasons1,
compactMechanisms,
expandMechanisms,
heapIndex,
extraGen0Commit,
count,
valuesLen,
values);
}
void GCToCLREventSink::FireGCLOHCompact(uint16_t count, uint32_t valuesLen, void *values)
{
FireEtwGCLOHCompact(GetClrInstanceId(), count, valuesLen, values);
}
void GCToCLREventSink::FireGCFitBucketInfo(uint16_t bucketKind,
size_t size,
uint16_t count,
uint32_t valuesLen,
void *values)
{
FireEtwGCFitBucketInfo(GetClrInstanceId(), bucketKind, size, count, valuesLen, values);
}
void GCToCLREventSink::FireBGCBegin()
{
FireEtwBGCBegin(GetClrInstanceId());
}
void GCToCLREventSink::FireBGC1stNonConEnd()
{
FireEtwBGC1stNonConEnd(GetClrInstanceId());
}
void GCToCLREventSink::FireBGC1stConEnd()
{
FireEtwBGC1stConEnd(GetClrInstanceId());
}
void GCToCLREventSink::FireBGC1stSweepEnd(uint32_t genNumber)
{
FireEtwBGC1stSweepEnd(genNumber, GetClrInstanceId());
}
void GCToCLREventSink::FireBGC2ndNonConBegin()
{
FireEtwBGC2ndNonConBegin(GetClrInstanceId());
}
void GCToCLREventSink::FireBGC2ndNonConEnd()
{
FireEtwBGC2ndNonConEnd(GetClrInstanceId());
}
void GCToCLREventSink::FireBGC2ndConBegin()
{
FireEtwBGC2ndConBegin(GetClrInstanceId());
}
void GCToCLREventSink::FireBGC2ndConEnd()
{
FireEtwBGC2ndConEnd(GetClrInstanceId());
}
void GCToCLREventSink::FireBGCDrainMark(uint64_t objects)
{
FireEtwBGCDrainMark(objects, GetClrInstanceId());
}
void GCToCLREventSink::FireBGCRevisit(uint64_t pages, uint64_t objects, uint32_t isLarge)
{
FireEtwBGCRevisit(pages, objects, isLarge, GetClrInstanceId());
}
void GCToCLREventSink::FireBGCOverflow_V1(uint64_t min, uint64_t max, uint64_t objects, uint32_t isLarge, uint32_t genNumber)
{
FireEtwBGCOverflow_V1(min, max, objects, isLarge, GetClrInstanceId(), genNumber);
}
void GCToCLREventSink::FireBGCAllocWaitBegin(uint32_t reason)
{
FireEtwBGCAllocWaitBegin(reason, GetClrInstanceId());
}
void GCToCLREventSink::FireBGCAllocWaitEnd(uint32_t reason)
{
FireEtwBGCAllocWaitEnd(reason, GetClrInstanceId());
}
void GCToCLREventSink::FireGCFullNotify_V1(uint32_t genNumber, uint32_t isAlloc)
{
FireEtwGCFullNotify_V1(genNumber, isAlloc, GetClrInstanceId());
}
void GCToCLREventSink::FireSetGCHandle(void *handleID, void *objectID, uint32_t kind, uint32_t generation)
{
FireEtwSetGCHandle(handleID, objectID, kind, generation, (uint64_t)dac_cast<TADDR>(AppDomain::GetCurrentDomain()), GetClrInstanceId());
}
void GCToCLREventSink::FirePrvSetGCHandle(void *handleID, void *objectID, uint32_t kind, uint32_t generation)
{
FireEtwPrvSetGCHandle(handleID, objectID, kind, generation, (uint64_t)dac_cast<TADDR>(AppDomain::GetCurrentDomain()), GetClrInstanceId());
}
void GCToCLREventSink::FireDestroyGCHandle(void *handleID)
{
FireEtwDestroyGCHandle(handleID, GetClrInstanceId());
}
void GCToCLREventSink::FirePrvDestroyGCHandle(void *handleID)
{
FireEtwPrvDestroyGCHandle(handleID, GetClrInstanceId());
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "common.h"
#include "gctoclreventsink.h"
#include "eventtrace.h"
GCToCLREventSink g_gcToClrEventSink;
void GCToCLREventSink::FireDynamicEvent(const char* eventName, void* payload, uint32_t payloadSize)
{
LIMITED_METHOD_CONTRACT;
const size_t EventNameMaxSize = 255;
WCHAR wideEventName[EventNameMaxSize];
if (MultiByteToWideChar(CP_ACP, 0, eventName, -1, wideEventName, EventNameMaxSize) == 0)
{
return;
}
FireEtwGCDynamicEvent(wideEventName, payloadSize, (const BYTE*)payload, GetClrInstanceId());
}
void GCToCLREventSink::FireGCStart_V2(uint32_t count, uint32_t depth, uint32_t reason, uint32_t type)
{
#ifdef FEATURE_EVENT_TRACE
LIMITED_METHOD_CONTRACT;
ETW::GCLog::ETW_GC_INFO gcStartInfo;
gcStartInfo.GCStart.Count = count;
gcStartInfo.GCStart.Depth = depth;
gcStartInfo.GCStart.Reason = static_cast<ETW::GCLog::ETW_GC_INFO::GC_REASON>(reason);
gcStartInfo.GCStart.Type = static_cast<ETW::GCLog::ETW_GC_INFO::GC_TYPE>(type);
ETW::GCLog::FireGcStart(&gcStartInfo);
#endif
}
void GCToCLREventSink::FireGCGenerationRange(uint8_t generation, void* rangeStart, uint64_t rangeUsedLength, uint64_t rangeReservedLength)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCGenerationRange(generation, rangeStart, rangeUsedLength, rangeReservedLength, GetClrInstanceId());
}
void GCToCLREventSink::FireGCEnd_V1(uint32_t count, uint32_t depth)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCEnd_V1(count, depth, GetClrInstanceId());
}
void GCToCLREventSink::FireGCHeapStats_V2(
uint64_t generationSize0,
uint64_t totalPromotedSize0,
uint64_t generationSize1,
uint64_t totalPromotedSize1,
uint64_t generationSize2,
uint64_t totalPromotedSize2,
uint64_t generationSize3,
uint64_t totalPromotedSize3,
uint64_t generationSize4,
uint64_t totalPromotedSize4,
uint64_t finalizationPromotedSize,
uint64_t finalizationPromotedCount,
uint32_t pinnedObjectCount,
uint32_t sinkBlockCount,
uint32_t gcHandleCount)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCHeapStats_V2(generationSize0, totalPromotedSize0, generationSize1, totalPromotedSize1,
generationSize2, totalPromotedSize2, generationSize3, totalPromotedSize3,
finalizationPromotedSize, finalizationPromotedCount, pinnedObjectCount,
sinkBlockCount, gcHandleCount, GetClrInstanceId(),
generationSize4, totalPromotedSize4);
}
void GCToCLREventSink::FireGCCreateSegment_V1(void* address, size_t size, uint32_t type)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCCreateSegment_V1((uint64_t)address, static_cast<uint64_t>(size), type, GetClrInstanceId());
}
void GCToCLREventSink::FireGCFreeSegment_V1(void* address)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCFreeSegment_V1((uint64_t)address, GetClrInstanceId());
}
void GCToCLREventSink::FireGCCreateConcurrentThread_V1()
{
LIMITED_METHOD_CONTRACT;
FireEtwGCCreateConcurrentThread_V1(GetClrInstanceId());
}
void GCToCLREventSink::FireGCTerminateConcurrentThread_V1()
{
LIMITED_METHOD_CONTRACT;
FireEtwGCTerminateConcurrentThread_V1(GetClrInstanceId());
}
void GCToCLREventSink::FireGCTriggered(uint32_t reason)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCTriggered(reason, GetClrInstanceId());
}
void GCToCLREventSink::FireGCMarkWithType(uint32_t heapNum, uint32_t type, uint64_t bytes)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCMarkWithType(heapNum, GetClrInstanceId(), type, bytes);
}
void GCToCLREventSink::FireGCJoin_V2(uint32_t heap, uint32_t joinTime, uint32_t joinType, uint32_t joinId)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCJoin_V2(heap, joinTime, joinType, GetClrInstanceId(), joinId);
}
void GCToCLREventSink::FireGCGlobalHeapHistory_V4(uint64_t finalYoungestDesired,
int32_t numHeaps,
uint32_t condemnedGeneration,
uint32_t gen0reductionCount,
uint32_t reason,
uint32_t globalMechanisms,
uint32_t pauseMode,
uint32_t memoryPressure,
uint32_t condemnReasons0,
uint32_t condemnReasons1,
uint32_t count,
uint32_t valuesLen,
void *values)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCGlobalHeapHistory_V4(finalYoungestDesired, numHeaps, condemnedGeneration, gen0reductionCount, reason,
globalMechanisms, GetClrInstanceId(), pauseMode, memoryPressure, condemnReasons0, condemnReasons1,
count, valuesLen, values);
}
void GCToCLREventSink::FireGCAllocationTick_V1(uint32_t allocationAmount, uint32_t allocationKind)
{
LIMITED_METHOD_CONTRACT;
FireEtwGCAllocationTick_V1(allocationAmount, allocationKind, GetClrInstanceId());
}
void GCToCLREventSink::FireGCAllocationTick_V4(uint64_t allocationAmount,
uint32_t allocationKind,
uint32_t heapIndex,
void* objectAddress,
uint64_t objectSize)
{
LIMITED_METHOD_CONTRACT;
void * typeId = nullptr;
const WCHAR * name = nullptr;
InlineSString<MAX_CLASSNAME_LENGTH> strTypeName;
EX_TRY
{
TypeHandle th = GetThread()->GetTHAllocContextObj();
if (th != 0)
{
th.GetName(strTypeName);
name = strTypeName.GetUnicode();
typeId = th.GetMethodTable();
}
}
EX_CATCH {}
EX_END_CATCH(SwallowAllExceptions)
if (typeId != nullptr)
{
FireEtwGCAllocationTick_V4((uint32_t)allocationAmount,
allocationKind,
GetClrInstanceId(),
allocationAmount,
typeId,
name,
heapIndex,
objectAddress,
objectSize);
}
}
void GCToCLREventSink::FirePinObjectAtGCTime(void* object, uint8_t** ppObject)
{
LIMITED_METHOD_CONTRACT;
Object* obj = (Object*)object;
InlineSString<MAX_CLASSNAME_LENGTH> strTypeName;
EX_TRY
{
FAULT_NOT_FATAL();
TypeHandle th = obj->GetGCSafeTypeHandleIfPossible();
if(th != NULL)
{
th.GetName(strTypeName);
}
FireEtwPinObjectAtGCTime(ppObject,
object,
obj->GetSize(),
strTypeName.GetUnicode(),
GetClrInstanceId());
}
EX_CATCH {}
EX_END_CATCH(SwallowAllExceptions)
}
void GCToCLREventSink::FirePinPlugAtGCTime(uint8_t* plugStart, uint8_t* plugEnd, uint8_t* gapBeforeSize)
{
LIMITED_METHOD_CONTRACT;
FireEtwPinPlugAtGCTime(plugStart, plugEnd, gapBeforeSize, GetClrInstanceId());
}
void GCToCLREventSink::FireGCPerHeapHistory_V3(void *freeListAllocated,
void *freeListRejected,
void *endOfSegAllocated,
void *condemnedAllocated,
void *pinnedAllocated,
void *pinnedAllocatedAdvance,
uint32_t runningFreeListEfficiency,
uint32_t condemnReasons0,
uint32_t condemnReasons1,
uint32_t compactMechanisms,
uint32_t expandMechanisms,
uint32_t heapIndex,
void *extraGen0Commit,
uint32_t count,
uint32_t valuesLen,
void *values)
{
FireEtwGCPerHeapHistory_V3(GetClrInstanceId(),
freeListAllocated,
freeListRejected,
endOfSegAllocated,
condemnedAllocated,
pinnedAllocated,
pinnedAllocatedAdvance,
runningFreeListEfficiency,
condemnReasons0,
condemnReasons1,
compactMechanisms,
expandMechanisms,
heapIndex,
extraGen0Commit,
count,
valuesLen,
values);
}
void GCToCLREventSink::FireGCLOHCompact(uint16_t count, uint32_t valuesLen, void *values)
{
FireEtwGCLOHCompact(GetClrInstanceId(), count, valuesLen, values);
}
void GCToCLREventSink::FireGCFitBucketInfo(uint16_t bucketKind,
size_t size,
uint16_t count,
uint32_t valuesLen,
void *values)
{
FireEtwGCFitBucketInfo(GetClrInstanceId(), bucketKind, size, count, valuesLen, values);
}
void GCToCLREventSink::FireBGCBegin()
{
FireEtwBGCBegin(GetClrInstanceId());
}
void GCToCLREventSink::FireBGC1stNonConEnd()
{
FireEtwBGC1stNonConEnd(GetClrInstanceId());
}
void GCToCLREventSink::FireBGC1stConEnd()
{
FireEtwBGC1stConEnd(GetClrInstanceId());
}
void GCToCLREventSink::FireBGC1stSweepEnd(uint32_t genNumber)
{
FireEtwBGC1stSweepEnd(genNumber, GetClrInstanceId());
}
void GCToCLREventSink::FireBGC2ndNonConBegin()
{
FireEtwBGC2ndNonConBegin(GetClrInstanceId());
}
void GCToCLREventSink::FireBGC2ndNonConEnd()
{
FireEtwBGC2ndNonConEnd(GetClrInstanceId());
}
void GCToCLREventSink::FireBGC2ndConBegin()
{
FireEtwBGC2ndConBegin(GetClrInstanceId());
}
void GCToCLREventSink::FireBGC2ndConEnd()
{
FireEtwBGC2ndConEnd(GetClrInstanceId());
}
void GCToCLREventSink::FireBGCDrainMark(uint64_t objects)
{
FireEtwBGCDrainMark(objects, GetClrInstanceId());
}
void GCToCLREventSink::FireBGCRevisit(uint64_t pages, uint64_t objects, uint32_t isLarge)
{
FireEtwBGCRevisit(pages, objects, isLarge, GetClrInstanceId());
}
void GCToCLREventSink::FireBGCOverflow_V1(uint64_t min, uint64_t max, uint64_t objects, uint32_t isLarge, uint32_t genNumber)
{
FireEtwBGCOverflow_V1(min, max, objects, isLarge, GetClrInstanceId(), genNumber);
}
void GCToCLREventSink::FireBGCAllocWaitBegin(uint32_t reason)
{
FireEtwBGCAllocWaitBegin(reason, GetClrInstanceId());
}
void GCToCLREventSink::FireBGCAllocWaitEnd(uint32_t reason)
{
FireEtwBGCAllocWaitEnd(reason, GetClrInstanceId());
}
void GCToCLREventSink::FireGCFullNotify_V1(uint32_t genNumber, uint32_t isAlloc)
{
FireEtwGCFullNotify_V1(genNumber, isAlloc, GetClrInstanceId());
}
void GCToCLREventSink::FireSetGCHandle(void *handleID, void *objectID, uint32_t kind, uint32_t generation)
{
FireEtwSetGCHandle(handleID, objectID, kind, generation, (uint64_t)dac_cast<TADDR>(AppDomain::GetCurrentDomain()), GetClrInstanceId());
}
void GCToCLREventSink::FirePrvSetGCHandle(void *handleID, void *objectID, uint32_t kind, uint32_t generation)
{
FireEtwPrvSetGCHandle(handleID, objectID, kind, generation, (uint64_t)dac_cast<TADDR>(AppDomain::GetCurrentDomain()), GetClrInstanceId());
}
void GCToCLREventSink::FireDestroyGCHandle(void *handleID)
{
FireEtwDestroyGCHandle(handleID, GetClrInstanceId());
}
void GCToCLREventSink::FirePrvDestroyGCHandle(void *handleID)
{
FireEtwPrvDestroyGCHandle(handleID, GetClrInstanceId());
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/classlibnative/bcltype/varargsnative.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: VarArgsNative.h
//
//
// This module contains the implementation of the native methods for the
// varargs class(es)..
//
#ifndef _VARARGSNATIVE_H_
#define _VARARGSNATIVE_H_
#include "clrvarargs.h"
class VarArgsNative
{
public:
static FCDECL3(void, Init2, VARARGS* _this, LPVOID cookie, LPVOID firstArg);
static FCDECL2(void, Init, VARARGS* _this, LPVOID cookie);
static FCDECL1(int, GetRemainingCount, VARARGS* _this);
static FCDECL1(void*, GetNextArgType, VARARGS* _this);
//TypedByRef can not be passed by ref, so has to pass it as void pointer
static FCDECL2(void, DoGetNextArg, VARARGS* _this, void * value);
//TypedByRef can not be passed by ref, so has to pass it as void pointer
static FCDECL3(void, GetNextArg2, VARARGS* _this, void * value, ReflectClassBaseObject *pTypeUNSAFE);
static void GetNextArgHelper(VARARGS *data, TypedByRef *value, BOOL fData);
};
#endif // _VARARGSNATIVE_H_
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: VarArgsNative.h
//
//
// This module contains the implementation of the native methods for the
// varargs class(es)..
//
#ifndef _VARARGSNATIVE_H_
#define _VARARGSNATIVE_H_
#include "clrvarargs.h"
class VarArgsNative
{
public:
static FCDECL3(void, Init2, VARARGS* _this, LPVOID cookie, LPVOID firstArg);
static FCDECL2(void, Init, VARARGS* _this, LPVOID cookie);
static FCDECL1(int, GetRemainingCount, VARARGS* _this);
static FCDECL1(void*, GetNextArgType, VARARGS* _this);
//TypedByRef can not be passed by ref, so has to pass it as void pointer
static FCDECL2(void, DoGetNextArg, VARARGS* _this, void * value);
//TypedByRef can not be passed by ref, so has to pass it as void pointer
static FCDECL3(void, GetNextArg2, VARARGS* _this, void * value, ReflectClassBaseObject *pTypeUNSAFE);
static void GetNextArgHelper(VARARGS *data, TypedByRef *value, BOOL fData);
};
#endif // _VARARGSNATIVE_H_
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/debug/shim/debugshim.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// debugshim.h
//
//
//*****************************************************************************
#ifndef _DEBUG_SHIM_
#define _DEBUG_SHIM_
#include "cor.h"
#include "cordebug.h"
#include <wchar.h>
#include <metahost.h>
#define CORECLR_DAC_MODULE_NAME_W W("mscordaccore")
#define CLR_DAC_MODULE_NAME_W W("mscordacwks")
#define MAIN_DBI_MODULE_NAME_W W("mscordbi")
// forward declaration
struct ICorDebugDataTarget;
// ICLRDebugging implementation.
class CLRDebuggingImpl : public ICLRDebugging
{
public:
CLRDebuggingImpl(GUID skuId) : m_cRef(0), m_skuId(skuId)
{
}
virtual ~CLRDebuggingImpl() {}
public:
// ICLRDebugging methods:
STDMETHOD(OpenVirtualProcess(
ULONG64 moduleBaseAddress,
IUnknown * pDataTarget,
ICLRDebuggingLibraryProvider * pLibraryProvider,
CLR_DEBUGGING_VERSION * pMaxDebuggerSupportedVersion,
REFIID riidProcess,
IUnknown ** ppProcess,
CLR_DEBUGGING_VERSION * pVersion,
CLR_DEBUGGING_PROCESS_FLAGS * pFlags));
STDMETHOD(CanUnloadNow(HMODULE hModule));
//IUnknown methods:
STDMETHOD(QueryInterface(
REFIID riid,
void **ppvObject));
// Standard AddRef implementation
STDMETHOD_(ULONG, AddRef());
// Standard Release implementation.
STDMETHOD_(ULONG, Release());
private:
VOID RetargetDacIfNeeded(DWORD* pdwTimeStamp,
DWORD* pdwSizeOfImage);
HRESULT GetCLRInfo(ICorDebugDataTarget * pDataTarget,
ULONG64 moduleBaseAddress,
CLR_DEBUGGING_VERSION * pVersion,
DWORD * pdwDbiTimeStamp,
DWORD * pdwDbiSizeOfImage,
_Inout_updates_z_(dwDbiNameCharCount) WCHAR * pDbiName,
DWORD dwDbiNameCharCount,
DWORD * pdwDacTimeStamp,
DWORD * pdwDacSizeOfImage,
_Inout_updates_z_(dwDacNameCharCount) WCHAR * pDacName,
DWORD dwDacNameCharCount);
HRESULT FormatLongDacModuleName(_Inout_updates_z_(cchBuffer) WCHAR * pBuffer,
DWORD cchBuffer,
DWORD targetImageFileMachine,
VS_FIXEDFILEINFO * pVersion);
volatile LONG m_cRef;
GUID m_skuId;
}; // class CLRDebuggingImpl
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// debugshim.h
//
//
//*****************************************************************************
#ifndef _DEBUG_SHIM_
#define _DEBUG_SHIM_
#include "cor.h"
#include "cordebug.h"
#include <wchar.h>
#include <metahost.h>
#define CORECLR_DAC_MODULE_NAME_W W("mscordaccore")
#define CLR_DAC_MODULE_NAME_W W("mscordacwks")
#define MAIN_DBI_MODULE_NAME_W W("mscordbi")
// forward declaration
struct ICorDebugDataTarget;
// ICLRDebugging implementation.
class CLRDebuggingImpl : public ICLRDebugging
{
public:
CLRDebuggingImpl(GUID skuId) : m_cRef(0), m_skuId(skuId)
{
}
virtual ~CLRDebuggingImpl() {}
public:
// ICLRDebugging methods:
STDMETHOD(OpenVirtualProcess(
ULONG64 moduleBaseAddress,
IUnknown * pDataTarget,
ICLRDebuggingLibraryProvider * pLibraryProvider,
CLR_DEBUGGING_VERSION * pMaxDebuggerSupportedVersion,
REFIID riidProcess,
IUnknown ** ppProcess,
CLR_DEBUGGING_VERSION * pVersion,
CLR_DEBUGGING_PROCESS_FLAGS * pFlags));
STDMETHOD(CanUnloadNow(HMODULE hModule));
//IUnknown methods:
STDMETHOD(QueryInterface(
REFIID riid,
void **ppvObject));
// Standard AddRef implementation
STDMETHOD_(ULONG, AddRef());
// Standard Release implementation.
STDMETHOD_(ULONG, Release());
private:
VOID RetargetDacIfNeeded(DWORD* pdwTimeStamp,
DWORD* pdwSizeOfImage);
HRESULT GetCLRInfo(ICorDebugDataTarget * pDataTarget,
ULONG64 moduleBaseAddress,
CLR_DEBUGGING_VERSION * pVersion,
DWORD * pdwDbiTimeStamp,
DWORD * pdwDbiSizeOfImage,
_Inout_updates_z_(dwDbiNameCharCount) WCHAR * pDbiName,
DWORD dwDbiNameCharCount,
DWORD * pdwDacTimeStamp,
DWORD * pdwDacSizeOfImage,
_Inout_updates_z_(dwDacNameCharCount) WCHAR * pDacName,
DWORD dwDacNameCharCount);
HRESULT FormatLongDacModuleName(_Inout_updates_z_(cchBuffer) WCHAR * pBuffer,
DWORD cchBuffer,
DWORD targetImageFileMachine,
VS_FIXEDFILEINFO * pVersion);
volatile LONG m_cRef;
GUID m_skuId;
}; // class CLRDebuggingImpl
#endif
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/native/eventpipe/ep-rt-config.h | #ifndef __EVENTPIPE_RT_CONFIG_H__
#define __EVENTPIPE_RT_CONFIG_H__
#include "ep-shared-config.h"
#ifndef FEATURE_CORECLR
#include <config.h>
// EventPipe runtime implementation.
#define EP_RT_H <mono/eventpipe/ep-rt-mono.h>
#define EP_RT_TYPES_H <mono/eventpipe/ep-rt-types-mono.h>
#define EP_RT_CONFIG_H <mono/eventpipe/ep-rt-config-mono.h>
// DiagnosticServer runtime implementation.
#define DS_RT_H <mono/eventpipe/ds-rt-mono.h>
#define DS_RT_TYPES_H <mono/eventpipe/ds-rt-types-mono.h>
#else /* !FEATURE_CORECLR */
#ifndef EP_NO_RT_DEPENDENCY
#include "common.h"
#endif
#if defined(FEATURE_PERFTRACING)
#define ENABLE_PERFTRACING
#endif
#ifdef TARGET_WINDOWS
#define HOST_WIN32
#endif
#if defined(FEATURE_PERFTRACING) && defined(FEATURE_PROFAPI_ATTACH_DETACH) && defined(DACCESS_COMPILE)
#undef FEATURE_PROFAPI_ATTACH_DETACH
#endif
// EventPipe runtime implementation.
#define EP_RT_H "ep-rt-coreclr.h"
#define EP_RT_TYPES_H "ep-rt-types-coreclr.h"
#define EP_RT_CONFIG_H "ep-rt-config-coreclr.h"
// DiagnosticServer runtime implementation.
#define DS_RT_H "ds-rt-coreclr.h"
#define DS_RT_TYPES_H "ds-rt-types-coreclr.h"
#endif
#ifndef EP_NO_RT_DEPENDENCY
#include EP_RT_CONFIG_H
#endif
#define EP_INLINE_GETTER_SETTER
#ifdef EP_INLINE_GETTER_SETTER
#define EP_INCLUDE_SOURCE_FILES
#endif
#endif /* __EVENTPIPE_RT_CONFIG_H__ */
| #ifndef __EVENTPIPE_RT_CONFIG_H__
#define __EVENTPIPE_RT_CONFIG_H__
#include "ep-shared-config.h"
#ifndef FEATURE_CORECLR
#include <config.h>
// EventPipe runtime implementation.
#define EP_RT_H <mono/eventpipe/ep-rt-mono.h>
#define EP_RT_TYPES_H <mono/eventpipe/ep-rt-types-mono.h>
#define EP_RT_CONFIG_H <mono/eventpipe/ep-rt-config-mono.h>
// DiagnosticServer runtime implementation.
#define DS_RT_H <mono/eventpipe/ds-rt-mono.h>
#define DS_RT_TYPES_H <mono/eventpipe/ds-rt-types-mono.h>
#else /* !FEATURE_CORECLR */
#ifndef EP_NO_RT_DEPENDENCY
#include "common.h"
#endif
#if defined(FEATURE_PERFTRACING)
#define ENABLE_PERFTRACING
#endif
#ifdef TARGET_WINDOWS
#define HOST_WIN32
#endif
#if defined(FEATURE_PERFTRACING) && defined(FEATURE_PROFAPI_ATTACH_DETACH) && defined(DACCESS_COMPILE)
#undef FEATURE_PROFAPI_ATTACH_DETACH
#endif
// EventPipe runtime implementation.
#define EP_RT_H "ep-rt-coreclr.h"
#define EP_RT_TYPES_H "ep-rt-types-coreclr.h"
#define EP_RT_CONFIG_H "ep-rt-config-coreclr.h"
// DiagnosticServer runtime implementation.
#define DS_RT_H "ds-rt-coreclr.h"
#define DS_RT_TYPES_H "ds-rt-types-coreclr.h"
#endif
#ifndef EP_NO_RT_DEPENDENCY
#include EP_RT_CONFIG_H
#endif
#define EP_INLINE_GETTER_SETTER
#ifdef EP_INLINE_GETTER_SETTER
#define EP_INCLUDE_SOURCE_FILES
#endif
#endif /* __EVENTPIPE_RT_CONFIG_H__ */
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/ildasm/dasm_sz.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "ildasmpch.h"
#include <metadata.h>
#include <utilcode.h>
#include "debugmacros.h"
#include "dasm_sz.h"
#include "ceeload.h"
extern PELoader *g_pPELoader;
unsigned SizeOfValueType(mdToken tk, IMDInternalImport* pImport)
{
unsigned ret = 0xFFFFFFFF;
if((TypeFromToken(tk)==mdtTypeDef)&&RidFromToken(tk)&&pImport)
{
DWORD dwAttrs;
mdToken tkExtends;
if (FAILED(pImport->GetTypeDefProps(tk, &dwAttrs, &tkExtends)))
{
return ret;
}
if(!(IsTdInterface(dwAttrs)||IsTdAbstract(dwAttrs)||IsTdImport(dwAttrs)))
{
mdToken tkField;
DWORD dwFieldAttrs;
unsigned uFieldSize;
ULONG ulPack=0,
ulSize = 0,
ulInstFieldSize = 0;
if (FAILED(pImport->GetClassPackSize(tk,&ulPack))) ulPack = 0;
if (FAILED(pImport->GetClassTotalSize(tk,&ulSize))) ulSize = 0;
if (IsTdExplicitLayout(dwAttrs))
{
MD_CLASS_LAYOUT hLayout;
if (SUCCEEDED(pImport->GetClassLayoutInit(tk,&hLayout)))
{
ULONG ulOffset;
while (SUCCEEDED(pImport->GetClassLayoutNext(&hLayout,&tkField,&ulOffset)) && RidFromToken(tkField))
{
if (FAILED(pImport->GetFieldDefProps(tkField, &dwFieldAttrs)))
{
break;
}
if (!(IsFdStatic(dwFieldAttrs)||IsFdLiteral(dwFieldAttrs)))
{
uFieldSize = SizeOfField(tkField,pImport);
if (uFieldSize == 0xFFFFFFFF) return uFieldSize;
uFieldSize += ulOffset;
if (uFieldSize > ulInstFieldSize) ulInstFieldSize = uFieldSize;
}
}
}
}
else
{
HENUMInternal hEnumField;
unsigned cFieldsMax = 0;
if (SUCCEEDED(pImport->EnumInit(mdtFieldDef, tk, &hEnumField)))
{
if ((cFieldsMax = pImport->EnumGetCount(&hEnumField)) != 0)
{
while (pImport->EnumNext(&hEnumField, &tkField) && RidFromToken(tkField))
{
if (FAILED(pImport->GetFieldDefProps(tkField, &dwFieldAttrs)))
{
break;
}
if (!IsFdStatic(dwFieldAttrs) && !IsFdLiteral(dwFieldAttrs))
{
uFieldSize = SizeOfField(tkField,pImport);
if (uFieldSize == 0xFFFFFFFF) return uFieldSize;
if (ulPack > 1)
{
ULONG ulDelta = ulInstFieldSize % ulPack;
if (ulDelta != 0) ulInstFieldSize += ulPack - ulDelta;
}
ulInstFieldSize += uFieldSize;
}
}
}
pImport->EnumClose(&hEnumField);
}
}
ret = (ulInstFieldSize > ulSize) ? ulInstFieldSize : ulSize;
if(ret == 0) ret = 1; // zero-sized value types automatically get 1 byte
}
}
return ret;
}
unsigned SizeOfField(mdToken tk, IMDInternalImport* pImport)
{
unsigned ret = 0xFFFFFFFF;
if((TypeFromToken(tk) == mdtFieldDef) && RidFromToken(tk) && pImport)
{
PCCOR_SIGNATURE pSig;
ULONG cSig;
if (FAILED(pImport->GetSigOfFieldDef(tk, &cSig, &pSig)))
{
return ret;
}
ret = SizeOfField(&pSig,cSig,pImport);
}
return ret;
}
unsigned SizeOfField(PCCOR_SIGNATURE *ppSig, ULONG cSig, IMDInternalImport* pImport)
{
unsigned ret = 0xFFFFFFFF;
if(ppSig && *ppSig && cSig && pImport)
{
unsigned callConv = CorSigUncompressData(*ppSig);
if (isCallConv(callConv, IMAGE_CEE_CS_CALLCONV_FIELD))
{
mdToken tk;
int typ;
BOOL Reiterate;
unsigned uElementNumber = 1;
PCCOR_SIGNATURE pSigStart = *ppSig;
PCCOR_SIGNATURE pSigEnd = *ppSig+cSig;
// Size of the pointer depends on bitness of the assembly
unsigned nSizeOfPointer = g_pPELoader->IsPE32() ? 4 : 8;
do {
Reiterate = FALSE;
switch(typ = *(*ppSig)++) {
case ELEMENT_TYPE_VOID :
return 0;
case ELEMENT_TYPE_I1 :
case ELEMENT_TYPE_U1 :
case ELEMENT_TYPE_BOOLEAN :
return uElementNumber;
case ELEMENT_TYPE_CHAR :
case ELEMENT_TYPE_I2 :
case ELEMENT_TYPE_U2 :
return (uElementNumber << 1);
case ELEMENT_TYPE_I4 :
case ELEMENT_TYPE_U4 :
case ELEMENT_TYPE_R4 :
return (uElementNumber << 2);
case ELEMENT_TYPE_I8 :
case ELEMENT_TYPE_U8 :
case ELEMENT_TYPE_R8 :
return (uElementNumber << 3);
case ELEMENT_TYPE_OBJECT :
case ELEMENT_TYPE_STRING :
case ELEMENT_TYPE_FNPTR :
case ELEMENT_TYPE_CLASS :
case ELEMENT_TYPE_PTR :
case ELEMENT_TYPE_BYREF :
//case ELEMENT_TYPE_VAR :
case ELEMENT_TYPE_U :
case ELEMENT_TYPE_I :
return (uElementNumber * nSizeOfPointer);
case ELEMENT_TYPE_TYPEDBYREF : // pair of ptrs
return (uElementNumber * nSizeOfPointer * 2);
case ELEMENT_TYPE_VALUETYPE :
*ppSig += CorSigUncompressToken(*ppSig, &tk);
ret = SizeOfValueType(tk,pImport);
if(ret != 0xFFFFFFFF) ret *= uElementNumber;
return ret;
// Modifiers or depedant types
case ELEMENT_TYPE_ARRAY :
ret = SizeOfField(ppSig, cSig-(unsigned)((*ppSig)-pSigStart), pImport);
if(ret != 0xFFFFFFFF)
{
unsigned rank = CorSigUncompressData(*ppSig);
if (rank == 0) ret = 0xFFFFFFFF;
else
{
int* lowerBounds = new (nothrow) int[2*rank];
int* sizes = &lowerBounds[rank];
memset(lowerBounds, 0, sizeof(int)*2*rank);
unsigned numSizes = CorSigUncompressData(*ppSig);
_ASSERTE(numSizes <= rank);
unsigned i;
for(i =0; i < numSizes; i++)
sizes[i] = CorSigUncompressData(*ppSig);
unsigned numLowBounds = CorSigUncompressData(*ppSig);
_ASSERTE(numLowBounds <= rank);
for(i = 0; i < numLowBounds; i++)
*ppSig+=CorSigUncompressSignedInt(*ppSig,&lowerBounds[i]);
for(i = 0; i < numSizes; i++)
{
if (sizes[i]) uElementNumber *= sizes[i];
}
ret *= uElementNumber;
delete[] lowerBounds;
}
}
return ret;
case ELEMENT_TYPE_CMOD_OPT :
case ELEMENT_TYPE_CMOD_REQD :
*ppSig += CorSigUncompressToken(*ppSig, &tk);
FALLTHROUGH;
case ELEMENT_TYPE_PINNED :
case ELEMENT_TYPE_SZARRAY : // uElementNumber doesn't change
if(*ppSig < pSigEnd) Reiterate = TRUE;
break;
default:
case ELEMENT_TYPE_SENTINEL :
case ELEMENT_TYPE_END :
break;
} // end switch
} while(Reiterate);
} // end if(CALLCONV_FIELD)
} // end if(signature && import)
return ret;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "ildasmpch.h"
#include <metadata.h>
#include <utilcode.h>
#include "debugmacros.h"
#include "dasm_sz.h"
#include "ceeload.h"
extern PELoader *g_pPELoader;
unsigned SizeOfValueType(mdToken tk, IMDInternalImport* pImport)
{
unsigned ret = 0xFFFFFFFF;
if((TypeFromToken(tk)==mdtTypeDef)&&RidFromToken(tk)&&pImport)
{
DWORD dwAttrs;
mdToken tkExtends;
if (FAILED(pImport->GetTypeDefProps(tk, &dwAttrs, &tkExtends)))
{
return ret;
}
if(!(IsTdInterface(dwAttrs)||IsTdAbstract(dwAttrs)||IsTdImport(dwAttrs)))
{
mdToken tkField;
DWORD dwFieldAttrs;
unsigned uFieldSize;
ULONG ulPack=0,
ulSize = 0,
ulInstFieldSize = 0;
if (FAILED(pImport->GetClassPackSize(tk,&ulPack))) ulPack = 0;
if (FAILED(pImport->GetClassTotalSize(tk,&ulSize))) ulSize = 0;
if (IsTdExplicitLayout(dwAttrs))
{
MD_CLASS_LAYOUT hLayout;
if (SUCCEEDED(pImport->GetClassLayoutInit(tk,&hLayout)))
{
ULONG ulOffset;
while (SUCCEEDED(pImport->GetClassLayoutNext(&hLayout,&tkField,&ulOffset)) && RidFromToken(tkField))
{
if (FAILED(pImport->GetFieldDefProps(tkField, &dwFieldAttrs)))
{
break;
}
if (!(IsFdStatic(dwFieldAttrs)||IsFdLiteral(dwFieldAttrs)))
{
uFieldSize = SizeOfField(tkField,pImport);
if (uFieldSize == 0xFFFFFFFF) return uFieldSize;
uFieldSize += ulOffset;
if (uFieldSize > ulInstFieldSize) ulInstFieldSize = uFieldSize;
}
}
}
}
else
{
HENUMInternal hEnumField;
unsigned cFieldsMax = 0;
if (SUCCEEDED(pImport->EnumInit(mdtFieldDef, tk, &hEnumField)))
{
if ((cFieldsMax = pImport->EnumGetCount(&hEnumField)) != 0)
{
while (pImport->EnumNext(&hEnumField, &tkField) && RidFromToken(tkField))
{
if (FAILED(pImport->GetFieldDefProps(tkField, &dwFieldAttrs)))
{
break;
}
if (!IsFdStatic(dwFieldAttrs) && !IsFdLiteral(dwFieldAttrs))
{
uFieldSize = SizeOfField(tkField,pImport);
if (uFieldSize == 0xFFFFFFFF) return uFieldSize;
if (ulPack > 1)
{
ULONG ulDelta = ulInstFieldSize % ulPack;
if (ulDelta != 0) ulInstFieldSize += ulPack - ulDelta;
}
ulInstFieldSize += uFieldSize;
}
}
}
pImport->EnumClose(&hEnumField);
}
}
ret = (ulInstFieldSize > ulSize) ? ulInstFieldSize : ulSize;
if(ret == 0) ret = 1; // zero-sized value types automatically get 1 byte
}
}
return ret;
}
unsigned SizeOfField(mdToken tk, IMDInternalImport* pImport)
{
unsigned ret = 0xFFFFFFFF;
if((TypeFromToken(tk) == mdtFieldDef) && RidFromToken(tk) && pImport)
{
PCCOR_SIGNATURE pSig;
ULONG cSig;
if (FAILED(pImport->GetSigOfFieldDef(tk, &cSig, &pSig)))
{
return ret;
}
ret = SizeOfField(&pSig,cSig,pImport);
}
return ret;
}
unsigned SizeOfField(PCCOR_SIGNATURE *ppSig, ULONG cSig, IMDInternalImport* pImport)
{
unsigned ret = 0xFFFFFFFF;
if(ppSig && *ppSig && cSig && pImport)
{
unsigned callConv = CorSigUncompressData(*ppSig);
if (isCallConv(callConv, IMAGE_CEE_CS_CALLCONV_FIELD))
{
mdToken tk;
int typ;
BOOL Reiterate;
unsigned uElementNumber = 1;
PCCOR_SIGNATURE pSigStart = *ppSig;
PCCOR_SIGNATURE pSigEnd = *ppSig+cSig;
// Size of the pointer depends on bitness of the assembly
unsigned nSizeOfPointer = g_pPELoader->IsPE32() ? 4 : 8;
do {
Reiterate = FALSE;
switch(typ = *(*ppSig)++) {
case ELEMENT_TYPE_VOID :
return 0;
case ELEMENT_TYPE_I1 :
case ELEMENT_TYPE_U1 :
case ELEMENT_TYPE_BOOLEAN :
return uElementNumber;
case ELEMENT_TYPE_CHAR :
case ELEMENT_TYPE_I2 :
case ELEMENT_TYPE_U2 :
return (uElementNumber << 1);
case ELEMENT_TYPE_I4 :
case ELEMENT_TYPE_U4 :
case ELEMENT_TYPE_R4 :
return (uElementNumber << 2);
case ELEMENT_TYPE_I8 :
case ELEMENT_TYPE_U8 :
case ELEMENT_TYPE_R8 :
return (uElementNumber << 3);
case ELEMENT_TYPE_OBJECT :
case ELEMENT_TYPE_STRING :
case ELEMENT_TYPE_FNPTR :
case ELEMENT_TYPE_CLASS :
case ELEMENT_TYPE_PTR :
case ELEMENT_TYPE_BYREF :
//case ELEMENT_TYPE_VAR :
case ELEMENT_TYPE_U :
case ELEMENT_TYPE_I :
return (uElementNumber * nSizeOfPointer);
case ELEMENT_TYPE_TYPEDBYREF : // pair of ptrs
return (uElementNumber * nSizeOfPointer * 2);
case ELEMENT_TYPE_VALUETYPE :
*ppSig += CorSigUncompressToken(*ppSig, &tk);
ret = SizeOfValueType(tk,pImport);
if(ret != 0xFFFFFFFF) ret *= uElementNumber;
return ret;
// Modifiers or depedant types
case ELEMENT_TYPE_ARRAY :
ret = SizeOfField(ppSig, cSig-(unsigned)((*ppSig)-pSigStart), pImport);
if(ret != 0xFFFFFFFF)
{
unsigned rank = CorSigUncompressData(*ppSig);
if (rank == 0) ret = 0xFFFFFFFF;
else
{
int* lowerBounds = new (nothrow) int[2*rank];
int* sizes = &lowerBounds[rank];
memset(lowerBounds, 0, sizeof(int)*2*rank);
unsigned numSizes = CorSigUncompressData(*ppSig);
_ASSERTE(numSizes <= rank);
unsigned i;
for(i =0; i < numSizes; i++)
sizes[i] = CorSigUncompressData(*ppSig);
unsigned numLowBounds = CorSigUncompressData(*ppSig);
_ASSERTE(numLowBounds <= rank);
for(i = 0; i < numLowBounds; i++)
*ppSig+=CorSigUncompressSignedInt(*ppSig,&lowerBounds[i]);
for(i = 0; i < numSizes; i++)
{
if (sizes[i]) uElementNumber *= sizes[i];
}
ret *= uElementNumber;
delete[] lowerBounds;
}
}
return ret;
case ELEMENT_TYPE_CMOD_OPT :
case ELEMENT_TYPE_CMOD_REQD :
*ppSig += CorSigUncompressToken(*ppSig, &tk);
FALLTHROUGH;
case ELEMENT_TYPE_PINNED :
case ELEMENT_TYPE_SZARRAY : // uElementNumber doesn't change
if(*ppSig < pSigEnd) Reiterate = TRUE;
break;
default:
case ELEMENT_TYPE_SENTINEL :
case ELEMENT_TYPE_END :
break;
} // end switch
} while(Reiterate);
} // end if(CALLCONV_FIELD)
} // end if(signature && import)
return ret;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/ildasm/dasm_sz.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef _DASM_SZ_H_
#define _DASM_SZ_H_
unsigned SizeOfValueType(mdToken tk, IMDInternalImport* pImport);
unsigned SizeOfField(mdToken tk, IMDInternalImport* pImport);
unsigned SizeOfField(PCCOR_SIGNATURE *ppSig, ULONG cSig, IMDInternalImport* pImport);
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef _DASM_SZ_H_
#define _DASM_SZ_H_
unsigned SizeOfValueType(mdToken tk, IMDInternalImport* pImport);
unsigned SizeOfField(mdToken tk, IMDInternalImport* pImport);
unsigned SizeOfField(PCCOR_SIGNATURE *ppSig, ULONG cSig, IMDInternalImport* pImport);
#endif
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/native/external/brotli/enc/cluster.c | /* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Functions for clustering similar histograms together. */
#include "./cluster.h"
#include "../common/platform.h"
#include <brotli/types.h>
#include "./bit_cost.h" /* BrotliPopulationCost */
#include "./fast_log.h"
#include "./histogram.h"
#include "./memory.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static BROTLI_INLINE BROTLI_BOOL HistogramPairIsLess(
const HistogramPair* p1, const HistogramPair* p2) {
if (p1->cost_diff != p2->cost_diff) {
return TO_BROTLI_BOOL(p1->cost_diff > p2->cost_diff);
}
return TO_BROTLI_BOOL((p1->idx2 - p1->idx1) > (p2->idx2 - p2->idx1));
}
/* Returns entropy reduction of the context map when we combine two clusters. */
static BROTLI_INLINE double ClusterCostDiff(size_t size_a, size_t size_b) {
size_t size_c = size_a + size_b;
return (double)size_a * FastLog2(size_a) +
(double)size_b * FastLog2(size_b) -
(double)size_c * FastLog2(size_c);
}
#define CODE(X) X
#define FN(X) X ## Literal
#include "./cluster_inc.h" /* NOLINT(build/include) */
#undef FN
#define FN(X) X ## Command
#include "./cluster_inc.h" /* NOLINT(build/include) */
#undef FN
#define FN(X) X ## Distance
#include "./cluster_inc.h" /* NOLINT(build/include) */
#undef FN
#undef CODE
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
| /* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Functions for clustering similar histograms together. */
#include "./cluster.h"
#include "../common/platform.h"
#include <brotli/types.h>
#include "./bit_cost.h" /* BrotliPopulationCost */
#include "./fast_log.h"
#include "./histogram.h"
#include "./memory.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static BROTLI_INLINE BROTLI_BOOL HistogramPairIsLess(
const HistogramPair* p1, const HistogramPair* p2) {
if (p1->cost_diff != p2->cost_diff) {
return TO_BROTLI_BOOL(p1->cost_diff > p2->cost_diff);
}
return TO_BROTLI_BOOL((p1->idx2 - p1->idx1) > (p2->idx2 - p2->idx1));
}
/* Returns entropy reduction of the context map when we combine two clusters. */
static BROTLI_INLINE double ClusterCostDiff(size_t size_a, size_t size_b) {
size_t size_c = size_a + size_b;
return (double)size_a * FastLog2(size_a) +
(double)size_b * FastLog2(size_b) -
(double)size_c * FastLog2(size_c);
}
#define CODE(X) X
#define FN(X) X ## Literal
#include "./cluster_inc.h" /* NOLINT(build/include) */
#undef FN
#define FN(X) X ## Command
#include "./cluster_inc.h" /* NOLINT(build/include) */
#undef FN
#define FN(X) X ## Distance
#include "./cluster_inc.h" /* NOLINT(build/include) */
#undef FN
#undef CODE
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/utilcode/securitywrapper.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: SecurityWrapper.cpp
//
//
// Wrapper around Win32 Security functions
//
//*****************************************************************************
#include "stdafx.h"
#include "securitywrapper.h"
#include "ex.h"
#include "holder.h"
// For GetSidFromProcess*
#include <tlhelp32.h>
#include "wtsapi32.h"
//-----------------------------------------------------------------------------
// Constructor for Sid wrapper class.
// pSid - OS sid to wrap
//-----------------------------------------------------------------------------
Sid::Sid(PSID pSid)
{
_ASSERTE(pSid != NULL);
m_pSid = pSid;
}
//-----------------------------------------------------------------------------
// Aesthetic wrapper for Sid equality
//-----------------------------------------------------------------------------
bool Sid::Equals(PSID a, PSID b)
{
return EqualSid(a, b) != 0;
}
//-----------------------------------------------------------------------------
// Ctor for SidBuffer class
//-----------------------------------------------------------------------------
SidBuffer::SidBuffer()
{
m_pBuffer = NULL;
}
//-----------------------------------------------------------------------------
// Dtor for SidBuffer class.
//-----------------------------------------------------------------------------
SidBuffer::~SidBuffer()
{
delete [] m_pBuffer;
}
//-----------------------------------------------------------------------------
// Get the underlying sid
// Caller assumes SidBuffer has been initialized.
//-----------------------------------------------------------------------------
Sid SidBuffer::GetSid()
{
_ASSERTE(m_pBuffer != NULL);
Sid s((PSID) m_pBuffer);
return s;
}
// ----------------------------------------------------------------------------
// Used by GetSidFromProcessWorker to determine which SID from the
// process token to use when initializing the SID
enum SidType
{
// Use TokenOwner: the default owner SID used for newly created objects
kOwnerSid,
// Use TokenUser: the user account from the token
kUserSid,
};
// ----------------------------------------------------------------------------
// GetSidFromProcessWorker
//
// Description:
// Internal helper. Gets the SID for the given process and given sid type
//
// Arguments:
// * dwProcessId - [in] Process to get SID from
// * sidType - [in] Type of sid to get (owner or user)
// * ppSid - [out] SID found. Caller responsible for deleting this memory.
//
// Return Value:
// HRESULT indicating success / failure.
//
// Notes:
// * Caller owns deleting (*ppSid) when done with the SID
//
HRESULT GetSidFromProcessWorker(DWORD dwProcessId, SidType sidType, PSID *ppSid)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
HRESULT hr = S_OK;
TOKEN_USER *pTokUser = NULL;
HANDLE hProc = INVALID_HANDLE_VALUE;
HANDLE hToken = INVALID_HANDLE_VALUE;
DWORD dwRetLength = 0;
LPVOID pvTokenInfo = NULL;
TOKEN_INFORMATION_CLASS tokenInfoClass;
PSID pSidFromTokenInfo = NULL;
DWORD cbSid;
PSID pSid = NULL;
LOG((LF_CORDB, LL_INFO10000,
"SecurityUtil::GetSidFromProcess: 0x%08x\n",
dwProcessId));
_ASSERTE(ppSid);
*ppSid = NULL;
_ASSERTE((sidType == kOwnerSid) || (sidType == kUserSid));
tokenInfoClass = (sidType == kOwnerSid) ? TokenOwner : TokenUser;
hProc = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, dwProcessId);
if (hProc == NULL)
{
hr = HRESULT_FROM_GetLastError();
goto exit;
}
if (!OpenProcessToken(hProc, TOKEN_QUERY, &hToken))
{
hr = HRESULT_FROM_GetLastError();
goto exit;
}
// figure out the length
GetTokenInformation(hToken, tokenInfoClass, NULL, 0, &dwRetLength);
_ASSERTE(dwRetLength);
pvTokenInfo = new (nothrow) BYTE[dwRetLength];
if (pvTokenInfo == NULL)
{
hr = E_OUTOFMEMORY;
goto exit;
}
if (!GetTokenInformation(hToken, tokenInfoClass, pvTokenInfo, dwRetLength, &dwRetLength))
{
hr = HRESULT_FROM_GetLastError();
goto exit;
}
// Copy over the SID
pSidFromTokenInfo =
(sidType == kOwnerSid) ?
((TOKEN_OWNER *) pvTokenInfo)->Owner :
((TOKEN_USER *) pvTokenInfo)->User.Sid;
cbSid = GetLengthSid(pSidFromTokenInfo);
pSid = new (nothrow) BYTE[cbSid];
if (pSid == NULL)
{
hr = E_OUTOFMEMORY;
}
else
{
if (!CopySid(cbSid, pSid, pSidFromTokenInfo))
{
hr = HRESULT_FROM_GetLastError();
goto exit;
}
}
*ppSid = pSid;
pSid = NULL;
exit:
if (hToken != INVALID_HANDLE_VALUE)
{
CloseHandle(hToken);
}
if (hProc != INVALID_HANDLE_VALUE)
{
// clean up
CloseHandle(hProc);
}
if (pvTokenInfo)
{
delete [] (reinterpret_cast<BYTE*>(pvTokenInfo));
}
if (pSid)
{
delete [] (reinterpret_cast<BYTE*>(pSid));
}
LOG((LF_CORDB, LL_INFO10000,
"SecurityUtil::GetSidFromProcess return hr : 0x%08x\n",
hr));
return hr;
}
//-----------------------------------------------------------------------------
// The functions below initialize this SidBuffer instance with a Sid from
// the token of the specified process. The first pair use the OWNER sid from
// the process token if possible; else use the term serv API to find the
// USER sid from the process token. This seems a little inconsistent, but
// remains this way for backward compatibility. The second pair consistently
// use the USER sid (never the OWNER).
//
// While the USER and OWNER sid are often the same, they are not always the
// same. For example, running a process on win2k3 server as a member of the
// local admin group causes the USER sid to be the logged-on user, and the
// OWNER sid to be the local admins group. At least, that's how it was on
// Monday. Expect this to change randomly at unexpected times, as most
// security-related behavior does.
//-----------------------------------------------------------------------------
// ----------------------------------------------------------------------------
// SidBuffer::InitFromProcessNoThrow
//
// Description:
// Initialize this SidBuffer instance with a Sid from the token of the specified
// process. Use the OWNER sid from the process token if possible; else use the term
// serv API to find the USER sid from the process token. This seems a little
// inconsistent, but remains this way for backward compatibility.
//
// Arguments:
// * pid - Process ID from which to grab the SID
//
// Return Value:
// HRESULT indicating success / failure
//
HRESULT SidBuffer::InitFromProcessNoThrow(DWORD pid)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
_ASSERTE(m_pBuffer == NULL);
HRESULT hr = GetSidFromProcessWorker(pid, kOwnerSid, (PSID *) &m_pBuffer);
if (FAILED(hr))
{
return hr;
}
_ASSERTE(m_pBuffer != NULL);
return S_OK;
}
// See code:SidBuffer::InitFromProcessNoThrow. Throws if there's an error.
void SidBuffer::InitFromProcess(DWORD pid)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
}
CONTRACTL_END;
HRESULT hr = InitFromProcessNoThrow(pid);
if (FAILED(hr))
{
ThrowHR(hr);
}
}
// ----------------------------------------------------------------------------
// SidBuffer::InitFromProcessAppContainerSidNoThrow
//
// Description:
// Initialize this SidBuffer instance with the TokenAppContainerSid from
// the process token
//
// Arguments:
// * pid - Process ID from which to grab the SID
//
// Return Value:
// HRESULT indicating success / failure
// S_FALSE indicates the process isn't in an AppContainer
//
HRESULT SidBuffer::InitFromProcessAppContainerSidNoThrow(DWORD pid)
{
HRESULT hr = S_OK;
HANDLE hToken = NULL;
BOOL fIsLowBox = FALSE;
HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, pid);
if (hProcess == NULL)
{
hr = HRESULT_FROM_GetLastError();
goto exit;
}
if (!OpenProcessToken(hProcess, TOKEN_QUERY, &hToken))
{
hr = HRESULT_FROM_GetLastError();
goto exit;
}
// Define new TOKEN_INFORMATION_CLASS/ TOKEN_APPCONTAINER_INFORMATION members for Win8 since they are not in the DevDiv copy of WinSDK yet
typedef enum _TOKEN_INFORMATION_CLASS_WIN8 {
TokenIsAppContainer = TokenLogonSid + 1,
TokenCapabilities,
TokenAppContainerSid
} TOKEN_INFORMATION_CLASS_WIN8;
typedef struct _TOKEN_APPCONTAINER_INFORMATION
{
PSID TokenPackage;
} TOKEN_APPCONTAINER_INFORMATION, *PTOKEN_APPCONTAINER_INFORMATION;
DWORD size;
if (!GetTokenInformation(hToken, (TOKEN_INFORMATION_CLASS)TokenIsAppContainer, &fIsLowBox, sizeof(fIsLowBox), &size))
{
DWORD gle = GetLastError();
if (gle == ERROR_INVALID_PARAMETER || gle == ERROR_INVALID_FUNCTION)
{
hr = S_FALSE; // We are on an OS which doesn't understand LowBox
}
else
{
hr = HRESULT_FROM_WIN32(gle);
}
goto exit;
}
if (!fIsLowBox)
{
hr = S_FALSE;
goto exit;
}
UCHAR PackSid[SECURITY_MAX_SID_SIZE + sizeof(TOKEN_APPCONTAINER_INFORMATION)];
if (!GetTokenInformation(hToken, (TOKEN_INFORMATION_CLASS)TokenAppContainerSid, &PackSid, sizeof(PackSid), &size))
{
hr = HRESULT_FROM_GetLastError();
goto exit;
}
{
PTOKEN_APPCONTAINER_INFORMATION pTokPack = (PTOKEN_APPCONTAINER_INFORMATION)&PackSid;
PSID pLowBoxPackage = pTokPack->TokenPackage;
DWORD dwSidLen = GetLengthSid(pLowBoxPackage);
m_pBuffer = new (nothrow) BYTE[dwSidLen];
if (m_pBuffer == NULL)
{
hr = E_OUTOFMEMORY;
goto exit;
}
else
{
if (!CopySid(dwSidLen, m_pBuffer, pLowBoxPackage))
{
hr = HRESULT_FROM_GetLastError();
delete m_pBuffer;
m_pBuffer = NULL;
goto exit;
}
}
}
exit:
if (hProcess != NULL)
{
CloseHandle(hProcess);
}
if (hToken != NULL)
{
CloseHandle(hToken);
}
return hr;
}
// ----------------------------------------------------------------------------
// SidBuffer::InitFromProcessUserNoThrow
//
// Description:
// Initialize this SidBuffer instance with a Sid from the token of the specified
// process. Use the USER sid from the process token if possible; else use the term
// serv API to find the USER sid from the process token.
//
// Arguments:
// * pid - Process ID from which to grab the SID
//
// Return Value:
// HRESULT indicating success / failure
//
HRESULT SidBuffer::InitFromProcessUserNoThrow(DWORD pid)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
_ASSERTE(m_pBuffer == NULL);
HRESULT hr = GetSidFromProcessWorker(pid, kUserSid, (PSID *) &m_pBuffer);
if (FAILED(hr))
{
return hr;
}
_ASSERTE(m_pBuffer != NULL);
return S_OK;
}
// See code:SidBuffer::InitFromProcessUserNoThrow. Throws if there's an error.
void SidBuffer::InitFromProcessUser(DWORD pid)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
}
CONTRACTL_END;
HRESULT hr = InitFromProcessUserNoThrow(pid);
if (FAILED(hr))
{
ThrowHR(hr);
}
}
//-----------------------------------------------------------------------------
// Ctor for Dacl class. Wraps a win32 dacl.
//-----------------------------------------------------------------------------
Dacl::Dacl(PACL pAcl)
{
m_acl = pAcl;
}
//-----------------------------------------------------------------------------
// Get number of ACE (Access Control Entries) in this DACL.
//-----------------------------------------------------------------------------
SIZE_T Dacl::GetAceCount()
{
return (SIZE_T) m_acl->AceCount;
}
//-----------------------------------------------------------------------------
// Get Raw a ACE at the given index.
// Caller assumes index is valid (0 <= dwAceIndex < GetAceCount())
// Throws on error (which should only be if the index is out of bounds).
//-----------------------------------------------------------------------------
ACE_HEADER * Dacl::GetAce(SIZE_T dwAceIndex)
{
CONTRACTL {
THROWS;
GC_NOTRIGGER;
} CONTRACTL_END;
ACE_HEADER * pAce = NULL;
BOOL fOk = ::GetAce(m_acl, (DWORD) dwAceIndex, (LPVOID*) &pAce);
_ASSERTE(fOk == (pAce != NULL));
if (!fOk)
{
ThrowLastError();
}
return pAce;
}
//-----------------------------------------------------------------------------
// Ctor for SecurityDescriptor
//-----------------------------------------------------------------------------
Win32SecurityDescriptor::Win32SecurityDescriptor()
{
m_pDesc = NULL;
}
//-----------------------------------------------------------------------------
// Dtor for security Descriptor.
//-----------------------------------------------------------------------------
Win32SecurityDescriptor::~Win32SecurityDescriptor()
{
delete [] ((BYTE*) m_pDesc);
}
//-----------------------------------------------------------------------------
// Get the dacl for this security descriptor.
//-----------------------------------------------------------------------------
Dacl Win32SecurityDescriptor::GetDacl()
{
CONTRACTL {
THROWS;
GC_NOTRIGGER;
} CONTRACTL_END;
_ASSERTE(m_pDesc != NULL);
BOOL bPresent;
BOOL bDaclDefaulted;
PACL acl;
if (GetSecurityDescriptorDacl(m_pDesc, &bPresent, &acl, &bDaclDefaulted) == 0)
{
ThrowLastError();
}
if (!bPresent)
{
// No dacl. We consider this an error because all of the objects we expect
// to see should be dacled. If it's not dacled, then it's a malicious user spoofing it.
ThrowHR(E_INVALIDARG);
}
Dacl d(acl);
return d;
}
//-----------------------------------------------------------------------------
// Get the owner from the security descriptor.
//-----------------------------------------------------------------------------
HRESULT Win32SecurityDescriptor::GetOwnerNoThrow( PSID* ppSid)
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
} CONTRACTL_END;
_ASSERTE(m_pDesc != NULL);
BOOL bOwnerDefaulted;
if( ppSid == NULL )
{
return E_INVALIDARG;
}
if (GetSecurityDescriptorOwner(m_pDesc, ppSid, &bOwnerDefaulted) == 0)
{
DWORD err = GetLastError();
return HRESULT_FROM_WIN32(err);
}
return S_OK;
}
Sid Win32SecurityDescriptor::GetOwner()
{
CONTRACTL {
THROWS;
GC_NOTRIGGER;
} CONTRACTL_END;
PSID pSid;
HRESULT hr = GetOwnerNoThrow( &pSid );
if( FAILED(hr) )
{
ThrowHR( hr );
}
Sid s(pSid);
return s;
}
//-----------------------------------------------------------------------------
// Initialize this instance of a SecurityDescriptor with the SD for the handle.
// The handle must have READ_CONTROL permissions to do this.
// Throws on error.
//-----------------------------------------------------------------------------
HRESULT Win32SecurityDescriptor::InitFromHandleNoThrow(HANDLE h)
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
} CONTRACTL_END;
_ASSERTE(m_pDesc == NULL); // only init once.
DWORD cbNeeded = 0;
DWORD flags = OWNER_SECURITY_INFORMATION | DACL_SECURITY_INFORMATION;
// Now get the creator's SID. First get the size of the array needed.
BOOL fOk = GetKernelObjectSecurity(h, flags, NULL, 0, &cbNeeded);
DWORD err = GetLastError();
// Caller should give us a handle for which this succeeds. First call will
// fail w/ InsufficientBuffer.
CONSISTENCY_CHECK_MSGF(fOk || (err == ERROR_INSUFFICIENT_BUFFER), ("Failed to get KernelSecurity for object handle=%p.Err=%d\n", h, err));
PSECURITY_DESCRIPTOR pSD = (PSECURITY_DESCRIPTOR) new(nothrow) BYTE[cbNeeded];
if( pSD == NULL )
{
return E_OUTOFMEMORY;
}
if (GetKernelObjectSecurity(h, flags, pSD, cbNeeded, &cbNeeded) == 0)
{
// get last error and fail out.
err = GetLastError();
delete [] ((BYTE*) pSD);
return HRESULT_FROM_WIN32(err);
}
m_pDesc = pSD;
return S_OK;
}
void Win32SecurityDescriptor::InitFromHandle(HANDLE h)
{
CONTRACTL {
THROWS;
GC_NOTRIGGER;
} CONTRACTL_END;
HRESULT hr = InitFromHandleNoThrow(h);
if (FAILED(hr))
{
ThrowHR(hr);
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: SecurityWrapper.cpp
//
//
// Wrapper around Win32 Security functions
//
//*****************************************************************************
#include "stdafx.h"
#include "securitywrapper.h"
#include "ex.h"
#include "holder.h"
// For GetSidFromProcess*
#include <tlhelp32.h>
#include "wtsapi32.h"
//-----------------------------------------------------------------------------
// Constructor for Sid wrapper class.
// pSid - OS sid to wrap
//-----------------------------------------------------------------------------
Sid::Sid(PSID pSid)
{
_ASSERTE(pSid != NULL);
m_pSid = pSid;
}
//-----------------------------------------------------------------------------
// Aesthetic wrapper for Sid equality
//-----------------------------------------------------------------------------
bool Sid::Equals(PSID a, PSID b)
{
return EqualSid(a, b) != 0;
}
//-----------------------------------------------------------------------------
// Ctor for SidBuffer class
//-----------------------------------------------------------------------------
SidBuffer::SidBuffer()
{
m_pBuffer = NULL;
}
//-----------------------------------------------------------------------------
// Dtor for SidBuffer class.
//-----------------------------------------------------------------------------
SidBuffer::~SidBuffer()
{
delete [] m_pBuffer;
}
//-----------------------------------------------------------------------------
// Get the underlying sid
// Caller assumes SidBuffer has been initialized.
//-----------------------------------------------------------------------------
Sid SidBuffer::GetSid()
{
_ASSERTE(m_pBuffer != NULL);
Sid s((PSID) m_pBuffer);
return s;
}
// ----------------------------------------------------------------------------
// Used by GetSidFromProcessWorker to determine which SID from the
// process token to use when initializing the SID
enum SidType
{
// Use TokenOwner: the default owner SID used for newly created objects
kOwnerSid,
// Use TokenUser: the user account from the token
kUserSid,
};
// ----------------------------------------------------------------------------
// GetSidFromProcessWorker
//
// Description:
// Internal helper. Gets the SID for the given process and given sid type
//
// Arguments:
// * dwProcessId - [in] Process to get SID from
// * sidType - [in] Type of sid to get (owner or user)
// * ppSid - [out] SID found. Caller responsible for deleting this memory.
//
// Return Value:
// HRESULT indicating success / failure.
//
// Notes:
// * Caller owns deleting (*ppSid) when done with the SID
//
HRESULT GetSidFromProcessWorker(DWORD dwProcessId, SidType sidType, PSID *ppSid)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
HRESULT hr = S_OK;
TOKEN_USER *pTokUser = NULL;
HANDLE hProc = INVALID_HANDLE_VALUE;
HANDLE hToken = INVALID_HANDLE_VALUE;
DWORD dwRetLength = 0;
LPVOID pvTokenInfo = NULL;
TOKEN_INFORMATION_CLASS tokenInfoClass;
PSID pSidFromTokenInfo = NULL;
DWORD cbSid;
PSID pSid = NULL;
LOG((LF_CORDB, LL_INFO10000,
"SecurityUtil::GetSidFromProcess: 0x%08x\n",
dwProcessId));
_ASSERTE(ppSid);
*ppSid = NULL;
_ASSERTE((sidType == kOwnerSid) || (sidType == kUserSid));
tokenInfoClass = (sidType == kOwnerSid) ? TokenOwner : TokenUser;
hProc = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, dwProcessId);
if (hProc == NULL)
{
hr = HRESULT_FROM_GetLastError();
goto exit;
}
if (!OpenProcessToken(hProc, TOKEN_QUERY, &hToken))
{
hr = HRESULT_FROM_GetLastError();
goto exit;
}
// figure out the length
GetTokenInformation(hToken, tokenInfoClass, NULL, 0, &dwRetLength);
_ASSERTE(dwRetLength);
pvTokenInfo = new (nothrow) BYTE[dwRetLength];
if (pvTokenInfo == NULL)
{
hr = E_OUTOFMEMORY;
goto exit;
}
if (!GetTokenInformation(hToken, tokenInfoClass, pvTokenInfo, dwRetLength, &dwRetLength))
{
hr = HRESULT_FROM_GetLastError();
goto exit;
}
// Copy over the SID
pSidFromTokenInfo =
(sidType == kOwnerSid) ?
((TOKEN_OWNER *) pvTokenInfo)->Owner :
((TOKEN_USER *) pvTokenInfo)->User.Sid;
cbSid = GetLengthSid(pSidFromTokenInfo);
pSid = new (nothrow) BYTE[cbSid];
if (pSid == NULL)
{
hr = E_OUTOFMEMORY;
}
else
{
if (!CopySid(cbSid, pSid, pSidFromTokenInfo))
{
hr = HRESULT_FROM_GetLastError();
goto exit;
}
}
*ppSid = pSid;
pSid = NULL;
exit:
if (hToken != INVALID_HANDLE_VALUE)
{
CloseHandle(hToken);
}
if (hProc != INVALID_HANDLE_VALUE)
{
// clean up
CloseHandle(hProc);
}
if (pvTokenInfo)
{
delete [] (reinterpret_cast<BYTE*>(pvTokenInfo));
}
if (pSid)
{
delete [] (reinterpret_cast<BYTE*>(pSid));
}
LOG((LF_CORDB, LL_INFO10000,
"SecurityUtil::GetSidFromProcess return hr : 0x%08x\n",
hr));
return hr;
}
//-----------------------------------------------------------------------------
// The functions below initialize this SidBuffer instance with a Sid from
// the token of the specified process. The first pair use the OWNER sid from
// the process token if possible; else use the term serv API to find the
// USER sid from the process token. This seems a little inconsistent, but
// remains this way for backward compatibility. The second pair consistently
// use the USER sid (never the OWNER).
//
// While the USER and OWNER sid are often the same, they are not always the
// same. For example, running a process on win2k3 server as a member of the
// local admin group causes the USER sid to be the logged-on user, and the
// OWNER sid to be the local admins group. At least, that's how it was on
// Monday. Expect this to change randomly at unexpected times, as most
// security-related behavior does.
//-----------------------------------------------------------------------------
// ----------------------------------------------------------------------------
// SidBuffer::InitFromProcessNoThrow
//
// Description:
// Initialize this SidBuffer instance with a Sid from the token of the specified
// process. Use the OWNER sid from the process token if possible; else use the term
// serv API to find the USER sid from the process token. This seems a little
// inconsistent, but remains this way for backward compatibility.
//
// Arguments:
// * pid - Process ID from which to grab the SID
//
// Return Value:
// HRESULT indicating success / failure
//
HRESULT SidBuffer::InitFromProcessNoThrow(DWORD pid)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
_ASSERTE(m_pBuffer == NULL);
HRESULT hr = GetSidFromProcessWorker(pid, kOwnerSid, (PSID *) &m_pBuffer);
if (FAILED(hr))
{
return hr;
}
_ASSERTE(m_pBuffer != NULL);
return S_OK;
}
// See code:SidBuffer::InitFromProcessNoThrow. Throws if there's an error.
void SidBuffer::InitFromProcess(DWORD pid)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
}
CONTRACTL_END;
HRESULT hr = InitFromProcessNoThrow(pid);
if (FAILED(hr))
{
ThrowHR(hr);
}
}
// ----------------------------------------------------------------------------
// SidBuffer::InitFromProcessAppContainerSidNoThrow
//
// Description:
// Initialize this SidBuffer instance with the TokenAppContainerSid from
// the process token
//
// Arguments:
// * pid - Process ID from which to grab the SID
//
// Return Value:
// HRESULT indicating success / failure
// S_FALSE indicates the process isn't in an AppContainer
//
HRESULT SidBuffer::InitFromProcessAppContainerSidNoThrow(DWORD pid)
{
HRESULT hr = S_OK;
HANDLE hToken = NULL;
BOOL fIsLowBox = FALSE;
HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, pid);
if (hProcess == NULL)
{
hr = HRESULT_FROM_GetLastError();
goto exit;
}
if (!OpenProcessToken(hProcess, TOKEN_QUERY, &hToken))
{
hr = HRESULT_FROM_GetLastError();
goto exit;
}
// Define new TOKEN_INFORMATION_CLASS/ TOKEN_APPCONTAINER_INFORMATION members for Win8 since they are not in the DevDiv copy of WinSDK yet
typedef enum _TOKEN_INFORMATION_CLASS_WIN8 {
TokenIsAppContainer = TokenLogonSid + 1,
TokenCapabilities,
TokenAppContainerSid
} TOKEN_INFORMATION_CLASS_WIN8;
typedef struct _TOKEN_APPCONTAINER_INFORMATION
{
PSID TokenPackage;
} TOKEN_APPCONTAINER_INFORMATION, *PTOKEN_APPCONTAINER_INFORMATION;
DWORD size;
if (!GetTokenInformation(hToken, (TOKEN_INFORMATION_CLASS)TokenIsAppContainer, &fIsLowBox, sizeof(fIsLowBox), &size))
{
DWORD gle = GetLastError();
if (gle == ERROR_INVALID_PARAMETER || gle == ERROR_INVALID_FUNCTION)
{
hr = S_FALSE; // We are on an OS which doesn't understand LowBox
}
else
{
hr = HRESULT_FROM_WIN32(gle);
}
goto exit;
}
if (!fIsLowBox)
{
hr = S_FALSE;
goto exit;
}
UCHAR PackSid[SECURITY_MAX_SID_SIZE + sizeof(TOKEN_APPCONTAINER_INFORMATION)];
if (!GetTokenInformation(hToken, (TOKEN_INFORMATION_CLASS)TokenAppContainerSid, &PackSid, sizeof(PackSid), &size))
{
hr = HRESULT_FROM_GetLastError();
goto exit;
}
{
PTOKEN_APPCONTAINER_INFORMATION pTokPack = (PTOKEN_APPCONTAINER_INFORMATION)&PackSid;
PSID pLowBoxPackage = pTokPack->TokenPackage;
DWORD dwSidLen = GetLengthSid(pLowBoxPackage);
m_pBuffer = new (nothrow) BYTE[dwSidLen];
if (m_pBuffer == NULL)
{
hr = E_OUTOFMEMORY;
goto exit;
}
else
{
if (!CopySid(dwSidLen, m_pBuffer, pLowBoxPackage))
{
hr = HRESULT_FROM_GetLastError();
delete m_pBuffer;
m_pBuffer = NULL;
goto exit;
}
}
}
exit:
if (hProcess != NULL)
{
CloseHandle(hProcess);
}
if (hToken != NULL)
{
CloseHandle(hToken);
}
return hr;
}
// ----------------------------------------------------------------------------
// SidBuffer::InitFromProcessUserNoThrow
//
// Description:
// Initialize this SidBuffer instance with a Sid from the token of the specified
// process. Use the USER sid from the process token if possible; else use the term
// serv API to find the USER sid from the process token.
//
// Arguments:
// * pid - Process ID from which to grab the SID
//
// Return Value:
// HRESULT indicating success / failure
//
HRESULT SidBuffer::InitFromProcessUserNoThrow(DWORD pid)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
_ASSERTE(m_pBuffer == NULL);
HRESULT hr = GetSidFromProcessWorker(pid, kUserSid, (PSID *) &m_pBuffer);
if (FAILED(hr))
{
return hr;
}
_ASSERTE(m_pBuffer != NULL);
return S_OK;
}
// See code:SidBuffer::InitFromProcessUserNoThrow. Throws if there's an error.
void SidBuffer::InitFromProcessUser(DWORD pid)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
}
CONTRACTL_END;
HRESULT hr = InitFromProcessUserNoThrow(pid);
if (FAILED(hr))
{
ThrowHR(hr);
}
}
//-----------------------------------------------------------------------------
// Ctor for Dacl class. Wraps a win32 dacl.
//-----------------------------------------------------------------------------
Dacl::Dacl(PACL pAcl)
{
m_acl = pAcl;
}
//-----------------------------------------------------------------------------
// Get number of ACE (Access Control Entries) in this DACL.
//-----------------------------------------------------------------------------
SIZE_T Dacl::GetAceCount()
{
return (SIZE_T) m_acl->AceCount;
}
//-----------------------------------------------------------------------------
// Get Raw a ACE at the given index.
// Caller assumes index is valid (0 <= dwAceIndex < GetAceCount())
// Throws on error (which should only be if the index is out of bounds).
//-----------------------------------------------------------------------------
ACE_HEADER * Dacl::GetAce(SIZE_T dwAceIndex)
{
CONTRACTL {
THROWS;
GC_NOTRIGGER;
} CONTRACTL_END;
ACE_HEADER * pAce = NULL;
BOOL fOk = ::GetAce(m_acl, (DWORD) dwAceIndex, (LPVOID*) &pAce);
_ASSERTE(fOk == (pAce != NULL));
if (!fOk)
{
ThrowLastError();
}
return pAce;
}
//-----------------------------------------------------------------------------
// Ctor for SecurityDescriptor
//-----------------------------------------------------------------------------
Win32SecurityDescriptor::Win32SecurityDescriptor()
{
m_pDesc = NULL;
}
//-----------------------------------------------------------------------------
// Dtor for security Descriptor.
//-----------------------------------------------------------------------------
Win32SecurityDescriptor::~Win32SecurityDescriptor()
{
delete [] ((BYTE*) m_pDesc);
}
//-----------------------------------------------------------------------------
// Get the dacl for this security descriptor.
//-----------------------------------------------------------------------------
Dacl Win32SecurityDescriptor::GetDacl()
{
CONTRACTL {
THROWS;
GC_NOTRIGGER;
} CONTRACTL_END;
_ASSERTE(m_pDesc != NULL);
BOOL bPresent;
BOOL bDaclDefaulted;
PACL acl;
if (GetSecurityDescriptorDacl(m_pDesc, &bPresent, &acl, &bDaclDefaulted) == 0)
{
ThrowLastError();
}
if (!bPresent)
{
// No dacl. We consider this an error because all of the objects we expect
// to see should be dacled. If it's not dacled, then it's a malicious user spoofing it.
ThrowHR(E_INVALIDARG);
}
Dacl d(acl);
return d;
}
//-----------------------------------------------------------------------------
// Get the owner from the security descriptor.
//-----------------------------------------------------------------------------
HRESULT Win32SecurityDescriptor::GetOwnerNoThrow( PSID* ppSid)
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
} CONTRACTL_END;
_ASSERTE(m_pDesc != NULL);
BOOL bOwnerDefaulted;
if( ppSid == NULL )
{
return E_INVALIDARG;
}
if (GetSecurityDescriptorOwner(m_pDesc, ppSid, &bOwnerDefaulted) == 0)
{
DWORD err = GetLastError();
return HRESULT_FROM_WIN32(err);
}
return S_OK;
}
Sid Win32SecurityDescriptor::GetOwner()
{
CONTRACTL {
THROWS;
GC_NOTRIGGER;
} CONTRACTL_END;
PSID pSid;
HRESULT hr = GetOwnerNoThrow( &pSid );
if( FAILED(hr) )
{
ThrowHR( hr );
}
Sid s(pSid);
return s;
}
//-----------------------------------------------------------------------------
// Initialize this instance of a SecurityDescriptor with the SD for the handle.
// The handle must have READ_CONTROL permissions to do this.
// Throws on error.
//-----------------------------------------------------------------------------
HRESULT Win32SecurityDescriptor::InitFromHandleNoThrow(HANDLE h)
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
} CONTRACTL_END;
_ASSERTE(m_pDesc == NULL); // only init once.
DWORD cbNeeded = 0;
DWORD flags = OWNER_SECURITY_INFORMATION | DACL_SECURITY_INFORMATION;
// Now get the creator's SID. First get the size of the array needed.
BOOL fOk = GetKernelObjectSecurity(h, flags, NULL, 0, &cbNeeded);
DWORD err = GetLastError();
// Caller should give us a handle for which this succeeds. First call will
// fail w/ InsufficientBuffer.
CONSISTENCY_CHECK_MSGF(fOk || (err == ERROR_INSUFFICIENT_BUFFER), ("Failed to get KernelSecurity for object handle=%p.Err=%d\n", h, err));
PSECURITY_DESCRIPTOR pSD = (PSECURITY_DESCRIPTOR) new(nothrow) BYTE[cbNeeded];
if( pSD == NULL )
{
return E_OUTOFMEMORY;
}
if (GetKernelObjectSecurity(h, flags, pSD, cbNeeded, &cbNeeded) == 0)
{
// get last error and fail out.
err = GetLastError();
delete [] ((BYTE*) pSD);
return HRESULT_FROM_WIN32(err);
}
m_pDesc = pSD;
return S_OK;
}
void Win32SecurityDescriptor::InitFromHandle(HANDLE h)
{
CONTRACTL {
THROWS;
GC_NOTRIGGER;
} CONTRACTL_END;
HRESULT hr = InitFromHandleNoThrow(h);
if (FAILED(hr))
{
ThrowHR(hr);
}
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/native/libs/System.Native/pal_interfaceaddresses.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#pragma once
#include "pal_compiler.h"
#include "pal_maphardwaretype.h"
#include "pal_types.h"
typedef enum
{
OperationalStatus_Up = 1,
OperationalStatus_Down = 2,
OperationalStatus_Unknown = 4,
OperationalStatus_LowerLayerDown = 7,
} OperationalStatus;
typedef struct
{
uint32_t InterfaceIndex; // The index of the interface to which this address belongs.
uint8_t AddressBytes[8]; // A pointer to the bytes containing the address.
uint8_t NumAddressBytes; // The number of bytes actually stored in the address.
uint8_t _padding;
uint16_t HardwareType;
} LinkLayerAddressInfo;
typedef struct
{
uint32_t InterfaceIndex;
uint8_t AddressBytes[16];
uint8_t NumAddressBytes;
uint8_t PrefixLength;
uint8_t _padding[2];
} IpAddressInfo;
typedef struct
{
char Name[16]; // OS Interface name.
int64_t Speed; // Link speed for physical interfaces.
uint32_t InterfaceIndex; // Interface index.
int32_t Mtu; // Interface MTU.
uint16_t HardwareType; // Interface mapped from L2 to NetworkInterfaceType.
uint8_t OperationalState; // Operational status.
uint8_t NumAddressBytes; // The number of bytes actually stored in the address.
uint8_t AddressBytes[8]; // Link address.
uint8_t SupportsMulticast; // Interface supports multicast.
uint8_t _padding[3];
} NetworkInterfaceInfo;
typedef void (*IPv4AddressFound)(void* context, const char* interfaceName, IpAddressInfo* addressInfo);
typedef void (*IPv6AddressFound)(void* context, const char* interfaceName, IpAddressInfo* info, uint32_t* scopeId);
typedef void (*LinkLayerAddressFound)(void* context, const char* interfaceName, LinkLayerAddressInfo* llAddress);
typedef void (*GatewayAddressFound)(void* context, IpAddressInfo* addressInfo);
PALEXPORT int32_t SystemNative_EnumerateInterfaceAddresses(
void* context, IPv4AddressFound onIpv4Found, IPv6AddressFound onIpv6Found, LinkLayerAddressFound onLinkLayerFound);
PALEXPORT int32_t SystemNative_GetNetworkInterfaces(int32_t * interfaceCount, NetworkInterfaceInfo** interfaces, int32_t * addressCount, IpAddressInfo **addressList);
PALEXPORT int32_t SystemNative_EnumerateGatewayAddressesForInterface(void* context, uint32_t interfaceIndex, GatewayAddressFound onGatewayFound);
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#pragma once
#include "pal_compiler.h"
#include "pal_maphardwaretype.h"
#include "pal_types.h"
typedef enum
{
OperationalStatus_Up = 1,
OperationalStatus_Down = 2,
OperationalStatus_Unknown = 4,
OperationalStatus_LowerLayerDown = 7,
} OperationalStatus;
typedef struct
{
uint32_t InterfaceIndex; // The index of the interface to which this address belongs.
uint8_t AddressBytes[8]; // A pointer to the bytes containing the address.
uint8_t NumAddressBytes; // The number of bytes actually stored in the address.
uint8_t _padding;
uint16_t HardwareType;
} LinkLayerAddressInfo;
typedef struct
{
uint32_t InterfaceIndex;
uint8_t AddressBytes[16];
uint8_t NumAddressBytes;
uint8_t PrefixLength;
uint8_t _padding[2];
} IpAddressInfo;
typedef struct
{
char Name[16]; // OS Interface name.
int64_t Speed; // Link speed for physical interfaces.
uint32_t InterfaceIndex; // Interface index.
int32_t Mtu; // Interface MTU.
uint16_t HardwareType; // Interface mapped from L2 to NetworkInterfaceType.
uint8_t OperationalState; // Operational status.
uint8_t NumAddressBytes; // The number of bytes actually stored in the address.
uint8_t AddressBytes[8]; // Link address.
uint8_t SupportsMulticast; // Interface supports multicast.
uint8_t _padding[3];
} NetworkInterfaceInfo;
typedef void (*IPv4AddressFound)(void* context, const char* interfaceName, IpAddressInfo* addressInfo);
typedef void (*IPv6AddressFound)(void* context, const char* interfaceName, IpAddressInfo* info, uint32_t* scopeId);
typedef void (*LinkLayerAddressFound)(void* context, const char* interfaceName, LinkLayerAddressInfo* llAddress);
typedef void (*GatewayAddressFound)(void* context, IpAddressInfo* addressInfo);
PALEXPORT int32_t SystemNative_EnumerateInterfaceAddresses(
void* context, IPv4AddressFound onIpv4Found, IPv6AddressFound onIpv6Found, LinkLayerAddressFound onLinkLayerFound);
PALEXPORT int32_t SystemNative_GetNetworkInterfaces(int32_t * interfaceCount, NetworkInterfaceInfo** interfaces, int32_t * addressCount, IpAddressInfo **addressList);
PALEXPORT int32_t SystemNative_EnumerateGatewayAddressesForInterface(void* context, uint32_t interfaceIndex, GatewayAddressFound onGatewayFound);
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/native/public/mono/metadata/reflection.h | /**
* \file
*/
#ifndef __METADATA_REFLECTION_H__
#define __METADATA_REFLECTION_H__
#include <mono/metadata/details/reflection-types.h>
MONO_BEGIN_DECLS
#define MONO_API_FUNCTION(ret,name,args) MONO_API ret name args;
#include <mono/metadata/details/reflection-functions.h>
#undef MONO_API_FUNCTION
MONO_END_DECLS
#endif /* __METADATA_REFLECTION_H__ */
| /**
* \file
*/
#ifndef __METADATA_REFLECTION_H__
#define __METADATA_REFLECTION_H__
#include <mono/metadata/details/reflection-types.h>
MONO_BEGIN_DECLS
#define MONO_API_FUNCTION(ret,name,args) MONO_API ret name args;
#include <mono/metadata/details/reflection-functions.h>
#undef MONO_API_FUNCTION
MONO_END_DECLS
#endif /* __METADATA_REFLECTION_H__ */
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/nativeaot/CMakeLists.txt | if(WIN32)
add_definitions(-DUNICODE=1)
endif (WIN32)
if(MSVC)
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/EHa->) # Native AOT runtime does not use C++ exception handling
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/EHs->)
# CFG runtime checks in C++ code are unnecessary overhead unless Native AOT compiler produces CFG compliant code as well
# and CFG is enabled in the linker
if(CMAKE_CXX_FLAGS MATCHES "/guard:cf")
string(REPLACE "/guard:cf" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif()
if(CMAKE_C_FLAGS MATCHES "/guard:cf")
string(REPLACE "/guard:cf" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
endif()
# The code generated by the Native AOT compiler doesn't work with Link Time Code Generation
add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/GL->)
# Sets the options that create the fastest code in the majority of cases
add_compile_options($<$<AND:$<COMPILE_LANGUAGE:C,CXX>,$<CONFIG:Release>>:/O2>)
endif (MSVC)
if(CLR_CMAKE_HOST_UNIX)
# Up for grabs to clean these warnings up
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
add_compile_options(-Wno-error)
endif()
add_compile_options(-fno-rtti) # Native AOT runtime doesn't use RTTI
add_compile_options(-fno-exceptions) # Native AOT runtime doesn't use C++ exception handling
if(CLR_CMAKE_TARGET_OSX)
add_definitions(-D_XOPEN_SOURCE)
endif(CLR_CMAKE_TARGET_OSX)
if(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_I386)
# Allow 16 byte compare-exchange
add_compile_options(-mcx16)
endif(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_I386)
endif (CLR_CMAKE_HOST_UNIX)
if(CLR_CMAKE_HOST_ALPINE_LINUX)
# Fix up the main thread stack size for MUSL to more reasonable size.
# TODO: https://github.com/dotnet/runtimelab/issues/791
add_definitions(-DENSURE_PRIMARY_STACK_SIZE)
endif()
if(CLR_CMAKE_TARGET_ANDROID)
add_definitions(-DFEATURE_EMULATED_TLS)
endif()
add_subdirectory(Bootstrap)
add_subdirectory(Runtime)
| if(WIN32)
add_definitions(-DUNICODE=1)
endif (WIN32)
if(MSVC)
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/EHa->) # Native AOT runtime does not use C++ exception handling
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/EHs->)
# CFG runtime checks in C++ code are unnecessary overhead unless Native AOT compiler produces CFG compliant code as well
# and CFG is enabled in the linker
if(CMAKE_CXX_FLAGS MATCHES "/guard:cf")
string(REPLACE "/guard:cf" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif()
if(CMAKE_C_FLAGS MATCHES "/guard:cf")
string(REPLACE "/guard:cf" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
endif()
# The code generated by the Native AOT compiler doesn't work with Link Time Code Generation
add_compile_options($<$<COMPILE_LANGUAGE:C,CXX>:/GL->)
# Sets the options that create the fastest code in the majority of cases
add_compile_options($<$<AND:$<COMPILE_LANGUAGE:C,CXX>,$<CONFIG:Release>>:/O2>)
endif (MSVC)
if(CLR_CMAKE_HOST_UNIX)
# Up for grabs to clean these warnings up
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
add_compile_options(-Wno-error)
endif()
add_compile_options(-fno-rtti) # Native AOT runtime doesn't use RTTI
add_compile_options(-fno-exceptions) # Native AOT runtime doesn't use C++ exception handling
if(CLR_CMAKE_TARGET_OSX)
add_definitions(-D_XOPEN_SOURCE)
endif(CLR_CMAKE_TARGET_OSX)
if(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_I386)
# Allow 16 byte compare-exchange
add_compile_options(-mcx16)
endif(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_I386)
endif (CLR_CMAKE_HOST_UNIX)
if(CLR_CMAKE_HOST_ALPINE_LINUX)
# Fix up the main thread stack size for MUSL to more reasonable size.
# TODO: https://github.com/dotnet/runtimelab/issues/791
add_definitions(-DENSURE_PRIMARY_STACK_SIZE)
endif()
if(CLR_CMAKE_TARGET_ANDROID)
add_definitions(-DFEATURE_EMULATED_TLS)
endif()
add_subdirectory(Bootstrap)
add_subdirectory(Runtime)
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/src/libunwind/src/x86/regname.c | #include "unwind_i.h"
static const char *regname[] =
{
"eax", "edx", "ecx", "ebx", "esi", "edi", "ebp", "esp", "eip",
"eflags", "trapno",
"st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
"fcw", "fsw", "ftw", "fop", "fcs", "fip", "fea", "fds",
"xmm0_lo", "xmm0_hi", "xmm1_lo", "xmm1_hi",
"xmm2_lo", "xmm2_hi", "xmm3_lo", "xmm3_hi",
"xmm4_lo", "xmm4_hi", "xmm5_lo", "xmm5_hi",
"xmm6_lo", "xmm6_hi", "xmm7_lo", "xmm7_hi",
"mxcsr",
"gs", "fs", "es", "ds", "ss", "cs",
"tss", "ldt",
"cfi",
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
};
const char *
unw_regname (unw_regnum_t reg)
{
if (reg < (unw_regnum_t) ARRAY_SIZE (regname))
return regname[reg];
else
return "???";
}
| #include "unwind_i.h"
static const char *regname[] =
{
"eax", "edx", "ecx", "ebx", "esi", "edi", "ebp", "esp", "eip",
"eflags", "trapno",
"st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
"fcw", "fsw", "ftw", "fop", "fcs", "fip", "fea", "fds",
"xmm0_lo", "xmm0_hi", "xmm1_lo", "xmm1_hi",
"xmm2_lo", "xmm2_hi", "xmm3_lo", "xmm3_hi",
"xmm4_lo", "xmm4_hi", "xmm5_lo", "xmm5_hi",
"xmm6_lo", "xmm6_hi", "xmm7_lo", "xmm7_hi",
"mxcsr",
"gs", "fs", "es", "ds", "ss", "cs",
"tss", "ldt",
"cfi",
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
};
const char *
unw_regname (unw_regnum_t reg)
{
if (reg < (unw_regnum_t) ARRAY_SIZE (regname))
return regname[reg];
else
return "???";
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/binder/utils.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// ============================================================
//
// Utils.cpp
//
//
// Implements a bunch of binder auxilary functions
//
// ============================================================
#include "utils.hpp"
#include "strongnameinternal.h"
#include "corpriv.h"
#include "clr/fs/path.h"
using namespace clr::fs;
namespace BINDER_SPACE
{
namespace
{
inline const WCHAR *GetPlatformPathSeparator()
{
#ifdef TARGET_UNIX
return W("/");
#else
return W("\\");
#endif // TARGET_UNIX
}
}
void MutateUrlToPath(SString &urlOrPath)
{
const SString fileUrlPrefix(SString::Literal, W("file://"));
SString::Iterator i = urlOrPath.Begin();
if (urlOrPath.MatchCaseInsensitive(i, fileUrlPrefix))
{
urlOrPath.Delete(i, fileUrlPrefix.GetCount());
i = urlOrPath.Begin() + 1;
if (i[0] == W(':'))
{
// CLR erroneously passes in file:// prepended to file paths,
// so we can't tell the difference between UNC and local file.
goto Exit;
}
i = urlOrPath.Begin();
#if !defined(TARGET_UNIX)
if (i[0] == W('/'))
{
// Disk path file:///
urlOrPath.Delete(i, 1);
}
else if (i[0] != W('\\'))
{
// UNC Path, re-insert "//" if not the wrong file://\\...
urlOrPath.Insert(i, W("//"));
}
#else
// Unix doesn't have a distinction between local and network path
_ASSERTE(i[0] == W('\\') || i[0] == W('/'));
#endif
}
Exit:
while (urlOrPath.Find(i, W('/')))
{
urlOrPath.Replace(i, W('\\'));
}
}
void CombinePath(const SString &pathA,
const SString &pathB,
SString &combinedPath)
{
SString platformPathSeparator(SString::Literal, GetPlatformPathSeparator());
combinedPath.Set(pathA);
if (!combinedPath.IsEmpty() && !combinedPath.EndsWith(platformPathSeparator))
{
combinedPath.Append(platformPathSeparator);
}
combinedPath.Append(pathB);
}
HRESULT GetTokenFromPublicKey(SBuffer &publicKeyBLOB,
SBuffer &publicKeyTokenBLOB)
{
HRESULT hr = S_OK;
const BYTE *pByteKey = publicKeyBLOB;
DWORD dwKeyLen = publicKeyBLOB.GetSize();
BYTE *pByteToken = NULL;
DWORD dwTokenLen = 0;
IF_FAIL_GO(StrongNameTokenFromPublicKey(
const_cast<BYTE*>(pByteKey),
dwKeyLen,
&pByteToken,
&dwTokenLen));
_ASSERTE(pByteToken != NULL);
publicKeyTokenBLOB.Set(pByteToken, dwTokenLen);
StrongNameFreeBuffer(pByteToken);
Exit:
return hr;
}
BOOL IsFileNotFound(HRESULT hr)
{
return RuntimeFileNotFound(hr);
}
HRESULT GetNextPath(const SString& paths, SString::CIterator& startPos, SString& outPath)
{
HRESULT hr = S_OK;
bool wrappedWithQuotes = false;
// Skip any leading spaces or path separators
while (paths.Skip(startPos, W(' ')) || paths.Skip(startPos, PATH_SEPARATOR_CHAR_W)) {}
if (startPos == paths.End())
{
// No more paths in the string and we just skipped over some white space
outPath.Set(W(""));
return S_FALSE;
}
// Support paths being wrapped with quotations
if (paths.Skip(startPos, W('\"')))
{
wrappedWithQuotes = true;
}
SString::CIterator iEnd = startPos; // Where current path ends
SString::CIterator iNext; // Where next path starts
if (wrappedWithQuotes)
{
if (paths.Find(iEnd, W('\"')))
{
iNext = iEnd;
// Find where the next path starts - there should be a path separator right after the closing quotation mark
if (paths.Find(iNext, PATH_SEPARATOR_CHAR_W))
{
iNext++;
}
else
{
iNext = paths.End();
}
}
else
{
// There was no terminating quotation mark - that's bad
GO_WITH_HRESULT(E_INVALIDARG);
}
}
else if (paths.Find(iEnd, PATH_SEPARATOR_CHAR_W))
{
iNext = iEnd + 1;
}
else
{
iNext = iEnd = paths.End();
}
// Skip any trailing spaces
while (iEnd[-1] == W(' '))
{
iEnd--;
}
_ASSERTE(startPos < iEnd);
outPath.Set(paths, startPos, iEnd);
startPos = iNext;
Exit:
return hr;
}
HRESULT GetNextTPAPath(const SString& paths, SString::CIterator& startPos, bool dllOnly, SString& outPath, SString& simpleName, bool& isNativeImage)
{
HRESULT hr = S_OK;
isNativeImage = false;
HRESULT pathResult = S_OK;
IF_FAIL_GO(pathResult = GetNextPath(paths, startPos, outPath));
if (pathResult == S_FALSE)
{
return S_FALSE;
}
if (Path::IsRelative(outPath))
{
GO_WITH_HRESULT(E_INVALIDARG);
}
{
// Find the beginning of the simple name
SString::CIterator iSimpleNameStart = outPath.End();
if (!outPath.FindBack(iSimpleNameStart, DIRECTORY_SEPARATOR_CHAR_W))
{
iSimpleNameStart = outPath.Begin();
}
else
{
// Advance past the directory separator to the first character of the file name
iSimpleNameStart++;
}
if (iSimpleNameStart == outPath.End())
{
GO_WITH_HRESULT(E_INVALIDARG);
}
const SString sNiDll(SString::Literal, W(".ni.dll"));
const SString sNiExe(SString::Literal, W(".ni.exe"));
const SString sDll(SString::Literal, W(".dll"));
const SString sExe(SString::Literal, W(".exe"));
if (!dllOnly && (outPath.EndsWithCaseInsensitive(sNiDll) ||
outPath.EndsWithCaseInsensitive(sNiExe)))
{
simpleName.Set(outPath, iSimpleNameStart, outPath.End() - 7);
isNativeImage = true;
}
else if (outPath.EndsWithCaseInsensitive(sDll) ||
(!dllOnly && outPath.EndsWithCaseInsensitive(sExe)))
{
simpleName.Set(outPath, iSimpleNameStart, outPath.End() - 4);
}
else
{
// Invalid filename
GO_WITH_HRESULT(E_INVALIDARG);
}
}
Exit:
return hr;
}
};
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// ============================================================
//
// Utils.cpp
//
//
// Implements a bunch of binder auxilary functions
//
// ============================================================
#include "utils.hpp"
#include "strongnameinternal.h"
#include "corpriv.h"
#include "clr/fs/path.h"
using namespace clr::fs;
namespace BINDER_SPACE
{
namespace
{
inline const WCHAR *GetPlatformPathSeparator()
{
#ifdef TARGET_UNIX
return W("/");
#else
return W("\\");
#endif // TARGET_UNIX
}
}
void MutateUrlToPath(SString &urlOrPath)
{
const SString fileUrlPrefix(SString::Literal, W("file://"));
SString::Iterator i = urlOrPath.Begin();
if (urlOrPath.MatchCaseInsensitive(i, fileUrlPrefix))
{
urlOrPath.Delete(i, fileUrlPrefix.GetCount());
i = urlOrPath.Begin() + 1;
if (i[0] == W(':'))
{
// CLR erroneously passes in file:// prepended to file paths,
// so we can't tell the difference between UNC and local file.
goto Exit;
}
i = urlOrPath.Begin();
#if !defined(TARGET_UNIX)
if (i[0] == W('/'))
{
// Disk path file:///
urlOrPath.Delete(i, 1);
}
else if (i[0] != W('\\'))
{
// UNC Path, re-insert "//" if not the wrong file://\\...
urlOrPath.Insert(i, W("//"));
}
#else
// Unix doesn't have a distinction between local and network path
_ASSERTE(i[0] == W('\\') || i[0] == W('/'));
#endif
}
Exit:
while (urlOrPath.Find(i, W('/')))
{
urlOrPath.Replace(i, W('\\'));
}
}
void CombinePath(const SString &pathA,
const SString &pathB,
SString &combinedPath)
{
SString platformPathSeparator(SString::Literal, GetPlatformPathSeparator());
combinedPath.Set(pathA);
if (!combinedPath.IsEmpty() && !combinedPath.EndsWith(platformPathSeparator))
{
combinedPath.Append(platformPathSeparator);
}
combinedPath.Append(pathB);
}
HRESULT GetTokenFromPublicKey(SBuffer &publicKeyBLOB,
SBuffer &publicKeyTokenBLOB)
{
HRESULT hr = S_OK;
const BYTE *pByteKey = publicKeyBLOB;
DWORD dwKeyLen = publicKeyBLOB.GetSize();
BYTE *pByteToken = NULL;
DWORD dwTokenLen = 0;
IF_FAIL_GO(StrongNameTokenFromPublicKey(
const_cast<BYTE*>(pByteKey),
dwKeyLen,
&pByteToken,
&dwTokenLen));
_ASSERTE(pByteToken != NULL);
publicKeyTokenBLOB.Set(pByteToken, dwTokenLen);
StrongNameFreeBuffer(pByteToken);
Exit:
return hr;
}
BOOL IsFileNotFound(HRESULT hr)
{
return RuntimeFileNotFound(hr);
}
HRESULT GetNextPath(const SString& paths, SString::CIterator& startPos, SString& outPath)
{
HRESULT hr = S_OK;
bool wrappedWithQuotes = false;
// Skip any leading spaces or path separators
while (paths.Skip(startPos, W(' ')) || paths.Skip(startPos, PATH_SEPARATOR_CHAR_W)) {}
if (startPos == paths.End())
{
// No more paths in the string and we just skipped over some white space
outPath.Set(W(""));
return S_FALSE;
}
// Support paths being wrapped with quotations
if (paths.Skip(startPos, W('\"')))
{
wrappedWithQuotes = true;
}
SString::CIterator iEnd = startPos; // Where current path ends
SString::CIterator iNext; // Where next path starts
if (wrappedWithQuotes)
{
if (paths.Find(iEnd, W('\"')))
{
iNext = iEnd;
// Find where the next path starts - there should be a path separator right after the closing quotation mark
if (paths.Find(iNext, PATH_SEPARATOR_CHAR_W))
{
iNext++;
}
else
{
iNext = paths.End();
}
}
else
{
// There was no terminating quotation mark - that's bad
GO_WITH_HRESULT(E_INVALIDARG);
}
}
else if (paths.Find(iEnd, PATH_SEPARATOR_CHAR_W))
{
iNext = iEnd + 1;
}
else
{
iNext = iEnd = paths.End();
}
// Skip any trailing spaces
while (iEnd[-1] == W(' '))
{
iEnd--;
}
_ASSERTE(startPos < iEnd);
outPath.Set(paths, startPos, iEnd);
startPos = iNext;
Exit:
return hr;
}
HRESULT GetNextTPAPath(const SString& paths, SString::CIterator& startPos, bool dllOnly, SString& outPath, SString& simpleName, bool& isNativeImage)
{
HRESULT hr = S_OK;
isNativeImage = false;
HRESULT pathResult = S_OK;
IF_FAIL_GO(pathResult = GetNextPath(paths, startPos, outPath));
if (pathResult == S_FALSE)
{
return S_FALSE;
}
if (Path::IsRelative(outPath))
{
GO_WITH_HRESULT(E_INVALIDARG);
}
{
// Find the beginning of the simple name
SString::CIterator iSimpleNameStart = outPath.End();
if (!outPath.FindBack(iSimpleNameStart, DIRECTORY_SEPARATOR_CHAR_W))
{
iSimpleNameStart = outPath.Begin();
}
else
{
// Advance past the directory separator to the first character of the file name
iSimpleNameStart++;
}
if (iSimpleNameStart == outPath.End())
{
GO_WITH_HRESULT(E_INVALIDARG);
}
const SString sNiDll(SString::Literal, W(".ni.dll"));
const SString sNiExe(SString::Literal, W(".ni.exe"));
const SString sDll(SString::Literal, W(".dll"));
const SString sExe(SString::Literal, W(".exe"));
if (!dllOnly && (outPath.EndsWithCaseInsensitive(sNiDll) ||
outPath.EndsWithCaseInsensitive(sNiExe)))
{
simpleName.Set(outPath, iSimpleNameStart, outPath.End() - 7);
isNativeImage = true;
}
else if (outPath.EndsWithCaseInsensitive(sDll) ||
(!dllOnly && outPath.EndsWithCaseInsensitive(sExe)))
{
simpleName.Set(outPath, iSimpleNameStart, outPath.End() - 4);
}
else
{
// Invalid filename
GO_WITH_HRESULT(E_INVALIDARG);
}
}
Exit:
return hr;
}
};
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/tests/palsuite/c_runtime/fwprintf/test19/test19.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test19.c
**
** Purpose: Tests the variable length precision argument.
** This test is modeled after the sprintf series.
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../fwprintf.h"
/*
* Depends on memcmp, strlen, fopen, fseek and fgets.
*/
PALTEST(c_runtime_fwprintf_test19_paltest_fwprintf_test19, "c_runtime/fwprintf/test19/paltest_fwprintf_test19")
{
int n = -1;
if (PAL_Initialize(argc, argv) != 0)
{
return(FAIL);
}
DoArgumentPrecTest(convert("%.*s"), 2, (void*)convert("bar"), "bar", "ba", "ba");
DoArgumentPrecTest(convert("%.*S"), 2, (void*)"bar", "bar", "ba", "ba");
DoArgumentPrecTest(convert("foo %.*n"), 3, (void*)&n, "pointer to int", "foo ",
"foo ");
if (n != 4)
{
Fail("ERROR: Expected count parameter to resolve to %d, got %X\n",
4, n);
}
DoArgumentPrecTest(convert("%.*c"), 0, (void*)'a', "a", "a", "a");
DoArgumentPrecTest(convert("%.*c"), 4, (void*)'a', "a", "a", "a");
DoArgumentPrecTest(convert("%.*C"), 0, (void*)'a', "a", "a", "a");
DoArgumentPrecTest(convert("%.*C"), 4, (void*)'a', "a", "a", "a");
DoArgumentPrecTest(convert("%.*d"), 1, (void*)42, "42", "42", "42");
DoArgumentPrecTest(convert("%.*d"), 3, (void*)42, "42", "042", "042");
DoArgumentPrecTest(convert("%.*i"), 1, (void*)42, "42", "42", "42");
DoArgumentPrecTest(convert("%.*i"), 3, (void*)42, "42", "042", "042");
DoArgumentPrecTest(convert("%.*o"), 1, (void*)42, "42", "52", "52");
DoArgumentPrecTest(convert("%.*o"), 3, (void*)42, "42", "052", "052");
DoArgumentPrecTest(convert("%.*u"), 1, (void*)42, "42", "42", "42");
DoArgumentPrecTest(convert("%.*u"), 3, (void*)42, "42", "042", "042");
DoArgumentPrecTest(convert("%.*x"), 1, (void*)0x42, "0x42", "42", "42");
DoArgumentPrecTest(convert("%.*x"), 3, (void*)0x42, "0x42", "042", "042");
DoArgumentPrecTest(convert("%.*X"), 1, (void*)0x42, "0x42", "42", "42");
DoArgumentPrecTest(convert("%.*X"), 3, (void*)0x42, "0x42", "042", "042");
DoArgumentPrecDoubleTest(convert("%.*e"), 1, 2.01, "2.0e+000", "2.0e+00");
DoArgumentPrecDoubleTest(convert("%.*e"), 3, 2.01, "2.010e+000",
"2.010e+00");
DoArgumentPrecDoubleTest(convert("%.*E"), 1, 2.01, "2.0E+000", "2.0E+00");
DoArgumentPrecDoubleTest(convert("%.*E"), 3, 2.01, "2.010E+000",
"2.010E+00");
DoArgumentPrecDoubleTest(convert("%.*f"), 1, 2.01, "2.0", "2.0");
DoArgumentPrecDoubleTest(convert("%.*f"), 3, 2.01, "2.010", "2.010");
DoArgumentPrecDoubleTest(convert("%.*g"), 1, 256.01, "3e+002", "3e+02");
DoArgumentPrecDoubleTest(convert("%.*g"), 3, 256.01, "256", "256");
DoArgumentPrecDoubleTest(convert("%.*g"), 4, 256.01, "256", "256");
DoArgumentPrecDoubleTest(convert("%.*g"), 6, 256.01, "256.01", "256.01");
DoArgumentPrecDoubleTest(convert("%.*G"), 1, 256.01, "3E+002", "3E+02");
DoArgumentPrecDoubleTest(convert("%.*G"), 3, 256.01, "256", "256");
DoArgumentPrecDoubleTest(convert("%.*G"), 4, 256.01, "256", "256");
DoArgumentPrecDoubleTest(convert("%.*G"), 6, 256.01, "256.01", "256.01");
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test19.c
**
** Purpose: Tests the variable length precision argument.
** This test is modeled after the sprintf series.
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../fwprintf.h"
/*
* Depends on memcmp, strlen, fopen, fseek and fgets.
*/
PALTEST(c_runtime_fwprintf_test19_paltest_fwprintf_test19, "c_runtime/fwprintf/test19/paltest_fwprintf_test19")
{
int n = -1;
if (PAL_Initialize(argc, argv) != 0)
{
return(FAIL);
}
DoArgumentPrecTest(convert("%.*s"), 2, (void*)convert("bar"), "bar", "ba", "ba");
DoArgumentPrecTest(convert("%.*S"), 2, (void*)"bar", "bar", "ba", "ba");
DoArgumentPrecTest(convert("foo %.*n"), 3, (void*)&n, "pointer to int", "foo ",
"foo ");
if (n != 4)
{
Fail("ERROR: Expected count parameter to resolve to %d, got %X\n",
4, n);
}
DoArgumentPrecTest(convert("%.*c"), 0, (void*)'a', "a", "a", "a");
DoArgumentPrecTest(convert("%.*c"), 4, (void*)'a', "a", "a", "a");
DoArgumentPrecTest(convert("%.*C"), 0, (void*)'a', "a", "a", "a");
DoArgumentPrecTest(convert("%.*C"), 4, (void*)'a', "a", "a", "a");
DoArgumentPrecTest(convert("%.*d"), 1, (void*)42, "42", "42", "42");
DoArgumentPrecTest(convert("%.*d"), 3, (void*)42, "42", "042", "042");
DoArgumentPrecTest(convert("%.*i"), 1, (void*)42, "42", "42", "42");
DoArgumentPrecTest(convert("%.*i"), 3, (void*)42, "42", "042", "042");
DoArgumentPrecTest(convert("%.*o"), 1, (void*)42, "42", "52", "52");
DoArgumentPrecTest(convert("%.*o"), 3, (void*)42, "42", "052", "052");
DoArgumentPrecTest(convert("%.*u"), 1, (void*)42, "42", "42", "42");
DoArgumentPrecTest(convert("%.*u"), 3, (void*)42, "42", "042", "042");
DoArgumentPrecTest(convert("%.*x"), 1, (void*)0x42, "0x42", "42", "42");
DoArgumentPrecTest(convert("%.*x"), 3, (void*)0x42, "0x42", "042", "042");
DoArgumentPrecTest(convert("%.*X"), 1, (void*)0x42, "0x42", "42", "42");
DoArgumentPrecTest(convert("%.*X"), 3, (void*)0x42, "0x42", "042", "042");
DoArgumentPrecDoubleTest(convert("%.*e"), 1, 2.01, "2.0e+000", "2.0e+00");
DoArgumentPrecDoubleTest(convert("%.*e"), 3, 2.01, "2.010e+000",
"2.010e+00");
DoArgumentPrecDoubleTest(convert("%.*E"), 1, 2.01, "2.0E+000", "2.0E+00");
DoArgumentPrecDoubleTest(convert("%.*E"), 3, 2.01, "2.010E+000",
"2.010E+00");
DoArgumentPrecDoubleTest(convert("%.*f"), 1, 2.01, "2.0", "2.0");
DoArgumentPrecDoubleTest(convert("%.*f"), 3, 2.01, "2.010", "2.010");
DoArgumentPrecDoubleTest(convert("%.*g"), 1, 256.01, "3e+002", "3e+02");
DoArgumentPrecDoubleTest(convert("%.*g"), 3, 256.01, "256", "256");
DoArgumentPrecDoubleTest(convert("%.*g"), 4, 256.01, "256", "256");
DoArgumentPrecDoubleTest(convert("%.*g"), 6, 256.01, "256.01", "256.01");
DoArgumentPrecDoubleTest(convert("%.*G"), 1, 256.01, "3E+002", "3E+02");
DoArgumentPrecDoubleTest(convert("%.*G"), 3, 256.01, "256", "256");
DoArgumentPrecDoubleTest(convert("%.*G"), 4, 256.01, "256", "256");
DoArgumentPrecDoubleTest(convert("%.*G"), 6, 256.01, "256.01", "256.01");
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/native/libs/System.Security.Cryptography.Native.Apple/pal_sec.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#pragma once
#include "pal_types.h"
#include "pal_compiler.h"
#include <Security/Security.h>
/*
Get an error message for an OSStatus error from the security library.
Returns NULL if no message is available for the code.
*/
PALEXPORT CFStringRef AppleCryptoNative_SecCopyErrorMessageString(OSStatus osStatus);
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#pragma once
#include "pal_types.h"
#include "pal_compiler.h"
#include <Security/Security.h>
/*
Get an error message for an OSStatus error from the security library.
Returns NULL if no message is available for the code.
*/
PALEXPORT CFStringRef AppleCryptoNative_SecCopyErrorMessageString(OSStatus osStatus);
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/src/libunwind_mac/include/freebsd-elf64.h | /*-
* Copyright (c) 1996-1998 John D. Polstra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD: src/sys/sys/elf64.h,v 1.10.14.2.2.1 2008/10/02 02:57:24 kensmith Exp $
*/
#ifndef _SYS_ELF64_H_
#define _SYS_ELF64_H_ 1
#include "freebsd-elf_common.h"
/*
* ELF definitions common to all 64-bit architectures.
*/
typedef uint64_t Elf64_Addr;
typedef uint16_t Elf64_Half;
typedef uint64_t Elf64_Off;
typedef int32_t Elf64_Sword;
typedef int64_t Elf64_Sxword;
typedef uint32_t Elf64_Word;
typedef uint64_t Elf64_Lword;
typedef uint64_t Elf64_Xword;
/*
* Types of dynamic symbol hash table bucket and chain elements.
*
* This is inconsistent among 64 bit architectures, so a machine dependent
* typedef is required.
*/
#ifdef __alpha__
typedef Elf64_Off Elf64_Hashelt;
#else
typedef Elf64_Word Elf64_Hashelt;
#endif
/* Non-standard class-dependent datatype used for abstraction. */
typedef Elf64_Xword Elf64_Size;
typedef Elf64_Sxword Elf64_Ssize;
/*
* ELF header.
*/
typedef struct {
unsigned char e_ident[EI_NIDENT]; /* File identification. */
Elf64_Half e_type; /* File type. */
Elf64_Half e_machine; /* Machine architecture. */
Elf64_Word e_version; /* ELF format version. */
Elf64_Addr e_entry; /* Entry point. */
Elf64_Off e_phoff; /* Program header file offset. */
Elf64_Off e_shoff; /* Section header file offset. */
Elf64_Word e_flags; /* Architecture-specific flags. */
Elf64_Half e_ehsize; /* Size of ELF header in bytes. */
Elf64_Half e_phentsize; /* Size of program header entry. */
Elf64_Half e_phnum; /* Number of program header entries. */
Elf64_Half e_shentsize; /* Size of section header entry. */
Elf64_Half e_shnum; /* Number of section header entries. */
Elf64_Half e_shstrndx; /* Section name strings section. */
} Elf64_Ehdr;
/*
* Section header.
*/
typedef struct {
Elf64_Word sh_name; /* Section name (index into the
section header string table). */
Elf64_Word sh_type; /* Section type. */
Elf64_Xword sh_flags; /* Section flags. */
Elf64_Addr sh_addr; /* Address in memory image. */
Elf64_Off sh_offset; /* Offset in file. */
Elf64_Xword sh_size; /* Size in bytes. */
Elf64_Word sh_link; /* Index of a related section. */
Elf64_Word sh_info; /* Depends on section type. */
Elf64_Xword sh_addralign; /* Alignment in bytes. */
Elf64_Xword sh_entsize; /* Size of each entry in section. */
} Elf64_Shdr;
/*
* Program header.
*/
typedef struct {
Elf64_Word p_type; /* Entry type. */
Elf64_Word p_flags; /* Access permission flags. */
Elf64_Off p_offset; /* File offset of contents. */
Elf64_Addr p_vaddr; /* Virtual address in memory image. */
Elf64_Addr p_paddr; /* Physical address (not used). */
Elf64_Xword p_filesz; /* Size of contents in file. */
Elf64_Xword p_memsz; /* Size of contents in memory. */
Elf64_Xword p_align; /* Alignment in memory and file. */
} Elf64_Phdr;
/*
* Dynamic structure. The ".dynamic" section contains an array of them.
*/
typedef struct {
Elf64_Sxword d_tag; /* Entry type. */
union {
Elf64_Xword d_val; /* Integer value. */
Elf64_Addr d_ptr; /* Address value. */
} d_un;
} Elf64_Dyn;
/*
* Relocation entries.
*/
/* Relocations that don't need an addend field. */
typedef struct {
Elf64_Addr r_offset; /* Location to be relocated. */
Elf64_Xword r_info; /* Relocation type and symbol index. */
} Elf64_Rel;
/* Relocations that need an addend field. */
typedef struct {
Elf64_Addr r_offset; /* Location to be relocated. */
Elf64_Xword r_info; /* Relocation type and symbol index. */
Elf64_Sxword r_addend; /* Addend. */
} Elf64_Rela;
/* Macros for accessing the fields of r_info. */
#define ELF64_R_SYM(info) ((info) >> 32)
#define ELF64_R_TYPE(info) ((info) & 0xffffffffL)
/* Macro for constructing r_info from field values. */
#define ELF64_R_INFO(sym, type) (((sym) << 32) + ((type) & 0xffffffffL))
#define ELF64_R_TYPE_DATA(info) (((Elf64_Xword)(info)<<32)>>40)
#define ELF64_R_TYPE_ID(info) (((Elf64_Xword)(info)<<56)>>56)
#define ELF64_R_TYPE_INFO(data, type) \
(((Elf64_Xword)(data)<<8)+(Elf64_Xword)(type))
/*
* Note entry header
*/
typedef Elf_Note Elf64_Nhdr;
/*
* Move entry
*/
typedef struct {
Elf64_Lword m_value; /* symbol value */
Elf64_Xword m_info; /* size + index */
Elf64_Xword m_poffset; /* symbol offset */
Elf64_Half m_repeat; /* repeat count */
Elf64_Half m_stride; /* stride info */
} Elf64_Move;
#define ELF64_M_SYM(info) ((info)>>8)
#define ELF64_M_SIZE(info) ((unsigned char)(info))
#define ELF64_M_INFO(sym, size) (((sym)<<8)+(unsigned char)(size))
/*
* Hardware/Software capabilities entry
*/
typedef struct {
Elf64_Xword c_tag; /* how to interpret value */
union {
Elf64_Xword c_val;
Elf64_Addr c_ptr;
} c_un;
} Elf64_Cap;
/*
* Symbol table entries.
*/
typedef struct {
Elf64_Word st_name; /* String table index of name. */
unsigned char st_info; /* Type and binding information. */
unsigned char st_other; /* Reserved (not used). */
Elf64_Half st_shndx; /* Section index of symbol. */
Elf64_Addr st_value; /* Symbol value. */
Elf64_Xword st_size; /* Size of associated object. */
} Elf64_Sym;
/* Macros for accessing the fields of st_info. */
#define ELF64_ST_BIND(info) ((info) >> 4)
#define ELF64_ST_TYPE(info) ((info) & 0xf)
/* Macro for constructing st_info from field values. */
#define ELF64_ST_INFO(bind, type) (((bind) << 4) + ((type) & 0xf))
/* Macro for accessing the fields of st_other. */
#define ELF64_ST_VISIBILITY(oth) ((oth) & 0x3)
/* Structures used by Sun & GNU-style symbol versioning. */
typedef struct {
Elf64_Half vd_version;
Elf64_Half vd_flags;
Elf64_Half vd_ndx;
Elf64_Half vd_cnt;
Elf64_Word vd_hash;
Elf64_Word vd_aux;
Elf64_Word vd_next;
} Elf64_Verdef;
typedef struct {
Elf64_Word vda_name;
Elf64_Word vda_next;
} Elf64_Verdaux;
typedef struct {
Elf64_Half vn_version;
Elf64_Half vn_cnt;
Elf64_Word vn_file;
Elf64_Word vn_aux;
Elf64_Word vn_next;
} Elf64_Verneed;
typedef struct {
Elf64_Word vna_hash;
Elf64_Half vna_flags;
Elf64_Half vna_other;
Elf64_Word vna_name;
Elf64_Word vna_next;
} Elf64_Vernaux;
typedef Elf64_Half Elf64_Versym;
typedef struct {
Elf64_Half si_boundto; /* direct bindings - symbol bound to */
Elf64_Half si_flags; /* per symbol flags */
} Elf64_Syminfo;
#endif /* !_SYS_ELF64_H_ */
| /*-
* Copyright (c) 1996-1998 John D. Polstra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD: src/sys/sys/elf64.h,v 1.10.14.2.2.1 2008/10/02 02:57:24 kensmith Exp $
*/
#ifndef _SYS_ELF64_H_
#define _SYS_ELF64_H_ 1
/* NOTE(review): this header appears vendored from FreeBSD sys/elf64.h (see the
 * $FreeBSD$ tag above); prefer syncing with upstream over local edits. */
/* Class-independent constants (EI_NIDENT, Elf_Note, section/segment types)
 * are expected to come from this shared header. */
#include "freebsd-elf_common.h"
/*
 * ELF definitions common to all 64-bit architectures.
 */
/* Fixed-width scalar types mandated by the ELF-64 object file format. */
typedef uint64_t Elf64_Addr;
typedef uint16_t Elf64_Half;
typedef uint64_t Elf64_Off;
typedef int32_t Elf64_Sword;
typedef int64_t Elf64_Sxword;
typedef uint32_t Elf64_Word;
typedef uint64_t Elf64_Lword;
typedef uint64_t Elf64_Xword;
/*
 * Types of dynamic symbol hash table bucket and chain elements.
 *
 * This is inconsistent among 64 bit architectures, so a machine dependent
 * typedef is required.
 */
#ifdef __alpha__
typedef Elf64_Off Elf64_Hashelt;
#else
typedef Elf64_Word Elf64_Hashelt;
#endif
/* Non-standard class-dependent datatype used for abstraction. */
typedef Elf64_Xword Elf64_Size;
typedef Elf64_Sxword Elf64_Ssize;
/*
 * ELF header.
 */
typedef struct {
    unsigned char e_ident[EI_NIDENT]; /* File identification. */
    Elf64_Half e_type; /* File type. */
    Elf64_Half e_machine; /* Machine architecture. */
    Elf64_Word e_version; /* ELF format version. */
    Elf64_Addr e_entry; /* Entry point. */
    Elf64_Off e_phoff; /* Program header file offset. */
    Elf64_Off e_shoff; /* Section header file offset. */
    Elf64_Word e_flags; /* Architecture-specific flags. */
    Elf64_Half e_ehsize; /* Size of ELF header in bytes. */
    Elf64_Half e_phentsize; /* Size of program header entry. */
    Elf64_Half e_phnum; /* Number of program header entries. */
    Elf64_Half e_shentsize; /* Size of section header entry. */
    Elf64_Half e_shnum; /* Number of section header entries. */
    Elf64_Half e_shstrndx; /* Section name strings section. */
} Elf64_Ehdr;
/*
 * Section header.
 */
typedef struct {
    Elf64_Word sh_name; /* Section name (index into the
       section header string table). */
    Elf64_Word sh_type; /* Section type. */
    Elf64_Xword sh_flags; /* Section flags. */
    Elf64_Addr sh_addr; /* Address in memory image. */
    Elf64_Off sh_offset; /* Offset in file. */
    Elf64_Xword sh_size; /* Size in bytes. */
    Elf64_Word sh_link; /* Index of a related section. */
    Elf64_Word sh_info; /* Depends on section type. */
    Elf64_Xword sh_addralign; /* Alignment in bytes. */
    Elf64_Xword sh_entsize; /* Size of each entry in section. */
} Elf64_Shdr;
/*
 * Program header.
 */
typedef struct {
    Elf64_Word p_type; /* Entry type. */
    Elf64_Word p_flags; /* Access permission flags. */
    Elf64_Off p_offset; /* File offset of contents. */
    Elf64_Addr p_vaddr; /* Virtual address in memory image. */
    Elf64_Addr p_paddr; /* Physical address (not used). */
    Elf64_Xword p_filesz; /* Size of contents in file. */
    Elf64_Xword p_memsz; /* Size of contents in memory. */
    Elf64_Xword p_align; /* Alignment in memory and file. */
} Elf64_Phdr;
/*
 * Dynamic structure. The ".dynamic" section contains an array of them.
 */
typedef struct {
    Elf64_Sxword d_tag; /* Entry type. */
    union {
        Elf64_Xword d_val; /* Integer value. */
        Elf64_Addr d_ptr; /* Address value. */
    } d_un;
} Elf64_Dyn;
/*
 * Relocation entries.
 */
/* Relocations that don't need an addend field. */
typedef struct {
    Elf64_Addr r_offset; /* Location to be relocated. */
    Elf64_Xword r_info; /* Relocation type and symbol index. */
} Elf64_Rel;
/* Relocations that need an addend field. */
typedef struct {
    Elf64_Addr r_offset; /* Location to be relocated. */
    Elf64_Xword r_info; /* Relocation type and symbol index. */
    Elf64_Sxword r_addend; /* Addend. */
} Elf64_Rela;
/* Macros for accessing the fields of r_info. */
/* r_info layout: symbol table index in bits 63..32, relocation type in
 * bits 31..0. */
#define ELF64_R_SYM(info) ((info) >> 32)
#define ELF64_R_TYPE(info) ((info) & 0xffffffffL)
/* Macro for constructing r_info from field values. */
#define ELF64_R_INFO(sym, type) (((sym) << 32) + ((type) & 0xffffffffL))
/* Finer-grained split of the low 32 bits of r_info: a 24-bit "data" field
 * (bits 31..8) and an 8-bit "id" field (bits 7..0). */
#define ELF64_R_TYPE_DATA(info) (((Elf64_Xword)(info)<<32)>>40)
#define ELF64_R_TYPE_ID(info) (((Elf64_Xword)(info)<<56)>>56)
#define ELF64_R_TYPE_INFO(data, type) \
    (((Elf64_Xword)(data)<<8)+(Elf64_Xword)(type))
/*
 * Note entry header
 */
typedef Elf_Note Elf64_Nhdr;
/*
 * Move entry
 */
typedef struct {
    Elf64_Lword m_value; /* symbol value */
    Elf64_Xword m_info; /* size + index */
    Elf64_Xword m_poffset; /* symbol offset */
    Elf64_Half m_repeat; /* repeat count */
    Elf64_Half m_stride; /* stride info */
} Elf64_Move;
/* m_info packs the symbol index (bits 63..8) and the entry size (low 8 bits). */
#define ELF64_M_SYM(info) ((info)>>8)
#define ELF64_M_SIZE(info) ((unsigned char)(info))
#define ELF64_M_INFO(sym, size) (((sym)<<8)+(unsigned char)(size))
/*
 * Hardware/Software capabilities entry
 */
typedef struct {
    Elf64_Xword c_tag; /* how to interpret value */
    union {
        Elf64_Xword c_val;
        Elf64_Addr c_ptr;
    } c_un;
} Elf64_Cap;
/*
 * Symbol table entries.
 */
typedef struct {
    Elf64_Word st_name; /* String table index of name. */
    unsigned char st_info; /* Type and binding information. */
    unsigned char st_other; /* Reserved (not used). */
    Elf64_Half st_shndx; /* Section index of symbol. */
    Elf64_Addr st_value; /* Symbol value. */
    Elf64_Xword st_size; /* Size of associated object. */
} Elf64_Sym;
/* Macros for accessing the fields of st_info. */
/* st_info: binding in the high nibble, type in the low nibble. */
#define ELF64_ST_BIND(info) ((info) >> 4)
#define ELF64_ST_TYPE(info) ((info) & 0xf)
/* Macro for constructing st_info from field values. */
#define ELF64_ST_INFO(bind, type) (((bind) << 4) + ((type) & 0xf))
/* Macro for accessing the fields of st_other. */
#define ELF64_ST_VISIBILITY(oth) ((oth) & 0x3)
/* Structures used by Sun & GNU-style symbol versioning. */
typedef struct {
    Elf64_Half vd_version;
    Elf64_Half vd_flags;
    Elf64_Half vd_ndx;
    Elf64_Half vd_cnt;
    Elf64_Word vd_hash;
    Elf64_Word vd_aux;
    Elf64_Word vd_next;
} Elf64_Verdef;
typedef struct {
    Elf64_Word vda_name;
    Elf64_Word vda_next;
} Elf64_Verdaux;
typedef struct {
    Elf64_Half vn_version;
    Elf64_Half vn_cnt;
    Elf64_Word vn_file;
    Elf64_Word vn_aux;
    Elf64_Word vn_next;
} Elf64_Verneed;
typedef struct {
    Elf64_Word vna_hash;
    Elf64_Half vna_flags;
    Elf64_Half vna_other;
    Elf64_Word vna_name;
    Elf64_Word vna_next;
} Elf64_Vernaux;
typedef Elf64_Half Elf64_Versym;
typedef struct {
    Elf64_Half si_boundto; /* direct bindings - symbol bound to */
    Elf64_Half si_flags; /* per symbol flags */
} Elf64_Syminfo;
#endif /* !_SYS_ELF64_H_ */
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/debug/createdump/threadinfounix.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "createdump.h"
#if defined(__aarch64__)
// Accessors for the aarch64 user_regs_struct fields by their ABI roles
// (x29 = frame pointer, x30 = link register).
// See src/pal/src/include/pal/context.h
#define MCREG_Fp(mc) ((mc).regs[29])
#define MCREG_Lr(mc) ((mc).regs[30])
#define MCREG_Sp(mc) ((mc).sp)
#define MCREG_Pc(mc) ((mc).pc)
#define MCREG_Cpsr(mc) ((mc).pstate)
#endif
#ifndef THUMB_CODE
// Low bit of an ARM address that marks Thumb-mode code.
#define THUMB_CODE 1
#endif
#ifndef __GLIBC__
// Non-glibc C libraries (e.g. musl) declare ptrace's request as a plain int;
// provide the glibc enum name so the casts below compile everywhere.
typedef int __ptrace_request;
#endif
// Implemented elsewhere in createdump; fills pid/tgid (and optionally the
// thread name) for the given thread id.
bool GetStatus(pid_t pid, pid_t* ppid, pid_t* tgid, std::string* name);
// Binds this thread record to its owning CrashInfo and thread id. All other
// state starts zeroed/false and is populated later (Initialize() fills the
// pid/tgid and register snapshot).
ThreadInfo::ThreadInfo(CrashInfo& crashInfo, pid_t tid) :
    m_crashInfo(crashInfo),
    m_tid(tid),
    m_ppid(0),
    m_tgid(0),
    m_managed(false),
    m_exceptionObject(0),
    m_exceptionHResult(0),
    m_repeatedFrames(0)
{
    // No repeated-frame window yet: both markers point at end() of the
    // (still empty) frame list.
    m_endRepeat = m_frames.end();
    m_beginRepeat = m_endRepeat;
}
// Nothing to release explicitly; member containers clean up themselves.
ThreadInfo::~ThreadInfo()
{
}
// Populates this thread's identity (ppid/tgid via GetStatus) and its full
// register snapshot (via ptrace). Returns false if either query fails.
bool
ThreadInfo::Initialize()
{
    // Short-circuit: registers are only read when the status query succeeded.
    if (!GetStatus(m_tid, &m_ppid, &m_tgid, nullptr) || !GetRegistersWithPTrace())
    {
        return false;
    }
#if defined(__aarch64__)
    TRACE("Thread %04x PC %016llx SP %016llx\n", m_tid, (unsigned long long)MCREG_Pc(m_gpRegisters), (unsigned long long)MCREG_Sp(m_gpRegisters));
#elif defined(__arm__)
    TRACE("Thread %04x PC %08lx SP %08lx\n", m_tid, (unsigned long)m_gpRegisters.ARM_pc, (unsigned long)m_gpRegisters.ARM_sp);
#elif defined(__x86_64__)
    TRACE("Thread %04x RIP %016llx RSP %016llx\n", m_tid, (unsigned long long)m_gpRegisters.rip, (unsigned long long)m_gpRegisters.rsp);
#else
#error "Unsupported architecture"
#endif
    return true;
}
// Snapshots this thread's register state with PTRACE_GETREGSET (plus the
// legacy arch-specific requests where needed). The target is expected to be
// stopped under ptrace already. Returns false on any fatal ptrace failure.
bool
ThreadInfo::GetRegistersWithPTrace()
{
    // General-purpose registers (NT_PRSTATUS note type).
    struct iovec gpIov = { &m_gpRegisters, sizeof(m_gpRegisters) };
    if (ptrace((__ptrace_request)PTRACE_GETREGSET, m_tid, NT_PRSTATUS, &gpIov) == -1)
    {
        fprintf(stderr, "ptrace(PTRACE_GETREGSET, %d, NT_PRSTATUS) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
        return false;
    }
    assert(sizeof(m_gpRegisters) == gpIov.iov_len);

    // Floating-point registers (NT_FPREGSET note type).
    struct iovec fpIov = { &m_fpRegisters, sizeof(m_fpRegisters) };
    if (ptrace((__ptrace_request)PTRACE_GETREGSET, m_tid, NT_FPREGSET, &fpIov) == -1)
    {
#if defined(__arm__)
        // Some aarch64 kernels may not support NT_FPREGSET for arm processes. We treat this failure as non-fatal.
#else
        fprintf(stderr, "ptrace(PTRACE_GETREGSET, %d, NT_FPREGSET) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
        return false;
#endif
    }
    assert(sizeof(m_fpRegisters) == fpIov.iov_len);

#if defined(__i386__)
    // x87/SSE extended state on x86.
    if (ptrace((__ptrace_request)PTRACE_GETFPXREGS, m_tid, nullptr, &m_fpxRegisters) == -1)
    {
        fprintf(stderr, "ptrace(GETFPXREGS, %d) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
        return false;
    }
#elif defined(__arm__) && defined(__VFP_FP__) && !defined(__SOFTFP__)
    // VFP register bank on hard-float arm builds.
#if defined(ARM_VFPREGS_SIZE)
    assert(sizeof(m_vfpRegisters) == ARM_VFPREGS_SIZE);
#endif
    if (ptrace((__ptrace_request)PTRACE_GETVFPREGS, m_tid, nullptr, &m_vfpRegisters) == -1)
    {
        fprintf(stderr, "ptrace(PTRACE_GETVFPREGS, %d) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
        return false;
    }
#endif
    return true;
}
// Converts the ptrace-captured register snapshot into a Windows-style CONTEXT
// record, copying only the register groups requested by 'flags'
// (CONTEXT_CONTROL / CONTEXT_INTEGER / CONTEXT_SEGMENTS / CONTEXT_FLOATING_POINT).
void
ThreadInfo::GetThreadContext(uint32_t flags, CONTEXT* context) const
{
    context->ContextFlags = flags;
#if defined(__x86_64__)
    // Control registers: frame/instruction/stack pointers, flags, cs/ss.
    if ((flags & CONTEXT_CONTROL) == CONTEXT_CONTROL)
    {
        context->Rbp = m_gpRegisters.rbp;
        context->Rip = m_gpRegisters.rip;
        context->SegCs = m_gpRegisters.cs;
        context->EFlags = m_gpRegisters.eflags;
        context->SegSs = m_gpRegisters.ss;
        context->Rsp = m_gpRegisters.rsp;
    }
    // General-purpose integer registers.
    if ((flags & CONTEXT_INTEGER) == CONTEXT_INTEGER)
    {
        context->Rdi = m_gpRegisters.rdi;
        context->Rsi = m_gpRegisters.rsi;
        context->Rbx = m_gpRegisters.rbx;
        context->Rdx = m_gpRegisters.rdx;
        context->Rcx = m_gpRegisters.rcx;
        context->Rax = m_gpRegisters.rax;
        context->R8 = m_gpRegisters.r8;
        context->R9 = m_gpRegisters.r9;
        context->R10 = m_gpRegisters.r10;
        context->R11 = m_gpRegisters.r11;
        context->R12 = m_gpRegisters.r12;
        context->R13 = m_gpRegisters.r13;
        context->R14 = m_gpRegisters.r14;
        context->R15 = m_gpRegisters.r15;
    }
    // Remaining segment registers.
    if ((flags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS)
    {
        context->SegDs = m_gpRegisters.ds;
        context->SegEs = m_gpRegisters.es;
        context->SegFs = m_gpRegisters.fs;
        context->SegGs = m_gpRegisters.gs;
    }
    // x87/MMX and SSE state copied into the FXSAVE-layout FltSave area.
    if ((flags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT)
    {
        context->FltSave.ControlWord = m_fpRegisters.cwd;
        context->FltSave.StatusWord = m_fpRegisters.swd;
        context->FltSave.TagWord = m_fpRegisters.ftw;
        context->FltSave.ErrorOpcode = m_fpRegisters.fop;
        context->FltSave.ErrorOffset = FPREG_ErrorOffset(m_fpRegisters);
        context->FltSave.ErrorSelector = FPREG_ErrorSelector(m_fpRegisters);
        context->FltSave.DataOffset = FPREG_DataOffset(m_fpRegisters);
        context->FltSave.DataSelector = FPREG_DataSelector(m_fpRegisters);
        context->FltSave.MxCsr = m_fpRegisters.mxcsr;
        context->FltSave.MxCsr_Mask = m_fpRegisters.mxcr_mask;
        assert(sizeof(context->FltSave.FloatRegisters) == sizeof(m_fpRegisters.st_space));
        memcpy(context->FltSave.FloatRegisters, m_fpRegisters.st_space, sizeof(context->FltSave.FloatRegisters));
        assert(sizeof(context->FltSave.XmmRegisters) == sizeof(m_fpRegisters.xmm_space));
        memcpy(context->FltSave.XmmRegisters, m_fpRegisters.xmm_space, sizeof(context->FltSave.XmmRegisters));
    }
    // TODO: debug registers?
#elif defined(__aarch64__)
    // fp (x29), lr (x30), sp, pc and pstate via the MCREG_* accessors.
    if ((flags & CONTEXT_CONTROL) == CONTEXT_CONTROL)
    {
        context->Fp = MCREG_Fp(m_gpRegisters);
        context->Lr = MCREG_Lr(m_gpRegisters);
        context->Sp = MCREG_Sp(m_gpRegisters);
        context->Pc = MCREG_Pc(m_gpRegisters);
        context->Cpsr = MCREG_Cpsr(m_gpRegisters);
    }
    // regs[] holds x0..x28 plus fp/lr; only x0..x28 go into context->X.
    if ((flags & CONTEXT_INTEGER) == CONTEXT_INTEGER)
    {
        assert(sizeof(m_gpRegisters.regs) == (sizeof(context->X) + sizeof(context->Fp) + sizeof(context->Lr)));
        memcpy(context->X, m_gpRegisters.regs, sizeof(context->X));
    }
    // NEON/FP vector registers and control/status words.
    if ((flags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT)
    {
        assert(sizeof(m_fpRegisters.vregs) == sizeof(context->V));
        memcpy(context->V, m_fpRegisters.vregs, sizeof(context->V));
        context->Fpcr = m_fpRegisters.fpcr;
        context->Fpsr = m_fpRegisters.fpsr;
    }
#elif defined(__arm__)
    if ((flags & CONTEXT_CONTROL) == CONTEXT_CONTROL)
    {
        context->Sp = m_gpRegisters.ARM_sp;
        context->Lr = m_gpRegisters.ARM_lr;
        context->Pc = m_gpRegisters.ARM_pc;
        context->Cpsr = m_gpRegisters.ARM_cpsr;
    }
    // r0-r10 plus fp (r11) and ip (r12).
    if ((flags & CONTEXT_INTEGER) == CONTEXT_INTEGER)
    {
        context->R0 = m_gpRegisters.ARM_r0;
        context->R1 = m_gpRegisters.ARM_r1;
        context->R2 = m_gpRegisters.ARM_r2;
        context->R3 = m_gpRegisters.ARM_r3;
        context->R4 = m_gpRegisters.ARM_r4;
        context->R5 = m_gpRegisters.ARM_r5;
        context->R6 = m_gpRegisters.ARM_r6;
        context->R7 = m_gpRegisters.ARM_r7;
        context->R8 = m_gpRegisters.ARM_r8;
        context->R9 = m_gpRegisters.ARM_r9;
        context->R10 = m_gpRegisters.ARM_r10;
        context->R11 = m_gpRegisters.ARM_fp;
        context->R12 = m_gpRegisters.ARM_ip;
    }
    // VFP state is only captured on hard-float builds (see
    // GetRegistersWithPTrace); soft-float builds leave this group empty.
    if ((flags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT)
    {
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
        context->Fpscr = m_vfpRegisters.fpscr;
        assert(sizeof(context->D) == sizeof(m_vfpRegisters.fpregs));
        memcpy(context->D, m_vfpRegisters.fpregs, sizeof(context->D));
#endif
    }
#else
#error Platform not supported
#endif
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "createdump.h"
#if defined(__aarch64__)
// Accessors for the aarch64 user_regs_struct fields by their ABI roles
// (x29 = frame pointer, x30 = link register).
// See src/pal/src/include/pal/context.h
#define MCREG_Fp(mc) ((mc).regs[29])
#define MCREG_Lr(mc) ((mc).regs[30])
#define MCREG_Sp(mc) ((mc).sp)
#define MCREG_Pc(mc) ((mc).pc)
#define MCREG_Cpsr(mc) ((mc).pstate)
#endif
#ifndef THUMB_CODE
// Low bit of an ARM address that marks Thumb-mode code.
#define THUMB_CODE 1
#endif
#ifndef __GLIBC__
// Non-glibc C libraries (e.g. musl) declare ptrace's request as a plain int;
// provide the glibc enum name so the casts below compile everywhere.
typedef int __ptrace_request;
#endif
// Implemented elsewhere in createdump; fills pid/tgid (and optionally the
// thread name) for the given thread id.
bool GetStatus(pid_t pid, pid_t* ppid, pid_t* tgid, std::string* name);
// Binds this thread record to its owning CrashInfo and thread id. All other
// state starts zeroed/false and is populated later (Initialize() fills the
// pid/tgid and register snapshot).
ThreadInfo::ThreadInfo(CrashInfo& crashInfo, pid_t tid) :
    m_crashInfo(crashInfo),
    m_tid(tid),
    m_ppid(0),
    m_tgid(0),
    m_managed(false),
    m_exceptionObject(0),
    m_exceptionHResult(0),
    m_repeatedFrames(0)
{
    // No repeated-frame window yet: both markers point at end() of the
    // (still empty) frame list.
    m_endRepeat = m_frames.end();
    m_beginRepeat = m_endRepeat;
}
// Nothing to release explicitly; member containers clean up themselves.
ThreadInfo::~ThreadInfo()
{
}
// Populates this thread's identity (ppid/tgid via GetStatus) and its full
// register snapshot (via ptrace). Returns false if either query fails.
bool
ThreadInfo::Initialize()
{
    // Short-circuit: registers are only read when the status query succeeded.
    if (!GetStatus(m_tid, &m_ppid, &m_tgid, nullptr) || !GetRegistersWithPTrace())
    {
        return false;
    }
#if defined(__aarch64__)
    TRACE("Thread %04x PC %016llx SP %016llx\n", m_tid, (unsigned long long)MCREG_Pc(m_gpRegisters), (unsigned long long)MCREG_Sp(m_gpRegisters));
#elif defined(__arm__)
    TRACE("Thread %04x PC %08lx SP %08lx\n", m_tid, (unsigned long)m_gpRegisters.ARM_pc, (unsigned long)m_gpRegisters.ARM_sp);
#elif defined(__x86_64__)
    TRACE("Thread %04x RIP %016llx RSP %016llx\n", m_tid, (unsigned long long)m_gpRegisters.rip, (unsigned long long)m_gpRegisters.rsp);
#else
#error "Unsupported architecture"
#endif
    return true;
}
// Snapshots this thread's register state with PTRACE_GETREGSET (plus the
// legacy arch-specific requests where needed). The target is expected to be
// stopped under ptrace already. Returns false on any fatal ptrace failure.
bool
ThreadInfo::GetRegistersWithPTrace()
{
    // General-purpose registers (NT_PRSTATUS note type).
    struct iovec gpIov = { &m_gpRegisters, sizeof(m_gpRegisters) };
    if (ptrace((__ptrace_request)PTRACE_GETREGSET, m_tid, NT_PRSTATUS, &gpIov) == -1)
    {
        fprintf(stderr, "ptrace(PTRACE_GETREGSET, %d, NT_PRSTATUS) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
        return false;
    }
    assert(sizeof(m_gpRegisters) == gpIov.iov_len);

    // Floating-point registers (NT_FPREGSET note type).
    struct iovec fpIov = { &m_fpRegisters, sizeof(m_fpRegisters) };
    if (ptrace((__ptrace_request)PTRACE_GETREGSET, m_tid, NT_FPREGSET, &fpIov) == -1)
    {
#if defined(__arm__)
        // Some aarch64 kernels may not support NT_FPREGSET for arm processes. We treat this failure as non-fatal.
#else
        fprintf(stderr, "ptrace(PTRACE_GETREGSET, %d, NT_FPREGSET) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
        return false;
#endif
    }
    assert(sizeof(m_fpRegisters) == fpIov.iov_len);

#if defined(__i386__)
    // x87/SSE extended state on x86.
    if (ptrace((__ptrace_request)PTRACE_GETFPXREGS, m_tid, nullptr, &m_fpxRegisters) == -1)
    {
        fprintf(stderr, "ptrace(GETFPXREGS, %d) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
        return false;
    }
#elif defined(__arm__) && defined(__VFP_FP__) && !defined(__SOFTFP__)
    // VFP register bank on hard-float arm builds.
#if defined(ARM_VFPREGS_SIZE)
    assert(sizeof(m_vfpRegisters) == ARM_VFPREGS_SIZE);
#endif
    if (ptrace((__ptrace_request)PTRACE_GETVFPREGS, m_tid, nullptr, &m_vfpRegisters) == -1)
    {
        fprintf(stderr, "ptrace(PTRACE_GETVFPREGS, %d) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
        return false;
    }
#endif
    return true;
}
// Converts the ptrace-captured register snapshot into a Windows-style CONTEXT
// record, copying only the register groups requested by 'flags'
// (CONTEXT_CONTROL / CONTEXT_INTEGER / CONTEXT_SEGMENTS / CONTEXT_FLOATING_POINT).
void
ThreadInfo::GetThreadContext(uint32_t flags, CONTEXT* context) const
{
    context->ContextFlags = flags;
#if defined(__x86_64__)
    // Control registers: frame/instruction/stack pointers, flags, cs/ss.
    if ((flags & CONTEXT_CONTROL) == CONTEXT_CONTROL)
    {
        context->Rbp = m_gpRegisters.rbp;
        context->Rip = m_gpRegisters.rip;
        context->SegCs = m_gpRegisters.cs;
        context->EFlags = m_gpRegisters.eflags;
        context->SegSs = m_gpRegisters.ss;
        context->Rsp = m_gpRegisters.rsp;
    }
    // General-purpose integer registers.
    if ((flags & CONTEXT_INTEGER) == CONTEXT_INTEGER)
    {
        context->Rdi = m_gpRegisters.rdi;
        context->Rsi = m_gpRegisters.rsi;
        context->Rbx = m_gpRegisters.rbx;
        context->Rdx = m_gpRegisters.rdx;
        context->Rcx = m_gpRegisters.rcx;
        context->Rax = m_gpRegisters.rax;
        context->R8 = m_gpRegisters.r8;
        context->R9 = m_gpRegisters.r9;
        context->R10 = m_gpRegisters.r10;
        context->R11 = m_gpRegisters.r11;
        context->R12 = m_gpRegisters.r12;
        context->R13 = m_gpRegisters.r13;
        context->R14 = m_gpRegisters.r14;
        context->R15 = m_gpRegisters.r15;
    }
    // Remaining segment registers.
    if ((flags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS)
    {
        context->SegDs = m_gpRegisters.ds;
        context->SegEs = m_gpRegisters.es;
        context->SegFs = m_gpRegisters.fs;
        context->SegGs = m_gpRegisters.gs;
    }
    // x87/MMX and SSE state copied into the FXSAVE-layout FltSave area.
    if ((flags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT)
    {
        context->FltSave.ControlWord = m_fpRegisters.cwd;
        context->FltSave.StatusWord = m_fpRegisters.swd;
        context->FltSave.TagWord = m_fpRegisters.ftw;
        context->FltSave.ErrorOpcode = m_fpRegisters.fop;
        context->FltSave.ErrorOffset = FPREG_ErrorOffset(m_fpRegisters);
        context->FltSave.ErrorSelector = FPREG_ErrorSelector(m_fpRegisters);
        context->FltSave.DataOffset = FPREG_DataOffset(m_fpRegisters);
        context->FltSave.DataSelector = FPREG_DataSelector(m_fpRegisters);
        context->FltSave.MxCsr = m_fpRegisters.mxcsr;
        context->FltSave.MxCsr_Mask = m_fpRegisters.mxcr_mask;
        assert(sizeof(context->FltSave.FloatRegisters) == sizeof(m_fpRegisters.st_space));
        memcpy(context->FltSave.FloatRegisters, m_fpRegisters.st_space, sizeof(context->FltSave.FloatRegisters));
        assert(sizeof(context->FltSave.XmmRegisters) == sizeof(m_fpRegisters.xmm_space));
        memcpy(context->FltSave.XmmRegisters, m_fpRegisters.xmm_space, sizeof(context->FltSave.XmmRegisters));
    }
    // TODO: debug registers?
#elif defined(__aarch64__)
    // fp (x29), lr (x30), sp, pc and pstate via the MCREG_* accessors.
    if ((flags & CONTEXT_CONTROL) == CONTEXT_CONTROL)
    {
        context->Fp = MCREG_Fp(m_gpRegisters);
        context->Lr = MCREG_Lr(m_gpRegisters);
        context->Sp = MCREG_Sp(m_gpRegisters);
        context->Pc = MCREG_Pc(m_gpRegisters);
        context->Cpsr = MCREG_Cpsr(m_gpRegisters);
    }
    // regs[] holds x0..x28 plus fp/lr; only x0..x28 go into context->X.
    if ((flags & CONTEXT_INTEGER) == CONTEXT_INTEGER)
    {
        assert(sizeof(m_gpRegisters.regs) == (sizeof(context->X) + sizeof(context->Fp) + sizeof(context->Lr)));
        memcpy(context->X, m_gpRegisters.regs, sizeof(context->X));
    }
    // NEON/FP vector registers and control/status words.
    if ((flags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT)
    {
        assert(sizeof(m_fpRegisters.vregs) == sizeof(context->V));
        memcpy(context->V, m_fpRegisters.vregs, sizeof(context->V));
        context->Fpcr = m_fpRegisters.fpcr;
        context->Fpsr = m_fpRegisters.fpsr;
    }
#elif defined(__arm__)
    if ((flags & CONTEXT_CONTROL) == CONTEXT_CONTROL)
    {
        context->Sp = m_gpRegisters.ARM_sp;
        context->Lr = m_gpRegisters.ARM_lr;
        context->Pc = m_gpRegisters.ARM_pc;
        context->Cpsr = m_gpRegisters.ARM_cpsr;
    }
    // r0-r10 plus fp (r11) and ip (r12).
    if ((flags & CONTEXT_INTEGER) == CONTEXT_INTEGER)
    {
        context->R0 = m_gpRegisters.ARM_r0;
        context->R1 = m_gpRegisters.ARM_r1;
        context->R2 = m_gpRegisters.ARM_r2;
        context->R3 = m_gpRegisters.ARM_r3;
        context->R4 = m_gpRegisters.ARM_r4;
        context->R5 = m_gpRegisters.ARM_r5;
        context->R6 = m_gpRegisters.ARM_r6;
        context->R7 = m_gpRegisters.ARM_r7;
        context->R8 = m_gpRegisters.ARM_r8;
        context->R9 = m_gpRegisters.ARM_r9;
        context->R10 = m_gpRegisters.ARM_r10;
        context->R11 = m_gpRegisters.ARM_fp;
        context->R12 = m_gpRegisters.ARM_ip;
    }
    // VFP state is only captured on hard-float builds (see
    // GetRegistersWithPTrace); soft-float builds leave this group empty.
    if ((flags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT)
    {
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
        context->Fpscr = m_vfpRegisters.fpscr;
        assert(sizeof(context->D) == sizeof(m_vfpRegisters.fpregs));
        memcpy(context->D, m_vfpRegisters.fpregs, sizeof(context->D));
#endif
    }
#else
#error Platform not supported
#endif
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/vm/ildump.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// X-macro opcode table: the including file defines IL_OPCODE / IL_OPCODE_EXT
// and this header expands one row per CIL instruction. Columns are:
//   1. opcode byte (IL_OPCODE_EXT rows are the two-byte 0xfe-prefixed forms),
//   2. mnemonic (padded so dump output lines up),
//   3. size of the inline operand in bytes (0 = no operand),
//   4. type used to read the operand (ILDUMP_VOID when there is none),
//   5. printf format used to print the operand,
//   6. extra flag consumed by the expansion; it is set for call/calli/ldobj/
//      newobj/ldflda/stobj/ldtoken/ldftn -- NOTE(review): its exact meaning
//      depends on the IL_OPCODE macro definition in the including file; confirm
//      there before relying on it.
#define ILDUMP_VOID BYTE
IL_OPCODE(0x00, "nop                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x01, "break                    ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x02, "ldarg.0                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x03, "ldarg.1                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x04, "ldarg.2                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x05, "ldarg.3                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x06, "ldloc.0                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x07, "ldloc.1                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x08, "ldloc.2                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x09, "ldloc.3                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x0a, "stloc.0                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x0b, "stloc.1                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x0c, "stloc.2                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x0d, "stloc.3                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x0e, "ldarg.s                  ", 1, BYTE, "%d", 0)
IL_OPCODE(0x0f, "ldarga.s                 ", 1, BYTE, "%d", 0)
IL_OPCODE(0x10, "starg.s                  ", 1, BYTE, "%d", 0)
IL_OPCODE(0x11, "ldloc.s                  ", 1, BYTE, "%d", 0)
IL_OPCODE(0x12, "ldloca.s                 ", 1, BYTE, "%d", 0)
IL_OPCODE(0x13, "stloc.s                  ", 1, BYTE, "%d", 0)
IL_OPCODE(0x14, "ldnull                   ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x15, "ldc.i4.m1                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x16, "ldc.i4.0                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x17, "ldc.i4.1                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x18, "ldc.i4.2                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x19, "ldc.i4.3                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x1a, "ldc.i4.4                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x1b, "ldc.i4.5                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x1c, "ldc.i4.6                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x1d, "ldc.i4.7                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x1e, "ldc.i4.8                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x1f, "ldc.i4.s                 ", 1, BYTE, "%d", 0)
IL_OPCODE(0x20, "ldc.i4                   ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x21, "ldc.i8                   ", 8, UNALIGNED INT64, "0x%I64x", 0)
IL_OPCODE(0x22, "ldc.r4                   ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x23, "ldc.r8                   ", 8, UNALIGNED INT64, "0x%I64x", 0)
IL_OPCODE(0x25, "dup                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x26, "pop                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x27, "jmp                      ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x28, "call                     ", 4, UNALIGNED INT32, "0x%08x", 1)
IL_OPCODE(0x29, "calli                    ", 4, UNALIGNED INT32, "0x%08x", 1)
IL_OPCODE(0x2a, "ret                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x2b, "br.s                     ", 1, BYTE, "%d", 0)
IL_OPCODE(0x2c, "brfalse.s                ", 1, BYTE, "%d", 0)
IL_OPCODE(0x2d, "brtrue.s                 ", 1, BYTE, "%d", 0)
IL_OPCODE(0x2e, "beq.s                    ", 1, BYTE, "%d", 0)
IL_OPCODE(0x2f, "bge.s                    ", 1, BYTE, "%d", 0)
IL_OPCODE(0x30, "bgt.s                    ", 1, BYTE, "%d", 0)
IL_OPCODE(0x31, "ble.s                    ", 1, BYTE, "%d", 0)
IL_OPCODE(0x32, "blt.s                    ", 1, BYTE, "%d", 0)
IL_OPCODE(0x33, "bne.un.s                 ", 1, BYTE, "%d", 0)
IL_OPCODE(0x34, "bge.un.s                 ", 1, BYTE, "%d", 0)
IL_OPCODE(0x35, "bgt.un.s                 ", 1, BYTE, "%d", 0)
IL_OPCODE(0x36, "ble.un.s                 ", 1, BYTE, "%d", 0)
IL_OPCODE(0x37, "blt.un.s                 ", 1, BYTE, "%d", 0)
IL_OPCODE(0x38, "br                       ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x39, "brfalse                  ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x3a, "brtrue                   ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x3b, "beq                      ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x3c, "bge                      ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x3d, "bgt                      ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x3e, "ble                      ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x3f, "blt                      ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x40, "bne.un                   ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x41, "bge.un                   ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x42, "bgt.un                   ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x43, "ble.un                   ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x44, "blt.un                   ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x46, "ldind.i1                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x47, "ldind.u1                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x48, "ldind.i2                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x49, "ldind.u2                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x4a, "ldind.i4                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x4b, "ldind.u4                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x4c, "ldind.i8                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x4d, "ldind.i                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x4e, "ldind.r4                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x4f, "ldind.r8                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x50, "ldind.ref                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x51, "stind.ref                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x52, "stind.i1                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x53, "stind.i2                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x54, "stind.i4                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x55, "stind.i8                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x56, "stind.r4                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x57, "stind.r8                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x58, "add                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x59, "sub                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x5a, "mul                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x5b, "div                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x5c, "div.un                   ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x5d, "rem                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x5e, "rem.un                   ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x5f, "and                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x60, "or                       ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x61, "xor                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x62, "shl                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x63, "shr                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x64, "shr.un                   ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x65, "neg                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x66, "not                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x67, "conv.i1                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x68, "conv.i2                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x69, "conv.i4                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x6a, "conv.i8                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x6b, "conv.r4                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x6c, "conv.r8                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x6d, "conv.u4                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x6e, "conv.u8                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x6f, "callvirt                 ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x70, "cpobj                    ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x71, "ldobj                    ", 4, UNALIGNED INT32, "0x%08x", 1)
IL_OPCODE(0x72, "ldstr                    ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x73, "newobj                   ", 4, UNALIGNED INT32, "0x%08x", 1)
IL_OPCODE(0x74, "castclass                ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x75, "isinst                   ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x76, "conv.r.un                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x79, "unbox                    ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x7a, "throw                    ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x7b, "ldfld                    ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x7c, "ldflda                   ", 4, UNALIGNED INT32, "0x%08x", 1)
IL_OPCODE(0x7d, "stfld                    ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x7e, "ldsfld                   ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x7f, "ldsflda                  ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x80, "stsfld                   ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x81, "stobj                    ", 4, UNALIGNED INT32, "0x%08x", 1)
IL_OPCODE(0x82, "conv.ovf.i1.un           ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x83, "conv.ovf.i2.un           ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x84, "conv.ovf.i4.un           ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x85, "conv.ovf.i8.un           ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x86, "conv.ovf.u1.un           ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x87, "conv.ovf.u2.un           ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x88, "conv.ovf.u4.un           ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x89, "conv.ovf.u8.un           ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x8a, "conv.ovf.i.un            ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x8b, "conv.ovf.u.un            ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x8c, "box                      ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x8d, "newarr                   ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x8e, "ldlen                    ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x8f, "ldelema                  ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x90, "ldelem.i1                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x91, "ldelem.u1                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x92, "ldelem.i2                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x93, "ldelem.u2                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x94, "ldelem.i4                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x95, "ldelem.u4                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x96, "ldelem.i8                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x97, "ldelem.i                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x98, "ldelem.r4                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x99, "ldelem.r8                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x9a, "ldelem.ref               ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x9b, "stelem.i                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x9c, "stelem.i1                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x9d, "stelem.i2                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x9e, "stelem.i4                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x9f, "stelem.i8                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xa0, "stelem.r4                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xa1, "stelem.r8                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xa2, "stelem.ref               ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xb3, "conv.ovf.i1              ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xb4, "conv.ovf.u1              ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xb5, "conv.ovf.i2              ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xb6, "conv.ovf.u2              ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xb7, "conv.ovf.i4              ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xb8, "conv.ovf.u4              ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xb9, "conv.ovf.i8              ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xba, "conv.ovf.u8              ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xc2, "refanyval                ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0xc3, "ckfinite                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xc6, "mkrefany                 ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0xd0, "ldtoken                  ", 4, UNALIGNED INT32, "0x%08x", 1)
IL_OPCODE(0xd1, "conv.u2                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xd2, "conv.u1                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xd3, "conv.i                   ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xd4, "conv.ovf.i               ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xd5, "conv.ovf.u               ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xd6, "add.ovf                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xd7, "add.ovf.un               ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xd8, "mul.ovf                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xd9, "mul.ovf.un               ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xda, "sub.ovf                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xdb, "sub.ovf.un               ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xdc, "endfinally               ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xdd, "leave                    ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0xde, "leave.s                  ", 1, BYTE, "%d", 0)
IL_OPCODE(0xdf, "stind.i                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xe0, "conv.u                   ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x00, "arglist                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x01, "ceq                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x02, "cgt                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x03, "cgt.un                   ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x04, "clt                      ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x05, "clt.un                   ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x06, "ldftn                    ", 4, UNALIGNED INT32, "0x%08x", 1)
IL_OPCODE_EXT(0xfe, 0x07, "ldvirtftn                ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE_EXT(0xfe, 0x09, "ldarg                    ", 2, UNALIGNED INT16, "0x%04x", 0)
IL_OPCODE_EXT(0xfe, 0x0a, "ldarga                   ", 2, UNALIGNED INT16, "0x%04x", 0)
IL_OPCODE_EXT(0xfe, 0x0b, "starg                    ", 2, UNALIGNED INT16, "0x%04x", 0)
IL_OPCODE_EXT(0xfe, 0x0c, "ldloc                    ", 2, UNALIGNED INT16, "0x%04x", 0)
IL_OPCODE_EXT(0xfe, 0x0d, "ldloca                   ", 2, UNALIGNED INT16, "0x%04x", 0)
IL_OPCODE_EXT(0xfe, 0x0e, "stloc                    ", 2, UNALIGNED INT16, "0x%04x", 0)
IL_OPCODE_EXT(0xfe, 0x0f, "localloc                 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x11, "endfilter                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x12, "unaligned.               ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x13, "volatile.                ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x14, "tail.                    ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x15, "initobj                  ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE_EXT(0xfe, 0x17, "cpblk                    ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x18, "initblk                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x1a, "rethrow                  ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x1c, "sizeof                   ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE_EXT(0xfe, 0x1d, "refanytype               ", 0, ILDUMP_VOID, "", 0)
#undef ILDUMP_VOID
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#define ILDUMP_VOID BYTE
IL_OPCODE(0x00, "nop ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x01, "break ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x02, "ldarg.0 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x03, "ldarg.1 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x04, "ldarg.2 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x05, "ldarg.3 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x06, "ldloc.0 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x07, "ldloc.1 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x08, "ldloc.2 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x09, "ldloc.3 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x0a, "stloc.0 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x0b, "stloc.1 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x0c, "stloc.2 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x0d, "stloc.3 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x0e, "ldarg.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x0f, "ldarga.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x10, "starg.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x11, "ldloc.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x12, "ldloca.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x13, "stloc.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x14, "ldnull ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x15, "ldc.i4.m1 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x16, "ldc.i4.0 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x17, "ldc.i4.1 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x18, "ldc.i4.2 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x19, "ldc.i4.3 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x1a, "ldc.i4.4 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x1b, "ldc.i4.5 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x1c, "ldc.i4.6 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x1d, "ldc.i4.7 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x1e, "ldc.i4.8 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x1f, "ldc.i4.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x20, "ldc.i4 ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x21, "ldc.i8 ", 8, UNALIGNED INT64, "0x%I64x", 0)
IL_OPCODE(0x22, "ldc.r4 ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x23, "ldc.r8 ", 8, UNALIGNED INT64, "0x%I64x", 0)
IL_OPCODE(0x25, "dup ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x26, "pop ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x27, "jmp ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x28, "call ", 4, UNALIGNED INT32, "0x%08x", 1)
IL_OPCODE(0x29, "calli ", 4, UNALIGNED INT32, "0x%08x", 1)
IL_OPCODE(0x2a, "ret ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x2b, "br.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x2c, "brfalse.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x2d, "brtrue.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x2e, "beq.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x2f, "bge.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x30, "bgt.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x31, "ble.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x32, "blt.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x33, "bne.un.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x34, "bge.un.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x35, "bgt.un.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x36, "ble.un.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x37, "blt.un.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0x38, "br ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x39, "brfalse ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x3a, "brtrue ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x3b, "beq ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x3c, "bge ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x3d, "bgt ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x3e, "ble ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x3f, "blt ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x40, "bne.un ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x41, "bge.un ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x42, "bgt.un ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x43, "ble.un ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x44, "blt.un ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x46, "ldind.i1 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x47, "ldind.u1 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x48, "ldind.i2 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x49, "ldind.u2 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x4a, "ldind.i4 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x4b, "ldind.u4 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x4c, "ldind.i8 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x4d, "ldind.i ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x4e, "ldind.r4 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x4f, "ldind.r8 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x50, "ldind.ref ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x51, "stind.ref ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x52, "stind.i1 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x53, "stind.i2 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x54, "stind.i4 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x55, "stind.i8 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x56, "stind.r4 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x57, "stind.r8 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x58, "add ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x59, "sub ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x5a, "mul ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x5b, "div ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x5c, "div.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x5d, "rem ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x5e, "rem.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x5f, "and ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x60, "or ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x61, "xor ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x62, "shl ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x63, "shr ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x64, "shr.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x65, "neg ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x66, "not ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x67, "conv.i1 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x68, "conv.i2 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x69, "conv.i4 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x6a, "conv.i8 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x6b, "conv.r4 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x6c, "conv.r8 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x6d, "conv.u4 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x6e, "conv.u8 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x6f, "callvirt ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x70, "cpobj ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x71, "ldobj ", 4, UNALIGNED INT32, "0x%08x", 1)
IL_OPCODE(0x72, "ldstr ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x73, "newobj ", 4, UNALIGNED INT32, "0x%08x", 1)
IL_OPCODE(0x74, "castclass ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x75, "isinst ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x76, "conv.r.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x79, "unbox ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x7a, "throw ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x7b, "ldfld ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x7c, "ldflda ", 4, UNALIGNED INT32, "0x%08x", 1)
IL_OPCODE(0x7d, "stfld ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x7e, "ldsfld ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x7f, "ldsflda ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x80, "stsfld ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x81, "stobj ", 4, UNALIGNED INT32, "0x%08x", 1)
IL_OPCODE(0x82, "conv.ovf.i1.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x83, "conv.ovf.i2.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x84, "conv.ovf.i4.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x85, "conv.ovf.i8.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x86, "conv.ovf.u1.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x87, "conv.ovf.u2.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x88, "conv.ovf.u4.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x89, "conv.ovf.u8.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x8a, "conv.ovf.i.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x8b, "conv.ovf.u.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x8c, "box ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x8d, "newarr ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x8e, "ldlen ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x8f, "ldelema ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0x90, "ldelem.i1 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x91, "ldelem.u1 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x92, "ldelem.i2 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x93, "ldelem.u2 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x94, "ldelem.i4 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x95, "ldelem.u4 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x96, "ldelem.i8 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x97, "ldelem.i ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x98, "ldelem.r4 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x99, "ldelem.r8 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x9a, "ldelem.ref ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x9b, "stelem.i ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x9c, "stelem.i1 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x9d, "stelem.i2 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x9e, "stelem.i4 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0x9f, "stelem.i8 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xa0, "stelem.r4 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xa1, "stelem.r8 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xa2, "stelem.ref ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xb3, "conv.ovf.i1 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xb4, "conv.ovf.u1 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xb5, "conv.ovf.i2 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xb6, "conv.ovf.u2 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xb7, "conv.ovf.i4 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xb8, "conv.ovf.u4 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xb9, "conv.ovf.i8 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xba, "conv.ovf.u8 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xc2, "refanyval ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0xc3, "ckfinite ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xc6, "mkrefany ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0xd0, "ldtoken ", 4, UNALIGNED INT32, "0x%08x", 1)
IL_OPCODE(0xd1, "conv.u2 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xd2, "conv.u1 ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xd3, "conv.i ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xd4, "conv.ovf.i ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xd5, "conv.ovf.u ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xd6, "add.ovf ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xd7, "add.ovf.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xd8, "mul.ovf ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xd9, "mul.ovf.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xda, "sub.ovf ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xdb, "sub.ovf.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xdc, "endfinally ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xdd, "leave ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE(0xde, "leave.s ", 1, BYTE, "%d", 0)
IL_OPCODE(0xdf, "stind.i ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE(0xe0, "conv.u ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x00, "arglist ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x01, "ceq ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x02, "cgt ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x03, "cgt.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x04, "clt ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x05, "clt.un ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x06, "ldftn ", 4, UNALIGNED INT32, "0x%08x", 1)
IL_OPCODE_EXT(0xfe, 0x07, "ldvirtftn ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE_EXT(0xfe, 0x09, "ldarg ", 2, UNALIGNED INT16, "0x%04x", 0)
IL_OPCODE_EXT(0xfe, 0x0a, "ldarga ", 2, UNALIGNED INT16, "0x%04x", 0)
IL_OPCODE_EXT(0xfe, 0x0b, "starg ", 2, UNALIGNED INT16, "0x%04x", 0)
IL_OPCODE_EXT(0xfe, 0x0c, "ldloc ", 2, UNALIGNED INT16, "0x%04x", 0)
IL_OPCODE_EXT(0xfe, 0x0d, "ldloca ", 2, UNALIGNED INT16, "0x%04x", 0)
IL_OPCODE_EXT(0xfe, 0x0e, "stloc ", 2, UNALIGNED INT16, "0x%04x", 0)
IL_OPCODE_EXT(0xfe, 0x0f, "localloc ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x11, "endfilter ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x12, "unaligned. ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x13, "volatile. ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x14, "tail. ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x15, "initobj ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE_EXT(0xfe, 0x17, "cpblk ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x18, "initblk ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x1a, "rethrow ", 0, ILDUMP_VOID, "", 0)
IL_OPCODE_EXT(0xfe, 0x1c, "sizeof ", 4, UNALIGNED INT32, "0x%08x", 0)
IL_OPCODE_EXT(0xfe, 0x1d, "refanytype ", 0, ILDUMP_VOID, "", 0)
#undef ILDUMP_VOID
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/vm/mngstditflist.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Header: MngStdItfList.h
**
**
** Purpose: This file contains the list of managed standard
** interfaces. Each standard interface also has the
** list of method that it contains.
**
===========================================================*/
#ifndef FEATURE_COMINTEROP
#error FEATURE_COMINTEROP is required for this file
#endif // FEATURE_COMINTEROP
//
// Helper macros
//
#define MNGSTDITF_DEFINE_METH(FriendlyName, MethName, MethSig, FcallDecl) \
MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, MethName, MethName, MethSig, FcallDecl)
#define MNGSTDITF_DEFINE_METH2(FriendlyName, MethName, MethSig, FcallDecl) \
MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, MethName##_2, MethName, MethSig, FcallDecl)
#define MNGSTDITF_DEFINE_METH3(FriendlyName, MethName, MethSig, FcallDecl) \
MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, MethName##_3, MethName, MethSig, FcallDecl)
//
// MNGSTDITF_BEGIN_INTERFACE(FriendlyName, strMngItfName, strUCOMMngItfName, strCustomMarshalerName, strCustomMarshalerCookie, strManagedViewName, NativeItfIID) \
//
// This macro defines a new managed standard interface.
//
// FriendlyName Friendly name for the class that implements the ECall's.
// idMngItf BinderClassID of the managed interface.
// idUCOMMngItf BinderClassID of the UCom version of the managed interface.
// idCustomMarshaler BinderClassID of the custom marshaler.
// idGetInstMethod BinderMethodID of the GetInstance method of the custom marshaler.
// strCustomMarshalerCookie String containing the cookie to be passed to the custom marshaler.
// strManagedViewName String containing the name of the managed view of the native interface.
// NativeItfIID IID of the native interface.
// bCanCastOnNativeItfQI If this is true casting to a COM object that supports the native interface
// will cause the cast to succeed.
//
//
// MNGSTDITF_DEFINE_METH(FriendlyName, MethName, MethSig)
//
// This macro defines a method of the standard managed interface.
// MNGSTDITF_DEFINE_METH2 and MNGSTDITF_DEFINE_METH3 are used to
// define overloaded versions of the method.
//
// FriendlyName Friendly name for the class that implements the ECall's.
// MethName This is the method name
// MethSig This is the method signature.
//
//
// IReflect
//
#define MNGSTDITF_IREFLECT_DECL__GETMETHOD FCDECL6(Object*, GetMethod, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr, Object* refBinderUNSAFE, Object* refTypesArrayUNSAFE, Object* refModifiersArrayUNSAFE)
#define MNGSTDITF_IREFLECT_DECL__GETMETHOD_2 FCDECL3(Object*, GetMethod_2, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr)
#define MNGSTDITF_IREFLECT_DECL__GETMETHODS FCDECL2(Object*, GetMethods, Object* refThisUNSAFE, INT32 enumBindingAttr)
#define MNGSTDITF_IREFLECT_DECL__GETFIELD FCDECL3(Object*, GetField, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr)
#define MNGSTDITF_IREFLECT_DECL__GETFIELDS FCDECL2(Object*, GetFields, Object* refThisUNSAFE, INT32 enumBindingAttr)
#define MNGSTDITF_IREFLECT_DECL__GETPROPERTY FCDECL7(Object*, GetProperty, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr, Object* refBinderUNSAFE, Object* refReturnTypeUNSAFE, Object* refTypesArrayUNSAFE, Object* refModifiersArrayUNSAFE)
#define MNGSTDITF_IREFLECT_DECL__GETPROPERTY_2 FCDECL3(Object*, GetProperty_2, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr)
#define MNGSTDITF_IREFLECT_DECL__GETPROPERTIES FCDECL2(Object*, GetProperties, Object* refThisUNSAFE, INT32 enumBindingAttr)
#define MNGSTDITF_IREFLECT_DECL__GETMEMBER FCDECL3(Object*, GetMember, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr)
#define MNGSTDITF_IREFLECT_DECL__GETMEMBERS FCDECL2(Object*, GetMembers, Object* refThisUNSAFE, INT32 enumBindingAttr)
#define MNGSTDITF_IREFLECT_DECL__INVOKEMEMBER FCDECL9(Object*, InvokeMember, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr, Object* refBinderUNSAFE, Object* refTargetUNSAFE, Object* refArgsArrayUNSAFE, Object* refModifiersArrayUNSAFE, Object* refCultureUNSAFE, Object* refNamedParamsArrayUNSAFE)
#define MNGSTDITF_IREFLECT_DECL__GET_UNDERLYING_SYSTEM_TYPE FCDECL1(Object*, get_UnderlyingSystemType, Object* refThisUNSAFE)
MNGSTDITF_BEGIN_INTERFACE(StdMngIReflect, g_ReflectionReflectItfName, "System.Runtime.InteropServices.ComTypes.IReflect", g_CMExpandoToDispatchExMarshaler, "IReflect", g_CMExpandoViewOfDispatchEx, IID_IDispatchEx, TRUE)
MNGSTDITF_DEFINE_METH(StdMngIReflect, GetMethod, &gsig_IM_Str_BindingFlags_Binder_ArrType_ArrParameterModifier_RetMethodInfo, MNGSTDITF_IREFLECT_DECL__GETMETHOD)
MNGSTDITF_DEFINE_METH2(StdMngIReflect,GetMethod, &gsig_IM_Str_BindingFlags_RetMethodInfo, MNGSTDITF_IREFLECT_DECL__GETMETHOD_2)
MNGSTDITF_DEFINE_METH(StdMngIReflect, GetMethods, &gsig_IM_BindingFlags_RetArrMethodInfo, MNGSTDITF_IREFLECT_DECL__GETMETHODS)
MNGSTDITF_DEFINE_METH(StdMngIReflect, GetField, &gsig_IM_Str_BindingFlags_RetFieldInfo, MNGSTDITF_IREFLECT_DECL__GETFIELD)
MNGSTDITF_DEFINE_METH(StdMngIReflect, GetFields, &gsig_IM_BindingFlags_RetArrFieldInfo, MNGSTDITF_IREFLECT_DECL__GETFIELDS)
MNGSTDITF_DEFINE_METH(StdMngIReflect, GetProperty, &gsig_IM_Str_BindingFlags_Binder_Type_ArrType_ArrParameterModifier_RetPropertyInfo, MNGSTDITF_IREFLECT_DECL__GETPROPERTY)
MNGSTDITF_DEFINE_METH2(StdMngIReflect,GetProperty, &gsig_IM_Str_BindingFlags_RetPropertyInfo, MNGSTDITF_IREFLECT_DECL__GETPROPERTY_2)
MNGSTDITF_DEFINE_METH(StdMngIReflect, GetProperties,&gsig_IM_BindingFlags_RetArrPropertyInfo, MNGSTDITF_IREFLECT_DECL__GETPROPERTIES)
MNGSTDITF_DEFINE_METH(StdMngIReflect, GetMember, &gsig_IM_Str_BindingFlags_RetMemberInfo, MNGSTDITF_IREFLECT_DECL__GETMEMBER)
MNGSTDITF_DEFINE_METH(StdMngIReflect, GetMembers, &gsig_IM_BindingFlags_RetArrMemberInfo, MNGSTDITF_IREFLECT_DECL__GETMEMBERS)
MNGSTDITF_DEFINE_METH(StdMngIReflect, InvokeMember, &gsig_IM_Str_BindingFlags_Binder_Obj_ArrObj_ArrParameterModifier_CultureInfo_ArrStr_RetObj, MNGSTDITF_IREFLECT_DECL__INVOKEMEMBER)
MNGSTDITF_DEFINE_METH(StdMngIReflect, get_UnderlyingSystemType, &gsig_IM_RetType, MNGSTDITF_IREFLECT_DECL__GET_UNDERLYING_SYSTEM_TYPE)
MNGSTDITF_END_INTERFACE(StdMngIReflect)
//
// IEnumerator
//
#define MNGSTDITF_IENUMERATOR_DECL__MOVE_NEXT FCDECL1(FC_BOOL_RET, MoveNext, Object* refThisUNSAFE)
#define MNGSTDITF_IENUMERATOR_DECL__GET_CURRENT FCDECL1(Object*, get_Current, Object* refThisUNSAFE)
#define MNGSTDITF_IENUMERATOR_DECL__RESET FCDECL1(void, Reset, Object* refThisUNSAFE)
MNGSTDITF_BEGIN_INTERFACE(StdMngIEnumerator, g_CollectionsEnumeratorClassName, "System.Runtime.InteropServices.ComTypes.IEnumerator", g_EnumeratorToEnumClassName, "", "System.Runtime.InteropServices.CustomMarshalers.EnumeratorViewOfEnumVariant", IID_IEnumVARIANT, TRUE)
MNGSTDITF_DEFINE_METH(StdMngIEnumerator, MoveNext, &gsig_IM_RetBool, MNGSTDITF_IENUMERATOR_DECL__MOVE_NEXT)
MNGSTDITF_DEFINE_METH(StdMngIEnumerator, get_Current, &gsig_IM_RetObj, MNGSTDITF_IENUMERATOR_DECL__GET_CURRENT)
MNGSTDITF_DEFINE_METH(StdMngIEnumerator, Reset, &gsig_IM_RetVoid, MNGSTDITF_IENUMERATOR_DECL__RESET)
MNGSTDITF_END_INTERFACE(StdMngIEnumerator)
//
// IEnumerable
//
#define MNGSTDITF_IENUMERABLE_DECL__GETENUMERATOR FCDECL1(Object*, GetEnumerator, Object* refThisUNSAFE)
MNGSTDITF_BEGIN_INTERFACE(StdMngIEnumerable, g_CollectionsEnumerableItfName, "System.Runtime.InteropServices.ComTypes.IEnumerable", "System.Runtime.InteropServices.CustomMarshalers.EnumerableToDispatchMarshaler", "", "System.Runtime.InteropServices.CustomMarshalers.EnumerableViewOfDispatch", IID_IDispatch, FALSE)
MNGSTDITF_DEFINE_METH(StdMngIEnumerable, GetEnumerator, &gsig_IM_RetIEnumerator, MNGSTDITF_IENUMERABLE_DECL__GETENUMERATOR)
MNGSTDITF_END_INTERFACE(StdMngIEnumerable)
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Header: MngStdItfList.h
**
**
** Purpose: This file contains the list of managed standard
** interfaces. Each standard interface also has the
** list of method that it contains.
**
===========================================================*/
#ifndef FEATURE_COMINTEROP
#error FEATURE_COMINTEROP is required for this file
#endif // FEATURE_COMINTEROP
//
// Helper macros
//
#define MNGSTDITF_DEFINE_METH(FriendlyName, MethName, MethSig, FcallDecl) \
MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, MethName, MethName, MethSig, FcallDecl)
#define MNGSTDITF_DEFINE_METH2(FriendlyName, MethName, MethSig, FcallDecl) \
MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, MethName##_2, MethName, MethSig, FcallDecl)
#define MNGSTDITF_DEFINE_METH3(FriendlyName, MethName, MethSig, FcallDecl) \
MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, MethName##_3, MethName, MethSig, FcallDecl)
//
// MNGSTDITF_BEGIN_INTERFACE(FriendlyName, strMngItfName, strUCOMMngItfName, strCustomMarshalerName, strCustomMarshalerCookie, strManagedViewName, NativeItfIID) \
//
// This macro defines a new managed standard interface.
//
// FriendlyName Friendly name for the class that implements the ECall's.
// idMngItf BinderClassID of the managed interface.
// idUCOMMngItf BinderClassID of the UCom version of the managed interface.
// idCustomMarshaler BinderClassID of the custom marshaler.
// idGetInstMethod BinderMethodID of the GetInstance method of the custom marshaler.
// strCustomMarshalerCookie String containing the cookie to be passed to the custom marshaler.
// strManagedViewName String containing the name of the managed view of the native interface.
// NativeItfIID IID of the native interface.
// bCanCastOnNativeItfQI If this is true casting to a COM object that supports the native interface
// will cause the cast to succeed.
//
//
// MNGSTDITF_DEFINE_METH(FriendlyName, MethName, MethSig)
//
// This macro defines a method of the standard managed interface.
// MNGSTDITF_DEFINE_METH2 and MNGSTDITF_DEFINE_METH3 are used to
// define overloaded versions of the method.
//
// FriendlyName Friendly name for the class that implements the ECall's.
// MethName This is the method name
// MethSig This is the method signature.
//
//
// IReflect
//
#define MNGSTDITF_IREFLECT_DECL__GETMETHOD FCDECL6(Object*, GetMethod, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr, Object* refBinderUNSAFE, Object* refTypesArrayUNSAFE, Object* refModifiersArrayUNSAFE)
#define MNGSTDITF_IREFLECT_DECL__GETMETHOD_2 FCDECL3(Object*, GetMethod_2, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr)
#define MNGSTDITF_IREFLECT_DECL__GETMETHODS FCDECL2(Object*, GetMethods, Object* refThisUNSAFE, INT32 enumBindingAttr)
#define MNGSTDITF_IREFLECT_DECL__GETFIELD FCDECL3(Object*, GetField, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr)
#define MNGSTDITF_IREFLECT_DECL__GETFIELDS FCDECL2(Object*, GetFields, Object* refThisUNSAFE, INT32 enumBindingAttr)
#define MNGSTDITF_IREFLECT_DECL__GETPROPERTY FCDECL7(Object*, GetProperty, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr, Object* refBinderUNSAFE, Object* refReturnTypeUNSAFE, Object* refTypesArrayUNSAFE, Object* refModifiersArrayUNSAFE)
#define MNGSTDITF_IREFLECT_DECL__GETPROPERTY_2 FCDECL3(Object*, GetProperty_2, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr)
#define MNGSTDITF_IREFLECT_DECL__GETPROPERTIES FCDECL2(Object*, GetProperties, Object* refThisUNSAFE, INT32 enumBindingAttr)
#define MNGSTDITF_IREFLECT_DECL__GETMEMBER FCDECL3(Object*, GetMember, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr)
#define MNGSTDITF_IREFLECT_DECL__GETMEMBERS FCDECL2(Object*, GetMembers, Object* refThisUNSAFE, INT32 enumBindingAttr)
#define MNGSTDITF_IREFLECT_DECL__INVOKEMEMBER FCDECL9(Object*, InvokeMember, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr, Object* refBinderUNSAFE, Object* refTargetUNSAFE, Object* refArgsArrayUNSAFE, Object* refModifiersArrayUNSAFE, Object* refCultureUNSAFE, Object* refNamedParamsArrayUNSAFE)
#define MNGSTDITF_IREFLECT_DECL__GET_UNDERLYING_SYSTEM_TYPE FCDECL1(Object*, get_UnderlyingSystemType, Object* refThisUNSAFE)
MNGSTDITF_BEGIN_INTERFACE(StdMngIReflect, g_ReflectionReflectItfName, "System.Runtime.InteropServices.ComTypes.IReflect", g_CMExpandoToDispatchExMarshaler, "IReflect", g_CMExpandoViewOfDispatchEx, IID_IDispatchEx, TRUE)
MNGSTDITF_DEFINE_METH(StdMngIReflect, GetMethod, &gsig_IM_Str_BindingFlags_Binder_ArrType_ArrParameterModifier_RetMethodInfo, MNGSTDITF_IREFLECT_DECL__GETMETHOD)
MNGSTDITF_DEFINE_METH2(StdMngIReflect,GetMethod, &gsig_IM_Str_BindingFlags_RetMethodInfo, MNGSTDITF_IREFLECT_DECL__GETMETHOD_2)
MNGSTDITF_DEFINE_METH(StdMngIReflect, GetMethods, &gsig_IM_BindingFlags_RetArrMethodInfo, MNGSTDITF_IREFLECT_DECL__GETMETHODS)
MNGSTDITF_DEFINE_METH(StdMngIReflect, GetField, &gsig_IM_Str_BindingFlags_RetFieldInfo, MNGSTDITF_IREFLECT_DECL__GETFIELD)
MNGSTDITF_DEFINE_METH(StdMngIReflect, GetFields, &gsig_IM_BindingFlags_RetArrFieldInfo, MNGSTDITF_IREFLECT_DECL__GETFIELDS)
MNGSTDITF_DEFINE_METH(StdMngIReflect, GetProperty, &gsig_IM_Str_BindingFlags_Binder_Type_ArrType_ArrParameterModifier_RetPropertyInfo, MNGSTDITF_IREFLECT_DECL__GETPROPERTY)
MNGSTDITF_DEFINE_METH2(StdMngIReflect,GetProperty, &gsig_IM_Str_BindingFlags_RetPropertyInfo, MNGSTDITF_IREFLECT_DECL__GETPROPERTY_2)
MNGSTDITF_DEFINE_METH(StdMngIReflect, GetProperties,&gsig_IM_BindingFlags_RetArrPropertyInfo, MNGSTDITF_IREFLECT_DECL__GETPROPERTIES)
MNGSTDITF_DEFINE_METH(StdMngIReflect, GetMember, &gsig_IM_Str_BindingFlags_RetMemberInfo, MNGSTDITF_IREFLECT_DECL__GETMEMBER)
MNGSTDITF_DEFINE_METH(StdMngIReflect, GetMembers, &gsig_IM_BindingFlags_RetArrMemberInfo, MNGSTDITF_IREFLECT_DECL__GETMEMBERS)
MNGSTDITF_DEFINE_METH(StdMngIReflect, InvokeMember, &gsig_IM_Str_BindingFlags_Binder_Obj_ArrObj_ArrParameterModifier_CultureInfo_ArrStr_RetObj, MNGSTDITF_IREFLECT_DECL__INVOKEMEMBER)
MNGSTDITF_DEFINE_METH(StdMngIReflect, get_UnderlyingSystemType, &gsig_IM_RetType, MNGSTDITF_IREFLECT_DECL__GET_UNDERLYING_SYSTEM_TYPE)
MNGSTDITF_END_INTERFACE(StdMngIReflect)
//
// IEnumerator
//
#define MNGSTDITF_IENUMERATOR_DECL__MOVE_NEXT FCDECL1(FC_BOOL_RET, MoveNext, Object* refThisUNSAFE)
#define MNGSTDITF_IENUMERATOR_DECL__GET_CURRENT FCDECL1(Object*, get_Current, Object* refThisUNSAFE)
#define MNGSTDITF_IENUMERATOR_DECL__RESET FCDECL1(void, Reset, Object* refThisUNSAFE)
MNGSTDITF_BEGIN_INTERFACE(StdMngIEnumerator, g_CollectionsEnumeratorClassName, "System.Runtime.InteropServices.ComTypes.IEnumerator", g_EnumeratorToEnumClassName, "", "System.Runtime.InteropServices.CustomMarshalers.EnumeratorViewOfEnumVariant", IID_IEnumVARIANT, TRUE)
MNGSTDITF_DEFINE_METH(StdMngIEnumerator, MoveNext, &gsig_IM_RetBool, MNGSTDITF_IENUMERATOR_DECL__MOVE_NEXT)
MNGSTDITF_DEFINE_METH(StdMngIEnumerator, get_Current, &gsig_IM_RetObj, MNGSTDITF_IENUMERATOR_DECL__GET_CURRENT)
MNGSTDITF_DEFINE_METH(StdMngIEnumerator, Reset, &gsig_IM_RetVoid, MNGSTDITF_IENUMERATOR_DECL__RESET)
MNGSTDITF_END_INTERFACE(StdMngIEnumerator)
//
// IEnumerable
//
#define MNGSTDITF_IENUMERABLE_DECL__GETENUMERATOR FCDECL1(Object*, GetEnumerator, Object* refThisUNSAFE)
MNGSTDITF_BEGIN_INTERFACE(StdMngIEnumerable, g_CollectionsEnumerableItfName, "System.Runtime.InteropServices.ComTypes.IEnumerable", "System.Runtime.InteropServices.CustomMarshalers.EnumerableToDispatchMarshaler", "", "System.Runtime.InteropServices.CustomMarshalers.EnumerableViewOfDispatch", IID_IDispatch, FALSE)
MNGSTDITF_DEFINE_METH(StdMngIEnumerable, GetEnumerator, &gsig_IM_RetIEnumerator, MNGSTDITF_IENUMERABLE_DECL__GETENUMERATOR)
MNGSTDITF_END_INTERFACE(StdMngIEnumerable)
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/debug/di/breakpoint.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: breakpoint.cpp
//
//
//*****************************************************************************
#include "stdafx.h"
/* ------------------------------------------------------------------------- *
* Breakpoint class
* ------------------------------------------------------------------------- */
CordbBreakpoint::CordbBreakpoint(CordbProcess * pProcess, CordbBreakpointType bpType)
: CordbBase(pProcess, 0, enumCordbBreakpoint),
m_active(false), m_pAppDomain(NULL), m_type(bpType)
{
}
// Neutered by CordbAppDomain
void CordbBreakpoint::Neuter()
{
m_pAppDomain = NULL; // clear ref
CordbBase::Neuter();
}
HRESULT CordbBreakpoint::QueryInterface(REFIID id, void **pInterface)
{
if (id == IID_ICorDebugBreakpoint)
{
*pInterface = static_cast<ICorDebugBreakpoint*>(this);
}
else if (id == IID_IUnknown)
{
*pInterface = static_cast<IUnknown *>(static_cast<ICorDebugBreakpoint*>(this));
}
else
{
return E_NOINTERFACE;
}
ExternalAddRef();
return S_OK;
}
HRESULT CordbBreakpoint::BaseIsActive(BOOL *pbActive)
{
*pbActive = m_active ? TRUE : FALSE;
return S_OK;
}
/* ------------------------------------------------------------------------- *
* Function Breakpoint class
* ------------------------------------------------------------------------- */
CordbFunctionBreakpoint::CordbFunctionBreakpoint(CordbCode *code,
SIZE_T offset,
BOOL offsetIsIl)
: CordbBreakpoint(code->GetProcess(), CBT_FUNCTION),
m_code(code), m_offset(offset),
m_offsetIsIl(offsetIsIl)
{
// Remember the app domain we came from so that breakpoints can be
// deactivated from within the ExitAppdomain callback.
m_pAppDomain = m_code->GetAppDomain();
_ASSERTE(m_pAppDomain != NULL);
}
CordbFunctionBreakpoint::~CordbFunctionBreakpoint()
{
// @todo- eventually get CordbFunctionBreakpoint rooted and enable this.
//_ASSERTE(this->IsNeutered());
//_ASSERTE(m_code == NULL);
}
// Neuter: release the code reference, then neuter the base breakpoint.
void CordbFunctionBreakpoint::Neuter()
{
    Disconnect();
    CordbBreakpoint::Neuter();
}
// COM QueryInterface. Handles ICorDebugFunctionBreakpoint here; every other
// interface request (including IUnknown) is delegated to the base class.
HRESULT CordbFunctionBreakpoint::QueryInterface(REFIID id, void **pInterface)
{
    if (id != IID_ICorDebugFunctionBreakpoint)
    {
        // Not looking for a function breakpoint? See if the base class handles
        // this interface. (issue 143976)
        return CordbBreakpoint::QueryInterface(id, pInterface);
    }

    *pInterface = static_cast<ICorDebugFunctionBreakpoint *>(this);
    ExternalAddRef();
    return S_OK;
}
// Returns (add-ref'ed, through *ppFunction) the function this breakpoint
// is set in.
HRESULT CordbFunctionBreakpoint::GetFunction(ICorDebugFunction **ppFunction)
{
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);
    VALIDATE_POINTER_TO_OBJECT(ppFunction, ICorDebugFunction **);

    // No code object at all: the breakpoint was disconnected.
    // (Checked before the neuter test below, since m_code may be NULL.)
    if (m_code == NULL)
    {
        return CORDBG_E_PROCESS_TERMINATED;
    }

    // A neutered code object means the code itself is no longer available.
    if (m_code->IsNeutered())
    {
        return CORDBG_E_CODE_NOT_AVAILABLE;
    }

    ICorDebugFunction * pFunction = static_cast<ICorDebugFunction *>(m_code->GetFunction());
    pFunction->AddRef();
    *ppFunction = pFunction;

    return S_OK;
}
// m_id is actually a LSPTR_BREAKPOINT. Get it as a type-safe member.
LSPTR_BREAKPOINT CordbFunctionBreakpoint::GetLsPtrBP()
{
    LSPTR_BREAKPOINT ptrBP;
    ptrBP.Set(reinterpret_cast<void *>(m_id));
    return ptrBP;
}
// Returns the offset this breakpoint is bound at, truncated to 32 bits.
HRESULT CordbFunctionBreakpoint::GetOffset(ULONG32 *pnOffset)
{
    //REVISIT_TODO: is this casting correct for ia64?
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);
    VALIDATE_POINTER_TO_OBJECT(pnOffset, SIZE_T *);

    // The stored offset is a SIZE_T; the out-param is only 32 bits wide.
    ULONG32 offset32 = (ULONG32)m_offset;
    *pnOffset = offset32;

    return S_OK;
}
//---------------------------------------------------------------------------------------
//
// Activates or removes a breakpoint
//
// Arguments:
//    fActivate - TRUE if to activate the breakpoint, else FALSE.
//
// Return Value:
//    S_OK if successful, else a specific error code detailing the type of failure.
//
//---------------------------------------------------------------------------------------
HRESULT CordbFunctionBreakpoint::Activate(BOOL fActivate)
{
    PUBLIC_REENTRANT_API_ENTRY(this);
    OK_IF_NEUTERED(this); // we'll check again later

    // Already in the requested state? Nothing to do.
    if (fActivate == (m_active == true) )
    {
        return S_OK;
    }

    // For backwards compat w/ everett, we let the other error codes
    // take precedence over neutering error codes.
    if ((m_code == NULL) || this->IsNeutered())
    {
        return CORDBG_E_PROCESS_TERMINATED;
    }

    HRESULT hr;
    ATT_ALLOW_LIVE_DO_STOPGO(GetProcess());

    // For legacy, check this error condition. We must do this under the stop-go lock to ensure
    // that the m_code object was not deleted out from underneath us.
    //
    // 6/23/09 - This isn't just for legacy anymore, collectible types should be able to hit this
    // by unloading the module containing the code this breakpoint is bound to.
    if (m_code->IsNeutered())
    {
        return CORDBG_E_CODE_NOT_AVAILABLE;
    }

    //
    // <REVISIT_TODO>@todo: when we implement module and value breakpoints, then
    // we'll want to factor some of this code out.</REVISIT_TODO>
    //
    CordbProcess * pProcess = GetProcess();

    RSLockHolder lockHolder(pProcess->GetProcessLock());
    pProcess->ClearPatchTable(); // if we add something, then the right side
                                 // view of the patch table is no longer valid

    DebuggerIPCEvent * pEvent = (DebuggerIPCEvent *) _alloca(CorDBIPC_BUFFER_SIZE);

    CordbAppDomain * pAppDomain = GetAppDomain();
    _ASSERTE (pAppDomain != NULL);

    if (fActivate)
    {
        // Build a BREAKPOINT_ADD event identifying the method to patch.
        pProcess->InitIPCEvent(pEvent, DB_IPCE_BREAKPOINT_ADD, true, pAppDomain->GetADToken());

        pEvent->BreakpointData.funcMetadataToken = m_code->GetMetadataToken();
        pEvent->BreakpointData.vmDomainAssembly = m_code->GetModule()->GetRuntimeDomainAssembly();
        pEvent->BreakpointData.encVersion = m_code->GetVersion();

        BOOL codeIsIL = m_code->IsIL();

        pEvent->BreakpointData.isIL = m_offsetIsIl ? true : false;
        pEvent->BreakpointData.offset = m_offset;

        // For native-code breakpoints, also identify the specific jitted code.
        if (codeIsIL)
        {
            pEvent->BreakpointData.nativeCodeMethodDescToken = pEvent->BreakpointData.nativeCodeMethodDescToken.NullPtr();
        }
        else
        {
            pEvent->BreakpointData.nativeCodeMethodDescToken =
                (m_code.GetValue()->AsNativeCode())->GetVMNativeCodeMethodDescToken().ToLsPtr();
        }

        // Note: we're sending a two-way event, so it blocks here
        // until the breakpoint is really added and the reply event is
        // copied over the event we sent.
        // The process lock is released across the send and re-taken after.
        lockHolder.Release();
        hr = pProcess->SendIPCEvent(pEvent, CorDBIPC_BUFFER_SIZE);
        lockHolder.Acquire();

        hr = WORST_HR(hr, pEvent->hr);

        if (FAILED(hr))
        {
            return hr;
        }

        m_id = LsPtrToCookie(pEvent->BreakpointData.breakpointToken);

        // If we weren't able to allocate the BP, we should have set the
        // hr on the left side.
        _ASSERTE(m_id != 0);

        pAppDomain->m_breakpoints.AddBase(this);
        m_active = true;

        // Continue called automatically by StopContinueHolder
    }
    else
    {
        _ASSERTE (pAppDomain != NULL);

        if (pProcess->IsSafeToSendEvents())
        {
            // Ask the left side to remove the patch for this breakpoint.
            pProcess->InitIPCEvent(pEvent, DB_IPCE_BREAKPOINT_REMOVE, false, pAppDomain->GetADToken());

            pEvent->BreakpointData.breakpointToken = GetLsPtrBP();

            lockHolder.Release();
            hr = pProcess->SendIPCEvent(pEvent, CorDBIPC_BUFFER_SIZE);
            lockHolder.Acquire();

            hr = WORST_HR(hr, pEvent->hr);
        }
        else
        {
            hr = CORDBHRFromProcessState(pProcess, pAppDomain);
        }

        // Remove from the right-side table regardless of whether the send
        // succeeded.
        pAppDomain->m_breakpoints.RemoveBase(LsPtrToCookie(GetLsPtrBP()));
        m_active = false;
    }

    return hr;
}
// Disconnect: drop the reference to the code object (done on neuter).
void CordbFunctionBreakpoint::Disconnect()
{
    m_code.Clear();
}
/* ------------------------------------------------------------------------- *
* Stepper class
* ------------------------------------------------------------------------- */
// Constructor. A stepper is created inactive, stepping by IL ranges, with
// STOP_OTHER_UNMAPPED as the unmapped-stop mask, no interception, and JMC
// off. 'frame' (optionally NULL) scopes the step to a particular frame.
CordbStepper::CordbStepper(CordbThread *thread, CordbFrame *frame)
  : CordbBase(thread->GetProcess(), 0, enumCordbStepper),
    m_thread(thread), m_frame(frame),
    m_stepperToken(0), m_active(false),
    m_rangeIL(TRUE),
    m_fIsJMCStepper(false),
    m_rgfMappingStop(STOP_OTHER_UNMAPPED),
    m_rgfInterceptStop(INTERCEPT_NONE)
{
}
// COM QueryInterface for the stepper.
// Supports ICorDebugStepper, ICorDebugStepper2, and IUnknown.
HRESULT CordbStepper::QueryInterface(REFIID id, void **pInterface)
{
    if (id == IID_ICorDebugStepper)
    {
        *pInterface = static_cast<ICorDebugStepper *>(this);
    }
    else if (id == IID_ICorDebugStepper2)
    {
        *pInterface = static_cast<ICorDebugStepper2 *>(this);
    }
    else if (id == IID_IUnknown)
    {
        // IUnknown must be reached through one specific interface branch.
        *pInterface = static_cast<IUnknown *>(static_cast<ICorDebugStepper *>(this));
    }
    else
    {
        return E_NOINTERFACE;
    }

    ExternalAddRef();
    return S_OK;
}
// Selects whether ranges passed to StepRange are IL offsets (TRUE) or
// native offsets (FALSE).
HRESULT CordbStepper::SetRangeIL(BOOL bIL)
{
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);

    // Normalize any non-zero BOOL to true.
    m_rangeIL = (bIL == FALSE) ? false : true;

    return S_OK;
}
// Enables/disables Just-My-Code stepping for this stepper.
HRESULT CordbStepper::SetJMC(BOOL fIsJMCStepper)
{
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);

    // JMC stepping is incompatible with any of the unmapped-stop flags.
    if ((m_rgfMappingStop & STOP_ALL) != 0)
    {
        return E_INVALIDARG;
    }

    m_fIsJMCStepper = (fIsJMCStepper == FALSE) ? false : true;

    return S_OK;
}
// Reports whether this stepper currently has a pending step on the LS.
HRESULT CordbStepper::IsActive(BOOL *pbActive)
{
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);
    VALIDATE_POINTER_TO_OBJECT(pbActive, BOOL *);

    BOOL fActive = m_active;
    *pbActive = fActive;

    return S_OK;
}
// M_id is a ptr to the stepper in the LS process.
LSPTR_STEPPER CordbStepper::GetLsPtrStepper()
{
    LSPTR_STEPPER ptrStepper;
    ptrStepper.Set(reinterpret_cast<void *>(m_id));
    return ptrStepper;
}
// Deactivate: cancel the pending step on the left side (if still active)
// and remove this stepper from the process's stepper table.
HRESULT CordbStepper::Deactivate()
{
    PUBLIC_REENTRANT_API_ENTRY(this);

    // Unsynchronized early-out; re-checked under the lock below.
    if (!m_active)
        return S_OK;

    FAIL_IF_NEUTERED(this);

    if (m_thread == NULL)
        return CORDBG_E_PROCESS_TERMINATED;

    HRESULT hr;
    CordbProcess *process = GetProcess();
    ATT_ALLOW_LIVE_DO_STOPGO(process);

    process->Lock();

    if (!m_active) // another thread may be deactivating (e.g. step complete event)
    {
        process->Unlock();
        return S_OK;
    }

    CordbAppDomain *pAppDomain = GetAppDomain();
    _ASSERTE (pAppDomain != NULL);

    DebuggerIPCEvent event;
    process->InitIPCEvent(&event,
                          DB_IPCE_STEP_CANCEL,
                          false,
                          pAppDomain->GetADToken());

    event.StepData.stepperToken = GetLsPtrStepper();

    // The send is performed outside the process lock, then the lock is
    // re-taken to update the stepper table.
    process->Unlock();
    hr = process->SendIPCEvent(&event, sizeof(DebuggerIPCEvent));
    hr = WORST_HR(hr, event.hr);
    process->Lock();

    process->m_steppers.RemoveBase((ULONG_PTR)m_id);
    m_active = false;

    process->Unlock();

    return hr;
}
// SetInterceptMask: record which intercept categories the stepper should
// stop in. The mask is shipped to the LS with the next step event.
HRESULT CordbStepper::SetInterceptMask(CorDebugIntercept mask)
{
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);
    m_rgfInterceptStop = mask;
    return S_OK;
}
// SetUnmappedStopMask: record where the stepper may stop in code that has
// no IL mapping. The mask is shipped to the LS with the next step event.
HRESULT CordbStepper::SetUnmappedStopMask(CorDebugUnmappedStop mask)
{
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);

    // Stopping in unmanaged code is only possible when interop (Win32)
    // debugging is enabled.
    if (((mask & STOP_UNMANAGED) != 0) && !GetProcess()->IsInteropDebugging())
    {
        return E_INVALIDARG;
    }

    // Limitations on JMC Stepping - if JMC stepping is active,
    // all other stop masks must be disabled.
    // The jit can't place JMC probes before the prolog, so if we're
    // we're JMC stepping, we'll stop after the prolog.
    // The implementation for JMC stepping also doesn't let us stop in
    // unmanaged code. (because there are no probes there).
    // So enforce those implementation limitations here.
    if (m_fIsJMCStepper && ((mask & STOP_ALL) != 0))
    {
        return E_INVALIDARG;
    }

    // @todo- Ensure that we only set valid bits.
    m_rgfMappingStop = mask;

    return S_OK;
}
// Step: single-step, stepping into calls when bStepIn is TRUE.
// Implemented as a StepRange with zero ranges.
HRESULT CordbStepper::Step(BOOL bStepIn)
{
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);
    ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());

    if (m_thread == NULL)
        return CORDBG_E_PROCESS_TERMINATED;

    return StepRange(bStepIn, NULL, 0);
}
//---------------------------------------------------------------------------------------
//
// Ships off a step-range command to the left-side. On the next continue the LS will
// step across one range at a time.
//
// Arguments:
//    fStepIn - TRUE if this stepper should execute a step-in, else FALSE
//    rgRanges - Array of ranges that define a single step.
//    cRanges - Count of number of elements in rgRanges.
//
// Returns:
//    S_OK if the stepper is successfully set-up, else an appropriate error code.
//
HRESULT CordbStepper::StepRange(BOOL fStepIn,
                                COR_DEBUG_STEP_RANGE rgRanges[],
                                ULONG32 cRanges)
{
    PUBLIC_REENTRANT_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);
    VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(rgRanges, COR_DEBUG_STEP_RANGE, cRanges, true, true);
    ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());

    if (m_thread == NULL)
    {
        return CORDBG_E_PROCESS_TERMINATED;
    }

    HRESULT hr = S_OK;

    if (m_active)
    {
        //
        // Deactivate the current stepping.
        // or return an error???
        //
        hr = Deactivate();

        if (FAILED(hr))
        {
            return hr;
        }
    }

    // Validate step-ranges. Ranges are exclusive, so end offset
    // should always be greater than start offset.
    // Ranges don't have to be sorted.
    // Zero ranges is ok; though they ought to just call Step() in that case.
    for (ULONG32 i = 0; i < cRanges; i++)
    {
        if (rgRanges[i].startOffset >= rgRanges[i].endOffset)
        {
            STRESS_LOG2(LF_CORDB, LL_INFO10, "Illegal step range. 0x%x-0x%x\n", rgRanges[i].startOffset, rgRanges[i].endOffset);
            return ErrWrapper(E_INVALIDARG);
        }
    }

    CordbProcess * pProcess = GetProcess();

    //
    // Build step event
    //
    DebuggerIPCEvent * pEvent = reinterpret_cast<DebuggerIPCEvent *>(_alloca(CorDBIPC_BUFFER_SIZE));

    pProcess->InitIPCEvent(pEvent, DB_IPCE_STEP, true, GetAppDomain()->GetADToken());

    pEvent->StepData.vmThreadToken = m_thread->m_vmThreadToken;
    pEvent->StepData.rgfMappingStop = m_rgfMappingStop;
    pEvent->StepData.rgfInterceptStop = m_rgfInterceptStop;
    pEvent->StepData.IsJMCStop = !!m_fIsJMCStepper;

    if (m_frame == NULL)
    {
        pEvent->StepData.frameToken = LEAF_MOST_FRAME;
    }
    else
    {
        pEvent->StepData.frameToken = m_frame->GetFramePointer();
    }

    pEvent->StepData.stepIn = (fStepIn != 0);
    pEvent->StepData.totalRangeCount = cRanges;
    pEvent->StepData.rangeIL = m_rangeIL;

    //
    // Send ranges. We may have to send > 1 message.
    //
    // pRangeStart/pRangeEnd bound the portion of the IPC buffer that can
    // hold COR_DEBUG_STEP_RANGE entries for a single message.
    COR_DEBUG_STEP_RANGE * pRangeStart = &(pEvent->StepData.range);
    COR_DEBUG_STEP_RANGE * pRangeEnd = (reinterpret_cast<COR_DEBUG_STEP_RANGE *> (((BYTE *)pEvent) + CorDBIPC_BUFFER_SIZE)) - 1;

    int cRangesToGo = cRanges;

    if (cRangesToGo > 0)
    {
        while (cRangesToGo > 0)
        {
            //
            // Find the number of ranges we can copy this time thru the loop
            //
            int cRangesToCopy;

            if (cRangesToGo < (pRangeEnd - pRangeStart))
            {
                cRangesToCopy = cRangesToGo;
            }
            else
            {
                cRangesToCopy = (unsigned int)(pRangeEnd - pRangeStart);
            }

            //
            // Copy the ranges into the IPC block now, 1-by-1
            //
            int cRangesCopied = 0;

            while (cRangesCopied != cRangesToCopy)
            {
                pRangeStart[cRangesCopied] = rgRanges[cRanges - cRangesToGo + cRangesCopied];
                cRangesCopied++;
            }

            pEvent->StepData.rangeCount = cRangesCopied;

            cRangesToGo -= cRangesCopied;

            //
            // Send step event (two-way event here...)
            //
            hr = pProcess->SendIPCEvent(pEvent, CorDBIPC_BUFFER_SIZE);
            hr = WORST_HR(hr, pEvent->hr);

            if (FAILED(hr))
            {
                return hr;
            }
        }
    }
    else
    {
        //
        // Send step event without any ranges (two-way event here...)
        //
        hr = pProcess->SendIPCEvent(pEvent, CorDBIPC_BUFFER_SIZE);
        hr = WORST_HR(hr, pEvent->hr);

        if (FAILED(hr))
        {
            return hr;
        }
    }

    // The LS replied with a token for the new stepper; stash it as our id.
    m_id = LsPtrToCookie(pEvent->StepData.stepperToken);

    LOG((LF_CORDB,LL_INFO10000, "CS::SR: m_id:0x%x | 0x%x \n",
         m_id,
         LsPtrToCookie(pEvent->StepData.stepperToken)));

    // pAppDomain is only needed for the assert below.
#ifdef _DEBUG
    CordbAppDomain *pAppDomain = GetAppDomain();
#endif
    _ASSERTE (pAppDomain != NULL);

    pProcess->Lock();

    pProcess->m_steppers.AddBase(this);
    m_active = true;

    pProcess->Unlock();

    return hr;
}
//---------------------------------------------------------------------------------------
//
// Ships off a step-out command to the left-side. On the next continue the LS will
// execute a step-out
//
// Returns:
//    S_OK if the stepper is successfully set-up, else an appropriate error code.
//
HRESULT CordbStepper::StepOut()
{
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);
    ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());

    if (m_thread == NULL)
    {
        return CORDBG_E_PROCESS_TERMINATED;
    }

    HRESULT hr;

    if (m_active)
    {
        //
        // Deactivate the current stepping.
        // or return an error???
        //
        hr = Deactivate();

        if (FAILED(hr))
        {
            return hr;
        }
    }

    CordbProcess * pProcess = GetProcess();

    // We don't do native step-out.
    if (pProcess->SupportsVersion(ver_ICorDebugProcess2))
    {
        if ((m_rgfMappingStop & STOP_UNMANAGED) != 0)
        {
            return ErrWrapper(CORDBG_E_CANT_INTEROP_STEP_OUT);
        }
    }

    //
    // Build step event
    //
    DebuggerIPCEvent * pEvent = (DebuggerIPCEvent *) _alloca(CorDBIPC_BUFFER_SIZE);

    pProcess->InitIPCEvent(pEvent, DB_IPCE_STEP_OUT, true, GetAppDomain()->GetADToken());

    pEvent->StepData.vmThreadToken = m_thread->m_vmThreadToken;
    pEvent->StepData.rgfMappingStop = m_rgfMappingStop;
    pEvent->StepData.rgfInterceptStop = m_rgfInterceptStop;
    pEvent->StepData.IsJMCStop = !!m_fIsJMCStepper;

    if (m_frame == NULL)
    {
        pEvent->StepData.frameToken = LEAF_MOST_FRAME;
    }
    else
    {
        pEvent->StepData.frameToken = m_frame->GetFramePointer();
    }

    // Step-out carries no ranges.
    pEvent->StepData.totalRangeCount = 0;

    // Note: two-way event here...
    hr = pProcess->SendIPCEvent(pEvent, CorDBIPC_BUFFER_SIZE);
    hr = WORST_HR(hr, pEvent->hr);

    if (FAILED(hr))
    {
        return hr;
    }

    // The LS replied with a token for the new stepper; stash it as our id.
    m_id = LsPtrToCookie(pEvent->StepData.stepperToken);

    // pAppDomain is only needed for the assert below.
#ifdef _DEBUG
    CordbAppDomain * pAppDomain = GetAppDomain();
#endif
    _ASSERTE (pAppDomain != NULL);

    pProcess->Lock();

    pProcess->m_steppers.AddBase(this);
    m_active = true;

    pProcess->Unlock();

    return S_OK;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: breakpoint.cpp
//
//
//*****************************************************************************
#include "stdafx.h"
/* ------------------------------------------------------------------------- *
* Breakpoint class
* ------------------------------------------------------------------------- */
// Constructor. A breakpoint starts out deactivated and unassociated with any
// app domain; it is wired up when it is activated.
CordbBreakpoint::CordbBreakpoint(CordbProcess * pProcess, CordbBreakpointType bpType)
  : CordbBase(pProcess, 0, enumCordbBreakpoint),
    m_active(false), m_pAppDomain(NULL), m_type(bpType)
{
}

// Neutered by CordbAppDomain
void CordbBreakpoint::Neuter()
{
    m_pAppDomain = NULL; // clear ref
    CordbBase::Neuter();
}

// COM QueryInterface; supports only ICorDebugBreakpoint and IUnknown.
HRESULT CordbBreakpoint::QueryInterface(REFIID id, void **pInterface)
{
    if (id == IID_ICorDebugBreakpoint)
    {
        *pInterface = static_cast<ICorDebugBreakpoint*>(this);
    }
    else if (id == IID_IUnknown)
    {
        *pInterface = static_cast<IUnknown *>(static_cast<ICorDebugBreakpoint*>(this));
    }
    else
    {
        return E_NOINTERFACE;
    }

    ExternalAddRef();
    return S_OK;
}

// Reports whether this breakpoint is currently active.
HRESULT CordbBreakpoint::BaseIsActive(BOOL *pbActive)
{
    *pbActive = m_active ? TRUE : FALSE;

    return S_OK;
}
/* ------------------------------------------------------------------------- *
* Function Breakpoint class
* ------------------------------------------------------------------------- */
// Constructor: bind a breakpoint to a particular code object at the given
// offset (IL or native, per offsetIsIl). The breakpoint starts deactivated.
CordbFunctionBreakpoint::CordbFunctionBreakpoint(CordbCode *code,
                                                 SIZE_T offset,
                                                 BOOL offsetIsIl)
    : CordbBreakpoint(code->GetProcess(), CBT_FUNCTION),
      m_code(code), m_offset(offset),
      m_offsetIsIl(offsetIsIl)
{
    // Remember the app domain we came from so that breakpoints can be
    // deactivated from within the ExitAppdomain callback.
    m_pAppDomain = m_code->GetAppDomain();
    _ASSERTE(m_pAppDomain != NULL);
}

// Destructor. The neutering asserts below are intentionally disabled until
// CordbFunctionBreakpoint objects are properly rooted (see todo).
CordbFunctionBreakpoint::~CordbFunctionBreakpoint()
{
    // @todo- eventually get CordbFunctionBreakpoint rooted and enable this.
    //_ASSERTE(this->IsNeutered());
    //_ASSERTE(m_code == NULL);
}

// Neuter: release the code reference, then neuter the base breakpoint.
void CordbFunctionBreakpoint::Neuter()
{
    Disconnect();
    CordbBreakpoint::Neuter();
}

// COM QueryInterface; anything other than ICorDebugFunctionBreakpoint is
// delegated to the base class.
HRESULT CordbFunctionBreakpoint::QueryInterface(REFIID id, void **pInterface)
{
    if (id == IID_ICorDebugFunctionBreakpoint)
    {
        *pInterface = static_cast<ICorDebugFunctionBreakpoint*>(this);
    }
    else
    {
        // Not looking for a function breakpoint? See if the base class handles
        // this interface. (issue 143976)
        return CordbBreakpoint::QueryInterface(id, pInterface);
    }

    ExternalAddRef();
    return S_OK;
}

// Returns (add-ref'ed, through *ppFunction) the function this breakpoint
// is set in.
HRESULT CordbFunctionBreakpoint::GetFunction(ICorDebugFunction **ppFunction)
{
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);
    VALIDATE_POINTER_TO_OBJECT(ppFunction, ICorDebugFunction **);

    // No code object at all: the breakpoint was disconnected.
    if (m_code == NULL)
    {
        return CORDBG_E_PROCESS_TERMINATED;
    }
    // A neutered code object means the code itself is no longer available.
    if (m_code->IsNeutered())
    {
        return CORDBG_E_CODE_NOT_AVAILABLE;
    }

    *ppFunction = static_cast<ICorDebugFunction *> (m_code->GetFunction());
    (*ppFunction)->AddRef();

    return S_OK;
}

// m_id is actually a LSPTR_BREAKPOINT. Get it as a type-safe member.
LSPTR_BREAKPOINT CordbFunctionBreakpoint::GetLsPtrBP()
{
    LSPTR_BREAKPOINT p;
    p.Set((void*) m_id);
    return p;
}

// Returns the offset this breakpoint is bound at, truncated to 32 bits.
HRESULT CordbFunctionBreakpoint::GetOffset(ULONG32 *pnOffset)
{
    //REVISIT_TODO: is this casting correct for ia64?
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);
    VALIDATE_POINTER_TO_OBJECT(pnOffset, SIZE_T *);

    *pnOffset = (ULONG32)m_offset;

    return S_OK;
}

//---------------------------------------------------------------------------------------
//
// Activates or removes a breakpoint
//
// Arguments:
//    fActivate - TRUE if to activate the breakpoint, else FALSE.
//
// Return Value:
//    S_OK if successful, else a specific error code detailing the type of failure.
//
//---------------------------------------------------------------------------------------
HRESULT CordbFunctionBreakpoint::Activate(BOOL fActivate)
{
    PUBLIC_REENTRANT_API_ENTRY(this);
    OK_IF_NEUTERED(this); // we'll check again later

    // Already in the requested state? Nothing to do.
    if (fActivate == (m_active == true) )
    {
        return S_OK;
    }

    // For backwards compat w/ everett, we let the other error codes
    // take precedence over neutering error codes.
    if ((m_code == NULL) || this->IsNeutered())
    {
        return CORDBG_E_PROCESS_TERMINATED;
    }

    HRESULT hr;
    ATT_ALLOW_LIVE_DO_STOPGO(GetProcess());

    // For legacy, check this error condition. We must do this under the stop-go lock to ensure
    // that the m_code object was not deleted out from underneath us.
    //
    // 6/23/09 - This isn't just for legacy anymore, collectible types should be able to hit this
    // by unloading the module containing the code this breakpoint is bound to.
    if (m_code->IsNeutered())
    {
        return CORDBG_E_CODE_NOT_AVAILABLE;
    }

    //
    // <REVISIT_TODO>@todo: when we implement module and value breakpoints, then
    // we'll want to factor some of this code out.</REVISIT_TODO>
    //
    CordbProcess * pProcess = GetProcess();

    RSLockHolder lockHolder(pProcess->GetProcessLock());
    pProcess->ClearPatchTable(); // if we add something, then the right side
                                 // view of the patch table is no longer valid

    DebuggerIPCEvent * pEvent = (DebuggerIPCEvent *) _alloca(CorDBIPC_BUFFER_SIZE);

    CordbAppDomain * pAppDomain = GetAppDomain();
    _ASSERTE (pAppDomain != NULL);

    if (fActivate)
    {
        // Build a BREAKPOINT_ADD event identifying the method to patch.
        pProcess->InitIPCEvent(pEvent, DB_IPCE_BREAKPOINT_ADD, true, pAppDomain->GetADToken());

        pEvent->BreakpointData.funcMetadataToken = m_code->GetMetadataToken();
        pEvent->BreakpointData.vmDomainAssembly = m_code->GetModule()->GetRuntimeDomainAssembly();
        pEvent->BreakpointData.encVersion = m_code->GetVersion();

        BOOL codeIsIL = m_code->IsIL();

        pEvent->BreakpointData.isIL = m_offsetIsIl ? true : false;
        pEvent->BreakpointData.offset = m_offset;

        // For native-code breakpoints, also identify the specific jitted code.
        if (codeIsIL)
        {
            pEvent->BreakpointData.nativeCodeMethodDescToken = pEvent->BreakpointData.nativeCodeMethodDescToken.NullPtr();
        }
        else
        {
            pEvent->BreakpointData.nativeCodeMethodDescToken =
                (m_code.GetValue()->AsNativeCode())->GetVMNativeCodeMethodDescToken().ToLsPtr();
        }

        // Note: we're sending a two-way event, so it blocks here
        // until the breakpoint is really added and the reply event is
        // copied over the event we sent.
        // The process lock is released across the send and re-taken after.
        lockHolder.Release();
        hr = pProcess->SendIPCEvent(pEvent, CorDBIPC_BUFFER_SIZE);
        lockHolder.Acquire();

        hr = WORST_HR(hr, pEvent->hr);

        if (FAILED(hr))
        {
            return hr;
        }

        m_id = LsPtrToCookie(pEvent->BreakpointData.breakpointToken);

        // If we weren't able to allocate the BP, we should have set the
        // hr on the left side.
        _ASSERTE(m_id != 0);

        pAppDomain->m_breakpoints.AddBase(this);
        m_active = true;

        // Continue called automatically by StopContinueHolder
    }
    else
    {
        _ASSERTE (pAppDomain != NULL);

        if (pProcess->IsSafeToSendEvents())
        {
            // Ask the left side to remove the patch for this breakpoint.
            pProcess->InitIPCEvent(pEvent, DB_IPCE_BREAKPOINT_REMOVE, false, pAppDomain->GetADToken());

            pEvent->BreakpointData.breakpointToken = GetLsPtrBP();

            lockHolder.Release();
            hr = pProcess->SendIPCEvent(pEvent, CorDBIPC_BUFFER_SIZE);
            lockHolder.Acquire();

            hr = WORST_HR(hr, pEvent->hr);
        }
        else
        {
            hr = CORDBHRFromProcessState(pProcess, pAppDomain);
        }

        // Remove from the right-side table regardless of whether the send
        // succeeded.
        pAppDomain->m_breakpoints.RemoveBase(LsPtrToCookie(GetLsPtrBP()));
        m_active = false;
    }

    return hr;
}

// Disconnect: drop the reference to the code object (done on neuter).
void CordbFunctionBreakpoint::Disconnect()
{
    m_code.Clear();
}
/* ------------------------------------------------------------------------- *
* Stepper class
* ------------------------------------------------------------------------- */
// Constructor. A stepper is created inactive, stepping by IL ranges, with
// STOP_OTHER_UNMAPPED as the unmapped-stop mask, no interception, and JMC
// off. 'frame' (optionally NULL) scopes the step to a particular frame.
CordbStepper::CordbStepper(CordbThread *thread, CordbFrame *frame)
  : CordbBase(thread->GetProcess(), 0, enumCordbStepper),
    m_thread(thread), m_frame(frame),
    m_stepperToken(0), m_active(false),
    m_rangeIL(TRUE),
    m_fIsJMCStepper(false),
    m_rgfMappingStop(STOP_OTHER_UNMAPPED),
    m_rgfInterceptStop(INTERCEPT_NONE)
{
}

// COM QueryInterface; supports ICorDebugStepper, ICorDebugStepper2, IUnknown.
HRESULT CordbStepper::QueryInterface(REFIID id, void **pInterface)
{
    if (id == IID_ICorDebugStepper)
        *pInterface = static_cast<ICorDebugStepper *>(this);
    else if (id == IID_ICorDebugStepper2)
        *pInterface = static_cast<ICorDebugStepper2 *>(this);
    else if (id == IID_IUnknown)
        *pInterface = static_cast<IUnknown *>(static_cast<ICorDebugStepper *>(this));
    else
        return E_NOINTERFACE;

    ExternalAddRef();
    return S_OK;
}

// Selects whether ranges passed to StepRange are IL offsets (TRUE) or
// native offsets (FALSE).
HRESULT CordbStepper::SetRangeIL(BOOL bIL)
{
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);

    m_rangeIL = (bIL != FALSE);

    return S_OK;
}

// Enables/disables Just-My-Code stepping. Fails if an unmapped-stop mask is
// already set, since JMC is incompatible with those flags.
HRESULT CordbStepper::SetJMC(BOOL fIsJMCStepper)
{
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);

    // Can't have JMC and stopping with anything else.
    if (m_rgfMappingStop & STOP_ALL)
        return E_INVALIDARG;

    m_fIsJMCStepper = (fIsJMCStepper != FALSE);
    return S_OK;
}

// Reports whether this stepper currently has a pending step on the LS.
HRESULT CordbStepper::IsActive(BOOL *pbActive)
{
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);
    VALIDATE_POINTER_TO_OBJECT(pbActive, BOOL *);

    *pbActive = m_active;

    return S_OK;
}

// M_id is a ptr to the stepper in the LS process.
LSPTR_STEPPER CordbStepper::GetLsPtrStepper()
{
    LSPTR_STEPPER p;
    p.Set((void*) m_id);
    return p;
}

// Deactivate: cancel the pending step on the left side (if still active)
// and remove this stepper from the process's stepper table.
HRESULT CordbStepper::Deactivate()
{
    PUBLIC_REENTRANT_API_ENTRY(this);

    // Unsynchronized early-out; re-checked under the lock below.
    if (!m_active)
        return S_OK;

    FAIL_IF_NEUTERED(this);

    if (m_thread == NULL)
        return CORDBG_E_PROCESS_TERMINATED;

    HRESULT hr;
    CordbProcess *process = GetProcess();
    ATT_ALLOW_LIVE_DO_STOPGO(process);

    process->Lock();

    if (!m_active) // another thread may be deactivating (e.g. step complete event)
    {
        process->Unlock();
        return S_OK;
    }

    CordbAppDomain *pAppDomain = GetAppDomain();
    _ASSERTE (pAppDomain != NULL);

    DebuggerIPCEvent event;
    process->InitIPCEvent(&event,
                          DB_IPCE_STEP_CANCEL,
                          false,
                          pAppDomain->GetADToken());

    event.StepData.stepperToken = GetLsPtrStepper();

    // The send is performed outside the process lock, then the lock is
    // re-taken to update the stepper table.
    process->Unlock();
    hr = process->SendIPCEvent(&event, sizeof(DebuggerIPCEvent));
    hr = WORST_HR(hr, event.hr);
    process->Lock();

    process->m_steppers.RemoveBase((ULONG_PTR)m_id);
    m_active = false;

    process->Unlock();

    return hr;
}

// SetInterceptMask: record which intercept categories the stepper should
// stop in. The mask is shipped to the LS with the next step event.
HRESULT CordbStepper::SetInterceptMask(CorDebugIntercept mask)
{
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);
    m_rgfInterceptStop = mask;
    return S_OK;
}

// SetUnmappedStopMask: record where the stepper may stop in code that has
// no IL mapping. The mask is shipped to the LS with the next step event.
HRESULT CordbStepper::SetUnmappedStopMask(CorDebugUnmappedStop mask)
{
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);

    // You must be Win32 attached to stop in unmanaged code.
    if ((mask & STOP_UNMANAGED) && !GetProcess()->IsInteropDebugging())
        return E_INVALIDARG;

    // Limitations on JMC Stepping - if JMC stepping is active,
    // all other stop masks must be disabled.
    // The jit can't place JMC probes before the prolog, so if we're
    // we're JMC stepping, we'll stop after the prolog.
    // The implementation for JMC stepping also doesn't let us stop in
    // unmanaged code. (because there are no probes there).
    // So enforce those implementation limitations here.
    if (m_fIsJMCStepper)
    {
        if (mask & STOP_ALL)
            return E_INVALIDARG;
    }

    // @todo- Ensure that we only set valid bits.
    m_rgfMappingStop = mask;
    return S_OK;
}

// Step: single-step, stepping into calls when bStepIn is TRUE.
// Implemented as a StepRange with zero ranges.
HRESULT CordbStepper::Step(BOOL bStepIn)
{
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);
    ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());

    if (m_thread == NULL)
        return CORDBG_E_PROCESS_TERMINATED;

    return StepRange(bStepIn, NULL, 0);
}

//---------------------------------------------------------------------------------------
//
// Ships off a step-range command to the left-side. On the next continue the LS will
// step across one range at a time.
//
// Arguments:
//    fStepIn - TRUE if this stepper should execute a step-in, else FALSE
//    rgRanges - Array of ranges that define a single step.
//    cRanges - Count of number of elements in rgRanges.
//
// Returns:
//    S_OK if the stepper is successfully set-up, else an appropriate error code.
//
HRESULT CordbStepper::StepRange(BOOL fStepIn,
                                COR_DEBUG_STEP_RANGE rgRanges[],
                                ULONG32 cRanges)
{
    PUBLIC_REENTRANT_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);
    VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(rgRanges, COR_DEBUG_STEP_RANGE, cRanges, true, true);
    ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());

    if (m_thread == NULL)
    {
        return CORDBG_E_PROCESS_TERMINATED;
    }

    HRESULT hr = S_OK;

    if (m_active)
    {
        //
        // Deactivate the current stepping.
        // or return an error???
        //
        hr = Deactivate();

        if (FAILED(hr))
        {
            return hr;
        }
    }

    // Validate step-ranges. Ranges are exclusive, so end offset
    // should always be greater than start offset.
    // Ranges don't have to be sorted.
    // Zero ranges is ok; though they ought to just call Step() in that case.
    for (ULONG32 i = 0; i < cRanges; i++)
    {
        if (rgRanges[i].startOffset >= rgRanges[i].endOffset)
        {
            STRESS_LOG2(LF_CORDB, LL_INFO10, "Illegal step range. 0x%x-0x%x\n", rgRanges[i].startOffset, rgRanges[i].endOffset);
            return ErrWrapper(E_INVALIDARG);
        }
    }

    CordbProcess * pProcess = GetProcess();

    //
    // Build step event
    //
    DebuggerIPCEvent * pEvent = reinterpret_cast<DebuggerIPCEvent *>(_alloca(CorDBIPC_BUFFER_SIZE));

    pProcess->InitIPCEvent(pEvent, DB_IPCE_STEP, true, GetAppDomain()->GetADToken());

    pEvent->StepData.vmThreadToken = m_thread->m_vmThreadToken;
    pEvent->StepData.rgfMappingStop = m_rgfMappingStop;
    pEvent->StepData.rgfInterceptStop = m_rgfInterceptStop;
    pEvent->StepData.IsJMCStop = !!m_fIsJMCStepper;

    if (m_frame == NULL)
    {
        pEvent->StepData.frameToken = LEAF_MOST_FRAME;
    }
    else
    {
        pEvent->StepData.frameToken = m_frame->GetFramePointer();
    }

    pEvent->StepData.stepIn = (fStepIn != 0);
    pEvent->StepData.totalRangeCount = cRanges;
    pEvent->StepData.rangeIL = m_rangeIL;

    //
    // Send ranges. We may have to send > 1 message.
    //
    // pRangeStart/pRangeEnd bound the portion of the IPC buffer that can
    // hold COR_DEBUG_STEP_RANGE entries for a single message.
    COR_DEBUG_STEP_RANGE * pRangeStart = &(pEvent->StepData.range);
    COR_DEBUG_STEP_RANGE * pRangeEnd = (reinterpret_cast<COR_DEBUG_STEP_RANGE *> (((BYTE *)pEvent) + CorDBIPC_BUFFER_SIZE)) - 1;

    int cRangesToGo = cRanges;

    if (cRangesToGo > 0)
    {
        while (cRangesToGo > 0)
        {
            //
            // Find the number of ranges we can copy this time thru the loop
            //
            int cRangesToCopy;

            if (cRangesToGo < (pRangeEnd - pRangeStart))
            {
                cRangesToCopy = cRangesToGo;
            }
            else
            {
                cRangesToCopy = (unsigned int)(pRangeEnd - pRangeStart);
            }

            //
            // Copy the ranges into the IPC block now, 1-by-1
            //
            int cRangesCopied = 0;

            while (cRangesCopied != cRangesToCopy)
            {
                pRangeStart[cRangesCopied] = rgRanges[cRanges - cRangesToGo + cRangesCopied];
                cRangesCopied++;
            }

            pEvent->StepData.rangeCount = cRangesCopied;

            cRangesToGo -= cRangesCopied;

            //
            // Send step event (two-way event here...)
            //
            hr = pProcess->SendIPCEvent(pEvent, CorDBIPC_BUFFER_SIZE);
            hr = WORST_HR(hr, pEvent->hr);

            if (FAILED(hr))
            {
                return hr;
            }
        }
    }
    else
    {
        //
        // Send step event without any ranges (two-way event here...)
        //
        hr = pProcess->SendIPCEvent(pEvent, CorDBIPC_BUFFER_SIZE);
        hr = WORST_HR(hr, pEvent->hr);

        if (FAILED(hr))
        {
            return hr;
        }
    }

    // The LS replied with a token for the new stepper; stash it as our id.
    m_id = LsPtrToCookie(pEvent->StepData.stepperToken);

    LOG((LF_CORDB,LL_INFO10000, "CS::SR: m_id:0x%x | 0x%x \n",
         m_id,
         LsPtrToCookie(pEvent->StepData.stepperToken)));

    // pAppDomain is only needed for the assert below.
#ifdef _DEBUG
    CordbAppDomain *pAppDomain = GetAppDomain();
#endif
    _ASSERTE (pAppDomain != NULL);

    pProcess->Lock();

    pProcess->m_steppers.AddBase(this);
    m_active = true;

    pProcess->Unlock();

    return hr;
}

//---------------------------------------------------------------------------------------
//
// Ships off a step-out command to the left-side. On the next continue the LS will
// execute a step-out
//
// Returns:
//    S_OK if the stepper is successfully set-up, else an appropriate error code.
//
HRESULT CordbStepper::StepOut()
{
    PUBLIC_API_ENTRY(this);
    FAIL_IF_NEUTERED(this);
    ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());

    if (m_thread == NULL)
    {
        return CORDBG_E_PROCESS_TERMINATED;
    }

    HRESULT hr;

    if (m_active)
    {
        //
        // Deactivate the current stepping.
        // or return an error???
        //
        hr = Deactivate();

        if (FAILED(hr))
        {
            return hr;
        }
    }

    CordbProcess * pProcess = GetProcess();

    // We don't do native step-out.
    if (pProcess->SupportsVersion(ver_ICorDebugProcess2))
    {
        if ((m_rgfMappingStop & STOP_UNMANAGED) != 0)
        {
            return ErrWrapper(CORDBG_E_CANT_INTEROP_STEP_OUT);
        }
    }

    //
    // Build step event
    //
    DebuggerIPCEvent * pEvent = (DebuggerIPCEvent *) _alloca(CorDBIPC_BUFFER_SIZE);

    pProcess->InitIPCEvent(pEvent, DB_IPCE_STEP_OUT, true, GetAppDomain()->GetADToken());

    pEvent->StepData.vmThreadToken = m_thread->m_vmThreadToken;
    pEvent->StepData.rgfMappingStop = m_rgfMappingStop;
    pEvent->StepData.rgfInterceptStop = m_rgfInterceptStop;
    pEvent->StepData.IsJMCStop = !!m_fIsJMCStepper;

    if (m_frame == NULL)
    {
        pEvent->StepData.frameToken = LEAF_MOST_FRAME;
    }
    else
    {
        pEvent->StepData.frameToken = m_frame->GetFramePointer();
    }

    // Step-out carries no ranges.
    pEvent->StepData.totalRangeCount = 0;

    // Note: two-way event here...
    hr = pProcess->SendIPCEvent(pEvent, CorDBIPC_BUFFER_SIZE);
    hr = WORST_HR(hr, pEvent->hr);

    if (FAILED(hr))
    {
        return hr;
    }

    // The LS replied with a token for the new stepper; stash it as our id.
    m_id = LsPtrToCookie(pEvent->StepData.stepperToken);

    // pAppDomain is only needed for the assert below.
#ifdef _DEBUG
    CordbAppDomain * pAppDomain = GetAppDomain();
#endif
    _ASSERTE (pAppDomain != NULL);

    pProcess->Lock();

    pProcess->m_steppers.AddBase(this);
    m_active = true;

    pProcess->Unlock();

    return S_OK;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/tests/palsuite/file_io/GetTempFileNameA/test1/GetTempFileNameA.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: GetTempFileNameA.c (test 1)
**
** Purpose: Tests the PAL implementation of the GetTempFileNameA function.
**
** Depends on:
** GetFileAttributesA
** DeleteFileA
**
**
**===================================================================*/
#include <palsuite.h>
/*
 * Exercises GetTempFileNameA three ways: with a NULL prefix, with a normal
 * 3-character prefix, and with a prefix longer than 3 characters (only the
 * first 3 characters may be used).  In each case the test verifies the temp
 * file was actually created, then deletes it so no droppings are left behind.
 */
PALTEST(file_io_GetTempFileNameA_test1_paltest_gettempfilenamea_test1, "file_io/GetTempFileNameA/test1/paltest_gettempfilenamea_test1")
{
    UINT uiError = 0;
    const UINT uUnique = 0;                      /* 0 => API picks a unique number and creates the file */
    const char* szDot = {"."};                   /* create the temp files in the current directory */
    const char* szValidPrefix = {"cfr"};
    const char* szLongValidPrefix = {"cfrwxyz"};
    char szReturnedName[256];
    char szTempString[256];

    if (0 != PAL_Initialize(argc, argv))
    {
        return FAIL;
    }

    /* valid path with null prefix */
    uiError = GetTempFileNameA(szDot, NULL, uUnique, szReturnedName);
    if (uiError == 0)
    {
        Fail("GetTempFileNameA: ERROR -> Call failed with a valid path "
            "with the error code: %ld\n", GetLastError());
    }
    else
    {
        /* verify temp file was created.  Compare against
           INVALID_FILE_ATTRIBUTES rather than -1: GetFileAttributesA returns
           a DWORD, so a signed literal triggers signed/unsigned warnings. */
        if (GetFileAttributesA(szReturnedName) == INVALID_FILE_ATTRIBUTES)
        {
            Fail("GetTempFileNameA: ERROR -> GetFileAttributes failed on the "
                "returned temp file \"%s\" with error code: %ld.\n",
                szReturnedName,
                GetLastError());
        }
        if (DeleteFileA(szReturnedName) != TRUE)
        {
            /* message fixed: this test calls DeleteFileA, not DeleteFileW */
            Fail("GetTempFileNameA: ERROR -> DeleteFileA failed to delete "
                "the created temp file with error code: %ld.\n", GetLastError());
        }
    }

    /* valid path with valid prefix */
    uiError = GetTempFileNameA(szDot, szValidPrefix, uUnique, szReturnedName);
    if (uiError == 0)
    {
        Fail("GetTempFileNameA: ERROR -> Call failed with a valid path and "
            "prefix with the error code: %ld\n", GetLastError());
    }
    else
    {
        /* verify temp file was created */
        if (GetFileAttributesA(szReturnedName) == INVALID_FILE_ATTRIBUTES)
        {
            Fail("GetTempFileNameA: ERROR -> GetFileAttributes failed on the "
                "returned temp file \"%s\" with error code: %ld.\n",
                szReturnedName,
                GetLastError());
        }
        if (DeleteFileA(szReturnedName) != TRUE)
        {
            Fail("GetTempFileNameA: ERROR -> DeleteFileA failed to delete "
                "the created temp \"%s\" file with error code: %ld.\n",
                szReturnedName,
                GetLastError());
        }
    }

    /* valid path with long prefix */
    uiError = GetTempFileNameA(szDot, szLongValidPrefix, uUnique, szReturnedName);
    if (uiError == 0)
    {
        Fail("GetTempFileNameA: ERROR -> Call failed with a valid path and "
            "prefix with the error code: %ld\n", GetLastError());
    }
    else
    {
        /* verify temp file was created */
        if (GetFileAttributesA(szReturnedName) == INVALID_FILE_ATTRIBUTES)
        {
            Fail("GetTempFileNameA: ERROR -> GetFileAttributes failed on the "
                "returned temp file \"%s\" with error code: %ld.\n",
                szReturnedName,
                GetLastError());
        }

        /* now verify that it only used the first 3 characters of the prefix */
        sprintf_s(szTempString, ARRAY_SIZE(szTempString), "%s\\%s", szDot, szLongValidPrefix);
        if (strncmp(szTempString, szReturnedName, 6) == 0)
        {
            Fail("GetTempFileNameA: ERROR -> It appears that an improper prefix "
                "was used.\n");
        }

        if (DeleteFileA(szReturnedName) != TRUE)
        {
            Fail("GetTempFileNameA: ERROR -> DeleteFileA failed to delete "
                "the created temp file \"%s\" with error code: %ld.\n",
                szReturnedName,
                GetLastError());
        }
    }

    PAL_Terminate();
    return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: GetTempFileNameA.c (test 1)
**
** Purpose: Tests the PAL implementation of the GetTempFileNameA function.
**
** Depends on:
** GetFileAttributesA
** DeleteFileA
**
**
**===================================================================*/
#include <palsuite.h>
/* Validates GetTempFileNameA with a NULL prefix, a 3-character prefix, and
   an over-long prefix (only the first 3 characters may be honored),
   cleaning up every temp file it creates along the way. */
PALTEST(file_io_GetTempFileNameA_test1_paltest_gettempfilenamea_test1, "file_io/GetTempFileNameA/test1/paltest_gettempfilenamea_test1")
{
    const char* szCurrentDir = ".";        /* temp files go in the current directory */
    const char* szShortPrefix = "cfr";
    const char* szLongPrefix = "cfrwxyz";
    const UINT uUniqueValue = 0;           /* let the API choose the unique part */
    UINT uResult = 0;
    char szTempFileName[256];
    char szPrefixCheck[256];

    if (PAL_Initialize(argc, argv) != 0)
    {
        return FAIL;
    }

    /* case 1: NULL prefix with a valid path */
    uResult = GetTempFileNameA(szCurrentDir, NULL, uUniqueValue, szTempFileName);
    if (uResult == 0)
    {
        Fail("GetTempFileNameA: ERROR -> Call failed with a valid path "
            "with the error code: %ld\n", GetLastError());
    }
    else
    {
        /* the call claims success, so the file must exist on disk */
        if (GetFileAttributesA(szTempFileName) == -1)
        {
            Fail("GetTempFileNameA: ERROR -> GetFileAttributes failed on the "
                "returned temp file \"%s\" with error code: %ld.\n",
                szTempFileName,
                GetLastError());
        }
        if (DeleteFileA(szTempFileName) != TRUE)
        {
            Fail("GetTempFileNameA: ERROR -> DeleteFileW failed to delete"
                "the created temp file with error code: %ld.\n", GetLastError());
        }
    }

    /* case 2: three-character prefix with a valid path */
    uResult = GetTempFileNameA(szCurrentDir, szShortPrefix, uUniqueValue, szTempFileName);
    if (uResult == 0)
    {
        Fail("GetTempFileNameA: ERROR -> Call failed with a valid path and "
            "prefix with the error code: %ld\n", GetLastError());
    }
    else
    {
        /* the call claims success, so the file must exist on disk */
        if (GetFileAttributesA(szTempFileName) == -1)
        {
            Fail("GetTempFileNameA: ERROR -> GetFileAttributes failed on the "
                "returned temp file \"%s\" with error code: %ld.\n",
                szTempFileName,
                GetLastError());
        }
        if (DeleteFileA(szTempFileName) != TRUE)
        {
            Fail("GetTempFileNameA: ERROR -> DeleteFileW failed to delete"
                "the created temp \"%s\" file with error code: %ld.\n",
                szTempFileName,
                GetLastError());
        }
    }

    /* case 3: over-long prefix -- only the first 3 characters may be used */
    uResult = GetTempFileNameA(szCurrentDir, szLongPrefix, uUniqueValue, szTempFileName);
    if (uResult == 0)
    {
        Fail("GetTempFileNameA: ERROR -> Call failed with a valid path and "
            "prefix with the error code: %ld\n", GetLastError());
    }
    else
    {
        /* the call claims success, so the file must exist on disk */
        if (GetFileAttributesA(szTempFileName) == -1)
        {
            Fail("GetTempFileNameA: ERROR -> GetFileAttributes failed on the "
                "returned temp file \"%s\" with error code: %ld.\n",
                szTempFileName,
                GetLastError());
        }

        /* a match on the first 6 chars (".\cfrw") would mean a 4th prefix
           character leaked into the generated name */
        sprintf_s(szPrefixCheck, ARRAY_SIZE(szPrefixCheck), "%s\\%s", szCurrentDir, szLongPrefix);
        if (strncmp(szPrefixCheck, szTempFileName, 6) == 0)
        {
            Fail("GetTempFileNameA: ERROR -> It appears that an improper prefix "
                "was used.\n");
        }

        if (DeleteFileA(szTempFileName) != TRUE)
        {
            Fail("GetTempFileNameA: ERROR -> DeleteFileW failed to delete"
                "the created temp file \"%s\" with error code: %ld.\n",
                szTempFileName,
                GetLastError());
        }
    }

    PAL_Terminate();
    return PASS;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/debug/di/shimprocess.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: ShimProcess.cpp
//
//
// The V3 ICD debugging APIs have a lower abstraction level than V2.
// This provides V2 ICD debugging functionality on top of the V3 debugger object.
//*****************************************************************************
#include "stdafx.h"
#include "safewrap.h"
#include "check.h"
#include <limits.h>
#include "shimpriv.h"
//---------------------------------------------------------------------------------------
//
// Ctor for a ShimProcess
//
// Notes:
// See InitializeDataTarget in header for details of how to instantiate a ShimProcess and hook it up.
// Initial ref count is 0. This is the convention used int the RS, and it plays well with semantics
// like immediately assigning to a smart pointer (which will bump the count up to 1).
ShimProcess::ShimProcess() :
    m_ref(0),
    m_fFirstManagedEvent(false),
    m_fInCreateProcess(false),
    m_fInLoadModule(false),
    m_fIsInteropDebugging(false),
    m_fIsDisposed(false),
    m_loaderBPReceived(false)
{
    // Initialize the locks first: the event queue below takes a pointer to
    // m_ShimLock, so the ordering of these calls matters.
    m_ShimLock.Init("ShimLock", RSLock::cLockReentrant, RSLock::LL_SHIM_LOCK);
    m_ShimProcessDisposeLock.Init(
        "ShimProcessDisposeLock",
        RSLock::cLockReentrant | RSLock::cLockNonDbgApi,
        RSLock::LL_SHIM_PROCESS_DISPOSE_LOCK);
    m_eventQueue.Init(&m_ShimLock);
    // Proxy callback object that queues managed debug events for the shim.
    m_pShimCallback.Assign(new ShimProxyCallback(this)); // Throws
    m_fNeedFakeAttachEvents = false;
    m_ContinueStatusChangedData.Clear();
    // Owned hash tables; deleted in Dispose().
    m_pShimStackWalkHashTable = new ShimStackWalkHashTable();
    m_pDupeEventsHashTable = new DuplicateCreationEventsHashTable();
    m_machineInfo.Clear();
    // Manual-reset events, initially non-signaled; throw on OS failure so a
    // partially-constructed shim is never handed out.
    m_markAttachPendingEvent = WszCreateEvent(NULL, TRUE, FALSE, NULL);
    if (m_markAttachPendingEvent == NULL)
    {
        ThrowLastError();
    }
    m_terminatingEvent = WszCreateEvent(NULL, TRUE, FALSE, NULL);
    if (m_terminatingEvent == NULL)
    {
        ThrowLastError();
    }
}
//---------------------------------------------------------------------------------------
//
// ShimProcess dtor. Invoked when reference count goes to 0.
//
// Assumptions:
// Dtors should not do any interesting work. If this object has been initialized,
// then call Dispose() first.
//
//
ShimProcess::~ShimProcess()
{
    // Dispose() must already have run (or initialization never happened):
    // the win32 event thread is gone and only trivial cleanup remains here.
    _ASSERTE(m_pWin32EventThread == NULL);

    _ASSERTE(m_ShimProcessDisposeLock.IsInit());
    m_ShimProcessDisposeLock.Destroy();

    // Close whichever coordination events were successfully created.
    HANDLE * rgphEvents[] = { &m_markAttachPendingEvent, &m_terminatingEvent };
    for (HANDLE * phEvent : rgphEvents)
    {
        if (*phEvent != NULL)
        {
            CloseHandle(*phEvent);
            *phEvent = NULL;
        }
    }

    // The smart-pointer member releases m_pLiveDataTarget on its own.
}
//---------------------------------------------------------------------------------------
//
// Part of initialization to hook up to process.
//
// Arguments:
// pProcess - debuggee object to connect to. Maybe null if part of shutdown.
//
// Notes:
// This will take a strong reference to the process object.
// This is part of the initialization phase.
// This should only be called once.
//
//
void ShimProcess::SetProcess(ICorDebugProcess * pProcess)
{
    PRIVATE_SHIM_CALLBACK_IN_THIS_SCOPE0(NULL);

    // The data-target must already be wired up before connecting a process.
    _ASSERTE(m_pLiveDataTarget != NULL);

    // Cache both views of the process: the concrete CordbProcess (grants
    // access to private functionality not yet exposed on the ICorDebug
    // interfaces) and the ref-counted public interface pointer.
    m_pProcess = static_cast<CordbProcess *>(pProcess);
    m_pIProcess.Assign(pProcess);

    if (m_pProcess != NULL)
    {
        // Sanity check: the process being attached matches the data-target's pid.
        _ASSERTE(m_pProcess->GetProcessDescriptor()->m_Pid == m_pLiveDataTarget->GetPid());
    }
}
//---------------------------------------------------------------------------------------
//
// Create a Data-Target around the live process.
//
// Arguments:
// processId - OS process ID to connect to. Must be a local, same platform, process.
//
// Return Value:
// S_OK on success.
//
// Assumptions:
// This is part of the initialization dance.
//
// Notes:
// Only call this once, during the initialization dance.
//
HRESULT ShimProcess::InitializeDataTarget(const ProcessDescriptor * pProcessDescriptor)
{
    // Only legal once, during initialization, before any data-target exists.
    _ASSERTE(m_pLiveDataTarget == NULL);

    HRESULT hr = BuildPlatformSpecificDataTarget(GetMachineInfo(), pProcessDescriptor, &m_pLiveDataTarget);
    if (SUCCEEDED(hr))
    {
        // Ref count on the new data-target is now 1.  Register for
        // continue-status-changed notifications and normalize the result.
        _ASSERTE(m_pLiveDataTarget != NULL);
        m_pLiveDataTarget->HookContinueStatusChanged(ShimProcess::ContinueStatusChanged, this);
        hr = S_OK;
    }
    else
    {
        // On failure no data-target should have been handed back.
        _ASSERTE(m_pLiveDataTarget == NULL);
    }
    return hr;
}
//---------------------------------------------------------------------------------------
//
// Determines if current thread is the Win32 Event Thread
//
// Return Value:
// True iff current thread is win32 event thread, else false.
//
// Notes:
// The win32 event thread is created by code:ShimProcess::CreateAndStartWin32ET
//
bool ShimProcess::IsWin32EventThread()
{
    // If no win32 event thread has been created yet, we can't be on it.
    if (m_pWin32EventThread == NULL)
    {
        return false;
    }
    return m_pWin32EventThread->IsWin32EventThread();
}
//---------------------------------------------------------------------------------------
//
// Add a reference
//
void ShimProcess::AddRef()
{
    // Thread-safe increment; paired with Release(), which deletes the
    // object when the count returns to 0.
    InterlockedIncrement(&m_ref);
}
//---------------------------------------------------------------------------------------
//
// Release a reference.
//
// Notes:
// When ref goes to 0, object is deleted.
//
void ShimProcess::Release()
{
LONG ref = InterlockedDecrement(&m_ref);
if (ref == 0)
{
delete this;
}
}
//---------------------------------------------------------------------------------------
//
// Dispose (Neuter) the object.
//
//
// Assumptions:
// This is called to gracefully shutdown the ShimProcess object.
// This must be called before destruction if the object was initialized.
//
// Notes:
// This will release all external resources, including getting the win32 event thread to exit.
// This can safely be called multiple times.
//
void ShimProcess::Dispose()
{
    // Serialize Dispose with any other locked access to the shim. This helps
    // protect against the debugger detaching while we're in the middle of
    // doing stuff on the ShimProcess
    RSLockHolder lockHolder(&m_ShimProcessDisposeLock);
    m_fIsDisposed = true;
    // Can't shut down the W32ET if we're on it.
    _ASSERTE(!IsWin32EventThread());
    // Discard any events still queued for dispatch.
    m_eventQueue.DeleteAll();
    if (m_pWin32EventThread != NULL)
    {
        // This will block waiting for the thread to exit gracefully.
        m_pWin32EventThread->Stop();
        delete m_pWin32EventThread;
        m_pWin32EventThread = NULL;
    }
    // Tear down the data-target before dropping the process pointers.
    if (m_pLiveDataTarget != NULL)
    {
        m_pLiveDataTarget->Dispose();
        m_pLiveDataTarget.Clear();
    }
    m_pIProcess.Clear();
    m_pProcess = NULL;
    _ASSERTE(m_ShimLock.IsInit());
    m_ShimLock.Destroy();
    if (m_pShimStackWalkHashTable != NULL)
    {
        // The hash table should be empty by now. ClearAllShimStackWalk() should have been called.
        _ASSERTE(m_pShimStackWalkHashTable->GetCount() == 0);
        delete m_pShimStackWalkHashTable;
        m_pShimStackWalkHashTable = NULL;
    }
    if (m_pDupeEventsHashTable != NULL)
    {
        // Unlike the stack-walk table, this one may still hold entries; the
        // table does not own them, so each entry must be deleted explicitly.
        if (m_pDupeEventsHashTable->GetCount() > 0)
        {
            // loop through all the entries in the hash table, remove them, and delete them
            for (DuplicateCreationEventsHashTable::Iterator pCurElem = m_pDupeEventsHashTable->Begin(),
                 pEndElem = m_pDupeEventsHashTable->End();
                 pCurElem != pEndElem;
                 pCurElem++)
            {
                DuplicateCreationEventEntry * pEntry = *pCurElem;
                delete pEntry;
            }
            m_pDupeEventsHashTable->RemoveAll();
        }
        delete m_pDupeEventsHashTable;
        m_pDupeEventsHashTable = NULL;
    }
}
//---------------------------------------------------------------------------------------
// Track (and close) file handles from debug events.
//
// Arguments:
// pEvent - debug event
//
// Notes:
// Some debug events introduce file handles that the debugger needs to track and
// close on other debug events. For example, the LoadDll,CreateProcess debug
// events both give back a file handle that the debugger must close. This is generally
// done on the corresponding UnloadDll/ExitProcess debug events.
//
// Since we won't use the file handles, we'll just close them as soon as we get them.
// That way, we don't need to remember any state.
void ShimProcess::TrackFileHandleForDebugEvent(const DEBUG_EVENT * pEvent)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
HANDLE hFile = NULL;
switch(pEvent->dwDebugEventCode)
{
//
// Events that add a file handle
//
case CREATE_PROCESS_DEBUG_EVENT:
hFile = pEvent->u.CreateProcessInfo.hFile;
CloseHandle(hFile);
break;
case LOAD_DLL_DEBUG_EVENT:
hFile = pEvent->u.LoadDll.hFile;
CloseHandle(hFile);
break;
}
}
//---------------------------------------------------------------------------------------
// ThreadProc helper to drain event queue.
//
// Arguments:
// parameter - thread proc parameter, an ICorDebugProcess*
//
// Returns
// 0.
//
// Notes:
// This is useful when the shim queued a fake managed event (such as Control+C)
// and needs to get the debuggee to synchronize in order to start dispatching events.
// @dbgtodo sync: this will likely change as we iron out the Synchronization feature crew.
//
// We do this in a new thread proc to avoid thread restrictions:
// Can't call this on win32 event thread because that can't send the IPC event to
// make the aysnc-break request.
// Can't call this on the RCET because that can't send an async-break (see SendIPCEvent for details)
// So we just spin up a new thread to do the work.
//---------------------------------------------------------------------------------------
DWORD WINAPI CallStopGoThreadProc(LPVOID parameter)
{
    ICorDebugProcess * pProcess = reinterpret_cast<ICorDebugProcess *>(parameter);

    // Stop() blocks until the debuggee is synchronized; the following
    // Continue() then drains any queued events rather than actually resuming
    // the process.  Failures (e.g. around process exit/detach) are ignorable.
    HRESULT hr = pProcess->Stop(INFINITE);
    SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);

    hr = pProcess->Continue(FALSE);
    SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);

    // This thread existed only to trigger event dispatch; it can exit now.
    return 0;
}
//---------------------------------------------------------------------------------------
// Does default event handling for native debug events.
//
// Arguments:
// pEvent - IN event ot handle
// pdwContinueStatus - IN /OUT - continuation status for event.
//
// Assumptions:
// Called when target is stopped. Caller still needs to Continue the debug event.
// This is called on the win32 event thread.
//
// Notes:
// Some native events require extra work before continuing. Eg, skip loader
// breakpoint, close certain handles, etc.
// This is only called in the manage-only case. In the interop-case, the
// debugger will get and handle these native debug events.
void ShimProcess::DefaultEventHandler(
    const DEBUG_EVENT * pEvent,
    DWORD * pdwContinueStatus)
{
    CONTRACTL
    {
        THROWS;
    }
    CONTRACTL_END;
    //
    // Loader breakpoint
    //
    BOOL fFirstChance;
    const EXCEPTION_RECORD * pRecord = NULL;
    if (IsExceptionEvent(pEvent, &fFirstChance, &pRecord))
    {
        // dwThreadId is currently only referenced by the disabled
        // DBG_CONTROL_C handling below.
        DWORD dwThreadId = GetThreadId(pEvent);
        switch(pRecord->ExceptionCode)
        {
            case STATUS_BREAKPOINT:
            {
                // Only the first breakpoint seen is treated as the OS loader
                // breakpoint; later breakpoints fall through untouched.
                if (!m_loaderBPReceived)
                {
                    m_loaderBPReceived = true;
                    // Clear the loader breakpoint
                    *pdwContinueStatus = DBG_CONTINUE;
                    // After loader-breakpoint, notify that managed attach can begin.
                    // This is done to trigger a synchronization. The shim
                    // can then send the fake attach events once the target
                    // is synced.
                    // @dbgtodo sync: not needed once shim can
                    // work on sync APIs.
                    m_pProcess->QueueManagedAttachIfNeeded(); // throws
                }
            }
            break;
/*
            // If we handle the Ctlr-C event here and send the notification to the debugger, then we may break pre-V4
            // behaviour because the debugger may handle the event and intercept the handlers registered in the debuggee
            // process. So don't handle the event here and let the debuggee process handle it instead. See Dev10 issue
            // 846455 for more info.
            //
            // However, when the re-arch is completed, we will need to work with VS to define what the right behaviour
            // should be. We don't want to rely on in-process code to handle the Ctrl-C event.
            case DBG_CONTROL_C:
            {
                // Queue a fake managed Ctrl+C event.
                m_pShimCallback->ControlCTrap(GetProcess());
                // Request an Async Break
                // This is on Win32 Event Thread, so we can't call Stop / Continue.
                // Instead, spawn a new threead, and have that call Stop/Continue, which
                // will get the RCET to drain the event queue and dispatch the ControlCTrap we just queued.
                {
                    DWORD dwDummyId;
                    CreateThread(NULL,
                        0,
                        CallStopGoThreadProc,
                        (LPVOID) GetProcess(),
                        0,
                        &dwDummyId);
                }
                // We don't worry about suspending the Control-C thread right now. The event is
                // coming asynchronously, and so it's ok if the debuggee slips forward while
                // we try to do a managed async break.
                // Clear the control-C event.
                *pdwContinueStatus = DBG_CONTINUE;
            }
            break;
*/
        }
    }
    // Native debugging APIs have an undocumented expectation that you clear for OutputDebugString.
    if (pEvent->dwDebugEventCode == OUTPUT_DEBUG_STRING_EVENT)
    {
        *pdwContinueStatus = DBG_CONTINUE;
    }
    //
    // File handles.
    //
    TrackFileHandleForDebugEvent(pEvent);
}
//---------------------------------------------------------------------------------------
// Determine if we need to change the continue status
//
// Returns:
// True if the continue status was changed. Else false.
//
// Assumptions:
// This is single-threaded, which is enforced by it only be called on the win32et.
// The shim guarnatees only 1 outstanding debug-event at a time.
//
// Notes:
// See code:ShimProcess::ContinueStatusChangedWorker for big picture.
// Continue status is changed from a data-target callback which invokes
// code:ShimProcess::ContinueStatusChangedWorker.
// Call code:ShimProcess::ContinueStatusChangedData::Clear to clear the 'IsSet' bit.
//
bool ShimProcess::ContinueStatusChangedData::IsSet()
{
    // The thread id doubles as the "is set" flag: 0 means no pending change.
    const bool fIsSet = (m_dwThreadId != 0);
    return fIsSet;
}
//---------------------------------------------------------------------------------------
// Clears the bit marking
//
// Assumptions:
// This is single-threaded, which is enforced by it only be called on the win32et.
// The shim guarantees only 1 outstanding debug-event at a time.
//
// Notes:
// See code:ShimProcess::ContinueStatusChangedWorker for big picture.
// This makes code:ShimProcess::ContinueStatusChangedData::IsSet return false.
// This can safely be called multiple times in a row.
//
void ShimProcess::ContinueStatusChangedData::Clear()
{
    // Zeroing the thread id is exactly what makes IsSet() return false.
    m_dwThreadId = 0;
}
//---------------------------------------------------------------------------------------
// Callback invoked from data-target when continue status is changed.
//
// Arguments:
// pUserData - data we supplied to the callback. a 'this' pointer.
// dwThreadId - the tid whose continue status is changing
// dwContinueStatus - the new continue status.
//
// Notes:
//
// Static
HRESULT ShimProcess::ContinueStatusChanged(void * pUserData, DWORD dwThreadId, CORDB_CONTINUE_STATUS dwContinueStatus)
{
    // pUserData is the ShimProcess instance that registered this callback
    // with the data-target; simply forward to the instance worker.
    ShimProcess * pShim = static_cast<ShimProcess *>(pUserData);
    return pShim->ContinueStatusChangedWorker(dwThreadId, dwContinueStatus);
}
//---------------------------------------------------------------------------------------
// Real worker callback invoked from data-target when continue status is changed.
//
// Arguments:
// dwThreadId - the tid whose continue status is changing
// dwContinueStatus - the new continue status.
//
// Notes:
// ICorDebugProcess4::Filter returns an initial continue status (heavily biased to 'gn').
// Some ICorDebug operations may need to change the continue status that filter returned.
// For example, on windows, hijacking a thread at an unhandled exception would need to
// change the status to 'gh' (since continuing 2nd chance exception 'gn' will tear down the
// process and the hijack would never execute).
//
// Such operations will invoke into the data-target (code:ICorDebugMutableDataTarget::ContinueStatusChanged)
// to notify the debugger that the continue status was changed.
//
// The shim only executes such operations on the win32-event thread in a small window between
// WaitForDebugEvent and Continue. Therefore, we know:
// * the callback must come on the Win32EventThread (which means our handling the callback is
// single-threaded.
// * We only have 1 outstanding debug event to worry about at a time. This simplifies our tracking.
//
// The shim tracks the outstanding change request in m_ContinueStatusChangedData.
HRESULT ShimProcess::ContinueStatusChangedWorker(DWORD dwThreadId, CORDB_CONTINUE_STATUS dwContinueStatus)
{
    // Only ever invoked on the win32 event thread, which both protects
    // against races and guarantees at most one outstanding request.
    _ASSERTE(IsWin32EventThread());
    _ASSERTE(!m_ContinueStatusChangedData.IsSet());

    m_ContinueStatusChangedData.m_status = dwContinueStatus;

    // Storing a nonzero thread id is what flips the data to the "set" state.
    m_ContinueStatusChangedData.m_dwThreadId = dwThreadId;
    _ASSERTE(m_ContinueStatusChangedData.IsSet());

    return S_OK;
}
//---------------------------------------------------------------------------------------
//
// Add a duplicate creation event entry for the specified key.
//
// Arguments:
// pKey - the key of the entry to be added; this is expected to be an
// ICDProcess/ICDAppDomain/ICDThread/ICDAssembly/ICDModule
//
// Assumptions:
// pKey is really an interface pointer of one of the types mentioned above
//
// Notes:
// We have to keep track of which creation events we have sent already because some runtime data structures
// are discoverable through enumeration before they send their creation events. As a result, we may have
// faked up a creation event for a data structure during attach, and then later on get another creation
// event for the same data structure. VS is not resilient in the face of multiple creation events for
// the same data structure.
//
// Needless to say this is a problem in attach scenarios only. However, keep in mind that for CoreCLR,
// launch really is early attach. For early attach, we get three creation events up front: a create
// process, a create appdomain, and a create thread.
//
void ShimProcess::AddDuplicateCreationEvent(void * pKey)
{
    // The holder frees the new entry if Add() throws; once the table has
    // accepted it, SuppressRelease transfers ownership to the table.
    NewHolder<DuplicateCreationEventEntry> pEntry(new DuplicateCreationEventEntry(pKey));
    m_pDupeEventsHashTable->Add(pEntry);
    pEntry.SuppressRelease();
}
//---------------------------------------------------------------------------------------
//
// Check whether the specified key exists in the hash table. If so, remove it.
//
// Arguments:
// pKey - the key of the entry to check; this is expected to be an
// ICDProcess/ICDAppDomain/ICDThread/ICDAssembly/ICDModule
//
// Return Value:
// Returns true if the entry exists. The entry will have been removed because we can't have more than two
// duplicates for any given event.
//
// Assumptions:
// pKey is really an interface pointer of one of the types mentioned above
//
// Notes:
// See code:ShimProcess::AddDuplicateCreationEvent.
//
bool ShimProcess::RemoveDuplicateCreationEventIfPresent(void * pKey)
{
// We only worry about duplicate events in attach scenarios.
if (GetAttached())
{
// Only do the check if the hash table actually contains entries.
if (m_pDupeEventsHashTable->GetCount() > 0)
{
// Check if this is a dupe.
DuplicateCreationEventEntry * pResult = m_pDupeEventsHashTable->Lookup(pKey);
if (pResult != NULL)
{
// This is a dupe. We can't have a dupe twice, so remove it.
// This will help as a bit of optimization, since we will no longer check the hash table if
// its count reaches 0.
m_pDupeEventsHashTable->Remove(pKey);
delete pResult;
return true;
}
}
}
return false;
}
//---------------------------------------------------------------------------------------
// Gets the exception record format of the host
//
// Returns:
// The CorDebugRecordFormat for the host architecture.
//
// Notes:
// This corresponds to the definition EXCEPTION_RECORD on the host-architecture.
// It can be passed into ICorDebugProcess4::Filter.
CorDebugRecordFormat GetHostExceptionRecordFormat()
{
    // Selected at compile time to match the host architecture's
    // EXCEPTION_RECORD layout; suitable for ICorDebugProcess4::Filter.
#if defined(HOST_64BIT)
    return FORMAT_WINDOWS_EXCEPTIONRECORD64;
#else
    return FORMAT_WINDOWS_EXCEPTIONRECORD32;
#endif
}
//---------------------------------------------------------------------------------------
// Main event handler for native debug events. Must also ensure Continue is called.
//
// Arguments:
// pEvent - debug event to handle
//
// Assumptions:
// Caller did a Flush() if needed.
//
// Notes:
// The main Handle native debug events.
// This must call back into ICD to let ICD filter the debug event (in case it's a managed notification).
//
// If we're interop-debugging (V2), then the debugger is expecting the debug events. In that case,
// we go through the V2 interop-debugging logic to queue / dispatch the events.
// If we're managed-only debugging, then the shim provides a default handler for the native debug.
// This includes some basic work (skipping the loader breakpoint, close certain handles, etc).
//---------------------------------------------------------------------------------------
HRESULT ShimProcess::HandleWin32DebugEvent(const DEBUG_EVENT * pEvent)
{
_ASSERTE(IsWin32EventThread());
//
// If this is an exception event, then we need to feed it into the CLR.
//
BOOL dwFirstChance = FALSE;
const EXCEPTION_RECORD * pRecord = NULL;
const DWORD dwThreadId = GetThreadId(pEvent);
bool fContinueNow = true;
// If true, we're continuing (unhandled) a 2nd-chance exception
bool fExceptionGoingUnhandled = false;
//
const DWORD kDONTCARE = 0;
DWORD dwContinueStatus = kDONTCARE;
if (IsExceptionEvent(pEvent, &dwFirstChance, &pRecord))
{
// As a diagnostic aid we can configure the debugger to assert when the debuggee does DebugBreak()
#ifdef DEBUG
static ConfigDWORD config;
DWORD fAssert = config.val(CLRConfig::INTERNAL_DbgAssertOnDebuggeeDebugBreak);
if (fAssert)
{
// If we got a 2nd-chance breakpoint, then it's extremely likely that it's from an
// _ASSERTE in the target and we really want to know about it now before we kill the
// target. The debuggee will exit once we continue (unless we are mixed-mode debugging), so alert now.
// This assert could be our only warning of various catastrophic failures in the left-side.
if (!dwFirstChance && (pRecord->ExceptionCode == STATUS_BREAKPOINT) && !m_fIsInteropDebugging)
{
DWORD pid = (m_pLiveDataTarget == NULL) ? 0 : m_pLiveDataTarget->GetPid();
CONSISTENCY_CHECK_MSGF(false,
("Unhandled breakpoint exception in debuggee (pid=%d (0x%x)) on thread %d(0x%x)\n"
"This may mean there was an assert in the debuggee on that thread.\n"
"\n"
"You should attach to that process (non-invasively) and get a callstack of that thread.\n"
"(This assert only occurs when CLRConfig::INTERNAL_DebuggerAssertOnDebuggeeDebugBreak is set)\n",
pid, pid, dwThreadId,dwThreadId));
}
}
#endif
// We pass the Shim's proxy callback object, which will just take the callbacks and queue them
// to an event-queue in the shim. When we get the sync-complete event, the shim
// will then drain the event queue and dispatch the events to the user's callback object.
const DWORD dwFlags = dwFirstChance ? 1 : 0;
m_ContinueStatusChangedData.Clear();
// If ICorDebug doesn't care about this exception, it will leave dwContinueStatus unchanged.
RSExtSmartPtr<ICorDebugProcess4> pProcess4;
GetProcess()->QueryInterface(IID_ICorDebugProcess4, (void**) &pProcess4);
HRESULT hrFilter = pProcess4->Filter(
(const BYTE*) pRecord,
sizeof(EXCEPTION_RECORD),
GetHostExceptionRecordFormat(),
dwFlags,
dwThreadId,
m_pShimCallback,
&dwContinueStatus);
if (FAILED(hrFilter))
{
// Filter failed (eg. DAC couldn't be loaded), return the
// error so it can become an unrecoverable error.
return hrFilter;
}
// For unhandled exceptions, hijacking if needed.
if (!dwFirstChance)
{
// May invoke data-target callback (which may call code:ShimProcess::ContinueStatusChanged) to change continue status.
if (!m_pProcess->HijackThreadForUnhandledExceptionIfNeeded(dwThreadId))
{
// We decided not to hijack, so this exception is going to go unhandled
fExceptionGoingUnhandled = true;
}
if (m_ContinueStatusChangedData.IsSet())
{
_ASSERTE(m_ContinueStatusChangedData.m_dwThreadId == dwThreadId);
// Claiming this now means we won't do any other processing on the exception event.
// This means the interop-debugging logic will never see 2nd-chance managed exceptions.
dwContinueStatus = m_ContinueStatusChangedData.m_status;
}
}
}
// Do standard event handling, including Handling loader-breakpoint,
// and callback into CordbProcess for Attach if needed.
HRESULT hrIgnore = S_OK;
EX_TRY
{
// For NonClr notifications, allow extra processing.
// This includes both non-exception events, and exception events that aren't
// specific CLR debugging services notifications.
if (dwContinueStatus == kDONTCARE)
{
if (m_fIsInteropDebugging)
{
// Interop-debugging logic will handle the continue.
fContinueNow = false;
#if defined(FEATURE_INTEROP_DEBUGGING)
// @dbgtodo interop: All the interop-debugging logic is still in CordbProcess.
// Call back into that. This will handle Continuing the debug event.
m_pProcess->HandleDebugEventForInteropDebugging(pEvent);
#else
_ASSERTE(!"Interop debugging not supported");
#endif
}
else
{
dwContinueStatus = DBG_EXCEPTION_NOT_HANDLED;
// For managed-only debugging, there's no user handler for native debug events,
// and so we still need to do some basic work on certain debug events.
DefaultEventHandler(pEvent, &dwContinueStatus);
// This is the managed-only case. No reason to keep the target win32 frozen, so continue it immediately.
_ASSERTE(fContinueNow);
}
}
}
EX_CATCH_HRESULT(hrIgnore);
// Dont' expect errors here (but could probably return it up to become an
// unrecoverable error if necessary). We still want to call Continue thought.
SIMPLIFYING_ASSUMPTION_SUCCEEDED(hrIgnore);
//
// Continue the debuggee if needed.
//
if (fContinueNow)
{
BOOL fContinueOk = GetNativePipeline()->ContinueDebugEvent(
GetProcessId(pEvent),
dwThreadId,
dwContinueStatus);
(void)fContinueOk; //prevent "unused variable" error from GCC
SIMPLIFYING_ASSUMPTION(fContinueOk);
if (fExceptionGoingUnhandled)
{
_ASSERTE(dwContinueStatus == DBG_EXCEPTION_NOT_HANDLED);
// We just passed a 2nd-chance exception back to the OS which may have now invoked
// Windows error-reporting logic which suspended all threads in the target. Since we're
// still debugging and may want to break, inspect state and even detach (eg. to attach
// a different sort of debugger that can handle the exception) we need to let our threads run.
// Note that when WER auto-invokes a debugger it doesn't suspend threads, so it doesn't really
// make sense for them to be suspended now when a debugger is already attached.
// A better solution may be to suspend this faulting thread before continuing the event, do an
// async-break and give the debugger a notification of an unhandled exception. But this will require
// an ICorDebug API change, and also makes it harder to reliably get the WER dialog box once we're
// ready for it.
// Unfortunately we have to wait for WerFault.exe to start and actually suspend the threads, and
// there doesn't appear to be any better way than to just sleep for a little here. In practice 200ms
// seems like more than enough, but this is so uncommon of a scenario that a half-second delay
// (just to be safe) isn't a problem.
// Provide an undocumented knob to turn this behavior off in the very rare case it's not what we want
// (eg. we're trying to debug something that races with crashing / terminating the process on multiple
// threads)
static ConfigDWORD config;
DWORD fSkipResume = config.val(CLRConfig::UNSUPPORTED_DbgDontResumeThreadsOnUnhandledException);
if (!fSkipResume)
{
::Sleep(500);
}
}
}
return S_OK;
}
// Trivial accessor to get the event queue.
ManagedEventQueue * ShimProcess::GetManagedEventQueue()
{
return &m_eventQueue;
}
// Combines GetManagedEventQueue() and Dequeue() into a single function
// that holds m_ShimProcessDisposeLock for the duration
ManagedEvent * ShimProcess::DequeueManagedEvent()
{
// Serialize this function with Dispoe()
RSLockHolder lockHolder(&m_ShimProcessDisposeLock);
if (m_fIsDisposed)
return NULL;
return m_eventQueue.Dequeue();
}
// Trivial accessor to get Shim's proxy callback object.
ShimProxyCallback * ShimProcess::GetShimCallback()
{
return m_pShimCallback;
}
// Trivial accessor to get the ICDProcess for the debuggee.
// A ShimProcess object can then provide V2 functionality by building it on V3 functionality
// exposed by the ICDProcess object.
ICorDebugProcess * ShimProcess::GetProcess()
{
return m_pIProcess;
}
// Trivial accessor to get the data-target for the debuggee.
// The data-target lets us access the debuggee, especially reading debuggee memory.
ICorDebugMutableDataTarget * ShimProcess::GetDataTarget()
{
return m_pLiveDataTarget;
};
// Trivial accessor to get the raw native event pipeline.
// In V3, ICorDebug no longer owns the event thread and it does not own the event pipeline either.
INativeEventPipeline * ShimProcess::GetNativePipeline()
{
return m_pWin32EventThread->GetNativePipeline();
}
// Trivial accessor to expose the W32ET thread to the CordbProcess so that it can emulate V2 behavior.
// In V3, ICorDebug no longer owns the event thread and it does not own the event pipeline either.
// The Win32 Event Thread is the only thread that can use the native pipeline
// see code:ShimProcess::GetNativePipeline.
CordbWin32EventThread * ShimProcess::GetWin32EventThread()
{
return m_pWin32EventThread;
}
// Trivial accessor to mark whether we're interop-debugging.
// Retreived via code:ShimProcess::IsInteropDebugging
void ShimProcess::SetIsInteropDebugging(bool fIsInteropDebugging)
{
m_fIsInteropDebugging = fIsInteropDebugging;
}
// Trivial accessor to check if we're interop-debugging.
// This affects how we handle native debug events.
// The significant usage of this is in code:ShimProcess::HandleWin32DebugEvent
bool ShimProcess::IsInteropDebugging()
{
return m_fIsInteropDebugging;
}
//---------------------------------------------------------------------------------------
// Begin queueing the fake attach events.
//
// Notes:
// See code:ShimProcess::QueueFakeAttachEvents for more about "fake attach events".
//
// This marks that we need to send fake attach events, and queus a CreateProcess.
// Caller calls code:ShimProcess::QueueFakeAttachEventsIfNeeded to finish queuing
// the rest of the fake attach events.
void ShimProcess::BeginQueueFakeAttachEvents()
{
m_fNeedFakeAttachEvents = true;
// Put a fake CreateProcess event in the queue.
// This will not be drained until we get a Sync-Complete from the Left-side.
GetShimCallback()->QueueCreateProcess(GetProcess());
AddDuplicateCreationEvent(GetProcess());
}
//---------------------------------------------------------------------------------------
// potentially Queue fake attach events like we did in V2.
//
// Arguments:
// fRealCreateProcessEvent - true if the shim is about to dispatch a real create process event (as opposed
// to one faked up by the shim itself)
//
// Notes:
// See code:ShimProcess::QueueFakeAttachEvents for details.
void ShimProcess::QueueFakeAttachEventsIfNeeded(bool fRealCreateProcessEvent)
{
// This was set high in code:ShimProcess::BeginQueueFakeAttachEvents
if (!m_fNeedFakeAttachEvents)
{
return;
}
m_fNeedFakeAttachEvents = false;
// If the first event we get after attaching is a create process event, then this is an early attach
// scenario and we don't need to queue any fake attach events.
if (!fRealCreateProcessEvent)
{
HRESULT hr = S_OK;
EX_TRY
{
QueueFakeAttachEvents();
}
EX_CATCH_HRESULT(hr);
}
}
//---------------------------------------------------------------------------------------
// Send fake Thread-create events for attach, using an arbitrary order.
//
// Returns:
// S_OK on success, else error.
//
// Notes:
// This sends fake thread-create events, ala V2 attach.
// See code:ShimProcess::QueueFakeAttachEvents for details
//
// The order of thread creates is random and at the mercy of ICorDebugProcess::EnumerateThreads.
// Whidbey would send thread creates in the order of the OS's native thread
// list. Since Arrowhead no longer sends fake attach events, the shim simulates
// the fake attach events. But ICorDebug doesn't provide a way to get the
// same order that V2 used. So without using platform-specific thread-enumeration,
// we can't get the V2 ordering.
//
HRESULT ShimProcess::QueueFakeThreadAttachEventsNoOrder()
{
ICorDebugProcess * pProcess = GetProcess();
RSExtSmartPtr<ICorDebugThreadEnum> pThreadEnum;
RSExtSmartPtr<ICorDebugThread> pThread;
// V2 would only send create threads after a thread had run managed code.
// V3 has a discovery model where Enumeration can find threads before they've run managed code.
// So the emulation here may send some additional create-thread events that v2 didn't send.
HRESULT hr = pProcess->EnumerateThreads(&pThreadEnum);
SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
if (FAILED(hr))
{
return hr;
}
ULONG cDummy;
while(SUCCEEDED(pThreadEnum->Next(1, &pThread, &cDummy)) && (pThread != NULL))
{
RSExtSmartPtr<ICorDebugAppDomain> pAppDomain;
hr = pThread->GetAppDomain(&pAppDomain);
// Getting the appdomain shouldn't fail. If it does, we can't dispatch
// this callback, but we can still dispatch the other thread creates.
SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
if (pAppDomain != NULL)
{
GetShimCallback()->CreateThread(pAppDomain, pThread);
AddDuplicateCreationEvent(pThread);
}
pThread.Clear();
}
return S_OK;
}
//---------------------------------------------------------------------------------------
// Queues the fake Assembly and Module load events
//
// Arguments:
// pAssembly - non-null, the assembly to queue.
//
// Notes:
// Helper for code:ShimProcess::QueueFakeAttachEvents
// Queues create events for the assembly and for all modules within the
// assembly. Most assemblies only have 1 module.
void ShimProcess::QueueFakeAssemblyAndModuleEvent(ICorDebugAssembly * pAssembly)
{
RSExtSmartPtr<ICorDebugAppDomain> pAppDomain;
HRESULT hr = pAssembly->GetAppDomain(&pAppDomain);
SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
//
// Send the fake Load Assembly event.
//
GetShimCallback()->LoadAssembly(pAppDomain, pAssembly);
AddDuplicateCreationEvent(pAssembly);
//
// Send Modules - must be in load order
//
RSExtSmartPtr<ICorDebugModuleEnum> pModuleEnum;
hr = pAssembly->EnumerateModules(&pModuleEnum);
SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
ULONG countModules;
hr = pModuleEnum->GetCount(&countModules);
SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
// ISSUE WORKAROUND 835869
// The CordbEnumFilter used as the implementation of CordbAssembly::EnumerateModules has
// a ref counting bug in it. It adds one ref to each item when it is constructed and never
// removes that ref. Expected behavior would be that it adds a ref at construction, another on
// every call to next, and releases the construction ref when the enumerator is destroyed. The
// user is expected to release the reference they receive from Next. Thus enumerating exactly
// one time and calling Release() does the correct thing regardless of whether this bug is present
// or not. Note that with the bug the enumerator holds 0 references at the end of this loop,
// however the assembly also holds references so the modules will not be prematurely released.
for(ULONG i = 0; i < countModules; i++)
{
ICorDebugModule* pModule = NULL;
ULONG countFetched = 0;
pModuleEnum->Next(1, &pModule, &countFetched);
_ASSERTE(pModule != NULL);
if(pModule != NULL)
{
pModule->Release();
}
}
RSExtSmartPtr<ICorDebugModule> * pModules = new RSExtSmartPtr<ICorDebugModule> [countModules];
m_pProcess->GetModulesInLoadOrder(pAssembly, pModules, countModules);
for(ULONG iModule = 0; iModule < countModules; iModule++)
{
ICorDebugModule * pModule = pModules[iModule];
GetShimCallback()->FakeLoadModule(pAppDomain, pModule);
AddDuplicateCreationEvent(pModule);
// V2 may send UpdatePdbStreams for certain modules (like dynamic or in-memory modules).
// We don't yet have this support for out-of-proc.
// When the LoadModule event that we just queued is actually dispatched, it will
// send an IPC event in-process that will collect the information and queue the event
// at that time.
// @dbgtodo : I don't think the above is true anymore - clean it up?
RSExtSmartPtr<IStream> pSymbolStream;
// ICorDebug has no public way to request raw symbols. This is by-design because we
// don't want people taking a dependency on a specific format (to give us the ability
// to innovate for the RefEmit case). So we must use a private hook here to get the
// symbol data.
CordbModule * pCordbModule = static_cast<CordbModule *>(pModule);
IDacDbiInterface::SymbolFormat symFormat = IDacDbiInterface::kSymbolFormatNone;
EX_TRY
{
symFormat = pCordbModule->GetInMemorySymbolStream(&pSymbolStream);
}
EX_CATCH_HRESULT(hr);
SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr); // Shouldn't be any errors trying to read symbols
// Only pass the raw symbols onto the debugger if they're in PDB format (all that was supported
// in V2). Note that we could have avoided creating a stream for the non-PDB case, but we'd have
// to refactor GetInMemorySymbolStream and the perf impact should be negligable.
if (symFormat == IDacDbiInterface::kSymbolFormatPDB)
{
_ASSERTE(pSymbolStream != NULL); // symFormat should have been kSymbolFormatNone if null stream
GetShimCallback()->UpdateModuleSymbols(pAppDomain, pModule, pSymbolStream);
}
}
delete [] pModules;
}
//---------------------------------------------------------------------------------------
// Get an array of appdomains, sorted by increasing AppDomain ID
//
// Arguments:
// pProcess - process containing the appdomains
// ppAppDomains - array that this function will allocate to hold appdomains
// pCount - size of ppAppDomains array
//
// Assumptions:
// Caller must delete [] ppAppDomains
//
// Notes
// This is used as part of code:ShimProcess::QueueFakeAttachEvents.
// The fake attach events want appdomains in creation order. ICorDebug doesn't provide
// this ordering in the enumerators.
//
// This returns the appdomains sorted in order of increasing AppDomain ID, since that's the best
// approximation of creation order that we have.
// @dbgtodo - determine if ICD will provide
// ordered enumerators
//
HRESULT GetSortedAppDomains(ICorDebugProcess * pProcess, RSExtSmartPtr<ICorDebugAppDomain> **ppAppDomains, ULONG * pCount)
{
_ASSERTE(ppAppDomains != NULL);
HRESULT hr = S_OK;
RSExtSmartPtr<ICorDebugAppDomainEnum> pAppEnum;
//
// Find the size of the array to hold all the appdomains
//
hr = pProcess->EnumerateAppDomains(&pAppEnum);
SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
ULONG countAppDomains = 0;
hr = pAppEnum->GetCount(&countAppDomains);
SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
//
// Allocate the array
//
RSExtSmartPtr<ICorDebugAppDomain> * pAppDomains = new RSExtSmartPtr<ICorDebugAppDomain>[countAppDomains];
*ppAppDomains = pAppDomains;
*pCount = countAppDomains;
//
// Load all the appdomains into the array
//
ULONG countDummy;
hr = pAppEnum->Next(countAppDomains, (ICorDebugAppDomain**) pAppDomains, &countDummy);
SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
SIMPLIFYING_ASSUMPTION(countDummy == countAppDomains);
//
// Now sort them based on appdomain ID.
// We generally expect a very low number of appdomains (usually 1). So a n^2 sort shouldn't be a perf
// problem here.
//
for(ULONG i = 0; i < countAppDomains; i++)
{
ULONG32 id1;
hr = pAppDomains[i]->GetID(&id1);
SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
for(ULONG j = i + 1; j < countAppDomains; j++)
{
ULONG32 id2;
hr = pAppDomains[j]->GetID(&id2);
SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
if (id1 > id2)
{
// swap values
ICorDebugAppDomain * pTemp = pAppDomains[i];
pAppDomains[i].Assign(pAppDomains[j]);
pAppDomains[j].Assign(pTemp);
// update id1 key since it's in the outer-loop.
id1 = id2;
}
}
}
return S_OK;
}
//---------------------------------------------------------------------------------------
// To emulate the V2 attach-handshake, give the shim a chance to inject fake attach events.
//
// Notes:
// Do this before the queue is empty so that HasQueuedCallbacks() doesn't toggle from false to true.
// This is called once the process is synchronized, which emulates V2 semantics on attach.
// This may be called on the Win32Event Thread from inside of Filter, or on another thread.
void ShimProcess::QueueFakeAttachEvents()
{
// Serialize this function with Dispose()
RSLockHolder lockHolder(&m_ShimProcessDisposeLock);
if (m_fIsDisposed)
return;
// The fake CreateProcess is already queued. Start queuing the rest of the events.
// The target is stopped (synchronized) this whole time.
// This will use the inspection API to look at the process and queue up the fake
// events that V2 would have sent in a similar situation. All of the callbacks to GetShimCallback()
// just queue up the events. The event queue is then drained as the V2 debugger calls continue.
HRESULT hr = S_OK;
ICorDebugProcess * pProcess = GetProcess();
//
// First, Queue all the Fake AppDomains
//
RSExtSmartPtr<ICorDebugAppDomain> * pAppDomains = NULL;
ULONG countAppDomains = 0;
hr = GetSortedAppDomains(pProcess, &pAppDomains, &countAppDomains);
if (FAILED(hr))
return;
for(ULONG i = 0; i < countAppDomains; i++)
{
// V2 expects that the debugger then attaches to each AppDomain during the Create-appdomain callback.
// This was done to allow for potential per-appdomain debugging. However, only-process
// wide debugging support was allowed in V2. The caller had to attach to all Appdomains.
GetShimCallback()->CreateAppDomain(pProcess, pAppDomains[i]);
AddDuplicateCreationEvent(pAppDomains[i]);
}
// V2 had a break in the callback queue at this point.
//
// Second, queue all Assembly and Modules events.
//
for(ULONG iAppDomain = 0; iAppDomain < countAppDomains; iAppDomain++)
{
ICorDebugAppDomain * pAppDomain = pAppDomains[iAppDomain];
//
// Send Assemblies. Must be in load order.
//
RSExtSmartPtr<ICorDebugAssemblyEnum> pAssemblyEnum;
hr = pAppDomain->EnumerateAssemblies(&pAssemblyEnum);
if (FAILED(hr))
break;
ULONG countAssemblies;
hr = pAssemblyEnum->GetCount(&countAssemblies);
if (FAILED(hr))
break;
RSExtSmartPtr<ICorDebugAssembly> * pAssemblies = new RSExtSmartPtr<ICorDebugAssembly> [countAssemblies];
m_pProcess->GetAssembliesInLoadOrder(pAppDomain, pAssemblies, countAssemblies);
for(ULONG iAssembly = 0; iAssembly < countAssemblies; iAssembly++)
{
QueueFakeAssemblyAndModuleEvent(pAssemblies[iAssembly]);
}
delete [] pAssemblies;
}
delete [] pAppDomains;
// V2 would have a break in the callback queue at this point.
// V2 would send all relevant ClassLoad events now.
//
// That includes class loads for all modules that:
// - are dynamic
// - subscribed to class load events via ICorDebugModule::EnableClassLoadCallbacks.
// We don't provide Class-loads in our emulation because:
// 1. "ClassLoad" doesn't actually mean anything here.
// 2. We have no way of enumerating "loaded" classes in the CLR. We could use the metadata to enumerate
// all classes, but that's offers no value.
// 3. ClassLoad is useful for dynamic modules to notify a debugger that the module changed and
// to update symbols; but the LoadModule/UpdateModule syms already do that.
//
// Third, Queue all Threads
//
// Use ICorDebug to enumerate threads. The order of managed threads may
// not match the order the threads were created in.
QueueFakeThreadAttachEventsNoOrder();
// Forth, Queue all Connections.
// Enumerate connections is not exposed through ICorDebug, so we need to go use a private hook on CordbProcess.
m_pProcess->QueueFakeConnectionEvents();
// For V2 jit-attach, the callback queue would also include the jit-attach event (Exception, UserBreak, MDA, etc).
// This was explicitly in the same callback queue so that a debugger would drain it as part of draining the attach
// events.
// In V3, on normal attach, the VM just sends a Sync-complete event.
// On jit-attach, the VM sends the jit-attach event and then the sync-complete.
// The shim just queues the fake attach events at the first event it gets from the left-side.
// In jit-attach, the shim will queue the fake events right before it queues the jit-attach event,
// thus keeping them in the same callback queue as V2 did.
}
// Accessor for m_attached.
bool ShimProcess::GetAttached()
{
return m_attached;
}
// We need to know whether we are in the CreateProcess callback to be able to
// return the v2.0 hresults from code:CordbProcess::SetDesiredNGENCompilerFlags
// when we are using the shim.
//
// Expose m_fInCreateProcess
bool ShimProcess::GetInCreateProcess()
{
return m_fInCreateProcess;
}
void ShimProcess::SetInCreateProcess(bool value)
{
m_fInCreateProcess = value;
}
// We need to know whether we are in the FakeLoadModule callback to be able to
// return the v2.0 hresults from code:CordbModule::SetJITCompilerFlags when
// we are using the shim.
//
// Expose m_fInLoadModule
bool ShimProcess::GetInLoadModule()
{
return m_fInLoadModule;
}
void ShimProcess::SetInLoadModule(bool value)
{
m_fInLoadModule = value;
}
// When we get a continue, we need to clear the flags indicating we're still in a callback
void ShimProcess::NotifyOnContinue ()
{
m_fInCreateProcess = false;
m_fInLoadModule = false;
}
// The RS calls this function when the stack is about to be changed in any way, e.g. continue, SetIP, etc.
void ShimProcess::NotifyOnStackInvalidate()
{
ClearAllShimStackWalk();
}
//---------------------------------------------------------------------------------------
//
// Filter HResults for ICorDebugProcess2::SetDesiredNGENCompilerFlags to emualte V2 error semantics.
// Arguments:
// hr - V3 hresult
//
// Returns:
// hresult V2 would have returned in same situation.
HRESULT ShimProcess::FilterSetNgenHresult(HRESULT hr)
{
if ((hr == CORDBG_E_MUST_BE_IN_CREATE_PROCESS) && !m_fInCreateProcess)
{
return hr;
}
if (m_attached)
{
return CORDBG_E_CANNOT_BE_ON_ATTACH;
}
return hr;
}
//---------------------------------------------------------------------------------------
// Filter HRs for ICorDebugModule::EnableJITDebugging, ICorDebugModule2::SetJITCompilerFlags
// to emulate V2 error semantics
//
// Arguments:
// hr - V3 hresult
//
// Returns:
// hresult V2 would have returned in same situation.
HRESULT ShimProcess::FilterSetJitFlagsHresult(HRESULT hr)
{
if ((hr == CORDBG_E_MUST_BE_IN_LOAD_MODULE) && !m_fInLoadModule)
{
return hr;
}
if (m_attached && (hr == CORDBG_E_MUST_BE_IN_LOAD_MODULE))
{
return CORDBG_E_CANNOT_BE_ON_ATTACH;
}
return hr;
}
// ----------------------------------------------------------------------------
// ShimProcess::LookupOrCreateShimStackWalk
//
// Description:
// Find the ShimStackWalk associated with the specified ICDThread. Create one if it's not found.
//
// Arguments:
// * pThread - the specified thread
//
// Return Value:
// Return the ShimStackWalk associated with the specified thread.
//
// Notes:
// The ShimStackWalks handed back by this function is only valid until the next time the stack is changed
// in any way. In other words, the ShimStackWalks are valid until the next time
// code:CordbThread::CleanupStack or code:CordbThread::MarkStackFramesDirty is called.
//
// ShimStackWalk and ICDThread have a 1:1 relationship. Only one ShimStackWalk will be created for any
// given ICDThread. So if two threads in the debugger are walking the same thread in the debuggee, they
// operate on the same ShimStackWalk. This is ok because ShimStackWalks walk the stack at creation time,
// cache all the frames, and become read-only after creation.
//
// Refer to code:ShimProcess::ClearAllShimStackWalk to see how ShimStackWalks are cleared.
//
ShimStackWalk * ShimProcess::LookupOrCreateShimStackWalk(ICorDebugThread * pThread)
{
ShimStackWalk * pSW = NULL;
{
// do the lookup under the Shim lock
RSLockHolder lockHolder(&m_ShimLock);
pSW = m_pShimStackWalkHashTable->Lookup(pThread);
}
if (pSW == NULL)
{
// create one if it's not found and add it to the hash table
NewHolder<ShimStackWalk> pNewSW(new ShimStackWalk(this, pThread));
{
// Do the lookup again under the Shim lock, and only add the new ShimStackWalk if no other thread
// has beaten us to it.
RSLockHolder lockHolder(&m_ShimLock);
pSW = m_pShimStackWalkHashTable->Lookup(pThread);
if (pSW == NULL)
{
m_pShimStackWalkHashTable->Add(pNewSW);
pSW = pNewSW;
// don't release the memory if all goes well
pNewSW.SuppressRelease();
}
else
{
// The NewHolder will automatically delete the ShimStackWalk when it goes out of scope.
}
}
}
return pSW;
}
// ----------------------------------------------------------------------------
// ShimProcess::ClearAllShimStackWalk
//
// Description:
// Remove and delete all the entries in the hash table of ShimStackWalks.
//
// Notes:
// Refer to code:ShimProcess::LookupOrCreateShimStackWalk to see how ShimStackWalks are created.
//
void ShimProcess::ClearAllShimStackWalk()
{
RSLockHolder lockHolder(&m_ShimLock);
// loop through all the entries in the hash table, remove them, and delete them
for (ShimStackWalkHashTable::Iterator pCurElem = m_pShimStackWalkHashTable->Begin(),
pEndElem = m_pShimStackWalkHashTable->End();
pCurElem != pEndElem;
pCurElem++)
{
ShimStackWalk * pSW = *pCurElem;
m_pShimStackWalkHashTable->Remove(pSW->GetThread());
delete pSW;
}
}
//---------------------------------------------------------------------------------------
// Called before shim dispatches an event.
//
// Arguments:
// fRealCreateProcessEvent - true if the shim is about to dispatch a real create process event (as opposed
// to one faked up by the shim itself)
// Notes:
// This may be called from within Filter, which means we may be on the win32-event-thread.
// This is called on all callbacks from the VM.
// This gives us a chance to queue fake-attach events. So call it before the Jit-attach
// event has been queued.
void ShimProcess::PreDispatchEvent(bool fRealCreateProcessEvent /*= false*/)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
// For emulating the V2 case, we need to do additional initialization before dispatching the callback to the user.
if (!m_fFirstManagedEvent)
{
// Remember that we're processing the first managed event so that we only call HandleFirstRCEvent() once
m_fFirstManagedEvent = true;
// This can fail with the incompatable version HR. The process has already been terminated if this
// is the case. This will dispatch an Error callback
// If this fails, the process is in an undefined state.
// @dbgtodo ipc-block: this will go away once we get rid
// of the IPC block.
m_pProcess->FinishInitializeIPCChannel(); // throws on error
}
{
// In jit-attach cases, the first event the shim gets is the event that triggered the jit-attach.
// Queue up the fake events now, and then once we return, our caller will queue the jit-attach event.
// In the jit-attach case, this is before a sync-complete has been sent (since the sync doesn't get sent
// until after the jit-attach event is sent).
QueueFakeAttachEventsIfNeeded(fRealCreateProcessEvent);
}
// Always request an sync (emulates V2 behavior). If LS is not sync-ready, it will ignore the request.
m_pProcess->RequestSyncAtEvent();
}
//---------------------------------------------------------------------------------------
//
// Locates DAC by finding mscordac{wks|core} next to DBI
//
// Return Value:
// Returns the module handle for DAC
// Throws on errors.
//
HMODULE ShimProcess::GetDacModule()
{
HModuleHolder hDacDll;
PathString wszAccessDllPath;
//
// Load the access DLL from the same directory as the the current CLR Debugging Services DLL.
//
if (GetClrModuleDirectory(wszAccessDllPath) != S_OK)
{
ThrowLastError();
}
// Dac Dll is named:
// mscordaccore.dll <-- coreclr
// mscordacwks.dll <-- desktop
PCWSTR eeFlavor = MAKEDLLNAME_W(W("mscordaccore"));
wszAccessDllPath.Append(eeFlavor);
hDacDll.Assign(WszLoadLibrary(wszAccessDllPath));
if (!hDacDll)
{
DWORD dwLastError = GetLastError();
if (dwLastError == ERROR_MOD_NOT_FOUND)
{
// Give a more specific error in the case where we can't find the DAC dll.
ThrowHR(CORDBG_E_DEBUG_COMPONENT_MISSING);
}
else
{
ThrowWin32(dwLastError);
}
}
hDacDll.SuppressRelease();
return (HMODULE) hDacDll;
}
MachineInfo ShimProcess::GetMachineInfo()
{
return m_machineInfo;
}
void ShimProcess::SetMarkAttachPendingEvent()
{
SetEvent(m_markAttachPendingEvent);
}
void ShimProcess::SetTerminatingEvent()
{
SetEvent(m_terminatingEvent);
}
RSLock * ShimProcess::GetShimLock()
{
return &m_ShimLock;
}
bool ShimProcess::IsThreadSuspendedOrHijacked(ICorDebugThread * pThread)
{
return m_pProcess->IsThreadSuspendedOrHijacked(pThread);
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: ShimProcess.cpp
//
//
// The V3 ICD debugging APIs have a lower abstraction level than V2.
// This provides V2 ICD debugging functionality on top of the V3 debugger object.
//*****************************************************************************
#include "stdafx.h"
#include "safewrap.h"
#include "check.h"
#include <limits.h>
#include "shimpriv.h"
//---------------------------------------------------------------------------------------
//
// Ctor for a ShimProcess
//
// Notes:
// See InitializeDataTarget in header for details of how to instantiate a ShimProcess and hook it up.
// Initial ref count is 0. This is the convention used int the RS, and it plays well with semantics
// like immediately assigning to a smart pointer (which will bump the count up to 1).
ShimProcess::ShimProcess() :
m_ref(0),
m_fFirstManagedEvent(false),
m_fInCreateProcess(false),
m_fInLoadModule(false),
m_fIsInteropDebugging(false),
m_fIsDisposed(false),
m_loaderBPReceived(false)
{
m_ShimLock.Init("ShimLock", RSLock::cLockReentrant, RSLock::LL_SHIM_LOCK);
m_ShimProcessDisposeLock.Init(
"ShimProcessDisposeLock",
RSLock::cLockReentrant | RSLock::cLockNonDbgApi,
RSLock::LL_SHIM_PROCESS_DISPOSE_LOCK);
m_eventQueue.Init(&m_ShimLock);
m_pShimCallback.Assign(new ShimProxyCallback(this)); // Throws
m_fNeedFakeAttachEvents = false;
m_ContinueStatusChangedData.Clear();
m_pShimStackWalkHashTable = new ShimStackWalkHashTable();
m_pDupeEventsHashTable = new DuplicateCreationEventsHashTable();
m_machineInfo.Clear();
m_markAttachPendingEvent = WszCreateEvent(NULL, TRUE, FALSE, NULL);
if (m_markAttachPendingEvent == NULL)
{
ThrowLastError();
}
m_terminatingEvent = WszCreateEvent(NULL, TRUE, FALSE, NULL);
if (m_terminatingEvent == NULL)
{
ThrowLastError();
}
}
//---------------------------------------------------------------------------------------
//
// ShimProcess dtor. Invoked when reference count goes to 0.
//
// Assumptions:
// Dtors should not do any interesting work. If this object has been initialized,
// then call Dispose() first.
//
//
ShimProcess::~ShimProcess()
{
// Expected that this was either already disposed first, or not initialized.
_ASSERTE(m_pWin32EventThread == NULL);
_ASSERTE(m_ShimProcessDisposeLock.IsInit());
m_ShimProcessDisposeLock.Destroy();
if (m_markAttachPendingEvent != NULL)
{
CloseHandle(m_markAttachPendingEvent);
m_markAttachPendingEvent = NULL;
}
if (m_terminatingEvent != NULL)
{
CloseHandle(m_terminatingEvent);
m_terminatingEvent = NULL;
}
// Dtor will release m_pLiveDataTarget
}
//---------------------------------------------------------------------------------------
//
// Part of initialization to hook up to process.
//
// Arguments:
//    pProcess - debuggee object to connect to. Maybe null if part of shutdown.
//
// Notes:
//    This will take a strong reference to the process object.
//    This is part of the initialization phase.
//    This should only be called once.
//
//
void ShimProcess::SetProcess(ICorDebugProcess * pProcess)
{
    PRIVATE_SHIM_CALLBACK_IN_THIS_SCOPE0(NULL);

    // Data-target should already be setup before we try to connect to a process.
    _ASSERTE(m_pLiveDataTarget != NULL);

    // Reference is kept by m_pProcess;
    m_pIProcess.Assign(pProcess);

    // Get the private shim hooks. This just exists to access private functionality that has not
    // yet been promoted to the ICorDebug interfaces.
    // Note: this downcast assumes pProcess is always the in-proc CordbProcess implementation.
    m_pProcess = static_cast<CordbProcess *>(pProcess);

    if (pProcess != NULL)
    {
        // Verify that DataTarget + new process have the same pid?
        _ASSERTE(m_pProcess->GetProcessDescriptor()->m_Pid == m_pLiveDataTarget->GetPid());
    }
}
//---------------------------------------------------------------------------------------
//
// Create a Data-Target around the live process.
//
// Arguments:
//    pProcessDescriptor - descriptor for the OS process to connect to. Must be a
//        local, same-platform process.
//
// Return Value:
//    S_OK on success; the failure HRESULT from building the platform-specific
//    data-target otherwise.
//
// Assumptions:
//    This is part of the initialization dance.
//
// Notes:
//    Only call this once, during the initialization dance.
//
HRESULT ShimProcess::InitializeDataTarget(const ProcessDescriptor * pProcessDescriptor)
{
    _ASSERTE(m_pLiveDataTarget == NULL);

    HRESULT hrBuild = BuildPlatformSpecificDataTarget(GetMachineInfo(), pProcessDescriptor, &m_pLiveDataTarget);

    if (SUCCEEDED(hrBuild))
    {
        // Data-target was created; its reference count is now 1.
        _ASSERTE(m_pLiveDataTarget != NULL);

        // Route continue-status-change notifications from the data-target back
        // into this shim instance.
        m_pLiveDataTarget->HookContinueStatusChanged(ShimProcess::ContinueStatusChanged, this);
        return S_OK;
    }

    // Build failed; no data-target should have been handed back.
    _ASSERTE(m_pLiveDataTarget == NULL);
    return hrBuild;
}
//---------------------------------------------------------------------------------------
//
// Determines if current thread is the Win32 Event Thread
//
// Return Value:
//    True iff current thread is win32 event thread, else false.
//
// Notes:
//    The win32 event thread is created by code:ShimProcess::CreateAndStartWin32ET
//
bool ShimProcess::IsWin32EventThread()
{
    // Guard against the thread not existing yet (before init / after Dispose).
    return (m_pWin32EventThread != NULL) && m_pWin32EventThread->IsWin32EventThread();
}
//---------------------------------------------------------------------------------------
//
// Add a reference
//
void ShimProcess::AddRef()
{
    // Interlocked so that AddRef/Release are safe from any thread.
    InterlockedIncrement(&m_ref);
}
//---------------------------------------------------------------------------------------
//
// Release a reference.
//
// Notes:
// When ref goes to 0, object is deleted.
//
void ShimProcess::Release()
{
LONG ref = InterlockedDecrement(&m_ref);
if (ref == 0)
{
delete this;
}
}
//---------------------------------------------------------------------------------------
//
// Dispose (Neuter) the object.
//
//
// Assumptions:
//    This is called to gracefully shutdown the ShimProcess object.
//    This must be called before destruction if the object was initialized.
//
// Notes:
//    This will release all external resources, including getting the win32 event thread to exit.
//    This can safely be called multiple times.
//
void ShimProcess::Dispose()
{
    // Serialize Dispose with any other locked access to the shim. This helps
    // protect against the debugger detaching while we're in the middle of
    // doing stuff on the ShimProcess
    RSLockHolder lockHolder(&m_ShimProcessDisposeLock);

    m_fIsDisposed = true;

    // Can't shut down the W32ET if we're on it.
    _ASSERTE(!IsWin32EventThread());

    m_eventQueue.DeleteAll();

    // Stop the event thread first so no more events arrive while we tear down.
    if (m_pWin32EventThread != NULL)
    {
        // This will block waiting for the thread to exit gracefully.
        m_pWin32EventThread->Stop();

        delete m_pWin32EventThread;
        m_pWin32EventThread = NULL;
    }

    if (m_pLiveDataTarget != NULL)
    {
        m_pLiveDataTarget->Dispose();
        m_pLiveDataTarget.Clear();
    }

    m_pIProcess.Clear();
    m_pProcess = NULL;

    _ASSERTE(m_ShimLock.IsInit());
    m_ShimLock.Destroy();

    if (m_pShimStackWalkHashTable != NULL)
    {
        // The hash table should be empty by now.  ClearAllShimStackWalk() should have been called.
        _ASSERTE(m_pShimStackWalkHashTable->GetCount() == 0);

        delete m_pShimStackWalkHashTable;
        m_pShimStackWalkHashTable = NULL;
    }

    if (m_pDupeEventsHashTable != NULL)
    {
        if (m_pDupeEventsHashTable->GetCount() > 0)
        {
            // loop through all the entries in the hash table, remove them, and delete them
            // (the table stores raw owning pointers, so each entry is freed by hand)
            for (DuplicateCreationEventsHashTable::Iterator pCurElem = m_pDupeEventsHashTable->Begin(),
                pEndElem = m_pDupeEventsHashTable->End();
                pCurElem != pEndElem;
                pCurElem++)
            {
                DuplicateCreationEventEntry * pEntry = *pCurElem;
                delete pEntry;
            }
            m_pDupeEventsHashTable->RemoveAll();
        }

        delete m_pDupeEventsHashTable;
        m_pDupeEventsHashTable = NULL;
    }
}
//---------------------------------------------------------------------------------------
// Track (and close) file handles from debug events.
//
// Arguments:
// pEvent - debug event
//
// Notes:
// Some debug events introduce file handles that the debugger needs to track and
// close on other debug events. For example, the LoadDll,CreateProcess debug
// events both give back a file handle that the debugger must close. This is generally
// done on the corresponding UnloadDll/ExitProcess debug events.
//
// Since we won't use the file handles, we'll just close them as soon as we get them.
// That way, we don't need to remember any state.
void ShimProcess::TrackFileHandleForDebugEvent(const DEBUG_EVENT * pEvent)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
HANDLE hFile = NULL;
switch(pEvent->dwDebugEventCode)
{
//
// Events that add a file handle
//
case CREATE_PROCESS_DEBUG_EVENT:
hFile = pEvent->u.CreateProcessInfo.hFile;
CloseHandle(hFile);
break;
case LOAD_DLL_DEBUG_EVENT:
hFile = pEvent->u.LoadDll.hFile;
CloseHandle(hFile);
break;
}
}
//---------------------------------------------------------------------------------------
// ThreadProc helper to drain event queue.
//
// Arguments:
//    parameter - thread proc parameter, an ICorDebugProcess*
//
// Returns
//    0.
//
// Notes:
//    This is useful when the shim queued a fake managed event (such as Control+C)
//    and needs to get the debuggee to synchronize in order to start dispatching events.
//    @dbgtodo sync: this will likely change as we iron out the Synchronization feature crew.
//
//    We do this in a new thread proc to avoid thread restrictions:
//    Can't call this on win32 event thread because that can't send the IPC event to
//    make the aysnc-break request.
//    Can't call this on the RCET because that can't send an async-break (see SendIPCEvent for details)
//    So we just spin up a new thread to do the work.
//---------------------------------------------------------------------------------------
DWORD WINAPI CallStopGoThreadProc(LPVOID parameter)
{
    ICorDebugProcess * pProcess = reinterpret_cast<ICorDebugProcess *>(parameter);

    // These operations are expected to succeed; if they fail there's nothing we can
    // really do about it. Failure on process exit/neuter/detach is ignorable.

    // Stop synchronizes the process; it blocks until the debuggee is synchronized.
    // That forces any queued events to become dispatchable.
    HRESULT hrStop = pProcess->Stop(INFINITE);
    SIMPLIFYING_ASSUMPTION_SUCCEEDED(hrStop);

    // Continue resumes the debuggee. With events queued (as expected here), Continue
    // drains the event queue instead of actually resuming the process.
    HRESULT hrContinue = pProcess->Continue(FALSE);
    SIMPLIFYING_ASSUMPTION_SUCCEEDED(hrContinue);

    // The sole purpose of this thread was to trigger an event dispatch; we're done.
    return 0;
}
//---------------------------------------------------------------------------------------
// Does default event handling for native debug events.
//
// Arguments:
//    pEvent - IN event ot handle
//    pdwContinueStatus - IN /OUT - continuation status for event.
//
// Assumptions:
//    Called when target is stopped. Caller still needs to Continue the debug event.
//    This is called on the win32 event thread.
//
// Notes:
//    Some native events require extra work before continuing. Eg, skip loader
//    breakpoint, close certain handles, etc.
//    This is only called in the manage-only case. In the interop-case, the
//    debugger will get and handle these native debug events.
void ShimProcess::DefaultEventHandler(
    const DEBUG_EVENT * pEvent,
    DWORD * pdwContinueStatus)
{
    CONTRACTL
    {
        THROWS;
    }
    CONTRACTL_END;

    //
    // Loader breakpoint
    //

    BOOL fFirstChance;
    const EXCEPTION_RECORD * pRecord = NULL;

    if (IsExceptionEvent(pEvent, &fFirstChance, &pRecord))
    {
        DWORD dwThreadId = GetThreadId(pEvent);

        switch(pRecord->ExceptionCode)
        {
        case STATUS_BREAKPOINT:
            {
                // Only the first breakpoint is treated as the OS loader breakpoint.
                if (!m_loaderBPReceived)
                {
                    m_loaderBPReceived = true;

                    // Clear the loader breakpoint
                    *pdwContinueStatus = DBG_CONTINUE;

                    // After loader-breakpoint, notify that managed attach can begin.
                    // This is done to trigger a synchronization. The shim
                    // can then send the fake attach events once the target
                    // is synced.
                    // @dbgtodo sync: not needed once shim can
                    // work on sync APIs.
                    m_pProcess->QueueManagedAttachIfNeeded(); // throws
                }
            }
            break;

        /*
        // If we handle the Ctrl-C event here and send the notification to the debugger, then we may break pre-V4
        // behaviour because the debugger may handle the event and intercept the handlers registered in the debuggee
        // process.  So don't handle the event here and let the debuggee process handle it instead.  See Dev10 issue
        // 846455 for more info.
        //
        // However, when the re-arch is completed, we will need to work with VS to define what the right behaviour
        // should be.  We don't want to rely on in-process code to handle the Ctrl-C event.
        case DBG_CONTROL_C:
            {
                // Queue a fake managed Ctrl+C event.
                m_pShimCallback->ControlCTrap(GetProcess());

                // Request an Async Break
                // This is on Win32 Event Thread, so we can't call Stop / Continue.
                // Instead, spawn a new thread, and have that call Stop/Continue, which
                // will get the RCET to drain the event queue and dispatch the ControlCTrap we just queued.
                {
                    DWORD dwDummyId;
                    CreateThread(NULL,
                        0,
                        CallStopGoThreadProc,
                        (LPVOID) GetProcess(),
                        0,
                        &dwDummyId);
                }

                // We don't worry about suspending the Control-C thread right now. The event is
                // coming asynchronously, and so it's ok if the debuggee slips forward while
                // we try to do a managed async break.


                // Clear the control-C event.
                *pdwContinueStatus = DBG_CONTINUE;
            }
            break;
        */

        }
    }

    // Native debugging APIs have an undocumented expectation that you clear for OutputDebugString.
    if (pEvent->dwDebugEventCode == OUTPUT_DEBUG_STRING_EVENT)
    {
        *pdwContinueStatus = DBG_CONTINUE;
    }

    //
    // File handles.
    //
    TrackFileHandleForDebugEvent(pEvent);
}
//---------------------------------------------------------------------------------------
// Determine if we need to change the continue status
//
// Returns:
//    True if the continue status was changed. Else false.
//
// Assumptions:
//    This is single-threaded, which is enforced by it only being called on the win32et.
//    The shim guarantees only 1 outstanding debug-event at a time.
//
// Notes:
//    See code:ShimProcess::ContinueStatusChangedWorker for big picture.
//    Continue status is changed from a data-target callback which invokes
//    code:ShimProcess::ContinueStatusChangedWorker.
//    Call code:ShimProcess::ContinueStatusChangedData::Clear to clear the 'IsSet' bit.
//
bool ShimProcess::ContinueStatusChangedData::IsSet()
{
    // A non-zero thread id doubles as the "a change is pending" flag.
    return m_dwThreadId != 0;
}
//---------------------------------------------------------------------------------------
// Clears the bit marking that a continue-status change is pending.
//
// Assumptions:
//    This is single-threaded, which is enforced by it only being called on the win32et.
//    The shim guarantees only 1 outstanding debug-event at a time.
//
// Notes:
//    See code:ShimProcess::ContinueStatusChangedWorker for big picture.
//    This makes code:ShimProcess::ContinueStatusChangedData::IsSet return false.
//    This can safely be called multiple times in a row.
//
void ShimProcess::ContinueStatusChangedData::Clear()
{
    // Thread id 0 means "not set"; see IsSet().
    m_dwThreadId = 0;
}
//---------------------------------------------------------------------------------------
// Callback invoked from data-target when continue status is changed.
//
// Arguments:
//    pUserData - data we supplied to the callback. a 'this' pointer.
//    dwThreadId - the tid whose continue status is changing
//    dwContinueStatus - the new continue status.
//
// Notes:
//    Static trampoline: recovers the ShimProcess instance from pUserData and
//    forwards to the real worker.
//
// Static
HRESULT ShimProcess::ContinueStatusChanged(void * pUserData, DWORD dwThreadId, CORDB_CONTINUE_STATUS dwContinueStatus)
{
    ShimProcess * pThis = reinterpret_cast<ShimProcess *>(pUserData);
    return pThis->ContinueStatusChangedWorker(dwThreadId, dwContinueStatus);
}
//---------------------------------------------------------------------------------------
// Real worker callback invoked from data-target when continue status is changed.
//
// Arguments:
//    dwThreadId - the tid whose continue status is changing
//    dwContinueStatus - the new continue status.
//
// Notes:
//    ICorDebugProcess4::Filter returns an initial continue status (heavily biased to 'gn').
//    Some ICorDebug operations may need to change the continue status that filter returned.
//    For example, on windows, hijacking a thread at an unhandled exception would need to
//    change the status to 'gh' (since continuing 2nd chance exception 'gn' will tear down the
//    process and the hijack would never execute).
//
//    Such operations will invoke into the data-target (code:ICorDebugMutableDataTarget::ContinueStatusChanged)
//    to notify the debugger that the continue status was changed.
//
//    The shim only executes such operations on the win32-event thread in a small window between
//    WaitForDebugEvent and Continue. Therefore, we know:
//    * the callback must come on the Win32EventThread (which means our handling the callback is
//      single-threaded.
//    * We only have 1 outstanding debug event to worry about at a time. This simplifies our tracking.
//
//    The shim tracks the outstanding change request in m_ContinueStatusChangedData.
HRESULT ShimProcess::ContinueStatusChangedWorker(DWORD dwThreadId, CORDB_CONTINUE_STATUS dwContinueStatus)
{
    // Should only be set once. This is only called on the win32 event thread, which protects against races.
    _ASSERTE(IsWin32EventThread());
    _ASSERTE(!m_ContinueStatusChangedData.IsSet());

    m_ContinueStatusChangedData.m_dwThreadId = dwThreadId;
    m_ContinueStatusChangedData.m_status = dwContinueStatus;

    // Setting dwThreadId to non-zero should now mark this as set.
    _ASSERTE(m_ContinueStatusChangedData.IsSet());
    return S_OK;
}
//---------------------------------------------------------------------------------------
//
// Add a duplicate creation event entry for the specified key.
//
// Arguments:
//    pKey - the key of the entry to be added; this is expected to be an
//           ICDProcess/ICDAppDomain/ICDThread/ICDAssembly/ICDModule
//
// Assumptions:
//    pKey is really an interface pointer of one of the types mentioned above
//
// Notes:
//    We have to keep track of which creation events we have sent already because some runtime data structures
//    are discoverable through enumeration before they send their creation events.  As a result, we may have
//    faked up a creation event for a data structure during attach, and then later on get another creation
//    event for the same data structure.  VS is not resilient in the face of multiple creation events for
//    the same data structure.
//
//    Needless to say this is a problem in attach scenarios only.  However, keep in mind that for CoreCLR,
//    launch really is early attach.  For early attach, we get three creation events up front: a create
//    process, a create appdomain, and a create thread.
//
void ShimProcess::AddDuplicateCreationEvent(void * pKey)
{
    NewHolder<DuplicateCreationEventEntry> pEntry(new DuplicateCreationEventEntry(pKey));
    m_pDupeEventsHashTable->Add(pEntry);
    // Add succeeded (didn't throw): the table now owns the entry, so disarm the holder.
    pEntry.SuppressRelease();
}
//---------------------------------------------------------------------------------------
//
// Check whether the specified key exists in the hash table. If so, remove it.
//
// Arguments:
// pKey - the key of the entry to check; this is expected to be an
// ICDProcess/ICDAppDomain/ICDThread/ICDAssembly/ICDModule
//
// Return Value:
// Returns true if the entry exists. The entry will have been removed because we can't have more than two
// duplicates for any given event.
//
// Assumptions:
// pKey is really an interface pointer of one of the types mentioned above
//
// Notes:
// See code:ShimProcess::AddDuplicateCreationEvent.
//
bool ShimProcess::RemoveDuplicateCreationEventIfPresent(void * pKey)
{
// We only worry about duplicate events in attach scenarios.
if (GetAttached())
{
// Only do the check if the hash table actually contains entries.
if (m_pDupeEventsHashTable->GetCount() > 0)
{
// Check if this is a dupe.
DuplicateCreationEventEntry * pResult = m_pDupeEventsHashTable->Lookup(pKey);
if (pResult != NULL)
{
// This is a dupe. We can't have a dupe twice, so remove it.
// This will help as a bit of optimization, since we will no longer check the hash table if
// its count reaches 0.
m_pDupeEventsHashTable->Remove(pKey);
delete pResult;
return true;
}
}
}
return false;
}
//---------------------------------------------------------------------------------------
// Gets the exception record format of the host
//
// Returns:
//    The CorDebugRecordFormat for the host architecture.
//
// Notes:
//    This corresponds to the definition EXCEPTION_RECORD on the host-architecture.
//    It can be passed into ICorDebugProcess4::Filter.
CorDebugRecordFormat GetHostExceptionRecordFormat()
{
    // Selected at compile time based on the host pointer size.
#if defined(HOST_64BIT)
    return FORMAT_WINDOWS_EXCEPTIONRECORD64;
#else
    return FORMAT_WINDOWS_EXCEPTIONRECORD32;
#endif
}
//---------------------------------------------------------------------------------------
// Main event handler for native debug events. Must also ensure Continue is called.
//
// Arguments:
//   pEvent - debug event to handle
//
// Assumptions:
//   Caller did a Flush() if needed.
//
// Notes:
//   The main Handle native debug events.
//   This must call back into ICD to let ICD filter the debug event (in case it's a managed notification).
//
//   If we're interop-debugging (V2), then the debugger is expecting the debug events. In that case,
//   we go through the V2 interop-debugging logic to queue / dispatch the events.
//   If we're managed-only debugging, then the shim provides a default handler for the native debug.
//   This includes some basic work (skipping the loader breakpoint, close certain handles, etc).
//---------------------------------------------------------------------------------------
HRESULT ShimProcess::HandleWin32DebugEvent(const DEBUG_EVENT * pEvent)
{
    _ASSERTE(IsWin32EventThread());

    //
    // If this is an exception event, then we need to feed it into the CLR.
    //
    BOOL dwFirstChance = FALSE;
    const EXCEPTION_RECORD * pRecord = NULL;
    const DWORD dwThreadId = GetThreadId(pEvent);

    bool fContinueNow = true;

    // If true, we're continuing (unhandled) a 2nd-chance exception
    bool fExceptionGoingUnhandled = false;

    //
    const DWORD kDONTCARE = 0;
    DWORD dwContinueStatus = kDONTCARE;

    if (IsExceptionEvent(pEvent, &dwFirstChance, &pRecord))
    {
        // As a diagnostic aid we can configure the debugger to assert when the debuggee does DebugBreak()
#ifdef DEBUG
        static ConfigDWORD config;
        DWORD fAssert = config.val(CLRConfig::INTERNAL_DbgAssertOnDebuggeeDebugBreak);
        if (fAssert)
        {
            // If we got a 2nd-chance breakpoint, then it's extremely likely that it's from an
            // _ASSERTE in the target and we really want to know about it now before we kill the
            // target. The debuggee will exit once we continue (unless we are mixed-mode debugging), so alert now.
            // This assert could be our only warning of various catastrophic failures in the left-side.
            if (!dwFirstChance && (pRecord->ExceptionCode == STATUS_BREAKPOINT) && !m_fIsInteropDebugging)
            {
                DWORD pid = (m_pLiveDataTarget == NULL) ? 0 : m_pLiveDataTarget->GetPid();

                CONSISTENCY_CHECK_MSGF(false,
                    ("Unhandled breakpoint exception in debuggee (pid=%d (0x%x)) on thread %d(0x%x)\n"
                    "This may mean there was an assert in the debuggee on that thread.\n"
                    "\n"
                    "You should attach to that process (non-invasively) and get a callstack of that thread.\n"
                    "(This assert only occurs when CLRConfig::INTERNAL_DebuggerAssertOnDebuggeeDebugBreak is set)\n",
                    pid, pid, dwThreadId,dwThreadId));
            }
        }
#endif

        // We pass the Shim's proxy callback object, which will just take the callbacks and queue them
        // to an event-queue in the shim. When we get the sync-complete event, the shim
        // will then drain the event queue and dispatch the events to the user's callback object.
        const DWORD dwFlags = dwFirstChance ? 1 : 0;

        m_ContinueStatusChangedData.Clear();

        // If ICorDebug doesn't care about this exception, it will leave dwContinueStatus unchanged.
        RSExtSmartPtr<ICorDebugProcess4> pProcess4;
        GetProcess()->QueryInterface(IID_ICorDebugProcess4, (void**) &pProcess4);

        HRESULT hrFilter =  pProcess4->Filter(
            (const BYTE*) pRecord,
            sizeof(EXCEPTION_RECORD),
            GetHostExceptionRecordFormat(),
            dwFlags,
            dwThreadId,
            m_pShimCallback,
            &dwContinueStatus);
        if (FAILED(hrFilter))
        {
            // Filter failed (eg. DAC couldn't be loaded), return the
            // error so it can become an unrecoverable error.
            return hrFilter;
        }

        // For unhandled exceptions, hijacking if needed.
        if (!dwFirstChance)
        {
            // May invoke data-target callback (which may call code:ShimProcess::ContinueStatusChanged) to change continue status.
            if (!m_pProcess->HijackThreadForUnhandledExceptionIfNeeded(dwThreadId))
            {
                // We decided not to hijack, so this exception is going to go unhandled
                fExceptionGoingUnhandled = true;
            }

            if (m_ContinueStatusChangedData.IsSet())
            {
                _ASSERTE(m_ContinueStatusChangedData.m_dwThreadId == dwThreadId);

                // Claiming this now means we won't do any other processing on the exception event.
                // This means the interop-debugging logic will never see 2nd-chance managed exceptions.
                dwContinueStatus = m_ContinueStatusChangedData.m_status;
            }
        }
    }

    // Do standard event handling, including Handling loader-breakpoint,
    // and callback into CordbProcess for Attach if needed.
    HRESULT hrIgnore = S_OK;
    EX_TRY
    {
        // For NonClr notifications, allow extra processing.
        // This includes both non-exception events, and exception events that aren't
        // specific CLR debugging services notifications.
        if (dwContinueStatus == kDONTCARE)
        {
            if (m_fIsInteropDebugging)
            {
                // Interop-debugging logic will handle the continue.
                fContinueNow = false;
#if defined(FEATURE_INTEROP_DEBUGGING)
                // @dbgtodo interop: All the interop-debugging logic is still in CordbProcess.
                // Call back into that. This will handle Continuing the debug event.
                m_pProcess->HandleDebugEventForInteropDebugging(pEvent);
#else
                _ASSERTE(!"Interop debugging not supported");
#endif
            }
            else
            {
                dwContinueStatus = DBG_EXCEPTION_NOT_HANDLED;

                // For managed-only debugging, there's no user handler for native debug events,
                // and so we still need to do some basic work on certain debug events.
                DefaultEventHandler(pEvent, &dwContinueStatus);

                // This is the managed-only case. No reason to keep the target win32 frozen, so continue it immediately.
                _ASSERTE(fContinueNow);
            }
        }
    }
    EX_CATCH_HRESULT(hrIgnore);
    // Don't expect errors here (but could probably return it up to become an
    // unrecoverable error if necessary). We still want to call Continue though.
    SIMPLIFYING_ASSUMPTION_SUCCEEDED(hrIgnore);

    //
    // Continue the debuggee if needed.
    //
    if (fContinueNow)
    {
        BOOL fContinueOk = GetNativePipeline()->ContinueDebugEvent(
            GetProcessId(pEvent),
            dwThreadId,
            dwContinueStatus);
        (void)fContinueOk; //prevent "unused variable" error from GCC
        SIMPLIFYING_ASSUMPTION(fContinueOk);

        if (fExceptionGoingUnhandled)
        {
            _ASSERTE(dwContinueStatus == DBG_EXCEPTION_NOT_HANDLED);
            // We just passed a 2nd-chance exception back to the OS which may have now invoked
            // Windows error-reporting logic which suspended all threads in the target.  Since we're
            // still debugging and may want to break, inspect state and even detach (eg. to attach
            // a different sort of debugger that can handle the exception) we need to let our threads run.
            // Note that when WER auto-invokes a debugger it doesn't suspend threads, so it doesn't really
            // make sense for them to be suspended now when a debugger is already attached.
            // A better solution may be to suspend this faulting thread before continuing the event, do an
            // async-break and give the debugger a notification of an unhandled exception.  But this will require
            // an ICorDebug API change, and also makes it harder to reliably get the WER dialog box once we're
            // ready for it.
            // Unfortunately we have to wait for WerFault.exe to start and actually suspend the threads, and
            // there doesn't appear to be any better way than to just sleep for a little here.  In practice 200ms
            // seems like more than enough, but this is so uncommon of a scenario that a half-second delay
            // (just to be safe) isn't a problem.
            // Provide an undocumented knob to turn this behavior off in the very rare case it's not what we want
            // (eg. we're trying to debug something that races with crashing / terminating the process on multiple
            // threads)
            static ConfigDWORD config;
            DWORD fSkipResume = config.val(CLRConfig::UNSUPPORTED_DbgDontResumeThreadsOnUnhandledException);
            if (!fSkipResume)
            {
                ::Sleep(500);
            }
        }
    }

    return S_OK;
}
// Trivial accessor to get the event queue.
// Caller must not hold onto the pointer past the shim's lifetime.
ManagedEventQueue * ShimProcess::GetManagedEventQueue()
{
    return &m_eventQueue;
}
// Combines GetManagedEventQueue() and Dequeue() into a single function
// that holds m_ShimProcessDisposeLock for the duration
ManagedEvent * ShimProcess::DequeueManagedEvent()
{
    // Serialize this function with Dispose()
    RSLockHolder lockHolder(&m_ShimProcessDisposeLock);

    // Once disposed, the queue has been deleted; report "no event" instead.
    if (m_fIsDisposed)
        return NULL;

    return m_eventQueue.Dequeue();
}
// Trivial accessor to get Shim's proxy callback object.
ShimProxyCallback * ShimProcess::GetShimCallback()
{
    return m_pShimCallback;
}
// Trivial accessor to get the ICDProcess for the debuggee.
// A ShimProcess object can then provide V2 functionality by building it on V3 functionality
// exposed by the ICDProcess object.
ICorDebugProcess * ShimProcess::GetProcess()
{
    return m_pIProcess;
}
// Trivial accessor to get the data-target for the debuggee.
// The data-target lets us access the debuggee, especially reading debuggee memory.
ICorDebugMutableDataTarget * ShimProcess::GetDataTarget()
{
    return m_pLiveDataTarget;
}
// Trivial accessor to get the raw native event pipeline.
// In V3, ICorDebug no longer owns the event thread and it does not own the event pipeline either.
// NOTE(review): assumes m_pWin32EventThread is non-NULL (i.e. not called after Dispose) - confirm with callers.
INativeEventPipeline * ShimProcess::GetNativePipeline()
{
    return m_pWin32EventThread->GetNativePipeline();
}
// Trivial accessor to expose the W32ET thread to the CordbProcess so that it can emulate V2 behavior.
// In V3, ICorDebug no longer owns the event thread and it does not own the event pipeline either.
// The Win32 Event Thread is the only thread that can use the native pipeline
// see code:ShimProcess::GetNativePipeline.
CordbWin32EventThread * ShimProcess::GetWin32EventThread()
{
    return m_pWin32EventThread;
}
// Trivial accessor to mark whether we're interop-debugging.
// Retrieved via code:ShimProcess::IsInteropDebugging
void ShimProcess::SetIsInteropDebugging(bool fIsInteropDebugging)
{
    m_fIsInteropDebugging = fIsInteropDebugging;
}
// Trivial accessor to check if we're interop-debugging.
// This affects how we handle native debug events.
// The significant usage of this is in code:ShimProcess::HandleWin32DebugEvent
bool ShimProcess::IsInteropDebugging()
{
    return m_fIsInteropDebugging;
}
//---------------------------------------------------------------------------------------
// Begin queueing the fake attach events.
//
// Notes:
//    See code:ShimProcess::QueueFakeAttachEvents for more about "fake attach events".
//
//    This marks that we need to send fake attach events, and queues a CreateProcess.
//    Caller calls code:ShimProcess::QueueFakeAttachEventsIfNeeded to finish queuing
//    the rest of the fake attach events.
void ShimProcess::BeginQueueFakeAttachEvents()
{
    m_fNeedFakeAttachEvents = true;

    // Put a fake CreateProcess event in the queue.
    // This will not be drained until we get a Sync-Complete from the Left-side.
    GetShimCallback()->QueueCreateProcess(GetProcess());
    // Remember that a CreateProcess was already faked so a later real one is suppressed.
    AddDuplicateCreationEvent(GetProcess());
}
//---------------------------------------------------------------------------------------
// potentially Queue fake attach events like we did in V2.
//
// Arguments:
// fRealCreateProcessEvent - true if the shim is about to dispatch a real create process event (as opposed
// to one faked up by the shim itself)
//
// Notes:
// See code:ShimProcess::QueueFakeAttachEvents for details.
void ShimProcess::QueueFakeAttachEventsIfNeeded(bool fRealCreateProcessEvent)
{
// This was set high in code:ShimProcess::BeginQueueFakeAttachEvents
if (!m_fNeedFakeAttachEvents)
{
return;
}
m_fNeedFakeAttachEvents = false;
// If the first event we get after attaching is a create process event, then this is an early attach
// scenario and we don't need to queue any fake attach events.
if (!fRealCreateProcessEvent)
{
HRESULT hr = S_OK;
EX_TRY
{
QueueFakeAttachEvents();
}
EX_CATCH_HRESULT(hr);
}
}
//---------------------------------------------------------------------------------------
// Send fake Thread-create events for attach, using an arbitrary order.
//
// Returns:
//    S_OK on success, else error.
//
// Notes:
//    This sends fake thread-create events, ala V2 attach.
//    See code:ShimProcess::QueueFakeAttachEvents for details
//
//    The order of thread creates is random and at the mercy of ICorDebugProcess::EnumerateThreads.
//    Whidbey would send thread creates in the order of the OS's native thread
//    list. Since Arrowhead no longer sends fake attach events, the shim simulates
//    the fake attach events. But ICorDebug doesn't provide a way to get the
//    same order that V2 used. So without using platform-specific thread-enumeration,
//    we can't get the V2 ordering.
//
HRESULT ShimProcess::QueueFakeThreadAttachEventsNoOrder()
{
    ICorDebugProcess * pProcess = GetProcess();

    RSExtSmartPtr<ICorDebugThreadEnum> pThreadEnum;
    RSExtSmartPtr<ICorDebugThread> pThread;

    // V2 would only send create threads after a thread had run managed code.
    // V3 has a discovery model where Enumeration can find threads before they've run managed code.
    // So the emulation here may send some additional create-thread events that v2 didn't send.
    HRESULT hr = pProcess->EnumerateThreads(&pThreadEnum);
    SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
    if (FAILED(hr))
    {
        return hr;
    }

    ULONG cDummy;

    while(SUCCEEDED(pThreadEnum->Next(1, &pThread, &cDummy)) && (pThread != NULL))
    {
        RSExtSmartPtr<ICorDebugAppDomain> pAppDomain;
        hr = pThread->GetAppDomain(&pAppDomain);

        // Getting the appdomain shouldn't fail. If it does, we can't dispatch
        // this callback, but we can still dispatch the other thread creates.
        SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
        if (pAppDomain != NULL)
        {
            GetShimCallback()->CreateThread(pAppDomain, pThread);
            AddDuplicateCreationEvent(pThread);
        }
        // Release this iteration's thread reference before Next() reuses the slot.
        pThread.Clear();
    }

    return S_OK;
}
//---------------------------------------------------------------------------------------
// Queues the fake Assembly load event and a fake Module load event for each module.
//
// Arguments:
//    pAssembly - non-null, the assembly to queue.
//
// Notes:
//    Helper for code:ShimProcess::QueueFakeAttachEvents.
//    Queues create events for the assembly and for all modules within the
//    assembly. Most assemblies only have 1 module.
void ShimProcess::QueueFakeAssemblyAndModuleEvent(ICorDebugAssembly * pAssembly)
{
    RSExtSmartPtr<ICorDebugAppDomain> pAppDomain;
    // Every fake callback below is raised against the assembly's owning appdomain.
    HRESULT hr = pAssembly->GetAppDomain(&pAppDomain);
    SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
    //
    // Send the fake Load Assembly event.
    //
    GetShimCallback()->LoadAssembly(pAppDomain, pAssembly);
    // Record that this assembly's creation was already announced, so a later real
    // creation event for the same object can be recognized as a duplicate.
    AddDuplicateCreationEvent(pAssembly);
    //
    // Send Modules - must be in load order
    //
    RSExtSmartPtr<ICorDebugModuleEnum> pModuleEnum;
    hr = pAssembly->EnumerateModules(&pModuleEnum);
    SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
    ULONG countModules;
    hr = pModuleEnum->GetCount(&countModules);
    SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
    // ISSUE WORKAROUND 835869
    // The CordbEnumFilter used as the implementation of CordbAssembly::EnumerateModules has
    // a ref counting bug in it. It adds one ref to each item when it is constructed and never
    // removes that ref. Expected behavior would be that it adds a ref at construction, another on
    // every call to next, and releases the construction ref when the enumerator is destroyed. The
    // user is expected to release the reference they receive from Next. Thus enumerating exactly
    // one time and calling Release() does the correct thing regardless of whether this bug is present
    // or not. Note that with the bug the enumerator holds 0 references at the end of this loop,
    // however the assembly also holds references so the modules will not be prematurely released.
    for(ULONG i = 0; i < countModules; i++)
    {
        ICorDebugModule* pModule = NULL;
        ULONG countFetched = 0;
        pModuleEnum->Next(1, &pModule, &countFetched);
        _ASSERTE(pModule != NULL);
        if(pModule != NULL)
        {
            pModule->Release();
        }
    }
    // The enumerator above gave no ordering guarantee; re-fetch the modules sorted by
    // load order, which is the order a V2 debugger expects the events in.
    RSExtSmartPtr<ICorDebugModule> * pModules = new RSExtSmartPtr<ICorDebugModule> [countModules];
    m_pProcess->GetModulesInLoadOrder(pAssembly, pModules, countModules);
    for(ULONG iModule = 0; iModule < countModules; iModule++)
    {
        ICorDebugModule * pModule = pModules[iModule];
        GetShimCallback()->FakeLoadModule(pAppDomain, pModule);
        AddDuplicateCreationEvent(pModule);
        // V2 may send UpdatePdbStreams for certain modules (like dynamic or in-memory modules).
        // We don't yet have this support for out-of-proc.
        // When the LoadModule event that we just queued is actually dispatched, it will
        // send an IPC event in-process that will collect the information and queue the event
        // at that time.
        // @dbgtodo : I don't think the above is true anymore - clean it up?
        RSExtSmartPtr<IStream> pSymbolStream;
        // ICorDebug has no public way to request raw symbols. This is by-design because we
        // don't want people taking a dependency on a specific format (to give us the ability
        // to innovate for the RefEmit case). So we must use a private hook here to get the
        // symbol data.
        CordbModule * pCordbModule = static_cast<CordbModule *>(pModule);
        IDacDbiInterface::SymbolFormat symFormat = IDacDbiInterface::kSymbolFormatNone;
        EX_TRY
        {
            symFormat = pCordbModule->GetInMemorySymbolStream(&pSymbolStream);
        }
        EX_CATCH_HRESULT(hr);
        SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr); // Shouldn't be any errors trying to read symbols
        // Only pass the raw symbols onto the debugger if they're in PDB format (all that was supported
        // in V2). Note that we could have avoided creating a stream for the non-PDB case, but we'd have
        // to refactor GetInMemorySymbolStream and the perf impact should be negligable.
        if (symFormat == IDacDbiInterface::kSymbolFormatPDB)
        {
            _ASSERTE(pSymbolStream != NULL); // symFormat should have been kSymbolFormatNone if null stream
            GetShimCallback()->UpdateModuleSymbols(pAppDomain, pModule, pSymbolStream);
        }
    }
    delete [] pModules;
}
//---------------------------------------------------------------------------------------
// Get an array of appdomains, sorted by increasing AppDomain ID
//
// Arguments:
//    pProcess - process containing the appdomains
//    ppAppDomains - array that this function will allocate to hold appdomains
//    pCount - size of ppAppDomains array
//
// Returns:
//    S_OK (the SIMPLIFYING_ASSUMPTION checks are debug-only; failures of the
//    intermediate calls are not propagated).
//
// Assumptions:
//    Caller must delete [] ppAppDomains
//
// Notes
//    This is used as part of code:ShimProcess::QueueFakeAttachEvents.
//    The fake attach events want appdomains in creation order. ICorDebug doesn't provide
//    this ordering in the enumerators.
//
//    This returns the appdomains sorted in order of increasing AppDomain ID, since that's the best
//    approximation of creation order that we have.
//    @dbgtodo - determine if ICD will provide
//    ordered enumerators
//
HRESULT GetSortedAppDomains(ICorDebugProcess * pProcess, RSExtSmartPtr<ICorDebugAppDomain> **ppAppDomains, ULONG * pCount)
{
    _ASSERTE(ppAppDomains != NULL);
    HRESULT hr = S_OK;
    RSExtSmartPtr<ICorDebugAppDomainEnum> pAppEnum;
    //
    // Find the size of the array to hold all the appdomains
    //
    hr = pProcess->EnumerateAppDomains(&pAppEnum);
    SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
    ULONG countAppDomains = 0;
    hr = pAppEnum->GetCount(&countAppDomains);
    SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
    //
    // Allocate the array
    //
    RSExtSmartPtr<ICorDebugAppDomain> * pAppDomains = new RSExtSmartPtr<ICorDebugAppDomain>[countAppDomains];
    *ppAppDomains = pAppDomains;
    *pCount = countAppDomains;
    //
    // Load all the appdomains into the array
    //
    ULONG countDummy;
    // Note: Next() writes raw interface pointers into the smart-pointer slots;
    // the references it hands out are owned by the RSExtSmartPtr array entries.
    hr = pAppEnum->Next(countAppDomains, (ICorDebugAppDomain**) pAppDomains, &countDummy);
    SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
    SIMPLIFYING_ASSUMPTION(countDummy == countAppDomains);
    //
    // Now sort them based on appdomain ID.
    // We generally expect a very low number of appdomains (usually 1). So a n^2 sort shouldn't be a perf
    // problem here.
    //
    for(ULONG i = 0; i < countAppDomains; i++)
    {
        ULONG32 id1;
        hr = pAppDomains[i]->GetID(&id1);
        SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
        for(ULONG j = i + 1; j < countAppDomains; j++)
        {
            ULONG32 id2;
            hr = pAppDomains[j]->GetID(&id2);
            SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
            if (id1 > id2)
            {
                // swap values
                // pTemp holds a raw (unowned) copy while the two smart-pointer slots
                // are re-assigned; presumably Assign addrefs its argument before
                // releasing the old value, so the object stays alive across the swap.
                ICorDebugAppDomain * pTemp = pAppDomains[i];
                pAppDomains[i].Assign(pAppDomains[j]);
                pAppDomains[j].Assign(pTemp);
                // update id1 key since it's in the outer-loop.
                id1 = id2;
            }
        }
    }
    return S_OK;
}
//---------------------------------------------------------------------------------------
// To emulate the V2 attach-handshake, give the shim a chance to inject fake attach events.
//
// Notes:
//    Do this before the queue is empty so that HasQueuedCallbacks() doesn't toggle from false to true.
//    This is called once the process is synchronized, which emulates V2 semantics on attach.
//    This may be called on the Win32Event Thread from inside of Filter, or on another thread.
void ShimProcess::QueueFakeAttachEvents()
{
    // Serialize this function with Dispose()
    RSLockHolder lockHolder(&m_ShimProcessDisposeLock);
    if (m_fIsDisposed)
        return;
    // The fake CreateProcess is already queued. Start queuing the rest of the events.
    // The target is stopped (synchronized) this whole time.
    // This will use the inspection API to look at the process and queue up the fake
    // events that V2 would have sent in a similar situation. All of the callbacks to GetShimCallback()
    // just queue up the events. The event queue is then drained as the V2 debugger calls continue.
    HRESULT hr = S_OK;
    ICorDebugProcess * pProcess = GetProcess();
    //
    // First, Queue all the Fake AppDomains
    //
    RSExtSmartPtr<ICorDebugAppDomain> * pAppDomains = NULL;
    ULONG countAppDomains = 0;
    // Sorted by AppDomain ID as the best available approximation of creation order.
    hr = GetSortedAppDomains(pProcess, &pAppDomains, &countAppDomains);
    if (FAILED(hr))
        return;
    for(ULONG i = 0; i < countAppDomains; i++)
    {
        // V2 expects that the debugger then attaches to each AppDomain during the Create-appdomain callback.
        // This was done to allow for potential per-appdomain debugging. However, only-process
        // wide debugging support was allowed in V2. The caller had to attach to all Appdomains.
        GetShimCallback()->CreateAppDomain(pProcess, pAppDomains[i]);
        AddDuplicateCreationEvent(pAppDomains[i]);
    }
    // V2 had a break in the callback queue at this point.
    //
    // Second, queue all Assembly and Modules events.
    //
    for(ULONG iAppDomain = 0; iAppDomain < countAppDomains; iAppDomain++)
    {
        ICorDebugAppDomain * pAppDomain = pAppDomains[iAppDomain];
        //
        // Send Assemblies. Must be in load order.
        //
        RSExtSmartPtr<ICorDebugAssemblyEnum> pAssemblyEnum;
        hr = pAppDomain->EnumerateAssemblies(&pAssemblyEnum);
        if (FAILED(hr))
            break;
        ULONG countAssemblies;
        hr = pAssemblyEnum->GetCount(&countAssemblies);
        if (FAILED(hr))
            break;
        RSExtSmartPtr<ICorDebugAssembly> * pAssemblies = new RSExtSmartPtr<ICorDebugAssembly> [countAssemblies];
        m_pProcess->GetAssembliesInLoadOrder(pAppDomain, pAssemblies, countAssemblies);
        for(ULONG iAssembly = 0; iAssembly < countAssemblies; iAssembly++)
        {
            QueueFakeAssemblyAndModuleEvent(pAssemblies[iAssembly]);
        }
        delete [] pAssemblies;
    }
    // Note: on a mid-loop break above, the appdomain array is still freed here.
    delete [] pAppDomains;
    // V2 would have a break in the callback queue at this point.
    // V2 would send all relevant ClassLoad events now.
    //
    // That includes class loads for all modules that:
    //  - are dynamic
    //  - subscribed to class load events via ICorDebugModule::EnableClassLoadCallbacks.
    // We don't provide Class-loads in our emulation because:
    // 1. "ClassLoad" doesn't actually mean anything here.
    // 2. We have no way of enumerating "loaded" classes in the CLR. We could use the metadata to enumerate
    //    all classes, but that's offers no value.
    // 3. ClassLoad is useful for dynamic modules to notify a debugger that the module changed and
    //    to update symbols; but the LoadModule/UpdateModule syms already do that.
    //
    // Third, Queue all Threads
    //
    // Use ICorDebug to enumerate threads. The order of managed threads may
    // not match the order the threads were created in.
    QueueFakeThreadAttachEventsNoOrder();
    // Forth, Queue all Connections.
    // Enumerate connections is not exposed through ICorDebug, so we need to go use a private hook on CordbProcess.
    m_pProcess->QueueFakeConnectionEvents();
    // For V2 jit-attach, the callback queue would also include the jit-attach event (Exception, UserBreak, MDA, etc).
    // This was explicitly in the same callback queue so that a debugger would drain it as part of draining the attach
    // events.
    // In V3, on normal attach, the VM just sends a Sync-complete event.
    // On jit-attach, the VM sends the jit-attach event and then the sync-complete.
    // The shim just queues the fake attach events at the first event it gets from the left-side.
    // In jit-attach, the shim will queue the fake events right before it queues the jit-attach event,
    // thus keeping them in the same callback queue as V2 did.
}
// Accessor for m_attached.
// Returns true if this debugging session was started via attach (as opposed to launch).
bool ShimProcess::GetAttached()
{
    return m_attached;
}
// We need to know whether we are in the CreateProcess callback to be able to
// return the v2.0 hresults from code:CordbProcess::SetDesiredNGENCompilerFlags
// when we are using the shim.
//
// Expose m_fInCreateProcess
bool ShimProcess::GetInCreateProcess()
{
    return m_fInCreateProcess;
}
// Setter for m_fInCreateProcess; set while the CreateProcess callback is being dispatched.
void ShimProcess::SetInCreateProcess(bool value)
{
    m_fInCreateProcess = value;
}
// We need to know whether we are in the FakeLoadModule callback to be able to
// return the v2.0 hresults from code:CordbModule::SetJITCompilerFlags when
// we are using the shim.
//
// Expose m_fInLoadModule
bool ShimProcess::GetInLoadModule()
{
    return m_fInLoadModule;
}
// Setter for m_fInLoadModule; set while a FakeLoadModule callback is being dispatched.
void ShimProcess::SetInLoadModule(bool value)
{
    m_fInLoadModule = value;
}
// When we get a continue, we need to clear the flags indicating we're still in a callback
void ShimProcess::NotifyOnContinue ()
{
    m_fInCreateProcess = false;
    m_fInLoadModule = false;
}
// The RS calls this function when the stack is about to be changed in any way, e.g. continue, SetIP, etc.
// Throws away all cached ShimStackWalks since they would be stale.
void ShimProcess::NotifyOnStackInvalidate()
{
    ClearAllShimStackWalk();
}
//---------------------------------------------------------------------------------------
//
// Filter HResults for ICorDebugProcess2::SetDesiredNGENCompilerFlags to emulate V2 error semantics.
// Arguments:
//    hr - V3 hresult
//
// Returns:
//    hresult V2 would have returned in same situation.
HRESULT ShimProcess::FilterSetNgenHresult(HRESULT hr)
{
    // "Must be in CreateProcess" passes through untouched when we genuinely are
    // outside the CreateProcess callback; that matches what V2 reported.
    const bool fOutsideCreateProcessFailure =
        (hr == CORDBG_E_MUST_BE_IN_CREATE_PROCESS) && !m_fInCreateProcess;
    // Otherwise, an attach-based session gets the V2-specific attach failure code.
    if (!fOutsideCreateProcessFailure && m_attached)
    {
        return CORDBG_E_CANNOT_BE_ON_ATTACH;
    }
    return hr;
}
//---------------------------------------------------------------------------------------
// Filter HRs for ICorDebugModule::EnableJITDebugging, ICorDebugModule2::SetJITCompilerFlags
// to emulate V2 error semantics
//
// Arguments:
//    hr - V3 hresult
//
// Returns:
//    hresult V2 would have returned in same situation.
HRESULT ShimProcess::FilterSetJitFlagsHresult(HRESULT hr)
{
    // Only the "must be in LoadModule" failure needs translation; everything
    // else is reported exactly as V3 produced it.
    if (hr != CORDBG_E_MUST_BE_IN_LOAD_MODULE)
    {
        return hr;
    }
    // We are inside the LoadModule callback of an attach-based session: V2
    // reported this particular combination as "cannot do this on attach".
    if (m_fInLoadModule && m_attached)
    {
        return CORDBG_E_CANNOT_BE_ON_ATTACH;
    }
    return hr;
}
// ----------------------------------------------------------------------------
// ShimProcess::LookupOrCreateShimStackWalk
//
// Description:
//    Find the ShimStackWalk associated with the specified ICDThread. Create one if it's not found.
//
// Arguments:
//    * pThread - the specified thread
//
// Return Value:
//    Return the ShimStackWalk associated with the specified thread.
//
// Notes:
//    The ShimStackWalks handed back by this function is only valid until the next time the stack is changed
//    in any way. In other words, the ShimStackWalks are valid until the next time
//    code:CordbThread::CleanupStack or code:CordbThread::MarkStackFramesDirty is called.
//
//    ShimStackWalk and ICDThread have a 1:1 relationship. Only one ShimStackWalk will be created for any
//    given ICDThread. So if two threads in the debugger are walking the same thread in the debuggee, they
//    operate on the same ShimStackWalk. This is ok because ShimStackWalks walk the stack at creation time,
//    cache all the frames, and become read-only after creation.
//
//    Refer to code:ShimProcess::ClearAllShimStackWalk to see how ShimStackWalks are cleared.
//
ShimStackWalk * ShimProcess::LookupOrCreateShimStackWalk(ICorDebugThread * pThread)
{
    ShimStackWalk * pSW = NULL;
    {
        // do the lookup under the Shim lock
        RSLockHolder lockHolder(&m_ShimLock);
        pSW = m_pShimStackWalkHashTable->Lookup(pThread);
    }
    if (pSW == NULL)
    {
        // create one if it's not found and add it to the hash table
        // (deliberately constructed outside the lock - the walk is done eagerly
        // in the constructor and may be expensive)
        NewHolder<ShimStackWalk> pNewSW(new ShimStackWalk(this, pThread));
        {
            // Do the lookup again under the Shim lock, and only add the new ShimStackWalk if no other thread
            // has beaten us to it.
            RSLockHolder lockHolder(&m_ShimLock);
            pSW = m_pShimStackWalkHashTable->Lookup(pThread);
            if (pSW == NULL)
            {
                m_pShimStackWalkHashTable->Add(pNewSW);
                pSW = pNewSW;
                // don't release the memory if all goes well
                pNewSW.SuppressRelease();
            }
            else
            {
                // The NewHolder will automatically delete the ShimStackWalk when it goes out of scope.
            }
        }
    }
    return pSW;
}
// ----------------------------------------------------------------------------
// ShimProcess::ClearAllShimStackWalk
//
// Description:
//    Remove and delete all the entries in the hash table of ShimStackWalks.
//
// Notes:
//    Refer to code:ShimProcess::LookupOrCreateShimStackWalk to see how ShimStackWalks are created.
//
void ShimProcess::ClearAllShimStackWalk()
{
    RSLockHolder lockHolder(&m_ShimLock);
    // loop through all the entries in the hash table, remove them, and delete them
    // NOTE(review): this removes entries while iterating; presumably
    // ShimStackWalkHashTable's iterator tolerates Remove() of the current
    // element - confirm against the hash table implementation.
    for (ShimStackWalkHashTable::Iterator pCurElem = m_pShimStackWalkHashTable->Begin(),
         pEndElem = m_pShimStackWalkHashTable->End();
         pCurElem != pEndElem;
         pCurElem++)
    {
        ShimStackWalk * pSW = *pCurElem;
        m_pShimStackWalkHashTable->Remove(pSW->GetThread());
        delete pSW;
    }
}
//---------------------------------------------------------------------------------------
// Called before shim dispatches an event.
//
// Arguments:
//    fRealCreateProcessEvent - true if the shim is about to dispatch a real create process event (as opposed
//    to one faked up by the shim itself)
//
// Throws:
//    May throw (see CONTRACTL below), e.g. from FinishInitializeIPCChannel.
//
// Notes:
//    This may be called from within Filter, which means we may be on the win32-event-thread.
//    This is called on all callbacks from the VM.
//    This gives us a chance to queue fake-attach events. So call it before the Jit-attach
//    event has been queued.
void ShimProcess::PreDispatchEvent(bool fRealCreateProcessEvent /*= false*/)
{
    CONTRACTL
    {
        THROWS;
    }
    CONTRACTL_END;
    // For emulating the V2 case, we need to do additional initialization before dispatching the callback to the user.
    if (!m_fFirstManagedEvent)
    {
        // Remember that we're processing the first managed event so that we only call HandleFirstRCEvent() once
        m_fFirstManagedEvent = true;
        // This can fail with the incompatable version HR. The process has already been terminated if this
        // is the case. This will dispatch an Error callback
        // If this fails, the process is in an undefined state.
        // @dbgtodo ipc-block: this will go away once we get rid
        // of the IPC block.
        m_pProcess->FinishInitializeIPCChannel(); // throws on error
    }
    {
        // In jit-attach cases, the first event the shim gets is the event that triggered the jit-attach.
        // Queue up the fake events now, and then once we return, our caller will queue the jit-attach event.
        // In the jit-attach case, this is before a sync-complete has been sent (since the sync doesn't get sent
        // until after the jit-attach event is sent).
        QueueFakeAttachEventsIfNeeded(fRealCreateProcessEvent);
    }
    // Always request an sync (emulates V2 behavior). If LS is not sync-ready, it will ignore the request.
    m_pProcess->RequestSyncAtEvent();
}
//---------------------------------------------------------------------------------------
//
// Locates DAC by finding mscordac{wks|core} next to DBI
//
// Return Value:
//    Returns the module handle for DAC
//    Throws on errors.
//
HMODULE ShimProcess::GetDacModule()
{
    HModuleHolder hDacDll;
    PathString wszAccessDllPath;
    //
    // Load the access DLL from the same directory as the the current CLR Debugging Services DLL.
    //
    if (GetClrModuleDirectory(wszAccessDllPath) != S_OK)
    {
        ThrowLastError();
    }
    // Dac Dll is named:
    //   mscordaccore.dll  <-- coreclr
    //   mscordacwks.dll   <-- desktop
    PCWSTR eeFlavor = MAKEDLLNAME_W(W("mscordaccore"));
    wszAccessDllPath.Append(eeFlavor);
    hDacDll.Assign(WszLoadLibrary(wszAccessDllPath));
    if (!hDacDll)
    {
        DWORD dwLastError = GetLastError();
        if (dwLastError == ERROR_MOD_NOT_FOUND)
        {
            // Give a more specific error in the case where we can't find the DAC dll.
            ThrowHR(CORDBG_E_DEBUG_COMPONENT_MISSING);
        }
        else
        {
            ThrowWin32(dwLastError);
        }
    }
    // Ownership of the module handle transfers to the caller; keep the holder
    // from unloading it on return.
    hDacDll.SuppressRelease();
    return (HMODULE) hDacDll;
}
// Returns the cached machine/architecture description for the target process.
MachineInfo ShimProcess::GetMachineInfo()
{
    return m_machineInfo;
}
// Signals the event that marks an attach as pending.
void ShimProcess::SetMarkAttachPendingEvent()
{
    SetEvent(m_markAttachPendingEvent);
}
// Signals the event that indicates the process is terminating.
void ShimProcess::SetTerminatingEvent()
{
    SetEvent(m_terminatingEvent);
}
// Returns the shim's internal lock (does not transfer ownership).
RSLock * ShimProcess::GetShimLock()
{
    return &m_ShimLock;
}
// Thin forwarder to CordbProcess - reports whether the given managed thread
// is currently suspended or hijacked by the runtime.
bool ShimProcess::IsThreadSuspendedOrHijacked(ICorDebugThread * pThread)
{
    return m_pProcess->IsThreadSuspendedOrHijacked(pThread);
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/mono/mono/mini/seq-points.h | /**
* \file
* Copyright 2014 Xamarin Inc
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_SEQ_POINTS_H__
#define __MONO_SEQ_POINTS_H__
#include <mono/metadata/seq-points-data.h>
void
mono_save_seq_point_info (MonoCompile *cfg, MonoJitInfo *jinfo);
MONO_COMPONENT_API MonoSeqPointInfo*
mono_get_seq_points (MonoMethod *method);
MONO_COMPONENT_API gboolean
mono_find_next_seq_point_for_native_offset (MonoMethod *method, gint32 native_offset, MonoSeqPointInfo **info, SeqPoint* seq_point);
MONO_COMPONENT_API gboolean
mono_find_prev_seq_point_for_native_offset (MonoMethod *method, gint32 native_offset, MonoSeqPointInfo **info, SeqPoint* seq_point);
MONO_COMPONENT_API gboolean
mono_find_seq_point (MonoMethod *method, gint32 il_offset, MonoSeqPointInfo **info, SeqPoint *seq_point);
void
mono_bb_deduplicate_op_il_seq_points (MonoCompile *cfg, MonoBasicBlock *bb);
#endif /* __MONO_SEQ_POINTS_H__ */
| /**
* \file
* Copyright 2014 Xamarin Inc
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_SEQ_POINTS_H__
#define __MONO_SEQ_POINTS_H__
#include <mono/metadata/seq-points-data.h>
void
mono_save_seq_point_info (MonoCompile *cfg, MonoJitInfo *jinfo);
MONO_COMPONENT_API MonoSeqPointInfo*
mono_get_seq_points (MonoMethod *method);
MONO_COMPONENT_API gboolean
mono_find_next_seq_point_for_native_offset (MonoMethod *method, gint32 native_offset, MonoSeqPointInfo **info, SeqPoint* seq_point);
MONO_COMPONENT_API gboolean
mono_find_prev_seq_point_for_native_offset (MonoMethod *method, gint32 native_offset, MonoSeqPointInfo **info, SeqPoint* seq_point);
MONO_COMPONENT_API gboolean
mono_find_seq_point (MonoMethod *method, gint32 il_offset, MonoSeqPointInfo **info, SeqPoint *seq_point);
void
mono_bb_deduplicate_op_il_seq_points (MonoCompile *cfg, MonoBasicBlock *bb);
#endif /* __MONO_SEQ_POINTS_H__ */
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/vm/i386/excepx86.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
/* EXCEP.CPP:
*
*/
#include "common.h"
#include "frames.h"
#include "excep.h"
#include "object.h"
#include "field.h"
#include "dbginterface.h"
#include "cgensys.h"
#include "comutilnative.h"
#include "sigformat.h"
#include "siginfo.hpp"
#include "gcheaputilities.h"
#include "eedbginterfaceimpl.h" //so we can clearexception in COMPlusThrow
#include "eventtrace.h"
#include "eetoprofinterfacewrapper.inl"
#include "eedbginterfaceimpl.inl"
#include "dllimportcallback.h"
#include "threads.h"
#include "eeconfig.h"
#include "vars.hpp"
#include "generics.h"
#include "asmconstants.h"
#include "virtualcallstub.h"
#ifndef FEATURE_EH_FUNCLETS
MethodDesc * GetUserMethodForILStub(Thread * pThread, UINT_PTR uStubSP, MethodDesc * pILStubMD, Frame ** ppFrameOut);
#if !defined(DACCESS_COMPILE)
#define FORMAT_MESSAGE_BUFFER_LENGTH 1024
BOOL ComPlusFrameSEH(EXCEPTION_REGISTRATION_RECORD*);
PEXCEPTION_REGISTRATION_RECORD GetPrevSEHRecord(EXCEPTION_REGISTRATION_RECORD*);
extern "C" {
// in asmhelpers.asm:
VOID STDCALL ResumeAtJitEHHelper(EHContext *pContext);
int STDCALL CallJitEHFilterHelper(size_t *pShadowSP, EHContext *pContext);
VOID STDCALL CallJitEHFinallyHelper(size_t *pShadowSP, EHContext *pContext);
typedef void (*RtlUnwindCallbackType)(void);
BOOL CallRtlUnwind(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
RtlUnwindCallbackType callback,
EXCEPTION_RECORD *pExceptionRecord,
void *retval);
BOOL CallRtlUnwindSafe(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
RtlUnwindCallbackType callback,
EXCEPTION_RECORD *pExceptionRecord,
void *retval);
}
static inline BOOL
CPFH_ShouldUnwindStack(const EXCEPTION_RECORD * pCER) {
    LIMITED_METHOD_CONTRACT;
    _ASSERTE(pCER != NULL);
    // Only exceptions whose context/record is not needed for a rethrow can be
    // unwound: CLR-raised exceptions and stack overflow. Everything else must
    // keep its context around for a potential rethrow and so cannot be unwound.
    return (IsComPlusException(pCER) || (pCER->ExceptionCode == STATUS_STACK_OVERFLOW)) ? TRUE : FALSE;
}
static inline BOOL IsComPlusNestedExceptionRecord(EXCEPTION_REGISTRATION_RECORD* pEHR)
{
    LIMITED_METHOD_CONTRACT;
    // The record is a CLR nested-exception record iff its handler is our
    // nested-exception handler routine.
    return (pEHR->Handler == (PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler) ? TRUE : FALSE;
}
// Walk the SEH chain starting at pEstablisherFrame and return the first record
// whose handler is COMPlusNestedExceptionHandler, or 0 if the chain ends first.
EXCEPTION_REGISTRATION_RECORD *TryFindNestedEstablisherFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame)
{
    LIMITED_METHOD_CONTRACT;
    for (EXCEPTION_REGISTRATION_RECORD *pCur = pEstablisherFrame; ; pCur = pCur->Next)
    {
        if (pCur->Handler == (PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler)
        {
            return pCur;
        }
        // Stop before dereferencing the end-of-chain sentinel.
        if (pCur->Next == EXCEPTION_CHAIN_END)
        {
            return 0;
        }
    }
}
#ifdef _DEBUG
// stores last handler we went to in case we didn't get an endcatch and stack is
// corrupted we can figure out who did it.
static MethodDesc *gLastResumedExceptionFunc = NULL;
static DWORD gLastResumedExceptionHandler = 0;
#endif
//---------------------------------------------------------------------
// void RtlUnwindCallback()
// Callback passed to RtlUnwind for the global unwind. Control is
// presumably regained by CallRtlUnwind itself rather than through this
// routine, so actually reaching this function indicates a broken unwind
// - hence the unconditional assert.
//---------------------------------------------------------------------
static void RtlUnwindCallback()
{
    LIMITED_METHOD_CONTRACT;
    _ASSERTE(!"Should never get here");
}
// Returns TRUE iff the given SEH record was installed with the
// FastNExportExceptHandler handler.
BOOL FastNExportSEH(EXCEPTION_REGISTRATION_RECORD* pEHR)
{
    LIMITED_METHOD_CONTRACT;
    return ((LPVOID)pEHR->Handler == (LPVOID)FastNExportExceptHandler) ? TRUE : FALSE;
}
// Returns TRUE iff the given SEH record was installed with the reverse-COM
// interop handler (only possible when COM interop is compiled in).
BOOL ReverseCOMSEH(EXCEPTION_REGISTRATION_RECORD* pEHR)
{
    LIMITED_METHOD_CONTRACT;
#ifdef FEATURE_COMINTEROP
    return ((LPVOID)pEHR->Handler == (LPVOID)COMPlusFrameHandlerRevCom) ? TRUE : FALSE;
#else // !FEATURE_COMINTEROP
    return FALSE;
#endif // FEATURE_COMINTEROP
}
//
// Returns true if the given SEH handler is one of our SEH handlers that is responsible for managing exceptions in
// regions of managed code.
//
BOOL IsUnmanagedToManagedSEHHandler(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame)
{
    WRAPPER_NO_CONTRACT;
    // ComPlusFrameSEH() covers COMPlusFrameHandler & COMPlusNestedExceptionHandler.
    if (ComPlusFrameSEH(pEstablisherFrame))
    {
        return TRUE;
    }
    // FastNExportSEH() covers FastNExportExceptHandler.
    if (FastNExportSEH(pEstablisherFrame))
    {
        return TRUE;
    }
    // ReverseCOMSEH() covers COMPlusFrameHandlerRevCom.
    return ReverseCOMSEH(pEstablisherFrame);
}
// Returns the CLR Frame guarded by the given SEH record.
// pEstablisherFrame must be one of our unmanaged-to-managed SEH records
// (see IsUnmanagedToManagedSEHHandler); it is a FrameHandlerExRecord which
// embeds the Frame pointer.
Frame *GetCurrFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame)
{
    Frame *pFrame;
    WRAPPER_NO_CONTRACT;
    _ASSERTE(IsUnmanagedToManagedSEHHandler(pEstablisherFrame));
    pFrame = ((FrameHandlerExRecord *)pEstablisherFrame)->GetCurrFrame();
    // Assert that the exception frame is on the thread or that the exception frame is the top frame.
    _ASSERTE(GetThreadNULLOk() == NULL || GetThread()->GetFrame() == (Frame*)-1 || GetThread()->GetFrame() <= pFrame);
    return pFrame;
}
// Starting AFTER pRec, walk the SEH chain and return the next record installed
// by the CLR to guard managed code, or EXCEPTION_CHAIN_END if there is none.
EXCEPTION_REGISTRATION_RECORD* GetNextCOMPlusSEHRecord(EXCEPTION_REGISTRATION_RECORD* pRec) {
    WRAPPER_NO_CONTRACT;
    if (pRec == EXCEPTION_CHAIN_END)
        return EXCEPTION_CHAIN_END;
    do {
        _ASSERTE(pRec != 0);
        pRec = pRec->Next;
    } while (pRec != EXCEPTION_CHAIN_END && !IsUnmanagedToManagedSEHHandler(pRec));
    // Post-condition: either we ran off the chain or we stopped on a CLR handler.
    _ASSERTE(pRec == EXCEPTION_CHAIN_END || IsUnmanagedToManagedSEHHandler(pRec));
    return pRec;
}
/*
 * GetClrSEHRecordServicingStackPointer
 *
 * This function searchs all the Frame SEH records, and finds the one that is
 * currently signed up to do all exception handling for the given stack pointer
 * on the given thread.
 *
 * Parameters:
 *  pThread - The thread to search on.
 *  pStackPointer - The stack location that we are finding the Frame SEH Record for.
 *
 * Returns
 *  A pointer to the SEH record, or EXCEPTION_CHAIN_END if none was found.
 *
 */
PEXCEPTION_REGISTRATION_RECORD
GetClrSEHRecordServicingStackPointer(Thread *pThread,
                                     void *pStackPointer)
{
    ThreadExceptionState* pExState = pThread->GetExceptionState();
    //
    // We can only do this if there is a context in the pExInfo. There are cases (most notably the
    // EEPolicy::HandleFatalError case) where we don't have that. In these cases we will return
    // no enclosing handler since we cannot accurately determine the FS:0 entry which services
    // this stack address.
    //
    // The side effect of this is that for these cases, the debugger cannot intercept
    // the exception
    //
    CONTEXT* pContextRecord = pExState->GetContextRecord();
    if (pContextRecord == NULL)
    {
        return EXCEPTION_CHAIN_END;
    }
    void *exceptionSP = dac_cast<PTR_VOID>(GetSP(pContextRecord));
    //
    // Now set the establishing frame. What this means in English is that we need to find
    // the fs:0 entry that handles exceptions for the place on the stack given in stackPointer.
    //
    PEXCEPTION_REGISTRATION_RECORD pSEHRecord = GetFirstCOMPlusSEHRecord(pThread);
    while (pSEHRecord != EXCEPTION_CHAIN_END)
    {
        //
        // Skip any SEHRecord which is not a CLR record or was pushed after the exception
        // on this thread occurred.
        // (SEH records live on the stack, so "record address >= exception SP" means the
        // record was already installed when the exception happened.)
        //
        if (IsUnmanagedToManagedSEHHandler(pSEHRecord) && (exceptionSP <= (void *)pSEHRecord))
        {
            Frame *pFrame = GetCurrFrame(pSEHRecord);
            //
            // Arcane knowledge here. All Frame records are stored on the stack by the runtime
            // in ever decreasing address space. So, we merely have to search back until
            // we find the first frame record with a higher stack value to find the
            // establishing frame for the given stack address.
            //
            if (((void *)pFrame) >= pStackPointer)
            {
                break;
            }
        }
        pSEHRecord = GetNextCOMPlusSEHRecord(pSEHRecord);
    }
    return pSEHRecord;
}
#ifdef _DEBUG
// We've determined during a stack walk that managed code is transitioning to unmanaged (EE) code. Check that the
// state of the EH chain is correct.
//
// For x86, check that we do INSTALL_COMPLUS_EXCEPTION_HANDLER before calling managed code. This check should be
// done for all managed code sites, not just transistions. But this will catch most problem cases.
void VerifyValidTransitionFromManagedCode(Thread *pThread, CrawlFrame *pCF)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(ExecutionManager::IsManagedCode(GetControlPC(pCF->GetRegisterSet())));
    // Cannot get to the TEB of other threads. So ignore them.
    if (pThread != GetThreadNULLOk())
    {
        return;
    }
    // Find the EH record guarding the current region of managed code, based on the CrawlFrame passed in.
    // (SEH records live on the stack: walk forward until one is above the frame's SP.)
    PEXCEPTION_REGISTRATION_RECORD pEHR = GetCurrentSEHRecord();
    while ((pEHR != EXCEPTION_CHAIN_END) && ((ULONG_PTR)pEHR < GetRegdisplaySP(pCF->GetRegisterSet())))
    {
        pEHR = pEHR->Next;
    }
    // VerifyValidTransitionFromManagedCode can be called before the CrawlFrame's MethodDesc is initialized.
    // Fix that if necessary for the consistency check.
    MethodDesc * pFunction = pCF->GetFunction();
    if ((!IsUnmanagedToManagedSEHHandler(pEHR)) &&  // Will the assert fire?  If not, don't waste our time.
        (pFunction == NULL))
    {
        _ASSERTE(pCF->GetRegisterSet());
        PCODE ip = GetControlPC(pCF->GetRegisterSet());
        pFunction = ExecutionManager::GetCodeMethodDesc(ip);
        _ASSERTE(pFunction);
    }
    // Great, we've got the EH record that's next up the stack from the current SP (which is in managed code). That
    // had better be a record for one of our handlers responsible for handling exceptions in managed code. If its
    // not, then someone made it into managed code without setting up one of our EH handlers, and that's really
    // bad.
    CONSISTENCY_CHECK_MSGF(IsUnmanagedToManagedSEHHandler(pEHR),
                           ("Invalid transition into managed code!\n\n"
                            "We're walking this thread's stack and we've reached a managed frame at Esp=0x%p. "
                            "(The method is %s::%s) "
                            "The very next FS:0 record (0x%p) up from this point on the stack should be one of "
                            "our 'unmanaged to managed SEH handlers', but its not... its something else, and "
                            "that's very bad. It indicates that someone managed to call into managed code without "
                            "setting up the proper exception handling.\n\n"
                            "Get a good unmanaged stack trace for this thread. All FS:0 records are on the stack, "
                            "so you can see who installed the last handler. Somewhere between that function and "
                            "where the thread is now is where the bad transition occurred.\n\n"
                            "A little extra info: FS:0 = 0x%p, pEHR->Handler = 0x%p\n",
                            GetRegdisplaySP(pCF->GetRegisterSet()),
                            pFunction ->m_pszDebugClassName,
                            pFunction ->m_pszDebugMethodName,
                            pEHR,
                            GetCurrentSEHRecord(),
                            pEHR->Handler));
}
#endif
//================================================================================
// There are some things that should never be true when handling an
// exception. This function checks for them. Will assert or trap
// if it finds an error.
static inline void
CPFH_VerifyThreadIsInValidState(Thread* pThread, DWORD exceptionCode, EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame) {
    WRAPPER_NO_CONTRACT;
    // Breakpoints and single-steps are routine debugger traffic; nothing to verify.
    if (   exceptionCode == STATUS_BREAKPOINT
        || exceptionCode == STATUS_SINGLE_STEP) {
        return;
    }
#ifdef _DEBUG
    // check for overwriting of stack
    CheckStackBarrier(pEstablisherFrame);
    // trigger check for bad fs:0 chain
    GetCurrentSEHRecord();
#endif
    if (!g_fEEShutDown) {
        // An exception on the GC thread, or while holding the thread store lock, will likely lock out the entire process.
        if (::IsGCThread() || ThreadStore::HoldingThreadStore())
        {
            _ASSERTE(!"Exception during garbage collection or while holding thread store");
            EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
        }
    }
}
#ifdef FEATURE_HIJACK
// Patches up an exception context when the exception raced with thread
// suspension's EIP redirection.  Another thread may rewrite this thread's EIP
// to a redirect stub (or to THROW_CONTROL_FOR_THREAD_FUNCTION) at the exact
// moment this thread begins dispatching an exception; when that happens the
// context the OS hands the handler points at the redirection target rather
// than at the code that actually faulted.  Each of the four cases below
// detects one flavor of this (very rare) race and restores the EIP the
// thread should really resume at.
void
CPFH_AdjustContextForThreadSuspensionRace(CONTEXT *pContext, Thread *pThread)
{
    WRAPPER_NO_CONTRACT;

    // Capture the interrupted EIP once; each case below tests this original
    // value even if an earlier case has already rewritten pContext.
    PCODE interruptedIP = GetIP(pContext);

    // Case 1: EIP was redirected to a redirect stub just as the exception was
    // being dispatched -- the redirect took, but the thread kept dispatching
    // the exception anyway.  Resume at the EIP recorded when the redirect was
    // set up.
    if (Thread::IsAddrOfRedirectFunc((PVOID)interruptedIP)) {
        _ASSERTE(pThread->GetSavedRedirectContext());
        SetIP(pContext, GetIP(pThread->GetSavedRedirectContext()));
        STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 1 setting IP = %x\n", pContext->Eip);
    }

    // Case 2: same race, but EIP was pointed at THROW_CONTROL_FOR_THREAD_FUNCTION;
    // the true resume point was stashed in the thread's OS context.
    if (interruptedIP == GetEEFuncEntryPoint(THROW_CONTROL_FOR_THREAD_FUNCTION)) {
        SetIP(pContext, GetIP(pThread->m_OSContext));
        STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 2 setting IP = %x\n", pContext->Eip);
    }

    // Cases 3 and 4 handle an even rarer int3 flavor of the same race:
    //  - (A) The debugger plants an int 3 at address X on this thread; hitting
    //    it raises a breakpoint exception with EIP = X + 1 (int3 is special
    //    and leaves EIP one past itself).
    //  - (B) Meanwhile another thread redirects this thread's EIP to Y.
    //    (Although this thread is really in the kernel, it still looks like
    //    user code, so it falls under the HandledJitCase and is redirectable.)
    //  - The OS, trying to be nice, expects the breakpoint context to hold
    //    X + 1 and subtracts 1 before passing it to the handler -- but the EIP
    //    is now Y, so the handler sees Y - 1.  The debugger would not
    //    recognize a breakpoint at Y - 1, leaving the BP unhandled.
    // So: if EIP + 1 is a redirect target, undo the redirection and restore
    // the pre-redirect address.  Skipping the redirect is safe in the cases
    // where this can occur, and the debugger will recognize and handle the
    // patch.
    if (Thread::IsAddrOfRedirectFunc((PVOID)(interruptedIP + 1))) {
        _ASSERTE(pThread->GetSavedRedirectContext());
        SetIP(pContext, GetIP(pThread->GetSavedRedirectContext()) - 1);
        STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 3 setting IP = %x\n", pContext->Eip);
    }

    // Case 4: the int3 variant of case 2.
    if (interruptedIP + 1 == GetEEFuncEntryPoint(THROW_CONTROL_FOR_THREAD_FUNCTION)) {
        SetIP(pContext, GetIP(pThread->m_OSContext) - 1);
        STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 4 setting IP = %x\n", pContext->Eip);
    }
}
#endif // FEATURE_HIJACK
uint32_t g_exceptionCount;
//******************************************************************************
// Finishes exception dispatch after RtlUnwind has returned: re-installs a
// nested exception handler, switches to cooperative GC mode, and unwinds the
// managed Frame chain down to the handler described by 'tct'.  Execution is
// expected to resume in the managed handler (or, for a debugger interception,
// at the interception context) rather than return here -- see the assert at
// the bottom.
//
// Arguments:
//    pExceptionRecord  - the exception being dispatched
//    pEstablisherFrame - SEH registration of the handler that will catch the
//                        exception; must be the topmost fs:0 record by now
//                        (asserted below)
//    tct               - throw-callback state identifying the target
//                        method/handler/stack location
//
EXCEPTION_DISPOSITION COMPlusAfterUnwind(
        EXCEPTION_RECORD *pExceptionRecord,
        EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
        ThrowCallbackType& tct)
{
    WRAPPER_NO_CONTRACT;

    // Note: we've completed the unwind pass up to the establisher frame, and we're headed off to finish our
    // cleanup and end up back in jitted code. Any more FS0 handlers pushed from this point on out will _not_ be
    // unwound. We go ahead and assert right here that indeed there are no handlers below the establisher frame
    // before we go any further.
    _ASSERTE(pEstablisherFrame == GetCurrentSEHRecord());

    Thread* pThread = GetThread();

    _ASSERTE(tct.pCurrentExceptionRecord == pEstablisherFrame);

    // Any exception raised during the remaining cleanup must be handled as a
    // nested exception, so prepare a nested handler record...
    NestedHandlerExRecord nestedHandlerExRecord;
    nestedHandlerExRecord.Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, GetCurrFrame(pEstablisherFrame));

    // ... and now, put the nested record back on.
    INSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));

    // We entered COMPlusAfterUnwind in PREEMP, but we need to be in COOP from here on out
    GCX_COOP_NO_DTOR();

    tct.bIsUnwind = TRUE;
    tct.pProfilerNotify = NULL;

    LOG((LF_EH, LL_INFO100, "COMPlusFrameHandler: unwinding\n"));

    tct.bUnwindStack = CPFH_ShouldUnwindStack(pExceptionRecord);

    LOG((LF_EH, LL_INFO1000, "COMPlusAfterUnwind: going to: pFunc:%#X, pStack:%#X\n",
        tct.pFunc, tct.pStack));

    // Pop the managed Frame chain (running cleanup) down to the target handler.
    UnwindFrames(pThread, &tct);

#ifdef DEBUGGING_SUPPORTED
    ExInfo* pExInfo = pThread->GetExceptionState()->GetCurrentExceptionTracker();
    if (pExInfo->m_ValidInterceptionContext)
    {
        // By now we should have all unknown FS:[0] handlers unwinded along with the managed Frames until
        // the interception point. We can now pop nested exception handlers and resume at interception context.
        // Copy the context locally and clear the tracker's copy before resuming.
        EHContext context = pExInfo->m_InterceptionContext;
        pExInfo->m_InterceptionContext.Init();
        pExInfo->m_ValidInterceptionContext = FALSE;

        UnwindExceptionTrackerAndResumeInInterceptionFrame(pExInfo, &context);
    }
#endif // DEBUGGING_SUPPORTED

    _ASSERTE(!"Should not get here");
    return ExceptionContinueSearch;
} // EXCEPTION_DISPOSITION COMPlusAfterUnwind()
#ifdef DEBUGGING_SUPPORTED
//---------------------------------------------------------------------------------------
//
// This function is called to intercept an exception and start an unwind.
//
// Arguments:
// pCurrentEstablisherFrame - the exception registration record covering the stack range
// containing the interception point
// pExceptionRecord - EXCEPTION_RECORD of the exception being intercepted
//
// Return Value:
// ExceptionContinueSearch if the exception cannot be intercepted
//
// Notes:
// If the exception is intercepted, this function never returns.
//
EXCEPTION_DISPOSITION ClrDebuggerDoUnwindAndIntercept(EXCEPTION_REGISTRATION_RECORD *pCurrentEstablisherFrame,
                                                      EXCEPTION_RECORD *pExceptionRecord)
{
    WRAPPER_NO_CONTRACT;

    // Bail out unless the debugger has actually requested interception of the
    // exception currently in flight on this thread.
    if (!CheckThreadExceptionStateForInterception())
    {
        return ExceptionContinueSearch;
    }

    Thread* pThread = GetThread();
    ThreadExceptionState* pExState = pThread->GetExceptionState();

    EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame;
    ThrowCallbackType tct;
    tct.Init();

    // Ask the debugger where to intercept: which establisher frame, method,
    // handler, stack location, and bottom frame describe the target.
    pExState->GetDebuggerState()->GetDebuggerInterceptInfo(&pEstablisherFrame,
                                      &(tct.pFunc),
                                      &(tct.dHandler),
                                      &(tct.pStack),
                                      NULL,
                                      &(tct.pBottomFrame)
                                     );

    //
    // If the handler that we've selected as the handler for the target frame of the unwind is in fact above the
    // handler that we're currently executing in, then use the current handler instead. Why? Our handlers for
    // nested exceptions actually process managed frames that live above them, up to the COMPlusFrameHanlder that
    // pushed the nested handler. If the user selectes a frame above the nested handler, then we will have selected
    // the COMPlusFrameHandler above the current nested handler. But we don't want to ask RtlUnwind to unwind past
    // the nested handler that we're currently executing in.
    //
    if (pEstablisherFrame > pCurrentEstablisherFrame)
    {
        // This should only happen if we're in a COMPlusNestedExceptionHandler.
        _ASSERTE(IsComPlusNestedExceptionRecord(pCurrentEstablisherFrame));

        pEstablisherFrame = pCurrentEstablisherFrame;
    }

#ifdef _DEBUG
    tct.pCurrentExceptionRecord = pEstablisherFrame;
#endif

    LOG((LF_EH|LF_CORDB, LL_INFO100, "ClrDebuggerDoUnwindAndIntercept: Intercepting at %s\n", tct.pFunc->m_pszDebugMethodName));
    LOG((LF_EH|LF_CORDB, LL_INFO100, "\t\t: pFunc is 0x%X\n", tct.pFunc));
    LOG((LF_EH|LF_CORDB, LL_INFO100, "\t\t: pStack is 0x%X\n", tct.pStack));

    // Unwind the OS handler chain up to (not including) the chosen establisher
    // frame.  On x86, RtlUnwind returns to us afterwards.
    CallRtlUnwindSafe(pEstablisherFrame, RtlUnwindCallback, pExceptionRecord, 0);

    ExInfo* pExInfo = pThread->GetExceptionState()->GetCurrentExceptionTracker();
    if (pExInfo->m_ValidInterceptionContext)
    {
        // By now we should have all unknown FS:[0] handlers unwinded along with the managed Frames until
        // the interception point. We can now pop nested exception handlers and resume at interception context.
        GCX_COOP();
        EHContext context = pExInfo->m_InterceptionContext;
        pExInfo->m_InterceptionContext.Init();
        pExInfo->m_ValidInterceptionContext = FALSE;

        UnwindExceptionTrackerAndResumeInInterceptionFrame(pExInfo, &context);
    }

    // on x86 at least, RtlUnwind always returns

    // Note: we've completed the unwind pass up to the establisher frame, and we're headed off to finish our
    // cleanup and end up back in jitted code. Any more FS0 handlers pushed from this point on out will _not_ be
    // unwound.
    // COMPlusAfterUnwind does not return normally (it resumes in the handler).
    return COMPlusAfterUnwind(pExState->GetExceptionRecord(), pEstablisherFrame, tct);
} // EXCEPTION_DISPOSITION ClrDebuggerDoUnwindAndIntercept()
#endif // DEBUGGING_SUPPORTED
// Thin wrapper around the assembly routine that invokes the OS's RtlUnwind.
//
// On x86, RtlUnwind returns to its caller, and as a side effect the OS flags
// the exception record's ExceptionFlags for unwind.  We invoke RtlUnwind
// during the FIRST pass (once a handler has been found) while the runtime
// still holds a reference to the live exception record.  If that same record
// is later used to reraise a non-COMPLUS exception as part of rethrow fixup,
// the stale unwind flag would make the OS treat the reraise as a second pass
// instead of a first pass.
//
// To prevent that, this wrapper snapshots ExceptionFlags before the unwind
// and restores the snapshot afterwards if the OS modified the field.
// COMPLUS exceptions are unaffected by this issue because
// RaiseTheExceptionInternalOnly always builds a brand new exception record
// for them.
BOOL CallRtlUnwindSafe(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
                       RtlUnwindCallbackType callback,
                       EXCEPTION_RECORD *pExceptionRecord,
                       void *retval)
{
    LIMITED_METHOD_CONTRACT;

    // Snapshot the flags so any OS-side mutation can be undone below.
    const DWORD originalFlags = pExceptionRecord->ExceptionFlags;

    BOOL fUnwound = CallRtlUnwind(pEstablisherFrame, callback, pExceptionRecord, retval);

    if (pExceptionRecord->ExceptionFlags != originalFlags)
    {
        // We would expect the 32bit OS to have set the unwind flag at this point.
        _ASSERTE(pExceptionRecord->ExceptionFlags & EXCEPTION_UNWINDING);
        LOG((LF_EH, LL_INFO100, "CallRtlUnwindSafe: Resetting ExceptionFlags from %lu to %lu\n", pExceptionRecord->ExceptionFlags, originalFlags));
        pExceptionRecord->ExceptionFlags = originalFlags;
    }

    return fUnwound;
}
//******************************************************************************
// The essence of the first pass handler (after we've decided to actually do
// the first pass handling).
//******************************************************************************
inline EXCEPTION_DISPOSITION __cdecl
CPFH_RealFirstPassHandler( // ExceptionContinueSearch, etc.
EXCEPTION_RECORD *pExceptionRecord, // The exception record, with exception type.
EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, // Exception frame on whose behalf this is called.
CONTEXT *pContext, // Context from the exception.
void *pDispatcherContext, // @todo
BOOL bAsynchronousThreadStop, // @todo
BOOL fPGCDisabledOnEntry) // @todo
{
// We don't want to use a runtime contract here since this codepath is used during
// the processing of a hard SO. Contracts use a significant amount of stack
// which we can't afford for those cases.
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
#ifdef _DEBUG
static int breakOnFirstPass = -1;
if (breakOnFirstPass == -1)
breakOnFirstPass = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnFirstPass);
if (breakOnFirstPass != 0)
{
_ASSERTE(!"First pass exception handler");
}
#endif
EXCEPTION_DISPOSITION retval;
DWORD exceptionCode = pExceptionRecord->ExceptionCode;
Thread *pThread = GetThread();
#ifdef _DEBUG
static int breakOnSO = -1;
if (breakOnSO == -1)
breakOnSO = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_BreakOnSO);
if (breakOnSO != 0 && exceptionCode == STATUS_STACK_OVERFLOW)
{
DebugBreak(); // ASSERTing will overwrite the guard region
}
#endif
// We always want to be in co-operative mode when we run this function and whenever we return
// from it, want to go to pre-emptive mode because are returning to OS.
_ASSERTE(pThread->PreemptiveGCDisabled());
BOOL bPopNestedHandlerExRecord = FALSE;
LFH found = LFH_NOT_FOUND; // Result of calling LookForHandler.
BOOL bRethrownException = FALSE;
BOOL bNestedException = FALSE;
#if defined(USE_FEF)
BOOL bPopFaultingExceptionFrame = FALSE;
FrameWithCookie<FaultingExceptionFrame> faultingExceptionFrame;
#endif // USE_FEF
ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
ThrowCallbackType tct;
tct.Init();
tct.pTopFrame = GetCurrFrame(pEstablisherFrame); // highest frame to search to
#ifdef _DEBUG
tct.pCurrentExceptionRecord = pEstablisherFrame;
tct.pPrevExceptionRecord = GetPrevSEHRecord(pEstablisherFrame);
#endif // _DEBUG
BOOL fIsManagedCode = pContext ? ExecutionManager::IsManagedCode(GetIP(pContext)) : FALSE;
// this establishes a marker so can determine if are processing a nested exception
// don't want to use the current frame to limit search as it could have been unwound by
// the time get to nested handler (ie if find an exception, unwind to the call point and
// then resume in the catch and then get another exception) so make the nested handler
// have the same boundary as this one. If nested handler can't find a handler, we won't
// end up searching this frame list twice because the nested handler will set the search
// boundary in the thread and so if get back to this handler it will have a range that starts
// and ends at the same place.
NestedHandlerExRecord nestedHandlerExRecord;
nestedHandlerExRecord.Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, GetCurrFrame(pEstablisherFrame));
INSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));
bPopNestedHandlerExRecord = TRUE;
#if defined(USE_FEF)
// Note: don't attempt to push a FEF for an exception in managed code if we weren't in cooperative mode when
// the exception was received. If preemptive GC was enabled when we received the exception, then it means the
// exception was rethrown from unmangaed code (including EE impl), and we shouldn't push a FEF.
if (fIsManagedCode &&
fPGCDisabledOnEntry &&
(pThread->m_pFrame == FRAME_TOP ||
pThread->m_pFrame->GetVTablePtr() != FaultingExceptionFrame::GetMethodFrameVPtr() ||
(size_t)pThread->m_pFrame > (size_t)pEstablisherFrame))
{
// setup interrupted frame so that GC during calls to init won't collect the frames
// only need it for non COM+ exceptions in managed code when haven't already
// got one on the stack (will have one already if we have called rtlunwind because
// the instantiation that called unwind would have installed one)
faultingExceptionFrame.InitAndLink(pContext);
bPopFaultingExceptionFrame = TRUE;
}
#endif // USE_FEF
OBJECTREF e;
e = pThread->LastThrownObject();
STRESS_LOG7(LF_EH, LL_INFO10, "CPFH_RealFirstPassHandler: code:%X, LastThrownObject:%p, MT:%pT"
", IP:%p, SP:%p, pContext:%p, pEstablisherFrame:%p\n",
exceptionCode, OBJECTREFToObject(e), (e!=0)?e->GetMethodTable():0,
pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0,
pContext, pEstablisherFrame);
#ifdef LOGGING
// If it is a complus exception, and there is a thrown object, get its name, for better logging.
if (IsComPlusException(pExceptionRecord))
{
const char * eClsName = "!EXCEPTION_COMPLUS";
if (e != 0)
{
eClsName = e->GetMethodTable()->GetDebugClassName();
}
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: exception: 0x%08X, class: '%s', IP: 0x%p\n",
exceptionCode, eClsName, pContext ? GetIP(pContext) : NULL));
}
#endif
EXCEPTION_POINTERS exceptionPointers = {pExceptionRecord, pContext};
STRESS_LOG4(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: setting boundaries: Exinfo: 0x%p, BottomMostHandler:0x%p, SearchBoundary:0x%p, TopFrame:0x%p\n",
pExInfo, pExInfo->m_pBottomMostHandler, pExInfo->m_pSearchBoundary, tct.pTopFrame);
// Here we are trying to decide if we are coming in as:
// 1) first handler in a brand new exception
// 2) a subsequent handler in an exception
// 3) a nested exception
// m_pBottomMostHandler is the registration structure (establisher frame) for the most recent (ie lowest in
// memory) non-nested handler that was installed and pEstablisher frame is what the current handler
// was registered with.
// The OS calls each registered handler in the chain, passing its establisher frame to it.
if (pExInfo->m_pBottomMostHandler != NULL && pEstablisherFrame > pExInfo->m_pBottomMostHandler)
{
STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: detected subsequent handler. ExInfo:0x%p, BottomMost:0x%p SearchBoundary:0x%p\n",
pExInfo, pExInfo->m_pBottomMostHandler, pExInfo->m_pSearchBoundary);
// If the establisher frame of this handler is greater than the bottommost then it must have been
// installed earlier and therefore we are case 2
if (pThread->GetThrowable() == NULL)
{
// Bottommost didn't setup a throwable, so not exception not for us
retval = ExceptionContinueSearch;
goto exit;
}
// setup search start point
tct.pBottomFrame = pExInfo->m_pSearchBoundary;
if (tct.pTopFrame == tct.pBottomFrame)
{
// this will happen if our nested handler already searched for us so we don't want
// to search again
retval = ExceptionContinueSearch;
goto exit;
}
}
else
{ // we are either case 1 or case 3
#if defined(_DEBUG_IMPL)
//@todo: merge frames, context, handlers
if (pThread->GetFrame() != FRAME_TOP)
pThread->GetFrame()->LogFrameChain(LF_EH, LL_INFO1000);
#endif // _DEBUG_IMPL
// If the exception was rethrown, we'll create a new ExInfo, which will represent the rethrown exception.
// The original exception is not the rethrown one.
if (pExInfo->m_ExceptionFlags.IsRethrown() && pThread->LastThrownObject() != NULL)
{
pExInfo->m_ExceptionFlags.ResetIsRethrown();
bRethrownException = TRUE;
#if defined(USE_FEF)
if (bPopFaultingExceptionFrame)
{
// if we added a FEF, it will refer to the frame at the point of the original exception which is
// already unwound so don't want it.
// If we rethrew the exception we have already added a helper frame for the rethrow, so don't
// need this one. If we didn't rethrow it, (ie rethrow from native) then there the topmost frame will
// be a transition to native frame in which case we don't need it either
faultingExceptionFrame.Pop();
bPopFaultingExceptionFrame = FALSE;
}
#endif
}
// If the establisher frame is less than the bottommost handler, then this is nested because the
// establisher frame was installed after the bottommost.
if (pEstablisherFrame < pExInfo->m_pBottomMostHandler
/* || IsComPlusNestedExceptionRecord(pEstablisherFrame) */ )
{
bNestedException = TRUE;
// case 3: this is a nested exception. Need to save and restore the thread info
STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: ExInfo:0x%p detected nested exception 0x%p < 0x%p\n",
pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler);
EXCEPTION_REGISTRATION_RECORD* pNestedER = TryFindNestedEstablisherFrame(pEstablisherFrame);
ExInfo *pNestedExInfo;
if (!pNestedER || pNestedER >= pExInfo->m_pBottomMostHandler )
{
// RARE CASE. We've re-entered the EE from an unmanaged filter.
//
// OR
//
// We can be here if we dont find a nested exception handler. This is exemplified using
// call chain of scenario 2 explained further below.
//
// Assuming __try of NativeB throws an exception E1 and it gets caught in ManagedA2, then
// bottom-most handler (BMH) is going to be CPFH_A. The catch will trigger an unwind
// and invoke __finally in NativeB. Let the __finally throw a new exception E2.
//
// Assuming ManagedB2 has a catch block to catch E2, when we enter CPFH_B looking for a
// handler for E2, our establisher frame will be that of CPFH_B, which will be lower
// in stack than current BMH (which is CPFH_A). Thus, we will come here, determining
// E2 to be nested exception correctly but not find a nested exception handler.
void *limit = (void *) GetPrevSEHRecord(pExInfo->m_pBottomMostHandler);
pNestedExInfo = new (nothrow) ExInfo(); // Very rare failure here; need robust allocator.
if (pNestedExInfo == NULL)
{ // if we can't allocate memory, we can't correctly continue.
#if defined(_DEBUG)
if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NestedEhOom))
_ASSERTE(!"OOM in callback from unmanaged filter.");
#endif // _DEBUG
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_OUTOFMEMORY);
}
pNestedExInfo->m_StackAddress = limit; // Note: this is also the flag that tells us this
// ExInfo was stack allocated.
}
else
{
pNestedExInfo = &((NestedHandlerExRecord*)pNestedER)->m_handlerInfo;
}
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: PushExInfo() current: 0x%p previous: 0x%p\n",
pExInfo->m_StackAddress, pNestedExInfo->m_StackAddress));
_ASSERTE(pNestedExInfo);
pNestedExInfo->m_hThrowable = NULL; // pNestedExInfo may be stack allocated, and as such full of
// garbage. m_hThrowable must be sane, so set it to NULL. (We could
// zero the entire record, but this is cheaper.)
pNestedExInfo->CopyAndClearSource(pExInfo);
pExInfo->m_pPrevNestedInfo = pNestedExInfo; // Save at head of nested info chain
#if 0
/* the following code was introduced in Whidbey as part of the Faulting Exception Frame removal (12/03).
However it isn't correct. If any nested exceptions occur while processing a rethrow, we would
incorrectly consider the nested exception to be a rethrow. See VSWhidbey 349379 for an example.
Therefore I am disabling this code until we see a failure that explains why it was added in the first
place. cwb 9/04.
*/
// If we're here as a result of a rethrown exception, set the rethrown flag on the new ExInfo.
if (bRethrownException)
{
pExInfo->m_ExceptionFlags.SetIsRethrown();
}
#endif
}
else
{
// At this point, either:
//
// 1) the bottom-most handler is NULL, implying this is a new exception for which we are getting ready, OR
// 2) the bottom-most handler is not-NULL, implying that a there is already an existing exception in progress.
//
// Scenario 1 is that of a new throw and is easy to understand. Scenario 2 is the interesting one.
//
// ManagedA1 -> ManagedA2 -> ManagedA3 -> NativeCodeA -> ManagedB1 -> ManagedB2 -> ManagedB3 -> NativeCodeB
//
// On x86, each block of managed code is protected by one COMPlusFrameHandler [CPFH] (CLR's exception handler
// for managed code), unlike 64bit where each frame has a personality routine attached to it. Thus,
// for the example above, assume CPFH_A protects ManagedA* blocks and is setup just before the call to
// ManagedA1. Likewise, CPFH_B protects ManagedB* blocks and is setup just before the call to ManagedB1.
//
// When ManagedB3 throws an exception, CPFH_B is invoked to look for a handler in all of the ManagedB* blocks.
// At this point, it is setup as the "bottom-most-handler" (BMH). If no handler is found and exception reaches
// ManagedA* blocks, CPFH_A is invoked to look for a handler and thus, becomes BMH.
//
// Thus, in the first pass on x86 for a given exception, a particular CPFH will be invoked only once when looking
// for a handler and thus, registered as BMH only once. Either the exception goes unhandled and the process will
// terminate or a handler will be found and second pass will commence.
//
// However, assume NativeCodeB had a __try/__finally and raised an exception [E1] within the __try. Let's assume
// it gets caught in ManagedB1 and thus, unwind is triggered. At this point, the active exception tracker
// has context about the exception thrown out of __try and CPFH_B is registered as BMH.
//
// If the __finally throws a new exception [E2], CPFH_B will be invoked again for first pass while looking for
// a handler for the thrown exception. Since BMH is already non-NULL, we will come here since EstablisherFrame will be
// the same as BMH (because EstablisherFrame will be that of CPFH_B). We will proceed to overwrite the "required" parts
// of the existing exception tracker with the details of E2 (see setting of exception record and context below), erasing
// any artifact of E1.
//
// This is unlike Scenario 1 when exception tracker is completely initialized to default values. This is also
// unlike 64bit which will detect that E1 and E2 are different exceptions and hence, will setup a new tracker
// to track E2, effectively behaving like Scenario 1 above. X86 cannot do this since there is no nested exception
// tracker setup that gets to see the new exception.
//
// Thus, if E1 was a CSE and E2 isn't, we will come here and treat E2 as a CSE as well since corruption severity
// is initialized as part of exception tracker initialization. Thus, E2 will start to be treated as CSE, which is
// incorrect. Similar argument applies to delivery of First chance exception notification delivery.
//
// <QUIP> Another example why we should unify EH systems :) </QUIP>
//
// To address this issue, we will need to reset exception tracker here, just like the overwriting of "required"
// parts of exception tracker.
// If the current establisher frame is the same as the bottom-most-handler and we are here
// in the first pass, assert that current exception and the one tracked by active exception tracker
// are indeed different exceptions. In such a case, we must reset the exception tracker so that it can be
// setup correctly further down when CEHelper::SetupCorruptionSeverityForActiveException is invoked.
if ((pExInfo->m_pBottomMostHandler != NULL) &&
(pEstablisherFrame == pExInfo->m_pBottomMostHandler))
{
// Current exception should be different from the one exception tracker is already tracking.
_ASSERTE(pExceptionRecord != pExInfo->m_pExceptionRecord);
// This cannot be nested exceptions - they are handled earlier (see above).
_ASSERTE(!bNestedException);
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: Bottom-most handler (0x%p) is the same as EstablisherFrame.\n",
pExInfo->m_pBottomMostHandler));
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: Exception record in exception tracker is 0x%p, while that of new exception is 0x%p.\n",
pExInfo->m_pExceptionRecord, pExceptionRecord));
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: Resetting exception tracker (0x%p).\n", pExInfo));
// This will reset the exception tracker state, including the corruption severity.
pExInfo->Init();
}
}
// If we are handling a fault from managed code, we need to set the Thread->ExInfo->pContext to
// the current fault context, which is used in the stack walk to get back into the managed
// stack with the correct registers. (Previously, this was done by linking in a FaultingExceptionFrame
// record.)
// We are about to create the managed exception object, which may trigger a GC, so set this up now.
pExInfo->m_pExceptionRecord = pExceptionRecord;
pExInfo->m_pContext = pContext;
if (pContext && ShouldHandleManagedFault(pExceptionRecord, pContext, pEstablisherFrame, pThread))
{ // If this was a fault in managed code, rather than create a Frame for stackwalking,
// we can use this exinfo (after all, it has all the register info.)
pExInfo->m_ExceptionFlags.SetUseExInfoForStackwalk();
}
// It should now be safe for a GC to happen.
// case 1 & 3: this is the first time through of a new, nested, or rethrown exception, so see if we can
// find a handler. Only setup throwable if are bottommost handler
if (IsComPlusException(pExceptionRecord) && (!bAsynchronousThreadStop))
{
// Update the throwable from the last thrown object. Note: this may cause OOM, in which case we replace
// both throwables with the preallocated OOM exception.
pThread->SafeSetThrowables(pThread->LastThrownObject());
// now we've got a COM+ exception, fall through to so see if we handle it
STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: fall through ExInfo:0x%p setting m_pBottomMostHandler to 0x%p from 0x%p\n",
pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler);
pExInfo->m_pBottomMostHandler = pEstablisherFrame;
}
else if (bRethrownException)
{
// If it was rethrown and not COM+, will still be the last one thrown. Either we threw it last and
// stashed it here or someone else caught it and rethrew it, in which case it will still have been
// originally stashed here.
// Update the throwable from the last thrown object. Note: this may cause OOM, in which case we replace
// both throwables with the preallocated OOM exception.
pThread->SafeSetThrowables(pThread->LastThrownObject());
STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: rethrow non-COM+ ExInfo:0x%p setting m_pBottomMostHandler to 0x%p from 0x%p\n",
pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler);
pExInfo->m_pBottomMostHandler = pEstablisherFrame;
}
else
{
if (!fIsManagedCode)
{
tct.bDontCatch = false;
}
if (exceptionCode == STATUS_BREAKPOINT)
{
// don't catch int 3
retval = ExceptionContinueSearch;
goto exit;
}
// We need to set m_pBottomMostHandler here, Thread::IsExceptionInProgress returns 1.
// This is a necessary part of suppressing thread abort exceptions in the constructor
// of any exception object we might create.
STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: setting ExInfo:0x%p m_pBottomMostHandler for IsExceptionInProgress to 0x%p from 0x%p\n",
pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler);
pExInfo->m_pBottomMostHandler = pEstablisherFrame;
// Create the managed exception object.
OBJECTREF throwable = CreateCOMPlusExceptionObject(pThread, pExceptionRecord, bAsynchronousThreadStop);
// Set the throwables on the thread to the newly created object. If this fails, it will return a
// preallocated exception object instead. This also updates the last thrown exception, for rethrows.
throwable = pThread->SafeSetThrowables(throwable);
// Set the exception code and pointers. We set these after setting the throwables on the thread,
// because if the proper exception is replaced by an OOM exception, we still want the exception code
// and pointers set in the OOM exception.
EXCEPTIONREF exceptionRef = (EXCEPTIONREF)throwable;
exceptionRef->SetXCode(pExceptionRecord->ExceptionCode);
exceptionRef->SetXPtrs(&exceptionPointers);
}
tct.pBottomFrame = NULL;
EEToProfilerExceptionInterfaceWrapper::ExceptionThrown(pThread);
g_exceptionCount++;
} // End of case-1-or-3
{
// Allocate storage for the stack trace.
OBJECTREF throwable = NULL;
GCPROTECT_BEGIN(throwable);
throwable = pThread->GetThrowable();
if (IsProcessCorruptedStateException(exceptionCode, throwable))
{
// Failfast if exception indicates corrupted process state
EEPOLICY_HANDLE_FATAL_ERROR(exceptionCode);
}
// If we're out of memory, then we figure there's probably not memory to maintain a stack trace, so we skip it.
// If we've got a stack overflow, then we figure the stack will be so huge as to make tracking the stack trace
// impracticle, so we skip it.
if ((throwable == CLRException::GetPreallocatedOutOfMemoryException()) ||
(throwable == CLRException::GetPreallocatedStackOverflowException()))
{
tct.bAllowAllocMem = FALSE;
}
else
{
pExInfo->m_StackTraceInfo.AllocateStackTrace();
}
GCPROTECT_END();
}
// Set up information for GetExceptionPointers()/GetExceptionCode() callback.
pExInfo->SetExceptionCode(pExceptionRecord);
pExInfo->m_pExceptionPointers = &exceptionPointers;
if (bRethrownException || bNestedException)
{
_ASSERTE(pExInfo->m_pPrevNestedInfo != NULL);
SetStateForWatsonBucketing(bRethrownException, pExInfo->GetPreviousExceptionTracker()->GetThrowableAsHandle());
}
#ifdef DEBUGGING_SUPPORTED
//
// At this point the exception is still fresh to us, so assert that
// there should be nothing from the debugger on it.
//
_ASSERTE(!pExInfo->m_ExceptionFlags.DebuggerInterceptInfo());
#endif
if (pThread->IsRudeAbort())
{
OBJECTREF throwable = pThread->GetThrowable();
if (throwable == NULL || !IsExceptionOfType(kThreadAbortException, &throwable))
{
// Neither of these sets will throw because the throwable that we're setting is a preallocated
// exception. This also updates the last thrown exception, for rethrows.
pThread->SafeSetThrowables(CLRException::GetBestThreadAbortException());
}
if (!pThread->IsRudeAbortInitiated())
{
pThread->PreWorkForThreadAbort();
}
}
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: looking for handler bottom %x, top %x\n",
tct.pBottomFrame, tct.pTopFrame));
tct.bReplaceStack = pExInfo->m_pBottomMostHandler == pEstablisherFrame && !bRethrownException;
tct.bSkipLastElement = bRethrownException && bNestedException;
found = LookForHandler(&exceptionPointers,
pThread,
&tct);
// We have searched this far.
pExInfo->m_pSearchBoundary = tct.pTopFrame;
LOG((LF_EH, LL_INFO1000, "CPFH_RealFirstPassHandler: set pSearchBoundary to 0x%p\n", pExInfo->m_pSearchBoundary));
if ((found == LFH_NOT_FOUND)
#ifdef DEBUGGING_SUPPORTED
&& !pExInfo->m_ExceptionFlags.DebuggerInterceptInfo()
#endif
)
{
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: NOT_FOUND\n"));
if (tct.pTopFrame == FRAME_TOP)
{
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: NOT_FOUND at FRAME_TOP\n"));
}
retval = ExceptionContinueSearch;
goto exit;
}
else
{
// so we are going to handle the exception
// Remove the nested exception record -- before calling RtlUnwind.
// The second-pass callback for a NestedExceptionRecord assumes that if it's
// being unwound, it should pop one exception from the pExInfo chain. This is
// true for any older NestedRecords that might be unwound -- but not for the
// new one we're about to add. To avoid this, we remove the new record
// before calling Unwind.
//
// <TODO>@NICE: This can probably be a little cleaner -- the nested record currently
// is also used to guard the running of the filter code. When we clean up the
// behaviour of exceptions within filters, we should be able to get rid of this
// PUSH/POP/PUSH behaviour.</TODO>
_ASSERTE(bPopNestedHandlerExRecord);
UNINSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));
// Since we are going to handle the exception we switch into preemptive mode
GCX_PREEMP_NO_DTOR();
#ifdef DEBUGGING_SUPPORTED
//
// Check if the debugger wants to intercept this frame at a different point than where we are.
//
if (pExInfo->m_ExceptionFlags.DebuggerInterceptInfo())
{
ClrDebuggerDoUnwindAndIntercept(pEstablisherFrame, pExceptionRecord);
//
// If this returns, then the debugger couldn't do it's stuff and we default to the found handler.
//
if (found == LFH_NOT_FOUND)
{
retval = ExceptionContinueSearch;
// we need to be sure to switch back into Cooperative mode since we are going to
// jump to the exit: label and follow the normal return path (it is expected that
// CPFH_RealFirstPassHandler returns in COOP.
GCX_PREEMP_NO_DTOR_END();
goto exit;
}
}
#endif
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: handler found: %s\n", tct.pFunc->m_pszDebugMethodName));
CallRtlUnwindSafe(pEstablisherFrame, RtlUnwindCallback, pExceptionRecord, 0);
// on x86 at least, RtlUnwind always returns
// The CallRtlUnwindSafe could have popped the explicit frame that the tct.pBottomFrame points to (UMThunkPrestubHandler
// does that). In such case, the tct.pBottomFrame needs to be updated to point to the first valid explicit frame.
Frame* frame = pThread->GetFrame();
if ((tct.pBottomFrame != NULL) && (frame > tct.pBottomFrame))
{
tct.pBottomFrame = frame;
}
// Note: we've completed the unwind pass up to the establisher frame, and we're headed off to finish our
// cleanup and end up back in jitted code. Any more FS0 handlers pushed from this point on out will _not_ be
// unwound.
// Note: we are still in Preemptive mode here and that is correct, COMPlusAfterUnwind will switch us back
// into Cooperative mode.
return COMPlusAfterUnwind(pExceptionRecord, pEstablisherFrame, tct);
}
exit:
{
// We need to be in COOP if we get here
GCX_ASSERT_COOP();
}
// If we got as far as saving pExInfo, save the context pointer so it's available for the unwind.
if (pExInfo)
{
pExInfo->m_pContext = pContext;
// pExInfo->m_pExceptionPointers points to a local structure, which is now going out of scope.
pExInfo->m_pExceptionPointers = NULL;
}
#if defined(USE_FEF)
if (bPopFaultingExceptionFrame)
{
faultingExceptionFrame.Pop();
}
#endif // USE_FEF
if (bPopNestedHandlerExRecord)
{
UNINSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));
}
return retval;
} // CPFH_RealFirstPassHandler()
//******************************************************************************
//
// One-time process-level setup for the x86 exception handling machinery.
// Initializes the saved-exception-info state, registers the CLR's vectored
// exception handlers with the OS, and creates the lock guarding the stack
// trace array stored in managed exception objects.
// NOTE(review): the call order here looks deliberate (state before handler
// registration) -- do not reorder without confirming.
void InitializeExceptionHandling()
{
    WRAPPER_NO_CONTRACT;

    InitSavedExceptionInfo();

    CLRAddVectoredHandlers();

    // Initialize the lock used for synchronizing access to the stacktrace in the exception object
    g_StackTraceArrayLock.Init(LOCK_TYPE_DEFAULT, TRUE);
}
//******************************************************************************
// First-pass (search phase) handler, called from COMPlusFrameHandler when the
// exception record does not carry unwind flags.  Filters out exceptions that
// are absorbed elsewhere (vectored handlers, GC stress markers), adjusts the
// context for an asynchronous thread stop, optionally pushes a faulting
// exception frame for faults in managed code, then delegates the actual
// handler search to CPFH_RealFirstPassHandler.
//
// Returns the EXCEPTION_DISPOSITION to hand back to the OS dispatcher:
//   ExceptionContinueExecution - fault was absorbed (vectored handler,
//                                GC marker, or managed-fault setup).
//   ExceptionContinueSearch    - keep walking the FS:[0] chain.
//   (other values propagate from CPFH_RealFirstPassHandler)
static inline EXCEPTION_DISPOSITION __cdecl
CPFH_FirstPassHandler(EXCEPTION_RECORD *pExceptionRecord,
                      EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
                      CONTEXT *pContext,
                      DISPATCHER_CONTEXT *pDispatcherContext)
{
    WRAPPER_NO_CONTRACT;
    EXCEPTION_DISPOSITION retval;

    // First pass only: unwinds are routed to CPFH_UnwindHandler by the caller.
    _ASSERTE (!(pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)));

    DWORD exceptionCode = pExceptionRecord->ExceptionCode;
    Thread *pThread = GetThread();

    STRESS_LOG4(LF_EH, LL_INFO100,
                "CPFH_FirstPassHandler: pEstablisherFrame = %x EH code = %x EIP = %x with ESP = %x\n",
                pEstablisherFrame, exceptionCode, pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0);

    EXCEPTION_POINTERS ptrs = { pExceptionRecord, pContext };

    // Call to the vectored handler to give other parts of the Runtime a chance to jump in and take over an
    // exception before we do too much with it. The most important point in the vectored handler is not to toggle
    // the GC mode.
    DWORD filter = CLRVectoredExceptionHandler(&ptrs);

    if (filter == (DWORD) EXCEPTION_CONTINUE_EXECUTION)
    {
        return ExceptionContinueExecution;
    }
    else if (filter == EXCEPTION_CONTINUE_SEARCH)
    {
        return ExceptionContinueSearch;
    }

#if defined(STRESS_HEAP)
    //
    // Check to see if this exception is due to GCStress. Since the GCStress mechanism only injects these faults
    // into managed code, we only need to check for them in CPFH_FirstPassHandler.
    //
    if (IsGcMarker(pContext, pExceptionRecord))
    {
        return ExceptionContinueExecution;
    }
#endif // STRESS_HEAP

    // We always want to be in co-operative mode when we run this function and whenever we return
    // from it, want to go to pre-emptive mode because are returning to OS.
    BOOL disabled = pThread->PreemptiveGCDisabled();
    GCX_COOP_NO_DTOR();

    BOOL bAsynchronousThreadStop = IsThreadHijackedForThreadStop(pThread, pExceptionRecord);

    if (bAsynchronousThreadStop)
    {
        // If we ever get here in preemptive mode, we're in trouble.  We've
        // changed the thread's IP to point at a little function that throws ... if
        // the thread were to be in preemptive mode and a GC occurred, the stack
        // crawl would have been all messed up (because we have no frame that points
        // us back to the right place in managed code).
        _ASSERTE(disabled);

        AdjustContextForThreadStop(pThread, pContext);
        LOG((LF_EH, LL_INFO100, "CPFH_FirstPassHandler is Asynchronous Thread Stop or Abort\n"));
    }

    pThread->ResetThrowControlForThread();

    CPFH_VerifyThreadIsInValidState(pThread, exceptionCode, pEstablisherFrame);

    // If we were in cooperative mode when we came in here, then its okay to see if we should do HandleManagedFault
    // and push a FaultingExceptionFrame. If we weren't in coop mode coming in here, then it means that there's no
    // way the exception could really be from managed code. I might look like it was from managed code, but in
    // reality its a rethrow from unmanaged code, either unmanaged user code, or unmanaged EE implementation.
    if (disabled && ShouldHandleManagedFault(pExceptionRecord, pContext, pEstablisherFrame, pThread))
    {
#if defined(USE_FEF)
        HandleManagedFault(pExceptionRecord, pContext, pEstablisherFrame, pThread);
        retval = ExceptionContinueExecution;
        goto exit;
#else // USE_FEF
        // Save the context pointer in the Thread's EXInfo, so that a stack crawl can recover the
        // register values from the fault.

        //@todo: I haven't yet found any case where we need to do anything here.  If there are none, eliminate
        // this entire if () {} block.
#endif // USE_FEF
    }

    // OK. We're finally ready to start the real work. Nobody else grabbed the exception in front of us. Now we can
    // get started.
    retval = CPFH_RealFirstPassHandler(pExceptionRecord,
                                       pEstablisherFrame,
                                       pContext,
                                       pDispatcherContext,
                                       bAsynchronousThreadStop,
                                       disabled);

#if defined(USE_FEF) // This label is only used in the HandleManagedFault() case above.
exit:
#endif
    // Restore preemptive mode before returning to the OS, unless we absorbed
    // the fault while the thread originally had GC disabled.
    if (retval != ExceptionContinueExecution || !disabled)
    {
        GCX_PREEMP_NO_DTOR();
    }

    STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_FirstPassHandler: exiting with retval %d\n", retval);
    return retval;
} // CPFH_FirstPassHandler()
//******************************************************************************
// Second-pass helper: unwinds the CLR Frame chain up to the establisher
// frame's boundary, honoring any debugger interception target, and pops the
// current ExInfo when this establisher is the search boundary for the
// exception (i.e. the exception is leaving managed code for an unmanaged
// catcher).
inline void
CPFH_UnwindFrames1(Thread* pThread, EXCEPTION_REGISTRATION_RECORD* pEstablisherFrame, DWORD exceptionCode)
{
    WRAPPER_NO_CONTRACT;

    ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);

    // Ready to unwind the stack...
    ThrowCallbackType tct;
    tct.Init();
    tct.bIsUnwind = TRUE;
    tct.pTopFrame = GetCurrFrame(pEstablisherFrame); // highest frame to search to
    tct.pBottomFrame = NULL;

#ifdef _DEBUG
    tct.pCurrentExceptionRecord = pEstablisherFrame;
    tct.pPrevExceptionRecord = GetPrevSEHRecord(pEstablisherFrame);
#endif

#ifdef DEBUGGING_SUPPORTED
    EXCEPTION_REGISTRATION_RECORD *pInterceptEstablisherFrame = NULL;

    // If the exception is intercepted, use information stored in the DebuggerExState to unwind the stack.
    if (pExInfo->m_ExceptionFlags.DebuggerInterceptInfo())
    {
        pExInfo->m_DebuggerExState.GetDebuggerInterceptInfo(&pInterceptEstablisherFrame,
                                      NULL,     // MethodDesc **ppFunc,
                                      NULL,     // int *pdHandler,
                                      NULL,     // BYTE **ppStack
                                      NULL,     // ULONG_PTR *pNativeOffset,
                                      NULL      // Frame **ppFrame)
                                     );
        LOG((LF_EH, LL_INFO1000, "CPFH_UnwindFrames1: frames are Est 0x%X, Intercept 0x%X\n",
             pEstablisherFrame, pInterceptEstablisherFrame));

        //
        // When we set up for the interception we store off the CPFH or CPNEH that we
        // *know* will handle unwinding the destination of the intercept.
        //
        // However, a CPNEH with the same limiting Capital-F-rame could do the work
        // and unwind us, so...
        //
        // If this is the exact frame handler we are supposed to search for, or
        // if this frame handler services the same Capital-F-rame as the frame handler
        // we are looking for (i.e. this frame handler may do the work that we would
        // expect our frame handler to do),
        // then
        // we need to pass the interception destination during this unwind.
        //
        _ASSERTE(IsUnmanagedToManagedSEHHandler(pEstablisherFrame));

        if ((pEstablisherFrame == pInterceptEstablisherFrame) ||
            (GetCurrFrame(pEstablisherFrame) == GetCurrFrame(pInterceptEstablisherFrame)))
        {
            pExInfo->m_DebuggerExState.GetDebuggerInterceptInfo(NULL,
                                          &(tct.pFunc),
                                          &(tct.dHandler),
                                          &(tct.pStack),
                                          NULL,
                                          &(tct.pBottomFrame)
                                         );

            LOG((LF_EH, LL_INFO1000, "CPFH_UnwindFrames1: going to: pFunc:%#X, pStack:%#X\n",
                tct.pFunc, tct.pStack));

        }

    }
#endif

    UnwindFrames(pThread, &tct);

    LOG((LF_EH, LL_INFO1000, "CPFH_UnwindFrames1: after unwind ec:%#x, tct.pTopFrame:0x%p, pSearchBndry:0x%p\n"
        " pEstFrame:0x%p, IsC+NestExRec:%d, !Nest||Active:%d\n",
         exceptionCode, tct.pTopFrame, pExInfo->m_pSearchBoundary, pEstablisherFrame,
         IsComPlusNestedExceptionRecord(pEstablisherFrame),
         (!IsComPlusNestedExceptionRecord(pEstablisherFrame) || reinterpret_cast<NestedHandlerExRecord*>(pEstablisherFrame)->m_ActiveForUnwind)));

    if (tct.pTopFrame >= pExInfo->m_pSearchBoundary &&
         (!IsComPlusNestedExceptionRecord(pEstablisherFrame) ||
          reinterpret_cast<NestedHandlerExRecord*>(pEstablisherFrame)->m_ActiveForUnwind) )
    {
        // If this is the search boundary, and we're not a nested handler, then
        // this is the last time we'll see this exception.  Time to unwind our
        // exinfo.
        STRESS_LOG0(LF_EH, LL_INFO100, "CPFH_UnwindFrames1: Exception unwind -- unmanaged catcher detected\n");
        pExInfo->UnwindExInfo((VOID*)pEstablisherFrame);
    }
} // CPFH_UnwindFrames1()
//******************************************************************************
// Second-pass (unwind phase) handler, called from COMPlusFrameHandler when
// the exception record carries unwind flags.  Fixes up any nested ExInfo's
// bottom-most handler that is about to be popped, installs a nested handler
// record to bound the unwind, unwinds the CLR Frame chain via
// CPFH_UnwindFrames1, and resets thread abort state if appropriate.
// Always returns ExceptionContinueSearch -- the OS continues the unwind.
inline EXCEPTION_DISPOSITION __cdecl
CPFH_UnwindHandler(EXCEPTION_RECORD *pExceptionRecord,
                   EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
                   CONTEXT *pContext,
                   void *pDispatcherContext)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND));

#ifdef _DEBUG
    // Note: you might be inclined to write "static int breakOnSecondPass = CLRConfig::GetConfigValue(...);", but
    // you can't do that here. That causes C++ EH to be generated under the covers for this function, and this
    // function isn't allowed to have any C++ EH in it because its never going to return.
    static int breakOnSecondPass; // = 0
    static BOOL breakOnSecondPassSetup; // = FALSE
    if (!breakOnSecondPassSetup)
    {
        breakOnSecondPass = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnSecondPass);
        breakOnSecondPassSetup = TRUE;
    }
    if (breakOnSecondPass != 0)
    {
        _ASSERTE(!"Unwind handler");
    }
#endif

    DWORD exceptionCode = pExceptionRecord->ExceptionCode;
    Thread *pThread = GetThread();

    ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);

    STRESS_LOG4(LF_EH, LL_INFO100, "In CPFH_UnwindHandler EHCode = %x EIP = %x with ESP = %x, pEstablisherFrame = 0x%p\n", exceptionCode,
        pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0, pEstablisherFrame);

    // We always want to be in co-operative mode when we run this function.  Whenever we return
    // from it, want to go to pre-emptive mode because are returning to OS.
    {
        // needs to be in its own scope to avoid polluting the namespace, since
        // we don't do a _END then we don't revert the state
        GCX_COOP_NO_DTOR();
    }

    CPFH_VerifyThreadIsInValidState(pThread, exceptionCode, pEstablisherFrame);

    if (IsComPlusNestedExceptionRecord(pEstablisherFrame))
    {
        NestedHandlerExRecord *pHandler = reinterpret_cast<NestedHandlerExRecord*>(pEstablisherFrame);
        if (pHandler->m_pCurrentExInfo != NULL)
        {
            // See the comment at the end of COMPlusNestedExceptionHandler about nested exception.
            // OS is going to skip the EstablisherFrame before our NestedHandler.
            if (pHandler->m_pCurrentExInfo->m_pBottomMostHandler <= pHandler->m_pCurrentHandler)
            {
                // We're unwinding -- the bottom most handler is potentially off top-of-stack now.  If
                // it is, change it to the next COM+ frame.  (This one is not good, as it's about to
                // disappear.)
                EXCEPTION_REGISTRATION_RECORD *pNextBottomMost = GetNextCOMPlusSEHRecord(pHandler->m_pCurrentHandler);

                STRESS_LOG3(LF_EH, LL_INFO10000, "COMPlusNestedExceptionHandler: setting ExInfo:0x%p m_pBottomMostHandler from 0x%p to 0x%p\n",
                    pHandler->m_pCurrentExInfo, pHandler->m_pCurrentExInfo->m_pBottomMostHandler, pNextBottomMost);

                pHandler->m_pCurrentExInfo->m_pBottomMostHandler = pNextBottomMost;
            }
        }
    }

    // this establishes a marker so can determine if are processing a nested exception
    // don't want to use the current frame to limit search as it could have been unwound by
    // the time get to nested handler (ie if find an exception, unwind to the call point and
    // then resume in the catch and then get another exception) so make the nested handler
    // have the same boundary as this one. If nested handler can't find a handler, we won't
    // end up searching this frame list twice because the nested handler will set the search
    // boundary in the thread and so if get back to this handler it will have a range that starts
    // and ends at the same place.
    NestedHandlerExRecord nestedHandlerExRecord;
    nestedHandlerExRecord.Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, GetCurrFrame(pEstablisherFrame));

    nestedHandlerExRecord.m_ActiveForUnwind = TRUE;
    nestedHandlerExRecord.m_pCurrentExInfo = pExInfo;
    nestedHandlerExRecord.m_pCurrentHandler = pEstablisherFrame;

    INSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));

    // Unwind the stack.  The establisher frame sets the boundary.
    CPFH_UnwindFrames1(pThread, pEstablisherFrame, exceptionCode);

    // We're unwinding -- the bottom most handler is potentially off top-of-stack now.  If
    // it is, change it to the next COM+ frame.  (This one is not good, as it's about to
    // disappear.)
    if (pExInfo->m_pBottomMostHandler &&
        pExInfo->m_pBottomMostHandler <= pEstablisherFrame)
    {
        EXCEPTION_REGISTRATION_RECORD *pNextBottomMost = GetNextCOMPlusSEHRecord(pEstablisherFrame);

        // If there is no previous COM+ SEH handler, GetNextCOMPlusSEHRecord() will return -1.  Much later, we will dereference that and AV.
        _ASSERTE (pNextBottomMost != EXCEPTION_CHAIN_END);

        STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_UnwindHandler: setting ExInfo:0x%p m_pBottomMostHandler from 0x%p to 0x%p\n",
            pExInfo, pExInfo->m_pBottomMostHandler, pNextBottomMost);

        pExInfo->m_pBottomMostHandler = pNextBottomMost;
    }

    {
        // needs to be in its own scope to avoid polluting the namespace, since
        // we don't do a _END then we don't revert the state
        GCX_PREEMP_NO_DTOR();
    }
    UNINSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));

    // If we are here, then exception was not caught in managed code protected by this
    // ComplusFrameHandler. Hence, reset thread abort state if this is the last personality routine,
    // for managed code, on the stack.
    ResetThreadAbortState(pThread, pEstablisherFrame);

    STRESS_LOG0(LF_EH, LL_INFO100, "CPFH_UnwindHandler: Leaving with ExceptionContinueSearch\n");
    return ExceptionContinueSearch;
} // CPFH_UnwindHandler()
//******************************************************************************
// This is the first handler that is called in the context of managed code
// It is the first level of defense and tries to find a handler in the user
// code to handle the exception
//-------------------------------------------------------------------------
// EXCEPTION_DISPOSITION __cdecl COMPlusFrameHandler(
// EXCEPTION_RECORD *pExceptionRecord,
// _EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
// CONTEXT *pContext,
// DISPATCHER_CONTEXT *pDispatcherContext)
//
// See http://www.microsoft.com/msj/0197/exception/exception.aspx for a background piece on Windows
// unmanaged structured exception handling.
// Main x86 SEH personality routine for managed frames.  Saves the current
// exception info (unless it is a GC stress marker), special-cases stack
// overflow on both passes, then routes to CPFH_UnwindHandler for the second
// (unwind) pass or CPFH_FirstPassHandler for the first (search) pass.
EXCEPTION_HANDLER_IMPL(COMPlusFrameHandler)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(!DebugIsEECxxException(pExceptionRecord) && "EE C++ Exception leaked into managed code!");

    STRESS_LOG5(LF_EH, LL_INFO100, "In COMPlusFrameHander EH code = %x  flag = %x EIP = %x with ESP = %x, pEstablisherFrame = 0x%p\n",
        pExceptionRecord->ExceptionCode, pExceptionRecord->ExceptionFlags,
        pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0, pEstablisherFrame);

    _ASSERTE((pContext == NULL) || ((pContext->ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL));

    if (g_fNoExceptions)
        return ExceptionContinueSearch; // No EH during EE shutdown.

    // Check if the exception represents a GCStress Marker. If it does,
    // we shouldnt record its entry in the TLS as such exceptions are
    // continuable and can confuse the VM to treat them as CSE,
    // as they are implemented using illegal instruction exception.

    bool fIsGCMarker = false;

#ifdef HAVE_GCCOVER // This is a debug only macro
    if (GCStress<cfg_instr_jit>::IsEnabled())
    {
        // TlsGetValue trashes last error. When Complus_GCStress=4, GC is invoked
        // on every allowable JITed instruction by means of our exception handling mechanism
        // it is very easy to trash the last error. For example, a p/invoke called a native method
        // which sets last error. Before we getting the last error in the IL stub, it is trashed here
        DWORD dwLastError = GetLastError();
        fIsGCMarker = IsGcMarker(pContext, pExceptionRecord);
        if (!fIsGCMarker)
        {
            SaveCurrentExceptionInfo(pExceptionRecord, pContext);
        }
        SetLastError(dwLastError);
    }
    else
#endif
    {
        // GCStress does not exist on retail builds (see IsGcMarker implementation for details).
        SaveCurrentExceptionInfo(pExceptionRecord, pContext);
    }

    if (fIsGCMarker)
    {
        // If this was a GCStress marker exception, then return
        // ExceptionContinueExecution to the OS.
        return ExceptionContinueExecution;
    }

    EXCEPTION_DISPOSITION retVal = ExceptionContinueSearch;

    Thread *pThread = GetThread();
    if ((pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)) == 0)
    {
        // --- First pass ---
        if (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW)
        {
            EEPolicy::HandleStackOverflow();

            // VC's unhandled exception filter plays with stack. It VirtualAlloc's a new stack, and
            // then launch Watson from the new stack. When Watson asks CLR to save required data, we
            // are not able to walk the stack.
            // Setting Context in ExInfo so that our Watson dump routine knows how to walk this stack.
            ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
            pExInfo->m_pContext = pContext;

            // Save the reference to the topmost handler we see during first pass when an SO goes past us.
            // When an unwind gets triggered for the exception, we will reset the frame chain when we reach
            // the topmost handler we saw during the first pass.
            //
            // This unifies, behaviour-wise, 32bit with 64bit.
            if ((pExInfo->m_pTopMostHandlerDuringSO == NULL) ||
                (pEstablisherFrame > pExInfo->m_pTopMostHandlerDuringSO))
            {
                pExInfo->m_pTopMostHandlerDuringSO = pEstablisherFrame;
            }

            // Switch to preemp mode since we are returning back to the OS.
            // We will do the quick switch since we are short of stack
            FastInterlockAnd (&pThread->m_fPreemptiveGCDisabled, 0);

            return ExceptionContinueSearch;
        }
    }
    else
    {
        // --- Second pass ---
        DWORD exceptionCode = pExceptionRecord->ExceptionCode;
        if (exceptionCode == STATUS_UNWIND)
        {
            // If exceptionCode is STATUS_UNWIND, RtlUnwind is called with a NULL ExceptionRecord,
            // therefore OS uses a faked ExceptionRecord with STATUS_UNWIND code.  Then we need to
            // look at our saved exception code.
            exceptionCode = GetCurrentExceptionCode();
        }

        if (exceptionCode == STATUS_STACK_OVERFLOW)
        {
            // We saved the context during the first pass in case the stack overflow exception is
            // unhandled and Watson dump code needs it.  Now we are in the second pass, therefore
            // either the exception is handled by user code, or we have finished unhandled exception
            // filter process, and the OS is unwinding the stack.  Either way, we don't need the
            // context any more.  It is very important to reset the context so that our code does not
            // accidentally walk the frame using the dangling context in ExInfoWalker::WalkToPosition.
            ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
            pExInfo->m_pContext = NULL;

            // We should have the reference to the topmost handler seen during the first pass of SO
            _ASSERTE(pExInfo->m_pTopMostHandlerDuringSO != NULL);

            // Reset frame chain till we reach the topmost establisher frame we saw in the first pass.
            // This will ensure that if any intermediary frame calls back into managed (e.g. native frame
            // containing a __finally that reverse pinvokes into managed), then we have the correct
            // explicit frame on the stack. Resetting the frame chain only when we reach the topmost
            // personality routine seen in the first pass may not result in expected behaviour,
            // specially during stack walks when crawl frame needs to be initialized from
            // explicit frame.
            if (pEstablisherFrame <= pExInfo->m_pTopMostHandlerDuringSO)
            {
                GCX_COOP_NO_DTOR();

                if (pThread->GetFrame() < GetCurrFrame(pEstablisherFrame))
                {
                    // We are very short of stack.  We avoid calling UnwindFrame which may
                    // run unknown code here.
                    pThread->SetFrame(GetCurrFrame(pEstablisherFrame));
                }
            }

            // Switch to preemp mode since we are returning back to the OS.
            // We will do the quick switch since we are short of stack
            FastInterlockAnd(&pThread->m_fPreemptiveGCDisabled, 0);

            return ExceptionContinueSearch;
        }
    }

    if (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND))
    {
        retVal =  CPFH_UnwindHandler(pExceptionRecord,
                                     pEstablisherFrame,
                                     pContext,
                                     pDispatcherContext);
    }
    else
    {

        /* Make no assumptions about the current machine state.
           <TODO>@PERF: Only needs to be called by the very first handler invoked by SEH </TODO>*/
        ResetCurrentContext();

        retVal = CPFH_FirstPassHandler(pExceptionRecord,
                                       pEstablisherFrame,
                                       pContext,
                                       pDispatcherContext);

    }

    return retVal;
} // COMPlusFrameHandler()
//-------------------------------------------------------------------------
// This is called by the EE to restore the stack pointer if necessary.
//-------------------------------------------------------------------------
// This can't be inlined into the caller to avoid introducing EH frame
// Worker for COMPlusEndCatch: tears down the exception state for the catch
// block that just finished executing (profiler notification, throwable reset,
// ExInfo unwind) and returns the ESP of the handler nesting level that caught
// the exception, which the caller restores before resuming.
//
// This can't be inlined into the caller to avoid introducing EH frame
NOINLINE LPVOID COMPlusEndCatchWorker(Thread * pThread)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch:called with "
        "pThread:0x%x\n",pThread));

    // indicate that we are out of the managed clause as early as possible
    ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
    pExInfo->m_EHClauseInfo.SetManagedCodeEntered(FALSE);

    void* esp = NULL;

    // Notify the profiler that the catcher has finished running
    // IL stubs don't contain catch blocks so inability to perform this check does not matter.
    // if (!pFunc->IsILStub())
    EEToProfilerExceptionInterfaceWrapper::ExceptionCatcherLeave();

    // no need to set pExInfo->m_ClauseType = (DWORD)COR_PRF_CLAUSE_NONE now that the
    // notification is done because because the ExInfo record is about to be popped off anyway

    LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch:pThread:0x%x\n",pThread));

#ifdef _DEBUG
    gLastResumedExceptionFunc = NULL;
    gLastResumedExceptionHandler = 0;
#endif

    // Set the thrown object to NULL as no longer needed. This also sets the last thrown object to NULL.
    pThread->SafeSetThrowables(NULL);

    // reset the stashed exception info
    pExInfo->m_pExceptionRecord = NULL;
    pExInfo->m_pContext = NULL;
    pExInfo->m_pExceptionPointers = NULL;

    if  (pExInfo->m_pShadowSP)
    {
        *pExInfo->m_pShadowSP = 0;  // Reset the shadow SP
    }

    // pExInfo->m_dEsp was set in ResumeAtJITEH(). It is the Esp of the
    // handler nesting level which catches the exception.
    esp = (void*)(size_t)pExInfo->m_dEsp;

    pExInfo->UnwindExInfo(esp);

    // Prepare to sync managed exception state
    //
    // In a case when we're nested inside another catch block, the domain in which we're executing may not be the
    // same as the one the domain of the throwable that was just made the current throwable above. Therefore, we
    // make a special effort to preserve the domain of the throwable as we update the the last thrown object.
    //
    // This function (COMPlusEndCatch) can also be called by the in-proc debugger helper thread on x86 when
    // an attempt to SetIP takes place to set IP outside the catch clause. In such a case, managed thread object
    // will not be available. Thus, we should reset the severity only if its not such a thread.
    //
    // This behaviour (of debugger doing SetIP) is not allowed on 64bit since the catch clauses are implemented
    // as a separate funclet and it's just not allowed to set the IP across EH scopes, such as from inside a catch
    // clause to outside of the catch clause.
    bool fIsDebuggerHelperThread = (g_pDebugInterface == NULL) ? false : g_pDebugInterface->ThisIsHelperThread();

    // Sync managed exception state, for the managed thread, based upon any active exception tracker
    pThread->SyncManagedExceptionState(fIsDebuggerHelperThread);

    LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch: esp=%p\n", esp));

    return esp;
}
//
// This function works in conjunction with JIT_EndCatch. On input, the parameters are set as follows:
// ebp, ebx, edi, esi: the values of these registers at the end of the catch block
// *pRetAddress: the next instruction after the call to JIT_EndCatch
//
// On output, *pRetAddress is the instruction at which to resume execution. This may be user code,
// or it may be ThrowControlForThread (which will re-raise a pending ThreadAbortException).
//
// Returns the esp to set before resuming at *pRetAddress.
//
// Called (via JIT_EndCatch) when a managed catch block finishes.  Tears down
// exception state, pops any nested FS:[0] records below the resume ESP,
// records the resume context for a possible pending thread abort, and returns
// the ESP to restore before jumping to *pRetAddress.  See the comment block
// above for the register-parameter contract.
LPVOID STDCALL COMPlusEndCatch(LPVOID ebp, DWORD ebx, DWORD edi, DWORD esi, LPVOID* pRetAddress)
{
    //
    // PopNestedExceptionRecords directly manipulates fs:[0] chain. This method can't have any EH!
    //
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    ETW::ExceptionLog::ExceptionCatchEnd();
    ETW::ExceptionLog::ExceptionThrownEnd();

    void* esp = COMPlusEndCatchWorker(GetThread());

    // We are going to resume at a handler nesting level whose esp is dEsp. Pop off any SEH records below it. This
    // would be the COMPlusNestedExceptionHandler we had inserted.
    PopNestedExceptionRecords(esp);

    //
    // Set up m_OSContext for the call to COMPlusCheckForAbort
    //
    Thread* pThread = GetThread();
    SetIP(pThread->m_OSContext, (PCODE)*pRetAddress);
    SetSP(pThread->m_OSContext, (TADDR)esp);
    SetFP(pThread->m_OSContext, (TADDR)ebp);

    pThread->m_OSContext->Ebx = ebx;
    pThread->m_OSContext->Edi = edi;
    pThread->m_OSContext->Esi = esi;

    // If a thread abort is pending, redirect the resume address so the abort
    // is raised instead of returning to user code.
    LPVOID throwControl = COMPlusCheckForAbort((UINT_PTR)*pRetAddress);
    if (throwControl)
        *pRetAddress = throwControl;

    return esp;
}
// Returns the head of the current thread's SEH registration chain, read
// directly from FS:[0].  The disabled (#if 0) debug walk below validated the
// chain against the stack pointer but was deemed too expensive to keep on.
PEXCEPTION_REGISTRATION_RECORD GetCurrentSEHRecord()
{
    WRAPPER_NO_CONTRACT;

    LPVOID fs0 = (LPVOID)__readfsdword(0);

#if 0  // This walk is too expensive considering we hit it every time we a CONTRACT(NOTHROW)
#ifdef _DEBUG
    EXCEPTION_REGISTRATION_RECORD *pEHR = (EXCEPTION_REGISTRATION_RECORD *)fs0;
    LPVOID spVal;
    __asm {
        mov spVal, esp
    }

    // check that all the eh frames are all greater than the current stack value. If not, the
    // stack has been updated somehow w/o unwinding the SEH chain.

    // LOG((LF_EH, LL_INFO1000000, "ER Chain:\n"));
    while (pEHR != NULL && pEHR != EXCEPTION_CHAIN_END) {
        // LOG((LF_EH, LL_INFO1000000, "\tp: prev:p handler:%x\n", pEHR, pEHR->Next, pEHR->Handler));
        if (pEHR < spVal) {
            if (gLastResumedExceptionFunc != 0)
                _ASSERTE(!"Stack is greater than start of SEH chain - possible missing leave in handler. See gLastResumedExceptionHandler & gLastResumedExceptionFunc for info");
            else
                _ASSERTE(!"Stack is greater than start of SEH chain (FS:0)");
        }
        if (pEHR->Handler == (void *)-1)
            _ASSERTE(!"Handler value has been corrupted");

        _ASSERTE(pEHR < pEHR->Next);

        pEHR = pEHR->Next;
    }
#endif
#endif // 0

    return (EXCEPTION_REGISTRATION_RECORD*) fs0;
}
// Returns the first CLR (unmanaged-to-managed) SEH record on the given
// thread's registration chain, or EXCEPTION_CHAIN_END if there is none.
PEXCEPTION_REGISTRATION_RECORD GetFirstCOMPlusSEHRecord(Thread *pThread) {
    WRAPPER_NO_CONTRACT;

    // Head of the thread's FS:[0] registration chain.
    EXCEPTION_REGISTRATION_RECORD *pHead = *(pThread->GetExceptionListPtr());

    // If the chain hasn't ended and the head isn't already one of ours,
    // skip forward to the first CLR handler on the chain.
    if (pHead != EXCEPTION_CHAIN_END && !IsUnmanagedToManagedSEHHandler(pHead)) {
        return GetNextCOMPlusSEHRecord(pHead);
    }

    // Either the chain is exhausted or the head itself is a CLR handler.
    return pHead;
}
// Walks the current thread's SEH chain from the head down to (but not
// including) 'next', returning the last CLR (unmanaged-to-managed) handler
// encountered before 'next', or 0 if none precedes it.
PEXCEPTION_REGISTRATION_RECORD GetPrevSEHRecord(EXCEPTION_REGISTRATION_RECORD *next)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(IsUnmanagedToManagedSEHHandler(next));

    EXCEPTION_REGISTRATION_RECORD *pBest = 0;

    for (EXCEPTION_REGISTRATION_RECORD *pCur = GetCurrentSEHRecord(); ; pCur = pCur->Next)
    {
        // 'next' must be reachable on the chain; we should never run off the end.
        _ASSERTE(pCur != 0 && pCur != EXCEPTION_CHAIN_END);

        if (pCur == next)
            break;

        // Remember the most recent CLR handler seen so far.
        if (IsUnmanagedToManagedSEHHandler(pCur))
            pBest = pCur;
    }

    return pBest;
}
// Installs pSEH as the head of the current thread's SEH registration chain.
VOID SetCurrentSEHRecord(EXCEPTION_REGISTRATION_RECORD *pSEH)
{
    WRAPPER_NO_CONTRACT;

    // Overwrite the head slot of the thread's exception registration list.
    EXCEPTION_REGISTRATION_RECORD **ppListHead = GetThread()->GetExceptionListPtr();
    *ppListHead = pSEH;
}
// Note that this logic is copied below, in PopSEHRecords
// Pops every FS:[0] SEH record whose address is below pTargetSP, leaving the
// first record at or above pTargetSP as the new chain head.  Naked function:
// no prologue/epilogue is generated, so no C++ constructs (and no CONTRACT)
// are allowed -- the whole body is hand-written assembly.
__declspec(naked)
VOID __cdecl PopSEHRecords(LPVOID pTargetSP)
{
    // No CONTRACT possible on naked functions
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;

    __asm{
        mov     ecx, [esp+4]        ;; ecx <- pTargetSP
        mov     eax, fs:[0]         ;; get current SEH record
  poploop:
        cmp     eax, ecx
        jge     done
        mov     eax, [eax]          ;; get next SEH record
        jmp     poploop
  done:
        mov     fs:[0], eax
        retn
    }
}
//
// Unwind pExinfo, pops FS:[0] handlers until the interception context SP, and
// resumes at interception context.  Does not return.
//
VOID UnwindExceptionTrackerAndResumeInInterceptionFrame(ExInfo* pExInfo, EHContext* context)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    _ASSERTE(pExInfo && context);

    // Roll the exception tracker state back to the interception point's SP,
    // then discard any nested FS:[0] records below it so the OS chain agrees
    // with the stack we are about to resume on.
    pExInfo->UnwindExInfo((LPVOID)(size_t)context->Esp);
    PopNestedExceptionRecords((LPVOID)(size_t)context->Esp);

    STRESS_LOG3(LF_EH|LF_CORDB, LL_INFO100, "UnwindExceptionTrackerAndResumeInInterceptionFrame: completing intercept at EIP = %p ESP = %p EBP = %p\n", context->Eip, context->Esp, context->Ebp);

    // Transfers control to the register state captured in 'context'.
    ResumeAtJitEHHelper(context);
    UNREACHABLE_MSG("Should never return from ResumeAtJitEHHelper!");
}
//
// Pop SEH records below the given target ESP. This is only used to pop nested exception records.
// If bCheckForUnknownHandlers is set, it only checks for unknown FS:[0] handlers
// (no records are popped); returns TRUE when such an unknown handler is found,
// FALSE otherwise.
//
BOOL PopNestedExceptionRecords(LPVOID pTargetSP, BOOL bCheckForUnknownHandlers)
{
    // No CONTRACT here, because we can't run the risk of it pushing any SEH into the current method.
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;

    PEXCEPTION_REGISTRATION_RECORD pEHR = GetCurrentSEHRecord();

    while ((LPVOID)pEHR < pTargetSP)
    {
        //
        // The only handler types we're allowed to have below the limit on the FS:0 chain in these cases are a
        // nested exception record or a fast NExport record, so we verify that here.
        //
        // There is a special case, of course: for an unhandled exception, when the default handler does the exit
        // unwind, we may have an exception that escapes a finally clause, thus replacing the original unhandled
        // exception. If we find a catcher for that new exception, then we'll go ahead and do our own unwind, then
        // jump to the catch. When we are called here, just before jumping to the catch, we'll pop off our nested
        // handlers, then we'll pop off one more handler: the handler that ntdll!ExecuteHandler2 pushed before
        // calling our nested handler. We go ahead and pop off that handler, too. Its okay, its only there to catch
        // exceptions from handlers and turn them into collided unwind status codes... there's no cleanup in the
        // handler that we're removing, and that's the important point. The handler that ExecuteHandler2 pushes
        // isn't a public export from ntdll, but its named "UnwindHandler" and is physically shortly after
        // ExecuteHandler2 in ntdll.
        // In this case, we don't want to pop off the NExportSEH handler since it's our outermost handler.
        //
        static HINSTANCE ExecuteHandler2Module = 0;
        static BOOL ExecuteHandler2ModuleInited = FALSE;

        // Cache the handle to the dll with the handler pushed by ExecuteHandler2.
        if (!ExecuteHandler2ModuleInited)
        {
            ExecuteHandler2Module = WszGetModuleHandle(W("ntdll.dll"));
            ExecuteHandler2ModuleInited = TRUE;
        }

        if (bCheckForUnknownHandlers)
        {
            // NOTE(review): this predicate uses ||, so any record that is not
            // simultaneously a nested exception record and inside ntdll is
            // reported as unknown, while the debug assert below accepts any
            // one of three handler kinds (||-ed). Confirm the asymmetry is
            // intentional before relying on this return value.
            if (!IsComPlusNestedExceptionRecord(pEHR) ||
                !((ExecuteHandler2Module != NULL) && IsIPInModule(ExecuteHandler2Module, (PCODE)pEHR->Handler)))
            {
                return TRUE;
            }
        }
#ifdef _DEBUG
        else
        {
            // Note: if we can't find the module containing ExecuteHandler2, we'll just be really strict and require
            // that we're only popping nested handlers or the FastNExportSEH handler.
            _ASSERTE(FastNExportSEH(pEHR) || IsComPlusNestedExceptionRecord(pEHR) ||
                     ((ExecuteHandler2Module != NULL) && IsIPInModule(ExecuteHandler2Module, (PCODE)pEHR->Handler)));
        }
#endif // _DEBUG

        pEHR = pEHR->Next;
    }

    if (!bCheckForUnknownHandlers)
    {
        // Actually pop: make the first record at/above pTargetSP the chain head.
        SetCurrentSEHRecord(pEHR);
    }

    return FALSE;
}
//
// This is implemented differently from the PopNestedExceptionRecords above because it's called in the context of
// the DebuggerRCThread to operate on the stack of another thread.
//
// pSEH points at the target thread's exception-list head slot (its FS:[0]);
// all chain links are read/written through memory rather than via this
// thread's own FS:[0].
//
VOID PopNestedExceptionRecords(LPVOID pTargetSP, CONTEXT *pCtx, void *pSEH)
{
    // No CONTRACT here, because we can't run the risk of it pushing any SEH into the current method.
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;

#ifdef _DEBUG
    LOG((LF_CORDB,LL_INFO1000, "\nPrintSEHRecords:\n"));

    EXCEPTION_REGISTRATION_RECORD *pEHR = (EXCEPTION_REGISTRATION_RECORD *)(size_t)*(DWORD *)pSEH;

    // check that all the eh frames are all greater than the current stack value. If not, the
    // stack has been updated somehow w/o unwinding the SEH chain.
    while (pEHR != NULL && pEHR != EXCEPTION_CHAIN_END)
    {
        LOG((LF_EH, LL_INFO1000000, "\t%08x: next:%08x handler:%x\n", pEHR, pEHR->Next, pEHR->Handler));
        pEHR = pEHR->Next;
    }
#endif

    DWORD dwCur = *(DWORD*)pSEH; // 'EAX' in the original routine
    DWORD dwPrev = (DWORD)(size_t)pSEH;

    // Walk the target thread's chain up to pTargetSP, remembering in dwPrev
    // the last link slot that lies below the context's SP.
    while (dwCur < (DWORD)(size_t)pTargetSP)
    {
        // Watch for the OS handler
        // for nested exceptions, or any C++ handlers for destructors in our call
        // stack, or anything else.
        if (dwCur < (DWORD)GetSP(pCtx))
            dwPrev = dwCur;

        dwCur = *(DWORD *)(size_t)dwCur;

        LOG((LF_CORDB,LL_INFO10000, "dwCur: 0x%x dwPrev:0x%x pTargetSP:0x%x\n",
            dwCur, dwPrev, pTargetSP));
    }

    // Splice the chain: the link slot at dwPrev now points past every record
    // below pTargetSP.
    *(DWORD *)(size_t)dwPrev = dwCur;

#ifdef _DEBUG
    pEHR = (EXCEPTION_REGISTRATION_RECORD *)(size_t)*(DWORD *)pSEH;
    // check that all the eh frames are all greater than the current stack value. If not, the
    // stack has been updated somehow w/o unwinding the SEH chain.

    LOG((LF_CORDB,LL_INFO1000, "\nPopSEHRecords:\n"));
    while (pEHR != NULL && pEHR != (void *)-1)
    {
        LOG((LF_EH, LL_INFO1000000, "\t%08x: next:%08x handler:%x\n", pEHR, pEHR->Next, pEHR->Handler));
        pEHR = pEHR->Next;
    }
#endif
}
//==========================================================================
// COMPlusThrowCallback
//
//==========================================================================
/*
*
* COMPlusThrowCallbackHelper
*
* This function is a simple helper function for COMPlusThrowCallback. It is needed
* because of the EX_TRY macro. This macro does an alloca(), which allocates space
* off the stack, not free'ing it. Thus, doing a EX_TRY in a loop can easily result
* in a stack overflow error. By factoring out the EX_TRY into a separate function,
* we recover that stack space.
*
* Parameters:
* pJitManager - The JIT manager that will filter the EH.
* pCf - The frame to crawl.
* EHClausePtr
* nestingLevel
* pThread - Used to determine if the thread is throwable or not.
*
* Return:
* Exception status.
*
*/
int COMPlusThrowCallbackHelper(IJitManager *pJitManager,
                               CrawlFrame *pCf,
                               ThrowCallbackType* pData,
                               EE_ILEXCEPTION_CLAUSE *EHClausePtr,
                               DWORD nestingLevel,
                               OBJECTREF throwable,
                               Thread *pThread
                              )
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    // Runs the managed filter for EHClausePtr and returns its verdict
    // (e.g. EXCEPTION_EXECUTE_HANDLER / EXCEPTION_CONTINUE_SEARCH).
    // Factored out of COMPlusThrowCallback so the EX_TRY alloca is released
    // per invocation — see the comment block above this function.
    int iFilt = 0;

    EX_TRY
    {
        GCPROTECT_BEGIN (throwable);

        // We want to call filters even if the thread is aborting, so suppress abort
        // checks while the filter runs.
        ThreadPreventAsyncHolder preventAbort;

        BYTE* startAddress = (BYTE*)pCf->GetCodeInfo()->GetStartAddress();
        iFilt = ::CallJitEHFilter(pCf, startAddress, EHClausePtr, nestingLevel, throwable);

        GCPROTECT_END();
    }
    EX_CATCH
    {
        // We had an exception in filter invocation that remained unhandled.
        // Sync managed exception state, for the managed thread, based upon the active exception tracker.
        pThread->SyncManagedExceptionState(false);

        //
        // Swallow exception. Treat as exception continue search.
        //
        iFilt = EXCEPTION_CONTINUE_SEARCH;
    }
    EX_END_CATCH(SwallowAllExceptions)

    return iFilt;
}
//******************************************************************************
// The stack walk callback for exception handling on x86.
// Returns one of:
// SWA_CONTINUE = 0, // continue walking
// SWA_ABORT = 1, // stop walking, early out in "failure case"
// SWA_FAILED = 2 // couldn't walk stack
StackWalkAction COMPlusThrowCallback(       // SWA value
    CrawlFrame *pCf,                        // Data from StackWalkFramesEx
    ThrowCallbackType *pData)               // Context data passed through from CPFH
{
    // First-pass (search) callback for the x86 EH stack walk: appends the
    // current frame to the managed stack trace, delivers first-chance
    // debugger/profiler/AD notifications, and evaluates catch clauses and
    // filters for this frame. Records the catching clause in pData and
    // returns SWA_ABORT when a handler is found; SWA_CONTINUE otherwise.
    //
    // We don't want to use a runtime contract here since this codepath is used during
    // the processing of a hard SO. Contracts use a significant amount of stack
    // which we can't afford for those cases.
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    Frame *pFrame = pCf->GetFrame();
    MethodDesc *pFunc = pCf->GetFunction();

#if defined(_DEBUG)
#define METHODNAME(pFunc) (pFunc?pFunc->m_pszDebugMethodName:"<n/a>")
#else
#define METHODNAME(pFunc) "<n/a>"
#endif
    STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusThrowCallback: STACKCRAWL method:%pM ('%s'), Frame:%p, FrameVtable = %pV\n",
        pFunc, METHODNAME(pFunc), pFrame, pCf->IsFrameless()?0:(*(void**)pFrame));
#undef METHODNAME

    Thread *pThread = GetThread();

    if (pFrame && pData->pTopFrame == pFrame)
        /* Don't look past limiting frame if there is one */
        return SWA_ABORT;

    if (!pFunc)
        return SWA_CONTINUE;

    if (pThread->IsRudeAbortInitiated())
    {
        return SWA_CONTINUE;
    }

    ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);

    _ASSERTE(!pData->bIsUnwind);
#ifdef _DEBUG
    // It SHOULD be the case that any frames we consider live between this exception
    // record and the previous one.
    if (!pExInfo->m_pPrevNestedInfo) {
        if (pData->pCurrentExceptionRecord) {
            if (pFrame) _ASSERTE(pData->pCurrentExceptionRecord > pFrame);
            // The FastNExport SEH handler can be in the frame we just unwound and as a result just out of range.
            if (pCf->IsFrameless() && !FastNExportSEH((PEXCEPTION_REGISTRATION_RECORD)pData->pCurrentExceptionRecord))
            {
                _ASSERTE((ULONG_PTR)pData->pCurrentExceptionRecord >= GetRegdisplaySP(pCf->GetRegisterSet()));
            }
        }
        if (pData->pPrevExceptionRecord) {
            // FCALLS have an extra SEH record in debug because of the destructor
            // associated with ForbidGC checking. This is benign, so just ignore it.
            if (pFrame) _ASSERTE(pData->pPrevExceptionRecord < pFrame || pFrame->GetVTablePtr() == HelperMethodFrame::GetMethodFrameVPtr());
            if (pCf->IsFrameless()) _ASSERTE((ULONG_PTR)pData->pPrevExceptionRecord <= GetRegdisplaySP(pCf->GetRegisterSet()));
        }
    }
#endif

    UINT_PTR currentIP = 0;
    UINT_PTR currentSP = 0;

    if (pCf->IsFrameless())
    {
        currentIP = (UINT_PTR)GetControlPC(pCf->GetRegisterSet());
        currentSP = (UINT_PTR)GetRegdisplaySP(pCf->GetRegisterSet());
    }
    else if (InlinedCallFrame::FrameHasActiveCall(pFrame))
    {
        // don't have the IP, SP for native code
        currentIP = 0;
        currentSP = 0;
    }
    else
    {
        currentIP = (UINT_PTR)(pCf->GetFrame()->GetIP());
        currentSP = 0; //Don't have an SP to get.
    }

    if (!pFunc->IsILStub())
    {
        // Append the current frame to the stack trace and save the save trace to the managed Exception object.
        pExInfo->m_StackTraceInfo.AppendElement(pData->bAllowAllocMem, currentIP, currentSP, pFunc, pCf);

        pExInfo->m_StackTraceInfo.SaveStackTrace(pData->bAllowAllocMem,
                                                 pThread->GetThrowableAsHandle(),
                                                 pData->bReplaceStack,
                                                 pData->bSkipLastElement);
    }
    else
    {
        LOG((LF_EH, LL_INFO1000, "COMPlusThrowCallback: Skipping AppendElement/SaveStackTrace for IL stub MD %p\n", pFunc));
    }

    // Fire an exception thrown ETW event when an exception occurs
    ETW::ExceptionLog::ExceptionThrown(pCf, pData->bSkipLastElement, pData->bReplaceStack);

    // Reset the flags. These flags are set only once before each stack walk done by LookForHandler(), and
    // they apply only to the first frame we append to the stack trace. Subsequent frames are always appended.
    if (pData->bReplaceStack)
    {
        pData->bReplaceStack = FALSE;
    }

    if (pData->bSkipLastElement)
    {
        pData->bSkipLastElement = FALSE;
    }

    // now we've got the stack trace, if we aren't allowed to catch this and we're first pass, return
    if (pData->bDontCatch)
        return SWA_CONTINUE;

    if (!pCf->IsFrameless())
    {
        // @todo - remove this once SIS is fully enabled.
        extern bool g_EnableSIS;
        if (g_EnableSIS)
        {
            // For debugger, we may want to notify 1st chance exceptions if they're coming out of a stub.
            // We recognize stubs as Frames with a M2U transition type. The debugger's stackwalker also
            // recognizes these frames and publishes ICorDebugInternalFrames in the stackwalk. It's
            // important to use pFrame as the stack address so that the Exception callback matches up
            // w/ the ICorDebugInternlFrame stack range.
            if (CORDebuggerAttached())
            {
                Frame * pFrameStub = pCf->GetFrame();
                Frame::ETransitionType t = pFrameStub->GetTransitionType();
                if (t == Frame::TT_M2U)
                {
                    // Use address of the frame as the stack address.
                    currentSP = (SIZE_T) ((void*) pFrameStub);
                    currentIP = 0; // no IP.
                    EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread, (SIZE_T)currentIP, (SIZE_T)currentSP);
                    // Deliver the FirstChanceNotification after the debugger, if not already delivered.
                    if (!pExInfo->DeliveredFirstChanceNotification())
                    {
                        ExceptionNotifications::DeliverFirstChanceNotification();
                    }
                }
            }
        }
        return SWA_CONTINUE;
    }

    bool fIsILStub = pFunc->IsILStub();
    bool fGiveDebuggerAndProfilerNotification = !fIsILStub;
    // (Removed dead local 'fMethodCanHandleException' — it was set to TRUE and
    // never read anywhere in this function, and triggers unused-variable
    // warnings once warning suppression is removed.)

    MethodDesc * pUserMDForILStub = NULL;
    Frame * pILStubFrame = NULL;
    if (fIsILStub)
        pUserMDForILStub = GetUserMethodForILStub(pThread, currentSP, pFunc, &pILStubFrame);

    // Let the profiler know that we are searching for a handler within this function instance
    if (fGiveDebuggerAndProfilerNotification)
        EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionEnter(pFunc);

    // The following debugger notification and AppDomain::FirstChanceNotification should be scoped together
    // since the AD notification *must* follow immediately after the debugger's notification.
    {
#ifdef DEBUGGING_SUPPORTED
        //
        // Go ahead and notify any debugger of this exception.
        //
        EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread, (SIZE_T)currentIP, (SIZE_T)currentSP);

        if (CORDebuggerAttached() && pExInfo->m_ExceptionFlags.DebuggerInterceptInfo())
        {
            return SWA_ABORT;
        }
#endif // DEBUGGING_SUPPORTED

        // Attempt to deliver the first chance notification to the AD only *AFTER* the debugger
        // has done that, provided we have not already done that.
        if (!pExInfo->DeliveredFirstChanceNotification())
        {
            ExceptionNotifications::DeliverFirstChanceNotification();
        }
    }

    IJitManager* pJitManager = pCf->GetJitManager();
    _ASSERTE(pJitManager);

    EH_CLAUSE_ENUMERATOR pEnumState;
    unsigned EHCount = pJitManager->InitializeEHEnumeration(pCf->GetMethodToken(), &pEnumState);
    if (EHCount == 0)
    {
        // Inform the profiler that we're leaving, and what pass we're on
        if (fGiveDebuggerAndProfilerNotification)
            EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc);
        return SWA_CONTINUE;
    }

    TypeHandle thrownType = TypeHandle();
    // if we are being called on an unwind for an exception that we did not try to catch, eg.
    // an internal EE exception, then pThread->GetThrowable will be null
    {
        OBJECTREF throwable = pThread->GetThrowable();
        if (throwable != NULL)
        {
            throwable = PossiblyUnwrapThrowable(throwable, pCf->GetAssembly());
            thrownType = TypeHandle(throwable->GetMethodTable());
        }
    }

    PREGDISPLAY regs = pCf->GetRegisterSet();
    BYTE *pStack = (BYTE *) GetRegdisplaySP(regs);
#ifdef DEBUGGING_SUPPORTED
    BYTE *pHandlerEBP = (BYTE *) GetRegdisplayFP(regs);
#endif

    DWORD offs = (DWORD)pCf->GetRelOffset();  //= (BYTE*) (*regs->pPC) - (BYTE*) pCf->GetStartAddress();

    STRESS_LOG1(LF_EH, LL_INFO10000, "COMPlusThrowCallback: offset is %d\n", offs);

    EE_ILEXCEPTION_CLAUSE EHClause;
    unsigned start_adjust, end_adjust;

    start_adjust = !(pCf->HasFaulted() || pCf->IsIPadjusted());
    end_adjust = pCf->IsActiveFunc();

    for(ULONG i=0; i < EHCount; i++)
    {
        pJitManager->GetNextEHClause(&pEnumState, &EHClause);
        _ASSERTE(IsValidClause(&EHClause));

        STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusThrowCallback: considering '%s' clause [%d,%d], ofs:%d\n",
                (IsFault(&EHClause) ? "fault" : (
                 IsFinally(&EHClause) ? "finally" : (
                 IsFilterHandler(&EHClause) ? "filter" : (
                 IsTypedHandler(&EHClause) ? "typed" : "unknown")))),
                EHClause.TryStartPC,
                EHClause.TryEndPC,
                offs
                );

        // Checking the exception range is a bit tricky because
        // on CPU faults (null pointer access, div 0, ..., the IP points
        // to the faulting instruction, but on calls, the IP points
        // to the next instruction.
        // This means that we should not include the start point on calls
        // as this would be a call just preceding the try block.
        // Also, we should include the end point on calls, but not faults.

        // If we're in the FILTER part of a filter clause, then we
        // want to stop crawling. It's going to be caught in a
        // EX_CATCH just above us. If not, the exception
        if ( IsFilterHandler(&EHClause)
             && ( offs > EHClause.FilterOffset
                  || (offs == EHClause.FilterOffset && !start_adjust) )
             && ( offs < EHClause.HandlerStartPC
                  || (offs == EHClause.HandlerStartPC && !end_adjust) )) {
            STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusThrowCallback: Fault inside filter [%d,%d] startAdj %d endAdj %d\n",
                        EHClause.FilterOffset, EHClause.HandlerStartPC, start_adjust, end_adjust);

            if (fGiveDebuggerAndProfilerNotification)
                EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc);
            return SWA_ABORT;
        }

        if ( (offs < EHClause.TryStartPC) ||
             (offs > EHClause.TryEndPC) ||
             (offs == EHClause.TryStartPC && start_adjust) ||
             (offs == EHClause.TryEndPC && end_adjust))
            continue;

        BOOL typeMatch = FALSE;
        BOOL isTypedHandler = IsTypedHandler(&EHClause);

        if (isTypedHandler && !thrownType.IsNull())
        {
            if (EHClause.TypeHandle == (void*)(size_t)mdTypeRefNil)
            {
                // this is a catch(...)
                typeMatch = TRUE;
            }
            else
            {
                TypeHandle exnType = pJitManager->ResolveEHClause(&EHClause,pCf);
                // if doesn't have cached class then class wasn't loaded so couldn't have been thrown
                typeMatch = !exnType.IsNull() && ExceptionIsOfRightType(exnType, thrownType);
            }
        }

        // <TODO>@PERF: Is this too expensive? Consider storing the nesting level
        // instead of the HandlerEndPC.</TODO>

        // Determine the nesting level of EHClause. Just walk the table
        // again, and find out how many handlers enclose it
        DWORD nestingLevel = 0;

        if (IsFaultOrFinally(&EHClause))
            continue;

        if (isTypedHandler)
        {
            LOG((LF_EH, LL_INFO100, "COMPlusThrowCallback: %s match for typed handler.\n", typeMatch?"Found":"Did not find"));
            if (!typeMatch)
            {
                continue;
            }
        }
        else
        {
            // Must be an exception filter (__except() part of __try{}__except(){}).
            nestingLevel = ComputeEnclosingHandlerNestingLevel(pJitManager,
                                                               pCf->GetMethodToken(),
                                                               EHClause.HandlerStartPC);

            // We just need *any* address within the method. This will let the debugger
            // resolve the EnC version of the method.
            PCODE pMethodAddr = GetControlPC(regs);
            if (fGiveDebuggerAndProfilerNotification)
                EEToDebuggerExceptionInterfaceWrapper::ExceptionFilter(pFunc, pMethodAddr, EHClause.FilterOffset, pHandlerEBP);

            UINT_PTR uStartAddress = (UINT_PTR)pCf->GetCodeInfo()->GetStartAddress();

            // save clause information in the exinfo
            pExInfo->m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_FILTER,
                                            uStartAddress + EHClause.FilterOffset,
                                            StackFrame((UINT_PTR)pHandlerEBP));

            // Let the profiler know we are entering a filter
            if (fGiveDebuggerAndProfilerNotification)
                EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterEnter(pFunc);

            STRESS_LOG3(LF_EH, LL_INFO10, "COMPlusThrowCallback: calling filter code, EHClausePtr:%08x, Start:%08x, End:%08x\n",
                &EHClause, EHClause.HandlerStartPC, EHClause.HandlerEndPC);

            OBJECTREF throwable = PossiblyUnwrapThrowable(pThread->GetThrowable(), pCf->GetAssembly());

            pExInfo->m_EHClauseInfo.SetManagedCodeEntered(TRUE);

            int iFilt = COMPlusThrowCallbackHelper(pJitManager,
                                                   pCf,
                                                   pData,
                                                   &EHClause,
                                                   nestingLevel,
                                                   throwable,
                                                   pThread);

            pExInfo->m_EHClauseInfo.SetManagedCodeEntered(FALSE);

            // Let the profiler know we are leaving a filter
            if (fGiveDebuggerAndProfilerNotification)
                EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterLeave();

            pExInfo->m_EHClauseInfo.ResetInfo();

            if (pThread->IsRudeAbortInitiated())
            {
                if (fGiveDebuggerAndProfilerNotification)
                    EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc);
                return SWA_CONTINUE;
            }

            // If this filter didn't want the exception, keep looking.
            if (EXCEPTION_EXECUTE_HANDLER != iFilt)
                continue;
        }

        // Record this location, to stop the unwind phase, later.
        pData->pFunc = pFunc;
        pData->dHandler = i;
        pData->pStack = pStack;

        // Notify the profiler that a catcher has been found
        if (fGiveDebuggerAndProfilerNotification)
        {
            EEToProfilerExceptionInterfaceWrapper::ExceptionSearchCatcherFound(pFunc);
            EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc);
        }

#ifdef DEBUGGING_SUPPORTED
        //
        // Notify debugger that a catcher has been found.
        //
        if (fIsILStub)
        {
            EEToDebuggerExceptionInterfaceWrapper::NotifyOfCHFFilter(pExInfo->m_pExceptionPointers, pILStubFrame);
        }
        else
        if (fGiveDebuggerAndProfilerNotification &&
            CORDebuggerAttached() && !pExInfo->m_ExceptionFlags.DebuggerInterceptInfo())
        {
            _ASSERTE(pData);
            // We just need *any* address within the method. This will let the debugger
            // resolve the EnC version of the method.
            PCODE pMethodAddr = GetControlPC(regs);

            EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedExceptionCatcherFound(pThread,
                                                                                           pData->pFunc, pMethodAddr,
                                                                                           (SIZE_T)pData->pStack,
                                                                                           &EHClause);
        }
#endif // DEBUGGING_SUPPORTED

        return SWA_ABORT;
    }
    if (fGiveDebuggerAndProfilerNotification)
        EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc);
    return SWA_CONTINUE;
} // StackWalkAction COMPlusThrowCallback()
//==========================================================================
// COMPlusUnwindCallback
//==========================================================================
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning (disable : 4740) // There is inline asm code in this function, which disables
// global optimizations.
#pragma warning (disable : 4731)
#endif
StackWalkAction COMPlusUnwindCallback (CrawlFrame *pCf, ThrowCallbackType *pData)
{
    // Second-pass (unwind) callback for the x86 EH stack walk: runs
    // fault/finally handlers for each frame being unwound and, when the
    // catching frame recorded by the first pass is reached, resumes
    // execution in the catch handler via ResumeAtJitEH (which does not
    // return). May also divert to a debugger-requested interception point.
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    _ASSERTE(pData->bIsUnwind);

    Frame *pFrame = pCf->GetFrame();
    MethodDesc *pFunc = pCf->GetFunction();
#if defined(_DEBUG)
#define METHODNAME(pFunc) (pFunc?pFunc->m_pszDebugMethodName:"<n/a>")
#else
#define METHODNAME(pFunc) "<n/a>"
#endif
    STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: STACKCRAWL method:%pM ('%s'), Frame:%p, FrameVtable = %pV\n",
        pFunc, METHODNAME(pFunc), pFrame, pCf->IsFrameless()?0:(*(void**)pFrame));
#undef METHODNAME

    if (pFrame && pData->pTopFrame == pFrame)
        /* Don't look past limiting frame if there is one */
        return SWA_ABORT;

    if (!pFunc)
        return SWA_CONTINUE;

    if (!pCf->IsFrameless())
        return SWA_CONTINUE;

    Thread *pThread = GetThread();

    // If the thread is being RudeAbort, we will not run any finally
    if (pThread->IsRudeAbortInitiated())
    {
        return SWA_CONTINUE;
    }

    IJitManager* pJitManager = pCf->GetJitManager();
    _ASSERTE(pJitManager);

    ExInfo *pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);

    PREGDISPLAY regs = pCf->GetRegisterSet();
    BYTE *pStack = (BYTE *) GetRegdisplaySP(regs);

    TypeHandle thrownType = TypeHandle();

#ifdef DEBUGGING_SUPPORTED
    LOG((LF_EH, LL_INFO1000, "COMPlusUnwindCallback: Intercept %d, pData->pFunc 0x%X, pFunc 0x%X, pData->pStack 0x%X, pStack 0x%X\n",
         pExInfo->m_ExceptionFlags.DebuggerInterceptInfo(),
         pData->pFunc,
         pFunc,
         pData->pStack,
         pStack));

    //
    // If the debugger wants to intercept this exception here, go do that.
    //
    if (pExInfo->m_ExceptionFlags.DebuggerInterceptInfo() && (pData->pFunc == pFunc) && (pData->pStack == pStack))
    {
        goto LDoDebuggerIntercept;
    }
#endif

    bool fGiveDebuggerAndProfilerNotification;
    fGiveDebuggerAndProfilerNotification = !pFunc->IsILStub();

    // Notify the profiler of the function we're dealing with in the unwind phase
    if (fGiveDebuggerAndProfilerNotification)
        EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionEnter(pFunc);

    EH_CLAUSE_ENUMERATOR pEnumState;
    unsigned EHCount = pJitManager->InitializeEHEnumeration(pCf->GetMethodToken(), &pEnumState);
    if (EHCount == 0)
    {
        // Inform the profiler that we're leaving, and what pass we're on
        if (fGiveDebuggerAndProfilerNotification)
            EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pFunc);
        return SWA_CONTINUE;
    }

    // if we are being called on an unwind for an exception that we did not try to catch, eg.
    // an internal EE exception, then pThread->GetThrowable will be null
    {
        OBJECTREF throwable = pThread->GetThrowable();
        if (throwable != NULL)
        {
            throwable = PossiblyUnwrapThrowable(throwable, pCf->GetAssembly());
            thrownType = TypeHandle(throwable->GetMethodTable());
        }
    }
#ifdef DEBUGGING_SUPPORTED
    BYTE *pHandlerEBP;
    pHandlerEBP = (BYTE *) GetRegdisplayFP(regs);
#endif

    DWORD offs;
    offs = (DWORD)pCf->GetRelOffset();  //= (BYTE*) (*regs->pPC) - (BYTE*) pCf->GetStartAddress();

    LOG((LF_EH, LL_INFO100, "COMPlusUnwindCallback: current EIP offset in method 0x%x, \n", offs));

    EE_ILEXCEPTION_CLAUSE EHClause;
    unsigned start_adjust, end_adjust;

    start_adjust = !(pCf->HasFaulted() || pCf->IsIPadjusted());
    end_adjust = pCf->IsActiveFunc();

    for(ULONG i=0; i < EHCount; i++)
    {
        pJitManager->GetNextEHClause(&pEnumState, &EHClause);
        _ASSERTE(IsValidClause(&EHClause));

        STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: considering '%s' clause [%d,%d], offs:%d\n",
                (IsFault(&EHClause) ? "fault" : (
                 IsFinally(&EHClause) ? "finally" : (
                 IsFilterHandler(&EHClause) ? "filter" : (
                 IsTypedHandler(&EHClause) ? "typed" : "unknown")))),
                EHClause.TryStartPC,
                EHClause.TryEndPC,
                offs
                );

        // Checking the exception range is a bit tricky because
        // on CPU faults (null pointer access, div 0, ..., the IP points
        // to the faulting instruction, but on calls, the IP points
        // to the next instruction.
        // This means that we should not include the start point on calls
        // as this would be a call just preceding the try block.
        // Also, we should include the end point on calls, but not faults.

        if ( IsFilterHandler(&EHClause)
             && ( offs > EHClause.FilterOffset
                  || (offs == EHClause.FilterOffset && !start_adjust) )
             && ( offs < EHClause.HandlerStartPC
                  || (offs == EHClause.HandlerStartPC && !end_adjust) )
            ) {
            STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: Fault inside filter [%d,%d] startAdj %d endAdj %d\n",
                        EHClause.FilterOffset, EHClause.HandlerStartPC, start_adjust, end_adjust);

            // Mark the filter as done. See comment in CallJitEHFilter
            // on why we have to do it here.
            Frame* pFilterFrame = pThread->GetFrame();
            _ASSERTE(pFilterFrame->GetVTablePtr() == ExceptionFilterFrame::GetMethodFrameVPtr());
            ((ExceptionFilterFrame*)pFilterFrame)->SetFilterDone();

            // Inform the profiler that we're leaving, and what pass we're on
            if (fGiveDebuggerAndProfilerNotification)
                EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pFunc);

            return SWA_ABORT;
        }

        if ( (offs < EHClause.TryStartPC) ||
             (offs > EHClause.TryEndPC) ||
             (offs == EHClause.TryStartPC && start_adjust) ||
             (offs == EHClause.TryEndPC && end_adjust))
            continue;

        // <TODO>@PERF : Is this too expensive? Consider storing the nesting level
        // instead of the HandlerEndPC.</TODO>

        // Determine the nesting level of EHClause. Just walk the table
        // again, and find out how many handlers enclose it
        DWORD nestingLevel = ComputeEnclosingHandlerNestingLevel(pJitManager,
                                                                 pCf->GetMethodToken(),
                                                                 EHClause.HandlerStartPC);

        // We just need *any* address within the method. This will let the debugger
        // resolve the EnC version of the method.
        PCODE pMethodAddr = GetControlPC(regs);

        UINT_PTR uStartAddress = (UINT_PTR)pCf->GetCodeInfo()->GetStartAddress();

        if (IsFaultOrFinally(&EHClause))
        {
            if (fGiveDebuggerAndProfilerNotification)
                EEToDebuggerExceptionInterfaceWrapper::ExceptionHandle(pFunc, pMethodAddr, EHClause.HandlerStartPC, pHandlerEBP);

            pExInfo->m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_FINALLY,
                                            uStartAddress + EHClause.HandlerStartPC,
                                            StackFrame((UINT_PTR)pHandlerEBP));

            // Notify the profiler that we are about to execute the finally code
            if (fGiveDebuggerAndProfilerNotification)
                EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFinallyEnter(pFunc);

            LOG((LF_EH, LL_INFO100, "COMPlusUnwindCallback: finally clause [%d,%d] - call\n", EHClause.TryStartPC, EHClause.TryEndPC));

            pExInfo->m_EHClauseInfo.SetManagedCodeEntered(TRUE);

            ::CallJitEHFinally(pCf, (BYTE *)uStartAddress, &EHClause, nestingLevel);

            pExInfo->m_EHClauseInfo.SetManagedCodeEntered(FALSE);

            LOG((LF_EH, LL_INFO100, "COMPlusUnwindCallback: finally - returned\n"));

            // Notify the profiler that we are done with the finally code
            if (fGiveDebuggerAndProfilerNotification)
                EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFinallyLeave();

            pExInfo->m_EHClauseInfo.ResetInfo();

            continue;
        }

        // Current is not a finally, check if it's the catching handler (or filter).
        if (pData->pFunc != pFunc || (ULONG)(pData->dHandler) != i || pData->pStack != pStack)
        {
            continue;
        }

#ifdef _DEBUG
        gLastResumedExceptionFunc = pCf->GetFunction();
        gLastResumedExceptionHandler = i;
#endif

        // save clause information in the exinfo
        pExInfo->m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_CATCH,
                                        uStartAddress + EHClause.HandlerStartPC,
                                        StackFrame((UINT_PTR)pHandlerEBP));

        // Notify the profiler that we are about to resume at the catcher.
        if (fGiveDebuggerAndProfilerNotification)
        {
            DACNotify::DoExceptionCatcherEnterNotification(pFunc, EHClause.HandlerStartPC);

            EEToProfilerExceptionInterfaceWrapper::ExceptionCatcherEnter(pThread, pFunc);

            EEToDebuggerExceptionInterfaceWrapper::ExceptionHandle(pFunc, pMethodAddr, EHClause.HandlerStartPC, pHandlerEBP);
        }

        STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: offset 0x%x matches clause [0x%x, 0x%x) matches in method %pM\n",
                    offs, EHClause.TryStartPC, EHClause.TryEndPC, pFunc);

        // ResumeAtJitEH will set pExInfo->m_EHClauseInfo.m_fManagedCodeEntered = TRUE; at the appropriate time
        ::ResumeAtJitEH(pCf, (BYTE *)uStartAddress, &EHClause, nestingLevel, pThread, pData->bUnwindStack);
        //UNREACHABLE_MSG("ResumeAtJitEH shouldn't have returned!");

        // we do not set pExInfo->m_EHClauseInfo.m_fManagedCodeEntered = FALSE here,
        // that happens when the catch clause calls back to COMPlusEndCatch
    }

    STRESS_LOG1(LF_EH, LL_INFO100, "COMPlusUnwindCallback: no handler found in method %pM\n", pFunc);
    if (fGiveDebuggerAndProfilerNotification)
        EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pFunc);

    return SWA_CONTINUE;

#ifdef DEBUGGING_SUPPORTED
LDoDebuggerIntercept:

    STRESS_LOG1(LF_EH|LF_CORDB, LL_INFO100, "COMPlusUnwindCallback: Intercepting in method %pM\n", pFunc);

    //
    // Setup up the easy parts of the context to restart at.
    //
    EHContext context;

    //
    // Note: EAX ECX EDX are scratch
    //
    context.Esp = (DWORD)(size_t)(GetRegdisplaySP(regs));
    context.Ebx = *regs->pEbx;
    context.Esi = *regs->pEsi;
    context.Edi = *regs->pEdi;
    context.Ebp = *regs->pEbp;

    //
    // Set scratch registers to 0 to avoid reporting incorrect values to GC in case of debugger changing the IP
    // in the middle of a scratch register lifetime (see Dev10 754922)
    //
    context.Eax = 0;
    context.Ecx = 0;
    context.Edx = 0;

    //
    // Ok, now set the target Eip to the address the debugger requested.
    //
    ULONG_PTR nativeOffset;
    pExInfo->m_DebuggerExState.GetDebuggerInterceptInfo(NULL, NULL, NULL, NULL, &nativeOffset, NULL);
    context.Eip = GetControlPC(regs) - (pCf->GetRelOffset() - nativeOffset);

    //
    // Finally we need to get the correct Esp for this nested level
    //
    context.Esp = pCf->GetCodeManager()->GetAmbientSP(regs,
                                                      pCf->GetCodeInfo(),
                                                      nativeOffset,
                                                      pData->dHandler,
                                                      pCf->GetCodeManState()
                                                     );
    //
    // In case we see unknown FS:[0] handlers we delay the interception point until we reach the handler that protects the interception point.
    // This way we have both FS:[0] handlers being popped up by RtlUnwind and managed capital F Frames being unwound by the managed stackwalker.
    //
    BOOL fCheckForUnknownHandler = TRUE;
    if (PopNestedExceptionRecords((LPVOID)(size_t)context.Esp, fCheckForUnknownHandler))
    {
        // Let ClrDebuggerDoUnwindAndIntercept RtlUnwind continue to unwind frames until we reach the handler protected by COMPlusNestedExceptionHandler.
        pExInfo->m_InterceptionContext = context;
        pExInfo->m_ValidInterceptionContext = TRUE;
        STRESS_LOG0(LF_EH|LF_CORDB, LL_INFO100, "COMPlusUnwindCallback: Skip interception until unwinding reaches the actual handler protected by COMPlusNestedExceptionHandler\n");
    }
    else
    {
        //
        // Pop off all the Exception information up to this point in the stack
        //
        UnwindExceptionTrackerAndResumeInInterceptionFrame(pExInfo, &context);
    }
    return SWA_ABORT;
#endif // DEBUGGING_SUPPORTED
} // StackWalkAction COMPlusUnwindCallback ()
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning (disable : 4740) // There is inline asm code in this function, which disables
// global optimizations.
#pragma warning (disable : 4731)
#endif
//
// ResumeAtJitEH: transfers control into a jitted catch handler. This function
// never returns -- ResumeAtJitEHHelper jumps straight into the handler code
// with the EHContext built here.
//
// Parameters:
//   pCf          - crawl frame positioned at the method that owns the handler
//   startPC      - start of the method's code; clause offsets are relative to it
//   EHClausePtr  - the EH clause whose catch handler is being entered
//   nestingLevel - nesting depth of the clause within the method
//   pThread      - thread on which the exception is being dispatched
//   unwindStack  - TRUE to unwind the stack (pop nested exception records and
//                  ExInfos) before entering the handler; FALSE to run the
//                  handler on the current stack without unwinding
//
void ResumeAtJitEH(CrawlFrame* pCf,
                   BYTE* startPC,
                   EE_ILEXCEPTION_CLAUSE *EHClausePtr,
                   DWORD nestingLevel,
                   Thread *pThread,
                   BOOL unwindStack)
{
    // No dynamic contract here because this function doesn't return and destructors wouldn't be executed
    WRAPPER_NO_CONTRACT;

    EHContext context;

    // Target IP is the start of the handler; registers come from the crawl frame.
    context.Setup(PCODE(startPC + EHClausePtr->HandlerStartPC), pCf->GetRegisterSet());

    size_t * pShadowSP = NULL; // Write Esp to *pShadowSP before jumping to handler
    size_t * pHandlerEnd = NULL;

    OBJECTREF throwable = PossiblyUnwrapThrowable(pThread->GetThrowable(), pCf->GetAssembly());

    // Let the code manager adjust the context for entering the catch clause and
    // hand back the shadow-SP slot and the handler-end slot for this nesting level.
    pCf->GetCodeManager()->FixContext(ICodeManager::CATCH_CONTEXT,
                                      &context,
                                      pCf->GetCodeInfo(),
                                      EHClausePtr->HandlerStartPC,
                                      nestingLevel,
                                      throwable,
                                      pCf->GetCodeManState(),
                                      &pShadowSP,
                                      &pHandlerEnd);

    if (pHandlerEnd)
    {
        *pHandlerEnd = EHClausePtr->HandlerEndPC;
    }

    MethodDesc* pMethodDesc = pCf->GetCodeInfo()->GetMethodDesc();
    TADDR startAddress = pCf->GetCodeInfo()->GetStartAddress();
    if (InlinedCallFrame::FrameHasActiveCall(pThread->m_pFrame))
    {
        // When unwinding an exception in ReadyToRun, the JIT_PInvokeEnd helper which unlinks the ICF from
        // the thread will be skipped. This is because unlike jitted code, each pinvoke is wrapped by calls
        // to the JIT_PInvokeBegin and JIT_PInvokeEnd helpers, which push and pop the ICF on the thread. The
        // ICF is not linked at the method prolog and unlinked at the epilog when running R2R code. Since the
        // JIT_PInvokeEnd helper will be skipped, we need to unlink the ICF here. If the executing method
        // has another pinvoke, it will re-link the ICF again when the JIT_PInvokeBegin helper is called.

        // Check that the InlinedCallFrame is in the method with the exception handler. There can be other
        // InlinedCallFrame somewhere up the call chain that is not related to the current exception
        // handling.

#ifdef DEBUG
        TADDR handlerFrameSP = pCf->GetRegisterSet()->SP;
#endif // DEBUG
        // Find the ESP of the caller of the method with the exception handler.
        bool unwindSuccess = pCf->GetCodeManager()->UnwindStackFrame(pCf->GetRegisterSet(),
                                                                     pCf->GetCodeInfo(),
                                                                     pCf->GetCodeManagerFlags(),
                                                                     pCf->GetCodeManState(),
                                                                     NULL /* StackwalkCacheUnwindInfo* */);
        _ASSERTE(unwindSuccess);

        if (((TADDR)pThread->m_pFrame < pCf->GetRegisterSet()->SP) && ExecutionManager::IsReadyToRunCode(((InlinedCallFrame*)pThread->m_pFrame)->m_pCallerReturnAddress))
        {
            _ASSERTE((TADDR)pThread->m_pFrame >= handlerFrameSP);
            pThread->m_pFrame->Pop(pThread);
        }
    }

    // save esp so that endcatch can restore it (it always restores, so want correct value)
    ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
    pExInfo->m_dEsp = (LPVOID)context.GetSP();
    LOG((LF_EH, LL_INFO1000, "ResumeAtJitEH: current m_dEsp set to %p\n", context.GetSP()));

    PVOID dEsp = GetCurrentSP();

    if (!unwindStack)
    {
        // If we don't want to unwind the stack, then the guard page had better not be gone!
        _ASSERTE(pThread->DetermineIfGuardPagePresent());

        // so down below won't really update esp
        context.SetSP(dEsp);
        pExInfo->m_pShadowSP = pShadowSP; // so that endcatch can zero it back

        if (pShadowSP)
        {
            *pShadowSP = (size_t)dEsp;
        }
    }
    else
    {
        // so shadow SP has the real SP as we are going to unwind the stack
        dEsp = (LPVOID)context.GetSP();

        // Pop any nested ExInfos that live below the target SP.
        // BEGIN: pExInfo->UnwindExInfo(dEsp);
        ExInfo *pPrevNestedInfo = pExInfo->m_pPrevNestedInfo;

        while (pPrevNestedInfo && pPrevNestedInfo->m_StackAddress < dEsp)
        {
            LOG((LF_EH, LL_INFO1000, "ResumeAtJitEH: popping nested ExInfo at 0x%p\n", pPrevNestedInfo->m_StackAddress));

            pPrevNestedInfo->DestroyExceptionHandle();
            pPrevNestedInfo->m_StackTraceInfo.FreeStackTrace();

#ifdef DEBUGGING_SUPPORTED
            if (g_pDebugInterface != NULL)
            {
                g_pDebugInterface->DeleteInterceptContext(pPrevNestedInfo->m_DebuggerExState.GetDebuggerInterceptContext());
            }
#endif // DEBUGGING_SUPPORTED

            pPrevNestedInfo = pPrevNestedInfo->m_pPrevNestedInfo;
        }

        pExInfo->m_pPrevNestedInfo = pPrevNestedInfo;

        _ASSERTE(pExInfo->m_pPrevNestedInfo == 0 || pExInfo->m_pPrevNestedInfo->m_StackAddress >= dEsp);

        // Before we unwind the SEH records, get the Frame from the top-most nested exception record.
        Frame* pNestedFrame = GetCurrFrame(FindNestedEstablisherFrame(GetCurrentSEHRecord()));

        PopNestedExceptionRecords((LPVOID)(size_t)dEsp);

        EXCEPTION_REGISTRATION_RECORD* pNewBottomMostHandler = GetCurrentSEHRecord();

        pExInfo->m_pShadowSP = pShadowSP;

        // The context and exception record are no longer any good.
        _ASSERTE(pExInfo->m_pContext < dEsp);   // It must be off the top of the stack.
        pExInfo->m_pContext = 0;                // Whack it.
        pExInfo->m_pExceptionRecord = 0;
        pExInfo->m_pExceptionPointers = 0;

        // We're going to put one nested record back on the stack before we resume. This is
        // where it goes.
        NestedHandlerExRecord *pNestedHandlerExRecord = (NestedHandlerExRecord*)((BYTE*)dEsp - ALIGN_UP(sizeof(NestedHandlerExRecord), STACK_ALIGN_SIZE));

        // The point of no return. The next statement starts scribbling on the stack. It's
        // deep enough that we won't hit our own locals. (That's important, 'cuz we're still
        // using them.)
        //
        _ASSERTE(dEsp > &pCf);
        pNestedHandlerExRecord->m_handlerInfo.m_hThrowable=NULL; // This is random memory. Handle
                                                                 // must be initialized to null before
                                                                 // calling Init(), as Init() will try
                                                                 // to free any old handle.
        pNestedHandlerExRecord->Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, pNestedFrame);

        INSTALL_EXCEPTION_HANDLING_RECORD(&(pNestedHandlerExRecord->m_ExReg));

        context.SetSP(pNestedHandlerExRecord);

        // We might have moved the bottommost handler. The nested record itself is never
        // the bottom most handler -- it's pushed after the fact. So we have to make the
        // bottom-most handler the one BEFORE the nested record.
        if (pExInfo->m_pBottomMostHandler < pNewBottomMostHandler)
        {
            STRESS_LOG3(LF_EH, LL_INFO10000, "ResumeAtJitEH: setting ExInfo:0x%p m_pBottomMostHandler from 0x%p to 0x%p\n",
                        pExInfo, pExInfo->m_pBottomMostHandler, pNewBottomMostHandler);
            pExInfo->m_pBottomMostHandler = pNewBottomMostHandler;
        }

        if (pShadowSP)
        {
            *pShadowSP = context.GetSP();
        }
    }

    STRESS_LOG3(LF_EH, LL_INFO100, "ResumeAtJitEH: resuming at EIP = %p  ESP = %p EBP = %p\n",
                context.Eip, context.GetSP(), context.GetFP());

#ifdef STACK_GUARDS_DEBUG
    // We are transitioning back to managed code, so ensure that we are in
    // SO-tolerant mode before we do so.
    RestoreSOToleranceState();
#endif

    // we want this to happen as late as possible but certainly after the notification
    // that the handle for the current ExInfo has been freed has been delivered
    pExInfo->m_EHClauseInfo.SetManagedCodeEntered(TRUE);

    ETW::ExceptionLog::ExceptionCatchBegin(pMethodDesc, (PVOID)startAddress);

    ResumeAtJitEHHelper(&context);
    UNREACHABLE_MSG("Should never return from ResumeAtJitEHHelper!");

    // we do not set pExInfo->m_EHClauseInfo.m_fManagedCodeEntered = FALSE here,
    // that happens when the catch clause calls back to COMPlusEndCatch
    // we don't return to this point so it would be moot (see unreachable_msg above)
}
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
// Invokes the managed filter clause for CallJitEHFilter. This must live in its
// own function because BEGIN_CALL_TO_MANAGED (via INSTALL_COMPLUS_EXCEPTION_HANDLER)
// installs a filter of its own, which cannot share a frame with the caller.
int CallJitEHFilterWorker(size_t *pShadowSP, EHContext *pContext)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    int disposition = EXCEPTION_CONTINUE_SEARCH;

    BEGIN_CALL_TO_MANAGED();
    disposition = CallJitEHFilterHelper(pShadowSP, pContext);
    END_CALL_TO_MANAGED();

    return disposition;
}
//
// CallJitEHFilter: runs a jitted filter clause during the first pass of
// exception dispatch and returns its disposition
// (EXCEPTION_EXECUTE_HANDLER / EXCEPTION_CONTINUE_SEARCH).
//
// Parameters:
//   pCf          - crawl frame for the method owning the filter
//   startPC      - start of the method's code; clause offsets are relative to it
//   EHClausePtr  - the EH clause whose filter is being evaluated
//   nestingLevel - nesting depth of the clause within the method
//   thrownObj    - the exception object to pass to the filter
//
int CallJitEHFilter(CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel, OBJECTREF thrownObj)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    int retVal = EXCEPTION_CONTINUE_SEARCH;
    size_t * pShadowSP = NULL;
    EHContext context;

    // Target IP is the start of the filter code.
    context.Setup(PCODE(startPC + EHClausePtr->FilterOffset), pCf->GetRegisterSet());

    size_t * pEndFilter = NULL; // Write

    pCf->GetCodeManager()->FixContext(ICodeManager::FILTER_CONTEXT, &context, pCf->GetCodeInfo(),
                                      EHClausePtr->FilterOffset, nestingLevel, thrownObj, pCf->GetCodeManState(),
                                      &pShadowSP, &pEndFilter);

    // End of the filter is the same as start of handler
    if (pEndFilter)
    {
        *pEndFilter = EHClausePtr->HandlerStartPC;
    }

    // ExceptionFilterFrame serves two purposes:
    //
    // 1. It serves as a frame that stops the managed search for handler
    // if we fault in the filter. ThrowCallbackType.pTopFrame is going point
    // to this frame during search for exception handler inside filter.
    // The search for handler needs a frame to stop. If we had no frame here,
    // the exceptions in filters would not be swallowed correctly since we would
    // walk past the EX_TRY/EX_CATCH block in COMPlusThrowCallbackHelper.
    //
    // 2. It allows setting of SHADOW_SP_FILTER_DONE flag in UnwindFrames()
    // if we fault in the filter. We have to set this flag together with unwinding
    // of the filter frame. Using a regular C++ holder to clear this flag here would cause
    // GC holes. The stack would be in inconsistent state when we trigger gc just before
    // returning from UnwindFrames.

    FrameWithCookie<ExceptionFilterFrame> exceptionFilterFrame(pShadowSP);

    ETW::ExceptionLog::ExceptionFilterBegin(pCf->GetCodeInfo()->GetMethodDesc(), (PVOID)pCf->GetCodeInfo()->GetStartAddress());
    retVal = CallJitEHFilterWorker(pShadowSP, &context);
    ETW::ExceptionLog::ExceptionFilterEnd();

    exceptionFilterFrame.Pop();

    return retVal;
}
//
// CallJitEHFinally: runs a jitted finally clause during the second (unwind)
// pass of exception dispatch, then propagates any register changes made by
// the finally back into the register display.
//
void CallJitEHFinally(CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel)
{
    WRAPPER_NO_CONTRACT;

    EHContext context;

    // Target IP is the start of the finally handler.
    context.Setup(PCODE(startPC + EHClausePtr->HandlerStartPC), pCf->GetRegisterSet());

    size_t * pShadowSP = NULL; // Write Esp to *pShadowSP before jumping to handler
    size_t * pFinallyEnd = NULL;

    pCf->GetCodeManager()->FixContext(
        ICodeManager::FINALLY_CONTEXT, &context, pCf->GetCodeInfo(),
        EHClausePtr->HandlerStartPC, nestingLevel, ObjectToOBJECTREF((Object *) NULL), pCf->GetCodeManState(),
        &pShadowSP, &pFinallyEnd);

    if (pFinallyEnd)
    {
        *pFinallyEnd = EHClausePtr->HandlerEndPC;
    }

    ETW::ExceptionLog::ExceptionFinallyBegin(pCf->GetCodeInfo()->GetMethodDesc(), (PVOID)pCf->GetCodeInfo()->GetStartAddress());
    CallJitEHFinallyHelper(pShadowSP, &context);
    ETW::ExceptionLog::ExceptionFinallyEnd();

    //
    // Update the registers using new context
    //
    // This is necessary to reflect GC pointer changes during the middle of a unwind inside a
    // finally clause, because:
    // 1. GC won't see the part of stack inside try (which has thrown an exception) that is already
    // unwinded and thus GC won't update GC pointers for this portion of the stack, but rather the
    // call stack in finally.
    // 2. upon return of finally, the unwind process continues and unwinds stack based on the part
    // of stack inside try and won't see the updated values in finally.
    // As a result, we need to manually update the context using register values upon return of finally
    //
    // Note that we only update the registers for finally clause because
    // 1. For filter handlers, stack walker is able to see the whole stack (including the try part)
    // with the help of ExceptionFilterFrame as filter handlers are called in first pass
    // 2. For catch handlers, the current unwinding is already finished
    //
    context.UpdateFrame(pCf->GetRegisterSet());

    // This does not need to be guarded by a holder because the frame is dead if an exception gets thrown.  Filters are different
    // since they are run in the first pass, so we must update the shadowSP reset in CallJitEHFilter.
    if (pShadowSP) {
        *pShadowSP = 0;  // reset the shadowSP to 0
    }
}
#if defined(_MSC_VER)
#pragma warning (default : 4731)
#endif
//=====================================================================
// *********************************************************************
// Returns TRUE if the given FS:0 record was installed by the runtime's main
// managed-code frame handler or its nested-exception handler.
BOOL ComPlusFrameSEH(EXCEPTION_REGISTRATION_RECORD* pEHR)
{
    LIMITED_METHOD_CONTRACT;

    LPVOID pHandler = (LPVOID)pEHR->Handler;
    if (pHandler == (LPVOID)COMPlusFrameHandler)
        return TRUE;
    return (pHandler == (LPVOID)COMPlusNestedExceptionHandler) ? TRUE : FALSE;
}
//
//-------------------------------------------------------------------------
// This is installed when we call COMPlusFrameHandler to provide a bound to
// determine when are within a nested exception
//-------------------------------------------------------------------------
//
// COMPlusNestedExceptionHandler: SEH personality routine installed while a
// managed handler/filter/finally is running. On the unwind pass it pops the
// nested ExInfo that corresponds to this establisher frame, then forwards all
// dispatch to COMPlusFrameHandler.
//
EXCEPTION_HANDLER_IMPL(COMPlusNestedExceptionHandler)
{
    WRAPPER_NO_CONTRACT;

    if (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND))
    {
        LOG((LF_EH, LL_INFO100, "    COMPlusNestedHandler(unwind) with %x at %x\n", pExceptionRecord->ExceptionCode,
            pContext ? GetIP(pContext) : 0));


        // We're unwinding past a nested exception record, which means that we've thrown
        // a new exception out of a region in which we're handling a previous one.  The
        // previous exception is overridden -- and needs to be unwound.

        // The preceding is ALMOST true.  There is one more case, where we use setjmp/longjmp
        // from within a nested handler.  We won't have a nested exception in that case -- just
        // the unwind.

        Thread* pThread = GetThread();
        ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
        ExInfo* pPrevNestedInfo = pExInfo->m_pPrevNestedInfo;

        // Only pop if the nested ExInfo being unwound is the one owned by this
        // particular establisher frame (the normal nested-exception case).
        if (pPrevNestedInfo == &((NestedHandlerExRecord*)pEstablisherFrame)->m_handlerInfo)
        {
            _ASSERTE(pPrevNestedInfo);

            LOG((LF_EH, LL_INFO100, "COMPlusNestedExceptionHandler: PopExInfo(): popping nested ExInfo at 0x%p\n", pPrevNestedInfo));

            pPrevNestedInfo->DestroyExceptionHandle();
            pPrevNestedInfo->m_StackTraceInfo.FreeStackTrace();

#ifdef DEBUGGING_SUPPORTED
            if (g_pDebugInterface != NULL)
            {
                g_pDebugInterface->DeleteInterceptContext(pPrevNestedInfo->m_DebuggerExState.GetDebuggerInterceptContext());
            }
#endif // DEBUGGING_SUPPORTED

            pExInfo->m_pPrevNestedInfo = pPrevNestedInfo->m_pPrevNestedInfo;

        } else {
            // The whacky setjmp/longjmp case.  Nothing to do.
        }

    } else {
        LOG((LF_EH, LL_INFO100, "    InCOMPlusNestedHandler with %x at %x\n", pExceptionRecord->ExceptionCode,
            pContext ? GetIP(pContext) : 0));
    }


    // There is a nasty "gotcha" in the way exception unwinding, finally's, and nested exceptions
    // interact.  Here's the scenario ... it involves two exceptions, one normal one, and one
    // raised in a finally.
    //
    // The first exception occurs, and is caught by some handler way up the stack.  That handler
    // calls RtlUnwind -- and handlers that didn't catch this first exception are called again, with
    // the UNWIND flag set.  If, one of the handlers throws an exception during
    // unwind (like, a throw from a finally) -- then that same handler is not called during
    // the unwind pass of the second exception.  [ASIDE: It is called on first-pass.]
    //
    // What that means is -- the COMPlusExceptionHandler, can't count on unwinding itself correctly
    // if an exception is thrown from a finally.  Instead, it relies on the NestedExceptionHandler
    // that it pushes for this.
    //

    EXCEPTION_DISPOSITION retval = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler);
    LOG((LF_EH, LL_INFO100, "Leaving COMPlusNestedExceptionHandler with %d\n", retval));
    return retval;

}
// Walks the FS:0 chain starting at pEstablisherFrame and returns the first
// record installed by COMPlusNestedExceptionHandler. A matching record must
// exist; the chain end is never expected to be reached.
EXCEPTION_REGISTRATION_RECORD *FindNestedEstablisherFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame)
{
    LIMITED_METHOD_CONTRACT;

    EXCEPTION_REGISTRATION_RECORD *pRecord = pEstablisherFrame;
    for (; pRecord->Handler != (PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler; pRecord = pRecord->Next)
    {
        _ASSERTE(pRecord->Next != EXCEPTION_CHAIN_END);  // should always find one
    }
    return pRecord;
}
// SEH personality routine for the fast N/Export (reverse P/Invoke) path.
// All real work is shared with COMPlusFrameHandler; this wrapper only adds a
// debug-build check for exceptions escaping the last CLR personality routine.
EXCEPTION_HANDLER_IMPL(FastNExportExceptHandler)
{
    WRAPPER_NO_CONTRACT;

    // Most of our logic is in common with COMPlusFrameHandler; just forward.
    EXCEPTION_DISPOSITION disposition = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler);

#ifdef _DEBUG
    // If the exception is escaping the last CLR personality routine on the stack,
    // then state a flag on the thread to indicate so.
    if (ExceptionContinueSearch == disposition)
    {
        SetReversePInvokeEscapingUnhandledExceptionStatus(IS_UNWINDING(pExceptionRecord->ExceptionFlags), pEstablisherFrame);
    }
#endif // _DEBUG

    return disposition;
}
#ifdef FEATURE_COMINTEROP
// The reverse COM interop path needs to be sure to pop the ComMethodFrame that is pushed, but we do not want
// to have an additional FS:0 handler between the COM callsite and the call into managed. So we push this
// FS:0 handler, which will defer to the usual COMPlusFrameHandler and then perform the cleanup of the
// ComMethodFrame, if needed.
EXCEPTION_HANDLER_IMPL(COMPlusFrameHandlerRevCom)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_ANY;

    // Regular dispatch is shared with the common managed-code handler.
    EXCEPTION_DISPOSITION disposition = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler);

    // On the unwind pass, pop the ComMethodFrame that the reverse COM interop
    // call pushed, since no separate FS:0 handler exists to do it.
    if (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND))
    {
        ComMethodFrame::DoSecondPassHandlerCleanup(GetCurrFrame(pEstablisherFrame));
    }

    return disposition;
}
#endif // FEATURE_COMINTEROP
#endif // !DACCESS_COMPILE
#endif // !FEATURE_EH_FUNCLETS
// Given the context of a redirected-stub frame, reads the CONTEXT pointer the
// redirection stub stored at a fixed EBP-relative slot and returns it.
PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(CONTEXT * pContext)
{
    LIMITED_METHOD_DAC_CONTRACT;

    TADDR contextSlot = (TADDR)(pContext->Ebp + REDIRECTSTUB_EBP_OFFSET_CONTEXT);
    return *dac_cast<PTR_PTR_CONTEXT>(contextSlot);
}
#ifndef DACCESS_COMPILE
// Vectored-style handler that lets the CLR inspect an exception but never
// swallow it: a "would execute handler" result is converted back into
// "continue search" so dispatch always proceeds up the chain.
LONG CLRNoCatchHandler(EXCEPTION_POINTERS* pExceptionInfo, PVOID pv)
{
#ifndef FEATURE_EH_FUNCLETS
    WRAPPER_NO_CONTRACT;
    STATIC_CONTRACT_ENTRY_POINT;

    // This function can be called during the handling of a SO, so no
    // entrypoint macros are used here.
    LONG disposition = CLRVectoredExceptionHandler(pExceptionInfo);

    if (disposition == EXCEPTION_EXECUTE_HANDLER)
    {
        disposition = EXCEPTION_CONTINUE_SEARCH;
    }

    return disposition;
#else // !FEATURE_EH_FUNCLETS
    return EXCEPTION_CONTINUE_SEARCH;
#endif // !FEATURE_EH_FUNCLETS
}
// Returns TRUE if caller should resume execution.
// AdjustContextForVirtualStub: when an AV occurs inside a virtual call stub
// (dispatch or resolve stub dereferencing a null "this"), rewrite the context
// so it looks as if the exception happened at the call site, with the stack
// restored to its pre-call state. Returns TRUE if the context was adjusted
// (caller should continue dispatching with the fixed-up context), FALSE if the
// fault is not in a recognized stub.
BOOL
AdjustContextForVirtualStub(
        EXCEPTION_RECORD *pExceptionRecord,
        CONTEXT *pContext)
{
    LIMITED_METHOD_CONTRACT;

    Thread * pThread = GetThreadNULLOk();

    // We may not have a managed thread object. Example is an AV on the helper thread.
    // (perhaps during StubManager::IsStub)
    if (pThread == NULL)
    {
        return FALSE;
    }

    PCODE f_IP = GetIP(pContext);

    VirtualCallStubManager::StubKind sk;
    VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(f_IP, &sk);

    if (sk == VirtualCallStubManager::SK_DISPATCH)
    {
        // The faulting instruction in a dispatch stub must be the expected-MT
        // compare against [ecx]; anything else is an unexpected stub layout.
        if (*PTR_WORD(f_IP) != X86_INSTR_CMP_IND_ECX_IMM32)
        {
            _ASSERTE(!"AV in DispatchStub at unknown instruction");
            return FALSE;
        }
    }
    else
    if (sk == VirtualCallStubManager::SK_RESOLVE)
    {
        // The faulting instruction in a resolve stub must be the "this" load
        // (mov eax, [ecx]); anything else is an unexpected stub layout.
        if (*PTR_WORD(f_IP) != X86_INSTR_MOV_EAX_ECX_IND)
        {
            _ASSERTE(!"AV in ResolveStub at unknown instruction");
            return FALSE;
        }

        SetSP(pContext, dac_cast<PCODE>(dac_cast<PTR_BYTE>(GetSP(pContext)) + sizeof(void*))); // rollback push eax
    }
    else
    {
        return FALSE;
    }

    // Report the fault at the return address on the stack (i.e. the call site).
    PCODE callsite = *dac_cast<PTR_PCODE>(GetSP(pContext));
    if (pExceptionRecord != NULL)
    {
        pExceptionRecord->ExceptionAddress = (PVOID)callsite;
    }
    SetIP(pContext, callsite);

#if defined(GCCOVER_TOLERATE_SPURIOUS_AV)
    // Modify LastAVAddress saved in thread to distinguish between fake & real AV
    // See comments in IsGcMarker in file excep.cpp for more details
    pThread->SetLastAVAddress((LPVOID)GetIP(pContext));
#endif // defined(GCCOVER_TOLERATE_SPURIOUS_AV)

    // put ESP back to what it was before the call.
    TADDR sp = GetSP(pContext) + sizeof(void*);

#ifndef UNIX_X86_ABI
    // set the ESP to what it would be after the call (remove pushed arguments)

    size_t stackArgumentsSize;
    if (sk == VirtualCallStubManager::SK_DISPATCH)
    {
        ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

        // Recover the callee's argument size from the method the stub targets.
        DispatchHolder *holder = DispatchHolder::FromDispatchEntry(f_IP);
        MethodTable *pMT = (MethodTable*)holder->stub()->expectedMT();
        DispatchToken token(VirtualCallStubManager::GetTokenFromStubQuick(pMgr, f_IP, sk));
        MethodDesc* pMD = VirtualCallStubManager::GetRepresentativeMethodDescFromToken(token, pMT);
        stackArgumentsSize = pMD->SizeOfArgStack();
    }
    else
    {
        // Compute the stub entry address from the address of failure (location of dereferencing of "this" pointer)
        ResolveHolder *holder = ResolveHolder::FromResolveEntry(f_IP - ResolveStub::offsetOfThisDeref());
        stackArgumentsSize = holder->stub()->stackArgumentsSize();
    }

    sp += stackArgumentsSize;
#endif // UNIX_X86_ABI

    SetSP(pContext, dac_cast<PCODE>(dac_cast<PTR_BYTE>(sp)));

    return TRUE;
}
#endif // !DACCESS_COMPILE
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
/* EXCEP.CPP:
*
*/
#include "common.h"
#include "frames.h"
#include "excep.h"
#include "object.h"
#include "field.h"
#include "dbginterface.h"
#include "cgensys.h"
#include "comutilnative.h"
#include "sigformat.h"
#include "siginfo.hpp"
#include "gcheaputilities.h"
#include "eedbginterfaceimpl.h" //so we can clearexception in COMPlusThrow
#include "eventtrace.h"
#include "eetoprofinterfacewrapper.inl"
#include "eedbginterfaceimpl.inl"
#include "dllimportcallback.h"
#include "threads.h"
#include "eeconfig.h"
#include "vars.hpp"
#include "generics.h"
#include "asmconstants.h"
#include "virtualcallstub.h"
#ifndef FEATURE_EH_FUNCLETS
MethodDesc * GetUserMethodForILStub(Thread * pThread, UINT_PTR uStubSP, MethodDesc * pILStubMD, Frame ** ppFrameOut);
#if !defined(DACCESS_COMPILE)
#define FORMAT_MESSAGE_BUFFER_LENGTH 1024
BOOL ComPlusFrameSEH(EXCEPTION_REGISTRATION_RECORD*);
PEXCEPTION_REGISTRATION_RECORD GetPrevSEHRecord(EXCEPTION_REGISTRATION_RECORD*);
extern "C" {
// in asmhelpers.asm:
VOID STDCALL ResumeAtJitEHHelper(EHContext *pContext);
int STDCALL CallJitEHFilterHelper(size_t *pShadowSP, EHContext *pContext);
VOID STDCALL CallJitEHFinallyHelper(size_t *pShadowSP, EHContext *pContext);
typedef void (*RtlUnwindCallbackType)(void);
BOOL CallRtlUnwind(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
RtlUnwindCallbackType callback,
EXCEPTION_RECORD *pExceptionRecord,
void *retval);
BOOL CallRtlUnwindSafe(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
RtlUnwindCallbackType callback,
EXCEPTION_RECORD *pExceptionRecord,
void *retval);
}
static inline BOOL
CPFH_ShouldUnwindStack(const EXCEPTION_RECORD * pCER) {
    LIMITED_METHOD_CONTRACT;

    _ASSERTE(pCER != NULL);

    // We can only unwind those exceptions whose context/record we don't need
    // for a rethrow: CLR-raised exceptions and stack overflow. All others must
    // keep their context around for a rethrow, so they can't be unwound here.
    return (IsComPlusException(pCER) || (pCER->ExceptionCode == STATUS_STACK_OVERFLOW)) ? TRUE : FALSE;
}
// A record is a "nested" record iff its handler is COMPlusNestedExceptionHandler.
static inline BOOL IsComPlusNestedExceptionRecord(EXCEPTION_REGISTRATION_RECORD* pEHR)
{
    LIMITED_METHOD_CONTRACT;
    return (pEHR->Handler == (PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler) ? TRUE : FALSE;
}
// Like FindNestedEstablisherFrame, but tolerates not finding one: returns 0
// when the chain ends before a COMPlusNestedExceptionHandler record appears.
EXCEPTION_REGISTRATION_RECORD *TryFindNestedEstablisherFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame)
{
    LIMITED_METHOD_CONTRACT;

    EXCEPTION_REGISTRATION_RECORD *pRecord = pEstablisherFrame;
    for (;;)
    {
        if (pRecord->Handler == (PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler)
            return pRecord;
        pRecord = pRecord->Next;
        if (pRecord == EXCEPTION_CHAIN_END)
            return 0;
    }
}
#ifdef _DEBUG
// Debug-only breadcrumbs: record the last handler we resumed into, so that if
// an endcatch never arrives and the stack is corrupted we can figure out which
// method/handler was responsible.
static MethodDesc *gLastResumedExceptionFunc = NULL;
static DWORD gLastResumedExceptionHandler = 0;
#endif
//---------------------------------------------------------------------
// void RtlUnwindCallback()
// Continuation callback passed to RtlUnwind for the global unwind.
// Control is never expected to arrive here; the unwind is redirected
// before this target would run, so reaching it is a bug.
//---------------------------------------------------------------------
static void RtlUnwindCallback()
{
    LIMITED_METHOD_CONTRACT;
    _ASSERTE(!"Should never get here");
}
// Returns TRUE iff the given FS:0 record was installed by FastNExportExceptHandler.
BOOL FastNExportSEH(EXCEPTION_REGISTRATION_RECORD* pEHR)
{
    LIMITED_METHOD_CONTRACT;
    return ((LPVOID)pEHR->Handler == (LPVOID)FastNExportExceptHandler) ? TRUE : FALSE;
}
// Returns TRUE iff the given FS:0 record was installed by the reverse COM
// interop handler. Always FALSE when COM interop is compiled out.
BOOL ReverseCOMSEH(EXCEPTION_REGISTRATION_RECORD* pEHR)
{
    LIMITED_METHOD_CONTRACT;

#ifdef FEATURE_COMINTEROP
    return ((LPVOID)pEHR->Handler == (LPVOID)COMPlusFrameHandlerRevCom) ? TRUE : FALSE;
#else // !FEATURE_COMINTEROP
    return FALSE;
#endif // FEATURE_COMINTEROP
}
//
// Returns true if the given SEH handler is one of our SEH handlers that is responsible for managing exceptions in
// regions of managed code.
//
BOOL IsUnmanagedToManagedSEHHandler(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame)
{
    WRAPPER_NO_CONTRACT;

    // A record guards a region of managed code if it was installed by any of:
    //   - COMPlusFrameHandler / COMPlusNestedExceptionHandler (ComPlusFrameSEH)
    //   - FastNExportExceptHandler                            (FastNExportSEH)
    //   - COMPlusFrameHandlerRevCom                           (ReverseCOMSEH)
    if (ComPlusFrameSEH(pEstablisherFrame))
        return TRUE;
    if (FastNExportSEH(pEstablisherFrame))
        return TRUE;
    return ReverseCOMSEH(pEstablisherFrame);
}
// Recovers the runtime Frame associated with one of our managed-guarding SEH
// records. Only valid to call on records for which
// IsUnmanagedToManagedSEHHandler returns TRUE.
Frame *GetCurrFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(IsUnmanagedToManagedSEHHandler(pEstablisherFrame));

    Frame *pResult = ((FrameHandlerExRecord *)pEstablisherFrame)->GetCurrFrame();

    // Assert that the exception frame is on the thread or that the exception frame is the top frame.
    _ASSERTE(GetThreadNULLOk() == NULL || GetThread()->GetFrame() == (Frame*)-1 || GetThread()->GetFrame() <= pResult);

    return pResult;
}
// Advances past pRec and returns the next CLR-owned (managed-guarding) record
// on the FS:0 chain, or EXCEPTION_CHAIN_END if there is none.
EXCEPTION_REGISTRATION_RECORD* GetNextCOMPlusSEHRecord(EXCEPTION_REGISTRATION_RECORD* pRec) {
    WRAPPER_NO_CONTRACT;
    if (pRec == EXCEPTION_CHAIN_END)
        return EXCEPTION_CHAIN_END;

    EXCEPTION_REGISTRATION_RECORD* pCurrent = pRec;
    do {
        _ASSERTE(pCurrent != 0);
        pCurrent = pCurrent->Next;
        if (pCurrent == EXCEPTION_CHAIN_END)
            break;
    } while (!IsUnmanagedToManagedSEHHandler(pCurrent));

    _ASSERTE(pCurrent == EXCEPTION_CHAIN_END || IsUnmanagedToManagedSEHHandler(pCurrent));
    return pCurrent;
}
/*
* GetClrSEHRecordServicingStackPointer
*
* This function searches all the Frame SEH records, and finds the one that is
* currently signed up to do all exception handling for the given stack pointer
* on the given thread.
*
* Parameters:
* pThread - The thread to search on.
* pStackPointer - The stack location that we are finding the Frame SEH Record for.
*
* Returns
* A pointer to the SEH record, or EXCEPTION_CHAIN_END if none was found.
*
*/
PEXCEPTION_REGISTRATION_RECORD
GetClrSEHRecordServicingStackPointer(Thread *pThread,
                                     void *pStackPointer)
{
    ThreadExceptionState* pExState = pThread->GetExceptionState();

    //
    // We can only do this if there is a context in the pExInfo. There are cases (most notably the
    // EEPolicy::HandleFatalError case) where we don't have that. In these cases we will return
    // no enclosing handler since we cannot accurately determine the FS:0 entry which services
    // this stack address.
    //
    // The side effect of this is that for these cases, the debugger cannot intercept
    // the exception
    //
    CONTEXT* pContextRecord = pExState->GetContextRecord();
    if (pContextRecord == NULL)
    {
        return EXCEPTION_CHAIN_END;
    }

    void *exceptionSP = dac_cast<PTR_VOID>(GetSP(pContextRecord));

    //
    // Now set the establishing frame.  What this means in English is that we need to find
    // the fs:0 entry that handles exceptions for the place on the stack given in stackPointer.
    //
    PEXCEPTION_REGISTRATION_RECORD pSEHRecord = GetFirstCOMPlusSEHRecord(pThread);

    while (pSEHRecord != EXCEPTION_CHAIN_END)
    {

        //
        // Skip any SEHRecord which is not a CLR record or was pushed after the exception
        // on this thread occurred.
        // (Records below exceptionSP on the stack were pushed after the exception.)
        //
        if (IsUnmanagedToManagedSEHHandler(pSEHRecord) && (exceptionSP <= (void *)pSEHRecord))
        {
            Frame *pFrame = GetCurrFrame(pSEHRecord);
            //
            // Arcane knowledge here.  All Frame records are stored on the stack by the runtime
            // in ever decreasing address space.  So, we merely have to search back until
            // we find the first frame record with a higher stack value to find the
            // establishing frame for the given stack address.
            //
            if (((void *)pFrame) >= pStackPointer)
            {
                break;
            }

        }

        pSEHRecord = GetNextCOMPlusSEHRecord(pSEHRecord);
    }

    return pSEHRecord;
}
#ifdef _DEBUG
// We've determined during a stack walk that managed code is transitioning to unmanaged (EE) code. Check that the
// state of the EH chain is correct.
//
// For x86, check that we do INSTALL_COMPLUS_EXCEPTION_HANDLER before calling managed code.  This check should be
// done for all managed code sites, not just transitions. But this will catch most problem cases.
void VerifyValidTransitionFromManagedCode(Thread *pThread, CrawlFrame *pCF)
{
    WRAPPER_NO_CONTRACT;

    _ASSERTE(ExecutionManager::IsManagedCode(GetControlPC(pCF->GetRegisterSet())));

    // Cannot get to the TEB of other threads. So ignore them.
    if (pThread != GetThreadNULLOk())
    {
        return;
    }

    // Find the EH record guarding the current region of managed code, based on the CrawlFrame passed in.
    PEXCEPTION_REGISTRATION_RECORD pEHR = GetCurrentSEHRecord();

    while ((pEHR != EXCEPTION_CHAIN_END) && ((ULONG_PTR)pEHR < GetRegdisplaySP(pCF->GetRegisterSet())))
    {
        pEHR = pEHR->Next;
    }

    // VerifyValidTransitionFromManagedCode can be called before the CrawlFrame's MethodDesc is initialized.
    // Fix that if necessary for the consistency check.
    MethodDesc * pFunction = pCF->GetFunction();
    if ((!IsUnmanagedToManagedSEHHandler(pEHR)) &&          // Will the assert fire?  If not, don't waste our time.
        (pFunction == NULL))
    {
        _ASSERTE(pCF->GetRegisterSet());
        PCODE ip = GetControlPC(pCF->GetRegisterSet());
        pFunction = ExecutionManager::GetCodeMethodDesc(ip);
        _ASSERTE(pFunction);
    }

    // Great, we've got the EH record that's next up the stack from the current SP (which is in managed code). That
    // had better be a record for one of our handlers responsible for handling exceptions in managed code. If its
    // not, then someone made it into managed code without setting up one of our EH handlers, and that's really
    // bad.
    CONSISTENCY_CHECK_MSGF(IsUnmanagedToManagedSEHHandler(pEHR),
                           ("Invalid transition into managed code!\n\n"
                            "We're walking this thread's stack and we've reached a managed frame at Esp=0x%p. "
                            "(The method is %s::%s) "
                            "The very next FS:0 record (0x%p) up from this point on the stack should be one of "
                            "our 'unmanaged to managed SEH handlers', but its not... its something else, and "
                            "that's very bad. It indicates that someone managed to call into managed code without "
                            "setting up the proper exception handling.\n\n"
                            "Get a good unmanaged stack trace for this thread. All FS:0 records are on the stack, "
                            "so you can see who installed the last handler. Somewhere between that function and "
                            "where the thread is now is where the bad transition occurred.\n\n"
                            "A little extra info: FS:0 = 0x%p, pEHR->Handler = 0x%p\n",
                            GetRegdisplaySP(pCF->GetRegisterSet()),
                            pFunction ->m_pszDebugClassName,
                            pFunction ->m_pszDebugMethodName,
                            pEHR,
                            GetCurrentSEHRecord(),
                            pEHR->Handler));
}
#endif
//================================================================================
// There are some things that should never be true when handling an
// exception. This function checks for them. Will assert or trap
// if it finds an error.
static inline void
CPFH_VerifyThreadIsInValidState(Thread* pThread, DWORD exceptionCode, EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame) {
    WRAPPER_NO_CONTRACT;

    // Debugger-generated exceptions are expected anywhere; nothing to verify.
    bool isDebuggerException = (exceptionCode == STATUS_BREAKPOINT) ||
                               (exceptionCode == STATUS_SINGLE_STEP);
    if (isDebuggerException) {
        return;
    }

#ifdef _DEBUG
    // check for overwriting of stack
    CheckStackBarrier(pEstablisherFrame);
    // trigger check for bad fs:0 chain
    GetCurrentSEHRecord();
#endif

    if (g_fEEShutDown) {
        return;
    }

    // An exception on the GC thread, or while holding the thread store lock, will likely lock out the entire process.
    if (::IsGCThread() || ThreadStore::HoldingThreadStore())
    {
        _ASSERTE(!"Exception during garbage collection or while holding thread store");
        EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
    }
}
#ifdef FEATURE_HIJACK
void
CPFH_AdjustContextForThreadSuspensionRace(CONTEXT *pContext, Thread *pThread)
{
WRAPPER_NO_CONTRACT;
PCODE f_IP = GetIP(pContext);
if (Thread::IsAddrOfRedirectFunc((PVOID)f_IP)) {
// This is a very rare case where we tried to redirect a thread that was
// just about to dispatch an exception, and our update of EIP took, but
// the thread continued dispatching the exception.
//
// If this should happen (very rare) then we fix it up here.
//
_ASSERTE(pThread->GetSavedRedirectContext());
SetIP(pContext, GetIP(pThread->GetSavedRedirectContext()));
STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 1 setting IP = %x\n", pContext->Eip);
}
if (f_IP == GetEEFuncEntryPoint(THROW_CONTROL_FOR_THREAD_FUNCTION)) {
// This is a very rare case where we tried to redirect a thread that was
// just about to dispatch an exception, and our update of EIP took, but
// the thread continued dispatching the exception.
//
// If this should happen (very rare) then we fix it up here.
//
SetIP(pContext, GetIP(pThread->m_OSContext));
STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 2 setting IP = %x\n", pContext->Eip);
}
// We have another even rarer race condition:
// - A) On thread A, Debugger puts an int 3 in the code stream at address X
// - A) We hit it and the begin an exception. The eip will be X + 1 (int3 is special)
// - B) Meanwhile, thread B redirects A's eip to Y. (Although A is really somewhere
// in the kernel, it looks like it's still in user code, so it can fall under the
// HandledJitCase and can be redirected)
// - A) The OS, trying to be nice, expects we have a breakpoint exception at X+1,
// but does -1 on the address since it knows int3 will leave the eip +1.
// So the context structure it will pass to the Handler is ideally (X+1)-1 = X
//
// ** Here's the race: Since thread B redirected A, the eip is actually Y (not X+1),
// but the kernel still touches it up to Y-1. So there's a window between when we hit a
// bp and when the handler gets called that this can happen.
// This causes an unhandled BP (since the debugger doesn't recognize the bp at Y-1)
//
// So what to do: If we land at Y-1 (ie, if f_IP+1 is the addr of a Redirected Func),
// then restore the EIP back to X. This will skip the redirection.
// Fortunately, this only occurs in cases where it's ok
// to skip. The debugger will recognize the patch and handle it.
if (Thread::IsAddrOfRedirectFunc((PVOID)(f_IP + 1))) {
_ASSERTE(pThread->GetSavedRedirectContext());
SetIP(pContext, GetIP(pThread->GetSavedRedirectContext()) - 1);
STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 3 setting IP = %x\n", pContext->Eip);
}
if (f_IP + 1 == GetEEFuncEntryPoint(THROW_CONTROL_FOR_THREAD_FUNCTION)) {
SetIP(pContext, GetIP(pThread->m_OSContext) - 1);
STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 4 setting IP = %x\n", pContext->Eip);
}
}
#endif // FEATURE_HIJACK
// Count of exceptions dispatched through the first-pass handler (incremented
// in CPFH_RealFirstPassHandler).  Plain increment, no interlocked operation,
// so the value is approximate under concurrent exception dispatch.
uint32_t g_exceptionCount;
//******************************************************************************
// Completes exception handling after RtlUnwind has unwound the SEH chain up
// to (but not including) pEstablisherFrame: re-installs a nested exception
// handler record, unwinds the managed Frame chain, and resumes execution at
// either the debugger's interception context or the catch handler.  This
// function is expected not to return (asserts if control reaches the end).
//
//   pExceptionRecord  - the exception being handled
//   pEstablisherFrame - SEH registration record of the handling frame; must
//                       be the current head of the FS:0 chain on entry
//   tct               - callback state describing the target of the unwind
EXCEPTION_DISPOSITION COMPlusAfterUnwind(
    EXCEPTION_RECORD *pExceptionRecord,
    EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
    ThrowCallbackType& tct)
{
    WRAPPER_NO_CONTRACT;

    // Note: we've completed the unwind pass up to the establisher frame, and we're headed off to finish our
    // cleanup and end up back in jitted code. Any more FS0 handlers pushed from this point on out will _not_ be
    // unwound. We go ahead and assert right here that indeed there are no handlers below the establisher frame
    // before we go any further.
    _ASSERTE(pEstablisherFrame == GetCurrentSEHRecord());

    Thread* pThread = GetThread();

    _ASSERTE(tct.pCurrentExceptionRecord == pEstablisherFrame);

    NestedHandlerExRecord nestedHandlerExRecord;
    nestedHandlerExRecord.Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, GetCurrFrame(pEstablisherFrame));

    // ... and now, put the nested record back on.
    INSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));

    // We entered COMPlusAfterUnwind in PREEMP, but we need to be in COOP from here on out
    GCX_COOP_NO_DTOR();

    // Mark the callback state as being in the unwind (second) pass.
    tct.bIsUnwind = TRUE;
    tct.pProfilerNotify = NULL;

    LOG((LF_EH, LL_INFO100, "COMPlusFrameHandler: unwinding\n"));

    tct.bUnwindStack = CPFH_ShouldUnwindStack(pExceptionRecord);

    LOG((LF_EH, LL_INFO1000, "COMPlusAfterUnwind: going to: pFunc:%#X, pStack:%#X\n",
        tct.pFunc, tct.pStack));

    // Pop managed Frames and run their unwind processing up to the target.
    UnwindFrames(pThread, &tct);

#ifdef DEBUGGING_SUPPORTED
    ExInfo* pExInfo = pThread->GetExceptionState()->GetCurrentExceptionTracker();
    if (pExInfo->m_ValidInterceptionContext)
    {
        // By now we should have all unknown FS:[0] handlers unwinded along with the managed Frames until
        // the interception point. We can now pop nested exception handlers and resume at interception context.
        EHContext context = pExInfo->m_InterceptionContext;
        pExInfo->m_InterceptionContext.Init();
        pExInfo->m_ValidInterceptionContext = FALSE;
        UnwindExceptionTrackerAndResumeInInterceptionFrame(pExInfo, &context);
    }
#endif // DEBUGGING_SUPPORTED

    _ASSERTE(!"Should not get here");
    return ExceptionContinueSearch;
} // EXCEPTION_DISPOSITION COMPlusAfterUnwind()
#ifdef DEBUGGING_SUPPORTED
//---------------------------------------------------------------------------------------
//
// This function is called to intercept an exception and start an unwind.
//
// Arguments:
//    pCurrentEstablisherFrame  - the exception registration record covering the stack range
//                                containing the interception point
//    pExceptionRecord          - EXCEPTION_RECORD of the exception being intercepted
//
// Return Value:
//    ExceptionContinueSearch if the exception cannot be intercepted
//
// Notes:
//    If the exception is intercepted, this function never returns.
//
EXCEPTION_DISPOSITION ClrDebuggerDoUnwindAndIntercept(EXCEPTION_REGISTRATION_RECORD *pCurrentEstablisherFrame,
                                                      EXCEPTION_RECORD *pExceptionRecord)
{
    WRAPPER_NO_CONTRACT;

    // Bail out if the thread's exception state does not permit interception.
    if (!CheckThreadExceptionStateForInterception())
    {
        return ExceptionContinueSearch;
    }

    Thread*               pThread  = GetThread();
    ThreadExceptionState* pExState = pThread->GetExceptionState();

    EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame;
    ThrowCallbackType tct;
    tct.Init();

    // Retrieve from the debugger state where the interception should land:
    // the establisher frame, target method, handler, stack address, and
    // bottom frame for the unwind.
    pExState->GetDebuggerState()->GetDebuggerInterceptInfo(&pEstablisherFrame,
                                      &(tct.pFunc),
                                      &(tct.dHandler),
                                      &(tct.pStack),
                                      NULL,
                                      &(tct.pBottomFrame)
                                     );

    //
    // If the handler that we've selected as the handler for the target frame of the unwind is in fact above the
    // handler that we're currently executing in, then use the current handler instead. Why? Our handlers for
    // nested exceptions actually process managed frames that live above them, up to the COMPlusFrameHanlder that
    // pushed the nested handler. If the user selectes a frame above the nested handler, then we will have selected
    // the COMPlusFrameHandler above the current nested handler. But we don't want to ask RtlUnwind to unwind past
    // the nested handler that we're currently executing in.
    //
    if (pEstablisherFrame > pCurrentEstablisherFrame)
    {
        // This should only happen if we're in a COMPlusNestedExceptionHandler.
        _ASSERTE(IsComPlusNestedExceptionRecord(pCurrentEstablisherFrame));

        pEstablisherFrame = pCurrentEstablisherFrame;
    }

#ifdef _DEBUG
    tct.pCurrentExceptionRecord = pEstablisherFrame;
#endif

    LOG((LF_EH|LF_CORDB, LL_INFO100, "ClrDebuggerDoUnwindAndIntercept: Intercepting at %s\n", tct.pFunc->m_pszDebugMethodName));
    LOG((LF_EH|LF_CORDB, LL_INFO100, "\t\t: pFunc is 0x%X\n", tct.pFunc));
    LOG((LF_EH|LF_CORDB, LL_INFO100, "\t\t: pStack is 0x%X\n", tct.pStack));

    // Kick off the unwind up to the chosen establisher frame.  On x86,
    // RtlUnwind returns to the caller (see note below).
    CallRtlUnwindSafe(pEstablisherFrame, RtlUnwindCallback, pExceptionRecord, 0);

    ExInfo* pExInfo = pThread->GetExceptionState()->GetCurrentExceptionTracker();
    if (pExInfo->m_ValidInterceptionContext)
    {
        // By now we should have all unknown FS:[0] handlers unwinded along with the managed Frames until
        // the interception point. We can now pop nested exception handlers and resume at interception context.
        GCX_COOP();
        EHContext context = pExInfo->m_InterceptionContext;
        pExInfo->m_InterceptionContext.Init();
        pExInfo->m_ValidInterceptionContext = FALSE;
        UnwindExceptionTrackerAndResumeInInterceptionFrame(pExInfo, &context);
    }

    // on x86 at least, RtlUnwind always returns

    // Note: we've completed the unwind pass up to the establisher frame, and we're headed off to finish our
    // cleanup and end up back in jitted code. Any more FS0 handlers pushed from this point on out will _not_ be
    // unwound.
    return COMPlusAfterUnwind(pExState->GetExceptionRecord(), pEstablisherFrame, tct);
} // EXCEPTION_DISPOSITION ClrDebuggerDoUnwindAndIntercept()
#endif // DEBUGGING_SUPPORTED
// Wrapper around the assembly routine that invokes the OS's RtlUnwind.
//
// RtlUnwind mutates the ExceptionFlags field of the exception record to mark
// it as unwinding, and on x86 RtlUnwind returns to its caller.  Because we
// call it during the first pass (once a handler is found) with a live
// exception record, the record would be left flagged for unwind afterwards.
//
// That matters for rethrow: if a non-COMPLUS exception is rethrown from a
// catch/filter handler, the runtime reraises it using this same saved record.
// With the unwind flag still set, the reraise would be treated as a second
// pass instead of a first pass.  So after RtlUnwind returns, this wrapper
// restores the ExceptionFlags value captured on entry.
//
// COMPLUS exceptions are unaffected: RaiseTheExceptionInternalOnly always
// builds a fresh exception record for them.
BOOL CallRtlUnwindSafe(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
                       RtlUnwindCallbackType callback,
                       EXCEPTION_RECORD *pExceptionRecord,
                       void *retval)
{
    LIMITED_METHOD_CONTRACT;

    // Capture the flags before the OS gets a chance to modify them.
    const DWORD dwOriginalFlags = pExceptionRecord->ExceptionFlags;

    BOOL fUnwindSucceeded = CallRtlUnwind(pEstablisherFrame, callback, pExceptionRecord, retval);

    if (dwOriginalFlags != pExceptionRecord->ExceptionFlags)
    {
        // We would expect the 32bit OS to have set the unwind flag at this point.
        _ASSERTE(pExceptionRecord->ExceptionFlags & EXCEPTION_UNWINDING);

        LOG((LF_EH, LL_INFO100, "CallRtlUnwindSafe: Resetting ExceptionFlags from %lu to %lu\n", pExceptionRecord->ExceptionFlags, dwOriginalFlags));

        // Put back the pre-unwind flags so a later rethrow sees a first-pass record.
        pExceptionRecord->ExceptionFlags = dwOriginalFlags;
    }

    return fUnwindSucceeded;
}
//******************************************************************************
// The essence of the first pass handler (after we've decided to actually do
// the first pass handling).
//******************************************************************************
inline EXCEPTION_DISPOSITION __cdecl
CPFH_RealFirstPassHandler( // ExceptionContinueSearch, etc.
EXCEPTION_RECORD *pExceptionRecord, // The exception record, with exception type.
EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, // Exception frame on whose behalf this is called.
CONTEXT *pContext, // Context from the exception.
void *pDispatcherContext, // @todo
BOOL bAsynchronousThreadStop, // @todo
BOOL fPGCDisabledOnEntry) // @todo
{
// We don't want to use a runtime contract here since this codepath is used during
// the processing of a hard SO. Contracts use a significant amount of stack
// which we can't afford for those cases.
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
#ifdef _DEBUG
static int breakOnFirstPass = -1;
if (breakOnFirstPass == -1)
breakOnFirstPass = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnFirstPass);
if (breakOnFirstPass != 0)
{
_ASSERTE(!"First pass exception handler");
}
#endif
EXCEPTION_DISPOSITION retval;
DWORD exceptionCode = pExceptionRecord->ExceptionCode;
Thread *pThread = GetThread();
#ifdef _DEBUG
static int breakOnSO = -1;
if (breakOnSO == -1)
breakOnSO = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_BreakOnSO);
if (breakOnSO != 0 && exceptionCode == STATUS_STACK_OVERFLOW)
{
DebugBreak(); // ASSERTing will overwrite the guard region
}
#endif
// We always want to be in co-operative mode when we run this function and whenever we return
// from it, want to go to pre-emptive mode because are returning to OS.
_ASSERTE(pThread->PreemptiveGCDisabled());
BOOL bPopNestedHandlerExRecord = FALSE;
LFH found = LFH_NOT_FOUND; // Result of calling LookForHandler.
BOOL bRethrownException = FALSE;
BOOL bNestedException = FALSE;
#if defined(USE_FEF)
BOOL bPopFaultingExceptionFrame = FALSE;
FrameWithCookie<FaultingExceptionFrame> faultingExceptionFrame;
#endif // USE_FEF
ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
ThrowCallbackType tct;
tct.Init();
tct.pTopFrame = GetCurrFrame(pEstablisherFrame); // highest frame to search to
#ifdef _DEBUG
tct.pCurrentExceptionRecord = pEstablisherFrame;
tct.pPrevExceptionRecord = GetPrevSEHRecord(pEstablisherFrame);
#endif // _DEBUG
BOOL fIsManagedCode = pContext ? ExecutionManager::IsManagedCode(GetIP(pContext)) : FALSE;
// this establishes a marker so can determine if are processing a nested exception
// don't want to use the current frame to limit search as it could have been unwound by
// the time get to nested handler (ie if find an exception, unwind to the call point and
// then resume in the catch and then get another exception) so make the nested handler
// have the same boundary as this one. If nested handler can't find a handler, we won't
// end up searching this frame list twice because the nested handler will set the search
// boundary in the thread and so if get back to this handler it will have a range that starts
// and ends at the same place.
NestedHandlerExRecord nestedHandlerExRecord;
nestedHandlerExRecord.Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, GetCurrFrame(pEstablisherFrame));
INSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));
bPopNestedHandlerExRecord = TRUE;
#if defined(USE_FEF)
// Note: don't attempt to push a FEF for an exception in managed code if we weren't in cooperative mode when
// the exception was received. If preemptive GC was enabled when we received the exception, then it means the
// exception was rethrown from unmangaed code (including EE impl), and we shouldn't push a FEF.
if (fIsManagedCode &&
fPGCDisabledOnEntry &&
(pThread->m_pFrame == FRAME_TOP ||
pThread->m_pFrame->GetVTablePtr() != FaultingExceptionFrame::GetMethodFrameVPtr() ||
(size_t)pThread->m_pFrame > (size_t)pEstablisherFrame))
{
// setup interrupted frame so that GC during calls to init won't collect the frames
// only need it for non COM+ exceptions in managed code when haven't already
// got one on the stack (will have one already if we have called rtlunwind because
// the instantiation that called unwind would have installed one)
faultingExceptionFrame.InitAndLink(pContext);
bPopFaultingExceptionFrame = TRUE;
}
#endif // USE_FEF
OBJECTREF e;
e = pThread->LastThrownObject();
STRESS_LOG7(LF_EH, LL_INFO10, "CPFH_RealFirstPassHandler: code:%X, LastThrownObject:%p, MT:%pT"
", IP:%p, SP:%p, pContext:%p, pEstablisherFrame:%p\n",
exceptionCode, OBJECTREFToObject(e), (e!=0)?e->GetMethodTable():0,
pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0,
pContext, pEstablisherFrame);
#ifdef LOGGING
// If it is a complus exception, and there is a thrown object, get its name, for better logging.
if (IsComPlusException(pExceptionRecord))
{
const char * eClsName = "!EXCEPTION_COMPLUS";
if (e != 0)
{
eClsName = e->GetMethodTable()->GetDebugClassName();
}
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: exception: 0x%08X, class: '%s', IP: 0x%p\n",
exceptionCode, eClsName, pContext ? GetIP(pContext) : NULL));
}
#endif
EXCEPTION_POINTERS exceptionPointers = {pExceptionRecord, pContext};
STRESS_LOG4(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: setting boundaries: Exinfo: 0x%p, BottomMostHandler:0x%p, SearchBoundary:0x%p, TopFrame:0x%p\n",
pExInfo, pExInfo->m_pBottomMostHandler, pExInfo->m_pSearchBoundary, tct.pTopFrame);
// Here we are trying to decide if we are coming in as:
// 1) first handler in a brand new exception
// 2) a subsequent handler in an exception
// 3) a nested exception
// m_pBottomMostHandler is the registration structure (establisher frame) for the most recent (ie lowest in
// memory) non-nested handler that was installed and pEstablisher frame is what the current handler
// was registered with.
// The OS calls each registered handler in the chain, passing its establisher frame to it.
if (pExInfo->m_pBottomMostHandler != NULL && pEstablisherFrame > pExInfo->m_pBottomMostHandler)
{
STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: detected subsequent handler. ExInfo:0x%p, BottomMost:0x%p SearchBoundary:0x%p\n",
pExInfo, pExInfo->m_pBottomMostHandler, pExInfo->m_pSearchBoundary);
// If the establisher frame of this handler is greater than the bottommost then it must have been
// installed earlier and therefore we are case 2
if (pThread->GetThrowable() == NULL)
{
// Bottommost didn't setup a throwable, so not exception not for us
retval = ExceptionContinueSearch;
goto exit;
}
// setup search start point
tct.pBottomFrame = pExInfo->m_pSearchBoundary;
if (tct.pTopFrame == tct.pBottomFrame)
{
// this will happen if our nested handler already searched for us so we don't want
// to search again
retval = ExceptionContinueSearch;
goto exit;
}
}
else
{ // we are either case 1 or case 3
#if defined(_DEBUG_IMPL)
//@todo: merge frames, context, handlers
if (pThread->GetFrame() != FRAME_TOP)
pThread->GetFrame()->LogFrameChain(LF_EH, LL_INFO1000);
#endif // _DEBUG_IMPL
// If the exception was rethrown, we'll create a new ExInfo, which will represent the rethrown exception.
// The original exception is not the rethrown one.
if (pExInfo->m_ExceptionFlags.IsRethrown() && pThread->LastThrownObject() != NULL)
{
pExInfo->m_ExceptionFlags.ResetIsRethrown();
bRethrownException = TRUE;
#if defined(USE_FEF)
if (bPopFaultingExceptionFrame)
{
// if we added a FEF, it will refer to the frame at the point of the original exception which is
// already unwound so don't want it.
// If we rethrew the exception we have already added a helper frame for the rethrow, so don't
// need this one. If we didn't rethrow it, (ie rethrow from native) then there the topmost frame will
// be a transition to native frame in which case we don't need it either
faultingExceptionFrame.Pop();
bPopFaultingExceptionFrame = FALSE;
}
#endif
}
// If the establisher frame is less than the bottommost handler, then this is nested because the
// establisher frame was installed after the bottommost.
if (pEstablisherFrame < pExInfo->m_pBottomMostHandler
/* || IsComPlusNestedExceptionRecord(pEstablisherFrame) */ )
{
bNestedException = TRUE;
// case 3: this is a nested exception. Need to save and restore the thread info
STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: ExInfo:0x%p detected nested exception 0x%p < 0x%p\n",
pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler);
EXCEPTION_REGISTRATION_RECORD* pNestedER = TryFindNestedEstablisherFrame(pEstablisherFrame);
ExInfo *pNestedExInfo;
if (!pNestedER || pNestedER >= pExInfo->m_pBottomMostHandler )
{
// RARE CASE. We've re-entered the EE from an unmanaged filter.
//
// OR
//
// We can be here if we dont find a nested exception handler. This is exemplified using
// call chain of scenario 2 explained further below.
//
// Assuming __try of NativeB throws an exception E1 and it gets caught in ManagedA2, then
// bottom-most handler (BMH) is going to be CPFH_A. The catch will trigger an unwind
// and invoke __finally in NativeB. Let the __finally throw a new exception E2.
//
// Assuming ManagedB2 has a catch block to catch E2, when we enter CPFH_B looking for a
// handler for E2, our establisher frame will be that of CPFH_B, which will be lower
// in stack than current BMH (which is CPFH_A). Thus, we will come here, determining
// E2 to be nested exception correctly but not find a nested exception handler.
void *limit = (void *) GetPrevSEHRecord(pExInfo->m_pBottomMostHandler);
pNestedExInfo = new (nothrow) ExInfo(); // Very rare failure here; need robust allocator.
if (pNestedExInfo == NULL)
{ // if we can't allocate memory, we can't correctly continue.
#if defined(_DEBUG)
if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NestedEhOom))
_ASSERTE(!"OOM in callback from unmanaged filter.");
#endif // _DEBUG
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_OUTOFMEMORY);
}
pNestedExInfo->m_StackAddress = limit; // Note: this is also the flag that tells us this
// ExInfo was stack allocated.
}
else
{
pNestedExInfo = &((NestedHandlerExRecord*)pNestedER)->m_handlerInfo;
}
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: PushExInfo() current: 0x%p previous: 0x%p\n",
pExInfo->m_StackAddress, pNestedExInfo->m_StackAddress));
_ASSERTE(pNestedExInfo);
pNestedExInfo->m_hThrowable = NULL; // pNestedExInfo may be stack allocated, and as such full of
// garbage. m_hThrowable must be sane, so set it to NULL. (We could
// zero the entire record, but this is cheaper.)
pNestedExInfo->CopyAndClearSource(pExInfo);
pExInfo->m_pPrevNestedInfo = pNestedExInfo; // Save at head of nested info chain
#if 0
/* the following code was introduced in Whidbey as part of the Faulting Exception Frame removal (12/03).
However it isn't correct. If any nested exceptions occur while processing a rethrow, we would
incorrectly consider the nested exception to be a rethrow. See VSWhidbey 349379 for an example.
Therefore I am disabling this code until we see a failure that explains why it was added in the first
place. cwb 9/04.
*/
// If we're here as a result of a rethrown exception, set the rethrown flag on the new ExInfo.
if (bRethrownException)
{
pExInfo->m_ExceptionFlags.SetIsRethrown();
}
#endif
}
else
{
// At this point, either:
//
// 1) the bottom-most handler is NULL, implying this is a new exception for which we are getting ready, OR
// 2) the bottom-most handler is not-NULL, implying that a there is already an existing exception in progress.
//
// Scenario 1 is that of a new throw and is easy to understand. Scenario 2 is the interesting one.
//
// ManagedA1 -> ManagedA2 -> ManagedA3 -> NativeCodeA -> ManagedB1 -> ManagedB2 -> ManagedB3 -> NativeCodeB
//
// On x86, each block of managed code is protected by one COMPlusFrameHandler [CPFH] (CLR's exception handler
// for managed code), unlike 64bit where each frame has a personality routine attached to it. Thus,
// for the example above, assume CPFH_A protects ManagedA* blocks and is setup just before the call to
// ManagedA1. Likewise, CPFH_B protects ManagedB* blocks and is setup just before the call to ManagedB1.
//
// When ManagedB3 throws an exception, CPFH_B is invoked to look for a handler in all of the ManagedB* blocks.
// At this point, it is setup as the "bottom-most-handler" (BMH). If no handler is found and exception reaches
// ManagedA* blocks, CPFH_A is invoked to look for a handler and thus, becomes BMH.
//
// Thus, in the first pass on x86 for a given exception, a particular CPFH will be invoked only once when looking
// for a handler and thus, registered as BMH only once. Either the exception goes unhandled and the process will
// terminate or a handler will be found and second pass will commence.
//
// However, assume NativeCodeB had a __try/__finally and raised an exception [E1] within the __try. Let's assume
// it gets caught in ManagedB1 and thus, unwind is triggered. At this point, the active exception tracker
// has context about the exception thrown out of __try and CPFH_B is registered as BMH.
//
// If the __finally throws a new exception [E2], CPFH_B will be invoked again for first pass while looking for
// a handler for the thrown exception. Since BMH is already non-NULL, we will come here since EstablisherFrame will be
// the same as BMH (because EstablisherFrame will be that of CPFH_B). We will proceed to overwrite the "required" parts
// of the existing exception tracker with the details of E2 (see setting of exception record and context below), erasing
// any artifact of E1.
//
// This is unlike Scenario 1 when exception tracker is completely initialized to default values. This is also
// unlike 64bit which will detect that E1 and E2 are different exceptions and hence, will setup a new tracker
// to track E2, effectively behaving like Scenario 1 above. X86 cannot do this since there is no nested exception
// tracker setup that gets to see the new exception.
//
// Thus, if E1 was a CSE and E2 isn't, we will come here and treat E2 as a CSE as well since corruption severity
// is initialized as part of exception tracker initialization. Thus, E2 will start to be treated as CSE, which is
// incorrect. Similar argument applies to delivery of First chance exception notification delivery.
//
// <QUIP> Another example why we should unify EH systems :) </QUIP>
//
// To address this issue, we will need to reset exception tracker here, just like the overwriting of "required"
// parts of exception tracker.
// If the current establisher frame is the same as the bottom-most-handler and we are here
// in the first pass, assert that current exception and the one tracked by active exception tracker
// are indeed different exceptions. In such a case, we must reset the exception tracker so that it can be
// setup correctly further down when CEHelper::SetupCorruptionSeverityForActiveException is invoked.
if ((pExInfo->m_pBottomMostHandler != NULL) &&
(pEstablisherFrame == pExInfo->m_pBottomMostHandler))
{
// Current exception should be different from the one exception tracker is already tracking.
_ASSERTE(pExceptionRecord != pExInfo->m_pExceptionRecord);
// This cannot be nested exceptions - they are handled earlier (see above).
_ASSERTE(!bNestedException);
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: Bottom-most handler (0x%p) is the same as EstablisherFrame.\n",
pExInfo->m_pBottomMostHandler));
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: Exception record in exception tracker is 0x%p, while that of new exception is 0x%p.\n",
pExInfo->m_pExceptionRecord, pExceptionRecord));
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: Resetting exception tracker (0x%p).\n", pExInfo));
// This will reset the exception tracker state, including the corruption severity.
pExInfo->Init();
}
}
// If we are handling a fault from managed code, we need to set the Thread->ExInfo->pContext to
// the current fault context, which is used in the stack walk to get back into the managed
// stack with the correct registers. (Previously, this was done by linking in a FaultingExceptionFrame
// record.)
// We are about to create the managed exception object, which may trigger a GC, so set this up now.
pExInfo->m_pExceptionRecord = pExceptionRecord;
pExInfo->m_pContext = pContext;
if (pContext && ShouldHandleManagedFault(pExceptionRecord, pContext, pEstablisherFrame, pThread))
{ // If this was a fault in managed code, rather than create a Frame for stackwalking,
// we can use this exinfo (after all, it has all the register info.)
pExInfo->m_ExceptionFlags.SetUseExInfoForStackwalk();
}
// It should now be safe for a GC to happen.
// case 1 & 3: this is the first time through of a new, nested, or rethrown exception, so see if we can
// find a handler. Only setup throwable if are bottommost handler
if (IsComPlusException(pExceptionRecord) && (!bAsynchronousThreadStop))
{
// Update the throwable from the last thrown object. Note: this may cause OOM, in which case we replace
// both throwables with the preallocated OOM exception.
pThread->SafeSetThrowables(pThread->LastThrownObject());
// now we've got a COM+ exception, fall through to so see if we handle it
STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: fall through ExInfo:0x%p setting m_pBottomMostHandler to 0x%p from 0x%p\n",
pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler);
pExInfo->m_pBottomMostHandler = pEstablisherFrame;
}
else if (bRethrownException)
{
// If it was rethrown and not COM+, will still be the last one thrown. Either we threw it last and
// stashed it here or someone else caught it and rethrew it, in which case it will still have been
// originally stashed here.
// Update the throwable from the last thrown object. Note: this may cause OOM, in which case we replace
// both throwables with the preallocated OOM exception.
pThread->SafeSetThrowables(pThread->LastThrownObject());
STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: rethrow non-COM+ ExInfo:0x%p setting m_pBottomMostHandler to 0x%p from 0x%p\n",
pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler);
pExInfo->m_pBottomMostHandler = pEstablisherFrame;
}
else
{
if (!fIsManagedCode)
{
tct.bDontCatch = false;
}
if (exceptionCode == STATUS_BREAKPOINT)
{
// don't catch int 3
retval = ExceptionContinueSearch;
goto exit;
}
// We need to set m_pBottomMostHandler here, Thread::IsExceptionInProgress returns 1.
// This is a necessary part of suppressing thread abort exceptions in the constructor
// of any exception object we might create.
STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: setting ExInfo:0x%p m_pBottomMostHandler for IsExceptionInProgress to 0x%p from 0x%p\n",
pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler);
pExInfo->m_pBottomMostHandler = pEstablisherFrame;
// Create the managed exception object.
OBJECTREF throwable = CreateCOMPlusExceptionObject(pThread, pExceptionRecord, bAsynchronousThreadStop);
// Set the throwables on the thread to the newly created object. If this fails, it will return a
// preallocated exception object instead. This also updates the last thrown exception, for rethrows.
throwable = pThread->SafeSetThrowables(throwable);
// Set the exception code and pointers. We set these after setting the throwables on the thread,
// because if the proper exception is replaced by an OOM exception, we still want the exception code
// and pointers set in the OOM exception.
EXCEPTIONREF exceptionRef = (EXCEPTIONREF)throwable;
exceptionRef->SetXCode(pExceptionRecord->ExceptionCode);
exceptionRef->SetXPtrs(&exceptionPointers);
}
tct.pBottomFrame = NULL;
EEToProfilerExceptionInterfaceWrapper::ExceptionThrown(pThread);
g_exceptionCount++;
} // End of case-1-or-3
{
// Allocate storage for the stack trace.
OBJECTREF throwable = NULL;
GCPROTECT_BEGIN(throwable);
throwable = pThread->GetThrowable();
if (IsProcessCorruptedStateException(exceptionCode, throwable))
{
// Failfast if exception indicates corrupted process state
EEPOLICY_HANDLE_FATAL_ERROR(exceptionCode);
}
// If we're out of memory, then we figure there's probably not memory to maintain a stack trace, so we skip it.
// If we've got a stack overflow, then we figure the stack will be so huge as to make tracking the stack trace
// impracticle, so we skip it.
if ((throwable == CLRException::GetPreallocatedOutOfMemoryException()) ||
(throwable == CLRException::GetPreallocatedStackOverflowException()))
{
tct.bAllowAllocMem = FALSE;
}
else
{
pExInfo->m_StackTraceInfo.AllocateStackTrace();
}
GCPROTECT_END();
}
// Set up information for GetExceptionPointers()/GetExceptionCode() callback.
pExInfo->SetExceptionCode(pExceptionRecord);
pExInfo->m_pExceptionPointers = &exceptionPointers;
if (bRethrownException || bNestedException)
{
_ASSERTE(pExInfo->m_pPrevNestedInfo != NULL);
SetStateForWatsonBucketing(bRethrownException, pExInfo->GetPreviousExceptionTracker()->GetThrowableAsHandle());
}
#ifdef DEBUGGING_SUPPORTED
//
// At this point the exception is still fresh to us, so assert that
// there should be nothing from the debugger on it.
//
_ASSERTE(!pExInfo->m_ExceptionFlags.DebuggerInterceptInfo());
#endif
if (pThread->IsRudeAbort())
{
OBJECTREF throwable = pThread->GetThrowable();
if (throwable == NULL || !IsExceptionOfType(kThreadAbortException, &throwable))
{
// Neither of these sets will throw because the throwable that we're setting is a preallocated
// exception. This also updates the last thrown exception, for rethrows.
pThread->SafeSetThrowables(CLRException::GetBestThreadAbortException());
}
if (!pThread->IsRudeAbortInitiated())
{
pThread->PreWorkForThreadAbort();
}
}
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: looking for handler bottom %x, top %x\n",
tct.pBottomFrame, tct.pTopFrame));
tct.bReplaceStack = pExInfo->m_pBottomMostHandler == pEstablisherFrame && !bRethrownException;
tct.bSkipLastElement = bRethrownException && bNestedException;
found = LookForHandler(&exceptionPointers,
pThread,
&tct);
// We have searched this far.
pExInfo->m_pSearchBoundary = tct.pTopFrame;
LOG((LF_EH, LL_INFO1000, "CPFH_RealFirstPassHandler: set pSearchBoundary to 0x%p\n", pExInfo->m_pSearchBoundary));
if ((found == LFH_NOT_FOUND)
#ifdef DEBUGGING_SUPPORTED
&& !pExInfo->m_ExceptionFlags.DebuggerInterceptInfo()
#endif
)
{
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: NOT_FOUND\n"));
if (tct.pTopFrame == FRAME_TOP)
{
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: NOT_FOUND at FRAME_TOP\n"));
}
retval = ExceptionContinueSearch;
goto exit;
}
else
{
// so we are going to handle the exception
// Remove the nested exception record -- before calling RtlUnwind.
// The second-pass callback for a NestedExceptionRecord assumes that if it's
// being unwound, it should pop one exception from the pExInfo chain. This is
// true for any older NestedRecords that might be unwound -- but not for the
// new one we're about to add. To avoid this, we remove the new record
// before calling Unwind.
//
// <TODO>@NICE: This can probably be a little cleaner -- the nested record currently
// is also used to guard the running of the filter code. When we clean up the
// behaviour of exceptions within filters, we should be able to get rid of this
// PUSH/POP/PUSH behaviour.</TODO>
_ASSERTE(bPopNestedHandlerExRecord);
UNINSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));
// Since we are going to handle the exception we switch into preemptive mode
GCX_PREEMP_NO_DTOR();
#ifdef DEBUGGING_SUPPORTED
//
// Check if the debugger wants to intercept this frame at a different point than where we are.
//
if (pExInfo->m_ExceptionFlags.DebuggerInterceptInfo())
{
ClrDebuggerDoUnwindAndIntercept(pEstablisherFrame, pExceptionRecord);
//
// If this returns, then the debugger couldn't do it's stuff and we default to the found handler.
//
if (found == LFH_NOT_FOUND)
{
retval = ExceptionContinueSearch;
// we need to be sure to switch back into Cooperative mode since we are going to
// jump to the exit: label and follow the normal return path (it is expected that
// CPFH_RealFirstPassHandler returns in COOP.
GCX_PREEMP_NO_DTOR_END();
goto exit;
}
}
#endif
LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: handler found: %s\n", tct.pFunc->m_pszDebugMethodName));
CallRtlUnwindSafe(pEstablisherFrame, RtlUnwindCallback, pExceptionRecord, 0);
// on x86 at least, RtlUnwind always returns
// The CallRtlUnwindSafe could have popped the explicit frame that the tct.pBottomFrame points to (UMThunkPrestubHandler
// does that). In such case, the tct.pBottomFrame needs to be updated to point to the first valid explicit frame.
Frame* frame = pThread->GetFrame();
if ((tct.pBottomFrame != NULL) && (frame > tct.pBottomFrame))
{
tct.pBottomFrame = frame;
}
// Note: we've completed the unwind pass up to the establisher frame, and we're headed off to finish our
// cleanup and end up back in jitted code. Any more FS0 handlers pushed from this point on out will _not_ be
// unwound.
// Note: we are still in Preemptive mode here and that is correct, COMPlusAfterUnwind will switch us back
// into Cooperative mode.
return COMPlusAfterUnwind(pExceptionRecord, pEstablisherFrame, tct);
}
exit:
{
// We need to be in COOP if we get here
GCX_ASSERT_COOP();
}
// If we got as far as saving pExInfo, save the context pointer so it's available for the unwind.
if (pExInfo)
{
pExInfo->m_pContext = pContext;
// pExInfo->m_pExceptionPointers points to a local structure, which is now going out of scope.
pExInfo->m_pExceptionPointers = NULL;
}
#if defined(USE_FEF)
if (bPopFaultingExceptionFrame)
{
faultingExceptionFrame.Pop();
}
#endif // USE_FEF
if (bPopNestedHandlerExRecord)
{
UNINSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));
}
return retval;
} // CPFH_RealFirstPassHandler()
//******************************************************************************
//
//
// One-time process-wide initialization of the x86 exception handling machinery:
// sets up the saved-exception-info state, registers the CLR's vectored exception
// handlers with the OS, and initializes the lock guarding managed stack traces.
//
void InitializeExceptionHandling()
{
    WRAPPER_NO_CONTRACT;

    InitSavedExceptionInfo();

    CLRAddVectoredHandlers();

    // Initialize the lock used for synchronizing access to the stacktrace in the exception object
    g_StackTraceArrayLock.Init(LOCK_TYPE_DEFAULT, TRUE);
}
//******************************************************************************
//
// First pass (search phase) of SEH dispatch for the CLR's x86 frame handler.
// Gives the vectored handler first crack at the exception, filters out GC-stress
// marker faults, adjusts the context for hijacked asynchronous thread stops,
// optionally arranges a FaultingExceptionFrame for faults in managed code, and
// then delegates the real handler search to CPFH_RealFirstPassHandler.
//
// Returns the EXCEPTION_DISPOSITION to hand back to the OS dispatcher.
//
static inline EXCEPTION_DISPOSITION __cdecl
CPFH_FirstPassHandler(EXCEPTION_RECORD *pExceptionRecord,
                      EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
                      CONTEXT *pContext,
                      DISPATCHER_CONTEXT *pDispatcherContext)
{
    WRAPPER_NO_CONTRACT;
    EXCEPTION_DISPOSITION retval;

    // This routine handles the first pass only; unwinds go to CPFH_UnwindHandler.
    _ASSERTE (!(pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)));

    DWORD exceptionCode = pExceptionRecord->ExceptionCode;
    Thread *pThread = GetThread();

    STRESS_LOG4(LF_EH, LL_INFO100,
                "CPFH_FirstPassHandler: pEstablisherFrame = %x EH code = %x EIP = %x with ESP = %x\n",
                pEstablisherFrame, exceptionCode, pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0);

    EXCEPTION_POINTERS ptrs = { pExceptionRecord, pContext };

    // Call to the vectored handler to give other parts of the Runtime a chance to jump in and take over an
    // exception before we do too much with it. The most important point in the vectored handler is not to toggle
    // the GC mode.
    DWORD filter = CLRVectoredExceptionHandler(&ptrs);

    if (filter == (DWORD) EXCEPTION_CONTINUE_EXECUTION)
    {
        return ExceptionContinueExecution;
    }
    else if (filter == EXCEPTION_CONTINUE_SEARCH)
    {
        return ExceptionContinueSearch;
    }

#if defined(STRESS_HEAP)
    //
    // Check to see if this exception is due to GCStress. Since the GCStress mechanism only injects these faults
    // into managed code, we only need to check for them in CPFH_FirstPassHandler.
    //
    if (IsGcMarker(pContext, pExceptionRecord))
    {
        return ExceptionContinueExecution;
    }
#endif // STRESS_HEAP

    // We always want to be in co-operative mode when we run this function and whenever we return
    // from it, want to go to pre-emptive mode because are returning to OS.
    BOOL disabled = pThread->PreemptiveGCDisabled();
    GCX_COOP_NO_DTOR();

    BOOL bAsynchronousThreadStop = IsThreadHijackedForThreadStop(pThread, pExceptionRecord);

    if (bAsynchronousThreadStop)
    {
        // If we ever get here in preemptive mode, we're in trouble. We've
        // changed the thread's IP to point at a little function that throws ... if
        // the thread were to be in preemptive mode and a GC occurred, the stack
        // crawl would have been all messed up (because we have no frame that points
        // us back to the right place in managed code).
        _ASSERTE(disabled);

        AdjustContextForThreadStop(pThread, pContext);
        LOG((LF_EH, LL_INFO100, "CPFH_FirstPassHandler is Asynchronous Thread Stop or Abort\n"));
    }

    pThread->ResetThrowControlForThread();

    CPFH_VerifyThreadIsInValidState(pThread, exceptionCode, pEstablisherFrame);

    // If we were in cooperative mode when we came in here, then its okay to see if we should do HandleManagedFault
    // and push a FaultingExceptionFrame. If we weren't in coop mode coming in here, then it means that there's no
    // way the exception could really be from managed code. I might look like it was from managed code, but in
    // reality its a rethrow from unmanaged code, either unmanaged user code, or unmanaged EE implementation.
    if (disabled && ShouldHandleManagedFault(pExceptionRecord, pContext, pEstablisherFrame, pThread))
    {
#if defined(USE_FEF)
        HandleManagedFault(pExceptionRecord, pContext, pEstablisherFrame, pThread);
        retval = ExceptionContinueExecution;
        goto exit;
#else // USE_FEF
        // Save the context pointer in the Thread's EXInfo, so that a stack crawl can recover the
        // register values from the fault.

        //@todo: I haven't yet found any case where we need to do anything here. If there are none, eliminate
        // this entire if () {} block.
#endif // USE_FEF
    }

    // OK. We're finally ready to start the real work. Nobody else grabbed the exception in front of us. Now we can
    // get started.
    retval = CPFH_RealFirstPassHandler(pExceptionRecord,
                                       pEstablisherFrame,
                                       pContext,
                                       pDispatcherContext,
                                       bAsynchronousThreadStop,
                                       disabled);

#if defined(USE_FEF) // This label is only used in the HandleManagedFault() case above.
exit:
#endif
    // Restore preemptive mode on the way back to the OS unless we are resuming
    // execution while the thread was already in cooperative mode.
    if (retval != ExceptionContinueExecution || !disabled)
    {
        GCX_PREEMP_NO_DTOR();
    }

    STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_FirstPassHandler: exiting with retval %d\n", retval);
    return retval;
} // CPFH_FirstPassHandler()
//******************************************************************************
//
// Second-pass helper: unwinds the managed Frame chain up to the establisher
// frame's limit, honoring any debugger interception target stored in the
// DebuggerExState, and pops the thread's ExInfo once the exception has been
// unwound past its search boundary (i.e. an unmanaged catcher took it).
//
inline void
CPFH_UnwindFrames1(Thread* pThread, EXCEPTION_REGISTRATION_RECORD* pEstablisherFrame, DWORD exceptionCode)
{
    WRAPPER_NO_CONTRACT;

    ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);

    // Ready to unwind the stack...
    ThrowCallbackType tct;
    tct.Init();
    tct.bIsUnwind = TRUE;
    tct.pTopFrame = GetCurrFrame(pEstablisherFrame); // highest frame to search to
    tct.pBottomFrame = NULL;

#ifdef _DEBUG
    tct.pCurrentExceptionRecord = pEstablisherFrame;
    tct.pPrevExceptionRecord = GetPrevSEHRecord(pEstablisherFrame);
#endif

#ifdef DEBUGGING_SUPPORTED
    EXCEPTION_REGISTRATION_RECORD *pInterceptEstablisherFrame = NULL;

    // If the exception is intercepted, use information stored in the DebuggerExState to unwind the stack.
    if (pExInfo->m_ExceptionFlags.DebuggerInterceptInfo())
    {
        pExInfo->m_DebuggerExState.GetDebuggerInterceptInfo(&pInterceptEstablisherFrame,
                                      NULL,     // MethodDesc **ppFunc,
                                      NULL,     // int *pdHandler,
                                      NULL,     // BYTE **ppStack
                                      NULL,     // ULONG_PTR *pNativeOffset,
                                      NULL      // Frame **ppFrame)
                                     );
        LOG((LF_EH, LL_INFO1000, "CPFH_UnwindFrames1: frames are Est 0x%X, Intercept 0x%X\n",
             pEstablisherFrame, pInterceptEstablisherFrame));

        //
        // When we set up for the interception we store off the CPFH or CPNEH that we
        // *know* will handle unwinding the destination of the intercept.
        //
        // However, a CPNEH with the same limiting Capital-F-rame could do the work
        // and unwind us, so...
        //
        // If this is the exact frame handler we are supposed to search for, or
        // if this frame handler services the same Capital-F-rame as the frame handler
        // we are looking for (i.e. this frame handler may do the work that we would
        // expect our frame handler to do),
        // then
        // we need to pass the interception destination during this unwind.
        //
        _ASSERTE(IsUnmanagedToManagedSEHHandler(pEstablisherFrame));

        if ((pEstablisherFrame == pInterceptEstablisherFrame) ||
            (GetCurrFrame(pEstablisherFrame) == GetCurrFrame(pInterceptEstablisherFrame)))
        {
            pExInfo->m_DebuggerExState.GetDebuggerInterceptInfo(NULL,
                                          &(tct.pFunc),
                                          &(tct.dHandler),
                                          &(tct.pStack),
                                          NULL,
                                          &(tct.pBottomFrame)
                                         );

            LOG((LF_EH, LL_INFO1000, "CPFH_UnwindFrames1: going to: pFunc:%#X, pStack:%#X\n",
                 tct.pFunc, tct.pStack));
        }
    }
#endif

    UnwindFrames(pThread, &tct);

    LOG((LF_EH, LL_INFO1000, "CPFH_UnwindFrames1: after unwind ec:%#x, tct.pTopFrame:0x%p, pSearchBndry:0x%p\n"
        " pEstFrame:0x%p, IsC+NestExRec:%d, !Nest||Active:%d\n",
         exceptionCode, tct.pTopFrame, pExInfo->m_pSearchBoundary, pEstablisherFrame,
         IsComPlusNestedExceptionRecord(pEstablisherFrame),
         (!IsComPlusNestedExceptionRecord(pEstablisherFrame) || reinterpret_cast<NestedHandlerExRecord*>(pEstablisherFrame)->m_ActiveForUnwind)));

    if (tct.pTopFrame >= pExInfo->m_pSearchBoundary &&
         (!IsComPlusNestedExceptionRecord(pEstablisherFrame) ||
          reinterpret_cast<NestedHandlerExRecord*>(pEstablisherFrame)->m_ActiveForUnwind) )
    {
        // If this is the search boundary, and we're not a nested handler, then
        // this is the last time we'll see this exception. Time to unwind our
        // exinfo.
        STRESS_LOG0(LF_EH, LL_INFO100, "CPFH_UnwindFrames1: Exception unwind -- unmanaged catcher detected\n");
        pExInfo->UnwindExInfo((VOID*)pEstablisherFrame);
    }
} // CPFH_UnwindFrames1()
//******************************************************************************
//
// Second pass (unwind phase) of SEH dispatch for the CLR's x86 frame handler.
// Installs a nested handler record to bound any nested exceptions raised while
// running finally/fault code, unwinds the managed frames up to the establisher
// frame, and keeps the ExInfo's bottom-most-handler pointer off the portion of
// the stack that is about to disappear. Always returns ExceptionContinueSearch.
//
inline EXCEPTION_DISPOSITION __cdecl
CPFH_UnwindHandler(EXCEPTION_RECORD *pExceptionRecord,
                   EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
                   CONTEXT *pContext,
                   void *pDispatcherContext)
{
    WRAPPER_NO_CONTRACT;

    // This routine handles the unwind pass only; the search pass goes to CPFH_FirstPassHandler.
    _ASSERTE (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND));

#ifdef _DEBUG
    // Note: you might be inclined to write "static int breakOnSecondPass = CLRConfig::GetConfigValue(...);", but
    // you can't do that here. That causes C++ EH to be generated under the covers for this function, and this
    // function isn't allowed to have any C++ EH in it because its never going to return.

    static int breakOnSecondPass; // = 0
    static BOOL breakOnSecondPassSetup; // = FALSE
    if (!breakOnSecondPassSetup)
    {
        breakOnSecondPass = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnSecondPass);
        breakOnSecondPassSetup = TRUE;
    }
    if (breakOnSecondPass != 0)
    {
        _ASSERTE(!"Unwind handler");
    }
#endif

    DWORD exceptionCode = pExceptionRecord->ExceptionCode;
    Thread *pThread = GetThread();

    ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);

    STRESS_LOG4(LF_EH, LL_INFO100, "In CPFH_UnwindHandler EHCode = %x EIP = %x with ESP = %x, pEstablisherFrame = 0x%p\n", exceptionCode,
        pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0, pEstablisherFrame);

    // We always want to be in co-operative mode when we run this function. Whenever we return
    // from it, want to go to pre-emptive mode because are returning to OS.
    {
        // needs to be in its own scope to avoid polluting the namespace, since
        // we don't do a _END then we don't revert the state
        GCX_COOP_NO_DTOR();
    }

    CPFH_VerifyThreadIsInValidState(pThread, exceptionCode, pEstablisherFrame);

    if (IsComPlusNestedExceptionRecord(pEstablisherFrame))
    {
        NestedHandlerExRecord *pHandler = reinterpret_cast<NestedHandlerExRecord*>(pEstablisherFrame);
        if (pHandler->m_pCurrentExInfo != NULL)
        {
            // See the comment at the end of COMPlusNestedExceptionHandler about nested exception.
            // OS is going to skip the EstablisherFrame before our NestedHandler.
            if (pHandler->m_pCurrentExInfo->m_pBottomMostHandler <= pHandler->m_pCurrentHandler)
            {
                // We're unwinding -- the bottom most handler is potentially off top-of-stack now. If
                // it is, change it to the next COM+ frame. (This one is not good, as it's about to
                // disappear.)
                EXCEPTION_REGISTRATION_RECORD *pNextBottomMost = GetNextCOMPlusSEHRecord(pHandler->m_pCurrentHandler);

                STRESS_LOG3(LF_EH, LL_INFO10000, "COMPlusNestedExceptionHandler: setting ExInfo:0x%p m_pBottomMostHandler from 0x%p to 0x%p\n",
                    pHandler->m_pCurrentExInfo, pHandler->m_pCurrentExInfo->m_pBottomMostHandler, pNextBottomMost);

                pHandler->m_pCurrentExInfo->m_pBottomMostHandler = pNextBottomMost;
            }
        }
    }

    // this establishes a marker so can determine if are processing a nested exception
    // don't want to use the current frame to limit search as it could have been unwound by
    // the time get to nested handler (ie if find an exception, unwind to the call point and
    // then resume in the catch and then get another exception) so make the nested handler
    // have the same boundary as this one. If nested handler can't find a handler, we won't
    // end up searching this frame list twice because the nested handler will set the search
    // boundary in the thread and so if get back to this handler it will have a range that starts
    // and ends at the same place.
    NestedHandlerExRecord nestedHandlerExRecord;
    nestedHandlerExRecord.Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, GetCurrFrame(pEstablisherFrame));

    nestedHandlerExRecord.m_ActiveForUnwind = TRUE;
    nestedHandlerExRecord.m_pCurrentExInfo = pExInfo;
    nestedHandlerExRecord.m_pCurrentHandler = pEstablisherFrame;

    INSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));

    // Unwind the stack. The establisher frame sets the boundary.
    CPFH_UnwindFrames1(pThread, pEstablisherFrame, exceptionCode);

    // We're unwinding -- the bottom most handler is potentially off top-of-stack now. If
    // it is, change it to the next COM+ frame. (This one is not good, as it's about to
    // disappear.)
    if (pExInfo->m_pBottomMostHandler &&
        pExInfo->m_pBottomMostHandler <= pEstablisherFrame)
    {
        EXCEPTION_REGISTRATION_RECORD *pNextBottomMost = GetNextCOMPlusSEHRecord(pEstablisherFrame);

        // If there is no previous COM+ SEH handler, GetNextCOMPlusSEHRecord() will return -1. Much later, we will dereference that and AV.
        _ASSERTE (pNextBottomMost != EXCEPTION_CHAIN_END);

        STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_UnwindHandler: setting ExInfo:0x%p m_pBottomMostHandler from 0x%p to 0x%p\n",
            pExInfo, pExInfo->m_pBottomMostHandler, pNextBottomMost);

        pExInfo->m_pBottomMostHandler = pNextBottomMost;
    }

    {
        // needs to be in its own scope to avoid polluting the namespace, since
        // we don't do a _END then we don't revert the state
        GCX_PREEMP_NO_DTOR();
    }

    UNINSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));

    // If we are here, then exception was not caught in managed code protected by this
    // ComplusFrameHandler. Hence, reset thread abort state if this is the last personality routine,
    // for managed code, on the stack.
    ResetThreadAbortState(pThread, pEstablisherFrame);

    STRESS_LOG0(LF_EH, LL_INFO100, "CPFH_UnwindHandler: Leaving with ExceptionContinueSearch\n");
    return ExceptionContinueSearch;
} // CPFH_UnwindHandler()
//******************************************************************************
// This is the first handler that is called in the context of managed code
// It is the first level of defense and tries to find a handler in the user
// code to handle the exception
//-------------------------------------------------------------------------
// EXCEPTION_DISPOSITION __cdecl COMPlusFrameHandler(
// EXCEPTION_RECORD *pExceptionRecord,
// _EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
// CONTEXT *pContext,
// DISPATCHER_CONTEXT *pDispatcherContext)
//
// See http://www.microsoft.com/msj/0197/exception/exception.aspx for a background piece on Windows
// unmanaged structured exception handling.
// The CLR's x86 SEH personality routine for managed frames. Saves exception
// info, special-cases stack overflow in both passes, and dispatches to the
// first-pass (search) or second-pass (unwind) handler as appropriate.
EXCEPTION_HANDLER_IMPL(COMPlusFrameHandler)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(!DebugIsEECxxException(pExceptionRecord) && "EE C++ Exception leaked into managed code!");

    STRESS_LOG5(LF_EH, LL_INFO100, "In COMPlusFrameHander EH code = %x flag = %x EIP = %x with ESP = %x, pEstablisherFrame = 0x%p\n",
        pExceptionRecord->ExceptionCode, pExceptionRecord->ExceptionFlags,
        pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0, pEstablisherFrame);

    _ASSERTE((pContext == NULL) || ((pContext->ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL));

    if (g_fNoExceptions)
        return ExceptionContinueSearch; // No EH during EE shutdown.

    // Check if the exception represents a GCStress Marker. If it does,
    // we shouldnt record its entry in the TLS as such exceptions are
    // continuable and can confuse the VM to treat them as CSE,
    // as they are implemented using illegal instruction exception.

    bool fIsGCMarker = false;

#ifdef HAVE_GCCOVER // This is a debug only macro
    if (GCStress<cfg_instr_jit>::IsEnabled())
    {
        // TlsGetValue trashes last error. When Complus_GCStress=4, GC is invoked
        // on every allowable JITed instruction by means of our exception handling mechanism
        // it is very easy to trash the last error. For example, a p/invoke called a native method
        // which sets last error. Before we getting the last error in the IL stub, it is trashed here
        DWORD dwLastError = GetLastError();
        fIsGCMarker = IsGcMarker(pContext, pExceptionRecord);
        if (!fIsGCMarker)
        {
            SaveCurrentExceptionInfo(pExceptionRecord, pContext);
        }
        SetLastError(dwLastError);
    }
    else
#endif
    {
        // GCStress does not exist on retail builds (see IsGcMarker implementation for details).
        SaveCurrentExceptionInfo(pExceptionRecord, pContext);
    }

    if (fIsGCMarker)
    {
        // If this was a GCStress marker exception, then return
        // ExceptionContinueExecution to the OS.
        return ExceptionContinueExecution;
    }

    EXCEPTION_DISPOSITION retVal = ExceptionContinueSearch;

    Thread *pThread = GetThread();
    if ((pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)) == 0)
    {
        // First pass. Stack overflow is handled specially here, before the
        // normal first-pass dispatch, because we are short on stack space.
        if (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW)
        {
            EEPolicy::HandleStackOverflow();

            // VC's unhandled exception filter plays with stack. It VirtualAlloc's a new stack, and
            // then launch Watson from the new stack. When Watson asks CLR to save required data, we
            // are not able to walk the stack.
            // Setting Context in ExInfo so that our Watson dump routine knows how to walk this stack.
            ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
            pExInfo->m_pContext = pContext;

            // Save the reference to the topmost handler we see during first pass when an SO goes past us.
            // When an unwind gets triggered for the exception, we will reset the frame chain when we reach
            // the topmost handler we saw during the first pass.
            //
            // This unifies, behaviour-wise, 32bit with 64bit.
            if ((pExInfo->m_pTopMostHandlerDuringSO == NULL) ||
                (pEstablisherFrame > pExInfo->m_pTopMostHandlerDuringSO))
            {
                pExInfo->m_pTopMostHandlerDuringSO = pEstablisherFrame;
            }

            // Switch to preemp mode since we are returning back to the OS.
            // We will do the quick switch since we are short of stack
            FastInterlockAnd (&pThread->m_fPreemptiveGCDisabled, 0);

            return ExceptionContinueSearch;
        }
    }
    else
    {
        // Second pass. Again, stack overflow gets special, stack-frugal treatment.
        DWORD exceptionCode = pExceptionRecord->ExceptionCode;

        if (exceptionCode == STATUS_UNWIND)
        {
            // If exceptionCode is STATUS_UNWIND, RtlUnwind is called with a NULL ExceptionRecord,
            // therefore OS uses a faked ExceptionRecord with STATUS_UNWIND code. Then we need to
            // look at our saved exception code.
            exceptionCode = GetCurrentExceptionCode();
        }

        if (exceptionCode == STATUS_STACK_OVERFLOW)
        {
            // We saved the context during the first pass in case the stack overflow exception is
            // unhandled and Watson dump code needs it. Now we are in the second pass, therefore
            // either the exception is handled by user code, or we have finished unhandled exception
            // filter process, and the OS is unwinding the stack. Either way, we don't need the
            // context any more. It is very important to reset the context so that our code does not
            // accidentally walk the frame using the dangling context in ExInfoWalker::WalkToPosition.
            ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
            pExInfo->m_pContext = NULL;

            // We should have the reference to the topmost handler seen during the first pass of SO
            _ASSERTE(pExInfo->m_pTopMostHandlerDuringSO != NULL);

            // Reset frame chain till we reach the topmost establisher frame we saw in the first pass.
            // This will ensure that if any intermediary frame calls back into managed (e.g. native frame
            // containing a __finally that reverse pinvokes into managed), then we have the correct
            // explicit frame on the stack. Resetting the frame chain only when we reach the topmost
            // personality routine seen in the first pass may not result in expected behaviour,
            // specially during stack walks when crawl frame needs to be initialized from
            // explicit frame.
            if (pEstablisherFrame <= pExInfo->m_pTopMostHandlerDuringSO)
            {
                GCX_COOP_NO_DTOR();

                if (pThread->GetFrame() < GetCurrFrame(pEstablisherFrame))
                {
                    // We are very short of stack. We avoid calling UnwindFrame which may
                    // run unknown code here.
                    pThread->SetFrame(GetCurrFrame(pEstablisherFrame));
                }
            }

            // Switch to preemp mode since we are returning back to the OS.
            // We will do the quick switch since we are short of stack
            FastInterlockAnd(&pThread->m_fPreemptiveGCDisabled, 0);

            return ExceptionContinueSearch;
        }
    }

    // Normal (non-SO) dispatch: route to the unwind or first-pass handler.
    if (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND))
    {
        retVal = CPFH_UnwindHandler(pExceptionRecord,
                                    pEstablisherFrame,
                                    pContext,
                                    pDispatcherContext);
    }
    else
    {

        /* Make no assumptions about the current machine state.
           <TODO>@PERF: Only needs to be called by the very first handler invoked by SEH </TODO>*/
        ResetCurrentContext();

        retVal = CPFH_FirstPassHandler(pExceptionRecord,
                                       pEstablisherFrame,
                                       pContext,
                                       pDispatcherContext);

    }

    return retVal;
} // COMPlusFrameHandler()
//-------------------------------------------------------------------------
// This is called by the EE to restore the stack pointer if necessary.
//-------------------------------------------------------------------------
// This can't be inlined into the caller to avoid introducing EH frame
//
// Worker for COMPlusEndCatch: tears down the per-exception state after a
// managed catch block finishes. Notifies the profiler, clears the throwable
// and stashed exception info, unwinds the ExInfo to the catching handler's
// nesting level, and syncs managed exception state. Returns the ESP to
// restore before resuming at the instruction after JIT_EndCatch.
//
// This can't be inlined into the caller to avoid introducing EH frame
NOINLINE LPVOID COMPlusEndCatchWorker(Thread * pThread)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch:called with "
        "pThread:0x%x\n",pThread));

    // indicate that we are out of the managed clause as early as possible
    ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
    pExInfo->m_EHClauseInfo.SetManagedCodeEntered(FALSE);

    void* esp = NULL;

    // Notify the profiler that the catcher has finished running
    // IL stubs don't contain catch blocks so inability to perform this check does not matter.
    // if (!pFunc->IsILStub())
    EEToProfilerExceptionInterfaceWrapper::ExceptionCatcherLeave();

    // no need to set pExInfo->m_ClauseType = (DWORD)COR_PRF_CLAUSE_NONE now that the
    // notification is done because the ExInfo record is about to be popped off anyway

    LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch:pThread:0x%x\n",pThread));

#ifdef _DEBUG
    gLastResumedExceptionFunc = NULL;
    gLastResumedExceptionHandler = 0;
#endif

    // Set the thrown object to NULL as no longer needed. This also sets the last thrown object to NULL.
    pThread->SafeSetThrowables(NULL);

    // reset the stashed exception info
    pExInfo->m_pExceptionRecord = NULL;
    pExInfo->m_pContext = NULL;
    pExInfo->m_pExceptionPointers = NULL;

    if  (pExInfo->m_pShadowSP)
    {
        *pExInfo->m_pShadowSP = 0;  // Reset the shadow SP
    }

    // pExInfo->m_dEsp was set in ResumeAtJITEH(). It is the Esp of the
    // handler nesting level which catches the exception.
    esp = (void*)(size_t)pExInfo->m_dEsp;

    pExInfo->UnwindExInfo(esp);

    // Prepare to sync managed exception state
    //
    // In a case when we're nested inside another catch block, the domain in which we're executing may not be the
    // same as the one the domain of the throwable that was just made the current throwable above. Therefore, we
    // make a special effort to preserve the domain of the throwable as we update the last thrown object.
    //
    // This function (COMPlusEndCatch) can also be called by the in-proc debugger helper thread on x86 when
    // an attempt to SetIP takes place to set IP outside the catch clause. In such a case, managed thread object
    // will not be available. Thus, we should reset the severity only if its not such a thread.
    //
    // This behaviour (of debugger doing SetIP) is not allowed on 64bit since the catch clauses are implemented
    // as a separate funclet and it's just not allowed to set the IP across EH scopes, such as from inside a catch
    // clause to outside of the catch clause.
    bool fIsDebuggerHelperThread = (g_pDebugInterface == NULL) ? false : g_pDebugInterface->ThisIsHelperThread();

    // Sync managed exception state, for the managed thread, based upon any active exception tracker
    pThread->SyncManagedExceptionState(fIsDebuggerHelperThread);

    LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch: esp=%p\n", esp));

    return esp;
}
//
// This function works in conjunction with JIT_EndCatch. On input, the parameters are set as follows:
// ebp, ebx, edi, esi: the values of these registers at the end of the catch block
// *pRetAddress: the next instruction after the call to JIT_EndCatch
//
// On output, *pRetAddress is the instruction at which to resume execution. This may be user code,
// or it may be ThrowControlForThread (which will re-raise a pending ThreadAbortException).
//
// Returns the esp to set before resuming at *pRetAddress.
//
LPVOID STDCALL COMPlusEndCatch(LPVOID ebp, DWORD ebx, DWORD edi, DWORD esi, LPVOID* pRetAddress)
{
    //
    // PopNestedExceptionRecords directly manipulates fs:[0] chain. This method can't have any EH!
    //
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    ETW::ExceptionLog::ExceptionCatchEnd();
    ETW::ExceptionLog::ExceptionThrownEnd();

    // Tear down the exception state; esp is the handler nesting level at which we resume.
    void* esp = COMPlusEndCatchWorker(GetThread());

    // We are going to resume at a handler nesting level whose esp is dEsp. Pop off any SEH records below it. This
    // would be the COMPlusNestedExceptionHandler we had inserted.
    PopNestedExceptionRecords(esp);

    //
    // Set up m_OSContext for the call to COMPlusCheckForAbort
    //
    Thread* pThread = GetThread();
    SetIP(pThread->m_OSContext, (PCODE)*pRetAddress);
    SetSP(pThread->m_OSContext, (TADDR)esp);
    SetFP(pThread->m_OSContext, (TADDR)ebp);

    // Preserve the callee-saved registers from the end of the catch block.
    pThread->m_OSContext->Ebx = ebx;
    pThread->m_OSContext->Edi = edi;
    pThread->m_OSContext->Esi = esi;

    // If a thread abort is pending, redirect the return address so the abort is raised.
    LPVOID throwControl = COMPlusCheckForAbort((UINT_PTR)*pRetAddress);
    if (throwControl)
        *pRetAddress = throwControl;

    return esp;
}
// Returns the head of the current thread's FS:[0] SEH registration chain.
PEXCEPTION_REGISTRATION_RECORD GetCurrentSEHRecord()
{
    WRAPPER_NO_CONTRACT;

    // FS:[0] holds the first EXCEPTION_REGISTRATION_RECORD on x86 Windows.
    LPVOID fs0 = (LPVOID)__readfsdword(0);

#if 0  // This walk is too expensive considering we hit it every time we a CONTRACT(NOTHROW)
#ifdef _DEBUG
    EXCEPTION_REGISTRATION_RECORD *pEHR = (EXCEPTION_REGISTRATION_RECORD *)fs0;
    LPVOID spVal;
    __asm {
        mov spVal, esp
    }

    // check that all the eh frames are all greater than the current stack value. If not, the
    // stack has been updated somehow w/o unwinding the SEH chain.

    // LOG((LF_EH, LL_INFO1000000, "ER Chain:\n"));
    while (pEHR != NULL && pEHR != EXCEPTION_CHAIN_END) {
        // LOG((LF_EH, LL_INFO1000000, "\tp: prev:p handler:%x\n", pEHR, pEHR->Next, pEHR->Handler));
        if (pEHR < spVal) {
            if (gLastResumedExceptionFunc != 0)
                _ASSERTE(!"Stack is greater than start of SEH chain - possible missing leave in handler. See gLastResumedExceptionHandler & gLastResumedExceptionFunc for info");
            else
                _ASSERTE(!"Stack is greater than start of SEH chain (FS:0)");
        }
        if (pEHR->Handler == (void *)-1)
            _ASSERTE(!"Handler value has been corrupted");

        _ASSERTE(pEHR < pEHR->Next);

        pEHR = pEHR->Next;
    }
#endif
#endif // 0

    return (EXCEPTION_REGISTRATION_RECORD*) fs0;
}
// Return the first record on the given thread's SEH chain that is a CLR
// unmanaged-to-managed handler, or EXCEPTION_CHAIN_END if the chain is empty.
PEXCEPTION_REGISTRATION_RECORD GetFirstCOMPlusSEHRecord(Thread *pThread) {
    WRAPPER_NO_CONTRACT;

    EXCEPTION_REGISTRATION_RECORD *pRecord = *(pThread->GetExceptionListPtr());

    // The head already qualifies when it terminates the chain or is one of ours.
    if (pRecord == EXCEPTION_CHAIN_END)
        return pRecord;
    if (IsUnmanagedToManagedSEHHandler(pRecord))
        return pRecord;

    // Otherwise skip forward to the next CLR handler on the chain.
    return GetNextCOMPlusSEHRecord(pRecord);
}
// Walk the FS:[0] chain from its head down to (but not including) 'next',
// returning the last CLR unmanaged-to-managed handler encountered before it
// (0 if none). 'next' itself must be an unmanaged-to-managed handler.
PEXCEPTION_REGISTRATION_RECORD GetPrevSEHRecord(EXCEPTION_REGISTRATION_RECORD *next)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(IsUnmanagedToManagedSEHHandler(next));

    EXCEPTION_REGISTRATION_RECORD *pLastMatch = 0;
    EXCEPTION_REGISTRATION_RECORD *pCur = GetCurrentSEHRecord();
    _ASSERTE(pCur != 0 && pCur != EXCEPTION_CHAIN_END);

    while (pCur != next) {
        // Remember the most recent CLR handler seen so far.
        pLastMatch = IsUnmanagedToManagedSEHHandler(pCur) ? pCur : pLastMatch;
        pCur = pCur->Next;
        // 'next' is on the chain, so we must reach it before falling off the end.
        _ASSERTE(pCur != 0 && pCur != EXCEPTION_CHAIN_END);
    }

    return pLastMatch;
}
// Overwrite the head of the current thread's FS:[0] SEH registration chain.
VOID SetCurrentSEHRecord(EXCEPTION_REGISTRATION_RECORD *pSEH)
{
    WRAPPER_NO_CONTRACT;

    EXCEPTION_REGISTRATION_RECORD **ppListHead = GetThread()->GetExceptionListPtr();
    *ppListHead = pSEH;
}
// Note that this logic is copied below, in PopSEHRecords
// Pops every FS:[0] SEH registration record that lives below pTargetSP,
// leaving the first record at or above pTargetSP as the new chain head.
// Written as naked asm so no prolog/epilog or compiler EH state interferes
// with the raw FS:[0] manipulation.
__declspec(naked)
VOID __cdecl PopSEHRecords(LPVOID pTargetSP)
{
    // No CONTRACT possible on naked functions
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;

    __asm{
        mov     ecx, [esp+4]        ;; ecx <- pTargetSP
        mov     eax, fs:[0]         ;; get current SEH record
  poploop:
        cmp     eax, ecx
        jge     done                ;; stop at the first record at/above pTargetSP
        mov     eax, [eax]          ;; get next SEH record
        jmp     poploop
  done:
        mov     fs:[0], eax         ;; publish the new chain head
        retn
    }
}
//
// Unwind pExinfo, pops FS:[0] handlers until the interception context SP, and
// resumes at interception context.
//
//
// Unwinds the exception tracker and pops FS:[0] handlers down to the
// interception context's SP, then resumes execution at the interception
// context. Never returns.
//
VOID UnwindExceptionTrackerAndResumeInInterceptionFrame(ExInfo* pExInfo, EHContext* context)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    _ASSERTE(pExInfo && context);

    // Everything below the interception point's stack pointer goes away.
    LPVOID targetSP = (LPVOID)(size_t)context->Esp;
    pExInfo->UnwindExInfo(targetSP);
    PopNestedExceptionRecords(targetSP);

    STRESS_LOG3(LF_EH|LF_CORDB, LL_INFO100, "UnwindExceptionTrackerAndResumeInInterceptionFrame: completing intercept at EIP = %p ESP = %p EBP = %p\n", context->Eip, context->Esp, context->Ebp);

    ResumeAtJitEHHelper(context);
    UNREACHABLE_MSG("Should never return from ResumeAtJitEHHelper!");
}
//
// Pop SEH records below the given target ESP. This is only used to pop nested exception records.
// If bCheckForUnknownHandlers is set, it only checks for unknown FS:[0] handlers.
//
// Returns TRUE only in check-mode, when a handler below pTargetSP is not one
// of the recognized kinds; otherwise walks past all records below pTargetSP,
// installs the survivor as the chain head (pop-mode only), and returns FALSE.
BOOL PopNestedExceptionRecords(LPVOID pTargetSP, BOOL bCheckForUnknownHandlers)
{
// No CONTRACT here, because we can't run the risk of it pushing any SEH into the current method.
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
PEXCEPTION_REGISTRATION_RECORD pEHR = GetCurrentSEHRecord();
while ((LPVOID)pEHR < pTargetSP)
{
//
// The only handler types we're allowed to have below the limit on the FS:0 chain in these cases are a
// nested exception record or a fast NExport record, so we verify that here.
//
// There is a special case, of course: for an unhandled exception, when the default handler does the exit
// unwind, we may have an exception that escapes a finally clause, thus replacing the original unhandled
// exception. If we find a catcher for that new exception, then we'll go ahead and do our own unwind, then
// jump to the catch. When we are called here, just before jumpping to the catch, we'll pop off our nested
// handlers, then we'll pop off one more handler: the handler that ntdll!ExecuteHandler2 pushed before
// calling our nested handler. We go ahead and pop off that handler, too. Its okay, its only there to catch
// exceptions from handlers and turn them into collided unwind status codes... there's no cleanup in the
// handler that we're removing, and that's the important point. The handler that ExecuteHandler2 pushes
// isn't a public export from ntdll, but its named "UnwindHandler" and is physically shortly after
// ExecuteHandler2 in ntdll.
// In this case, we don't want to pop off the NExportSEH handler since it's our outermost handler.
//
static HINSTANCE ExecuteHandler2Module = 0;
static BOOL ExecuteHandler2ModuleInited = FALSE;
// Cache the handle to the dll with the handler pushed by ExecuteHandler2.
if (!ExecuteHandler2ModuleInited)
{
ExecuteHandler2Module = WszGetModuleHandle(W("ntdll.dll"));
ExecuteHandler2ModuleInited = TRUE;
}
if (bCheckForUnknownHandlers)
{
// NOTE(review): this uses `!A || !B` (reports "unknown" unless the record
// is BOTH a nested record AND its handler lives in ntdll), whereas the
// _DEBUG assert below accepts an OR of the recognized kinds. Confirm the
// asymmetry is intentional before relying on check-mode results.
if (!IsComPlusNestedExceptionRecord(pEHR) ||
!((ExecuteHandler2Module != NULL) && IsIPInModule(ExecuteHandler2Module, (PCODE)pEHR->Handler)))
{
return TRUE;
}
}
#ifdef _DEBUG
else
{
// Note: if we can't find the module containing ExecuteHandler2, we'll just be really strict and require
// that we're only popping nested handlers or the FastNExportSEH handler.
_ASSERTE(FastNExportSEH(pEHR) || IsComPlusNestedExceptionRecord(pEHR) ||
((ExecuteHandler2Module != NULL) && IsIPInModule(ExecuteHandler2Module, (PCODE)pEHR->Handler)));
}
#endif // _DEBUG
pEHR = pEHR->Next;
}
// In pop-mode, actually commit the new chain head; check-mode leaves the
// chain untouched.
if (!bCheckForUnknownHandlers)
{
SetCurrentSEHRecord(pEHR);
}
return FALSE;
}
//
// This is implemented differently from the PopNestedExceptionRecords above because it's called in the context of
// the DebuggerRCThread to operate on the stack of another thread.
//
// pSEH points at the target thread's chain-head slot; the walk is done with
// raw DWORD loads (the Next pointer is the first field of each record) so no
// thread-local state of the current thread is consulted.
VOID PopNestedExceptionRecords(LPVOID pTargetSP, CONTEXT *pCtx, void *pSEH)
{
// No CONTRACT here, because we can't run the risk of it pushing any SEH into the current method.
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
#ifdef _DEBUG
LOG((LF_CORDB,LL_INFO1000, "\nPrintSEHRecords:\n"));
EXCEPTION_REGISTRATION_RECORD *pEHR = (EXCEPTION_REGISTRATION_RECORD *)(size_t)*(DWORD *)pSEH;
// check that all the eh frames are all greater than the current stack value. If not, the
// stack has been updated somehow w/o unwinding the SEH chain.
while (pEHR != NULL && pEHR != EXCEPTION_CHAIN_END)
{
LOG((LF_EH, LL_INFO1000000, "\t%08x: next:%08x handler:%x\n", pEHR, pEHR->Next, pEHR->Handler));
pEHR = pEHR->Next;
}
#endif
DWORD dwCur = *(DWORD*)pSEH; // 'EAX' in the original routine
DWORD dwPrev = (DWORD)(size_t)pSEH;
while (dwCur < (DWORD)(size_t)pTargetSP)
{
// Watch for the OS handler
// for nested exceptions, or any C++ handlers for destructors in our call
// stack, or anything else.
// dwPrev tracks the last record that is still below the target thread's
// current SP (per pCtx); its Next slot is what gets patched at the end.
if (dwCur < (DWORD)GetSP(pCtx))
dwPrev = dwCur;
dwCur = *(DWORD *)(size_t)dwCur;
LOG((LF_CORDB,LL_INFO10000, "dwCur: 0x%x dwPrev:0x%x pTargetSP:0x%x\n",
dwCur, dwPrev, pTargetSP));
}
// Splice out everything between dwPrev and the first record at/above
// pTargetSP by rewriting dwPrev's Next field in the target thread's stack.
*(DWORD *)(size_t)dwPrev = dwCur;
#ifdef _DEBUG
pEHR = (EXCEPTION_REGISTRATION_RECORD *)(size_t)*(DWORD *)pSEH;
// check that all the eh frames are all greater than the current stack value. If not, the
// stack has been updated somehow w/o unwinding the SEH chain.
LOG((LF_CORDB,LL_INFO1000, "\nPopSEHRecords:\n"));
while (pEHR != NULL && pEHR != (void *)-1)
{
LOG((LF_EH, LL_INFO1000000, "\t%08x: next:%08x handler:%x\n", pEHR, pEHR->Next, pEHR->Handler));
pEHR = pEHR->Next;
}
#endif
}
//==========================================================================
// COMPlusThrowCallback
//
//==========================================================================
/*
*
* COMPlusThrowCallbackHelper
*
* This function is a simple helper function for COMPlusThrowCallback. It is needed
* because of the EX_TRY macro. This macro does an alloca(), which allocates space
* off the stack, not free'ing it. Thus, doing a EX_TRY in a loop can easily result
* in a stack overflow error. By factoring out the EX_TRY into a separate function,
* we recover that stack space.
*
* Parameters:
* pJitManager - The JIT manager that will filter the EH.
* pCf - The frame to crawl.
* EHClausePtr
* nestingLevel
* pThread - Used to determine if the thread is throwable or not.
*
* Return:
* Exception status.
*
*/
int COMPlusThrowCallbackHelper(IJitManager *pJitManager,
CrawlFrame *pCf,
ThrowCallbackType* pData,
EE_ILEXCEPTION_CLAUSE *EHClausePtr,
DWORD nestingLevel,
OBJECTREF throwable,
Thread *pThread
)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
int iFilt = 0;
EX_TRY
{
// Protect the throwable across any GC the filter may trigger.
GCPROTECT_BEGIN (throwable);
// We want to call filters even if the thread is aborting, so suppress abort
// checks while the filter runs.
ThreadPreventAsyncHolder preventAbort;
BYTE* startAddress = (BYTE*)pCf->GetCodeInfo()->GetStartAddress();
iFilt = ::CallJitEHFilter(pCf, startAddress, EHClausePtr, nestingLevel, throwable);
GCPROTECT_END();
}
EX_CATCH
{
// We had an exception in filter invocation that remained unhandled.
// Sync managed exception state, for the managed thread, based upon the active exception tracker.
pThread->SyncManagedExceptionState(false);
//
// Swallow exception. Treat as exception continue search.
//
iFilt = EXCEPTION_CONTINUE_SEARCH;
}
EX_END_CATCH(SwallowAllExceptions)
return iFilt;
}
//******************************************************************************
// The stack walk callback for exception handling on x86.
// Returns one of:
// SWA_CONTINUE = 0, // continue walking
// SWA_ABORT = 1, // stop walking, early out in "failure case"
// SWA_FAILED = 2 // couldn't walk stack
//
// First-pass (search) callback: per frame it appends to the managed stack
// trace, raises debugger/profiler first-chance notifications, and scans the
// frame's EH clauses (running filters) looking for a handler. When a catcher
// is found, its identity (pFunc/dHandler/pStack) is recorded in pData for
// the unwind pass and the walk is aborted.
StackWalkAction COMPlusThrowCallback( // SWA value
CrawlFrame *pCf, // Data from StackWalkFramesEx
ThrowCallbackType *pData) // Context data passed through from CPFH
{
// We don't want to use a runtime contract here since this codepath is used during
// the processing of a hard SO. Contracts use a significant amount of stack
// which we can't afford for those cases.
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
Frame *pFrame = pCf->GetFrame();
MethodDesc *pFunc = pCf->GetFunction();
#if defined(_DEBUG)
#define METHODNAME(pFunc) (pFunc?pFunc->m_pszDebugMethodName:"<n/a>")
#else
#define METHODNAME(pFunc) "<n/a>"
#endif
STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusThrowCallback: STACKCRAWL method:%pM ('%s'), Frame:%p, FrameVtable = %pV\n",
pFunc, METHODNAME(pFunc), pFrame, pCf->IsFrameless()?0:(*(void**)pFrame));
#undef METHODNAME
Thread *pThread = GetThread();
if (pFrame && pData->pTopFrame == pFrame)
/* Don't look past limiting frame if there is one */
return SWA_ABORT;
if (!pFunc)
return SWA_CONTINUE;
// A rude abort runs no filters/handlers; just keep walking.
if (pThread->IsRudeAbortInitiated())
{
return SWA_CONTINUE;
}
ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
_ASSERTE(!pData->bIsUnwind);
#ifdef _DEBUG
// It SHOULD be the case that any frames we consider live between this exception
// record and the previous one.
if (!pExInfo->m_pPrevNestedInfo) {
if (pData->pCurrentExceptionRecord) {
if (pFrame) _ASSERTE(pData->pCurrentExceptionRecord > pFrame);
// The FastNExport SEH handler can be in the frame we just unwound and as a result just out of range.
if (pCf->IsFrameless() && !FastNExportSEH((PEXCEPTION_REGISTRATION_RECORD)pData->pCurrentExceptionRecord))
{
_ASSERTE((ULONG_PTR)pData->pCurrentExceptionRecord >= GetRegdisplaySP(pCf->GetRegisterSet()));
}
}
if (pData->pPrevExceptionRecord) {
// FCALLS have an extra SEH record in debug because of the desctructor
// associated with ForbidGC checking. This is benign, so just ignore it.
if (pFrame) _ASSERTE(pData->pPrevExceptionRecord < pFrame || pFrame->GetVTablePtr() == HelperMethodFrame::GetMethodFrameVPtr());
if (pCf->IsFrameless()) _ASSERTE((ULONG_PTR)pData->pPrevExceptionRecord <= GetRegdisplaySP(pCf->GetRegisterSet()));
}
}
#endif
// Capture an IP/SP for the stack-trace entry; availability depends on the
// kind of frame (jitted, inlined pinvoke, or explicit Frame).
UINT_PTR currentIP = 0;
UINT_PTR currentSP = 0;
if (pCf->IsFrameless())
{
currentIP = (UINT_PTR)GetControlPC(pCf->GetRegisterSet());
currentSP = (UINT_PTR)GetRegdisplaySP(pCf->GetRegisterSet());
}
else if (InlinedCallFrame::FrameHasActiveCall(pFrame))
{
// don't have the IP, SP for native code
currentIP = 0;
currentSP = 0;
}
else
{
currentIP = (UINT_PTR)(pCf->GetFrame()->GetIP());
currentSP = 0; //Don't have an SP to get.
}
if (!pFunc->IsILStub())
{
// Append the current frame to the stack trace and save the save trace to the managed Exception object.
pExInfo->m_StackTraceInfo.AppendElement(pData->bAllowAllocMem, currentIP, currentSP, pFunc, pCf);
pExInfo->m_StackTraceInfo.SaveStackTrace(pData->bAllowAllocMem,
pThread->GetThrowableAsHandle(),
pData->bReplaceStack,
pData->bSkipLastElement);
}
else
{
LOG((LF_EH, LL_INFO1000, "COMPlusThrowCallback: Skipping AppendElement/SaveStackTrace for IL stub MD %p\n", pFunc));
}
// Fire an exception thrown ETW event when an exception occurs
ETW::ExceptionLog::ExceptionThrown(pCf, pData->bSkipLastElement, pData->bReplaceStack);
// Reset the flags. These flags are set only once before each stack walk done by LookForHandler(), and
// they apply only to the first frame we append to the stack trace. Subsequent frames are always appended.
if (pData->bReplaceStack)
{
pData->bReplaceStack = FALSE;
}
if (pData->bSkipLastElement)
{
pData->bSkipLastElement = FALSE;
}
// now we've got the stack trace, if we aren't allowed to catch this and we're first pass, return
if (pData->bDontCatch)
return SWA_CONTINUE;
if (!pCf->IsFrameless())
{
// @todo - remove this once SIS is fully enabled.
extern bool g_EnableSIS;
if (g_EnableSIS)
{
// For debugger, we may want to notify 1st chance exceptions if they're coming out of a stub.
// We recognize stubs as Frames with a M2U transition type. The debugger's stackwalker also
// recognizes these frames and publishes ICorDebugInternalFrames in the stackwalk. It's
// important to use pFrame as the stack address so that the Exception callback matches up
// w/ the ICorDebugInternlFrame stack range.
if (CORDebuggerAttached())
{
Frame * pFrameStub = pCf->GetFrame();
Frame::ETransitionType t = pFrameStub->GetTransitionType();
if (t == Frame::TT_M2U)
{
// Use address of the frame as the stack address.
currentSP = (SIZE_T) ((void*) pFrameStub);
currentIP = 0; // no IP.
EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread, (SIZE_T)currentIP, (SIZE_T)currentSP);
// Deliver the FirstChanceNotification after the debugger, if not already delivered.
if (!pExInfo->DeliveredFirstChanceNotification())
{
ExceptionNotifications::DeliverFirstChanceNotification();
}
}
}
}
return SWA_CONTINUE;
}
bool fIsILStub = pFunc->IsILStub();
// IL stubs get no debugger/profiler notifications of their own.
bool fGiveDebuggerAndProfilerNotification = !fIsILStub;
BOOL fMethodCanHandleException = TRUE;
MethodDesc * pUserMDForILStub = NULL;
Frame * pILStubFrame = NULL;
if (fIsILStub)
pUserMDForILStub = GetUserMethodForILStub(pThread, currentSP, pFunc, &pILStubFrame);
// Let the profiler know that we are searching for a handler within this function instance
if (fGiveDebuggerAndProfilerNotification)
EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionEnter(pFunc);
// The following debugger notification and AppDomain::FirstChanceNotification should be scoped together
// since the AD notification *must* follow immediately after the debugger's notification.
{
#ifdef DEBUGGING_SUPPORTED
//
// Go ahead and notify any debugger of this exception.
//
EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread, (SIZE_T)currentIP, (SIZE_T)currentSP);
if (CORDebuggerAttached() && pExInfo->m_ExceptionFlags.DebuggerInterceptInfo())
{
return SWA_ABORT;
}
#endif // DEBUGGING_SUPPORTED
// Attempt to deliver the first chance notification to the AD only *AFTER* the debugger
// has done that, provided we have not already done that.
if (!pExInfo->DeliveredFirstChanceNotification())
{
ExceptionNotifications::DeliverFirstChanceNotification();
}
}
IJitManager* pJitManager = pCf->GetJitManager();
_ASSERTE(pJitManager);
EH_CLAUSE_ENUMERATOR pEnumState;
unsigned EHCount = pJitManager->InitializeEHEnumeration(pCf->GetMethodToken(), &pEnumState);
if (EHCount == 0)
{
// Inform the profiler that we're leaving, and what pass we're on
if (fGiveDebuggerAndProfilerNotification)
EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc);
return SWA_CONTINUE;
}
TypeHandle thrownType = TypeHandle();
// if we are being called on an unwind for an exception that we did not try to catch, eg.
// an internal EE exception, then pThread->GetThrowable will be null
{
OBJECTREF throwable = pThread->GetThrowable();
if (throwable != NULL)
{
throwable = PossiblyUnwrapThrowable(throwable, pCf->GetAssembly());
thrownType = TypeHandle(throwable->GetMethodTable());
}
}
PREGDISPLAY regs = pCf->GetRegisterSet();
BYTE *pStack = (BYTE *) GetRegdisplaySP(regs);
#ifdef DEBUGGING_SUPPORTED
BYTE *pHandlerEBP = (BYTE *) GetRegdisplayFP(regs);
#endif
DWORD offs = (DWORD)pCf->GetRelOffset(); //= (BYTE*) (*regs->pPC) - (BYTE*) pCf->GetStartAddress();
STRESS_LOG1(LF_EH, LL_INFO10000, "COMPlusThrowCallback: offset is %d\n", offs);
EE_ILEXCEPTION_CLAUSE EHClause;
unsigned start_adjust, end_adjust;
start_adjust = !(pCf->HasFaulted() || pCf->IsIPadjusted());
end_adjust = pCf->IsActiveFunc();
// Scan every EH clause of this method looking for one whose try region
// covers the current offset and whose filter/type matches the throwable.
for(ULONG i=0; i < EHCount; i++)
{
pJitManager->GetNextEHClause(&pEnumState, &EHClause);
_ASSERTE(IsValidClause(&EHClause));
STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusThrowCallback: considering '%s' clause [%d,%d], ofs:%d\n",
(IsFault(&EHClause) ? "fault" : (
IsFinally(&EHClause) ? "finally" : (
IsFilterHandler(&EHClause) ? "filter" : (
IsTypedHandler(&EHClause) ? "typed" : "unknown")))),
EHClause.TryStartPC,
EHClause.TryEndPC,
offs
);
// Checking the exception range is a bit tricky because
// on CPU faults (null pointer access, div 0, ..., the IP points
// to the faulting instruction, but on calls, the IP points
// to the next instruction.
// This means that we should not include the start point on calls
// as this would be a call just preceding the try block.
// Also, we should include the end point on calls, but not faults.
// If we're in the FILTER part of a filter clause, then we
// want to stop crawling. It's going to be caught in a
// EX_CATCH just above us. If not, the exception
if ( IsFilterHandler(&EHClause)
&& ( offs > EHClause.FilterOffset
|| (offs == EHClause.FilterOffset && !start_adjust) )
&& ( offs < EHClause.HandlerStartPC
|| (offs == EHClause.HandlerStartPC && !end_adjust) )) {
STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusThrowCallback: Fault inside filter [%d,%d] startAdj %d endAdj %d\n",
EHClause.FilterOffset, EHClause.HandlerStartPC, start_adjust, end_adjust);
if (fGiveDebuggerAndProfilerNotification)
EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc);
return SWA_ABORT;
}
if ( (offs < EHClause.TryStartPC) ||
(offs > EHClause.TryEndPC) ||
(offs == EHClause.TryStartPC && start_adjust) ||
(offs == EHClause.TryEndPC && end_adjust))
continue;
BOOL typeMatch = FALSE;
BOOL isTypedHandler = IsTypedHandler(&EHClause);
if (isTypedHandler && !thrownType.IsNull())
{
if (EHClause.TypeHandle == (void*)(size_t)mdTypeRefNil)
{
// this is a catch(...)
typeMatch = TRUE;
}
else
{
TypeHandle exnType = pJitManager->ResolveEHClause(&EHClause,pCf);
// if doesn't have cached class then class wasn't loaded so couldn't have been thrown
typeMatch = !exnType.IsNull() && ExceptionIsOfRightType(exnType, thrownType);
}
}
// <TODO>@PERF: Is this too expensive? Consider storing the nesting level
// instead of the HandlerEndPC.</TODO>
// Determine the nesting level of EHClause. Just walk the table
// again, and find out how many handlers enclose it
DWORD nestingLevel = 0;
if (IsFaultOrFinally(&EHClause))
continue;
if (isTypedHandler)
{
LOG((LF_EH, LL_INFO100, "COMPlusThrowCallback: %s match for typed handler.\n", typeMatch?"Found":"Did not find"));
if (!typeMatch)
{
continue;
}
}
else
{
// Must be an exception filter (__except() part of __try{}__except(){}).
nestingLevel = ComputeEnclosingHandlerNestingLevel(pJitManager,
pCf->GetMethodToken(),
EHClause.HandlerStartPC);
// We just need *any* address within the method. This will let the debugger
// resolve the EnC version of the method.
PCODE pMethodAddr = GetControlPC(regs);
if (fGiveDebuggerAndProfilerNotification)
EEToDebuggerExceptionInterfaceWrapper::ExceptionFilter(pFunc, pMethodAddr, EHClause.FilterOffset, pHandlerEBP);
UINT_PTR uStartAddress = (UINT_PTR)pCf->GetCodeInfo()->GetStartAddress();
// save clause information in the exinfo
pExInfo->m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_FILTER,
uStartAddress + EHClause.FilterOffset,
StackFrame((UINT_PTR)pHandlerEBP));
// Let the profiler know we are entering a filter
if (fGiveDebuggerAndProfilerNotification)
EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterEnter(pFunc);
STRESS_LOG3(LF_EH, LL_INFO10, "COMPlusThrowCallback: calling filter code, EHClausePtr:%08x, Start:%08x, End:%08x\n",
&EHClause, EHClause.HandlerStartPC, EHClause.HandlerEndPC);
OBJECTREF throwable = PossiblyUnwrapThrowable(pThread->GetThrowable(), pCf->GetAssembly());
pExInfo->m_EHClauseInfo.SetManagedCodeEntered(TRUE);
// Run the managed filter via the helper (factored out to avoid EX_TRY
// alloca growth in this loop).
int iFilt = COMPlusThrowCallbackHelper(pJitManager,
pCf,
pData,
&EHClause,
nestingLevel,
throwable,
pThread);
pExInfo->m_EHClauseInfo.SetManagedCodeEntered(FALSE);
// Let the profiler know we are leaving a filter
if (fGiveDebuggerAndProfilerNotification)
EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterLeave();
pExInfo->m_EHClauseInfo.ResetInfo();
if (pThread->IsRudeAbortInitiated())
{
if (fGiveDebuggerAndProfilerNotification)
EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc);
return SWA_CONTINUE;
}
// If this filter didn't want the exception, keep looking.
if (EXCEPTION_EXECUTE_HANDLER != iFilt)
continue;
}
// Record this location, to stop the unwind phase, later.
pData->pFunc = pFunc;
pData->dHandler = i;
pData->pStack = pStack;
// Notify the profiler that a catcher has been found
if (fGiveDebuggerAndProfilerNotification)
{
EEToProfilerExceptionInterfaceWrapper::ExceptionSearchCatcherFound(pFunc);
EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc);
}
#ifdef DEBUGGING_SUPPORTED
//
// Notify debugger that a catcher has been found.
//
if (fIsILStub)
{
EEToDebuggerExceptionInterfaceWrapper::NotifyOfCHFFilter(pExInfo->m_pExceptionPointers, pILStubFrame);
}
else
if (fGiveDebuggerAndProfilerNotification &&
CORDebuggerAttached() && !pExInfo->m_ExceptionFlags.DebuggerInterceptInfo())
{
_ASSERTE(pData);
// We just need *any* address within the method. This will let the debugger
// resolve the EnC version of the method.
PCODE pMethodAddr = GetControlPC(regs);
EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedExceptionCatcherFound(pThread,
pData->pFunc, pMethodAddr,
(SIZE_T)pData->pStack,
&EHClause);
}
#endif // DEBUGGING_SUPPORTED
return SWA_ABORT;
}
if (fGiveDebuggerAndProfilerNotification)
EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc);
return SWA_CONTINUE;
} // StackWalkAction COMPlusThrowCallback()
//==========================================================================
// COMPlusUnwindCallback
//==========================================================================
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning (disable : 4740) // There is inline asm code in this function, which disables
// global optimizations.
#pragma warning (disable : 4731)
#endif
// Second-pass (unwind) callback: re-walks the stack up to the catcher found
// by COMPlusThrowCallback, running fault/finally clauses along the way. When
// the recorded catching frame/clause is reached it calls ResumeAtJitEH (which
// does not return). If the debugger requested an interception at this frame,
// control branches to LDoDebuggerIntercept instead.
StackWalkAction COMPlusUnwindCallback (CrawlFrame *pCf, ThrowCallbackType *pData)
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_COOPERATIVE;
_ASSERTE(pData->bIsUnwind);
Frame *pFrame = pCf->GetFrame();
MethodDesc *pFunc = pCf->GetFunction();
#if defined(_DEBUG)
#define METHODNAME(pFunc) (pFunc?pFunc->m_pszDebugMethodName:"<n/a>")
#else
#define METHODNAME(pFunc) "<n/a>"
#endif
STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: STACKCRAWL method:%pM ('%s'), Frame:%p, FrameVtable = %pV\n",
pFunc, METHODNAME(pFunc), pFrame, pCf->IsFrameless()?0:(*(void**)pFrame));
#undef METHODNAME
if (pFrame && pData->pTopFrame == pFrame)
/* Don't look past limiting frame if there is one */
return SWA_ABORT;
if (!pFunc)
return SWA_CONTINUE;
if (!pCf->IsFrameless())
return SWA_CONTINUE;
Thread *pThread = GetThread();
// If the thread is being RudeAbort, we will not run any finally
if (pThread->IsRudeAbortInitiated())
{
return SWA_CONTINUE;
}
IJitManager* pJitManager = pCf->GetJitManager();
_ASSERTE(pJitManager);
ExInfo *pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
PREGDISPLAY regs = pCf->GetRegisterSet();
BYTE *pStack = (BYTE *) GetRegdisplaySP(regs);
TypeHandle thrownType = TypeHandle();
#ifdef DEBUGGING_SUPPORTED
LOG((LF_EH, LL_INFO1000, "COMPlusUnwindCallback: Intercept %d, pData->pFunc 0x%X, pFunc 0x%X, pData->pStack 0x%X, pStack 0x%X\n",
pExInfo->m_ExceptionFlags.DebuggerInterceptInfo(),
pData->pFunc,
pFunc,
pData->pStack,
pStack));
//
// If the debugger wants to intercept this exception here, go do that.
//
if (pExInfo->m_ExceptionFlags.DebuggerInterceptInfo() && (pData->pFunc == pFunc) && (pData->pStack == pStack))
{
goto LDoDebuggerIntercept;
}
#endif
bool fGiveDebuggerAndProfilerNotification;
fGiveDebuggerAndProfilerNotification = !pFunc->IsILStub();
// Notify the profiler of the function we're dealing with in the unwind phase
if (fGiveDebuggerAndProfilerNotification)
EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionEnter(pFunc);
EH_CLAUSE_ENUMERATOR pEnumState;
unsigned EHCount = pJitManager->InitializeEHEnumeration(pCf->GetMethodToken(), &pEnumState);
if (EHCount == 0)
{
// Inform the profiler that we're leaving, and what pass we're on
if (fGiveDebuggerAndProfilerNotification)
EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pFunc);
return SWA_CONTINUE;
}
// if we are being called on an unwind for an exception that we did not try to catch, eg.
// an internal EE exception, then pThread->GetThrowable will be null
{
OBJECTREF throwable = pThread->GetThrowable();
if (throwable != NULL)
{
throwable = PossiblyUnwrapThrowable(throwable, pCf->GetAssembly());
thrownType = TypeHandle(throwable->GetMethodTable());
}
}
#ifdef DEBUGGING_SUPPORTED
BYTE *pHandlerEBP;
pHandlerEBP = (BYTE *) GetRegdisplayFP(regs);
#endif
DWORD offs;
offs = (DWORD)pCf->GetRelOffset(); //= (BYTE*) (*regs->pPC) - (BYTE*) pCf->GetStartAddress();
LOG((LF_EH, LL_INFO100, "COMPlusUnwindCallback: current EIP offset in method 0x%x, \n", offs));
EE_ILEXCEPTION_CLAUSE EHClause;
unsigned start_adjust, end_adjust;
start_adjust = !(pCf->HasFaulted() || pCf->IsIPadjusted());
end_adjust = pCf->IsActiveFunc();
// Walk the clause table: run each covering fault/finally, and stop when we
// reach the exact clause recorded by the first pass.
for(ULONG i=0; i < EHCount; i++)
{
pJitManager->GetNextEHClause(&pEnumState, &EHClause);
_ASSERTE(IsValidClause(&EHClause));
STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: considering '%s' clause [%d,%d], offs:%d\n",
(IsFault(&EHClause) ? "fault" : (
IsFinally(&EHClause) ? "finally" : (
IsFilterHandler(&EHClause) ? "filter" : (
IsTypedHandler(&EHClause) ? "typed" : "unknown")))),
EHClause.TryStartPC,
EHClause.TryEndPC,
offs
);
// Checking the exception range is a bit tricky because
// on CPU faults (null pointer access, div 0, ..., the IP points
// to the faulting instruction, but on calls, the IP points
// to the next instruction.
// This means that we should not include the start point on calls
// as this would be a call just preceding the try block.
// Also, we should include the end point on calls, but not faults.
if ( IsFilterHandler(&EHClause)
&& ( offs > EHClause.FilterOffset
|| (offs == EHClause.FilterOffset && !start_adjust) )
&& ( offs < EHClause.HandlerStartPC
|| (offs == EHClause.HandlerStartPC && !end_adjust) )
) {
STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: Fault inside filter [%d,%d] startAdj %d endAdj %d\n",
EHClause.FilterOffset, EHClause.HandlerStartPC, start_adjust, end_adjust);
// Make the filter as done. See comment in CallJitEHFilter
// on why we have to do it here.
Frame* pFilterFrame = pThread->GetFrame();
_ASSERTE(pFilterFrame->GetVTablePtr() == ExceptionFilterFrame::GetMethodFrameVPtr());
((ExceptionFilterFrame*)pFilterFrame)->SetFilterDone();
// Inform the profiler that we're leaving, and what pass we're on
if (fGiveDebuggerAndProfilerNotification)
EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pFunc);
return SWA_ABORT;
}
if ( (offs < EHClause.TryStartPC) ||
(offs > EHClause.TryEndPC) ||
(offs == EHClause.TryStartPC && start_adjust) ||
(offs == EHClause.TryEndPC && end_adjust))
continue;
// <TODO>@PERF : Is this too expensive? Consider storing the nesting level
// instead of the HandlerEndPC.</TODO>
// Determine the nesting level of EHClause. Just walk the table
// again, and find out how many handlers enclose it
DWORD nestingLevel = ComputeEnclosingHandlerNestingLevel(pJitManager,
pCf->GetMethodToken(),
EHClause.HandlerStartPC);
// We just need *any* address within the method. This will let the debugger
// resolve the EnC version of the method.
PCODE pMethodAddr = GetControlPC(regs);
UINT_PTR uStartAddress = (UINT_PTR)pCf->GetCodeInfo()->GetStartAddress();
if (IsFaultOrFinally(&EHClause))
{
if (fGiveDebuggerAndProfilerNotification)
EEToDebuggerExceptionInterfaceWrapper::ExceptionHandle(pFunc, pMethodAddr, EHClause.HandlerStartPC, pHandlerEBP);
pExInfo->m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_FINALLY,
uStartAddress + EHClause.HandlerStartPC,
StackFrame((UINT_PTR)pHandlerEBP));
// Notify the profiler that we are about to execute the finally code
if (fGiveDebuggerAndProfilerNotification)
EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFinallyEnter(pFunc);
LOG((LF_EH, LL_INFO100, "COMPlusUnwindCallback: finally clause [%d,%d] - call\n", EHClause.TryStartPC, EHClause.TryEndPC));
pExInfo->m_EHClauseInfo.SetManagedCodeEntered(TRUE);
::CallJitEHFinally(pCf, (BYTE *)uStartAddress, &EHClause, nestingLevel);
pExInfo->m_EHClauseInfo.SetManagedCodeEntered(FALSE);
LOG((LF_EH, LL_INFO100, "COMPlusUnwindCallback: finally - returned\n"));
// Notify the profiler that we are done with the finally code
if (fGiveDebuggerAndProfilerNotification)
EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFinallyLeave();
pExInfo->m_EHClauseInfo.ResetInfo();
continue;
}
// Current is not a finally, check if it's the catching handler (or filter).
if (pData->pFunc != pFunc || (ULONG)(pData->dHandler) != i || pData->pStack != pStack)
{
continue;
}
#ifdef _DEBUG
gLastResumedExceptionFunc = pCf->GetFunction();
gLastResumedExceptionHandler = i;
#endif
// save clause information in the exinfo
pExInfo->m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_CATCH,
uStartAddress + EHClause.HandlerStartPC,
StackFrame((UINT_PTR)pHandlerEBP));
// Notify the profiler that we are about to resume at the catcher.
if (fGiveDebuggerAndProfilerNotification)
{
DACNotify::DoExceptionCatcherEnterNotification(pFunc, EHClause.HandlerStartPC);
EEToProfilerExceptionInterfaceWrapper::ExceptionCatcherEnter(pThread, pFunc);
EEToDebuggerExceptionInterfaceWrapper::ExceptionHandle(pFunc, pMethodAddr, EHClause.HandlerStartPC, pHandlerEBP);
}
STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: offset 0x%x matches clause [0x%x, 0x%x) matches in method %pM\n",
offs, EHClause.TryStartPC, EHClause.TryEndPC, pFunc);
// ResumeAtJitEH will set pExInfo->m_EHClauseInfo.m_fManagedCodeEntered = TRUE; at the appropriate time
::ResumeAtJitEH(pCf, (BYTE *)uStartAddress, &EHClause, nestingLevel, pThread, pData->bUnwindStack);
//UNREACHABLE_MSG("ResumeAtJitEH shouldn't have returned!");
// we do not set pExInfo->m_EHClauseInfo.m_fManagedCodeEntered = FALSE here,
// that happens when the catch clause calls back to COMPlusEndCatch
}
STRESS_LOG1(LF_EH, LL_INFO100, "COMPlusUnwindCallback: no handler found in method %pM\n", pFunc);
if (fGiveDebuggerAndProfilerNotification)
EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pFunc);
return SWA_CONTINUE;
#ifdef DEBUGGING_SUPPORTED
LDoDebuggerIntercept:
STRESS_LOG1(LF_EH|LF_CORDB, LL_INFO100, "COMPlusUnwindCallback: Intercepting in method %pM\n", pFunc);
//
// Setup up the easy parts of the context to restart at.
//
EHContext context;
//
// Note: EAX ECX EDX are scratch
//
context.Esp = (DWORD)(size_t)(GetRegdisplaySP(regs));
context.Ebx = *regs->pEbx;
context.Esi = *regs->pEsi;
context.Edi = *regs->pEdi;
context.Ebp = *regs->pEbp;
//
// Set scratch registers to 0 to avoid reporting incorrect values to GC in case of debugger changing the IP
// in the middle of a scratch register lifetime (see Dev10 754922)
//
context.Eax = 0;
context.Ecx = 0;
context.Edx = 0;
//
// Ok, now set the target Eip to the address the debugger requested.
//
ULONG_PTR nativeOffset;
pExInfo->m_DebuggerExState.GetDebuggerInterceptInfo(NULL, NULL, NULL, NULL, &nativeOffset, NULL);
context.Eip = GetControlPC(regs) - (pCf->GetRelOffset() - nativeOffset);
//
// Finally we need to get the correct Esp for this nested level
//
context.Esp = pCf->GetCodeManager()->GetAmbientSP(regs,
pCf->GetCodeInfo(),
nativeOffset,
pData->dHandler,
pCf->GetCodeManState()
);
//
// In case we see unknown FS:[0] handlers we delay the interception point until we reach the handler that protects the interception point.
// This way we have both FS:[0] handlers being poped up by RtlUnwind and managed capital F Frames being unwinded by managed stackwalker.
//
BOOL fCheckForUnknownHandler = TRUE;
if (PopNestedExceptionRecords((LPVOID)(size_t)context.Esp, fCheckForUnknownHandler))
{
// Let ClrDebuggerDoUnwindAndIntercept RtlUnwind continue to unwind frames until we reach the handler protected by COMPlusNestedExceptionHandler.
pExInfo->m_InterceptionContext = context;
pExInfo->m_ValidInterceptionContext = TRUE;
STRESS_LOG0(LF_EH|LF_CORDB, LL_INFO100, "COMPlusUnwindCallback: Skip interception until unwinding reaches the actual handler protected by COMPlusNestedExceptionHandler\n");
}
else
{
//
// Pop off all the Exception information up to this point in the stack
//
UnwindExceptionTrackerAndResumeInInterceptionFrame(pExInfo, &context);
}
return SWA_ABORT;
#endif // DEBUGGING_SUPPORTED
} // StackWalkAction COMPlusUnwindCallback ()
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning (disable : 4740) // There is inline asm code in this function, which disables
// global optimizations.
#pragma warning (disable : 4731)
#endif
//---------------------------------------------------------------------------
// ResumeAtJitEH
//
// Transfers control to a jitted catch handler (x86, non-funclet EH). This
// function never returns: it builds an EHContext whose IP is the handler's
// first instruction, optionally unwinds nested exception state and SEH
// records down to the handler's frame, installs a fresh nested-exception
// record, and finally jumps into managed code via ResumeAtJitEHHelper.
//
//   pCf          - crawl frame positioned at the method owning the handler
//   startPC      - start of the method's code (HandlerStartPC is relative to it)
//   EHClausePtr  - the EH clause whose catch handler is being entered
//   nestingLevel - nesting depth of the clause within the method
//   pThread      - the current thread
//   unwindStack  - TRUE to really unwind; FALSE to run the handler without
//                  popping anything (the guard page must still be present)
//---------------------------------------------------------------------------
void ResumeAtJitEH(CrawlFrame* pCf,
                   BYTE* startPC,
                   EE_ILEXCEPTION_CLAUSE *EHClausePtr,
                   DWORD nestingLevel,
                   Thread *pThread,
                   BOOL unwindStack)
{
    // No dynamic contract here because this function doesn't return and destructors wouldn't be executed
    WRAPPER_NO_CONTRACT;
    EHContext context;
    // Point the context's IP at the first instruction of the catch handler.
    context.Setup(PCODE(startPC + EHClausePtr->HandlerStartPC), pCf->GetRegisterSet());
    size_t * pShadowSP = NULL; // Write Esp to *pShadowSP before jumping to handler
    size_t * pHandlerEnd = NULL;
    OBJECTREF throwable = PossiblyUnwrapThrowable(pThread->GetThrowable(), pCf->GetAssembly());
    // Ask the code manager to prepare the context for entering the catch; it
    // also hands back this frame's shadow-SP slot and handler-end slot.
    pCf->GetCodeManager()->FixContext(ICodeManager::CATCH_CONTEXT,
                                      &context,
                                      pCf->GetCodeInfo(),
                                      EHClausePtr->HandlerStartPC,
                                      nestingLevel,
                                      throwable,
                                      pCf->GetCodeManState(),
                                      &pShadowSP,
                                      &pHandlerEnd);
    if (pHandlerEnd)
    {
        *pHandlerEnd = EHClausePtr->HandlerEndPC;
    }
    MethodDesc* pMethodDesc = pCf->GetCodeInfo()->GetMethodDesc();
    TADDR startAddress = pCf->GetCodeInfo()->GetStartAddress();
    if (InlinedCallFrame::FrameHasActiveCall(pThread->m_pFrame))
    {
        // When unwinding an exception in ReadyToRun, the JIT_PInvokeEnd helper which unlinks the ICF from
        // the thread will be skipped. This is because unlike jitted code, each pinvoke is wrapped by calls
        // to the JIT_PInvokeBegin and JIT_PInvokeEnd helpers, which push and pop the ICF on the thread. The
        // ICF is not linked at the method prolog and unlinked at the epilog when running R2R code. Since the
        // JIT_PInvokeEnd helper will be skipped, we need to unlink the ICF here. If the executing method
        // has another pinvoke, it will re-link the ICF again when the JIT_PInvokeBegin helper is called.
        // Check that the InlinedCallFrame is in the method with the exception handler. There can be other
        // InlinedCallFrame somewhere up the call chain that is not related to the current exception
        // handling.
#ifdef DEBUG
        TADDR handlerFrameSP = pCf->GetRegisterSet()->SP;
#endif // DEBUG
        // Find the ESP of the caller of the method with the exception handler.
        bool unwindSuccess = pCf->GetCodeManager()->UnwindStackFrame(pCf->GetRegisterSet(),
                                                                    pCf->GetCodeInfo(),
                                                                    pCf->GetCodeManagerFlags(),
                                                                    pCf->GetCodeManState(),
                                                                    NULL /* StackwalkCacheUnwindInfo* */);
        _ASSERTE(unwindSuccess);
        // Unlink the ICF only when it lives inside the handler's method (its
        // address is below the caller's SP) and the active call is R2R code.
        if (((TADDR)pThread->m_pFrame < pCf->GetRegisterSet()->SP) && ExecutionManager::IsReadyToRunCode(((InlinedCallFrame*)pThread->m_pFrame)->m_pCallerReturnAddress))
        {
            _ASSERTE((TADDR)pThread->m_pFrame >= handlerFrameSP);
            pThread->m_pFrame->Pop(pThread);
        }
    }
    // save esp so that endcatch can restore it (it always restores, so want correct value)
    ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
    pExInfo->m_dEsp = (LPVOID)context.GetSP();
    LOG((LF_EH, LL_INFO1000, "ResumeAtJitEH: current m_dEsp set to %p\n", context.GetSP()));
    PVOID dEsp = GetCurrentSP();
    if (!unwindStack)
    {
        // If we don't want to unwind the stack, then the guard page had better not be gone!
        _ASSERTE(pThread->DetermineIfGuardPagePresent());
        // so down below won't really update esp
        context.SetSP(dEsp);
        pExInfo->m_pShadowSP = pShadowSP; // so that endcatch can zero it back
        if (pShadowSP)
        {
            *pShadowSP = (size_t)dEsp;
        }
    }
    else
    {
        // so shadow SP has the real SP as we are going to unwind the stack
        dEsp = (LPVOID)context.GetSP();
        // BEGIN: pExInfo->UnwindExInfo(dEsp);
        // Pop every nested ExInfo that lives below the handler's SP: those
        // exceptions are being abandoned by this unwind.
        ExInfo *pPrevNestedInfo = pExInfo->m_pPrevNestedInfo;
        while (pPrevNestedInfo && pPrevNestedInfo->m_StackAddress < dEsp)
        {
            LOG((LF_EH, LL_INFO1000, "ResumeAtJitEH: popping nested ExInfo at 0x%p\n", pPrevNestedInfo->m_StackAddress));
            pPrevNestedInfo->DestroyExceptionHandle();
            pPrevNestedInfo->m_StackTraceInfo.FreeStackTrace();
#ifdef DEBUGGING_SUPPORTED
            if (g_pDebugInterface != NULL)
            {
                g_pDebugInterface->DeleteInterceptContext(pPrevNestedInfo->m_DebuggerExState.GetDebuggerInterceptContext());
            }
#endif // DEBUGGING_SUPPORTED
            pPrevNestedInfo = pPrevNestedInfo->m_pPrevNestedInfo;
        }
        pExInfo->m_pPrevNestedInfo = pPrevNestedInfo;
        _ASSERTE(pExInfo->m_pPrevNestedInfo == 0 || pExInfo->m_pPrevNestedInfo->m_StackAddress >= dEsp);
        // Before we unwind the SEH records, get the Frame from the top-most nested exception record.
        Frame* pNestedFrame = GetCurrFrame(FindNestedEstablisherFrame(GetCurrentSEHRecord()));
        PopNestedExceptionRecords((LPVOID)(size_t)dEsp);
        EXCEPTION_REGISTRATION_RECORD* pNewBottomMostHandler = GetCurrentSEHRecord();
        pExInfo->m_pShadowSP = pShadowSP;
        // The context and exception record are no longer any good.
        _ASSERTE(pExInfo->m_pContext < dEsp);   // It must be off the top of the stack.
        pExInfo->m_pContext = 0;                // Whack it.
        pExInfo->m_pExceptionRecord = 0;
        pExInfo->m_pExceptionPointers = 0;
        // We're going to put one nested record back on the stack before we resume.  This is
        // where it goes.
        NestedHandlerExRecord *pNestedHandlerExRecord = (NestedHandlerExRecord*)((BYTE*)dEsp - ALIGN_UP(sizeof(NestedHandlerExRecord), STACK_ALIGN_SIZE));
        // The point of no return.  The next statement starts scribbling on the stack.  It's
        // deep enough that we won't hit our own locals.  (That's important, 'cuz we're still
        // using them.)
        //
        _ASSERTE(dEsp > &pCf);
        pNestedHandlerExRecord->m_handlerInfo.m_hThrowable=NULL; // This is random memory.  Handle
                                                                 // must be initialized to null before
                                                                 // calling Init(), as Init() will try
                                                                 // to free any old handle.
        pNestedHandlerExRecord->Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, pNestedFrame);
        INSTALL_EXCEPTION_HANDLING_RECORD(&(pNestedHandlerExRecord->m_ExReg));
        context.SetSP(pNestedHandlerExRecord);
        // We might have moved the bottommost handler.  The nested record itself is never
        // the bottom most handler -- it's pushed after the fact.  So we have to make the
        // bottom-most handler the one BEFORE the nested record.
        if (pExInfo->m_pBottomMostHandler < pNewBottomMostHandler)
        {
          STRESS_LOG3(LF_EH, LL_INFO10000, "ResumeAtJitEH: setting ExInfo:0x%p m_pBottomMostHandler from 0x%p to 0x%p\n",
                      pExInfo, pExInfo->m_pBottomMostHandler, pNewBottomMostHandler);
          pExInfo->m_pBottomMostHandler = pNewBottomMostHandler;
        }
        if (pShadowSP)
        {
            *pShadowSP = context.GetSP();
        }
    }
    STRESS_LOG3(LF_EH, LL_INFO100, "ResumeAtJitEH: resuming at EIP = %p  ESP = %p EBP = %p\n",
                context.Eip, context.GetSP(), context.GetFP());
#ifdef STACK_GUARDS_DEBUG
    // We are transitioning back to managed code, so ensure that we are in
    // SO-tolerant mode before we do so.
    RestoreSOToleranceState();
#endif
    // we want this to happen as late as possible but certainly after the notification
    // that the handle for the current ExInfo has been freed has been delivered
    pExInfo->m_EHClauseInfo.SetManagedCodeEntered(TRUE);
    ETW::ExceptionLog::ExceptionCatchBegin(pMethodDesc, (PVOID)startAddress);
    // Jump into the managed catch handler; control does not come back here.
    ResumeAtJitEHHelper(&context);
    UNREACHABLE_MSG("Should never return from ResumeAtJitEHHelper!");
    // we do not set pExInfo->m_EHClauseInfo.m_fManagedCodeEntered = FALSE here,
    // that happens when the catch clause calls back to COMPlusEndCatch
    // we don't return to this point so it would be moot (see unreachable_msg above)
}
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
// Must be in a separate function because INSTALL_COMPLUS_EXCEPTION_HANDLER has a filter
int CallJitEHFilterWorker(size_t *pShadowSP, EHContext *pContext)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    // Default disposition if the managed filter cannot be reached.
    int filterDisposition = EXCEPTION_CONTINUE_SEARCH;

    BEGIN_CALL_TO_MANAGED();
    filterDisposition = CallJitEHFilterHelper(pShadowSP, pContext);
    END_CALL_TO_MANAGED();

    return filterDisposition;
}
//---------------------------------------------------------------------------
// CallJitEHFilter
//
// Runs a jitted filter funclet during the first pass of exception handling
// and returns the disposition the managed filter produced (e.g.
// EXCEPTION_EXECUTE_HANDLER / EXCEPTION_CONTINUE_SEARCH).
//
//   pCf          - crawl frame positioned at the method owning the filter
//   startPC      - start of the method's code (FilterOffset is relative to it)
//   EHClausePtr  - the filter clause to run
//   nestingLevel - nesting depth of the clause within the method
//   thrownObj    - the exception object handed to the filter
//---------------------------------------------------------------------------
int CallJitEHFilter(CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel, OBJECTREF thrownObj)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_COOPERATIVE;
    int retVal = EXCEPTION_CONTINUE_SEARCH;
    size_t * pShadowSP = NULL;
    EHContext context;
    // Point the context's IP at the first instruction of the filter.
    context.Setup(PCODE(startPC + EHClausePtr->FilterOffset), pCf->GetRegisterSet());
    size_t * pEndFilter = NULL; // Write
    // Prepare the context for entering the filter; also returns this frame's
    // shadow-SP slot and the slot in which to record where the filter ends.
    pCf->GetCodeManager()->FixContext(ICodeManager::FILTER_CONTEXT, &context, pCf->GetCodeInfo(),
                                      EHClausePtr->FilterOffset, nestingLevel, thrownObj, pCf->GetCodeManState(),
                                      &pShadowSP, &pEndFilter);
    // End of the filter is the same as start of handler
    if (pEndFilter)
    {
        *pEndFilter = EHClausePtr->HandlerStartPC;
    }
    // ExceptionFilterFrame serves two purposes:
    //
    // 1. It serves as a frame that stops the managed search for handler
    // if we fault in the filter. ThrowCallbackType.pTopFrame is going point
    // to this frame during search for exception handler inside filter.
    // The search for handler needs a frame to stop. If we had no frame here,
    // the exceptions in filters would not be swallowed correctly since we would
    // walk past the EX_TRY/EX_CATCH block in COMPlusThrowCallbackHelper.
    //
    // 2. It allows setting of SHADOW_SP_FILTER_DONE flag in UnwindFrames()
    // if we fault in the filter. We have to set this flag together with unwinding
    // of the filter frame. Using a regular C++ holder to clear this flag here would cause
    // GC holes. The stack would be in inconsistent state when we trigger gc just before
    // returning from UnwindFrames.
    FrameWithCookie<ExceptionFilterFrame> exceptionFilterFrame(pShadowSP);
    ETW::ExceptionLog::ExceptionFilterBegin(pCf->GetCodeInfo()->GetMethodDesc(), (PVOID)pCf->GetCodeInfo()->GetStartAddress());
    retVal = CallJitEHFilterWorker(pShadowSP, &context);
    ETW::ExceptionLog::ExceptionFilterEnd();
    // Pop the filter frame only after the filter has completed normally.
    exceptionFilterFrame.Pop();
    return retVal;
}
//---------------------------------------------------------------------------
// CallJitEHFinally
//
// Runs a jitted finally handler during the second (unwind) pass, then copies
// the register state the handler left behind back into the unwind context.
// See the large comment below for why that write-back is required.
//
//   pCf          - crawl frame positioned at the method owning the handler
//   startPC      - start of the method's code (HandlerStartPC is relative to it)
//   EHClausePtr  - the finally clause to run
//   nestingLevel - nesting depth of the clause within the method
//---------------------------------------------------------------------------
void CallJitEHFinally(CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel)
{
    WRAPPER_NO_CONTRACT;
    EHContext context;
    // Point the context's IP at the first instruction of the finally.
    context.Setup(PCODE(startPC + EHClausePtr->HandlerStartPC), pCf->GetRegisterSet());
    size_t * pShadowSP = NULL; // Write Esp to *pShadowSP before jumping to handler
    size_t * pFinallyEnd = NULL;
    // Prepare the context for entering the finally (no throwable is passed;
    // finally handlers do not receive the exception object).
    pCf->GetCodeManager()->FixContext(
        ICodeManager::FINALLY_CONTEXT, &context, pCf->GetCodeInfo(),
        EHClausePtr->HandlerStartPC, nestingLevel, ObjectToOBJECTREF((Object *) NULL), pCf->GetCodeManState(),
        &pShadowSP, &pFinallyEnd);
    if (pFinallyEnd)
    {
        *pFinallyEnd = EHClausePtr->HandlerEndPC;
    }
    ETW::ExceptionLog::ExceptionFinallyBegin(pCf->GetCodeInfo()->GetMethodDesc(), (PVOID)pCf->GetCodeInfo()->GetStartAddress());
    CallJitEHFinallyHelper(pShadowSP, &context);
    ETW::ExceptionLog::ExceptionFinallyEnd();
    //
    // Update the registers using new context
    //
    // This is necessary to reflect GC pointer changes during the middle of a unwind inside a
    // finally clause, because:
    // 1. GC won't see the part of stack inside try (which has thrown an exception) that is already
    // unwinded and thus GC won't update GC pointers for this portion of the stack, but rather the
    // call stack in finally.
    // 2. upon return of finally, the unwind process continues and unwinds stack based on the part
    // of stack inside try and won't see the updated values in finally.
    // As a result, we need to manually update the context using register values upon return of finally
    //
    // Note that we only update the registers for finally clause because
    // 1. For filter handlers, stack walker is able to see the whole stack (including the try part)
    // with the help of ExceptionFilterFrame as filter handlers are called in first pass
    // 2. For catch handlers, the current unwinding is already finished
    //
    context.UpdateFrame(pCf->GetRegisterSet());
    // This does not need to be guarded by a holder because the frame is dead if an exception gets thrown.  Filters are different
    // since they are run in the first pass, so we must update the shadowSP reset in CallJitEHFilter.
    if (pShadowSP) {
        *pShadowSP = 0;  // reset the shadowSP to 0
    }
}
#if defined(_MSC_VER)
#pragma warning (default : 4731)
#endif
//=====================================================================
// *********************************************************************
// Returns TRUE when the given SEH registration record was installed by the
// runtime, i.e. its handler is one of the CLR's x86 frame handlers.
BOOL ComPlusFrameSEH(EXCEPTION_REGISTRATION_RECORD* pEHR)
{
    LIMITED_METHOD_CONTRACT;

    LPVOID handler = (LPVOID)pEHR->Handler;
    return (handler == (LPVOID)COMPlusFrameHandler) || (handler == (LPVOID)COMPlusNestedExceptionHandler);
}
//
//-------------------------------------------------------------------------
// This is installed when we call COMPlusFrameHandler to provide a bound to
// determine when are within a nested exception
//-------------------------------------------------------------------------
// Personality routine for the nested-exception record that the runtime pushes
// before running managed EH code (see ResumeAtJitEH and the comment above).
// On the unwind pass it pops the nested ExInfo corresponding to this record,
// then forwards to COMPlusFrameHandler for the common processing.
EXCEPTION_HANDLER_IMPL(COMPlusNestedExceptionHandler)
{
    WRAPPER_NO_CONTRACT;
    if (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND))
    {
        LOG((LF_EH, LL_INFO100, "    COMPlusNestedHandler(unwind) with %x at %x\n", pExceptionRecord->ExceptionCode,
            pContext ? GetIP(pContext) : 0));
        // We're unwinding past a nested exception record, which means that we've thrown
        // a new exception out of a region in which we're handling a previous one.  The
        // previous exception is overridden -- and needs to be unwound.
        // The preceding is ALMOST true.  There is one more case, where we use setjmp/longjmp
        // from within a nested handler.  We won't have a nested exception in that case -- just
        // the unwind.
        Thread* pThread = GetThread();
        ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
        ExInfo* pPrevNestedInfo = pExInfo->m_pPrevNestedInfo;
        // Only pop the nested ExInfo if it belongs to THIS establisher frame;
        // otherwise this is the setjmp/longjmp case described above.
        if (pPrevNestedInfo == &((NestedHandlerExRecord*)pEstablisherFrame)->m_handlerInfo)
        {
            _ASSERTE(pPrevNestedInfo);
            LOG((LF_EH, LL_INFO100, "COMPlusNestedExceptionHandler: PopExInfo(): popping nested ExInfo at 0x%p\n", pPrevNestedInfo));
            pPrevNestedInfo->DestroyExceptionHandle();
            pPrevNestedInfo->m_StackTraceInfo.FreeStackTrace();
#ifdef DEBUGGING_SUPPORTED
            if (g_pDebugInterface != NULL)
            {
                g_pDebugInterface->DeleteInterceptContext(pPrevNestedInfo->m_DebuggerExState.GetDebuggerInterceptContext());
            }
#endif // DEBUGGING_SUPPORTED
            pExInfo->m_pPrevNestedInfo = pPrevNestedInfo->m_pPrevNestedInfo;
        } else {
            // The whacky setjmp/longjmp case.  Nothing to do.
        }
    } else {
        LOG((LF_EH, LL_INFO100, "    InCOMPlusNestedHandler with %x at %x\n", pExceptionRecord->ExceptionCode,
            pContext ? GetIP(pContext) : 0));
    }
    // There is a nasty "gotcha" in the way exception unwinding, finally's, and nested exceptions
    // interact.  Here's the scenario ... it involves two exceptions, one normal one, and one
    // raised in a finally.
    //
    // The first exception occurs, and is caught by some handler way up the stack.  That handler
    // calls RtlUnwind -- and handlers that didn't catch this first exception are called again, with
    // the UNWIND flag set.  If, one of the handlers throws an exception during
    // unwind (like, a throw from a finally) -- then that same handler is not called during
    // the unwind pass of the second exception.  [ASIDE: It is called on first-pass.]
    //
    // What that means is -- the COMPlusExceptionHandler, can't count on unwinding itself correctly
    // if an exception is thrown from a finally.  Instead, it relies on the NestedExceptionHandler
    // that it pushes for this.
    //
    EXCEPTION_DISPOSITION retval = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler);
    LOG((LF_EH, LL_INFO100, "Leaving COMPlusNestedExceptionHandler with %d\n", retval));
    return retval;
}
// Walks the SEH registration chain starting at pEstablisherFrame and returns
// the first record whose handler is COMPlusNestedExceptionHandler. The chain
// is expected to contain one; debug builds assert if the end is reached.
EXCEPTION_REGISTRATION_RECORD *FindNestedEstablisherFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame)
{
    LIMITED_METHOD_CONTRACT;

    for (; pEstablisherFrame->Handler != (PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler;
           pEstablisherFrame = pEstablisherFrame->Next)
    {
        _ASSERTE(pEstablisherFrame->Next != EXCEPTION_CHAIN_END);   // should always find one
    }
    return pEstablisherFrame;
}
// Personality routine installed on the fast NExport (reverse P/Invoke) path.
// It simply defers to COMPlusFrameHandler; the debug-only code below tracks
// whether an exception is escaping the last CLR handler on the stack.
EXCEPTION_HANDLER_IMPL(FastNExportExceptHandler)
{
    WRAPPER_NO_CONTRACT;
    // Most of our logic is in common with COMPlusFrameHandler.
    EXCEPTION_DISPOSITION retval = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler);
#ifdef _DEBUG
    // If the exception is escaping the last CLR personality routine on the stack,
    // then state a flag on the thread to indicate so.
    if (retval == ExceptionContinueSearch)
    {
        SetReversePInvokeEscapingUnhandledExceptionStatus(IS_UNWINDING(pExceptionRecord->ExceptionFlags), pEstablisherFrame);
    }
#endif // _DEBUG
    return retval;
}
#ifdef FEATURE_COMINTEROP
// The reverse COM interop path needs to be sure to pop the ComMethodFrame that is pushed, but we do not want
// to have an additional FS:0 handler between the COM callsite and the call into managed. So we push this
// FS:0 handler, which will defer to the usual COMPlusFrameHandler and then perform the cleanup of the
// ComMethodFrame, if needed.
EXCEPTION_HANDLER_IMPL(COMPlusFrameHandlerRevCom)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_ANY;

    // All of the real exception processing is done by the regular CLR handler.
    EXCEPTION_DISPOSITION disposition = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler);

    // On the unwind pass, clean up the ComMethodFrame pushed by the reverse
    // COM interop call site (see the comment above this handler).
    if (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND))
    {
        ComMethodFrame::DoSecondPassHandlerCleanup(GetCurrFrame(pEstablisherFrame));
    }

    return disposition;
}
#endif // FEATURE_COMINTEROP
#endif // !DACCESS_COMPILE
#endif // !FEATURE_EH_FUNCLETS
// Recovers the CONTEXT pointer that a redirection stub stored in its stack
// frame, at a fixed offset (REDIRECTSTUB_EBP_OFFSET_CONTEXT) from the EBP
// value captured in pContext.
PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(CONTEXT * pContext)
{
    LIMITED_METHOD_DAC_CONTRACT;

    TADDR slotAddress = (TADDR)(pContext->Ebp + REDIRECTSTUB_EBP_OFFSET_CONTEXT);
    return *dac_cast<PTR_PTR_CONTEXT>(slotAddress);
}
#ifndef DACCESS_COMPILE
// Vectored-style handler that lets the CLR inspect an exception without ever
// catching it: any EXCEPTION_EXECUTE_HANDLER result is downgraded to
// EXCEPTION_CONTINUE_SEARCH so the exception keeps propagating.
LONG CLRNoCatchHandler(EXCEPTION_POINTERS* pExceptionInfo, PVOID pv)
{
#ifndef FEATURE_EH_FUNCLETS
    WRAPPER_NO_CONTRACT;
    STATIC_CONTRACT_ENTRY_POINT;

    // This function can be called during the handling of a SO
    //BEGIN_ENTRYPOINT_VOIDRET;

    LONG disposition = CLRVectoredExceptionHandler(pExceptionInfo);
    if (disposition == EXCEPTION_EXECUTE_HANDLER)
    {
        // We never actually execute a handler here.
        disposition = EXCEPTION_CONTINUE_SEARCH;
    }

    //END_ENTRYPOINT_VOIDRET;

    return disposition;
#else // !FEATURE_EH_FUNCLETS
    return EXCEPTION_CONTINUE_SEARCH;
#endif // !FEATURE_EH_FUNCLETS
}
// Returns TRUE if caller should resume execution.
//
// Called when a hardware exception occurs inside a virtual call stub
// (dispatch or resolve stub). If the faulting instruction is the expected one
// that dereferences 'this' (typically null — TODO confirm against the stub
// emitters), the context is rewritten so the fault appears to have occurred
// at the managed call site: IP is set to the return address, any stub-pushed
// state is rolled back, and ESP is restored to its pre-call value.
BOOL
AdjustContextForVirtualStub(
        EXCEPTION_RECORD *pExceptionRecord,
        CONTEXT *pContext)
{
    LIMITED_METHOD_CONTRACT;
    Thread * pThread = GetThreadNULLOk();
    // We may not have a managed thread object. Example is an AV on the helper thread.
    // (perhaps during StubManager::IsStub)
    if (pThread == NULL)
    {
        return FALSE;
    }
    PCODE f_IP = GetIP(pContext);
    VirtualCallStubManager::StubKind sk;
    VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(f_IP, &sk);
    if (sk == VirtualCallStubManager::SK_DISPATCH)
    {
        // The only instruction in a dispatch stub expected to fault is the
        // "cmp [ecx], imm32" that checks the expected MethodTable.
        if (*PTR_WORD(f_IP) != X86_INSTR_CMP_IND_ECX_IMM32)
        {
            _ASSERTE(!"AV in DispatchStub at unknown instruction");
            return FALSE;
        }
    }
    else
    if (sk == VirtualCallStubManager::SK_RESOLVE)
    {
        // The only instruction in a resolve stub expected to fault is the
        // "mov eax, [ecx]" load; the stub has already pushed eax at that
        // point, so undo that push below.
        if (*PTR_WORD(f_IP) != X86_INSTR_MOV_EAX_ECX_IND)
        {
            _ASSERTE(!"AV in ResolveStub at unknown instruction");
            return FALSE;
        }
        SetSP(pContext, dac_cast<PCODE>(dac_cast<PTR_BYTE>(GetSP(pContext)) + sizeof(void*))); // rollback push eax
    }
    else
    {
        return FALSE;
    }
    // With stub-pushed state rolled back, the top of the stack is the return
    // address of the original call: report the fault there.
    PCODE callsite = *dac_cast<PTR_PCODE>(GetSP(pContext));
    if (pExceptionRecord != NULL)
    {
        pExceptionRecord->ExceptionAddress = (PVOID)callsite;
    }
    SetIP(pContext, callsite);
#if defined(GCCOVER_TOLERATE_SPURIOUS_AV)
    // Modify LastAVAddress saved in thread to distinguish between fake & real AV
    // See comments in IsGcMarker in file excep.cpp for more details
    pThread->SetLastAVAddress((LPVOID)GetIP(pContext));
#endif // defined(GCCOVER_TOLERATE_SPURIOUS_AV)
    // put ESP back to what it was before the call.
    TADDR sp = GetSP(pContext) + sizeof(void*);
#ifndef UNIX_X86_ABI
    // set the ESP to what it would be after the call (remove pushed arguments)
    size_t stackArgumentsSize;
    if (sk == VirtualCallStubManager::SK_DISPATCH)
    {
        ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
        // Recover the target method from the dispatch token so we can ask it
        // how many bytes of stack arguments the callee would have popped.
        DispatchHolder *holder = DispatchHolder::FromDispatchEntry(f_IP);
        MethodTable *pMT = (MethodTable*)holder->stub()->expectedMT();
        DispatchToken token(VirtualCallStubManager::GetTokenFromStubQuick(pMgr, f_IP, sk));
        MethodDesc* pMD = VirtualCallStubManager::GetRepresentativeMethodDescFromToken(token, pMT);
        stackArgumentsSize = pMD->SizeOfArgStack();
    }
    else
    {
        // Compute the stub entry address from the address of failure (location of dereferencing of "this" pointer)
        ResolveHolder *holder = ResolveHolder::FromResolveEntry(f_IP - ResolveStub::offsetOfThisDeref());
        stackArgumentsSize = holder->stub()->stackArgumentsSize();
    }
    sp += stackArgumentsSize;
#endif // UNIX_X86_ABI
    SetSP(pContext, dac_cast<PCODE>(dac_cast<PTR_BYTE>(sp)));
    return TRUE;
}
#endif // !DACCESS_COMPILE
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/native/public/mono/metadata/metadata.h | /**
* \file
*/
#ifndef __MONO_METADATA_H__
#define __MONO_METADATA_H__
#include <mono/metadata/details/metadata-types.h>
MONO_BEGIN_DECLS
#define MONO_TYPE_ISSTRUCT(t) mono_type_is_struct (t)
#define MONO_TYPE_IS_VOID(t) mono_type_is_void (t)
#define MONO_TYPE_IS_POINTER(t) mono_type_is_pointer (t)
#define MONO_TYPE_IS_REFERENCE(t) mono_type_is_reference (t)
#define MONO_CLASS_IS_INTERFACE(c) ((mono_class_get_flags (c) & TYPE_ATTRIBUTE_INTERFACE) || mono_type_is_generic_parameter (mono_class_get_type (c)))
#define MONO_CLASS_IS_IMPORT(c) ((mono_class_get_flags (c) & TYPE_ATTRIBUTE_IMPORT))
/*
* This macro is used to extract the size of the table encoded in
* the size_bitfield of MonoTableInfo.
*/
#define mono_metadata_table_size(bitfield,table) ((((bitfield) >> ((table)*2)) & 0x3) + 1)
#define mono_metadata_table_count(bitfield) ((bitfield) >> 24)
#define MONO_OFFSET_IN_CLAUSE(clause,offset) \
((clause)->try_offset <= (offset) && (offset) < ((clause)->try_offset + (clause)->try_len))
#define MONO_OFFSET_IN_HANDLER(clause,offset) \
((clause)->handler_offset <= (offset) && (offset) < ((clause)->handler_offset + (clause)->handler_len))
#define MONO_OFFSET_IN_FILTER(clause,offset) \
((clause)->flags == MONO_EXCEPTION_CLAUSE_FILTER && (clause)->data.filter_offset <= (offset) && (offset) < ((clause)->handler_offset))
/*
* Makes a token based on a table and an index
*/
#define mono_metadata_make_token(table,idx) (((table) << 24)| (idx))
/*
* Returns the table index that this token encodes.
*/
#define mono_metadata_token_table(token) ((token) >> 24)
/*
* Returns the index that a token refers to
*/
#define mono_metadata_token_index(token) ((token) & 0xffffff)
#define mono_metadata_token_code(token) ((token) & 0xff000000)
#define MONO_API_FUNCTION(ret,name,args) MONO_API ret name args;
#include <mono/metadata/details/metadata-functions.h>
#undef MONO_API_FUNCTION
MONO_END_DECLS
#endif /* __MONO_METADATA_H__ */
| /**
* \file
*/
#ifndef __MONO_METADATA_H__
#define __MONO_METADATA_H__
#include <mono/metadata/details/metadata-types.h>
MONO_BEGIN_DECLS
#define MONO_TYPE_ISSTRUCT(t) mono_type_is_struct (t)
#define MONO_TYPE_IS_VOID(t) mono_type_is_void (t)
#define MONO_TYPE_IS_POINTER(t) mono_type_is_pointer (t)
#define MONO_TYPE_IS_REFERENCE(t) mono_type_is_reference (t)
#define MONO_CLASS_IS_INTERFACE(c) ((mono_class_get_flags (c) & TYPE_ATTRIBUTE_INTERFACE) || mono_type_is_generic_parameter (mono_class_get_type (c)))
#define MONO_CLASS_IS_IMPORT(c) ((mono_class_get_flags (c) & TYPE_ATTRIBUTE_IMPORT))
/*
* This macro is used to extract the size of the table encoded in
* the size_bitfield of MonoTableInfo.
*/
#define mono_metadata_table_size(bitfield,table) ((((bitfield) >> ((table)*2)) & 0x3) + 1)
#define mono_metadata_table_count(bitfield) ((bitfield) >> 24)
#define MONO_OFFSET_IN_CLAUSE(clause,offset) \
((clause)->try_offset <= (offset) && (offset) < ((clause)->try_offset + (clause)->try_len))
#define MONO_OFFSET_IN_HANDLER(clause,offset) \
((clause)->handler_offset <= (offset) && (offset) < ((clause)->handler_offset + (clause)->handler_len))
#define MONO_OFFSET_IN_FILTER(clause,offset) \
((clause)->flags == MONO_EXCEPTION_CLAUSE_FILTER && (clause)->data.filter_offset <= (offset) && (offset) < ((clause)->handler_offset))
/*
* Makes a token based on a table and an index
*/
#define mono_metadata_make_token(table,idx) (((table) << 24)| (idx))
/*
* Returns the table index that this token encodes.
*/
#define mono_metadata_token_table(token) ((token) >> 24)
/*
* Returns the index that a token refers to
*/
#define mono_metadata_token_index(token) ((token) & 0xffffff)
#define mono_metadata_token_code(token) ((token) & 0xff000000)
#define MONO_API_FUNCTION(ret,name,args) MONO_API ret name args;
#include <mono/metadata/details/metadata-functions.h>
#undef MONO_API_FUNCTION
MONO_END_DECLS
#endif /* __MONO_METADATA_H__ */
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/tests/palsuite/c_runtime/swprintf/test13/test13.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test13.c
**
** Purpose: Tests swprintf with hex numbers (uppercase)
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../swprintf.h"
/*
* Uses memcmp & wcslen
*/
PALTEST(c_runtime_swprintf_test13_paltest_swprintf_test13, "c_runtime/swprintf/test13/paltest_swprintf_test13")
{
int neg = -42;
int pos = 0x1234ab;
INT64 l = I64(0x1234567887654321);
if (PAL_Initialize(argc, argv) != 0)
{
return FAIL;
}
DoNumTest(convert("foo %X"), pos, convert("foo 1234AB"));
DoNumTest(convert("foo %lX"), pos, convert("foo 1234AB"));
DoNumTest(convert("foo %hX"), pos, convert("foo 34AB"));
DoNumTest(convert("foo %LX"), pos, convert("foo 1234AB"));
DoI64Test(convert("foo %I64X"), l, "0X1234567887654321",
convert("foo 1234567887654321"));
DoNumTest(convert("foo %7X"), pos, convert("foo 1234AB"));
DoNumTest(convert("foo %-7X"), pos, convert("foo 1234AB "));
DoNumTest(convert("foo %.1X"), pos, convert("foo 1234AB"));
DoNumTest(convert("foo %.7X"), pos, convert("foo 01234AB"));
DoNumTest(convert("foo %07X"), pos, convert("foo 01234AB"));
DoNumTest(convert("foo %#X"), pos, convert("foo 0X1234AB"));
DoNumTest(convert("foo %+X"), pos, convert("foo 1234AB"));
DoNumTest(convert("foo % X"), pos, convert("foo 1234AB"));
DoNumTest(convert("foo %+X"), neg, convert("foo FFFFFFD6"));
DoNumTest(convert("foo % X"), neg, convert("foo FFFFFFD6"));
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test13.c
**
** Purpose: Tests swprintf with hex numbers (uppercase)
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../swprintf.h"
/*
* Uses memcmp & wcslen
*/
PALTEST(c_runtime_swprintf_test13_paltest_swprintf_test13, "c_runtime/swprintf/test13/paltest_swprintf_test13")
{
int neg = -42;
int pos = 0x1234ab;
INT64 l = I64(0x1234567887654321);
if (PAL_Initialize(argc, argv) != 0)
{
return FAIL;
}
DoNumTest(convert("foo %X"), pos, convert("foo 1234AB"));
DoNumTest(convert("foo %lX"), pos, convert("foo 1234AB"));
DoNumTest(convert("foo %hX"), pos, convert("foo 34AB"));
DoNumTest(convert("foo %LX"), pos, convert("foo 1234AB"));
DoI64Test(convert("foo %I64X"), l, "0X1234567887654321",
convert("foo 1234567887654321"));
DoNumTest(convert("foo %7X"), pos, convert("foo 1234AB"));
DoNumTest(convert("foo %-7X"), pos, convert("foo 1234AB "));
DoNumTest(convert("foo %.1X"), pos, convert("foo 1234AB"));
DoNumTest(convert("foo %.7X"), pos, convert("foo 01234AB"));
DoNumTest(convert("foo %07X"), pos, convert("foo 01234AB"));
DoNumTest(convert("foo %#X"), pos, convert("foo 0X1234AB"));
DoNumTest(convert("foo %+X"), pos, convert("foo 1234AB"));
DoNumTest(convert("foo % X"), pos, convert("foo 1234AB"));
DoNumTest(convert("foo %+X"), neg, convert("foo FFFFFFD6"));
DoNumTest(convert("foo % X"), neg, convert("foo FFFFFFD6"));
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/oft16.txt | <?xml version="1.0" encoding="utf-8"?>Hello, world! | <?xml version="1.0" encoding="utf-8"?>Hello, world! | -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/tests/palsuite/composite/object_management/mutex/nonshared/main.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Source Code: main.c and mutex.c
** main.c creates process and waits for all processes to get over
** mutex.c creates a mutex and then calls threads which will contend for the mutex
**
** This test is for Object Management Test case for Mutex where Object type is not shareable.
** Algorithm
** o Create PROCESS_COUNT processes.
** o Main Thread of each process creates OBJECT_TYPE Object
**
** Author: ShamitP
**============================================================
*/
#include <palsuite.h>
#include "resulttime.h"
/* Test Input Variables */
/* Default run configuration; each value is overridden by the corresponding
 * positional command-line argument parsed in GetParameters(). */
unsigned int PROCESS_COUNT = 2;    /* number of child processes to spawn */
unsigned int THREAD_COUNT = 20;    /* threads per child contending for the mutex */
unsigned int REPEAT_COUNT = 4000;  /* acquire/release iterations per thread */
unsigned int RELATION_ID = 1001;   /* id correlating the output files of one run */
/* One CSV row written to the statistics file at the end of the run. */
struct TestStats{
    DWORD operationTime;           /* total wall-clock duration of the run */
    unsigned int relationId;
    unsigned int processCount;
    unsigned int threadCount;
    unsigned int repeatCount;
    char* buildNumber;             /* obtained from getBuildNumber() */
};
/* Parse the four positional command-line arguments into the global test
 * configuration (PROCESS_COUNT, THREAD_COUNT, REPEAT_COUNT, RELATION_ID).
 * Returns 0 on success, -1 when usage was requested or a value is invalid. */
int GetParameters( int argc, char **argv)
{
    /* Show usage on a wrong argument count or an explicit help flag.
     * NOTE(review): because "argc != 5" is tested first and || short-circuits,
     * the "/?", "/h" and "/H" comparisons are only evaluated when argc == 5,
     * so argv[1] is always valid here (no NULL dereference) — but the help
     * flags are consequently dead unless exactly four arguments are given. */
    if( (argc != 5) || ((argc == 1) && !strcmp(argv[1],"/?"))
        || !strcmp(argv[1],"/h") || !strcmp(argv[1],"/H"))
    {
        printf("PAL -Composite Object Management Mutex Test\n");
        printf("Usage:\n");
        printf("main\n\t[PROCESS_COUNT [greater than 1] \n");
        printf("\t[THREAD_COUNT [greater than 1] \n");
        printf("\t[REPEAT_COUNT [greater than 1]\n");
        printf("\t[RELATION_ID [greater than 1]\n");
        return -1;
    }
    /* PROCESS_COUNT is unsigned, so a negative atoi() result wraps to a huge
     * value and is rejected by the upper-bound check. */
    PROCESS_COUNT = atoi(argv[1]);
    if( (PROCESS_COUNT < 1) || (PROCESS_COUNT > MAXIMUM_WAIT_OBJECTS) )
    {
        printf("\nMain Process:Invalid PROCESS_COUNT number, Pass greater than 1 and less than PROCESS_COUNT %d\n", MAXIMUM_WAIT_OBJECTS);
        return -1;
    }
    THREAD_COUNT = atoi(argv[2]);
    if( (THREAD_COUNT < 1) || (THREAD_COUNT > MAXIMUM_WAIT_OBJECTS) )
    {
        printf("\nInvalid THREAD_COUNT number, Pass greater than 1 and less than %d\n", MAXIMUM_WAIT_OBJECTS);
        return -1;
    }
    REPEAT_COUNT = atoi(argv[3]);
    if( REPEAT_COUNT < 1)
    {
        printf("\nMain Process:Invalid REPEAT_COUNT number, Pass greater than 1\n");
        return -1;
    }
    RELATION_ID = atoi(argv[4]);
    if( RELATION_ID < 1)
    {
        printf("\nMain Process:Invalid RELATION_ID number, Pass greater than 1\n");
        return -1;
    }
    return 0;
}
/*
 * Test entry point (main process).  Spawns PROCESS_COUNT child processes
 * running the "mutex" executable, waits for all of them to exit, folds each
 * child's exit code into the overall pass/fail result, and appends one line
 * of timing statistics to main_mutex_<RELATION_ID>_.txt.
 */
PALTEST(composite_object_management_mutex_nonshared_paltest_mutex_nonshared, "composite/object_management/mutex/nonshared/paltest_mutex_nonshared")
{
    unsigned int i = 0;
    HANDLE hProcess[MAXIMUM_WAIT_OBJECTS];     /* child process handles to wait on */
    HANDLE hMutexHandle[MAXIMUM_WAIT_OBJECTS]; /* NOTE(review): never used in this function */
    STARTUPINFO si[MAXIMUM_WAIT_OBJECTS];
    PROCESS_INFORMATION pi[MAXIMUM_WAIT_OBJECTS];
    const char *ObjName = "Mutex";             /* only used in error messages */
    char lpCommandLine[MAX_PATH] = "";
    int returnCode = 0;
    DWORD processReturnCode = 0;
    int testReturnCode = PASS;                 /* sticky: any failure flips it to FAIL */
    char fileName[MAX_PATH];
    FILE *pFile = NULL;                        /* statistics output file */
    DWORD dwStartTime;
    struct TestStats testStats;
    if(0 != (PAL_Initialize(argc, argv)))
    {
        return ( FAIL );
    }
    if(GetParameters(argc, argv))
    {
        Fail("Error in obtaining the parameters\n");
    }
    /* Register the start time */
    dwStartTime = GetTickCount();
    /* Record the run configuration for the stats row written at the end. */
    testStats.relationId = RELATION_ID;
    testStats.processCount = PROCESS_COUNT;
    testStats.threadCount = THREAD_COUNT;
    testStats.repeatCount = REPEAT_COUNT;
    testStats.buildNumber = getBuildNumber();
    _snprintf(fileName, MAX_PATH, "main_mutex_%d_.txt", RELATION_ID);
    pFile = fopen(fileName, "w+");
    if(pFile == NULL)
    {
        Fail("Error in opening main file for write\n");
    }
    /* Launch one child per requested process; each child receives its index,
     * the thread/repeat counts, and the relation id on its command line. */
    for( i = 0; i < PROCESS_COUNT; i++ )
    {
        ZeroMemory( lpCommandLine, MAX_PATH );
        if ( _snprintf( lpCommandLine, MAX_PATH-1, "mutex %d %d %d %d", i, THREAD_COUNT, REPEAT_COUNT, RELATION_ID) < 0 )
        {
            Fail("Error Insufficient mutex name string length for %s for iteration [%d]\n", ObjName, i);
        }
        /* Zero the data structure space */
        ZeroMemory ( &pi[i], sizeof(pi[i]) );
        ZeroMemory ( &si[i], sizeof(si[i]) );
        /* Set the process flags and standard io handles */
        si[i].cb = sizeof(si[i]);
        if(!CreateProcess( NULL, /* lpApplicationName*/
                            lpCommandLine, /* lpCommandLine */
                            NULL, /* lpProcessAttributes */
                            NULL, /* lpThreadAttributes */
                            TRUE, /* bInheritHandles */
                            0, /* dwCreationFlags, */
                            NULL, /* lpEnvironment */
                            NULL, /* pCurrentDirectory */
                            &si[i], /* lpStartupInfo */
                            &pi[i] /* lpProcessInformation */
                            ))
        {
            Fail("Process Not created for [%d], the error code is [%d]\n", i, GetLastError());
        }
        else
        {
            hProcess[i] = pi[i].hProcess;
            // Trace("Process created for [%d]\n", i);
        }
        //Create Process
    }
    /* Block until every child has exited (bWaitAll == TRUE, infinite timeout).
     * With bWaitAll, the success return value is exactly WAIT_OBJECT_0. */
    returnCode = WaitForMultipleObjects( PROCESS_COUNT, hProcess, TRUE, INFINITE);
    if( WAIT_OBJECT_0 != returnCode )
    {
        Trace("Wait for Object(s) @ Main thread for %d processes returned %d, and GetLastError value is %d\n", PROCESS_COUNT, returnCode, GetLastError());
        testReturnCode = FAIL;
    }
    /* Collect each child's exit code and release its handles. */
    for( i = 0; i < PROCESS_COUNT; i++ )
    {
        /* check the exit code from the process */
        if( ! GetExitCodeProcess( pi[i].hProcess, &processReturnCode ) )
        {
            Trace( "GetExitCodeProcess call failed for iteration %d with error code %u\n",
                i, GetLastError() );
            testReturnCode = FAIL;
        }
        /* NOTE(review): processReturnCode is inspected even when
         * GetExitCodeProcess failed above, in which case it still holds its
         * previous (or initial 0) value. */
        if(processReturnCode == FAIL)
        {
            Trace( "Process [%d] failed and returned FAIL\n", i);
            testReturnCode = FAIL;
        }
        if(!CloseHandle(pi[i].hThread))
        {
            Trace("Error:%d: CloseHandle failed for Process [%d] hThread\n", GetLastError(), i);
            testReturnCode = FAIL;
        }
        if(!CloseHandle(pi[i].hProcess) )
        {
            Trace("Error:%d: CloseHandle failed for Process [%d] hProcess\n", GetLastError(), i);
            testReturnCode = FAIL;
        }
    }
    /* Append one CSV row: elapsed,relation,processes,threads,repeats,build. */
    testStats.operationTime = GetTimeDiff(dwStartTime);
    fprintf(pFile, "%d,%d,%d,%d,%d,%s\n", testStats.operationTime, testStats.relationId, testStats.processCount, testStats.threadCount, testStats.repeatCount, testStats.buildNumber);
    if(fclose(pFile))
    {
        Trace("Error: fclose failed for pFile\n");
        testReturnCode = FAIL;
    }
    if( testReturnCode == PASS)
    {
        Trace("Test Passed\n");
    }
    else
    {
        Trace("Test Failed\n");
    }
    PAL_Terminate();
    return testReturnCode;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Source Code: main.c and mutex.c
** main.c creates process and waits for all processes to get over
** mutex.c creates a mutex and then calls threads which will contend for the mutex
**
** This test is for Object Management Test case for Mutex where Object type is not shareable.
** Algorithm
** o Create PROCESS_COUNT processes.
** o Main Thread of each process creates OBJECT_TYPE Object
**
** Author: ShamitP
**============================================================
*/
#include <palsuite.h>
#include "resulttime.h"
/* Test Input Variables */
/* Default run configuration; overridden by the command-line arguments
 * parsed in GetParameters(). */
unsigned int PROCESS_COUNT = 2;    /* child processes to create */
unsigned int THREAD_COUNT = 20;    /* mutex-contending threads per child */
unsigned int REPEAT_COUNT = 4000;  /* iterations per thread */
unsigned int RELATION_ID = 1001;   /* correlates output files of a single run */
/* Statistics row emitted to the results file when the run completes. */
struct TestStats{
    DWORD operationTime;           /* total elapsed time of the run */
    unsigned int relationId;
    unsigned int processCount;
    unsigned int threadCount;
    unsigned int repeatCount;
    char* buildNumber;             /* from getBuildNumber() */
};
/*
 * Read the four positional arguments into the global test configuration
 * (PROCESS_COUNT, THREAD_COUNT, REPEAT_COUNT, RELATION_ID).
 * Returns 0 when every value parsed and validated, -1 when usage was
 * requested or any value is out of range.
 */
int GetParameters( int argc, char **argv)
{
    /* Usage is requested on a wrong argument count or a help flag. */
    int wantUsage = (argc != 5)
        || ((argc == 1) && !strcmp(argv[1], "/?"))
        || !strcmp(argv[1], "/h")
        || !strcmp(argv[1], "/H");
    if (wantUsage)
    {
        printf("PAL -Composite Object Management Mutex Test\n");
        printf("Usage:\n");
        printf("main\n\t[PROCESS_COUNT [greater than 1] \n");
        printf("\t[THREAD_COUNT [greater than 1] \n");
        printf("\t[REPEAT_COUNT [greater than 1]\n");
        printf("\t[RELATION_ID [greater than 1]\n");
        return -1;
    }
    /* Each value is validated immediately after conversion; the first
     * invalid one aborts parsing. */
    PROCESS_COUNT = atoi(argv[1]);
    if (PROCESS_COUNT < 1 || PROCESS_COUNT > MAXIMUM_WAIT_OBJECTS)
    {
        printf("\nMain Process:Invalid PROCESS_COUNT number, Pass greater than 1 and less than PROCESS_COUNT %d\n", MAXIMUM_WAIT_OBJECTS);
        return -1;
    }
    THREAD_COUNT = atoi(argv[2]);
    if (THREAD_COUNT < 1 || THREAD_COUNT > MAXIMUM_WAIT_OBJECTS)
    {
        printf("\nInvalid THREAD_COUNT number, Pass greater than 1 and less than %d\n", MAXIMUM_WAIT_OBJECTS);
        return -1;
    }
    REPEAT_COUNT = atoi(argv[3]);
    if (REPEAT_COUNT < 1)
    {
        printf("\nMain Process:Invalid REPEAT_COUNT number, Pass greater than 1\n");
        return -1;
    }
    RELATION_ID = atoi(argv[4]);
    if (RELATION_ID < 1)
    {
        printf("\nMain Process:Invalid RELATION_ID number, Pass greater than 1\n");
        return -1;
    }
    return 0;
}
/*
 * Test entry point (main process).  Spawns PROCESS_COUNT child processes
 * running the "mutex" executable, waits for all of them to exit, folds each
 * child's exit code into the overall pass/fail result, and appends one line
 * of timing statistics to main_mutex_<RELATION_ID>_.txt.
 *
 * Fixes vs. the previous revision:
 *  - removed the unused hMutexHandle[] local;
 *  - the child's exit code is only inspected when GetExitCodeProcess
 *    succeeded (previously a stale/initial value could be reported).
 */
PALTEST(composite_object_management_mutex_nonshared_paltest_mutex_nonshared, "composite/object_management/mutex/nonshared/paltest_mutex_nonshared")
{
    unsigned int i = 0;
    HANDLE hProcess[MAXIMUM_WAIT_OBJECTS];  /* child process handles to wait on */
    STARTUPINFO si[MAXIMUM_WAIT_OBJECTS];
    PROCESS_INFORMATION pi[MAXIMUM_WAIT_OBJECTS];
    const char *ObjName = "Mutex";          /* only used in error messages */
    char lpCommandLine[MAX_PATH] = "";
    int returnCode = 0;
    DWORD processReturnCode = 0;
    int testReturnCode = PASS;              /* sticky: any failure flips it to FAIL */
    char fileName[MAX_PATH];
    FILE *pFile = NULL;                     /* statistics output file */
    DWORD dwStartTime;
    struct TestStats testStats;
    if(0 != (PAL_Initialize(argc, argv)))
    {
        return ( FAIL );
    }
    if(GetParameters(argc, argv))
    {
        Fail("Error in obtaining the parameters\n");
    }
    /* Register the start time */
    dwStartTime = GetTickCount();
    /* Record the run configuration for the stats row written at the end. */
    testStats.relationId = RELATION_ID;
    testStats.processCount = PROCESS_COUNT;
    testStats.threadCount = THREAD_COUNT;
    testStats.repeatCount = REPEAT_COUNT;
    testStats.buildNumber = getBuildNumber();
    _snprintf(fileName, MAX_PATH, "main_mutex_%d_.txt", RELATION_ID);
    pFile = fopen(fileName, "w+");
    if(pFile == NULL)
    {
        Fail("Error in opening main file for write\n");
    }
    /* Launch one child per requested process; each child receives its index,
     * the thread/repeat counts, and the relation id on its command line. */
    for( i = 0; i < PROCESS_COUNT; i++ )
    {
        ZeroMemory( lpCommandLine, MAX_PATH );
        if ( _snprintf( lpCommandLine, MAX_PATH-1, "mutex %d %d %d %d", i, THREAD_COUNT, REPEAT_COUNT, RELATION_ID) < 0 )
        {
            Fail("Error Insufficient mutex name string length for %s for iteration [%d]\n", ObjName, i);
        }
        /* Zero the data structure space */
        ZeroMemory ( &pi[i], sizeof(pi[i]) );
        ZeroMemory ( &si[i], sizeof(si[i]) );
        /* Set the process flags and standard io handles */
        si[i].cb = sizeof(si[i]);
        if(!CreateProcess( NULL, /* lpApplicationName*/
                            lpCommandLine, /* lpCommandLine */
                            NULL, /* lpProcessAttributes */
                            NULL, /* lpThreadAttributes */
                            TRUE, /* bInheritHandles */
                            0, /* dwCreationFlags, */
                            NULL, /* lpEnvironment */
                            NULL, /* pCurrentDirectory */
                            &si[i], /* lpStartupInfo */
                            &pi[i] /* lpProcessInformation */
                            ))
        {
            Fail("Process Not created for [%d], the error code is [%d]\n", i, GetLastError());
        }
        else
        {
            hProcess[i] = pi[i].hProcess;
        }
    }
    /* Block until every child has exited (bWaitAll == TRUE, infinite timeout).
     * With bWaitAll, the success return value is exactly WAIT_OBJECT_0. */
    returnCode = WaitForMultipleObjects( PROCESS_COUNT, hProcess, TRUE, INFINITE);
    if( WAIT_OBJECT_0 != returnCode )
    {
        Trace("Wait for Object(s) @ Main thread for %d processes returned %d, and GetLastError value is %d\n", PROCESS_COUNT, returnCode, GetLastError());
        testReturnCode = FAIL;
    }
    /* Collect each child's exit code and release its handles. */
    for( i = 0; i < PROCESS_COUNT; i++ )
    {
        /* Only inspect processReturnCode when GetExitCodeProcess succeeded;
         * otherwise the variable still holds its previous/initial value. */
        if( ! GetExitCodeProcess( pi[i].hProcess, &processReturnCode ) )
        {
            Trace( "GetExitCodeProcess call failed for iteration %d with error code %u\n",
                i, GetLastError() );
            testReturnCode = FAIL;
        }
        else if(processReturnCode == FAIL)
        {
            Trace( "Process [%d] failed and returned FAIL\n", i);
            testReturnCode = FAIL;
        }
        if(!CloseHandle(pi[i].hThread))
        {
            Trace("Error:%d: CloseHandle failed for Process [%d] hThread\n", GetLastError(), i);
            testReturnCode = FAIL;
        }
        if(!CloseHandle(pi[i].hProcess) )
        {
            Trace("Error:%d: CloseHandle failed for Process [%d] hProcess\n", GetLastError(), i);
            testReturnCode = FAIL;
        }
    }
    /* Append one CSV row: elapsed,relation,processes,threads,repeats,build. */
    testStats.operationTime = GetTimeDiff(dwStartTime);
    fprintf(pFile, "%d,%d,%d,%d,%d,%s\n", testStats.operationTime, testStats.relationId, testStats.processCount, testStats.threadCount, testStats.repeatCount, testStats.buildNumber);
    if(fclose(pFile))
    {
        Trace("Error: fclose failed for pFile\n");
        testReturnCode = FAIL;
    }
    if( testReturnCode == PASS)
    {
        Trace("Test Passed\n");
    }
    else
    {
        Trace("Test Failed\n");
    }
    PAL_Terminate();
    return testReturnCode;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/mono/mono/sgen/sgen-client.h | /**
* \file
* SGen client interface.
*
* Copyright (C) 2014 Xamarin Inc
*
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "mono/sgen/sgen-pointer-queue.h"
/*
* Init whatever needs initing. This is called relatively early in SGen initialization.
 * Must initialize the small ID for the current thread.
*/
void sgen_client_init (void);
/*
* The slow path for getting an object's size. We're passing in the vtable because we've
* already fetched it.
*/
mword sgen_client_slow_object_get_size (GCVTable vtable, GCObject* o);
/*
* Fill the given range with a dummy object. If the range is too short to be filled with an
* object, null it. Return `TRUE` if the range was filled with an object, `FALSE` if it was
* nulled.
*/
gboolean sgen_client_array_fill_range (char *start, size_t size);
/*
 * This is called if the nursery clearing policy is `clear-at-gc`, which is usually only
 * used for debugging. If `size` is large enough for the memory to have been filled with a
 * dummy object, zero its header. Note that there might not actually be a header there.
*/
void sgen_client_zero_array_fill_header (void *p, size_t size);
/*
* Return whether the given object is an array fill dummy object.
*/
gboolean sgen_client_object_is_array_fill (GCObject *o);
/*
* Return whether the given finalizable object's finalizer is critical, i.e., needs to run
* after all non-critical finalizers have run.
*/
gboolean sgen_client_object_has_critical_finalizer (GCObject *obj);
/*
* Called after an object is enqueued for finalization. This is a very low-level callback.
* It should almost certainly be a NOP.
*
* FIXME: Can we merge this with `sgen_client_object_has_critical_finalizer()`?
*/
void sgen_client_object_queued_for_finalization (GCObject *obj);
/*
* Run the given object's finalizer.
*/
void sgen_client_run_finalize (GCObject *obj);
/*
* Is called after a collection if there are objects to finalize. The world is still
* stopped. This will usually notify the finalizer thread that it needs to run.
*/
void sgen_client_finalize_notify (void);
/*
* Returns TRUE if no ephemerons have been marked. Will be called again if it returned
* FALSE. If ephemerons are not supported, just return TRUE.
*/
gboolean sgen_client_mark_ephemerons (ScanCopyContext ctx)
MONO_PERMIT (need (sgen_gc_locked));
/*
* Clear ephemeron pairs with unreachable keys.
* We pass the copy func so we can figure out if an array was promoted or not.
*/
void sgen_client_clear_unreachable_ephemerons (ScanCopyContext ctx)
MONO_PERMIT (need (sgen_gc_locked));
/*
* May return NULL. Must be an aligned pointer.
*/
gpointer sgen_client_default_metadata (void);
gpointer sgen_client_metadata_for_object (GCObject *obj);
/*
* No action required.
*/
void sgen_client_gchandle_created (int handle_type, GCObject *obj, guint32 handle);
void sgen_client_gchandle_destroyed (int handle_type, guint32 handle);
void sgen_client_ensure_weak_gchandles_accessible (void);
/*
* This is called for objects that are larger than one card. If it's possible to scan only
* parts of the object based on which cards are marked, do so and return TRUE. Otherwise,
* return FALSE.
*/
gboolean sgen_client_cardtable_scan_object (GCObject *obj, guint8 *cards, ScanCopyContext ctx);
/*
* Called after nursery objects have been pinned. No action is necessary.
*/
void sgen_client_nursery_objects_pinned (void **definitely_pinned, int count);
/*
* Called at a semi-random point during minor collections. No action is necessary.
*/
void sgen_client_collecting_minor_report_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue);
/*
* Called at semi-random points during major collections. No action is necessary.
*/
void sgen_client_collecting_major_report_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue);
/*
* Called after a LOS object has been pinned. No action is necessary.
*/
void sgen_client_pinned_los_object (GCObject *obj);
/*
* Called for each cemented obj
*/
void sgen_client_pinned_cemented_object (GCObject *obj);
/*
* Called for each major heap obj pinned
*/
void sgen_client_pinned_major_heap_object (GCObject *obj);
void sgen_client_pinning_start (void);
void sgen_client_pinning_end (void);
/*
* Called for every degraded allocation. No action is necessary.
*/
void sgen_client_degraded_allocation (void);
/*
* Called whenever the amount of memory allocated for the managed heap changes. No action
* is necessary.
*/
void sgen_client_total_allocated_heap_changed (size_t allocated_heap_size);
/*
* If the client has registered any internal memory types, this must return a string
* describing the given type. Only used for debugging.
*/
const char* sgen_client_description_for_internal_mem_type (int type);
/*
* Only used for debugging. `sgen_client_vtable_get_namespace()` may return NULL.
*/
gboolean sgen_client_vtable_is_inited (GCVTable vtable);
const char* sgen_client_vtable_get_namespace (GCVTable vtable);
const char* sgen_client_vtable_get_name (GCVTable vtable);
/*
* Called before starting collections. The world is already stopped. No action is
* necessary.
*/
void sgen_client_pre_collection_checks (void);
/*
* Must set the thread's thread info to `info`. If the thread's small ID was not already
* initialized in `sgen_client_init()` (for the main thread, usually), it must be done here.
*
* `stack_bottom_fallback` is the value passed through via `sgen_thread_attach()`.
*/
void sgen_client_thread_attach (SgenThreadInfo* info);
void sgen_client_thread_detach_with_lock (SgenThreadInfo *p);
/*
* Called on each worker thread when it starts up. Must initialize the thread's small ID.
*/
void sgen_client_thread_register_worker (void);
/*
* The least this function needs to do is scan all registers and thread stacks. To do this
* conservatively, use `sgen_conservatively_pin_objects_from()`.
*/
void sgen_client_scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, ScanCopyContext ctx);
/*
* Stop and restart the world, i.e., all threads that interact with the managed heap. For
* single-threaded programs this is a nop.
*/
void sgen_client_stop_world (int generation, gboolean serial_collection)
MONO_PERMIT (need (sgen_gc_locked));
void sgen_client_restart_world (int generation, gboolean serial_collection, gint64 *stw_time)
MONO_PERMIT (need (sgen_gc_locked));
/*
* Must return FALSE. The bridge is not supported outside of Mono.
*/
gboolean sgen_client_bridge_need_processing (void);
/*
* None of these should ever be called.
*/
void sgen_client_bridge_reset_data (void);
void sgen_client_bridge_processing_stw_step (void);
void sgen_client_bridge_wait_for_processing (void);
void sgen_client_bridge_processing_finish (int generation);
gboolean sgen_client_bridge_is_bridge_object (GCObject *obj);
void sgen_client_bridge_register_finalized_object (GCObject *object);
#ifndef DISABLE_SGEN_TOGGLEREF
/*
 * No action is necessary.
 */
void sgen_client_mark_togglerefs (char *start, char *end, ScanCopyContext ctx);
void sgen_client_clear_togglerefs (char *start, char *end, ScanCopyContext ctx);
void sgen_foreach_toggleref_root (void (*callback)(MonoObject*, gpointer), gpointer data);
#else
/* Toggleref support compiled out: provide empty inline stubs so callers do
 * not need their own #ifdef guards. */
static inline void sgen_client_mark_togglerefs (char *start, char *end, ScanCopyContext ctx) { }
static inline void sgen_client_clear_togglerefs (char *start, char *end, ScanCopyContext ctx) { }
static inline void sgen_foreach_toggleref_root (void (*callback)(MonoObject*, gpointer), gpointer data) { }
#endif
/*
* Called to handle `MONO_GC_PARAMS` and `MONO_GC_DEBUG` options. The `handle` functions
* must return TRUE if they have recognized and processed the option, FALSE otherwise.
*/
gboolean sgen_client_handle_gc_param (const char *opt);
void sgen_client_print_gc_params_usage (void);
gboolean sgen_client_handle_gc_debug (const char *opt);
void sgen_client_print_gc_debug_usage (void);
/*
* Called to obtain an identifier for the current location, such as a method pointer. This
* is used for logging the provenances of allocations with the heavy binary protocol.
*/
gpointer sgen_client_get_provenance (void);
/*
* Called by the debugging infrastructure to describe pointers that have an invalid vtable.
* Should usually print to `stdout`.
*/
void sgen_client_describe_invalid_pointer (GCObject *ptr);
/*
* Return the weak bitmap for a class
*/
gsize *sgen_client_get_weak_bitmap (GCVTable vt, int *nbits);
/*
 * Schedule @cb to be invoked later in the background.
 *
 * This function is idempotent WRT background execution, meaning that calling it multiple times with the same function pointer before any bg execution happens will only call @cb once.
*/
void sgen_client_schedule_background_job (void (*cb)(void));
/*
* These client binary protocol functions are called from the respective binary protocol
* functions. No action is necessary. We suggest implementing them as inline functions in
* the client header file so that no overhead is incurred if they don't actually do
* anything.
*/
#define TYPE_INT int
#define TYPE_LONGLONG long long
#define TYPE_SIZE size_t
#define TYPE_POINTER gpointer
#define TYPE_BOOL gboolean
#define BEGIN_PROTOCOL_ENTRY0(method) \
void sgen_client_ ## method (void);
#define BEGIN_PROTOCOL_ENTRY_HEAVY0(method) \
void sgen_client_ ## method (void);
#define BEGIN_PROTOCOL_ENTRY1(method,t1,f1) \
void sgen_client_ ## method (t1 f1);
#define BEGIN_PROTOCOL_ENTRY_HEAVY1(method,t1,f1) \
void sgen_client_ ## method (t1 f1);
#define BEGIN_PROTOCOL_ENTRY2(method,t1,f1,t2,f2) \
void sgen_client_ ## method (t1 f1, t2 f2);
#define BEGIN_PROTOCOL_ENTRY_HEAVY2(method,t1,f1,t2,f2) \
void sgen_client_ ## method (t1 f1, t2 f2);
#define BEGIN_PROTOCOL_ENTRY3(method,t1,f1,t2,f2,t3,f3) \
void sgen_client_ ## method (t1 f1, t2 f2, t3 f3);
#define BEGIN_PROTOCOL_ENTRY_HEAVY3(method,t1,f1,t2,f2,t3,f3) \
void sgen_client_ ## method (t1 f1, t2 f2, t3 f3);
#define BEGIN_PROTOCOL_ENTRY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4);
#define BEGIN_PROTOCOL_ENTRY_HEAVY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4);
#define BEGIN_PROTOCOL_ENTRY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5);
#define BEGIN_PROTOCOL_ENTRY_HEAVY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5);
#define BEGIN_PROTOCOL_ENTRY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5, t6 f6);
#define BEGIN_PROTOCOL_ENTRY_HEAVY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5, t6 f6);
#define DEFAULT_PRINT()
#define CUSTOM_PRINT(_)
#define IS_ALWAYS_MATCH(_)
#define MATCH_INDEX(_)
#define IS_VTABLE_MATCH(_)
#define END_PROTOCOL_ENTRY
#define END_PROTOCOL_ENTRY_FLUSH
#define END_PROTOCOL_ENTRY_HEAVY
#include "sgen-protocol-def.h"
#undef TYPE_INT
#undef TYPE_LONGLONG
#undef TYPE_SIZE
#undef TYPE_POINTER
#undef TYPE_BOOL
#ifdef SGEN_WITHOUT_MONO
/*
* Get the current thread's thread info. This will only be called on managed threads.
*/
SgenThreadInfo* mono_thread_info_current (void);
/*
* Get the current thread's small ID. This will be called on managed and worker threads.
*/
int mono_thread_info_get_small_id (void);
#endif
| /**
* \file
* SGen client interface.
*
* Copyright (C) 2014 Xamarin Inc
*
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "mono/sgen/sgen-pointer-queue.h"
/*
* Init whatever needs initing. This is called relatively early in SGen initialization.
 * Must initialize the small ID for the current thread.
*/
void sgen_client_init (void);
/*
* The slow path for getting an object's size. We're passing in the vtable because we've
* already fetched it.
*/
mword sgen_client_slow_object_get_size (GCVTable vtable, GCObject* o);
/*
* Fill the given range with a dummy object. If the range is too short to be filled with an
* object, null it. Return `TRUE` if the range was filled with an object, `FALSE` if it was
* nulled.
*/
gboolean sgen_client_array_fill_range (char *start, size_t size);
/*
 * This is called if the nursery clearing policy is `clear-at-gc`, which is usually only
 * used for debugging. If `size` is large enough for the memory to have been filled with a
 * dummy object, zero its header. Note that there might not actually be a header there.
*/
void sgen_client_zero_array_fill_header (void *p, size_t size);
/*
* Return whether the given object is an array fill dummy object.
*/
gboolean sgen_client_object_is_array_fill (GCObject *o);
/*
* Return whether the given finalizable object's finalizer is critical, i.e., needs to run
* after all non-critical finalizers have run.
*/
gboolean sgen_client_object_has_critical_finalizer (GCObject *obj);
/*
* Called after an object is enqueued for finalization. This is a very low-level callback.
* It should almost certainly be a NOP.
*
* FIXME: Can we merge this with `sgen_client_object_has_critical_finalizer()`?
*/
void sgen_client_object_queued_for_finalization (GCObject *obj);
/*
* Run the given object's finalizer.
*/
void sgen_client_run_finalize (GCObject *obj);
/*
* Is called after a collection if there are objects to finalize. The world is still
* stopped. This will usually notify the finalizer thread that it needs to run.
*/
void sgen_client_finalize_notify (void);
/*
* Returns TRUE if no ephemerons have been marked. Will be called again if it returned
* FALSE. If ephemerons are not supported, just return TRUE.
*/
gboolean sgen_client_mark_ephemerons (ScanCopyContext ctx)
MONO_PERMIT (need (sgen_gc_locked));
/*
* Clear ephemeron pairs with unreachable keys.
* We pass the copy func so we can figure out if an array was promoted or not.
*/
void sgen_client_clear_unreachable_ephemerons (ScanCopyContext ctx)
MONO_PERMIT (need (sgen_gc_locked));
/*
* May return NULL. Must be an aligned pointer.
*/
gpointer sgen_client_default_metadata (void);
gpointer sgen_client_metadata_for_object (GCObject *obj);
/*
* No action required.
*/
void sgen_client_gchandle_created (int handle_type, GCObject *obj, guint32 handle);
void sgen_client_gchandle_destroyed (int handle_type, guint32 handle);
void sgen_client_ensure_weak_gchandles_accessible (void);
/*
* This is called for objects that are larger than one card. If it's possible to scan only
* parts of the object based on which cards are marked, do so and return TRUE. Otherwise,
* return FALSE.
*/
gboolean sgen_client_cardtable_scan_object (GCObject *obj, guint8 *cards, ScanCopyContext ctx);
/*
* Called after nursery objects have been pinned. No action is necessary.
*/
void sgen_client_nursery_objects_pinned (void **definitely_pinned, int count);
/*
* Called at a semi-random point during minor collections. No action is necessary.
*/
void sgen_client_collecting_minor_report_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue);
/*
* Called at semi-random points during major collections. No action is necessary.
*/
void sgen_client_collecting_major_report_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue);
/*
* Called after a LOS object has been pinned. No action is necessary.
*/
void sgen_client_pinned_los_object (GCObject *obj);
/*
* Called for each cemented obj
*/
void sgen_client_pinned_cemented_object (GCObject *obj);
/*
* Called for each major heap obj pinned
*/
void sgen_client_pinned_major_heap_object (GCObject *obj);
void sgen_client_pinning_start (void);
void sgen_client_pinning_end (void);
/*
* Called for every degraded allocation. No action is necessary.
*/
void sgen_client_degraded_allocation (void);
/*
* Called whenever the amount of memory allocated for the managed heap changes. No action
* is necessary.
*/
void sgen_client_total_allocated_heap_changed (size_t allocated_heap_size);
/*
* If the client has registered any internal memory types, this must return a string
* describing the given type. Only used for debugging.
*/
const char* sgen_client_description_for_internal_mem_type (int type);
/*
* Only used for debugging. `sgen_client_vtable_get_namespace()` may return NULL.
*/
gboolean sgen_client_vtable_is_inited (GCVTable vtable);
const char* sgen_client_vtable_get_namespace (GCVTable vtable);
const char* sgen_client_vtable_get_name (GCVTable vtable);
/*
* Called before starting collections. The world is already stopped. No action is
* necessary.
*/
void sgen_client_pre_collection_checks (void);
/*
* Must set the thread's thread info to `info`. If the thread's small ID was not already
* initialized in `sgen_client_init()` (for the main thread, usually), it must be done here.
*
* `stack_bottom_fallback` is the value passed through via `sgen_thread_attach()`.
*/
void sgen_client_thread_attach (SgenThreadInfo* info);
void sgen_client_thread_detach_with_lock (SgenThreadInfo *p);
/*
* Called on each worker thread when it starts up. Must initialize the thread's small ID.
*/
void sgen_client_thread_register_worker (void);
/*
* The least this function needs to do is scan all registers and thread stacks. To do this
* conservatively, use `sgen_conservatively_pin_objects_from()`.
*/
void sgen_client_scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, ScanCopyContext ctx);
/*
* Stop and restart the world, i.e., all threads that interact with the managed heap. For
* single-threaded programs this is a nop.
*/
void sgen_client_stop_world (int generation, gboolean serial_collection)
MONO_PERMIT (need (sgen_gc_locked));
void sgen_client_restart_world (int generation, gboolean serial_collection, gint64 *stw_time)
MONO_PERMIT (need (sgen_gc_locked));
/*
* Must return FALSE. The bridge is not supported outside of Mono.
*/
gboolean sgen_client_bridge_need_processing (void);
/*
* None of these should ever be called.
*/
void sgen_client_bridge_reset_data (void);
void sgen_client_bridge_processing_stw_step (void);
void sgen_client_bridge_wait_for_processing (void);
void sgen_client_bridge_processing_finish (int generation);
gboolean sgen_client_bridge_is_bridge_object (GCObject *obj);
void sgen_client_bridge_register_finalized_object (GCObject *object);
#ifndef DISABLE_SGEN_TOGGLEREF
/*
 * No action is necessary.
 */
void sgen_client_mark_togglerefs (char *start, char *end, ScanCopyContext ctx);
void sgen_client_clear_togglerefs (char *start, char *end, ScanCopyContext ctx);
void sgen_foreach_toggleref_root (void (*callback)(MonoObject*, gpointer), gpointer data);
#else
/* Toggleref support compiled out: empty inline stubs keep call sites free of
 * #ifdef guards. */
static inline void sgen_client_mark_togglerefs (char *start, char *end, ScanCopyContext ctx) { }
static inline void sgen_client_clear_togglerefs (char *start, char *end, ScanCopyContext ctx) { }
static inline void sgen_foreach_toggleref_root (void (*callback)(MonoObject*, gpointer), gpointer data) { }
#endif
/*
* Called to handle `MONO_GC_PARAMS` and `MONO_GC_DEBUG` options. The `handle` functions
* must return TRUE if they have recognized and processed the option, FALSE otherwise.
*/
gboolean sgen_client_handle_gc_param (const char *opt);
void sgen_client_print_gc_params_usage (void);
gboolean sgen_client_handle_gc_debug (const char *opt);
void sgen_client_print_gc_debug_usage (void);
/*
* Called to obtain an identifier for the current location, such as a method pointer. This
* is used for logging the provenances of allocations with the heavy binary protocol.
*/
gpointer sgen_client_get_provenance (void);
/*
* Called by the debugging infrastructure to describe pointers that have an invalid vtable.
* Should usually print to `stdout`.
*/
void sgen_client_describe_invalid_pointer (GCObject *ptr);
/*
* Return the weak bitmap for a class
*/
gsize *sgen_client_get_weak_bitmap (GCVTable vt, int *nbits);
/*
 * Schedule @cb to be invoked later in the background.
 *
 * This function is idempotent WRT background execution, meaning that calling it multiple times with the same function pointer before any bg execution happens will only call @cb once.
*/
void sgen_client_schedule_background_job (void (*cb)(void));
/*
* These client binary protocol functions are called from the respective binary protocol
* functions. No action is necessary. We suggest implementing them as inline functions in
* the client header file so that no overhead is incurred if they don't actually do
* anything.
*/
#define TYPE_INT int
#define TYPE_LONGLONG long long
#define TYPE_SIZE size_t
#define TYPE_POINTER gpointer
#define TYPE_BOOL gboolean
#define BEGIN_PROTOCOL_ENTRY0(method) \
void sgen_client_ ## method (void);
#define BEGIN_PROTOCOL_ENTRY_HEAVY0(method) \
void sgen_client_ ## method (void);
#define BEGIN_PROTOCOL_ENTRY1(method,t1,f1) \
void sgen_client_ ## method (t1 f1);
#define BEGIN_PROTOCOL_ENTRY_HEAVY1(method,t1,f1) \
void sgen_client_ ## method (t1 f1);
#define BEGIN_PROTOCOL_ENTRY2(method,t1,f1,t2,f2) \
void sgen_client_ ## method (t1 f1, t2 f2);
#define BEGIN_PROTOCOL_ENTRY_HEAVY2(method,t1,f1,t2,f2) \
void sgen_client_ ## method (t1 f1, t2 f2);
#define BEGIN_PROTOCOL_ENTRY3(method,t1,f1,t2,f2,t3,f3) \
void sgen_client_ ## method (t1 f1, t2 f2, t3 f3);
#define BEGIN_PROTOCOL_ENTRY_HEAVY3(method,t1,f1,t2,f2,t3,f3) \
void sgen_client_ ## method (t1 f1, t2 f2, t3 f3);
#define BEGIN_PROTOCOL_ENTRY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4);
#define BEGIN_PROTOCOL_ENTRY_HEAVY4(method,t1,f1,t2,f2,t3,f3,t4,f4) \
void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4);
#define BEGIN_PROTOCOL_ENTRY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5);
#define BEGIN_PROTOCOL_ENTRY_HEAVY5(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5) \
void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5);
#define BEGIN_PROTOCOL_ENTRY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5, t6 f6);
#define BEGIN_PROTOCOL_ENTRY_HEAVY6(method,t1,f1,t2,f2,t3,f3,t4,f4,t5,f5,t6,f6) \
void sgen_client_ ## method (t1 f1, t2 f2, t3 f3, t4 f4, t5 f5, t6 f6);
#define DEFAULT_PRINT()
#define CUSTOM_PRINT(_)
#define IS_ALWAYS_MATCH(_)
#define MATCH_INDEX(_)
#define IS_VTABLE_MATCH(_)
#define END_PROTOCOL_ENTRY
#define END_PROTOCOL_ENTRY_FLUSH
#define END_PROTOCOL_ENTRY_HEAVY
#include "sgen-protocol-def.h"
#undef TYPE_INT
#undef TYPE_LONGLONG
#undef TYPE_SIZE
#undef TYPE_POINTER
#undef TYPE_BOOL
#ifdef SGEN_WITHOUT_MONO
/*
* Get the current thread's thread info. This will only be called on managed threads.
*/
SgenThreadInfo* mono_thread_info_current (void);
/*
* Get the current thread's small ID. This will be called on managed and worker threads.
*/
int mono_thread_info_get_small_id (void);
#endif
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/sft48.txt | Microsoft (R) XSLT Compiler version 2.0.61009
for Microsoft (R) Windows (R) 2005 Framework version 2.0.50727
Copyright (C) Microsoft Corporation 2007. All rights reserved.
fatal error : Unrecognized option: '/DTD-'.
| Microsoft (R) XSLT Compiler version 2.0.61009
for Microsoft (R) Windows (R) 2005 Framework version 2.0.50727
Copyright (C) Microsoft Corporation 2007. All rights reserved.
fatal error : Unrecognized option: '/DTD-'.
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/src/libunwind/src/sh/Gis_signal_frame.c | /* libunwind - a platform-independent unwind library
Copyright (C) 2012 Tommi Rantala <[email protected]>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "unwind_i.h"
/* Disassembly of the Linux VDSO sigreturn functions:
00000000 <__kernel_sigreturn>:
0: 05 93 mov.w e <__kernel_sigreturn+0xe>,r3 ! 77
2: 10 c3 trapa #16
4: 0b 20 or r0,r0
6: 0b 20 or r0,r0
8: 0b 20 or r0,r0
a: 0b 20 or r0,r0
c: 0b 20 or r0,r0
e: 77 00 .word 0x0077
10: 09 00 nop
12: 09 00 nop
14: 09 00 nop
16: 09 00 nop
18: 09 00 nop
1a: 09 00 nop
1c: 09 00 nop
1e: 09 00 nop
00000020 <__kernel_rt_sigreturn>:
20: 05 93 mov.w 2e <__kernel_rt_sigreturn+0xe>,r3 ! ad
22: 10 c3 trapa #16
24: 0b 20 or r0,r0
26: 0b 20 or r0,r0
28: 0b 20 or r0,r0
2a: 0b 20 or r0,r0
2c: 0b 20 or r0,r0
2e: ad 00 mov.w @(r0,r10),r0
30: 09 00 nop
32: 09 00 nop
34: 09 00 nop
36: 09 00 nop
38: 09 00 nop
3a: 09 00 nop
3c: 09 00 nop
3e: 09 00 nop
*/
int
unw_is_signal_frame (unw_cursor_t *cursor)
{
#ifdef __linux__
struct cursor *c = (struct cursor *) cursor;
unw_word_t w0, ip;
unw_addr_space_t as;
unw_accessors_t *a;
void *arg;
int ret;
as = c->dwarf.as;
a = unw_get_accessors_int (as);
arg = c->dwarf.as_arg;
ip = c->dwarf.ip;
ret = (*a->access_mem) (as, ip, &w0, 0, arg);
if (ret < 0)
return ret;
if (w0 != 0xc3109305)
return 0;
ret = (*a->access_mem) (as, ip+4, &w0, 0, arg);
if (ret < 0)
return ret;
if (w0 != 0x200b200b)
return 0;
ret = (*a->access_mem) (as, ip+8, &w0, 0, arg);
if (ret < 0)
return ret;
if (w0 != 0x200b200b)
return 0;
ret = (*a->access_mem) (as, ip+12, &w0, 0, arg);
if (ret < 0)
return ret;
if (w0 == 0x0077200b)
return 1; /* non-RT */
else if (w0 == 0x00ad200b)
return 2; /* RT */
/* does not look like a signal frame */
return 0;
#else
return -UNW_ENOINFO;
#endif
}
| /* libunwind - a platform-independent unwind library
Copyright (C) 2012 Tommi Rantala <[email protected]>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "unwind_i.h"
/* Disassembly of the Linux VDSO sigreturn functions:
00000000 <__kernel_sigreturn>:
0: 05 93 mov.w e <__kernel_sigreturn+0xe>,r3 ! 77
2: 10 c3 trapa #16
4: 0b 20 or r0,r0
6: 0b 20 or r0,r0
8: 0b 20 or r0,r0
a: 0b 20 or r0,r0
c: 0b 20 or r0,r0
e: 77 00 .word 0x0077
10: 09 00 nop
12: 09 00 nop
14: 09 00 nop
16: 09 00 nop
18: 09 00 nop
1a: 09 00 nop
1c: 09 00 nop
1e: 09 00 nop
00000020 <__kernel_rt_sigreturn>:
20: 05 93 mov.w 2e <__kernel_rt_sigreturn+0xe>,r3 ! ad
22: 10 c3 trapa #16
24: 0b 20 or r0,r0
26: 0b 20 or r0,r0
28: 0b 20 or r0,r0
2a: 0b 20 or r0,r0
2c: 0b 20 or r0,r0
2e: ad 00 mov.w @(r0,r10),r0
30: 09 00 nop
32: 09 00 nop
34: 09 00 nop
36: 09 00 nop
38: 09 00 nop
3a: 09 00 nop
3c: 09 00 nop
3e: 09 00 nop
*/
int
unw_is_signal_frame (unw_cursor_t *cursor)
{
#ifdef __linux__
struct cursor *c = (struct cursor *) cursor;
unw_word_t w0, ip;
unw_addr_space_t as;
unw_accessors_t *a;
void *arg;
int ret;
as = c->dwarf.as;
a = unw_get_accessors_int (as);
arg = c->dwarf.as_arg;
ip = c->dwarf.ip;
ret = (*a->access_mem) (as, ip, &w0, 0, arg);
if (ret < 0)
return ret;
if (w0 != 0xc3109305)
return 0;
ret = (*a->access_mem) (as, ip+4, &w0, 0, arg);
if (ret < 0)
return ret;
if (w0 != 0x200b200b)
return 0;
ret = (*a->access_mem) (as, ip+8, &w0, 0, arg);
if (ret < 0)
return ret;
if (w0 != 0x200b200b)
return 0;
ret = (*a->access_mem) (as, ip+12, &w0, 0, arg);
if (ret < 0)
return ret;
if (w0 == 0x0077200b)
return 1; /* non-RT */
else if (w0 == 0x00ad200b)
return 2; /* RT */
/* does not look like a signal frame */
return 0;
#else
return -UNW_ENOINFO;
#endif
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/tools/superpmi/superpmi-shared/methodcontextiterator.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "simpletimer.h"
// Class to implement method context hive reading and iterating.
class MethodContextIterator
{
public:
MethodContextIterator(bool progressReport = false)
: m_hFile(INVALID_HANDLE_VALUE)
, m_fileSize(0)
, m_methodContextNumber(0)
, m_mc(nullptr)
, m_indexCount(-1)
, m_index(0)
, m_indexes(nullptr)
, m_progressReport(progressReport)
, m_progressRate(1000)
, m_timer(nullptr)
{
if (m_progressReport)
{
m_timer = new SimpleTimer();
}
}
MethodContextIterator(const int indexCount, const int* indexes, bool progressReport = false)
: m_hFile(INVALID_HANDLE_VALUE)
, m_fileSize(0)
, m_methodContextNumber(0)
, m_mc(nullptr)
, m_indexCount(indexCount)
, m_index(0)
, m_indexes(indexes)
, m_progressReport(progressReport)
, m_progressRate(1000)
, m_timer(nullptr)
{
if (m_progressReport)
{
m_timer = new SimpleTimer();
}
}
~MethodContextIterator()
{
Destroy();
}
bool Initialize(const char* fileName);
bool Destroy();
bool MoveNext();
// The iterator class owns the memory returned by Current(); the caller should not delete it.
MethodContext* Current()
{
return m_mc;
}
// In this case, we are giving ownership of the MethodContext* to the caller. So, null out m_mc
// before we return, so we don't attempt to delete it in this class.
MethodContext* CurrentTakeOwnership()
{
MethodContext* ret = m_mc;
m_mc = nullptr;
return ret;
}
// Return the file position offset of the current method context.
__int64 CurrentPos()
{
return m_pos.QuadPart;
}
int MethodContextNumber()
{
return m_methodContextNumber;
}
private:
HANDLE m_hFile;
int64_t m_fileSize;
int m_methodContextNumber;
MethodContext* m_mc;
LARGE_INTEGER m_pos;
// If m_indexCount==-1, use all method contexts. Otherwise, m_indexCount is the number of elements in the
// m_indexes array, which contains a sorted set of method context indexes to return. In this case, m_index
// is the index of the current element in m_indexes.
const int m_indexCount;
int m_index;
const int* m_indexes;
// Should we log a progress report as we are loading the method contexts?
// The timer is only used when m_progressReport==true.
bool m_progressReport;
const int m_progressRate; // Report progress every `m_progressRate` method contexts.
SimpleTimer* m_timer;
};
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "simpletimer.h"
// Class to implement method context hive reading and iterating.
class MethodContextIterator
{
public:
MethodContextIterator(bool progressReport = false)
: m_hFile(INVALID_HANDLE_VALUE)
, m_fileSize(0)
, m_methodContextNumber(0)
, m_mc(nullptr)
, m_indexCount(-1)
, m_index(0)
, m_indexes(nullptr)
, m_progressReport(progressReport)
, m_progressRate(1000)
, m_timer(nullptr)
{
if (m_progressReport)
{
m_timer = new SimpleTimer();
}
}
MethodContextIterator(const int indexCount, const int* indexes, bool progressReport = false)
: m_hFile(INVALID_HANDLE_VALUE)
, m_fileSize(0)
, m_methodContextNumber(0)
, m_mc(nullptr)
, m_indexCount(indexCount)
, m_index(0)
, m_indexes(indexes)
, m_progressReport(progressReport)
, m_progressRate(1000)
, m_timer(nullptr)
{
if (m_progressReport)
{
m_timer = new SimpleTimer();
}
}
~MethodContextIterator()
{
Destroy();
}
bool Initialize(const char* fileName);
bool Destroy();
bool MoveNext();
// The iterator class owns the memory returned by Current(); the caller should not delete it.
MethodContext* Current()
{
return m_mc;
}
// In this case, we are giving ownership of the MethodContext* to the caller. So, null out m_mc
// before we return, so we don't attempt to delete it in this class.
MethodContext* CurrentTakeOwnership()
{
MethodContext* ret = m_mc;
m_mc = nullptr;
return ret;
}
// Return the file position offset of the current method context.
__int64 CurrentPos()
{
return m_pos.QuadPart;
}
int MethodContextNumber()
{
return m_methodContextNumber;
}
private:
HANDLE m_hFile;
int64_t m_fileSize;
int m_methodContextNumber;
MethodContext* m_mc;
LARGE_INTEGER m_pos;
// If m_indexCount==-1, use all method contexts. Otherwise, m_indexCount is the number of elements in the
// m_indexes array, which contains a sorted set of method context indexes to return. In this case, m_index
// is the index of the current element in m_indexes.
const int m_indexCount;
int m_index;
const int* m_indexes;
// Should we log a progress report as we are loading the method contexts?
// The timer is only used when m_progressReport==true.
bool m_progressReport;
const int m_progressRate; // Report progress every `m_progressRate` method contexts.
SimpleTimer* m_timer;
};
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/mono/mono/unit-tests/test-mono-embed.c | /*
* test-mono-embed.c: Unit test for embed mono.
*/
#define _TESTCASE_
#include <mono/jit/jit.h>
#include <embed/teste.c>
| /*
* test-mono-embed.c: Unit test for embed mono.
*/
#define _TESTCASE_
#include <mono/jit/jit.h>
#include <embed/teste.c>
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/mono/dlls/mscordbi/cordb-blocking-obj.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: CORDB-BLOCKING-OBJ.CPP
//
#include <cordb-blocking-obj.h>
#include <cordb.h>
CordbBlockingObjectEnum::CordbBlockingObjectEnum(Connection* conn) : CordbBaseMono(conn) {}
HRESULT STDMETHODCALLTYPE CordbBlockingObjectEnum::Next(ULONG celt,
CorDebugBlockingObject values[],
ULONG* pceltFetched)
{
LOG((LF_CORDB, LL_INFO100000, "CordbBlockingObjectEnum - Next - NOT IMPLEMENTED\n"));
return E_NOTIMPL;
}
HRESULT STDMETHODCALLTYPE CordbBlockingObjectEnum::Skip(ULONG celt)
{
LOG((LF_CORDB, LL_INFO100000, "CordbBlockingObjectEnum - Skip - NOT IMPLEMENTED\n"));
return E_NOTIMPL;
}
HRESULT STDMETHODCALLTYPE CordbBlockingObjectEnum::Reset(void)
{
LOG((LF_CORDB, LL_INFO100000, "CordbBlockingObjectEnum - Reset - NOT IMPLEMENTED\n"));
return E_NOTIMPL;
}
HRESULT STDMETHODCALLTYPE CordbBlockingObjectEnum::Clone(ICorDebugEnum** ppEnum)
{
LOG((LF_CORDB, LL_INFO100000, "CordbBlockingObjectEnum - Clone - NOT IMPLEMENTED\n"));
return E_NOTIMPL;
}
HRESULT STDMETHODCALLTYPE CordbBlockingObjectEnum::GetCount(ULONG* pcelt)
{
pcelt = 0;
return S_OK;
}
HRESULT STDMETHODCALLTYPE CordbBlockingObjectEnum::QueryInterface(REFIID id, void** ppInterface)
{
if (id == IID_ICorDebugBlockingObjectEnum)
*ppInterface = (ICorDebugBlockingObjectEnum*)this;
else if (id == IID_IUnknown)
*ppInterface = (IUnknown*)(ICorDebugBlockingObjectEnum*)this;
LOG((LF_CORDB, LL_INFO100000, "CordbBlockingObjectEnum - QueryInterface - IMPLEMENTED\n"));
AddRef();
return S_OK;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: CORDB-BLOCKING-OBJ.CPP
//
#include <cordb-blocking-obj.h>
#include <cordb.h>
CordbBlockingObjectEnum::CordbBlockingObjectEnum(Connection* conn) : CordbBaseMono(conn) {}
HRESULT STDMETHODCALLTYPE CordbBlockingObjectEnum::Next(ULONG celt,
CorDebugBlockingObject values[],
ULONG* pceltFetched)
{
LOG((LF_CORDB, LL_INFO100000, "CordbBlockingObjectEnum - Next - NOT IMPLEMENTED\n"));
return E_NOTIMPL;
}
HRESULT STDMETHODCALLTYPE CordbBlockingObjectEnum::Skip(ULONG celt)
{
LOG((LF_CORDB, LL_INFO100000, "CordbBlockingObjectEnum - Skip - NOT IMPLEMENTED\n"));
return E_NOTIMPL;
}
HRESULT STDMETHODCALLTYPE CordbBlockingObjectEnum::Reset(void)
{
LOG((LF_CORDB, LL_INFO100000, "CordbBlockingObjectEnum - Reset - NOT IMPLEMENTED\n"));
return E_NOTIMPL;
}
HRESULT STDMETHODCALLTYPE CordbBlockingObjectEnum::Clone(ICorDebugEnum** ppEnum)
{
LOG((LF_CORDB, LL_INFO100000, "CordbBlockingObjectEnum - Clone - NOT IMPLEMENTED\n"));
return E_NOTIMPL;
}
HRESULT STDMETHODCALLTYPE CordbBlockingObjectEnum::GetCount(ULONG* pcelt)
{
pcelt = 0;
return S_OK;
}
HRESULT STDMETHODCALLTYPE CordbBlockingObjectEnum::QueryInterface(REFIID id, void** ppInterface)
{
if (id == IID_ICorDebugBlockingObjectEnum)
*ppInterface = (ICorDebugBlockingObjectEnum*)this;
else if (id == IID_IUnknown)
*ppInterface = (IUnknown*)(ICorDebugBlockingObjectEnum*)this;
LOG((LF_CORDB, LL_INFO100000, "CordbBlockingObjectEnum - QueryInterface - IMPLEMENTED\n"));
AddRef();
return S_OK;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/tests/palsuite/c_runtime/printf/printf.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: printf.h
**
** Purpose: Containts common testing functions for printf
**
**
**==========================================================================*/
#ifndef __printf_H__
#define __printf_H__
inline void DoStrTest_printf(const char *formatstr, char* param, const char *checkstr)
{
int ret;
ret = printf(formatstr, param);
if (ret != strlen(checkstr))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr), ret);
}
}
#define DoStrTest DoStrTest_printf
inline void DoWStrTest_printf(const char *formatstr, WCHAR* param, const char *checkstr)
{
int ret;
ret = printf(formatstr, param);
if (ret != strlen(checkstr))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr), ret);
}
}
#define DoWStrTest DoWStrTest_printf
inline void DoPointerTest_printf(const char *formatstr, void* param, char* paramstr,
const char *checkstr1)
{
int ret;
ret = printf(formatstr, param);
if (ret != strlen(checkstr1))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr1), ret);
}
}
#define DoPointerTest DoPointerTest_printf
inline void DoCountTest_printf(const char *formatstr, int param, const char *checkstr)
{
int ret;
int n = -1;
ret = printf(formatstr, &n);
if (n != param)
{
Fail("Expected count parameter to resolve to %d, got %d\n", param, n);
}
if (ret != strlen(checkstr))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr), ret);
}
}
#define DoCountTest DoCountTest_printf
inline void DoShortCountTest_printf(const char *formatstr, int param, const char *checkstr)
{
int ret;
short int n = -1;
ret = printf(formatstr, &n);
if (n != param)
{
Fail("Expected count parameter to resolve to %d, got %d\n", param, n);
}
if (ret != strlen(checkstr))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr), ret);
}
}
#define DoShortCountTest DoShortCountTest_printf
inline void DoCharTest_printf(const char *formatstr, char param, const char *checkstr)
{
int ret;
ret = printf(formatstr, param);
if (ret != strlen(checkstr))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr), ret);
}
}
#define DoCharTest DoCharTest_printf
inline void DoWCharTest_printf(const char *formatstr, WCHAR param, const char *checkstr)
{
int ret;
ret = printf(formatstr, param);
if (ret != strlen(checkstr))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr), ret);
}
}
#define DoWCharTest DoWCharTest_printf
inline void DoNumTest_printf(const char *formatstr, int param, const char *checkstr)
{
int ret;
ret = printf(formatstr, param);
if (ret != strlen(checkstr))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr), ret);
}
}
#define DoNumTest DoNumTest_printf
inline void DoI64Test_printf(const char *formatstr, INT64 param, char *valuestr,
const char *checkstr1)
{
int ret;
ret = printf(formatstr, param);
if (ret != strlen(checkstr1))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr1), ret);
}
}
#define DoI64Test DoI64Test_printf
inline void DoDoubleTest_printf(const char *formatstr, double param,
const char *checkstr1, const char *checkstr2)
{
int ret;
ret = printf(formatstr, param);
if (ret != strlen(checkstr1) && ret != strlen(checkstr2))
{
Fail("Expected printf to return %d or %d, got %d.\n",
strlen(checkstr1), strlen(checkstr2), ret);
}
}
#define DoDoubleTest DoDoubleTest_printf
inline void DoArgumentPrecTest_printf(const char *formatstr, int precision, void *param,
char *paramstr, const char *checkstr1, const char *checkstr2)
{
int ret;
ret = printf(formatstr, precision, param);
if (ret != strlen(checkstr1) && ret != strlen(checkstr2))
{
Fail("Expected printf to return %d or %d, got %d.\n",
strlen(checkstr1), strlen(checkstr2), ret);
}
}
#define DoArgumentPrecTest DoArgumentPrecTest_printf
inline void DoArgumentPrecDoubleTest_printf(const char *formatstr, int precision, double param,
const char *checkstr1, const char *checkstr2)
{
int ret;
ret = printf(formatstr, precision, param);
if (ret != strlen(checkstr1) && ret != strlen(checkstr2))
{
Fail("Expected printf to return %d or %d, got %d.\n",
strlen(checkstr1), strlen(checkstr2), ret);
}
}
#define DoArgumentPrecDoubleTest DoArgumentPrecDoubleTest_printf
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: printf.h
**
** Purpose: Containts common testing functions for printf
**
**
**==========================================================================*/
#ifndef __printf_H__
#define __printf_H__
inline void DoStrTest_printf(const char *formatstr, char* param, const char *checkstr)
{
int ret;
ret = printf(formatstr, param);
if (ret != strlen(checkstr))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr), ret);
}
}
#define DoStrTest DoStrTest_printf
inline void DoWStrTest_printf(const char *formatstr, WCHAR* param, const char *checkstr)
{
int ret;
ret = printf(formatstr, param);
if (ret != strlen(checkstr))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr), ret);
}
}
#define DoWStrTest DoWStrTest_printf
inline void DoPointerTest_printf(const char *formatstr, void* param, char* paramstr,
const char *checkstr1)
{
int ret;
ret = printf(formatstr, param);
if (ret != strlen(checkstr1))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr1), ret);
}
}
#define DoPointerTest DoPointerTest_printf
inline void DoCountTest_printf(const char *formatstr, int param, const char *checkstr)
{
int ret;
int n = -1;
ret = printf(formatstr, &n);
if (n != param)
{
Fail("Expected count parameter to resolve to %d, got %d\n", param, n);
}
if (ret != strlen(checkstr))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr), ret);
}
}
#define DoCountTest DoCountTest_printf
inline void DoShortCountTest_printf(const char *formatstr, int param, const char *checkstr)
{
int ret;
short int n = -1;
ret = printf(formatstr, &n);
if (n != param)
{
Fail("Expected count parameter to resolve to %d, got %d\n", param, n);
}
if (ret != strlen(checkstr))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr), ret);
}
}
#define DoShortCountTest DoShortCountTest_printf
inline void DoCharTest_printf(const char *formatstr, char param, const char *checkstr)
{
int ret;
ret = printf(formatstr, param);
if (ret != strlen(checkstr))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr), ret);
}
}
#define DoCharTest DoCharTest_printf
inline void DoWCharTest_printf(const char *formatstr, WCHAR param, const char *checkstr)
{
int ret;
ret = printf(formatstr, param);
if (ret != strlen(checkstr))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr), ret);
}
}
#define DoWCharTest DoWCharTest_printf
inline void DoNumTest_printf(const char *formatstr, int param, const char *checkstr)
{
int ret;
ret = printf(formatstr, param);
if (ret != strlen(checkstr))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr), ret);
}
}
#define DoNumTest DoNumTest_printf
inline void DoI64Test_printf(const char *formatstr, INT64 param, char *valuestr,
const char *checkstr1)
{
int ret;
ret = printf(formatstr, param);
if (ret != strlen(checkstr1))
{
Fail("Expected printf to return %d, got %d.\n",
strlen(checkstr1), ret);
}
}
#define DoI64Test DoI64Test_printf
inline void DoDoubleTest_printf(const char *formatstr, double param,
const char *checkstr1, const char *checkstr2)
{
int ret;
ret = printf(formatstr, param);
if (ret != strlen(checkstr1) && ret != strlen(checkstr2))
{
Fail("Expected printf to return %d or %d, got %d.\n",
strlen(checkstr1), strlen(checkstr2), ret);
}
}
#define DoDoubleTest DoDoubleTest_printf
/*
 * Formats a pointer-typed value with a runtime-supplied precision ("*"
 * precision specifier) and verifies the printf return value equals the
 * length of one of two acceptable expected outputs.
 *
 * formatstr - format string with a '*' precision consuming (precision, param)
 * precision - precision value passed for the '*' specifier
 * param     - value to format
 * paramstr  - unused; retained so existing call sites keep compiling
 * checkstr1 - first acceptable expected output
 * checkstr2 - second acceptable expected output
 *
 * Calls Fail() (PAL test framework) if neither length matches.
 */
inline void DoArgumentPrecTest_printf(const char *formatstr, int precision, void *param,
    char *paramstr, const char *checkstr1, const char *checkstr2)
{
    int ret;
    ret = printf(formatstr, precision, param);
    /* Cast strlen's size_t results to int: uncast they are signed/unsigned
       compares and %d varargs mismatches in the Fail call (UB on LP64). */
    if (ret != (int)strlen(checkstr1) && ret != (int)strlen(checkstr2))
    {
        Fail("Expected printf to return %d or %d, got %d.\n",
            (int)strlen(checkstr1), (int)strlen(checkstr2), ret);
    }
}
#define DoArgumentPrecTest DoArgumentPrecTest_printf
/*
 * Formats a double with a runtime-supplied precision ("*" precision
 * specifier) and verifies the printf return value equals the length of one
 * of two acceptable expected outputs.
 *
 * formatstr - format string with a '*' precision consuming (precision, param)
 * precision - precision value passed for the '*' specifier
 * param     - double value to format
 * checkstr1 - first acceptable expected output
 * checkstr2 - second acceptable expected output
 *
 * Calls Fail() (PAL test framework) if neither length matches.
 */
inline void DoArgumentPrecDoubleTest_printf(const char *formatstr, int precision, double param,
    const char *checkstr1, const char *checkstr2)
{
    int ret;
    ret = printf(formatstr, precision, param);
    /* Cast strlen's size_t results to int: uncast they are signed/unsigned
       compares and %d varargs mismatches in the Fail call (UB on LP64). */
    if (ret != (int)strlen(checkstr1) && ret != (int)strlen(checkstr2))
    {
        Fail("Expected printf to return %d or %d, got %d.\n",
            (int)strlen(checkstr1), (int)strlen(checkstr2), ret);
    }
}
#define DoArgumentPrecDoubleTest DoArgumentPrecDoubleTest_printf
#endif
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/native/libs/CMakeLists.txt | cmake_minimum_required(VERSION 3.6.2)
include(CheckCCompilerFlag)
if (CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS)
# CMake 3.14.5 contains bug fixes for iOS
cmake_minimum_required(VERSION 3.14.5)
elseif (CLR_CMAKE_TARGET_MACCATALYST)
# CMake 3.18.1 properly generates MacCatalyst C compiler
cmake_minimum_required(VERSION 3.18.1)
endif ()
if (WIN32)
cmake_policy(SET CMP0091 NEW)
else ()
cmake_policy(SET CMP0042 NEW)
endif ()
project(CoreFX C)
include(../../../eng/native/configurepaths.cmake)
include(${CLR_ENG_NATIVE_DIR}/configurecompiler.cmake)
include_directories(${CLR_SRC_NATIVE_DIR})
set(CMAKE_INCLUDE_CURRENT_DIR ON)
if (STATIC_LIBS_ONLY)
# Suppress exporting of the PAL APIs
add_definitions(-DPALEXPORT=EXTERN_C)
set(GEN_SHARED_LIB 0)
set(STATIC_LIB_DESTINATION lib)
else ()
set(GEN_SHARED_LIB 1)
set(STATIC_LIB_DESTINATION .)
endif ()
if (CLR_CMAKE_TARGET_UNIX OR CLR_CMAKE_TARGET_BROWSER)
set(CMAKE_MACOSX_RPATH ON)
if (CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS)
set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)
set(CMAKE_INSTALL_NAME_DIR "@rpath")
endif ()
set(CMAKE_INSTALL_PREFIX $ENV{__CMakeBinDir})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99")
add_compile_options(-I${CMAKE_CURRENT_SOURCE_DIR}/Common)
add_compile_options(-I${CMAKE_CURRENT_BINARY_DIR}/Common)
if (CLR_CMAKE_TARGET_BROWSER)
set(GEN_SHARED_LIB 0)
set(STATIC_LIB_DESTINATION .)
endif ()
if (CLR_CMAKE_TARGET_TVOS)
# with -fembed-bitcode passing -headerpad_max_install_names is not allowed so remove it from the CMake flags
string(REPLACE "-Wl,-headerpad_max_install_names" "" CMAKE_C_LINK_FLAGS ${CMAKE_C_LINK_FLAGS})
string(REPLACE "-Wl,-headerpad_max_install_names" "" CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS ${CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS})
add_compile_options(-fembed-bitcode)
add_link_options(-fembed-bitcode)
endif ()
if (CLR_CMAKE_TARGET_ANDROID)
if (CROSS_ROOTFS)
include_directories(SYSTEM "${CROSS_ROOTFS}/usr/include")
endif ()
endif ()
string(TOUPPER ${CMAKE_BUILD_TYPE} UPPERCASE_CMAKE_BUILD_TYPE)
if (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED)
if (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG)
add_compile_options(-O0)
elseif (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED)
add_compile_options(-O2)
endif ()
add_definitions(-DDEBUG)
# obtain settings from running coreclr\enablesanitizers.sh
string(FIND "$ENV{DEBUG_SANITIZERS}" "asan" __ASAN_POS)
string(FIND "$ENV{DEBUG_SANITIZERS}" "ubsan" __UBSAN_POS)
if (${__ASAN_POS} GREATER -1 OR ${__UBSAN_POS} GREATER -1)
set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS} -fsanitize=")
if (${__ASAN_POS} GREATER -1)
set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS}address,")
message("Address Sanitizer (asan) enabled")
endif ()
if (${__UBSAN_POS} GREATER -1)
set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS}undefined")
message("Undefined Behavior Sanitizer (ubsan) enabled")
endif ()
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${CLR_SANITIZE_LINK_FLAGS}")
# -Wl and --gc-sections: drop unused sections\functions (similar to Windows /Gy function-level-linking)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${CLR_SANITIZE_LINK_FLAGS} -Wl,--gc-sections")
endif ()
elseif (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL RELEASE)
# Use O1 option when the clang version is smaller than 3.9
# Otherwise use O3 option in release build
if (CLR_CMAKE_TARGET_ARCH_ARMV7L AND DEFINED ENV{CROSSCOMPILE} AND CMAKE_C_COMPILER_VERSION VERSION_LESS 3.9)
add_compile_options (-O1)
else ()
add_compile_options (-O3)
endif ()
else ()
message(FATAL_ERROR "Unknown build type. Set CMAKE_BUILD_TYPE to DEBUG or RELEASE.")
endif ()
if (CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS)
add_definitions(-D__APPLE_USE_RFC_3542)
endif ()
if (CLR_CMAKE_TARGET_LINUX)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_GNU_SOURCE")
endif ()
else ()
set(CMAKE_SHARED_LIBRARY_PREFIX "")
# we only need to build System.Globalization.Native when building static libs.
if (STATIC_LIBS_ONLY)
add_subdirectory(System.Globalization.Native)
endif ()
endif ()
add_subdirectory(System.IO.Compression.Native)
if (CLR_CMAKE_TARGET_UNIX OR CLR_CMAKE_TARGET_BROWSER)
include(configure.cmake)
if (NOT CLR_CMAKE_TARGET_BROWSER AND NOT CLR_CMAKE_TARGET_MACCATALYST AND NOT CLR_CMAKE_TARGET_IOS AND NOT CLR_CMAKE_TARGET_TVOS AND NOT CLR_CMAKE_TARGET_ANDROID)
add_subdirectory(System.IO.Ports.Native)
endif ()
if (CMAKE_C_COMPILER_ID STREQUAL Clang)
add_compile_options(-Weverything)
add_compile_options(-Wno-format-nonliteral)
add_compile_options(-Wno-disabled-macro-expansion)
add_compile_options(-Wno-padded)
add_compile_options(-Wno-empty-translation-unit)
add_compile_options(-Wno-cast-align)
add_compile_options(-Wno-typedef-redefinition)
add_compile_options(-Wno-c11-extensions)
add_compile_options(-Wno-thread-safety-analysis)
endif ()
add_subdirectory(System.Native)
if (CLR_CMAKE_TARGET_BROWSER)
# skip for now
elseif (CLR_CMAKE_TARGET_MACCATALYST)
add_subdirectory(System.Net.Security.Native)
# System.Security.Cryptography.Native is intentionally disabled on iOS
# it is only used for interacting with OpenSSL which isn't useful there
elseif (CLR_CMAKE_TARGET_IOS)
add_subdirectory(System.Net.Security.Native)
# System.Security.Cryptography.Native is intentionally disabled on iOS
# it is only used for interacting with OpenSSL which isn't useful there
elseif (CLR_CMAKE_TARGET_TVOS)
#add_subdirectory(System.Net.Security.Native) # no gssapi on tvOS, see https://developer.apple.com/documentation/gss
# System.Security.Cryptography.Native is intentionally disabled on tvOS
# it is only used for interacting with OpenSSL which isn't useful there
elseif (CLR_CMAKE_TARGET_ANDROID)
add_subdirectory(System.Security.Cryptography.Native.Android)
else ()
add_subdirectory(System.Globalization.Native)
add_subdirectory(System.Net.Security.Native)
add_subdirectory(System.Security.Cryptography.Native)
endif ()
if (CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS)
add_subdirectory(System.Security.Cryptography.Native.Apple)
endif ()
endif ()
| cmake_minimum_required(VERSION 3.6.2)
include(CheckCCompilerFlag)
if (CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS)
# CMake 3.14.5 contains bug fixes for iOS
cmake_minimum_required(VERSION 3.14.5)
elseif (CLR_CMAKE_TARGET_MACCATALYST)
# CMake 3.18.1 properly generates MacCatalyst C compiler
cmake_minimum_required(VERSION 3.18.1)
endif ()
if (WIN32)
cmake_policy(SET CMP0091 NEW)
else ()
cmake_policy(SET CMP0042 NEW)
endif ()
project(CoreFX C)
include(../../../eng/native/configurepaths.cmake)
include(${CLR_ENG_NATIVE_DIR}/configurecompiler.cmake)
include_directories(${CLR_SRC_NATIVE_DIR})
set(CMAKE_INCLUDE_CURRENT_DIR ON)
if (STATIC_LIBS_ONLY)
# Suppress exporting of the PAL APIs
add_definitions(-DPALEXPORT=EXTERN_C)
set(GEN_SHARED_LIB 0)
set(STATIC_LIB_DESTINATION lib)
else ()
set(GEN_SHARED_LIB 1)
set(STATIC_LIB_DESTINATION .)
endif ()
if (CLR_CMAKE_TARGET_UNIX OR CLR_CMAKE_TARGET_BROWSER)
set(CMAKE_MACOSX_RPATH ON)
if (CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS)
set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)
set(CMAKE_INSTALL_NAME_DIR "@rpath")
endif ()
set(CMAKE_INSTALL_PREFIX $ENV{__CMakeBinDir})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99")
add_compile_options(-I${CMAKE_CURRENT_SOURCE_DIR}/Common)
add_compile_options(-I${CMAKE_CURRENT_BINARY_DIR}/Common)
if (CLR_CMAKE_TARGET_BROWSER)
set(GEN_SHARED_LIB 0)
set(STATIC_LIB_DESTINATION .)
endif ()
if (CLR_CMAKE_TARGET_TVOS)
# with -fembed-bitcode passing -headerpad_max_install_names is not allowed so remove it from the CMake flags
string(REPLACE "-Wl,-headerpad_max_install_names" "" CMAKE_C_LINK_FLAGS ${CMAKE_C_LINK_FLAGS})
string(REPLACE "-Wl,-headerpad_max_install_names" "" CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS ${CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS})
add_compile_options(-fembed-bitcode)
add_link_options(-fembed-bitcode)
endif ()
if (CLR_CMAKE_TARGET_ANDROID)
if (CROSS_ROOTFS)
include_directories(SYSTEM "${CROSS_ROOTFS}/usr/include")
endif ()
endif ()
string(TOUPPER ${CMAKE_BUILD_TYPE} UPPERCASE_CMAKE_BUILD_TYPE)
if (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED)
if (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG)
add_compile_options(-O0)
elseif (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED)
add_compile_options(-O2)
endif ()
add_definitions(-DDEBUG)
# obtain settings from running coreclr\enablesanitizers.sh
string(FIND "$ENV{DEBUG_SANITIZERS}" "asan" __ASAN_POS)
string(FIND "$ENV{DEBUG_SANITIZERS}" "ubsan" __UBSAN_POS)
if (${__ASAN_POS} GREATER -1 OR ${__UBSAN_POS} GREATER -1)
set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS} -fsanitize=")
if (${__ASAN_POS} GREATER -1)
set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS}address,")
message("Address Sanitizer (asan) enabled")
endif ()
if (${__UBSAN_POS} GREATER -1)
set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS}undefined")
message("Undefined Behavior Sanitizer (ubsan) enabled")
endif ()
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${CLR_SANITIZE_LINK_FLAGS}")
# -Wl and --gc-sections: drop unused sections\functions (similar to Windows /Gy function-level-linking)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${CLR_SANITIZE_LINK_FLAGS} -Wl,--gc-sections")
endif ()
elseif (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL RELEASE)
# Use O1 option when the clang version is smaller than 3.9
# Otherwise use O3 option in release build
if (CLR_CMAKE_TARGET_ARCH_ARMV7L AND DEFINED ENV{CROSSCOMPILE} AND CMAKE_C_COMPILER_VERSION VERSION_LESS 3.9)
add_compile_options (-O1)
else ()
add_compile_options (-O3)
endif ()
else ()
message(FATAL_ERROR "Unknown build type. Set CMAKE_BUILD_TYPE to DEBUG or RELEASE.")
endif ()
if (CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS)
add_definitions(-D__APPLE_USE_RFC_3542)
endif ()
if (CLR_CMAKE_TARGET_LINUX)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_GNU_SOURCE")
endif ()
else ()
set(CMAKE_SHARED_LIBRARY_PREFIX "")
# we only need to build System.Globalization.Native when building static libs.
if (STATIC_LIBS_ONLY)
add_subdirectory(System.Globalization.Native)
endif ()
endif ()
add_subdirectory(System.IO.Compression.Native)
if (CLR_CMAKE_TARGET_UNIX OR CLR_CMAKE_TARGET_BROWSER)
include(configure.cmake)
if (NOT CLR_CMAKE_TARGET_BROWSER AND NOT CLR_CMAKE_TARGET_MACCATALYST AND NOT CLR_CMAKE_TARGET_IOS AND NOT CLR_CMAKE_TARGET_TVOS AND NOT CLR_CMAKE_TARGET_ANDROID)
add_subdirectory(System.IO.Ports.Native)
endif ()
if (CMAKE_C_COMPILER_ID STREQUAL Clang)
add_compile_options(-Weverything)
add_compile_options(-Wno-format-nonliteral)
add_compile_options(-Wno-disabled-macro-expansion)
add_compile_options(-Wno-padded)
add_compile_options(-Wno-empty-translation-unit)
add_compile_options(-Wno-cast-align)
add_compile_options(-Wno-typedef-redefinition)
add_compile_options(-Wno-c11-extensions)
add_compile_options(-Wno-thread-safety-analysis)
endif ()
add_subdirectory(System.Native)
if (CLR_CMAKE_TARGET_BROWSER)
# skip for now
elseif (CLR_CMAKE_TARGET_MACCATALYST)
add_subdirectory(System.Net.Security.Native)
# System.Security.Cryptography.Native is intentionally disabled on iOS
# it is only used for interacting with OpenSSL which isn't useful there
elseif (CLR_CMAKE_TARGET_IOS)
add_subdirectory(System.Net.Security.Native)
# System.Security.Cryptography.Native is intentionally disabled on iOS
# it is only used for interacting with OpenSSL which isn't useful there
elseif (CLR_CMAKE_TARGET_TVOS)
#add_subdirectory(System.Net.Security.Native) # no gssapi on tvOS, see https://developer.apple.com/documentation/gss
# System.Security.Cryptography.Native is intentionally disabled on tvOS
# it is only used for interacting with OpenSSL which isn't useful there
elseif (CLR_CMAKE_TARGET_ANDROID)
add_subdirectory(System.Security.Cryptography.Native.Android)
else ()
add_subdirectory(System.Globalization.Native)
add_subdirectory(System.Net.Security.Native)
add_subdirectory(System.Security.Cryptography.Native)
endif ()
if (CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS)
add_subdirectory(System.Security.Cryptography.Native.Apple)
endif ()
endif ()
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/mono/mono/mini/helpers.c | /**
* \file
* Assorted routines
*
* (C) 2003 Ximian, Inc.
*/
#include <config.h>
#include "mini.h"
#include <ctype.h>
#include <mono/metadata/opcodes.h>
#ifndef HOST_WIN32
#include <unistd.h>
#endif
#ifndef DISABLE_JIT
#ifndef DISABLE_LOGGING
#ifdef MINI_OP
#undef MINI_OP
#endif
#ifdef MINI_OP3
#undef MINI_OP3
#endif
// This, instead of an array of pointers, to optimize away a pointer and a relocation per string.
#define MSGSTRFIELD(line) MSGSTRFIELD1(line)
#define MSGSTRFIELD1(line) str##line
static const struct msgstr_t {
#define MINI_OP(a,b,dest,src1,src2) char MSGSTRFIELD(__LINE__) [sizeof (b)];
#define MINI_OP3(a,b,dest,src1,src2,src3) char MSGSTRFIELD(__LINE__) [sizeof (b)];
#include "mini-ops.h"
#undef MINI_OP
#undef MINI_OP3
} opstr = {
#define MINI_OP(a,b,dest,src1,src2) b,
#define MINI_OP3(a,b,dest,src1,src2,src3) b,
#include "mini-ops.h"
#undef MINI_OP
#undef MINI_OP3
};
static const gint16 opidx [] = {
#define MINI_OP(a,b,dest,src1,src2) offsetof (struct msgstr_t, MSGSTRFIELD(__LINE__)),
#define MINI_OP3(a,b,dest,src1,src2,src3) offsetof (struct msgstr_t, MSGSTRFIELD(__LINE__)),
#include "mini-ops.h"
#undef MINI_OP
#undef MINI_OP3
};
#endif /* DISABLE_LOGGING */
#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#if !defined(TARGET_ARM64) && !defined(__APPLE__)
#define emit_debug_info TRUE
#else
#define emit_debug_info FALSE
#endif
#else
#define emit_debug_info FALSE
#endif
/*This enables us to use the right tooling when building the cross compiler for iOS.*/
#if defined (__APPLE__) && defined (TARGET_ARM) && (defined(__i386__) || defined(__x86_64__))
//#define ARCH_PREFIX "/Developer/Platforms/iPhoneOS.platform/Developer/usr/bin/"
#endif
#define ARCH_PREFIX ""
//#define ARCH_PREFIX "powerpc64-linux-gnu-"
const char*
mono_inst_name (int op) {
#ifndef DISABLE_LOGGING
if (op >= OP_LOAD && op <= OP_LAST)
return (const char*)&opstr + opidx [op - OP_LOAD];
if (op < OP_LOAD)
return mono_opcode_name (op);
g_error ("unknown opcode name for %d", op);
return NULL;
#else
g_error ("unknown opcode name for %d", op);
g_assert_not_reached ();
#endif
}
void
mono_blockset_print (MonoCompile *cfg, MonoBitSet *set, const char *name, guint idom)
{
#ifndef DISABLE_LOGGING
int i;
if (name)
g_print ("%s:", name);
mono_bitset_foreach_bit (set, i, cfg->num_bblocks) {
if (idom == i)
g_print (" [BB%d]", cfg->bblocks [i]->block_num);
else
g_print (" BB%d", cfg->bblocks [i]->block_num);
}
g_print ("\n");
#endif
}
/**
* \param cfg compilation context
* \param code a pointer to the code
* \param size the code size in bytes
*
* Disassemble to code to stdout.
*/
void
mono_disassemble_code (MonoCompile *cfg, guint8 *code, int size, char *id)
{
#ifndef DISABLE_LOGGING
GHashTable *offset_to_bb_hash = NULL;
int i, cindex, bb_num;
FILE *ofd;
#ifdef HOST_WIN32
const char *tmp = g_get_tmp_dir ();
#endif
char *as_file;
char *o_file;
int unused G_GNUC_UNUSED;
#ifdef HOST_WIN32
as_file = g_strdup_printf ("%s/test.s", tmp);
if (!(ofd = fopen (as_file, "w")))
g_assert_not_reached ();
#else
i = g_file_open_tmp (NULL, &as_file, NULL);
ofd = fdopen (i, "w");
g_assert (ofd);
#endif
for (i = 0; id [i]; ++i) {
if (i == 0 && isdigit (id [i]))
fprintf (ofd, "_");
else if (!isalnum (id [i]))
fprintf (ofd, "_");
else
fprintf (ofd, "%c", id [i]);
}
fprintf (ofd, ":\n");
if (emit_debug_info && cfg != NULL) {
MonoBasicBlock *bb;
fprintf (ofd, ".stabs \"\",100,0,0,.Ltext0\n");
fprintf (ofd, ".stabs \"<BB>\",100,0,0,.Ltext0\n");
fprintf (ofd, ".Ltext0:\n");
offset_to_bb_hash = g_hash_table_new (NULL, NULL);
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
g_hash_table_insert (offset_to_bb_hash, GINT_TO_POINTER (bb->native_offset), GINT_TO_POINTER (bb->block_num + 1));
}
}
cindex = 0;
for (i = 0; i < size; ++i) {
if (emit_debug_info && cfg != NULL) {
bb_num = GPOINTER_TO_INT (g_hash_table_lookup (offset_to_bb_hash, GINT_TO_POINTER (i)));
if (bb_num) {
fprintf (ofd, "\n.stabd 68,0,%d\n", bb_num - 1);
cindex = 0;
}
}
if (cindex == 0) {
fprintf (ofd, "\n.byte %u", (unsigned int) code [i]);
} else {
fprintf (ofd, ",%u", (unsigned int) code [i]);
}
cindex++;
if (cindex == 64)
cindex = 0;
}
fprintf (ofd, "\n");
fclose (ofd);
#ifdef __APPLE__
#ifdef __ppc64__
#define DIS_CMD "otool64 -v -t"
#else
#define DIS_CMD "otool -v -t"
#endif
#else
#if defined(sparc) && !defined(__GNUC__)
#define DIS_CMD "dis"
#elif defined(TARGET_X86)
#define DIS_CMD "objdump -l -d"
#elif defined(TARGET_AMD64)
#if defined(HOST_WIN32)
#define DIS_CMD "x86_64-w64-mingw32-objdump.exe -M x86-64 -d"
#else
#define DIS_CMD "objdump -l -d"
#endif
#else
#define DIS_CMD "objdump -d"
#endif
#endif
#if defined(sparc)
#define AS_CMD "as -xarch=v9"
#elif defined (TARGET_X86)
# if defined(__APPLE__)
# define AS_CMD "as -arch i386"
# else
# define AS_CMD "as -gstabs"
# endif
#elif defined (TARGET_AMD64)
# if defined (__APPLE__)
# define AS_CMD "as -arch x86_64"
# else
# define AS_CMD "as -gstabs"
# endif
#elif defined (TARGET_ARM)
# if defined (__APPLE__)
# define AS_CMD "as -arch arm"
# else
# define AS_CMD "as -gstabs"
# endif
#elif defined (TARGET_ARM64)
# if defined (__APPLE__)
# define AS_CMD "clang -c -arch arm64 -g -x assembler"
# else
# define AS_CMD "as -gstabs"
# endif
#elif defined(__mips__) && (_MIPS_SIM == _ABIO32)
#define AS_CMD "as -mips32"
#elif defined(__ppc64__)
#define AS_CMD "as -arch ppc64"
#elif defined(__powerpc64__)
#define AS_CMD "as -mppc64"
#elif defined (TARGET_RISCV64)
#define AS_CMD "as -march=rv64ima"
#elif defined (TARGET_RISCV32)
#define AS_CMD "as -march=rv32ima"
#else
#define AS_CMD "as"
#endif
#ifdef HOST_WIN32
o_file = g_strdup_printf ("%s/test.o", tmp);
#else
i = g_file_open_tmp (NULL, &o_file, NULL);
close (i);
#endif
#ifdef HAVE_SYSTEM
char *cmd = g_strdup_printf (ARCH_PREFIX AS_CMD " %s -o %s", as_file, o_file);
unused = system (cmd);
g_free (cmd);
char *objdump_args = g_getenv ("MONO_OBJDUMP_ARGS");
if (!objdump_args)
objdump_args = g_strdup ("");
fflush (stdout);
#if (defined(__arm__) || defined(__aarch64__)) && !defined(TARGET_OSX)
/*
* The arm assembler inserts ELF directives instructing objdump to display
* everything as data.
*/
cmd = g_strdup_printf (ARCH_PREFIX "strip -s %s", o_file);
unused = system (cmd);
g_free (cmd);
#endif
cmd = g_strdup_printf (ARCH_PREFIX DIS_CMD " %s %s", objdump_args, o_file);
unused = system (cmd);
g_free (cmd);
g_free (objdump_args);
#else
g_assert_not_reached ();
#endif /* HAVE_SYSTEM */
#ifndef HOST_WIN32
unlink (o_file);
unlink (as_file);
#endif
g_free (o_file);
g_free (as_file);
#endif
}
#else /* DISABLE_JIT */
void
mono_blockset_print (MonoCompile *cfg, MonoBitSet *set, const char *name, guint idom)
{
}
#endif /* DISABLE_JIT */
| /**
* \file
* Assorted routines
*
* (C) 2003 Ximian, Inc.
*/
#include <config.h>
#include "mini.h"
#include <ctype.h>
#include <mono/metadata/opcodes.h>
#ifndef HOST_WIN32
#include <unistd.h>
#endif
#ifndef DISABLE_JIT
#ifndef DISABLE_LOGGING
#ifdef MINI_OP
#undef MINI_OP
#endif
#ifdef MINI_OP3
#undef MINI_OP3
#endif
// This, instead of an array of pointers, to optimize away a pointer and a relocation per string.
#define MSGSTRFIELD(line) MSGSTRFIELD1(line)
#define MSGSTRFIELD1(line) str##line
static const struct msgstr_t {
#define MINI_OP(a,b,dest,src1,src2) char MSGSTRFIELD(__LINE__) [sizeof (b)];
#define MINI_OP3(a,b,dest,src1,src2,src3) char MSGSTRFIELD(__LINE__) [sizeof (b)];
#include "mini-ops.h"
#undef MINI_OP
#undef MINI_OP3
} opstr = {
#define MINI_OP(a,b,dest,src1,src2) b,
#define MINI_OP3(a,b,dest,src1,src2,src3) b,
#include "mini-ops.h"
#undef MINI_OP
#undef MINI_OP3
};
static const gint16 opidx [] = {
#define MINI_OP(a,b,dest,src1,src2) offsetof (struct msgstr_t, MSGSTRFIELD(__LINE__)),
#define MINI_OP3(a,b,dest,src1,src2,src3) offsetof (struct msgstr_t, MSGSTRFIELD(__LINE__)),
#include "mini-ops.h"
#undef MINI_OP
#undef MINI_OP3
};
#endif /* DISABLE_LOGGING */
#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#if !defined(TARGET_ARM64) && !defined(__APPLE__)
#define emit_debug_info TRUE
#else
#define emit_debug_info FALSE
#endif
#else
#define emit_debug_info FALSE
#endif
/*This enables us to use the right tooling when building the cross compiler for iOS.*/
#if defined (__APPLE__) && defined (TARGET_ARM) && (defined(__i386__) || defined(__x86_64__))
//#define ARCH_PREFIX "/Developer/Platforms/iPhoneOS.platform/Developer/usr/bin/"
#endif
#define ARCH_PREFIX ""
//#define ARCH_PREFIX "powerpc64-linux-gnu-"
const char*
mono_inst_name (int op) {
#ifndef DISABLE_LOGGING
if (op >= OP_LOAD && op <= OP_LAST)
return (const char*)&opstr + opidx [op - OP_LOAD];
if (op < OP_LOAD)
return mono_opcode_name (op);
g_error ("unknown opcode name for %d", op);
return NULL;
#else
g_error ("unknown opcode name for %d", op);
g_assert_not_reached ();
#endif
}
void
mono_blockset_print (MonoCompile *cfg, MonoBitSet *set, const char *name, guint idom)
{
#ifndef DISABLE_LOGGING
int i;
if (name)
g_print ("%s:", name);
mono_bitset_foreach_bit (set, i, cfg->num_bblocks) {
if (idom == i)
g_print (" [BB%d]", cfg->bblocks [i]->block_num);
else
g_print (" BB%d", cfg->bblocks [i]->block_num);
}
g_print ("\n");
#endif
}
/**
* \param cfg compilation context
* \param code a pointer to the code
* \param size the code size in bytes
*
* Disassemble to code to stdout.
*/
void
mono_disassemble_code (MonoCompile *cfg, guint8 *code, int size, char *id)
{
#ifndef DISABLE_LOGGING
GHashTable *offset_to_bb_hash = NULL;
int i, cindex, bb_num;
FILE *ofd;
#ifdef HOST_WIN32
const char *tmp = g_get_tmp_dir ();
#endif
char *as_file;
char *o_file;
int unused G_GNUC_UNUSED;
#ifdef HOST_WIN32
as_file = g_strdup_printf ("%s/test.s", tmp);
if (!(ofd = fopen (as_file, "w")))
g_assert_not_reached ();
#else
i = g_file_open_tmp (NULL, &as_file, NULL);
ofd = fdopen (i, "w");
g_assert (ofd);
#endif
for (i = 0; id [i]; ++i) {
if (i == 0 && isdigit (id [i]))
fprintf (ofd, "_");
else if (!isalnum (id [i]))
fprintf (ofd, "_");
else
fprintf (ofd, "%c", id [i]);
}
fprintf (ofd, ":\n");
if (emit_debug_info && cfg != NULL) {
MonoBasicBlock *bb;
fprintf (ofd, ".stabs \"\",100,0,0,.Ltext0\n");
fprintf (ofd, ".stabs \"<BB>\",100,0,0,.Ltext0\n");
fprintf (ofd, ".Ltext0:\n");
offset_to_bb_hash = g_hash_table_new (NULL, NULL);
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
g_hash_table_insert (offset_to_bb_hash, GINT_TO_POINTER (bb->native_offset), GINT_TO_POINTER (bb->block_num + 1));
}
}
cindex = 0;
for (i = 0; i < size; ++i) {
if (emit_debug_info && cfg != NULL) {
bb_num = GPOINTER_TO_INT (g_hash_table_lookup (offset_to_bb_hash, GINT_TO_POINTER (i)));
if (bb_num) {
fprintf (ofd, "\n.stabd 68,0,%d\n", bb_num - 1);
cindex = 0;
}
}
if (cindex == 0) {
fprintf (ofd, "\n.byte %u", (unsigned int) code [i]);
} else {
fprintf (ofd, ",%u", (unsigned int) code [i]);
}
cindex++;
if (cindex == 64)
cindex = 0;
}
fprintf (ofd, "\n");
fclose (ofd);
#ifdef __APPLE__
#ifdef __ppc64__
#define DIS_CMD "otool64 -v -t"
#else
#define DIS_CMD "otool -v -t"
#endif
#else
#if defined(sparc) && !defined(__GNUC__)
#define DIS_CMD "dis"
#elif defined(TARGET_X86)
#define DIS_CMD "objdump -l -d"
#elif defined(TARGET_AMD64)
#if defined(HOST_WIN32)
#define DIS_CMD "x86_64-w64-mingw32-objdump.exe -M x86-64 -d"
#else
#define DIS_CMD "objdump -l -d"
#endif
#else
#define DIS_CMD "objdump -d"
#endif
#endif
#if defined(sparc)
#define AS_CMD "as -xarch=v9"
#elif defined (TARGET_X86)
# if defined(__APPLE__)
# define AS_CMD "as -arch i386"
# else
# define AS_CMD "as -gstabs"
# endif
#elif defined (TARGET_AMD64)
# if defined (__APPLE__)
# define AS_CMD "as -arch x86_64"
# else
# define AS_CMD "as -gstabs"
# endif
#elif defined (TARGET_ARM)
# if defined (__APPLE__)
# define AS_CMD "as -arch arm"
# else
# define AS_CMD "as -gstabs"
# endif
#elif defined (TARGET_ARM64)
# if defined (__APPLE__)
# define AS_CMD "clang -c -arch arm64 -g -x assembler"
# else
# define AS_CMD "as -gstabs"
# endif
#elif defined(__mips__) && (_MIPS_SIM == _ABIO32)
#define AS_CMD "as -mips32"
#elif defined(__ppc64__)
#define AS_CMD "as -arch ppc64"
#elif defined(__powerpc64__)
#define AS_CMD "as -mppc64"
#elif defined (TARGET_RISCV64)
#define AS_CMD "as -march=rv64ima"
#elif defined (TARGET_RISCV32)
#define AS_CMD "as -march=rv32ima"
#else
#define AS_CMD "as"
#endif
#ifdef HOST_WIN32
o_file = g_strdup_printf ("%s/test.o", tmp);
#else
i = g_file_open_tmp (NULL, &o_file, NULL);
close (i);
#endif
#ifdef HAVE_SYSTEM
char *cmd = g_strdup_printf (ARCH_PREFIX AS_CMD " %s -o %s", as_file, o_file);
unused = system (cmd);
g_free (cmd);
char *objdump_args = g_getenv ("MONO_OBJDUMP_ARGS");
if (!objdump_args)
objdump_args = g_strdup ("");
fflush (stdout);
#if (defined(__arm__) || defined(__aarch64__)) && !defined(TARGET_OSX)
/*
* The arm assembler inserts ELF directives instructing objdump to display
* everything as data.
*/
cmd = g_strdup_printf (ARCH_PREFIX "strip -s %s", o_file);
unused = system (cmd);
g_free (cmd);
#endif
cmd = g_strdup_printf (ARCH_PREFIX DIS_CMD " %s %s", objdump_args, o_file);
unused = system (cmd);
g_free (cmd);
g_free (objdump_args);
#else
g_assert_not_reached ();
#endif /* HAVE_SYSTEM */
#ifndef HOST_WIN32
unlink (o_file);
unlink (as_file);
#endif
g_free (o_file);
g_free (as_file);
#endif
}
#else /* DISABLE_JIT */
void
mono_blockset_print (MonoCompile *cfg, MonoBitSet *set, const char *name, guint idom)
{
}
#endif /* DISABLE_JIT */
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/utilcode/stacktrace.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
#include "stdafx.h"
#include "stacktrace.h"
#include <imagehlp.h>
#include "corhlpr.h"
#include "utilcode.h"
#include "pedecoder.h" // for IMAGE_FILE_MACHINE_NATIVE
#include <minipal/utils.h>
//This is a workaround. We need to work with the debugger team to figure
//out how the module handle of the CLR can be found in a SxS safe way.
HMODULE GetCLRModuleHack()
{
static HMODULE s_hModCLR = 0;
if (!s_hModCLR)
{
s_hModCLR = GetModuleHandleA(MAIN_CLR_DLL_NAME_A);
}
return s_hModCLR;
}
HINSTANCE LoadImageHlp()
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
SCAN_IGNORE_FAULT; // Faults from Wsz funcs are handled.
return LoadLibraryExA("imagehlp.dll", NULL, 0);
}
HINSTANCE LoadDbgHelp()
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
SCAN_IGNORE_FAULT; // Faults from Wsz funcs are handled.
return LoadLibraryExA("dbghelp.dll", NULL, 0);
}
/****************************************************************************
* SymCallback *
*---------------------*
* Description:
* Callback for imghelp.
****************************************************************************/
BOOL __stdcall SymCallback
(
HANDLE hProcess,
ULONG ActionCode,
PVOID CallbackData,
PVOID UserContext
)
{
WRAPPER_NO_CONTRACT;
switch (ActionCode)
{
case CBA_DEBUG_INFO:
OutputDebugStringA("IMGHLP: ");
OutputDebugStringA((LPCSTR) CallbackData);
OutputDebugStringA("\n");
break;
case CBA_DEFERRED_SYMBOL_LOAD_START:
OutputDebugStringA("IMGHLP: Deferred symbol load start ");
OutputDebugStringA(((IMAGEHLP_DEFERRED_SYMBOL_LOAD*)CallbackData)->FileName);
OutputDebugStringA("\n");
break;
case CBA_DEFERRED_SYMBOL_LOAD_COMPLETE:
OutputDebugStringA("IMGHLP: Deferred symbol load complete ");
OutputDebugStringA(((IMAGEHLP_DEFERRED_SYMBOL_LOAD*)CallbackData)->FileName);
OutputDebugStringA("\n");
break;
case CBA_DEFERRED_SYMBOL_LOAD_FAILURE:
OutputDebugStringA("IMGHLP: Deferred symbol load failure ");
OutputDebugStringA(((IMAGEHLP_DEFERRED_SYMBOL_LOAD*)CallbackData)->FileName);
OutputDebugStringA("\n");
break;
case CBA_DEFERRED_SYMBOL_LOAD_PARTIAL:
OutputDebugStringA("IMGHLP: Deferred symbol load partial ");
OutputDebugStringA(((IMAGEHLP_DEFERRED_SYMBOL_LOAD*)CallbackData)->FileName);
OutputDebugStringA("\n");
break;
}
return FALSE;
}
// @TODO_IA64: all of this stack trace stuff is pretty much broken on 64-bit
// right now because this code doesn't use the new SymXxxx64 functions.
#define LOCAL_ASSERT(x)
//
//--- Macros ------------------------------------------------------------------
//
//
// Types and Constants --------------------------------------------------------
//
// Resolved symbol information for one stack frame.
struct SYM_INFO
{
    DWORD_PTR dwOffset;                    // byte offset of the address from the symbol's start
    char achModule[cchMaxAssertModuleLen]; // module name (uppercased by FillSymbolInfo), or "<no module>"
    char achSymbol[cchMaxAssertSymbolLen]; // (undecorated when possible) symbol name, or "<no symbol>"
};
//--- Function Pointers to APIs in IMAGEHLP.DLL. Loaded dynamically. ---------

// Negotiates the imagehlp API version (see MagicInit).
typedef LPAPI_VERSION (__stdcall *pfnImgHlp_ImagehlpApiVersionEx)(
    LPAPI_VERSION AppVersion
    );

// Walks one frame of a thread's stack.
typedef BOOL (__stdcall *pfnImgHlp_StackWalk)(
    DWORD MachineType,
    HANDLE hProcess,
    HANDLE hThread,
    LPSTACKFRAME StackFrame,
    LPVOID ContextRecord,
    PREAD_PROCESS_MEMORY_ROUTINE ReadMemoryRoutine,
    PFUNCTION_TABLE_ACCESS_ROUTINE FunctionTableAccessRoutine,
    PGET_MODULE_BASE_ROUTINE GetModuleBaseRoutine,
    PTRANSLATE_ADDRESS_ROUTINE TranslateAddress
    );

// Pick the 64-bit imagehlp structures on 64-bit hosts; alias both widths
// through the PLAT_* names so the rest of the file is width-agnostic.
#ifdef HOST_64BIT
typedef DWORD64 (__stdcall *pfnImgHlp_SymGetModuleBase64)(
    IN HANDLE hProcess,
    IN DWORD64 dwAddr
    );
typedef IMAGEHLP_SYMBOL64 PLAT_IMAGEHLP_SYMBOL;
typedef IMAGEHLP_MODULE64 PLAT_IMAGEHLP_MODULE;
#else
typedef IMAGEHLP_SYMBOL PLAT_IMAGEHLP_SYMBOL;
typedef IMAGEHLP_MODULE PLAT_IMAGEHLP_MODULE;
#endif

// Drop the SDK macro names so only the PLAT_* aliases are usable below.
#undef IMAGEHLP_SYMBOL
#undef IMAGEHLP_MODULE

typedef BOOL (__stdcall *pfnImgHlp_SymGetModuleInfo)(
    IN HANDLE hProcess,
    IN DWORD_PTR dwAddr,
    OUT PLAT_IMAGEHLP_MODULE* ModuleInfo
    );
typedef LPVOID (__stdcall *pfnImgHlp_SymFunctionTableAccess)(
    HANDLE hProcess,
    DWORD_PTR AddrBase
    );
typedef BOOL (__stdcall *pfnImgHlp_SymGetSymFromAddr)(
    IN HANDLE hProcess,
    IN DWORD_PTR dwAddr,
    OUT DWORD_PTR* pdwDisplacement,
    OUT PLAT_IMAGEHLP_SYMBOL* Symbol
    );
typedef BOOL (__stdcall *pfnImgHlp_SymInitialize)(
    IN HANDLE hProcess,
    IN LPSTR UserSearchPath,
    IN BOOL fInvadeProcess
    );
typedef BOOL (__stdcall *pfnImgHlp_SymUnDName)(
    IN PLAT_IMAGEHLP_SYMBOL* sym, // Symbol to undecorate
    OUT LPSTR UnDecName, // Buffer to store undecorated name in
    IN DWORD UnDecNameLength // Size of the buffer
    );
typedef BOOL (__stdcall *pfnImgHlp_SymLoadModule)(
    IN HANDLE hProcess,
    IN HANDLE hFile,
    IN PSTR ImageName,
    IN PSTR ModuleName,
    IN DWORD_PTR BaseOfDll,
    IN DWORD SizeOfDll
    );
typedef BOOL (_stdcall *pfnImgHlp_SymRegisterCallback)(
    IN HANDLE hProcess,
    IN PSYMBOL_REGISTERED_CALLBACK CallbackFunction,
    IN PVOID UserContext
    );
typedef DWORD (_stdcall *pfnImgHlp_SymSetOptions)(
    IN DWORD SymOptions
    );
typedef DWORD (_stdcall *pfnImgHlp_SymGetOptions)(
    );

// Name/destination pair used by MagicInit's GetProcAddress loop.
struct IMGHLPFN_LOAD
{
    LPCSTR pszFnName; // exported name in imagehlp.dll
    LPVOID * ppvfn;   // where the resolved address is stored
};

#if defined(HOST_64BIT)
// Resolves module base / function entry for managed code via an export of
// the runtime itself (see FunctionTableAccess / GetModuleBase below).
typedef void (*pfn_GetRuntimeStackWalkInfo)(
    IN ULONG64 ControlPc,
    OUT UINT_PTR* pModuleBase,
    OUT UINT_PTR* pFuncEntry
    );
#endif // HOST_64BIT
//
// Globals --------------------------------------------------------------------
//
static BOOL g_fLoadedImageHlp = FALSE; // set to true on success
static BOOL g_fLoadedImageHlpFailed = FALSE; // set to true on failure
static HINSTANCE g_hinstImageHlp = NULL; // imagehlp.dll module handle
static HINSTANCE g_hinstDbgHelp = NULL;  // dbghelp.dll module handle
static HANDLE g_hProcess = NULL;         // current-process handle used for all Sym* calls

// Dynamically resolved imagehlp.dll entrypoints; populated by MagicInit.
pfnImgHlp_ImagehlpApiVersionEx _ImagehlpApiVersionEx;
pfnImgHlp_StackWalk _StackWalk;
pfnImgHlp_SymGetModuleInfo _SymGetModuleInfo;
pfnImgHlp_SymFunctionTableAccess _SymFunctionTableAccess;
pfnImgHlp_SymGetSymFromAddr _SymGetSymFromAddr;
pfnImgHlp_SymInitialize _SymInitialize;
pfnImgHlp_SymUnDName _SymUnDName;
pfnImgHlp_SymLoadModule _SymLoadModule;
pfnImgHlp_SymRegisterCallback _SymRegisterCallback;
pfnImgHlp_SymSetOptions _SymSetOptions;
pfnImgHlp_SymGetOptions _SymGetOptions;
#if defined(HOST_64BIT)
pfn_GetRuntimeStackWalkInfo _GetRuntimeStackWalkInfo;
#endif // HOST_64BIT

// Table driving MagicInit's GetProcAddress loop over the pointers above.
IMGHLPFN_LOAD ailFuncList[] =
{
    { "ImagehlpApiVersionEx", (LPVOID*)&_ImagehlpApiVersionEx },
    { "StackWalk", (LPVOID*)&_StackWalk },
    { "SymGetModuleInfo", (LPVOID*)&_SymGetModuleInfo },
    { "SymFunctionTableAccess", (LPVOID*)&_SymFunctionTableAccess },
    { "SymGetSymFromAddr", (LPVOID*)&_SymGetSymFromAddr },
    { "SymInitialize", (LPVOID*)&_SymInitialize },
    { "SymUnDName", (LPVOID*)&_SymUnDName },
    { "SymLoadModule", (LPVOID*)&_SymLoadModule },
    { "SymRegisterCallback", (LPVOID*)&_SymRegisterCallback },
    { "SymSetOptions", (LPVOID*)&_SymSetOptions },
    { "SymGetOptions", (LPVOID*)&_SymGetOptions },
};
/****************************************************************************
* FillSymbolSearchPath *
*----------------------*
* Description:
* Manually pick out all the symbol path information we need for a real
* stack trace to work. This includes the default NT symbol paths and
* places on a VBL build machine where they should live.
****************************************************************************/
#define MAX_SYM_PATH (1024*8)
#define DEFAULT_SYM_PATH W("symsrv*symsrv.dll*\\\\symbols\\symbols;")
#define STR_ENGINE_NAME MAIN_CLR_DLL_NAME_W
// Builds the ANSI symbol search path later handed to SymInitialize:
// _NT_SYMBOL_PATH, the default symbol-server path, the URTTARGET install
// location, and the directory containing the CLR engine DLL.
// Returns a buffer allocated from 'qb', or NULL on failure/overflow.
// May throw; callers go through the EX_TRY wrapper FillSymbolSearchPath.
LPSTR FillSymbolSearchPathThrows(CQuickBytes &qb)
{
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_CANNOT_TAKE_LOCK;
    SCAN_IGNORE_FAULT; // Faults from Wsz funcs are handled.

#ifndef DACCESS_COMPILE
    // not allowed to do allocation if current thread suspends EE.
    if (IsSuspendEEThread ())
        return NULL;
#endif

    InlineSString<MAX_SYM_PATH> rcBuff ; // Working buffer
    int chTotal = 0;                     // How full is working buffer.
    int ch;

    // If the NT symbol server path vars are there, then use those.
    chTotal = WszGetEnvironmentVariable(W("_NT_SYMBOL_PATH"), rcBuff);
    if (chTotal + 1 < MAX_SYM_PATH)
        rcBuff.Append(W(';'));

    // Copy the defacto NT symbol path as well.
    size_t sympathLength = chTotal + ARRAY_SIZE(DEFAULT_SYM_PATH) + 1;
    // integer overflow occurred
    if (sympathLength < (size_t)chTotal || sympathLength < ARRAY_SIZE(DEFAULT_SYM_PATH))
    {
        return NULL;
    }
    if (sympathLength < MAX_SYM_PATH)
    {
        rcBuff.Append(DEFAULT_SYM_PATH);
        chTotal = rcBuff.GetCount();
    }

    // Next, if there is a URTTARGET, add that since that is where ndpsetup places
    // your symobls on an install.
    PathString rcBuffTemp;
    ch = WszGetEnvironmentVariable(W("URTTARGET"), rcBuffTemp);
    // NOTE(review): rcBuffTemp is appended unconditionally, while the
    // separator and running count are only updated when it fits below --
    // confirm the unconditional Append is intended.
    rcBuff.Append(rcBuffTemp);
    if (ch != 0 && (chTotal + ch + 1 < MAX_SYM_PATH))
    {
        size_t chNewTotal = chTotal + ch;
        if (chNewTotal < (size_t)chTotal || chNewTotal < (size_t)ch)
        { // integer overflow occurred
            return NULL;
        }
        chTotal += ch;
        rcBuff.Append(W(';'));
    }

#ifndef SELF_NO_HOST
    // Fetch the path location of the engine dll and add that path as well, just
    // in case URTARGET didn't cut it either.
    // For no-host builds of utilcode, we don't necessarily have an engine DLL in the
    // process, so skip this part.
    ch = WszGetModuleFileName(GetCLRModuleHack(), rcBuffTemp);
    size_t pathLocationLength = chTotal + ch + 1;
    // integer overflow occurred
    if (pathLocationLength < (size_t)chTotal || pathLocationLength < (size_t)ch)
    {
        return NULL;
    }
    if (ch != 0 && (pathLocationLength < MAX_SYM_PATH))
    {
        // Back the count up past the engine file name so only its directory counts.
        chTotal = chTotal + ch - ARRAY_SIZE(STR_ENGINE_NAME);
        rcBuff.Append(W(';'));
    }
#endif

    // Now we have a working buffer with a bunch of interesting stuff. Time
    // to convert it back to ansi for the imagehlp api's. Allocate the buffer
    // 2x bigger to handle worst case for MBCS.
    ch = ::WszWideCharToMultiByte(CP_ACP, WC_NO_BEST_FIT_CHARS, rcBuff, -1, 0, 0, 0, 0);
    LPSTR szRtn = (LPSTR) qb.AllocNoThrow(ch + 1);
    if (!szRtn)
        return NULL;
    WszWideCharToMultiByte(CP_ACP, WC_NO_BEST_FIT_CHARS, rcBuff, -1, szRtn, ch+1, 0, 0);
    return (szRtn);
}
// Non-throwing wrapper over FillSymbolSearchPathThrows: converts any
// exception into a NULL return, stashing the HRESULT via SetLastError.
LPSTR FillSymbolSearchPath(CQuickBytes &qb)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_CANNOT_TAKE_LOCK;
    SCAN_IGNORE_FAULT; // Faults from Wsz funcs are handled.

    LPSTR retval;
    HRESULT hr = S_OK;

    EX_TRY
    {
        retval = FillSymbolSearchPathThrows(qb);
    }
    EX_CATCH_HRESULT(hr);
    if (hr != S_OK)
    {
        // Surface the failure code through the thread's last-error slot.
        SetLastError(hr);
        retval = NULL;
    }

    return retval;
}
/****************************************************************************
* MagicInit *
*-----------*
* Description:
* Initializes the symbol loading code. Currently called (if necessary)
* at the beginning of each method that might need ImageHelp to be
* loaded.
****************************************************************************/
void MagicInit()
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
if (g_fLoadedImageHlp || g_fLoadedImageHlpFailed)
{
return;
}
g_hProcess = GetCurrentProcess();
if (g_hinstDbgHelp == NULL)
{
g_hinstDbgHelp = LoadDbgHelp();
}
if (NULL == g_hinstDbgHelp)
{
// Imagehlp.dll has dependency on dbghelp.dll through delay load.
// If dbghelp.dll is not available, Imagehlp.dll initializes API's like ImageApiVersionEx to
// some dummy function. Then we AV when we use data from _ImagehlpApiVersionEx
g_fLoadedImageHlpFailed = TRUE;
return;
}
//
// Try to load imagehlp.dll
//
if (g_hinstImageHlp == NULL) {
g_hinstImageHlp = LoadImageHlp();
}
LOCAL_ASSERT(g_hinstImageHlp);
if (NULL == g_hinstImageHlp)
{
g_fLoadedImageHlpFailed = TRUE;
return;
}
//
// Try to get the API entrypoints in imagehlp.dll
//
for (int i = 0; i < ARRAY_SIZE(ailFuncList); i++)
{
*(ailFuncList[i].ppvfn) = GetProcAddress(
g_hinstImageHlp,
ailFuncList[i].pszFnName);
LOCAL_ASSERT(*(ailFuncList[i].ppvfn));
if (!*(ailFuncList[i].ppvfn))
{
g_fLoadedImageHlpFailed = TRUE;
return;
}
}
API_VERSION AppVersion = { 4, 0, API_VERSION_NUMBER, 0 };
LPAPI_VERSION papiver = _ImagehlpApiVersionEx(&AppVersion);
//
// We assume any version 4 or greater is OK.
//
LOCAL_ASSERT(papiver->Revision >= 4);
if (papiver->Revision < 4)
{
g_fLoadedImageHlpFailed = TRUE;
return;
}
g_fLoadedImageHlp = TRUE;
//
// Initialize imagehlp.dll. A NULL search path is supposed to resolve
// symbols but never works. So pull in everything and put some additional
// hints that might help out a dev box.
//
_SymSetOptions(_SymGetOptions() | SYMOPT_DEFERRED_LOADS|SYMOPT_DEBUG);
#ifndef HOST_64BIT
_SymRegisterCallback(g_hProcess, SymCallback, 0);
#endif
CQuickBytes qbSearchPath;
LPSTR szSearchPath = FillSymbolSearchPath(qbSearchPath);
_SymInitialize(g_hProcess, szSearchPath, TRUE);
return;
}
/****************************************************************************
* FillSymbolInfo *
*----------------*
* Description:
* Fills in a SYM_INFO structure
****************************************************************************/
// Resolves dwAddr to module name, symbol name and offset, filling *psi.
// Uses "<no module>"/"<no symbol>" placeholders when resolution fails.
// No-op when the symbol engine was never loaded (g_fLoadedImageHlp false).
void FillSymbolInfo
(
    SYM_INFO *psi,
    DWORD_PTR dwAddr
)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_CANNOT_TAKE_LOCK;

    if (!g_fLoadedImageHlp)
    {
        return;
    }

    LOCAL_ASSERT(psi);
    memset(psi, 0, sizeof(SYM_INFO));

    PLAT_IMAGEHLP_MODULE mi;
    mi.SizeOfStruct = sizeof(mi);

    if (!_SymGetModuleInfo(g_hProcess, dwAddr, &mi))
    {
        strcpy_s(psi->achModule, ARRAY_SIZE(psi->achModule), "<no module>");
    }
    else
    {
        strcpy_s(psi->achModule, ARRAY_SIZE(psi->achModule), mi.ModuleName);
        // Normalize the module name to upper case.
        _strupr_s(psi->achModule, ARRAY_SIZE(psi->achModule));
    }

    CHAR rgchUndec[256];
    const CHAR * pszSymbol = NULL;

    // Name field of IMAGEHLP_SYMBOL is dynamically sized.
    // Pad with space for 255 characters.
    // The anonymous union overlays the fixed struct with a buffer large
    // enough to hold the variable-length Name field.
    union
    {
        CHAR rgchSymbol[sizeof(PLAT_IMAGEHLP_SYMBOL) + 255];
        PLAT_IMAGEHLP_SYMBOL sym;
    };

    __try
    {
        sym.SizeOfStruct = sizeof(PLAT_IMAGEHLP_SYMBOL);
        sym.Address = dwAddr;
        sym.MaxNameLength = 255;

        if (_SymGetSymFromAddr(g_hProcess, dwAddr, &psi->dwOffset, &sym))
        {
            pszSymbol = sym.Name;
            // Prefer the undecorated (demangled) name when available.
            if (_SymUnDName(&sym, rgchUndec, STRING_LENGTH(rgchUndec)))
            {
                pszSymbol = rgchUndec;
            }
        }
        else
        {
            pszSymbol = "<no symbol>";
        }
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        // Degrade gracefully if the symbol engine faults.
        // NOTE(review): mi.BaseOfImage is uninitialized here when
        // _SymGetModuleInfo failed above -- confirm this path is acceptable.
        pszSymbol = "<EX: no symbol>";
        psi->dwOffset = dwAddr - mi.BaseOfImage;
    }

    strcpy_s(psi->achSymbol, ARRAY_SIZE(psi->achSymbol), pszSymbol);
}
/****************************************************************************
* FunctionTableAccess *
*---------------------*
* Description:
* Helper for imagehlp's StackWalk API.
****************************************************************************/
// Function-table-access helper for imagehlp's StackWalk. Tries the symbol
// engine first; on 64-bit hosts falls back to the runtime's own unwind
// info for managed code via the CLR's GetRuntimeStackWalkInfo export.
LPVOID __stdcall FunctionTableAccess
(
    HANDLE hProcess,
    DWORD_PTR dwPCAddr
)
{
    WRAPPER_NO_CONTRACT;

    HANDLE hFuncEntry = _SymFunctionTableAccess(hProcess, dwPCAddr);

#if defined(HOST_64BIT)
    if (hFuncEntry == NULL)
    {
        // Lazily bind the runtime's export on first use.
        if (_GetRuntimeStackWalkInfo == NULL)
        {
            _GetRuntimeStackWalkInfo = (pfn_GetRuntimeStackWalkInfo)
                GetProcAddress(GetCLRModuleHack(), "GetRuntimeStackWalkInfo");
        }
        if (_GetRuntimeStackWalkInfo != NULL)
        {
            _GetRuntimeStackWalkInfo((ULONG64)dwPCAddr, NULL, (UINT_PTR*)(&hFuncEntry));
        }
    }
#endif // HOST_64BIT

    return hFuncEntry;
}
/****************************************************************************
* GetModuleBase *
*---------------*
* Description:
* Helper for imagehlp's StackWalk API. Retrieves the base address of
* the module containing the giving virtual address.
*
* NOTE: If the module information for the given module hasnot yet been
* loaded, then it is loaded on this call.
*
* Return:
* Base virtual address where the module containing ReturnAddress is
* loaded, or 0 if the address cannot be determined.
****************************************************************************/
// Module-base helper for imagehlp's StackWalk: returns the base address of
// the module containing dwAddr (loading its symbol module on demand), or 0
// if it cannot be determined. On 64-bit hosts, falls back to asking the
// runtime for managed-code module bases.
DWORD_PTR __stdcall GetModuleBase
(
    HANDLE hProcess,
    DWORD_PTR dwAddr
)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;

    PLAT_IMAGEHLP_MODULE ModuleInfo;
    ModuleInfo.SizeOfStruct = sizeof(ModuleInfo);

    if (_SymGetModuleInfo(hProcess, dwAddr, &ModuleInfo))
    {
        return ModuleInfo.BaseOfImage;
    }
    else
    {
        MEMORY_BASIC_INFORMATION mbi;
        if (VirtualQueryEx(hProcess, (LPVOID)dwAddr, &mbi, sizeof(mbi)))
        {
            if (mbi.Type & MEM_IMAGE)
            {
                char achFile[MAX_LONGPATH] = {0};
                DWORD cch;
                cch = GetModuleFileNameA(
                        (HINSTANCE)mbi.AllocationBase,
                        achFile,
                        MAX_LONGPATH);
                // Ignore the return code since we can't do anything with it.
                _SymLoadModule(
                    hProcess,
                    NULL,
                    ((cch) ? achFile : NULL),
                    NULL,
                    (DWORD_PTR)mbi.AllocationBase,
                    0);
                return (DWORD_PTR)mbi.AllocationBase;
            }
        }
    }

#if defined(HOST_64BIT)
    if (_GetRuntimeStackWalkInfo == NULL)
    {
        _GetRuntimeStackWalkInfo = (pfn_GetRuntimeStackWalkInfo)
            GetProcAddress(GetCLRModuleHack(), "GetRuntimeStackWalkInfo");
        if (_GetRuntimeStackWalkInfo == NULL)
            return 0;   // DWORD_PTR is an integer type: return 0, not NULL
    }
    DWORD_PTR moduleBase;
    _GetRuntimeStackWalkInfo((ULONG64)dwAddr, (UINT_PTR*)&moduleBase, NULL);
    if (moduleBase != 0)  // compare against 0, not the pointer macro NULL
        return moduleBase;
#endif // HOST_64BIT

    return 0;
}
#if !defined(DACCESS_COMPILE)
/****************************************************************************
* GetStackBacktrace *
*-------------------*
* Description:
* Gets a stacktrace of the current stack, including symbols.
*
* Return:
* The number of elements actually retrieved.
****************************************************************************/
UINT GetStackBacktrace
(
    UINT ifrStart, // How many stack elements to skip before starting.
    UINT cfrTotal, // How many elements to trace after starting.
    DWORD_PTR* pdwEip, // Array to be filled with stack addresses.
    SYM_INFO* psiSymbols, // This array is filled with symbol information.
                          // It should be big enough to hold cfrTotal elts.
                          // If NULL, no symbol information is stored.
    CONTEXT * pContext // Context to use (or NULL to use current)
)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_CANNOT_TAKE_LOCK;

    UINT nElements = 0;
    DWORD_PTR* pdw = pdwEip;
    SYM_INFO* psi = psiSymbols;

    // Lazily load and initialize the symbol engine.
    MagicInit();

    // Zero the output arrays up front so partial walks leave clean data.
    memset(pdwEip, 0, cfrTotal*sizeof(DWORD_PTR));
    if (psiSymbols)
    {
        memset(psiSymbols, 0, cfrTotal * sizeof(SYM_INFO));
    }

    if (!g_fLoadedImageHlp)
    {
        return 0;
    }

    // Use the caller-supplied context, or capture the current one.
    CONTEXT context;
    if (pContext == NULL)
    {
        ClrCaptureContext(&context);
    }
    else
    {
        memcpy(&context, pContext, sizeof(CONTEXT));
    }

#ifdef HOST_64BIT
    STACKFRAME64 stkfrm;
    memset(&stkfrm, 0, sizeof(STACKFRAME64));
#else
    STACKFRAME stkfrm;
    memset(&stkfrm, 0, sizeof(STACKFRAME));
#endif

    stkfrm.AddrPC.Mode = AddrModeFlat;
    stkfrm.AddrStack.Mode = AddrModeFlat;
    stkfrm.AddrFrame.Mode = AddrModeFlat;
#if defined(_M_IX86)
    // Seed the first frame from the captured register state.
    stkfrm.AddrPC.Offset = context.Eip;
    stkfrm.AddrStack.Offset = context.Esp;
    stkfrm.AddrFrame.Offset = context.Ebp; // Frame Pointer
#endif

#ifndef HOST_X86
    // If we don't have a user-supplied context, then don't skip any frames.
    // So ignore this function (GetStackBackTrace)
    // ClrCaptureContext on x86 gives us the ESP/EBP/EIP of its caller's caller
    // so we don't need to do this.
    if (pContext == NULL)
    {
        ifrStart += 1;
    }
#endif // !HOST_X86

    // Walk frames, skipping the first ifrStart and recording up to cfrTotal.
    for (UINT i = 0; i < ifrStart + cfrTotal; i++)
    {
        if (!_StackWalk(IMAGE_FILE_MACHINE_NATIVE,
                        g_hProcess,
                        GetCurrentThread(),
                        &stkfrm,
                        &context,
                        NULL,
                        (PFUNCTION_TABLE_ACCESS_ROUTINE)FunctionTableAccess,
                        (PGET_MODULE_BASE_ROUTINE)GetModuleBase,
                        NULL))
        {
            // No more frames available.
            break;
        }

        if (i >= ifrStart)
        {
            *pdw++ = stkfrm.AddrPC.Offset;
            nElements++;

            if (psi)
            {
                FillSymbolInfo(psi++, stkfrm.AddrPC.Offset);
            }
        }
    }

    LOCAL_ASSERT(nElements == (UINT)(pdw - pdwEip));
    return nElements;
}
#endif // !defined(DACCESS_COMPILE)
/****************************************************************************
* GetStringFromSymbolInfo *
*-------------------------*
* Description:
* Actually prints the info into the string for the symbol.
****************************************************************************/
#ifdef HOST_64BIT
#define FMT_ADDR_BARE "%08x`%08x"
#define FMT_ADDR_OFFSET "%llX"
#else
#define FMT_ADDR_BARE "%08x"
#define FMT_ADDR_OFFSET "%lX"
#endif
// Formats one stack level as "<module>! <symbol> + 0x<offset> (0x<addr>)"
// into pszString (capacity cchMaxAssertStackLevelStringLen). If psi is
// NULL, a "<symbols not available>" placeholder is emitted instead.
void GetStringFromSymbolInfo
(
    DWORD_PTR dwAddr,
    SYM_INFO *psi, // @parm Pointer to SYMBOL_INFO. Can be NULL.
    __out_ecount (cchMaxAssertStackLevelStringLen) CHAR *pszString // @parm Place to put string.
)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_CANNOT_TAKE_LOCK;

    LOCAL_ASSERT(pszString);

    // <module>! <symbol> + 0x<offset> 0x<addr>\n
    if (psi)
    {
        sprintf_s(pszString,
                  cchMaxAssertStackLevelStringLen,
                  "%s! %s + 0x" FMT_ADDR_OFFSET " (0x" FMT_ADDR_BARE ")",
                  (psi->achModule[0]) ? psi->achModule : "<no module>",
                  (psi->achSymbol[0]) ? psi->achSymbol : "<no symbol>",
                  psi->dwOffset,
                  DBG_ADDR(dwAddr));
    }
    else
    {
        sprintf_s(pszString, cchMaxAssertStackLevelStringLen, "<symbols not available> (0x%p)", (void *)dwAddr);
    }
    LOCAL_ASSERT(strlen(pszString) < cchMaxAssertStackLevelStringLen);
}
#if !defined(DACCESS_COMPILE)
/****************************************************************************
* GetStringFromStackLevels *
*--------------------------*
* Description:
* Retrieves a string from the stack frame. If more than one frame, they
* are separated by newlines
****************************************************************************/
// Walks the stack and formats up to cfrTotal levels into pszString,
// one level per line (levels separated by '\n').
void GetStringFromStackLevels
(
    UINT ifrStart, // @parm How many stack elements to skip before starting.
    UINT cfrTotal, // @parm How many elements to trace after starting.
                   // Can't be more than cfrMaxAssertStackLevels.
    __out_ecount(cchMaxAssertStackLevelStringLen * cfrTotal) CHAR *pszString, // @parm Place to put string.
                   // Max size will be cchMaxAssertStackLevelStringLen * cfrTotal.
    CONTEXT * pContext // @parm Context to start the stack trace at; null for current context.
)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_CANNOT_TAKE_LOCK;

    LOCAL_ASSERT(pszString);
    LOCAL_ASSERT(cfrTotal < cfrMaxAssertStackLevels);

    *pszString = '\0';

    if (cfrTotal == 0)
    {
        return;
    }

    DWORD_PTR rgdwStackAddrs[cfrMaxAssertStackLevels];
    SYM_INFO rgsi[cfrMaxAssertStackLevels];

    // Ignore this function (GetStringFromStackLevels) if we don't have a user-supplied context.
    if (pContext == NULL)
    {
        ifrStart += 1;
    }

    UINT uiRetrieved =
        GetStackBacktrace(ifrStart, cfrTotal, rgdwStackAddrs, rgsi, pContext);

    // First level
    // NOTE(review): emitted even when uiRetrieved == 0; the arrays are
    // zero-filled by GetStackBacktrace in that case -- confirm intended.
    CHAR aszLevel[cchMaxAssertStackLevelStringLen];
    GetStringFromSymbolInfo(rgdwStackAddrs[0], &rgsi[0], aszLevel);

    size_t bufSize = cchMaxAssertStackLevelStringLen * cfrTotal;
    strcpy_s(pszString, bufSize, aszLevel);

    // Additional levels, newline-separated.
    for (UINT i = 1; i < uiRetrieved; ++i)
    {
        strcat_s(pszString, bufSize, "\n");
        GetStringFromSymbolInfo(rgdwStackAddrs[i],
                                &rgsi[i], aszLevel);
        strcat_s(pszString, bufSize, aszLevel);
    }

    LOCAL_ASSERT(strlen(pszString) <= cchMaxAssertStackLevelStringLen * cfrTotal);
}
#endif // !defined(DACCESS_COMPILE)
/****************************************************************************
* GetStringFromAddr *
*-------------------*
* Description:
* Returns a string from an address.
****************************************************************************/
// Formats a single address as "<module>! <symbol> + 0x<offset> (0x<addr>)"
// into szString, which must hold at least cchMaxAssertStackLevelStringLen
// characters.
void GetStringFromAddr
(
    DWORD_PTR dwAddr,
    _Out_writes_(cchMaxAssertStackLevelStringLen) LPSTR szString
)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;

    LOCAL_ASSERT(szString);

    SYM_INFO si;
    FillSymbolInfo(&si, dwAddr);

    // Substitute placeholders when resolution produced empty names.
    const CHAR *pszModule = (si.achModule[0]) ? si.achModule : "<no module>";
    const CHAR *pszSymbol = (si.achSymbol[0]) ? si.achSymbol : "<no symbol>";

    sprintf_s(szString,
              cchMaxAssertStackLevelStringLen,
              "%s! %s + 0x%p (0x%p)",
              pszModule,
              pszSymbol,
              (void*)si.dwOffset,
              (void*)dwAddr);
}
/****************************************************************************
* MagicDeinit *
*-------------*
* Description:
* Cleans up for the symbol loading code. Should be called before exit
* to free the dynamically loaded imagehlp.dll.
****************************************************************************/
// Releases the dynamically loaded imagehlp.dll and resets the loaded flag.
void MagicDeinit(void)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;

    // Nothing to release if imagehlp.dll was never loaded.
    if (g_hinstImageHlp == NULL)
        return;

    FreeLibrary(g_hinstImageHlp);
    g_hinstImageHlp = NULL;
    g_fLoadedImageHlp = FALSE;
    // NOTE(review): g_hinstDbgHelp is left loaded here -- confirm intentional.
}
#if defined(HOST_X86)
/****************************************************************************
* ClrCaptureContext *
*-------------------*
* Description:
* Exactly the contents of RtlCaptureContext for Win7 - Win2K doesn't
* support this, so we need it for CoreCLR 4, if we require Win2K support
****************************************************************************/
// Captures the caller's register state into *ctx. Naked, so no prologue is
// generated: ebp/esp still describe the caller's frame on entry.
// The hex offsets below are the well-known x86 CONTEXT field offsets from
// winnt.h (e.g. 0B0h = Eax, 0B8h = Eip) -- TODO confirm against the SDK.
extern "C" __declspec(naked) void __stdcall
ClrCaptureContext(_Out_ PCONTEXT ctx)
{
    __asm {
        push ebx;
        mov ebx,dword ptr [esp+8]       // ebx = ctx (arg is at [esp+8] after the push)
        mov dword ptr [ebx+0B0h],eax    // ctx->Eax
        mov dword ptr [ebx+0ACh],ecx    // ctx->Ecx
        mov dword ptr [ebx+0A8h],edx    // ctx->Edx
        mov eax,dword ptr [esp]         // reload the ebx value saved by the push
        mov dword ptr [ebx+0A4h],eax    // ctx->Ebx (caller's value)
        mov dword ptr [ebx+0A0h],esi    // ctx->Esi
        mov dword ptr [ebx+09Ch],edi    // ctx->Edi
        mov word ptr [ebx+0BCh],cs      // ctx->SegCs
        mov word ptr [ebx+098h],ds      // ctx->SegDs
        mov word ptr [ebx+094h],es      // ctx->SegEs
        mov word ptr [ebx+090h],fs      // ctx->SegFs
        mov word ptr [ebx+08Ch],gs      // ctx->SegGs
        mov word ptr [ebx+0C8h],ss      // ctx->SegSs
        pushfd
        pop dword ptr [ebx+0C0h]        // ctx->EFlags
        mov eax,dword ptr [ebp+4]       // caller's return address
        mov dword ptr [ebx+0B8h],eax    // ctx->Eip
        mov eax,dword ptr [ebp]         // caller's saved ebp
        mov dword ptr [ebx+0B4h],eax    // ctx->Ebp
        lea eax,[ebp+8]
        mov dword ptr [ebx+0C4h],eax    // ctx->Esp (stack pointer as the caller saw it)
        mov dword ptr [ebx],10007h      // ctx->ContextFlags = CONTEXT_FULL
        pop ebx
        ret 4                           // __stdcall: callee pops the single argument
    }
}
#endif // HOST_X86
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
#include "stdafx.h"
#include "stacktrace.h"
#include <imagehlp.h>
#include "corhlpr.h"
#include "utilcode.h"
#include "pedecoder.h" // for IMAGE_FILE_MACHINE_NATIVE
#include <minipal/utils.h>
//This is a workaround. We need to work with the debugger team to figure
//out how the module handle of the CLR can be found in a SxS safe way.
HMODULE GetCLRModuleHack()
{
    // Cache the handle across calls; the lookup only ever happens once.
    static HMODULE s_hModCLR = 0;
    if (s_hModCLR == 0)
        s_hModCLR = GetModuleHandleA(MAIN_CLR_DLL_NAME_A);
    return s_hModCLR;
}
// Loads imagehlp.dll from the default search path.
HINSTANCE LoadImageHlp()
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_CANNOT_TAKE_LOCK;
    SCAN_IGNORE_FAULT; // Faults from Wsz funcs are handled.

    HINSTANCE hInst = LoadLibraryExA("imagehlp.dll", NULL, 0);
    return hInst;
}
// Loads dbghelp.dll from the default search path.
HINSTANCE LoadDbgHelp()
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    SCAN_IGNORE_FAULT; // Faults from Wsz funcs are handled.

    HINSTANCE hInst = LoadLibraryExA("dbghelp.dll", NULL, 0);
    return hInst;
}
/****************************************************************************
* SymCallback *
*---------------------*
* Description:
* Callback for imghelp.
****************************************************************************/
// Diagnostic callback registered with imagehlp: routes deferred-symbol-load
// notifications to the debugger output. Always returns FALSE so imagehlp
// performs its default handling.
BOOL __stdcall SymCallback
(
    HANDLE hProcess,
    ULONG ActionCode,
    PVOID CallbackData,
    PVOID UserContext
)
{
    WRAPPER_NO_CONTRACT;

    // Map the action code to a message prefix and detail string;
    // a NULL prefix means the action is not one we report.
    LPCSTR pszPrefix = NULL;
    LPCSTR pszDetail = NULL;

    switch (ActionCode)
    {
    case CBA_DEBUG_INFO:
        pszPrefix = "IMGHLP: ";
        pszDetail = (LPCSTR) CallbackData;
        break;

    case CBA_DEFERRED_SYMBOL_LOAD_START:
        pszPrefix = "IMGHLP: Deferred symbol load start ";
        pszDetail = ((IMAGEHLP_DEFERRED_SYMBOL_LOAD*)CallbackData)->FileName;
        break;

    case CBA_DEFERRED_SYMBOL_LOAD_COMPLETE:
        pszPrefix = "IMGHLP: Deferred symbol load complete ";
        pszDetail = ((IMAGEHLP_DEFERRED_SYMBOL_LOAD*)CallbackData)->FileName;
        break;

    case CBA_DEFERRED_SYMBOL_LOAD_FAILURE:
        pszPrefix = "IMGHLP: Deferred symbol load failure ";
        pszDetail = ((IMAGEHLP_DEFERRED_SYMBOL_LOAD*)CallbackData)->FileName;
        break;

    case CBA_DEFERRED_SYMBOL_LOAD_PARTIAL:
        pszPrefix = "IMGHLP: Deferred symbol load partial ";
        pszDetail = ((IMAGEHLP_DEFERRED_SYMBOL_LOAD*)CallbackData)->FileName;
        break;
    }

    if (pszPrefix != NULL)
    {
        OutputDebugStringA(pszPrefix);
        OutputDebugStringA(pszDetail);
        OutputDebugStringA("\n");
    }

    return FALSE;
}
// @TODO_IA64: all of this stack trace stuff is pretty much broken on 64-bit
// right now because this code doesn't use the new SymXxxx64 functions.
#define LOCAL_ASSERT(x)
//
//--- Macros ------------------------------------------------------------------
//
//
// Types and Constants --------------------------------------------------------
//
// Resolved symbol information for one stack frame.
struct SYM_INFO
{
    DWORD_PTR dwOffset;                    // byte offset of the address from the symbol's start
    char achModule[cchMaxAssertModuleLen]; // module name (uppercased by FillSymbolInfo), or "<no module>"
    char achSymbol[cchMaxAssertSymbolLen]; // (undecorated when possible) symbol name, or "<no symbol>"
};
//--- Function Pointers to APIs in IMAGEHLP.DLL. Loaded dynamically. ---------

// Negotiates the imagehlp API version (see MagicInit).
typedef LPAPI_VERSION (__stdcall *pfnImgHlp_ImagehlpApiVersionEx)(
    LPAPI_VERSION AppVersion
    );

// Walks one frame of a thread's stack.
typedef BOOL (__stdcall *pfnImgHlp_StackWalk)(
    DWORD MachineType,
    HANDLE hProcess,
    HANDLE hThread,
    LPSTACKFRAME StackFrame,
    LPVOID ContextRecord,
    PREAD_PROCESS_MEMORY_ROUTINE ReadMemoryRoutine,
    PFUNCTION_TABLE_ACCESS_ROUTINE FunctionTableAccessRoutine,
    PGET_MODULE_BASE_ROUTINE GetModuleBaseRoutine,
    PTRANSLATE_ADDRESS_ROUTINE TranslateAddress
    );

// Pick the 64-bit imagehlp structures on 64-bit hosts; alias both widths
// through the PLAT_* names so the rest of the file is width-agnostic.
#ifdef HOST_64BIT
typedef DWORD64 (__stdcall *pfnImgHlp_SymGetModuleBase64)(
    IN HANDLE hProcess,
    IN DWORD64 dwAddr
    );
typedef IMAGEHLP_SYMBOL64 PLAT_IMAGEHLP_SYMBOL;
typedef IMAGEHLP_MODULE64 PLAT_IMAGEHLP_MODULE;
#else
typedef IMAGEHLP_SYMBOL PLAT_IMAGEHLP_SYMBOL;
typedef IMAGEHLP_MODULE PLAT_IMAGEHLP_MODULE;
#endif

// Drop the SDK macro names so only the PLAT_* aliases are usable below.
#undef IMAGEHLP_SYMBOL
#undef IMAGEHLP_MODULE

typedef BOOL (__stdcall *pfnImgHlp_SymGetModuleInfo)(
    IN HANDLE hProcess,
    IN DWORD_PTR dwAddr,
    OUT PLAT_IMAGEHLP_MODULE* ModuleInfo
    );
typedef LPVOID (__stdcall *pfnImgHlp_SymFunctionTableAccess)(
    HANDLE hProcess,
    DWORD_PTR AddrBase
    );
typedef BOOL (__stdcall *pfnImgHlp_SymGetSymFromAddr)(
    IN HANDLE hProcess,
    IN DWORD_PTR dwAddr,
    OUT DWORD_PTR* pdwDisplacement,
    OUT PLAT_IMAGEHLP_SYMBOL* Symbol
    );
typedef BOOL (__stdcall *pfnImgHlp_SymInitialize)(
    IN HANDLE hProcess,
    IN LPSTR UserSearchPath,
    IN BOOL fInvadeProcess
    );
typedef BOOL (__stdcall *pfnImgHlp_SymUnDName)(
    IN PLAT_IMAGEHLP_SYMBOL* sym, // Symbol to undecorate
    OUT LPSTR UnDecName, // Buffer to store undecorated name in
    IN DWORD UnDecNameLength // Size of the buffer
    );
typedef BOOL (__stdcall *pfnImgHlp_SymLoadModule)(
    IN HANDLE hProcess,
    IN HANDLE hFile,
    IN PSTR ImageName,
    IN PSTR ModuleName,
    IN DWORD_PTR BaseOfDll,
    IN DWORD SizeOfDll
    );
typedef BOOL (_stdcall *pfnImgHlp_SymRegisterCallback)(
    IN HANDLE hProcess,
    IN PSYMBOL_REGISTERED_CALLBACK CallbackFunction,
    IN PVOID UserContext
    );
typedef DWORD (_stdcall *pfnImgHlp_SymSetOptions)(
    IN DWORD SymOptions
    );
typedef DWORD (_stdcall *pfnImgHlp_SymGetOptions)(
    );

// Name/destination pair used by MagicInit's GetProcAddress loop.
struct IMGHLPFN_LOAD
{
    LPCSTR pszFnName; // exported name in imagehlp.dll
    LPVOID * ppvfn;   // where the resolved address is stored
};

#if defined(HOST_64BIT)
// Resolves module base / function entry for managed code via an export of
// the runtime itself (see FunctionTableAccess / GetModuleBase).
typedef void (*pfn_GetRuntimeStackWalkInfo)(
    IN ULONG64 ControlPc,
    OUT UINT_PTR* pModuleBase,
    OUT UINT_PTR* pFuncEntry
    );
#endif // HOST_64BIT
//
// Globals --------------------------------------------------------------------
//
static BOOL g_fLoadedImageHlp = FALSE; // set to true on success
static BOOL g_fLoadedImageHlpFailed = FALSE; // set to true on failure
static HINSTANCE g_hinstImageHlp = NULL; // imagehlp.dll module handle
static HINSTANCE g_hinstDbgHelp = NULL;  // dbghelp.dll module handle
static HANDLE g_hProcess = NULL;         // current-process handle used for all Sym* calls

// Dynamically resolved imagehlp.dll entrypoints; populated by MagicInit.
pfnImgHlp_ImagehlpApiVersionEx _ImagehlpApiVersionEx;
pfnImgHlp_StackWalk _StackWalk;
pfnImgHlp_SymGetModuleInfo _SymGetModuleInfo;
pfnImgHlp_SymFunctionTableAccess _SymFunctionTableAccess;
pfnImgHlp_SymGetSymFromAddr _SymGetSymFromAddr;
pfnImgHlp_SymInitialize _SymInitialize;
pfnImgHlp_SymUnDName _SymUnDName;
pfnImgHlp_SymLoadModule _SymLoadModule;
pfnImgHlp_SymRegisterCallback _SymRegisterCallback;
pfnImgHlp_SymSetOptions _SymSetOptions;
pfnImgHlp_SymGetOptions _SymGetOptions;
#if defined(HOST_64BIT)
pfn_GetRuntimeStackWalkInfo _GetRuntimeStackWalkInfo;
#endif // HOST_64BIT

// Table driving MagicInit's GetProcAddress loop over the pointers above.
IMGHLPFN_LOAD ailFuncList[] =
{
    { "ImagehlpApiVersionEx", (LPVOID*)&_ImagehlpApiVersionEx },
    { "StackWalk", (LPVOID*)&_StackWalk },
    { "SymGetModuleInfo", (LPVOID*)&_SymGetModuleInfo },
    { "SymFunctionTableAccess", (LPVOID*)&_SymFunctionTableAccess },
    { "SymGetSymFromAddr", (LPVOID*)&_SymGetSymFromAddr },
    { "SymInitialize", (LPVOID*)&_SymInitialize },
    { "SymUnDName", (LPVOID*)&_SymUnDName },
    { "SymLoadModule", (LPVOID*)&_SymLoadModule },
    { "SymRegisterCallback", (LPVOID*)&_SymRegisterCallback },
    { "SymSetOptions", (LPVOID*)&_SymSetOptions },
    { "SymGetOptions", (LPVOID*)&_SymGetOptions },
};
/****************************************************************************
* FillSymbolSearchPath *
*----------------------*
* Description:
* Manually pick out all the symbol path information we need for a real
* stack trace to work. This includes the default NT symbol paths and
* places on a VBL build machine where they should live.
****************************************************************************/
#define MAX_SYM_PATH (1024*8)
#define DEFAULT_SYM_PATH W("symsrv*symsrv.dll*\\\\symbols\\symbols;")
#define STR_ENGINE_NAME MAIN_CLR_DLL_NAME_W
// Builds the ANSI symbol search path later handed to SymInitialize:
// _NT_SYMBOL_PATH, the default symbol-server path, the URTTARGET install
// location, and the directory containing the CLR engine DLL.
// Returns a buffer allocated from 'qb', or NULL on failure/overflow.
// May throw; callers go through the EX_TRY wrapper FillSymbolSearchPath.
LPSTR FillSymbolSearchPathThrows(CQuickBytes &qb)
{
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_CANNOT_TAKE_LOCK;
    SCAN_IGNORE_FAULT; // Faults from Wsz funcs are handled.

#ifndef DACCESS_COMPILE
    // not allowed to do allocation if current thread suspends EE.
    if (IsSuspendEEThread ())
        return NULL;
#endif

    InlineSString<MAX_SYM_PATH> rcBuff ; // Working buffer
    int chTotal = 0;                     // How full is working buffer.
    int ch;

    // If the NT symbol server path vars are there, then use those.
    chTotal = WszGetEnvironmentVariable(W("_NT_SYMBOL_PATH"), rcBuff);
    if (chTotal + 1 < MAX_SYM_PATH)
        rcBuff.Append(W(';'));

    // Copy the defacto NT symbol path as well.
    size_t sympathLength = chTotal + ARRAY_SIZE(DEFAULT_SYM_PATH) + 1;
    // integer overflow occurred
    if (sympathLength < (size_t)chTotal || sympathLength < ARRAY_SIZE(DEFAULT_SYM_PATH))
    {
        return NULL;
    }
    if (sympathLength < MAX_SYM_PATH)
    {
        rcBuff.Append(DEFAULT_SYM_PATH);
        chTotal = rcBuff.GetCount();
    }

    // Next, if there is a URTTARGET, add that since that is where ndpsetup places
    // your symobls on an install.
    PathString rcBuffTemp;
    ch = WszGetEnvironmentVariable(W("URTTARGET"), rcBuffTemp);
    // NOTE(review): rcBuffTemp is appended unconditionally, while the
    // separator and running count are only updated when it fits below --
    // confirm the unconditional Append is intended.
    rcBuff.Append(rcBuffTemp);
    if (ch != 0 && (chTotal + ch + 1 < MAX_SYM_PATH))
    {
        size_t chNewTotal = chTotal + ch;
        if (chNewTotal < (size_t)chTotal || chNewTotal < (size_t)ch)
        { // integer overflow occurred
            return NULL;
        }
        chTotal += ch;
        rcBuff.Append(W(';'));
    }

#ifndef SELF_NO_HOST
    // Fetch the path location of the engine dll and add that path as well, just
    // in case URTARGET didn't cut it either.
    // For no-host builds of utilcode, we don't necessarily have an engine DLL in the
    // process, so skip this part.
    ch = WszGetModuleFileName(GetCLRModuleHack(), rcBuffTemp);
    size_t pathLocationLength = chTotal + ch + 1;
    // integer overflow occurred
    if (pathLocationLength < (size_t)chTotal || pathLocationLength < (size_t)ch)
    {
        return NULL;
    }
    if (ch != 0 && (pathLocationLength < MAX_SYM_PATH))
    {
        // Back the count up past the engine file name so only its directory counts.
        chTotal = chTotal + ch - ARRAY_SIZE(STR_ENGINE_NAME);
        rcBuff.Append(W(';'));
    }
#endif

    // Now we have a working buffer with a bunch of interesting stuff. Time
    // to convert it back to ansi for the imagehlp api's. Allocate the buffer
    // 2x bigger to handle worst case for MBCS.
    ch = ::WszWideCharToMultiByte(CP_ACP, WC_NO_BEST_FIT_CHARS, rcBuff, -1, 0, 0, 0, 0);
    LPSTR szRtn = (LPSTR) qb.AllocNoThrow(ch + 1);
    if (!szRtn)
        return NULL;
    WszWideCharToMultiByte(CP_ACP, WC_NO_BEST_FIT_CHARS, rcBuff, -1, szRtn, ch+1, 0, 0);
    return (szRtn);
}
/****************************************************************************
 * FillSymbolSearchPath                                                     *
 *----------------------*
 * Description:
 *   Non-throwing wrapper around FillSymbolSearchPathThrows. Builds the
 *   ANSI symbol search path into the caller-supplied CQuickBytes buffer.
 *
 * Return:
 *   Pointer to the search-path string owned by 'qb', or NULL on failure
 *   (the failing HRESULT is stashed via SetLastError).
 ****************************************************************************/
LPSTR FillSymbolSearchPath(CQuickBytes &qb)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_CANNOT_TAKE_LOCK;
    SCAN_IGNORE_FAULT; // Faults from Wsz funcs are handled.

    // Initialize defensively: if the EX_TRY body throws before assigning,
    // 'retval' would otherwise be left uninitialized (and trips
    // "potentially uninitialized local" compiler warnings).
    LPSTR retval = NULL;
    HRESULT hr = S_OK;

    EX_TRY
    {
        retval = FillSymbolSearchPathThrows(qb);
    }
    EX_CATCH_HRESULT(hr);

    if (hr != S_OK)
    {
        SetLastError(hr);
        retval = NULL;
    }

    return retval;
}
/****************************************************************************
* MagicInit *
*-----------*
* Description:
* Initializes the symbol loading code. Currently called (if necessary)
* at the beginning of each method that might need ImageHelp to be
* loaded.
****************************************************************************/
void MagicInit()
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
if (g_fLoadedImageHlp || g_fLoadedImageHlpFailed)
{
return;
}
g_hProcess = GetCurrentProcess();
if (g_hinstDbgHelp == NULL)
{
g_hinstDbgHelp = LoadDbgHelp();
}
if (NULL == g_hinstDbgHelp)
{
// Imagehlp.dll has dependency on dbghelp.dll through delay load.
// If dbghelp.dll is not available, Imagehlp.dll initializes API's like ImageApiVersionEx to
// some dummy function. Then we AV when we use data from _ImagehlpApiVersionEx
g_fLoadedImageHlpFailed = TRUE;
return;
}
//
// Try to load imagehlp.dll
//
if (g_hinstImageHlp == NULL) {
g_hinstImageHlp = LoadImageHlp();
}
LOCAL_ASSERT(g_hinstImageHlp);
if (NULL == g_hinstImageHlp)
{
g_fLoadedImageHlpFailed = TRUE;
return;
}
//
// Try to get the API entrypoints in imagehlp.dll
//
for (int i = 0; i < ARRAY_SIZE(ailFuncList); i++)
{
*(ailFuncList[i].ppvfn) = GetProcAddress(
g_hinstImageHlp,
ailFuncList[i].pszFnName);
LOCAL_ASSERT(*(ailFuncList[i].ppvfn));
if (!*(ailFuncList[i].ppvfn))
{
g_fLoadedImageHlpFailed = TRUE;
return;
}
}
API_VERSION AppVersion = { 4, 0, API_VERSION_NUMBER, 0 };
LPAPI_VERSION papiver = _ImagehlpApiVersionEx(&AppVersion);
//
// We assume any version 4 or greater is OK.
//
LOCAL_ASSERT(papiver->Revision >= 4);
if (papiver->Revision < 4)
{
g_fLoadedImageHlpFailed = TRUE;
return;
}
g_fLoadedImageHlp = TRUE;
//
// Initialize imagehlp.dll. A NULL search path is supposed to resolve
// symbols but never works. So pull in everything and put some additional
// hints that might help out a dev box.
//
_SymSetOptions(_SymGetOptions() | SYMOPT_DEFERRED_LOADS|SYMOPT_DEBUG);
#ifndef HOST_64BIT
_SymRegisterCallback(g_hProcess, SymCallback, 0);
#endif
CQuickBytes qbSearchPath;
LPSTR szSearchPath = FillSymbolSearchPath(qbSearchPath);
_SymInitialize(g_hProcess, szSearchPath, TRUE);
return;
}
/****************************************************************************
* FillSymbolInfo *
*----------------*
* Description:
* Fills in a SYM_INFO structure
****************************************************************************/
void FillSymbolInfo
(
    SYM_INFO *psi,      // Out: receives module name, symbol name, and offset.
    DWORD_PTR dwAddr    // Address to resolve.
)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_CANNOT_TAKE_LOCK;

    // Symbol support never initialized (or failed): leave psi untouched.
    if (!g_fLoadedImageHlp)
    {
        return;
    }

    LOCAL_ASSERT(psi);
    memset(psi, 0, sizeof(SYM_INFO));

    // Zero-initialize the module info up front. If _SymGetModuleInfo fails,
    // the __except handler below still reads mi.BaseOfImage; without this
    // memset that read would use uninitialized stack memory.
    PLAT_IMAGEHLP_MODULE  mi;
    memset(&mi, 0, sizeof(mi));
    mi.SizeOfStruct = sizeof(mi);

    if (!_SymGetModuleInfo(g_hProcess, dwAddr, &mi))
    {
        strcpy_s(psi->achModule, ARRAY_SIZE(psi->achModule), "<no module>");
    }
    else
    {
        strcpy_s(psi->achModule, ARRAY_SIZE(psi->achModule), mi.ModuleName);
        _strupr_s(psi->achModule, ARRAY_SIZE(psi->achModule));
    }

    CHAR rgchUndec[256];
    const CHAR * pszSymbol = NULL;

    // Name field of IMAGEHLP_SYMBOL is dynamically sized.
    // Pad with space for 255 characters.
    union
    {
        CHAR rgchSymbol[sizeof(PLAT_IMAGEHLP_SYMBOL) + 255];
        PLAT_IMAGEHLP_SYMBOL sym;
    };

    __try
    {
        sym.SizeOfStruct = sizeof(PLAT_IMAGEHLP_SYMBOL);
        sym.Address = dwAddr;
        sym.MaxNameLength = 255;

        if (_SymGetSymFromAddr(g_hProcess, dwAddr, &psi->dwOffset, &sym))
        {
            pszSymbol = sym.Name;

            // Prefer the undecorated (demangled) name when available.
            if (_SymUnDName(&sym, rgchUndec, STRING_LENGTH(rgchUndec)))
            {
                pszSymbol = rgchUndec;
            }
        }
        else
        {
            pszSymbol = "<no symbol>";
        }
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        // dbghelp faulted: fall back to a raw offset from the module base
        // (0 if the module lookup above failed).
        pszSymbol = "<EX: no symbol>";
        psi->dwOffset = dwAddr - mi.BaseOfImage;
    }

    strcpy_s(psi->achSymbol, ARRAY_SIZE(psi->achSymbol), pszSymbol);
}
/****************************************************************************
* FunctionTableAccess *
*---------------------*
* Description:
* Helper for imagehlp's StackWalk API.
****************************************************************************/
LPVOID __stdcall FunctionTableAccess
(
    HANDLE hProcess,        // Process whose function table to query.
    DWORD_PTR dwPCAddr      // PC address to look up.
)
{
    WRAPPER_NO_CONTRACT;

    // First ask dbghelp for the function table entry.
    HANDLE hFuncEntry = _SymFunctionTableAccess( hProcess, dwPCAddr );

#if defined(HOST_64BIT)
    // On 64-bit, dbghelp knows nothing about runtime-generated code, so on a
    // miss fall back to the runtime's own stack walk info (resolved lazily
    // from the CLR module and cached in _GetRuntimeStackWalkInfo).
    if (hFuncEntry == NULL)
    {
        if (_GetRuntimeStackWalkInfo == NULL)
        {
            _GetRuntimeStackWalkInfo = (pfn_GetRuntimeStackWalkInfo)
                GetProcAddress(GetCLRModuleHack(), "GetRuntimeStackWalkInfo");
            if (_GetRuntimeStackWalkInfo == NULL)
                return NULL;
        }

        _GetRuntimeStackWalkInfo((ULONG64)dwPCAddr, NULL, (UINT_PTR*)(&hFuncEntry));
    }
#endif // HOST_64BIT

    return hFuncEntry;
}
/****************************************************************************
* GetModuleBase *
*---------------*
* Description:
* Helper for imagehlp's StackWalk API. Retrieves the base address of
* the module containing the giving virtual address.
*
* NOTE: If the module information for the given module hasnot yet been
* loaded, then it is loaded on this call.
*
* Return:
* Base virtual address where the module containing ReturnAddress is
* loaded, or 0 if the address cannot be determined.
****************************************************************************/
DWORD_PTR __stdcall GetModuleBase
(
    HANDLE hProcess,    // Process to inspect.
    DWORD_PTR dwAddr    // Virtual address inside the sought module.
)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;

    PLAT_IMAGEHLP_MODULE ModuleInfo;
    ModuleInfo.SizeOfStruct = sizeof(ModuleInfo);

    if (_SymGetModuleInfo(hProcess, dwAddr, &ModuleInfo))
    {
        return ModuleInfo.BaseOfImage;
    }
    else
    {
        // Module info not yet loaded: if the address lies inside a mapped
        // image, load its symbol module now and return the allocation base.
        MEMORY_BASIC_INFORMATION mbi;

        if (VirtualQueryEx(hProcess, (LPVOID)dwAddr, &mbi, sizeof(mbi)))
        {
            if (mbi.Type & MEM_IMAGE)
            {
                char achFile[MAX_LONGPATH] = {0};
                DWORD cch;

                cch = GetModuleFileNameA(
                        (HINSTANCE)mbi.AllocationBase,
                        achFile,
                        MAX_LONGPATH);

                // Ignore the return code since we can't do anything with it.
                _SymLoadModule(
                    hProcess,
                    NULL,
                    ((cch) ? achFile : NULL),
                    NULL,
                    (DWORD_PTR)mbi.AllocationBase,
                    0);

                return (DWORD_PTR)mbi.AllocationBase;
            }
        }
    }

#if defined(HOST_64BIT)
    // Fall back to the runtime's stack walk info, presumably for
    // runtime-generated code with no backing module image.
    if (_GetRuntimeStackWalkInfo == NULL)
    {
        _GetRuntimeStackWalkInfo = (pfn_GetRuntimeStackWalkInfo)
            GetProcAddress(GetCLRModuleHack(), "GetRuntimeStackWalkInfo");
        if (_GetRuntimeStackWalkInfo == NULL)
            return 0;   // was "return NULL"; this function returns DWORD_PTR, not a pointer
    }

    // Initialize to 0 so a failed lookup (helper leaves the out-param
    // untouched) does not return uninitialized stack garbage.
    DWORD_PTR moduleBase = 0;
    _GetRuntimeStackWalkInfo((ULONG64)dwAddr, (UINT_PTR*)&moduleBase, NULL);
    if (moduleBase != 0)
        return moduleBase;
#endif // HOST_64BIT

    return 0;
}
#if !defined(DACCESS_COMPILE)
/****************************************************************************
* GetStackBacktrace *
*-------------------*
* Description:
* Gets a stacktrace of the current stack, including symbols.
*
* Return:
* The number of elements actually retrieved.
****************************************************************************/
UINT GetStackBacktrace
(
    UINT ifrStart,              // How many stack elements to skip before starting.
    UINT cfrTotal,              // How many elements to trace after starting.
    DWORD_PTR* pdwEip,          // Array to be filled with stack addresses.
    SYM_INFO* psiSymbols,       // This array is filled with symbol information.
                                // It should be big enough to hold cfrTotal elts.
                                // If NULL, no symbol information is stored.
    CONTEXT * pContext          // Context to use (or NULL to use current)
)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_CANNOT_TAKE_LOCK;

    UINT nElements = 0;
    DWORD_PTR* pdw = pdwEip;
    SYM_INFO* psi = psiSymbols;

    // Lazily load dbghelp/imagehlp and resolve their entrypoints.
    MagicInit();

    // Pre-zero the output arrays so the caller sees well-defined data even
    // when fewer than cfrTotal frames are retrieved.
    memset(pdwEip, 0, cfrTotal*sizeof(DWORD_PTR));

    if (psiSymbols)
    {
        memset(psiSymbols, 0, cfrTotal * sizeof(SYM_INFO));
    }

    // Symbol support unavailable: report zero frames.
    if (!g_fLoadedImageHlp)
    {
        return 0;
    }

    // Use the supplied context, or capture the current one.
    CONTEXT context;
    if (pContext == NULL)
    {
        ClrCaptureContext(&context);
    }
    else
    {
        memcpy(&context, pContext, sizeof(CONTEXT));
    }

#ifdef HOST_64BIT
    STACKFRAME64 stkfrm;
    memset(&stkfrm, 0, sizeof(STACKFRAME64));
#else
    STACKFRAME stkfrm;
    memset(&stkfrm, 0, sizeof(STACKFRAME));
#endif

    stkfrm.AddrPC.Mode = AddrModeFlat;
    stkfrm.AddrStack.Mode = AddrModeFlat;
    stkfrm.AddrFrame.Mode = AddrModeFlat;
#if defined(_M_IX86)
    // On x86 the StackWalk seed registers must be supplied explicitly.
    stkfrm.AddrPC.Offset = context.Eip;
    stkfrm.AddrStack.Offset = context.Esp;
    stkfrm.AddrFrame.Offset = context.Ebp; // Frame Pointer
#endif

#ifndef HOST_X86
    // If we don't have a user-supplied context, then don't skip any frames.
    // So ignore this function (GetStackBackTrace)
    // ClrCaptureContext on x86 gives us the ESP/EBP/EIP of its caller's caller
    // so we don't need to do this.
    if (pContext == NULL)
    {
        ifrStart += 1;
    }
#endif // !HOST_X86

    // Walk ifrStart + cfrTotal frames, recording only those past the skip
    // count; stop early if the walk fails.
    for (UINT i = 0; i < ifrStart + cfrTotal; i++)
    {
        if (!_StackWalk(IMAGE_FILE_MACHINE_NATIVE,
                        g_hProcess,
                        GetCurrentThread(),
                        &stkfrm,
                        &context,
                        NULL,
                        (PFUNCTION_TABLE_ACCESS_ROUTINE)FunctionTableAccess,
                        (PGET_MODULE_BASE_ROUTINE)GetModuleBase,
                        NULL))
        {
            break;
        }

        if (i >= ifrStart)
        {
            *pdw++ = stkfrm.AddrPC.Offset;
            nElements++;

            // Resolve symbol info for this frame if the caller wants it.
            if (psi)
            {
                FillSymbolInfo(psi++, stkfrm.AddrPC.Offset);
            }
        }
    }

    LOCAL_ASSERT(nElements == (UINT)(pdw - pdwEip));
    return nElements;
}
#endif // !defined(DACCESS_COMPILE)
/****************************************************************************
* GetStringFromSymbolInfo *
*-------------------------*
* Description:
* Actually prints the info into the string for the symbol.
****************************************************************************/
#ifdef HOST_64BIT
#define FMT_ADDR_BARE "%08x`%08x"
#define FMT_ADDR_OFFSET "%llX"
#else
#define FMT_ADDR_BARE "%08x"
#define FMT_ADDR_OFFSET "%lX"
#endif
void GetStringFromSymbolInfo
(
    DWORD_PTR dwAddr,       // Address being described.
    SYM_INFO *psi,          // @parm Pointer to SYMBOL_INFO. Can be NULL.
    __out_ecount (cchMaxAssertStackLevelStringLen) CHAR *pszString // @parm Place to put string.
)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_CANNOT_TAKE_LOCK;

    LOCAL_ASSERT(pszString);

    // Output format: <module>! <symbol> + 0x<offset> (0x<addr>)
    // FMT_ADDR_OFFSET / FMT_ADDR_BARE adapt the width to 32/64-bit builds.
    if (psi)
    {
        sprintf_s(pszString,
                  cchMaxAssertStackLevelStringLen,
                  "%s! %s + 0x" FMT_ADDR_OFFSET " (0x" FMT_ADDR_BARE ")",
                  (psi->achModule[0]) ? psi->achModule : "<no module>",
                  (psi->achSymbol[0]) ? psi->achSymbol : "<no symbol>",
                  psi->dwOffset,
                  DBG_ADDR(dwAddr));
    }
    else
    {
        // No symbol info available: just print the raw address.
        sprintf_s(pszString, cchMaxAssertStackLevelStringLen, "<symbols not available> (0x%p)", (void *)dwAddr);
    }

    LOCAL_ASSERT(strlen(pszString) < cchMaxAssertStackLevelStringLen);
}
#if !defined(DACCESS_COMPILE)
/****************************************************************************
* GetStringFromStackLevels *
*--------------------------*
* Description:
* Retrieves a string from the stack frame. If more than one frame, they
* are separated by newlines
****************************************************************************/
void GetStringFromStackLevels
(
    UINT ifrStart,              // @parm How many stack elements to skip before starting.
    UINT cfrTotal,              // @parm How many elements to trace after starting.
                                // Can't be more than cfrMaxAssertStackLevels.
    __out_ecount(cchMaxAssertStackLevelStringLen * cfrTotal) CHAR *pszString, // @parm Place to put string.
                                // Max size will be cchMaxAssertStackLevelStringLen * cfrTotal.
    CONTEXT * pContext          // @parm Context to start the stack trace at; null for current context.
)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_CANNOT_TAKE_LOCK;

    LOCAL_ASSERT(pszString);
    LOCAL_ASSERT(cfrTotal < cfrMaxAssertStackLevels);

    *pszString = '\0';

    if (cfrTotal == 0)
    {
        return;
    }

    DWORD_PTR rgdwStackAddrs[cfrMaxAssertStackLevels];
    SYM_INFO rgsi[cfrMaxAssertStackLevels];

    // Ignore this function (GetStringFromStackLevels) if we don't have a user-supplied context.
    if (pContext == NULL)
    {
        ifrStart += 1;
    }

    UINT uiRetrieved =
            GetStackBacktrace(ifrStart, cfrTotal, rgdwStackAddrs, rgsi, pContext);

    // If the walk produced no frames, leave the output string empty instead
    // of formatting a zeroed, meaningless "first level" entry.
    if (uiRetrieved == 0)
    {
        return;
    }

    // First level
    CHAR aszLevel[cchMaxAssertStackLevelStringLen];
    GetStringFromSymbolInfo(rgdwStackAddrs[0], &rgsi[0], aszLevel);

    size_t bufSize = cchMaxAssertStackLevelStringLen * cfrTotal;
    strcpy_s(pszString, bufSize, aszLevel);

    // Additional levels, one per line.
    for (UINT i = 1; i < uiRetrieved; ++i)
    {
        strcat_s(pszString, bufSize, "\n");
        GetStringFromSymbolInfo(rgdwStackAddrs[i],
                                &rgsi[i], aszLevel);
        strcat_s(pszString, bufSize, aszLevel);
    }

    LOCAL_ASSERT(strlen(pszString) <= cchMaxAssertStackLevelStringLen * cfrTotal);
}
#endif // !defined(DACCESS_COMPILE)
/****************************************************************************
* GetStringFromAddr *
*-------------------*
* Description:
* Returns a string from an address.
****************************************************************************/
void GetStringFromAddr
(
    DWORD_PTR dwAddr,       // Address to describe.
    _Out_writes_(cchMaxAssertStackLevelStringLen) LPSTR szString // Place to put string.
                            // Buffer must hold at least cchMaxAssertStackLevelStringLen.
)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;

    LOCAL_ASSERT(szString);

    // Resolve module/symbol/offset for the address, then format it as
    // "<module>! <symbol> + 0x<offset> (0x<addr>)".
    SYM_INFO si;
    FillSymbolInfo(&si, dwAddr);

    sprintf_s(szString,
              cchMaxAssertStackLevelStringLen,
              "%s! %s + 0x%p (0x%p)",
              (si.achModule[0]) ? si.achModule : "<no module>",
              (si.achSymbol[0]) ? si.achSymbol : "<no symbol>",
              (void*)si.dwOffset,
              (void*)dwAddr);
}
/****************************************************************************
* MagicDeinit *
*-------------*
* Description:
* Cleans up for the symbol loading code. Should be called before exit
* to free the dynamically loaded imagehlp.dll.
****************************************************************************/
/****************************************************************************
 * MagicDeinit                                                              *
 *-------------*
 * Description:
 *   Tears down the symbol-loading support: releases the dynamically loaded
 *   imagehlp.dll (if it was loaded) and clears the loaded flag so a later
 *   MagicInit can start over. Should be called before process exit.
 ****************************************************************************/
void MagicDeinit(void)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;

    // Nothing to release if imagehlp.dll was never loaded.
    if (g_hinstImageHlp == NULL)
        return;

    FreeLibrary(g_hinstImageHlp);
    g_hinstImageHlp   = NULL;
    g_fLoadedImageHlp = FALSE;
}
#if defined(HOST_X86)
/****************************************************************************
* ClrCaptureContext *
*-------------------*
* Description:
* Exactly the contents of RtlCaptureContext for Win7 - Win2K doesn't
* support this, so we need it for CoreCLR 4, if we require Win2K support
****************************************************************************/
// Captures the caller's register state into *ctx.  The hex offsets below
// correspond to fields of the x86 CONTEXT structure (e.g. 0B0h = Eax,
// 0B8h = Eip, 0C4h = Esp) -- NOTE(review): offsets assumed to match the
// Win32 x86 CONTEXT layout; verify against winnt.h if this is ever edited.
// The final store writes ContextFlags (10007h).  EIP/EBP/ESP are taken from
// the caller's frame ([ebp+4]/[ebp]/ebp+8), i.e. the state at the call site.
extern "C" __declspec(naked) void __stdcall
ClrCaptureContext(_Out_ PCONTEXT ctx)
{
    __asm {
        push ebx;
        mov ebx,dword ptr [esp+8]
        mov dword ptr [ebx+0B0h],eax
        mov dword ptr [ebx+0ACh],ecx
        mov dword ptr [ebx+0A8h],edx
        mov eax,dword ptr [esp]          // original ebx, saved by the push above
        mov dword ptr [ebx+0A4h],eax
        mov dword ptr [ebx+0A0h],esi
        mov dword ptr [ebx+09Ch],edi
        mov word ptr [ebx+0BCh],cs
        mov word ptr [ebx+098h],ds
        mov word ptr [ebx+094h],es
        mov word ptr [ebx+090h],fs
        mov word ptr [ebx+08Ch],gs
        mov word ptr [ebx+0C8h],ss
        pushfd
        pop dword ptr [ebx+0C0h]         // EFlags
        mov eax,dword ptr [ebp+4]        // caller's return address -> Eip
        mov dword ptr [ebx+0B8h],eax
        mov eax,dword ptr [ebp]          // caller's saved frame ptr -> Ebp
        mov dword ptr [ebx+0B4h],eax
        lea eax,[ebp+8]                  // caller's stack pointer -> Esp
        mov dword ptr [ebx+0C4h],eax
        mov dword ptr [ebx],10007h       // ContextFlags
        pop ebx
        ret 4
    }
}
#endif // HOST_X86
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/tests/palsuite/c_runtime/vsprintf/test18/test18.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: test18.c
**
** Purpose: Test #18 for the vsprintf function.
**
**
**===================================================================*/
#include <palsuite.h>
#include "../vsprintf.h"
/*
* Notes: memcmp is used, as is strlen.
*/
PALTEST(c_runtime_vsprintf_test18_paltest_vsprintf_test18, "c_runtime/vsprintf/test18/paltest_vsprintf_test18")
{
double val = 2560.001;
double neg = -2560.001;
if (PAL_Initialize(argc, argv) != 0)
{
return(FAIL);
}
DoDoubleTest("foo %G", val, "foo 2560", "foo 2560");
DoDoubleTest("foo %lG", val, "foo 2560", "foo 2560");
DoDoubleTest("foo %hG", val, "foo 2560", "foo 2560");
DoDoubleTest("foo %LG", val, "foo 2560", "foo 2560");
DoDoubleTest("foo %I64G", val, "foo 2560", "foo 2560");
DoDoubleTest("foo %5G", val, "foo 2560", "foo 2560");
DoDoubleTest("foo %-5G", val, "foo 2560 ", "foo 2560 ");
DoDoubleTest("foo %.1G", val, "foo 3E+003", "foo 3E+03");
DoDoubleTest("foo %.2G", val, "foo 2.6E+003", "foo 2.6E+03");
DoDoubleTest("foo %.12G", val, "foo 2560.001", "foo 2560.001");
DoDoubleTest("foo %06G", val, "foo 002560", "foo 002560");
DoDoubleTest("foo %#G", val, "foo 2560.00", "foo 2560.00");
DoDoubleTest("foo %+G", val, "foo +2560", "foo +2560");
DoDoubleTest("foo % G", val, "foo 2560", "foo 2560");
DoDoubleTest("foo %+G", neg, "foo -2560", "foo -2560");
DoDoubleTest("foo % G", neg, "foo -2560", "foo -2560");
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: test18.c
**
** Purpose: Test #18 for the vsprintf function.
**
**
**===================================================================*/
#include <palsuite.h>
#include "../vsprintf.h"
/*
* Notes: memcmp is used, as is strlen.
*/
PALTEST(c_runtime_vsprintf_test18_paltest_vsprintf_test18, "c_runtime/vsprintf/test18/paltest_vsprintf_test18")
{
double val = 2560.001;
double neg = -2560.001;
if (PAL_Initialize(argc, argv) != 0)
{
return(FAIL);
}
DoDoubleTest("foo %G", val, "foo 2560", "foo 2560");
DoDoubleTest("foo %lG", val, "foo 2560", "foo 2560");
DoDoubleTest("foo %hG", val, "foo 2560", "foo 2560");
DoDoubleTest("foo %LG", val, "foo 2560", "foo 2560");
DoDoubleTest("foo %I64G", val, "foo 2560", "foo 2560");
DoDoubleTest("foo %5G", val, "foo 2560", "foo 2560");
DoDoubleTest("foo %-5G", val, "foo 2560 ", "foo 2560 ");
DoDoubleTest("foo %.1G", val, "foo 3E+003", "foo 3E+03");
DoDoubleTest("foo %.2G", val, "foo 2.6E+003", "foo 2.6E+03");
DoDoubleTest("foo %.12G", val, "foo 2560.001", "foo 2560.001");
DoDoubleTest("foo %06G", val, "foo 002560", "foo 002560");
DoDoubleTest("foo %#G", val, "foo 2560.00", "foo 2560.00");
DoDoubleTest("foo %+G", val, "foo +2560", "foo +2560");
DoDoubleTest("foo % G", val, "foo 2560", "foo 2560");
DoDoubleTest("foo %+G", neg, "foo -2560", "foo -2560");
DoDoubleTest("foo % G", neg, "foo -2560", "foo -2560");
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/mono/mono/utils/w32subset.h | /**
* \file
* Define Win32 API subset defaults.
* Other subsetters can fork this file, or
* define symbols ahead of it, or after it (with undef).
*
* Note that #if of an undefined symbols is defined as if 0,
* so that an implicit default here.
*
* Copyright 2019 Microsoft
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef HAVE_API_SUPPORT_WIN32_BSTR
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_BSTR 1
#else
#define HAVE_API_SUPPORT_WIN32_BSTR 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CANCEL_IO
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_CANCEL_IO 1
#else
#define HAVE_API_SUPPORT_WIN32_CANCEL_IO 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CANCEL_IO_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_CANCEL_IO_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_CANCEL_IO_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CANCEL_SYNCHRONOUS_IO
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_CANCEL_SYNCHRONOUS_IO 1
#else
#define HAVE_API_SUPPORT_WIN32_CANCEL_SYNCHRONOUS_IO 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_COMMAND_LINE_TO_ARGV
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_COMMAND_LINE_TO_ARGV 1
#else
#define HAVE_API_SUPPORT_WIN32_COMMAND_LINE_TO_ARGV 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CONSOLE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_CONSOLE 1
#else
#define HAVE_API_SUPPORT_WIN32_CONSOLE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_COPY_FILE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_COPY_FILE 1
#else
#define HAVE_API_SUPPORT_WIN32_COPY_FILE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_COPY_FILE2
#if G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_COPY_FILE2 1
#else
#define HAVE_API_SUPPORT_WIN32_COPY_FILE2 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_COREE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_COREE 1
#else
#define HAVE_API_SUPPORT_WIN32_COREE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CREATE_PROCESS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_CREATE_PROCESS 1
#else
#define HAVE_API_SUPPORT_WIN32_CREATE_PROCESS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CREATE_PROCESS_WITH_LOGON
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_CREATE_PROCESS_WITH_LOGON 1
#else
#define HAVE_API_SUPPORT_WIN32_CREATE_PROCESS_WITH_LOGON 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CREATE_SEMAPHORE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_CREATE_SEMAPHORE 1
#else
#define HAVE_API_SUPPORT_WIN32_CREATE_SEMAPHORE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CREATE_SEMAPHORE_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_CREATE_SEMAPHORE_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_CREATE_SEMAPHORE_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_DISCONNECT_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_DISCONNECT_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_DISCONNECT_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_ENUM_PROCESSES
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_ENUM_PROCESSES 1
#else
#define HAVE_API_SUPPORT_WIN32_ENUM_PROCESSES 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_ENUM_PROCESS_MODULES
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_ENUM_PROCESS_MODULES 1
#else
#define HAVE_API_SUPPORT_WIN32_ENUM_PROCESS_MODULES 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_ENUM_WINDOWS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_ENUM_WINDOWS 1
#else
#define HAVE_API_SUPPORT_WIN32_ENUM_WINDOWS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_FILE_MAPPING
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_FILE_MAPPING 1
#else
#define HAVE_API_SUPPORT_WIN32_FILE_MAPPING 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_FILE_MAPPING_FROM_APP
#if G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_FILE_MAPPING_FROM_APP 1
#else
#define HAVE_API_SUPPORT_WIN32_FILE_MAPPING_FROM_APP 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_FORMAT_MESSAGE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_FORMAT_MESSAGE 1
#else
#define HAVE_API_SUPPORT_WIN32_FORMAT_MESSAGE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_ACP
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_ACP 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_ACP 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_CP_INFO_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_CP_INFO_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_CP_INFO_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_DRIVE_TYPE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_DRIVE_TYPE 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_DRIVE_TYPE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_FILE_SIZE_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_FILE_SIZE_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_FILE_SIZE_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_FILE_VERSION_INFO
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_FILE_VERSION_INFO 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_FILE_VERSION_INFO 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_LOGICAL_DRIVE_STRINGS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_LOGICAL_DRIVE_STRINGS 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_LOGICAL_DRIVE_STRINGS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_MODULE_BASE_NAME
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_BASE_NAME 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_BASE_NAME 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_MODULE_FILE_NAME_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_FILE_NAME_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_FILE_NAME_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_MODULE_HANDLE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_HANDLE 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_HANDLE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_MODULE_HANDLE_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_HANDLE_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_HANDLE_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_MODULE_INFORMATION
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_INFORMATION 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_INFORMATION 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_PRIORITY_CLASS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_PRIORITY_CLASS 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_PRIORITY_CLASS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_PROCESS_TIMES
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_PROCESS_TIMES 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_PROCESS_TIMES 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_STD_HANDLE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_STD_HANDLE 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_STD_HANDLE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_SYSTEM_TIME_AS_FILE_TIME
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_SYSTEM_TIME_AS_FILE_TIME 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_SYSTEM_TIME_AS_FILE_TIME 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_SYSTEM_TIMES
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_SYSTEM_TIMES 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_SYSTEM_TIMES 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_WORKING_SET_SIZE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_WORKING_SET_SIZE 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_WORKING_SET_SIZE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_IS_WOW64_PROCESS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_IS_WOW64_PROCESS 1
#else
#define HAVE_API_SUPPORT_WIN32_IS_WOW64_PROCESS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_LOAD_LIBRARY
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_LOAD_LIBRARY 1
#else
#define HAVE_API_SUPPORT_WIN32_LOAD_LIBRARY 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_LOAD_PACKAGED_LIBRARY
#if G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_LOAD_PACKAGED_LIBRARY 1
#else
#define HAVE_API_SUPPORT_WIN32_LOAD_PACKAGED_LIBRARY 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_LOCAL_ALLOC_FREE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_LOCAL_ALLOC_FREE 1
#else
#define HAVE_API_SUPPORT_WIN32_LOCAL_ALLOC_FREE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_LOCAL_INFO
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_LOCAL_INFO 1
#else
#define HAVE_API_SUPPORT_WIN32_LOCAL_INFO 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_LOCAL_INFO_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_LOCAL_INFO_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_LOCAL_INFO_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_LOCK_FILE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_LOCK_FILE 1
#else
#define HAVE_API_SUPPORT_WIN32_LOCK_FILE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_MOVE_FILE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_MOVE_FILE 1
#else
#define HAVE_API_SUPPORT_WIN32_MOVE_FILE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_MOVE_FILE_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_MOVE_FILE_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_MOVE_FILE_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_MSG_WAIT_FOR_MULTIPLE_OBJECTS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_MSG_WAIT_FOR_MULTIPLE_OBJECTS 1
#else
#define HAVE_API_SUPPORT_WIN32_MSG_WAIT_FOR_MULTIPLE_OBJECTS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_OPEN_PROCESS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_OPEN_PROCESS 1
#else
#define HAVE_API_SUPPORT_WIN32_OPEN_PROCESS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_OPEN_THREAD
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_OPEN_THREAD 1
#else
#define HAVE_API_SUPPORT_WIN32_OPEN_THREAD 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_REPLACE_FILE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_REPLACE_FILE 1
#else
#define HAVE_API_SUPPORT_WIN32_REPLACE_FILE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_RESET_STKOFLW
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_RESET_STKOFLW 1
#else
#define HAVE_API_SUPPORT_WIN32_RESET_STKOFLW 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SAFE_ARRAY
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SAFE_ARRAY 1
#else
#define HAVE_API_SUPPORT_WIN32_SAFE_ARRAY 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SET_ERROR_MODE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SET_ERROR_MODE 1
#else
#define HAVE_API_SUPPORT_WIN32_SET_ERROR_MODE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SET_PRIORITY_CLASS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SET_PRIORITY_CLASS 1
#else
#define HAVE_API_SUPPORT_WIN32_SET_PRIORITY_CLASS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SET_THREAD_CONTEXT
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SET_THREAD_CONTEXT 1
#else
#define HAVE_API_SUPPORT_WIN32_SET_THREAD_CONTEXT 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SET_THREAD_DESCRIPTION
#if G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SET_THREAD_DESCRIPTION 1
#else
#define HAVE_API_SUPPORT_WIN32_SET_THREAD_DESCRIPTION 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SET_THREAD_STACK_GUARANTEE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SET_THREAD_STACK_GUARANTEE 1
#else
#define HAVE_API_SUPPORT_WIN32_SET_THREAD_STACK_GUARANTEE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SET_WORKING_SET_SIZE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SET_WORKING_SET_SIZE 1
#else
#define HAVE_API_SUPPORT_WIN32_SET_WORKING_SET_SIZE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SHELL_EXECUTE_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SHELL_EXECUTE_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_SHELL_EXECUTE_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SIGNAL_OBJECT_AND_WAIT
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SIGNAL_OBJECT_AND_WAIT 1
#else
#define HAVE_API_SUPPORT_WIN32_SIGNAL_OBJECT_AND_WAIT 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_TIMERS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_TIMERS 1
#else
#define HAVE_API_SUPPORT_WIN32_TIMERS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_TRANSMIT_FILE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_TRANSMIT_FILE 1
#else
#define HAVE_API_SUPPORT_WIN32_TRANSMIT_FILE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_UNLOCK_FILE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_UNLOCK_FILE 1
#else
#define HAVE_API_SUPPORT_WIN32_UNLOCK_FILE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_VER_LANGUAGE_NAME
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_VER_LANGUAGE_NAME 1
#else
#define HAVE_API_SUPPORT_WIN32_VER_LANGUAGE_NAME 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_VER_QUERY_VALUE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_VER_QUERY_VALUE 1
#else
#define HAVE_API_SUPPORT_WIN32_VER_QUERY_VALUE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_PIPE_OPEN_CLOSE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_PIPE_OPEN_CLOSE 1
#else
#define HAVE_API_SUPPORT_WIN32_PIPE_OPEN_CLOSE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CONTEXT_XSTATE
#define HAVE_API_SUPPORT_WIN32_CONTEXT_XSTATE 0
#endif
| /**
* \file
* Define Win32 API subset defaults.
* Other subsetters can fork this file, or
* define symbols ahead of it, or after it (with undef).
*
* Note that #if of an undefined symbols is defined as if 0,
* so that an implicit default here.
*
* Copyright 2019 Microsoft
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef HAVE_API_SUPPORT_WIN32_BSTR
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_BSTR 1
#else
#define HAVE_API_SUPPORT_WIN32_BSTR 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CANCEL_IO
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_CANCEL_IO 1
#else
#define HAVE_API_SUPPORT_WIN32_CANCEL_IO 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CANCEL_IO_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_CANCEL_IO_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_CANCEL_IO_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CANCEL_SYNCHRONOUS_IO
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_CANCEL_SYNCHRONOUS_IO 1
#else
#define HAVE_API_SUPPORT_WIN32_CANCEL_SYNCHRONOUS_IO 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_COMMAND_LINE_TO_ARGV
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_COMMAND_LINE_TO_ARGV 1
#else
#define HAVE_API_SUPPORT_WIN32_COMMAND_LINE_TO_ARGV 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CONSOLE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_CONSOLE 1
#else
#define HAVE_API_SUPPORT_WIN32_CONSOLE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_COPY_FILE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_COPY_FILE 1
#else
#define HAVE_API_SUPPORT_WIN32_COPY_FILE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_COPY_FILE2
#if G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_COPY_FILE2 1
#else
#define HAVE_API_SUPPORT_WIN32_COPY_FILE2 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_COREE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_COREE 1
#else
#define HAVE_API_SUPPORT_WIN32_COREE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CREATE_PROCESS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_CREATE_PROCESS 1
#else
#define HAVE_API_SUPPORT_WIN32_CREATE_PROCESS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CREATE_PROCESS_WITH_LOGON
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_CREATE_PROCESS_WITH_LOGON 1
#else
#define HAVE_API_SUPPORT_WIN32_CREATE_PROCESS_WITH_LOGON 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CREATE_SEMAPHORE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_CREATE_SEMAPHORE 1
#else
#define HAVE_API_SUPPORT_WIN32_CREATE_SEMAPHORE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CREATE_SEMAPHORE_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_CREATE_SEMAPHORE_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_CREATE_SEMAPHORE_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_DISCONNECT_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_DISCONNECT_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_DISCONNECT_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_ENUM_PROCESSES
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_ENUM_PROCESSES 1
#else
#define HAVE_API_SUPPORT_WIN32_ENUM_PROCESSES 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_ENUM_PROCESS_MODULES
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_ENUM_PROCESS_MODULES 1
#else
#define HAVE_API_SUPPORT_WIN32_ENUM_PROCESS_MODULES 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_ENUM_WINDOWS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_ENUM_WINDOWS 1
#else
#define HAVE_API_SUPPORT_WIN32_ENUM_WINDOWS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_FILE_MAPPING
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_FILE_MAPPING 1
#else
#define HAVE_API_SUPPORT_WIN32_FILE_MAPPING 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_FILE_MAPPING_FROM_APP
#if G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_FILE_MAPPING_FROM_APP 1
#else
#define HAVE_API_SUPPORT_WIN32_FILE_MAPPING_FROM_APP 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_FORMAT_MESSAGE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_FORMAT_MESSAGE 1
#else
#define HAVE_API_SUPPORT_WIN32_FORMAT_MESSAGE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_ACP
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_ACP 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_ACP 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_CP_INFO_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_CP_INFO_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_CP_INFO_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_DRIVE_TYPE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_DRIVE_TYPE 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_DRIVE_TYPE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_FILE_SIZE_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_FILE_SIZE_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_FILE_SIZE_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_FILE_VERSION_INFO
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_FILE_VERSION_INFO 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_FILE_VERSION_INFO 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_LOGICAL_DRIVE_STRINGS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_LOGICAL_DRIVE_STRINGS 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_LOGICAL_DRIVE_STRINGS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_MODULE_BASE_NAME
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_BASE_NAME 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_BASE_NAME 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_MODULE_FILE_NAME_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_FILE_NAME_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_FILE_NAME_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_MODULE_HANDLE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_HANDLE 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_HANDLE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_MODULE_HANDLE_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_HANDLE_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_HANDLE_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_MODULE_INFORMATION
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_INFORMATION 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_MODULE_INFORMATION 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_PRIORITY_CLASS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_PRIORITY_CLASS 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_PRIORITY_CLASS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_PROCESS_TIMES
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_PROCESS_TIMES 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_PROCESS_TIMES 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_STD_HANDLE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_STD_HANDLE 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_STD_HANDLE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_SYSTEM_TIME_AS_FILE_TIME
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_SYSTEM_TIME_AS_FILE_TIME 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_SYSTEM_TIME_AS_FILE_TIME 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_SYSTEM_TIMES
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_SYSTEM_TIMES 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_SYSTEM_TIMES 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_GET_WORKING_SET_SIZE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_GET_WORKING_SET_SIZE 1
#else
#define HAVE_API_SUPPORT_WIN32_GET_WORKING_SET_SIZE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_IS_WOW64_PROCESS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_IS_WOW64_PROCESS 1
#else
#define HAVE_API_SUPPORT_WIN32_IS_WOW64_PROCESS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_LOAD_LIBRARY
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_LOAD_LIBRARY 1
#else
#define HAVE_API_SUPPORT_WIN32_LOAD_LIBRARY 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_LOAD_PACKAGED_LIBRARY
#if G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_LOAD_PACKAGED_LIBRARY 1
#else
#define HAVE_API_SUPPORT_WIN32_LOAD_PACKAGED_LIBRARY 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_LOCAL_ALLOC_FREE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_LOCAL_ALLOC_FREE 1
#else
#define HAVE_API_SUPPORT_WIN32_LOCAL_ALLOC_FREE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_LOCAL_INFO
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_LOCAL_INFO 1
#else
#define HAVE_API_SUPPORT_WIN32_LOCAL_INFO 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_LOCAL_INFO_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_LOCAL_INFO_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_LOCAL_INFO_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_LOCK_FILE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_LOCK_FILE 1
#else
#define HAVE_API_SUPPORT_WIN32_LOCK_FILE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_MOVE_FILE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_MOVE_FILE 1
#else
#define HAVE_API_SUPPORT_WIN32_MOVE_FILE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_MOVE_FILE_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_MOVE_FILE_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_MOVE_FILE_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_MSG_WAIT_FOR_MULTIPLE_OBJECTS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_MSG_WAIT_FOR_MULTIPLE_OBJECTS 1
#else
#define HAVE_API_SUPPORT_WIN32_MSG_WAIT_FOR_MULTIPLE_OBJECTS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_OPEN_PROCESS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_OPEN_PROCESS 1
#else
#define HAVE_API_SUPPORT_WIN32_OPEN_PROCESS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_OPEN_THREAD
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_OPEN_THREAD 1
#else
#define HAVE_API_SUPPORT_WIN32_OPEN_THREAD 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_REPLACE_FILE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_REPLACE_FILE 1
#else
#define HAVE_API_SUPPORT_WIN32_REPLACE_FILE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_RESET_STKOFLW
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_RESET_STKOFLW 1
#else
#define HAVE_API_SUPPORT_WIN32_RESET_STKOFLW 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SAFE_ARRAY
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SAFE_ARRAY 1
#else
#define HAVE_API_SUPPORT_WIN32_SAFE_ARRAY 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SET_ERROR_MODE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SET_ERROR_MODE 1
#else
#define HAVE_API_SUPPORT_WIN32_SET_ERROR_MODE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SET_PRIORITY_CLASS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SET_PRIORITY_CLASS 1
#else
#define HAVE_API_SUPPORT_WIN32_SET_PRIORITY_CLASS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SET_THREAD_CONTEXT
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SET_THREAD_CONTEXT 1
#else
#define HAVE_API_SUPPORT_WIN32_SET_THREAD_CONTEXT 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SET_THREAD_DESCRIPTION
#if G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SET_THREAD_DESCRIPTION 1
#else
#define HAVE_API_SUPPORT_WIN32_SET_THREAD_DESCRIPTION 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SET_THREAD_STACK_GUARANTEE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SET_THREAD_STACK_GUARANTEE 1
#else
#define HAVE_API_SUPPORT_WIN32_SET_THREAD_STACK_GUARANTEE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SET_WORKING_SET_SIZE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SET_WORKING_SET_SIZE 1
#else
#define HAVE_API_SUPPORT_WIN32_SET_WORKING_SET_SIZE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SHELL_EXECUTE_EX
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SHELL_EXECUTE_EX 1
#else
#define HAVE_API_SUPPORT_WIN32_SHELL_EXECUTE_EX 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_SIGNAL_OBJECT_AND_WAIT
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_SIGNAL_OBJECT_AND_WAIT 1
#else
#define HAVE_API_SUPPORT_WIN32_SIGNAL_OBJECT_AND_WAIT 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_TIMERS
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_TIMERS 1
#else
#define HAVE_API_SUPPORT_WIN32_TIMERS 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_TRANSMIT_FILE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_TRANSMIT_FILE 1
#else
#define HAVE_API_SUPPORT_WIN32_TRANSMIT_FILE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_UNLOCK_FILE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT) || \
G_HAVE_API_SUPPORT(HAVE_UWP_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_UNLOCK_FILE 1
#else
#define HAVE_API_SUPPORT_WIN32_UNLOCK_FILE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_VER_LANGUAGE_NAME
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_VER_LANGUAGE_NAME 1
#else
#define HAVE_API_SUPPORT_WIN32_VER_LANGUAGE_NAME 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_VER_QUERY_VALUE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_VER_QUERY_VALUE 1
#else
#define HAVE_API_SUPPORT_WIN32_VER_QUERY_VALUE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_PIPE_OPEN_CLOSE
#if G_HAVE_API_SUPPORT(HAVE_CLASSIC_WINAPI_SUPPORT)
#define HAVE_API_SUPPORT_WIN32_PIPE_OPEN_CLOSE 1
#else
#define HAVE_API_SUPPORT_WIN32_PIPE_OPEN_CLOSE 0
#endif
#endif
#ifndef HAVE_API_SUPPORT_WIN32_CONTEXT_XSTATE
#define HAVE_API_SUPPORT_WIN32_CONTEXT_XSTATE 0
#endif
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/mono/mono/tests/libtest.cpp | // Libtest.c needs to be C++ for Visual C++ to accept __thiscall.
// gcc accepts __thiscall in C.
#include "libtest.c"
| // Libtest.c needs to be C++ for Visual C++ to accept __thiscall.
// gcc accepts __thiscall in C.
#include "libtest.c"
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/vm/stringliteralmap.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Header: Map used for interning of string literals.
**
===========================================================*/
#include "common.h"
#include "eeconfig.h"
#include "stringliteralmap.h"
/*
Thread safety in GlobalStringLiteralMap / StringLiteralMap
A single lock protects the N StringLiteralMap objects and single
GlobalStringLiteralMap rooted in the SystemDomain at any time. It is
SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal
At one time each StringLiteralMap had it's own lock to protect
the entry hash table as well, and Interlocked operations were done on the
ref count of the contained StringLiteralEntries. But anything of import
needed to be done under the global lock mentioned above or races would
result. (For example, an app domain shuts down, doing final release on
a StringLiteralEntry, but at that moment the entry is being handed out
in another appdomain and addref'd only after the count went to 0.)
The rule is:
Any AddRef()/Release() calls on StringLiteralEntry need to be under the lock.
Any insert/deletes from the StringLiteralMap or GlobalStringLiteralMap
need to be done under the lock.
The only thing you can do without the lock is look up an existing StringLiteralEntry
in an StringLiteralMap hash table. This is true because these lookup calls
will all come before destruction of the map, the hash table is safe for multiple readers,
and we know the StringLiteralEntry so found 1) can't be destroyed because that table keeps
an AddRef on it and 2) isn't internally modified once created.
*/
#define GLOBAL_STRING_TABLE_BUCKET_SIZE 128
#define INIT_NUM_APP_DOMAIN_STRING_BUCKETS 59
#define INIT_NUM_GLOBAL_STRING_BUCKETS 131
// assumes that memory pools's per block data is same as sizeof (StringLiteralEntry)
#define EEHASH_MEMORY_POOL_GROW_COUNT 128
StringLiteralEntryArray *StringLiteralEntry::s_EntryList = NULL;
DWORD StringLiteralEntry::s_UsedEntries = NULL;
StringLiteralEntry *StringLiteralEntry::s_FreeEntryList = NULL;
StringLiteralMap::StringLiteralMap()
: m_StringToEntryHashTable(NULL)
, m_MemoryPool(NULL)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
}
CONTRACTL_END;
}
void StringLiteralMap::Init()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
PRECONDITION(CheckPointer(this));
INJECT_FAULT(ThrowOutOfMemory());
}
CONTRACTL_END;
// Allocate the memory pool and set the initial count to quarter as grow count
m_MemoryPool = new MemoryPool (SIZEOF_EEHASH_ENTRY, EEHASH_MEMORY_POOL_GROW_COUNT, EEHASH_MEMORY_POOL_GROW_COUNT/4);
m_StringToEntryHashTable = new EEUnicodeStringLiteralHashTable ();
LockOwner lock = {&(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal), IsOwnerOfCrst};
if (!m_StringToEntryHashTable->Init(INIT_NUM_APP_DOMAIN_STRING_BUCKETS, &lock, m_MemoryPool))
ThrowOutOfMemory();
}
StringLiteralMap::~StringLiteralMap()
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
}
CONTRACTL_END;
// We do need to take the globalstringliteralmap lock because we are manipulating
// StringLiteralEntry objects that belong to it.
// Note that we remember the current entry and relaese it only when the
// enumerator has advanced to the next entry so that we don't endup deleteing the
// current entry itself and killing the enumerator.
if (m_StringToEntryHashTable != NULL)
{
// We need the global lock anytime we release StringLiteralEntry objects
CrstHolder gch(&(SystemDomain::GetGlobalStringLiteralMapNoCreate()->m_HashTableCrstGlobal));
StringLiteralEntry *pEntry = NULL;
EEHashTableIteration Iter;
#ifdef _DEBUG
m_StringToEntryHashTable->SuppressSyncCheck();
#endif
m_StringToEntryHashTable->IterateStart(&Iter);
if (m_StringToEntryHashTable->IterateNext(&Iter))
{
pEntry = (StringLiteralEntry*)m_StringToEntryHashTable->IterateGetValue(&Iter);
while (m_StringToEntryHashTable->IterateNext(&Iter))
{
// Release the previous entry
_ASSERTE(pEntry);
pEntry->Release();
// Set the
pEntry = (StringLiteralEntry*)m_StringToEntryHashTable->IterateGetValue(&Iter);
}
// Release the last entry
_ASSERTE(pEntry);
pEntry->Release();
}
// else there were no entries.
// Delete the hash table first. The dtor of the hash table would clean up all the entries.
delete m_StringToEntryHashTable;
}
// Delete the pool later, since the dtor above would need it.
if (m_MemoryPool != NULL)
delete m_MemoryPool;
}
STRINGREF *StringLiteralMap::GetStringLiteral(EEStringData *pStringData, BOOL bAddIfNotFound, BOOL bAppDomainWontUnload)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(CheckPointer(this));
PRECONDITION(CheckPointer(pStringData));
}
CONTRACTL_END;
HashDatum Data;
DWORD dwHash = m_StringToEntryHashTable->GetHash(pStringData);
// Retrieve the string literal from the global string literal map.
CrstHolder gch(&(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal));
// TODO: We can be more efficient by checking our local hash table now to see if
// someone beat us to inserting it. (m_StringToEntryHashTable->GetValue(pStringData, &Data))
// (Rather than waiting until after we look the string up in the global map)
StringLiteralEntryHolder pEntry(SystemDomain::GetGlobalStringLiteralMap()->GetStringLiteral(pStringData, dwHash, bAddIfNotFound));
_ASSERTE(pEntry || !bAddIfNotFound);
// If pEntry is non-null then the entry exists in the Global map. (either we retrieved it or added it just now)
if (pEntry)
{
// If the entry exists in the Global map and the appdomain wont ever unload then we really don't need to add a
// hashentry in the appdomain specific map.
// TODO: except that by not inserting into our local table we always take the global map lock
// and come into this path, when we could succeed at a lock free lookup above.
if (!bAppDomainWontUnload)
{
// Make sure some other thread has not already added it.
if (!m_StringToEntryHashTable->GetValue(pStringData, &Data))
{
// Insert the handle to the string into the hash table.
m_StringToEntryHashTable->InsertValue(pStringData, (LPVOID)pEntry, FALSE);
}
else
{
pEntry.Release(); //while we're still under lock
}
}
#ifdef _DEBUG
else
{
LOG((LF_APPDOMAIN, LL_INFO10000, "Avoided adding String literal to appdomain map: size: %d bytes\n", pStringData->GetCharCount()));
}
#endif
pEntry.SuppressRelease();
STRINGREF *pStrObj = NULL;
// Retrieve the string objectref from the string literal entry.
pStrObj = pEntry->GetStringObject();
_ASSERTE(!bAddIfNotFound || pStrObj);
return pStrObj;
}
// If the bAddIfNotFound flag is set then we better have a string
// string object at this point.
_ASSERTE(!bAddIfNotFound);
return NULL;
}
// Looks up (and optionally creates) the interned entry for the live string
// object *pString. Fast path: a lock-free read of this per-appdomain hash
// table. Slow path: take the global lock, defer to the GlobalStringLiteralMap,
// then cache the resulting entry locally unless bAppDomainWontUnload makes a
// per-domain reference unnecessary.
// Returns a pinned STRINGREF* for the canonical string, or NULL when
// bAddIfNotFound is FALSE and the string was not already interned.
STRINGREF *StringLiteralMap::GetInternedString(STRINGREF *pString, BOOL bAddIfNotFound, BOOL bAppDomainWontUnload)
{
    CONTRACTL
    {
        GC_TRIGGERS;
        THROWS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(this));
        PRECONDITION(CheckPointer(pString));
    }
    CONTRACTL_END;

    HashDatum Data;
    EEStringData StringData = EEStringData((*pString)->GetStringLength(), (*pString)->GetBuffer());

    DWORD dwHash = m_StringToEntryHashTable->GetHash(&StringData);
    if (m_StringToEntryHashTable->GetValue(&StringData, &Data, dwHash))
    {
        STRINGREF *pStrObj = NULL;
        pStrObj = ((StringLiteralEntry*)Data)->GetStringObject();
        _ASSERTE(!bAddIfNotFound || pStrObj);
        return pStrObj;
    }
    else
    {
        // Lock-free lookup missed: take the single global lock and consult the
        // global map.
        CrstHolder gch(&(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal));

        // TODO: We can be more efficient by checking our local hash table now to see if
        // someone beat us to inserting it. (m_StringToEntryHashTable->GetValue(pStringData, &Data))
        // (Rather than waiting until after we look the string up in the global map)

        // Retrieve the string literal from the global string literal map.
        StringLiteralEntryHolder pEntry(SystemDomain::GetGlobalStringLiteralMap()->GetInternedString(pString, dwHash, bAddIfNotFound));

        _ASSERTE(pEntry || !bAddIfNotFound);

        // If pEntry is non-null then the entry exists in the Global map. (either we retrieved it or added it just now)
        if (pEntry)
        {
            // If the entry exists in the Global map and the appdomain won't ever unload then we really don't need to add a
            // hash entry in the appdomain-specific map.
            // TODO: except that by not inserting into our local table we always take the global map lock
            // and come into this path, when we could succeed at a lock free lookup above.
            if (!bAppDomainWontUnload)
            {
                // Since GlobalStringLiteralMap::GetInternedString() could have caused a GC,
                // we need to recreate the string data.
                StringData = EEStringData((*pString)->GetStringLength(), (*pString)->GetBuffer());

                // Make sure some other thread has not already added it.
                if (!m_StringToEntryHashTable->GetValue(&StringData, &Data))
                {
                    // Insert the handle to the string into the hash table.
                    m_StringToEntryHashTable->InsertValue(&StringData, (LPVOID)pEntry, FALSE);
                }
                else
                {
                    pEntry.Release(); // while we're under lock
                }
            }
            pEntry.SuppressRelease();
            // Retrieve the string objectref from the string literal entry.
            STRINGREF *pStrObj = NULL;
            pStrObj = pEntry->GetStringObject();
            return pStrObj;
        }
    }
    // If the bAddIfNotFound flag is set then we better have a string
    // object at this point.
    _ASSERTE(!bAddIfNotFound);
    return NULL;
}
// Constructs the process-wide literal map. m_HashTableCrstGlobal is the single
// lock that guards this map and every per-appdomain StringLiteralMap; the
// pinned-heap handle table is rooted in the system domain.
GlobalStringLiteralMap::GlobalStringLiteralMap()
: m_StringToEntryHashTable(NULL)
, m_MemoryPool(NULL)
, m_HashTableCrstGlobal(CrstGlobalStrLiteralMap)
, m_PinnedHeapHandleTable(SystemDomain::System(), GLOBAL_STRING_TABLE_BUCKET_SIZE)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

#ifdef _DEBUG
    // Let the handle table verify in debug builds that callers hold the
    // global lock.
    m_PinnedHeapHandleTable.RegisterCrstDebug(&m_HashTableCrstGlobal);
#endif
}
// Destroys a GlobalStringLiteralMap. Only a losing duplicate from a creation
// race is actually torn down (and it must be empty); the real global map is
// only destroyed at process shutdown, where the OS reclaims its memory.
GlobalStringLiteralMap::~GlobalStringLiteralMap()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    // If we are deleting the map then either it is shutdown time or else there was a race trying to create
    // the initial map and this one was the loser
    // (i.e. two threads made a map and the InterlockedCompareExchange failed for one of them and
    // now it is deleting the map)
    //
    // If it's not the main map, then the map we are deleting better be empty!

    // There must be *some* global table.
    _ASSERTE(SystemDomain::GetGlobalStringLiteralMapNoCreate() != NULL);

    if (SystemDomain::GetGlobalStringLiteralMapNoCreate() != this)
    {
        // If this isn't the real global table then it must be empty.
        _ASSERTE(m_StringToEntryHashTable->IsEmpty());

        // Delete the hash table first. The dtor of the hash table would clean up all the entries.
        delete m_StringToEntryHashTable;
        // Delete the pool later, since the dtor above would need it.
        delete m_MemoryPool;
    }
    else
    {
        // We are shutting down, the OS will reclaim the memory from the StringLiteralEntries,
        // m_MemoryPool and m_StringToEntryHashTable.
        _ASSERTE(g_fProcessDetach);
    }
}
// Second-phase initialization: allocates the entry memory pool and the global
// hash table (guarded by m_HashTableCrstGlobal). Throws OutOfMemory on
// failure.
void GlobalStringLiteralMap::Init()
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        PRECONDITION(CheckPointer(this));
        INJECT_FAULT(ThrowOutOfMemory());
    }
    CONTRACTL_END;

    // Allocate the memory pool and set the initial count to quarter as grow count
    m_MemoryPool = new MemoryPool (SIZEOF_EEHASH_ENTRY, EEHASH_MEMORY_POOL_GROW_COUNT, EEHASH_MEMORY_POOL_GROW_COUNT/4);

    m_StringToEntryHashTable = new EEUnicodeStringLiteralHashTable ();

    LockOwner lock = {&m_HashTableCrstGlobal, IsOwnerOfCrst};
    if (!m_StringToEntryHashTable->Init(INIT_NUM_GLOBAL_STRING_BUCKETS, &lock, m_MemoryPool))
        ThrowOutOfMemory();
}
// Returns the entry for pStringData from the global map, AddRef'd for the
// caller; when absent and bAddIfNotFound is TRUE, allocates a string object
// and a new entry via AddStringLiteral. dwHash is the caller-computed hash of
// pStringData. Caller must hold m_HashTableCrstGlobal (asserted below).
StringLiteralEntry *GlobalStringLiteralMap::GetStringLiteral(EEStringData *pStringData, DWORD dwHash, BOOL bAddIfNotFound)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(this));
        PRECONDITION(CheckPointer(pStringData));
        PRECONDITION(m_HashTableCrstGlobal.OwnedByCurrentThread());
    }
    CONTRACTL_END;

    HashDatum Data;
    StringLiteralEntry *pEntry = NULL;

    if (m_StringToEntryHashTable->GetValueSpeculative(pStringData, &Data, dwHash)) // Since we hold the critical section here, we can safely use the speculative variant of GetValue
    {
        pEntry = (StringLiteralEntry*)Data;
        // If the entry is already in the table then addref it before we return it.
        if (pEntry)
            pEntry->AddRef();
    }
    else
    {
        if (bAddIfNotFound)
            pEntry = AddStringLiteral(pStringData);
    }

    return pEntry;
}
// Returns the entry whose characters match the live string *pString, AddRef'd
// for the caller; when absent and bAddIfNotFound is TRUE, interns *pString
// itself via AddInternedString. dwHash is the caller-computed hash. Caller
// must hold m_HashTableCrstGlobal (asserted below).
StringLiteralEntry *GlobalStringLiteralMap::GetInternedString(STRINGREF *pString, DWORD dwHash, BOOL bAddIfNotFound)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(this));
        PRECONDITION(CheckPointer(pString));
        PRECONDITION(m_HashTableCrstGlobal.OwnedByCurrentThread());
    }
    CONTRACTL_END;

    EEStringData StringData = EEStringData((*pString)->GetStringLength(), (*pString)->GetBuffer());

    HashDatum Data;
    StringLiteralEntry *pEntry = NULL;

    if (m_StringToEntryHashTable->GetValue(&StringData, &Data, dwHash))
    {
        pEntry = (StringLiteralEntry*)Data;
        // If the entry is already in the table then addref it before we return it.
        if (pEntry)
            pEntry->AddRef();
    }
    else
    {
        if (bAddIfNotFound)
            pEntry = AddInternedString(pString);
    }

    return pEntry;
}
#ifdef LOGGING
// Logging-only helper: records the action ("added"/"removed") together with up
// to the first 100 characters of the literal. Copies into a stack buffer and
// null-terminates it, since literals may contain embedded nulls.
static void LogStringLiteral(_In_z_ const char* action, EEStringData *pStringData)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    int length = pStringData->GetCharCount();
    length = min(length, 100);
    WCHAR *szString = (WCHAR *)_alloca((length + 1) * sizeof(WCHAR));
    memcpyNoGCRefs((void*)szString, (void*)pStringData->GetStringBuffer(), length * sizeof(WCHAR));
    szString[length] = '\0';
    LOG((LF_APPDOMAIN, LL_INFO10000, "String literal \"%S\" %s to Global map, size %d bytes\n", szString, action, pStringData->GetCharCount()));
}
#endif
// Allocates a managed string of pStringData's length and copies the characters
// in, GC-protecting the new object across the copy. The buffer is explicitly
// null-terminated for safety, but embedded nulls are legal, so the result must
// not be treated as a null-terminated string.
STRINGREF AllocateStringObject(EEStringData *pStringData)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    // Create the COM+ string object.
    DWORD cCount = pStringData->GetCharCount();

    STRINGREF strObj = AllocateString(cCount);

    GCPROTECT_BEGIN(strObj)
    {
        // Copy the string constant into the COM+ string object. The code
        // will add an extra null at the end for safety purposes, but since
        // we support embedded nulls, one should never treat the string as
        // null terminated.
        LPWSTR strDest = strObj->GetBuffer();
        memcpyNoGCRefs(strDest, pStringData->GetStringBuffer(), cCount*sizeof(WCHAR));
        strDest[cCount] = 0;
    }
    GCPROTECT_END();

    return strObj;
}
// Creates a newly interned string: allocates a pinned handle, the string
// object and a StringLiteralEntry, then inserts the entry into the global
// hash table. Caller must hold m_HashTableCrstGlobal. Returns the new entry
// carrying its initial reference.
StringLiteralEntry *GlobalStringLiteralMap::AddStringLiteral(EEStringData *pStringData)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(this));
        PRECONDITION(m_HashTableCrstGlobal.OwnedByCurrentThread());
    }
    CONTRACTL_END;

    StringLiteralEntry *pRet;

    {
        // Holder releases the handle block if anything below throws.
        PinnedHeapHandleBlockHolder pStrObj(&m_PinnedHeapHandleTable,1);

        // Create the COM+ string object.
        STRINGREF strObj = AllocateStringObject(pStringData);

        // Allocate a handle for the string.
        SetObjectReference(pStrObj[0], (OBJECTREF) strObj);

        // Allocate the StringLiteralEntry.
        StringLiteralEntryHolder pEntry(StringLiteralEntry::AllocateEntry(pStringData, (STRINGREF*)pStrObj[0]));
        pStrObj.SuppressRelease();
        // Insert the handle to the string into the hash table.
        m_StringToEntryHashTable->InsertValue(pStringData, (LPVOID)pEntry, FALSE);
        pEntry.SuppressRelease();
        pRet = pEntry;

#ifdef LOGGING
        LogStringLiteral("added", pStringData);
#endif
    }

    return pRet;
}
// Interns an existing string object: pins it via a new handle, allocates a
// StringLiteralEntry for it and inserts the entry into the global hash table.
// Caller must hold m_HashTableCrstGlobal.
StringLiteralEntry *GlobalStringLiteralMap::AddInternedString(STRINGREF *pString)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(this));
        PRECONDITION(m_HashTableCrstGlobal.OwnedByCurrentThread());
    }
    CONTRACTL_END;

    EEStringData StringData = EEStringData((*pString)->GetStringLength(), (*pString)->GetBuffer());
    StringLiteralEntry *pRet;

    {
        PinnedHeapHandleBlockHolder pStrObj(&m_PinnedHeapHandleTable,1);
        SetObjectReference(pStrObj[0], (OBJECTREF) *pString);

        // Since the allocation might have caused a GC we need to re-get the
        // string data.
        StringData = EEStringData((*pString)->GetStringLength(), (*pString)->GetBuffer());

        StringLiteralEntryHolder pEntry(StringLiteralEntry::AllocateEntry(&StringData, (STRINGREF*)pStrObj[0]));
        pStrObj.SuppressRelease();

        // Insert the handle to the string into the hash table.
        m_StringToEntryHashTable->InsertValue(&StringData, (LPVOID)pEntry, FALSE);
        pEntry.SuppressRelease();
        pRet = pEntry;
    }

    return pRet;
}
// Removes pEntry from the global hash table and releases its pinned object
// handle. The StringLiteralEntry storage itself is not freed here; that
// happens in StringLiteralEntry's release path.
void GlobalStringLiteralMap::RemoveStringLiteralEntry(StringLiteralEntry *pEntry)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(CheckPointer(pEntry));
        PRECONDITION(m_HashTableCrstGlobal.OwnedByCurrentThread());
        PRECONDITION(CheckPointer(this));
    }
    CONTRACTL_END;

    // Remove the entry from the hash table.
    {
        GCX_COOP();

        EEStringData StringData;
        pEntry->GetStringData(&StringData);

        BOOL bSuccess;
        bSuccess = m_StringToEntryHashTable->DeleteValue(&StringData);
        // This assert is commented out to accommodate the case where a
        // StringLiteralEntryHolder releases this object after a failed
        // insertion into the hash.
        //_ASSERTE(bSuccess);

#ifdef LOGGING
        // We need to do this logging within the GCX_COOP(), as a gc will render
        // our StringData pointers stale.
        if (bSuccess)
        {
            LogStringLiteral("removed", &StringData);
        }
#endif

        // Release the object handle that the entry was using.
        STRINGREF *pObjRef = pEntry->GetStringObject();
        m_PinnedHeapHandleTable.ReleaseHandles((OBJECTREF*)pObjRef, 1);
    }

    // We do not delete the StringLiteralEntry itself; that will be done in the
    // release method of the StringLiteralEntry.
}
// Allocates a StringLiteralEntry, preferring recycled storage from
// s_FreeEntryList and otherwise carving space out of chunked
// StringLiteralEntryArray blocks. Not synchronized itself: the caller must
// hold the global hash-table lock (asserted below), which also protects
// s_FreeEntryList, s_EntryList and s_UsedEntries.
StringLiteralEntry *StringLiteralEntry::AllocateEntry(EEStringData *pStringData, STRINGREF *pStringObj)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS; // GC_TRIGGERS because in the precondition below GetGlobalStringLiteralMap() might need to create the map
        MODE_COOPERATIVE;
        PRECONDITION(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal.OwnedByCurrentThread());
    }
    CONTRACTL_END;

    // Note: we don't synchronize here because allocateEntry is called when HashCrst is held.
    void *pMem = NULL;
    if (s_FreeEntryList != NULL)
    {
        pMem = s_FreeEntryList;
        s_FreeEntryList = s_FreeEntryList->m_pNext;
        _ASSERTE (((StringLiteralEntry*)pMem)->m_bDeleted);
    }
    else
    {
        if (s_EntryList == NULL || (s_UsedEntries >= MAX_ENTRIES_PER_CHUNK))
        {
            // Current chunk is exhausted (or none exists yet): push a fresh
            // chunk onto the chunk list.
            StringLiteralEntryArray *pNew = new StringLiteralEntryArray();
            pNew->m_pNext = s_EntryList;
            s_EntryList = pNew;
            s_UsedEntries = 0;
        }
        // NOTE(review): the manual index * sizeof(StringLiteralEntry) scaling
        // suggests m_Entries is a raw byte array -- confirm against the
        // declaration of StringLiteralEntryArray.
        pMem = &(s_EntryList->m_Entries[s_UsedEntries++*sizeof(StringLiteralEntry)]);
    }

    _ASSERTE (pMem && "Unable to allocate String literal Entry");

    // Placement-new the entry into the recycled/chunk storage.
    return new (pMem) StringLiteralEntry (pStringData, pStringObj);
}
// Returns pEntry's storage to the free list once its refcount has reached
// zero. Does not invoke the destructor; the storage is simply recycled by
// AllocateEntry. Caller must hold the global hash-table lock, which protects
// s_FreeEntryList.
void StringLiteralEntry::DeleteEntry (StringLiteralEntry *pEntry)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(SystemDomain::GetGlobalStringLiteralMapNoCreate()->m_HashTableCrstGlobal.OwnedByCurrentThread());
    }
    CONTRACTL_END;

    _ASSERTE (VolatileLoad(&pEntry->m_dwRefCount) == 0);

#ifdef _DEBUG
    // Poison the freed memory to catch use-after-free in debug builds.
    memset (pEntry, 0xc, sizeof(StringLiteralEntry));
#endif

#ifdef _DEBUG
    pEntry->m_bDeleted = TRUE;
#endif

    // The free list needs protection from the m_HashTableCrstGlobal
    pEntry->m_pNext = s_FreeEntryList;
    s_FreeEntryList = pEntry;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Header: Map used for interning of string literals.
**
===========================================================*/
#include "common.h"
#include "eeconfig.h"
#include "stringliteralmap.h"
/*
Thread safety in GlobalStringLiteralMap / StringLiteralMap
A single lock protects the N StringLiteralMap objects and single
GlobalStringLiteralMap rooted in the SystemDomain at any time. It is
SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal
At one time each StringLiteralMap had its own lock to protect
the entry hash table as well, and Interlocked operations were done on the
ref count of the contained StringLiteralEntries. But anything of import
needed to be done under the global lock mentioned above or races would
result. (For example, an app domain shuts down, doing final release on
a StringLiteralEntry, but at that moment the entry is being handed out
in another appdomain and addref'd only after the count went to 0.)
The rule is:
Any AddRef()/Release() calls on StringLiteralEntry need to be under the lock.
Any insert/deletes from the StringLiteralMap or GlobalStringLiteralMap
need to be done under the lock.
The only thing you can do without the lock is look up an existing StringLiteralEntry
in an StringLiteralMap hash table. This is true because these lookup calls
will all come before destruction of the map, the hash table is safe for multiple readers,
and we know the StringLiteralEntry so found 1) can't be destroyed because that table keeps
an AddRef on it and 2) isn't internally modified once created.
*/
#define GLOBAL_STRING_TABLE_BUCKET_SIZE 128
#define INIT_NUM_APP_DOMAIN_STRING_BUCKETS 59
#define INIT_NUM_GLOBAL_STRING_BUCKETS 131
// assumes that memory pools's per block data is same as sizeof (StringLiteralEntry)
#define EEHASH_MEMORY_POOL_GROW_COUNT 128
// Chunk list, next-free index within the current chunk, and recycled-entry
// free list backing StringLiteralEntry storage. All three are guarded by the
// global hash-table lock (see AllocateEntry / DeleteEntry).
StringLiteralEntryArray *StringLiteralEntry::s_EntryList = NULL;
// s_UsedEntries is an integral counter, not a pointer: initialize it with 0.
// (Initializing a DWORD with NULL was semantically wrong and raises
// pointer-to-integer conversion warnings once warning suppression is removed.)
DWORD StringLiteralEntry::s_UsedEntries = 0;
StringLiteralEntry *StringLiteralEntry::s_FreeEntryList = NULL;
// Constructs an empty per-appdomain literal map; the hash table and memory
// pool are allocated later in Init().
StringLiteralMap::StringLiteralMap()
: m_StringToEntryHashTable(NULL)
, m_MemoryPool(NULL)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;
}
// Second-phase initialization: allocates this map's memory pool and hash
// table. The table is guarded by the single global lock owned by the
// GlobalStringLiteralMap (which this call may create). Throws OutOfMemory on
// failure.
void StringLiteralMap::Init()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        PRECONDITION(CheckPointer(this));
        INJECT_FAULT(ThrowOutOfMemory());
    }
    CONTRACTL_END;

    // Allocate the memory pool and set the initial count to quarter as grow count
    m_MemoryPool = new MemoryPool (SIZEOF_EEHASH_ENTRY, EEHASH_MEMORY_POOL_GROW_COUNT, EEHASH_MEMORY_POOL_GROW_COUNT/4);

    m_StringToEntryHashTable = new EEUnicodeStringLiteralHashTable ();

    LockOwner lock = {&(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal), IsOwnerOfCrst};
    if (!m_StringToEntryHashTable->Init(INIT_NUM_APP_DOMAIN_STRING_BUCKETS, &lock, m_MemoryPool))
        ThrowOutOfMemory();
}
// Tears down a per-appdomain map: releases every StringLiteralEntry it holds
// (under the global lock), then deletes the hash table and, last, the memory
// pool the table's destructor still needs.
StringLiteralMap::~StringLiteralMap()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    // We do need to take the global string literal map lock because we are
    // manipulating StringLiteralEntry objects that belong to it.
    // Note that we remember the current entry and release it only when the
    // enumerator has advanced to the next entry, so that we don't end up
    // deleting the current entry itself and killing the enumerator.

    if (m_StringToEntryHashTable != NULL)
    {
        // We need the global lock anytime we release StringLiteralEntry objects
        CrstHolder gch(&(SystemDomain::GetGlobalStringLiteralMapNoCreate()->m_HashTableCrstGlobal));

        StringLiteralEntry *pEntry = NULL;
        EEHashTableIteration Iter;

#ifdef _DEBUG
        m_StringToEntryHashTable->SuppressSyncCheck();
#endif

        m_StringToEntryHashTable->IterateStart(&Iter);
        if (m_StringToEntryHashTable->IterateNext(&Iter))
        {
            pEntry = (StringLiteralEntry*)m_StringToEntryHashTable->IterateGetValue(&Iter);

            while (m_StringToEntryHashTable->IterateNext(&Iter))
            {
                // Release the previous entry
                _ASSERTE(pEntry);
                pEntry->Release();

                // Remember the current entry so it is released on the next pass.
                pEntry = (StringLiteralEntry*)m_StringToEntryHashTable->IterateGetValue(&Iter);
            }
            // Release the last entry
            _ASSERTE(pEntry);
            pEntry->Release();
        }
        // else there were no entries.

        // Delete the hash table first. The dtor of the hash table would clean up all the entries.
        delete m_StringToEntryHashTable;
    }
    // Delete the pool later, since the dtor above would need it.
    if (m_MemoryPool != NULL)
        delete m_MemoryPool;
}
// Looks up (and optionally creates) the interned string described by
// pStringData. Always takes the global lock and consults the
// GlobalStringLiteralMap; on success the entry is cached in this map's own
// table unless bAppDomainWontUnload makes the per-domain reference
// unnecessary. Returns a pinned STRINGREF* for the canonical string, or NULL
// when bAddIfNotFound is FALSE and the literal is not present.
STRINGREF *StringLiteralMap::GetStringLiteral(EEStringData *pStringData, BOOL bAddIfNotFound, BOOL bAppDomainWontUnload)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(this));
        PRECONDITION(CheckPointer(pStringData));
    }
    CONTRACTL_END;

    HashDatum Data;

    DWORD dwHash = m_StringToEntryHashTable->GetHash(pStringData);

    // Retrieve the string literal from the global string literal map.
    CrstHolder gch(&(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal));

    // TODO: We can be more efficient by checking our local hash table now to see if
    // someone beat us to inserting it. (m_StringToEntryHashTable->GetValue(pStringData, &Data))
    // (Rather than waiting until after we look the string up in the global map)

    StringLiteralEntryHolder pEntry(SystemDomain::GetGlobalStringLiteralMap()->GetStringLiteral(pStringData, dwHash, bAddIfNotFound));

    _ASSERTE(pEntry || !bAddIfNotFound);

    // If pEntry is non-null then the entry exists in the Global map. (either we retrieved it or added it just now)
    if (pEntry)
    {
        // If the entry exists in the Global map and the appdomain won't ever unload then we really don't need to add a
        // hash entry in the appdomain-specific map.
        // TODO: except that by not inserting into our local table we always take the global map lock
        // and come into this path, when we could succeed at a lock free lookup above.
        if (!bAppDomainWontUnload)
        {
            // Make sure some other thread has not already added it.
            if (!m_StringToEntryHashTable->GetValue(pStringData, &Data))
            {
                // Insert the handle to the string into the hash table.
                m_StringToEntryHashTable->InsertValue(pStringData, (LPVOID)pEntry, FALSE);
            }
            else
            {
                pEntry.Release(); // while we're still under lock
            }
        }
#ifdef _DEBUG
        else
        {
            LOG((LF_APPDOMAIN, LL_INFO10000, "Avoided adding String literal to appdomain map: size: %d bytes\n", pStringData->GetCharCount()));
        }
#endif
        pEntry.SuppressRelease();
        STRINGREF *pStrObj = NULL;
        // Retrieve the string objectref from the string literal entry.
        pStrObj = pEntry->GetStringObject();
        _ASSERTE(!bAddIfNotFound || pStrObj);
        return pStrObj;
    }
    // If the bAddIfNotFound flag is set then we better have a string
    // object at this point.
    _ASSERTE(!bAddIfNotFound);
    return NULL;
}
// Looks up (and optionally creates) the interned entry for the live string
// object *pString. Fast path: a lock-free read of this per-appdomain hash
// table. Slow path: take the global lock, defer to the GlobalStringLiteralMap,
// then cache the resulting entry locally unless bAppDomainWontUnload makes a
// per-domain reference unnecessary.
// Returns a pinned STRINGREF* for the canonical string, or NULL when
// bAddIfNotFound is FALSE and the string was not already interned.
STRINGREF *StringLiteralMap::GetInternedString(STRINGREF *pString, BOOL bAddIfNotFound, BOOL bAppDomainWontUnload)
{
    CONTRACTL
    {
        GC_TRIGGERS;
        THROWS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(this));
        PRECONDITION(CheckPointer(pString));
    }
    CONTRACTL_END;

    HashDatum Data;
    EEStringData StringData = EEStringData((*pString)->GetStringLength(), (*pString)->GetBuffer());

    DWORD dwHash = m_StringToEntryHashTable->GetHash(&StringData);
    if (m_StringToEntryHashTable->GetValue(&StringData, &Data, dwHash))
    {
        STRINGREF *pStrObj = NULL;
        pStrObj = ((StringLiteralEntry*)Data)->GetStringObject();
        _ASSERTE(!bAddIfNotFound || pStrObj);
        return pStrObj;
    }
    else
    {
        // Lock-free lookup missed: take the single global lock and consult the
        // global map.
        CrstHolder gch(&(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal));

        // TODO: We can be more efficient by checking our local hash table now to see if
        // someone beat us to inserting it. (m_StringToEntryHashTable->GetValue(pStringData, &Data))
        // (Rather than waiting until after we look the string up in the global map)

        // Retrieve the string literal from the global string literal map.
        StringLiteralEntryHolder pEntry(SystemDomain::GetGlobalStringLiteralMap()->GetInternedString(pString, dwHash, bAddIfNotFound));

        _ASSERTE(pEntry || !bAddIfNotFound);

        // If pEntry is non-null then the entry exists in the Global map. (either we retrieved it or added it just now)
        if (pEntry)
        {
            // If the entry exists in the Global map and the appdomain won't ever unload then we really don't need to add a
            // hash entry in the appdomain-specific map.
            // TODO: except that by not inserting into our local table we always take the global map lock
            // and come into this path, when we could succeed at a lock free lookup above.
            if (!bAppDomainWontUnload)
            {
                // Since GlobalStringLiteralMap::GetInternedString() could have caused a GC,
                // we need to recreate the string data.
                StringData = EEStringData((*pString)->GetStringLength(), (*pString)->GetBuffer());

                // Make sure some other thread has not already added it.
                if (!m_StringToEntryHashTable->GetValue(&StringData, &Data))
                {
                    // Insert the handle to the string into the hash table.
                    m_StringToEntryHashTable->InsertValue(&StringData, (LPVOID)pEntry, FALSE);
                }
                else
                {
                    pEntry.Release(); // while we're under lock
                }
            }
            pEntry.SuppressRelease();
            // Retrieve the string objectref from the string literal entry.
            STRINGREF *pStrObj = NULL;
            pStrObj = pEntry->GetStringObject();
            return pStrObj;
        }
    }
    // If the bAddIfNotFound flag is set then we better have a string
    // object at this point.
    _ASSERTE(!bAddIfNotFound);
    return NULL;
}
// Constructs the process-wide literal map. m_HashTableCrstGlobal is the single
// lock that guards this map and every per-appdomain StringLiteralMap; the
// pinned-heap handle table is rooted in the system domain.
GlobalStringLiteralMap::GlobalStringLiteralMap()
: m_StringToEntryHashTable(NULL)
, m_MemoryPool(NULL)
, m_HashTableCrstGlobal(CrstGlobalStrLiteralMap)
, m_PinnedHeapHandleTable(SystemDomain::System(), GLOBAL_STRING_TABLE_BUCKET_SIZE)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

#ifdef _DEBUG
    // Let the handle table verify in debug builds that callers hold the
    // global lock.
    m_PinnedHeapHandleTable.RegisterCrstDebug(&m_HashTableCrstGlobal);
#endif
}
// Destroys a GlobalStringLiteralMap. Only a losing duplicate from a creation
// race is actually torn down (and it must be empty); the real global map is
// only destroyed at process shutdown, where the OS reclaims its memory.
GlobalStringLiteralMap::~GlobalStringLiteralMap()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    // If we are deleting the map then either it is shutdown time or else there was a race trying to create
    // the initial map and this one was the loser
    // (i.e. two threads made a map and the InterlockedCompareExchange failed for one of them and
    // now it is deleting the map)
    //
    // If it's not the main map, then the map we are deleting better be empty!

    // There must be *some* global table.
    _ASSERTE(SystemDomain::GetGlobalStringLiteralMapNoCreate() != NULL);

    if (SystemDomain::GetGlobalStringLiteralMapNoCreate() != this)
    {
        // If this isn't the real global table then it must be empty.
        _ASSERTE(m_StringToEntryHashTable->IsEmpty());

        // Delete the hash table first. The dtor of the hash table would clean up all the entries.
        delete m_StringToEntryHashTable;
        // Delete the pool later, since the dtor above would need it.
        delete m_MemoryPool;
    }
    else
    {
        // We are shutting down, the OS will reclaim the memory from the StringLiteralEntries,
        // m_MemoryPool and m_StringToEntryHashTable.
        _ASSERTE(g_fProcessDetach);
    }
}
// Second-phase initialization: allocates the entry memory pool and the global
// hash table (guarded by m_HashTableCrstGlobal). Throws OutOfMemory on
// failure.
void GlobalStringLiteralMap::Init()
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        PRECONDITION(CheckPointer(this));
        INJECT_FAULT(ThrowOutOfMemory());
    }
    CONTRACTL_END;

    // Allocate the memory pool and set the initial count to quarter as grow count
    m_MemoryPool = new MemoryPool (SIZEOF_EEHASH_ENTRY, EEHASH_MEMORY_POOL_GROW_COUNT, EEHASH_MEMORY_POOL_GROW_COUNT/4);

    m_StringToEntryHashTable = new EEUnicodeStringLiteralHashTable ();

    LockOwner lock = {&m_HashTableCrstGlobal, IsOwnerOfCrst};
    if (!m_StringToEntryHashTable->Init(INIT_NUM_GLOBAL_STRING_BUCKETS, &lock, m_MemoryPool))
        ThrowOutOfMemory();
}
// Returns the entry for pStringData from the global map, AddRef'd for the
// caller; when absent and bAddIfNotFound is TRUE, allocates a string object
// and a new entry via AddStringLiteral. dwHash is the caller-computed hash of
// pStringData. Caller must hold m_HashTableCrstGlobal (asserted below).
StringLiteralEntry *GlobalStringLiteralMap::GetStringLiteral(EEStringData *pStringData, DWORD dwHash, BOOL bAddIfNotFound)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(this));
        PRECONDITION(CheckPointer(pStringData));
        PRECONDITION(m_HashTableCrstGlobal.OwnedByCurrentThread());
    }
    CONTRACTL_END;

    HashDatum Data;
    StringLiteralEntry *pEntry = NULL;

    if (m_StringToEntryHashTable->GetValueSpeculative(pStringData, &Data, dwHash)) // Since we hold the critical section here, we can safely use the speculative variant of GetValue
    {
        pEntry = (StringLiteralEntry*)Data;
        // If the entry is already in the table then addref it before we return it.
        if (pEntry)
            pEntry->AddRef();
    }
    else
    {
        if (bAddIfNotFound)
            pEntry = AddStringLiteral(pStringData);
    }

    return pEntry;
}
// Returns the entry whose characters match the live string *pString, AddRef'd
// for the caller; when absent and bAddIfNotFound is TRUE, interns *pString
// itself via AddInternedString. dwHash is the caller-computed hash. Caller
// must hold m_HashTableCrstGlobal (asserted below).
StringLiteralEntry *GlobalStringLiteralMap::GetInternedString(STRINGREF *pString, DWORD dwHash, BOOL bAddIfNotFound)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(this));
        PRECONDITION(CheckPointer(pString));
        PRECONDITION(m_HashTableCrstGlobal.OwnedByCurrentThread());
    }
    CONTRACTL_END;

    EEStringData StringData = EEStringData((*pString)->GetStringLength(), (*pString)->GetBuffer());

    HashDatum Data;
    StringLiteralEntry *pEntry = NULL;

    if (m_StringToEntryHashTable->GetValue(&StringData, &Data, dwHash))
    {
        pEntry = (StringLiteralEntry*)Data;
        // If the entry is already in the table then addref it before we return it.
        if (pEntry)
            pEntry->AddRef();
    }
    else
    {
        if (bAddIfNotFound)
            pEntry = AddInternedString(pString);
    }

    return pEntry;
}
#ifdef LOGGING
// Logging-only helper: records the action ("added"/"removed") together with up
// to the first 100 characters of the literal. Copies into a stack buffer and
// null-terminates it, since literals may contain embedded nulls.
static void LogStringLiteral(_In_z_ const char* action, EEStringData *pStringData)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    int length = pStringData->GetCharCount();
    length = min(length, 100);
    WCHAR *szString = (WCHAR *)_alloca((length + 1) * sizeof(WCHAR));
    memcpyNoGCRefs((void*)szString, (void*)pStringData->GetStringBuffer(), length * sizeof(WCHAR));
    szString[length] = '\0';
    LOG((LF_APPDOMAIN, LL_INFO10000, "String literal \"%S\" %s to Global map, size %d bytes\n", szString, action, pStringData->GetCharCount()));
}
#endif
// Allocates a managed string of pStringData's length and copies the characters
// in, GC-protecting the new object across the copy. The buffer is explicitly
// null-terminated for safety, but embedded nulls are legal, so the result must
// not be treated as a null-terminated string.
STRINGREF AllocateStringObject(EEStringData *pStringData)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    // Create the COM+ string object.
    DWORD cCount = pStringData->GetCharCount();

    STRINGREF strObj = AllocateString(cCount);

    GCPROTECT_BEGIN(strObj)
    {
        // Copy the string constant into the COM+ string object. The code
        // will add an extra null at the end for safety purposes, but since
        // we support embedded nulls, one should never treat the string as
        // null terminated.
        LPWSTR strDest = strObj->GetBuffer();
        memcpyNoGCRefs(strDest, pStringData->GetStringBuffer(), cCount*sizeof(WCHAR));
        strDest[cCount] = 0;
    }
    GCPROTECT_END();

    return strObj;
}
// Creates a newly interned string: allocates a pinned handle, the string
// object and a StringLiteralEntry, then inserts the entry into the global
// hash table. Caller must hold m_HashTableCrstGlobal. Returns the new entry
// carrying its initial reference.
StringLiteralEntry *GlobalStringLiteralMap::AddStringLiteral(EEStringData *pStringData)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(this));
        PRECONDITION(m_HashTableCrstGlobal.OwnedByCurrentThread());
    }
    CONTRACTL_END;

    StringLiteralEntry *pRet;

    {
        // Holder releases the handle block if anything below throws.
        PinnedHeapHandleBlockHolder pStrObj(&m_PinnedHeapHandleTable,1);

        // Create the COM+ string object.
        STRINGREF strObj = AllocateStringObject(pStringData);

        // Allocate a handle for the string.
        SetObjectReference(pStrObj[0], (OBJECTREF) strObj);

        // Allocate the StringLiteralEntry.
        StringLiteralEntryHolder pEntry(StringLiteralEntry::AllocateEntry(pStringData, (STRINGREF*)pStrObj[0]));
        pStrObj.SuppressRelease();
        // Insert the handle to the string into the hash table.
        m_StringToEntryHashTable->InsertValue(pStringData, (LPVOID)pEntry, FALSE);
        pEntry.SuppressRelease();
        pRet = pEntry;

#ifdef LOGGING
        LogStringLiteral("added", pStringData);
#endif
    }

    return pRet;
}
// Interns an existing string object: pins it via a new handle, allocates a
// StringLiteralEntry for it and inserts the entry into the global hash table.
// Caller must hold m_HashTableCrstGlobal.
StringLiteralEntry *GlobalStringLiteralMap::AddInternedString(STRINGREF *pString)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(this));
        PRECONDITION(m_HashTableCrstGlobal.OwnedByCurrentThread());
    }
    CONTRACTL_END;

    EEStringData StringData = EEStringData((*pString)->GetStringLength(), (*pString)->GetBuffer());
    StringLiteralEntry *pRet;

    {
        PinnedHeapHandleBlockHolder pStrObj(&m_PinnedHeapHandleTable,1);
        SetObjectReference(pStrObj[0], (OBJECTREF) *pString);

        // Since the allocation might have caused a GC we need to re-get the
        // string data.
        StringData = EEStringData((*pString)->GetStringLength(), (*pString)->GetBuffer());

        StringLiteralEntryHolder pEntry(StringLiteralEntry::AllocateEntry(&StringData, (STRINGREF*)pStrObj[0]));
        pStrObj.SuppressRelease();

        // Insert the handle to the string into the hash table.
        m_StringToEntryHashTable->InsertValue(&StringData, (LPVOID)pEntry, FALSE);
        pEntry.SuppressRelease();
        pRet = pEntry;
    }

    return pRet;
}
// Removes pEntry from the global hash table and releases its pinned object
// handle. The StringLiteralEntry storage itself is not freed here; that
// happens in StringLiteralEntry's release path.
void GlobalStringLiteralMap::RemoveStringLiteralEntry(StringLiteralEntry *pEntry)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(CheckPointer(pEntry));
        PRECONDITION(m_HashTableCrstGlobal.OwnedByCurrentThread());
        PRECONDITION(CheckPointer(this));
    }
    CONTRACTL_END;

    // Remove the entry from the hash table.
    {
        GCX_COOP();

        EEStringData StringData;
        pEntry->GetStringData(&StringData);

        BOOL bSuccess;
        bSuccess = m_StringToEntryHashTable->DeleteValue(&StringData);
        // This assert is commented out to accommodate the case where a
        // StringLiteralEntryHolder releases this object after a failed
        // insertion into the hash.
        //_ASSERTE(bSuccess);

#ifdef LOGGING
        // We need to do this logging within the GCX_COOP(), as a gc will render
        // our StringData pointers stale.
        if (bSuccess)
        {
            LogStringLiteral("removed", &StringData);
        }
#endif

        // Release the object handle that the entry was using.
        STRINGREF *pObjRef = pEntry->GetStringObject();
        m_PinnedHeapHandleTable.ReleaseHandles((OBJECTREF*)pObjRef, 1);
    }

    // We do not delete the StringLiteralEntry itself; that will be done in the
    // release method of the StringLiteralEntry.
}
// Allocates a StringLiteralEntry, preferring recycled storage from
// s_FreeEntryList and otherwise carving space out of chunked
// StringLiteralEntryArray blocks. Not synchronized itself: the caller must
// hold the global hash-table lock (asserted below), which also protects
// s_FreeEntryList, s_EntryList and s_UsedEntries.
StringLiteralEntry *StringLiteralEntry::AllocateEntry(EEStringData *pStringData, STRINGREF *pStringObj)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS; // GC_TRIGGERS because in the precondition below GetGlobalStringLiteralMap() might need to create the map
        MODE_COOPERATIVE;
        PRECONDITION(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal.OwnedByCurrentThread());
    }
    CONTRACTL_END;

    // Note: we don't synchronize here because allocateEntry is called when HashCrst is held.
    void *pMem = NULL;
    if (s_FreeEntryList != NULL)
    {
        pMem = s_FreeEntryList;
        s_FreeEntryList = s_FreeEntryList->m_pNext;
        _ASSERTE (((StringLiteralEntry*)pMem)->m_bDeleted);
    }
    else
    {
        if (s_EntryList == NULL || (s_UsedEntries >= MAX_ENTRIES_PER_CHUNK))
        {
            // Current chunk is exhausted (or none exists yet): push a fresh
            // chunk onto the chunk list.
            StringLiteralEntryArray *pNew = new StringLiteralEntryArray();
            pNew->m_pNext = s_EntryList;
            s_EntryList = pNew;
            s_UsedEntries = 0;
        }
        // NOTE(review): the manual index * sizeof(StringLiteralEntry) scaling
        // suggests m_Entries is a raw byte array -- confirm against the
        // declaration of StringLiteralEntryArray.
        pMem = &(s_EntryList->m_Entries[s_UsedEntries++*sizeof(StringLiteralEntry)]);
    }

    _ASSERTE (pMem && "Unable to allocate String literal Entry");

    // Placement-new the entry into the recycled/chunk storage.
    return new (pMem) StringLiteralEntry (pStringData, pStringObj);
}
// Returns pEntry's storage to the free list once its refcount has reached
// zero. Does not invoke the destructor; the storage is simply recycled by
// AllocateEntry. Caller must hold the global hash-table lock, which protects
// s_FreeEntryList.
void StringLiteralEntry::DeleteEntry (StringLiteralEntry *pEntry)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(SystemDomain::GetGlobalStringLiteralMapNoCreate()->m_HashTableCrstGlobal.OwnedByCurrentThread());
    }
    CONTRACTL_END;

    _ASSERTE (VolatileLoad(&pEntry->m_dwRefCount) == 0);

#ifdef _DEBUG
    // Poison the freed memory to catch use-after-free in debug builds.
    memset (pEntry, 0xc, sizeof(StringLiteralEntry));
#endif

#ifdef _DEBUG
    pEntry->m_bDeleted = TRUE;
#endif

    // The free list needs protection from the m_HashTableCrstGlobal
    pEntry->m_pNext = s_FreeEntryList;
    s_FreeEntryList = pEntry;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/md/databuffer.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: DataBuffer.h
//
//
// Class code:DataBuffer provides secure access to a block of memory.
//
// ======================================================================================
#pragma once
#include "external.h"
// --------------------------------------------------------------------------------------
//
// This class provides secure access to a block of memory.
//
class DataBuffer
{
private:
//
// Private data
//
// The memory block of size code:m_cbSize. Can be non-NULL even if code:m_cbSize is 0.
__field_bcount(m_cbSize)
BYTE *m_pbData;
// Size of the memory block starting at code:m_pbData. If it is 0, then value of code:m_pbData can be
// anything (incl. NULL).
UINT32 m_cbSize;
public:
//
// Initialization
//
// Creates empty memory block.
inline DataBuffer();
// Creates memory block (pbData, of size cbSize).
inline DataBuffer(
_In_reads_bytes_(cbSize) BYTE *pbData,
UINT32 cbSize);
// Creates memory block copy.
inline DataBuffer(
const DataBuffer &source);
// Initializes memory block to empty data. The object could be already initialzied.
inline void Clear();
// Initializes memory block to data (pbData, of size cbSize). The object should be empty before.
inline void Init(
_In_reads_bytes_(cbSize) BYTE *pbData,
UINT32 cbSize);
//
// Getters
//
// Reads data of type T without skipping the read data (returns pointer to the type in *ppTypeData).
// Returns FALSE if there's not enough data (of size T) in the blob, doesn't initialize the pointer
// *ppTypeData then.
// Returns TRUE otherwise, fills *ppTypeData with the "read" type start, but doesn't move the memory
// block (doesn't skip the "read" data).
template<class T>
__checkReturn
inline BOOL PeekData(
_Outptr_ T **ppTypeData);
// Reads data of type T at offset nOffset without skipping the read data (returns pointer to the type in
// *ppTypeData).
// Returns FALSE if there's not enough data (of size T) at offset nOffset in the buffer, doesn't
// initialize the pointer *ppTypeData then.
// Returns TRUE otherwise, fills *ppTypeData with the type start, but doesn't move the memory block
// (doesn't skip any "read" data).
template<class T>
__checkReturn
inline BOOL PeekDataAt(
UINT32 nOffset,
_Outptr_ T **ppTypeData);
// Reads data of type T and skips the data (instead of reading the bytes, returns pointer to the type in
// *ppTypeData).
// Returns FALSE if there's not enough data (of size T) in the blob, doesn't initialize the pointer
// *ppTypeData then.
// Returns TRUE otherwise, fills *ppTypeData with the "read" type start and moves the memory block
// behind the "read" type.
template<class T>
__checkReturn
inline BOOL GetData(
_Outptr_ T **ppTypeData);
// Reads data of size cbDataSize and skips the data (instead of reading the bytes, returns pointer to
// the bytes in *ppbDataPointer).
// Returns FALSE if there's not enough data in the blob, doesn't initialize the pointer *ppbDataPointer
// then.
// Returns TRUE otherwise, fills *ppbDataPointer with the "read" data start and moves the memory block
// behind the "read" data.
__checkReturn
inline BOOL GetDataOfSize(
UINT32 cbDataSize,
_Out_writes_bytes_(cbDataSize) BYTE **ppbDataPointer);
// Returns TRUE if the represented memory is empty.
inline BOOL IsEmpty() const
{ return (m_cbSize == 0); }
// Gets pointer to the represented data buffer (can be random pointer if size of the data is 0).
// Note: Should be used exceptionally. Try to use other operations instead.
inline BYTE *GetDataPointer()
{ return m_pbData; }
// Gets pointer to the represented data buffer (can be random pointer if size of the data is 0).
// Note: Should be used exceptionally. Try to use other operations instead.
inline const BYTE *GetDataPointer() const
{ return m_pbData; }
// Gets pointer right behind the represented data buffer (can be random pointer if size of the data is
// 0).
inline const BYTE *GetDataPointerBehind() const
{ return m_pbData + m_cbSize; }
// Gets the size of represented memory.
inline UINT32 GetSize() const
{ return m_cbSize; }
//BOOL SkipBytes(UINT32 cbSize);
public:
//
// Operations
//
// Truncates the buffer to exact size (cbSize).
// Returns FALSE if there's less than cbSize data represented.
// Returns TRUE otherwise and truncates the represented data size to cbSize.
__checkReturn
inline BOOL TruncateToExactSize(UINT32 cbSize);
// Truncates the buffer by size (cbSize).
// Returns FALSE if there's less than cbSize data represented.
// Returns TRUE otherwise and truncates the represented data size by cbSize.
__checkReturn
inline BOOL TruncateBySize(UINT32 cbSize);
// Skips the buffer to exact size (cbSize).
// Returns FALSE if there's less than cbSize data represented.
// Returns TRUE otherwise and skips data at the beggining, so that the result has size cbSize.
__checkReturn
inline BOOL SkipToExactSize(UINT32 cbSize);
private:
//
// Helpers
//
// Skips 'cbSize' bytes in the represented memory block. The caller is responsible for making sure that
// the represented memory block contains at least 'cbSize' bytes, otherwise there will be a security
// issue.
// Should be used only internally, never call it from outside of this class.
inline void SkipBytes_InternalInsecure(UINT32 cbSize);
}; // class DataBuffer
#include "databuffer.inl"
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: DataBuffer.h
//
//
// Class code:DataBuffer provides secure access to a block of memory.
//
// ======================================================================================
#pragma once
#include "external.h"
// --------------------------------------------------------------------------------------
//
// This class provides secure access to a block of memory.
//
class DataBuffer
{
private:
//
// Private data
//
// The memory block of size code:m_cbSize. Can be non-NULL even if code:m_cbSize is 0.
__field_bcount(m_cbSize)
BYTE *m_pbData;
// Size of the memory block starting at code:m_pbData. If it is 0, then value of code:m_pbData can be
// anything (incl. NULL).
UINT32 m_cbSize;
public:
//
// Initialization
//
// Creates empty memory block.
inline DataBuffer();
// Creates memory block (pbData, of size cbSize).
inline DataBuffer(
_In_reads_bytes_(cbSize) BYTE *pbData,
UINT32 cbSize);
// Creates memory block copy.
inline DataBuffer(
const DataBuffer &source);
// Initializes memory block to empty data. The object could be already initialzied.
inline void Clear();
// Initializes memory block to data (pbData, of size cbSize). The object should be empty before.
inline void Init(
_In_reads_bytes_(cbSize) BYTE *pbData,
UINT32 cbSize);
//
// Getters
//
// Reads data of type T without skipping the read data (returns pointer to the type in *ppTypeData).
// Returns FALSE if there's not enough data (of size T) in the blob, doesn't initialize the pointer
// *ppTypeData then.
// Returns TRUE otherwise, fills *ppTypeData with the "read" type start, but doesn't move the memory
// block (doesn't skip the "read" data).
template<class T>
__checkReturn
inline BOOL PeekData(
_Outptr_ T **ppTypeData);
// Reads data of type T at offset nOffset without skipping the read data (returns pointer to the type in
// *ppTypeData).
// Returns FALSE if there's not enough data (of size T) at offset nOffset in the buffer, doesn't
// initialize the pointer *ppTypeData then.
// Returns TRUE otherwise, fills *ppTypeData with the type start, but doesn't move the memory block
// (doesn't skip any "read" data).
template<class T>
__checkReturn
inline BOOL PeekDataAt(
UINT32 nOffset,
_Outptr_ T **ppTypeData);
// Reads data of type T and skips the data (instead of reading the bytes, returns pointer to the type in
// *ppTypeData).
// Returns FALSE if there's not enough data (of size T) in the blob, doesn't initialize the pointer
// *ppTypeData then.
// Returns TRUE otherwise, fills *ppTypeData with the "read" type start and moves the memory block
// behind the "read" type.
template<class T>
__checkReturn
inline BOOL GetData(
_Outptr_ T **ppTypeData);
// Reads data of size cbDataSize and skips the data (instead of reading the bytes, returns pointer to
// the bytes in *ppbDataPointer).
// Returns FALSE if there's not enough data in the blob, doesn't initialize the pointer *ppbDataPointer
// then.
// Returns TRUE otherwise, fills *ppbDataPointer with the "read" data start and moves the memory block
// behind the "read" data.
__checkReturn
inline BOOL GetDataOfSize(
UINT32 cbDataSize,
_Out_writes_bytes_(cbDataSize) BYTE **ppbDataPointer);
// Returns TRUE if the represented memory is empty.
inline BOOL IsEmpty() const
{ return (m_cbSize == 0); }
// Gets pointer to the represented data buffer (can be random pointer if size of the data is 0).
// Note: Should be used exceptionally. Try to use other operations instead.
inline BYTE *GetDataPointer()
{ return m_pbData; }
// Gets pointer to the represented data buffer (can be random pointer if size of the data is 0).
// Note: Should be used exceptionally. Try to use other operations instead.
inline const BYTE *GetDataPointer() const
{ return m_pbData; }
// Gets pointer right behind the represented data buffer (can be random pointer if size of the data is
// 0).
inline const BYTE *GetDataPointerBehind() const
{ return m_pbData + m_cbSize; }
// Gets the size of represented memory.
inline UINT32 GetSize() const
{ return m_cbSize; }
//BOOL SkipBytes(UINT32 cbSize);
public:
//
// Operations
//
// Truncates the buffer to exact size (cbSize).
// Returns FALSE if there's less than cbSize data represented.
// Returns TRUE otherwise and truncates the represented data size to cbSize.
__checkReturn
inline BOOL TruncateToExactSize(UINT32 cbSize);
// Truncates the buffer by size (cbSize).
// Returns FALSE if there's less than cbSize data represented.
// Returns TRUE otherwise and truncates the represented data size by cbSize.
__checkReturn
inline BOOL TruncateBySize(UINT32 cbSize);
// Skips the buffer to exact size (cbSize).
// Returns FALSE if there's less than cbSize data represented.
// Returns TRUE otherwise and skips data at the beggining, so that the result has size cbSize.
__checkReturn
inline BOOL SkipToExactSize(UINT32 cbSize);
private:
//
// Helpers
//
// Skips 'cbSize' bytes in the represented memory block. The caller is responsible for making sure that
// the represented memory block contains at least 'cbSize' bytes, otherwise there will be a security
// issue.
// Should be used only internally, never call it from outside of this class.
inline void SkipBytes_InternalInsecure(UINT32 cbSize);
}; // class DataBuffer
#include "databuffer.inl"
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/tools/superpmi/superpmi/neardiffer.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// nearDiffer.h - differ that handles code that is very similar
//----------------------------------------------------------
#ifndef _nearDiffer
#define _nearDiffer
#include "methodcontext.h"
#include "compileresult.h"
class NearDiffer
{
public:
NearDiffer(const char* targetArch, bool useCorDisTools)
: TargetArchitecture(targetArch)
, UseCoreDisTools(useCorDisTools)
#ifdef USE_COREDISTOOLS
, corAsmDiff(nullptr)
#endif // USE_COREDISTOOLS
{
}
~NearDiffer();
bool InitAsmDiff();
bool compare(MethodContext* mc, CompileResult* cr1, CompileResult* cr2);
const char* TargetArchitecture;
const bool UseCoreDisTools;
private:
void DumpCodeBlock(unsigned char* block, ULONG blocksize, void* originalAddr);
bool compareCodeSection(MethodContext* mc,
CompileResult* cr1,
CompileResult* cr2,
unsigned char* block1,
ULONG blocksize1,
unsigned char* datablock1,
ULONG datablockSize1,
void* originalBlock1,
void* originalDataBlock1,
void* otherCodeBlock1,
ULONG otherCodeBlockSize1,
unsigned char* block2,
ULONG blocksize2,
unsigned char* datablock2,
ULONG datablockSize2,
void* originalBlock2,
void* originalDataBlock2,
void* otherCodeBlock2,
ULONG otherCodeBlockSize2);
bool compareReadOnlyDataBlock(MethodContext* mc,
CompileResult* cr1,
CompileResult* cr2,
unsigned char* block1,
ULONG blocksize1,
void* originalDataBlock1,
unsigned char* block2,
ULONG blocksize2,
void* originalDataBlock2);
bool compareEHInfo(MethodContext* mc, CompileResult* cr1, CompileResult* cr2);
bool compareGCInfo(MethodContext* mc, CompileResult* cr1, CompileResult* cr2);
bool compareVars(MethodContext* mc, CompileResult* cr1, CompileResult* cr2);
bool compareBoundaries(MethodContext* mc, CompileResult* cr1, CompileResult* cr2);
static bool compareOffsets(
const void* payload, size_t blockOffset, size_t instrLen, uint64_t offset1, uint64_t offset2);
#ifdef USE_COREDISTOOLS
static bool __cdecl CoreDisCompareOffsetsCallback(
const void* payload, size_t blockOffset, size_t instrLen, uint64_t offset1, uint64_t offset2);
CorAsmDiff* corAsmDiff;
#endif // USE_COREDISTOOLS
#ifdef USE_MSVCDIS
DIS* GetMsVcDis();
#endif // USE_MSVCDIS
};
#endif // _nearDiffer
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// nearDiffer.h - differ that handles code that is very similar
//----------------------------------------------------------
#ifndef _nearDiffer
#define _nearDiffer
#include "methodcontext.h"
#include "compileresult.h"
class NearDiffer
{
public:
NearDiffer(const char* targetArch, bool useCorDisTools)
: TargetArchitecture(targetArch)
, UseCoreDisTools(useCorDisTools)
#ifdef USE_COREDISTOOLS
, corAsmDiff(nullptr)
#endif // USE_COREDISTOOLS
{
}
~NearDiffer();
bool InitAsmDiff();
bool compare(MethodContext* mc, CompileResult* cr1, CompileResult* cr2);
const char* TargetArchitecture;
const bool UseCoreDisTools;
private:
void DumpCodeBlock(unsigned char* block, ULONG blocksize, void* originalAddr);
bool compareCodeSection(MethodContext* mc,
CompileResult* cr1,
CompileResult* cr2,
unsigned char* block1,
ULONG blocksize1,
unsigned char* datablock1,
ULONG datablockSize1,
void* originalBlock1,
void* originalDataBlock1,
void* otherCodeBlock1,
ULONG otherCodeBlockSize1,
unsigned char* block2,
ULONG blocksize2,
unsigned char* datablock2,
ULONG datablockSize2,
void* originalBlock2,
void* originalDataBlock2,
void* otherCodeBlock2,
ULONG otherCodeBlockSize2);
bool compareReadOnlyDataBlock(MethodContext* mc,
CompileResult* cr1,
CompileResult* cr2,
unsigned char* block1,
ULONG blocksize1,
void* originalDataBlock1,
unsigned char* block2,
ULONG blocksize2,
void* originalDataBlock2);
bool compareEHInfo(MethodContext* mc, CompileResult* cr1, CompileResult* cr2);
bool compareGCInfo(MethodContext* mc, CompileResult* cr1, CompileResult* cr2);
bool compareVars(MethodContext* mc, CompileResult* cr1, CompileResult* cr2);
bool compareBoundaries(MethodContext* mc, CompileResult* cr1, CompileResult* cr2);
static bool compareOffsets(
const void* payload, size_t blockOffset, size_t instrLen, uint64_t offset1, uint64_t offset2);
#ifdef USE_COREDISTOOLS
static bool __cdecl CoreDisCompareOffsetsCallback(
const void* payload, size_t blockOffset, size_t instrLen, uint64_t offset1, uint64_t offset2);
CorAsmDiff* corAsmDiff;
#endif // USE_COREDISTOOLS
#ifdef USE_MSVCDIS
DIS* GetMsVcDis();
#endif // USE_MSVCDIS
};
#endif // _nearDiffer
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/src/libunwind/src/loongarch64/Gresume.c | /* libunwind - a platform-independent unwind library
Copyright (C) 2008 CodeSourcery
Copyright (C) 2021 Loongson Technology Corporation Limited
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/* FIXME for LoongArch64. */
#include <stdlib.h>
#include "unwind_i.h"
#ifndef UNW_REMOTE_ONLY
HIDDEN inline int
loongarch64_local_resume (unw_addr_space_t as, unw_cursor_t *cursor, void *arg)
{
struct cursor *c = (struct cursor *) cursor;
unw_tdep_context_t *uc = c->uc;
if (c->sigcontext_format == LOONGARCH64_SCF_NONE)
{
/* Since there are no signals involved here we restore EH and non scratch
registers only. */
unsigned long sp = uc->uc_mcontext.__gregs[3];
unsigned long ra = uc->uc_mcontext.__gregs[1];
asm volatile (
"move $t0, %0\n"
"move $t1, %1\n"
"move $t2, %2\n"
"ld.d $fp, $t0, 22*8\n"
"ld.d $s0, $t0, 23*8\n"
"ld.d $s1, $t0, 24*8\n"
"ld.d $s2, $t0, 25*8\n"
"ld.d $s3, $t0, 26*8\n"
"ld.d $s4, $t0, 27*8\n"
"ld.d $s5, $t0, 28*8\n"
"ld.d $s6, $t0, 29*8\n"
"ld.d $s7, $t0, 30*8\n"
"ld.d $s8, $t0, 31*8\n"
"move $ra, $t2\n"
"move $sp, $t1\n"
"jirl $r0, $ra, 0\n"
:
: "r" (uc->uc_mcontext.__gregs),
"r" (sp),
"r" (ra)
);
}
else /* c->sigcontext_format == LOONGARCH64_SCF_LINUX_RT_SIGFRAME */
{
int i;
struct sigcontext *sc = (struct sigcontext *) c->sigcontext_addr;
sc->sc_pc = c->dwarf.ip;
for (i = UNW_LOONGARCH64_R0; i <= UNW_LOONGARCH64_R31; i++)
sc->sc_regs[i] = uc->uc_mcontext.__gregs[i];
Debug (8, "resuming at ip=0x%lx via sigreturn() (trampoline @ 0x%lx, sp @ 0x%lx)\n",
c->dwarf.ip, c->sigcontext_pc, c->sigcontext_sp);
asm volatile (
"move $sp, %0\n"
"jirl $r0, %1, 0\n"
: : "r" (c->sigcontext_sp), "r" (c->sigcontext_pc)
);
}
unreachable();
return -UNW_EINVAL;
}
#endif /* !UNW_REMOTE_ONLY */
static inline void
establish_machine_state (struct cursor *c)
{
unw_addr_space_t as = c->dwarf.as;
void *arg = c->dwarf.as_arg;
unw_word_t val;
int reg;
Debug (8, "copying out cursor state\n");
for (reg = UNW_LOONGARCH64_R0; reg <= UNW_LOONGARCH64_R31; reg++)
{
Debug (16, "copying %s %d\n", unw_regname (reg), reg);
if (tdep_access_reg (c, reg, &val, 0) >= 0)
as->acc.access_reg (as, reg, &val, 1, arg);
}
}
int
unw_resume (unw_cursor_t *cursor)
{
struct cursor *c = (struct cursor *) cursor;
Debug (1, "(cursor=%p)\n", c);
if (!c->dwarf.ip)
{
/* This can happen easily when the frame-chain gets truncated
due to bad or missing unwind-info. */
Debug (1, "refusing to resume execution at address 0\n");
return -UNW_EINVAL;
}
establish_machine_state (c);
return (*c->dwarf.as->acc.resume) (c->dwarf.as, (unw_cursor_t *) c,
c->dwarf.as_arg);
}
| /* libunwind - a platform-independent unwind library
Copyright (C) 2008 CodeSourcery
Copyright (C) 2021 Loongson Technology Corporation Limited
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/* FIXME for LoongArch64. */
#include <stdlib.h>
#include "unwind_i.h"
#ifndef UNW_REMOTE_ONLY
HIDDEN inline int
loongarch64_local_resume (unw_addr_space_t as, unw_cursor_t *cursor, void *arg)
{
struct cursor *c = (struct cursor *) cursor;
unw_tdep_context_t *uc = c->uc;
if (c->sigcontext_format == LOONGARCH64_SCF_NONE)
{
/* Since there are no signals involved here we restore EH and non scratch
registers only. */
unsigned long sp = uc->uc_mcontext.__gregs[3];
unsigned long ra = uc->uc_mcontext.__gregs[1];
asm volatile (
"move $t0, %0\n"
"move $t1, %1\n"
"move $t2, %2\n"
"ld.d $fp, $t0, 22*8\n"
"ld.d $s0, $t0, 23*8\n"
"ld.d $s1, $t0, 24*8\n"
"ld.d $s2, $t0, 25*8\n"
"ld.d $s3, $t0, 26*8\n"
"ld.d $s4, $t0, 27*8\n"
"ld.d $s5, $t0, 28*8\n"
"ld.d $s6, $t0, 29*8\n"
"ld.d $s7, $t0, 30*8\n"
"ld.d $s8, $t0, 31*8\n"
"move $ra, $t2\n"
"move $sp, $t1\n"
"jirl $r0, $ra, 0\n"
:
: "r" (uc->uc_mcontext.__gregs),
"r" (sp),
"r" (ra)
);
}
else /* c->sigcontext_format == LOONGARCH64_SCF_LINUX_RT_SIGFRAME */
{
int i;
struct sigcontext *sc = (struct sigcontext *) c->sigcontext_addr;
sc->sc_pc = c->dwarf.ip;
for (i = UNW_LOONGARCH64_R0; i <= UNW_LOONGARCH64_R31; i++)
sc->sc_regs[i] = uc->uc_mcontext.__gregs[i];
Debug (8, "resuming at ip=0x%lx via sigreturn() (trampoline @ 0x%lx, sp @ 0x%lx)\n",
c->dwarf.ip, c->sigcontext_pc, c->sigcontext_sp);
asm volatile (
"move $sp, %0\n"
"jirl $r0, %1, 0\n"
: : "r" (c->sigcontext_sp), "r" (c->sigcontext_pc)
);
}
unreachable();
return -UNW_EINVAL;
}
#endif /* !UNW_REMOTE_ONLY */
static inline void
establish_machine_state (struct cursor *c)
{
unw_addr_space_t as = c->dwarf.as;
void *arg = c->dwarf.as_arg;
unw_word_t val;
int reg;
Debug (8, "copying out cursor state\n");
for (reg = UNW_LOONGARCH64_R0; reg <= UNW_LOONGARCH64_R31; reg++)
{
Debug (16, "copying %s %d\n", unw_regname (reg), reg);
if (tdep_access_reg (c, reg, &val, 0) >= 0)
as->acc.access_reg (as, reg, &val, 1, arg);
}
}
int
unw_resume (unw_cursor_t *cursor)
{
struct cursor *c = (struct cursor *) cursor;
Debug (1, "(cursor=%p)\n", c);
if (!c->dwarf.ip)
{
/* This can happen easily when the frame-chain gets truncated
due to bad or missing unwind-info. */
Debug (1, "refusing to resume execution at address 0\n");
return -UNW_EINVAL;
}
establish_machine_state (c);
return (*c->dwarf.as->acc.resume) (c->dwarf.as, (unw_cursor_t *) c,
c->dwarf.as_arg);
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/native/corehost/ijwhost/arm/bootstrap_thunk.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef IJW_BOOTSTRAP_THUNK_H
#define IJW_BOOTSTRAP_THUNK_H
#if !defined(TARGET_ARM)
#error "This file should only be included on arm builds."
#endif
#include "pal.h"
#include "corhdr.h"
extern "C" void start_runtime_thunk_stub();
#include <pshpack1.h>
//=================================================================================
class bootstrap_thunk
{
private:
WORD m_rgCode[4];
std::uintptr_t m_pBootstrapCode;
pal::dll_t m_dll; // pal::dll_t of this module
std::uintptr_t* m_slot; // VTable slot for this thunk
std::uint32_t m_token; // Token for this thunk
public:
// Get thunk from the address that the thunk code provided
static bootstrap_thunk *get_thunk_from_cookie(std::uintptr_t cookie);
// Get thunk from the thunk code entry point address
static bootstrap_thunk *get_thunk_from_entrypoint(std::uintptr_t entryAddr);
// Initializes the thunk to point to pThunkInitFcn that will load the
// runtime and perform the real thunk initialization.
void initialize(std::uintptr_t pThunkInitFcn,
pal::dll_t dll,
std::uint32_t token,
std::uintptr_t *pSlot);
// Returns the slot address of the vtable entry for this thunk
std::uintptr_t *get_slot_address();
// Returns the pal::dll_t for this thunk's module
pal::dll_t get_dll_handle();
// Returns the token of this thunk
std::uint32_t get_token();
std::uintptr_t get_entrypoint();
};
#include <poppack.h>
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef IJW_BOOTSTRAP_THUNK_H
#define IJW_BOOTSTRAP_THUNK_H
#if !defined(TARGET_ARM)
#error "This file should only be included on arm builds."
#endif
#include "pal.h"
#include "corhdr.h"
extern "C" void start_runtime_thunk_stub();
#include <pshpack1.h>
//=================================================================================
class bootstrap_thunk
{
private:
WORD m_rgCode[4];
std::uintptr_t m_pBootstrapCode;
pal::dll_t m_dll; // pal::dll_t of this module
std::uintptr_t* m_slot; // VTable slot for this thunk
std::uint32_t m_token; // Token for this thunk
public:
// Get thunk from the address that the thunk code provided
static bootstrap_thunk *get_thunk_from_cookie(std::uintptr_t cookie);
// Get thunk from the thunk code entry point address
static bootstrap_thunk *get_thunk_from_entrypoint(std::uintptr_t entryAddr);
// Initializes the thunk to point to pThunkInitFcn that will load the
// runtime and perform the real thunk initialization.
void initialize(std::uintptr_t pThunkInitFcn,
pal::dll_t dll,
std::uint32_t token,
std::uintptr_t *pSlot);
// Returns the slot address of the vtable entry for this thunk
std::uintptr_t *get_slot_address();
// Returns the pal::dll_t for this thunk's module
pal::dll_t get_dll_handle();
// Returns the token of this thunk
std::uint32_t get_token();
std::uintptr_t get_entrypoint();
};
#include <poppack.h>
#endif
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/utilcode/makepath.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/***
*makepath.c - create path name from components
*
*
*Purpose:
* To provide support for creation of full path names from components
*
*******************************************************************************/
#include "stdafx.h"
#include "winwrap.h"
#include "utilcode.h"
#include "ex.h"
/***
*void Makepath() - build path name from components
*
*Purpose:
* create a path name from its individual components
*
*Entry:
* CQuickWSTR &szPath - Buffer for constructed path
* WCHAR *drive - pointer to drive component, may or may not contain
* trailing ':'
* WCHAR *dir - pointer to subdirectory component, may or may not include
* leading and/or trailing '/' or '\' characters
* WCHAR *fname - pointer to file base name component
* WCHAR *ext - pointer to extension component, may or may not contain
* a leading '.'.
*
*Exit:
* path - pointer to constructed path name
*
*Exceptions:
*
*******************************************************************************/
void MakePath (
_Out_ CQuickWSTR &szPath,
_In_ LPCWSTR drive,
_In_ LPCWSTR dir,
_In_ LPCWSTR fname,
_In_ LPCWSTR ext
)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END
SIZE_T maxCount = 4 // Possible separators between components, plus null terminator
+ (drive != nullptr ? 2 : 0)
+ (dir != nullptr ? wcslen(dir) : 0)
+ (fname != nullptr ? wcslen(fname) : 0)
+ (ext != nullptr ? wcslen(ext) : 0);
LPWSTR path = szPath.AllocNoThrow(maxCount);
const WCHAR *p;
DWORD count = 0;
/* we assume that the arguments are in the following form (although we
* do not diagnose invalid arguments or illegal filenames (such as
* names longer than 8.3 or with illegal characters in them)
*
* drive:
* A ; or
* A:
* dir:
* \top\next\last\ ; or
* /top/next/last/ ; or
* either of the above forms with either/both the leading
* and trailing / or \ removed. Mixed use of '/' and '\' is
* also tolerated
* fname:
* any valid file name
* ext:
* any valid extension (none if empty or null )
*/
/* copy drive */
if (drive && *drive) {
*path++ = *drive;
*path++ = _T(':');
count += 2;
}
/* copy dir */
if ((p = dir)) {
while (*p) {
*path++ = *p++;
count++;
_ASSERTE(count < maxCount);
}
// suppress warning for the following line; this is safe but would require significant code
// delta for prefast to understand.
#ifdef _PREFAST_
#pragma warning( suppress: 26001 )
#endif
if (*(p-1) != _T('/') && *(p-1) != _T('\\')) {
*path++ = _T('\\');
count++;
_ASSERTE(count < maxCount);
}
}
/* copy fname */
if ((p = fname)) {
while (*p) {
*path++ = *p++;
count++;
_ASSERTE(count < maxCount);
}
}
/* copy ext, including 0-terminator - check to see if a '.' needs
* to be inserted.
*/
if ((p = ext)) {
if (*p && *p != _T('.')) {
*path++ = _T('.');
count++;
_ASSERTE(count < maxCount);
}
while ((*path++ = *p++)) {
count++;
_ASSERTE(count < maxCount);
}
}
else {
/* better add the 0-terminator */
*path = _T('\0');
}
szPath.Shrink(count + 1);
}
// Returns the directory for clr module. So, if path was for "C:\Dir1\Dir2\Filename.DLL",
// then this would return "C:\Dir1\Dir2\" (note the trailing backslash).HRESULT GetClrModuleDirectory(SString& wszPath)
HRESULT GetClrModuleDirectory(SString& wszPath)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
DWORD dwRet = GetClrModulePathName(wszPath);
if (dwRet == 0)
{ // Some other error.
return HRESULT_FROM_GetLastError();
}
CopySystemDirectory(wszPath, wszPath);
return S_OK;
}
//
// Returns path name from a file name.
// Example: For input "C:\Windows\System.dll" returns "C:\Windows\".
// Warning: The input file name string might be destroyed.
//
// Arguments:
// pPathString - [in] SString with file name
//
// pBuffer - [out] SString .
//
// Return Value:
// S_OK - Output buffer contains path name.
// other errors - If Sstring throws.
//
HRESULT CopySystemDirectory(const SString& pPathString,
SString& pbuffer)
{
HRESULT hr = S_OK;
EX_TRY
{
pbuffer.Set(pPathString);
SString::Iterator iter = pbuffer.End();
if (pbuffer.FindBack(iter,DIRECTORY_SEPARATOR_CHAR_W))
{
iter++;
pbuffer.Truncate(iter);
}
else
{
hr = E_UNEXPECTED;
}
}
EX_CATCH_HRESULT(hr);
return hr;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/***
*makepath.c - create path name from components
*
*
*Purpose:
* To provide support for creation of full path names from components
*
*******************************************************************************/
#include "stdafx.h"
#include "winwrap.h"
#include "utilcode.h"
#include "ex.h"
/***
*void Makepath() - build path name from components
*
*Purpose:
* create a path name from its individual components
*
*Entry:
* CQuickWSTR &szPath - Buffer for constructed path
* WCHAR *drive - pointer to drive component, may or may not contain
* trailing ':'
* WCHAR *dir - pointer to subdirectory component, may or may not include
* leading and/or trailing '/' or '\' characters
* WCHAR *fname - pointer to file base name component
* WCHAR *ext - pointer to extension component, may or may not contain
* a leading '.'.
*
*Exit:
* path - pointer to constructed path name
*
*Exceptions:
*
*******************************************************************************/
void MakePath (
_Out_ CQuickWSTR &szPath,
_In_ LPCWSTR drive,
_In_ LPCWSTR dir,
_In_ LPCWSTR fname,
_In_ LPCWSTR ext
)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END
SIZE_T maxCount = 4 // Possible separators between components, plus null terminator
+ (drive != nullptr ? 2 : 0)
+ (dir != nullptr ? wcslen(dir) : 0)
+ (fname != nullptr ? wcslen(fname) : 0)
+ (ext != nullptr ? wcslen(ext) : 0);
LPWSTR path = szPath.AllocNoThrow(maxCount);
const WCHAR *p;
DWORD count = 0;
/* we assume that the arguments are in the following form (although we
* do not diagnose invalid arguments or illegal filenames (such as
* names longer than 8.3 or with illegal characters in them)
*
* drive:
* A ; or
* A:
* dir:
* \top\next\last\ ; or
* /top/next/last/ ; or
* either of the above forms with either/both the leading
* and trailing / or \ removed. Mixed use of '/' and '\' is
* also tolerated
* fname:
* any valid file name
* ext:
* any valid extension (none if empty or null )
*/
/* copy drive */
if (drive && *drive) {
*path++ = *drive;
*path++ = _T(':');
count += 2;
}
/* copy dir */
if ((p = dir)) {
while (*p) {
*path++ = *p++;
count++;
_ASSERTE(count < maxCount);
}
// suppress warning for the following line; this is safe but would require significant code
// delta for prefast to understand.
#ifdef _PREFAST_
#pragma warning( suppress: 26001 )
#endif
if (*(p-1) != _T('/') && *(p-1) != _T('\\')) {
*path++ = _T('\\');
count++;
_ASSERTE(count < maxCount);
}
}
/* copy fname */
if ((p = fname)) {
while (*p) {
*path++ = *p++;
count++;
_ASSERTE(count < maxCount);
}
}
/* copy ext, including 0-terminator - check to see if a '.' needs
* to be inserted.
*/
if ((p = ext)) {
if (*p && *p != _T('.')) {
*path++ = _T('.');
count++;
_ASSERTE(count < maxCount);
}
while ((*path++ = *p++)) {
count++;
_ASSERTE(count < maxCount);
}
}
else {
/* better add the 0-terminator */
*path = _T('\0');
}
szPath.Shrink(count + 1);
}
// Returns the directory for clr module. So, if path was for "C:\Dir1\Dir2\Filename.DLL",
// then this would return "C:\Dir1\Dir2\" (note the trailing backslash).HRESULT GetClrModuleDirectory(SString& wszPath)
HRESULT GetClrModuleDirectory(SString& wszPath)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
DWORD dwRet = GetClrModulePathName(wszPath);
if (dwRet == 0)
{ // Some other error.
return HRESULT_FROM_GetLastError();
}
CopySystemDirectory(wszPath, wszPath);
return S_OK;
}
//
// Returns path name from a file name.
// Example: For input "C:\Windows\System.dll" returns "C:\Windows\".
// Warning: The input file name string might be destroyed.
//
// Arguments:
// pPathString - [in] SString with file name
//
// pBuffer - [out] SString .
//
// Return Value:
// S_OK - Output buffer contains path name.
// other errors - If Sstring throws.
//
HRESULT CopySystemDirectory(const SString& pPathString,
SString& pbuffer)
{
HRESULT hr = S_OK;
EX_TRY
{
pbuffer.Set(pPathString);
SString::Iterator iter = pbuffer.End();
if (pbuffer.FindBack(iter,DIRECTORY_SEPARATOR_CHAR_W))
{
iter++;
pbuffer.Truncate(iter);
}
else
{
hr = E_UNEXPECTED;
}
}
EX_CATCH_HRESULT(hr);
return hr;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/jit/gcencode.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GCEncode XX
XX XX
XX Logic to encode the JIT method header and GC pointer tables XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#pragma warning(disable : 4244) // loss of data int -> char ..
#endif
#include "gcinfotypes.h"
#include "patchpointinfo.h"
ReturnKind GCTypeToReturnKind(CorInfoGCType gcType)
{
switch (gcType)
{
case TYPE_GC_NONE:
return RT_Scalar;
case TYPE_GC_REF:
return RT_Object;
case TYPE_GC_BYREF:
return RT_ByRef;
default:
_ASSERTE(!"TYP_GC_OTHER is unexpected");
return RT_Illegal;
}
}
ReturnKind GCInfo::getReturnKind()
{
switch (compiler->info.compRetType)
{
case TYP_REF:
return RT_Object;
case TYP_BYREF:
return RT_ByRef;
case TYP_STRUCT:
{
CORINFO_CLASS_HANDLE structType = compiler->info.compMethodInfo->args.retTypeClass;
var_types retType = compiler->getReturnTypeForStruct(structType, compiler->info.compCallConv);
switch (retType)
{
case TYP_REF:
return RT_Object;
case TYP_BYREF:
return RT_ByRef;
case TYP_STRUCT:
if (compiler->IsHfa(structType))
{
#ifdef TARGET_X86
_ASSERTE(false && "HFAs not expected for X86");
#endif // TARGET_X86
return RT_Scalar;
}
else
{
// Multi-reg return
BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
compiler->info.compCompHnd->getClassGClayout(structType, gcPtrs);
ReturnKind first = GCTypeToReturnKind((CorInfoGCType)gcPtrs[0]);
ReturnKind second = GCTypeToReturnKind((CorInfoGCType)gcPtrs[1]);
return GetStructReturnKind(first, second);
}
#ifdef TARGET_X86
case TYP_FLOAT:
case TYP_DOUBLE:
return RT_Float;
#endif // TARGET_X86
default:
return RT_Scalar;
}
}
#ifdef TARGET_X86
case TYP_FLOAT:
case TYP_DOUBLE:
return RT_Float;
#endif // TARGET_X86
default:
return RT_Scalar;
}
}
#if !defined(JIT32_GCENCODER) || defined(FEATURE_EH_FUNCLETS)
// gcMarkFilterVarsPinned - Walk all lifetimes and make it so that anything
// live in a filter is marked as pinned (often by splitting the lifetime
// so that *only* the filter region is pinned). This should only be
// called once (after generating all lifetimes, but before slot ids are
// finalized.
//
// DevDiv 376329 - The VM has to double report filters and their parent frame
// because they occur during the 1st pass and the parent frame doesn't go dead
// until we start unwinding in the 2nd pass.
//
// Untracked locals will only be reported in non-filter funclets and the
// parent.
// Registers can't be double reported by 2 frames since they're different.
// That just leaves stack variables which might be double reported.
//
// Technically double reporting is only a problem when the GC has to relocate a
// reference. So we avoid that problem by marking all live tracked stack
// variables as pinned inside the filter. Thus if they are double reported, it
// won't be a problem since they won't be double relocated.
//
void GCInfo::gcMarkFilterVarsPinned()
{
assert(compiler->ehAnyFunclets());
for (EHblkDsc* const HBtab : EHClauses(compiler))
{
if (HBtab->HasFilter())
{
const UNATIVE_OFFSET filterBeg = compiler->ehCodeOffset(HBtab->ebdFilter);
const UNATIVE_OFFSET filterEnd = compiler->ehCodeOffset(HBtab->ebdHndBeg);
for (varPtrDsc* varTmp = gcVarPtrList; varTmp != nullptr; varTmp = varTmp->vpdNext)
{
// Get hold of the variable's flags.
const unsigned lowBits = varTmp->vpdVarNum & OFFSET_MASK;
// Compute the actual lifetime offsets.
const unsigned begOffs = varTmp->vpdBegOfs;
const unsigned endOffs = varTmp->vpdEndOfs;
// Special case: skip any 0-length lifetimes.
if (endOffs == begOffs)
{
continue;
}
// Skip lifetimes with no overlap with the filter
if ((endOffs <= filterBeg) || (begOffs >= filterEnd))
{
continue;
}
#ifndef JIT32_GCENCODER
// Because there is no nesting within filters, nothing
// should be already pinned.
// For JIT32_GCENCODER, we should not do this check as gcVarPtrList are always sorted by vpdBegOfs
// which means that we could see some varPtrDsc that were already pinned by previous splitting.
assert((lowBits & pinned_OFFSET_FLAG) == 0);
#endif // JIT32_GCENCODER
if (begOffs < filterBeg)
{
if (endOffs > filterEnd)
{
// The variable lifetime is starts before AND ends after
// the filter, so we need to create 2 new lifetimes:
// (1) a pinned one for the filter
// (2) a regular one for after the filter
// and then adjust the original lifetime to end before
// the filter.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (compiler->verbose)
{
printf("Splitting lifetime for filter: [%04X, %04X).\nOld: ", filterBeg, filterEnd);
gcDumpVarPtrDsc(varTmp);
}
#endif // DEBUG
varPtrDsc* desc1 = new (compiler, CMK_GC) varPtrDsc;
desc1->vpdVarNum = varTmp->vpdVarNum | pinned_OFFSET_FLAG;
desc1->vpdBegOfs = filterBeg;
desc1->vpdEndOfs = filterEnd;
varPtrDsc* desc2 = new (compiler, CMK_GC) varPtrDsc;
desc2->vpdVarNum = varTmp->vpdVarNum;
desc2->vpdBegOfs = filterEnd;
desc2->vpdEndOfs = endOffs;
varTmp->vpdEndOfs = filterBeg;
gcInsertVarPtrDscSplit(desc1, varTmp);
gcInsertVarPtrDscSplit(desc2, varTmp);
#ifdef DEBUG
if (compiler->verbose)
{
printf("New (1 of 3): ");
gcDumpVarPtrDsc(varTmp);
printf("New (2 of 3): ");
gcDumpVarPtrDsc(desc1);
printf("New (3 of 3): ");
gcDumpVarPtrDsc(desc2);
}
#endif // DEBUG
}
else
{
// The variable lifetime started before the filter and ends
// somewhere inside it, so we only create 1 new lifetime,
// and then adjust the original lifetime to end before
// the filter.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (compiler->verbose)
{
printf("Splitting lifetime for filter.\nOld: ");
gcDumpVarPtrDsc(varTmp);
}
#endif // DEBUG
varPtrDsc* desc = new (compiler, CMK_GC) varPtrDsc;
desc->vpdVarNum = varTmp->vpdVarNum | pinned_OFFSET_FLAG;
desc->vpdBegOfs = filterBeg;
desc->vpdEndOfs = endOffs;
varTmp->vpdEndOfs = filterBeg;
gcInsertVarPtrDscSplit(desc, varTmp);
#ifdef DEBUG
if (compiler->verbose)
{
printf("New (1 of 2): ");
gcDumpVarPtrDsc(varTmp);
printf("New (2 of 2): ");
gcDumpVarPtrDsc(desc);
}
#endif // DEBUG
}
}
else
{
if (endOffs > filterEnd)
{
// The variable lifetime starts inside the filter and
// ends somewhere after it, so we create 1 new
// lifetime for the part inside the filter and adjust
// the start of the original lifetime to be the end
// of the filter
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (compiler->verbose)
{
printf("Splitting lifetime for filter.\nOld: ");
gcDumpVarPtrDsc(varTmp);
}
#endif // DEBUG
varPtrDsc* desc = new (compiler, CMK_GC) varPtrDsc;
#ifndef JIT32_GCENCODER
desc->vpdVarNum = varTmp->vpdVarNum | pinned_OFFSET_FLAG;
desc->vpdBegOfs = begOffs;
desc->vpdEndOfs = filterEnd;
varTmp->vpdBegOfs = filterEnd;
#else
// Mark varTmp as pinned and generated use varPtrDsc(desc) as non-pinned
// since gcInsertVarPtrDscSplit requires that varTmp->vpdBegOfs must precede desc->vpdBegOfs
desc->vpdVarNum = varTmp->vpdVarNum;
desc->vpdBegOfs = filterEnd;
desc->vpdEndOfs = endOffs;
varTmp->vpdVarNum = varTmp->vpdVarNum | pinned_OFFSET_FLAG;
varTmp->vpdEndOfs = filterEnd;
#endif
gcInsertVarPtrDscSplit(desc, varTmp);
#ifdef DEBUG
if (compiler->verbose)
{
printf("New (1 of 2): ");
gcDumpVarPtrDsc(desc);
printf("New (2 of 2): ");
gcDumpVarPtrDsc(varTmp);
}
#endif // DEBUG
}
else
{
// The variable lifetime is completely within the filter,
// so just add the pinned flag.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (compiler->verbose)
{
printf("Pinning lifetime for filter.\nOld: ");
gcDumpVarPtrDsc(varTmp);
}
#endif // DEBUG
varTmp->vpdVarNum |= pinned_OFFSET_FLAG;
#ifdef DEBUG
if (compiler->verbose)
{
printf("New : ");
gcDumpVarPtrDsc(varTmp);
}
#endif // DEBUG
}
}
}
} // HasFilter
} // Foreach EH
}
// gcInsertVarPtrDscSplit - Insert varPtrDsc that were created by splitting lifetimes
// From gcMarkFilterVarsPinned, we may have created one or two `varPtrDsc`s due to splitting lifetimes
// and these newly created `varPtrDsc`s should be inserted in gcVarPtrList.
// However the semantics of this call depend on the architecture.
//
// x86-GCInfo requires gcVarPtrList to be sorted by vpdBegOfs.
// Every time inserting an entry we should keep the order of entries.
// So this function searches for a proper insertion point from "begin" then "desc" gets inserted.
//
// For other architectures(ones that uses GCInfo{En|De}coder), we don't need any sort.
// So the argument "begin" is unused and "desc" will be inserted at the front of the list.
void GCInfo::gcInsertVarPtrDscSplit(varPtrDsc* desc, varPtrDsc* begin)
{
#ifndef JIT32_GCENCODER
(void)begin;
desc->vpdNext = gcVarPtrList;
gcVarPtrList = desc;
#else // JIT32_GCENCODER
// "desc" and "begin" must not be null
assert(desc != nullptr);
assert(begin != nullptr);
// The caller must guarantee that desc's BegOfs is equal or greater than begin's
// since we will search for insertion point from "begin"
assert(desc->vpdBegOfs >= begin->vpdBegOfs);
varPtrDsc* varTmp = begin->vpdNext;
varPtrDsc* varInsert = begin;
while (varTmp != nullptr && varTmp->vpdBegOfs < desc->vpdBegOfs)
{
varInsert = varTmp;
varTmp = varTmp->vpdNext;
}
// Insert point cannot be null
assert(varInsert != nullptr);
desc->vpdNext = varInsert->vpdNext;
varInsert->vpdNext = desc;
#endif // JIT32_GCENCODER
}
#ifdef DEBUG
void GCInfo::gcDumpVarPtrDsc(varPtrDsc* desc)
{
const int offs = (desc->vpdVarNum & ~OFFSET_MASK);
const GCtype gcType = (desc->vpdVarNum & byref_OFFSET_FLAG) ? GCT_BYREF : GCT_GCREF;
const bool isPin = (desc->vpdVarNum & pinned_OFFSET_FLAG) != 0;
printf("[%08X] %s%s var at [%s", dspPtr(desc), GCtypeStr(gcType), isPin ? "pinned-ptr" : "",
compiler->isFramePointerUsed() ? STR_FPBASE : STR_SPBASE);
if (offs < 0)
{
printf("-%02XH", -offs);
}
else if (offs > 0)
{
printf("+%02XH", +offs);
}
printf("] live from %04X to %04X\n", desc->vpdBegOfs, desc->vpdEndOfs);
}
#endif // DEBUG
#endif // !defined(JIT32_GCENCODER) || defined(FEATURE_EH_FUNCLETS)
#ifdef JIT32_GCENCODER
#include "emit.h"
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
// (see jit.h) #define REGEN_SHORTCUTS 0
// To Regenerate the compressed info header shortcuts, define REGEN_SHORTCUTS
// and use the following command line pipe/filter to give you the 128
// most useful encodings.
//
// find . -name regen.txt | xargs cat | grep InfoHdr | sort | uniq -c | sort -r | head -128
// (see jit.h) #define REGEN_CALLPAT 0
// To Regenerate the compressed info header shortcuts, define REGEN_CALLPAT
// and use the following command line pipe/filter to give you the 80
// most useful encodings.
//
// find . -name regen.txt | xargs cat | grep CallSite | sort | uniq -c | sort -r | head -80
#if REGEN_SHORTCUTS || REGEN_CALLPAT
static FILE* logFile = NULL;
CRITICAL_SECTION logFileLock;
#endif
#if REGEN_CALLPAT
static void regenLog(unsigned codeDelta,
unsigned argMask,
unsigned regMask,
unsigned argCnt,
unsigned byrefArgMask,
unsigned byrefRegMask,
BYTE* base,
unsigned enSize)
{
CallPattern pat;
pat.fld.argCnt = (argCnt < 0xff) ? argCnt : 0xff;
pat.fld.regMask = (regMask < 0xff) ? regMask : 0xff;
pat.fld.argMask = (argMask < 0xff) ? argMask : 0xff;
pat.fld.codeDelta = (codeDelta < 0xff) ? codeDelta : 0xff;
if (logFile == NULL)
{
logFile = fopen("regen.txt", "a");
InitializeCriticalSection(&logFileLock);
}
assert(((enSize > 0) && (enSize < 256)) && ((pat.val & 0xffffff) != 0xffffff));
EnterCriticalSection(&logFileLock);
fprintf(logFile, "CallSite( 0x%08x, 0x%02x%02x, 0x", pat.val, byrefArgMask, byrefRegMask);
while (enSize > 0)
{
fprintf(logFile, "%02x", *base++);
enSize--;
}
fprintf(logFile, "),\n");
fflush(logFile);
LeaveCriticalSection(&logFileLock);
}
#endif
#if REGEN_SHORTCUTS
static void regenLog(unsigned encoding, InfoHdr* header, InfoHdr* state)
{
if (logFile == NULL)
{
logFile = fopen("regen.txt", "a");
InitializeCriticalSection(&logFileLock);
}
EnterCriticalSection(&logFileLock);
fprintf(logFile, "InfoHdr( %2d, %2d, %1d, %1d, %1d,"
" %1d, %1d, %1d, %1d, %1d,"
" %1d, %1d, %1d, %1d, %1d, %1d,"
" %1d, %1d, %1d,"
" %1d, %2d, %2d,"
" %2d, %2d, %2d, %2d, %2d, %2d), \n",
state->prologSize, state->epilogSize, state->epilogCount, state->epilogAtEnd, state->ediSaved,
state->esiSaved, state->ebxSaved, state->ebpSaved, state->ebpFrame, state->interruptible,
state->doubleAlign, state->security, state->handlers, state->localloc, state->editNcontinue, state->varargs,
state->profCallbacks, state->genericsContext, state->genericsContextIsMethodDesc, state->returnKind,
state->argCount, state->frameSize,
(state->untrackedCnt <= SET_UNTRACKED_MAX) ? state->untrackedCnt : HAS_UNTRACKED,
(state->varPtrTableSize == 0) ? 0 : HAS_VARPTR,
(state->gsCookieOffset == INVALID_GS_COOKIE_OFFSET) ? 0 : HAS_GS_COOKIE_OFFSET,
(state->syncStartOffset == INVALID_SYNC_OFFSET) ? 0 : HAS_SYNC_OFFSET,
(state->syncStartOffset == INVALID_SYNC_OFFSET) ? 0 : HAS_SYNC_OFFSET,
(state->revPInvokeOffset == INVALID_REV_PINVOKE_OFFSET) ? 0 : HAS_REV_PINVOKE_FRAME_OFFSET);
fflush(logFile);
LeaveCriticalSection(&logFileLock);
}
#endif
/*****************************************************************************
*
* Given the four parameters return the index into the callPatternTable[]
* that is used to encoding these four items. If an exact match cannot
* found then ignore the codeDelta and search the table again for a near
* match.
* Returns 0..79 for an exact match or
* (delta<<8) | (0..79) for a near match.
* A near match will be encoded using two bytes, the first byte will
* skip the adjustment delta that prevented an exact match and the
* rest of the delta plus the other three items are encoded in the
* second byte.
*/
int FASTCALL lookupCallPattern(unsigned argCnt, unsigned regMask, unsigned argMask, unsigned codeDelta)
{
if ((argCnt <= CP_MAX_ARG_CNT) && (argMask <= CP_MAX_ARG_MASK))
{
CallPattern pat;
pat.fld.argCnt = argCnt;
pat.fld.regMask = regMask; // EBP,EBX,ESI,EDI
pat.fld.argMask = argMask;
pat.fld.codeDelta = codeDelta;
bool codeDeltaOK = (pat.fld.codeDelta == codeDelta);
unsigned bestDelta2 = 0xff;
unsigned bestPattern = 0xff;
unsigned patval = pat.val;
assert(sizeof(CallPattern) == sizeof(unsigned));
const unsigned* curp = &callPatternTable[0];
for (unsigned inx = 0; inx < 80; inx++, curp++)
{
unsigned curval = *curp;
if ((patval == curval) && codeDeltaOK)
return inx;
if (((patval ^ curval) & 0xffffff) == 0)
{
unsigned delta2 = codeDelta - (curval >> 24);
if (delta2 < bestDelta2)
{
bestDelta2 = delta2;
bestPattern = inx;
}
}
}
if (bestPattern != 0xff)
{
return (bestDelta2 << 8) | bestPattern;
}
}
return -1;
}
static bool initNeeded3(unsigned cur, unsigned tgt, unsigned max, unsigned* hint)
{
assert(cur != tgt);
unsigned tmp = tgt;
unsigned nib = 0;
unsigned cnt = 0;
while (tmp > max)
{
nib = tmp & 0x07;
tmp >>= 3;
if (tmp == cur)
{
*hint = nib;
return false;
}
cnt++;
}
*hint = tmp;
return true;
}
static bool initNeeded4(unsigned cur, unsigned tgt, unsigned max, unsigned* hint)
{
assert(cur != tgt);
unsigned tmp = tgt;
unsigned nib = 0;
unsigned cnt = 0;
while (tmp > max)
{
nib = tmp & 0x0f;
tmp >>= 4;
if (tmp == cur)
{
*hint = nib;
return false;
}
cnt++;
}
*hint = tmp;
return true;
}
static int bigEncoding3(unsigned cur, unsigned tgt, unsigned max)
{
assert(cur != tgt);
unsigned tmp = tgt;
unsigned nib = 0;
unsigned cnt = 0;
while (tmp > max)
{
nib = tmp & 0x07;
tmp >>= 3;
if (tmp == cur)
break;
cnt++;
}
return cnt;
}
static int bigEncoding4(unsigned cur, unsigned tgt, unsigned max)
{
assert(cur != tgt);
unsigned tmp = tgt;
unsigned nib = 0;
unsigned cnt = 0;
while (tmp > max)
{
nib = tmp & 0x0f;
tmp >>= 4;
if (tmp == cur)
break;
cnt++;
}
return cnt;
}
BYTE FASTCALL encodeHeaderNext(const InfoHdr& header, InfoHdr* state, BYTE& codeSet)
{
BYTE encoding = 0xff;
codeSet = 1; // codeSet is 1 or 2, depending on whether the returned encoding
// corresponds to InfoHdrAdjust, or InfoHdrAdjust2 enumerations.
if (state->argCount != header.argCount)
{
// We have one-byte encodings for 0..8
if (header.argCount <= SET_ARGCOUNT_MAX)
{
state->argCount = header.argCount;
encoding = SET_ARGCOUNT + header.argCount;
goto DO_RETURN;
}
else
{
unsigned hint;
if (initNeeded4(state->argCount, header.argCount, SET_ARGCOUNT_MAX, &hint))
{
assert(hint <= SET_ARGCOUNT_MAX);
state->argCount = hint;
encoding = SET_ARGCOUNT + hint;
goto DO_RETURN;
}
else
{
assert(hint <= 0xf);
state->argCount <<= 4;
state->argCount += hint;
encoding = NEXT_FOUR_ARGCOUNT + hint;
goto DO_RETURN;
}
}
}
if (state->frameSize != header.frameSize)
{
// We have one-byte encodings for 0..7
if (header.frameSize <= SET_FRAMESIZE_MAX)
{
state->frameSize = header.frameSize;
encoding = SET_FRAMESIZE + header.frameSize;
goto DO_RETURN;
}
else
{
unsigned hint;
if (initNeeded4(state->frameSize, header.frameSize, SET_FRAMESIZE_MAX, &hint))
{
assert(hint <= SET_FRAMESIZE_MAX);
state->frameSize = hint;
encoding = SET_FRAMESIZE + hint;
goto DO_RETURN;
}
else
{
assert(hint <= 0xf);
state->frameSize <<= 4;
state->frameSize += hint;
encoding = NEXT_FOUR_FRAMESIZE + hint;
goto DO_RETURN;
}
}
}
if ((state->epilogCount != header.epilogCount) || (state->epilogAtEnd != header.epilogAtEnd))
{
if (header.epilogCount > SET_EPILOGCNT_MAX)
IMPL_LIMITATION("More than SET_EPILOGCNT_MAX epilogs");
state->epilogCount = header.epilogCount;
state->epilogAtEnd = header.epilogAtEnd;
encoding = SET_EPILOGCNT + header.epilogCount * 2;
if (header.epilogAtEnd)
encoding++;
goto DO_RETURN;
}
if (state->varPtrTableSize != header.varPtrTableSize)
{
assert(state->varPtrTableSize == 0 || state->varPtrTableSize == HAS_VARPTR);
if (state->varPtrTableSize == 0)
{
state->varPtrTableSize = HAS_VARPTR;
encoding = FLIP_VAR_PTR_TABLE_SZ;
goto DO_RETURN;
}
else if (header.varPtrTableSize == 0)
{
state->varPtrTableSize = 0;
encoding = FLIP_VAR_PTR_TABLE_SZ;
goto DO_RETURN;
}
}
if (state->untrackedCnt != header.untrackedCnt)
{
assert(state->untrackedCnt <= SET_UNTRACKED_MAX || state->untrackedCnt == HAS_UNTRACKED);
// We have one-byte encodings for 0..3
if (header.untrackedCnt <= SET_UNTRACKED_MAX)
{
state->untrackedCnt = header.untrackedCnt;
encoding = SET_UNTRACKED + header.untrackedCnt;
goto DO_RETURN;
}
else if (state->untrackedCnt != HAS_UNTRACKED)
{
state->untrackedCnt = HAS_UNTRACKED;
encoding = FFFF_UNTRACKED_CNT;
goto DO_RETURN;
}
}
if (state->epilogSize != header.epilogSize)
{
// We have one-byte encodings for 0..10
if (header.epilogSize <= SET_EPILOGSIZE_MAX)
{
state->epilogSize = header.epilogSize;
encoding = SET_EPILOGSIZE + header.epilogSize;
goto DO_RETURN;
}
else
{
unsigned hint;
if (initNeeded3(state->epilogSize, header.epilogSize, SET_EPILOGSIZE_MAX, &hint))
{
assert(hint <= SET_EPILOGSIZE_MAX);
state->epilogSize = hint;
encoding = SET_EPILOGSIZE + hint;
goto DO_RETURN;
}
else
{
assert(hint <= 0x7);
state->epilogSize <<= 3;
state->epilogSize += hint;
encoding = NEXT_THREE_EPILOGSIZE + hint;
goto DO_RETURN;
}
}
}
if (state->prologSize != header.prologSize)
{
// We have one-byte encodings for 0..16
if (header.prologSize <= SET_PROLOGSIZE_MAX)
{
state->prologSize = header.prologSize;
encoding = SET_PROLOGSIZE + header.prologSize;
goto DO_RETURN;
}
else
{
unsigned hint;
assert(SET_PROLOGSIZE_MAX > 15);
if (initNeeded3(state->prologSize, header.prologSize, 15, &hint))
{
assert(hint <= 15);
state->prologSize = hint;
encoding = SET_PROLOGSIZE + hint;
goto DO_RETURN;
}
else
{
assert(hint <= 0x7);
state->prologSize <<= 3;
state->prologSize += hint;
encoding = NEXT_THREE_PROLOGSIZE + hint;
goto DO_RETURN;
}
}
}
if (state->ediSaved != header.ediSaved)
{
state->ediSaved = header.ediSaved;
encoding = FLIP_EDI_SAVED;
goto DO_RETURN;
}
if (state->esiSaved != header.esiSaved)
{
state->esiSaved = header.esiSaved;
encoding = FLIP_ESI_SAVED;
goto DO_RETURN;
}
if (state->ebxSaved != header.ebxSaved)
{
state->ebxSaved = header.ebxSaved;
encoding = FLIP_EBX_SAVED;
goto DO_RETURN;
}
if (state->ebpSaved != header.ebpSaved)
{
state->ebpSaved = header.ebpSaved;
encoding = FLIP_EBP_SAVED;
goto DO_RETURN;
}
if (state->ebpFrame != header.ebpFrame)
{
state->ebpFrame = header.ebpFrame;
encoding = FLIP_EBP_FRAME;
goto DO_RETURN;
}
if (state->interruptible != header.interruptible)
{
state->interruptible = header.interruptible;
encoding = FLIP_INTERRUPTIBLE;
goto DO_RETURN;
}
#if DOUBLE_ALIGN
if (state->doubleAlign != header.doubleAlign)
{
state->doubleAlign = header.doubleAlign;
encoding = FLIP_DOUBLE_ALIGN;
goto DO_RETURN;
}
#endif
if (state->security != header.security)
{
state->security = header.security;
encoding = FLIP_SECURITY;
goto DO_RETURN;
}
if (state->handlers != header.handlers)
{
state->handlers = header.handlers;
encoding = FLIP_HANDLERS;
goto DO_RETURN;
}
if (state->localloc != header.localloc)
{
state->localloc = header.localloc;
encoding = FLIP_LOCALLOC;
goto DO_RETURN;
}
if (state->editNcontinue != header.editNcontinue)
{
state->editNcontinue = header.editNcontinue;
encoding = FLIP_EDITnCONTINUE;
goto DO_RETURN;
}
if (state->varargs != header.varargs)
{
state->varargs = header.varargs;
encoding = FLIP_VARARGS;
goto DO_RETURN;
}
if (state->profCallbacks != header.profCallbacks)
{
state->profCallbacks = header.profCallbacks;
encoding = FLIP_PROF_CALLBACKS;
goto DO_RETURN;
}
if (state->genericsContext != header.genericsContext)
{
state->genericsContext = header.genericsContext;
encoding = FLIP_HAS_GENERICS_CONTEXT;
goto DO_RETURN;
}
if (state->genericsContextIsMethodDesc != header.genericsContextIsMethodDesc)
{
state->genericsContextIsMethodDesc = header.genericsContextIsMethodDesc;
encoding = FLIP_GENERICS_CONTEXT_IS_METHODDESC;
goto DO_RETURN;
}
if (state->returnKind != header.returnKind)
{
state->returnKind = header.returnKind;
codeSet = 2; // Two byte encoding
encoding = header.returnKind;
_ASSERTE(encoding < SET_RET_KIND_MAX);
goto DO_RETURN;
}
if (state->gsCookieOffset != header.gsCookieOffset)
{
assert(state->gsCookieOffset == INVALID_GS_COOKIE_OFFSET || state->gsCookieOffset == HAS_GS_COOKIE_OFFSET);
if (state->gsCookieOffset == INVALID_GS_COOKIE_OFFSET)
{
// header.gsCookieOffset is non-zero. We can set it
// to zero using FLIP_HAS_GS_COOKIE
state->gsCookieOffset = HAS_GS_COOKIE_OFFSET;
encoding = FLIP_HAS_GS_COOKIE;
goto DO_RETURN;
}
else if (header.gsCookieOffset == INVALID_GS_COOKIE_OFFSET)
{
state->gsCookieOffset = INVALID_GS_COOKIE_OFFSET;
encoding = FLIP_HAS_GS_COOKIE;
goto DO_RETURN;
}
}
if (state->syncStartOffset != header.syncStartOffset)
{
assert(state->syncStartOffset == INVALID_SYNC_OFFSET || state->syncStartOffset == HAS_SYNC_OFFSET);
if (state->syncStartOffset == INVALID_SYNC_OFFSET)
{
// header.syncStartOffset is non-zero. We can set it
// to zero using FLIP_SYNC
state->syncStartOffset = HAS_SYNC_OFFSET;
encoding = FLIP_SYNC;
goto DO_RETURN;
}
else if (header.syncStartOffset == INVALID_SYNC_OFFSET)
{
state->syncStartOffset = INVALID_SYNC_OFFSET;
encoding = FLIP_SYNC;
goto DO_RETURN;
}
}
if (state->revPInvokeOffset != header.revPInvokeOffset)
{
assert(state->revPInvokeOffset == INVALID_REV_PINVOKE_OFFSET ||
state->revPInvokeOffset == HAS_REV_PINVOKE_FRAME_OFFSET);
if (state->revPInvokeOffset == INVALID_REV_PINVOKE_OFFSET)
{
// header.revPInvokeOffset is non-zero.
state->revPInvokeOffset = HAS_REV_PINVOKE_FRAME_OFFSET;
encoding = FLIP_REV_PINVOKE_FRAME;
goto DO_RETURN;
}
else if (header.revPInvokeOffset == INVALID_REV_PINVOKE_OFFSET)
{
state->revPInvokeOffset = INVALID_REV_PINVOKE_OFFSET;
encoding = FLIP_REV_PINVOKE_FRAME;
goto DO_RETURN;
}
}
DO_RETURN:
_ASSERTE(encoding < MORE_BYTES_TO_FOLLOW);
if (!state->isHeaderMatch(header))
encoding |= MORE_BYTES_TO_FOLLOW;
return encoding;
}
// Estimate how many extra encoding bytes ("distance") would be needed to
// adjust the candidate shortcut header 'p' until it matches 'header'.
// Each mismatched field normally costs one adjustment byte; fields whose
// values exceed their small-encoding maximum cost additional bytes (see
// bigEncoding3/bigEncoding4). The search bails out early as soon as the
// running distance reaches 'closeness' (the best distance found so far),
// since such a candidate can no longer improve on the current best.
static int measureDistance(const InfoHdr& header, const InfoHdrSmall* p, int closeness)
{
int distance = 0;
// Untracked count: values above 3 are encoded via the HAS_UNTRACKED marker
// plus a separately emitted count, so a candidate already carrying
// HAS_UNTRACKED needs no adjustment byte here.
if (p->untrackedCnt != header.untrackedCnt)
{
if (header.untrackedCnt > 3)
{
if (p->untrackedCnt != HAS_UNTRACKED)
distance += 1;
}
else
{
distance += 1;
}
if (distance >= closeness)
return distance;
}
// Var-ptr table size: nonzero sizes are flagged with HAS_VARPTR and the
// size itself is emitted separately.
if (p->varPtrTableSize != header.varPtrTableSize)
{
if (header.varPtrTableSize != 0)
{
if (p->varPtrTableSize != HAS_VARPTR)
distance += 1;
}
else
{
assert(p->varPtrTableSize == HAS_VARPTR);
distance += 1;
}
if (distance >= closeness)
return distance;
}
if (p->frameSize != header.frameSize)
{
distance += 1;
if (distance >= closeness)
return distance;
// We have one-byte encodings for 0..7
if (header.frameSize > SET_FRAMESIZE_MAX)
{
distance += bigEncoding4(p->frameSize, header.frameSize, SET_FRAMESIZE_MAX);
if (distance >= closeness)
return distance;
}
}
if (p->argCount != header.argCount)
{
distance += 1;
if (distance >= closeness)
return distance;
// We have one-byte encodings for 0..8
if (header.argCount > SET_ARGCOUNT_MAX)
{
distance += bigEncoding4(p->argCount, header.argCount, SET_ARGCOUNT_MAX);
if (distance >= closeness)
return distance;
}
}
if (p->prologSize != header.prologSize)
{
distance += 1;
if (distance >= closeness)
return distance;
// We have one-byte encodings for 0..16
if (header.prologSize > SET_PROLOGSIZE_MAX)
{
assert(SET_PROLOGSIZE_MAX > 15);
distance += bigEncoding3(p->prologSize, header.prologSize, 15);
if (distance >= closeness)
return distance;
}
}
if (p->epilogSize != header.epilogSize)
{
distance += 1;
if (distance >= closeness)
return distance;
// We have one-byte encodings for 0..10
if (header.epilogSize > SET_EPILOGSIZE_MAX)
{
distance += bigEncoding3(p->epilogSize, header.epilogSize, SET_EPILOGSIZE_MAX);
if (distance >= closeness)
return distance;
}
}
// Epilog count and the at-end flag are encoded together in one byte.
if ((p->epilogCount != header.epilogCount) || (p->epilogAtEnd != header.epilogAtEnd))
{
distance += 1;
if (distance >= closeness)
return distance;
if (header.epilogCount > SET_EPILOGCNT_MAX)
IMPL_LIMITATION("More than SET_EPILOGCNT_MAX epilogs");
}
// Each of the following boolean/flag fields costs one byte to flip.
if (p->ediSaved != header.ediSaved)
{
distance += 1;
if (distance >= closeness)
return distance;
}
if (p->esiSaved != header.esiSaved)
{
distance += 1;
if (distance >= closeness)
return distance;
}
if (p->ebxSaved != header.ebxSaved)
{
distance += 1;
if (distance >= closeness)
return distance;
}
if (p->ebpSaved != header.ebpSaved)
{
distance += 1;
if (distance >= closeness)
return distance;
}
if (p->ebpFrame != header.ebpFrame)
{
distance += 1;
if (distance >= closeness)
return distance;
}
if (p->interruptible != header.interruptible)
{
distance += 1;
if (distance >= closeness)
return distance;
}
#if DOUBLE_ALIGN
if (p->doubleAlign != header.doubleAlign)
{
distance += 1;
if (distance >= closeness)
return distance;
}
#endif
if (p->security != header.security)
{
distance += 1;
if (distance >= closeness)
return distance;
}
if (p->handlers != header.handlers)
{
distance += 1;
if (distance >= closeness)
return distance;
}
if (p->localloc != header.localloc)
{
distance += 1;
if (distance >= closeness)
return distance;
}
if (p->editNcontinue != header.editNcontinue)
{
distance += 1;
if (distance >= closeness)
return distance;
}
if (p->varargs != header.varargs)
{
distance += 1;
if (distance >= closeness)
return distance;
}
if (p->profCallbacks != header.profCallbacks)
{
distance += 1;
if (distance >= closeness)
return distance;
}
if (p->genericsContext != header.genericsContext)
{
distance += 1;
if (distance >= closeness)
return distance;
}
if (p->genericsContextIsMethodDesc != header.genericsContextIsMethodDesc)
{
distance += 1;
if (distance >= closeness)
return distance;
}
if (p->returnKind != header.returnKind)
{
// Setting the ReturnKind requires two bytes of encoding.
distance += 2;
if (distance >= closeness)
return distance;
}
// The three optional offsets below are never stored in the shortcut table,
// so having one always costs an extra flag byte (the offset value itself is
// emitted separately and is not counted here).
if (header.gsCookieOffset != INVALID_GS_COOKIE_OFFSET)
{
distance += 1;
if (distance >= closeness)
return distance;
}
if (header.syncStartOffset != INVALID_SYNC_OFFSET)
{
distance += 1;
if (distance >= closeness)
return distance;
}
if (header.revPInvokeOffset != INVALID_REV_PINVOKE_OFFSET)
{
distance += 1;
if (distance >= closeness)
return distance;
}
return distance;
}
// DllMain calls gcInitEncoderLookupTable to fill in this table
/* extern */ int infoHdrLookup[IH_MAX_PROLOG_SIZE + 2];
// Build infoHdrLookup so that, for a given prolog size 'sz',
// infoHdrLookup[sz] .. infoHdrLookup[sz + 1] delimits the run of entries in
// infoHdrShortcut[] whose prologSize equals 'sz'. This lets the encoder
// restrict its exact-match search to candidates with the right prolog size.
// Relies on infoHdrShortcut[] being sorted by prologSize, with any
// trailing wrap-around entries (prologSize smaller than a predecessor)
// expected to have prologSize == 0 — see the assert below.
/* static */ void GCInfo::gcInitEncoderLookupTable()
{
const InfoHdrSmall* p = &infoHdrShortcut[0];
int lo = -1;
int hi = 0;
int n;
// Walk the 128 shortcut entries; each time the prolog size changes, fill
// every lookup slot in (lo, hi] with the index of the first entry for that
// size.
for (n = 0; n < 128; n++, p++)
{
if (p->prologSize != lo)
{
if (p->prologSize < lo)
{
// Size decreased: only legal as a wrap back to 0; treat the rest
// of the lookup range as covered.
assert(p->prologSize == 0);
hi = IH_MAX_PROLOG_SIZE;
}
else
hi = p->prologSize;
assert(hi <= IH_MAX_PROLOG_SIZE);
while (lo < hi)
infoHdrLookup[++lo] = n;
if (lo == IH_MAX_PROLOG_SIZE)
break;
}
}
assert(lo == IH_MAX_PROLOG_SIZE);
assert(infoHdrLookup[IH_MAX_PROLOG_SIZE] < 128);
// Advance past the remaining entries that share the final prolog size so
// the sentinel slot marks one past the end of that run.
while (p->prologSize == lo)
{
n++;
if (n >= 128)
break;
p++;
}
infoHdrLookup[++lo] = n;
#ifdef DEBUG
//
// We do some other DEBUG only validity checks here
//
assert(callCommonDelta[0] < callCommonDelta[1]);
assert(callCommonDelta[1] < callCommonDelta[2]);
assert(callCommonDelta[2] < callCommonDelta[3]);
assert(sizeof(CallPattern) == sizeof(unsigned));
// Verify the call-pattern table actually exercises every field's maximum.
unsigned maxMarks = 0;
for (unsigned inx = 0; inx < 80; inx++)
{
CallPattern pat;
pat.val = callPatternTable[inx];
assert(pat.fld.codeDelta <= CP_MAX_CODE_DELTA);
if (pat.fld.codeDelta == CP_MAX_CODE_DELTA)
maxMarks |= 0x01;
assert(pat.fld.argCnt <= CP_MAX_ARG_CNT);
if (pat.fld.argCnt == CP_MAX_ARG_CNT)
maxMarks |= 0x02;
assert(pat.fld.argMask <= CP_MAX_ARG_MASK);
if (pat.fld.argMask == CP_MAX_ARG_MASK)
maxMarks |= 0x04;
}
assert(maxMarks == 0x07);
#endif
}
// Sentinel for *pCached meaning "no previously cached shortcut index".
const int NO_CACHED_HEADER = -1;
// Emit the first byte of the InfoHdr encoding. Searches infoHdrShortcut[]
// for the entry closest to 'header' (trying the cached index first, then the
// entries with a matching prolog size, then everything else).
//
// On return:
//   *state   - the InfoHdr corresponding to the chosen shortcut entry
//   *pCached - updated cache of the chosen shortcut index
//   *more    - 0 for an exact match, otherwise the estimated number of
//              adjustment bytes still needed (see measureDistance)
// Return value: the shortcut index; the high bit (0x80, MORE_BYTES_TO_FOLLOW)
// is set when adjustment bytes will follow.
BYTE FASTCALL encodeHeaderFirst(const InfoHdr& header, InfoHdr* state, int* more, int* pCached)
{
// First try the cached value for an exact match, if there is one
//
int n = *pCached;
const InfoHdrSmall* p;
if (n != NO_CACHED_HEADER)
{
p = &infoHdrShortcut[n];
if (p->isHeaderMatch(header))
{
// exact match found
GetInfoHdr(n, state);
*more = 0;
return n;
}
}
// Next search the table for an exact match
// Only search entries that have a matching prolog size
// Note: lo and hi are saved here as they specify the
// range of entries that have the correct prolog size
//
unsigned psz = header.prologSize;
int lo = 0;
int hi = 0;
if (psz <= IH_MAX_PROLOG_SIZE)
{
lo = infoHdrLookup[psz];
hi = infoHdrLookup[psz + 1];
p = &infoHdrShortcut[lo];
for (n = lo; n < hi; n++, p++)
{
assert(psz == p->prologSize);
if (p->isHeaderMatch(header))
{
// exact match found
GetInfoHdr(n, state);
*pCached = n; // cache the value
*more = 0;
return n;
}
}
}
//
// no exact match in infoHdrShortcut[]
//
// find the nearest entry in the table
//
int nearest = -1;
int closeness = 255; // (i.e. not very close)
//
// Calculate the minimum acceptable distance
// if we find an entry that is at least this close
// we will stop the search and use that value
//
// A match can never be closer than the bytes unavoidably required to encode
// oversized frameSize/argCount values, so accept any candidate that close.
int min_acceptable_distance = 1;
if (header.frameSize > SET_FRAMESIZE_MAX)
{
++min_acceptable_distance;
if (header.frameSize > 32)
++min_acceptable_distance;
}
if (header.argCount > SET_ARGCOUNT_MAX)
{
++min_acceptable_distance;
if (header.argCount > 32)
++min_acceptable_distance;
}
// First try the cached value
// and see if it meets the minimum acceptable distance
//
if (*pCached != NO_CACHED_HEADER)
{
p = &infoHdrShortcut[*pCached];
int distance = measureDistance(header, p, closeness);
assert(distance > 0);
if (distance <= min_acceptable_distance)
{
GetInfoHdr(*pCached, state);
*more = distance;
return 0x80 | *pCached;
}
else
{
closeness = distance;
nearest = *pCached;
}
}
// Then try the ones pointed to by [lo..hi),
// (i.e. the ones that have the correct prolog size)
//
p = &infoHdrShortcut[lo];
for (n = lo; n < hi; n++, p++)
{
if (n == *pCached)
continue; // already tried this one
int distance = measureDistance(header, p, closeness);
assert(distance > 0);
if (distance <= min_acceptable_distance)
{
GetInfoHdr(n, state);
*pCached = n; // Cache this value
*more = distance;
return 0x80 | n;
}
else if (distance < closeness)
{
closeness = distance;
nearest = n;
}
}
int last = infoHdrLookup[IH_MAX_PROLOG_SIZE + 1];
assert(last <= 128);
// Then try all the rest [0..last-1]
p = &infoHdrShortcut[0];
for (n = 0; n < last; n++, p++)
{
if (n == *pCached)
continue; // already tried this one
if ((n >= lo) && (n < hi))
continue; // already tried these
int distance = measureDistance(header, p, closeness);
assert(distance > 0);
if (distance <= min_acceptable_distance)
{
GetInfoHdr(n, state);
*pCached = n; // Cache this value
*more = distance;
return 0x80 | n;
}
else if (distance < closeness)
{
closeness = distance;
nearest = n;
}
}
//
// If we reach here then there was no adjacent neighbor
// in infoHdrShortcut[], closeness indicate how many extra
// bytes we will need to encode this item.
//
assert((nearest >= 0) && (nearest <= 127));
GetInfoHdr(nearest, state);
*pCached = nearest; // Cache this value
*more = closeness;
return 0x80 | nearest;
}
/*****************************************************************************
 *
 * Write the initial part of the method info block. This is called twice;
 * first to compute the size needed for the info (mask=0), the second time
 * to actually generate the contents of the table (mask=-1,dest!=NULL).
 */
// Note on the mask idiom used throughout: 'mask' is all-0s (measure pass) or
// all-1s (emit pass), so 'dest += sz & mask' advances the output pointer only
// when actually emitting, while 'size' always accumulates the total length.
size_t GCInfo::gcInfoBlockHdrSave(
BYTE* dest, int mask, unsigned methodSize, unsigned prologSize, unsigned epilogSize, InfoHdr* header, int* pCached)
{
#ifdef DEBUG
if (compiler->verbose)
printf("*************** In gcInfoBlockHdrSave()\n");
#endif
size_t size = 0;
#if VERIFY_GC_TABLES
*castto(dest, unsigned short*)++ = 0xFEEF;
size += sizeof(short);
#endif
/* Write the method size first (using between 1 and 5 bytes) */
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (compiler->verbose)
{
if (mask)
printf("GCINFO: methodSize = %04X\n", methodSize);
if (mask)
printf("GCINFO: prologSize = %04X\n", prologSize);
if (mask)
printf("GCINFO: epilogSize = %04X\n", epilogSize);
}
#endif
size_t methSz = encodeUnsigned(dest, methodSize);
size += methSz;
dest += methSz & mask;
//
// New style InfoBlk Header
//
// Typically only uses one-byte to store everything.
//
if (mask == 0)
{
// Measure pass: start from a clean header and an empty shortcut cache.
memset(header, 0, sizeof(InfoHdr));
*pCached = NO_CACHED_HEADER;
}
// --- Populate the header fields from the compiler state ---------------
assert(FitsIn<unsigned char>(prologSize));
header->prologSize = static_cast<unsigned char>(prologSize);
assert(FitsIn<unsigned char>(epilogSize));
header->epilogSize = static_cast<unsigned char>(epilogSize);
header->epilogCount = compiler->GetEmitter()->emitGetEpilogCnt();
// Detect truncation when the count doesn't fit in the bitfield.
if (header->epilogCount != compiler->GetEmitter()->emitGetEpilogCnt())
IMPL_LIMITATION("emitGetEpilogCnt() does not fit in InfoHdr::epilogCount");
header->epilogAtEnd = compiler->GetEmitter()->emitHasEpilogEnd();
// Record which callee-saved registers the method modifies (and thus saves).
if (compiler->codeGen->regSet.rsRegsModified(RBM_EDI))
header->ediSaved = 1;
if (compiler->codeGen->regSet.rsRegsModified(RBM_ESI))
header->esiSaved = 1;
if (compiler->codeGen->regSet.rsRegsModified(RBM_EBX))
header->ebxSaved = 1;
header->interruptible = compiler->codeGen->GetInterruptible();
if (!compiler->isFramePointerUsed())
{
#if DOUBLE_ALIGN
if (compiler->genDoubleAlign())
{
header->ebpSaved = true;
assert(!compiler->codeGen->regSet.rsRegsModified(RBM_EBP));
}
#endif
if (compiler->codeGen->regSet.rsRegsModified(RBM_EBP))
{
header->ebpSaved = true;
}
}
else
{
header->ebpSaved = true;
header->ebpFrame = true;
}
#if DOUBLE_ALIGN
header->doubleAlign = compiler->genDoubleAlign();
#endif
header->security = false;
header->handlers = compiler->ehHasCallableHandlers();
header->localloc = compiler->compLocallocUsed;
header->varargs = compiler->info.compIsVarArgs;
header->profCallbacks = compiler->info.compProfilerCallback;
header->editNcontinue = compiler->opts.compDbgEnC;
header->genericsContext = compiler->lvaReportParamTypeArg();
header->genericsContextIsMethodDesc =
header->genericsContext && (compiler->info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC));
ReturnKind returnKind = getReturnKind();
_ASSERTE(IsValidReturnKind(returnKind) && "Return Kind must be valid");
_ASSERTE(!IsStructReturnKind(returnKind) && "Struct Return Kinds Unexpected for JIT32");
_ASSERTE(((int)returnKind < (int)SET_RET_KIND_MAX) && "ReturnKind has no legal encoding");
header->returnKind = returnKind;
// GS cookie: report the (positive) frame offset when the method has one.
header->gsCookieOffset = INVALID_GS_COOKIE_OFFSET;
if (compiler->getNeedsGSSecurityCookie())
{
assert(compiler->lvaGSSecurityCookie != BAD_VAR_NUM);
int stkOffs = compiler->lvaTable[compiler->lvaGSSecurityCookie].GetStackOffset();
header->gsCookieOffset = compiler->isFramePointerUsed() ? -stkOffs : stkOffs;
assert(header->gsCookieOffset != INVALID_GS_COOKIE_OFFSET);
}
header->syncStartOffset = INVALID_SYNC_OFFSET;
header->syncEndOffset = INVALID_SYNC_OFFSET;
#ifndef UNIX_X86_ABI
// JIT is responsible for synchronization on funclet-based EH model that x86/Linux uses.
if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
{
assert(compiler->syncStartEmitCookie != NULL);
header->syncStartOffset = compiler->GetEmitter()->emitCodeOffset(compiler->syncStartEmitCookie, 0);
assert(header->syncStartOffset != INVALID_SYNC_OFFSET);
assert(compiler->syncEndEmitCookie != NULL);
header->syncEndOffset = compiler->GetEmitter()->emitCodeOffset(compiler->syncEndEmitCookie, 0);
assert(header->syncEndOffset != INVALID_SYNC_OFFSET);
assert(header->syncStartOffset < header->syncEndOffset);
// synchronized methods can't have more than 1 epilog
assert(header->epilogCount <= 1);
}
#endif
// Reverse-PInvoke frame variable offset, if this is a reverse-PInvoke method.
header->revPInvokeOffset = INVALID_REV_PINVOKE_OFFSET;
if (compiler->opts.IsReversePInvoke())
{
assert(compiler->lvaReversePInvokeFrameVar != BAD_VAR_NUM);
int stkOffs = compiler->lvaTable[compiler->lvaReversePInvokeFrameVar].GetStackOffset();
header->revPInvokeOffset = compiler->isFramePointerUsed() ? -stkOffs : stkOffs;
assert(header->revPInvokeOffset != INVALID_REV_PINVOKE_OFFSET);
}
// Stack-passed argument count (in pointer-sized slots), excluding register args.
assert((compiler->compArgSize & 0x3) == 0);
size_t argCount =
(compiler->compArgSize - (compiler->codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES;
assert(argCount <= MAX_USHORT_SIZE_T);
header->argCount = static_cast<unsigned short>(argCount);
header->frameSize = compiler->compLclFrameSize / sizeof(int);
if (header->frameSize != (compiler->compLclFrameSize / sizeof(int)))
IMPL_LIMITATION("compLclFrameSize does not fit in InfoHdr::frameSize");
if (mask == 0)
{
gcCountForHeader((UNALIGNED unsigned int*)&header->untrackedCnt,
(UNALIGNED unsigned int*)&header->varPtrTableSize);
}
//
// If the high-order bit of headerEncoding is set
// then additional bytes will update the InfoHdr state
// until the fully state is encoded
//
// --- Encode the header: one shortcut byte plus optional adjustment bytes ---
InfoHdr state;
int more = 0;
BYTE headerEncoding = encodeHeaderFirst(*header, &state, &more, pCached);
++size;
if (mask)
{
#if REGEN_SHORTCUTS
regenLog(headerEncoding, header, &state);
#endif
*dest++ = headerEncoding;
BYTE encoding = headerEncoding;
BYTE codeSet = 1;
// Keep emitting adjustment bytes until the encoder signals completion.
while (encoding & MORE_BYTES_TO_FOLLOW)
{
encoding = encodeHeaderNext(*header, &state, codeSet);
#if REGEN_SHORTCUTS
regenLog(headerEncoding, header, &state);
#endif
_ASSERTE((codeSet == 1 || codeSet == 2) && "Encoding must correspond to InfoHdrAdjust or InfoHdrAdjust2");
// codeSet 2 opcodes live in a second table reached via a NEXT_OPCODE escape byte.
if (codeSet == 2)
{
*dest++ = NEXT_OPCODE | MORE_BYTES_TO_FOLLOW;
++size;
}
*dest++ = encoding;
++size;
}
}
else
{
// Measure pass: encodeHeaderFirst already estimated the adjustment bytes.
size += more;
}
// --- Emit the variable-length fields that follow the header bytes ---------
if (header->untrackedCnt > SET_UNTRACKED_MAX)
{
unsigned count = header->untrackedCnt;
unsigned sz = encodeUnsigned(mask ? dest : NULL, count);
size += sz;
dest += (sz & mask);
}
if (header->varPtrTableSize != 0)
{
unsigned count = header->varPtrTableSize;
unsigned sz = encodeUnsigned(mask ? dest : NULL, count);
size += sz;
dest += (sz & mask);
}
if (header->gsCookieOffset != INVALID_GS_COOKIE_OFFSET)
{
assert(mask == 0 || state.gsCookieOffset == HAS_GS_COOKIE_OFFSET);
unsigned offset = header->gsCookieOffset;
unsigned sz = encodeUnsigned(mask ? dest : NULL, offset);
size += sz;
dest += (sz & mask);
}
if (header->syncStartOffset != INVALID_SYNC_OFFSET)
{
assert(mask == 0 || state.syncStartOffset == HAS_SYNC_OFFSET);
{
unsigned offset = header->syncStartOffset;
unsigned sz = encodeUnsigned(mask ? dest : NULL, offset);
size += sz;
dest += (sz & mask);
}
{
unsigned offset = header->syncEndOffset;
unsigned sz = encodeUnsigned(mask ? dest : NULL, offset);
size += sz;
dest += (sz & mask);
}
}
if (header->revPInvokeOffset != INVALID_REV_PINVOKE_OFFSET)
{
assert(mask == 0 || state.revPInvokeOffset == HAS_REV_PINVOKE_FRAME_OFFSET);
unsigned offset = header->revPInvokeOffset;
unsigned sz = encodeUnsigned(mask ? dest : NULL, offset);
size += sz;
dest += (sz & mask);
}
if (header->epilogCount)
{
/* Generate table unless one epilog at the end of the method */
if (header->epilogAtEnd == 0 || header->epilogCount != 1)
{
#if VERIFY_GC_TABLES
*castto(dest, unsigned short*)++ = 0xFACE;
size += sizeof(short);
#endif
/* Simply write a sorted array of offsets using encodeUDelta */
gcEpilogTable = mask ? dest : NULL;
gcEpilogPrevOffset = 0;
size_t sz = compiler->GetEmitter()->emitGenEpilogLst(gcRecordEpilog, this);
/* Add the size of the epilog table to the total size */
size += sz;
dest += (sz & mask);
}
}
#if DISPLAY_SIZES
if (mask)
{
if (compiler->codeGen->GetInterruptible())
{
genMethodICnt++;
}
else
{
genMethodNCnt++;
}
}
#endif // DISPLAY_SIZES
return size;
}
/*****************************************************************************
 *
 * Return the size of the pointer tracking tables.
 *
 * Runs gcMakeRegPtrTable in "measure" mode (mask == 0): no table contents
 * are produced, only the total size is computed. A small scratch buffer is
 * still required because measure mode may write one entry (never more than
 * 10 bytes) into it.
 */
size_t GCInfo::gcPtrTableSize(const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset)
{
    BYTE scratch[16 + 1];
#ifdef DEBUG
    scratch[16] = 0xAB; // Set some marker
#endif
    /* Compute the total size of the tables */
    const size_t tableSize = gcMakeRegPtrTable(scratch, 0, header, codeSize, pArgTabOffset);
    assert(scratch[16] == 0xAB); // Check that marker didnt get overwritten
    return tableSize;
}
/*****************************************************************************
 * Encode the callee-saved registers into 3 bits.
 *
 * Bit layout of the result: 0x04 = EBX, 0x02 = ESI, 0x01 = EDI.
 */
unsigned gceEncodeCalleeSavedRegs(unsigned regs)
{
    unsigned encodedRegs = 0;

    if ((regs & RBM_EBX) != 0)
    {
        encodedRegs |= 0x04;
    }
    if ((regs & RBM_ESI) != 0)
    {
        encodedRegs |= 0x02;
    }
    if ((regs & RBM_EDI) != 0)
    {
        encodedRegs |= 0x01;
    }

    return encodedRegs;
}
/*****************************************************************************
 * Is the next entry for a byref pointer. If so, emit the prefix for the
 * interruptible encoding. Check only for pushes and registers
 *
 * Returns the (possibly advanced) destination pointer; at most one 0xBF
 * prefix byte is written.
 */
inline BYTE* gceByrefPrefixI(GCInfo::regPtrDsc* rpd, BYTE* dest)
{
    // For registers, we don't need a prefix if it is going dead.
    assert(rpd->rpdArg || rpd->rpdCompiler.rpdDel == 0);

    // Only register entries and argument pushes can carry the prefix.
    const bool isRegOrPush = !rpd->rpdArg || (rpd->rpdArgType == GCInfo::rpdARG_PUSH);

    if (isRegOrPush && (rpd->rpdGCtypeGet() == GCT_BYREF))
    {
        *dest++ = 0xBF;
    }

    return dest;
}
/*****************************************************************************/
/* These functions are needed to work around a VC5.0 compiler bug */
/* DO NOT REMOVE, unless you are sure that the free build works */
// zeroFN/zeroFunc provide a value of 0 that the optimizer cannot fold at
// compile time (it is fetched through a function pointer), which is what
// sidesteps the historical VC5.0 codegen bug mentioned above.
static int zeroFN()
{
return 0;
}
static int (*zeroFunc)() = zeroFN;
/*****************************************************************************
 * Modelling of the GC ptrs pushed on the stack
 */
typedef unsigned pasMaskType;
#define BITS_IN_pasMask (BITS_IN_BYTE * sizeof(pasMaskType))
#define HIGHEST_pasMask_BIT (((pasMaskType)0x1) << (BITS_IN_pasMask - 1))
//-----------------------------------------------------------------------------
// Tracks the GC-ness of outgoing arguments pushed on the stack. The bottom
// BITS_IN_pasMask slots are modelled as bit masks (one bit per slot, plus a
// parallel byref mask); any deeper slots are kept in a side array of GCtype
// bytes, allocated only when the maximum depth requires it.
class PendingArgsStack
{
public:
PendingArgsStack(unsigned maxDepth, Compiler* pComp);
// Push/pop/kill mirror the code stream's arg pushes, pops, and calls.
void pasPush(GCtype gcType);
void pasPop(unsigned count);
void pasKill(unsigned gcCount);
// Current number of pushed argument slots.
unsigned pasCurDepth()
{
return pasDepth;
}
// GC-pointer mask for the bottom slots; only valid while the depth fits
// entirely in the mask (see the asserts).
pasMaskType pasArgMask()
{
assert(pasDepth <= BITS_IN_pasMask);
return pasBottomMask;
}
// Byref qualifier bits corresponding to pasArgMask().
pasMaskType pasByrefArgMask()
{
assert(pasDepth <= BITS_IN_pasMask);
return pasByrefBottomMask;
}
bool pasHasGCptrs();
// Use these in the case where there actually are more ptrs than pasArgMask
unsigned pasEnumGCoffsCount();
#define pasENUM_START ((unsigned)-1)
#define pasENUM_LAST ((unsigned)-2)
#define pasENUM_END ((unsigned)-3)
unsigned pasEnumGCoffs(unsigned iter, unsigned* offs);
protected:
unsigned pasMaxDepth;     // max stack depth this instance must support
unsigned pasDepth;        // current stack depth
pasMaskType pasBottomMask;      // The first 32 args
pasMaskType pasByrefBottomMask; // byref qualifier for pasBottomMask
BYTE* pasTopArray;        // More than 32 args are represented here
unsigned pasPtrsInTopArray; // How many GCptrs here
};
//-----------------------------------------------------------------------------
// Construct an empty pending-args stack able to hold up to 'maxDepth' slots.
// The overflow side array is only allocated when maxDepth exceeds what the
// bit masks can represent.
PendingArgsStack::PendingArgsStack(unsigned maxDepth, Compiler* pComp)
: pasMaxDepth(maxDepth)
, pasDepth(0)
, pasBottomMask(0)
, pasByrefBottomMask(0)
, pasTopArray(NULL)
, pasPtrsInTopArray(0)
{
/* Do we need an array as well as the mask ? */
if (pasMaxDepth > BITS_IN_pasMask)
pasTopArray = pComp->getAllocator(CMK_Unknown).allocate<BYTE>(pasMaxDepth - BITS_IN_pasMask);
}
//-----------------------------------------------------------------------------
// Record one pushed argument slot of the given GC type. Slots within the
// bottom BITS_IN_pasMask entries are tracked in the bit masks; deeper slots
// go into the overflow array.
void PendingArgsStack::pasPush(GCtype gcType)
{
    assert(pasDepth < pasMaxDepth);

    if (pasDepth >= BITS_IN_pasMask)
    {
        /* The mask is full - record this slot in the overflow array */
        pasTopArray[pasDepth - BITS_IN_pasMask] = (BYTE)gcType;
        if (gcType)
            pasPtrsInTopArray++;
    }
    else
    {
        /* Make room in the masks for the new slot's bit */
        pasBottomMask <<= 1;
        pasByrefBottomMask <<= 1;

        if (needsGC(gcType))
        {
            pasBottomMask |= 1;
            if (gcType == GCT_BYREF)
                pasByrefBottomMask |= 1;
        }
    }

    pasDepth++;
}
//-----------------------------------------------------------------------------
// Pop 'count' argument slots. Slots are removed from the overflow array
// first (it holds the most recent pushes), then from the bit masks.
void PendingArgsStack::pasPop(unsigned count)
{
assert(pasDepth >= count);
/* First pop from array (if applicable) */
// Each iteration removes one slot from the overflow array, decrementing
// both the remaining pop count and the depth in lockstep.
for (/**/; (pasDepth > BITS_IN_pasMask) && count; pasDepth--, count--)
{
unsigned topIndex = pasDepth - BITS_IN_pasMask - 1;
GCtype topArg = (GCtype)pasTopArray[topIndex];
if (needsGC(topArg))
pasPtrsInTopArray--;
}
if (count == 0)
return;
/* Now un-shift the mask */
// If pops remain, the overflow array must be empty of GC pointers and the
// rest come out of the bit masks.
assert(pasPtrsInTopArray == 0);
assert(count <= BITS_IN_pasMask);
if (count == BITS_IN_pasMask) // (x>>32) is a nop on x86. So special-case it
{
pasBottomMask = pasByrefBottomMask = 0;
pasDepth = 0;
}
else
{
pasBottomMask >>= count;
pasByrefBottomMask >>= count;
pasDepth -= count;
}
}
//-----------------------------------------------------------------------------
// Kill (but don't pop) the top 'gcCount' args
// Marks the topmost 'gcCount' GC-pointer slots as non-GC (e.g. after a call
// consumes them) without changing the stack depth.
void PendingArgsStack::pasKill(unsigned gcCount)
{
assert(gcCount != 0);
/* First kill args in array (if any) */
// Scan the overflow array from the top down, clearing GC slots until the
// requested count is exhausted or the array is done.
for (unsigned curPos = pasDepth; (curPos > BITS_IN_pasMask) && gcCount; curPos--)
{
unsigned curIndex = curPos - BITS_IN_pasMask - 1;
GCtype curArg = (GCtype)pasTopArray[curIndex];
if (needsGC(curArg))
{
pasTopArray[curIndex] = GCT_NONE;
pasPtrsInTopArray--;
gcCount--;
}
}
/* Now kill bits from the mask */
// Remaining kills come from the bit masks, clearing set bits starting from
// the least-significant (topmost) position.
assert(pasPtrsInTopArray == 0);
assert(gcCount <= BITS_IN_pasMask);
for (unsigned bitPos = 1; gcCount; bitPos <<= 1)
{
assert(pasBottomMask != 0);
if (pasBottomMask & bitPos)
{
pasBottomMask &= ~bitPos;
pasByrefBottomMask &= ~bitPos;
--gcCount;
}
else
{
// Must not walk off the top of the mask before satisfying gcCount.
assert(bitPos != HIGHEST_pasMask_BIT);
}
}
}
//-----------------------------------------------------------------------------
// Used for the case where there are more than BITS_IN_pasMask args on stack,
// but none are any pointers. May avoid reporting anything to GCinfo
//
// Returns true when any pending argument slot currently holds a GC pointer,
// checking both the bottom bit mask and (when in use) the overflow array.
bool PendingArgsStack::pasHasGCptrs()
{
    if (pasBottomMask != 0)
        return true;

    // The overflow array only matters once the depth exceeds the mask.
    return (pasDepth > BITS_IN_pasMask) && (pasPtrsInTopArray != 0);
}
//-----------------------------------------------------------------------------
// Iterates over mask and array to return total count.
// Use only when you are going to emit a table of the offsets
unsigned PendingArgsStack::pasEnumGCoffsCount()
{
/* Should only be used in the worst case, when just the mask can't be used */
assert(pasDepth > BITS_IN_pasMask && pasHasGCptrs());
/* Count number of set bits in mask */
unsigned count = 0;
for (pasMaskType mask = 0x1, i = 0; i < BITS_IN_pasMask; mask <<= 1, i++)
{
if (mask & pasBottomMask)
count++;
}
return count + pasPtrsInTopArray;
}
//-----------------------------------------------------------------------------
// Initalize enumeration by passing in iter=pasENUM_START.
// Continue by passing in the return value as the new value of iter
// End of enumeration when pasENUM_END is returned
// If return value != pasENUM_END, *offs is set to the offset for GCinfo
//
// Offsets are reported in bytes from the top of the pending-args area; byref
// slots have byref_OFFSET_FLAG or'ed in. The iterator walks the overflow
// array first (deepest-pushed entries), then the bottom bit mask.
unsigned PendingArgsStack::pasEnumGCoffs(unsigned iter, unsigned* offs)
{
if (iter == pasENUM_LAST)
return pasENUM_END;
// pasENUM_START begins at the full depth; otherwise resume where we left off.
unsigned i = (iter == pasENUM_START) ? pasDepth : iter;
// Phase 1: scan the overflow array from the resume point downwards.
for (/**/; i > BITS_IN_pasMask; i--)
{
GCtype curArg = (GCtype)pasTopArray[i - BITS_IN_pasMask - 1];
if (needsGC(curArg))
{
unsigned offset;
offset = (pasDepth - i) * TARGET_POINTER_SIZE;
if (curArg == GCT_BYREF)
offset |= byref_OFFSET_FLAG;
*offs = offset;
return i - 1;
}
}
if (!pasBottomMask)
return pasENUM_END;
// Have we already processed some of the bits in pasBottomMask ?
// Phase 2: iter values below BITS_IN_pasMask index bits already consumed.
i = (iter == pasENUM_START || iter >= BITS_IN_pasMask) ? 0 // no
: iter; // yes
for (pasMaskType mask = 0x1 << i; mask; i++, mask <<= 1)
{
if (mask & pasBottomMask)
{
unsigned lvl = (pasDepth > BITS_IN_pasMask) ? (pasDepth - BITS_IN_pasMask) : 0; // How many in pasTopArray[]
lvl += i;
unsigned offset;
offset = lvl * TARGET_POINTER_SIZE;
if (mask & pasByrefBottomMask)
offset |= byref_OFFSET_FLAG;
*offs = offset;
// If higher bits remain set, resume at the next bit; otherwise signal
// that the following call should terminate the enumeration.
unsigned remMask = -int(mask << 1);
return ((pasBottomMask & remMask) ? (i + 1) : pasENUM_LAST);
}
}
assert(!"Shouldnt reach here");
return pasENUM_END;
}
/*****************************************************************************
*
* Generate the register pointer map, and return its total size in bytes. If
* 'mask' is 0, we don't actually store any data in 'dest' (except for one
* entry, which is never more than 10 bytes), so this can be used to merely
* compute the size of the table.
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset)
{
unsigned varNum;
LclVarDsc* varDsc;
size_t totalSize = 0;
unsigned lastOffset;
/* The mask should be all 0's or all 1's */
assert(mask == 0 || mask == -1);
/* Start computing the total size of the table */
bool emitArgTabOffset = (header.varPtrTableSize != 0 || header.untrackedCnt > SET_UNTRACKED_MAX);
if (mask != 0 && emitArgTabOffset)
{
assert(*pArgTabOffset <= MAX_UNSIGNED_SIZE_T);
unsigned sz = encodeUnsigned(dest, static_cast<unsigned>(*pArgTabOffset));
dest += sz;
totalSize += sz;
}
#if VERIFY_GC_TABLES
if (mask)
{
*(short*)dest = (short)0xBEEF;
dest += sizeof(short);
}
totalSize += sizeof(short);
#endif
/**************************************************************************
*
* Untracked ptr variables
*
**************************************************************************
*/
#if DEBUG
unsigned untrackedCount = 0;
unsigned varPtrTableSize = 0;
gcCountForHeader(&untrackedCount, &varPtrTableSize);
assert(untrackedCount == header.untrackedCnt);
assert(varPtrTableSize == header.varPtrTableSize);
#endif // DEBUG
if (header.untrackedCnt != 0)
{
// Write the table of untracked pointer variables.
int lastoffset = 0;
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
// Field local of a PROMOTION_TYPE_DEPENDENT struct must have been
// reported through its parent local
continue;
}
if (varTypeIsGC(varDsc->TypeGet()))
{
if (!gcIsUntrackedLocalOrNonEnregisteredArg(varNum))
{
continue;
}
int offset = varDsc->GetStackOffset();
#if DOUBLE_ALIGN
// For genDoubleAlign(), locals are addressed relative to ESP and
// arguments are addressed relative to EBP.
if (compiler->genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg)
offset += compiler->codeGen->genTotalFrameSize();
#endif
// The lower bits of the offset encode properties of the stk ptr
assert(~OFFSET_MASK % sizeof(offset) == 0);
if (varDsc->TypeGet() == TYP_BYREF)
{
// Or in byref_OFFSET_FLAG for 'byref' pointer tracking
offset |= byref_OFFSET_FLAG;
}
if (varDsc->lvPinned)
{
// Or in pinned_OFFSET_FLAG for 'pinned' pointer tracking
offset |= pinned_OFFSET_FLAG;
}
int encodedoffset = lastoffset - offset;
lastoffset = offset;
if (mask == 0)
totalSize += encodeSigned(NULL, encodedoffset);
else
{
unsigned sz = encodeSigned(dest, encodedoffset);
dest += sz;
totalSize += sz;
}
}
else if ((varDsc->TypeGet() == TYP_STRUCT) && varDsc->lvOnFrame && varDsc->HasGCPtr())
{
ClassLayout* layout = varDsc->GetLayout();
unsigned slots = layout->GetSlotCount();
for (unsigned i = 0; i < slots; i++)
{
if (!layout->IsGCPtr(i))
{
continue;
}
unsigned offset = varDsc->GetStackOffset() + i * TARGET_POINTER_SIZE;
#if DOUBLE_ALIGN
// For genDoubleAlign(), locals are addressed relative to ESP and
// arguments are addressed relative to EBP.
if (compiler->genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg)
{
offset += compiler->codeGen->genTotalFrameSize();
}
#endif
if (layout->GetGCPtrType(i) == TYP_BYREF)
{
offset |= byref_OFFSET_FLAG; // indicate it is a byref GC pointer
}
int encodedoffset = lastoffset - offset;
lastoffset = offset;
if (mask == 0)
{
totalSize += encodeSigned(NULL, encodedoffset);
}
else
{
unsigned sz = encodeSigned(dest, encodedoffset);
dest += sz;
totalSize += sz;
}
}
}
}
/* Count&Write spill temps that hold pointers */
assert(compiler->codeGen->regSet.tmpAllFree());
for (TempDsc* tempItem = compiler->codeGen->regSet.tmpListBeg(); tempItem != nullptr;
tempItem = compiler->codeGen->regSet.tmpListNxt(tempItem))
{
if (varTypeIsGC(tempItem->tdTempType()))
{
{
int offset;
offset = tempItem->tdTempOffs();
if (tempItem->tdTempType() == TYP_BYREF)
{
offset |= byref_OFFSET_FLAG;
}
int encodedoffset = lastoffset - offset;
lastoffset = offset;
if (mask == 0)
{
totalSize += encodeSigned(NULL, encodedoffset);
}
else
{
unsigned sz = encodeSigned(dest, encodedoffset);
dest += sz;
totalSize += sz;
}
}
}
}
}
#if VERIFY_GC_TABLES
if (mask)
{
*(short*)dest = (short)0xCAFE;
dest += sizeof(short);
}
totalSize += sizeof(short);
#endif
/**************************************************************************
*
* Generate the table of stack pointer variable lifetimes.
*
**************************************************************************
*/
bool keepThisAlive = false;
if (!compiler->info.compIsStatic)
{
unsigned thisArgNum = compiler->info.compThisArg;
gcIsUntrackedLocalOrNonEnregisteredArg(thisArgNum, &keepThisAlive);
}
// First we check for the most common case - no lifetimes at all.
if (header.varPtrTableSize != 0)
{
#if !defined(FEATURE_EH_FUNCLETS)
if (keepThisAlive)
{
// Encoding of untracked variables does not support reporting
// "this". So report it as a tracked variable with a liveness
// extending over the entire method.
assert(compiler->lvaTable[compiler->info.compThisArg].TypeGet() == TYP_REF);
unsigned varOffs = compiler->lvaTable[compiler->info.compThisArg].GetStackOffset();
/* For negative stack offsets we must reset the low bits,
* take abs and then set them back */
varOffs = abs(static_cast<int>(varOffs));
varOffs |= this_OFFSET_FLAG;
size_t sz = 0;
sz = encodeUnsigned(mask ? (dest + sz) : NULL, varOffs);
sz += encodeUDelta(mask ? (dest + sz) : NULL, 0, 0);
sz += encodeUDelta(mask ? (dest + sz) : NULL, codeSize, 0);
dest += (sz & mask);
totalSize += sz;
}
#endif // !FEATURE_EH_FUNCLETS
/* We'll use a delta encoding for the lifetime offsets */
lastOffset = 0;
for (varPtrDsc* varTmp = gcVarPtrList; varTmp; varTmp = varTmp->vpdNext)
{
unsigned varOffs;
unsigned lowBits;
unsigned begOffs;
unsigned endOffs;
assert(~OFFSET_MASK % TARGET_POINTER_SIZE == 0);
/* Get hold of the variable's stack offset */
lowBits = varTmp->vpdVarNum & OFFSET_MASK;
/* For negative stack offsets we must reset the low bits,
* take abs and then set them back */
varOffs = abs(static_cast<int>(varTmp->vpdVarNum & ~OFFSET_MASK));
varOffs |= lowBits;
/* Compute the actual lifetime offsets */
begOffs = varTmp->vpdBegOfs;
endOffs = varTmp->vpdEndOfs;
/* Special case: skip any 0-length lifetimes */
if (endOffs == begOffs)
continue;
/* Are we counting or generating? */
size_t sz = 0;
sz = encodeUnsigned(mask ? (dest + sz) : NULL, varOffs);
sz += encodeUDelta(mask ? (dest + sz) : NULL, begOffs, lastOffset);
sz += encodeUDelta(mask ? (dest + sz) : NULL, endOffs, begOffs);
dest += (sz & mask);
totalSize += sz;
/* The next entry will be relative to the one we just processed */
lastOffset = begOffs;
}
}
if (pArgTabOffset != NULL)
*pArgTabOffset = totalSize;
#if VERIFY_GC_TABLES
if (mask)
{
*(short*)dest = (short)0xBABE;
dest += sizeof(short);
}
totalSize += sizeof(short);
#endif
if (!mask && emitArgTabOffset)
{
assert(*pArgTabOffset <= MAX_UNSIGNED_SIZE_T);
totalSize += encodeUnsigned(NULL, static_cast<unsigned>(*pArgTabOffset));
}
/**************************************************************************
*
* Prepare to generate the pointer register/argument map
*
**************************************************************************
*/
lastOffset = 0;
if (compiler->codeGen->GetInterruptible())
{
#ifdef TARGET_X86
assert(compiler->IsFullPtrRegMapRequired());
unsigned ptrRegs = 0;
regPtrDsc* genRegPtrTemp;
/* Walk the list of pointer register/argument entries */
for (genRegPtrTemp = gcRegPtrList; genRegPtrTemp; genRegPtrTemp = genRegPtrTemp->rpdNext)
{
BYTE* base = dest;
unsigned nextOffset;
DWORD codeDelta;
nextOffset = genRegPtrTemp->rpdOffs;
/*
Encoding table for methods that are fully interruptible
The encoding used is as follows:
ptr reg dead 00RRRDDD [RRR != 100]
ptr reg live 01RRRDDD [RRR != 100]
non-ptr arg push 10110DDD [SSS == 110]
ptr arg push 10SSSDDD [SSS != 110] && [SSS != 111]
ptr arg pop 11CCCDDD [CCC != 000] && [CCC != 110] && [CCC != 111]
little skip 11000DDD [CCC == 000]
bigger skip 11110BBB [CCC == 110]
The values used in the above encodings are as follows:
DDD code offset delta from previous entry (0-7)
BBB bigger delta 000=8,001=16,010=24,...,111=64
RRR register number (EAX=000,ECX=001,EDX=010,EBX=011,
EBP=101,ESI=110,EDI=111), ESP=100 is reserved
SSS argument offset from base of stack. This is
redundant for frameless methods as we can
infer it from the previous pushes+pops. However,
for EBP-methods, we only report GC pushes, and
so we need SSS
CCC argument count being popped (includes only ptrs for EBP methods)
The following are the 'large' versions:
large delta skip 10111000 [0xB8] , encodeUnsigned(delta)
large ptr arg push 11111000 [0xF8] , encodeUnsigned(pushCount)
large non-ptr arg push 11111001 [0xF9] , encodeUnsigned(pushCount)
large ptr arg pop 11111100 [0xFC] , encodeUnsigned(popCount)
large arg dead 11111101 [0xFD] , encodeUnsigned(popCount) for caller-pop args.
Any GC args go dead after the call,
but are still sitting on the stack
this pointer prefix 10111100 [0xBC] the next encoding is a ptr live
or a ptr arg push
and contains the this pointer
interior or by-ref 10111111 [0xBF] the next encoding is a ptr live
pointer prefix or a ptr arg push
and contains an interior
or by-ref pointer
The value 11111111 [0xFF] indicates the end of the table.
*/
codeDelta = nextOffset - lastOffset;
assert((int)codeDelta >= 0);
// If the code delta is between 8 and (64+7),
// generate a 'bigger delta' encoding
if ((codeDelta >= 8) && (codeDelta <= (64 + 7)))
{
unsigned biggerDelta = ((codeDelta - 8) & 0x38) + 8;
*dest++ = 0xF0 | ((biggerDelta - 8) >> 3);
lastOffset += biggerDelta;
codeDelta &= 0x07;
}
// If the code delta is still bigger than 7,
// generate a 'large code delta' encoding
if (codeDelta > 7)
{
*dest++ = 0xB8;
dest += encodeUnsigned(dest, codeDelta);
codeDelta = 0;
/* Remember the new 'last' offset */
lastOffset = nextOffset;
}
/* Is this a pointer argument or register entry? */
if (genRegPtrTemp->rpdArg)
{
if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_KILL)
{
if (codeDelta)
{
/*
Use the small encoding:
little delta skip 11000DDD [0xC0]
*/
assert((codeDelta & 0x7) == codeDelta);
*dest++ = 0xC0 | (BYTE)codeDelta;
/* Remember the new 'last' offset */
lastOffset = nextOffset;
}
/* Caller-pop arguments are dead after call but are still
sitting on the stack */
*dest++ = 0xFD;
assert(genRegPtrTemp->rpdPtrArg != 0);
dest += encodeUnsigned(dest, genRegPtrTemp->rpdPtrArg);
}
else if (genRegPtrTemp->rpdPtrArg < 6 && genRegPtrTemp->rpdGCtypeGet())
{
/* Is the argument offset/count smaller than 6 ? */
dest = gceByrefPrefixI(genRegPtrTemp, dest);
if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_PUSH || (genRegPtrTemp->rpdPtrArg != 0))
{
/*
Use the small encoding:
ptr arg push 10SSSDDD [SSS != 110] && [SSS != 111]
ptr arg pop 11CCCDDD [CCC != 110] && [CCC != 111]
*/
bool isPop = genRegPtrTemp->rpdArgTypeGet() == rpdARG_POP;
*dest++ = 0x80 | (BYTE)codeDelta | genRegPtrTemp->rpdPtrArg << 3 | isPop << 6;
/* Remember the new 'last' offset */
lastOffset = nextOffset;
}
else
{
assert(!"Check this");
}
}
else if (genRegPtrTemp->rpdGCtypeGet() == GCT_NONE)
{
/*
Use the small encoding:
` non-ptr arg push 10110DDD [0xB0] (push of sizeof(int))
*/
assert((codeDelta & 0x7) == codeDelta);
*dest++ = 0xB0 | (BYTE)codeDelta;
#ifndef UNIX_X86_ABI
assert(!compiler->isFramePointerUsed());
#endif
/* Remember the new 'last' offset */
lastOffset = nextOffset;
}
else
{
/* Will have to use large encoding;
* first do the code delta
*/
if (codeDelta)
{
/*
Use the small encoding:
little delta skip 11000DDD [0xC0]
*/
assert((codeDelta & 0x7) == codeDelta);
*dest++ = 0xC0 | (BYTE)codeDelta;
}
/*
Now append a large argument record:
large ptr arg push 11111000 [0xF8]
large ptr arg pop 11111100 [0xFC]
*/
bool isPop = genRegPtrTemp->rpdArgTypeGet() == rpdARG_POP;
dest = gceByrefPrefixI(genRegPtrTemp, dest);
*dest++ = 0xF8 | (isPop << 2);
dest += encodeUnsigned(dest, genRegPtrTemp->rpdPtrArg);
/* Remember the new 'last' offset */
lastOffset = nextOffset;
}
}
else
{
unsigned regMask;
/* Record any registers that are becoming dead */
regMask = genRegPtrTemp->rpdCompiler.rpdDel & ptrRegs;
while (regMask) // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
{
unsigned tmpMask;
regNumber regNum;
/* Get hold of the next register bit */
tmpMask = genFindLowestReg(regMask);
assert(tmpMask);
/* Remember the new state of this register */
ptrRegs &= ~tmpMask;
/* Figure out which register the next bit corresponds to */
regNum = genRegNumFromMask(tmpMask);
assert(regNum <= 7);
/* Reserve ESP, regNum==4 for future use */
assert(regNum != 4);
/*
Generate a small encoding:
ptr reg dead 00RRRDDD
*/
assert((codeDelta & 0x7) == codeDelta);
*dest++ = 0x00 | regNum << 3 | (BYTE)codeDelta;
/* Turn the bit we've just generated off and continue */
regMask -= tmpMask; // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
/* Remember the new 'last' offset */
lastOffset = nextOffset;
/* Any entries that follow will be at the same offset */
codeDelta = zeroFunc(); /* DO NOT REMOVE */
}
/* Record any registers that are becoming live */
regMask = genRegPtrTemp->rpdCompiler.rpdAdd & ~ptrRegs;
while (regMask) // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
{
unsigned tmpMask;
regNumber regNum;
/* Get hold of the next register bit */
tmpMask = genFindLowestReg(regMask);
assert(tmpMask);
/* Remember the new state of this register */
ptrRegs |= tmpMask;
/* Figure out which register the next bit corresponds to */
regNum = genRegNumFromMask(tmpMask);
assert(regNum <= 7);
/*
Generate a small encoding:
ptr reg live 01RRRDDD
*/
dest = gceByrefPrefixI(genRegPtrTemp, dest);
if (!keepThisAlive && genRegPtrTemp->rpdIsThis)
{
// Mark with 'this' pointer prefix
*dest++ = 0xBC;
// Can only have one bit set in regMask
assert(regMask == tmpMask);
}
assert((codeDelta & 0x7) == codeDelta);
*dest++ = 0x40 | (regNum << 3) | (BYTE)codeDelta;
/* Turn the bit we've just generated off and continue */
regMask -= tmpMask; // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
/* Remember the new 'last' offset */
lastOffset = nextOffset;
/* Any entries that follow will be at the same offset */
codeDelta = zeroFunc(); /* DO NOT REMOVE */
}
}
/* Keep track of the total amount of generated stuff */
totalSize += dest - base;
/* Go back to the buffer start if we're not generating a table */
if (!mask)
dest = base;
}
#endif // TARGET_X86
/* Terminate the table with 0xFF */
*dest = 0xFF;
dest -= mask;
totalSize++;
}
else if (compiler->isFramePointerUsed()) // GetInterruptible() is false
{
#ifdef TARGET_X86
/*
Encoding table for methods with an EBP frame and
that are not fully interruptible
The encoding used is as follows:
this pointer encodings:
01000000 this pointer in EBX
00100000 this pointer in ESI
00010000 this pointer in EDI
tiny encoding:
0bsdDDDD
requires code delta > 0 & delta < 16 (4-bits)
requires pushed argmask == 0
where DDDD is code delta
b indicates that register EBX is a live pointer
s indicates that register ESI is a live pointer
d indicates that register EDI is a live pointer
small encoding:
1DDDDDDD bsdAAAAA
requires code delta < 120 (7-bits)
requires pushed argmask < 64 (5-bits)
where DDDDDDD is code delta
AAAAA is the pushed args mask
b indicates that register EBX is a live pointer
s indicates that register ESI is a live pointer
d indicates that register EDI is a live pointer
medium encoding
0xFD aaaaaaaa AAAAdddd bseDDDDD
requires code delta < 512 (9-bits)
requires pushed argmask < 2048 (12-bits)
where DDDDD is the upper 5-bits of the code delta
dddd is the low 4-bits of the code delta
AAAA is the upper 4-bits of the pushed arg mask
aaaaaaaa is the low 8-bits of the pushed arg mask
b indicates that register EBX is a live pointer
s indicates that register ESI is a live pointer
e indicates that register EDI is a live pointer
medium encoding with interior pointers
0xF9 DDDDDDDD bsdAAAAAA iiiIIIII
requires code delta < 256 (8-bits)
requires pushed argmask < 64 (5-bits)
where DDDDDDD is the code delta
b indicates that register EBX is a live pointer
s indicates that register ESI is a live pointer
d indicates that register EDI is a live pointer
AAAAA is the pushed arg mask
iii indicates that EBX,EDI,ESI are interior pointers
IIIII indicates that bits in the arg mask are interior
pointers
large encoding
0xFE [0BSD0bsd][32-bit code delta][32-bit argMask]
b indicates that register EBX is a live pointer
s indicates that register ESI is a live pointer
d indicates that register EDI is a live pointer
B indicates that register EBX is an interior pointer
S indicates that register ESI is an interior pointer
D indicates that register EDI is an interior pointer
requires pushed argmask < 32-bits
large encoding with interior pointers
0xFA [0BSD0bsd][32-bit code delta][32-bit argMask][32-bit interior pointer mask]
b indicates that register EBX is a live pointer
s indicates that register ESI is a live pointer
d indicates that register EDI is a live pointer
B indicates that register EBX is an interior pointer
S indicates that register ESI is an interior pointer
D indicates that register EDI is an interior pointer
requires pushed argmask < 32-bits
requires pushed iArgmask < 32-bits
huge encoding This is the only encoding that supports
a pushed argmask which is greater than
32-bits.
0xFB [0BSD0bsd][32-bit code delta]
[32-bit table count][32-bit table size]
[pushed ptr offsets table...]
b indicates that register EBX is a live pointer
s indicates that register ESI is a live pointer
d indicates that register EDI is a live pointer
B indicates that register EBX is an interior pointer
S indicates that register ESI is an interior pointer
D indicates that register EDI is an interior pointer
the list count is the number of entries in the list
the list size gives the byte-length of the list
the offsets in the list are variable-length
*/
/* If "this" is enregistered, note it. We do this explicitly here as
IsFullPtrRegMapRequired()==false, and so we don't have any regPtrDsc's. */
if (compiler->lvaKeepAliveAndReportThis() && compiler->lvaTable[compiler->info.compThisArg].lvRegister)
{
unsigned thisRegMask = genRegMask(compiler->lvaTable[compiler->info.compThisArg].GetRegNum());
unsigned thisPtrRegEnc = gceEncodeCalleeSavedRegs(thisRegMask) << 4;
if (thisPtrRegEnc)
{
totalSize += 1;
if (mask)
*dest++ = thisPtrRegEnc;
}
}
CallDsc* call;
assert(compiler->IsFullPtrRegMapRequired() == false);
/* Walk the list of pointer register/argument entries */
for (call = gcCallDescList; call; call = call->cdNext)
{
BYTE* base = dest;
unsigned nextOffset;
/* Figure out the code offset of this entry */
nextOffset = call->cdOffs;
/* Compute the distance from the previous call */
DWORD codeDelta = nextOffset - lastOffset;
assert((int)codeDelta >= 0);
/* Remember the new 'last' offset */
lastOffset = nextOffset;
/* Compute the register mask */
unsigned gcrefRegMask = 0;
unsigned byrefRegMask = 0;
gcrefRegMask |= gceEncodeCalleeSavedRegs(call->cdGCrefRegs);
byrefRegMask |= gceEncodeCalleeSavedRegs(call->cdByrefRegs);
assert((gcrefRegMask & byrefRegMask) == 0);
unsigned regMask = gcrefRegMask | byrefRegMask;
bool byref = (byrefRegMask | call->u1.cdByrefArgMask) != 0;
/* Check for the really large argument offset case */
/* The very rare Huge encodings */
if (call->cdArgCnt)
{
unsigned argNum;
DWORD argCnt = call->cdArgCnt;
DWORD argBytes = 0;
BYTE* pArgBytes = DUMMY_INIT(NULL);
if (mask != 0)
{
*dest++ = 0xFB;
*dest++ = (byrefRegMask << 4) | regMask;
*(DWORD*)dest = codeDelta;
dest += sizeof(DWORD);
*(DWORD*)dest = argCnt;
dest += sizeof(DWORD);
// skip the byte-size for now. Just note where it will go
pArgBytes = dest;
dest += sizeof(DWORD);
}
for (argNum = 0; argNum < argCnt; argNum++)
{
unsigned eltSize;
eltSize = encodeUnsigned(dest, call->cdArgTable[argNum]);
argBytes += eltSize;
if (mask)
dest += eltSize;
}
if (mask == 0)
{
dest = base + 1 + 1 + 3 * sizeof(DWORD) + argBytes;
}
else
{
assert(dest == pArgBytes + sizeof(argBytes) + argBytes);
*(DWORD*)pArgBytes = argBytes;
}
}
/* Check if we can use a tiny encoding */
else if ((codeDelta < 16) && (codeDelta != 0) && (call->u1.cdArgMask == 0) && !byref)
{
*dest++ = (regMask << 4) | (BYTE)codeDelta;
}
/* Check if we can use the small encoding */
else if ((codeDelta < 0x79) && (call->u1.cdArgMask <= 0x1F) && !byref)
{
*dest++ = 0x80 | (BYTE)codeDelta;
*dest++ = call->u1.cdArgMask | (regMask << 5);
}
/* Check if we can use the medium encoding */
else if (codeDelta <= 0x01FF && call->u1.cdArgMask <= 0x0FFF && !byref)
{
*dest++ = 0xFD;
*dest++ = call->u1.cdArgMask;
*dest++ = ((call->u1.cdArgMask >> 4) & 0xF0) | ((BYTE)codeDelta & 0x0F);
*dest++ = (regMask << 5) | (BYTE)((codeDelta >> 4) & 0x1F);
}
/* Check if we can use the medium encoding with byrefs */
else if (codeDelta <= 0x0FF && call->u1.cdArgMask <= 0x01F)
{
*dest++ = 0xF9;
*dest++ = (BYTE)codeDelta;
*dest++ = (regMask << 5) | call->u1.cdArgMask;
*dest++ = (byrefRegMask << 5) | call->u1.cdByrefArgMask;
}
/* We'll use the large encoding */
else if (!byref)
{
*dest++ = 0xFE;
*dest++ = (byrefRegMask << 4) | regMask;
*(DWORD*)dest = codeDelta;
dest += sizeof(DWORD);
*(DWORD*)dest = call->u1.cdArgMask;
dest += sizeof(DWORD);
}
/* We'll use the large encoding with byrefs */
else
{
*dest++ = 0xFA;
*dest++ = (byrefRegMask << 4) | regMask;
*(DWORD*)dest = codeDelta;
dest += sizeof(DWORD);
*(DWORD*)dest = call->u1.cdArgMask;
dest += sizeof(DWORD);
*(DWORD*)dest = call->u1.cdByrefArgMask;
dest += sizeof(DWORD);
}
/* Keep track of the total amount of generated stuff */
totalSize += dest - base;
/* Go back to the buffer start if we're not generating a table */
if (!mask)
dest = base;
}
#endif // TARGET_X86
/* Terminate the table with 0xFF */
*dest = 0xFF;
dest -= mask;
totalSize++;
}
else // GetInterruptible() is false and we have an EBP-less frame
{
assert(compiler->IsFullPtrRegMapRequired());
#ifdef TARGET_X86
regPtrDsc* genRegPtrTemp;
regNumber thisRegNum = regNumber(0);
PendingArgsStack pasStk(compiler->GetEmitter()->emitMaxStackDepth, compiler);
/* Walk the list of pointer register/argument entries */
for (genRegPtrTemp = gcRegPtrList; genRegPtrTemp; genRegPtrTemp = genRegPtrTemp->rpdNext)
{
/*
* Encoding table for methods without an EBP frame and
* that are not fully interruptible
*
* The encoding used is as follows:
*
* push 000DDDDD ESP push one item with 5-bit delta
* push 00100000 [pushCount] ESP push multiple items
* reserved 0010xxxx xxxx != 0000
* reserved 0011xxxx
* skip 01000000 [Delta] Skip Delta, arbitrary sized delta
* skip 0100DDDD Skip small Delta, for call (DDDD != 0)
* pop 01CCDDDD ESP pop CC items with 4-bit delta (CC != 00)
* call 1PPPPPPP Call Pattern, P=[0..79]
* call 1101pbsd DDCCCMMM Call RegMask=pbsd,ArgCnt=CCC,
* ArgMask=MMM Delta=commonDelta[DD]
* call 1110pbsd [ArgCnt] [ArgMask] Call ArgCnt,RegMask=pbsd,ArgMask
* call 11111000 [PBSDpbsd][32-bit delta][32-bit ArgCnt]
* [32-bit PndCnt][32-bit PndSize][PndOffs...]
* iptr 11110000 [IPtrMask] Arbitrary Interior Pointer Mask
* thisptr 111101RR This pointer is in Register RR
* 00=EDI,01=ESI,10=EBX,11=EBP
* reserved 111100xx xx != 00
* reserved 111110xx xx != 00
* reserved 11111xxx xxx != 000 && xxx != 111(EOT)
*
* The value 11111111 [0xFF] indicates the end of the table. (EOT)
*
* An offset (at which stack-walking is performed) without an explicit encoding
* is assumed to be a trivial call-site (no GC registers, stack empty before and
* after) to avoid having to encode all trivial calls.
*
* Note on the encoding used for interior pointers
*
* The iptr encoding must immediately precede a call encoding. It is used
* to transform a normal GC pointer addresses into an interior pointers for
* GC purposes. The mask supplied to the iptr encoding is read from the
* least signicant bit to the most signicant bit. (i.e the lowest bit is
* read first)
*
* p indicates that register EBP is a live pointer
* b indicates that register EBX is a live pointer
* s indicates that register ESI is a live pointer
* d indicates that register EDI is a live pointer
* P indicates that register EBP is an interior pointer
* B indicates that register EBX is an interior pointer
* S indicates that register ESI is an interior pointer
* D indicates that register EDI is an interior pointer
*
* As an example the following sequence indicates that EDI.ESI and the
* second pushed pointer in ArgMask are really interior pointers. The
* pointer in ESI in a normal pointer:
*
* iptr 11110000 00010011 => read Interior Ptr, Interior Ptr,
* Normal Ptr, Normal Ptr, Interior Ptr
*
* call 11010011 DDCCC011 RRRR=1011 => read EDI is a GC-pointer,
* ESI is a GC-pointer.
* EBP is a GC-pointer
* MMM=0011 => read two GC-pointers arguments
* on the stack (nested call)
*
* Since the call instruction mentions 5 GC-pointers we list them in
* the required order: EDI, ESI, EBP, 1st-pushed pointer, 2nd-pushed pointer
*
* And we apply the Interior Pointer mask mmmm=10011 to the five GC-pointers
* we learn that EDI and ESI are interior GC-pointers and that
* the second push arg is an interior GC-pointer.
*/
BYTE* base = dest;
bool usePopEncoding;
unsigned regMask;
unsigned argMask;
unsigned byrefRegMask;
unsigned byrefArgMask;
DWORD callArgCnt;
unsigned nextOffset;
DWORD codeDelta;
nextOffset = genRegPtrTemp->rpdOffs;
/* Compute the distance from the previous call */
codeDelta = nextOffset - lastOffset;
assert((int)codeDelta >= 0);
#if REGEN_CALLPAT
// Must initialize this flag to true when REGEN_CALLPAT is on
usePopEncoding = true;
unsigned origCodeDelta = codeDelta;
#endif
if (!keepThisAlive && genRegPtrTemp->rpdIsThis)
{
unsigned tmpMask = genRegPtrTemp->rpdCompiler.rpdAdd;
/* tmpMask must have exactly one bit set */
assert(tmpMask && ((tmpMask & (tmpMask - 1)) == 0));
thisRegNum = genRegNumFromMask(tmpMask);
switch (thisRegNum)
{
case 0: // EAX
case 1: // ECX
case 2: // EDX
case 4: // ESP
break;
case 7: // EDI
*dest++ = 0xF4; /* 11110100 This pointer is in EDI */
break;
case 6: // ESI
*dest++ = 0xF5; /* 11110100 This pointer is in ESI */
break;
case 3: // EBX
*dest++ = 0xF6; /* 11110100 This pointer is in EBX */
break;
case 5: // EBP
*dest++ = 0xF7; /* 11110100 This pointer is in EBP */
break;
default:
break;
}
}
/* Is this a stack pointer change or call? */
if (genRegPtrTemp->rpdArg)
{
if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_KILL)
{
// kill 'rpdPtrArg' number of pointer variables in pasStk
pasStk.pasKill(genRegPtrTemp->rpdPtrArg);
}
/* Is this a call site? */
else if (genRegPtrTemp->rpdCall)
{
/* This is a true call site */
/* Remember the new 'last' offset */
lastOffset = nextOffset;
callArgCnt = genRegPtrTemp->rpdPtrArg;
unsigned gcrefRegMask = genRegPtrTemp->rpdCallGCrefRegs;
byrefRegMask = genRegPtrTemp->rpdCallByrefRegs;
assert((gcrefRegMask & byrefRegMask) == 0);
regMask = gcrefRegMask | byrefRegMask;
/* adjust argMask for this call-site */
pasStk.pasPop(callArgCnt);
/* Do we have to use the fat encoding */
if (pasStk.pasCurDepth() > BITS_IN_pasMask && pasStk.pasHasGCptrs())
{
/* use fat encoding:
* 11111000 [PBSDpbsd][32-bit delta][32-bit ArgCnt]
* [32-bit PndCnt][32-bit PndSize][PndOffs...]
*/
DWORD pndCount = pasStk.pasEnumGCoffsCount();
DWORD pndSize = 0;
BYTE* pPndSize = DUMMY_INIT(NULL);
if (mask)
{
*dest++ = 0xF8;
*dest++ = (byrefRegMask << 4) | regMask;
*(DWORD*)dest = codeDelta;
dest += sizeof(DWORD);
*(DWORD*)dest = callArgCnt;
dest += sizeof(DWORD);
*(DWORD*)dest = pndCount;
dest += sizeof(DWORD);
pPndSize = dest;
dest += sizeof(DWORD); // Leave space for pndSize
}
unsigned offs, iter;
for (iter = pasStk.pasEnumGCoffs(pasENUM_START, &offs); pndCount;
iter = pasStk.pasEnumGCoffs(iter, &offs), pndCount--)
{
unsigned eltSize = encodeUnsigned(dest, offs);
pndSize += eltSize;
if (mask)
dest += eltSize;
}
assert(iter == pasENUM_END);
if (mask == 0)
{
dest = base + 2 + 4 * sizeof(DWORD) + pndSize;
}
else
{
assert(pPndSize + sizeof(pndSize) + pndSize == dest);
*(DWORD*)pPndSize = pndSize;
}
goto NEXT_RPD;
}
argMask = byrefArgMask = 0;
if (pasStk.pasHasGCptrs())
{
assert(pasStk.pasCurDepth() <= BITS_IN_pasMask);
argMask = pasStk.pasArgMask();
byrefArgMask = pasStk.pasByrefArgMask();
}
/* Shouldn't be reporting trivial call-sites */
assert(regMask || argMask || callArgCnt || pasStk.pasCurDepth());
// Emit IPtrMask if needed
#define CHK_NON_INTRPT_ESP_IPtrMask \
\
if (byrefRegMask || byrefArgMask) \
{ \
*dest++ = 0xF0; \
unsigned imask = (byrefArgMask << 4) | byrefRegMask; \
dest += encodeUnsigned(dest, imask); \
}
/* When usePopEncoding is true:
* this is not an interesting call site
* because nothing is live here.
*/
usePopEncoding = ((callArgCnt < 4) && (regMask == 0) && (argMask == 0));
if (!usePopEncoding)
{
int pattern = lookupCallPattern(callArgCnt, regMask, argMask, codeDelta);
if (pattern != -1)
{
if (pattern > 0xff)
{
codeDelta = pattern >> 8;
pattern &= 0xff;
if (codeDelta >= 16)
{
/* use encoding: */
/* skip 01000000 [Delta] */
*dest++ = 0x40;
dest += encodeUnsigned(dest, codeDelta);
codeDelta = 0;
}
else
{
/* use encoding: */
/* skip 0100DDDD small delta=DDDD */
*dest++ = 0x40 | (BYTE)codeDelta;
}
}
// Emit IPtrMask if needed
CHK_NON_INTRPT_ESP_IPtrMask;
assert((pattern >= 0) && (pattern < 80));
*dest++ = 0x80 | pattern;
goto NEXT_RPD;
}
/* See if we can use 2nd call encoding
* 1101RRRR DDCCCMMM encoding */
if ((callArgCnt <= 7) && (argMask <= 7))
{
unsigned inx; // callCommonDelta[] index
unsigned maxCommonDelta = callCommonDelta[3];
if (codeDelta > maxCommonDelta)
{
if (codeDelta > maxCommonDelta + 15)
{
/* use encoding: */
/* skip 01000000 [Delta] */
*dest++ = 0x40;
dest += encodeUnsigned(dest, codeDelta - maxCommonDelta);
}
else
{
/* use encoding: */
/* skip 0100DDDD small delta=DDDD */
*dest++ = 0x40 | (BYTE)(codeDelta - maxCommonDelta);
}
codeDelta = maxCommonDelta;
inx = 3;
goto EMIT_2ND_CALL_ENCODING;
}
for (inx = 0; inx < 4; inx++)
{
if (codeDelta == callCommonDelta[inx])
{
EMIT_2ND_CALL_ENCODING:
// Emit IPtrMask if needed
CHK_NON_INTRPT_ESP_IPtrMask;
*dest++ = 0xD0 | regMask;
*dest++ = (inx << 6) | (callArgCnt << 3) | argMask;
goto NEXT_RPD;
}
}
unsigned minCommonDelta = callCommonDelta[0];
if ((codeDelta > minCommonDelta) && (codeDelta < maxCommonDelta))
{
assert((minCommonDelta + 16) > maxCommonDelta);
/* use encoding: */
/* skip 0100DDDD small delta=DDDD */
*dest++ = 0x40 | (BYTE)(codeDelta - minCommonDelta);
codeDelta = minCommonDelta;
inx = 0;
goto EMIT_2ND_CALL_ENCODING;
}
}
}
if (codeDelta >= 16)
{
unsigned i = (usePopEncoding ? 15 : 0);
/* use encoding: */
/* skip 01000000 [Delta] arbitrary sized delta */
*dest++ = 0x40;
dest += encodeUnsigned(dest, codeDelta - i);
codeDelta = i;
}
if ((codeDelta > 0) || usePopEncoding)
{
if (usePopEncoding)
{
/* use encoding: */
/* pop 01CCDDDD ESP pop CC items, 4-bit delta */
if (callArgCnt || codeDelta)
*dest++ = (BYTE)(0x40 | (callArgCnt << 4) | codeDelta);
goto NEXT_RPD;
}
else
{
/* use encoding: */
/* skip 0100DDDD small delta=DDDD */
*dest++ = 0x40 | (BYTE)codeDelta;
}
}
// Emit IPtrMask if needed
CHK_NON_INTRPT_ESP_IPtrMask;
/* use encoding: */
/* call 1110RRRR [ArgCnt] [ArgMask] */
*dest++ = 0xE0 | regMask;
dest += encodeUnsigned(dest, callArgCnt);
dest += encodeUnsigned(dest, argMask);
}
else
{
/* This is a push or a pop site */
/* Remember the new 'last' offset */
lastOffset = nextOffset;
if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_POP)
{
/* This must be a gcArgPopSingle */
assert(genRegPtrTemp->rpdPtrArg == 1);
if (codeDelta >= 16)
{
/* use encoding: */
/* skip 01000000 [Delta] */
*dest++ = 0x40;
dest += encodeUnsigned(dest, codeDelta - 15);
codeDelta = 15;
}
/* use encoding: */
/* pop1 0101DDDD ESP pop one item, 4-bit delta */
*dest++ = 0x50 | (BYTE)codeDelta;
/* adjust argMask for this pop */
pasStk.pasPop(1);
}
else
{
/* This is a push */
if (codeDelta >= 32)
{
/* use encoding: */
/* skip 01000000 [Delta] */
*dest++ = 0x40;
dest += encodeUnsigned(dest, codeDelta - 31);
codeDelta = 31;
}
assert(codeDelta < 32);
/* use encoding: */
/* push 000DDDDD ESP push one item, 5-bit delta */
*dest++ = (BYTE)codeDelta;
/* adjust argMask for this push */
pasStk.pasPush(genRegPtrTemp->rpdGCtypeGet());
}
}
}
/* We ignore the register live/dead information, since the
* rpdCallRegMask contains all the liveness information
* that we need
*/
NEXT_RPD:
totalSize += dest - base;
/* Go back to the buffer start if we're not generating a table */
if (!mask)
dest = base;
#if REGEN_CALLPAT
if ((mask == -1) && (usePopEncoding == false) && ((dest - base) > 0))
regenLog(origCodeDelta, argMask, regMask, callArgCnt, byrefArgMask, byrefRegMask, base, (dest - base));
#endif
}
/* Verify that we pop every arg that was pushed and that argMask is 0 */
assert(pasStk.pasCurDepth() == 0);
#endif // TARGET_X86
/* Terminate the table with 0xFF */
*dest = 0xFF;
dest -= mask;
totalSize++;
}
#if VERIFY_GC_TABLES
if (mask)
{
*(short*)dest = (short)0xBEEB;
dest += sizeof(short);
}
totalSize += sizeof(short);
#endif
#if MEASURE_PTRTAB_SIZE
if (mask)
s_gcTotalPtrTabSize += totalSize;
#endif
return totalSize;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
/*****************************************************************************/
#if DUMP_GC_TABLES
/*****************************************************************************
*
* Dump the contents of a GC pointer table.
*/
#include "gcdump.h"
// Whether the GC table dumpers should check for the verification marker words
// (e.g. 0xBABE / 0xBEEB) that the encoder embeds when VERIFY_GC_TABLES is set;
// passed through to the GCDump::Dump* routines below.
#if VERIFY_GC_TABLES
const bool verifyGCTables = true;
#else
const bool verifyGCTables = false;
#endif
/*****************************************************************************
*
* Dump the info block header.
*/
// Dump the GC info block header at 'table', filling in '*header' and
// '*methodSize'. Returns the number of bytes of the table that were consumed.
size_t GCInfo::gcInfoBlockHdrDump(const BYTE* table, InfoHdr* header, unsigned* methodSize)
{
    printf("Method info block:\n");

    GCDump dumper(GCINFO_VERSION);
    dumper.gcPrintf = gcDump_logf; // route the dump output through the VM-logging printf
    return dumper.DumpInfoHdr(table, header, methodSize, verifyGCTables);
}
/*****************************************************************************/
// Dump the encoded GC pointer table at 'table' (whose header has already been
// decoded into 'header'). Returns the number of bytes consumed.
size_t GCInfo::gcDumpPtrTable(const BYTE* table, const InfoHdr& header, unsigned methodSize)
{
    printf("Pointer table:\n");

    GCDump dumper(GCINFO_VERSION);
    dumper.gcPrintf = gcDump_logf; // route the dump output through the VM-logging printf
    return dumper.DumpGCTable(table, header, methodSize, verifyGCTables);
}
/*****************************************************************************
*
* Find all the live pointers in a stack frame.
*/
void GCInfo::gcFindPtrsInFrame(const void* infoBlock, const void* codeBlock, unsigned offs)
{
GCDump gcDump(GCINFO_VERSION);
gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM)
gcDump.DumpPtrsInFrame((PTR_CBYTE)infoBlock, (const BYTE*)codeBlock, offs, verifyGCTables);
}
#endif // DUMP_GC_TABLES
#else // !JIT32_GCENCODER
#include "gcinfoencoder.h"
// Do explicit instantiation.
template class JitHashTable<RegSlotIdKey, RegSlotIdKey, GcSlotId>;
template class JitHashTable<StackSlotIdKey, StackSlotIdKey, GcSlotId>;
#if defined(DEBUG) || DUMP_GC_TABLES
// This is a copy of GcStackSlotBaseNames from gcinfotypes.h so we can compile in to non-DEBUG builds.
const char* const JitGcStackSlotBaseNames[] = {"caller.sp", "sp", "frame"};

// Printable names for GC slot flag combinations, indexed by the low three
// flag bits (flags & 7) — byref / pinned / untracked — when logging slot ids.
static const char* const GcSlotFlagsNames[] = {"",
                                               "(byref) ",
                                               "(pinned) ",
                                               "(byref, pinned) ",
                                               "(untracked) ",
                                               "(byref, untracked) ",
                                               "(pinned, untracked) ",
                                               "(byref, pinned, untracked) "};
// I'm making a local wrapper class for GcInfoEncoder so that can add logging of my own (DLD).
class GcInfoEncoderWithLogging
{
GcInfoEncoder* m_gcInfoEncoder;
bool m_doLogging;
public:
GcInfoEncoderWithLogging(GcInfoEncoder* gcInfoEncoder, bool verbose)
: m_gcInfoEncoder(gcInfoEncoder), m_doLogging(verbose INDEBUG(|| JitConfig.JitGCInfoLogging() != 0))
{
}
GcSlotId GetStackSlotId(INT32 spOffset, GcSlotFlags flags, GcStackSlotBase spBase = GC_CALLER_SP_REL)
{
GcSlotId newSlotId = m_gcInfoEncoder->GetStackSlotId(spOffset, flags, spBase);
if (m_doLogging)
{
printf("Stack slot id for offset %d (%s0x%x) (%s) %s= %d.\n", spOffset, spOffset < 0 ? "-" : "",
abs(spOffset), JitGcStackSlotBaseNames[spBase], GcSlotFlagsNames[flags & 7], newSlotId);
}
return newSlotId;
}
GcSlotId GetRegisterSlotId(UINT32 regNum, GcSlotFlags flags)
{
GcSlotId newSlotId = m_gcInfoEncoder->GetRegisterSlotId(regNum, flags);
if (m_doLogging)
{
printf("Register slot id for reg %s %s= %d.\n", getRegName(regNum), GcSlotFlagsNames[flags & 7], newSlotId);
}
return newSlotId;
}
void SetSlotState(UINT32 instructionOffset, GcSlotId slotId, GcSlotState slotState)
{
m_gcInfoEncoder->SetSlotState(instructionOffset, slotId, slotState);
if (m_doLogging)
{
printf("Set state of slot %d at instr offset 0x%x to %s.\n", slotId, instructionOffset,
(slotState == GC_SLOT_LIVE ? "Live" : "Dead"));
}
}
void DefineCallSites(UINT32* pCallSites, BYTE* pCallSiteSizes, UINT32 numCallSites)
{
m_gcInfoEncoder->DefineCallSites(pCallSites, pCallSiteSizes, numCallSites);
if (m_doLogging)
{
printf("Defining %d call sites:\n", numCallSites);
for (UINT32 k = 0; k < numCallSites; k++)
{
printf(" Offset 0x%x, size %d.\n", pCallSites[k], pCallSiteSizes[k]);
}
}
}
void DefineInterruptibleRange(UINT32 startInstructionOffset, UINT32 length)
{
m_gcInfoEncoder->DefineInterruptibleRange(startInstructionOffset, length);
if (m_doLogging)
{
printf("Defining interruptible range: [0x%x, 0x%x).\n", startInstructionOffset,
startInstructionOffset + length);
}
}
void SetCodeLength(UINT32 length)
{
m_gcInfoEncoder->SetCodeLength(length);
if (m_doLogging)
{
printf("Set code length to %d.\n", length);
}
}
void SetReturnKind(ReturnKind returnKind)
{
m_gcInfoEncoder->SetReturnKind(returnKind);
if (m_doLogging)
{
printf("Set ReturnKind to %s.\n", ReturnKindToString(returnKind));
}
}
void SetStackBaseRegister(UINT32 registerNumber)
{
m_gcInfoEncoder->SetStackBaseRegister(registerNumber);
if (m_doLogging)
{
printf("Set stack base register to %s.\n", getRegName(registerNumber));
}
}
void SetPrologSize(UINT32 prologSize)
{
m_gcInfoEncoder->SetPrologSize(prologSize);
if (m_doLogging)
{
printf("Set prolog size 0x%x.\n", prologSize);
}
}
void SetGSCookieStackSlot(INT32 spOffsetGSCookie, UINT32 validRangeStart, UINT32 validRangeEnd)
{
m_gcInfoEncoder->SetGSCookieStackSlot(spOffsetGSCookie, validRangeStart, validRangeEnd);
if (m_doLogging)
{
printf("Set GS Cookie stack slot to %d, valid from 0x%x to 0x%x.\n", spOffsetGSCookie, validRangeStart,
validRangeEnd);
}
}
void SetPSPSymStackSlot(INT32 spOffsetPSPSym)
{
m_gcInfoEncoder->SetPSPSymStackSlot(spOffsetPSPSym);
if (m_doLogging)
{
printf("Set PSPSym stack slot to %d.\n", spOffsetPSPSym);
}
}
void SetGenericsInstContextStackSlot(INT32 spOffsetGenericsContext, GENERIC_CONTEXTPARAM_TYPE type)
{
m_gcInfoEncoder->SetGenericsInstContextStackSlot(spOffsetGenericsContext, type);
if (m_doLogging)
{
printf("Set generic instantiation context stack slot to %d, type is %s.\n", spOffsetGenericsContext,
(type == GENERIC_CONTEXTPARAM_THIS
? "THIS"
: (type == GENERIC_CONTEXTPARAM_MT ? "MT"
: (type == GENERIC_CONTEXTPARAM_MD ? "MD" : "UNKNOWN!"))));
}
}
void SetSecurityObjectStackSlot(INT32 spOffset)
{
m_gcInfoEncoder->SetSecurityObjectStackSlot(spOffset);
if (m_doLogging)
{
printf("Set security object stack slot to %d.\n", spOffset);
}
}
void SetIsVarArg()
{
m_gcInfoEncoder->SetIsVarArg();
if (m_doLogging)
{
printf("SetIsVarArg.\n");
}
}
#ifdef TARGET_AMD64
void SetWantsReportOnlyLeaf()
{
m_gcInfoEncoder->SetWantsReportOnlyLeaf();
if (m_doLogging)
{
printf("Set WantsReportOnlyLeaf.\n");
}
}
#elif defined(TARGET_ARMARCH)
void SetHasTailCalls()
{
m_gcInfoEncoder->SetHasTailCalls();
if (m_doLogging)
{
printf("Set HasTailCalls.\n");
}
}
#endif // TARGET_AMD64
void SetSizeOfStackOutgoingAndScratchArea(UINT32 size)
{
m_gcInfoEncoder->SetSizeOfStackOutgoingAndScratchArea(size);
if (m_doLogging)
{
printf("Set Outgoing stack arg area size to %d.\n", size);
}
}
};
#define GCENCODER_WITH_LOGGING(withLog, realEncoder) \
GcInfoEncoderWithLogging withLog##Var(realEncoder, INDEBUG(compiler->verbose ||) compiler->opts.dspGCtbls); \
GcInfoEncoderWithLogging* withLog = &withLog##Var;
#else // !(defined(DEBUG) || DUMP_GC_TABLES)
#define GCENCODER_WITH_LOGGING(withLog, realEncoder) GcInfoEncoder* withLog = realEncoder;
#endif // !(defined(DEBUG) || DUMP_GC_TABLES)
//------------------------------------------------------------------------
// gcInfoBlockHdrSave: Record method-level ("header") state in the GC info.
//
// Records code length, return kind, frame register, varargs-ness, the
// generics instantiation context slot (or kept-alive "this"), the GS cookie
// slot, the PSPSym slot, and the outgoing argument area size, as applicable.
//
// Arguments:
//    gcInfoEncoder - encoder accumulating this method's GC info
//    methodSize    - size, in bytes, of the generated code
//    prologSize    - size, in bytes, of the method prolog
//
void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder, unsigned methodSize, unsigned prologSize)
{
#ifdef DEBUG
    if (compiler->verbose)
    {
        printf("*************** In gcInfoBlockHdrSave()\n");
    }
#endif
    GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);
    // Can't create tables if we've not saved code.
    gcInfoEncoderWithLog->SetCodeLength(methodSize);
    gcInfoEncoderWithLog->SetReturnKind(getReturnKind());
    if (compiler->isFramePointerUsed())
    {
        gcInfoEncoderWithLog->SetStackBaseRegister(REG_FPBASE);
    }
    if (compiler->info.compIsVarArgs)
    {
        gcInfoEncoderWithLog->SetIsVarArg();
    }
    // No equivalents.
    // header->profCallbacks = compiler->info.compProfilerCallback;
    // header->editNcontinue = compiler->opts.compDbgEnC;
    //
    if (compiler->lvaReportParamTypeArg())
    {
        // The predicate above is true only if there is an extra generic context parameter, not for
        // the case where the generic context is provided by "this."
        assert((SIZE_T)compiler->info.compTypeCtxtArg != BAD_VAR_NUM);
        GENERIC_CONTEXTPARAM_TYPE ctxtParamType = GENERIC_CONTEXTPARAM_NONE;
        switch (compiler->info.compMethodInfo->options & CORINFO_GENERICS_CTXT_MASK)
        {
            case CORINFO_GENERICS_CTXT_FROM_METHODDESC:
                ctxtParamType = GENERIC_CONTEXTPARAM_MD;
                break;
            case CORINFO_GENERICS_CTXT_FROM_METHODTABLE:
                ctxtParamType = GENERIC_CONTEXTPARAM_MT;
                break;
            case CORINFO_GENERICS_CTXT_FROM_THIS: // See comment above.
            default:
                // If we have a generic context parameter, then we should have
                // one of the two options flags handled above.
                assert(false);
        }
        // Report the context slot as a caller-SP-relative offset.
        const int offset = compiler->lvaToCallerSPRelativeOffset(compiler->lvaCachedGenericContextArgOffset(),
                                                                 compiler->isFramePointerUsed());
#ifdef DEBUG
        if (compiler->opts.IsOSR())
        {
            // Sanity check the offset vs saved patchpoint info.
            //
            const PatchpointInfo* const ppInfo = compiler->info.compPatchpointInfo;
#if defined(TARGET_AMD64)
            // PP info has FP relative offset, to get to caller SP we need to
            // subtract off 2 register slots (saved FP, saved RA).
            //
            const int osrOffset = ppInfo->GenericContextArgOffset() - 2 * REGSIZE_BYTES;
            assert(offset == osrOffset);
#elif defined(TARGET_ARM64)
            // PP info has virtual offset. This is also the caller SP offset.
            //
            const int osrOffset = ppInfo->GenericContextArgOffset();
            assert(offset == osrOffset);
#endif
        }
#endif
        gcInfoEncoderWithLog->SetGenericsInstContextStackSlot(offset, ctxtParamType);
    }
    // As discussed above, handle the case where the generics context is obtained via
    // the method table of "this".
    else if (compiler->lvaKeepAliveAndReportThis())
    {
        assert(compiler->info.compThisArg != BAD_VAR_NUM);
        // OSR can report the root method's frame slot, if that method reported context.
        // If not, the OSR frame will have saved the needed context.
        //
        bool useRootFrameSlot = true;
        if (compiler->opts.IsOSR())
        {
            const PatchpointInfo* const ppInfo = compiler->info.compPatchpointInfo;
            useRootFrameSlot = ppInfo->HasKeptAliveThis();
        }
        const int offset = compiler->lvaToCallerSPRelativeOffset(compiler->lvaCachedGenericContextArgOffset(),
                                                                 compiler->isFramePointerUsed(), useRootFrameSlot);
#ifdef DEBUG
        if (compiler->opts.IsOSR() && useRootFrameSlot)
        {
            // Sanity check the offset vs saved patchpoint info.
            //
            const PatchpointInfo* const ppInfo = compiler->info.compPatchpointInfo;
#if defined(TARGET_AMD64)
            // PP info has FP relative offset, to get to caller SP we need to
            // subtract off 2 register slots (saved FP, saved RA).
            //
            const int osrOffset = ppInfo->KeptAliveThisOffset() - 2 * REGSIZE_BYTES;
            assert(offset == osrOffset);
#elif defined(TARGET_ARM64)
            // PP info has virtual offset. This is also the caller SP offset.
            //
            const int osrOffset = ppInfo->KeptAliveThisOffset();
            assert(offset == osrOffset);
#endif
        }
#endif
        gcInfoEncoderWithLog->SetGenericsInstContextStackSlot(offset, GENERIC_CONTEXTPARAM_THIS);
    }
    if (compiler->getNeedsGSSecurityCookie())
    {
        assert(compiler->lvaGSSecurityCookie != BAD_VAR_NUM);
        // The lv offset is FP-relative, and the using code expects caller-sp relative, so translate.
        const int offset = compiler->lvaGetCallerSPRelativeOffset(compiler->lvaGSSecurityCookie);
        // The code offset ranges assume that the GS Cookie slot is initialized in the prolog, and is valid
        // through the remainder of the method. We will not query for the GS Cookie while we're in an epilog,
        // so the question of where in the epilog it becomes invalid is moot.
        gcInfoEncoderWithLog->SetGSCookieStackSlot(offset, prologSize, methodSize);
    }
    else if (compiler->lvaReportParamTypeArg() || compiler->lvaKeepAliveAndReportThis())
    {
        // No GS cookie, but we still need the prolog size so the runtime knows
        // when the reported context/this slots become valid.
        gcInfoEncoderWithLog->SetPrologSize(prologSize);
    }
#if defined(FEATURE_EH_FUNCLETS)
    if (compiler->lvaPSPSym != BAD_VAR_NUM)
    {
#ifdef TARGET_AMD64
        // The PSPSym is relative to InitialSP on X64 and CallerSP on other platforms.
        gcInfoEncoderWithLog->SetPSPSymStackSlot(compiler->lvaGetInitialSPRelativeOffset(compiler->lvaPSPSym));
#else // !TARGET_AMD64
        gcInfoEncoderWithLog->SetPSPSymStackSlot(compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym));
#endif // !TARGET_AMD64
    }
#ifdef TARGET_AMD64
    if (compiler->ehAnyFunclets())
    {
        // Set this to avoid double-reporting the parent frame (unlike JIT64)
        gcInfoEncoderWithLog->SetWantsReportOnlyLeaf();
    }
#endif // TARGET_AMD64
#endif // FEATURE_EH_FUNCLETS
#ifdef TARGET_ARMARCH
    if (compiler->codeGen->GetHasTailCalls())
    {
        gcInfoEncoderWithLog->SetHasTailCalls();
    }
#endif // TARGET_ARMARCH
#if FEATURE_FIXED_OUT_ARGS
    // outgoing stack area size
    gcInfoEncoderWithLog->SetSizeOfStackOutgoingAndScratchArea(compiler->lvaOutgoingArgSpaceSize);
#endif // FEATURE_FIXED_OUT_ARGS
#if DISPLAY_SIZES
    if (compiler->codeGen->GetInterruptible())
    {
        genMethodICnt++;
    }
    else
    {
        genMethodNCnt++;
    }
#endif // DISPLAY_SIZES
}
#if defined(DEBUG) || DUMP_GC_TABLES
#define Encoder GcInfoEncoderWithLogging
#else
#define Encoder GcInfoEncoder
#endif
// Small helper class to handle the No-GC-Interrupt callbacks
// when reporting interruptible ranges.
//
// Encoder should be either GcInfoEncoder or GcInfoEncoderWithLogging
//
struct InterruptibleRangeReporter
{
    unsigned prevStart;
    Encoder* gcInfoEncoderWithLog;
    InterruptibleRangeReporter(unsigned _prevStart, Encoder* _gcInfo)
        : prevStart(_prevStart), gcInfoEncoderWithLog(_gcInfo)
    {
    }
    // Invoked for each insGroup marked IGF_NOGCINTERRUPT (currently just
    // prologs and epilogs). Reports everything between the end of the previous
    // no-GC region and the start of this one as an interruptible range.
    bool operator()(unsigned igFuncIdx, unsigned igOffs, unsigned igSize)
    {
        if (igOffs < prevStart)
        {
            // Still inside the main method prolog, whose interruptible range
            // has already been reported.
            assert(igFuncIdx == 0);
            assert(igOffs + igSize <= prevStart);
            return true;
        }
        assert(igOffs >= prevStart);
        const unsigned gapSize = igOffs - prevStart;
        if (gapSize != 0)
        {
            gcInfoEncoderWithLog->DefineInterruptibleRange(prevStart, gapSize);
        }
        // The next interruptible region can begin only after this no-GC group.
        prevStart = igOffs + igSize;
        return true;
    }
};
//------------------------------------------------------------------------
// gcMakeRegPtrTable: Report GC pointers in stack slots and registers.
//
// Run twice: first with MAKE_REG_PTR_MODE_ASSIGN_SLOTS to allocate slot ids,
// then with MAKE_REG_PTR_MODE_DO_WORK to report lifetimes/call sites.
//
// Arguments:
//    gcInfoEncoder - encoder accumulating this method's GC info
//    codeSize      - size, in bytes, of the generated code
//    prologSize    - size, in bytes, of the method prolog
//    mode          - which of the two passes this is
//    callCntRef    - in/out call-site count; when untracked slots are not
//                    reported (MinOpts), the count from the first pass is
//                    read back here on the second pass
//
void GCInfo::gcMakeRegPtrTable(
    GcInfoEncoder* gcInfoEncoder, unsigned codeSize, unsigned prologSize, MakeRegPtrMode mode, unsigned* callCntRef)
{
    GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);
    const bool noTrackedGCSlots =
        (compiler->opts.MinOpts() && !compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) &&
         !JitConfig.JitMinOptsTrackGCrefs());
    if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
    {
        m_regSlotMap = new (compiler->getAllocator()) RegSlotMap(compiler->getAllocator());
        m_stackSlotMap = new (compiler->getAllocator()) StackSlotMap(compiler->getAllocator());
    }
    /**************************************************************************
     *
     * Untracked ptr variables
     *
     **************************************************************************
     */
    /* Count&Write untracked locals and non-enregistered args */
    unsigned varNum;
    LclVarDsc* varDsc;
    for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
    {
        if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
        {
            // Field local of a PROMOTION_TYPE_DEPENDENT struct must have been
            // reported through its parent local.
            continue;
        }
        if (varTypeIsGC(varDsc->TypeGet()))
        {
            // Do we have an argument or local variable?
            if (!varDsc->lvIsParam)
            {
                // If it is pinned, it must be an untracked local.
                assert(!varDsc->lvPinned || !varDsc->lvTracked);
                if (varDsc->lvTracked || !varDsc->lvOnFrame)
                {
                    continue;
                }
            }
            else
            {
                // Stack-passed arguments which are not enregistered
                // are always reported in this "untracked stack
                // pointers" section of the GC info even if lvTracked==true
                // Has this argument been fully enregistered?
                CLANG_FORMAT_COMMENT_ANCHOR;
                if (!varDsc->lvOnFrame)
                {
                    // If a CEE_JMP has been used, then we need to report all the arguments
                    // even if they are enregistered, since we will be using this value
                    // in a JMP call. Note that this is subtle as we require that
                    // argument offsets are always fixed up properly even if lvRegister
                    // is set.
                    if (!compiler->compJmpOpUsed)
                    {
                        continue;
                    }
                }
                else
                {
                    if (varDsc->lvIsRegArg && varDsc->lvTracked)
                    {
                        // If this register-passed arg is tracked, then
                        // it has been allocated space near the other
                        // pointer variables and we have accurate life-
                        // time info. It will be reported with
                        // gcVarPtrList in the "tracked-pointer" section.
                        continue;
                    }
                }
            }
            // If we haven't continued to the next variable, we should report this as an untracked local.
            CLANG_FORMAT_COMMENT_ANCHOR;
            GcSlotFlags flags = GC_SLOT_UNTRACKED;
            if (varDsc->TypeGet() == TYP_BYREF)
            {
                // Or in byref_OFFSET_FLAG for 'byref' pointer tracking
                flags = (GcSlotFlags)(flags | GC_SLOT_INTERIOR);
            }
            if (varDsc->lvPinned)
            {
                // Or in pinned_OFFSET_FLAG for 'pinned' pointer tracking
                flags = (GcSlotFlags)(flags | GC_SLOT_PINNED);
            }
            GcStackSlotBase stackSlotBase = GC_SP_REL;
            if (varDsc->lvFramePointerBased)
            {
                stackSlotBase = GC_FRAMEREG_REL;
            }
            if (noTrackedGCSlots)
            {
                // No need to hash/lookup untracked GC refs; just grab a new Slot Id.
                if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
                {
                    gcInfoEncoderWithLog->GetStackSlotId(varDsc->GetStackOffset(), flags, stackSlotBase);
                }
            }
            else
            {
                // Deduplicate slots via the (offset, base, flags) key map.
                StackSlotIdKey sskey(varDsc->GetStackOffset(), (stackSlotBase == GC_FRAMEREG_REL), flags);
                GcSlotId varSlotId;
                if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
                {
                    if (!m_stackSlotMap->Lookup(sskey, &varSlotId))
                    {
                        varSlotId =
                            gcInfoEncoderWithLog->GetStackSlotId(varDsc->GetStackOffset(), flags, stackSlotBase);
                        m_stackSlotMap->Set(sskey, varSlotId);
                    }
                }
            }
        }
        // If this is a TYP_STRUCT, handle its GC pointers.
        // Note that the enregisterable struct types cannot have GC pointers in them.
        if ((varDsc->TypeGet() == TYP_STRUCT) && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
        {
            ClassLayout* layout = varDsc->GetLayout();
            unsigned slots = layout->GetSlotCount();
            // Report each GC-pointer-holding slot of the struct individually.
            for (unsigned i = 0; i < slots; i++)
            {
                if (!layout->IsGCPtr(i))
                {
                    continue;
                }
                int offset = varDsc->GetStackOffset() + i * TARGET_POINTER_SIZE;
#if DOUBLE_ALIGN
                // For genDoubleAlign(), locals are addressed relative to ESP and
                // arguments are addressed relative to EBP.
                if (compiler->genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg)
                    offset += compiler->codeGen->genTotalFrameSize();
#endif
                GcSlotFlags flags = GC_SLOT_UNTRACKED;
                if (layout->GetGCPtrType(i) == TYP_BYREF)
                {
                    flags = (GcSlotFlags)(flags | GC_SLOT_INTERIOR);
                }
                GcStackSlotBase stackSlotBase = GC_SP_REL;
                if (varDsc->lvFramePointerBased)
                {
                    stackSlotBase = GC_FRAMEREG_REL;
                }
                StackSlotIdKey sskey(offset, (stackSlotBase == GC_FRAMEREG_REL), flags);
                GcSlotId varSlotId;
                if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
                {
                    if (!m_stackSlotMap->Lookup(sskey, &varSlotId))
                    {
                        varSlotId = gcInfoEncoderWithLog->GetStackSlotId(offset, flags, stackSlotBase);
                        m_stackSlotMap->Set(sskey, varSlotId);
                    }
                }
            }
        }
    }
    if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
    {
        // Count&Write spill temps that hold pointers.
        assert(compiler->codeGen->regSet.tmpAllFree());
        for (TempDsc* tempItem = compiler->codeGen->regSet.tmpListBeg(); tempItem != nullptr;
             tempItem = compiler->codeGen->regSet.tmpListNxt(tempItem))
        {
            if (varTypeIsGC(tempItem->tdTempType()))
            {
                int offset = tempItem->tdTempOffs();
                GcSlotFlags flags = GC_SLOT_UNTRACKED;
                if (tempItem->tdTempType() == TYP_BYREF)
                {
                    flags = (GcSlotFlags)(flags | GC_SLOT_INTERIOR);
                }
                GcStackSlotBase stackSlotBase = GC_SP_REL;
                if (compiler->isFramePointerUsed())
                {
                    stackSlotBase = GC_FRAMEREG_REL;
                }
                StackSlotIdKey sskey(offset, (stackSlotBase == GC_FRAMEREG_REL), flags);
                GcSlotId varSlotId;
                if (!m_stackSlotMap->Lookup(sskey, &varSlotId))
                {
                    varSlotId = gcInfoEncoderWithLog->GetStackSlotId(offset, flags, stackSlotBase);
                    m_stackSlotMap->Set(sskey, varSlotId);
                }
            }
        }
        if (compiler->lvaKeepAliveAndReportThis())
        {
            // We need to report the cached copy as an untracked pointer
            assert(compiler->info.compThisArg != BAD_VAR_NUM);
            assert(!compiler->lvaReportParamTypeArg());
            GcSlotFlags flags = GC_SLOT_UNTRACKED;
            if (compiler->lvaTable[compiler->info.compThisArg].TypeGet() == TYP_BYREF)
            {
                // Or in GC_SLOT_INTERIOR for 'byref' pointer tracking
                flags = (GcSlotFlags)(flags | GC_SLOT_INTERIOR);
            }
            GcStackSlotBase stackSlotBase = compiler->isFramePointerUsed() ? GC_FRAMEREG_REL : GC_SP_REL;
            gcInfoEncoderWithLog->GetStackSlotId(compiler->lvaCachedGenericContextArgOffset(), flags, stackSlotBase);
        }
    }
    // Generate the table of tracked stack pointer variable lifetimes.
    gcMakeVarPtrTable(gcInfoEncoder, mode);
    /**************************************************************************
     *
     * Prepare to generate the pointer register/argument map
     *
     **************************************************************************
     */
    if (compiler->codeGen->GetInterruptible())
    {
        // Fully-interruptible: report every register/arg state change.
        assert(compiler->IsFullPtrRegMapRequired());
        regMaskSmall ptrRegs = 0;
        regPtrDsc* regStackArgFirst = nullptr;
        // Walk the list of pointer register/argument entries.
        for (regPtrDsc* genRegPtrTemp = gcRegPtrList; genRegPtrTemp != nullptr; genRegPtrTemp = genRegPtrTemp->rpdNext)
        {
            if (genRegPtrTemp->rpdArg)
            {
                if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_KILL)
                {
                    // Kill all arguments for a call
                    if ((mode == MAKE_REG_PTR_MODE_DO_WORK) && (regStackArgFirst != nullptr))
                    {
                        // Record any outgoing arguments as becoming dead
                        gcInfoRecordGCStackArgsDead(gcInfoEncoder, genRegPtrTemp->rpdOffs, regStackArgFirst,
                                                    genRegPtrTemp);
                    }
                    regStackArgFirst = nullptr;
                }
                else if (genRegPtrTemp->rpdGCtypeGet() != GCT_NONE)
                {
                    if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_PUSH || (genRegPtrTemp->rpdPtrArg != 0))
                    {
                        bool isPop = genRegPtrTemp->rpdArgTypeGet() == rpdARG_POP;
                        assert(!isPop);
                        gcInfoRecordGCStackArgLive(gcInfoEncoder, mode, genRegPtrTemp);
                        if (regStackArgFirst == nullptr)
                        {
                            regStackArgFirst = genRegPtrTemp;
                        }
                    }
                    else
                    {
                        // We know it's a POP. Sometimes we'll record a POP for a call, just to make sure
                        // the call site is recorded.
                        // This is just the negation of the condition:
                        assert(genRegPtrTemp->rpdArgTypeGet() == rpdARG_POP && genRegPtrTemp->rpdPtrArg == 0);
                        // This asserts that we only get here when we're recording a call site.
                        assert(genRegPtrTemp->rpdArg && genRegPtrTemp->rpdIsCallInstr());
                        // Kill all arguments for a call
                        if ((mode == MAKE_REG_PTR_MODE_DO_WORK) && (regStackArgFirst != nullptr))
                        {
                            // Record any outgoing arguments as becoming dead
                            gcInfoRecordGCStackArgsDead(gcInfoEncoder, genRegPtrTemp->rpdOffs, regStackArgFirst,
                                                        genRegPtrTemp);
                        }
                        regStackArgFirst = nullptr;
                    }
                }
            }
            else
            {
                // Record any registers that are becoming dead.
                regMaskSmall regMask = genRegPtrTemp->rpdCompiler.rpdDel & ptrRegs;
                regMaskSmall byRefMask = 0;
                if (genRegPtrTemp->rpdGCtypeGet() == GCT_BYREF)
                {
                    byRefMask = regMask;
                }
                gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, genRegPtrTemp->rpdOffs, regMask, GC_SLOT_DEAD,
                                             byRefMask, &ptrRegs);
                // Record any registers that are becoming live.
                regMask = genRegPtrTemp->rpdCompiler.rpdAdd & ~ptrRegs;
                byRefMask = 0;
                // As far as I (DLD, 2010) can tell, there's one GCtype for the entire genRegPtrTemp, so if
                // it says byref then all the registers in "regMask" contain byrefs.
                if (genRegPtrTemp->rpdGCtypeGet() == GCT_BYREF)
                {
                    byRefMask = regMask;
                }
                gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, genRegPtrTemp->rpdOffs, regMask, GC_SLOT_LIVE,
                                             byRefMask, &ptrRegs);
            }
        }
        // Now we can declare the entire method body fully interruptible.
        if (mode == MAKE_REG_PTR_MODE_DO_WORK)
        {
            assert(prologSize <= codeSize);
            // Now exempt any other region marked as IGF_NOGCINTERRUPT
            // Currently just prologs and epilogs.
            InterruptibleRangeReporter reporter(prologSize, gcInfoEncoderWithLog);
            compiler->GetEmitter()->emitGenNoGCLst(reporter);
            prologSize = reporter.prevStart;
            // Report any remainder
            if (prologSize < codeSize)
            {
                gcInfoEncoderWithLog->DefineInterruptibleRange(prologSize, codeSize - prologSize);
            }
        }
    }
    else if (compiler->isFramePointerUsed()) // GetInterruptible() is false, and we're using EBP as a frame pointer.
    {
        assert(compiler->IsFullPtrRegMapRequired() == false);
        // Walk the list of pointer register/argument entries.
        // First count them.
        unsigned numCallSites = 0;
        // Now we can allocate the information.
        unsigned* pCallSites = nullptr;
        BYTE* pCallSiteSizes = nullptr;
        unsigned callSiteNum = 0;
        if (mode == MAKE_REG_PTR_MODE_DO_WORK)
        {
            if (gcCallDescList != nullptr)
            {
                if (noTrackedGCSlots)
                {
                    // We have the call count from the previous run.
                    numCallSites = *callCntRef;
                    // If there are no calls, tell the world and bail.
                    if (numCallSites == 0)
                    {
                        gcInfoEncoderWithLog->DefineCallSites(nullptr, nullptr, 0);
                        return;
                    }
                }
                else
                {
                    for (CallDsc* call = gcCallDescList; call != nullptr; call = call->cdNext)
                    {
                        numCallSites++;
                    }
                }
                pCallSites = new (compiler, CMK_GC) unsigned[numCallSites];
                pCallSiteSizes = new (compiler, CMK_GC) BYTE[numCallSites];
            }
        }
        // Now consider every call.
        for (CallDsc* call = gcCallDescList; call != nullptr; call = call->cdNext)
        {
            // Figure out the code offset of this entry.
            unsigned nextOffset = call->cdOffs;
            // As far as I (DLD, 2010) can determine by asking around, the "call->u1.cdArgMask"
            // and "cdArgCnt" cases are to handle x86 situations in which a call expression is nested as an
            // argument to an outer call. The "natural" (evaluation-order-preserving) thing to do is to
            // evaluate the outer call's arguments, pushing those that are not enregistered, until you
            // encounter the nested call. These parts of the call description, then, describe the "pending"
            // pushed arguments. This situation does not exist outside of x86, where we're going to use a
            // fixed-size stack frame: in situations like this nested call, we would evaluate the pending
            // arguments to temporaries, and only "push" them (really, write them to the outgoing argument section
            // of the stack frame) when it's the outer call's "turn." So we can assert that these
            // situations never occur.
            assert(call->u1.cdArgMask == 0 && call->cdArgCnt == 0);
            // Other than that, we just have to deal with the regmasks.
            regMaskSmall gcrefRegMask = call->cdGCrefRegs & RBM_CALLEE_SAVED;
            regMaskSmall byrefRegMask = call->cdByrefRegs & RBM_CALLEE_SAVED;
            assert((gcrefRegMask & byrefRegMask) == 0);
            regMaskSmall regMask = gcrefRegMask | byrefRegMask;
            assert(call->cdOffs >= call->cdCallInstrSize);
            // call->cdOffs is actually the offset of the instruction *following* the call, so subtract
            // the call instruction size to get the offset of the actual call instruction...
            unsigned callOffset = nextOffset - call->cdCallInstrSize;
            if (noTrackedGCSlots && regMask == 0)
            {
                // No live GC refs in regs at the call -> don't record the call.
            }
            else
            {
                // Append an entry for the call if doing the real thing.
                if (mode == MAKE_REG_PTR_MODE_DO_WORK)
                {
                    pCallSites[callSiteNum] = callOffset;
                    pCallSiteSizes[callSiteNum] = call->cdCallInstrSize;
                }
                callSiteNum++;
                // Record that these registers are live before the call...
                gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, callOffset, regMask, GC_SLOT_LIVE, byrefRegMask,
                                             nullptr);
                // ...and dead after.
                gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, nextOffset, regMask, GC_SLOT_DEAD, byrefRegMask,
                                             nullptr);
            }
        }
        // Make sure we've recorded the expected number of calls
        assert(mode != MAKE_REG_PTR_MODE_DO_WORK || numCallSites == callSiteNum);
        // Return the actual recorded call count to the caller
        *callCntRef = callSiteNum;
        // OK, define the call sites.
        if (mode == MAKE_REG_PTR_MODE_DO_WORK)
        {
            gcInfoEncoderWithLog->DefineCallSites(pCallSites, pCallSiteSizes, numCallSites);
        }
    }
    else // GetInterruptible() is false and we have an EBP-less frame
    {
        assert(compiler->IsFullPtrRegMapRequired());
        // Walk the list of pointer register/argument entries */
        // First count them.
        unsigned numCallSites = 0;
        // Now we can allocate the information (if we're in the "DO_WORK" pass...)
        unsigned* pCallSites = nullptr;
        BYTE* pCallSiteSizes = nullptr;
        unsigned callSiteNum = 0;
        if (mode == MAKE_REG_PTR_MODE_DO_WORK)
        {
            for (regPtrDsc* genRegPtrTemp = gcRegPtrList; genRegPtrTemp != nullptr;
                 genRegPtrTemp = genRegPtrTemp->rpdNext)
            {
                if (genRegPtrTemp->rpdArg && genRegPtrTemp->rpdIsCallInstr())
                {
                    numCallSites++;
                }
            }
            if (numCallSites > 0)
            {
                pCallSites = new (compiler, CMK_GC) unsigned[numCallSites];
                pCallSiteSizes = new (compiler, CMK_GC) BYTE[numCallSites];
            }
        }
        for (regPtrDsc* genRegPtrTemp = gcRegPtrList; genRegPtrTemp != nullptr; genRegPtrTemp = genRegPtrTemp->rpdNext)
        {
            if (genRegPtrTemp->rpdArg)
            {
                // Is this a call site?
                if (genRegPtrTemp->rpdIsCallInstr())
                {
                    // This is a true call site.
                    regMaskSmall gcrefRegMask = genRegMaskFromCalleeSavedMask(genRegPtrTemp->rpdCallGCrefRegs);
                    regMaskSmall byrefRegMask = genRegMaskFromCalleeSavedMask(genRegPtrTemp->rpdCallByrefRegs);
                    assert((gcrefRegMask & byrefRegMask) == 0);
                    regMaskSmall regMask = gcrefRegMask | byrefRegMask;
                    // The "rpdOffs" is (apparently) the offset of the following instruction already.
                    // GcInfoEncoder wants the call instruction, so subtract the width of the call instruction.
                    assert(genRegPtrTemp->rpdOffs >= genRegPtrTemp->rpdCallInstrSize);
                    unsigned callOffset = genRegPtrTemp->rpdOffs - genRegPtrTemp->rpdCallInstrSize;
                    // Tell the GCInfo encoder about these registers. We say that the registers become live
                    // before the call instruction, and dead after.
                    gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, callOffset, regMask, GC_SLOT_LIVE, byrefRegMask,
                                                 nullptr);
                    gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, genRegPtrTemp->rpdOffs, regMask, GC_SLOT_DEAD,
                                                 byrefRegMask, nullptr);
                    // Also remember the call site.
                    if (mode == MAKE_REG_PTR_MODE_DO_WORK)
                    {
                        assert(pCallSites != nullptr && pCallSiteSizes != nullptr);
                        pCallSites[callSiteNum] = callOffset;
                        pCallSiteSizes[callSiteNum] = genRegPtrTemp->rpdCallInstrSize;
                        callSiteNum++;
                    }
                }
                else
                {
                    // These are reporting outgoing stack arguments, but we don't need to report anything
                    // for partially interruptible
                    assert(genRegPtrTemp->rpdGCtypeGet() != GCT_NONE);
                    assert(genRegPtrTemp->rpdArgTypeGet() == rpdARG_PUSH);
                }
            }
        }
        // The routine is fully interruptible.
        if (mode == MAKE_REG_PTR_MODE_DO_WORK)
        {
            gcInfoEncoderWithLog->DefineCallSites(pCallSites, pCallSiteSizes, numCallSites);
        }
    }
}
//------------------------------------------------------------------------
// gcInfoRecordGCRegStateChange: Report a set of registers becoming live or
// dead at a given code offset.
//
// Arguments:
//    gcInfoEncoder - encoder accumulating this method's GC info
//    mode          - first pass (assign slot ids) or second pass (report state)
//    instrOffset   - code offset at which the state change takes effect
//    regMask       - registers whose GC state changes
//    newState      - GC_SLOT_LIVE or GC_SLOT_DEAD
//    byRefMask     - subset of regMask holding byrefs (must be a subset)
//    pPtrRegs      - if non-null, running live-GC-register set to update
//
void GCInfo::gcInfoRecordGCRegStateChange(GcInfoEncoder* gcInfoEncoder,
                                          MakeRegPtrMode mode,
                                          unsigned instrOffset,
                                          regMaskSmall regMask,
                                          GcSlotState newState,
                                          regMaskSmall byRefMask,
                                          regMaskSmall* pPtrRegs)
{
    // Precondition: byRefMask is a subset of regMask.
    assert((byRefMask & ~regMask) == 0);
    GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);
    // Process one register per iteration, lowest set bit first.
    while (regMask)
    {
        // Get hold of the next register bit.
        regMaskTP tmpMask = genFindLowestReg(regMask);
        assert(tmpMask);
        // Remember the new state of this register.
        if (pPtrRegs != nullptr)
        {
            if (newState == GC_SLOT_DEAD)
            {
                *pPtrRegs &= ~tmpMask;
            }
            else
            {
                *pPtrRegs |= tmpMask;
            }
        }
        // Figure out which register the next bit corresponds to.
        regNumber regNum = genRegNumFromMask(tmpMask);
        /* Reserve SP future use */
        assert(regNum != REG_SPBASE);
        GcSlotFlags regFlags = GC_SLOT_BASE;
        if ((tmpMask & byRefMask) != 0)
        {
            regFlags = (GcSlotFlags)(regFlags | GC_SLOT_INTERIOR);
        }
        RegSlotIdKey rskey(regNum, regFlags);
        GcSlotId regSlotId;
        if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
        {
            if (!m_regSlotMap->Lookup(rskey, &regSlotId))
            {
                regSlotId = gcInfoEncoderWithLog->GetRegisterSlotId(regNum, regFlags);
                m_regSlotMap->Set(rskey, regSlotId);
            }
        }
        else
        {
            bool b = m_regSlotMap->Lookup(rskey, &regSlotId);
            assert(b); // Should have been added in the first pass.
            gcInfoEncoderWithLog->SetSlotState(instrOffset, regSlotId, newState);
        }
        // Turn the bit we've just generated off and continue.
        regMask -= tmpMask; // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
    }
}
/**************************************************************************
*
* gcMakeVarPtrTable - Generate the table of tracked stack pointer
* variable lifetimes.
*
* In the first pass we'll allocate slot Ids
* In the second pass we actually generate the lifetimes.
*
**************************************************************************
*/
//------------------------------------------------------------------------
// gcMakeVarPtrTable: Generate the table of tracked stack pointer variable
// lifetimes (slots assigned on the first pass, lifetimes on the second).
//
// Arguments:
//    gcInfoEncoder - encoder accumulating this method's GC info
//    mode          - MAKE_REG_PTR_MODE_ASSIGN_SLOTS or _DO_WORK
//
void GCInfo::gcMakeVarPtrTable(GcInfoEncoder* gcInfoEncoder, MakeRegPtrMode mode)
{
    GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);
    // Make sure any flags we hide in the offset are in the bits guaranteed
    // unused by alignment
    C_ASSERT((OFFSET_MASK + 1) <= sizeof(int));
#ifdef DEBUG
    if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
    {
        // Tracked variables can't be pinned, and the encoding takes
        // advantage of that by using the same bit for 'pinned' and 'this'
        // Since we don't track 'this', we should never see either flag here.
        // Check it now before we potentially add some pinned flags.
        for (varPtrDsc* varTmp = gcVarPtrList; varTmp != nullptr; varTmp = varTmp->vpdNext)
        {
            const unsigned flags = varTmp->vpdVarNum & OFFSET_MASK;
            assert((flags & pinned_OFFSET_FLAG) == 0);
            assert((flags & this_OFFSET_FLAG) == 0);
        }
    }
#endif // DEBUG
    // Only need to do this once, and only if we have EH.
    if ((mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS) && compiler->ehAnyFunclets())
    {
        gcMarkFilterVarsPinned();
    }
    for (varPtrDsc* varTmp = gcVarPtrList; varTmp != nullptr; varTmp = varTmp->vpdNext)
    {
        C_ASSERT((OFFSET_MASK + 1) <= sizeof(int));
        // Get hold of the variable's stack offset.
        unsigned lowBits = varTmp->vpdVarNum & OFFSET_MASK;
        // For negative stack offsets we must reset the low bits
        int varOffs = static_cast<int>(varTmp->vpdVarNum & ~OFFSET_MASK);
        // Compute the actual lifetime offsets.
        unsigned begOffs = varTmp->vpdBegOfs;
        unsigned endOffs = varTmp->vpdEndOfs;
        // Special case: skip any 0-length lifetimes.
        if (endOffs == begOffs)
        {
            continue;
        }
        // Translate the hidden low-bit flags into encoder slot flags.
        GcSlotFlags flags = GC_SLOT_BASE;
        if ((lowBits & byref_OFFSET_FLAG) != 0)
        {
            flags = (GcSlotFlags)(flags | GC_SLOT_INTERIOR);
        }
        if ((lowBits & pinned_OFFSET_FLAG) != 0)
        {
            flags = (GcSlotFlags)(flags | GC_SLOT_PINNED);
        }
        GcStackSlotBase stackSlotBase = GC_SP_REL;
        if (compiler->isFramePointerUsed())
        {
            stackSlotBase = GC_FRAMEREG_REL;
        }
        StackSlotIdKey sskey(varOffs, (stackSlotBase == GC_FRAMEREG_REL), flags);
        GcSlotId varSlotId;
        if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
        {
            if (!m_stackSlotMap->Lookup(sskey, &varSlotId))
            {
                varSlotId = gcInfoEncoderWithLog->GetStackSlotId(varOffs, flags, stackSlotBase);
                m_stackSlotMap->Set(sskey, varSlotId);
            }
        }
        else
        {
            bool b = m_stackSlotMap->Lookup(sskey, &varSlotId);
            assert(b); // Should have been added in the first pass.
            // Live from the beginning to the end.
            gcInfoEncoderWithLog->SetSlotState(begOffs, varSlotId, GC_SLOT_LIVE);
            gcInfoEncoderWithLog->SetSlotState(endOffs, varSlotId, GC_SLOT_DEAD);
        }
    }
}
//------------------------------------------------------------------------
// gcInfoRecordGCStackArgLive: Report a GC pointer written into the outgoing
// argument area as becoming live at its write offset.
//
// Arguments:
//    gcInfoEncoder - encoder accumulating this method's GC info
//    mode          - first pass (assign slot ids) or second pass (report)
//    genStackPtr   - the rpdARG_PUSH record describing the outgoing arg write
//
void GCInfo::gcInfoRecordGCStackArgLive(GcInfoEncoder* gcInfoEncoder, MakeRegPtrMode mode, regPtrDsc* genStackPtr)
{
    // On non-x86 platforms, don't have pointer argument push/pop/kill declarations.
    // But we use the same mechanism to record writes into the outgoing argument space...
    assert(genStackPtr->rpdGCtypeGet() != GCT_NONE);
    assert(genStackPtr->rpdArg);
    assert(genStackPtr->rpdArgTypeGet() == rpdARG_PUSH);
    // We only need to report these when we're doing fully-interruptible
    assert(compiler->codeGen->GetInterruptible());
    GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);
    StackSlotIdKey sskey(genStackPtr->rpdPtrArg, false,
                         GcSlotFlags(genStackPtr->rpdGCtypeGet() == GCT_BYREF ? GC_SLOT_INTERIOR : GC_SLOT_BASE));
    GcSlotId varSlotId;
    if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
    {
        if (!m_stackSlotMap->Lookup(sskey, &varSlotId))
        {
            varSlotId = gcInfoEncoderWithLog->GetStackSlotId(sskey.m_offset, (GcSlotFlags)sskey.m_flags, GC_SP_REL);
            m_stackSlotMap->Set(sskey, varSlotId);
        }
    }
    else
    {
        bool b = m_stackSlotMap->Lookup(sskey, &varSlotId);
        assert(b); // Should have been added in the first pass.
        // Live until the call.
        gcInfoEncoderWithLog->SetSlotState(genStackPtr->rpdOffs, varSlotId, GC_SLOT_LIVE);
    }
}
//------------------------------------------------------------------------
// gcInfoRecordGCStackArgsDead: Report outgoing stack arguments as dead
// after the call that consumes them.
//
// Arguments:
//    gcInfoEncoder    - encoder accumulating this method's GC info
//    instrOffset      - code offset of the call (point of death)
//    genStackPtrFirst - first outgoing-arg record pushed for this call
//    genStackPtrLast  - the call record itself (exclusive end of the walk)
//
void GCInfo::gcInfoRecordGCStackArgsDead(GcInfoEncoder* gcInfoEncoder,
                                         unsigned instrOffset,
                                         regPtrDsc* genStackPtrFirst,
                                         regPtrDsc* genStackPtrLast)
{
    // After a call all of the outgoing arguments are marked as dead.
    // The calling loop keeps track of the first argument pushed for this call
    // and passes it in as genStackPtrFirst.
    // genStackPtrLast is the call.
    // Re-walk that list and mark all outgoing arguments that were marked as live
    // earlier, as going dead after the call.
    // We only need to report these when we're doing fully-interruptible
    assert(compiler->codeGen->GetInterruptible());
    GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);
    for (regPtrDsc* genRegPtrTemp = genStackPtrFirst; genRegPtrTemp != genStackPtrLast;
         genRegPtrTemp = genRegPtrTemp->rpdNext)
    {
        if (!genRegPtrTemp->rpdArg)
        {
            continue;
        }
        assert(genRegPtrTemp->rpdGCtypeGet() != GCT_NONE);
        assert(genRegPtrTemp->rpdArgTypeGet() == rpdARG_PUSH);
        StackSlotIdKey sskey(genRegPtrTemp->rpdPtrArg, false,
                             genRegPtrTemp->rpdGCtypeGet() == GCT_BYREF ? GC_SLOT_INTERIOR : GC_SLOT_BASE);
        GcSlotId varSlotId;
        bool b = m_stackSlotMap->Lookup(sskey, &varSlotId);
        assert(b); // Should have been added in the first pass.
        // Live until the call.
        gcInfoEncoderWithLog->SetSlotState(instrOffset, varSlotId, GC_SLOT_DEAD);
    }
}
#undef GCENCODER_WITH_LOGGING
#endif // !JIT32_GCENCODER
/*****************************************************************************/
/*****************************************************************************/
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GCEncode XX
XX XX
XX Logic to encode the JIT method header and GC pointer tables XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#pragma warning(disable : 4244) // loss of data int -> char ..
#endif
#include "gcinfotypes.h"
#include "patchpointinfo.h"
// Maps a CorInfoGCType to the corresponding ReturnKind used in the GC info.
// TYP_GC_OTHER is unexpected and yields RT_Illegal (with a debug assert).
ReturnKind GCTypeToReturnKind(CorInfoGCType gcType)
{
    if (gcType == TYPE_GC_NONE)
    {
        return RT_Scalar;
    }
    if (gcType == TYPE_GC_REF)
    {
        return RT_Object;
    }
    if (gcType == TYPE_GC_BYREF)
    {
        return RT_ByRef;
    }
    _ASSERTE(!"TYP_GC_OTHER is unexpected");
    return RT_Illegal;
}
//------------------------------------------------------------------------
// getReturnKind: Classify the method's return value for GC reporting.
//
// Return Value:
//    RT_Object / RT_ByRef for GC pointer returns; struct returns are
//    classified from the struct's GC layout (multi-reg returns combine the
//    kinds of the two pointer-sized pieces); RT_Float on x86 for FP returns;
//    RT_Scalar otherwise.
//
ReturnKind GCInfo::getReturnKind()
{
    switch (compiler->info.compRetType)
    {
        case TYP_REF:
            return RT_Object;
        case TYP_BYREF:
            return RT_ByRef;
        case TYP_STRUCT:
        {
            CORINFO_CLASS_HANDLE structType = compiler->info.compMethodInfo->args.retTypeClass;
            var_types retType = compiler->getReturnTypeForStruct(structType, compiler->info.compCallConv);
            switch (retType)
            {
                case TYP_REF:
                    return RT_Object;
                case TYP_BYREF:
                    return RT_ByRef;
                case TYP_STRUCT:
                    if (compiler->IsHfa(structType))
                    {
#ifdef TARGET_X86
                        _ASSERTE(false && "HFAs not expected for X86");
#endif // TARGET_X86
                        // HFAs are returned in FP registers; no GC pointers involved.
                        return RT_Scalar;
                    }
                    else
                    {
                        // Multi-reg return: classify each pointer-sized piece from
                        // the struct's GC layout and combine the two kinds.
                        BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
                        compiler->info.compCompHnd->getClassGClayout(structType, gcPtrs);
                        ReturnKind first = GCTypeToReturnKind((CorInfoGCType)gcPtrs[0]);
                        ReturnKind second = GCTypeToReturnKind((CorInfoGCType)gcPtrs[1]);
                        return GetStructReturnKind(first, second);
                    }
#ifdef TARGET_X86
                case TYP_FLOAT:
                case TYP_DOUBLE:
                    return RT_Float;
#endif // TARGET_X86
                default:
                    return RT_Scalar;
            }
        }
#ifdef TARGET_X86
        case TYP_FLOAT:
        case TYP_DOUBLE:
            return RT_Float;
#endif // TARGET_X86
        default:
            return RT_Scalar;
    }
}
#if !defined(JIT32_GCENCODER) || defined(FEATURE_EH_FUNCLETS)
// gcMarkFilterVarsPinned - Walk all lifetimes and make it so that anything
// live in a filter is marked as pinned (often by splitting the lifetime
// so that *only* the filter region is pinned). This should only be
// called once (after generating all lifetimes, but before slot ids are
// finalized).
//
// DevDiv 376329 - The VM has to double report filters and their parent frame
// because they occur during the 1st pass and the parent frame doesn't go dead
// until we start unwinding in the 2nd pass.
//
// Untracked locals will only be reported in non-filter funclets and the
// parent.
// Registers can't be double reported by 2 frames since they're different.
// That just leaves stack variables which might be double reported.
//
// Technically double reporting is only a problem when the GC has to relocate a
// reference. So we avoid that problem by marking all live tracked stack
// variables as pinned inside the filter. Thus if they are double reported, it
// won't be a problem since they won't be double relocated.
//
void GCInfo::gcMarkFilterVarsPinned()
{
    assert(compiler->ehAnyFunclets());

    for (EHblkDsc* const HBtab : EHClauses(compiler))
    {
        if (HBtab->HasFilter())
        {
            // Code offsets of the filter region: [filterBeg, filterEnd).
            const UNATIVE_OFFSET filterBeg = compiler->ehCodeOffset(HBtab->ebdFilter);
            const UNATIVE_OFFSET filterEnd = compiler->ehCodeOffset(HBtab->ebdHndBeg);

            for (varPtrDsc* varTmp = gcVarPtrList; varTmp != nullptr; varTmp = varTmp->vpdNext)
            {
                // Get hold of the variable's flags.
                const unsigned lowBits = varTmp->vpdVarNum & OFFSET_MASK;

                // Compute the actual lifetime offsets.
                const unsigned begOffs = varTmp->vpdBegOfs;
                const unsigned endOffs = varTmp->vpdEndOfs;

                // Special case: skip any 0-length lifetimes.
                if (endOffs == begOffs)
                {
                    continue;
                }

                // Skip lifetimes with no overlap with the filter
                if ((endOffs <= filterBeg) || (begOffs >= filterEnd))
                {
                    continue;
                }

#ifndef JIT32_GCENCODER
                // Because there is no nesting within filters, nothing
                // should be already pinned.
                // For JIT32_GCENCODER, we should not do this check as gcVarPtrList are always sorted by vpdBegOfs
                // which means that we could see some varPtrDsc that were already pinned by previous splitting.
                assert((lowBits & pinned_OFFSET_FLAG) == 0);
#endif // JIT32_GCENCODER

                if (begOffs < filterBeg)
                {
                    if (endOffs > filterEnd)
                    {
                        // The variable lifetime starts before AND ends after
                        // the filter, so we need to create 2 new lifetimes:
                        //     (1) a pinned one for the filter
                        //     (2) a regular one for after the filter
                        // and then adjust the original lifetime to end before
                        // the filter.
                        CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef DEBUG
                        if (compiler->verbose)
                        {
                            printf("Splitting lifetime for filter: [%04X, %04X).\nOld: ", filterBeg, filterEnd);
                            gcDumpVarPtrDsc(varTmp);
                        }
#endif // DEBUG

                        varPtrDsc* desc1 = new (compiler, CMK_GC) varPtrDsc;
                        desc1->vpdVarNum = varTmp->vpdVarNum | pinned_OFFSET_FLAG;
                        desc1->vpdBegOfs = filterBeg;
                        desc1->vpdEndOfs = filterEnd;

                        varPtrDsc* desc2 = new (compiler, CMK_GC) varPtrDsc;
                        desc2->vpdVarNum = varTmp->vpdVarNum;
                        desc2->vpdBegOfs = filterEnd;
                        desc2->vpdEndOfs = endOffs;

                        varTmp->vpdEndOfs = filterBeg;

                        gcInsertVarPtrDscSplit(desc1, varTmp);
                        gcInsertVarPtrDscSplit(desc2, varTmp);

#ifdef DEBUG
                        if (compiler->verbose)
                        {
                            printf("New (1 of 3): ");
                            gcDumpVarPtrDsc(varTmp);
                            printf("New (2 of 3): ");
                            gcDumpVarPtrDsc(desc1);
                            printf("New (3 of 3): ");
                            gcDumpVarPtrDsc(desc2);
                        }
#endif // DEBUG
                    }
                    else
                    {
                        // The variable lifetime started before the filter and ends
                        // somewhere inside it, so we only create 1 new lifetime,
                        // and then adjust the original lifetime to end before
                        // the filter.
                        CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef DEBUG
                        if (compiler->verbose)
                        {
                            printf("Splitting lifetime for filter.\nOld: ");
                            gcDumpVarPtrDsc(varTmp);
                        }
#endif // DEBUG

                        varPtrDsc* desc = new (compiler, CMK_GC) varPtrDsc;
                        desc->vpdVarNum = varTmp->vpdVarNum | pinned_OFFSET_FLAG;
                        desc->vpdBegOfs = filterBeg;
                        desc->vpdEndOfs = endOffs;

                        varTmp->vpdEndOfs = filterBeg;

                        gcInsertVarPtrDscSplit(desc, varTmp);

#ifdef DEBUG
                        if (compiler->verbose)
                        {
                            printf("New (1 of 2): ");
                            gcDumpVarPtrDsc(varTmp);
                            printf("New (2 of 2): ");
                            gcDumpVarPtrDsc(desc);
                        }
#endif // DEBUG
                    }
                }
                else
                {
                    if (endOffs > filterEnd)
                    {
                        // The variable lifetime starts inside the filter and
                        // ends somewhere after it, so we create 1 new
                        // lifetime for the part inside the filter and adjust
                        // the start of the original lifetime to be the end
                        // of the filter
                        CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef DEBUG
                        if (compiler->verbose)
                        {
                            printf("Splitting lifetime for filter.\nOld: ");
                            gcDumpVarPtrDsc(varTmp);
                        }
#endif // DEBUG

                        varPtrDsc* desc = new (compiler, CMK_GC) varPtrDsc;
#ifndef JIT32_GCENCODER
                        desc->vpdVarNum = varTmp->vpdVarNum | pinned_OFFSET_FLAG;
                        desc->vpdBegOfs = begOffs;
                        desc->vpdEndOfs = filterEnd;

                        varTmp->vpdBegOfs = filterEnd;
#else
                        // Mark varTmp as pinned and generated use varPtrDsc(desc) as non-pinned
                        // since gcInsertVarPtrDscSplit requires that varTmp->vpdBegOfs must precede desc->vpdBegOfs
                        desc->vpdVarNum = varTmp->vpdVarNum;
                        desc->vpdBegOfs = filterEnd;
                        desc->vpdEndOfs = endOffs;

                        varTmp->vpdVarNum = varTmp->vpdVarNum | pinned_OFFSET_FLAG;
                        varTmp->vpdEndOfs = filterEnd;
#endif

                        gcInsertVarPtrDscSplit(desc, varTmp);

#ifdef DEBUG
                        if (compiler->verbose)
                        {
                            printf("New (1 of 2): ");
                            gcDumpVarPtrDsc(desc);
                            printf("New (2 of 2): ");
                            gcDumpVarPtrDsc(varTmp);
                        }
#endif // DEBUG
                    }
                    else
                    {
                        // The variable lifetime is completely within the filter,
                        // so just add the pinned flag.
                        CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef DEBUG
                        if (compiler->verbose)
                        {
                            printf("Pinning lifetime for filter.\nOld: ");
                            gcDumpVarPtrDsc(varTmp);
                        }
#endif // DEBUG

                        varTmp->vpdVarNum |= pinned_OFFSET_FLAG;
#ifdef DEBUG
                        if (compiler->verbose)
                        {
                            printf("New        : ");
                            gcDumpVarPtrDsc(varTmp);
                        }
#endif // DEBUG
                    }
                }
            }
        } // HasFilter
    }     // Foreach EH
}
// gcInsertVarPtrDscSplit - Insert varPtrDsc that were created by splitting lifetimes
// From gcMarkFilterVarsPinned, we may have created one or two `varPtrDsc`s due to splitting lifetimes
// and these newly created `varPtrDsc`s should be inserted in gcVarPtrList.
// However the semantics of this call depend on the architecture.
//
// x86-GCInfo requires gcVarPtrList to be sorted by vpdBegOfs.
// Every time inserting an entry we should keep the order of entries.
// So this function searches for a proper insertion point from "begin" then "desc" gets inserted.
//
// For other architectures(ones that uses GCInfo{En|De}coder), we don't need any sort.
// So the argument "begin" is unused and "desc" will be inserted at the front of the list.
void GCInfo::gcInsertVarPtrDscSplit(varPtrDsc* desc, varPtrDsc* begin)
{
#ifndef JIT32_GCENCODER
    // Order does not matter for the GCInfo{En|De}coder path: simply prepend.
    (void)begin;
    desc->vpdNext = gcVarPtrList;
    gcVarPtrList  = desc;
#else // JIT32_GCENCODER
    // "desc" and "begin" must not be null
    assert(desc != nullptr);
    assert(begin != nullptr);

    // The caller must guarantee that desc's BegOfs is equal or greater than begin's
    // since we will search for insertion point from "begin"
    assert(desc->vpdBegOfs >= begin->vpdBegOfs);

    // Linear scan forward from "begin" for the last node whose vpdBegOfs is
    // still below desc's, so the list stays sorted by vpdBegOfs.
    varPtrDsc* varTmp    = begin->vpdNext;
    varPtrDsc* varInsert = begin;

    while (varTmp != nullptr && varTmp->vpdBegOfs < desc->vpdBegOfs)
    {
        varInsert = varTmp;
        varTmp    = varTmp->vpdNext;
    }

    // Insert point cannot be null
    assert(varInsert != nullptr);

    desc->vpdNext      = varInsert->vpdNext;
    varInsert->vpdNext = desc;
#endif // JIT32_GCENCODER
}
#ifdef DEBUG
// Dump a single stack-variable GC lifetime descriptor to stdout (DEBUG only).
// Decodes the flag bits folded into vpdVarNum and prints the frame-relative
// slot together with the [begin, end) code-offset range of the lifetime.
void GCInfo::gcDumpVarPtrDsc(varPtrDsc* desc)
{
    const int    frameOffs = (desc->vpdVarNum & ~OFFSET_MASK);
    const bool   isByRef   = (desc->vpdVarNum & byref_OFFSET_FLAG) != 0;
    const bool   isPinned  = (desc->vpdVarNum & pinned_OFFSET_FLAG) != 0;
    const GCtype gcType    = isByRef ? GCT_BYREF : GCT_GCREF;
    const char*  baseReg   = compiler->isFramePointerUsed() ? STR_FPBASE : STR_SPBASE;

    printf("[%08X] %s%s var at [%s", dspPtr(desc), GCtypeStr(gcType), isPinned ? "pinned-ptr" : "", baseReg);

    // A zero offset prints no displacement at all.
    if (frameOffs != 0)
    {
        printf((frameOffs < 0) ? "-%02XH" : "+%02XH", (frameOffs < 0) ? -frameOffs : +frameOffs);
    }

    printf("] live from %04X to %04X\n", desc->vpdBegOfs, desc->vpdEndOfs);
}
#endif // DEBUG
#endif // !defined(JIT32_GCENCODER) || defined(FEATURE_EH_FUNCLETS)
#ifdef JIT32_GCENCODER
#include "emit.h"
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
// (see jit.h) #define REGEN_SHORTCUTS 0
// To Regenerate the compressed info header shortcuts, define REGEN_SHORTCUTS
// and use the following command line pipe/filter to give you the 128
// most useful encodings.
//
// find . -name regen.txt | xargs cat | grep InfoHdr | sort | uniq -c | sort -r | head -128
// (see jit.h) #define REGEN_CALLPAT 0
// To Regenerate the compressed info header shortcuts, define REGEN_CALLPAT
// and use the following command line pipe/filter to give you the 80
// most useful encodings.
//
// find . -name regen.txt | xargs cat | grep CallSite | sort | uniq -c | sort -r | head -80
#if REGEN_SHORTCUTS || REGEN_CALLPAT
static FILE* logFile = NULL;
CRITICAL_SECTION logFileLock;
#endif
#if REGEN_CALLPAT
// Append one "CallSite(...)" line to regen.txt describing a call-site
// encoding, used to regenerate the callPatternTable[] shortcuts offline.
// Fields larger than 0xff are clamped to 0xff. Only compiled when
// REGEN_CALLPAT is defined; the log file is lazily opened and guarded by
// a critical section since the JIT may run on multiple threads.
static void regenLog(unsigned codeDelta,
                     unsigned argMask,
                     unsigned regMask,
                     unsigned argCnt,
                     unsigned byrefArgMask,
                     unsigned byrefRegMask,
                     BYTE*    base,
                     unsigned enSize)
{
    CallPattern pat;

    pat.fld.argCnt    = (argCnt < 0xff) ? argCnt : 0xff;
    pat.fld.regMask   = (regMask < 0xff) ? regMask : 0xff;
    pat.fld.argMask   = (argMask < 0xff) ? argMask : 0xff;
    pat.fld.codeDelta = (codeDelta < 0xff) ? codeDelta : 0xff;

    if (logFile == NULL)
    {
        logFile = fopen("regen.txt", "a");
        InitializeCriticalSection(&logFileLock);
    }

    assert(((enSize > 0) && (enSize < 256)) && ((pat.val & 0xffffff) != 0xffffff));

    EnterCriticalSection(&logFileLock);

    fprintf(logFile, "CallSite( 0x%08x, 0x%02x%02x, 0x", pat.val, byrefArgMask, byrefRegMask);

    // Dump the raw encoding bytes as hex.
    while (enSize > 0)
    {
        fprintf(logFile, "%02x", *base++);
        enSize--;
    }
    fprintf(logFile, "),\n");
    fflush(logFile);

    LeaveCriticalSection(&logFileLock);
}
#endif
#if REGEN_SHORTCUTS
// Append one "InfoHdr(...)" line to regen.txt describing a method header,
// used to regenerate the infoHdrShortcut[] table offline. Only compiled
// when REGEN_SHORTCUTS is defined.
static void regenLog(unsigned encoding, InfoHdr* header, InfoHdr* state)
{
    if (logFile == NULL)
    {
        logFile = fopen("regen.txt", "a");
        InitializeCriticalSection(&logFileLock);
    }

    EnterCriticalSection(&logFileLock);

    fprintf(logFile, "InfoHdr( %2d, %2d, %1d, %1d, %1d,"
                     " %1d, %1d, %1d, %1d, %1d,"
                     " %1d, %1d, %1d, %1d, %1d, %1d,"
                     " %1d, %1d, %1d,"
                     " %1d, %2d, %2d,"
                     " %2d, %2d, %2d, %2d, %2d, %2d), \n",
            state->prologSize, state->epilogSize, state->epilogCount, state->epilogAtEnd, state->ediSaved,
            state->esiSaved, state->ebxSaved, state->ebpSaved, state->ebpFrame, state->interruptible,
            state->doubleAlign, state->security, state->handlers, state->localloc, state->editNcontinue, state->varargs,
            state->profCallbacks, state->genericsContext, state->genericsContextIsMethodDesc, state->returnKind,
            state->argCount, state->frameSize,
            (state->untrackedCnt <= SET_UNTRACKED_MAX) ? state->untrackedCnt : HAS_UNTRACKED,
            (state->varPtrTableSize == 0) ? 0 : HAS_VARPTR,
            (state->gsCookieOffset == INVALID_GS_COOKIE_OFFSET) ? 0 : HAS_GS_COOKIE_OFFSET,
            // NOTE(review): the sync-offset flag is emitted twice below,
            // presumably matching two sync-offset parameters in the InfoHdr()
            // constructor -- confirm against the InfoHdr declaration before changing.
            (state->syncStartOffset == INVALID_SYNC_OFFSET) ? 0 : HAS_SYNC_OFFSET,
            (state->syncStartOffset == INVALID_SYNC_OFFSET) ? 0 : HAS_SYNC_OFFSET,
            (state->revPInvokeOffset == INVALID_REV_PINVOKE_OFFSET) ? 0 : HAS_REV_PINVOKE_FRAME_OFFSET);

    fflush(logFile);

    LeaveCriticalSection(&logFileLock);
}
#endif
/*****************************************************************************
*
* Given the four parameters return the index into the callPatternTable[]
* that is used to encoding these four items. If an exact match cannot
* found then ignore the codeDelta and search the table again for a near
* match.
* Returns 0..79 for an exact match or
* (delta<<8) | (0..79) for a near match.
* A near match will be encoded using two bytes, the first byte will
* skip the adjustment delta that prevented an exact match and the
* rest of the delta plus the other three items are encoded in the
* second byte.
*/
int FASTCALL lookupCallPattern(unsigned argCnt, unsigned regMask, unsigned argMask, unsigned codeDelta)
{
    if ((argCnt <= CP_MAX_ARG_CNT) && (argMask <= CP_MAX_ARG_MASK))
    {
        CallPattern pat;

        pat.fld.argCnt    = argCnt;
        pat.fld.regMask   = regMask; // EBP,EBX,ESI,EDI
        pat.fld.argMask   = argMask;
        pat.fld.codeDelta = codeDelta;

        // Did codeDelta fit in its bitfield without truncation?
        bool     codeDeltaOK = (pat.fld.codeDelta == codeDelta);
        unsigned bestDelta2  = 0xff;
        unsigned bestPattern = 0xff;
        unsigned patval      = pat.val;
        assert(sizeof(CallPattern) == sizeof(unsigned));

        const unsigned* curp = &callPatternTable[0];
        for (unsigned inx = 0; inx < 80; inx++, curp++)
        {
            unsigned curval = *curp;
            if ((patval == curval) && codeDeltaOK)
                return inx;

            // Near match: low 24 bits (argCnt/regMask/argMask) agree; track the
            // candidate with the smallest non-negative codeDelta adjustment.
            // Note: if codeDelta is smaller than the table's delta, the unsigned
            // subtraction wraps to a huge value and the candidate is rejected.
            if (((patval ^ curval) & 0xffffff) == 0)
            {
                unsigned delta2 = codeDelta - (curval >> 24);

                if (delta2 < bestDelta2)
                {
                    bestDelta2  = delta2;
                    bestPattern = inx;
                }
            }
        }

        if (bestPattern != 0xff)
        {
            return (bestDelta2 << 8) | bestPattern;
        }
    }
    return -1;
}
// initNeeded3: Decide whether reaching 'tgt' from 'cur' by repeated
// "shift-in-3-bits" steps needs a fresh initialization.
//
// 'tgt' is decomposed into 3-bit nibbles (low to high). If, while peeling
// nibbles off, the remaining high part equals 'cur', then 'cur' can simply
// be extended: *hint receives the next 3-bit nibble to append and false is
// returned. Otherwise *hint receives the topmost chunk (<= max) to start
// from and true (re-initialization needed) is returned.
//
// Note: the original version also maintained an unused step counter; it has
// been removed.
static bool initNeeded3(unsigned cur, unsigned tgt, unsigned max, unsigned* hint)
{
    assert(cur != tgt);

    unsigned tmp = tgt;
    while (tmp > max)
    {
        unsigned nib = tmp & 0x07;
        tmp >>= 3;
        if (tmp == cur)
        {
            // 'cur' is a prefix of 'tgt': just append this nibble.
            *hint = nib;
            return false;
        }
    }

    // No shared prefix found: start over from the top chunk.
    *hint = tmp;
    return true;
}
// initNeeded4: Same as initNeeded3, but decomposes 'tgt' into 4-bit nibbles.
//
// Returns false with *hint = next 4-bit nibble when 'cur' is a prefix of
// 'tgt' (so 'cur' can be extended in place); returns true with *hint = the
// topmost chunk (<= max) when a fresh initialization is required.
//
// Note: the original version also maintained an unused step counter; it has
// been removed.
static bool initNeeded4(unsigned cur, unsigned tgt, unsigned max, unsigned* hint)
{
    assert(cur != tgt);

    unsigned tmp = tgt;
    while (tmp > max)
    {
        unsigned nib = tmp & 0x0f;
        tmp >>= 4;
        if (tmp == cur)
        {
            // 'cur' is a prefix of 'tgt': just append this nibble.
            *hint = nib;
            return false;
        }
    }

    // No shared prefix found: start over from the top chunk.
    *hint = tmp;
    return true;
}
// bigEncoding3: Count how many extra 3-bit "shift-in" steps are required to
// encode 'tgt', stopping early if the remaining high part becomes 'cur'
// (in which case the existing value can be extended from there).
//
// Used by measureDistance() to estimate encoding size.
//
// Note: the original version kept the peeled-off nibble in an unused local;
// it has been removed, and the count is cast explicitly for the int return.
static int bigEncoding3(unsigned cur, unsigned tgt, unsigned max)
{
    assert(cur != tgt);

    unsigned tmp = tgt;
    unsigned cnt = 0;
    while (tmp > max)
    {
        tmp >>= 3;
        if (tmp == cur)
            break;
        cnt++;
    }
    return static_cast<int>(cnt);
}
// bigEncoding4: Count how many extra 4-bit "shift-in" steps are required to
// encode 'tgt', stopping early if the remaining high part becomes 'cur'
// (in which case the existing value can be extended from there).
//
// Used by measureDistance() to estimate encoding size.
//
// Note: the original version kept the peeled-off nibble in an unused local;
// it has been removed, and the count is cast explicitly for the int return.
static int bigEncoding4(unsigned cur, unsigned tgt, unsigned max)
{
    assert(cur != tgt);

    unsigned tmp = tgt;
    unsigned cnt = 0;
    while (tmp > max)
    {
        tmp >>= 4;
        if (tmp == cur)
            break;
        cnt++;
    }
    return static_cast<int>(cnt);
}
//------------------------------------------------------------------------
// encodeHeaderNext: Produce one "adjustment" byte that mutates 'state'
// one field closer to the target 'header'.
//
// Arguments:
//    header  - the InfoHdr we ultimately want to describe
//    state   - the InfoHdr as decoded so far; exactly one field is
//              updated per call
//    codeSet - out: 1 if the returned byte is from the InfoHdrAdjust
//              enumeration, 2 if it is from InfoHdrAdjust2
//
// Return Value:
//    The encoding byte; MORE_BYTES_TO_FOLLOW is OR'ed in when 'state'
//    still does not match 'header' so the caller must keep calling.
//
BYTE FASTCALL encodeHeaderNext(const InfoHdr& header, InfoHdr* state, BYTE& codeSet)
{
    BYTE encoding = 0xff;
    codeSet       = 1; // codeSet is 1 or 2, depending on whether the returned encoding
                       // corresponds to InfoHdrAdjust, or InfoHdrAdjust2 enumerations.

    if (state->argCount != header.argCount)
    {
        // We have one-byte encodings for 0..8
        if (header.argCount <= SET_ARGCOUNT_MAX)
        {
            state->argCount = header.argCount;
            encoding        = SET_ARGCOUNT + header.argCount;
            goto DO_RETURN;
        }
        else
        {
            // Larger values are built up 4 bits at a time via NEXT_FOUR_ARGCOUNT.
            unsigned hint;
            if (initNeeded4(state->argCount, header.argCount, SET_ARGCOUNT_MAX, &hint))
            {
                assert(hint <= SET_ARGCOUNT_MAX);
                state->argCount = hint;
                encoding        = SET_ARGCOUNT + hint;
                goto DO_RETURN;
            }
            else
            {
                assert(hint <= 0xf);
                state->argCount <<= 4;
                state->argCount += hint;
                encoding = NEXT_FOUR_ARGCOUNT + hint;
                goto DO_RETURN;
            }
        }
    }

    if (state->frameSize != header.frameSize)
    {
        // We have one-byte encodings for 0..7
        if (header.frameSize <= SET_FRAMESIZE_MAX)
        {
            state->frameSize = header.frameSize;
            encoding         = SET_FRAMESIZE + header.frameSize;
            goto DO_RETURN;
        }
        else
        {
            // Larger values are built up 4 bits at a time via NEXT_FOUR_FRAMESIZE.
            unsigned hint;
            if (initNeeded4(state->frameSize, header.frameSize, SET_FRAMESIZE_MAX, &hint))
            {
                assert(hint <= SET_FRAMESIZE_MAX);
                state->frameSize = hint;
                encoding         = SET_FRAMESIZE + hint;
                goto DO_RETURN;
            }
            else
            {
                assert(hint <= 0xf);
                state->frameSize <<= 4;
                state->frameSize += hint;
                encoding = NEXT_FOUR_FRAMESIZE + hint;
                goto DO_RETURN;
            }
        }
    }

    if ((state->epilogCount != header.epilogCount) || (state->epilogAtEnd != header.epilogAtEnd))
    {
        if (header.epilogCount > SET_EPILOGCNT_MAX)
            IMPL_LIMITATION("More than SET_EPILOGCNT_MAX epilogs");

        // Count and at-end flag share one encoding byte.
        state->epilogCount = header.epilogCount;
        state->epilogAtEnd = header.epilogAtEnd;
        encoding           = SET_EPILOGCNT + header.epilogCount * 2;
        if (header.epilogAtEnd)
            encoding++;
        goto DO_RETURN;
    }

    if (state->varPtrTableSize != header.varPtrTableSize)
    {
        // Only the presence/absence of a var-ptr table is toggled here.
        assert(state->varPtrTableSize == 0 || state->varPtrTableSize == HAS_VARPTR);

        if (state->varPtrTableSize == 0)
        {
            state->varPtrTableSize = HAS_VARPTR;
            encoding               = FLIP_VAR_PTR_TABLE_SZ;
            goto DO_RETURN;
        }
        else if (header.varPtrTableSize == 0)
        {
            state->varPtrTableSize = 0;
            encoding               = FLIP_VAR_PTR_TABLE_SZ;
            goto DO_RETURN;
        }
    }

    if (state->untrackedCnt != header.untrackedCnt)
    {
        assert(state->untrackedCnt <= SET_UNTRACKED_MAX || state->untrackedCnt == HAS_UNTRACKED);

        // We have one-byte encodings for 0..3
        if (header.untrackedCnt <= SET_UNTRACKED_MAX)
        {
            state->untrackedCnt = header.untrackedCnt;
            encoding            = SET_UNTRACKED + header.untrackedCnt;
            goto DO_RETURN;
        }
        else if (state->untrackedCnt != HAS_UNTRACKED)
        {
            // Larger counts are stored separately; just record "has untracked".
            state->untrackedCnt = HAS_UNTRACKED;
            encoding            = FFFF_UNTRACKED_CNT;
            goto DO_RETURN;
        }
    }

    if (state->epilogSize != header.epilogSize)
    {
        // We have one-byte encodings for 0..10
        if (header.epilogSize <= SET_EPILOGSIZE_MAX)
        {
            state->epilogSize = header.epilogSize;
            encoding          = SET_EPILOGSIZE + header.epilogSize;
            goto DO_RETURN;
        }
        else
        {
            // Larger values are built up 3 bits at a time via NEXT_THREE_EPILOGSIZE.
            unsigned hint;
            if (initNeeded3(state->epilogSize, header.epilogSize, SET_EPILOGSIZE_MAX, &hint))
            {
                assert(hint <= SET_EPILOGSIZE_MAX);
                state->epilogSize = hint;
                encoding          = SET_EPILOGSIZE + hint;
                goto DO_RETURN;
            }
            else
            {
                assert(hint <= 0x7);
                state->epilogSize <<= 3;
                state->epilogSize += hint;
                encoding = NEXT_THREE_EPILOGSIZE + hint;
                goto DO_RETURN;
            }
        }
    }

    if (state->prologSize != header.prologSize)
    {
        // We have one-byte encodings for 0..16
        if (header.prologSize <= SET_PROLOGSIZE_MAX)
        {
            state->prologSize = header.prologSize;
            encoding          = SET_PROLOGSIZE + header.prologSize;
            goto DO_RETURN;
        }
        else
        {
            // Larger values are built up 3 bits at a time via NEXT_THREE_PROLOGSIZE.
            unsigned hint;
            assert(SET_PROLOGSIZE_MAX > 15);
            if (initNeeded3(state->prologSize, header.prologSize, 15, &hint))
            {
                assert(hint <= 15);
                state->prologSize = hint;
                encoding          = SET_PROLOGSIZE + hint;
                goto DO_RETURN;
            }
            else
            {
                assert(hint <= 0x7);
                state->prologSize <<= 3;
                state->prologSize += hint;
                encoding = NEXT_THREE_PROLOGSIZE + hint;
                goto DO_RETURN;
            }
        }
    }

    // Single-bit fields are toggled with dedicated FLIP_* encodings.

    if (state->ediSaved != header.ediSaved)
    {
        state->ediSaved = header.ediSaved;
        encoding        = FLIP_EDI_SAVED;
        goto DO_RETURN;
    }

    if (state->esiSaved != header.esiSaved)
    {
        state->esiSaved = header.esiSaved;
        encoding        = FLIP_ESI_SAVED;
        goto DO_RETURN;
    }

    if (state->ebxSaved != header.ebxSaved)
    {
        state->ebxSaved = header.ebxSaved;
        encoding        = FLIP_EBX_SAVED;
        goto DO_RETURN;
    }

    if (state->ebpSaved != header.ebpSaved)
    {
        state->ebpSaved = header.ebpSaved;
        encoding        = FLIP_EBP_SAVED;
        goto DO_RETURN;
    }

    if (state->ebpFrame != header.ebpFrame)
    {
        state->ebpFrame = header.ebpFrame;
        encoding        = FLIP_EBP_FRAME;
        goto DO_RETURN;
    }

    if (state->interruptible != header.interruptible)
    {
        state->interruptible = header.interruptible;
        encoding             = FLIP_INTERRUPTIBLE;
        goto DO_RETURN;
    }

#if DOUBLE_ALIGN
    if (state->doubleAlign != header.doubleAlign)
    {
        state->doubleAlign = header.doubleAlign;
        encoding           = FLIP_DOUBLE_ALIGN;
        goto DO_RETURN;
    }
#endif

    if (state->security != header.security)
    {
        state->security = header.security;
        encoding        = FLIP_SECURITY;
        goto DO_RETURN;
    }

    if (state->handlers != header.handlers)
    {
        state->handlers = header.handlers;
        encoding        = FLIP_HANDLERS;
        goto DO_RETURN;
    }

    if (state->localloc != header.localloc)
    {
        state->localloc = header.localloc;
        encoding        = FLIP_LOCALLOC;
        goto DO_RETURN;
    }

    if (state->editNcontinue != header.editNcontinue)
    {
        state->editNcontinue = header.editNcontinue;
        encoding             = FLIP_EDITnCONTINUE;
        goto DO_RETURN;
    }

    if (state->varargs != header.varargs)
    {
        state->varargs = header.varargs;
        encoding       = FLIP_VARARGS;
        goto DO_RETURN;
    }

    if (state->profCallbacks != header.profCallbacks)
    {
        state->profCallbacks = header.profCallbacks;
        encoding             = FLIP_PROF_CALLBACKS;
        goto DO_RETURN;
    }

    if (state->genericsContext != header.genericsContext)
    {
        state->genericsContext = header.genericsContext;
        encoding               = FLIP_HAS_GENERICS_CONTEXT;
        goto DO_RETURN;
    }

    if (state->genericsContextIsMethodDesc != header.genericsContextIsMethodDesc)
    {
        state->genericsContextIsMethodDesc = header.genericsContextIsMethodDesc;
        encoding                           = FLIP_GENERICS_CONTEXT_IS_METHODDESC;
        goto DO_RETURN;
    }

    if (state->returnKind != header.returnKind)
    {
        // Return kind uses the second (InfoHdrAdjust2) encoding set.
        state->returnKind = header.returnKind;
        codeSet           = 2; // Two byte encoding
        encoding          = header.returnKind;
        _ASSERTE(encoding < SET_RET_KIND_MAX);
        goto DO_RETURN;
    }

    if (state->gsCookieOffset != header.gsCookieOffset)
    {
        assert(state->gsCookieOffset == INVALID_GS_COOKIE_OFFSET || state->gsCookieOffset == HAS_GS_COOKIE_OFFSET);

        if (state->gsCookieOffset == INVALID_GS_COOKIE_OFFSET)
        {
            // header.gsCookieOffset is non-zero. We can set it
            // to zero using FLIP_HAS_GS_COOKIE
            state->gsCookieOffset = HAS_GS_COOKIE_OFFSET;
            encoding              = FLIP_HAS_GS_COOKIE;
            goto DO_RETURN;
        }
        else if (header.gsCookieOffset == INVALID_GS_COOKIE_OFFSET)
        {
            state->gsCookieOffset = INVALID_GS_COOKIE_OFFSET;
            encoding              = FLIP_HAS_GS_COOKIE;
            goto DO_RETURN;
        }
    }

    if (state->syncStartOffset != header.syncStartOffset)
    {
        assert(state->syncStartOffset == INVALID_SYNC_OFFSET || state->syncStartOffset == HAS_SYNC_OFFSET);

        if (state->syncStartOffset == INVALID_SYNC_OFFSET)
        {
            // header.syncStartOffset is non-zero. We can set it
            // to zero using FLIP_SYNC
            state->syncStartOffset = HAS_SYNC_OFFSET;
            encoding               = FLIP_SYNC;
            goto DO_RETURN;
        }
        else if (header.syncStartOffset == INVALID_SYNC_OFFSET)
        {
            state->syncStartOffset = INVALID_SYNC_OFFSET;
            encoding               = FLIP_SYNC;
            goto DO_RETURN;
        }
    }

    if (state->revPInvokeOffset != header.revPInvokeOffset)
    {
        assert(state->revPInvokeOffset == INVALID_REV_PINVOKE_OFFSET ||
               state->revPInvokeOffset == HAS_REV_PINVOKE_FRAME_OFFSET);

        if (state->revPInvokeOffset == INVALID_REV_PINVOKE_OFFSET)
        {
            // header.revPInvokeOffset is non-zero.
            state->revPInvokeOffset = HAS_REV_PINVOKE_FRAME_OFFSET;
            encoding                = FLIP_REV_PINVOKE_FRAME;
            goto DO_RETURN;
        }
        else if (header.revPInvokeOffset == INVALID_REV_PINVOKE_OFFSET)
        {
            state->revPInvokeOffset = INVALID_REV_PINVOKE_OFFSET;
            encoding                = FLIP_REV_PINVOKE_FRAME;
            goto DO_RETURN;
        }
    }

DO_RETURN:
    _ASSERTE(encoding < MORE_BYTES_TO_FOLLOW);
    if (!state->isHeaderMatch(header))
        encoding |= MORE_BYTES_TO_FOLLOW;

    return encoding;
}
//------------------------------------------------------------------------
// measureDistance: Estimate how many adjustment bytes would be needed to
// turn the candidate shortcut header 'p' into the desired 'header'.
//
// The accumulation early-outs as soon as the running distance reaches
// 'closeness' (the best distance found so far), since this candidate can
// no longer improve on it.
//
static int measureDistance(const InfoHdr& header, const InfoHdrSmall* p, int closeness)
{
    int distance = 0;

    if (p->untrackedCnt != header.untrackedCnt)
    {
        if (header.untrackedCnt > 3)
        {
            if (p->untrackedCnt != HAS_UNTRACKED)
                distance += 1;
        }
        else
        {
            distance += 1;
        }
        if (distance >= closeness)
            return distance;
    }

    if (p->varPtrTableSize != header.varPtrTableSize)
    {
        if (header.varPtrTableSize != 0)
        {
            if (p->varPtrTableSize != HAS_VARPTR)
                distance += 1;
        }
        else
        {
            assert(p->varPtrTableSize == HAS_VARPTR);
            distance += 1;
        }
        if (distance >= closeness)
            return distance;
    }

    if (p->frameSize != header.frameSize)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;

        // We have one-byte encodings for 0..7
        if (header.frameSize > SET_FRAMESIZE_MAX)
        {
            distance += bigEncoding4(p->frameSize, header.frameSize, SET_FRAMESIZE_MAX);
            if (distance >= closeness)
                return distance;
        }
    }

    if (p->argCount != header.argCount)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;

        // We have one-byte encodings for 0..8
        if (header.argCount > SET_ARGCOUNT_MAX)
        {
            distance += bigEncoding4(p->argCount, header.argCount, SET_ARGCOUNT_MAX);
            if (distance >= closeness)
                return distance;
        }
    }

    if (p->prologSize != header.prologSize)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;

        // We have one-byte encodings for 0..16
        if (header.prologSize > SET_PROLOGSIZE_MAX)
        {
            assert(SET_PROLOGSIZE_MAX > 15);
            distance += bigEncoding3(p->prologSize, header.prologSize, 15);
            if (distance >= closeness)
                return distance;
        }
    }

    if (p->epilogSize != header.epilogSize)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;

        // We have one-byte encodings for 0..10
        if (header.epilogSize > SET_EPILOGSIZE_MAX)
        {
            distance += bigEncoding3(p->epilogSize, header.epilogSize, SET_EPILOGSIZE_MAX);
            if (distance >= closeness)
                return distance;
        }
    }

    if ((p->epilogCount != header.epilogCount) || (p->epilogAtEnd != header.epilogAtEnd))
    {
        distance += 1;
        if (distance >= closeness)
            return distance;

        if (header.epilogCount > SET_EPILOGCNT_MAX)
            IMPL_LIMITATION("More than SET_EPILOGCNT_MAX epilogs");
    }

    // Each differing one-bit field costs one FLIP_* byte.

    if (p->ediSaved != header.ediSaved)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

    if (p->esiSaved != header.esiSaved)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

    if (p->ebxSaved != header.ebxSaved)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

    if (p->ebpSaved != header.ebpSaved)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

    if (p->ebpFrame != header.ebpFrame)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

    if (p->interruptible != header.interruptible)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

#if DOUBLE_ALIGN
    if (p->doubleAlign != header.doubleAlign)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }
#endif

    if (p->security != header.security)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

    if (p->handlers != header.handlers)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

    if (p->localloc != header.localloc)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

    if (p->editNcontinue != header.editNcontinue)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

    if (p->varargs != header.varargs)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

    if (p->profCallbacks != header.profCallbacks)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

    if (p->genericsContext != header.genericsContext)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

    if (p->genericsContextIsMethodDesc != header.genericsContextIsMethodDesc)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

    if (p->returnKind != header.returnKind)
    {
        // Setting the ReturnKind requires two bytes of encoding.
        distance += 2;
        if (distance >= closeness)
            return distance;
    }

    // The shortcut entries never carry these optional offsets, so their mere
    // presence in 'header' costs one byte each.

    if (header.gsCookieOffset != INVALID_GS_COOKIE_OFFSET)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

    if (header.syncStartOffset != INVALID_SYNC_OFFSET)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

    if (header.revPInvokeOffset != INVALID_REV_PINVOKE_OFFSET)
    {
        distance += 1;
        if (distance >= closeness)
            return distance;
    }

    return distance;
}
// DllMain calls gcInitEncoderLookupTable to fill in this table
// infoHdrLookup[psz] is the index of the first infoHdrShortcut[] entry whose
// prologSize is psz (see gcInitEncoderLookupTable / encodeHeaderFirst); the
// extra slot at [IH_MAX_PROLOG_SIZE + 1] marks the end of the last range.
/* extern */ int infoHdrLookup[IH_MAX_PROLOG_SIZE + 2];
//------------------------------------------------------------------------
// gcInitEncoderLookupTable: Build infoHdrLookup[] from infoHdrShortcut[].
//
// The shortcut table is ordered by prologSize (with a possible wrap back
// to 0 at the end); for each prolog size this records the index of the
// first shortcut entry with that size, enabling encodeHeaderFirst() to
// search only entries with a matching prolog size.
//
/* static */ void GCInfo::gcInitEncoderLookupTable()
{
    const InfoHdrSmall* p  = &infoHdrShortcut[0];
    int                 lo = -1;
    int                 hi = 0;
    int                 n;

    for (n = 0; n < 128; n++, p++)
    {
        if (p->prologSize != lo)
        {
            if (p->prologSize < lo)
            {
                // Table wrapped back to a smaller prolog size: the remaining
                // sizes up to IH_MAX_PROLOG_SIZE all start here.
                assert(p->prologSize == 0);
                hi = IH_MAX_PROLOG_SIZE;
            }
            else
                hi = p->prologSize;

            assert(hi <= IH_MAX_PROLOG_SIZE);

            // Every prolog size in (lo, hi] begins at entry n.
            while (lo < hi)
                infoHdrLookup[++lo] = n;

            if (lo == IH_MAX_PROLOG_SIZE)
                break;
        }
    }

    assert(lo == IH_MAX_PROLOG_SIZE);
    assert(infoHdrLookup[IH_MAX_PROLOG_SIZE] < 128);

    // Skip past the remaining entries with this prolog size to record the
    // end-of-range sentinel.
    while (p->prologSize == lo)
    {
        n++;
        if (n >= 128)
            break;
        p++;
    }

    infoHdrLookup[++lo] = n;

#ifdef DEBUG
    //
    // We do some other DEBUG only validity checks here
    //

    assert(callCommonDelta[0] < callCommonDelta[1]);
    assert(callCommonDelta[1] < callCommonDelta[2]);
    assert(callCommonDelta[2] < callCommonDelta[3]);
    assert(sizeof(CallPattern) == sizeof(unsigned));
    unsigned maxMarks = 0;
    for (unsigned inx = 0; inx < 80; inx++)
    {
        CallPattern pat;
        pat.val = callPatternTable[inx];

        assert(pat.fld.codeDelta <= CP_MAX_CODE_DELTA);
        if (pat.fld.codeDelta == CP_MAX_CODE_DELTA)
            maxMarks |= 0x01;

        assert(pat.fld.argCnt <= CP_MAX_ARG_CNT);
        if (pat.fld.argCnt == CP_MAX_ARG_CNT)
            maxMarks |= 0x02;

        assert(pat.fld.argMask <= CP_MAX_ARG_MASK);
        if (pat.fld.argMask == CP_MAX_ARG_MASK)
            maxMarks |= 0x04;
    }
    assert(maxMarks == 0x07);
#endif
}
// Sentinel value for *pCached meaning "no previously matched header cached".
const int NO_CACHED_HEADER = -1;
//------------------------------------------------------------------------
// encodeHeaderFirst: Pick the infoHdrShortcut[] entry that best matches
// 'header' and emit the first encoding byte.
//
// Arguments:
//    header  - the InfoHdr to encode
//    state   - out: initialized to the chosen shortcut header
//    more    - out: 0 for an exact match, otherwise the estimated number of
//              adjustment bytes still required
//    pCached - in/out: index of the last shortcut used (NO_CACHED_HEADER if
//              none); updated to the entry chosen here
//
// Return Value:
//    The shortcut index; bit 0x80 is set when the match is only approximate
//    and adjustment bytes (via encodeHeaderNext) must follow.
//
BYTE FASTCALL encodeHeaderFirst(const InfoHdr& header, InfoHdr* state, int* more, int* pCached)
{
    // First try the cached value for an exact match, if there is one
    //
    int                 n = *pCached;
    const InfoHdrSmall* p;

    if (n != NO_CACHED_HEADER)
    {
        p = &infoHdrShortcut[n];
        if (p->isHeaderMatch(header))
        {
            // exact match found
            GetInfoHdr(n, state);
            *more = 0;
            return n;
        }
    }

    // Next search the table for an exact match
    // Only search entries that have a matching prolog size
    // Note: lo and hi are saved here as they specify the
    // range of entries that have the correct prolog size
    //
    unsigned psz = header.prologSize;
    int      lo  = 0;
    int      hi  = 0;

    if (psz <= IH_MAX_PROLOG_SIZE)
    {
        lo = infoHdrLookup[psz];
        hi = infoHdrLookup[psz + 1];
        p  = &infoHdrShortcut[lo];
        for (n = lo; n < hi; n++, p++)
        {
            assert(psz == p->prologSize);
            if (p->isHeaderMatch(header))
            {
                // exact match found
                GetInfoHdr(n, state);
                *pCached = n; // cache the value
                *more    = 0;
                return n;
            }
        }
    }

    //
    // no exact match in infoHdrShortcut[]
    //
    // find the nearest entry in the table
    //
    int nearest   = -1;
    int closeness = 255; // (i.e. not very close)

    //
    // Calculate the minimum acceptable distance
    // if we find an entry that is at least this close
    // we will stop the search and use that value
    //
    int min_acceptable_distance = 1;

    if (header.frameSize > SET_FRAMESIZE_MAX)
    {
        ++min_acceptable_distance;
        if (header.frameSize > 32)
            ++min_acceptable_distance;
    }
    if (header.argCount > SET_ARGCOUNT_MAX)
    {
        ++min_acceptable_distance;
        if (header.argCount > 32)
            ++min_acceptable_distance;
    }

    // First try the cached value
    // and see if it meets the minimum acceptable distance
    //
    if (*pCached != NO_CACHED_HEADER)
    {
        p            = &infoHdrShortcut[*pCached];
        int distance = measureDistance(header, p, closeness);
        assert(distance > 0);
        if (distance <= min_acceptable_distance)
        {
            GetInfoHdr(*pCached, state);
            *more = distance;
            return 0x80 | *pCached;
        }
        else
        {
            closeness = distance;
            nearest   = *pCached;
        }
    }

    // Then try the ones pointed to by [lo..hi),
    // (i.e. the ones that have the correct prolog size)
    //
    p = &infoHdrShortcut[lo];
    for (n = lo; n < hi; n++, p++)
    {
        if (n == *pCached)
            continue; // already tried this one
        int distance = measureDistance(header, p, closeness);
        assert(distance > 0);
        if (distance <= min_acceptable_distance)
        {
            GetInfoHdr(n, state);
            *pCached = n; // Cache this value
            *more    = distance;
            return 0x80 | n;
        }
        else if (distance < closeness)
        {
            closeness = distance;
            nearest   = n;
        }
    }

    int last = infoHdrLookup[IH_MAX_PROLOG_SIZE + 1];
    assert(last <= 128);

    // Then try all the rest [0..last-1]
    p = &infoHdrShortcut[0];
    for (n = 0; n < last; n++, p++)
    {
        if (n == *pCached)
            continue; // already tried this one
        if ((n >= lo) && (n < hi))
            continue; // already tried these
        int distance = measureDistance(header, p, closeness);
        assert(distance > 0);
        if (distance <= min_acceptable_distance)
        {
            GetInfoHdr(n, state);
            *pCached = n; // Cache this value
            *more    = distance;
            return 0x80 | n;
        }
        else if (distance < closeness)
        {
            closeness = distance;
            nearest   = n;
        }
    }

    //
    // If we reach here then there was no adjacent neighbor
    //  in infoHdrShortcut[], closeness indicate how many extra
    //  bytes we will need to encode this item.
    //
    assert((nearest >= 0) && (nearest <= 127));
    GetInfoHdr(nearest, state);
    *pCached = nearest; // Cache this value
    *more    = closeness;
    return 0x80 | nearest;
}
/*****************************************************************************
*
* Write the initial part of the method info block. This is called twice;
* first to compute the size needed for the info (mask=0), the second time
* to actually generate the contents of the table (mask=-1,dest!=NULL).
*/
//------------------------------------------------------------------------
// gcInfoBlockHdrSave: Write (or measure) the initial part of the method's
// GC info block.
//
// Called twice: first with mask == 0 to compute the size needed (nothing is
// stored), then with mask == -1 and a valid 'dest' to emit the contents.
// Throughout, "dest += sz & mask" advances the output pointer only in the
// emit pass, since mask is all ones there and zero in the sizing pass.
//
// Arguments:
//    dest       - destination buffer (written only when mask == -1)
//    mask       - 0 to measure, -1 to generate
//    methodSize - total size of the method's code, in bytes
//    prologSize - size of the prolog, in bytes
//    epilogSize - size of each epilog, in bytes
//    header     - in/out header describing the method
//    pCached    - in/out cached index into the InfoHdr shortcut table
//                 (NO_CACHED_HEADER if nothing is cached yet)
//
// Return Value:
//    The number of bytes stored (or that would be stored) for the header.
//
size_t GCInfo::gcInfoBlockHdrSave(
    BYTE* dest, int mask, unsigned methodSize, unsigned prologSize, unsigned epilogSize, InfoHdr* header, int* pCached)
{
#ifdef DEBUG
    if (compiler->verbose)
        printf("*************** In gcInfoBlockHdrSave()\n");
#endif
    size_t size = 0;
#if VERIFY_GC_TABLES
    *castto(dest, unsigned short*)++ = 0xFEEF;
    size += sizeof(short);
#endif
    /* Write the method size first (using between 1 and 5 bytes) */
    CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
    if (compiler->verbose)
    {
        if (mask)
            printf("GCINFO: methodSize = %04X\n", methodSize);
        if (mask)
            printf("GCINFO: prologSize = %04X\n", prologSize);
        if (mask)
            printf("GCINFO: epilogSize = %04X\n", epilogSize);
    }
#endif
    size_t methSz = encodeUnsigned(dest, methodSize);
    size += methSz;
    dest += methSz & mask;
    //
    // New style InfoBlk Header
    //
    // Typically only uses one-byte to store everything.
    //
    if (mask == 0)
    {
        // Sizing pass: start from a clean header and no cached shortcut entry.
        memset(header, 0, sizeof(InfoHdr));
        *pCached = NO_CACHED_HEADER;
    }
    // Record the prolog/epilog shape; values must fit the narrow header fields.
    assert(FitsIn<unsigned char>(prologSize));
    header->prologSize = static_cast<unsigned char>(prologSize);
    assert(FitsIn<unsigned char>(epilogSize));
    header->epilogSize = static_cast<unsigned char>(epilogSize);
    header->epilogCount = compiler->GetEmitter()->emitGetEpilogCnt();
    if (header->epilogCount != compiler->GetEmitter()->emitGetEpilogCnt())
        IMPL_LIMITATION("emitGetEpilogCnt() does not fit in InfoHdr::epilogCount");
    header->epilogAtEnd = compiler->GetEmitter()->emitHasEpilogEnd();
    // Note which callee-saved registers the method modifies (and so saves).
    if (compiler->codeGen->regSet.rsRegsModified(RBM_EDI))
        header->ediSaved = 1;
    if (compiler->codeGen->regSet.rsRegsModified(RBM_ESI))
        header->esiSaved = 1;
    if (compiler->codeGen->regSet.rsRegsModified(RBM_EBX))
        header->ebxSaved = 1;
    header->interruptible = compiler->codeGen->GetInterruptible();
    if (!compiler->isFramePointerUsed())
    {
#if DOUBLE_ALIGN
        if (compiler->genDoubleAlign())
        {
            header->ebpSaved = true;
            assert(!compiler->codeGen->regSet.rsRegsModified(RBM_EBP));
        }
#endif
        if (compiler->codeGen->regSet.rsRegsModified(RBM_EBP))
        {
            header->ebpSaved = true;
        }
    }
    else
    {
        // EBP frame: EBP is both saved and used as the frame pointer.
        header->ebpSaved = true;
        header->ebpFrame = true;
    }
#if DOUBLE_ALIGN
    header->doubleAlign = compiler->genDoubleAlign();
#endif
    // Miscellaneous method attributes.
    header->security = false;
    header->handlers = compiler->ehHasCallableHandlers();
    header->localloc = compiler->compLocallocUsed;
    header->varargs = compiler->info.compIsVarArgs;
    header->profCallbacks = compiler->info.compProfilerCallback;
    header->editNcontinue = compiler->opts.compDbgEnC;
    header->genericsContext = compiler->lvaReportParamTypeArg();
    header->genericsContextIsMethodDesc =
        header->genericsContext && (compiler->info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC));
    ReturnKind returnKind = getReturnKind();
    _ASSERTE(IsValidReturnKind(returnKind) && "Return Kind must be valid");
    _ASSERTE(!IsStructReturnKind(returnKind) && "Struct Return Kinds Unexpected for JIT32");
    _ASSERTE(((int)returnKind < (int)SET_RET_KIND_MAX) && "ReturnKind has no legal encoding");
    header->returnKind = returnKind;
    // GS cookie offset: frame-relative, negated for EBP-relative frames.
    header->gsCookieOffset = INVALID_GS_COOKIE_OFFSET;
    if (compiler->getNeedsGSSecurityCookie())
    {
        assert(compiler->lvaGSSecurityCookie != BAD_VAR_NUM);
        int stkOffs = compiler->lvaTable[compiler->lvaGSSecurityCookie].GetStackOffset();
        header->gsCookieOffset = compiler->isFramePointerUsed() ? -stkOffs : stkOffs;
        assert(header->gsCookieOffset != INVALID_GS_COOKIE_OFFSET);
    }
    header->syncStartOffset = INVALID_SYNC_OFFSET;
    header->syncEndOffset = INVALID_SYNC_OFFSET;
#ifndef UNIX_X86_ABI
    // JIT is responsible for synchronization on funclet-based EH model that x86/Linux uses.
    if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
    {
        assert(compiler->syncStartEmitCookie != NULL);
        header->syncStartOffset = compiler->GetEmitter()->emitCodeOffset(compiler->syncStartEmitCookie, 0);
        assert(header->syncStartOffset != INVALID_SYNC_OFFSET);
        assert(compiler->syncEndEmitCookie != NULL);
        header->syncEndOffset = compiler->GetEmitter()->emitCodeOffset(compiler->syncEndEmitCookie, 0);
        assert(header->syncEndOffset != INVALID_SYNC_OFFSET);
        assert(header->syncStartOffset < header->syncEndOffset);
        // synchronized methods can't have more than 1 epilog
        assert(header->epilogCount <= 1);
    }
#endif
    // Reverse P/Invoke frame offset: frame-relative, sign-adjusted as above.
    header->revPInvokeOffset = INVALID_REV_PINVOKE_OFFSET;
    if (compiler->opts.IsReversePInvoke())
    {
        assert(compiler->lvaReversePInvokeFrameVar != BAD_VAR_NUM);
        int stkOffs = compiler->lvaTable[compiler->lvaReversePInvokeFrameVar].GetStackOffset();
        header->revPInvokeOffset = compiler->isFramePointerUsed() ? -stkOffs : stkOffs;
        assert(header->revPInvokeOffset != INVALID_REV_PINVOKE_OFFSET);
    }
    // Stack argument count in pointer-size slots, excluding register args.
    assert((compiler->compArgSize & 0x3) == 0);
    size_t argCount =
        (compiler->compArgSize - (compiler->codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES;
    assert(argCount <= MAX_USHORT_SIZE_T);
    header->argCount = static_cast<unsigned short>(argCount);
    // Local frame size in int-size slots; must fit the header field.
    header->frameSize = compiler->compLclFrameSize / sizeof(int);
    if (header->frameSize != (compiler->compLclFrameSize / sizeof(int)))
        IMPL_LIMITATION("compLclFrameSize does not fit in InfoHdr::frameSize");
    if (mask == 0)
    {
        // Sizing pass: compute the untracked/var-ptr-table counts once.
        gcCountForHeader((UNALIGNED unsigned int*)&header->untrackedCnt,
                         (UNALIGNED unsigned int*)&header->varPtrTableSize);
    }
    //
    // If the high-order bit of headerEncoding is set
    // then additional bytes will update the InfoHdr state
    // until the fully state is encoded
    //
    InfoHdr state;
    int more = 0;
    BYTE headerEncoding = encodeHeaderFirst(*header, &state, &more, pCached);
    ++size;
    if (mask)
    {
#if REGEN_SHORTCUTS
        regenLog(headerEncoding, header, &state);
#endif
        *dest++ = headerEncoding;
        BYTE encoding = headerEncoding;
        BYTE codeSet = 1;
        // Emit adjustment bytes until the full header state is encoded.
        while (encoding & MORE_BYTES_TO_FOLLOW)
        {
            encoding = encodeHeaderNext(*header, &state, codeSet);
#if REGEN_SHORTCUTS
            regenLog(headerEncoding, header, &state);
#endif
            _ASSERTE((codeSet == 1 || codeSet == 2) && "Encoding must correspond to InfoHdrAdjust or InfoHdrAdjust2");
            if (codeSet == 2)
            {
                // Escape byte selecting the second adjustment opcode set.
                *dest++ = NEXT_OPCODE | MORE_BYTES_TO_FOLLOW;
                ++size;
            }
            *dest++ = encoding;
            ++size;
        }
    }
    else
    {
        // Sizing pass: encodeHeaderFirst reported the adjustment byte count.
        size += more;
    }
    // Values too large for the header itself follow as var-length integers.
    if (header->untrackedCnt > SET_UNTRACKED_MAX)
    {
        unsigned count = header->untrackedCnt;
        unsigned sz = encodeUnsigned(mask ? dest : NULL, count);
        size += sz;
        dest += (sz & mask);
    }
    if (header->varPtrTableSize != 0)
    {
        unsigned count = header->varPtrTableSize;
        unsigned sz = encodeUnsigned(mask ? dest : NULL, count);
        size += sz;
        dest += (sz & mask);
    }
    if (header->gsCookieOffset != INVALID_GS_COOKIE_OFFSET)
    {
        assert(mask == 0 || state.gsCookieOffset == HAS_GS_COOKIE_OFFSET);
        unsigned offset = header->gsCookieOffset;
        unsigned sz = encodeUnsigned(mask ? dest : NULL, offset);
        size += sz;
        dest += (sz & mask);
    }
    if (header->syncStartOffset != INVALID_SYNC_OFFSET)
    {
        assert(mask == 0 || state.syncStartOffset == HAS_SYNC_OFFSET);
        {
            unsigned offset = header->syncStartOffset;
            unsigned sz = encodeUnsigned(mask ? dest : NULL, offset);
            size += sz;
            dest += (sz & mask);
        }
        {
            unsigned offset = header->syncEndOffset;
            unsigned sz = encodeUnsigned(mask ? dest : NULL, offset);
            size += sz;
            dest += (sz & mask);
        }
    }
    if (header->revPInvokeOffset != INVALID_REV_PINVOKE_OFFSET)
    {
        assert(mask == 0 || state.revPInvokeOffset == HAS_REV_PINVOKE_FRAME_OFFSET);
        unsigned offset = header->revPInvokeOffset;
        unsigned sz = encodeUnsigned(mask ? dest : NULL, offset);
        size += sz;
        dest += (sz & mask);
    }
    if (header->epilogCount)
    {
        /* Generate table unless one epilog at the end of the method */
        if (header->epilogAtEnd == 0 || header->epilogCount != 1)
        {
#if VERIFY_GC_TABLES
            *castto(dest, unsigned short*)++ = 0xFACE;
            size += sizeof(short);
#endif
            /* Simply write a sorted array of offsets using encodeUDelta */
            gcEpilogTable = mask ? dest : NULL;
            gcEpilogPrevOffset = 0;
            size_t sz = compiler->GetEmitter()->emitGenEpilogLst(gcRecordEpilog, this);
            /* Add the size of the epilog table to the total size */
            size += sz;
            dest += (sz & mask);
        }
    }
#if DISPLAY_SIZES
    if (mask)
    {
        if (compiler->codeGen->GetInterruptible())
        {
            genMethodICnt++;
        }
        else
        {
            genMethodNCnt++;
        }
    }
#endif // DISPLAY_SIZES
    return size;
}
/*****************************************************************************
*
* Return the size of the pointer tracking tables.
*/
size_t GCInfo::gcPtrTableSize(const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset)
{
BYTE temp[16 + 1];
#ifdef DEBUG
temp[16] = 0xAB; // Set some marker
#endif
/* Compute the total size of the tables */
size_t size = gcMakeRegPtrTable(temp, 0, header, codeSize, pArgTabOffset);
assert(temp[16] == 0xAB); // Check that marker didnt get overwritten
return size;
}
/*****************************************************************************
* Encode the callee-saved registers into 3 bits.
*/
/*****************************************************************************
 * Pack the EBX/ESI/EDI callee-saved register mask into the low 3 bits
 * (bit 2 = EBX, bit 1 = ESI, bit 0 = EDI).
 */
unsigned gceEncodeCalleeSavedRegs(unsigned regs)
{
    unsigned result = 0;
    if ((regs & RBM_EDI) != 0)
    {
        result |= 0x01;
    }
    if ((regs & RBM_ESI) != 0)
    {
        result |= 0x02;
    }
    if ((regs & RBM_EBX) != 0)
    {
        result |= 0x04;
    }
    return result;
}
/*****************************************************************************
* Is the next entry for a byref pointer. If so, emit the prefix for the
* interruptible encoding. Check only for pushes and registers
*/
/*****************************************************************************
 * If the next entry describes a byref pointer, emit the 0xBF byref prefix
 * of the interruptible encoding. Only pushes and register entries get a
 * prefix; registers going dead never need one.
 */
inline BYTE* gceByrefPrefixI(GCInfo::regPtrDsc* rpd, BYTE* dest)
{
    // A register entry here must not be a deletion (dead regs need no prefix).
    assert(rpd->rpdArg || rpd->rpdCompiler.rpdDel == 0);
    const bool isRegOrArgPush = !rpd->rpdArg || (rpd->rpdArgType == GCInfo::rpdARG_PUSH);
    if (isRegOrArgPush && (rpd->rpdGCtypeGet() == GCT_BYREF))
    {
        *dest++ = 0xBF;
    }
    return dest;
}
/*****************************************************************************/
/* These functions are needed to work around a VC5.0 compiler bug */
/* DO NOT REMOVE, unless you are sure that the free build works */
// Returns zero. Called only through the 'zeroFunc' pointer below so the
// compiler cannot constant-fold the result (historic VC5.0 workaround —
// see the DO-NOT-REMOVE notice above).
static int zeroFN()
{
    return 0;
}
// Indirect call target; 'zeroFunc()' yields 0 in a way the optimizer
// cannot prove constant.
static int (*zeroFunc)() = zeroFN;
/*****************************************************************************
* Modelling of the GC ptrs pushed on the stack
*/
typedef unsigned pasMaskType;
#define BITS_IN_pasMask (BITS_IN_BYTE * sizeof(pasMaskType))
#define HIGHEST_pasMask_BIT (((pasMaskType)0x1) << (BITS_IN_pasMask - 1))
//-----------------------------------------------------------------------------
// Models the run-time stack of pushed call arguments so the encoder can
// report which pushed slots hold GC pointers. The bottom BITS_IN_pasMask
// slots are tracked in a pair of bit masks (bit 0 == most recently pushed
// slot); any deeper slots spill into a separately allocated byte array.
class PendingArgsStack
{
public:
    PendingArgsStack(unsigned maxDepth, Compiler* pComp);
    // Record a push of one arg slot of the given GC type.
    void pasPush(GCtype gcType);
    // Record a pop of 'count' arg slots.
    void pasPop(unsigned count);
    // Kill (but don't pop) the top 'gcCount' GC pointer args.
    void pasKill(unsigned gcCount);
    // Current number of pushed arg slots.
    unsigned pasCurDepth()
    {
        return pasDepth;
    }
    // Mask of pushed GC pointers (valid only while the whole stack fits the mask).
    pasMaskType pasArgMask()
    {
        assert(pasDepth <= BITS_IN_pasMask);
        return pasBottomMask;
    }
    // Subset of pasArgMask() whose entries are byref pointers.
    pasMaskType pasByrefArgMask()
    {
        assert(pasDepth <= BITS_IN_pasMask);
        return pasByrefBottomMask;
    }
    bool pasHasGCptrs();
    // Use these in the case where there actually are more ptrs than pasArgMask
    unsigned pasEnumGCoffsCount();
#define pasENUM_START ((unsigned)-1)
#define pasENUM_LAST ((unsigned)-2)
#define pasENUM_END ((unsigned)-3)
    unsigned pasEnumGCoffs(unsigned iter, unsigned* offs);
protected:
    unsigned pasMaxDepth; // maximum number of arg slots this model must hold
    unsigned pasDepth; // current number of pushed arg slots
    pasMaskType pasBottomMask; // The first 32 args
    pasMaskType pasByrefBottomMask; // byref qualifier for pasBottomMask
    BYTE* pasTopArray; // More than 32 args are represented here
    unsigned pasPtrsInTopArray; // How many GCptrs here
};
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Construct a pending-args model for a call site pushing at most 'maxDepth'
// arg slots. Slots beyond BITS_IN_pasMask need an overflow byte array,
// allocated from the compiler's allocator.
PendingArgsStack::PendingArgsStack(unsigned maxDepth, Compiler* pComp)
    : pasMaxDepth(maxDepth), pasDepth(0), pasBottomMask(0), pasByrefBottomMask(0), pasTopArray(NULL), pasPtrsInTopArray(0)
{
    // Only deep stacks need the overflow array in addition to the masks.
    if (pasMaxDepth > BITS_IN_pasMask)
    {
        pasTopArray = pComp->getAllocator(CMK_Unknown).allocate<BYTE>(pasMaxDepth - BITS_IN_pasMask);
    }
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Record one more pushed argument slot of type 'gcType'.
void PendingArgsStack::pasPush(GCtype gcType)
{
    assert(pasDepth < pasMaxDepth);
    if (pasDepth >= BITS_IN_pasMask)
    {
        // Deep slot: store the type byte in the overflow array and keep
        // the GC pointer count current.
        pasTopArray[pasDepth - BITS_IN_pasMask] = (BYTE)gcType;
        if (gcType)
        {
            pasPtrsInTopArray++;
        }
    }
    else
    {
        // Shallow slot: shift both masks up and set the new low bit(s).
        pasBottomMask <<= 1;
        pasByrefBottomMask <<= 1;
        if (needsGC(gcType))
        {
            pasBottomMask |= 1;
            if (gcType == GCT_BYREF)
            {
                pasByrefBottomMask |= 1;
            }
        }
    }
    pasDepth++;
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Remove 'count' pushed argument slots from the model.
void PendingArgsStack::pasPop(unsigned count)
{
    assert(pasDepth >= count);
    /* First pop from array (if applicable) */
    // Slots above BITS_IN_pasMask live in pasTopArray; decrement the GC
    // pointer count as each one comes off.
    for (/**/; (pasDepth > BITS_IN_pasMask) && count; pasDepth--, count--)
    {
        unsigned topIndex = pasDepth - BITS_IN_pasMask - 1;
        GCtype topArg = (GCtype)pasTopArray[topIndex];
        if (needsGC(topArg))
            pasPtrsInTopArray--;
    }
    if (count == 0)
        return;
    /* Now un-shift the mask */
    assert(pasPtrsInTopArray == 0);
    assert(count <= BITS_IN_pasMask);
    if (count == BITS_IN_pasMask) // (x>>32) is a nop on x86. So special-case it
    {
        pasBottomMask = pasByrefBottomMask = 0;
        pasDepth = 0;
    }
    else
    {
        pasBottomMask >>= count;
        pasByrefBottomMask >>= count;
        pasDepth -= count;
    }
}
//-----------------------------------------------------------------------------
// Kill (but don't pop) the top 'gcCount' args
// Kill (zero out, without popping) the top 'gcCount' GC pointer args —
// they remain physically on the stack but no longer hold live pointers.
void PendingArgsStack::pasKill(unsigned gcCount)
{
    assert(gcCount != 0);
    /* First kill args in array (if any) */
    for (unsigned curPos = pasDepth; (curPos > BITS_IN_pasMask) && gcCount; curPos--)
    {
        unsigned curIndex = curPos - BITS_IN_pasMask - 1;
        GCtype curArg = (GCtype)pasTopArray[curIndex];
        if (needsGC(curArg))
        {
            pasTopArray[curIndex] = GCT_NONE;
            pasPtrsInTopArray--;
            gcCount--;
        }
    }
    /* Now kill bits from the mask */
    // All array pointers must be gone before the mask is touched.
    assert(pasPtrsInTopArray == 0);
    assert(gcCount <= BITS_IN_pasMask);
    // Clear set bits from the top of the stack (bit 0 is the most recently
    // pushed slot) until 'gcCount' pointers have been killed.
    for (unsigned bitPos = 1; gcCount; bitPos <<= 1)
    {
        assert(pasBottomMask != 0);
        if (pasBottomMask & bitPos)
        {
            pasBottomMask &= ~bitPos;
            pasByrefBottomMask &= ~bitPos;
            --gcCount;
        }
        else
        {
            // Must not walk off the top of the mask before gcCount reaches 0.
            assert(bitPos != HIGHEST_pasMask_BIT);
        }
    }
}
//-----------------------------------------------------------------------------
// Used for the case where there are more than BITS_IN_pasMask args on stack,
// but none are any pointers. May avoid reporting anything to GCinfo
//-----------------------------------------------------------------------------
// Returns true if any pushed argument slot currently holds a GC pointer.
// When more than BITS_IN_pasMask slots are pushed, the overflow array is
// consulted in addition to the bottom mask; this lets callers skip GC
// reporting entirely for deep-but-pointer-free stacks.
bool PendingArgsStack::pasHasGCptrs()
{
    if (pasBottomMask != 0)
    {
        return true;
    }
    return (pasDepth > BITS_IN_pasMask) && (pasPtrsInTopArray != 0);
}
//-----------------------------------------------------------------------------
// Iterates over mask and array to return total count.
// Use only when you are going to emit a table of the offsets
unsigned PendingArgsStack::pasEnumGCoffsCount()
{
/* Should only be used in the worst case, when just the mask can't be used */
assert(pasDepth > BITS_IN_pasMask && pasHasGCptrs());
/* Count number of set bits in mask */
unsigned count = 0;
for (pasMaskType mask = 0x1, i = 0; i < BITS_IN_pasMask; mask <<= 1, i++)
{
if (mask & pasBottomMask)
count++;
}
return count + pasPtrsInTopArray;
}
//-----------------------------------------------------------------------------
// Initialize enumeration by passing in iter=pasENUM_START.
// Continue by passing in the return value as the new value of iter
// End of enumeration when pasENUM_END is returned
// If return value != pasENUM_END, *offs is set to the offset for GCinfo
// Enumerate the stack offsets of the pushed GC pointer args.
// Protocol: start with iter == pasENUM_START, feed each return value back
// in as the next 'iter', and stop when pasENUM_END is returned. On every
// return other than pasENUM_END, *offs holds the slot offset, with
// byref_OFFSET_FLAG OR'ed in for byref pointers.
unsigned PendingArgsStack::pasEnumGCoffs(unsigned iter, unsigned* offs)
{
    if (iter == pasENUM_LAST)
        return pasENUM_END;
    // Phase 1: walk the overflow array, from the top of the stack downward.
    unsigned i = (iter == pasENUM_START) ? pasDepth : iter;
    for (/**/; i > BITS_IN_pasMask; i--)
    {
        GCtype curArg = (GCtype)pasTopArray[i - BITS_IN_pasMask - 1];
        if (needsGC(curArg))
        {
            unsigned offset;
            offset = (pasDepth - i) * TARGET_POINTER_SIZE;
            if (curArg == GCT_BYREF)
                offset |= byref_OFFSET_FLAG;
            *offs = offset;
            return i - 1; // resume just below this slot on the next call
        }
    }
    if (!pasBottomMask)
        return pasENUM_END;
    // Have we already processed some of the bits in pasBottomMask ?
    i = (iter == pasENUM_START || iter >= BITS_IN_pasMask) ? 0 // no
                                                           : iter; // yes
    // Phase 2: walk the bottom mask from bit 'i' upward.
    for (pasMaskType mask = 0x1 << i; mask; i++, mask <<= 1)
    {
        if (mask & pasBottomMask)
        {
            unsigned lvl = (pasDepth > BITS_IN_pasMask) ? (pasDepth - BITS_IN_pasMask) : 0; // How many in pasTopArray[]
            lvl += i;
            unsigned offset;
            offset = lvl * TARGET_POINTER_SIZE;
            if (mask & pasByrefBottomMask)
                offset |= byref_OFFSET_FLAG;
            *offs = offset;
            // If higher bits remain, resume at the next bit; otherwise hand
            // back pasENUM_LAST so the following call terminates cleanly.
            unsigned remMask = -int(mask << 1);
            return ((pasBottomMask & remMask) ? (i + 1) : pasENUM_LAST);
        }
    }
    assert(!"Shouldnt reach here");
    return pasENUM_END;
}
/*****************************************************************************
*
* Generate the register pointer map, and return its total size in bytes. If
* 'mask' is 0, we don't actually store any data in 'dest' (except for one
* entry, which is never more than 10 bytes), so this can be used to merely
* compute the size of the table.
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset)
{
unsigned varNum;
LclVarDsc* varDsc;
size_t totalSize = 0;
unsigned lastOffset;
/* The mask should be all 0's or all 1's */
assert(mask == 0 || mask == -1);
/* Start computing the total size of the table */
bool emitArgTabOffset = (header.varPtrTableSize != 0 || header.untrackedCnt > SET_UNTRACKED_MAX);
if (mask != 0 && emitArgTabOffset)
{
assert(*pArgTabOffset <= MAX_UNSIGNED_SIZE_T);
unsigned sz = encodeUnsigned(dest, static_cast<unsigned>(*pArgTabOffset));
dest += sz;
totalSize += sz;
}
#if VERIFY_GC_TABLES
if (mask)
{
*(short*)dest = (short)0xBEEF;
dest += sizeof(short);
}
totalSize += sizeof(short);
#endif
/**************************************************************************
*
* Untracked ptr variables
*
**************************************************************************
*/
#if DEBUG
unsigned untrackedCount = 0;
unsigned varPtrTableSize = 0;
gcCountForHeader(&untrackedCount, &varPtrTableSize);
assert(untrackedCount == header.untrackedCnt);
assert(varPtrTableSize == header.varPtrTableSize);
#endif // DEBUG
if (header.untrackedCnt != 0)
{
// Write the table of untracked pointer variables.
int lastoffset = 0;
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
// Field local of a PROMOTION_TYPE_DEPENDENT struct must have been
// reported through its parent local
continue;
}
if (varTypeIsGC(varDsc->TypeGet()))
{
if (!gcIsUntrackedLocalOrNonEnregisteredArg(varNum))
{
continue;
}
int offset = varDsc->GetStackOffset();
#if DOUBLE_ALIGN
// For genDoubleAlign(), locals are addressed relative to ESP and
// arguments are addressed relative to EBP.
if (compiler->genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg)
offset += compiler->codeGen->genTotalFrameSize();
#endif
// The lower bits of the offset encode properties of the stk ptr
assert(~OFFSET_MASK % sizeof(offset) == 0);
if (varDsc->TypeGet() == TYP_BYREF)
{
// Or in byref_OFFSET_FLAG for 'byref' pointer tracking
offset |= byref_OFFSET_FLAG;
}
if (varDsc->lvPinned)
{
// Or in pinned_OFFSET_FLAG for 'pinned' pointer tracking
offset |= pinned_OFFSET_FLAG;
}
int encodedoffset = lastoffset - offset;
lastoffset = offset;
if (mask == 0)
totalSize += encodeSigned(NULL, encodedoffset);
else
{
unsigned sz = encodeSigned(dest, encodedoffset);
dest += sz;
totalSize += sz;
}
}
else if ((varDsc->TypeGet() == TYP_STRUCT) && varDsc->lvOnFrame && varDsc->HasGCPtr())
{
ClassLayout* layout = varDsc->GetLayout();
unsigned slots = layout->GetSlotCount();
for (unsigned i = 0; i < slots; i++)
{
if (!layout->IsGCPtr(i))
{
continue;
}
unsigned offset = varDsc->GetStackOffset() + i * TARGET_POINTER_SIZE;
#if DOUBLE_ALIGN
// For genDoubleAlign(), locals are addressed relative to ESP and
// arguments are addressed relative to EBP.
if (compiler->genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg)
{
offset += compiler->codeGen->genTotalFrameSize();
}
#endif
if (layout->GetGCPtrType(i) == TYP_BYREF)
{
offset |= byref_OFFSET_FLAG; // indicate it is a byref GC pointer
}
int encodedoffset = lastoffset - offset;
lastoffset = offset;
if (mask == 0)
{
totalSize += encodeSigned(NULL, encodedoffset);
}
else
{
unsigned sz = encodeSigned(dest, encodedoffset);
dest += sz;
totalSize += sz;
}
}
}
}
/* Count&Write spill temps that hold pointers */
assert(compiler->codeGen->regSet.tmpAllFree());
for (TempDsc* tempItem = compiler->codeGen->regSet.tmpListBeg(); tempItem != nullptr;
tempItem = compiler->codeGen->regSet.tmpListNxt(tempItem))
{
if (varTypeIsGC(tempItem->tdTempType()))
{
{
int offset;
offset = tempItem->tdTempOffs();
if (tempItem->tdTempType() == TYP_BYREF)
{
offset |= byref_OFFSET_FLAG;
}
int encodedoffset = lastoffset - offset;
lastoffset = offset;
if (mask == 0)
{
totalSize += encodeSigned(NULL, encodedoffset);
}
else
{
unsigned sz = encodeSigned(dest, encodedoffset);
dest += sz;
totalSize += sz;
}
}
}
}
}
#if VERIFY_GC_TABLES
if (mask)
{
*(short*)dest = (short)0xCAFE;
dest += sizeof(short);
}
totalSize += sizeof(short);
#endif
/**************************************************************************
*
* Generate the table of stack pointer variable lifetimes.
*
**************************************************************************
*/
bool keepThisAlive = false;
if (!compiler->info.compIsStatic)
{
unsigned thisArgNum = compiler->info.compThisArg;
gcIsUntrackedLocalOrNonEnregisteredArg(thisArgNum, &keepThisAlive);
}
// First we check for the most common case - no lifetimes at all.
if (header.varPtrTableSize != 0)
{
#if !defined(FEATURE_EH_FUNCLETS)
if (keepThisAlive)
{
// Encoding of untracked variables does not support reporting
// "this". So report it as a tracked variable with a liveness
// extending over the entire method.
assert(compiler->lvaTable[compiler->info.compThisArg].TypeGet() == TYP_REF);
unsigned varOffs = compiler->lvaTable[compiler->info.compThisArg].GetStackOffset();
/* For negative stack offsets we must reset the low bits,
* take abs and then set them back */
varOffs = abs(static_cast<int>(varOffs));
varOffs |= this_OFFSET_FLAG;
size_t sz = 0;
sz = encodeUnsigned(mask ? (dest + sz) : NULL, varOffs);
sz += encodeUDelta(mask ? (dest + sz) : NULL, 0, 0);
sz += encodeUDelta(mask ? (dest + sz) : NULL, codeSize, 0);
dest += (sz & mask);
totalSize += sz;
}
#endif // !FEATURE_EH_FUNCLETS
/* We'll use a delta encoding for the lifetime offsets */
lastOffset = 0;
for (varPtrDsc* varTmp = gcVarPtrList; varTmp; varTmp = varTmp->vpdNext)
{
unsigned varOffs;
unsigned lowBits;
unsigned begOffs;
unsigned endOffs;
assert(~OFFSET_MASK % TARGET_POINTER_SIZE == 0);
/* Get hold of the variable's stack offset */
lowBits = varTmp->vpdVarNum & OFFSET_MASK;
/* For negative stack offsets we must reset the low bits,
* take abs and then set them back */
varOffs = abs(static_cast<int>(varTmp->vpdVarNum & ~OFFSET_MASK));
varOffs |= lowBits;
/* Compute the actual lifetime offsets */
begOffs = varTmp->vpdBegOfs;
endOffs = varTmp->vpdEndOfs;
/* Special case: skip any 0-length lifetimes */
if (endOffs == begOffs)
continue;
/* Are we counting or generating? */
size_t sz = 0;
sz = encodeUnsigned(mask ? (dest + sz) : NULL, varOffs);
sz += encodeUDelta(mask ? (dest + sz) : NULL, begOffs, lastOffset);
sz += encodeUDelta(mask ? (dest + sz) : NULL, endOffs, begOffs);
dest += (sz & mask);
totalSize += sz;
/* The next entry will be relative to the one we just processed */
lastOffset = begOffs;
}
}
if (pArgTabOffset != NULL)
*pArgTabOffset = totalSize;
#if VERIFY_GC_TABLES
if (mask)
{
*(short*)dest = (short)0xBABE;
dest += sizeof(short);
}
totalSize += sizeof(short);
#endif
if (!mask && emitArgTabOffset)
{
assert(*pArgTabOffset <= MAX_UNSIGNED_SIZE_T);
totalSize += encodeUnsigned(NULL, static_cast<unsigned>(*pArgTabOffset));
}
/**************************************************************************
*
* Prepare to generate the pointer register/argument map
*
**************************************************************************
*/
lastOffset = 0;
if (compiler->codeGen->GetInterruptible())
{
#ifdef TARGET_X86
assert(compiler->IsFullPtrRegMapRequired());
unsigned ptrRegs = 0;
regPtrDsc* genRegPtrTemp;
/* Walk the list of pointer register/argument entries */
for (genRegPtrTemp = gcRegPtrList; genRegPtrTemp; genRegPtrTemp = genRegPtrTemp->rpdNext)
{
BYTE* base = dest;
unsigned nextOffset;
DWORD codeDelta;
nextOffset = genRegPtrTemp->rpdOffs;
/*
Encoding table for methods that are fully interruptible
The encoding used is as follows:
ptr reg dead 00RRRDDD [RRR != 100]
ptr reg live 01RRRDDD [RRR != 100]
non-ptr arg push 10110DDD [SSS == 110]
ptr arg push 10SSSDDD [SSS != 110] && [SSS != 111]
ptr arg pop 11CCCDDD [CCC != 000] && [CCC != 110] && [CCC != 111]
little skip 11000DDD [CCC == 000]
bigger skip 11110BBB [CCC == 110]
The values used in the above encodings are as follows:
DDD code offset delta from previous entry (0-7)
BBB bigger delta 000=8,001=16,010=24,...,111=64
RRR register number (EAX=000,ECX=001,EDX=010,EBX=011,
EBP=101,ESI=110,EDI=111), ESP=100 is reserved
SSS argument offset from base of stack. This is
redundant for frameless methods as we can
infer it from the previous pushes+pops. However,
for EBP-methods, we only report GC pushes, and
so we need SSS
CCC argument count being popped (includes only ptrs for EBP methods)
The following are the 'large' versions:
large delta skip 10111000 [0xB8] , encodeUnsigned(delta)
large ptr arg push 11111000 [0xF8] , encodeUnsigned(pushCount)
large non-ptr arg push 11111001 [0xF9] , encodeUnsigned(pushCount)
large ptr arg pop 11111100 [0xFC] , encodeUnsigned(popCount)
large arg dead 11111101 [0xFD] , encodeUnsigned(popCount) for caller-pop args.
Any GC args go dead after the call,
but are still sitting on the stack
this pointer prefix 10111100 [0xBC] the next encoding is a ptr live
or a ptr arg push
and contains the this pointer
interior or by-ref 10111111 [0xBF] the next encoding is a ptr live
pointer prefix or a ptr arg push
and contains an interior
or by-ref pointer
The value 11111111 [0xFF] indicates the end of the table.
*/
codeDelta = nextOffset - lastOffset;
assert((int)codeDelta >= 0);
// If the code delta is between 8 and (64+7),
// generate a 'bigger delta' encoding
if ((codeDelta >= 8) && (codeDelta <= (64 + 7)))
{
unsigned biggerDelta = ((codeDelta - 8) & 0x38) + 8;
*dest++ = 0xF0 | ((biggerDelta - 8) >> 3);
lastOffset += biggerDelta;
codeDelta &= 0x07;
}
// If the code delta is still bigger than 7,
// generate a 'large code delta' encoding
if (codeDelta > 7)
{
*dest++ = 0xB8;
dest += encodeUnsigned(dest, codeDelta);
codeDelta = 0;
/* Remember the new 'last' offset */
lastOffset = nextOffset;
}
/* Is this a pointer argument or register entry? */
if (genRegPtrTemp->rpdArg)
{
if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_KILL)
{
if (codeDelta)
{
/*
Use the small encoding:
little delta skip 11000DDD [0xC0]
*/
assert((codeDelta & 0x7) == codeDelta);
*dest++ = 0xC0 | (BYTE)codeDelta;
/* Remember the new 'last' offset */
lastOffset = nextOffset;
}
/* Caller-pop arguments are dead after call but are still
sitting on the stack */
*dest++ = 0xFD;
assert(genRegPtrTemp->rpdPtrArg != 0);
dest += encodeUnsigned(dest, genRegPtrTemp->rpdPtrArg);
}
else if (genRegPtrTemp->rpdPtrArg < 6 && genRegPtrTemp->rpdGCtypeGet())
{
/* Is the argument offset/count smaller than 6 ? */
dest = gceByrefPrefixI(genRegPtrTemp, dest);
if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_PUSH || (genRegPtrTemp->rpdPtrArg != 0))
{
/*
Use the small encoding:
ptr arg push 10SSSDDD [SSS != 110] && [SSS != 111]
ptr arg pop 11CCCDDD [CCC != 110] && [CCC != 111]
*/
bool isPop = genRegPtrTemp->rpdArgTypeGet() == rpdARG_POP;
*dest++ = 0x80 | (BYTE)codeDelta | genRegPtrTemp->rpdPtrArg << 3 | isPop << 6;
/* Remember the new 'last' offset */
lastOffset = nextOffset;
}
else
{
assert(!"Check this");
}
}
else if (genRegPtrTemp->rpdGCtypeGet() == GCT_NONE)
{
/*
Use the small encoding:
` non-ptr arg push 10110DDD [0xB0] (push of sizeof(int))
*/
assert((codeDelta & 0x7) == codeDelta);
*dest++ = 0xB0 | (BYTE)codeDelta;
#ifndef UNIX_X86_ABI
assert(!compiler->isFramePointerUsed());
#endif
/* Remember the new 'last' offset */
lastOffset = nextOffset;
}
else
{
/* Will have to use large encoding;
* first do the code delta
*/
if (codeDelta)
{
/*
Use the small encoding:
little delta skip 11000DDD [0xC0]
*/
assert((codeDelta & 0x7) == codeDelta);
*dest++ = 0xC0 | (BYTE)codeDelta;
}
/*
Now append a large argument record:
large ptr arg push 11111000 [0xF8]
large ptr arg pop 11111100 [0xFC]
*/
bool isPop = genRegPtrTemp->rpdArgTypeGet() == rpdARG_POP;
dest = gceByrefPrefixI(genRegPtrTemp, dest);
*dest++ = 0xF8 | (isPop << 2);
dest += encodeUnsigned(dest, genRegPtrTemp->rpdPtrArg);
/* Remember the new 'last' offset */
lastOffset = nextOffset;
}
}
else
{
unsigned regMask;
/* Record any registers that are becoming dead */
regMask = genRegPtrTemp->rpdCompiler.rpdDel & ptrRegs;
while (regMask) // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
{
unsigned tmpMask;
regNumber regNum;
/* Get hold of the next register bit */
tmpMask = genFindLowestReg(regMask);
assert(tmpMask);
/* Remember the new state of this register */
ptrRegs &= ~tmpMask;
/* Figure out which register the next bit corresponds to */
regNum = genRegNumFromMask(tmpMask);
assert(regNum <= 7);
/* Reserve ESP, regNum==4 for future use */
assert(regNum != 4);
/*
Generate a small encoding:
ptr reg dead 00RRRDDD
*/
assert((codeDelta & 0x7) == codeDelta);
*dest++ = 0x00 | regNum << 3 | (BYTE)codeDelta;
/* Turn the bit we've just generated off and continue */
regMask -= tmpMask; // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
/* Remember the new 'last' offset */
lastOffset = nextOffset;
/* Any entries that follow will be at the same offset */
codeDelta = zeroFunc(); /* DO NOT REMOVE */
}
/* Record any registers that are becoming live */
regMask = genRegPtrTemp->rpdCompiler.rpdAdd & ~ptrRegs;
while (regMask) // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
{
unsigned tmpMask;
regNumber regNum;
/* Get hold of the next register bit */
tmpMask = genFindLowestReg(regMask);
assert(tmpMask);
/* Remember the new state of this register */
ptrRegs |= tmpMask;
/* Figure out which register the next bit corresponds to */
regNum = genRegNumFromMask(tmpMask);
assert(regNum <= 7);
/*
Generate a small encoding:
ptr reg live 01RRRDDD
*/
dest = gceByrefPrefixI(genRegPtrTemp, dest);
if (!keepThisAlive && genRegPtrTemp->rpdIsThis)
{
// Mark with 'this' pointer prefix
*dest++ = 0xBC;
// Can only have one bit set in regMask
assert(regMask == tmpMask);
}
assert((codeDelta & 0x7) == codeDelta);
*dest++ = 0x40 | (regNum << 3) | (BYTE)codeDelta;
/* Turn the bit we've just generated off and continue */
regMask -= tmpMask; // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
/* Remember the new 'last' offset */
lastOffset = nextOffset;
/* Any entries that follow will be at the same offset */
codeDelta = zeroFunc(); /* DO NOT REMOVE */
}
}
/* Keep track of the total amount of generated stuff */
totalSize += dest - base;
/* Go back to the buffer start if we're not generating a table */
if (!mask)
dest = base;
}
#endif // TARGET_X86
/* Terminate the table with 0xFF */
*dest = 0xFF;
dest -= mask;
totalSize++;
}
else if (compiler->isFramePointerUsed()) // GetInterruptible() is false
{
#ifdef TARGET_X86
/*
Encoding table for methods with an EBP frame and
that are not fully interruptible
The encoding used is as follows:
this pointer encodings:
01000000 this pointer in EBX
00100000 this pointer in ESI
00010000 this pointer in EDI
tiny encoding:
0bsdDDDD
requires code delta > 0 & delta < 16 (4-bits)
requires pushed argmask == 0
where DDDD is code delta
b indicates that register EBX is a live pointer
s indicates that register ESI is a live pointer
d indicates that register EDI is a live pointer
small encoding:
1DDDDDDD bsdAAAAA
requires code delta < 120 (7-bits)
requires pushed argmask < 64 (5-bits)
where DDDDDDD is code delta
AAAAA is the pushed args mask
b indicates that register EBX is a live pointer
s indicates that register ESI is a live pointer
d indicates that register EDI is a live pointer
medium encoding
0xFD aaaaaaaa AAAAdddd bseDDDDD
requires code delta < 512 (9-bits)
requires pushed argmask < 2048 (12-bits)
where DDDDD is the upper 5-bits of the code delta
dddd is the low 4-bits of the code delta
AAAA is the upper 4-bits of the pushed arg mask
aaaaaaaa is the low 8-bits of the pushed arg mask
b indicates that register EBX is a live pointer
s indicates that register ESI is a live pointer
e indicates that register EDI is a live pointer
medium encoding with interior pointers
0xF9 DDDDDDDD bsdAAAAAA iiiIIIII
requires code delta < 256 (8-bits)
requires pushed argmask < 64 (5-bits)
where DDDDDDD is the code delta
b indicates that register EBX is a live pointer
s indicates that register ESI is a live pointer
d indicates that register EDI is a live pointer
AAAAA is the pushed arg mask
iii indicates that EBX,EDI,ESI are interior pointers
IIIII indicates that bits in the arg mask are interior
pointers
large encoding
0xFE [0BSD0bsd][32-bit code delta][32-bit argMask]
b indicates that register EBX is a live pointer
s indicates that register ESI is a live pointer
d indicates that register EDI is a live pointer
B indicates that register EBX is an interior pointer
S indicates that register ESI is an interior pointer
D indicates that register EDI is an interior pointer
requires pushed argmask < 32-bits
large encoding with interior pointers
0xFA [0BSD0bsd][32-bit code delta][32-bit argMask][32-bit interior pointer mask]
b indicates that register EBX is a live pointer
s indicates that register ESI is a live pointer
d indicates that register EDI is a live pointer
B indicates that register EBX is an interior pointer
S indicates that register ESI is an interior pointer
D indicates that register EDI is an interior pointer
requires pushed argmask < 32-bits
requires pushed iArgmask < 32-bits
huge encoding This is the only encoding that supports
a pushed argmask which is greater than
32-bits.
0xFB [0BSD0bsd][32-bit code delta]
[32-bit table count][32-bit table size]
[pushed ptr offsets table...]
b indicates that register EBX is a live pointer
s indicates that register ESI is a live pointer
d indicates that register EDI is a live pointer
B indicates that register EBX is an interior pointer
S indicates that register ESI is an interior pointer
D indicates that register EDI is an interior pointer
the list count is the number of entries in the list
the list size gives the byte-length of the list
the offsets in the list are variable-length
*/
/* If "this" is enregistered, note it. We do this explicitly here as
IsFullPtrRegMapRequired()==false, and so we don't have any regPtrDsc's. */
if (compiler->lvaKeepAliveAndReportThis() && compiler->lvaTable[compiler->info.compThisArg].lvRegister)
{
unsigned thisRegMask = genRegMask(compiler->lvaTable[compiler->info.compThisArg].GetRegNum());
unsigned thisPtrRegEnc = gceEncodeCalleeSavedRegs(thisRegMask) << 4;
if (thisPtrRegEnc)
{
totalSize += 1;
if (mask)
*dest++ = thisPtrRegEnc;
}
}
CallDsc* call;
assert(compiler->IsFullPtrRegMapRequired() == false);
/* Walk the list of pointer register/argument entries */
for (call = gcCallDescList; call; call = call->cdNext)
{
BYTE* base = dest;
unsigned nextOffset;
/* Figure out the code offset of this entry */
nextOffset = call->cdOffs;
/* Compute the distance from the previous call */
DWORD codeDelta = nextOffset - lastOffset;
assert((int)codeDelta >= 0);
/* Remember the new 'last' offset */
lastOffset = nextOffset;
/* Compute the register mask */
unsigned gcrefRegMask = 0;
unsigned byrefRegMask = 0;
gcrefRegMask |= gceEncodeCalleeSavedRegs(call->cdGCrefRegs);
byrefRegMask |= gceEncodeCalleeSavedRegs(call->cdByrefRegs);
assert((gcrefRegMask & byrefRegMask) == 0);
unsigned regMask = gcrefRegMask | byrefRegMask;
bool byref = (byrefRegMask | call->u1.cdByrefArgMask) != 0;
/* Check for the really large argument offset case */
/* The very rare Huge encodings */
if (call->cdArgCnt)
{
unsigned argNum;
DWORD argCnt = call->cdArgCnt;
DWORD argBytes = 0;
BYTE* pArgBytes = DUMMY_INIT(NULL);
if (mask != 0)
{
*dest++ = 0xFB;
*dest++ = (byrefRegMask << 4) | regMask;
*(DWORD*)dest = codeDelta;
dest += sizeof(DWORD);
*(DWORD*)dest = argCnt;
dest += sizeof(DWORD);
// skip the byte-size for now. Just note where it will go
pArgBytes = dest;
dest += sizeof(DWORD);
}
for (argNum = 0; argNum < argCnt; argNum++)
{
unsigned eltSize;
eltSize = encodeUnsigned(dest, call->cdArgTable[argNum]);
argBytes += eltSize;
if (mask)
dest += eltSize;
}
if (mask == 0)
{
dest = base + 1 + 1 + 3 * sizeof(DWORD) + argBytes;
}
else
{
assert(dest == pArgBytes + sizeof(argBytes) + argBytes);
*(DWORD*)pArgBytes = argBytes;
}
}
/* Check if we can use a tiny encoding */
else if ((codeDelta < 16) && (codeDelta != 0) && (call->u1.cdArgMask == 0) && !byref)
{
*dest++ = (regMask << 4) | (BYTE)codeDelta;
}
/* Check if we can use the small encoding */
else if ((codeDelta < 0x79) && (call->u1.cdArgMask <= 0x1F) && !byref)
{
*dest++ = 0x80 | (BYTE)codeDelta;
*dest++ = call->u1.cdArgMask | (regMask << 5);
}
/* Check if we can use the medium encoding */
else if (codeDelta <= 0x01FF && call->u1.cdArgMask <= 0x0FFF && !byref)
{
*dest++ = 0xFD;
*dest++ = call->u1.cdArgMask;
*dest++ = ((call->u1.cdArgMask >> 4) & 0xF0) | ((BYTE)codeDelta & 0x0F);
*dest++ = (regMask << 5) | (BYTE)((codeDelta >> 4) & 0x1F);
}
/* Check if we can use the medium encoding with byrefs */
else if (codeDelta <= 0x0FF && call->u1.cdArgMask <= 0x01F)
{
*dest++ = 0xF9;
*dest++ = (BYTE)codeDelta;
*dest++ = (regMask << 5) | call->u1.cdArgMask;
*dest++ = (byrefRegMask << 5) | call->u1.cdByrefArgMask;
}
/* We'll use the large encoding */
else if (!byref)
{
*dest++ = 0xFE;
*dest++ = (byrefRegMask << 4) | regMask;
*(DWORD*)dest = codeDelta;
dest += sizeof(DWORD);
*(DWORD*)dest = call->u1.cdArgMask;
dest += sizeof(DWORD);
}
/* We'll use the large encoding with byrefs */
else
{
*dest++ = 0xFA;
*dest++ = (byrefRegMask << 4) | regMask;
*(DWORD*)dest = codeDelta;
dest += sizeof(DWORD);
*(DWORD*)dest = call->u1.cdArgMask;
dest += sizeof(DWORD);
*(DWORD*)dest = call->u1.cdByrefArgMask;
dest += sizeof(DWORD);
}
/* Keep track of the total amount of generated stuff */
totalSize += dest - base;
/* Go back to the buffer start if we're not generating a table */
if (!mask)
dest = base;
}
#endif // TARGET_X86
/* Terminate the table with 0xFF */
*dest = 0xFF;
dest -= mask;
totalSize++;
}
else // GetInterruptible() is false and we have an EBP-less frame
{
assert(compiler->IsFullPtrRegMapRequired());
#ifdef TARGET_X86
regPtrDsc* genRegPtrTemp;
regNumber thisRegNum = regNumber(0);
PendingArgsStack pasStk(compiler->GetEmitter()->emitMaxStackDepth, compiler);
/* Walk the list of pointer register/argument entries */
for (genRegPtrTemp = gcRegPtrList; genRegPtrTemp; genRegPtrTemp = genRegPtrTemp->rpdNext)
{
/*
* Encoding table for methods without an EBP frame and
* that are not fully interruptible
*
* The encoding used is as follows:
*
* push 000DDDDD ESP push one item with 5-bit delta
* push 00100000 [pushCount] ESP push multiple items
* reserved 0010xxxx xxxx != 0000
* reserved 0011xxxx
* skip 01000000 [Delta] Skip Delta, arbitrary sized delta
* skip 0100DDDD Skip small Delta, for call (DDDD != 0)
* pop 01CCDDDD ESP pop CC items with 4-bit delta (CC != 00)
* call 1PPPPPPP Call Pattern, P=[0..79]
* call 1101pbsd DDCCCMMM Call RegMask=pbsd,ArgCnt=CCC,
* ArgMask=MMM Delta=commonDelta[DD]
* call 1110pbsd [ArgCnt] [ArgMask] Call ArgCnt,RegMask=pbsd,ArgMask
* call 11111000 [PBSDpbsd][32-bit delta][32-bit ArgCnt]
* [32-bit PndCnt][32-bit PndSize][PndOffs...]
* iptr 11110000 [IPtrMask] Arbitrary Interior Pointer Mask
* thisptr 111101RR This pointer is in Register RR
* 00=EDI,01=ESI,10=EBX,11=EBP
* reserved 111100xx xx != 00
* reserved 111110xx xx != 00
* reserved 11111xxx xxx != 000 && xxx != 111(EOT)
*
* The value 11111111 [0xFF] indicates the end of the table. (EOT)
*
* An offset (at which stack-walking is performed) without an explicit encoding
* is assumed to be a trivial call-site (no GC registers, stack empty before and
* after) to avoid having to encode all trivial calls.
*
* Note on the encoding used for interior pointers
*
* The iptr encoding must immediately precede a call encoding. It is used
* to transform a normal GC pointer addresses into an interior pointers for
* GC purposes. The mask supplied to the iptr encoding is read from the
* least significant bit to the most significant bit. (i.e. the lowest bit is
* read first)
*
* p indicates that register EBP is a live pointer
* b indicates that register EBX is a live pointer
* s indicates that register ESI is a live pointer
* d indicates that register EDI is a live pointer
* P indicates that register EBP is an interior pointer
* B indicates that register EBX is an interior pointer
* S indicates that register ESI is an interior pointer
* D indicates that register EDI is an interior pointer
*
* As an example the following sequence indicates that EDI.ESI and the
* second pushed pointer in ArgMask are really interior pointers. The
* pointer in ESI in a normal pointer:
*
* iptr 11110000 00010011 => read Interior Ptr, Interior Ptr,
* Normal Ptr, Normal Ptr, Interior Ptr
*
* call 11010011 DDCCC011 RRRR=1011 => read EDI is a GC-pointer,
* ESI is a GC-pointer.
* EBP is a GC-pointer
* MMM=0011 => read two GC-pointers arguments
* on the stack (nested call)
*
* Since the call instruction mentions 5 GC-pointers we list them in
* the required order: EDI, ESI, EBP, 1st-pushed pointer, 2nd-pushed pointer
*
* And we apply the Interior Pointer mask mmmm=10011 to the five GC-pointers
* we learn that EDI and ESI are interior GC-pointers and that
* the second push arg is an interior GC-pointer.
*/
BYTE* base = dest;
bool usePopEncoding;
unsigned regMask;
unsigned argMask;
unsigned byrefRegMask;
unsigned byrefArgMask;
DWORD callArgCnt;
unsigned nextOffset;
DWORD codeDelta;
nextOffset = genRegPtrTemp->rpdOffs;
/* Compute the distance from the previous call */
codeDelta = nextOffset - lastOffset;
assert((int)codeDelta >= 0);
#if REGEN_CALLPAT
// Must initialize this flag to true when REGEN_CALLPAT is on
usePopEncoding = true;
unsigned origCodeDelta = codeDelta;
#endif
if (!keepThisAlive && genRegPtrTemp->rpdIsThis)
{
unsigned tmpMask = genRegPtrTemp->rpdCompiler.rpdAdd;
/* tmpMask must have exactly one bit set */
assert(tmpMask && ((tmpMask & (tmpMask - 1)) == 0));
thisRegNum = genRegNumFromMask(tmpMask);
switch (thisRegNum)
{
case 0: // EAX
case 1: // ECX
case 2: // EDX
case 4: // ESP
break;
case 7: // EDI
*dest++ = 0xF4; /* 11110100 This pointer is in EDI */
break;
case 6: // ESI
    *dest++ = 0xF5; /* 11110101 This pointer is in ESI */
    break;
case 3: // EBX
    *dest++ = 0xF6; /* 11110110 This pointer is in EBX */
    break;
case 5: // EBP
    *dest++ = 0xF7; /* 11110111 This pointer is in EBP */
    break;
default:
break;
}
}
/* Is this a stack pointer change or call? */
if (genRegPtrTemp->rpdArg)
{
if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_KILL)
{
// kill 'rpdPtrArg' number of pointer variables in pasStk
pasStk.pasKill(genRegPtrTemp->rpdPtrArg);
}
/* Is this a call site? */
else if (genRegPtrTemp->rpdCall)
{
/* This is a true call site */
/* Remember the new 'last' offset */
lastOffset = nextOffset;
callArgCnt = genRegPtrTemp->rpdPtrArg;
unsigned gcrefRegMask = genRegPtrTemp->rpdCallGCrefRegs;
byrefRegMask = genRegPtrTemp->rpdCallByrefRegs;
assert((gcrefRegMask & byrefRegMask) == 0);
regMask = gcrefRegMask | byrefRegMask;
/* adjust argMask for this call-site */
pasStk.pasPop(callArgCnt);
/* Do we have to use the fat encoding */
if (pasStk.pasCurDepth() > BITS_IN_pasMask && pasStk.pasHasGCptrs())
{
/* use fat encoding:
* 11111000 [PBSDpbsd][32-bit delta][32-bit ArgCnt]
* [32-bit PndCnt][32-bit PndSize][PndOffs...]
*/
DWORD pndCount = pasStk.pasEnumGCoffsCount();
DWORD pndSize = 0;
BYTE* pPndSize = DUMMY_INIT(NULL);
if (mask)
{
*dest++ = 0xF8;
*dest++ = (byrefRegMask << 4) | regMask;
*(DWORD*)dest = codeDelta;
dest += sizeof(DWORD);
*(DWORD*)dest = callArgCnt;
dest += sizeof(DWORD);
*(DWORD*)dest = pndCount;
dest += sizeof(DWORD);
pPndSize = dest;
dest += sizeof(DWORD); // Leave space for pndSize
}
unsigned offs, iter;
for (iter = pasStk.pasEnumGCoffs(pasENUM_START, &offs); pndCount;
iter = pasStk.pasEnumGCoffs(iter, &offs), pndCount--)
{
unsigned eltSize = encodeUnsigned(dest, offs);
pndSize += eltSize;
if (mask)
dest += eltSize;
}
assert(iter == pasENUM_END);
if (mask == 0)
{
dest = base + 2 + 4 * sizeof(DWORD) + pndSize;
}
else
{
assert(pPndSize + sizeof(pndSize) + pndSize == dest);
*(DWORD*)pPndSize = pndSize;
}
goto NEXT_RPD;
}
argMask = byrefArgMask = 0;
if (pasStk.pasHasGCptrs())
{
assert(pasStk.pasCurDepth() <= BITS_IN_pasMask);
argMask = pasStk.pasArgMask();
byrefArgMask = pasStk.pasByrefArgMask();
}
/* Shouldn't be reporting trivial call-sites */
assert(regMask || argMask || callArgCnt || pasStk.pasCurDepth());
// Emit IPtrMask if needed
#define CHK_NON_INTRPT_ESP_IPtrMask \
\
if (byrefRegMask || byrefArgMask) \
{ \
*dest++ = 0xF0; \
unsigned imask = (byrefArgMask << 4) | byrefRegMask; \
dest += encodeUnsigned(dest, imask); \
}
/* When usePopEncoding is true:
* this is not an interesting call site
* because nothing is live here.
*/
usePopEncoding = ((callArgCnt < 4) && (regMask == 0) && (argMask == 0));
if (!usePopEncoding)
{
int pattern = lookupCallPattern(callArgCnt, regMask, argMask, codeDelta);
if (pattern != -1)
{
if (pattern > 0xff)
{
codeDelta = pattern >> 8;
pattern &= 0xff;
if (codeDelta >= 16)
{
/* use encoding: */
/* skip 01000000 [Delta] */
*dest++ = 0x40;
dest += encodeUnsigned(dest, codeDelta);
codeDelta = 0;
}
else
{
/* use encoding: */
/* skip 0100DDDD small delta=DDDD */
*dest++ = 0x40 | (BYTE)codeDelta;
}
}
// Emit IPtrMask if needed
CHK_NON_INTRPT_ESP_IPtrMask;
assert((pattern >= 0) && (pattern < 80));
*dest++ = 0x80 | pattern;
goto NEXT_RPD;
}
/* See if we can use 2nd call encoding
* 1101RRRR DDCCCMMM encoding */
if ((callArgCnt <= 7) && (argMask <= 7))
{
unsigned inx; // callCommonDelta[] index
unsigned maxCommonDelta = callCommonDelta[3];
if (codeDelta > maxCommonDelta)
{
if (codeDelta > maxCommonDelta + 15)
{
/* use encoding: */
/* skip 01000000 [Delta] */
*dest++ = 0x40;
dest += encodeUnsigned(dest, codeDelta - maxCommonDelta);
}
else
{
/* use encoding: */
/* skip 0100DDDD small delta=DDDD */
*dest++ = 0x40 | (BYTE)(codeDelta - maxCommonDelta);
}
codeDelta = maxCommonDelta;
inx = 3;
goto EMIT_2ND_CALL_ENCODING;
}
for (inx = 0; inx < 4; inx++)
{
if (codeDelta == callCommonDelta[inx])
{
EMIT_2ND_CALL_ENCODING:
// Emit IPtrMask if needed
CHK_NON_INTRPT_ESP_IPtrMask;
*dest++ = 0xD0 | regMask;
*dest++ = (inx << 6) | (callArgCnt << 3) | argMask;
goto NEXT_RPD;
}
}
unsigned minCommonDelta = callCommonDelta[0];
if ((codeDelta > minCommonDelta) && (codeDelta < maxCommonDelta))
{
assert((minCommonDelta + 16) > maxCommonDelta);
/* use encoding: */
/* skip 0100DDDD small delta=DDDD */
*dest++ = 0x40 | (BYTE)(codeDelta - minCommonDelta);
codeDelta = minCommonDelta;
inx = 0;
goto EMIT_2ND_CALL_ENCODING;
}
}
}
if (codeDelta >= 16)
{
unsigned i = (usePopEncoding ? 15 : 0);
/* use encoding: */
/* skip 01000000 [Delta] arbitrary sized delta */
*dest++ = 0x40;
dest += encodeUnsigned(dest, codeDelta - i);
codeDelta = i;
}
if ((codeDelta > 0) || usePopEncoding)
{
if (usePopEncoding)
{
/* use encoding: */
/* pop 01CCDDDD ESP pop CC items, 4-bit delta */
if (callArgCnt || codeDelta)
*dest++ = (BYTE)(0x40 | (callArgCnt << 4) | codeDelta);
goto NEXT_RPD;
}
else
{
/* use encoding: */
/* skip 0100DDDD small delta=DDDD */
*dest++ = 0x40 | (BYTE)codeDelta;
}
}
// Emit IPtrMask if needed
CHK_NON_INTRPT_ESP_IPtrMask;
/* use encoding: */
/* call 1110RRRR [ArgCnt] [ArgMask] */
*dest++ = 0xE0 | regMask;
dest += encodeUnsigned(dest, callArgCnt);
dest += encodeUnsigned(dest, argMask);
}
else
{
/* This is a push or a pop site */
/* Remember the new 'last' offset */
lastOffset = nextOffset;
if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_POP)
{
/* This must be a gcArgPopSingle */
assert(genRegPtrTemp->rpdPtrArg == 1);
if (codeDelta >= 16)
{
/* use encoding: */
/* skip 01000000 [Delta] */
*dest++ = 0x40;
dest += encodeUnsigned(dest, codeDelta - 15);
codeDelta = 15;
}
/* use encoding: */
/* pop1 0101DDDD ESP pop one item, 4-bit delta */
*dest++ = 0x50 | (BYTE)codeDelta;
/* adjust argMask for this pop */
pasStk.pasPop(1);
}
else
{
/* This is a push */
if (codeDelta >= 32)
{
/* use encoding: */
/* skip 01000000 [Delta] */
*dest++ = 0x40;
dest += encodeUnsigned(dest, codeDelta - 31);
codeDelta = 31;
}
assert(codeDelta < 32);
/* use encoding: */
/* push 000DDDDD ESP push one item, 5-bit delta */
*dest++ = (BYTE)codeDelta;
/* adjust argMask for this push */
pasStk.pasPush(genRegPtrTemp->rpdGCtypeGet());
}
}
}
/* We ignore the register live/dead information, since the
* rpdCallRegMask contains all the liveness information
* that we need
*/
NEXT_RPD:
totalSize += dest - base;
/* Go back to the buffer start if we're not generating a table */
if (!mask)
dest = base;
#if REGEN_CALLPAT
if ((mask == -1) && (usePopEncoding == false) && ((dest - base) > 0))
regenLog(origCodeDelta, argMask, regMask, callArgCnt, byrefArgMask, byrefRegMask, base, (dest - base));
#endif
}
/* Verify that we pop every arg that was pushed and that argMask is 0 */
assert(pasStk.pasCurDepth() == 0);
#endif // TARGET_X86
/* Terminate the table with 0xFF */
*dest = 0xFF;
dest -= mask;
totalSize++;
}
#if VERIFY_GC_TABLES
if (mask)
{
*(short*)dest = (short)0xBEEB;
dest += sizeof(short);
}
totalSize += sizeof(short);
#endif
#if MEASURE_PTRTAB_SIZE
if (mask)
s_gcTotalPtrTabSize += totalSize;
#endif
return totalSize;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
/*****************************************************************************/
#if DUMP_GC_TABLES
/*****************************************************************************
*
* Dump the contents of a GC pointer table.
*/
#include "gcdump.h"
#if VERIFY_GC_TABLES
const bool verifyGCTables = true;
#else
const bool verifyGCTables = false;
#endif
/*****************************************************************************
*
* Dump the info block header.
*/
//------------------------------------------------------------------------
// gcInfoBlockHdrDump: Dump (and decode) the GC info block header at "table".
//
// Arguments:
//    table      - raw bytes of the encoded info header
//    header     - [out] receives the decoded header
//    methodSize - [out] receives the method code size read from the header
//
// Return Value:
//    Number of bytes of "table" consumed by the header.
//
size_t GCInfo::gcInfoBlockHdrDump(const BYTE* table, InfoHdr* header, unsigned* methodSize)
{
    printf("Method info block:\n");

    GCDump dumper(GCINFO_VERSION);
    dumper.gcPrintf = gcDump_logf; // route dump output through the VM-logging printf
    return dumper.DumpInfoHdr(table, header, methodSize, verifyGCTables);
}
/*****************************************************************************/
//------------------------------------------------------------------------
// gcDumpPtrTable: Dump (and decode) the GC pointer table at "table".
//
// Arguments:
//    table      - raw bytes of the encoded pointer table
//    header     - the already-decoded info header the table belongs to
//    methodSize - size of the method's code, in bytes
//
// Return Value:
//    Number of bytes of "table" consumed by the pointer table.
//
size_t GCInfo::gcDumpPtrTable(const BYTE* table, const InfoHdr& header, unsigned methodSize)
{
    printf("Pointer table:\n");

    GCDump dumper(GCINFO_VERSION);
    dumper.gcPrintf = gcDump_logf; // route dump output through the VM-logging printf
    return dumper.DumpGCTable(table, header, methodSize, verifyGCTables);
}
/*****************************************************************************
*
* Find all the live pointers in a stack frame.
*/
void GCInfo::gcFindPtrsInFrame(const void* infoBlock, const void* codeBlock, unsigned offs)
{
GCDump gcDump(GCINFO_VERSION);
gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM)
gcDump.DumpPtrsInFrame((PTR_CBYTE)infoBlock, (const BYTE*)codeBlock, offs, verifyGCTables);
}
#endif // DUMP_GC_TABLES
#else // !JIT32_GCENCODER
#include "gcinfoencoder.h"
// Do explicit instantiation.
template class JitHashTable<RegSlotIdKey, RegSlotIdKey, GcSlotId>;
template class JitHashTable<StackSlotIdKey, StackSlotIdKey, GcSlotId>;
#if defined(DEBUG) || DUMP_GC_TABLES
// This is a copy of GcStackSlotBaseNames from gcinfotypes.h so we can compile in to non-DEBUG builds.
const char* const JitGcStackSlotBaseNames[] = {"caller.sp", "sp", "frame"};
static const char* const GcSlotFlagsNames[] = {"",
"(byref) ",
"(pinned) ",
"(byref, pinned) ",
"(untracked) ",
"(byref, untracked) ",
"(pinned, untracked) ",
"(byref, pinned, untracked) "};
// I'm making a local wrapper class for GcInfoEncoder so that can add logging of my own (DLD).
// I'm making a local wrapper class for GcInfoEncoder so that can add logging of my own (DLD).
//
// Every method forwards the call unchanged to the wrapped GcInfoEncoder and,
// when logging is enabled, prints a human-readable trace of what was reported.
// Logging is on when the constructor's "verbose" argument is true, or (in
// DEBUG builds) when the JitGCInfoLogging config value is nonzero.
class GcInfoEncoderWithLogging
{
    GcInfoEncoder* m_gcInfoEncoder; // the real encoder every call is forwarded to
    bool m_doLogging;               // if true, also print a trace of each call

public:
    GcInfoEncoderWithLogging(GcInfoEncoder* gcInfoEncoder, bool verbose)
        : m_gcInfoEncoder(gcInfoEncoder), m_doLogging(verbose INDEBUG(|| JitConfig.JitGCInfoLogging() != 0))
    {
    }

    // Get (allocating if needed) the slot id for the stack slot at "spOffset"
    // relative to "spBase" (caller-SP by default), with the given GC flags.
    GcSlotId GetStackSlotId(INT32 spOffset, GcSlotFlags flags, GcStackSlotBase spBase = GC_CALLER_SP_REL)
    {
        GcSlotId newSlotId = m_gcInfoEncoder->GetStackSlotId(spOffset, flags, spBase);
        if (m_doLogging)
        {
            printf("Stack slot id for offset %d (%s0x%x) (%s) %s= %d.\n", spOffset, spOffset < 0 ? "-" : "",
                   abs(spOffset), JitGcStackSlotBaseNames[spBase], GcSlotFlagsNames[flags & 7], newSlotId);
        }
        return newSlotId;
    }

    // Get (allocating if needed) the slot id for register "regNum" with the given GC flags.
    GcSlotId GetRegisterSlotId(UINT32 regNum, GcSlotFlags flags)
    {
        GcSlotId newSlotId = m_gcInfoEncoder->GetRegisterSlotId(regNum, flags);
        if (m_doLogging)
        {
            printf("Register slot id for reg %s %s= %d.\n", getRegName(regNum), GcSlotFlagsNames[flags & 7], newSlotId);
        }
        return newSlotId;
    }

    // Mark slot "slotId" as becoming live or dead at "instructionOffset".
    void SetSlotState(UINT32 instructionOffset, GcSlotId slotId, GcSlotState slotState)
    {
        m_gcInfoEncoder->SetSlotState(instructionOffset, slotId, slotState);
        if (m_doLogging)
        {
            printf("Set state of slot %d at instr offset 0x%x to %s.\n", slotId, instructionOffset,
                   (slotState == GC_SLOT_LIVE ? "Live" : "Dead"));
        }
    }

    // Report the set of call sites: parallel arrays of offsets and sizes.
    void DefineCallSites(UINT32* pCallSites, BYTE* pCallSiteSizes, UINT32 numCallSites)
    {
        m_gcInfoEncoder->DefineCallSites(pCallSites, pCallSiteSizes, numCallSites);
        if (m_doLogging)
        {
            printf("Defining %d call sites:\n", numCallSites);
            for (UINT32 k = 0; k < numCallSites; k++)
            {
                printf(" Offset 0x%x, size %d.\n", pCallSites[k], pCallSiteSizes[k]);
            }
        }
    }

    // Report a fully-interruptible code range [startInstructionOffset, startInstructionOffset + length).
    void DefineInterruptibleRange(UINT32 startInstructionOffset, UINT32 length)
    {
        m_gcInfoEncoder->DefineInterruptibleRange(startInstructionOffset, length);
        if (m_doLogging)
        {
            printf("Defining interruptible range: [0x%x, 0x%x).\n", startInstructionOffset,
                   startInstructionOffset + length);
        }
    }

    void SetCodeLength(UINT32 length)
    {
        m_gcInfoEncoder->SetCodeLength(length);
        if (m_doLogging)
        {
            printf("Set code length to %d.\n", length);
        }
    }

    void SetReturnKind(ReturnKind returnKind)
    {
        m_gcInfoEncoder->SetReturnKind(returnKind);
        if (m_doLogging)
        {
            printf("Set ReturnKind to %s.\n", ReturnKindToString(returnKind));
        }
    }

    void SetStackBaseRegister(UINT32 registerNumber)
    {
        m_gcInfoEncoder->SetStackBaseRegister(registerNumber);
        if (m_doLogging)
        {
            printf("Set stack base register to %s.\n", getRegName(registerNumber));
        }
    }

    void SetPrologSize(UINT32 prologSize)
    {
        m_gcInfoEncoder->SetPrologSize(prologSize);
        if (m_doLogging)
        {
            printf("Set prolog size 0x%x.\n", prologSize);
        }
    }

    // Report the GS cookie's stack slot and the code range over which it is valid.
    void SetGSCookieStackSlot(INT32 spOffsetGSCookie, UINT32 validRangeStart, UINT32 validRangeEnd)
    {
        m_gcInfoEncoder->SetGSCookieStackSlot(spOffsetGSCookie, validRangeStart, validRangeEnd);
        if (m_doLogging)
        {
            printf("Set GS Cookie stack slot to %d, valid from 0x%x to 0x%x.\n", spOffsetGSCookie, validRangeStart,
                   validRangeEnd);
        }
    }

    void SetPSPSymStackSlot(INT32 spOffsetPSPSym)
    {
        m_gcInfoEncoder->SetPSPSymStackSlot(spOffsetPSPSym);
        if (m_doLogging)
        {
            printf("Set PSPSym stack slot to %d.\n", spOffsetPSPSym);
        }
    }

    // Report the stack slot holding the generics instantiation context, and
    // what kind of context it is ("this", MethodTable, or MethodDesc).
    void SetGenericsInstContextStackSlot(INT32 spOffsetGenericsContext, GENERIC_CONTEXTPARAM_TYPE type)
    {
        m_gcInfoEncoder->SetGenericsInstContextStackSlot(spOffsetGenericsContext, type);
        if (m_doLogging)
        {
            printf("Set generic instantiation context stack slot to %d, type is %s.\n", spOffsetGenericsContext,
                   (type == GENERIC_CONTEXTPARAM_THIS
                        ? "THIS"
                        : (type == GENERIC_CONTEXTPARAM_MT ? "MT"
                                                           : (type == GENERIC_CONTEXTPARAM_MD ? "MD" : "UNKNOWN!"))));
        }
    }

    void SetSecurityObjectStackSlot(INT32 spOffset)
    {
        m_gcInfoEncoder->SetSecurityObjectStackSlot(spOffset);
        if (m_doLogging)
        {
            printf("Set security object stack slot to %d.\n", spOffset);
        }
    }

    void SetIsVarArg()
    {
        m_gcInfoEncoder->SetIsVarArg();
        if (m_doLogging)
        {
            printf("SetIsVarArg.\n");
        }
    }

#ifdef TARGET_AMD64

    // Ask the runtime to report GC roots only for the leaf frame of funclet trees.
    void SetWantsReportOnlyLeaf()
    {
        m_gcInfoEncoder->SetWantsReportOnlyLeaf();
        if (m_doLogging)
        {
            printf("Set WantsReportOnlyLeaf.\n");
        }
    }

#elif defined(TARGET_ARMARCH)

    void SetHasTailCalls()
    {
        m_gcInfoEncoder->SetHasTailCalls();
        if (m_doLogging)
        {
            printf("Set HasTailCalls.\n");
        }
    }
#endif // TARGET_AMD64/TARGET_ARMARCH

    void SetSizeOfStackOutgoingAndScratchArea(UINT32 size)
    {
        m_gcInfoEncoder->SetSizeOfStackOutgoingAndScratchArea(size);
        if (m_doLogging)
        {
            printf("Set Outgoing stack arg area size to %d.\n", size);
        }
    }
};
#define GCENCODER_WITH_LOGGING(withLog, realEncoder) \
GcInfoEncoderWithLogging withLog##Var(realEncoder, INDEBUG(compiler->verbose ||) compiler->opts.dspGCtbls); \
GcInfoEncoderWithLogging* withLog = &withLog##Var;
#else // !(defined(DEBUG) || DUMP_GC_TABLES)
#define GCENCODER_WITH_LOGGING(withLog, realEncoder) GcInfoEncoder* withLog = realEncoder;
#endif // !(defined(DEBUG) || DUMP_GC_TABLES)
//------------------------------------------------------------------------
// gcInfoBlockHdrSave: Report the method-level ("header") GC information.
//
// Arguments:
//    gcInfoEncoder - the encoder to report through
//    methodSize    - total size of the generated code, in bytes
//    prologSize    - size of the prolog, in bytes
//
// Notes:
//    Reports code length, return kind, frame register, varargs-ness, the
//    generics instantiation context slot, the GS cookie slot and its valid
//    range, the PSPSym slot, tail-call presence (arm), and the outgoing
//    argument area size, as applicable to the current method and target.
//
void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder, unsigned methodSize, unsigned prologSize)
{
#ifdef DEBUG
    if (compiler->verbose)
    {
        printf("*************** In gcInfoBlockHdrSave()\n");
    }
#endif

    GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);

    // Can't create tables if we've not saved code.

    gcInfoEncoderWithLog->SetCodeLength(methodSize);

    gcInfoEncoderWithLog->SetReturnKind(getReturnKind());

    if (compiler->isFramePointerUsed())
    {
        gcInfoEncoderWithLog->SetStackBaseRegister(REG_FPBASE);
    }

    if (compiler->info.compIsVarArgs)
    {
        gcInfoEncoderWithLog->SetIsVarArg();
    }
    // No equivalents.
    // header->profCallbacks = compiler->info.compProfilerCallback;
    // header->editNcontinue = compiler->opts.compDbgEnC;
    //
    if (compiler->lvaReportParamTypeArg())
    {
        // The predicate above is true only if there is an extra generic context parameter, not for
        // the case where the generic context is provided by "this."
        assert((SIZE_T)compiler->info.compTypeCtxtArg != BAD_VAR_NUM);
        GENERIC_CONTEXTPARAM_TYPE ctxtParamType = GENERIC_CONTEXTPARAM_NONE;
        switch (compiler->info.compMethodInfo->options & CORINFO_GENERICS_CTXT_MASK)
        {
            case CORINFO_GENERICS_CTXT_FROM_METHODDESC:
                ctxtParamType = GENERIC_CONTEXTPARAM_MD;
                break;
            case CORINFO_GENERICS_CTXT_FROM_METHODTABLE:
                ctxtParamType = GENERIC_CONTEXTPARAM_MT;
                break;

            case CORINFO_GENERICS_CTXT_FROM_THIS: // See comment above.
            default:
                // If we have a generic context parameter, then we should have
                // one of the two options flags handled above.
                assert(false);
        }

        // The encoder wants a caller-SP relative offset; translate from the frame-local offset.
        const int offset = compiler->lvaToCallerSPRelativeOffset(compiler->lvaCachedGenericContextArgOffset(),
                                                                 compiler->isFramePointerUsed());

#ifdef DEBUG
        if (compiler->opts.IsOSR())
        {
            // Sanity check the offset vs saved patchpoint info.
            //
            const PatchpointInfo* const ppInfo = compiler->info.compPatchpointInfo;
#if defined(TARGET_AMD64)
            // PP info has FP relative offset, to get to caller SP we need to
            // subtract off 2 register slots (saved FP, saved RA).
            //
            const int osrOffset = ppInfo->GenericContextArgOffset() - 2 * REGSIZE_BYTES;
            assert(offset == osrOffset);
#elif defined(TARGET_ARM64)
            // PP info has virtual offset. This is also the caller SP offset.
            //
            const int osrOffset = ppInfo->GenericContextArgOffset();
            assert(offset == osrOffset);
#endif
        }
#endif

        gcInfoEncoderWithLog->SetGenericsInstContextStackSlot(offset, ctxtParamType);
    }
    // As discussed above, handle the case where the generics context is obtained via
    // the method table of "this".
    else if (compiler->lvaKeepAliveAndReportThis())
    {
        assert(compiler->info.compThisArg != BAD_VAR_NUM);

        // OSR can report the root method's frame slot, if that method reported context.
        // If not, the OSR frame will have saved the needed context.
        //
        bool useRootFrameSlot = true;
        if (compiler->opts.IsOSR())
        {
            const PatchpointInfo* const ppInfo = compiler->info.compPatchpointInfo;

            useRootFrameSlot = ppInfo->HasKeptAliveThis();
        }

        const int offset = compiler->lvaToCallerSPRelativeOffset(compiler->lvaCachedGenericContextArgOffset(),
                                                                 compiler->isFramePointerUsed(), useRootFrameSlot);

#ifdef DEBUG
        if (compiler->opts.IsOSR() && useRootFrameSlot)
        {
            // Sanity check the offset vs saved patchpoint info.
            //
            const PatchpointInfo* const ppInfo = compiler->info.compPatchpointInfo;
#if defined(TARGET_AMD64)
            // PP info has FP relative offset, to get to caller SP we need to
            // subtract off 2 register slots (saved FP, saved RA).
            //
            const int osrOffset = ppInfo->KeptAliveThisOffset() - 2 * REGSIZE_BYTES;
            assert(offset == osrOffset);
#elif defined(TARGET_ARM64)
            // PP info has virtual offset. This is also the caller SP offset.
            //
            const int osrOffset = ppInfo->KeptAliveThisOffset();
            assert(offset == osrOffset);
#endif
        }
#endif

        gcInfoEncoderWithLog->SetGenericsInstContextStackSlot(offset, GENERIC_CONTEXTPARAM_THIS);
    }

    if (compiler->getNeedsGSSecurityCookie())
    {
        assert(compiler->lvaGSSecurityCookie != BAD_VAR_NUM);

        // The lv offset is FP-relative, and the using code expects caller-sp relative, so translate.
        const int offset = compiler->lvaGetCallerSPRelativeOffset(compiler->lvaGSSecurityCookie);

        // The code offset ranges assume that the GS Cookie slot is initialized in the prolog, and is valid
        // through the remainder of the method.  We will not query for the GS Cookie while we're in an epilog,
        // so the question of where in the epilog it becomes invalid is moot.
        gcInfoEncoderWithLog->SetGSCookieStackSlot(offset, prologSize, methodSize);
    }
    else if (compiler->lvaReportParamTypeArg() || compiler->lvaKeepAliveAndReportThis())
    {
        // No GS cookie, but the prolog size is still needed so the runtime knows
        // when the reported context/this slots become valid.
        gcInfoEncoderWithLog->SetPrologSize(prologSize);
    }

#if defined(FEATURE_EH_FUNCLETS)
    if (compiler->lvaPSPSym != BAD_VAR_NUM)
    {
#ifdef TARGET_AMD64
        // The PSPSym is relative to InitialSP on X64 and CallerSP on other platforms.
        gcInfoEncoderWithLog->SetPSPSymStackSlot(compiler->lvaGetInitialSPRelativeOffset(compiler->lvaPSPSym));
#else  // !TARGET_AMD64
        gcInfoEncoderWithLog->SetPSPSymStackSlot(compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym));
#endif // !TARGET_AMD64
    }

#ifdef TARGET_AMD64
    if (compiler->ehAnyFunclets())
    {
        // Set this to avoid double-reporting the parent frame (unlike JIT64)
        gcInfoEncoderWithLog->SetWantsReportOnlyLeaf();
    }
#endif // TARGET_AMD64

#endif // FEATURE_EH_FUNCLETS

#ifdef TARGET_ARMARCH
    if (compiler->codeGen->GetHasTailCalls())
    {
        gcInfoEncoderWithLog->SetHasTailCalls();
    }
#endif // TARGET_ARMARCH

#if FEATURE_FIXED_OUT_ARGS
    // outgoing stack area size
    gcInfoEncoderWithLog->SetSizeOfStackOutgoingAndScratchArea(compiler->lvaOutgoingArgSpaceSize);
#endif // FEATURE_FIXED_OUT_ARGS

#if DISPLAY_SIZES
    if (compiler->codeGen->GetInterruptible())
    {
        genMethodICnt++;
    }
    else
    {
        genMethodNCnt++;
    }
#endif // DISPLAY_SIZES
}
#if defined(DEBUG) || DUMP_GC_TABLES
#define Encoder GcInfoEncoderWithLogging
#else
#define Encoder GcInfoEncoder
#endif
// Small helper class to handle the No-GC-Interrupt callbacks
// when reporting interruptible ranges.
//
// Encoder should be either GcInfoEncoder or GcInfoEncoderWithLogging
//
// Small helper class to handle the No-GC-Interrupt callbacks
// when reporting interruptible ranges.
//
// Encoder should be either GcInfoEncoder or GcInfoEncoderWithLogging
//
struct InterruptibleRangeReporter
{
    unsigned prevStart;           // end offset of the last non-interruptible region seen
    Encoder* gcInfoEncoderWithLog; // encoder interruptible ranges are reported to

    InterruptibleRangeReporter(unsigned _prevStart, Encoder* _gcInfo)
        : prevStart(_prevStart), gcInfoEncoderWithLog(_gcInfo)
    {
    }

    // Invoked for each insGroup flagged IGF_NOGCINTERRUPT (currently just
    // prologs and epilogs). Everything between the end of the previous
    // non-interruptible region and the start of the current one is reported
    // as interruptible. Always returns true (continue enumeration).
    bool operator()(unsigned igFuncIdx, unsigned igOffs, unsigned igSize)
    {
        if (igOffs >= prevStart)
        {
            // Report the gap (if any) since the previous non-interruptible region.
            const unsigned gapLength = igOffs - prevStart;
            if (gapLength != 0)
            {
                gcInfoEncoderWithLog->DefineInterruptibleRange(prevStart, gapLength);
            }
            prevStart = igOffs + igSize;
        }
        else
        {
            // We're still in the main method prolog, which has already
            // had its interruptible range reported.
            assert(igFuncIdx == 0);
            assert(igOffs + igSize <= prevStart);
        }
        return true;
    }
};
void GCInfo::gcMakeRegPtrTable(
GcInfoEncoder* gcInfoEncoder, unsigned codeSize, unsigned prologSize, MakeRegPtrMode mode, unsigned* callCntRef)
{
GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);
const bool noTrackedGCSlots =
(compiler->opts.MinOpts() && !compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) &&
!JitConfig.JitMinOptsTrackGCrefs());
if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
{
m_regSlotMap = new (compiler->getAllocator()) RegSlotMap(compiler->getAllocator());
m_stackSlotMap = new (compiler->getAllocator()) StackSlotMap(compiler->getAllocator());
}
/**************************************************************************
*
* Untracked ptr variables
*
**************************************************************************
*/
/* Count&Write untracked locals and non-enregistered args */
unsigned varNum;
LclVarDsc* varDsc;
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
// Field local of a PROMOTION_TYPE_DEPENDENT struct must have been
// reported through its parent local.
continue;
}
if (varTypeIsGC(varDsc->TypeGet()))
{
// Do we have an argument or local variable?
if (!varDsc->lvIsParam)
{
// If it is pinned, it must be an untracked local.
assert(!varDsc->lvPinned || !varDsc->lvTracked);
if (varDsc->lvTracked || !varDsc->lvOnFrame)
{
continue;
}
}
else
{
// Stack-passed arguments which are not enregistered
// are always reported in this "untracked stack
// pointers" section of the GC info even if lvTracked==true
// Has this argument been fully enregistered?
CLANG_FORMAT_COMMENT_ANCHOR;
if (!varDsc->lvOnFrame)
{
// If a CEE_JMP has been used, then we need to report all the arguments
// even if they are enregistered, since we will be using this value
// in a JMP call. Note that this is subtle as we require that
// argument offsets are always fixed up properly even if lvRegister
// is set.
if (!compiler->compJmpOpUsed)
{
continue;
}
}
else
{
if (varDsc->lvIsRegArg && varDsc->lvTracked)
{
// If this register-passed arg is tracked, then
// it has been allocated space near the other
// pointer variables and we have accurate life-
// time info. It will be reported with
// gcVarPtrList in the "tracked-pointer" section.
continue;
}
}
}
// If we haven't continued to the next variable, we should report this as an untracked local.
CLANG_FORMAT_COMMENT_ANCHOR;
GcSlotFlags flags = GC_SLOT_UNTRACKED;
if (varDsc->TypeGet() == TYP_BYREF)
{
// Or in byref_OFFSET_FLAG for 'byref' pointer tracking
flags = (GcSlotFlags)(flags | GC_SLOT_INTERIOR);
}
if (varDsc->lvPinned)
{
// Or in pinned_OFFSET_FLAG for 'pinned' pointer tracking
flags = (GcSlotFlags)(flags | GC_SLOT_PINNED);
}
GcStackSlotBase stackSlotBase = GC_SP_REL;
if (varDsc->lvFramePointerBased)
{
stackSlotBase = GC_FRAMEREG_REL;
}
if (noTrackedGCSlots)
{
// No need to hash/lookup untracked GC refs; just grab a new Slot Id.
if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
{
gcInfoEncoderWithLog->GetStackSlotId(varDsc->GetStackOffset(), flags, stackSlotBase);
}
}
else
{
StackSlotIdKey sskey(varDsc->GetStackOffset(), (stackSlotBase == GC_FRAMEREG_REL), flags);
GcSlotId varSlotId;
if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
{
if (!m_stackSlotMap->Lookup(sskey, &varSlotId))
{
varSlotId =
gcInfoEncoderWithLog->GetStackSlotId(varDsc->GetStackOffset(), flags, stackSlotBase);
m_stackSlotMap->Set(sskey, varSlotId);
}
}
}
}
// If this is a TYP_STRUCT, handle its GC pointers.
// Note that the enregisterable struct types cannot have GC pointers in them.
if ((varDsc->TypeGet() == TYP_STRUCT) && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
{
ClassLayout* layout = varDsc->GetLayout();
unsigned slots = layout->GetSlotCount();
for (unsigned i = 0; i < slots; i++)
{
if (!layout->IsGCPtr(i))
{
continue;
}
int offset = varDsc->GetStackOffset() + i * TARGET_POINTER_SIZE;
#if DOUBLE_ALIGN
// For genDoubleAlign(), locals are addressed relative to ESP and
// arguments are addressed relative to EBP.
if (compiler->genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg)
offset += compiler->codeGen->genTotalFrameSize();
#endif
GcSlotFlags flags = GC_SLOT_UNTRACKED;
if (layout->GetGCPtrType(i) == TYP_BYREF)
{
flags = (GcSlotFlags)(flags | GC_SLOT_INTERIOR);
}
GcStackSlotBase stackSlotBase = GC_SP_REL;
if (varDsc->lvFramePointerBased)
{
stackSlotBase = GC_FRAMEREG_REL;
}
StackSlotIdKey sskey(offset, (stackSlotBase == GC_FRAMEREG_REL), flags);
GcSlotId varSlotId;
if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
{
if (!m_stackSlotMap->Lookup(sskey, &varSlotId))
{
varSlotId = gcInfoEncoderWithLog->GetStackSlotId(offset, flags, stackSlotBase);
m_stackSlotMap->Set(sskey, varSlotId);
}
}
}
}
}
if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
{
// Count&Write spill temps that hold pointers.
assert(compiler->codeGen->regSet.tmpAllFree());
for (TempDsc* tempItem = compiler->codeGen->regSet.tmpListBeg(); tempItem != nullptr;
tempItem = compiler->codeGen->regSet.tmpListNxt(tempItem))
{
if (varTypeIsGC(tempItem->tdTempType()))
{
int offset = tempItem->tdTempOffs();
GcSlotFlags flags = GC_SLOT_UNTRACKED;
if (tempItem->tdTempType() == TYP_BYREF)
{
flags = (GcSlotFlags)(flags | GC_SLOT_INTERIOR);
}
GcStackSlotBase stackSlotBase = GC_SP_REL;
if (compiler->isFramePointerUsed())
{
stackSlotBase = GC_FRAMEREG_REL;
}
StackSlotIdKey sskey(offset, (stackSlotBase == GC_FRAMEREG_REL), flags);
GcSlotId varSlotId;
if (!m_stackSlotMap->Lookup(sskey, &varSlotId))
{
varSlotId = gcInfoEncoderWithLog->GetStackSlotId(offset, flags, stackSlotBase);
m_stackSlotMap->Set(sskey, varSlotId);
}
}
}
if (compiler->lvaKeepAliveAndReportThis())
{
// We need to report the cached copy as an untracked pointer
assert(compiler->info.compThisArg != BAD_VAR_NUM);
assert(!compiler->lvaReportParamTypeArg());
GcSlotFlags flags = GC_SLOT_UNTRACKED;
if (compiler->lvaTable[compiler->info.compThisArg].TypeGet() == TYP_BYREF)
{
// Or in GC_SLOT_INTERIOR for 'byref' pointer tracking
flags = (GcSlotFlags)(flags | GC_SLOT_INTERIOR);
}
GcStackSlotBase stackSlotBase = compiler->isFramePointerUsed() ? GC_FRAMEREG_REL : GC_SP_REL;
gcInfoEncoderWithLog->GetStackSlotId(compiler->lvaCachedGenericContextArgOffset(), flags, stackSlotBase);
}
}
// Generate the table of tracked stack pointer variable lifetimes.
gcMakeVarPtrTable(gcInfoEncoder, mode);
/**************************************************************************
*
* Prepare to generate the pointer register/argument map
*
**************************************************************************
*/
if (compiler->codeGen->GetInterruptible())
{
assert(compiler->IsFullPtrRegMapRequired());
regMaskSmall ptrRegs = 0;
regPtrDsc* regStackArgFirst = nullptr;
// Walk the list of pointer register/argument entries.
for (regPtrDsc* genRegPtrTemp = gcRegPtrList; genRegPtrTemp != nullptr; genRegPtrTemp = genRegPtrTemp->rpdNext)
{
if (genRegPtrTemp->rpdArg)
{
if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_KILL)
{
// Kill all arguments for a call
if ((mode == MAKE_REG_PTR_MODE_DO_WORK) && (regStackArgFirst != nullptr))
{
// Record any outgoing arguments as becoming dead
gcInfoRecordGCStackArgsDead(gcInfoEncoder, genRegPtrTemp->rpdOffs, regStackArgFirst,
genRegPtrTemp);
}
regStackArgFirst = nullptr;
}
else if (genRegPtrTemp->rpdGCtypeGet() != GCT_NONE)
{
if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_PUSH || (genRegPtrTemp->rpdPtrArg != 0))
{
bool isPop = genRegPtrTemp->rpdArgTypeGet() == rpdARG_POP;
assert(!isPop);
gcInfoRecordGCStackArgLive(gcInfoEncoder, mode, genRegPtrTemp);
if (regStackArgFirst == nullptr)
{
regStackArgFirst = genRegPtrTemp;
}
}
else
{
// We know it's a POP. Sometimes we'll record a POP for a call, just to make sure
// the call site is recorded.
// This is just the negation of the condition:
assert(genRegPtrTemp->rpdArgTypeGet() == rpdARG_POP && genRegPtrTemp->rpdPtrArg == 0);
// This asserts that we only get here when we're recording a call site.
assert(genRegPtrTemp->rpdArg && genRegPtrTemp->rpdIsCallInstr());
// Kill all arguments for a call
if ((mode == MAKE_REG_PTR_MODE_DO_WORK) && (regStackArgFirst != nullptr))
{
// Record any outgoing arguments as becoming dead
gcInfoRecordGCStackArgsDead(gcInfoEncoder, genRegPtrTemp->rpdOffs, regStackArgFirst,
genRegPtrTemp);
}
regStackArgFirst = nullptr;
}
}
}
else
{
// Record any registers that are becoming dead.
regMaskSmall regMask = genRegPtrTemp->rpdCompiler.rpdDel & ptrRegs;
regMaskSmall byRefMask = 0;
if (genRegPtrTemp->rpdGCtypeGet() == GCT_BYREF)
{
byRefMask = regMask;
}
gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, genRegPtrTemp->rpdOffs, regMask, GC_SLOT_DEAD,
byRefMask, &ptrRegs);
// Record any registers that are becoming live.
regMask = genRegPtrTemp->rpdCompiler.rpdAdd & ~ptrRegs;
byRefMask = 0;
// As far as I (DLD, 2010) can tell, there's one GCtype for the entire genRegPtrTemp, so if
// it says byref then all the registers in "regMask" contain byrefs.
if (genRegPtrTemp->rpdGCtypeGet() == GCT_BYREF)
{
byRefMask = regMask;
}
gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, genRegPtrTemp->rpdOffs, regMask, GC_SLOT_LIVE,
byRefMask, &ptrRegs);
}
}
// Now we can declare the entire method body fully interruptible.
if (mode == MAKE_REG_PTR_MODE_DO_WORK)
{
assert(prologSize <= codeSize);
// Now exempt any other region marked as IGF_NOGCINTERRUPT
// Currently just prologs and epilogs.
InterruptibleRangeReporter reporter(prologSize, gcInfoEncoderWithLog);
compiler->GetEmitter()->emitGenNoGCLst(reporter);
prologSize = reporter.prevStart;
// Report any remainder
if (prologSize < codeSize)
{
gcInfoEncoderWithLog->DefineInterruptibleRange(prologSize, codeSize - prologSize);
}
}
}
else if (compiler->isFramePointerUsed()) // GetInterruptible() is false, and we're using EBP as a frame pointer.
{
assert(compiler->IsFullPtrRegMapRequired() == false);
// Walk the list of pointer register/argument entries.
// First count them.
unsigned numCallSites = 0;
// Now we can allocate the information.
unsigned* pCallSites = nullptr;
BYTE* pCallSiteSizes = nullptr;
unsigned callSiteNum = 0;
if (mode == MAKE_REG_PTR_MODE_DO_WORK)
{
if (gcCallDescList != nullptr)
{
if (noTrackedGCSlots)
{
// We have the call count from the previous run.
numCallSites = *callCntRef;
// If there are no calls, tell the world and bail.
if (numCallSites == 0)
{
gcInfoEncoderWithLog->DefineCallSites(nullptr, nullptr, 0);
return;
}
}
else
{
for (CallDsc* call = gcCallDescList; call != nullptr; call = call->cdNext)
{
numCallSites++;
}
}
pCallSites = new (compiler, CMK_GC) unsigned[numCallSites];
pCallSiteSizes = new (compiler, CMK_GC) BYTE[numCallSites];
}
}
// Now consider every call.
for (CallDsc* call = gcCallDescList; call != nullptr; call = call->cdNext)
{
// Figure out the code offset of this entry.
unsigned nextOffset = call->cdOffs;
// As far as I (DLD, 2010) can determine by asking around, the "call->u1.cdArgMask"
// and "cdArgCnt" cases are to handle x86 situations in which a call expression is nested as an
// argument to an outer call. The "natural" (evaluation-order-preserving) thing to do is to
// evaluate the outer call's arguments, pushing those that are not enregistered, until you
// encounter the nested call. These parts of the call description, then, describe the "pending"
// pushed arguments. This situation does not exist outside of x86, where we're going to use a
// fixed-size stack frame: in situations like this nested call, we would evaluate the pending
// arguments to temporaries, and only "push" them (really, write them to the outgoing argument section
// of the stack frame) when it's the outer call's "turn." So we can assert that these
// situations never occur.
assert(call->u1.cdArgMask == 0 && call->cdArgCnt == 0);
// Other than that, we just have to deal with the regmasks.
regMaskSmall gcrefRegMask = call->cdGCrefRegs & RBM_CALLEE_SAVED;
regMaskSmall byrefRegMask = call->cdByrefRegs & RBM_CALLEE_SAVED;
assert((gcrefRegMask & byrefRegMask) == 0);
regMaskSmall regMask = gcrefRegMask | byrefRegMask;
assert(call->cdOffs >= call->cdCallInstrSize);
// call->cdOffs is actually the offset of the instruction *following* the call, so subtract
// the call instruction size to get the offset of the actual call instruction...
unsigned callOffset = nextOffset - call->cdCallInstrSize;
if (noTrackedGCSlots && regMask == 0)
{
// No live GC refs in regs at the call -> don't record the call.
}
else
{
// Append an entry for the call if doing the real thing.
if (mode == MAKE_REG_PTR_MODE_DO_WORK)
{
pCallSites[callSiteNum] = callOffset;
pCallSiteSizes[callSiteNum] = call->cdCallInstrSize;
}
callSiteNum++;
// Record that these registers are live before the call...
gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, callOffset, regMask, GC_SLOT_LIVE, byrefRegMask,
nullptr);
// ...and dead after.
gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, nextOffset, regMask, GC_SLOT_DEAD, byrefRegMask,
nullptr);
}
}
// Make sure we've recorded the expected number of calls
assert(mode != MAKE_REG_PTR_MODE_DO_WORK || numCallSites == callSiteNum);
// Return the actual recorded call count to the caller
*callCntRef = callSiteNum;
// OK, define the call sites.
if (mode == MAKE_REG_PTR_MODE_DO_WORK)
{
gcInfoEncoderWithLog->DefineCallSites(pCallSites, pCallSiteSizes, numCallSites);
}
}
else // GetInterruptible() is false and we have an EBP-less frame
{
assert(compiler->IsFullPtrRegMapRequired());
// Walk the list of pointer register/argument entries */
// First count them.
unsigned numCallSites = 0;
// Now we can allocate the information (if we're in the "DO_WORK" pass...)
unsigned* pCallSites = nullptr;
BYTE* pCallSiteSizes = nullptr;
unsigned callSiteNum = 0;
if (mode == MAKE_REG_PTR_MODE_DO_WORK)
{
for (regPtrDsc* genRegPtrTemp = gcRegPtrList; genRegPtrTemp != nullptr;
genRegPtrTemp = genRegPtrTemp->rpdNext)
{
if (genRegPtrTemp->rpdArg && genRegPtrTemp->rpdIsCallInstr())
{
numCallSites++;
}
}
if (numCallSites > 0)
{
pCallSites = new (compiler, CMK_GC) unsigned[numCallSites];
pCallSiteSizes = new (compiler, CMK_GC) BYTE[numCallSites];
}
}
for (regPtrDsc* genRegPtrTemp = gcRegPtrList; genRegPtrTemp != nullptr; genRegPtrTemp = genRegPtrTemp->rpdNext)
{
if (genRegPtrTemp->rpdArg)
{
// Is this a call site?
if (genRegPtrTemp->rpdIsCallInstr())
{
// This is a true call site.
regMaskSmall gcrefRegMask = genRegMaskFromCalleeSavedMask(genRegPtrTemp->rpdCallGCrefRegs);
regMaskSmall byrefRegMask = genRegMaskFromCalleeSavedMask(genRegPtrTemp->rpdCallByrefRegs);
assert((gcrefRegMask & byrefRegMask) == 0);
regMaskSmall regMask = gcrefRegMask | byrefRegMask;
// The "rpdOffs" is (apparently) the offset of the following instruction already.
// GcInfoEncoder wants the call instruction, so subtract the width of the call instruction.
assert(genRegPtrTemp->rpdOffs >= genRegPtrTemp->rpdCallInstrSize);
unsigned callOffset = genRegPtrTemp->rpdOffs - genRegPtrTemp->rpdCallInstrSize;
// Tell the GCInfo encoder about these registers. We say that the registers become live
// before the call instruction, and dead after.
gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, callOffset, regMask, GC_SLOT_LIVE, byrefRegMask,
nullptr);
gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, genRegPtrTemp->rpdOffs, regMask, GC_SLOT_DEAD,
byrefRegMask, nullptr);
// Also remember the call site.
if (mode == MAKE_REG_PTR_MODE_DO_WORK)
{
assert(pCallSites != nullptr && pCallSiteSizes != nullptr);
pCallSites[callSiteNum] = callOffset;
pCallSiteSizes[callSiteNum] = genRegPtrTemp->rpdCallInstrSize;
callSiteNum++;
}
}
else
{
// These are reporting outgoing stack arguments, but we don't need to report anything
// for partially interruptible
assert(genRegPtrTemp->rpdGCtypeGet() != GCT_NONE);
assert(genRegPtrTemp->rpdArgTypeGet() == rpdARG_PUSH);
}
}
}
// The routine is fully interruptible.
if (mode == MAKE_REG_PTR_MODE_DO_WORK)
{
gcInfoEncoderWithLog->DefineCallSites(pCallSites, pCallSiteSizes, numCallSites);
}
}
}
//------------------------------------------------------------------------
// gcInfoRecordGCRegStateChange: Report a set of registers becoming live or
//    dead (as GC pointer holders) at a given code offset.
//
// Arguments:
//    gcInfoEncoder - the encoder being used to build the GC info
//    mode          - MAKE_REG_PTR_MODE_ASSIGN_SLOTS (first pass: create slot ids)
//                    or MAKE_REG_PTR_MODE_DO_WORK (second pass: report state)
//    instrOffset   - code offset at which the state change takes effect
//    regMask       - registers whose GC state is changing
//    newState      - GC_SLOT_LIVE or GC_SLOT_DEAD
//    byRefMask     - subset of regMask whose registers hold byrefs
//    pPtrRegs      - optional caller-maintained set of currently-live GC
//                    pointer registers, kept in sync with the changes
//
void GCInfo::gcInfoRecordGCRegStateChange(GcInfoEncoder* gcInfoEncoder,
                                          MakeRegPtrMode mode,
                                          unsigned       instrOffset,
                                          regMaskSmall   regMask,
                                          GcSlotState    newState,
                                          regMaskSmall   byRefMask,
                                          regMaskSmall*  pPtrRegs)
{
    // Every byref register must also appear in the overall mask.
    assert((byRefMask & ~regMask) == 0);
    GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);
    // Peel off one register (the lowest set bit) per iteration.
    while (regMask)
    {
        regMaskTP lowBit = genFindLowestReg(regMask);
        assert(lowBit);
        // Keep the caller's live-pointer-register set in sync, if one was supplied.
        if (pPtrRegs != nullptr)
        {
            if (newState == GC_SLOT_DEAD)
            {
                *pPtrRegs &= ~lowBit;
            }
            else
            {
                *pPtrRegs |= lowBit;
            }
        }
        // Map the isolated bit back to its register number.
        regNumber regNum = genRegNumFromMask(lowBit);
        /* Reserve SP future use */
        assert(regNum != REG_SPBASE);
        GcSlotFlags regFlags = GC_SLOT_BASE;
        if ((lowBit & byRefMask) != 0)
        {
            // This register holds a byref, so mark its slot as interior.
            regFlags = (GcSlotFlags)(regFlags | GC_SLOT_INTERIOR);
        }
        RegSlotIdKey rskey(regNum, regFlags);
        GcSlotId     regSlotId;
        if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
        {
            // First pass: make sure a slot id exists for this (register, flags) pair.
            if (!m_regSlotMap->Lookup(rskey, &regSlotId))
            {
                regSlotId = gcInfoEncoderWithLog->GetRegisterSlotId(regNum, regFlags);
                m_regSlotMap->Set(rskey, regSlotId);
            }
        }
        else
        {
            // Second pass: the slot id must already exist; report the state change.
            bool b = m_regSlotMap->Lookup(rskey, &regSlotId);
            assert(b); // Should have been added in the first pass.
            gcInfoEncoderWithLog->SetSlotState(instrOffset, regSlotId, newState);
        }
        // Clear the bit we've just processed and continue with the rest.
        regMask -= lowBit;
    }
}
/**************************************************************************
*
* gcMakeVarPtrTable - Generate the table of tracked stack pointer
* variable lifetimes.
*
* In the first pass we'll allocate slot Ids
* In the second pass we actually generate the lifetimes.
*
**************************************************************************
*/
// Generate the table of tracked stack pointer variable lifetimes.
//
// Arguments:
//    gcInfoEncoder - the encoder being used to build the GC info
//    mode          - MAKE_REG_PTR_MODE_ASSIGN_SLOTS (first pass: allocate slot ids)
//                    or MAKE_REG_PTR_MODE_DO_WORK (second pass: report lifetimes)
//
void GCInfo::gcMakeVarPtrTable(GcInfoEncoder* gcInfoEncoder, MakeRegPtrMode mode)
{
    GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);
    // Make sure any flags we hide in the offset are in the bits guaranteed
    // unused by alignment
    C_ASSERT((OFFSET_MASK + 1) <= sizeof(int));
#ifdef DEBUG
    if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
    {
        // Tracked variables can't be pinned, and the encoding takes
        // advantage of that by using the same bit for 'pinned' and 'this'.
        // Since we don't track 'this', we should never see either flag here.
        // Check it now before we potentially add some pinned flags.
        for (varPtrDsc* varTmp = gcVarPtrList; varTmp != nullptr; varTmp = varTmp->vpdNext)
        {
            const unsigned flags = varTmp->vpdVarNum & OFFSET_MASK;
            assert((flags & pinned_OFFSET_FLAG) == 0);
            assert((flags & this_OFFSET_FLAG) == 0);
        }
    }
#endif // DEBUG
    // Only need to do this once, and only if we have EH.
    if ((mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS) && compiler->ehAnyFunclets())
    {
        gcMarkFilterVarsPinned();
    }
    for (varPtrDsc* varTmp = gcVarPtrList; varTmp != nullptr; varTmp = varTmp->vpdNext)
    {
        // Note: the duplicate C_ASSERT that used to sit here was redundant with the
        // one at function entry (it is compile-time only) and has been removed.
        // Get hold of the variable's stack offset; the low bits carry the GC flags.
        unsigned lowBits = varTmp->vpdVarNum & OFFSET_MASK;
        // For negative stack offsets we must reset the low bits
        int varOffs = static_cast<int>(varTmp->vpdVarNum & ~OFFSET_MASK);
        // Compute the actual lifetime offsets.
        unsigned begOffs = varTmp->vpdBegOfs;
        unsigned endOffs = varTmp->vpdEndOfs;
        // Special case: skip any 0-length lifetimes.
        if (endOffs == begOffs)
        {
            continue;
        }
        GcSlotFlags flags = GC_SLOT_BASE;
        if ((lowBits & byref_OFFSET_FLAG) != 0)
        {
            flags = (GcSlotFlags)(flags | GC_SLOT_INTERIOR);
        }
        if ((lowBits & pinned_OFFSET_FLAG) != 0)
        {
            flags = (GcSlotFlags)(flags | GC_SLOT_PINNED);
        }
        GcStackSlotBase stackSlotBase = GC_SP_REL;
        if (compiler->isFramePointerUsed())
        {
            stackSlotBase = GC_FRAMEREG_REL;
        }
        StackSlotIdKey sskey(varOffs, (stackSlotBase == GC_FRAMEREG_REL), flags);
        GcSlotId varSlotId;
        if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
        {
            // First pass: allocate a slot id for this (offset, base, flags) triple
            // if we haven't seen it before.
            if (!m_stackSlotMap->Lookup(sskey, &varSlotId))
            {
                varSlotId = gcInfoEncoderWithLog->GetStackSlotId(varOffs, flags, stackSlotBase);
                m_stackSlotMap->Set(sskey, varSlotId);
            }
        }
        else
        {
            bool b = m_stackSlotMap->Lookup(sskey, &varSlotId);
            assert(b); // Should have been added in the first pass.
            // Live from the beginning to the end.
            gcInfoEncoderWithLog->SetSlotState(begOffs, varSlotId, GC_SLOT_LIVE);
            gcInfoEncoderWithLog->SetSlotState(endOffs, varSlotId, GC_SLOT_DEAD);
        }
    }
}
// Record a GC pointer stored into the outgoing argument area becoming live.
// In the first pass (ASSIGN_SLOTS) this allocates a slot id for the stack
// location; in the second pass (DO_WORK) it reports the slot as live at the
// offset of the store.
void GCInfo::gcInfoRecordGCStackArgLive(GcInfoEncoder* gcInfoEncoder, MakeRegPtrMode mode, regPtrDsc* genStackPtr)
{
    // On non-x86 platforms, don't have pointer argument push/pop/kill declarations.
    // But we use the same mechanism to record writes into the outgoing argument space...
    assert(genStackPtr->rpdGCtypeGet() != GCT_NONE);
    assert(genStackPtr->rpdArg);
    assert(genStackPtr->rpdArgTypeGet() == rpdARG_PUSH);
    // We only need to report these when we're doing fully-interruptible
    assert(compiler->codeGen->GetInterruptible());
    GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);
    // Key the slot on its offset in the outgoing area, SP-relative, plus
    // whether it holds a byref (interior pointer).
    StackSlotIdKey sskey(genStackPtr->rpdPtrArg, false,
                         GcSlotFlags(genStackPtr->rpdGCtypeGet() == GCT_BYREF ? GC_SLOT_INTERIOR : GC_SLOT_BASE));
    GcSlotId varSlotId;
    if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
    {
        // First pass: allocate a slot id for this stack location if needed.
        if (!m_stackSlotMap->Lookup(sskey, &varSlotId))
        {
            varSlotId = gcInfoEncoderWithLog->GetStackSlotId(sskey.m_offset, (GcSlotFlags)sskey.m_flags, GC_SP_REL);
            m_stackSlotMap->Set(sskey, varSlotId);
        }
    }
    else
    {
        bool b = m_stackSlotMap->Lookup(sskey, &varSlotId);
        assert(b); // Should have been added in the first pass.
        // Live from the store onward (until the call kills it).
        gcInfoEncoderWithLog->SetSlotState(genStackPtr->rpdOffs, varSlotId, GC_SLOT_LIVE);
    }
}
// Mark the outgoing stack arguments of a call as dead at the call site.
//
// Arguments:
//    gcInfoEncoder   - the encoder being used to build the GC info
//    instrOffset     - code offset of the call, where the args become dead
//    genStackPtrFirst - first outgoing-arg descriptor recorded for this call
//    genStackPtrLast  - the call's own descriptor (exclusive end of the walk)
//
void GCInfo::gcInfoRecordGCStackArgsDead(GcInfoEncoder* gcInfoEncoder,
                                         unsigned       instrOffset,
                                         regPtrDsc*     genStackPtrFirst,
                                         regPtrDsc*     genStackPtrLast)
{
    // After a call all of the outgoing arguments are marked as dead.
    // The calling loop keeps track of the first argument pushed for this call
    // and passes it in as genStackPtrFirst.
    // genStackPtrLast is the call.
    // Re-walk that list and mark all outgoing arguments that were marked as live
    // earlier, as going dead after the call.
    // We only need to report these when we're doing fully-interruptible
    assert(compiler->codeGen->GetInterruptible());
    GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);
    for (regPtrDsc* genRegPtrTemp = genStackPtrFirst; genRegPtrTemp != genStackPtrLast;
         genRegPtrTemp = genRegPtrTemp->rpdNext)
    {
        // Skip entries that are not outgoing-argument records.
        if (!genRegPtrTemp->rpdArg)
        {
            continue;
        }
        assert(genRegPtrTemp->rpdGCtypeGet() != GCT_NONE);
        assert(genRegPtrTemp->rpdArgTypeGet() == rpdARG_PUSH);
        // Rebuild the same key used when the slot was reported live, so the
        // lookup below finds the slot id assigned in the first pass.
        StackSlotIdKey sskey(genRegPtrTemp->rpdPtrArg, false,
                             genRegPtrTemp->rpdGCtypeGet() == GCT_BYREF ? GC_SLOT_INTERIOR : GC_SLOT_BASE);
        GcSlotId varSlotId;
        bool b = m_stackSlotMap->Lookup(sskey, &varSlotId);
        assert(b); // Should have been added in the first pass.
        // The argument slot goes dead at the call.
        gcInfoEncoderWithLog->SetSlotState(instrOffset, varSlotId, GC_SLOT_DEAD);
    }
}
#undef GCENCODER_WITH_LOGGING
#endif // !JIT32_GCENCODER
/*****************************************************************************/
/*****************************************************************************/
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/libraries/System.Reflection/src/MatchingRefApiCompatBaseline.txt | Compat issues with assembly System.Reflection:
MembersMustExist : Member 'protected System.ModuleHandle System.Reflection.Module.GetModuleHandleImpl()' does not exist in the reference but it does exist in the implementation.
MembersMustExist : Member 'public System.Reflection.ParameterInfo[] System.Reflection.MethodBase.GetParametersNoCopy()' does not exist in the reference but it does exist in the implementation.
MembersMustExist : Member 'public System.Reflection.MethodBase System.Reflection.MethodBase.MetadataDefinitionMethod.get()' does not exist in the reference but it does exist in the implementation.
MembersMustExist : Member 'public System.Int32 System.Reflection.MethodInfo.GenericParameterCount.get()' does not exist in the reference but it does exist in the implementation.
Total Issues: 4
| Compat issues with assembly System.Reflection:
MembersMustExist : Member 'protected System.ModuleHandle System.Reflection.Module.GetModuleHandleImpl()' does not exist in the reference but it does exist in the implementation.
MembersMustExist : Member 'public System.Reflection.ParameterInfo[] System.Reflection.MethodBase.GetParametersNoCopy()' does not exist in the reference but it does exist in the implementation.
MembersMustExist : Member 'public System.Reflection.MethodBase System.Reflection.MethodBase.MetadataDefinitionMethod.get()' does not exist in the reference but it does exist in the implementation.
MembersMustExist : Member 'public System.Int32 System.Reflection.MethodInfo.GenericParameterCount.get()' does not exist in the reference but it does exist in the implementation.
Total Issues: 4
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/tests/Interop/PInvoke/Generics/GenericsNative.VectorC.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdio.h>
#include <stdint.h>
#include <xplatform.h>
#include <platformdefines.h>
typedef struct {
char16_t e00;
char16_t e01;
char16_t e02;
char16_t e03;
char16_t e04;
char16_t e05;
char16_t e06;
char16_t e07;
} VectorC128;
typedef struct {
char16_t e00;
char16_t e01;
char16_t e02;
char16_t e03;
char16_t e04;
char16_t e05;
char16_t e06;
char16_t e07;
char16_t e08;
char16_t e09;
char16_t e10;
char16_t e11;
char16_t e12;
char16_t e13;
char16_t e14;
char16_t e15;
} VectorC256;
static VectorC128 VectorC128Value = { };
static VectorC256 VectorC256Value = { };
extern "C" DLL_EXPORT VectorC128 STDMETHODCALLTYPE GetVectorC128(char16_t e00, char16_t e01, char16_t e02, char16_t e03, char16_t e04, char16_t e05, char16_t e06, char16_t e07)
{
char16_t value[8] = { e00, e01, e02, e03, e04, e05, e06, e07 };
return *reinterpret_cast<VectorC128*>(value);
}
extern "C" DLL_EXPORT VectorC256 STDMETHODCALLTYPE GetVectorC256(char16_t e00, char16_t e01, char16_t e02, char16_t e03, char16_t e04, char16_t e05, char16_t e06, char16_t e07, char16_t e08, char16_t e09, char16_t e10, char16_t e11, char16_t e12, char16_t e13, char16_t e14, char16_t e15)
{
char16_t value[16] = { e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15 };
return *reinterpret_cast<VectorC256*>(value);
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVectorC128Out(char16_t e00, char16_t e01, char16_t e02, char16_t e03, char16_t e04, char16_t e05, char16_t e06, char16_t e07, VectorC128* pValue)
{
*pValue = GetVectorC128(e00, e01, e02, e03, e04, e05, e06, e07);
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVectorC256Out(char16_t e00, char16_t e01, char16_t e02, char16_t e03, char16_t e04, char16_t e05, char16_t e06, char16_t e07, char16_t e08, char16_t e09, char16_t e10, char16_t e11, char16_t e12, char16_t e13, char16_t e14, char16_t e15, VectorC256* pValue)
{
*pValue = GetVectorC256(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15);
}
extern "C" DLL_EXPORT const VectorC128* STDMETHODCALLTYPE GetVectorC128Ptr(char16_t e00, char16_t e01, char16_t e02, char16_t e03, char16_t e04, char16_t e05, char16_t e06, char16_t e07)
{
GetVectorC128Out(e00, e01, e02, e03, e04, e05, e06, e07, &VectorC128Value);
return &VectorC128Value;
}
extern "C" DLL_EXPORT const VectorC256* STDMETHODCALLTYPE GetVectorC256Ptr(char16_t e00, char16_t e01, char16_t e02, char16_t e03, char16_t e04, char16_t e05, char16_t e06, char16_t e07, char16_t e08, char16_t e09, char16_t e10, char16_t e11, char16_t e12, char16_t e13, char16_t e14, char16_t e15)
{
GetVectorC256Out(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, &VectorC256Value);
return &VectorC256Value;
}
extern "C" DLL_EXPORT VectorC128 STDMETHODCALLTYPE AddVectorC128(VectorC128 lhs, VectorC128 rhs)
{
throw "P/Invoke for Vector<char> should be unsupported.";
}
extern "C" DLL_EXPORT VectorC256 STDMETHODCALLTYPE AddVectorC256(VectorC256 lhs, VectorC256 rhs)
{
throw "P/Invoke for Vector<char> should be unsupported.";
}
extern "C" DLL_EXPORT VectorC128 STDMETHODCALLTYPE AddVectorC128s(const VectorC128* pValues, uint32_t count)
{
throw "P/Invoke for Vector<char> should be unsupported.";
}
extern "C" DLL_EXPORT VectorC256 STDMETHODCALLTYPE AddVectorC256s(const VectorC256* pValues, uint32_t count)
{
throw "P/Invoke for Vector<char> should be unsupported.";
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdio.h>
#include <stdint.h>
#include <xplatform.h>
#include <platformdefines.h>
typedef struct {
char16_t e00;
char16_t e01;
char16_t e02;
char16_t e03;
char16_t e04;
char16_t e05;
char16_t e06;
char16_t e07;
} VectorC128;
typedef struct {
char16_t e00;
char16_t e01;
char16_t e02;
char16_t e03;
char16_t e04;
char16_t e05;
char16_t e06;
char16_t e07;
char16_t e08;
char16_t e09;
char16_t e10;
char16_t e11;
char16_t e12;
char16_t e13;
char16_t e14;
char16_t e15;
} VectorC256;
static VectorC128 VectorC128Value = { };
static VectorC256 VectorC256Value = { };
extern "C" DLL_EXPORT VectorC128 STDMETHODCALLTYPE GetVectorC128(char16_t e00, char16_t e01, char16_t e02, char16_t e03, char16_t e04, char16_t e05, char16_t e06, char16_t e07)
{
char16_t value[8] = { e00, e01, e02, e03, e04, e05, e06, e07 };
return *reinterpret_cast<VectorC128*>(value);
}
extern "C" DLL_EXPORT VectorC256 STDMETHODCALLTYPE GetVectorC256(char16_t e00, char16_t e01, char16_t e02, char16_t e03, char16_t e04, char16_t e05, char16_t e06, char16_t e07, char16_t e08, char16_t e09, char16_t e10, char16_t e11, char16_t e12, char16_t e13, char16_t e14, char16_t e15)
{
char16_t value[16] = { e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15 };
return *reinterpret_cast<VectorC256*>(value);
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVectorC128Out(char16_t e00, char16_t e01, char16_t e02, char16_t e03, char16_t e04, char16_t e05, char16_t e06, char16_t e07, VectorC128* pValue)
{
*pValue = GetVectorC128(e00, e01, e02, e03, e04, e05, e06, e07);
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVectorC256Out(char16_t e00, char16_t e01, char16_t e02, char16_t e03, char16_t e04, char16_t e05, char16_t e06, char16_t e07, char16_t e08, char16_t e09, char16_t e10, char16_t e11, char16_t e12, char16_t e13, char16_t e14, char16_t e15, VectorC256* pValue)
{
*pValue = GetVectorC256(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15);
}
extern "C" DLL_EXPORT const VectorC128* STDMETHODCALLTYPE GetVectorC128Ptr(char16_t e00, char16_t e01, char16_t e02, char16_t e03, char16_t e04, char16_t e05, char16_t e06, char16_t e07)
{
GetVectorC128Out(e00, e01, e02, e03, e04, e05, e06, e07, &VectorC128Value);
return &VectorC128Value;
}
extern "C" DLL_EXPORT const VectorC256* STDMETHODCALLTYPE GetVectorC256Ptr(char16_t e00, char16_t e01, char16_t e02, char16_t e03, char16_t e04, char16_t e05, char16_t e06, char16_t e07, char16_t e08, char16_t e09, char16_t e10, char16_t e11, char16_t e12, char16_t e13, char16_t e14, char16_t e15)
{
GetVectorC256Out(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, &VectorC256Value);
return &VectorC256Value;
}
extern "C" DLL_EXPORT VectorC128 STDMETHODCALLTYPE AddVectorC128(VectorC128 lhs, VectorC128 rhs)
{
throw "P/Invoke for Vector<char> should be unsupported.";
}
extern "C" DLL_EXPORT VectorC256 STDMETHODCALLTYPE AddVectorC256(VectorC256 lhs, VectorC256 rhs)
{
throw "P/Invoke for Vector<char> should be unsupported.";
}
extern "C" DLL_EXPORT VectorC128 STDMETHODCALLTYPE AddVectorC128s(const VectorC128* pValues, uint32_t count)
{
throw "P/Invoke for Vector<char> should be unsupported.";
}
extern "C" DLL_EXPORT VectorC256 STDMETHODCALLTYPE AddVectorC256s(const VectorC256* pValues, uint32_t count)
{
throw "P/Invoke for Vector<char> should be unsupported.";
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/tests/JIT/Methodical/structs/StructStackParams.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// This tests passing structs that are less than 64-bits in size, but that
// don't match the size of a primitive type, and passes them as the 6th
// parameter so that they are likely to wind up on the stack for ABIs that
// pass structs by value.
using System;
using System.Runtime.CompilerServices;
// Struct that's greater than 32-bits, but not a multiple of 32-bits.
public struct MyStruct1
{
public byte f1;
public byte f2;
public short f3;
public short f4;
}
// Struct that's less than 32-bits, but not the same size as any primitive type.
public struct MyStruct2
{
public byte f1;
public byte f2;
public byte f3;
}
// Struct that's less than 64-bits, but not the same size as any primitive type.
public struct MyStruct3
{
public short f1;
public short f2;
public short f3;
}
// Struct that's greater than 64-bits, but not a multiple of 64-bits.
public struct MyStruct4
{
public int f1;
public int f2;
public short f3;
}
public class MyProgram
{
const int Pass = 100;
const int Fail = -1;
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static byte GetByte(byte i)
{
return i;
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static short GetShort(short i)
{
return i;
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static int GetInt(int i)
{
return i;
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static int Check1(int w, int i1, int i2, int i3, int i4, int i5, int i6, int i7, MyStruct1 s1)
{
if ((w != 1) || (s1.f1 != i1) || (s1.f2 != i2) || (s1.f3 != i3) || (s1.f4 != i4))
{
Console.WriteLine("Check1: FAIL");
return Fail;
}
Console.WriteLine("Check1: PASS");
return Pass;
}
public static int TestStruct1()
{
MyStruct1 s1;
s1.f1 = GetByte(1); s1.f2 = GetByte(2); s1.f3 = GetShort(3); s1.f4 = GetShort(4);
int x = (s1.f1 * s1.f2 * s1.f3 * s1.f4);
int y = (s1.f1 - s1.f2) * (s1.f3 - s1.f4);
int z = (s1.f1 + s1.f2) * (s1.f3 + s1.f4);
int w = (x + y) / z;
return Check1(w, 1, 2, 3, 4, 5, 6, 7, s1);
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static int Check2(int w, int i1, int i2, int i3, int i4, int i5, int i6, int i7, MyStruct2 s2)
{
if ((w != 2) || (s2.f1 != i1) || (s2.f2 != i2) || (s2.f3 != i3) || (i4 != 4))
{
Console.WriteLine("Check2: FAIL");
return Fail;
}
Console.WriteLine("Check2: PASS");
return Pass;
}
public static int TestStruct2()
{
MyStruct2 s2;
s2.f1 = GetByte(1); s2.f2 = GetByte(2); s2.f3 = GetByte(3);
int x = s2.f1 * s2.f2 * s2.f3;
int y = (s2.f1 + s2.f2) * s2.f3;
int z = s2.f1 + s2.f2 + s2.f3;
int w = (x + y) / z;
return Check2(w, 1, 2, 3, 4, 5, 6, 7, s2);
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static int Check3(int w, int i1, int i2, int i3, int i4, int i5, int i6, int i7, MyStruct3 s3)
{
if ((w != 2) || (s3.f1 != i1) || (s3.f2 != i2) || (s3.f3 != i3) || (i4 != 4))
{
Console.WriteLine("Check3: FAIL");
return Fail;
}
Console.WriteLine("Check3: PASS");
return Pass;
}
public static int TestStruct3()
{
MyStruct3 s3;
s3.f1 = GetByte(1); s3.f2 = GetByte(2); s3.f3 = GetByte(3);
int x = s3.f1 * s3.f2 * s3.f3;
int y = (s3.f1 + s3.f2) * s3.f3;
int z = s3.f1 + s3.f2 + s3.f3;
int w = (x + y) / z;
return Check3(w, 1, 2, 3, 4, 5, 6, 7, s3);
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static int Check4(int w, int i1, int i2, int i3, int i4, int i5, int i6, int i7, MyStruct4 s4)
{
if ((w != 2) || (s4.f1 != i1) || (s4.f2 != i2) || (s4.f3 != i3) || (i4 != 4))
{
Console.WriteLine("Check4: FAIL");
return Fail;
}
Console.WriteLine("Check4: PASS");
return Pass;
}
public static int TestStruct4()
{
MyStruct4 s4;
s4.f1 = GetInt(1); s4.f2 = GetInt(2); s4.f3 = GetShort(3);
int x = s4.f1 * s4.f2 * s4.f3;
int y = (s4.f1 + s4.f2) * s4.f3;
int z = s4.f1 + s4.f2 + s4.f3;
int w = (x + y) / z;
return Check4(w, 1, 2, 3, 4, 5, 6, 7, s4);
}
public static int Main()
{
int retVal = Pass;
if (TestStruct1() != Pass)
{
retVal = Fail;
}
if (TestStruct2() != Pass)
{
retVal = Fail;
}
if (TestStruct3() != Pass)
{
retVal = Fail;
}
if (TestStruct4() != Pass)
{
retVal = Fail;
}
return retVal;
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// This tests passing structs that are less than 64-bits in size, but that
// don't match the size of a primitive type, and passes them as the 6th
// parameter so that they are likely to wind up on the stack for ABIs that
// pass structs by value.
using System;
using System.Runtime.CompilerServices;
// Struct that's greater than 32-bits, but not a multiple of 32-bits.
public struct MyStruct1
{
public byte f1;
public byte f2;
public short f3;
public short f4;
}
// Struct that's less than 32-bits, but not the same size as any primitive type.
public struct MyStruct2
{
public byte f1;
public byte f2;
public byte f3;
}
// Struct that's less than 64-bits, but not the same size as any primitive type.
public struct MyStruct3
{
public short f1;
public short f2;
public short f3;
}
// Struct that's greater than 64-bits, but not a multiple of 64-bits.
public struct MyStruct4
{
public int f1;
public int f2;
public short f3;
}
public class MyProgram
{
const int Pass = 100;
const int Fail = -1;
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static byte GetByte(byte i)
{
return i;
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static short GetShort(short i)
{
return i;
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static int GetInt(int i)
{
return i;
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static int Check1(int w, int i1, int i2, int i3, int i4, int i5, int i6, int i7, MyStruct1 s1)
{
if ((w != 1) || (s1.f1 != i1) || (s1.f2 != i2) || (s1.f3 != i3) || (s1.f4 != i4))
{
Console.WriteLine("Check1: FAIL");
return Fail;
}
Console.WriteLine("Check1: PASS");
return Pass;
}
public static int TestStruct1()
{
MyStruct1 s1;
s1.f1 = GetByte(1); s1.f2 = GetByte(2); s1.f3 = GetShort(3); s1.f4 = GetShort(4);
int x = (s1.f1 * s1.f2 * s1.f3 * s1.f4);
int y = (s1.f1 - s1.f2) * (s1.f3 - s1.f4);
int z = (s1.f1 + s1.f2) * (s1.f3 + s1.f4);
int w = (x + y) / z;
return Check1(w, 1, 2, 3, 4, 5, 6, 7, s1);
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static int Check2(int w, int i1, int i2, int i3, int i4, int i5, int i6, int i7, MyStruct2 s2)
{
if ((w != 2) || (s2.f1 != i1) || (s2.f2 != i2) || (s2.f3 != i3) || (i4 != 4))
{
Console.WriteLine("Check2: FAIL");
return Fail;
}
Console.WriteLine("Check2: PASS");
return Pass;
}
public static int TestStruct2()
{
MyStruct2 s2;
s2.f1 = GetByte(1); s2.f2 = GetByte(2); s2.f3 = GetByte(3);
int x = s2.f1 * s2.f2 * s2.f3;
int y = (s2.f1 + s2.f2) * s2.f3;
int z = s2.f1 + s2.f2 + s2.f3;
int w = (x + y) / z;
return Check2(w, 1, 2, 3, 4, 5, 6, 7, s2);
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static int Check3(int w, int i1, int i2, int i3, int i4, int i5, int i6, int i7, MyStruct3 s3)
{
if ((w != 2) || (s3.f1 != i1) || (s3.f2 != i2) || (s3.f3 != i3) || (i4 != 4))
{
Console.WriteLine("Check3: FAIL");
return Fail;
}
Console.WriteLine("Check3: PASS");
return Pass;
}
public static int TestStruct3()
{
MyStruct3 s3;
s3.f1 = GetByte(1); s3.f2 = GetByte(2); s3.f3 = GetByte(3);
int x = s3.f1 * s3.f2 * s3.f3;
int y = (s3.f1 + s3.f2) * s3.f3;
int z = s3.f1 + s3.f2 + s3.f3;
int w = (x + y) / z;
return Check3(w, 1, 2, 3, 4, 5, 6, 7, s3);
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static int Check4(int w, int i1, int i2, int i3, int i4, int i5, int i6, int i7, MyStruct4 s4)
{
if ((w != 2) || (s4.f1 != i1) || (s4.f2 != i2) || (s4.f3 != i3) || (i4 != 4))
{
Console.WriteLine("Check4: FAIL");
return Fail;
}
Console.WriteLine("Check4: PASS");
return Pass;
}
public static int TestStruct4()
{
MyStruct4 s4;
s4.f1 = GetInt(1); s4.f2 = GetInt(2); s4.f3 = GetShort(3);
int x = s4.f1 * s4.f2 * s4.f3;
int y = (s4.f1 + s4.f2) * s4.f3;
int z = s4.f1 + s4.f2 + s4.f3;
int w = (x + y) / z;
return Check4(w, 1, 2, 3, 4, 5, 6, 7, s4);
}
public static int Main()
{
int retVal = Pass;
if (TestStruct1() != Pass)
{
retVal = Fail;
}
if (TestStruct2() != Pass)
{
retVal = Fail;
}
if (TestStruct3() != Pass)
{
retVal = Fail;
}
if (TestStruct4() != Pass)
{
retVal = Fail;
}
return retVal;
}
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/coreclr/pal/tests/palsuite/c_runtime/qsort/test2/test2.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test2.c
**
** Purpose: Calls qsort to sort a buffer, and verifies that it has done
** the job correctly.
**
**
**==========================================================================*/
#include <palsuite.h>
int __cdecl twocharcmp_qsort_test2(const void *pa, const void *pb)
{
return memcmp(pa, pb, 2);
}
PALTEST(c_runtime_qsort_test2_paltest_qsort_test2, "c_runtime/qsort/test2/paltest_qsort_test2")
{
char before[] = "ccggaaiieehhddbbjjff";
const char after[] = "aabbccddeeffgghhiijj";
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
qsort(before, (sizeof(before) - 1) / 2, 2 * sizeof(char), twocharcmp_qsort_test2);
if (memcmp(before, after, sizeof(before)) != 0)
{
Fail("qsort did not correctly sort an array of 2-character "
"buffers.\n");
}
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test2.c
**
** Purpose: Calls qsort to sort a buffer, and verifies that it has done
** the job correctly.
**
**
**==========================================================================*/
#include <palsuite.h>
int __cdecl twocharcmp_qsort_test2(const void *pa, const void *pb)
{
return memcmp(pa, pb, 2);
}
PALTEST(c_runtime_qsort_test2_paltest_qsort_test2, "c_runtime/qsort/test2/paltest_qsort_test2")
{
char before[] = "ccggaaiieehhddbbjjff";
const char after[] = "aabbccddeeffgghhiijj";
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
qsort(before, (sizeof(before) - 1) / 2, 2 * sizeof(char), twocharcmp_qsort_test2);
if (memcmp(before, after, sizeof(before)) != 0)
{
Fail("qsort did not correctly sort an array of 2-character "
"buffers.\n");
}
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/libraries/Common/src/System/Text/StringBuilderCache.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Text
{
/// <summary>Provide a cached reusable instance of stringbuilder per thread.</summary>
internal static class StringBuilderCache
{
// The value 360 was chosen in discussion with performance experts as a compromise between using
// as little memory per thread as possible and still covering a large part of short-lived
// StringBuilder creations on the startup path of VS designers.
internal const int MaxBuilderSize = 360;
private const int DefaultCapacity = 16; // == StringBuilder.DefaultCapacity
// WARNING: We allow diagnostic tools to directly inspect this member (t_cachedInstance).
// See https://github.com/dotnet/corert/blob/master/Documentation/design-docs/diagnostics/diagnostics-tools-contract.md for more details.
// Please do not change the type, the name, or the semantic usage of this member without understanding the implication for tools.
// Get in touch with the diagnostics team if you have questions.
[ThreadStatic]
private static StringBuilder? t_cachedInstance;
/// <summary>Get a StringBuilder for the specified capacity.</summary>
/// <remarks>If a StringBuilder of an appropriate size is cached, it will be returned and the cache emptied.</remarks>
public static StringBuilder Acquire(int capacity = DefaultCapacity)
{
if (capacity <= MaxBuilderSize)
{
StringBuilder? sb = t_cachedInstance;
if (sb != null)
{
// Avoid stringbuilder block fragmentation by getting a new StringBuilder
// when the requested size is larger than the current capacity
if (capacity <= sb.Capacity)
{
t_cachedInstance = null;
sb.Clear();
return sb;
}
}
}
return new StringBuilder(capacity);
}
/// <summary>Place the specified builder in the cache if it is not too big.</summary>
public static void Release(StringBuilder sb)
{
if (sb.Capacity <= MaxBuilderSize)
{
t_cachedInstance = sb;
}
}
/// <summary>ToString() the stringbuilder, Release it to the cache, and return the resulting string.</summary>
public static string GetStringAndRelease(StringBuilder sb)
{
string result = sb.ToString();
Release(sb);
return result;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Text
{
/// <summary>Provide a cached reusable instance of stringbuilder per thread.</summary>
internal static class StringBuilderCache
{
// The value 360 was chosen in discussion with performance experts as a compromise between using
// as little memory per thread as possible and still covering a large part of short-lived
// StringBuilder creations on the startup path of VS designers.
internal const int MaxBuilderSize = 360;
private const int DefaultCapacity = 16; // == StringBuilder.DefaultCapacity
// WARNING: We allow diagnostic tools to directly inspect this member (t_cachedInstance).
// See https://github.com/dotnet/corert/blob/master/Documentation/design-docs/diagnostics/diagnostics-tools-contract.md for more details.
// Please do not change the type, the name, or the semantic usage of this member without understanding the implication for tools.
// Get in touch with the diagnostics team if you have questions.
[ThreadStatic]
private static StringBuilder? t_cachedInstance;
/// <summary>Get a StringBuilder for the specified capacity.</summary>
/// <remarks>If a StringBuilder of an appropriate size is cached, it will be returned and the cache emptied.</remarks>
public static StringBuilder Acquire(int capacity = DefaultCapacity)
{
if (capacity <= MaxBuilderSize)
{
StringBuilder? sb = t_cachedInstance;
if (sb != null)
{
// Avoid stringbuilder block fragmentation by getting a new StringBuilder
// when the requested size is larger than the current capacity
if (capacity <= sb.Capacity)
{
t_cachedInstance = null;
sb.Clear();
return sb;
}
}
}
return new StringBuilder(capacity);
}
/// <summary>Place the specified builder in the cache if it is not too big.</summary>
public static void Release(StringBuilder sb)
{
if (sb.Capacity <= MaxBuilderSize)
{
t_cachedInstance = sb;
}
}
/// <summary>ToString() the stringbuilder, Release it to the cache, and return the resulting string.</summary>
public static string GetStringAndRelease(StringBuilder sb)
{
string result = sb.ToString();
Release(sb);
return result;
}
}
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest667/Generated667.ilproj | <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="Generated667.il" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\TestFramework\TestFramework.csproj" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="Generated667.il" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\TestFramework\TestFramework.csproj" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/bft20a.xsl | <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:template match="bar">
<b>
<xsl:apply-templates/>
</b>
</xsl:template>
</xsl:stylesheet>
| <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:template match="bar">
<b>
<xsl:apply-templates/>
</b>
</xsl:template>
</xsl:stylesheet>
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/tests/JIT/Methodical/int64/signed/s_ldc_mul_il_r.ilproj | <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="s_ldc_mul.il" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="s_ldc_mul.il" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/libraries/System.Security.Permissions/src/System/ServiceProcess/ServiceControllerPermissionAttribute.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Security;
using System.Security.Permissions;
namespace System.ServiceProcess
{
#if NETCOREAPP
[Obsolete(Obsoletions.CodeAccessSecurityMessage, DiagnosticId = Obsoletions.CodeAccessSecurityDiagId, UrlFormat = Obsoletions.SharedUrlFormat)]
#endif
[AttributeUsage(AttributeTargets.Method | AttributeTargets.Constructor | AttributeTargets.Class | AttributeTargets.Struct | AttributeTargets.Assembly | AttributeTargets.Event, AllowMultiple = true, Inherited = false )]
public class ServiceControllerPermissionAttribute : CodeAccessSecurityAttribute
{
public ServiceControllerPermissionAttribute(SecurityAction action): base(action) { }
public string MachineName { get => null; set { } }
public ServiceControllerPermissionAccess PermissionAccess { get => default(ServiceControllerPermissionAccess); set { } }
public string ServiceName { get => null; set { } }
public override IPermission CreatePermission() { return default(IPermission); }
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Security;
using System.Security.Permissions;
namespace System.ServiceProcess
{
#if NETCOREAPP
[Obsolete(Obsoletions.CodeAccessSecurityMessage, DiagnosticId = Obsoletions.CodeAccessSecurityDiagId, UrlFormat = Obsoletions.SharedUrlFormat)]
#endif
[AttributeUsage(AttributeTargets.Method | AttributeTargets.Constructor | AttributeTargets.Class | AttributeTargets.Struct | AttributeTargets.Assembly | AttributeTargets.Event, AllowMultiple = true, Inherited = false )]
public class ServiceControllerPermissionAttribute : CodeAccessSecurityAttribute
{
public ServiceControllerPermissionAttribute(SecurityAction action): base(action) { }
public string MachineName { get => null; set { } }
public ServiceControllerPermissionAccess PermissionAccess { get => default(ServiceControllerPermissionAccess); set { } }
public string ServiceName { get => null; set { } }
public override IPermission CreatePermission() { return default(IPermission); }
}
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/tests/baseservices/threading/generics/Monitor/EnterExit04.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Threading;
public struct ValX1<T> {}
public class RefX1<T> {}
struct Gen<T>
{
public static void EnterExitTest<U>()
{
Type monitorT = typeof(Gen<T>);
Type monitorU = typeof(Gen<U>);
if(monitorU.Equals(monitorT))
throw new Exception("Invalid use of test case, T must not be equal to U - POSSIBLE TYPE SYSTEM BUG");
TestHelper myHelper = new TestHelper(Test_EnterExit04.nThreads);
TestHelper myHelper2 = new TestHelper(Test_EnterExit04.nThreads);
WaitHandle[] myWaiter = new WaitHandle[2];
myWaiter[0] = myHelper.m_Event;
myWaiter[1] = myHelper2.m_Event;
// for(int i=0;i<Test.nThreads;i++)
// {
// new MonitorDelegate(myHelper.Consumer).BeginInvoke(monitorT,null,null);
// new MonitorDelegate(myHelper2.Consumer).BeginInvoke(monitorU,null,null);
// }
for(int i=0; i<Test_EnterExit04.nThreads; i++)
{
ThreadPool.QueueUserWorkItem(state =>
{
myHelper.Consumer(monitorT);
});
ThreadPool.QueueUserWorkItem(state =>
{
myHelper2.Consumer(monitorU);
});
}
for(int i=0;i<6;i++)
{
if(WaitHandle.WaitAll(myWaiter,10000))//,true))
break;
if(myHelper.Error == true || myHelper2.Error == true)
break;
}
Test_EnterExit04.Eval(!(myHelper.Error || myHelper2.Error));
}
}
public class Test_EnterExit04
{
public static int nThreads = 10;
public static int counter = 0;
public static bool result = true;
public static void Eval(bool exp)
{
counter++;
if (!exp)
{
result = exp;
Console.WriteLine("Test Failed at location: " + counter);
}
}
public static int Main()
{
Gen<double>.EnterExitTest<int>();
Gen<string>.EnterExitTest<int>();
Gen<object>.EnterExitTest<int>();
Gen<Guid>.EnterExitTest<int>();
Gen<RefX1<int>>.EnterExitTest<int>();
Gen<RefX1<string>>.EnterExitTest<int>();
Gen<ValX1<int>>.EnterExitTest<int>();
Gen<ValX1<string>>.EnterExitTest<int>();
Gen<int>.EnterExitTest<double>();
Gen<string>.EnterExitTest<double>();
Gen<object>.EnterExitTest<double>();
Gen<Guid>.EnterExitTest<double>();
Gen<RefX1<int>>.EnterExitTest<double>();
Gen<RefX1<string>>.EnterExitTest<double>();
Gen<ValX1<int>>.EnterExitTest<double>();
Gen<ValX1<string>>.EnterExitTest<double>();
Gen<int>.EnterExitTest<string>();
Gen<double>.EnterExitTest<string>();
Gen<object>.EnterExitTest<string>();
Gen<Guid>.EnterExitTest<string>();
Gen<RefX1<int>>.EnterExitTest<string>();
Gen<RefX1<string>>.EnterExitTest<string>();
Gen<ValX1<int>>.EnterExitTest<string>();
Gen<ValX1<string>>.EnterExitTest<string>();
Gen<int>.EnterExitTest<object>();
Gen<double>.EnterExitTest<object>();
Gen<string>.EnterExitTest<object>();
Gen<Guid>.EnterExitTest<object>();
Gen<RefX1<int>>.EnterExitTest<object>();
Gen<RefX1<string>>.EnterExitTest<object>();
Gen<ValX1<int>>.EnterExitTest<object>();
Gen<ValX1<string>>.EnterExitTest<object>();
Gen<int>.EnterExitTest<Guid>();
Gen<double>.EnterExitTest<Guid>();
Gen<string>.EnterExitTest<Guid>();
Gen<object>.EnterExitTest<Guid>();
Gen<RefX1<int>>.EnterExitTest<Guid>();
Gen<RefX1<string>>.EnterExitTest<Guid>();
Gen<ValX1<int>>.EnterExitTest<Guid>();
Gen<ValX1<string>>.EnterExitTest<Guid>();
Gen<int>.EnterExitTest<RefX1<int>>();
Gen<double>.EnterExitTest<RefX1<int>>();
Gen<string>.EnterExitTest<RefX1<int>>();
Gen<object>.EnterExitTest<RefX1<int>>();
Gen<Guid>.EnterExitTest<RefX1<int>>();
Gen<RefX1<string>>.EnterExitTest<RefX1<int>>();
Gen<ValX1<int>>.EnterExitTest<RefX1<int>>();
Gen<ValX1<string>>.EnterExitTest<RefX1<int>>();
Gen<int>.EnterExitTest<RefX1<string>>();
Gen<double>.EnterExitTest<RefX1<string>>();
Gen<string>.EnterExitTest<RefX1<string>>();
Gen<object>.EnterExitTest<RefX1<string>>();
Gen<Guid>.EnterExitTest<RefX1<string>>();
Gen<RefX1<int>>.EnterExitTest<RefX1<string>>();
Gen<ValX1<int>>.EnterExitTest<RefX1<string>>();
Gen<ValX1<string>>.EnterExitTest<RefX1<string>>();
Gen<int>.EnterExitTest<ValX1<int>>();
Gen<double>.EnterExitTest<ValX1<int>>();
Gen<string>.EnterExitTest<ValX1<int>>(); //offending line
Gen<object>.EnterExitTest<ValX1<int>>(); //offending line
Gen<Guid>.EnterExitTest<ValX1<int>>();
Gen<RefX1<int>>.EnterExitTest<ValX1<int>>(); //offending line
Gen<RefX1<string>>.EnterExitTest<ValX1<int>>(); //offending line
Gen<ValX1<string>>.EnterExitTest<ValX1<int>>(); //offending line
Gen<int>.EnterExitTest<ValX1<string>>(); //offending line
Gen<double>.EnterExitTest<ValX1<string>>(); //offending line
Gen<string>.EnterExitTest<ValX1<string>>(); //offending line
Gen<object>.EnterExitTest<ValX1<string>>(); //offending line
Gen<Guid>.EnterExitTest<ValX1<string>>(); //offending line
Gen<RefX1<int>>.EnterExitTest<ValX1<string>>(); //offending line
Gen<RefX1<string>>.EnterExitTest<ValX1<string>>(); //offending line
Gen<ValX1<int>>.EnterExitTest<ValX1<string>>(); //offending line
if (result)
{
Console.WriteLine("Test Passed");
return 100;
}
else
{
Console.WriteLine("Test Failed");
return 1;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Threading;
public struct ValX1<T> {}
public class RefX1<T> {}
struct Gen<T>
{
public static void EnterExitTest<U>()
{
Type monitorT = typeof(Gen<T>);
Type monitorU = typeof(Gen<U>);
if(monitorU.Equals(monitorT))
throw new Exception("Invalid use of test case, T must not be equal to U - POSSIBLE TYPE SYSTEM BUG");
TestHelper myHelper = new TestHelper(Test_EnterExit04.nThreads);
TestHelper myHelper2 = new TestHelper(Test_EnterExit04.nThreads);
WaitHandle[] myWaiter = new WaitHandle[2];
myWaiter[0] = myHelper.m_Event;
myWaiter[1] = myHelper2.m_Event;
// for(int i=0;i<Test.nThreads;i++)
// {
// new MonitorDelegate(myHelper.Consumer).BeginInvoke(monitorT,null,null);
// new MonitorDelegate(myHelper2.Consumer).BeginInvoke(monitorU,null,null);
// }
for(int i=0; i<Test_EnterExit04.nThreads; i++)
{
ThreadPool.QueueUserWorkItem(state =>
{
myHelper.Consumer(monitorT);
});
ThreadPool.QueueUserWorkItem(state =>
{
myHelper2.Consumer(monitorU);
});
}
for(int i=0;i<6;i++)
{
if(WaitHandle.WaitAll(myWaiter,10000))//,true))
break;
if(myHelper.Error == true || myHelper2.Error == true)
break;
}
Test_EnterExit04.Eval(!(myHelper.Error || myHelper2.Error));
}
}
public class Test_EnterExit04
{
public static int nThreads = 10;
public static int counter = 0;
public static bool result = true;
public static void Eval(bool exp)
{
counter++;
if (!exp)
{
result = exp;
Console.WriteLine("Test Failed at location: " + counter);
}
}
public static int Main()
{
Gen<double>.EnterExitTest<int>();
Gen<string>.EnterExitTest<int>();
Gen<object>.EnterExitTest<int>();
Gen<Guid>.EnterExitTest<int>();
Gen<RefX1<int>>.EnterExitTest<int>();
Gen<RefX1<string>>.EnterExitTest<int>();
Gen<ValX1<int>>.EnterExitTest<int>();
Gen<ValX1<string>>.EnterExitTest<int>();
Gen<int>.EnterExitTest<double>();
Gen<string>.EnterExitTest<double>();
Gen<object>.EnterExitTest<double>();
Gen<Guid>.EnterExitTest<double>();
Gen<RefX1<int>>.EnterExitTest<double>();
Gen<RefX1<string>>.EnterExitTest<double>();
Gen<ValX1<int>>.EnterExitTest<double>();
Gen<ValX1<string>>.EnterExitTest<double>();
Gen<int>.EnterExitTest<string>();
Gen<double>.EnterExitTest<string>();
Gen<object>.EnterExitTest<string>();
Gen<Guid>.EnterExitTest<string>();
Gen<RefX1<int>>.EnterExitTest<string>();
Gen<RefX1<string>>.EnterExitTest<string>();
Gen<ValX1<int>>.EnterExitTest<string>();
Gen<ValX1<string>>.EnterExitTest<string>();
Gen<int>.EnterExitTest<object>();
Gen<double>.EnterExitTest<object>();
Gen<string>.EnterExitTest<object>();
Gen<Guid>.EnterExitTest<object>();
Gen<RefX1<int>>.EnterExitTest<object>();
Gen<RefX1<string>>.EnterExitTest<object>();
Gen<ValX1<int>>.EnterExitTest<object>();
Gen<ValX1<string>>.EnterExitTest<object>();
Gen<int>.EnterExitTest<Guid>();
Gen<double>.EnterExitTest<Guid>();
Gen<string>.EnterExitTest<Guid>();
Gen<object>.EnterExitTest<Guid>();
Gen<RefX1<int>>.EnterExitTest<Guid>();
Gen<RefX1<string>>.EnterExitTest<Guid>();
Gen<ValX1<int>>.EnterExitTest<Guid>();
Gen<ValX1<string>>.EnterExitTest<Guid>();
Gen<int>.EnterExitTest<RefX1<int>>();
Gen<double>.EnterExitTest<RefX1<int>>();
Gen<string>.EnterExitTest<RefX1<int>>();
Gen<object>.EnterExitTest<RefX1<int>>();
Gen<Guid>.EnterExitTest<RefX1<int>>();
Gen<RefX1<string>>.EnterExitTest<RefX1<int>>();
Gen<ValX1<int>>.EnterExitTest<RefX1<int>>();
Gen<ValX1<string>>.EnterExitTest<RefX1<int>>();
Gen<int>.EnterExitTest<RefX1<string>>();
Gen<double>.EnterExitTest<RefX1<string>>();
Gen<string>.EnterExitTest<RefX1<string>>();
Gen<object>.EnterExitTest<RefX1<string>>();
Gen<Guid>.EnterExitTest<RefX1<string>>();
Gen<RefX1<int>>.EnterExitTest<RefX1<string>>();
Gen<ValX1<int>>.EnterExitTest<RefX1<string>>();
Gen<ValX1<string>>.EnterExitTest<RefX1<string>>();
Gen<int>.EnterExitTest<ValX1<int>>();
Gen<double>.EnterExitTest<ValX1<int>>();
Gen<string>.EnterExitTest<ValX1<int>>(); //offending line
Gen<object>.EnterExitTest<ValX1<int>>(); //offending line
Gen<Guid>.EnterExitTest<ValX1<int>>();
Gen<RefX1<int>>.EnterExitTest<ValX1<int>>(); //offending line
Gen<RefX1<string>>.EnterExitTest<ValX1<int>>(); //offending line
Gen<ValX1<string>>.EnterExitTest<ValX1<int>>(); //offending line
Gen<int>.EnterExitTest<ValX1<string>>(); //offending line
Gen<double>.EnterExitTest<ValX1<string>>(); //offending line
Gen<string>.EnterExitTest<ValX1<string>>(); //offending line
Gen<object>.EnterExitTest<ValX1<string>>(); //offending line
Gen<Guid>.EnterExitTest<ValX1<string>>(); //offending line
Gen<RefX1<int>>.EnterExitTest<ValX1<string>>(); //offending line
Gen<RefX1<string>>.EnterExitTest<ValX1<string>>(); //offending line
Gen<ValX1<int>>.EnterExitTest<ValX1<string>>(); //offending line
if (result)
{
Console.WriteLine("Test Passed");
return 100;
}
else
{
Console.WriteLine("Test Failed");
return 1;
}
}
}
| -1 |
dotnet/runtime | 66,234 | Remove compiler warning suppression | Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | AaronRobinsonMSFT | 2022-03-05T03:45:49Z | 2022-03-09T00:57:12Z | 220e67755ae6323a01011b83070dff6f84b02519 | 853e494abd1c1e12d28a76829b4680af7f694afa | Remove compiler warning suppression. Contributes to https://github.com/dotnet/runtime/issues/66154
All changes, except warning 4267, impact both mono and coreclr. Warning 4267 is extensive in mono and will require more effort to address.
~~@janvorli or @am11 - there are updates to the libunwind source. I was planning on submitting a PR to https://github.com/libunwind/libunwind, is that appropriate?~~
Upstream PR: https://github.com/libunwind/libunwind/pull/333
/cc @GrabYourPitchforks | ./src/libraries/System.Threading.Tasks/tests/AggregateExceptionTests.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using Xunit;
namespace System.Threading.Tasks.Tests
{
public class AggregateExceptionTests
{
[Fact]
public static void ConstructorBasic()
{
AggregateException ex = new AggregateException();
Assert.Equal(0, ex.InnerExceptions.Count);
Assert.True(ex.Message != null, "RunAggregateException_Constructor: FAILED. Message property is null when the default constructor is used, expected a default message");
ex = new AggregateException("message");
Assert.Equal(0, ex.InnerExceptions.Count);
Assert.True(ex.Message != null, "RunAggregateException_Constructor: FAILED. Message property is null when the default constructor(string) is used");
ex = new AggregateException("message", new Exception());
Assert.Equal(1, ex.InnerExceptions.Count);
Assert.True(ex.Message != null, "RunAggregateException_Constructor: FAILED. Message property is null when the default constructor(string, Exception) is used");
}
[Fact]
public static void ConstructorInvalidArguments()
{
AggregateException ex = new AggregateException();
Assert.Throws<ArgumentNullException>(() => new AggregateException("message", (Exception)null));
Assert.Throws<ArgumentNullException>(() => new AggregateException("message", (IEnumerable<Exception>)null));
AssertExtensions.Throws<ArgumentException>(null, () => ex = new AggregateException("message", new[] { new Exception(), null }));
}
[Fact]
public static void BaseExceptions()
{
AggregateException ex = new AggregateException();
Assert.Equal(ex.GetBaseException(), ex);
Exception[] innerExceptions = new Exception[0];
ex = new AggregateException(innerExceptions);
Assert.Equal(ex.GetBaseException(), ex);
innerExceptions = new Exception[1] { new AggregateException() };
ex = new AggregateException(innerExceptions);
Assert.Equal(ex.GetBaseException(), innerExceptions[0]);
innerExceptions = new Exception[2] { new AggregateException(), new AggregateException() };
ex = new AggregateException(innerExceptions);
Assert.Equal(ex.GetBaseException(), ex);
}
[Fact]
public static void Handle()
{
AggregateException ex = new AggregateException();
ex = new AggregateException(new[] { new ArgumentException(), new ArgumentException(), new ArgumentException() });
int handledCount = 0;
ex.Handle((e) =>
{
if (e is ArgumentException)
{
handledCount++;
return true;
}
return false;
});
Assert.Equal(handledCount, ex.InnerExceptions.Count);
}
[Fact]
public static void HandleInvalidCases()
{
AggregateException ex = new AggregateException();
Assert.Throws<ArgumentNullException>(() => ex.Handle(null));
ex = new AggregateException(new[] { new Exception(), new ArgumentException(), new ArgumentException() });
int handledCount = 0;
Assert.Throws<AggregateException>(
() => ex.Handle((e) =>
{
if (e is ArgumentException)
{
handledCount++;
return true;
}
return false;
}));
}
// Validates that flattening (including recursive) works.
[Fact]
public static void Flatten()
{
Exception exceptionA = new Exception("A");
Exception exceptionB = new Exception("B");
Exception exceptionC = new Exception("C");
AggregateException aggExceptionBase = new AggregateException("message", exceptionA, exceptionB, exceptionC);
Assert.Equal("message (A) (B) (C)", aggExceptionBase.Message);
// Verify flattening one with another.
// > Flattening (no recursion)...
AggregateException flattened1 = aggExceptionBase.Flatten();
Exception[] expected1 = new Exception[] {
exceptionA, exceptionB, exceptionC
};
Assert.Equal(expected1, flattened1.InnerExceptions);
Assert.Equal("message (A) (B) (C)", flattened1.Message);
// Verify flattening one with another, accounting for recursion.
AggregateException aggExceptionRecurse = new AggregateException("message", aggExceptionBase, aggExceptionBase);
AggregateException flattened2 = aggExceptionRecurse.Flatten();
Exception[] expected2 = new Exception[] {
exceptionA, exceptionB, exceptionC, exceptionA, exceptionB, exceptionC,
};
Assert.Equal(expected2, flattened2.InnerExceptions);
Assert.Equal("message (A) (B) (C) (A) (B) (C)", flattened2.Message);
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using Xunit;
namespace System.Threading.Tasks.Tests
{
public class AggregateExceptionTests
{
[Fact]
public static void ConstructorBasic()
{
AggregateException ex = new AggregateException();
Assert.Equal(0, ex.InnerExceptions.Count);
Assert.True(ex.Message != null, "RunAggregateException_Constructor: FAILED. Message property is null when the default constructor is used, expected a default message");
ex = new AggregateException("message");
Assert.Equal(0, ex.InnerExceptions.Count);
Assert.True(ex.Message != null, "RunAggregateException_Constructor: FAILED. Message property is null when the default constructor(string) is used");
ex = new AggregateException("message", new Exception());
Assert.Equal(1, ex.InnerExceptions.Count);
Assert.True(ex.Message != null, "RunAggregateException_Constructor: FAILED. Message property is null when the default constructor(string, Exception) is used");
}
[Fact]
public static void ConstructorInvalidArguments()
{
AggregateException ex = new AggregateException();
Assert.Throws<ArgumentNullException>(() => new AggregateException("message", (Exception)null));
Assert.Throws<ArgumentNullException>(() => new AggregateException("message", (IEnumerable<Exception>)null));
AssertExtensions.Throws<ArgumentException>(null, () => ex = new AggregateException("message", new[] { new Exception(), null }));
}
[Fact]
public static void BaseExceptions()
{
AggregateException ex = new AggregateException();
Assert.Equal(ex.GetBaseException(), ex);
Exception[] innerExceptions = new Exception[0];
ex = new AggregateException(innerExceptions);
Assert.Equal(ex.GetBaseException(), ex);
innerExceptions = new Exception[1] { new AggregateException() };
ex = new AggregateException(innerExceptions);
Assert.Equal(ex.GetBaseException(), innerExceptions[0]);
innerExceptions = new Exception[2] { new AggregateException(), new AggregateException() };
ex = new AggregateException(innerExceptions);
Assert.Equal(ex.GetBaseException(), ex);
}
[Fact]
public static void Handle()
{
AggregateException ex = new AggregateException();
ex = new AggregateException(new[] { new ArgumentException(), new ArgumentException(), new ArgumentException() });
int handledCount = 0;
ex.Handle((e) =>
{
if (e is ArgumentException)
{
handledCount++;
return true;
}
return false;
});
Assert.Equal(handledCount, ex.InnerExceptions.Count);
}
[Fact]
public static void HandleInvalidCases()
{
AggregateException ex = new AggregateException();
Assert.Throws<ArgumentNullException>(() => ex.Handle(null));
ex = new AggregateException(new[] { new Exception(), new ArgumentException(), new ArgumentException() });
int handledCount = 0;
Assert.Throws<AggregateException>(
() => ex.Handle((e) =>
{
if (e is ArgumentException)
{
handledCount++;
return true;
}
return false;
}));
}
// Validates that flattening (including recursive) works.
[Fact]
public static void Flatten()
{
Exception exceptionA = new Exception("A");
Exception exceptionB = new Exception("B");
Exception exceptionC = new Exception("C");
AggregateException aggExceptionBase = new AggregateException("message", exceptionA, exceptionB, exceptionC);
Assert.Equal("message (A) (B) (C)", aggExceptionBase.Message);
// Verify flattening one with another.
// > Flattening (no recursion)...
AggregateException flattened1 = aggExceptionBase.Flatten();
Exception[] expected1 = new Exception[] {
exceptionA, exceptionB, exceptionC
};
Assert.Equal(expected1, flattened1.InnerExceptions);
Assert.Equal("message (A) (B) (C)", flattened1.Message);
// Verify flattening one with another, accounting for recursion.
AggregateException aggExceptionRecurse = new AggregateException("message", aggExceptionBase, aggExceptionBase);
AggregateException flattened2 = aggExceptionRecurse.Flatten();
Exception[] expected2 = new Exception[] {
exceptionA, exceptionB, exceptionC, exceptionA, exceptionB, exceptionC,
};
Assert.Equal(expected2, flattened2.InnerExceptions);
Assert.Equal("message (A) (B) (C) (A) (B) (C)", flattened2.Message);
}
}
}
| -1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.