NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/generated/NvBlastExtTkSerialization-capn.h
// Generated by Cap'n Proto compiler, DO NOT EDIT // source: NvBlastExtTkSerialization-capn #ifndef CAPNP_INCLUDED_affe4498f275ee58_ #define CAPNP_INCLUDED_affe4498f275ee58_ #include <capnp/generated-header-support.h> #if CAPNP_VERSION != 6001 #error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library." #endif #include "NvBlastExtLlSerialization-capn.h" namespace capnp { namespace schemas { CAPNP_DECLARE_SCHEMA(ffd67c4b7067dde6); CAPNP_DECLARE_SCHEMA(b7dbad810488a897); CAPNP_DECLARE_SCHEMA(bf661e95794f2749); } // namespace schemas } // namespace capnp namespace Nv { namespace Blast { namespace Serialization { struct TkAsset { TkAsset() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(ffd67c4b7067dde6, 0, 2) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct TkAssetJointDesc { TkAssetJointDesc() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(b7dbad810488a897, 0, 2) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct NvVec3 { NvVec3() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(bf661e95794f2749, 2, 0) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; // ======================================================================================= class TkAsset::Reader { public: typedef TkAsset Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasAssetLL() const; inline ::Nv::Blast::Serialization::Asset::Reader getAssetLL() const; inline bool hasJointDescs() const; inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Reader getJointDescs() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class TkAsset::Builder { public: typedef TkAsset Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasAssetLL(); inline ::Nv::Blast::Serialization::Asset::Builder getAssetLL(); inline void setAssetLL( ::Nv::Blast::Serialization::Asset::Reader value); inline ::Nv::Blast::Serialization::Asset::Builder initAssetLL(); inline void adoptAssetLL(::capnp::Orphan< ::Nv::Blast::Serialization::Asset>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::Asset> disownAssetLL(); inline bool hasJointDescs(); inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Builder getJointDescs(); inline void setJointDescs( ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Builder initJointDescs(unsigned int size); inline void adoptJointDescs(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>> disownJointDescs(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class TkAsset::Pipeline { public: typedef TkAsset Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} inline ::Nv::Blast::Serialization::Asset::Pipeline getAssetLL(); private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class TkAssetJointDesc::Reader { public: typedef TkAssetJointDesc Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasNodeIndices() const; inline ::capnp::List< ::uint32_t>::Reader getNodeIndices() const; inline bool hasAttachPositions() const; inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Reader getAttachPositions() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class TkAssetJointDesc::Builder { public: typedef TkAssetJointDesc Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasNodeIndices(); inline ::capnp::List< ::uint32_t>::Builder getNodeIndices(); inline void setNodeIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setNodeIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initNodeIndices(unsigned int size); inline void adoptNodeIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownNodeIndices(); inline bool hasAttachPositions(); inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Builder getAttachPositions(); inline void setAttachPositions( ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Builder initAttachPositions(unsigned int size); inline void adoptAttachPositions(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>> disownAttachPositions(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class TkAssetJointDesc::Pipeline { public: typedef TkAssetJointDesc Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvVec3::Reader { public: typedef NvVec3 Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline float getX() const; inline float getY() const; inline float getZ() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvVec3::Builder { public: typedef NvVec3 Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline float getX(); inline void setX(float value); inline float getY(); inline void setY(float value); inline float getZ(); inline void setZ(float value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvVec3::Pipeline { public: typedef NvVec3 Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE // ======================================================================================= inline bool TkAsset::Reader::hasAssetLL() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool TkAsset::Builder::hasAssetLL() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::Asset::Reader TkAsset::Reader::getAssetLL() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::Asset::Builder TkAsset::Builder::getAssetLL() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::Asset::Pipeline TkAsset::Pipeline::getAssetLL() { return ::Nv::Blast::Serialization::Asset::Pipeline(_typeless.getPointerField(0)); } #endif // !CAPNP_LITE inline void TkAsset::Builder::setAssetLL( ::Nv::Blast::Serialization::Asset::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::Asset::Builder TkAsset::Builder::initAssetLL() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void TkAsset::Builder::adoptAssetLL( ::capnp::Orphan< ::Nv::Blast::Serialization::Asset>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::Asset> TkAsset::Builder::disownAssetLL() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool TkAsset::Reader::hasJointDescs() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool TkAsset::Builder::hasJointDescs() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< 
::Nv::Blast::Serialization::TkAssetJointDesc>::Reader TkAsset::Reader::getJointDescs() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Builder TkAsset::Builder::getJointDescs() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void TkAsset::Builder::setJointDescs( ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Builder TkAsset::Builder::initJointDescs(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void TkAsset::Builder::adoptJointDescs( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>> TkAsset::Builder::disownJointDescs() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline bool TkAssetJointDesc::Reader::hasNodeIndices() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool TkAssetJointDesc::Builder::hasNodeIndices() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader TkAssetJointDesc::Reader::getNodeIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder TkAssetJointDesc::Builder::getNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void TkAssetJointDesc::Builder::setNodeIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void TkAssetJointDesc::Builder::setNodeIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder TkAssetJointDesc::Builder::initNodeIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void TkAssetJointDesc::Builder::adoptNodeIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } 
inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> TkAssetJointDesc::Builder::disownNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool TkAssetJointDesc::Reader::hasAttachPositions() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool TkAssetJointDesc::Builder::hasAttachPositions() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Reader TkAssetJointDesc::Reader::getAttachPositions() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Builder TkAssetJointDesc::Builder::getAttachPositions() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void TkAssetJointDesc::Builder::setAttachPositions( ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Builder TkAssetJointDesc::Builder::initAttachPositions(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void TkAssetJointDesc::Builder::adoptAttachPositions( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>> TkAssetJointDesc::Builder::disownAttachPositions() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline float NvVec3::Reader::getX() const { return _reader.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline float NvVec3::Builder::getX() { return _builder.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvVec3::Builder::setX(float value) { _builder.setDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline float NvVec3::Reader::getY() const { return _reader.getDataField<float>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline float NvVec3::Builder::getY() { return _builder.getDataField<float>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void NvVec3::Builder::setY(float value) { _builder.setDataField<float>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline float NvVec3::Reader::getZ() const { return _reader.getDataField<float>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline float NvVec3::Builder::getZ() { return _builder.getDataField<float>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void NvVec3::Builder::setZ(float value) { _builder.setDataField<float>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } } // namespace } // namespace } // namespace #endif // 
CAPNP_INCLUDED_affe4498f275ee58_
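The generated classes above follow the standard Cap'n Proto Reader/Builder pattern: init*() allocates a field, set*() copies values in, get*()/has*() read them back. A minimal usage sketch, assuming the Cap'n Proto runtime and this generated header are available; the function name and the values are illustrative only, not part of the Blast SDK:

#include <capnp/message.h>
#include "NvBlastExtTkSerialization-capn.h"

// Build a TkAssetJointDesc in a fresh message: two node indices and two attach positions.
void buildExampleJointDesc()
{
    ::capnp::MallocMessageBuilder message;
    auto joint = message.initRoot<Nv::Blast::Serialization::TkAssetJointDesc>();

    auto nodes = joint.initNodeIndices(2);   // List(UInt32)
    nodes.set(0, 0u);
    nodes.set(1, 1u);

    auto positions = joint.initAttachPositions(2);   // List(NvVec3); elements expose setX/setY/setZ
    positions[0].setX(0.0f); positions[0].setY(1.0f); positions[0].setZ(0.0f);
    positions[1].setX(0.0f); positions[1].setY(-1.0f); positions[1].setZ(0.0f);
}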
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringInternalCommon.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTINTERNALCOMMON_H #define NVBLASTINTERNALCOMMON_H #include "NvBlastExtAuthoringTypes.h" #include "NvBlastNvSharedHelpers.h" #include "NvBlastVolumeIntegrals.h" #include "NvVec2.h" #include "NvVec3.h" #include "NvPlane.h" #include "NvBounds3.h" #include "NvMath.h" #include <algorithm> namespace Nv { namespace Blast { /** Edge representation with index of parent facet */ struct EdgeWithParent { uint32_t s, e; // Starting and ending vertices uint32_t parent; // Parent facet index EdgeWithParent() : s(0), e(0), parent(0) {} EdgeWithParent(uint32_t s, uint32_t e, uint32_t p) : s(s), e(e), parent(p) {} }; /** Comparator for sorting edges according to parent facet number. */ struct EdgeComparator { bool operator()(const EdgeWithParent& a, const EdgeWithParent& b) const { if (a.parent == b.parent) { if (a.s == b.s) { return a.e < b.e; } else { return a.s < b.s; } } else { return a.parent < b.parent; } } }; inline bool operator<(const Edge& a, const Edge& b) { if (a.s == b.s) return a.e < b.e; else return a.s < b.s; } /** Vertex projection direction flag. */ enum ProjectionDirections { YZ_PLANE = 1 << 1, XY_PLANE = 1 << 2, ZX_PLANE = 1 << 3, // This is set when the dominant axis of the normal is negative // because when flattening to 2D the facet is viewed from the positive direction. // As a result, the winding order appears to flip if the normal is in the negative direction. OPPOSITE_WINDING = 1 << 4 }; /** Computes best direction to project points. 
*/ NV_FORCE_INLINE ProjectionDirections getProjectionDirection(const nvidia::NvVec3& normal) { float maxv = std::max(std::abs(normal.x), std::max(std::abs(normal.y), std::abs(normal.z))); ProjectionDirections retVal; if (maxv == std::abs(normal.x)) { retVal = YZ_PLANE; if (normal.x < 0) retVal = (ProjectionDirections)((int)retVal | (int)OPPOSITE_WINDING); return retVal; } if (maxv == std::abs(normal.y)) { retVal = ZX_PLANE; if (normal.y > 0) retVal = (ProjectionDirections)((int)retVal | (int)OPPOSITE_WINDING); return retVal; } retVal = XY_PLANE; if (normal.z < 0) retVal = (ProjectionDirections)((int)retVal | (int)OPPOSITE_WINDING); return retVal; } /** Computes point projected on given axis aligned plane. */ NV_FORCE_INLINE nvidia::NvVec2 getProjectedPoint(const nvidia::NvVec3& point, ProjectionDirections dir) { if (dir & YZ_PLANE) { return nvidia::NvVec2(point.y, point.z); } if (dir & ZX_PLANE) { return nvidia::NvVec2(point.x, point.z); } return nvidia::NvVec2(point.x, point.y); } NV_FORCE_INLINE nvidia::NvVec2 getProjectedPoint(const NvcVec3& point, ProjectionDirections dir) { return getProjectedPoint((const nvidia::NvVec3&)point, dir); } /** Computes point projected on given axis aligned plane, this method is polygon-winding aware. */ NV_FORCE_INLINE nvidia::NvVec2 getProjectedPointWithWinding(const nvidia::NvVec3& point, ProjectionDirections dir) { if (dir & YZ_PLANE) { if (dir & OPPOSITE_WINDING) { return nvidia::NvVec2(point.z, point.y); } else return nvidia::NvVec2(point.y, point.z); } if (dir & ZX_PLANE) { if (dir & OPPOSITE_WINDING) { return nvidia::NvVec2(point.z, point.x); } return nvidia::NvVec2(point.x, point.z); } if (dir & OPPOSITE_WINDING) { return nvidia::NvVec2(point.y, point.x); } return nvidia::NvVec2(point.x, point.y); } #define MAXIMUM_EXTENT 1000 * 1000 * 1000 #define BBOX_TEST_EPS 1e-5f /** Test fattened bounding box intersetion. */ NV_INLINE bool weakBoundingBoxIntersection(const nvidia::NvBounds3& aBox, const nvidia::NvBounds3& bBox) { if (std::max(aBox.minimum.x, bBox.minimum.x) > std::min(aBox.maximum.x, bBox.maximum.x) + BBOX_TEST_EPS) return false; if (std::max(aBox.minimum.y, bBox.minimum.y) > std::min(aBox.maximum.y, bBox.maximum.y) + BBOX_TEST_EPS) return false; if (std::max(aBox.minimum.z, bBox.minimum.z) > std::min(aBox.maximum.z, bBox.maximum.z) + BBOX_TEST_EPS) return false; return true; } /** Test segment vs plane intersection. If segment intersects the plane true is returned. Point of intersection is written into 'result'. */ NV_INLINE bool getPlaneSegmentIntersection(const nvidia::NvPlane& pl, const nvidia::NvVec3& a, const nvidia::NvVec3& b, nvidia::NvVec3& result) { float div = (b - a).dot(pl.n); if (nvidia::NvAbs(div) < 0.0001f) { if (pl.contains(a)) { result = a; return true; } else { return false; } } float t = (-a.dot(pl.n) - pl.d) / div; if (t < 0.0f || t > 1.0f) { return false; } result = (b - a) * t + a; return true; } #define POS_COMPARISON_OFFSET 1e-5f #define NORM_COMPARISON_OFFSET 1e-3f /** Vertex comparator for vertex welding. */ template<bool splitUVs> struct VrtCompare { // This implements a "less than" function for vertices. 
// Vertices a and b are considered equivalent if !(a < b) && !(b < a) bool operator()(const Vertex& a, const Vertex& b) const { if (a.p.x + POS_COMPARISON_OFFSET < b.p.x) return true; if (a.p.x - POS_COMPARISON_OFFSET > b.p.x) return false; if (a.p.y + POS_COMPARISON_OFFSET < b.p.y) return true; if (a.p.y - POS_COMPARISON_OFFSET > b.p.y) return false; if (a.p.z + POS_COMPARISON_OFFSET < b.p.z) return true; if (a.p.z - POS_COMPARISON_OFFSET > b.p.z) return false; if (a.n.x + NORM_COMPARISON_OFFSET < b.n.x) return true; if (a.n.x - NORM_COMPARISON_OFFSET > b.n.x) return false; if (a.n.y + NORM_COMPARISON_OFFSET < b.n.y) return true; if (a.n.y - NORM_COMPARISON_OFFSET > b.n.y) return false; if (a.n.z + NORM_COMPARISON_OFFSET < b.n.z) return true; if (a.n.z - NORM_COMPARISON_OFFSET > b.n.z) return false; // This is not actually needed if (!splitUVs) if (!splitUVs) return false; if (a.uv[0].x + NORM_COMPARISON_OFFSET < b.uv[0].x) return true; if (a.uv[0].x - NORM_COMPARISON_OFFSET > b.uv[0].x) return false; if (a.uv[0].y + NORM_COMPARISON_OFFSET < b.uv[0].y) return true; if (a.uv[0].y - NORM_COMPARISON_OFFSET > b.uv[0].y) return false; // This is not actually needed return false; }; }; typedef VrtCompare<true> VrtComp; typedef VrtCompare<false> VrtCompNoUV; /** Vertex comparator for vertex welding (not accounts normal and uv parameters of vertice). */ struct VrtPositionComparator { bool operator()(const NvcVec3& a, const NvcVec3& b) const { if (a.x + POS_COMPARISON_OFFSET < b.x) return true; if (a.x - POS_COMPARISON_OFFSET > b.x) return false; if (a.y + POS_COMPARISON_OFFSET < b.y) return true; if (a.y - POS_COMPARISON_OFFSET > b.y) return false; if (a.z + POS_COMPARISON_OFFSET < b.z) return true; if (a.z - POS_COMPARISON_OFFSET > b.z) return false; return false; }; bool operator()(const Vertex& a, const Vertex& b) const { return operator()(a.p, b.p); }; }; NV_INLINE float calculateCollisionHullVolumeAndCentroid(NvcVec3& centroid, const CollisionHull& hull) { class CollisionHullQuery { public: CollisionHullQuery(const CollisionHull& hull) : m_hull(hull) {} size_t faceCount() const { return (size_t)m_hull.polygonDataCount; } size_t vertexCount(size_t faceIndex) const { return (size_t)m_hull.polygonData[faceIndex].vertexCount; } NvcVec3 vertex(size_t faceIndex, size_t vertexIndex) const { return m_hull.points[m_hull.indices[m_hull.polygonData[faceIndex].indexBase + vertexIndex]]; } private: const CollisionHull& m_hull; }; return calculateMeshVolumeAndCentroid<CollisionHullQuery>(centroid, hull); } } // namespace Blast } // namespace Nv #endif
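VrtCompare and VrtPositionComparator implement a tolerance-based ordering: two vertices compare as equivalent when neither is "less than" the other, so an ordered container keyed on the comparator welds vertices whose coordinates differ by less than the comparison offsets. A self-contained sketch of the same idea; Vec3, PositionLess and kPosOffset are stand-ins mirroring NvcVec3, VrtPositionComparator and POS_COMPARISON_OFFSET, not SDK types:

#include <cstdint>
#include <cstdio>
#include <map>

constexpr float kPosOffset = 1e-5f;   // mirrors POS_COMPARISON_OFFSET

struct Vec3 { float x, y, z; };

// "Less than" with a positional tolerance: points within kPosOffset on every
// axis compare equal and therefore share one map entry.
struct PositionLess
{
    bool operator()(const Vec3& a, const Vec3& b) const
    {
        if (a.x + kPosOffset < b.x) return true;
        if (a.x - kPosOffset > b.x) return false;
        if (a.y + kPosOffset < b.y) return true;
        if (a.y - kPosOffset > b.y) return false;
        if (a.z + kPosOffset < b.z) return true;
        if (a.z - kPosOffset > b.z) return false;
        return false;
    }
};

int main()
{
    const Vec3 verts[] = { {0, 0, 0}, {0, 0, 5e-6f}, {1, 0, 0} };
    std::map<Vec3, uint32_t, PositionLess> weldMap;
    for (const Vec3& v : verts)
        weldMap.emplace(v, (uint32_t)weldMap.size());   // first occurrence of a welded position wins
    std::printf("unique positions: %zu\n", weldMap.size());   // prints 2: the first two vertices weld
    return 0;
}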
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringAcceleratorImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtAuthoringAcceleratorImpl.h" #include "NvBlastExtAuthoringMesh.h" #include "NvBlastExtAuthoringInternalCommon.h" #include "NvBlastGlobals.h" #include "NvBlastNvSharedHelpers.h" #include "NvCMath.h" namespace Nv { namespace Blast { DummyAccelerator::DummyAccelerator(int32_t count) : m_count(count) { m_current = 0; } void DummyAccelerator::release() { NVBLAST_DELETE(this, DummyAccelerator); } void DummyAccelerator::setState(const Vertex* pos, const Edge* ed, const Facet& fc) { m_current = 0; NV_UNUSED(pos); NV_UNUSED(ed); NV_UNUSED(fc); } void DummyAccelerator::setState(const NvcBounds3* bound) { m_current = 0; NV_UNUSED(bound); } void DummyAccelerator::setState(const NvcVec3& point) { m_current = 0; NV_UNUSED(point); } int32_t DummyAccelerator::getNextFacet() { if (m_current < m_count) { ++m_current; return m_current - 1; } else return -1; } Grid::Grid(int32_t resolution) : m_resolution(resolution) { /** Set up 3d grid */ m_r3 = resolution * resolution * resolution; m_spatialMap.resize(resolution * resolution * resolution); } void Grid::release() { NVBLAST_DELETE(this, Grid); } void Grid::setMesh(const Mesh* m) { nvidia::NvBounds3 bd = toNvShared(m->getBoundingBox()); m_mappedFacetCount = m->getFacetCount(); bd.fattenFast(0.001f); m_spos = fromNvShared(bd.minimum); m_deltas = { m_resolution / bd.getDimensions().x, m_resolution / bd.getDimensions().y, m_resolution / bd.getDimensions().z }; for (int32_t i = 0; i < m_r3; ++i) m_spatialMap[i].clear(); const float ofs = 0.001f; for (uint32_t fc = 0; fc < m->getFacetCount(); ++fc) { NvcBounds3 cfc = *m->getFacetBound(fc); int32_t is = (int32_t)std::max(0.f, (cfc.minimum.x - m_spos.x - ofs) * m_deltas.x); int32_t ie = (int32_t)std::max(0.f, (cfc.maximum.x - m_spos.x + ofs) * m_deltas.x); int32_t js = (int32_t)std::max(0.f, (cfc.minimum.y - m_spos.y - ofs) * m_deltas.y); int32_t je = (int32_t)std::max(0.f, (cfc.maximum.y - m_spos.y + ofs) * m_deltas.y); int32_t ks = (int32_t)std::max(0.f, (cfc.minimum.z - m_spos.z - 
ofs) * m_deltas.z); int32_t ke = (int32_t)std::max(0.f, (cfc.maximum.z - m_spos.z + ofs) * m_deltas.z); for (int32_t i = is; i < m_resolution && i <= ie; ++i) { for (int32_t j = js; j < m_resolution && j <= je; ++j) { for (int32_t k = ks; k < m_resolution && k <= ke; ++k) { m_spatialMap[(i * m_resolution + j) * m_resolution + k].push_back(fc); } } } } } GridAccelerator::GridAccelerator(Grid* grd) { m_grid = grd; m_alreadyGotValue = 0; m_alreadyGotFlag.resize(1 << 12); m_cellList.resize(1 << 12); m_pointCmdDir = 0; } void GridAccelerator::release() { NVBLAST_DELETE(this, GridAccelerator); } void GridAccelerator::setState(const Vertex* pos, const Edge* ed, const Facet& fc) { nvidia::NvBounds3 cfc(nvidia::NvBounds3::empty()); for (uint32_t v = 0; v < fc.edgesCount; ++v) { cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].s].p)); cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].e].p)); } setState(&fromNvShared(cfc)); } void GridAccelerator::setState(const NvcBounds3* facetBounding) { m_alreadyGotValue++; m_iteratorCell = -1; m_iteratorFacet = -1; m_gotCells = 0; NvcBounds3 cfc = *facetBounding; int32_t is = (int32_t)std::max(0.f, (cfc.minimum.x - m_grid->m_spos.x - 0.001f) * m_grid->m_deltas.x); int32_t ie = (int32_t)std::max(0.f, (cfc.maximum.x - m_grid->m_spos.x + 0.001f) * m_grid->m_deltas.x); int32_t js = (int32_t)std::max(0.f, (cfc.minimum.y - m_grid->m_spos.y - 0.001f) * m_grid->m_deltas.y); int32_t je = (int32_t)std::max(0.f, (cfc.maximum.y - m_grid->m_spos.y + 0.001f) * m_grid->m_deltas.y); int32_t ks = (int32_t)std::max(0.f, (cfc.minimum.z - m_grid->m_spos.z - 0.001f) * m_grid->m_deltas.z); int32_t ke = (int32_t)std::max(0.f, (cfc.maximum.z - m_grid->m_spos.z + 0.001f) * m_grid->m_deltas.z); for (int32_t i = is; i < m_grid->m_resolution && i <= ie; ++i) { for (int32_t j = js; j < m_grid->m_resolution && j <= je; ++j) { for (int32_t k = ks; k < m_grid->m_resolution && k <= ke; ++k) { int32_t id = (i * m_grid->m_resolution + j) * m_grid->m_resolution + k; if (!m_grid->m_spatialMap[id].empty()) { m_cellList[m_gotCells++] = id; } } } } if (m_gotCells != 0) { m_iteratorFacet = 0; m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; } } void GridAccelerator::setPointCmpDirection(int32_t d) { m_pointCmdDir = d; } void GridAccelerator::setState(const NvcVec3& point) { m_alreadyGotValue++; m_iteratorCell = -1; m_iteratorFacet = -1; m_gotCells = 0; int32_t is = (int32_t)std::max(0.f, (point.x - m_grid->m_spos.x - 0.001f) * m_grid->m_deltas.x); int32_t ie = (int32_t)std::max(0.f, (point.x - m_grid->m_spos.x + 0.001f) * m_grid->m_deltas.x); int32_t js = (int32_t)std::max(0.f, (point.y - m_grid->m_spos.y - 0.001f) * m_grid->m_deltas.y); int32_t je = (int32_t)std::max(0.f, (point.y - m_grid->m_spos.y + 0.001f) * m_grid->m_deltas.y); int32_t ks = 0; int32_t ke = m_grid->m_resolution; switch (m_pointCmdDir) { case 1: ks = (int32_t)std::max(0.f, (point.z - m_grid->m_spos.z - 0.001f) * m_grid->m_deltas.z); break; case -1: ke = (int32_t)std::max(0.f, (point.z - m_grid->m_spos.z + 0.001f) * m_grid->m_deltas.z); } for (int32_t i = is; i < m_grid->m_resolution && i <= ie; ++i) { for (int32_t j = js; j < m_grid->m_resolution && j <= je; ++j) { for (int32_t k = ks; k <= ke && k < m_grid->m_resolution; ++k) { int32_t id = (i * m_grid->m_resolution + j) * m_grid->m_resolution + k; if (!m_grid->m_spatialMap[id].empty()) { m_cellList[m_gotCells++] = id; } } } } if (m_gotCells != 0) { m_iteratorFacet = 0; m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; } } int32_t 
GridAccelerator::getNextFacet() { int32_t facetId = -1; while (m_iteratorCell != -1) { if (m_iteratorFacet >= (int32_t)m_grid->m_spatialMap[m_iteratorCell].size()) { if (m_gotCells != 0) { m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; m_iteratorFacet = 0; } else { m_iteratorCell = -1; break; } } if (m_alreadyGotFlag[m_grid->m_spatialMap[m_iteratorCell][m_iteratorFacet]] != m_alreadyGotValue) { facetId = m_grid->m_spatialMap[m_iteratorCell][m_iteratorFacet]; m_iteratorFacet++; break; } else { m_iteratorFacet++; } } if (facetId != -1) { m_alreadyGotFlag[facetId] = m_alreadyGotValue; } return facetId; } BBoxBasedAccelerator::BBoxBasedAccelerator(const Mesh* mesh, int32_t resolution) : m_resolution(resolution), m_alreadyGotValue(1) { m_bounds = mesh->getBoundingBox(); m_spatialMap.resize(resolution * resolution * resolution); m_cells.resize(resolution * resolution * resolution); int32_t currentCell = 0; NvcVec3 incr = (m_bounds.maximum - m_bounds.minimum) * (1.0f / m_resolution); for (int32_t z = 0; z < resolution; ++z) { for (int32_t y = 0; y < resolution; ++y) { for (int32_t x = 0; x < resolution; ++x) { m_cells[currentCell].minimum.x = m_bounds.minimum.x + x * incr.x; m_cells[currentCell].minimum.y = m_bounds.minimum.y + y * incr.y; m_cells[currentCell].minimum.z = m_bounds.minimum.z + z * incr.z; m_cells[currentCell].maximum.x = m_bounds.minimum.x + (x + 1) * incr.x; m_cells[currentCell].maximum.y = m_bounds.minimum.y + (y + 1) * incr.y; m_cells[currentCell].maximum.z = m_bounds.minimum.z + (z + 1) * incr.z; ++currentCell; } } } m_cellList.resize(1 << 16); m_gotCells = 0; buildAccelStructure(mesh->getVertices(), mesh->getEdges(), mesh->getFacetsBuffer(), mesh->getFacetCount()); } void BBoxBasedAccelerator::release() { NVBLAST_DELETE(this, BBoxBasedAccelerator); } BBoxBasedAccelerator::~BBoxBasedAccelerator() { m_resolution = 0; toNvShared(m_bounds).setEmpty(); m_spatialMap.clear(); m_cells.clear(); m_cellList.clear(); } int32_t BBoxBasedAccelerator::getNextFacet() { int32_t facetId = -1; while (m_iteratorCell != -1) { if (m_iteratorFacet >= (int32_t)m_spatialMap[m_iteratorCell].size()) { if (m_gotCells != 0) { m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; m_iteratorFacet = 0; } else { m_iteratorCell = -1; break; } } if (m_alreadyGotFlag[m_spatialMap[m_iteratorCell][m_iteratorFacet]] != m_alreadyGotValue) { facetId = m_spatialMap[m_iteratorCell][m_iteratorFacet]; m_iteratorFacet++; break; } else { m_iteratorFacet++; } } if (facetId != -1) { m_alreadyGotFlag[facetId] = m_alreadyGotValue; } return facetId; } void BBoxBasedAccelerator::setState(const Vertex* pos, const Edge* ed, const Facet& fc) { nvidia::NvBounds3 cfc(nvidia::NvBounds3::empty()); for (uint32_t v = 0; v < fc.edgesCount; ++v) { cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].s].p)); cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].e].p)); } setState(&fromNvShared(cfc)); } void BBoxBasedAccelerator::setState(const NvcBounds3* facetBox) { m_alreadyGotValue++; m_iteratorCell = -1; m_iteratorFacet = -1; m_gotCells = 0; for (uint32_t i = 0; i < m_cells.size(); ++i) { if (weakBoundingBoxIntersection(toNvShared(m_cells[i]), *toNvShared(facetBox))) { if (!m_spatialMap[i].empty()) m_cellList[m_gotCells++] = i; } } if (m_gotCells != 0) { m_iteratorFacet = 0; m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; } } void BBoxBasedAccelerator::setState(const NvcVec3& p) { m_alreadyGotValue++; m_iteratorCell = -1; m_iteratorFacet = -1; m_gotCells = 0; int32_t perSlice = m_resolution * m_resolution; 
for (uint32_t i = 0; i < m_cells.size(); ++i) { if (toNvShared(m_cells[i]).contains(toNvShared(p))) { int32_t xyCellId = i % perSlice; for (int32_t zCell = 0; zCell < m_resolution; ++zCell) { int32_t cell = zCell * perSlice + xyCellId; if (!m_spatialMap[cell].empty()) m_cellList[m_gotCells++] = cell; } } } if (m_gotCells != 0) { m_iteratorFacet = 0; m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; } } void BBoxBasedAccelerator::buildAccelStructure(const Vertex* pos, const Edge* edges, const Facet* fc, int32_t facetCount) { for (int32_t facet = 0; facet < facetCount; ++facet) { nvidia::NvBounds3 bBox; bBox.setEmpty(); const Edge* edge = &edges[0] + fc->firstEdgeNumber; int32_t count = fc->edgesCount; for (int32_t ec = 0; ec < count; ++ec) { bBox.include(toNvShared(pos[edge->s].p)); bBox.include(toNvShared(pos[edge->e].p)); edge++; } for (uint32_t i = 0; i < m_cells.size(); ++i) { if (weakBoundingBoxIntersection(toNvShared(m_cells[i]), bBox)) { m_spatialMap[i].push_back(facet); } } fc++; } m_alreadyGotFlag.resize(facetCount, 0); } #define SWEEP_RESOLUTION 2048 void buildIndex(std::vector<SegmentToIndex>& segm, float offset, float mlt, std::vector<std::vector<uint32_t>>& blocks) { std::set<uint32_t> currentEnabled; uint32_t lastBlock = 0; for (uint32_t i = 0; i < segm.size(); ++i) { uint32_t currentBlock = (uint32_t)((segm[i].coord - offset) * mlt); if (currentBlock >= SWEEP_RESOLUTION) break; if (currentBlock != lastBlock) { for (uint32_t j = lastBlock + 1; j <= currentBlock; ++j) { for (auto id : currentEnabled) blocks[j].push_back(id); } lastBlock = currentBlock; } if (segm[i].end == false) { blocks[lastBlock].push_back(segm[i].index); currentEnabled.insert(segm[i].index); } else { currentEnabled.erase(segm[i].index); } } } SweepingAccelerator::SweepingAccelerator(const Nv::Blast::Mesh* in) { nvidia::NvBounds3 bnd; const Vertex* verts = in->getVertices(); const Edge* edges = in->getEdges(); m_facetCount = in->getFacetCount(); m_foundx.resize(m_facetCount, 0); m_foundy.resize(m_facetCount, 0); std::vector<SegmentToIndex> xevs; std::vector<SegmentToIndex> yevs; std::vector<SegmentToIndex> zevs; for (uint32_t i = 0; i < in->getFacetCount(); ++i) { const Facet* fc = in->getFacet(i); bnd.setEmpty(); for (uint32_t v = 0; v < fc->edgesCount; ++v) { bnd.include(toNvShared(verts[edges[v + fc->firstEdgeNumber].s].p)); } bnd.scaleFast(1.1f); xevs.push_back(SegmentToIndex(bnd.minimum.x, i, false)); xevs.push_back(SegmentToIndex(bnd.maximum.x, i, true)); yevs.push_back(SegmentToIndex(bnd.minimum.y, i, false)); yevs.push_back(SegmentToIndex(bnd.maximum.y, i, true)); zevs.push_back(SegmentToIndex(bnd.minimum.z, i, false)); zevs.push_back(SegmentToIndex(bnd.maximum.z, i, true)); } std::sort(xevs.begin(), xevs.end()); std::sort(yevs.begin(), yevs.end()); std::sort(zevs.begin(), zevs.end()); m_minimal.x = xevs[0].coord; m_minimal.y = yevs[0].coord; m_minimal.z = zevs[0].coord; m_maximal.x = xevs.back().coord; m_maximal.y = yevs.back().coord; m_maximal.z = zevs.back().coord; m_rescale = (m_maximal - m_minimal) * 1.01f; m_rescale.x = 1.0f / m_rescale.x * SWEEP_RESOLUTION; m_rescale.y = 1.0f / m_rescale.y * SWEEP_RESOLUTION; m_rescale.z = 1.0f / m_rescale.z * SWEEP_RESOLUTION; m_xSegm.resize(SWEEP_RESOLUTION); m_ySegm.resize(SWEEP_RESOLUTION); m_zSegm.resize(SWEEP_RESOLUTION); buildIndex(xevs, m_minimal.x, m_rescale.x, m_xSegm); buildIndex(yevs, m_minimal.y, m_rescale.y, m_ySegm); buildIndex(zevs, m_minimal.z, m_rescale.z, m_zSegm); m_iterId = 1; m_current = 0; } void SweepingAccelerator::release() { 
NVBLAST_DELETE(this, SweepingAccelerator); } void SweepingAccelerator::setState(const NvcBounds3* facetBounds) { m_current = 0; m_indices.clear(); nvidia::NvBounds3 bnd = *toNvShared(facetBounds); bnd.scaleFast(1.1f); uint32_t start = (uint32_t)((std::max(0.0f, bnd.minimum.x - m_minimal.x)) * m_rescale.x); uint32_t end = (uint32_t)((std::max(0.0f, bnd.maximum.x - m_minimal.x)) * m_rescale.x); for (uint32_t i = start; i <= end && i < SWEEP_RESOLUTION; ++i) { for (auto id : m_xSegm[i]) { m_foundx[id] = m_iterId; } } start = (uint32_t)((std::max(0.0f, bnd.minimum.y - m_minimal.y)) * m_rescale.y); end = (uint32_t)((std::max(0.0f, bnd.maximum.y - m_minimal.y)) * m_rescale.y); for (uint32_t i = start; i <= end && i < SWEEP_RESOLUTION; ++i) { for (auto id : m_ySegm[i]) { m_foundy[id] = m_iterId; } } start = (uint32_t)((std::max(0.0f, bnd.minimum.z - m_minimal.z)) * m_rescale.z); end = (uint32_t)((std::max(0.0f, bnd.maximum.z - m_minimal.z)) * m_rescale.z); for (uint32_t i = start; i <= end && i < SWEEP_RESOLUTION; ++i) { for (auto id : m_zSegm[i]) { if (m_foundy[id] == m_iterId && m_foundx[id] == m_iterId) { m_foundx[id] = m_iterId + 1; m_foundy[id] = m_iterId + 1; m_indices.push_back(id); } } } m_iterId += 2; } void SweepingAccelerator::setState(const Vertex* pos, const Edge* ed, const Facet& fc) { nvidia::NvBounds3 cfc(nvidia::NvBounds3::empty()); for (uint32_t v = 0; v < fc.edgesCount; ++v) { cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].s].p)); cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].e].p)); } setState(&fromNvShared(cfc)); } void SweepingAccelerator::setState(const NvcVec3& point) { m_indices.clear(); /*for (uint32_t i = 0; i < facetCount; ++i) { indices.push_back(i); }*/ uint32_t yIndex = (uint32_t)((point.y - m_minimal.y) * m_rescale.y); uint32_t xIndex = (uint32_t)((point.x - m_minimal.x) * m_rescale.x); for (uint32_t i = 0; i < m_xSegm[xIndex].size(); ++i) { m_foundx[m_xSegm[xIndex][i]] = m_iterId; } for (uint32_t i = 0; i < m_ySegm[yIndex].size(); ++i) { if (m_foundx[m_ySegm[yIndex][i]] == m_iterId) { m_indices.push_back(m_ySegm[yIndex][i]); } } m_iterId++; m_current = 0; NV_UNUSED(point); } int32_t SweepingAccelerator::getNextFacet() { if (static_cast<uint32_t>(m_current) < m_indices.size()) { ++m_current; return m_indices[m_current - 1]; } else return -1; } } // namespace Blast } // namespace Nv
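All accelerators in this file share one query protocol: a setState() overload describes the query (a facet, an axis-aligned bounding box, or a point), after which getNextFacet() enumerates candidate facet indices until it returns -1; the grid-based accelerators additionally use m_alreadyGotFlag/m_alreadyGotValue so each facet is reported at most once per query. A hedged usage sketch against the SpatialAccelerator interface declared in the companion header; how the accelerator and queryBounds are obtained is assumed:

#include "NvBlastExtAuthoringAccelerator.h"

// Enumerate candidate facets whose cells overlap 'queryBounds'.
// The accelerator only prunes; exact intersection tests remain the caller's job.
void visitCandidates(Nv::Blast::SpatialAccelerator& accel, const NvcBounds3& queryBounds)
{
    accel.setState(&queryBounds);
    for (int32_t facet = accel.getNextFacet(); facet != -1; facet = accel.getNextFacet())
    {
        // ... process candidate facet index 'facet' here ...
    }
}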
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringAcceleratorImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTEXTAUTHORINGACCELERATORIMPL_H #define NVBLASTEXTAUTHORINGACCELERATORIMPL_H #include <set> #include <vector> #include "NvBlastExtAuthoringAccelerator.h" namespace Nv { namespace Blast { class Mesh; /** Dummy accelerator iterates through all facets of mesh. */ class DummyAccelerator : public SpatialAccelerator { public: /** \param[in] count Mesh facets count for which accelerator should be built. 
*/ DummyAccelerator(int32_t count); virtual void release() override; virtual void setState(const NvcBounds3* bounds) override; virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) override; virtual void setState(const NvcVec3& point) override; virtual int32_t getNextFacet() override; virtual void setPointCmpDirection(int32_t dir) override { NV_UNUSED(dir); } private: int32_t m_count; int32_t m_current; }; struct SegmentToIndex { float coord; uint32_t index; bool end; SegmentToIndex(float c, uint32_t i, bool end) : coord(c), index(i), end(end) {} bool operator<(const SegmentToIndex& in) const { if (coord < in.coord) return true; if (coord > in.coord) return false; return end < in.end; } }; class Grid : public SpatialGrid { public: friend class GridAccelerator; Grid(int32_t resolution); virtual void release() override; virtual void setMesh(const Nv::Blast::Mesh* m) override; private: int32_t m_resolution; int32_t m_r3; int32_t m_mappedFacetCount; NvcVec3 m_spos; NvcVec3 m_deltas; std::vector< std::vector<int32_t> > m_spatialMap; }; class GridAccelerator : public SpatialAccelerator // Iterator to traverse the grid { public: GridAccelerator(Grid* grd); virtual void release() override; virtual void setState(const NvcBounds3* bounds) override; virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) override; virtual void setState(const NvcVec3& point) override; virtual int32_t getNextFacet() override; virtual void setPointCmpDirection(int32_t dir) override; private: Grid* m_grid; // Iterator data std::vector<uint32_t> m_alreadyGotFlag; uint32_t m_alreadyGotValue; std::vector<int32_t> m_cellList; int32_t m_gotCells; int32_t m_iteratorCell; int32_t m_iteratorFacet; int32_t m_pointCmdDir; }; class SweepingAccelerator : public SpatialAccelerator { public: /** \param[in] count Mesh facets count for which accelerator should be built. */ SweepingAccelerator(const Nv::Blast::Mesh* in); virtual void release() override; virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) override; virtual void setState(const NvcBounds3* bounds) override; virtual void setState(const NvcVec3& point) override; virtual int32_t getNextFacet() override; virtual void setPointCmpDirection(int32_t dir) override { NV_UNUSED(dir); } private: /* For fast point test. */ std::vector<std::vector<uint32_t> > m_xSegm; std::vector<std::vector<uint32_t> > m_ySegm; std::vector<std::vector<uint32_t> > m_zSegm; std::vector<uint32_t> m_indices; std::vector<uint32_t> m_foundx; std::vector<uint32_t> m_foundy; uint32_t m_iterId; int32_t m_current; uint32_t m_facetCount; NvcVec3 m_minimal; NvcVec3 m_maximal; NvcVec3 m_rescale; }; /** Accelerator which builds map from 3d grid to initial mesh facets. To find all facets which possibly intersect given one, it return all facets which are pointed by grid cells, which intersects with bounding box of given facet. To find all facets which possibly cover given point, all facets which are pointed by cells in column which contains given point are returned. */ class BBoxBasedAccelerator : public SpatialAccelerator { public: /** \param[in] mesh Mesh for which acceleration structure should be built. \param[in] resolution Resolution on 3d grid. 
*/ BBoxBasedAccelerator(const Mesh* mesh, int32_t resolution); virtual ~BBoxBasedAccelerator(); virtual void release() override; virtual int32_t getNextFacet() override; virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) override; virtual void setState(const NvcBounds3* bounds) override; virtual void setState(const NvcVec3& p) override; virtual void setPointCmpDirection(int32_t dir) override { NV_UNUSED(dir); } private: void buildAccelStructure(const Vertex* pos, const Edge* edges, const Facet* fc, int32_t facetCount); int32_t m_resolution; NvcBounds3 m_bounds; std::vector< std::vector<int32_t> > m_spatialMap; std::vector<NvcBounds3> m_cells; // Iterator data std::vector<uint32_t> m_alreadyGotFlag; uint32_t m_alreadyGotValue; std::vector<int32_t> m_cellList; int32_t m_gotCells; int32_t m_iteratorCell; int32_t m_iteratorFacet; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTEXTAUTHORINGACCELERATORIMPL_H
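Grid::setMesh and the GridAccelerator::setState overloads in the companion .cpp all reduce to the same mapping: offset a coordinate by the grid origin (m_spos), scale by the per-axis cell density (m_deltas), clamp to the grid resolution, and flatten (i, j, k) to (i * resolution + j) * resolution + k. A hypothetical standalone helper showing just that index math; the parameter roles mirror m_spos and m_deltas, but nothing here is SDK code:

#include <algorithm>
#include <cstdint>

// Map a point to the flattened index of the grid cell containing it.
int32_t cellIndex(float px, float py, float pz,
                  float originX, float originY, float originZ,                  // grid origin (m_spos analog)
                  float cellsPerUnitX, float cellsPerUnitY, float cellsPerUnitZ, // m_deltas analog
                  int32_t resolution)
{
    const auto clampCell = [resolution](float v) {
        return std::min(resolution - 1, std::max(0, (int32_t)v));
    };
    const int32_t i = clampCell((px - originX) * cellsPerUnitX);
    const int32_t j = clampCell((py - originY) * cellsPerUnitY);
    const int32_t k = clampCell((pz - originZ) * cellsPerUnitZ);
    return (i * resolution + j) * resolution + k;
}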
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringMeshImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTAUTHORINGMESHIMPL_H #define NVBLASTAUTHORINGMESHIMPL_H #include "NvBlastExtAuthoringMesh.h" #include "NvBounds3.h" #include <vector> #include <map> #include <set> namespace Nv { namespace Blast { /** Class for internal mesh representation */ class MeshImpl : public Mesh { public: /** Constructs mesh object from array of triangles. \param[in] position Array of vertex positions \param[in] normals Array of vertex normals \param[in] uv Array of vertex uv coordinates \param[in] verticesCount Vertices count \param[in] indices Array of vertex indices. Indices contain vertex index triplets which form a mesh triangle. \param[in] indicesCount Indices count (should be equal to numberOfTriangles * 3) */ MeshImpl(const NvcVec3* position, const NvcVec3* normals, const NvcVec2* uv, uint32_t verticesCount, const uint32_t* indices, uint32_t indicesCount); /** Constructs mesh object from array of facets. 
\param[in] vertices Array of vertices \param[in] edges Array of edges \param[in] facets Array of facets \param[in] posCount Vertices count \param[in] edgesCount Edges count \param[in] facetsCount Facets count */ MeshImpl(const Vertex* vertices, const Edge* edges, const Facet* facets, uint32_t posCount, uint32_t edgesCount, uint32_t facetsCount); MeshImpl(const Vertex* vertices, uint32_t count); MeshImpl(const Vertex* vertices, uint32_t count, uint32_t* indices, uint32_t indexCount, void* materials, uint32_t materialStride); ~MeshImpl(); virtual void release() override; /** Return true if mesh is valid */ bool isValid() const override; /** Return pointer on vertices array */ Vertex* getVerticesWritable() override; /** Return pointer on edges array */ Edge* getEdgesWritable() override; /** Return pointer on facets array */ Facet* getFacetsBufferWritable() override; /** Return pointer on vertices array */ const Vertex* getVertices() const override; /** Return pointer on edges array */ const Edge* getEdges() const override; /** Return pointer on facets array */ const Facet* getFacetsBuffer() const override; /** Return writable pointer on specified facet */ Facet* getFacetWritable(int32_t facet) override; /** Return writable pointer on specified facet */ const Facet* getFacet(int32_t facet) const override; /** Return edges count */ uint32_t getEdgesCount() const override; /** Return vertices count */ uint32_t getVerticesCount() const override; /** Return facet count */ uint32_t getFacetCount() const override; /** Return reference on mesh bounding box. */ const NvcBounds3& getBoundingBox() const override; /** Return writable reference on mesh bounding box. */ NvcBounds3& getBoundingBoxWritable() override; /** Recalculate bounding box */ void recalculateBoundingBox() override; /** Compute mesh volume and centroid. Assumes mesh has outward normals and no holes. */ float getMeshVolumeAndCentroid(NvcVec3& centroid) const override; /** Set per-facet material id. */ void setMaterialId(const int32_t* materialIds) override; /** Replaces an material id on faces with a new one */ void replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId) override; /** Set per-facet smoothing group. */ void setSmoothingGroup(const int32_t* smoothingGroups) override; /** Calculate per-facet bounding boxes. */ virtual void calcPerFacetBounds() override; /** Get pointer on facet bounding box, if not calculated return nullptr. */ virtual const NvcBounds3* getFacetBound(uint32_t index) const override; private: std::vector<Vertex> mVertices; std::vector<Edge> mEdges; std::vector<Facet> mFacets; nvidia::NvBounds3 mBounds; std::vector<nvidia::NvBounds3> mPerFacetBounds; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTAUTHORINGMESHIMPL_H
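MeshImpl's triangle-list constructor is the simplest way to get a mesh into the authoring pipeline: vertex positions plus one index triplet per triangle, with normals and UVs optional. A minimal sketch assuming this header and its dependencies are on the include path; the SDK normally creates meshes through its own factory and allocator rather than plain new, so this is illustration only:

#include "NvBlastExtAuthoringMeshImpl.h"

void buildAndDestroyTriangle()
{
    const NvcVec3 positions[3] = { {0, 0, 0}, {1, 0, 0}, {0, 1, 0} };
    const uint32_t indices[3]  = { 0, 1, 2 };

    // Null normals/UVs are allowed; the constructor zero-fills them.
    Nv::Blast::MeshImpl* mesh =
        new Nv::Blast::MeshImpl(positions, nullptr, nullptr, 3, indices, 3);

    const NvcBounds3& bounds = mesh->getBoundingBox();   // valid immediately: the constructor recalculates it
    (void)bounds;

    delete mesh;   // plain new/delete keeps the sketch self-contained; SDK code pairs its allocator with release()
}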
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringMeshImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #define _CRT_SECURE_NO_WARNINGS #include "NvBlastExtAuthoringMeshImpl.h" #include "NvBlastExtAuthoringTypes.h" #include <NvBlastAssert.h> #include "NvMath.h" #include <NvBlastNvSharedHelpers.h> #include <NvBlastVolumeIntegrals.h> #include <cmath> #include <string.h> #include <vector> #include <algorithm> namespace Nv { namespace Blast { MeshImpl::MeshImpl(const NvcVec3* position, const NvcVec3* normals, const NvcVec2* uv, uint32_t verticesCount, const uint32_t* indices, uint32_t indicesCount) { mVertices.resize(verticesCount); for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].p = position[i]; } if (normals != 0) { for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].n = normals[i]; } } else { for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].n = {0, 0, 0}; } } if (uv != 0) { for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].uv[0] = uv[i]; } } else { for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].uv[0] = {0, 0}; } } mEdges.resize(indicesCount); mFacets.resize(indicesCount / 3); int32_t facetId = 0; for (uint32_t i = 0; i < indicesCount; i += 3) { mEdges[i].s = indices[i]; mEdges[i].e = indices[i + 1]; mEdges[i + 1].s = indices[i + 1]; mEdges[i + 1].e = indices[i + 2]; mEdges[i + 2].s = indices[i + 2]; mEdges[i + 2].e = indices[i]; mFacets[facetId].firstEdgeNumber = i; mFacets[facetId].edgesCount = 3; mFacets[facetId].materialId = 0; //Unassigned for now mFacets[facetId].smoothingGroup = -1; facetId++; } recalculateBoundingBox(); } MeshImpl::MeshImpl(const Vertex* vertices, const Edge* edges, const Facet* facets, uint32_t posCount, uint32_t edgesCount, uint32_t facetsCount) { mVertices.resize(posCount); mEdges.resize(edgesCount); mFacets.resize(facetsCount); memcpy(mVertices.data(), vertices, sizeof(Vertex) * posCount); memcpy(mEdges.data(), edges, sizeof(Edge) * edgesCount); memcpy(mFacets.data(), facets, sizeof(Facet) * facetsCount); recalculateBoundingBox(); } MeshImpl::MeshImpl(const Vertex* vertices, uint32_t 
count) { mVertices = std::vector<Vertex>(vertices, vertices + count); mEdges.resize(count); mFacets.resize(count / 3); uint32_t vp = 0; for (uint32_t i = 0; i < count; i += 3) { mEdges[i].s = vp; mEdges[i].e = vp + 1; mEdges[i + 1].s = vp + 1; mEdges[i + 1].e = vp + 2; mEdges[i + 2].s = vp + 2; mEdges[i + 2].e = vp; vp += 3; } for (uint32_t i = 0; i < count / 3; ++i) { mFacets[i].edgesCount = 3; mFacets[i].firstEdgeNumber = i * 3; } recalculateBoundingBox(); } MeshImpl::MeshImpl(const Vertex* vertices, uint32_t count, uint32_t* indices, uint32_t indexCount, void* materials, uint32_t materialStride) { mVertices = std::vector<Vertex>(vertices, vertices + count); mEdges.resize(indexCount); mFacets.resize(indexCount / 3); for (uint32_t i = 0; i < indexCount; i += 3) { mEdges[i].s = indices[i]; mEdges[i].e = indices[i + 1]; mEdges[i + 1].s = indices[i + 1]; mEdges[i + 1].e = indices[i + 2]; mEdges[i + 2].s = indices[i + 2]; mEdges[i + 2].e = indices[i]; } for (uint32_t i = 0; i < indexCount / 3; ++i) { mFacets[i].edgesCount = 3; mFacets[i].firstEdgeNumber = i * 3; mFacets[i].userData = 0; if (materials != nullptr) { mFacets[i].materialId = *(uint32_t*)((uint8_t*)materials + i * materialStride); } } recalculateBoundingBox(); } float MeshImpl::getMeshVolumeAndCentroid(NvcVec3& centroid) const { class MeshImplQuery { public: MeshImplQuery(const MeshImpl& mesh) : m_mesh(mesh) {} size_t faceCount() const { return (size_t)m_mesh.getFacetCount(); } size_t vertexCount(size_t faceIndex) const { return (size_t)m_mesh.getFacet((int32_t)faceIndex)->edgesCount; } NvcVec3 vertex(size_t faceIndex, size_t vertexIndex) const { const Nv::Blast::Facet* facet = m_mesh.getFacet(faceIndex); return m_mesh.getVertices()[m_mesh.getEdges()[facet->firstEdgeNumber + vertexIndex].s].p; } const MeshImpl& m_mesh; }; return calculateMeshVolumeAndCentroid<MeshImplQuery>(centroid, *this); } uint32_t MeshImpl::getFacetCount() const { return static_cast<uint32_t>(mFacets.size()); } Vertex* MeshImpl::getVerticesWritable() { return mVertices.data(); } Edge* MeshImpl::getEdgesWritable() { return mEdges.data(); } const Vertex* MeshImpl::getVertices() const { return mVertices.data(); } const Edge* MeshImpl::getEdges() const { return mEdges.data(); } uint32_t MeshImpl::getEdgesCount() const { return static_cast<uint32_t>(mEdges.size()); } uint32_t MeshImpl::getVerticesCount() const { return static_cast<uint32_t>(mVertices.size()); } Facet* MeshImpl::getFacetsBufferWritable() { return mFacets.data(); } const Facet* MeshImpl::getFacetsBuffer() const { return mFacets.data(); } Facet* MeshImpl::getFacetWritable(int32_t facet) { return &mFacets[facet]; } const Facet* MeshImpl::getFacet(int32_t facet) const { return &mFacets[facet]; } MeshImpl::~MeshImpl() { } void MeshImpl::release() { delete this; } const NvcBounds3& MeshImpl::getBoundingBox() const { return fromNvShared(mBounds); } NvcBounds3& MeshImpl::getBoundingBoxWritable() { return fromNvShared(mBounds); } void MeshImpl::recalculateBoundingBox() { mBounds.setEmpty(); for (uint32_t i = 0; i < mVertices.size(); ++i) { mBounds.include(toNvShared(mVertices[i].p)); } calcPerFacetBounds(); } const NvcBounds3* MeshImpl::getFacetBound(uint32_t index) const { if (mPerFacetBounds.empty()) { return nullptr; } return &fromNvShared(mPerFacetBounds[index]); } void MeshImpl::calcPerFacetBounds() { mPerFacetBounds.resize(mFacets.size()); for (uint32_t i = 0; i < mFacets.size(); ++i) { auto& fb = mPerFacetBounds[i]; fb.setEmpty(); for (uint32_t v = 0; v < mFacets[i].edgesCount; ++v) { 
fb.include(toNvShared(mVertices[mEdges[mFacets[i].firstEdgeNumber + v].s].p)); fb.include(toNvShared(mVertices[mEdges[mFacets[i].firstEdgeNumber + v].e].p)); } } } void MeshImpl::setMaterialId(const int32_t* materialId) { if (materialId != nullptr) { for (uint32_t i = 0; i < mFacets.size(); ++i) { mFacets[i].materialId = *materialId; ++materialId; } } } bool MeshImpl::isValid() const { return mVertices.size() > 0 && mEdges.size() > 0 && mFacets.size() > 0; } void MeshImpl::replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId) { for (uint32_t i = 0; i < mFacets.size(); ++i) { if (mFacets[i].materialId == oldMaterialId) { mFacets[i].materialId = newMaterialId; } } } void MeshImpl::setSmoothingGroup(const int32_t* smoothingGroups) { if (smoothingGroups != nullptr) { for (uint32_t i = 0; i < mFacets.size(); ++i) { mFacets[i].smoothingGroup = *smoothingGroups; ++smoothingGroups; } } } } // namespace Blast } // namespace Nv
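The constructors above lay each facet out as a run of consecutive edges; the sketch below (not part of the SDK) shows the same traversal pattern the volume query uses, computing a facet centroid through the read-only accessors. The helper itself is an assumption for illustration.

// Hedged sketch: average the vertices of one facet via the Mesh accessors.
#include "NvBlastExtAuthoringMesh.h"
#include "NvBlastExtAuthoringTypes.h"

inline NvcVec3 facetCentroid(const Nv::Blast::Mesh& mesh, uint32_t facetIndex)
{
    const Nv::Blast::Vertex* vertices = mesh.getVertices();
    const Nv::Blast::Edge*   edges    = mesh.getEdges();
    const Nv::Blast::Facet*  facet    = mesh.getFacet((int32_t)facetIndex);

    NvcVec3 c = { 0.0f, 0.0f, 0.0f };
    for (uint32_t e = 0; e < facet->edgesCount; ++e)
    {
        // Each edge's 's' member indexes the facet's next vertex
        const NvcVec3& p = vertices[edges[facet->firstEdgeNumber + e].s].p;
        c.x += p.x; c.y += p.y; c.z += p.z;
    }
    const float inv = facet->edgesCount ? 1.0f / (float)facet->edgesCount : 0.0f;
    c.x *= inv; c.y *= inv; c.z *= inv;
    return c;
}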
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkActorImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvPreprocessor.h" #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkGroupImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkJointImpl.h" #include "NvBlast.h" #include "NvBlastAssert.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { TkActorImpl* TkActorImpl::create(const TkActorDesc& desc) { const TkAssetImpl* asset = static_cast<const TkAssetImpl*>(desc.asset); TkFamilyImpl* family = TkFamilyImpl::create(asset); NvBlastFamily* familyLL = family->getFamilyLLInternal(); Array<char>::type scratch((uint32_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(familyLL, logLL)); NvBlastActor* actorLL = NvBlastFamilyCreateFirstActor(familyLL, &desc, scratch.begin(), logLL); if (actorLL == nullptr) { NVBLAST_LOG_ERROR("TkActorImpl::create: low-level actor could not be created."); return nullptr; } TkActorImpl* actor = family->addActor(actorLL); if (actor != nullptr) { // Add internal joints const uint32_t internalJointCount = asset->getJointDescCountInternal(); const TkAssetJointDesc* jointDescs = asset->getJointDescsInternal(); const NvBlastSupportGraph graph = asset->getGraph(); TkJointImpl* joints = family->getInternalJoints(); for (uint32_t jointNum = 0; jointNum < internalJointCount; ++jointNum) { const TkAssetJointDesc& assetJointDesc = jointDescs[jointNum]; NVBLAST_ASSERT(assetJointDesc.nodeIndices[0] < graph.nodeCount && assetJointDesc.nodeIndices[1] < graph.nodeCount); TkJointDesc jointDesc; jointDesc.families[0] = jointDesc.families[1] = family; jointDesc.chunkIndices[0] = graph.chunkIndices[assetJointDesc.nodeIndices[0]]; jointDesc.chunkIndices[1] = graph.chunkIndices[assetJointDesc.nodeIndices[1]]; jointDesc.attachPositions[0] = assetJointDesc.attachPositions[0]; jointDesc.attachPositions[1] = assetJointDesc.attachPositions[1]; TkJointImpl* joint = new (joints + jointNum) TkJointImpl(jointDesc, family); actor->addJoint(joint->m_links[0]); } // Mark as damaged to trigger 
first split call. The asset may already be split into several actors initially.
        actor->markAsDamaged();
    }

    return actor;
}


//////// Member functions ////////

TkActorImpl::TkActorImpl()
    : m_actorLL(nullptr)
    , m_family(nullptr)
    , m_group(nullptr)
    , m_groupJobIndex(invalidIndex<uint32_t>())
    , m_flags(0)
    , m_jointCount(0)
{
#if NV_PROFILE
    NvBlastTimersReset(&m_timers);
#endif
}

TkActorImpl::~TkActorImpl()
{
}

void TkActorImpl::release()
{
    // Disassociate all joints
    // Copy joint array for safety against implementation of joint->setActor
    TkJointImpl** joints = reinterpret_cast<TkJointImpl**>(NvBlastAlloca(sizeof(TkJointImpl*)*getJointCountInternal()));
    TkJointImpl** stop = joints + getJointCountInternal();
    TkJointImpl** jointHandle = joints;
    for (JointIt j(*this); (bool)j; ++j)
    {
        *jointHandle++ = *j;
    }
    jointHandle = joints;
    while (jointHandle < stop)
    {
        NVBLAST_ASSERT(*jointHandle != nullptr);
        NVBLAST_ASSERT((*jointHandle)->getDataInternal().actors[0] == this || (*jointHandle)->getDataInternal().actors[1] == this);
        (*jointHandle++)->setActors(nullptr, nullptr);
    }
    NVBLAST_ASSERT(getJointCountInternal() == 0);

    if (m_group != nullptr)
    {
        m_group->removeActor(*this);
    }

    if (m_actorLL != nullptr)
    {
        NvBlastActorDeactivate(m_actorLL, logLL);
    }

    if (m_family != nullptr)
    {
        m_family->removeActor(this);
        // Make sure we dispatch any remaining events when this family is emptied, since it will no longer be done by any group
        if (m_family->getActorCountInternal() == 0)
        {
            m_family->getQueue().dispatch();
        }
    }
}

const NvBlastActor* TkActorImpl::getActorLL() const
{
    return m_actorLL;
}

TkFamily& TkActorImpl::getFamily() const
{
    return getFamilyImpl();
}

uint32_t TkActorImpl::getIndex() const
{
    return getIndexInternal();
}

TkGroup* TkActorImpl::getGroup() const
{
    return getGroupImpl();
}

TkGroup* TkActorImpl::removeFromGroup()
{
    if (m_group == nullptr)
    {
        NVBLAST_LOG_WARNING("TkActorImpl::removeFromGroup: actor not in a group.");
        return nullptr;
    }

    if (m_group->isProcessing())
    {
        NVBLAST_LOG_ERROR("TkActorImpl::removeFromGroup: cannot alter Group while processing.");
        return nullptr;
    }

    TkGroup* group = m_group;
    return m_group->removeActor(*this) ?
group : nullptr; } NvBlastFamily* TkActorImpl::getFamilyLL() const { return m_family->getFamilyLLInternal(); } const TkAsset* TkActorImpl::getAsset() const { return m_family->getAssetImpl(); } uint32_t TkActorImpl::getVisibleChunkCount() const { return NvBlastActorGetVisibleChunkCount(m_actorLL, logLL); } uint32_t TkActorImpl::getVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize) const { return NvBlastActorGetVisibleChunkIndices(visibleChunkIndices, visibleChunkIndicesSize, m_actorLL, logLL); } uint32_t TkActorImpl::getGraphNodeCount() const { return NvBlastActorGetGraphNodeCount(m_actorLL, logLL); } uint32_t TkActorImpl::getGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize) const { return NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeIndicesSize, m_actorLL, logLL); } const float* TkActorImpl::getBondHealths() const { return NvBlastActorGetBondHealths(m_actorLL, logLL); } uint32_t TkActorImpl::getSplitMaxActorCount() const { return NvBlastActorGetMaxActorCountForSplit(m_actorLL, logLL); } bool TkActorImpl::isDamaged() const { NVBLAST_ASSERT(!m_flags.isSet(TkActorFlag::DAMAGED) || (m_flags.isSet(TkActorFlag::DAMAGED) && m_flags.isSet(TkActorFlag::PENDING))); return m_flags.isSet(TkActorFlag::DAMAGED); } void TkActorImpl::markAsDamaged() { m_flags |= TkActorFlag::DAMAGED; makePending(); } void TkActorImpl::makePending() { if (m_group != nullptr && !isPending()) { m_group->enqueue(this); } m_flags |= TkActorFlag::PENDING; } TkActorImpl::operator Nv::Blast::TkActorData() const { TkActorData data = { m_family, userData, getIndex() }; return data; } void TkActorImpl::damage(const NvBlastDamageProgram& program, const void* programParams) { BLAST_PROFILE_SCOPE_L("TkActor::damage"); if (m_group == nullptr) { NVBLAST_LOG_WARNING("TkActor::damage: actor is not in a group, cannot fracture."); return; } if (m_group->isProcessing()) { NVBLAST_LOG_WARNING("TkActor::damage: group is being processed, cannot fracture this actor."); return; } if (NvBlastActorCanFracture(m_actorLL, logLL)) { m_damageBuffer.pushBack(DamageData{ program, programParams}); makePending(); } } void TkActorImpl::generateFracture(NvBlastFractureBuffers* commands, const NvBlastDamageProgram& program, const void* programParams) const { BLAST_PROFILE_SCOPE_L("TkActor::generateFracture"); if (m_group && m_group->isProcessing()) { NVBLAST_LOG_WARNING("TkActor::generateFracture: group is being processed, cannot fracture this actor."); return; } // const context, must make m_timers mutable otherwise NvBlastActorGenerateFracture(commands, m_actorLL, program, programParams, logLL, const_cast<NvBlastTimers*>(&m_timers)); } void TkActorImpl::applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands) { BLAST_PROFILE_SCOPE_L("TkActor::applyFracture"); if (m_group && m_group->isProcessing()) { NVBLAST_LOG_WARNING("TkActor::applyFracture: group is being processed, cannot fracture this actor."); return; } NvBlastActorApplyFracture(eventBuffers, m_actorLL, commands, logLL, &m_timers); if (commands->chunkFractureCount > 0 || commands->bondFractureCount > 0) { markAsDamaged(); TkFractureCommands* fevt = getFamilyImpl().getQueue().allocData<TkFractureCommands>(); fevt->tkActorData = *this; fevt->buffers = *commands; getFamilyImpl().getQueue().addEvent(fevt); getFamilyImpl().getQueue().dispatch(); } } uint32_t TkActorImpl::getJointCount() const { return getJointCountInternal(); } uint32_t TkActorImpl::getJoints(TkJoint** joints, uint32_t jointsSize) 
const { uint32_t jointsWritten = 0; for (JointIt j(*this); (bool)j && jointsWritten < jointsSize; ++j) { joints[jointsWritten++] = *j; } return jointsWritten; } bool TkActorImpl::hasExternalBonds() const { return NvBlastActorHasExternalBonds(m_actorLL, logLL); } } // namespace Blast } // namespace Nv
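For reference, a hedged sketch of the manual fracture path implemented above: the caller provides command buffers, generates fracture commands, then applies them, passing nullptr for the event buffers just as TkFamilyImpl does internally. The damage program and its parameters are assumed to come from elsewhere (for example a damage-shader extension), the header names are assumptions, and the buffer sizes are arbitrary.

// Hedged sketch: one generate/apply fracture round trip on a TkActor.
#include "NvBlast.h"
#include "NvBlastTk.h"
#include <vector>

void fractureActorOnce(Nv::Blast::TkActor& actor,
                       const NvBlastDamageProgram& program,
                       const void* programParams)
{
    // Caller-owned command storage; 256 entries is an arbitrary illustrative size.
    std::vector<NvBlastChunkFractureData> chunkCmds(256);
    std::vector<NvBlastBondFractureData>  bondCmds(256);

    NvBlastFractureBuffers commands;
    commands.chunkFractureCount = (uint32_t)chunkCmds.size();
    commands.chunkFractures     = chunkCmds.data();
    commands.bondFractureCount  = (uint32_t)bondCmds.size();
    commands.bondFractures      = bondCmds.data();

    // Fill the command buffers from the damage program, then apply them.
    actor.generateFracture(&commands, program, programParams);
    actor.applyFracture(nullptr, &commands);  // marks the actor damaged if anything broke
}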
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkGUID.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKGUID_H #define NVBLASTTKGUID_H #include "NvPreprocessor.h" #if NV_WINDOWS_FAMILY #include <rpc.h> #else //#include <uuid/uuid.h> #include "NvBlastTime.h" #endif #include "NsHash.h" namespace Nv { namespace Blast { #if NV_WINDOWS_FAMILY NV_INLINE NvBlastID TkGenerateGUID(void* ptr) { NV_UNUSED(ptr); NV_COMPILE_TIME_ASSERT(sizeof(UUID) == sizeof(NvBlastID)); NvBlastID guid; UuidCreate(reinterpret_cast<UUID*>(&guid)); return guid; } #else NV_INLINE NvBlastID TkGenerateGUID(void* ptr) { // NV_COMPILE_TIME_ASSERT(sizeof(uuid_t) == sizeof(NvBlastID)); Time time; NvBlastID guid; // uuid_generate_random(reinterpret_cast<uuid_t&>(guid)); *reinterpret_cast<uint64_t*>(guid.data) = reinterpret_cast<uintptr_t>(ptr); *reinterpret_cast<int64_t*>(guid.data + 8) = time.getLastTickCount(); return guid; } #endif /** Compares two NvBlastIDs. \param[in] id1 A pointer to the first id to compare. \param[in] id2 A pointer to the second id to compare. \return true iff ids are equal. */ NV_INLINE bool TkGUIDsEqual(const NvBlastID* id1, const NvBlastID* id2) { return !memcmp(id1, id2, sizeof(NvBlastID)); } /** Clears an NvBlastID (sets all of its fields to zero). \param[out] id A pointer to the ID to clear. */ NV_INLINE void TkGUIDReset(NvBlastID* id) { memset(id, 0, sizeof(NvBlastID)); } /** Tests an NvBlastID to determine if it's zeroed. After calling TkGUIDReset on an ID, passing it to this function will return a value of true. \param[in] id A pointer to the ID to test. 
*/ NV_INLINE bool TkGUIDIsZero(const NvBlastID* id) { return *reinterpret_cast<const uint64_t*>(&id->data[0]) == 0 && *reinterpret_cast<const uint64_t*>(&id->data[8]) == 0; } } // namespace Blast } // namespace Nv namespace nvidia { namespace shdfnd { // hash specialization for NvBlastID template <> struct Hash<NvBlastID> { uint32_t operator()(const NvBlastID& k) const { // "DJB" string hash uint32_t h = 5381; for (uint32_t i = 0; i < sizeof(k.data) / sizeof(k.data[0]); ++i) h = ((h << 5) + h) ^ uint32_t(k.data[i]); return h; } bool equal(const NvBlastID& k0, const NvBlastID& k1) const { return Nv::Blast::TkGUIDsEqual(&k0, &k1); } }; } // namespace shdfnd } // namespace nvidia #endif // #ifndef NVBLASTTKGUID_H
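A small sketch exercising the helpers above; the wrapper function is hypothetical and only demonstrates the expected invariants.

// Hedged sketch: generate, compare, and reset an NvBlastID.
#include "NvBlastTkGUID.h"

inline bool guidRoundTripExample(void* someUniquePtr)
{
    // On non-Windows builds the pointer is only used as entropy.
    NvBlastID a = Nv::Blast::TkGenerateGUID(someUniquePtr);
    NvBlastID b = a;                                        // copy
    const bool equal = Nv::Blast::TkGUIDsEqual(&a, &b);     // true
    Nv::Blast::TkGUIDReset(&b);
    return equal && Nv::Blast::TkGUIDIsZero(&b);            // true after reset
}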
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTaskImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKTASKIMPL_H #define NVBLASTTKTASKIMPL_H #include "NvBlast.h" #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkEventQueue.h" #include "NvBlastArray.h" #include <atomic> #include <mutex> #include <condition_variable> #include "NvBlastAssert.h" #include "NvBlastTkGroup.h" // TkGroupStats namespace Nv { namespace Blast { class TkGroupImpl; class TkActorImpl; class TkFamilyImpl; /** Transient structure describing a job and its results. */ struct TkWorkerJob { TkActorImpl* m_tkActor; //!< the actor to process TkActorImpl** m_newActors; //!< list of child actors created by splitting uint32_t m_newActorsCount; //!< the number of child actors created }; /** A list of equally sized memory blocks sharable between tasks. */ template<typename T> class SharedBlock { public: SharedBlock() : m_numElementsPerBlock(0), m_numBlocks(0), m_buffer(nullptr) {} /** Allocates one large memory block of elementsPerBlock*numBlocks elements. */ void allocate(uint32_t elementsPerBlock, uint32_t numBlocks) { NVBLAST_ASSERT(elementsPerBlock > 0 && numBlocks > 0); m_buffer = reinterpret_cast<T*>(NVBLAST_ALLOC_NAMED(elementsPerBlock*numBlocks*sizeof(T), "SharedBlock")); m_numElementsPerBlock = elementsPerBlock; m_numBlocks = numBlocks; } /** Returns the pointer to the first element of a block of numElementsPerBlock() elements. */ T* getBlock(uint32_t id) { NVBLAST_ASSERT(id < m_numBlocks || 0 == m_numElementsPerBlock); return &m_buffer[id*m_numElementsPerBlock]; } /** The number of elements available per block. */ uint32_t numElementsPerBlock() const { return m_numElementsPerBlock; } /** Frees the whole memory block. 
*/ void release() { m_numBlocks = 0; m_numElementsPerBlock = 0; NVBLAST_FREE(m_buffer); m_buffer = nullptr; } private: uint32_t m_numElementsPerBlock; //!< elements available in one block uint32_t m_numBlocks; //!< number of virtual blocks available T* m_buffer; //!< contiguous memory for all blocks }; /** A preallocated, shared array from which can be allocated from in tasks. Intended to be used when the maximum amount of data (e.g. for a family) is known in advance. No further allocations take place on exhaustion. Exhaustion asserts in debug builds and overflows otherwise. */ template<typename T> class SharedBuffer { public: SharedBuffer() : m_capacity(0), m_used(0), m_buffer(nullptr) {} /** Atomically gets a pointer to the first element of an array of n elements. */ T* reserve(size_t n) { NVBLAST_ASSERT(m_used + n <= m_capacity); size_t start = m_used.fetch_add(n); return &m_buffer[start]; } /** Preallocates memory for capacity elements. */ void allocate(size_t capacity) { NVBLAST_ASSERT(m_buffer == nullptr); m_buffer = reinterpret_cast<T*>(NVBLAST_ALLOC_NAMED(capacity*sizeof(T), "SplitMemory")); m_capacity = capacity; } /** Preserves the memory allocated but resets to reserve from the beginning of the array. */ void reset() { m_used = 0; } /** Frees the preallocated array. */ void release() { NVBLAST_ASSERT(m_buffer != nullptr); NVBLAST_FREE(m_buffer); m_buffer = nullptr; m_capacity = m_used = 0; } private: size_t m_capacity; //!< available elements in the buffer std::atomic<size_t> m_used; //!< used elements in the buffer T* m_buffer; //!< the memory containing T's }; /** Allocates from a preallocated, externally owned memory block initialized with. When blocks run out of space, new ones are allocated and owned by this class. */ template<typename T> class LocalBuffer { public: /** Returns the pointer to the first element of an array of n elements. Allocates a new block of memory when exhausted, its size being the larger of n and capacity set with initialize(). */ T* allocate(size_t n) { if (m_used + n > m_capacity) { allocateNewBlock(n > m_capacity ? n : m_capacity); } size_t index = m_used; m_used += n; return &m_currentBlock[index]; } /** Release the additionally allocated memory blocks. The externally owned memory block remains untouched. */ void clear() { for (void* block : m_memoryBlocks) { NVBLAST_FREE(block); } m_memoryBlocks.clear(); } /** Set the externally owned memory block to start allocating from, with a size of capacity elements. */ void initialize(T* block, size_t capacity) { m_currentBlock = block; m_capacity = capacity; m_used = 0; } private: /** Allocates space for capacity elements. */ void allocateNewBlock(size_t capacity) { BLAST_PROFILE_SCOPE_L("Local Buffer allocation"); m_capacity = capacity; m_currentBlock = static_cast<T*>(NVBLAST_ALLOC_NAMED(capacity*sizeof(T), "Blast LocalBuffer")); m_memoryBlocks.pushBack(m_currentBlock); m_used = 0; } InlineArray<void*, 4>::type m_memoryBlocks; //!< storage for memory blocks T* m_currentBlock; //!< memory block used to allocate from size_t m_used; //!< elements used in current block size_t m_capacity; //!< elements available in current block }; /** Holds the memory used by TkWorker for each family in each group. */ class SharedMemory { public: SharedMemory() : m_eventsMemory(0), m_eventsCount(0), m_refCount(0) {} /** Reserves n entries from preallocated memory. */ NvBlastActor** reserveNewActors(size_t n) { return m_newActorBuffers.reserve(n); } /** Reserves n entries from preallocated memory. 
*/
    TkActor** reserveNewTkActors(size_t n)
    {
        return m_newTkActorBuffers.reserve(n);
    }

    /**
    Allocates the internal buffers to sizes sufficient for the given family.
    */
    void allocate(TkFamilyImpl&);

    /**
    Resets the internal buffers to reserve from their beginning. Preserves the allocated memory.
    */
    void reset()
    {
        m_newActorBuffers.reset();
        m_newTkActorBuffers.reset();
    }

    /**
    Increments the reference count.
    */
    void addReference() { m_refCount++; }

    /**
    Increments the reference count by n.
    */
    void addReference(size_t n) { m_refCount += n; }

    /**
    Decrements the reference count. Returns true if the count reached zero.
    */
    bool removeReference() { m_refCount--; return !isUsed(); }

    /**
    Checks if the reference count is not zero.
    */
    bool isUsed() { return m_refCount > 0; }

    /**
    Release the internal buffers' memory.
    */
    void release()
    {
        m_newActorBuffers.release();
        m_newTkActorBuffers.release();
    }

    TkEventQueue m_events;      //!< event queue shared across a group's actors of the same family
    uint32_t m_eventsMemory;    //!< expected memory size for event data
    uint32_t m_eventsCount;     //!< expected number of events

private:
    size_t m_refCount;          //!< helper for usage and releasing memory

    SharedBuffer<NvBlastActor*> m_newActorBuffers;  //!< memory for splitting
    SharedBuffer<TkActor*> m_newTkActorBuffers;     //!< memory for split events
};


/**
Thread worker fracturing and splitting actors sequentially.
The list of actual jobs is provided by the group owning this worker.
*/
class TkWorker final : public TkGroupWorker
{
public:
    TkWorker() : m_id(~(uint32_t)0), m_group(nullptr), m_isBusy(false) {}
    void process(uint32_t jobID);
    void initialize();

    void process(TkWorkerJob& job);

    uint32_t                                m_id;           //!< this worker's id
    TkGroupImpl*                            m_group;        //!< the group owning this worker

    LocalBuffer<NvBlastChunkFractureData>   m_chunkBuffer;  //!< memory manager for chunk event data
    LocalBuffer<NvBlastBondFractureData>    m_bondBuffer;   //!< memory manager for bonds event data

    void*                                   m_splitScratch;
    NvBlastFractureBuffers                  m_tempBuffer;

    bool                                    m_isBusy;

#if NV_PROFILE
    TkGroupStats m_stats;
#endif
};

}
}

#endif // NVBLASTTKTASKIMPL_H
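A usage sketch of the LocalBuffer lifecycle described above, assuming access to this internal header; the element type, sizes, and the helper function are chosen only for illustration.

// Hedged sketch: external block first, internal spill-over blocks on demand.
#include "NvBlastTkTaskImpl.h"

void localBufferUsageSketch(NvBlastBondFractureData* externalBlock, size_t externalCapacity)
{
    Nv::Blast::LocalBuffer<NvBlastBondFractureData> buffer;
    buffer.initialize(externalBlock, externalCapacity);

    NvBlastBondFractureData* first = buffer.allocate(16);                     // served from the external block
    NvBlastBondFractureData* burst = buffer.allocate(externalCapacity + 1);   // exceeds capacity, forces a new internal block
    (void)first; (void)burst;

    buffer.clear();  // frees only the internally allocated blocks; the external block is untouched
}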
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkFamilyImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkGroupImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkJointImpl.h" #include "NvBlastIndexFns.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { //////// Static data //////// NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(Family); //////// Member functions //////// TkFamilyImpl::TkFamilyImpl() : m_familyLL(nullptr), m_internalJointCount(0), m_asset(nullptr) { } TkFamilyImpl::TkFamilyImpl(const NvBlastID& id) : TkFamilyType(id), m_familyLL(nullptr), m_internalJointCount(0), m_asset(nullptr) { } TkFamilyImpl::~TkFamilyImpl() { if (m_familyLL != nullptr) { uint32_t familyActorCount = NvBlastFamilyGetActorCount(m_familyLL, logLL); if (familyActorCount != 0) { NVBLAST_LOG_WARNING("TkFamilyImpl::~TkFamilyImpl(): family actor count is not 0."); } NVBLAST_FREE(m_familyLL); } } void TkFamilyImpl::release() { for (TkActorImpl& actor : m_actors) { if (actor.isActive()) { actor.release(); } } m_actors.clear(); NVBLAST_DELETE(this, TkFamilyImpl); } const NvBlastFamily* TkFamilyImpl::getFamilyLL() const { return m_familyLL; } TkActorImpl* TkFamilyImpl::addActor(NvBlastActor* actorLL) { TkActorImpl* actor = getActorByActorLL(actorLL); NVBLAST_ASSERT(actor); actor->m_actorLL = actorLL; actor->m_family = this; return actor; } void TkFamilyImpl::removeActor(TkActorImpl* actor) { NVBLAST_ASSERT(actor != nullptr && actor->m_family == this); //actor->m_family = nullptr; actor->m_actorLL = nullptr; } uint32_t TkFamilyImpl::getActorCount() const { return getActorCountInternal(); } uint32_t TkFamilyImpl::getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart /*= 0*/) const { uint32_t actorCount = getActorCount(); if (actorCount <= indexStart) { NVBLAST_LOG_WARNING("TkFamilyImpl::getActors: indexStart beyond end of actor list."); return 0; } actorCount -= indexStart; if (actorCount > bufferSize) { actorCount = static_cast<uint32_t>(bufferSize); } uint32_t index 
= 0; for (const TkActorImpl& actor : m_actors) { if (actor.isActive()) { if (index >= indexStart) { if ((index - indexStart) >= actorCount) { break; } else { *buffer++ = const_cast<TkActorImpl*>(&actor); } } index++; } } return actorCount; } NV_INLINE bool areLLActorsEqual(const NvBlastActor* actor0, const NvBlastActor* actor1, Array<uint32_t>::type& scratch) { if (NvBlastActorGetGraphNodeCount(actor0, logLL) != NvBlastActorGetGraphNodeCount(actor1, logLL)) { return false; } const uint32_t chunkCount = NvBlastActorGetVisibleChunkCount(actor0, logLL); if (chunkCount != NvBlastActorGetVisibleChunkCount(actor1, logLL)) { return false; } scratch.resize(chunkCount * 2); NvBlastActorGetVisibleChunkIndices(scratch.begin(), chunkCount, actor0, logLL); NvBlastActorGetVisibleChunkIndices(scratch.begin() + chunkCount, chunkCount, actor1, logLL); return memcmp(scratch.begin(), scratch.begin() + chunkCount, chunkCount * sizeof(uint32_t)) == 0; } void TkFamilyImpl::reinitialize(const NvBlastFamily* newFamily, TkGroup* group) { NVBLAST_ASSERT(newFamily); #if NV_ENABLE_ASSERTS NvBlastID id0 = NvBlastFamilyGetAssetID(m_familyLL, logLL); NvBlastID id1 = NvBlastFamilyGetAssetID(newFamily, logLL); NVBLAST_ASSERT(TkGUIDsEqual(&id0, &id1)); #endif NVBLAST_ASSERT(NvBlastFamilyGetSize(m_familyLL, logLL) == NvBlastFamilyGetSize(newFamily, logLL)); // alloc and init new family const uint32_t blockSize = NvBlastFamilyGetSize(newFamily, logLL); NvBlastFamily* newFamilyCopy = (NvBlastFamily*)NVBLAST_ALLOC_NAMED(blockSize, "TkFamilyImpl::reinitialize"); memcpy(newFamilyCopy, newFamily, blockSize); NvBlastFamilySetAsset(newFamilyCopy, m_asset->getAssetLL(), logLL); // get actors from new family Array<NvBlastActor*>::type newLLActors(NvBlastFamilyGetActorCount(newFamilyCopy, logLL)); uint32_t actorCount = NvBlastFamilyGetActors(newLLActors.begin(), newLLActors.size(), newFamilyCopy, logLL); // reset actor families to nullptr (we use it as a flag later) for (TkActorImpl& actor : m_actors) { if (actor.isActive()) { actor.m_family = nullptr; } } // prepare split event with new actors auto newActorsSplitEvent = getQueue().allocData<TkSplitEvent>(); Array<TkActor*>::type children(actorCount); children.resizeUninitialized(0); newActorsSplitEvent->children = children.begin(); // scratch Array<uint32_t>::type scratch(m_asset->getChunkCount()); for (uint32_t i = 0; i < actorCount; ++i) { NvBlastActor* newLLActor = newLLActors[i]; uint32_t actorIndex = NvBlastActorGetIndex(newLLActor, logLL); TkActorImpl& tkActor = *getActorByIndex(actorIndex); tkActor.m_family = this; if (!tkActor.isActive() || !areLLActorsEqual(newLLActor, tkActor.m_actorLL, scratch)) { if (tkActor.isActive()) { auto removeSplitEvent = getQueue().allocData<TkSplitEvent>(); removeSplitEvent->parentData.family = this; removeSplitEvent->numChildren = 0; removeSplitEvent->parentData.userData = tkActor.userData; removeSplitEvent->parentData.index = tkActor.getIndex(); getQueue().addEvent(removeSplitEvent); } tkActor.m_actorLL = newLLActor; // switch groups TkGroupImpl* prevGroup = tkActor.m_group; if (prevGroup != group) { if (prevGroup) { prevGroup->removeActor(tkActor); } if (group) { group->addActor(tkActor); } } children.pushBack(&tkActor); } else { tkActor.m_actorLL = newLLActor; } } // if m_family is still nullptr for an active actor -> remove it. It doesn't exist in new family. 
for (TkActorImpl& tkActor : m_actors) { if (tkActor.isActive() && tkActor.m_family == nullptr) { tkActor.m_family = this; if (tkActor.m_group) { tkActor.m_group->removeActor(tkActor); } auto removeSplitEvent = getQueue().allocData<TkSplitEvent>(); removeSplitEvent->parentData.family = this; removeSplitEvent->numChildren = 0; removeSplitEvent->parentData.userData = tkActor.userData; removeSplitEvent->parentData.index = tkActor.getIndex(); getQueue().addEvent(removeSplitEvent); tkActor.m_actorLL = nullptr; } } // add split event with all new actors newActorsSplitEvent->parentData.family = this; newActorsSplitEvent->parentData.userData = 0; newActorsSplitEvent->parentData.index = invalidIndex<uint32_t>(); newActorsSplitEvent->numChildren = children.size(); if (newActorsSplitEvent->numChildren > 0) { getQueue().addEvent(newActorsSplitEvent); } // replace family NVBLAST_FREE(m_familyLL); m_familyLL = newFamilyCopy; // update joints for (TkActorImpl& tkActor : m_actors) { if (!tkActor.m_jointList.isEmpty()) { updateJoints(&tkActor); } } getQueue().dispatch(); } TkActorImpl* TkFamilyImpl::getActorByChunk(uint32_t chunk) { if (chunk >= NvBlastAssetGetChunkCount(m_asset->getAssetLLInternal(), logLL)) { NVBLAST_LOG_WARNING("TkFamilyImpl::getActorByChunk: invalid chunk index. Returning NULL."); return nullptr; } NvBlastActor* actorLL = NvBlastFamilyGetChunkActor(m_familyLL, chunk, logLL); return actorLL ? getActorByActorLL(actorLL) : nullptr; } void TkFamilyImpl::applyFractureInternal(const NvBlastFractureBuffers* commands) { NvBlastSupportGraph graph = getAsset()->getGraph(); // apply bond fracture commands on relevant actors { TkActorImpl* currActor = nullptr; NvBlastBondFractureData* bondFractures = commands->bondFractures; uint32_t bondFracturesCount = 0; auto applyFracture = [&]() { if (bondFracturesCount > 0) { if (currActor != nullptr && currActor->isActive()) { NvBlastFractureBuffers newCommands; newCommands.bondFractures = bondFractures; newCommands.bondFractureCount = bondFracturesCount; newCommands.chunkFractures = nullptr; newCommands.chunkFractureCount = 0; currActor->applyFracture(nullptr, &newCommands); } bondFractures += bondFracturesCount; bondFracturesCount = 0; } }; for (uint32_t i = 0; i < commands->bondFractureCount; ++i, ++bondFracturesCount) { const NvBlastBondFractureData& command = commands->bondFractures[i]; uint32_t chunk0 = graph.chunkIndices[command.nodeIndex0]; uint32_t chunk1 = graph.chunkIndices[command.nodeIndex1]; TkActorImpl* actor0 = getActorByChunk(chunk0); TkActorImpl* actor1 = getActorByChunk(chunk1); if (actor0 != actor1) { // skipping this event, bond already broken actor0 = nullptr; } if (actor0 != currActor) { applyFracture(); currActor = actor0; } } if (bondFracturesCount > 0) { applyFracture(); } } // apply chunk fracture commands on relevant actors { TkActorImpl* currActor = nullptr; NvBlastChunkFractureData* chunkFractures = commands->chunkFractures; uint32_t chunkFracturesCount = 0; auto applyFracture = [&]() { if (chunkFracturesCount > 0) { if (currActor != nullptr && currActor->isActive()) { NvBlastFractureBuffers newCommands; newCommands.bondFractures = nullptr; newCommands.bondFractureCount = 0; newCommands.chunkFractures = chunkFractures; newCommands.chunkFractureCount = chunkFracturesCount; currActor->applyFracture(nullptr, &newCommands); } chunkFractures += chunkFracturesCount; chunkFracturesCount = 0; } }; for (uint32_t i = 0; i < commands->chunkFractureCount; ++i, ++chunkFracturesCount) { const NvBlastChunkFractureData& command = 
commands->chunkFractures[i]; TkActorImpl* actor = getActorByChunk(command.chunkIndex); if (actor != currActor) { applyFracture(); currActor = actor; } } if (chunkFracturesCount > 0) { applyFracture(); } } } void TkFamilyImpl::updateJoints(TkActorImpl* actor, TkEventQueue* alternateQueue) { // Copy joint array for safety against implementation of joint->setActor TkJointImpl** joints = reinterpret_cast<TkJointImpl**>(NvBlastAlloca(sizeof(TkJointImpl*)*actor->getJointCountInternal())); TkJointImpl** stop = joints + actor->getJointCountInternal(); TkJointImpl** jointHandle = joints; for (TkActorImpl::JointIt j(*actor); (bool)j; ++j) { *jointHandle++ = *j; } jointHandle = joints; while (jointHandle < stop) { TkJointImpl* joint = *jointHandle++; const TkJointData& data = joint->getDataInternal(); TkActorImpl* actor0 = data.actors[0] != nullptr ? static_cast<TkActorImpl&>(*data.actors[0]).getFamilyImpl().getActorByChunk(data.chunkIndices[0]) : nullptr; TkActorImpl* actor1 = data.actors[1] != nullptr ? static_cast<TkActorImpl&>(*data.actors[1]).getFamilyImpl().getActorByChunk(data.chunkIndices[1]) : nullptr; joint->setActors(actor0, actor1, alternateQueue); } } const TkAsset* TkFamilyImpl::getAsset() const { return m_asset; } //////// Static functions //////// TkFamilyImpl* TkFamilyImpl::create(const TkAssetImpl* asset) { TkFamilyImpl* family = NVBLAST_NEW(TkFamilyImpl); family->m_asset = asset; void* mem = NVBLAST_ALLOC_NAMED(NvBlastAssetGetFamilyMemorySize(asset->getAssetLL(), logLL), "TkFamilyImpl::create"); family->m_familyLL = NvBlastAssetCreateFamily(mem, asset->getAssetLL(), logLL); //family->addListener(*TkFrameworkImpl::get()); if (family->m_familyLL == nullptr) { NVBLAST_LOG_ERROR("TkFamilyImpl::create: low-level family could not be created."); family->release(); return nullptr; } uint32_t maxActorCount = NvBlastFamilyGetMaxActorCount(family->m_familyLL, logLL); family->m_actors.resize(maxActorCount); family->m_internalJointBuffer.resize(asset->getJointDescCountInternal() * sizeof(TkJointImpl), 0); family->m_internalJointCount = asset->getJointDescCountInternal(); return family; } TkJointImpl** TkFamilyImpl::createExternalJointHandle(const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1) { JointSet* jointSet; const FamilyIDMap::Entry* jointSetIndexEntry = m_familyIDMap.find(otherFamilyID); uint32_t otherFamilyIndex; if (jointSetIndexEntry != nullptr) { otherFamilyIndex = jointSetIndexEntry->second; jointSet = m_jointSets[otherFamilyIndex]; } else { jointSet = NVBLAST_NEW(JointSet); NVBLAST_CHECK_ERROR(jointSet != nullptr, "TkFamilyImpl::addExternalJoint: failed to create joint set for other family ID.", return nullptr); jointSet->m_familyID = otherFamilyID; otherFamilyIndex = m_jointSets.size(); m_familyIDMap[otherFamilyID] = otherFamilyIndex; m_jointSets.pushBack(jointSet); } const ExternalJointKey key(chunkIndex0, chunkIndex1); const bool jointExists = jointSet->m_joints.find(key) != nullptr; NVBLAST_CHECK_WARNING(!jointExists, "TkFamilyImpl::addExternalJoint: joint already added.", return nullptr); return &jointSet->m_joints[key]; } bool TkFamilyImpl::deleteExternalJointHandle(TkJointImpl*& joint, const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1) { const FamilyIDMap::Entry* jointSetIndexEntry = m_familyIDMap.find(otherFamilyID); if (jointSetIndexEntry != nullptr) { const uint32_t jointSetIndex = jointSetIndexEntry->second; ExternalJointKey jointKey = ExternalJointKey(chunkIndex0, chunkIndex1); const HashMap<ExternalJointKey, 
TkJointImpl*>::type::Entry* e = m_jointSets[jointSetIndex]->m_joints.find(jointKey); if (e != nullptr) { joint = e->second; // Return value that was stored m_jointSets[jointSetIndex]->m_joints.erase(jointKey); // Delete the joint set if it is empty if (m_jointSets[jointSetIndex]->m_joints.size() == 0) { NVBLAST_DELETE(m_jointSets[jointSetIndex], JointSet); m_jointSets.replaceWithLast(jointSetIndex); m_familyIDMap.erase(otherFamilyID); if (jointSetIndex < m_jointSets.size()) { m_familyIDMap[m_jointSets[jointSetIndex]->m_familyID] = jointSetIndex; } } return true; } } return false; } TkJointImpl* TkFamilyImpl::findExternalJoint(const TkFamilyImpl* otherFamily, ExternalJointKey key) const { const FamilyIDMap::Entry* jointSetIndexEntry = m_familyIDMap.find(getFamilyID(otherFamily)); if (jointSetIndexEntry != nullptr) { const HashMap<ExternalJointKey, TkJointImpl*>::type::Entry* e = m_jointSets[jointSetIndexEntry->second]->m_joints.find(key); if (e != nullptr) { return e->second; } } return nullptr; } } // namespace Blast } // namespace Nv
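A brief sketch of enumerating a family's active actors through the public TkFamily interface, mirroring getActors() above; the helper and its buffer handling are illustrative.

// Hedged sketch: copy all active actor pointers of a family into a vector.
#include "NvBlastTk.h"
#include <vector>

uint32_t collectFamilyActors(Nv::Blast::TkFamily& family, std::vector<Nv::Blast::TkActor*>& out)
{
    out.resize(family.getActorCount());
    const uint32_t written = family.getActors(out.data(), (uint32_t)out.size());
    out.resize(written);  // getActors returns the number of pointers actually written
    return written;
}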
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTask.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastGlobals.h" #include "NvBlastTkTask.h" #include "NvCpuDispatcher.h" #include "NvBlastTkGroup.h" using namespace Nv::Blast; uint32_t TkGroupTaskManagerImpl::process(uint32_t workerCount) { NVBLAST_CHECK_WARNING(m_group != nullptr, "TkGroupTaskManager::process cannot process, no group set.", return 0); NVBLAST_CHECK_WARNING(m_sync.isDone(), "TkGroupTaskManager::process group is already being processed.", return 0); // at least one task must start, even when dispatcher has none specified uint32_t dispatcherThreads = m_taskManager.getCpuDispatcher()->getWorkerCount(); dispatcherThreads = dispatcherThreads > 0 ? dispatcherThreads : 1; // not expecting an arbitrary amount of tasks uint32_t availableTasks = TASKS_MAX_COUNT; // use workerCount tasks, unless dispatcher has less threads or less tasks are available uint32_t requestedTasks = workerCount > 0 ? workerCount : dispatcherThreads; requestedTasks = requestedTasks > dispatcherThreads ? dispatcherThreads : requestedTasks; requestedTasks = requestedTasks > availableTasks ? availableTasks : requestedTasks; // ensure the group has enough memory allocated for concurrent processing m_group->setWorkerCount(requestedTasks); // check if there is work to do uint32_t jobCount = m_group->startProcess(); if (jobCount) { // don't start more tasks than jobs are available requestedTasks = requestedTasks > jobCount ? 
jobCount : requestedTasks; // common counter for all tasks m_counter.reset(jobCount); // set to busy state m_sync.setCount(requestedTasks); // set up tasks for (uint32_t i = 0; i < requestedTasks; i++) { m_tasks[i].setup(m_group, &m_counter, &m_sync); m_tasks[i].setContinuation(m_taskManager, nullptr); m_tasks[i].removeReference(); } return requestedTasks; } // there was no work to be done return 0; } bool TkGroupTaskManagerImpl::wait(bool block) { if (block && !m_sync.isDone()) { m_sync.wait(); } if (m_sync.isDone()) { return m_group->endProcess(); } return false; } void TkGroupTaskManagerImpl::setGroup(TkGroup* group) { NVBLAST_CHECK_WARNING(m_sync.isDone(), "TkGroupTaskManager::setGroup trying to change group while processing.", return); m_group = group; } TkGroupTaskManager* TkGroupTaskManager::create(nvidia::task::NvTaskManager& taskManager, TkGroup* group) { return NVBLAST_NEW(TkGroupTaskManagerImpl) (taskManager, group); } void TkGroupTaskManagerImpl::release() { NVBLAST_CHECK_WARNING(m_sync.isDone(), "TkGroupTaskManager::release group is still being processed.", return); NVBLAST_DELETE(this, TkGroupTaskManagerImpl); }
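A hedged sketch of driving the task-manager path implemented above; obtaining the NvTaskManager and the TkGroup is outside its scope, and passing 0 to process() requests as many tasks as the dispatcher allows, per the logic above.

// Hedged sketch: process a group through TkGroupTaskManager and block for completion.
#include "NvBlastTkTask.h"

void processGroupWithTasks(nvidia::task::NvTaskManager& taskManager, Nv::Blast::TkGroup& group)
{
    Nv::Blast::TkGroupTaskManager* gtm = Nv::Blast::TkGroupTaskManager::create(taskManager, &group);

    const uint32_t tasksStarted = gtm->process(0);  // 0: use the dispatcher's worker count; returns 0 if no work
    if (tasksStarted > 0)
    {
        gtm->wait(true);  // blocks, then ends the group's processing internally
    }
    gtm->release();
}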
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkGroupImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvPreprocessor.h" #include "NvBlastAssert.h" #include "NvBlast.h" #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkGroupImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkTaskImpl.h" #undef max #undef min #include <algorithm> using namespace nvidia; namespace Nv { namespace Blast { //////// Static data //////// NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(Group); //////// Member functions //////// TkGroupImpl::TkGroupImpl() : m_actorCount(0), m_isProcessing(false) { #if NV_PROFILE memset(&m_stats, 0, sizeof(TkGroupStats)); #endif } TkGroupImpl::~TkGroupImpl() { NVBLAST_ASSERT(getActorCount() == 0); NVBLAST_ASSERT(m_sharedMemory.size() == 0); } void TkGroupImpl::release() { if (isProcessing()) { // abort all processing? 
NVBLAST_LOG_ERROR("TkGroup::release: cannot release Group while processing."); NVBLAST_ALWAYS_ASSERT_MESSAGE("TkGroup::release: cannot release Group while processing."); return; } for (auto it = m_sharedMemory.getIterator(); !it.done(); ++it) { TkFamilyImpl* family = it->first; for (TkActorImpl& actor : family->getActorsInternal()) { if (actor.m_group == this) { removeActorInternal(actor); } } SharedMemory* mem = it->second; mem->release(); NVBLAST_DELETE(mem, SharedMemory); } m_sharedMemory.clear(); m_bondTempDataBlock.release(); m_chunkTempDataBlock.release(); m_bondEventDataBlock.release(); m_chunkEventDataBlock.release(); m_splitScratchBlock.release(); NVBLAST_DELETE(this, TkGroupImpl); } void TkGroupImpl::addActorsInternal(TkActorImpl** actors, uint32_t numActors) { for (uint32_t i = 0; i < numActors; i++) { addActorInternal(*actors[i]); } } void TkGroupImpl::addActorInternal(TkActorImpl& tkActor) { NVBLAST_ASSERT(tkActor.getGroup() == nullptr); tkActor.m_group = this; m_actorCount++; } bool TkGroupImpl::addActor(TkActor& actor) { TkActorImpl& tkActor = static_cast<TkActorImpl&>(actor); if (tkActor.getGroup() != nullptr) { NVBLAST_LOG_ERROR("TkGroup::addActor: actor already belongs to a Group. Remove from current group first."); return false; } if (isProcessing()) { NVBLAST_LOG_ERROR("TkGroup::addActor: cannot alter Group while processing."); return false; } // mark the actor that it now belongs to this group addActorInternal(tkActor); // actors that were fractured already or have damage requested // must be enqueued to be processed if (tkActor.isPending()) { enqueue(&tkActor); } TkFamilyImpl& family = tkActor.getFamilyImpl(); SharedMemory* mem = m_sharedMemory[&family]; if (mem == nullptr) { // the actor belongs to a family not involved in this group yet // shared memory must be allocated and temporary buffers adjusted accordingly BLAST_PROFILE_ZONE_BEGIN("family memory"); mem = NVBLAST_NEW(SharedMemory); mem->allocate(family); m_sharedMemory[&family] = mem; BLAST_PROFILE_ZONE_END("family memory"); BLAST_PROFILE_ZONE_BEGIN("group memory"); const uint32_t workerCount = m_workers.size(); NvBlastLog theLog = logLL; // this group's tasks will use one temporary buffer each, which is of max size of, for all families involved const size_t requiredScratch = NvBlastActorGetRequiredScratchForSplit(tkActor.getActorLL(), theLog); if (static_cast<size_t>(m_splitScratchBlock.numElementsPerBlock()) < requiredScratch) { m_splitScratchBlock.release(); m_splitScratchBlock.allocate(static_cast<uint32_t>(requiredScratch), workerCount); } // generate and apply fracture may create an entry for each bond const uint32_t bondCount = NvBlastAssetGetBondCount(tkActor.getAsset()->getAssetLL(), theLog); if (m_bondTempDataBlock.numElementsPerBlock() < bondCount) { m_bondTempDataBlock.release(); m_bondTempDataBlock.allocate(bondCount, workerCount); m_bondEventDataBlock.release(); m_bondEventDataBlock.allocate(bondCount, workerCount); } // apply fracture may create an entry for each lower-support chunk const uint32_t graphNodeCount = NvBlastAssetGetSupportGraph(tkActor.getAsset()->getAssetLL(), theLog).nodeCount; const uint32_t subsupportChunkCount = NvBlastAssetGetChunkCount(tkActor.getAsset()->getAssetLL(), theLog) - NvBlastAssetGetFirstSubsupportChunkIndex(tkActor.getAsset()->getAssetLL(), theLog); const uint32_t chunkCount = graphNodeCount + subsupportChunkCount; if (m_chunkTempDataBlock.numElementsPerBlock() < chunkCount) { m_chunkTempDataBlock.release(); m_chunkTempDataBlock.allocate(chunkCount, workerCount); 
m_chunkEventDataBlock.release(); m_chunkEventDataBlock.allocate(chunkCount, workerCount); } BLAST_PROFILE_ZONE_END("group memory"); } mem->addReference(); return true; } uint32_t TkGroupImpl::getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart /* = 0 */) const { BLAST_PROFILE_SCOPE_L("TkGroup::getActors"); uint32_t actorCount = m_actorCount; if (actorCount <= indexStart) { NVBLAST_LOG_WARNING("TkGroup::getActors: indexStart beyond end of actor list."); return 0; } actorCount -= indexStart; if (actorCount > bufferSize) { actorCount = bufferSize; } uint32_t index = 0; bool done = false; for (auto it = const_cast<TkGroupImpl*>(this)->m_sharedMemory.getIterator(); !it.done();++it) { TkFamilyImpl* fam = it->first; for (TkActorImpl& actor : fam->getActorsInternal()) { if (actor.m_group == this) { NVBLAST_ASSERT(actor.isActive()); if (index >= indexStart) { *buffer++ = &actor; } index++; done = (index - indexStart) >= actorCount; } if (done) break; } if (done) break; } return actorCount; } void TkGroupImpl::removeActorInternal(TkActorImpl& tkActor) { NVBLAST_ASSERT(tkActor.m_group == this); tkActor.m_group = nullptr; m_actorCount--; } void TkGroupImpl::releaseSharedMemory(TkFamilyImpl* fam, SharedMemory* mem) { NVBLAST_ASSERT(mem != nullptr && m_sharedMemory[fam] == mem); mem->release(); m_sharedMemory.erase(fam); NVBLAST_DELETE(mem, SharedMemory); } bool TkGroupImpl::removeActor(TkActor& actor) { TkActorImpl& tkActor = static_cast<TkActorImpl&>(actor); if (tkActor.getGroup() != this) { NVBLAST_LOG_ERROR("TkGroup::removeActor: actor does not belong to this Group."); return false; } if (isProcessing()) { NVBLAST_LOG_ERROR("TkGroup::removeActor: cannot alter Group while processing."); return false; } removeActorInternal(tkActor); // pending actors must be removed from the job queue as well if(tkActor.isPending()) { uint32_t index = tkActor.m_groupJobIndex; tkActor.m_groupJobIndex = invalidIndex<uint32_t>(); if (index < m_jobs.size()) { m_jobs.replaceWithLast(index); if (index < m_jobs.size()) { NVBLAST_ASSERT(m_jobs[index].m_tkActor->m_groupJobIndex == m_jobs.size()); NVBLAST_ASSERT(m_jobs[index].m_tkActor->isPending()); m_jobs[index].m_tkActor->m_groupJobIndex = index; } } } // if the actor is the last of its family in this group // the group-family memory can be released TkFamilyImpl* family = &tkActor.getFamilyImpl(); SharedMemory* mem = getSharedMemory(family); if (mem->removeReference()) { releaseSharedMemory(family, mem); } return true; } TkGroupImpl* TkGroupImpl::create(const TkGroupDesc& desc) { TkGroupImpl* group = NVBLAST_NEW(TkGroupImpl); group->setWorkerCount(desc.workerCount); return group; } void TkGroupImpl::setWorkerCount(uint32_t workerCount) { if (isProcessing()) { NVBLAST_LOG_WARNING("TkGroup::setWorkerCount: Group is still processing, call TkGroup::endProcess first."); return; } if (workerCount == 0) { NVBLAST_LOG_WARNING("TkGroup: attempting to create a Group with 0 workers. 
Forced to 1."); workerCount = 1; } if (workerCount != m_workers.size()) { m_workers.resize(workerCount); uint32_t workerId = 0; for (auto& worker : m_workers) { worker.m_id = workerId++; worker.m_group = this; } const uint32_t bondCount = m_bondTempDataBlock.numElementsPerBlock(); if (bondCount > 0) { m_bondTempDataBlock.release(); m_bondTempDataBlock.allocate(bondCount, workerCount); m_bondEventDataBlock.release(); m_bondEventDataBlock.allocate(bondCount, workerCount); } const uint32_t chunkCount = m_chunkTempDataBlock.numElementsPerBlock(); if (chunkCount > 0) { m_chunkTempDataBlock.release(); m_chunkTempDataBlock.allocate(chunkCount, workerCount); m_chunkEventDataBlock.release(); m_chunkEventDataBlock.allocate(chunkCount, workerCount); } const uint32_t scratchSize = m_splitScratchBlock.numElementsPerBlock(); if (scratchSize > 0) { m_splitScratchBlock.release(); m_splitScratchBlock.allocate(scratchSize, workerCount); } } } NV_INLINE uint32_t TkGroupImpl::getWorkerCount() const { return m_workers.size(); } uint32_t TkGroupImpl::startProcess() { BLAST_PROFILE_SCOPE_L("TkGroup::startProcess"); if (!setProcessing(true)) { NVBLAST_LOG_WARNING("TkGroup::process: Group is still processing, call TkGroup::endProcess first."); return 0; } if (m_jobs.size() > 0) { BLAST_PROFILE_ZONE_BEGIN("task setup"); BLAST_PROFILE_ZONE_BEGIN("setup job queue"); for (const auto& job : m_jobs) { const TkActorImpl* a = job.m_tkActor; SharedMemory* mem = getSharedMemory(&a->getFamilyImpl()); const uint32_t damageCount = a->m_damageBuffer.size(); // applyFracture'd actor do not necessarily have damage queued NVBLAST_ASSERT(damageCount > 0 || a->m_flags.isSet(TkActorFlag::DAMAGED)); // no reason to be here without these NVBLAST_ASSERT(a->m_flags.isSet(TkActorFlag::PENDING)); NVBLAST_ASSERT(a->m_group == this); // collect the amount of event payload memory to preallocate for TkWorkers mem->m_eventsMemory += damageCount * (sizeof(TkFractureCommands) + sizeof(TkFractureEvents)) + sizeof(TkSplitEvent); // collect the amount of event entries to preallocate for TkWorkers // (two TkFracture* events per damage plus one TkSplitEvent) mem->m_eventsCount += 2 * damageCount + 1; } BLAST_PROFILE_ZONE_END("setup job queue"); BLAST_PROFILE_ZONE_BEGIN("memory protect"); for (auto it = m_sharedMemory.getIterator(); !it.done(); ++it) { // preallocate the event memory for TkWorkers SharedMemory* mem = it->second; mem->m_events.reserveData(mem->m_eventsMemory); mem->m_events.reserveEvents(mem->m_eventsCount); // these counters are not used anymore // reset them immediately for next time mem->m_eventsCount = 0; mem->m_eventsMemory = 0; // switch to parallel mode mem->m_events.protect(true); } BLAST_PROFILE_ZONE_END("memory protect"); BLAST_PROFILE_ZONE_END("task setup"); for (auto&worker : m_workers) { worker.initialize(); } return m_jobs.size(); } else { bool success = setProcessing(false); NVBLAST_ASSERT(success); NV_UNUSED(success); return 0; } } bool TkGroupImpl::endProcess() { if (isProcessing()) { BLAST_PROFILE_SCOPE_L("TkGroupImpl::endProcess"); if (m_jobs.size() > 0) { #if NV_PROFILE BLAST_PROFILE_ZONE_BEGIN("accumulate timers"); NvBlastTimers accumulated; NvBlastTimersReset(&accumulated); uint32_t jobCount = 0; int64_t workerTime = 0; for (TkWorker& worker : m_workers) { accumulated += worker.m_stats.timers; jobCount += worker.m_stats.processedActorsCount; workerTime += worker.m_stats.workerTime; } m_stats.timers = accumulated; m_stats.processedActorsCount = jobCount; m_stats.workerTime = workerTime; 
BLAST_PROFILE_ZONE_END("accumulate timers"); #endif BLAST_PROFILE_ZONE_BEGIN("job update"); for (auto& j : m_jobs) { if (j.m_newActorsCount) { TkFamilyImpl* fam = &j.m_tkActor->getFamilyImpl(); SharedMemory* mem = getSharedMemory(fam); // as LL is implemented, where newActorsCount the parent is always deleted removeActorInternal(*j.m_tkActor); mem->removeReference(); addActorsInternal(j.m_newActors, j.m_newActorsCount); mem->addReference(j.m_newActorsCount); // Update joints mem->m_events.protect(false); // allow allocations again BLAST_PROFILE_ZONE_BEGIN("updateJoints"); fam->updateJoints(j.m_tkActor, &mem->m_events); BLAST_PROFILE_ZONE_END("updateJoints"); } // virtually dequeue the actor // the queue itself is cleared right after this loop j.m_tkActor->m_flags.clear(TkActorFlag::PENDING); j.m_tkActor->m_groupJobIndex = invalidIndex<uint32_t>(); BLAST_PROFILE_ZONE_BEGIN("damageBuffer.clear"); j.m_tkActor->m_damageBuffer.clear(); BLAST_PROFILE_ZONE_END("damageBuffer.clear"); } m_jobs.clear(); BLAST_PROFILE_ZONE_END("job update"); BLAST_PROFILE_ZONE_BEGIN("event dispatch"); for (auto it = m_sharedMemory.getIterator(); !it.done(); ++it) { BLAST_PROFILE_SCOPE_L("event dispatch"); TkFamilyImpl* family = it->first; SharedMemory* mem = it->second; NVBLAST_ASSERT(family != nullptr); NVBLAST_ASSERT(mem != nullptr && mem->isUsed()); // where no actor of a family has split, // its group/family event queue has not been // unprotected in the jobs loop above mem->m_events.protect(false); family->getQueue().dispatch(mem->m_events); mem->m_events.reset(); mem->reset(); } BLAST_PROFILE_ZONE_END("event dispatch"); BLAST_PROFILE_ZONE_BEGIN("event memory release"); for (auto& worker : m_workers) { worker.m_bondBuffer.clear(); worker.m_chunkBuffer.clear(); } BLAST_PROFILE_ZONE_END("event memory release"); } bool success = setProcessing(false); NVBLAST_ASSERT(success); return success; } return false; } bool TkGroupImpl::setProcessing(bool value) { bool expected = !value; return m_isProcessing.compare_exchange_strong(expected, value); } void TkGroupImpl::enqueue(TkActorImpl* tkActor) { NVBLAST_ASSERT(tkActor->getGroupImpl() != nullptr); NVBLAST_ASSERT(tkActor->getGroupImpl() == this); NVBLAST_ASSERT(isInvalidIndex(tkActor->m_groupJobIndex)); NVBLAST_ASSERT(isProcessing() == false); #if NV_DEBUG for (TkWorkerJob& j : m_jobs) { NVBLAST_ASSERT(j.m_tkActor != tkActor); } #endif tkActor->m_groupJobIndex = m_jobs.size(); TkWorkerJob& j = m_jobs.insert(); j.m_tkActor = tkActor; } TkGroupWorker* TkGroupImpl::acquireWorker() { BLAST_PROFILE_SCOPE_L("TkGroupImpl::acquireWorker"); std::unique_lock<std::mutex> lk(m_workerMtx); for (auto& worker:m_workers) { if (!worker.m_isBusy) { worker.m_isBusy = true; return &worker; } } return nullptr; } void TkGroupImpl::returnWorker(TkGroupWorker* worker) { BLAST_PROFILE_SCOPE_L("TkGroupImpl::returnWorker"); std::unique_lock<std::mutex> lk(m_workerMtx); auto w = static_cast<TkWorker*>(worker); NVBLAST_CHECK_WARNING(w->m_group == this, "TkGroup::returnWorker worker does not belong to this group.", return); w->m_isBusy = false; } } // namespace Blast } // namespace Nv
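// Usage sketch: a minimal, single-threaded driver for the processing interface implemented above.
// It assumes only the public TkGroup/TkGroupWorker declarations from NvBlastTkGroup.h (including a
// TkGroupWorker::process(jobID) method, as implemented by TkWorker); a real integration would hand
// job indices to several tasks, each acquiring its own worker.
static void processGroupSerially(Nv::Blast::TkGroup& group)
{
    const uint32_t jobCount = group.startProcess();             // pending actors become jobs
    if (jobCount > 0)
    {
        Nv::Blast::TkGroupWorker* worker = group.acquireWorker();
        if (worker != nullptr)
        {
            for (uint32_t jobID = 0; jobID < jobCount; ++jobID)
            {
                worker->process(jobID);                         // generate/apply fracture and split one actor
            }
            group.returnWorker(worker);
        }
    }
    group.endProcess();                                         // dispatches buffered TkEvents to each family's listeners
}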
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTaskImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastTime.h" #include "NvBlastTkTaskImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkGroupImpl.h" using namespace Nv::Blast; void SharedMemory::allocate(TkFamilyImpl& tkFamily) { NVBLAST_ASSERT(m_refCount == 0); const NvBlastAsset* assetLL = tkFamily.getAsset()->getAssetLL(); // at most leafChunkCount actors can be created within a family // tasks will grab their portion out of these memory blocks uint32_t leafChunkCount = NvBlastAssetGetLeafChunkCount(assetLL, logLL); m_newActorBuffers.allocate(2 * leafChunkCount); // GWD-167 workaround (2*) m_newTkActorBuffers.allocate(leafChunkCount); } /** Creates a TkEvent::FractureCommand according to the input buffer for tkActor into events queue using the LocalBuffers to store the actual event data. */ NV_FORCE_INLINE void reportFractureCommands( const NvBlastFractureBuffers& buffer, LocalBuffer<NvBlastBondFractureData>& bondBuffer, LocalBuffer<NvBlastChunkFractureData>& chunkBuffer, TkEventQueue& events, const TkActorImpl* tkActor) { NvBlastBondFractureData* bdata = nullptr; if (buffer.bondFractureCount > 0) { bdata = bondBuffer.allocate(buffer.bondFractureCount); memcpy(bdata, buffer.bondFractures, sizeof(NvBlastBondFractureData)*buffer.bondFractureCount); } NvBlastChunkFractureData* cdata = nullptr; if (buffer.chunkFractureCount > 0) { cdata = chunkBuffer.allocate(buffer.chunkFractureCount); memcpy(cdata, buffer.chunkFractures, sizeof(NvBlastChunkFractureData)*buffer.chunkFractureCount); } TkFractureCommands* fevt = events.allocData<TkFractureCommands>(); fevt->tkActorData = *tkActor; fevt->buffers = { buffer.bondFractureCount, buffer.chunkFractureCount, bdata, cdata }; events.addEvent(fevt); } /** Creates a TkEvent::FractureEvent according to the input buffer for tkActor into events queue using the LocalBuffers to store the actual event data. 
*/ NV_FORCE_INLINE void reportFractureEvents( const NvBlastFractureBuffers& buffer, LocalBuffer<NvBlastBondFractureData>& bondBuffer, LocalBuffer<NvBlastChunkFractureData>& chunkBuffer, TkEventQueue& events, const TkActorImpl* tkActor) { uint32_t result[4] = { 0,0,0,0 }; NvBlastBondFractureData* bdata = nullptr; if (buffer.bondFractureCount > 0) { bdata = bondBuffer.allocate(buffer.bondFractureCount); for (uint32_t b = 0; b < buffer.bondFractureCount; ++b) { bdata[b] = buffer.bondFractures[b]; result[buffer.bondFractures[b].health > 0 ? 0 : 1]++; } } NvBlastChunkFractureData* cdata = nullptr; if (buffer.chunkFractureCount > 0) { cdata = chunkBuffer.allocate(buffer.chunkFractureCount); for (uint32_t c = 0; c < buffer.chunkFractureCount; ++c) { cdata[c] = buffer.chunkFractures[c]; result[buffer.chunkFractures[c].health > 0 ? 2 : 3]++; } } TkFractureEvents* fevt = events.allocData<TkFractureEvents>(); fevt->tkActorData = *tkActor; fevt->buffers = { buffer.bondFractureCount, buffer.chunkFractureCount, bdata, cdata }; fevt->bondsDamaged = result[0]; fevt->bondsBroken = result[1]; fevt->chunksDamaged = result[2]; fevt->chunksBroken = result[3]; events.addEvent(fevt); } void TkWorker::initialize() { // temporary memory used to generate and apply fractures // it must fit for the largest family involved in the group that owns this worker NvBlastBondFractureData* bondFractureData = m_group->m_bondTempDataBlock.getBlock(m_id); uint32_t bondFractureCount = m_group->m_bondTempDataBlock.numElementsPerBlock(); NvBlastChunkFractureData* chunkFractureData = m_group->m_chunkTempDataBlock.getBlock(m_id); uint32_t chunkFractureCount = m_group->m_chunkTempDataBlock.numElementsPerBlock(); m_tempBuffer = { bondFractureCount, chunkFractureCount, bondFractureData, chunkFractureData }; // temporary memory used to split the actor // large enough for the largest family involved m_splitScratch = m_group->m_splitScratchBlock.getBlock(m_id); // to avoid unnecessary allocations, preallocated memory exists to fit all chunks and bonds taking damage once // where multiple damage occurs, more memory will be allocated on demand (this may thwart other threads doing the same) m_bondBuffer.initialize(m_group->m_bondEventDataBlock.getBlock(m_id), m_group->m_bondEventDataBlock.numElementsPerBlock()); m_chunkBuffer.initialize(m_group->m_chunkEventDataBlock.getBlock(m_id), m_group->m_chunkEventDataBlock.numElementsPerBlock()); #if NV_PROFILE NvBlastTimersReset(&m_stats.timers); m_stats.processedActorsCount = 0; #endif } void TkWorker::process(TkWorkerJob& j) { NvBlastTimers* timers = nullptr; BLAST_PROFILE_SCOPE_M("TkActor"); TkActorImpl* tkActor = j.m_tkActor; const uint32_t tkActorIndex = tkActor->getIndex(); NvBlastActor* actorLL = tkActor->getActorLLInternal(); TkFamilyImpl& family = tkActor->getFamilyImpl(); SharedMemory* mem = m_group->getSharedMemory(&family); TkEventQueue& events = mem->m_events; NVBLAST_ASSERT(tkActor->getGroupImpl() == m_group); NVBLAST_ASSERT(tkActor->m_flags.isSet(TkActorFlag::PENDING)); #if NV_PROFILE timers = &m_stats.timers; *timers += tkActor->m_timers; NvBlastTimersReset(&tkActor->m_timers); m_stats.processedActorsCount++; #endif // generate and apply fracture for all damage requested on this actor // and queue events accordingly for (const auto& damage : tkActor->m_damageBuffer) { NvBlastFractureBuffers commandBuffer = m_tempBuffer; BLAST_PROFILE_ZONE_BEGIN("Material"); NvBlastActorGenerateFracture(&commandBuffer, actorLL, damage.program, damage.programParams, logLL, timers); 
BLAST_PROFILE_ZONE_END("Material"); if (commandBuffer.chunkFractureCount > 0 || commandBuffer.bondFractureCount > 0) { BLAST_PROFILE_SCOPE_M("Fill Command Events"); reportFractureCommands(commandBuffer, m_bondBuffer, m_chunkBuffer, events, tkActor); } NvBlastFractureBuffers eventBuffer = m_tempBuffer; BLAST_PROFILE_ZONE_BEGIN("Fracture"); NvBlastActorApplyFracture(&eventBuffer, actorLL, &commandBuffer, logLL, timers); BLAST_PROFILE_ZONE_END("Fracture"); if (eventBuffer.chunkFractureCount > 0 || eventBuffer.bondFractureCount > 0) { BLAST_PROFILE_SCOPE_M("Fill Fracture Events"); tkActor->m_flags |= (TkActorFlag::DAMAGED); reportFractureEvents(eventBuffer, m_bondBuffer, m_chunkBuffer, events, tkActor); } } // split the actor, which could have been damaged directly though the TkActor's fracture functions // i.e. it did not have damage queued for the above loop NvBlastActorSplitEvent splitEvent = { nullptr, nullptr }; if (tkActor->isDamaged()) { BLAST_PROFILE_ZONE_BEGIN("Split Memory"); uint32_t maxActorCount = NvBlastActorGetMaxActorCountForSplit(actorLL, logLL); splitEvent.newActors = mem->reserveNewActors(maxActorCount); BLAST_PROFILE_ZONE_END("Split Memory"); BLAST_PROFILE_ZONE_BEGIN("Split"); j.m_newActorsCount = NvBlastActorSplit(&splitEvent, actorLL, maxActorCount, m_splitScratch, logLL, timers); BLAST_PROFILE_ZONE_END("Split"); tkActor->m_flags.clear(TkActorFlag::DAMAGED); } else { j.m_newActorsCount = 0; } // update the TkActor according to the LL split results and queue events accordingly if (j.m_newActorsCount > 0) { NVBLAST_ASSERT(splitEvent.deletedActor == tkActor->getActorLL()); BLAST_PROFILE_ZONE_BEGIN("memory new actors"); auto tkSplitEvent = events.allocData<TkSplitEvent>(); tkSplitEvent->children = mem->reserveNewTkActors(j.m_newActorsCount); tkSplitEvent->numChildren = j.m_newActorsCount; tkSplitEvent->parentData.family = &family; tkSplitEvent->parentData.userData = tkActor->userData; tkSplitEvent->parentData.index = tkActorIndex; family.removeActor(tkActor); BLAST_PROFILE_ZONE_END("memory new actors"); BLAST_PROFILE_ZONE_BEGIN("create new actors"); for (uint32_t i = 0; i < j.m_newActorsCount; ++i) { TkActorImpl* newActor = family.addActor(splitEvent.newActors[i]); tkSplitEvent->children[i] = newActor; } j.m_newActors = reinterpret_cast<TkActorImpl**>(tkSplitEvent->children); BLAST_PROFILE_ZONE_END("create new actors"); BLAST_PROFILE_ZONE_BEGIN("split event"); events.addEvent(tkSplitEvent); BLAST_PROFILE_ZONE_END("split event"); } j.m_tkActor->m_flags.clear(TkActorFlag::PENDING); } void TkWorker::process(uint32_t jobID) { TkWorkerJob& j = m_group->m_jobs[jobID]; process(j); }
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkActorImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKACTORIMPL_H #define NVBLASTTKACTORIMPL_H #include "NvBlastTkCommon.h" #include "NvBlastAssert.h" #include "NvBlastDLink.h" #include "NvBlastIteratorBase.h" #include "NvBlastTkJointImpl.h" #include "NvBlast.h" #include "NvBlastTkActor.h" #include "NvFlags.h" namespace Nv { namespace Blast { // Forward declarations: class TkGroupImpl; class TkFamilyImpl; class TkAssetImpl; class TkJointImpl; /** Struct-enum for actor status flags, used in TkGroup processing. */ struct TkActorFlag { enum Enum { DAMAGED = (1 << 0), //!< The actor had fractures applied successfully and will take the split step. PENDING = (1 << 1), //!< The actor will be processed when its group executes, used to update job queues when moving group. }; }; /** Implementation of TkActor. 
*/ class TkActorImpl : public TkActor { public: TkActorImpl(); ~TkActorImpl(); // Begin TkActor virtual const NvBlastActor* getActorLL() const override; virtual TkFamily& getFamily() const override; virtual uint32_t getIndex() const override; virtual TkGroup* getGroup() const override; virtual TkGroup* removeFromGroup() override; virtual const TkAsset* getAsset() const override; virtual uint32_t getVisibleChunkCount() const override; virtual uint32_t getVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize) const override; virtual uint32_t getGraphNodeCount() const override; virtual uint32_t getGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize) const override; virtual const float* getBondHealths() const override; virtual uint32_t getSplitMaxActorCount() const override; virtual void damage(const NvBlastDamageProgram& program, const void* programParams) override; virtual bool isPending() const override; virtual void generateFracture(NvBlastFractureBuffers* commands, const NvBlastDamageProgram& program, const void* programParams) const override; virtual void applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands) override; virtual uint32_t getJointCount() const override; virtual uint32_t getJoints(TkJoint** joints, uint32_t jointsSize) const override; virtual bool hasExternalBonds() const override; // End TkActor // Begin TkObject virtual void release() override; // End TkObject // Public methods /** Factory create method. \param[in] desc Actor descriptor set by the user. \return a pointer to a new TkActorImpl object if successful, NULL otherwise. */ static TkActorImpl* create(const TkActorDesc& desc); /** TkActorImpl objects are created in an array within a TkFamilyImpl. Actors may become 'inactive' without their memory being freed. If inactive, the actor should be treated as if it has been released. \return the active status of this TkActorImpl. */ bool isActive() const; /** Utility to return the low-level family to which the low-level actor belongs. \return a pointer to the NvBlastFamily to which the low-level actor belongs. */ NvBlastFamily* getFamilyLL() const; /** Utility to access the TkFamily to which this actor belongs. \return a reference to the TkFamilyImpl to which this TkActorImpl belongs. */ TkFamilyImpl& getFamilyImpl() const; /** \return the index of this actor with its TkFamilyImpl. */ uint32_t getIndexInternal() const; /** Access to the group to which this actor belongs, if any. \return a pointer to the TkGroupImpl to which this TkActorImpl belongs, if any. If this actor is not in a group, this function returns NULL. */ TkGroupImpl* getGroupImpl() const; /** Access to the low-level actor associated with this TkActorImpl. \return a pointer to the NvBlastActor associated with this TkActorImpl. If this actor is inactive (see isActive), this function returns NULL. */ NvBlastActor* getActorLLInternal() const; /** \return the number of TkJointImpl objects that reference this actor. */ uint32_t getJointCountInternal() const; /** Joint iterator. Usage: Given a TkActorImpl a, for (TkActorImpl::JointIt i(a); (bool)i; ++i) { TkJointImpl* joint = (TkJointImpl*)i; // ... } */ class JointIt : public DList::It { public: /** Constructed from an actor. */ JointIt(const TkActorImpl& actor, Direction dir = Forward); /** Current joint. */ TkJointImpl* operator * () const; }; /** Implicit converter to TkActorData for events. 
*/ operator Nv::Blast::TkActorData() const; private: /** Functions to raise or check 'damaged' state: this actor will take the split step. 'damaged' actors automatically become 'pending' also. */ void markAsDamaged(); bool isDamaged() const; /** Raise actor to 'pending' state: this actor will be processed when its group executes next. Enqueues the actor in its group's job list if a group is set. Otherwise the group will enqueue the actor when it is added. */ void makePending(); /** Functions to add or remove an internal reference to a joint. (Joints and actors mutually reference each other.) */ void addJoint(TkJointLink& jointLink); void removeJoint(TkJointLink& jointLink); struct DamageData { NvBlastDamageProgram program; const void* programParams; }; // Data NvBlastActor* m_actorLL; //!< The low-level actor associated with this actor TkFamilyImpl* m_family; //!< The TkFamilyImpl to which this actor belongs TkGroupImpl* m_group; //!< The TkGroupImpl (if any) to which this actor belongs uint32_t m_groupJobIndex; //!< The index of this actor's job within its group's job list nvidia::NvFlags<TkActorFlag::Enum, char> m_flags; //!< Status flags for this actor Array<DamageData>::type m_damageBuffer; //!< Buffered damage input uint32_t m_jointCount; //!< The number of joints referenced in m_jointList DList m_jointList; //!< A doubly-linked list of joint references //#if NV_PROFILE NvBlastTimers m_timers; //!< If profiling, each actor stores timing data //#endif friend class TkWorker; // m_damageBuffer and m_flags friend class TkGroupImpl; friend class TkFamilyImpl; friend class TkJointImpl; friend class TkFrameworkImpl; }; //////// TkActorImpl inline methods //////// NV_INLINE TkFamilyImpl& TkActorImpl::getFamilyImpl() const { NVBLAST_ASSERT(m_family != nullptr); return *m_family; } NV_INLINE uint32_t TkActorImpl::getIndexInternal() const { NVBLAST_ASSERT(isActive()); return NvBlastActorGetIndex(m_actorLL, logLL); } NV_INLINE NvBlastActor* TkActorImpl::getActorLLInternal() const { return m_actorLL; } NV_INLINE uint32_t TkActorImpl::getJointCountInternal() const { return m_jointCount; } NV_INLINE TkGroupImpl* TkActorImpl::getGroupImpl() const { return m_group; } NV_INLINE bool TkActorImpl::isActive() const { return m_actorLL != nullptr; } NV_INLINE bool TkActorImpl::isPending() const { return m_flags.isSet(TkActorFlag::PENDING); } NV_INLINE void TkActorImpl::addJoint(TkJointLink& jointLink) { NVBLAST_ASSERT(m_jointList.isSolitary(jointLink)); m_jointList.insertHead(jointLink); ++m_jointCount; } NV_INLINE void TkActorImpl::removeJoint(TkJointLink& jointLink) { NVBLAST_ASSERT(!m_jointList.isSolitary(jointLink)); NVBLAST_ASSERT(m_jointCount > 0); if (m_jointCount > 0) { --m_jointCount; m_jointList.remove(jointLink); } } //////// TkActorImpl::JointIt methods //////// NV_INLINE TkActorImpl::JointIt::JointIt(const TkActorImpl& actor, Direction dir) : DList::It(actor.m_jointList, dir) {} NV_INLINE TkJointImpl* TkActorImpl::JointIt::operator * () const { const DLink* link = (const DLink*)(*this); return reinterpret_cast<const TkJointLink*>(link)->m_joint; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKACTORIMPL_H
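// Usage sketch: the damage/pending flow described above, using the TkActor::damage() signature
// declared in this header. The damage program and its parameters are assumed to be supplied by the
// caller (for example, a shader from the damage-shader extension).
static void queueDamage(Nv::Blast::TkActor& actor,
                        const NvBlastDamageProgram& program,
                        const void* programParams)
{
    actor.damage(program, programParams);
    // The actor now holds a buffered DamageData entry and is marked pending (isPending() == true);
    // it will be fractured and split the next time its TkGroup runs startProcess()/endProcess().
}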
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkCommon.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKCOMMON_H #define NVBLASTTKCOMMON_H #include "NvBlastGlobals.h" #include "NvBlastTkGUID.h" // Macro to define standard object classes. An intermediate class is defined which holds common implementations. #define NVBLASTTK_IMPL_DECLARE(_name) \ class Tk##_name##Type : public Tk##_name \ { \ public: \ /* Blank constructor generates a new NvBlastID and informs framework */ \ Tk##_name##Type() \ { \ memset(&m_ID, 0, sizeof(NvBlastID)); \ setID(TkGenerateGUID(this)); \ TkFrameworkImpl::get()->onCreate(*this); \ } \ \ /* This constructor takes an existing NvBlastID and informs framework */ \ Tk##_name##Type(const NvBlastID& id) \ { \ memset(&m_ID, 0, sizeof(NvBlastID)); \ setID(id); \ TkFrameworkImpl::get()->onCreate(*this); \ } \ \ /* Destructor informs framework */ \ ~Tk##_name##Type() { TkFrameworkImpl::get()->onDestroy(*this); } \ \ /* Begin TkIdentifiable */ \ virtual void setID(const NvBlastID& id) override \ { \ /* Inform framework of ID change */ \ TkFrameworkImpl::get()->onIDChange(*this, m_ID, id); \ m_ID = id; \ } \ virtual const NvBlastID& getID() const override { return getIDInternal(); } \ virtual const TkType& getType() const override { return s_type; } \ /* End TkIdentifiable */ \ \ /* Begin public API */ \ \ /* Inline method for internal access to NvBlastID */ \ const NvBlastID& getIDInternal() const { return m_ID; } \ \ /* End public API */ \ \ /* Static type information */ \ static TkTypeImpl s_type; \ \ private: \ NvBlastID m_ID; /* NvBlastID for a TkIdentifiable object */ \ }; \ \ /* Derive object implementation from common implementation class above */ \ class Tk##_name##Impl final : public Tk##_name##Type // Macro to declare standard object interfaces, enums, etc. #define NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE(_id0, _id1, _id2, _id3) \ /* Begin TkObject */ \ virtual void release() override; \ /* End TkObject */ \ \ /* Enums */ \ \ /* Generate a ClassID enum used to identify this TkIdentifiable. 
*/ \ enum { ClassID = NVBLAST_FOURCC(_id0, _id1, _id2, _id3) } // Macro to define class type data #define NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(_name) \ TkTypeImpl Tk##_name##Type::s_type("Tk" #_name, Tk##_name##Impl::ClassID, 0) #endif // ifndef NVBLASTTKCOMMON_H
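// For reference, NVBLASTTK_IMPL_DECLARE(Group), as used in NvBlastTkGroupImpl.h, expands to
// approximately the following (simplified; see the macro above for the exact body):
//
//   class TkGroupType : public TkGroup
//   {
//   public:
//       TkGroupType();                                  // generates a GUID, calls TkFrameworkImpl::get()->onCreate(*this)
//       TkGroupType(const NvBlastID& id);               // uses the supplied ID instead
//       ~TkGroupType();                                 // calls TkFrameworkImpl::get()->onDestroy(*this)
//       virtual void setID(const NvBlastID& id) override;       // notifies the framework via onIDChange()
//       virtual const NvBlastID& getID() const override;
//       virtual const TkType& getType() const override;         // returns s_type
//       const NvBlastID& getIDInternal() const;
//       static TkTypeImpl s_type;
//   private:
//       NvBlastID m_ID;
//   };
//
//   class TkGroupImpl final : public TkGroupType        // the user-written class body follows
//
// The matching NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(Group) in a .cpp then instantiates
// TkTypeImpl TkGroupType::s_type("TkGroup", TkGroupImpl::ClassID, 0).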
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkAssetImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlast.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { //////// Static data //////// NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(Asset); //////// Member functions //////// TkAssetImpl::TkAssetImpl() : m_assetLL(nullptr), m_ownsAsset(false) { } TkAssetImpl::TkAssetImpl(const NvBlastID& id) : TkAssetType(id), m_assetLL(nullptr), m_ownsAsset(false) { } TkAssetImpl::~TkAssetImpl() { if (m_assetLL != nullptr && m_ownsAsset) { NVBLAST_FREE(m_assetLL); } } const NvBlastAsset* TkAssetImpl::getAssetLL() const { return getAssetLLInternal(); } uint32_t TkAssetImpl::getChunkCount() const { return NvBlastAssetGetChunkCount(m_assetLL, logLL); } uint32_t TkAssetImpl::getLeafChunkCount() const { return NvBlastAssetGetLeafChunkCount(m_assetLL, logLL); } uint32_t TkAssetImpl::getBondCount() const { return NvBlastAssetGetBondCount(m_assetLL, logLL); } const NvBlastChunk* TkAssetImpl::getChunks() const { return NvBlastAssetGetChunks(m_assetLL, logLL); } const NvBlastBond* TkAssetImpl::getBonds() const { return NvBlastAssetGetBonds(m_assetLL, logLL); } const NvBlastSupportGraph TkAssetImpl::getGraph() const { return NvBlastAssetGetSupportGraph(m_assetLL, logLL); } uint32_t TkAssetImpl::getDataSize() const { return NvBlastAssetGetSize(m_assetLL, logLL); } uint32_t TkAssetImpl::getJointDescCount() const { return getJointDescCountInternal(); } const TkAssetJointDesc* TkAssetImpl::getJointDescs() const { return getJointDescsInternal(); } void TkAssetImpl::release() { const TkType& tkType = TkFamilyImpl::s_type; const uint32_t num = TkFrameworkImpl::get()->getObjectCount(tkType); if (num) { Array<TkIdentifiable*>::type dependents(num); TkFrameworkImpl::get()->getObjects(dependents.begin(), dependents.size(), tkType); for (TkObject* o : dependents) { TkFamilyImpl* f = static_cast<TkFamilyImpl*>(o); if (f->getAssetImpl() == this) { f->release(); } } } NVBLAST_DELETE(this, TkAssetImpl); } 
//////// Static functions //////// TkAssetImpl* TkAssetImpl::create(const TkAssetDesc& desc) { TkAssetImpl* asset = NVBLAST_NEW(TkAssetImpl); Array<char>::type scratch((uint32_t)NvBlastGetRequiredScratchForCreateAsset(&desc, logLL)); void* mem = NVBLAST_ALLOC_NAMED(NvBlastGetAssetMemorySize(&desc, logLL), "TkAssetImpl::create"); asset->m_assetLL = NvBlastCreateAsset(mem, &desc, scratch.begin(), logLL); if (asset->m_assetLL == nullptr) { NVBLAST_LOG_ERROR("TkAssetImpl::create: low-level asset could not be created."); asset->release(); return nullptr; } if (desc.bondFlags != nullptr) { for (uint32_t bondN = 0; bondN < desc.bondCount; ++bondN) { if (0 != (desc.bondFlags[bondN] & TkAssetDesc::BondJointed)) { const NvBlastBondDesc& bondDesc = desc.bondDescs[bondN]; const uint32_t c0 = bondDesc.chunkIndices[0]; const uint32_t c1 = bondDesc.chunkIndices[1]; if (c0 >= desc.chunkCount || c1 >= desc.chunkCount) { NVBLAST_LOG_WARNING("TkAssetImpl::create: joint flag set for badly described bond. No joint descriptor created."); continue; } if (!asset->addJointDesc(c0, c1)) { NVBLAST_LOG_WARNING("TkAssetImpl::create: no bond corresponds to the user-described bond indices. No joint descriptor created."); } } } } asset->m_ownsAsset = true; // asset->setID(NvBlastAssetGetID(asset->m_assetLL, logLL)); // Keeping LL and Tk IDs distinct return asset; } TkAssetImpl* TkAssetImpl::create(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs, uint32_t jointDescCount, bool ownsAsset) { TkAssetImpl* asset = NVBLAST_NEW(TkAssetImpl); //NOTE: Why are we passing in a const NvBlastAsset* and then discarding the const? asset->m_assetLL = const_cast<NvBlastAsset*>(assetLL); if (asset->m_assetLL == nullptr) { NVBLAST_LOG_ERROR("TkAssetImpl::create: low-level asset could not be created."); asset->release(); return nullptr; } asset->m_ownsAsset = ownsAsset; asset->setID(NvBlastAssetGetID(asset->m_assetLL, logLL)); asset->m_jointDescs.resize(jointDescCount); for (uint32_t i = 0; i < asset->m_jointDescs.size(); ++i) { asset->m_jointDescs[i] = jointDescs[i]; } return asset; } bool TkAssetImpl::addJointDesc(uint32_t chunkIndex0, uint32_t chunkIndex1) { if (m_assetLL == nullptr) { return false; } const uint32_t upperSupportChunkCount = NvBlastAssetGetFirstSubsupportChunkIndex(m_assetLL, logLL); if (chunkIndex0 >= upperSupportChunkCount || chunkIndex1 >= upperSupportChunkCount) { return false; } const uint32_t* chunkToGraphNodeMap = NvBlastAssetGetChunkToGraphNodeMap(m_assetLL, logLL); const uint32_t node0 = chunkToGraphNodeMap[chunkIndex0]; const uint32_t node1 = chunkToGraphNodeMap[chunkIndex1]; const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(m_assetLL, logLL); if (node0 >= graph.nodeCount || node1 >= graph.nodeCount) { return false; } // Find bond index // Iterate through all neighbors of node0 chunk uint32_t bondIndex = 0xFFFFFFFF; for (uint32_t i = graph.adjacencyPartition[node0]; i < graph.adjacencyPartition[node0 + 1]; i++) { if (graph.adjacentNodeIndices[i] == node1) { bondIndex = graph.adjacentBondIndices[i]; break; } } if (bondIndex >= NvBlastAssetGetBondCount(m_assetLL, logLL)) { return false; } const NvBlastBond& bond = NvBlastAssetGetBonds(m_assetLL, logLL)[bondIndex]; TkAssetJointDesc jointDesc; jointDesc.attachPositions[0] = jointDesc.attachPositions[1] = nvidia::NvVec3(bond.centroid[0], bond.centroid[1], bond.centroid[2]); jointDesc.nodeIndices[0] = node0; jointDesc.nodeIndices[1] = node1; m_jointDescs.pushBack(jointDesc); return true; } } // namespace Blast } // namespace Nv
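// Usage sketch: the bond lookup performed by addJointDesc() above, factored into a standalone
// helper. It relies only on the NvBlastSupportGraph layout from the low-level API: node0's
// adjacency range is scanned for node1, and the corresponding bond index is returned.
static uint32_t findBondBetweenNodes(const NvBlastSupportGraph& graph, uint32_t node0, uint32_t node1)
{
    for (uint32_t i = graph.adjacencyPartition[node0]; i < graph.adjacencyPartition[node0 + 1]; ++i)
    {
        if (graph.adjacentNodeIndices[i] == node1)
        {
            return graph.adjacentBondIndices[i];    // bond shared by node0 and node1
        }
    }
    return 0xFFFFFFFF;                              // no bond connects the two nodes
}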
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTypeImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKTYPEIMPL_H #define NVBLASTTKTYPEIMPL_H #include "NvPreprocessor.h" #include "NvBlastTkType.h" namespace Nv { namespace Blast { /** Implementation of TkType, storing class information for TkIdentifiable-derived classes. */ class TkTypeImpl : public TkType { public: TkTypeImpl(const char* typeName, uint32_t typeID, uint32_t version); // Begin TkType virtual const char* getName() const override { return getNameInternal(); } virtual uint32_t getVersion() const override { return getVersionInternal(); } // End TkType // Public methods /** Access to the class name. \return a C string pointer to the class name. */ const char* getNameInternal() const; /** Access to the data format version for the class. \return the data format version. */ uint32_t getVersionInternal() const; /** Access to a unique identifier for the class (set using the NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE macro). \return the class's unique identifier. */ uint32_t getID() const; /** Access to a runtime-unique small index for the class. \return the index for the class. */ uint32_t getIndex() const; /** \return whether or not the index has been set (see setIndex) to a valid value. */ bool indexIsValid() const; private: enum { InvalidIndex = 0xFFFFFFFF }; /** Sets the type index. \param[in] index The index to set. */ void setIndex(uint32_t index); const char* m_name; //!< The name of the class, set by the constructor. uint32_t m_ID; //!< The unique identifier for the class, set by the constructor. uint32_t m_version; //!< The data format version for the class, set by the constructor. uint32_t m_index; //!< The index set for this class, set using setIndex(). 
friend class TkFrameworkImpl; }; //////// TkTypeImpl inline methods //////// NV_INLINE TkTypeImpl::TkTypeImpl(const char* typeName, uint32_t typeID, uint32_t version) : m_name(typeName) , m_ID(typeID) , m_version(version) , m_index((uint32_t)InvalidIndex) { } NV_INLINE const char* TkTypeImpl::getNameInternal() const { return m_name; } NV_INLINE uint32_t TkTypeImpl::getVersionInternal() const { return m_version; } NV_INLINE uint32_t TkTypeImpl::getID() const { return m_ID; } NV_INLINE uint32_t TkTypeImpl::getIndex() const { return m_index; } NV_INLINE bool TkTypeImpl::indexIsValid() const { return m_index != (uint32_t)InvalidIndex; } NV_INLINE void TkTypeImpl::setIndex(uint32_t index) { m_index = index; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKTYPEIMPL_H
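// Usage sketch: checking the concrete class of a TkIdentifiable through its type info. The ClassID
// value is the fourCC passed to NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE; the cast assumes that every
// registered type is a TkTypeImpl, which is how the framework's own type table stores them.
static bool isOfClass(const Nv::Blast::TkIdentifiable& object, uint32_t classID)
{
    const Nv::Blast::TkTypeImpl& type = static_cast<const Nv::Blast::TkTypeImpl&>(object.getType());
    return type.getID() == classID;     // compare fourCC class identifiers
}
// e.g. isOfClass(object, Nv::Blast::TkAssetImpl::ClassID)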
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkFrameworkImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKFRAMEWORKIMPL_H #define NVBLASTTKFRAMEWORKIMPL_H #include "NvBlastTkFramework.h" #include "NvBlastInternalProfiler.h" #include "NvBlastTkCommon.h" #include "NvBlastArray.h" #include "NvBlastHashMap.h" #include "NvBlastHashSet.h" namespace Nv { namespace Blast { // Forward declarations class TkTypeImpl; class TkJointImpl; /** Implementation of TkFramework */ class TkFrameworkImpl : public TkFramework { public: TkFrameworkImpl(); ~TkFrameworkImpl(); // Begin TkFramework virtual void release() override; virtual const TkType* getType(TkTypeIndex::Enum typeIndex) const override; virtual TkIdentifiable* findObjectByID(const NvBlastID& id) const override; virtual uint32_t getObjectCount(const TkType& type) const override; virtual uint32_t getObjects(TkIdentifiable** buffer, uint32_t bufferSize, const TkType& type, uint32_t indexStart = 0) const override; virtual bool reorderAssetDescChunks(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap = nullptr, bool keepBondNormalChunkOrder = false) const override; virtual bool ensureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount) const override; virtual TkAsset* createAsset(const TkAssetDesc& desc) override; virtual TkAsset* createAsset(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs = nullptr, uint32_t jointDescCount = 0, bool ownsAsset = false) override; virtual TkGroup* createGroup(const TkGroupDesc& desc) override; virtual TkActor* createActor(const TkActorDesc& desc) override; virtual TkJoint* createJoint(const TkJointDesc& desc) override; // End TkFramework // Public methods /** To be called by any TkIdentifiable object when it is created, so the framework can track it. */ void onCreate(TkIdentifiable& object); /** To be called by any TkIdentifiable object when it is deleted, so the framework can stop tracking it. 
*/ void onDestroy(TkIdentifiable& object); /** Special onCreate method for joints, since they are not TkIdentifiable. */ void onCreate(TkJointImpl& joint); /** Special onDestroy method for joints, since they are not TkIdentifiable. */ void onDestroy(TkJointImpl& joint); /** Must be called whenever a TkIdentifiable object's ID is changed, so that the framework can associate the new ID with it. */ void onIDChange(TkIdentifiable& object, const NvBlastID& IDPrev, const NvBlastID& IDCurr); /** Internal (non-virtual) method to find a TkIdentifiable object based upon its NvBlastID. */ TkIdentifiable* findObjectByIDInternal(const NvBlastID& id) const; // Access to singleton /** Retrieve the global singleton. */ static TkFrameworkImpl* get(); /** Set the global singleton, if it's not already set, or set it to NULL. Returns true iff successful. */ static bool set(TkFrameworkImpl* framework); private: // Enums enum { ClassID = NVBLAST_FOURCC('T', 'K', 'F', 'W') }; //!< TkFramework identifier token, used in serialization // Static data static TkFrameworkImpl* s_framework; //!< Global (singleton) object pointer // Types InlineArray<const TkTypeImpl*, TkTypeIndex::TypeCount>::type m_types; //!< TkIdentifiable static type data HashMap<uint32_t, uint32_t>::type m_typeIDToIndex; //!< Map to type data keyed by ClassID // Objects and object names HashMap<NvBlastID, TkIdentifiable*>::type m_IDToObject; //!< Map to all TkIdentifiable objects, keyed by NvBlastID InlineArray<Array<TkIdentifiable*>::type, TkTypeIndex::TypeCount>::type m_objects; //!< Catalog of all TkIdentifiable objects, grouped by type. (Revisit implementation.) // Track external joints (to do: make this a pool) HashSet<TkJointImpl*>::type m_joints; //!< All internal joints }; //////// TkFrameworkImpl inline methods //////// NV_INLINE TkIdentifiable* TkFrameworkImpl::findObjectByIDInternal(const NvBlastID& id) const { const auto entry = m_IDToObject.find(id); if (entry == nullptr) { return nullptr; } return entry->second; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKFRAMEWORKIMPL_H
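// Usage sketch: enumerating all objects of one type through the public TkFramework interface,
// mirroring the pattern TkAssetImpl::release() uses internally with TkFamilyImpl::s_type.
// TkTypeIndex::Asset is assumed to be one of the enum values in NvBlastTkFramework.h (this header
// itself only references TkTypeIndex::TypeCount).
static void forEachAsset(Nv::Blast::TkFramework& framework)
{
    const Nv::Blast::TkType* assetType = framework.getType(Nv::Blast::TkTypeIndex::Asset);
    if (assetType == nullptr)
    {
        return;
    }
    const uint32_t count = framework.getObjectCount(*assetType);
    if (count == 0)
    {
        return;
    }
    Nv::Blast::Array<Nv::Blast::TkIdentifiable*>::type objects(count);  // toolkit array, as in TkAssetImpl::release()
    framework.getObjects(objects.begin(), objects.size(), *assetType);
    for (Nv::Blast::TkIdentifiable* object : objects)
    {
        Nv::Blast::TkAsset* asset = static_cast<Nv::Blast::TkAsset*>(object);
        NV_UNUSED(asset);   // inspect the asset here, e.g. asset->getChunkCount()
    }
}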
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTaskManager.cpp
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #include "NvTask.h" #include "NvTaskDefine.h" #include "NvCpuDispatcher.h" #include "NvGpuDispatcher.h" #include "NvErrorCallback.h" #include "NvBlastGlobals.h" #include "NvBlastAssert.h" #include "NvBlastAtomic.h" #include "NvBlastAllocator.h" #include "NvBlastArray.h" #include "NvBlastHashMap.h" #include <mutex> using namespace nvidia; using namespace nvidia::task; namespace Nv { namespace Blast { class MutexScopedLock { std::mutex& mMutex; NV_NOCOPY(MutexScopedLock) public: NV_INLINE MutexScopedLock(std::mutex& mutex) : mMutex(mutex) { mMutex.lock(); } NV_INLINE ~MutexScopedLock() { mMutex.unlock(); } }; #define LOCK() MutexScopedLock __lock__(mMutex) constexpr int EOL = -1; typedef HashMap<const char *, NvTaskID>::type NvBlastTkTaskNameToIDMap; struct NvBlastTkTaskDepTableRow { NvTaskID mTaskID; int mNextDep; }; typedef Array<NvBlastTkTaskDepTableRow>::type NvBlastTkTaskDepTable; struct NvTaskAccess : public NvTask { void setTaskID(NvTaskID taskID) { mTaskID = taskID; } void setTm(NvTaskManager* tm) { mTm = tm; } }; NvTaskAccess& ACCESS(NvTask& task) { return reinterpret_cast<NvTaskAccess&>(task); } NvTaskAccess* ACCESS(NvTask* task) { return reinterpret_cast<NvTaskAccess*>(task); } struct NvLightCpuTaskAccess : public NvLightCpuTask { bool atomicIncrementRefCount() { return Nv::Blast::atomicIncrement(&mRefCount); } bool atomicDecrementRefCount() { return Nv::Blast::atomicDecrement(&mRefCount); } }; NvLightCpuTaskAccess& ACCESS(NvLightCpuTask& task) { return reinterpret_cast<NvLightCpuTaskAccess&>(task); } class NvBlastTkTaskTableRow { public: NvBlastTkTaskTableRow() : mRefCount( 1 ), mStartDep(EOL), mLastDep(EOL) {} void addDependency( NvBlastTkTaskDepTable& depTable, NvTaskID taskID ) { int newDep = int(depTable.size()); NvBlastTkTaskDepTableRow row; row.mTaskID = taskID; row.mNextDep = EOL; depTable.pushBack( row ); if( mLastDep == EOL ) { mStartDep = mLastDep = newDep; } else { depTable[ uint32_t(mLastDep) ].mNextDep = newDep; mLastDep = newDep; } } NvTask * mTask; 
volatile int mRefCount; NvTaskType::Enum mType; int mStartDep; int mLastDep; }; typedef Array<NvBlastTkTaskTableRow>::type NvTaskTable; /* Implementation of NvTaskManager abstract API */ class NvBlastTkTaskManager : public NvTaskManager { NV_NOCOPY(NvBlastTkTaskManager) public: NvBlastTkTaskManager(NvErrorCallback& , NvCpuDispatcher*, NvGpuDispatcher*); ~NvBlastTkTaskManager(); void setCpuDispatcher( NvCpuDispatcher& ref ) { mCpuDispatcher = &ref; } NvCpuDispatcher* getCpuDispatcher() const { return mCpuDispatcher; } void setGpuDispatcher( NvGpuDispatcher& ref ) { mGpuDispatcher = &ref; } NvGpuDispatcher* getGpuDispatcher() const { return mGpuDispatcher; } void resetDependencies(); void startSimulation(); void stopSimulation(); void taskCompleted( NvTask& task ); NvTaskID getNamedTask( const char *name ); NvTaskID submitNamedTask( NvTask *task, const char *name, NvTaskType::Enum type = NvTaskType::TT_CPU ); NvTaskID submitUnnamedTask( NvTask& task, NvTaskType::Enum type = NvTaskType::TT_CPU ); NvTask* getTaskFromID( NvTaskID ); bool dispatchTask( NvTaskID taskID, bool gpuGroupStart ); bool resolveRow( NvTaskID taskID, bool gpuGroupStart ); void release(); void finishBefore( NvTask& task, NvTaskID taskID ); void startAfter( NvTask& task, NvTaskID taskID ); void addReference( NvTaskID taskID ); void decrReference( NvTaskID taskID ); int32_t getReference( NvTaskID taskID ) const; void decrReference( NvLightCpuTask& lighttask ); void addReference( NvLightCpuTask& lighttask ); void emitStartEvent(NvBaseTask& basetask, uint32_t threadId); void emitStopEvent(NvBaseTask& basetask, uint32_t threadId); NvErrorCallback& mErrorCallback; NvCpuDispatcher* mCpuDispatcher; NvGpuDispatcher* mGpuDispatcher; NvBlastTkTaskNameToIDMap mName2IDmap; volatile int mPendingTasks; std::mutex mMutex; NvBlastTkTaskDepTable mDepTable; NvTaskTable mTaskTable; Array<NvTaskID>::type mStartDispatch; }; NvBlastTkTaskManager::NvBlastTkTaskManager(NvErrorCallback& errorCallback, NvCpuDispatcher* cpuDispatcher, NvGpuDispatcher* gpuDispatcher) : mErrorCallback (errorCallback) , mCpuDispatcher( cpuDispatcher ) , mGpuDispatcher( gpuDispatcher ) , mPendingTasks( 0 ) , mDepTable(NV_DEBUG_EXP("NvBlastTkTaskDepTable")) , mTaskTable(NV_DEBUG_EXP("NvTaskTable")) , mStartDispatch(NV_DEBUG_EXP("StartDispatch")) { } NvBlastTkTaskManager::~NvBlastTkTaskManager() { } void NvBlastTkTaskManager::release() { NVBLAST_DELETE(this, NvBlastTkTaskManager); } void NvBlastTkTaskManager::decrReference(NvLightCpuTask& lighttask) { /* This does not need a lock! */ if (!ACCESS(lighttask).atomicDecrementRefCount()) { NVBLAST_ASSERT(mCpuDispatcher); if (mCpuDispatcher) { mCpuDispatcher->submitTask(lighttask); } else { lighttask.release(); } } } void NvBlastTkTaskManager::addReference(NvLightCpuTask& lighttask) { /* This does not need a lock! */ ACCESS(lighttask).atomicIncrementRefCount(); } void NvBlastTkTaskManager::emitStartEvent(NvBaseTask& basetask, uint32_t threadId) { NvBaseTask* tmp = &basetask; NV_UNUSED(tmp); NV_UNUSED(threadId); /* This does not need a lock! */ #if NV_SUPPORT_NVTASK_PROFILING && NV_PROFILE //NV_COMPILE_TIME_ASSERT(sizeof(NvProfileEventId::mEventId) == sizeof(NvBaseTask::mEventID)); if (NvBlastGlobalGetProfilerCallback()) NvBlastGlobalGetProfilerCallback()->zoneStart(basetask.getName(), true, 0); #endif } void NvBlastTkTaskManager::emitStopEvent(NvBaseTask& basetask, uint32_t threadId) { NvBaseTask* tmp = &basetask; NV_UNUSED(tmp); NV_UNUSED(threadId); /* This does not need a lock! 
*/ if (NvBlastGlobalGetProfilerCallback()) NvBlastGlobalGetProfilerCallback()->zoneEnd(nullptr, basetask.getName(), true, 0); #if NV_SUPPORT_NVTASK_PROFILING && NV_PROFILE //NV_COMPILE_TIME_ASSERT(sizeof(NvProfileEventId::mEventId) == sizeof(NvBaseTask::mEventID)); #endif } /* * Called by the owner (Scene) at the start of every frame, before * asking for tasks to be submitted. */ void NvBlastTkTaskManager::resetDependencies() { NVBLAST_ASSERT( !mPendingTasks ); // only valid if you don't resubmit named tasks, this is true for the SDK NVBLAST_ASSERT( mCpuDispatcher ); mTaskTable.clear(); mDepTable.clear(); mName2IDmap.clear(); mPendingTasks = 0; } /* * Called by the owner (Scene) to start simulating the task graph. * Dispatch all tasks with refCount == 1 */ void NvBlastTkTaskManager::startSimulation() { NVBLAST_ASSERT( mCpuDispatcher ); if( mGpuDispatcher ) { mGpuDispatcher->startSimulation(); } /* Handle empty task graph */ if( mPendingTasks == 0 ) { return; } bool gpuDispatch = false; for( NvTaskID i = 0 ; i < mTaskTable.size() ; i++ ) { if( mTaskTable[ i ].mType == NvTaskType::TT_COMPLETED ) { continue; } if( !Nv::Blast::atomicDecrement( &mTaskTable[ i ].mRefCount ) ) { mStartDispatch.pushBack(i); } } for( uint32_t i=0; i<mStartDispatch.size(); ++i) { gpuDispatch |= dispatchTask( mStartDispatch[i], gpuDispatch ); } //mStartDispatch.resize(0); mStartDispatch.forceSize_Unsafe(0); if( mGpuDispatcher && gpuDispatch ) { mGpuDispatcher->finishGroup(); } } void NvBlastTkTaskManager::stopSimulation() { if( mGpuDispatcher ) { mGpuDispatcher->stopSimulation(); } } NvTaskID NvBlastTkTaskManager::getNamedTask( const char *name ) { const NvBlastTkTaskNameToIDMap::Entry *ret; { LOCK(); ret = mName2IDmap.find( name ); } if( ret ) { return ret->second; } else { // create named entry in task table, without a task return submitNamedTask( NULL, name, NvTaskType::TT_NOT_PRESENT ); } } NvTask* NvBlastTkTaskManager::getTaskFromID( NvTaskID id ) { LOCK(); // todo: reader lock necessary? return mTaskTable[ id ].mTask; } /* If called at runtime, must be thread-safe */ NvTaskID NvBlastTkTaskManager::submitNamedTask( NvTask *task, const char *name, NvTaskType::Enum type ) { if( task ) { ACCESS(task)->setTm(this); task->submitted(); } LOCK(); const NvBlastTkTaskNameToIDMap::Entry *ret = mName2IDmap.find( name ); if( ret ) { NvTaskID prereg = ret->second; if( task ) { /* name was registered for us by a dependent task */ NVBLAST_ASSERT( !mTaskTable[ prereg ].mTask ); NVBLAST_ASSERT( mTaskTable[ prereg ].mType == NvTaskType::TT_NOT_PRESENT ); mTaskTable[ prereg ].mTask = task; mTaskTable[ prereg ].mType = type; ACCESS(task)->setTaskID(prereg); } return prereg; } else { Nv::Blast::atomicIncrement(&mPendingTasks); NvTaskID id = static_cast<NvTaskID>(mTaskTable.size()); mName2IDmap[ name ] = id; if( task ) { ACCESS(task)->setTaskID(id); } NvBlastTkTaskTableRow r; r.mTask = task; r.mType = type; mTaskTable.pushBack(r); return id; } } /* * Add an unnamed task to the task table */ NvTaskID NvBlastTkTaskManager::submitUnnamedTask( NvTask& task, NvTaskType::Enum type ) { Nv::Blast::atomicIncrement(&mPendingTasks); ACCESS(task).setTm(this); task.submitted(); LOCK(); ACCESS(task).setTaskID(static_cast<NvTaskID>(mTaskTable.size())); NvBlastTkTaskTableRow r; r.mTask = &task; r.mType = type; mTaskTable.pushBack(r); return task.getTaskID(); } /* Called by worker threads (or cooperating application threads) when a * NvTask has completed. Propogate depdenencies, decrementing all * referenced tasks' refCounts. 
If any of those reach zero, activate * those tasks. */ void NvBlastTkTaskManager::taskCompleted( NvTask& task ) { LOCK(); if( resolveRow( task.getTaskID(), false ) ) { mGpuDispatcher->finishGroup(); } } /* ================== Private Functions ======================= */ /* * Add a dependency to force 'task' to complete before the * referenced 'taskID' is allowed to be dispatched. */ void NvBlastTkTaskManager::finishBefore( NvTask& task, NvTaskID taskID ) { LOCK(); NVBLAST_ASSERT( mTaskTable[ taskID ].mType != NvTaskType::TT_COMPLETED ); mTaskTable[ task.getTaskID() ].addDependency( mDepTable, taskID ); Nv::Blast::atomicIncrement( &mTaskTable[ taskID ].mRefCount ); } /* * Add a dependency to force 'task' to wait for the referenced 'taskID' * to complete before it is allowed to be dispatched. */ void NvBlastTkTaskManager::startAfter( NvTask& task, NvTaskID taskID ) { LOCK(); NVBLAST_ASSERT( mTaskTable[ taskID ].mType != NvTaskType::TT_COMPLETED ); mTaskTable[ taskID ].addDependency( mDepTable, task.getTaskID() ); Nv::Blast::atomicIncrement( &mTaskTable[ task.getTaskID() ].mRefCount ); } void NvBlastTkTaskManager::addReference( NvTaskID taskID ) { LOCK(); Nv::Blast::atomicIncrement( &mTaskTable[ taskID ].mRefCount ); } /* * Remove one reference count from a task. Must be done here to make it thread safe. */ void NvBlastTkTaskManager::decrReference( NvTaskID taskID ) { LOCK(); if( !Nv::Blast::atomicDecrement( &mTaskTable[ taskID ].mRefCount ) ) { if( dispatchTask( taskID, false ) ) { mGpuDispatcher->finishGroup(); } } } int32_t NvBlastTkTaskManager::getReference(NvTaskID taskID) const { return mTaskTable[ taskID ].mRefCount; } /* * A task has completed, decrement all dependencies and submit tasks * that are ready to run. Signal simulation end if ther are no more * pending tasks. */ bool NvBlastTkTaskManager::resolveRow( NvTaskID taskID, bool gpuGroupStart ) { int depRow = mTaskTable[ taskID ].mStartDep; while( depRow != EOL ) { NvBlastTkTaskDepTableRow& row = mDepTable[ uint32_t(depRow) ]; NvBlastTkTaskTableRow& dtt = mTaskTable[ row.mTaskID ]; if( !Nv::Blast::atomicDecrement( &dtt.mRefCount ) ) { gpuGroupStart |= dispatchTask( row.mTaskID, gpuGroupStart ); } depRow = row.mNextDep; } Nv::Blast::atomicDecrement( &mPendingTasks ); return gpuGroupStart; } /* * Submit a ready task to its appropriate dispatcher. */ bool NvBlastTkTaskManager::dispatchTask( NvTaskID taskID, bool gpuGroupStart ) { LOCK(); // todo: reader lock necessary? 
NvBlastTkTaskTableRow& tt = mTaskTable[ taskID ]; // prevent re-submission if( tt.mType == NvTaskType::TT_COMPLETED ) { mErrorCallback.reportError(NvErrorCode::eDEBUG_WARNING, "NvTask dispatched twice", __FILE__, __LINE__); return false; } switch ( tt.mType ) { case NvTaskType::TT_CPU: mCpuDispatcher->submitTask( *tt.mTask ); break; case NvTaskType::TT_GPU: #if NV_WINDOWS_FAMILY if( mGpuDispatcher ) { if( !gpuGroupStart ) { mGpuDispatcher->startGroup(); } mGpuDispatcher->submitTask( *tt.mTask ); gpuGroupStart = true; } else #endif { mErrorCallback.reportError(NvErrorCode::eDEBUG_WARNING, "No GPU dispatcher", __FILE__, __LINE__); } break; case NvTaskType::TT_NOT_PRESENT: /* No task registered with this taskID, resolve its dependencies */ NVBLAST_ASSERT(!tt.mTask); gpuGroupStart |= resolveRow( taskID, gpuGroupStart ); break; case NvTaskType::TT_COMPLETED: default: mErrorCallback.reportError(NvErrorCode::eDEBUG_WARNING, "Unknown task type", __FILE__, __LINE__); gpuGroupStart |= resolveRow( taskID, gpuGroupStart ); break; } tt.mType = NvTaskType::TT_COMPLETED; return gpuGroupStart; } } // namespace Blast } // namespace Nv // Implement NvTaskManager factory namespace nvidia { namespace task { NvTaskManager* NvTaskManager::createTaskManager(NvErrorCallback& errorCallback, NvCpuDispatcher* cpuDispatcher, NvGpuDispatcher* gpuDispatcher) { return NVBLAST_NEW(Nv::Blast::NvBlastTkTaskManager)(errorCallback, cpuDispatcher, gpuDispatcher); } } }
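// Usage sketch (not part of the SDK): driving the task manager implemented above for one frame.
// The unqualified names rely on the same `using namespace nvidia; using namespace nvidia::task;`
// convention the SDK sources use; the header choices, the NvTask objects passed in, the blocking
// strategy while the dispatcher runs, and the final release() call are assumptions here.
#include "NvTask.h"
#include "NvErrorCallback.h"

using namespace nvidia;
using namespace nvidia::task;

void runOneFrame(NvErrorCallback& errorCallback, NvCpuDispatcher& cpuDispatcher,
                 NvTask& produceTask, NvTask& consumeTask)
{
    NvTaskManager* tm = NvTaskManager::createTaskManager(errorCallback, &cpuDispatcher, nullptr);

    // Named submission returns an NvTaskID; a dependent task may look a name up first with
    // getNamedTask(), which pre-registers a TT_NOT_PRESENT row until the real task is submitted.
    tm->submitNamedTask(&produceTask, "produce", NvTaskType::TT_CPU);
    tm->submitNamedTask(&consumeTask, "consume", NvTaskType::TT_CPU);

    tm->startSimulation();    // dispatches every task whose reference count reaches zero
    // ... block here until all submitted tasks have called taskCompleted() ...
    tm->stopSimulation();

    tm->resetDependencies();  // named tasks are not resubmittable; rebuild the graph each frame
    tm->release();            // release() is assumed to exist on the NvTaskManager interface
}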
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkGroupImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKGROUPIMPL_H #define NVBLASTTKGROUPIMPL_H #include "NvBlastTkTaskImpl.h" #include "NvBlastTkGroup.h" #include "NvBlastTkTypeImpl.h" namespace Nv { namespace Blast { class TkActorImpl; class TkFamilyImpl; NVBLASTTK_IMPL_DECLARE(Group) { ~TkGroupImpl(); public: TkGroupImpl(); NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE('G', 'R', 'P', '\0'); static TkGroupImpl* create(const TkGroupDesc& desc); // Begin TkGroup virtual bool addActor(TkActor& actor) override; virtual uint32_t getActorCount() const override; virtual uint32_t getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart = 0) const override; virtual uint32_t startProcess() override; virtual bool endProcess() override; virtual void getStats(TkGroupStats& stats) const override; virtual void setWorkerCount(uint32_t workerCount) override; virtual uint32_t getWorkerCount() const override; virtual TkGroupWorker* acquireWorker() override; virtual void returnWorker(TkGroupWorker*) override; // End TkGroup // TkGroupImpl API /** Remove the actor from this group if the actor actually belongs to it and the group is not processing. \param[in] actor The TkActor to remove. \return true if removing succeeded, false otherwise */ bool removeActor(TkActor& actor); /** Add the actor to this group's job queue. It is the caller's responsibility to add an actor only once. This condition is checked in debug builds. */ void enqueue(TkActorImpl* tkActor); /** Atomically check if this group is processing actors. @see setProcessing() \return true between startProcess() and endProcess() calls, false otherwise */ bool isProcessing() const; private: /** Atomically set the processing state. This function checks for the current state before changing it. @see isProcessing() \param[in] value the value of the new state \return true if the new state could be set, false otherwise */ bool setProcessing(bool value); /** Get the group-family shared memory for the specified family. To be used when the memory is expected to already exist. 
*/ SharedMemory* getSharedMemory(TkFamilyImpl* family); void releaseSharedMemory(TkFamilyImpl* fam, SharedMemory* mem); // functions to add/remove actors _without_ group-family memory management void addActorInternal(TkActorImpl& tkActor); void addActorsInternal(TkActorImpl** actors, uint32_t numActors); void removeActorInternal(TkActorImpl& tkActor); uint32_t m_actorCount; //!< number of actors in this group HashMap<TkFamilyImpl*, SharedMemory*>::type m_sharedMemory; //!< memory sharable by actors in the same family in this group // it is assumed no more than the asset's number of bond and chunks fracture commands are produced SharedBlock<NvBlastChunkFractureData> m_chunkTempDataBlock; //!< chunk data for damage/fracture SharedBlock<NvBlastBondFractureData> m_bondTempDataBlock; //!< bond data for damage/fracture SharedBlock<NvBlastChunkFractureData> m_chunkEventDataBlock; //!< initial memory block for event data SharedBlock<NvBlastBondFractureData> m_bondEventDataBlock; //!< initial memory block for event data SharedBlock<char> m_splitScratchBlock; //!< split scratch memory std::atomic<bool> m_isProcessing; //!< true while workers are processing Array<TkWorker>::type m_workers; //!< this group's workers Array<TkWorkerJob>::type m_jobs; //!< this group's process jobs //#if NV_PROFILE TkGroupStats m_stats; //!< accumulated group's worker stats //#endif std::mutex m_workerMtx; friend class TkWorker; }; NV_INLINE bool TkGroupImpl::isProcessing() const { return m_isProcessing.load(); } NV_INLINE void TkGroupImpl::getStats(TkGroupStats& stats) const { #if NV_PROFILE memcpy(&stats, &m_stats, sizeof(TkGroupStats)); #else NV_UNUSED(stats); #endif } NV_INLINE uint32_t TkGroupImpl::getActorCount() const { return m_actorCount; } NV_INLINE SharedMemory* TkGroupImpl::getSharedMemory(TkFamilyImpl* family) { SharedMemory* mem = m_sharedMemory[family]; NVBLAST_ASSERT(mem != nullptr); return mem; } NV_FORCE_INLINE void operator +=(NvBlastTimers& lhs, const NvBlastTimers& rhs) { lhs.material += rhs.material; lhs.fracture += rhs.fracture; lhs.island += rhs.island; lhs.partition += rhs.partition; lhs.visibility += rhs.visibility; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKGROUPIMPL_H
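// Usage sketch (not part of the SDK): processing a group synchronously on the calling thread
// through the public TkGroup/TkGroupWorker interfaces used above. It assumes the value returned
// by startProcess() is the number of jobs to feed to TkGroupWorker::process().
#include "NvBlastTkGroup.h"

inline void processGroupSingleThreaded(Nv::Blast::TkGroup* group)
{
    const uint32_t jobCount = group->startProcess();           // enters the processing state
    if (jobCount > 0)
    {
        Nv::Blast::TkGroupWorker* worker = group->acquireWorker();
        for (uint32_t jobID = 0; jobID < jobCount; ++jobID)
        {
            worker->process(jobID);                            // run one damage/split job
        }
        group->returnWorker(worker);
    }
    group->endProcess();                                       // leaves the processing state
}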
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkFrameworkImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastAssert.h" #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkGroupImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkJointImpl.h" #include "NvBlastTkTypeImpl.h" #include "NvBlastGlobals.h" #include <algorithm> using namespace nvidia; using namespace nvidia::shdfnd; NV_INLINE bool operator < (const NvBlastID& id1, const NvBlastID& id2) { return memcmp(&id1, &id2, sizeof(NvBlastID)) < 0; } namespace Nv { namespace Blast { //////// Local definitions //////// // Map type ID to static type data #define NVBLASTTK_REGISTER_TYPE(_name) \ if (!Tk##_name##Impl::s_type.indexIsValid()) \ { \ Tk##_name##Impl::s_type.setIndex(TkTypeIndex::_name); \ } \ m_types[TkTypeIndex::_name] = &Tk##_name##Impl::s_type; \ m_typeIDToIndex[Tk##_name##Impl::s_type.getID()] = TkTypeIndex::_name #define NVBLASTTK_RELEASE_TYPE(_name) \ { \ TkTypeImpl& type = Tk##_name##Impl::s_type; \ auto& toRelease = m_objects[type.getIndex()]; \ for (TkObject* obj : toRelease) \ { \ obj->release(); \ } \ } //////// TkFrameworkImpl static variables //////// TkFrameworkImpl* TkFrameworkImpl::s_framework = nullptr; //////// TkFrameworkImpl static function //////// TkFrameworkImpl* TkFrameworkImpl::get() { return s_framework; } bool TkFrameworkImpl::set(TkFrameworkImpl* framework) { if (s_framework != nullptr) { if (framework != nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::set: framework already set. 
Pass NULL to this function to destroy framework."); return false; } NVBLAST_DELETE(s_framework, TkFrameworkImpl); } s_framework = framework; return true; } //////// TkFrameworkImpl methods //////// TkFrameworkImpl::TkFrameworkImpl() : TkFramework() { // Register types m_types.resize(TkTypeIndex::TypeCount); m_objects.resize(TkTypeIndex::TypeCount); NVBLASTTK_REGISTER_TYPE(Asset); NVBLASTTK_REGISTER_TYPE(Family); NVBLASTTK_REGISTER_TYPE(Group); } TkFrameworkImpl::~TkFrameworkImpl() { } void TkFrameworkImpl::release() { // Special release of joints, which are not TkIdentifiable: Array<TkJointImpl*>::type joints; // Since the EraseIterator is not exposed joints.reserve(m_joints.size()); for (auto j = m_joints.getIterator(); !j.done(); ++j) { joints.pushBack(*j); } for (uint32_t i = 0; i < joints.size(); ++i) { joints[i]->release(); } NVBLAST_ASSERT(m_joints.size() == 0); joints.reset(); // Since we will be deleting the allocator NVBLASTTK_RELEASE_TYPE(Group); NVBLASTTK_RELEASE_TYPE(Asset); set(nullptr); } const TkType* TkFrameworkImpl::getType(TkTypeIndex::Enum typeIndex) const { if (typeIndex < 0 || typeIndex >= TkTypeIndex::TypeCount) { NVBLAST_LOG_WARNING("TkFrameworkImpl::getType: invalid typeIndex."); return nullptr; } return m_types[typeIndex]; } TkIdentifiable* TkFrameworkImpl::findObjectByID(const NvBlastID& id) const { TkIdentifiable* object = findObjectByIDInternal(id); if (object == nullptr) { NVBLAST_LOG_WARNING("TkFrameworkImpl::findObjectByID: object not found."); } return object; } uint32_t TkFrameworkImpl::getObjectCount(const TkType& type) const { const uint32_t index = static_cast<const TkTypeImpl&>(type).getIndex(); if (index >= m_objects.size()) { NVBLAST_LOG_ERROR("TkFrameworkImpl::getObjectCount: BlastTk object type unrecognized."); return 0; } return m_objects[index].size(); } uint32_t TkFrameworkImpl::getObjects(TkIdentifiable** buffer, uint32_t bufferSize, const TkType& type, uint32_t indexStart /* = 0 */) const { const uint32_t index = static_cast<const TkTypeImpl&>(type).getIndex(); if (index >= m_objects.size()) { NVBLAST_LOG_ERROR("TkFrameworkImpl::getObjectCount: BlastTk object type unrecognized."); return 0; } const auto& objectArray = m_objects[index]; uint32_t objectCount = objectArray.size(); if (objectCount <= indexStart) { NVBLAST_LOG_WARNING("TkFrameworkImpl::getObjects: indexStart beyond end of object list."); return 0; } objectCount -= indexStart; if (objectCount > bufferSize) { objectCount = bufferSize; } memcpy(buffer, objectArray.begin() + indexStart, objectCount * sizeof(TkObject*)); return objectCount; } bool TkFrameworkImpl::reorderAssetDescChunks(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap /*= nullptr*/, bool keepBondNormalChunkOrder /*= false*/) const { uint32_t* map = chunkReorderMap != nullptr ? 
chunkReorderMap : static_cast<uint32_t*>(NVBLAST_ALLOC_NAMED(chunkCount * sizeof(uint32_t), "reorderAssetDescChunks:chunkReorderMap")); void* scratch = NVBLAST_ALLOC_NAMED(chunkCount * sizeof(NvBlastChunkDesc), "reorderAssetDescChunks:scratch"); const bool result = NvBlastReorderAssetDescChunks(chunkDescs, chunkCount, bondDescs, bondCount, map, keepBondNormalChunkOrder, scratch, logLL); NVBLAST_FREE(scratch); if (chunkReorderMap == nullptr) { NVBLAST_FREE(map); } return result; } bool TkFrameworkImpl::ensureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount) const { void* scratch = NVBLAST_ALLOC_NAMED(chunkCount, "ensureAssetExactSupportCoverage:scratch"); const bool result = NvBlastEnsureAssetExactSupportCoverage(chunkDescs, chunkCount, scratch, logLL); NVBLAST_FREE(scratch); return result; } TkAsset* TkFrameworkImpl::createAsset(const TkAssetDesc& desc) { TkAssetImpl* asset = TkAssetImpl::create(desc); if (asset == nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::createAsset: failed to create asset."); } return asset; } TkAsset* TkFrameworkImpl::createAsset(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs, uint32_t jointDescCount, bool ownsAsset) { TkAssetImpl* asset = TkAssetImpl::create(assetLL, jointDescs, jointDescCount, ownsAsset); if (asset == nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::createAsset: failed to create asset."); } return asset; } TkGroup* TkFrameworkImpl::createGroup(const TkGroupDesc& desc) { TkGroupImpl* group = TkGroupImpl::create(desc); if (group == nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::createGroup: failed to create group."); } return group; } TkActor* TkFrameworkImpl::createActor(const TkActorDesc& desc) { TkActor* actor = TkActorImpl::create(desc); if (actor == nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::createActor: failed to create actor."); } return actor; } TkJoint* TkFrameworkImpl::createJoint(const TkJointDesc& desc) { TkJointImpl** handle0 = nullptr; TkJointImpl** handle1 = nullptr; TkFamilyImpl* family0 = static_cast<TkFamilyImpl*>(desc.families[0]); TkFamilyImpl* family1 = static_cast<TkFamilyImpl*>(desc.families[1]); NVBLAST_CHECK_ERROR(family0 != nullptr || family1 != nullptr, "TkFrameworkImpl::createJoint: at least one family in the TkJointDesc must be valid.", return nullptr); NVBLAST_CHECK_ERROR(family0 == nullptr || desc.chunkIndices[0] < family0->getAssetImpl()->getChunkCount(), "TkFrameworkImpl::createJoint: desc.chunkIndices[0] is invalid.", return nullptr); NVBLAST_CHECK_ERROR(family1 == nullptr || desc.chunkIndices[1] < family1->getAssetImpl()->getChunkCount(), "TkFrameworkImpl::createJoint: desc.chunkIndices[1] is invalid.", return nullptr); const bool actorsAreTheSame = family0 == family1 && family0->getActorByChunk(desc.chunkIndices[0]) == family1->getActorByChunk(desc.chunkIndices[1]); NVBLAST_CHECK_ERROR(!actorsAreTheSame, "TkFrameworkImpl::createJoint: the chunks listed in the TkJointDesc must be in different actors.", return nullptr); if (family0 != nullptr) { const bool isSupportChunk = !isInvalidIndex(NvBlastAssetGetChunkToGraphNodeMap(family0->getAssetImpl()->getAssetLLInternal(), logLL)[desc.chunkIndices[0]]); NVBLAST_CHECK_ERROR(isSupportChunk, "TkFrameworkImpl::createJoint: desc.chunkIndices[0] is not a support chunk in the asset for desc.families[0]. 
Joint not created.", return nullptr); handle0 = family0->createExternalJointHandle(getFamilyID(family1), desc.chunkIndices[0], desc.chunkIndices[1]); NVBLAST_CHECK_ERROR(handle0 != nullptr, "TkFrameworkImpl::createJoint: could not create joint handle in family[0]. Joint not created.", return nullptr); } if (family1 != nullptr) { const bool isSupportChunk = !isInvalidIndex(NvBlastAssetGetChunkToGraphNodeMap(family1->getAssetImpl()->getAssetLLInternal(), logLL)[desc.chunkIndices[1]]); NVBLAST_CHECK_ERROR(isSupportChunk, "TkFrameworkImpl::createJoint: desc.chunkIndices[1] is not a support chunk in the asset for desc.families[1]. Joint not created.", return nullptr); if (family1 != family0) { handle1 = family1->createExternalJointHandle(getFamilyID(family0), desc.chunkIndices[1], desc.chunkIndices[0]); NVBLAST_CHECK_ERROR(handle1 != nullptr, "TkFrameworkImpl::createJoint: could not create joint handle in family[1]. Joint not created.", return nullptr); } } TkJointImpl* joint = NVBLAST_NEW(TkJointImpl)(desc, nullptr); NVBLAST_CHECK_ERROR(joint != nullptr, "TkFrameworkImpl::createJoint: failed to create joint.", return nullptr); const TkJointData& jointData = joint->getDataInternal(); if (handle0 != nullptr) { *handle0 = joint; static_cast<TkActorImpl*>(jointData.actors[0])->addJoint(joint->m_links[0]); } if (handle1 != nullptr) { *handle1 = joint; if (jointData.actors[0] != jointData.actors[1]) { static_cast<TkActorImpl*>(jointData.actors[1])->addJoint(joint->m_links[1]); } } return joint; } void TkFrameworkImpl::onCreate(TkIdentifiable& object) { const TkTypeImpl& type = static_cast<const TkTypeImpl&>(object.getType()); const uint32_t index = type.getIndex(); if (index >= m_objects.size()) { if (!isInvalidIndex(index)) { NVBLAST_LOG_ERROR("TkFrameworkImpl::addObject: object type unrecognized."); } return; } auto& objectArray = m_objects[index]; NVBLAST_ASSERT(objectArray.find(&object) == objectArray.end()); objectArray.pushBack(&object); } void TkFrameworkImpl::onDestroy(TkIdentifiable& object) { // remove from id map if present const auto id = object.getID(); if (!TkGUIDIsZero(&id)) { m_IDToObject.erase(id); } // remove from object list const TkTypeImpl& type = static_cast<const TkTypeImpl&>(object.getType()); const uint32_t index = type.getIndex(); if (index >= m_objects.size()) { if (!isInvalidIndex(index)) { NVBLAST_LOG_ERROR("TkFrameworkImpl::removeObject: object type unrecognized."); } return; } auto& objectArray = m_objects[index]; objectArray.findAndReplaceWithLast(&object); } void TkFrameworkImpl::onCreate(TkJointImpl& joint) { NVBLAST_CHECK_ERROR(m_joints.insert(&joint), "TkFrameworkImpl::onCreate: Joint already tracked.", return); } void TkFrameworkImpl::onDestroy(TkJointImpl& joint) { NVBLAST_CHECK_ERROR(m_joints.erase(&joint), "TkFrameworkImpl::onDestroy: Joint not tracked.", return); } void TkFrameworkImpl::onIDChange(TkIdentifiable& object, const NvBlastID& IDPrev, const NvBlastID& IDCurr) { if (!TkGUIDIsZero(&IDPrev)) { if (!m_IDToObject.erase(IDPrev)) { NVBLAST_LOG_ERROR("TkFrameworkImpl::reportIDChanged: object with previous ID doesn't exist."); } } if (!TkGUIDIsZero(&IDCurr)) { auto& value = m_IDToObject[IDCurr]; if (value != nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::reportIDChanged: object with new ID already exists."); return; } value = &object; } } } // namespace Blast } // namespace Nv //////// Global API implementation //////// Nv::Blast::TkFramework* NvBlastTkFrameworkCreate() { if (Nv::Blast::TkFrameworkImpl::get() != nullptr) { 
NVBLAST_LOG_ERROR("TkFramework::create: framework already created. Use TkFramework::get() to access."); return nullptr; } Nv::Blast::TkFrameworkImpl* framework = NVBLAST_NEW(Nv::Blast::TkFrameworkImpl) (); Nv::Blast::TkFrameworkImpl::set(framework); return Nv::Blast::TkFrameworkImpl::get(); } Nv::Blast::TkFramework* NvBlastTkFrameworkGet() { return Nv::Blast::TkFrameworkImpl::get(); }
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTask.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKTASK_H #define NVBLASTTKTASK_H #include "NvBlastTkGroupTaskManager.h" #include "NvTask.h" #include "NvBlastTkGroup.h" #include <atomic> #include <mutex> #include <condition_variable> namespace Nv { namespace Blast { /** Counting synchronization object for waiting on TkWorkers to finish. */ class TkTaskSync { public: /** Initializes with an expected number of notifications. */ TkTaskSync(uint32_t count) : m_count(count) {} /** Blocks until the expected number of notifications happened. */ void wait() { std::unique_lock<std::mutex> lk(m_mutex); m_cv.wait(lk, [&] { return m_count == 0; }); } /** Decrement the wait() count by one. */ void notify() { //PERF_SCOPE_H("TaskSync::notify"); std::unique_lock<std::mutex> lk(m_mutex); if (m_count > 0) { m_count--; } if (m_count == 0) { lk.unlock(); m_cv.notify_one(); } } /** Peek if notifications are pending. */ bool isDone() { std::unique_lock<std::mutex> lk(m_mutex); return m_count == 0; } /** Sets the expected number of notifications for wait() to unblock. */ void setCount(uint32_t count) { m_count = count; } private: std::mutex m_mutex; std::condition_variable m_cv; uint32_t m_count; }; /** Common job counter for all tasks. */ class TkAtomicCounter { public: TkAtomicCounter() : m_current(0), m_maxCount(0) {} bool isValid(uint32_t val) { return val < m_maxCount; } uint32_t next() { return m_current.fetch_add(1); } void reset(uint32_t maxCount) { m_maxCount = maxCount; m_current = 0; } private: std::atomic<uint32_t> m_current; uint32_t m_maxCount; }; /** A task running one group job after the other until done. Synchronizes atomically with its siblings. 
*/ class TkGroupWorkerTask : public nvidia::task::NvLightCpuTask { public: TkGroupWorkerTask() : NvLightCpuTask(), m_group(nullptr), m_counter(nullptr), m_sync(nullptr) { } void setup(TkGroup* group, TkAtomicCounter* counter, TkTaskSync* sync) { m_group = group; m_counter = counter; m_sync = sync; } virtual void run() override { Nv::Blast::TkGroupWorker* worker = m_group->acquireWorker(); uint32_t jobID = m_counter->next(); while (m_counter->isValid(jobID)) { worker->process(jobID); jobID = m_counter->next(); } m_group->returnWorker(worker); } virtual void release() override { NvLightCpuTask::release(); // release the sync last m_sync->notify(); } virtual const char* getName() const override { return "BlastGroupWorkerTask"; } private: TkGroup* m_group; TkAtomicCounter* m_counter; TkTaskSync* m_sync; }; /** Implements TkGroupTaskManager */ class TkGroupTaskManagerImpl : public TkGroupTaskManager { public: TkGroupTaskManagerImpl(nvidia::task::NvTaskManager& taskManager, TkGroup* group) : m_taskManager(taskManager), m_sync(0), m_group(group) {} // TkGroupTaskManager API virtual void setGroup(TkGroup*) override; virtual uint32_t process(uint32_t) override; virtual void release() override; virtual bool wait(bool block) override; private: static const uint32_t TASKS_MAX_COUNT = 16; nvidia::task::NvTaskManager& m_taskManager; TkAtomicCounter m_counter; TkGroupWorkerTask m_tasks[TASKS_MAX_COUNT]; TkTaskSync m_sync; TkGroup* m_group; }; } // namespace Blast } // namespace Nv #endif // NVBLASTTKTASK_H
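// Usage sketch (not part of the SDK): fanning a group's jobs out across the task manager with
// the helpers above. The setContinuation()/removeReference() submission idiom is assumed to
// match the PhysX-style NvLightCpuTask API that TkGroupWorkerTask derives from, and the value
// returned by TkGroup::startProcess() is assumed to be the job count.
#include "NvBlastTkTask.h"
#include <algorithm>

inline void processGroupAsync(nvidia::task::NvTaskManager& tm, Nv::Blast::TkGroup& group, uint32_t workerTaskCount)
{
    using namespace Nv::Blast;

    const uint32_t jobCount = group.startProcess();
    if (jobCount == 0)
    {
        group.endProcess();
        return;
    }

    const uint32_t MAX_TASKS = 4;                              // this sketch's own cap
    workerTaskCount = std::min(std::min(workerTaskCount, jobCount), MAX_TASKS);

    TkAtomicCounter counter;
    counter.reset(jobCount);                                   // shared job index for all tasks
    TkTaskSync sync(workerTaskCount);                          // one notify() expected per task

    TkGroupWorkerTask tasks[MAX_TASKS];
    for (uint32_t i = 0; i < workerTaskCount; ++i)
    {
        tasks[i].setup(&group, &counter, &sync);
        tasks[i].setContinuation(tm, nullptr);                 // assumed NvLightCpuTask idiom
        tasks[i].removeReference();                            // allows the task to be dispatched
    }

    sync.wait();                                               // each task notifies in release()
    group.endProcess();
}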
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkJointImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkJointImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkFamilyImpl.h" namespace Nv { namespace Blast { //////// Member functions //////// TkJointImpl::TkJointImpl(const TkJointDesc& desc, TkFamilyImpl* owner) : m_owner(owner) { userData = nullptr; // Do not fire off a creation event. Creation events will only be fired when a family-internal joint is created. NVBLAST_ASSERT(desc.families[0] != nullptr || desc.families[1] != nullptr); NVBLAST_ASSERT(desc.families[0] == nullptr || desc.chunkIndices[0] < static_cast<TkFamilyImpl*>(desc.families[0])->getAssetImpl()->getChunkCount()); NVBLAST_ASSERT(desc.attachPositions[0].isFinite()); NVBLAST_ASSERT(desc.families[1] == nullptr || desc.chunkIndices[1] < static_cast<TkFamilyImpl*>(desc.families[1])->getAssetImpl()->getChunkCount()); NVBLAST_ASSERT(desc.attachPositions[1].isFinite()); for (int i = 0; i < 2; ++i) { m_data.actors[i] = desc.families[i] != nullptr ? 
static_cast<TkFamilyImpl*>(desc.families[i])->getActorByChunk(desc.chunkIndices[i]) : nullptr; m_data.chunkIndices[i] = desc.chunkIndices[i]; m_data.attachPositions[i] = desc.attachPositions[i]; m_links[i].m_joint = this; } if (owner == nullptr) { TkFrameworkImpl::get()->onCreate(*this); } } void TkJointImpl::release() { removeReferencesInActors(); if (m_owner != nullptr) { // Internal joint m_owner->releaseJoint(*this); } else { // External joint removeReferencesInFamilies(); TkFrameworkImpl::get()->onDestroy(*this); NVBLAST_DELETE(this, TkJointImpl); } } void TkJointImpl::setActors(TkActorImpl* actor0, TkActorImpl* actor1, TkEventQueue* alternateQueue) { NVBLAST_ASSERT(m_data.actors[0] != nullptr || m_data.actors[1] != nullptr); const bool unreferenced = (actor0 == nullptr && m_data.actors[0] != nullptr) || (actor1 == nullptr && m_data.actors[1] != nullptr); removeReferencesInActors(); if (!unreferenced) { if (actor0 != nullptr) { actor0->addJoint(m_links[0]); } if (actor1 != nullptr && actor1 != actor0) // If the actors are the same, we only need one joint reference { actor1->addJoint(m_links[1]); } } // We do _not_ return if m_data.m_actors[0] == actor0 && m_data.m_actors[1] == actor1 since // this leads to a bug. This function will only be called when an actor is split. It is // possible that the two TkActors in a joint are the same as before, but in this case one // of the actors will be the split actor. Since will be represented by a different // physical actor, this case still needs to be reported in an event. Returning when neither // TkActor has changed will prevent that, and lead to unwanted joint disconnection. const uint32_t familyToUse = m_data.actors[0] != actor0 ? 0 : 1; TkEventQueue* q = alternateQueue == nullptr ? &static_cast<TkActorImpl*>(m_data.actors[familyToUse])->getFamilyImpl().getQueue() : alternateQueue; const bool jointWasInternal = m_data.actors[0] == m_data.actors[1]; if (unreferenced) { removeReferencesInFamilies(); actor0 = actor1 = nullptr; // Make both new actors NULL } if (!jointWasInternal || actor0 != actor1) { // The original actors were different, or they are now, signal a joint update TkJointUpdateEvent* e = q->allocData<TkJointUpdateEvent>(); e->joint = this; e->subtype = unreferenced ? TkJointUpdateEvent::Unreferenced : (jointWasInternal ? TkJointUpdateEvent::External : TkJointUpdateEvent::Changed); m_data.actors[0] = actor0; m_data.actors[1] = actor1; q->addEvent(e); } else if (jointWasInternal) { // The joint was originally created within the same actor and now it remains within the same actor. m_data.actors[0] = m_data.actors[1] = actor0; } } const TkJointData TkJointImpl::getData() const { return getDataInternal(); } void TkJointImpl::removeReferencesInActors() { TkActorImpl* actor0 = static_cast<TkActorImpl*>(m_data.actors[0]); TkActorImpl* actor1 = static_cast<TkActorImpl*>(m_data.actors[1]); if (actor0 != nullptr) { actor0->removeJoint(m_links[0]); } if (actor1 != nullptr && actor1 != actor0) // If the actors are the same, we only had one joint reference { actor1->removeJoint(m_links[1]); } } void TkJointImpl::removeReferencesInFamilies() { if (m_owner != nullptr) { return; // Only concerned with external joints } NVBLAST_ASSERT(m_data.actors[0] != m_data.actors[1] || m_data.actors[0] == nullptr); // This is enforced by the initial assumption in TkFrameworkImpl::createJoint. 
for (int i = 0; i < 2; ++i) { if (m_data.actors[i] != nullptr) { TkFamilyImpl& family = static_cast<TkActorImpl*>(m_data.actors[i])->getFamilyImpl(); TkJointImpl* joint = nullptr; const bool found = family.deleteExternalJointHandle(joint, getFamilyID(m_data.actors[i ^ 1]), m_data.chunkIndices[i], m_data.chunkIndices[i ^ 1]); NVBLAST_ASSERT((!found && m_data.actors[i ^ 1] == nullptr) || joint == this); // Might not be found if the actors in a family are in the process of being deleted NV_UNUSED(found); } } } } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkAssetImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKASSETIMPL_H #define NVBLASTTKASSETIMPL_H #include "NvBlastTkCommon.h" #include "NvBlastTkJoint.h" #include "NvBlastTkAsset.h" #include "NvBlastTkTypeImpl.h" #include "NvBlastArray.h" // Forward declarations struct NvBlastAsset; namespace Nv { namespace Blast { /** Implementation of TkAsset */ NVBLASTTK_IMPL_DECLARE(Asset) { public: TkAssetImpl(); TkAssetImpl(const NvBlastID& id); ~TkAssetImpl(); NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE('A', 'S', 'S', 'T'); // Public methods /** Factory create method. This method creates a low-level asset and stores a reference to it. \param[in] desc Asset descriptor set by the user. \return a pointer to a new TkAssetImpl object if successful, NULL otherwise. */ static TkAssetImpl* create(const TkAssetDesc& desc); /** Static method to create an asset from an existing low-level asset. \param[in] assetLL A valid low-level asset passed in by the user. \param[in] jointDescs Optional joint descriptors to add to the new asset. \param[in] jointDescCount The number of joint descriptors in the jointDescs array. If non-zero, jointDescs cannot be NULL. \param[in] ownsAsset Whether or not to let this TkAssetImpl object release the low-level NvBlastAsset memory upon its own release. \return a pointer to a new TkAssetImpl object if successful, NULL otherwise. */ static TkAssetImpl* create(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs = nullptr, uint32_t jointDescCount = 0, bool ownsAsset = false); /** \return a pointer to the underlying low-level NvBlastAsset associated with this asset. */ const NvBlastAsset* getAssetLLInternal() const; /** \return the number of internal joint descriptors stored with this asset. */ uint32_t getJointDescCountInternal() const; /** \return the array of internal joint descriptors stored with this asset, with size given by getJointDescCountInternal(). 
*/ const TkAssetJointDesc* getJointDescsInternal() const; // Begin TkAsset virtual const NvBlastAsset* getAssetLL() const override; virtual uint32_t getChunkCount() const override; virtual uint32_t getLeafChunkCount() const override; virtual uint32_t getBondCount() const override; virtual const NvBlastChunk* getChunks() const override; virtual const NvBlastBond* getBonds() const override; virtual const NvBlastSupportGraph getGraph() const override; virtual uint32_t getDataSize() const override; virtual uint32_t getJointDescCount() const override; virtual const TkAssetJointDesc* getJointDescs() const override; // End TkAsset private: /** Utility to add a joint descriptor between the indexed chunks. The two chunks must be support chunks, and there must exist a bond between them. The joint's attachment positions will be the bond centroid. \param[in] chunkIndex0 The first chunk index. \param[in] chunkIndex1 The second chunk index. \return true iff successful. */ bool addJointDesc(uint32_t chunkIndex0, uint32_t chunkIndex1); NvBlastAsset* m_assetLL; //!< The underlying low-level asset. Array<TkAssetJointDesc>::type m_jointDescs; //!< The array of internal joint descriptors. bool m_ownsAsset; //!< Whether or not this asset should release its low-level asset upon its own release. }; //////// TkAssetImpl inline methods //////// NV_INLINE const NvBlastAsset* TkAssetImpl::getAssetLLInternal() const { return m_assetLL; } NV_INLINE uint32_t TkAssetImpl::getJointDescCountInternal() const { return m_jointDescs.size(); } NV_INLINE const TkAssetJointDesc* TkAssetImpl::getJointDescsInternal() const { return m_jointDescs.begin(); } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKASSETIMPL_H
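// Usage sketch (not part of the SDK): reading asset topology back through the public TkAsset
// interface implemented above; the include names are assumed.
#include "NvBlastTkAsset.h"
#include "NvBlast.h"
#include <cstdio>

inline void printAssetSummary(const Nv::Blast::TkAsset& asset)
{
    std::printf("chunks: %u (leaf: %u), bonds: %u, internal joint descs: %u, data size: %u bytes\n",
                asset.getChunkCount(), asset.getLeafChunkCount(), asset.getBondCount(),
                asset.getJointDescCount(), asset.getDataSize());

    // The raw low-level arrays are also exposed for direct inspection.
    const NvBlastChunk* chunks = asset.getChunks();    // getChunkCount() entries
    const NvBlastBond*  bonds  = asset.getBonds();     // getBondCount() entries
    (void)chunks;
    (void)bonds;
}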
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkEventQueue.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKEVENTQUEUE_H #define NVBLASTTKEVENTQUEUE_H #include <algorithm> #include <vector> #include <mutex> #include <atomic> #include "NvBlastTkFrameworkImpl.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast { /** A dispatcher queue providing preallocation and thread-safe insertions therein. Typical usage: - preallocate space for events and payload: - reserveEvents, reserveData - enable asserts to detect undersized storage (allocations are not thread safe): - protect(true) - get pointers to payload data and events to fill in, thread safe for preallocated memory: - allocData, addEvent - back on main thread, ensure consistency: - protect(false) - continue adding events and payload on main thread if necessary like above (allocations are safe here) eventually dispatch, or reset if dispatched by proxy */ class TkEventQueue { public: TkEventQueue() : m_currentEvent(0), m_poolCapacity(0), m_pool(nullptr), m_allowAllocs(true) {} /** Peek events queue for dispatch. Do not use in protected state. */ operator const Array<TkEvent>::type&() { NVBLAST_ASSERT(m_allowAllocs); NVBLAST_ASSERT(m_currentEvent == m_events.size()); return m_events; } /** Debug help to catch (unwanted) allocations during task work. Note that this will not actually avoid allocations, but assert in debug builds. Set true before using in distributed environment. Set false to return to single-thread mode. */ void protect(bool enable) { // During parallel use, m_events.size() and m_currentEvent are allowed to diverge. // This is fine because resizeUninitialized does not alter the stored data. NVBLAST_ASSERT(m_currentEvent <= m_events.capacity()); m_events.resizeUninitialized(m_currentEvent); m_allowAllocs = !enable; } /** Restores initial state. Data memory is currently not being reused. To be improved. 
*/ void reset() { m_events.clear(); m_currentEvent = 0; for (void* mem : m_memory) { NVBLAST_FREE(mem); } m_memory.clear(); m_currentData = 0; m_allowAllocs = true; m_poolCapacity = 0; m_pool = nullptr; } /** Queue an event with a payload. */ template<class T> void addEvent(T* payload) { uint32_t index = m_currentEvent.fetch_add(1); // Should not allocate in protected state. NVBLAST_ASSERT(m_allowAllocs || m_currentEvent <= m_events.capacity()); m_events.resizeUninitialized(m_currentEvent); // During parallel use, m_events.size() and m_currentEvent are allowed to diverge. // Consistency is restored in protect(). NVBLAST_ASSERT(!m_allowAllocs || m_currentEvent == m_events.size()); TkEvent& evt = m_events[index]; evt.type = TkEvent::Type(T::EVENT_TYPE); evt.payload = payload; } /** Request storage for payload. */ template<typename T> T* allocData() { uint32_t index = m_currentData.fetch_add(sizeof(T)); if (m_currentData <= m_poolCapacity) { return reinterpret_cast<T*>(&m_pool[index]); } else { // Could do larger block allocation here. reserveData(sizeof(T)); // Account for the requested size. m_currentData = sizeof(T); return reinterpret_cast<T*>(&m_pool[0]); } } /** Preallocate a memory block of size Bytes for payload data. Note that this will inevitably allocate a new memory block. Subsequent calls to allocData will use this memory piecewise. */ void reserveData(size_t size) { NVBLAST_ASSERT(m_allowAllocs); m_pool = reinterpret_cast<uint8_t*>(allocDataBySize(size)); m_poolCapacity = size; m_currentData = 0; } /** Preallocate space for events. */ void reserveEvents(uint32_t n) { NVBLAST_ASSERT(m_allowAllocs); m_events.reserve(m_events.size() + n); } /** Add a listener to dispatch to. */ void addListener(TkEventListener& l) { m_listeners.pushBack(&l); } /** Remove a listener from dispatch list. */ void removeListener(TkEventListener& l) { m_listeners.findAndReplaceWithLast(&l); } /** Dispatch the stored events to the registered listeners. After dispatch, all data is invalidated. */ void dispatch() { dispatch(*this); reset(); } /** Proxy function to dispatch events to this queue's listeners. */ void dispatch(const Array<TkEvent>::type& events) const { if (events.size()) { for (TkEventListener* l : m_listeners) { BLAST_PROFILE_SCOPE_M("TkEventQueue::dispatch"); l->receive(events.begin(), events.size()); } } } private: /** Allocates and stores a block of size Bytes of payload data. */ void* allocDataBySize(size_t size) { void* memory = nullptr; if (size > 0) { memory = NVBLAST_ALLOC_NAMED(size, "TkEventQueue Data"); m_memory.pushBack(memory); } return memory; } Array<TkEvent>::type m_events; //!< holds events Array<void*>::type m_memory; //!< holds allocated data memory blocks std::atomic<uint32_t> m_currentEvent; //!< reference index for event insertion std::atomic<uint32_t> m_currentData; //!< reference index for data insertion size_t m_poolCapacity; //!< size of the currently active memory block (m_pool) uint8_t* m_pool; //!< the current memory block allocData() uses bool m_allowAllocs; //!< assert guard InlineArray<TkEventListener*,4>::type m_listeners; //!< objects to dispatch to }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKEVENTQUEUE_H
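// Usage sketch (not part of the SDK): the preallocate / protect / dispatch cycle described in
// the class comment above, using TkJointUpdateEvent as the payload type exactly as
// TkJointImpl::setActors() does (any type with an EVENT_TYPE member works with addEvent()).
// The include for the toolkit event types is omitted here and assumed to come from elsewhere.
inline void emitJointChangedEvents(Nv::Blast::TkEventQueue& queue,
                                   Nv::Blast::TkJoint* const* joints, uint32_t jointCount)
{
    using namespace Nv::Blast;

    // Main thread: preallocate so worker threads never allocate, then guard with protect(true).
    queue.reserveEvents(jointCount);
    queue.reserveData(jointCount * sizeof(TkJointUpdateEvent));
    queue.protect(true);

    // Worker threads (here just a loop): request payload storage and queue the events.
    for (uint32_t i = 0; i < jointCount; ++i)
    {
        TkJointUpdateEvent* e = queue.allocData<TkJointUpdateEvent>();
        e->joint   = joints[i];
        e->subtype = TkJointUpdateEvent::Changed;
        queue.addEvent(e);
    }

    // Main thread again: restore event-array consistency, then send everything to the listeners.
    queue.protect(false);
    queue.dispatch();
}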
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkJointImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKJOINTIMPL_H #define NVBLASTTKJOINTIMPL_H #include "NvBlastTkJoint.h" #include "NvBlastTkCommon.h" #include "NvBlastIndexFns.h" #include "NvBlastAssert.h" #include "NvBlastDLink.h" #include <atomic> namespace Nv { namespace Blast { // Forward declarations class TkActorImpl; class TkJointImpl; class TkFamilyImpl; class TkEventQueue; /** Double-sided link (DLink) which holds a reference back to a joint which contains it. */ struct TkJointLink : public DLink { TkJointImpl* m_joint; //!< The joint containing this link. }; /** Implementation of TkJoint. */ class TkJointImpl : public TkJoint { public: /** Blank constructor only creates valid TkJointLinks (point back to this object) */ TkJointImpl(); /** This constructor sets all internal data. If the joint is defined in an asset, the family instanced from that asset will own this joint, and the 'owner' parameter is that family. Otherwise, in the case where a joint is created from TkFramwork::createJoint, the joint is not owned by a family and 'owner' will be NULL. */ TkJointImpl(const TkJointDesc& desc, TkFamilyImpl* owner); // Begin TkObject virtual void release() override; // End TkObject // Begin TkJoint virtual const TkJointData getData() const override; // End TkJoint // Public API /** Internal method to access a const reference to the joint data. \return a const reference to the joint data. */ const TkJointData& getDataInternal() const; /** Internal method to access a non-const reference to the joint data. \return a non-const reference to the joint data. */ TkJointData& getDataWritable(); /** Set the actors that this joint attaches to. When the actors are different from the joint's current actors, an event will be generated on one of the actors' families event queues to signal the change. Alternatively, if alternateQueue is not NULL then it will be used to hold the event. 
If a non-NULL attached actor becomes NULL, then this joint will detach its references to both actors (if they exist) and send an event of subtype Unreferenced. This signals the user that the joint may be deleted. \param[in] actor0 The new TkActor to replace the first attached actor. \param[in] actor1 The new TkActor to replace the second attached actor. \param[in] alternateQueue If not NULL, this queue will be used to hold events generated by this function. */ void setActors(TkActorImpl* actor0, TkActorImpl* actor1, TkEventQueue* alternateQueue = nullptr); /** Ensures that any attached actors no longer refer to this joint. */ void removeReferencesInActors(); /** Ensures that any attached actors' families no longer refer to this joint. External joints (created using TkFramework::createJoint) are referenced by the attached actors' families. */ void removeReferencesInFamilies(); private: TkJointData m_data; //!< The data given to the user: attached actors, chunk indices, and actor-local attachment positions. TkJointLink m_links[2]; //!< One link for each actor in m_data.m_actors. If m_data.m_actors[0] == m_data.m_actors[1], then only m_links[0] is used. TkFamilyImpl* m_owner; //!< The owning family if this is an internal joint created during TkFramework::createActor() from a TkAssetDesc with joint flags. friend class TkFrameworkImpl; friend class TkFamilyImpl; friend class TkActorImpl; }; //////// TkJointImpl inline methods //////// NV_INLINE TkJointImpl::TkJointImpl() { m_links[0].m_joint = m_links[1].m_joint = this; } NV_INLINE const TkJointData& TkJointImpl::getDataInternal() const { return m_data; } NV_INLINE TkJointData& TkJointImpl::getDataWritable() { return m_data; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKJOINTIMPL_H
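// Usage sketch (not part of the SDK): reading a joint's current attachment through the public
// getData() accessor; the TkJointData field names match the members TkJointImpl uses above.
inline bool jointSpansTwoActors(const Nv::Blast::TkJoint& joint)
{
    const Nv::Blast::TkJointData data = joint.getData();
    // actors[], chunkIndices[] and attachPositions[] describe the two attachment points.
    return data.actors[0] != data.actors[1];
}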
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkFamilyImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKFAMILYIMPL_H #define NVBLASTTKFAMILYIMPL_H #include "NvBlastTkCommon.h" #include "NvBlastTkFamily.h" #include "NvBlastTkTypeImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkEventQueue.h" #include "NvBlastHashSet.h" #include "NvBlastHashMap.h" #include "NvBlast.h" #include "NvBlastAssert.h" #include "NvBlastDLink.h" // Forward declarations struct NvBlastFamily; namespace Nv { namespace Blast { // Forward declarations class TkGroupImpl; class TkAssetImpl; NVBLASTTK_IMPL_DECLARE(Family) { public: TkFamilyImpl(); TkFamilyImpl(const NvBlastID& id); ~TkFamilyImpl(); NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE('A', 'C', 'T', 'F'); // Begin TkFamily virtual const NvBlastFamily* getFamilyLL() const override; virtual uint32_t getActorCount() const override; virtual uint32_t getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart = 0) const override; virtual void addListener(TkEventListener& l) override { m_queue.addListener(l); } virtual void removeListener(TkEventListener& l) override { m_queue.removeListener(l); } virtual void applyFracture(const NvBlastFractureBuffers* commands) override { applyFractureInternal(commands); } virtual const TkAsset* getAsset() const override; virtual void reinitialize(const NvBlastFamily* newFamily, TkGroup* group) override; // End TkFamily // Public methods static TkFamilyImpl* create(const TkAssetImpl* asset); const TkAssetImpl* getAssetImpl() const; NvBlastFamily* getFamilyLLInternal() const; uint32_t getActorCountInternal() const; TkActorImpl* addActor(NvBlastActor* actorLL); void applyFractureInternal(const NvBlastFractureBuffers* commands); void removeActor(TkActorImpl* actorLL); TkEventQueue& getQueue() { return m_queue; } TkActorImpl* getActorByActorLL(const NvBlastActor* actorLL); void updateJoints(TkActorImpl* actor, TkEventQueue* alternateQueue = nullptr); Array<TkActorImpl>::type& getActorsInternal(); uint32_t getInternalJointCount() const; TkJointImpl* getInternalJoints() const; TkJointImpl** 
createExternalJointHandle(const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1); bool deleteExternalJointHandle(TkJointImpl*& joint, const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1); void releaseJoint(TkJointImpl& joint); TkActorImpl* getActorByChunk(uint32_t chunkIndex); typedef nvidia::shdfnd::Pair<uint32_t, uint32_t> ExternalJointKey; //!< The chunk indices within the TkFamily objects joined by the joint. These chunks will be support chunks. TkJointImpl* findExternalJoint(const TkFamilyImpl* otherFamily, ExternalJointKey key) const; private: TkActorImpl* getActorByIndex(uint32_t index); struct JointSet { NvBlastID m_familyID; HashMap<ExternalJointKey, TkJointImpl*>::type m_joints; }; typedef HashMap<NvBlastID, uint32_t>::type FamilyIDMap; NvBlastFamily* m_familyLL; Array<TkActorImpl>::type m_actors; uint32_t m_internalJointCount; Array<uint8_t>::type m_internalJointBuffer; Array<JointSet*>::type m_jointSets; FamilyIDMap m_familyIDMap; const TkAssetImpl* m_asset; TkEventQueue m_queue; }; //////// TkFamilyImpl inline methods //////// NV_INLINE const TkAssetImpl* TkFamilyImpl::getAssetImpl() const { return m_asset; } NV_INLINE NvBlastFamily* TkFamilyImpl::getFamilyLLInternal() const { return m_familyLL; } NV_INLINE uint32_t TkFamilyImpl::getActorCountInternal() const { NVBLAST_ASSERT(m_familyLL != nullptr); return NvBlastFamilyGetActorCount(m_familyLL, logLL); } NV_INLINE TkActorImpl* TkFamilyImpl::getActorByIndex(uint32_t index) { NVBLAST_ASSERT(index < m_actors.size()); return &m_actors[index]; } NV_INLINE TkActorImpl* TkFamilyImpl::getActorByActorLL(const NvBlastActor* actorLL) { uint32_t index = NvBlastActorGetIndex(actorLL, logLL); return getActorByIndex(index); } NV_INLINE Array<TkActorImpl>::type& TkFamilyImpl::getActorsInternal() { return m_actors; } NV_INLINE uint32_t TkFamilyImpl::getInternalJointCount() const { return m_internalJointCount; } NV_INLINE TkJointImpl* TkFamilyImpl::getInternalJoints() const { return const_cast<TkJointImpl*>(reinterpret_cast<const TkJointImpl*>(m_internalJointBuffer.begin())); } NV_INLINE void TkFamilyImpl::releaseJoint(TkJointImpl& joint) { NVBLAST_ASSERT(joint.m_owner == this); NVBLAST_ASSERT(&joint >= getInternalJoints() && &joint < getInternalJoints() + getInternalJointCount() * sizeof(TkJointImpl)); joint.~TkJointImpl(); joint.m_owner = nullptr; } //////// Inline global functions //////// NV_INLINE const NvBlastID& getFamilyID(const TkActor* actor) { return actor != nullptr ? static_cast<const TkActorImpl*>(actor)->getFamilyImpl().getIDInternal() : *reinterpret_cast<const NvBlastID*>("\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"); } NV_INLINE const NvBlastID& getFamilyID(const TkFamilyImpl* family) { return family != nullptr ? family->getIDInternal() : *reinterpret_cast<const NvBlastID*>("\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"); } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKFAMILYIMPL_H
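// Usage sketch (not part of the SDK): hooking a listener up to a family and enumerating its
// actors through the TkFamily interface declared above.
#include "NvBlastTkFamily.h"
#include <vector>

inline void watchFamily(Nv::Blast::TkFamily& family, Nv::Blast::TkEventListener& listener)
{
    family.addListener(listener);   // listener receives this family's events on dispatch

    std::vector<Nv::Blast::TkActor*> actors(family.getActorCount());
    if (!actors.empty())
    {
        family.getActors(actors.data(), static_cast<uint32_t>(actors.size()));
    }
}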
NVIDIA-Omniverse/PhysX/blast/source/sdk/globals/NvBlastInternalProfiler.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTINTERNALPROFILER_H #define NVBLASTINTERNALPROFILER_H #include "NvPreprocessor.h" #if NV_NVTX #include "nvToolsExt.h" NV_INLINE void platformZoneStart(const char* name) { nvtxRangePushA(name); } NV_INLINE void platformZoneEnd() { nvtxRangePop(); } #else NV_INLINE void platformZoneStart(const char*) { } NV_INLINE void platformZoneEnd() { } #endif namespace Nv { namespace Blast { /** Profiler detail to be reported. The higher setting is used, the more details are reported. 
*/ struct InternalProfilerDetail { enum Level { LOW, MEDIUM, HIGH }; }; NV_C_API void NvBlastInternalProfilerSetPlatformEnabled(bool platformEnabled); NV_C_API void NvBlastInternalProfilerSetDetail(Nv::Blast::InternalProfilerDetail::Level); NV_C_API Nv::Blast::InternalProfilerDetail::Level NvBlastInternalProfilerGetDetail(); #if NV_PROFILE NV_C_API void NvBlastProfilerBegin(const char* name, Nv::Blast::InternalProfilerDetail::Level); NV_C_API void NvBlastProfilerEnd(const void* name, Nv::Blast::InternalProfilerDetail::Level); class ProfileScope { public: ProfileScope(const char* name, InternalProfilerDetail::Level level) :m_name(name), m_level(level) { NvBlastProfilerBegin(m_name, m_level); } ~ProfileScope() { NvBlastProfilerEnd(m_name, m_level); } private: const char* m_name; InternalProfilerDetail::Level m_level; }; #define BLAST_PROFILE_PREFIX "Blast: " #define BLAST_PROFILE_ZONE_BEGIN(name) Nv::Blast::NvBlastProfilerBegin(BLAST_PROFILE_PREFIX name, Nv::Blast::InternalProfilerDetail::HIGH) #define BLAST_PROFILE_ZONE_END(name) Nv::Blast::NvBlastProfilerEnd(BLAST_PROFILE_PREFIX name, Nv::Blast::InternalProfilerDetail::HIGH) #define BLAST_PROFILE_SCOPE(name, detail) Nv::Blast::ProfileScope NV_CONCAT(_scope,__LINE__) (BLAST_PROFILE_PREFIX name, detail) #define BLAST_PROFILE_SCOPE_L(name) BLAST_PROFILE_SCOPE(name, Nv::Blast::InternalProfilerDetail::LOW) #define BLAST_PROFILE_SCOPE_M(name) BLAST_PROFILE_SCOPE(name, Nv::Blast::InternalProfilerDetail::MEDIUM) #define BLAST_PROFILE_SCOPE_H(name) BLAST_PROFILE_SCOPE(name, Nv::Blast::InternalProfilerDetail::HIGH) #else #define BLAST_PROFILE_ZONE_BEGIN(name) #define BLAST_PROFILE_ZONE_END(name) #define BLAST_PROFILE_SCOPE_L(name) #define BLAST_PROFILE_SCOPE_M(name) #define BLAST_PROFILE_SCOPE_H(name) #endif } // namespace Blast } // namespace Nv #endif
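// --- Illustrative usage sketch (not part of the SDK) -------------------------
// A minimal sketch of how the zone/scope macros above are meant to be used in
// SDK code, assuming this header is included and NV_PROFILE is defined (when it
// is not, the macros compile away). The function and workload are hypothetical.

#include "NvBlastInternalProfiler.h"

static void exampleProfiledWork()
{
    BLAST_PROFILE_SCOPE_L("exampleProfiledWork");               // low-detail zone covering the whole function

    for (int i = 0; i < 16; ++i)
    {
        BLAST_PROFILE_ZONE_BEGIN("exampleProfiledWork::step");  // reported only at HIGH detail
        // ... hypothetical per-iteration work ...
        BLAST_PROFILE_ZONE_END("exampleProfiledWork::step");
    }
}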
NVIDIA-Omniverse/PhysX/blast/source/sdk/globals/NvBlastGlobals.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastGlobals.h" #include "NvBlastAssert.h" #include "NvAllocatorCallback.h" #include "NvErrorCallback.h" #include "NsGlobals.h" #include <cstdlib> #include <sstream> #include <iostream> #if NV_WINDOWS_FAMILY #include <windows.h> #endif #if NV_WINDOWS_FAMILY || NV_LINUX_FAMILY #include <malloc.h> #endif namespace Nv { namespace Blast { #if NV_WINDOWS_FAMILY // on win32 we only have 8-byte alignment guaranteed, but the CRT provides special aligned allocation fns NV_FORCE_INLINE void* platformAlignedAlloc(size_t size) { return _aligned_malloc(size, 16); } NV_FORCE_INLINE void platformAlignedFree(void* ptr) { _aligned_free(ptr); } #elif NV_LINUX_FAMILY NV_FORCE_INLINE void* platformAlignedAlloc(size_t size) { return ::memalign(16, size); } NV_FORCE_INLINE void platformAlignedFree(void* ptr) { ::free(ptr); } #else NV_FORCE_INLINE void* platformAlignedAlloc(size_t size) { const int A = 16; unsigned char* mem = (unsigned char*)malloc(size + A); const unsigned char offset = (unsigned char)((uintptr_t)A - (uintptr_t)mem % A - 1); mem += offset; *mem++ = offset; return mem; } NV_FORCE_INLINE void platformAlignedFree(void* ptr) { if (ptr != nullptr) { unsigned char* mem = (unsigned char*)ptr; const unsigned char offset = *--mem; ::free(mem - offset); } } #endif class DefaultAllocatorCallback : public nvidia::NvAllocatorCallback { public: virtual void* allocate(size_t size, const char* typeName, const char* filename, int line) override { NV_UNUSED(typeName); NV_UNUSED(filename); NV_UNUSED(line); return platformAlignedAlloc(size); } virtual void deallocate(void* ptr) override { platformAlignedFree(ptr); } }; DefaultAllocatorCallback s_defaultAllocatorCallback; class DefaultErrorCallback : public nvidia::NvErrorCallback { virtual void reportError(nvidia::NvErrorCode::Enum code, const char* msg, const char* file, int line) override { #if 1 || NV_DEBUG || NV_CHECKED std::stringstream str; str << "NvBlast "; bool critical = false; switch (code) { case 
nvidia::NvErrorCode::eNO_ERROR: str << "[Info]"; critical = false; break; case nvidia::NvErrorCode::eDEBUG_INFO: str << "[Debug Info]"; critical = false; break; case nvidia::NvErrorCode::eDEBUG_WARNING: str << "[Debug Warning]"; critical = false; break; case nvidia::NvErrorCode::eINVALID_PARAMETER: str << "[Invalid Parameter]"; critical = true; break; case nvidia::NvErrorCode::eINVALID_OPERATION: str << "[Invalid Operation]"; critical = true; break; case nvidia::NvErrorCode::eOUT_OF_MEMORY: str << "[Out of Memory]"; critical = true; break; case nvidia::NvErrorCode::eINTERNAL_ERROR: str << "[Internal Error]"; critical = true; break; case nvidia::NvErrorCode::eABORT: str << "[Abort]"; critical = true; break; case nvidia::NvErrorCode::ePERF_WARNING: str << "[Perf Warning]"; critical = false; break; default: NVBLAST_ASSERT(false); } str << file << "(" << line << "): " << msg << "\n"; std::string message = str.str(); std::cout << message; #if NV_WINDOWS_FAMILY OutputDebugStringA(message.c_str()); #endif NVBLAST_ASSERT_WITH_MESSAGE(!critical, message.c_str()); #else NV_UNUSED(code); NV_UNUSED(msg); NV_UNUSED(file); NV_UNUSED(line); #endif } }; static DefaultErrorCallback s_defaultErrorCallback; static nvidia::NvAllocatorCallback* s_allocatorCallback = &s_defaultAllocatorCallback; static nvidia::NvErrorCallback* s_errorCallback = &s_defaultErrorCallback; nvidia::NvProfilerCallback *g_profilerCallback = nullptr; } // namespace Blast } // namespace Nv //////// Global API implementation //////// nvidia::NvAllocatorCallback* NvBlastGlobalGetAllocatorCallback() { return Nv::Blast::s_allocatorCallback; } void NvBlastGlobalSetAllocatorCallback(nvidia::NvAllocatorCallback* allocator) { Nv::Blast::s_allocatorCallback = allocator ? allocator : &Nv::Blast::s_defaultAllocatorCallback; } nvidia::NvErrorCallback* NvBlastGlobalGetErrorCallback() { return Nv::Blast::s_errorCallback; } void NvBlastGlobalSetErrorCallback(nvidia::NvErrorCallback* errorCallback) { Nv::Blast::s_errorCallback = errorCallback ? errorCallback : &Nv::Blast::s_defaultErrorCallback; } nvidia::NvProfilerCallback* NvBlastGlobalGetProfilerCallback() { return Nv::Blast::g_profilerCallback; } void NvBlastGlobalSetProfilerCallback(nvidia::NvProfilerCallback* profilerCallback) { Nv::Blast::g_profilerCallback = profilerCallback; }
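// --- Illustrative usage sketch (not part of the SDK) -------------------------
// A sketch of how a client might route Blast allocations through its own
// allocator using the global setters above. It assumes NvBlastGlobals.h
// declares NvBlastGlobalSetAllocatorCallback, and it uses C++17
// std::aligned_alloc for brevity (not available on MSVC); a real integration
// would call the engine's own 16-byte-aligned allocator instead.

#include "NvBlastGlobals.h"
#include "NvAllocatorCallback.h"
#include <cstdlib>

class ExampleAllocatorCallback : public nvidia::NvAllocatorCallback
{
public:
    virtual void* allocate(size_t size, const char* typeName, const char* filename, int line) override
    {
        NV_UNUSED(typeName); NV_UNUSED(filename); NV_UNUSED(line);
        // Blast expects at least 16-byte alignment (see DefaultAllocatorCallback above).
        // std::aligned_alloc requires the size to be a multiple of the alignment.
        return std::aligned_alloc(16, (size + 15) & ~size_t(15));
    }

    virtual void deallocate(void* ptr) override
    {
        std::free(ptr);
    }
};

static ExampleAllocatorCallback s_exampleAllocator;

static void installExampleAllocator()
{
    // Passing nullptr later would restore the built-in default allocator.
    NvBlastGlobalSetAllocatorCallback(&s_exampleAllocator);
}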
NVIDIA-Omniverse/PhysX/blast/source/sdk/globals/NvBlastInternalProfiler.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "stdint.h" #include "NvProfiler.h" #include "NvBlastGlobals.h" #include "NvBlastInternalProfiler.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast { #define SUPPORTS_THREAD_LOCAL (!NV_VC || NV_VC > 12) struct InternalProfilerData { const char* name; void* data; }; #if SUPPORTS_THREAD_LOCAL static const int32_t PROFILER_MAX_NESTED_DEPTH = 64; static thread_local InternalProfilerData th_ProfileData[PROFILER_MAX_NESTED_DEPTH]; static thread_local int32_t th_depth = 0; #endif extern nvidia::NvProfilerCallback *g_profilerCallback; /** Wraps the nvidia::NvProfilerCallback set in NvBlastGlobalSetProfilerCallback. */ class InternalProfiler { public: /** Construct a InternalProfiler with platform specific profiler signals disabled. */ InternalProfiler() : m_platformEnabled(false) {} void zoneStart(const char* name) { #if SUPPORTS_THREAD_LOCAL if (g_profilerCallback) { void* data = g_profilerCallback->zoneStart(name, false, 0xb1a57); if (th_depth < PROFILER_MAX_NESTED_DEPTH && th_depth >= 0) { th_ProfileData[th_depth].name = name; th_ProfileData[th_depth].data = data; th_depth++; } else { NVBLAST_ASSERT(th_depth < PROFILER_MAX_NESTED_DEPTH && th_depth >= 0); } } #endif if (m_platformEnabled) { platformZoneStart(name); } } void zoneEnd() { #if SUPPORTS_THREAD_LOCAL if (g_profilerCallback) { th_depth--; if (th_depth >= 0) { InternalProfilerData& pd = th_ProfileData[th_depth]; g_profilerCallback->zoneEnd(pd.data, pd.name, false, 0xb1a57); } else { NVBLAST_ASSERT(th_depth >= 0); } } #endif if (m_platformEnabled) { platformZoneEnd(); } } ////// local interface ////// /** Enable or disable platform specific profiler signals. Disabled by default. \param[in] enabled true enables, false disables platform profiler calls. 
*/ void setPlatformEnabled(bool enabled) { m_platformEnabled = enabled; } private: bool m_platformEnabled; }; static InternalProfiler g_InternalProfiler; static InternalProfilerDetail::Level g_ProfilerDetail = InternalProfilerDetail::LOW; void NvBlastInternalProfilerSetPlatformEnabled(bool platformEnabled) { return g_InternalProfiler.setPlatformEnabled(platformEnabled); } void NvBlastInternalProfilerSetDetail(InternalProfilerDetail::Level level) { g_ProfilerDetail = level; } InternalProfilerDetail::Level NvBlastProfilerGetDetail() { return g_ProfilerDetail; } void NvBlastProfilerBegin(const char* name, InternalProfilerDetail::Level level) { if (level <= NvBlastProfilerGetDetail()) { g_InternalProfiler.zoneStart(name); } } void NvBlastProfilerEnd(const void* /*name*/, InternalProfilerDetail::Level level) { if (level <= NvBlastProfilerGetDetail()) { g_InternalProfiler.zoneEnd(); } } } // namespace Blast } // namespace Nv
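// --- Illustrative usage sketch (not part of the SDK) -------------------------
// A sketch of how an application might configure the profiler wrapper above:
// attach an nvidia::NvProfilerCallback through the globals API (assumed to be
// declared in NvBlastGlobals.h), raise the detail level so HIGH zones are
// reported, and enable platform markers (NVTX ranges when NV_NVTX is defined).

#include "NvBlastGlobals.h"
#include "NvBlastInternalProfiler.h"

static void configureExampleProfiling(nvidia::NvProfilerCallback* appProfilerCallback)
{
    // zoneStart()/zoneEnd() on this callback will be invoked around each profiled zone.
    NvBlastGlobalSetProfilerCallback(appProfilerCallback);

    // Report every zone, including BLAST_PROFILE_SCOPE_H ones.
    Nv::Blast::NvBlastInternalProfilerSetDetail(Nv::Blast::InternalProfilerDetail::HIGH);

    // Additionally emit platform-specific markers where supported.
    Nv::Blast::NvBlastInternalProfilerSetPlatformEnabled(true);
}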
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastArray.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTARRAY_H #define NVBLASTARRAY_H #include "NvBlastAllocator.h" #include "NsInlineArray.h" namespace Nv { namespace Blast { /** Wrapped NvShared Array that uses NvBlastGlobals AllocatorCallback. */ template <class T> struct Array { typedef nvidia::shdfnd::Array<T, Allocator> type; }; /** Wrapped NvShared InlineArray that uses NvBlastGlobals AllocatorCallback. InlineArray is an array that pre-allocates space for N elements. */ template <class T, uint32_t N> struct InlineArray { typedef nvidia::shdfnd::InlineArray<T, N, Allocator> type; }; } // namespace Blast } // namespace Nv #endif // #ifndef NVBLASTARRAY_H
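// --- Illustrative usage sketch (not part of the SDK) -------------------------
// A minimal sketch of the wrapped array types, assuming the usual NvShared
// array interface (pushBack/size/operator[]). All heap allocations go through
// the NvBlastGlobals allocator via the Allocator policy.

#include "NvBlastArray.h"
#include <cstdint>

static uint32_t exampleSumArray()
{
    Nv::Blast::Array<uint32_t>::type values;           // heap-backed
    Nv::Blast::InlineArray<uint32_t, 8>::type scratch; // first 8 elements stored inline, no heap use

    for (uint32_t i = 0; i < 8; ++i)
    {
        values.pushBack(i);
        scratch.pushBack(i);
    }

    uint32_t sum = 0;
    for (uint32_t i = 0; i < values.size(); ++i)
    {
        sum += values[i];
    }
    return sum;   // 28
}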
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastFixedPriorityQueue.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTFIXEDPRIORITYQUEUE_H #define NVBLASTFIXEDPRIORITYQUEUE_H #include "NvBlastAssert.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { /*! FixedPriorityQueue is a priority queue container which is intended to be used with placement new on chunk of memory. It'll use following memory for data layout. As follows: // some memory char ​*buf = new char[64 *​ 1024]; // placement new on this memory FixedPriorityQueue<SomeClass>* arr = new (buf) FixedPriorityQueue<SomeClass>(); // you can get max requiredMemorySize by an array of 'capacity' elements count to use memory left buf = buf + FixedPriorityQueue<SomeClass>::requiredMemorySize(capacity); buf: +------------------------------------------------------------+ | uint32_t | T[0] | T[1] | T[2] | ... | +------------------------------------------------------------+ */ template <typename A> struct Less { bool operator()(const A& a, const A& b) const { return a < b; } }; template<class Element, class Comparator = Less<Element> > class FixedPriorityQueue : protected Comparator // inherit so that stateless comparators take no space { public: FixedPriorityQueue(const Comparator& less = Comparator()) : Comparator(less), mHeapSize(0) { } ~FixedPriorityQueue() { } static size_t requiredMemorySize(uint32_t capacity) { return align16(sizeof(FixedPriorityQueue<Element, Comparator>)) + align16(capacity * sizeof(Element)); } //! Get the element with the highest priority const Element top() const { return data()[0]; } //! Get the element with the highest priority Element top() { return data()[0]; } //! Check to whether the priority queue is empty bool empty() const { return (mHeapSize == 0); } //! Empty the priority queue void clear() { mHeapSize = 0; } //! Insert a new element into the priority queue. 
Only valid when size() is less than Capacity void push(const Element& value) { uint32_t newIndex; uint32_t parentIndex = parent(mHeapSize); for (newIndex = mHeapSize; newIndex > 0 && compare(value, data()[parentIndex]); newIndex = parentIndex, parentIndex= parent(newIndex)) { data()[ newIndex ] = data()[parentIndex]; } data()[newIndex] = value; mHeapSize++; NVBLAST_ASSERT(valid()); } //! Delete the highest priority element. Only valid when non-empty. Element pop() { NVBLAST_ASSERT(mHeapSize > 0); uint32_t i, child; //try to avoid LHS uint32_t tempHs = mHeapSize-1; mHeapSize = tempHs; Element min = data()[0]; Element last = data()[tempHs]; for (i = 0; (child = left(i)) < tempHs; i = child) { /* Find highest priority child */ const uint32_t rightChild = child + 1; child += ((rightChild < tempHs) & compare((data()[rightChild]), (data()[child]))) ? 1 : 0; if(compare(last, data()[child])) break; data()[i] = data()[child]; } data()[ i ] = last; NVBLAST_ASSERT(valid()); return min; } //! Make sure the priority queue sort all elements correctly bool valid() const { const Element& min = data()[0]; for(uint32_t i=1; i<mHeapSize; ++i) { if(compare(data()[i], min)) return false; } return true; } //! Return number of elements in the priority queue uint32_t size() const { return mHeapSize; } private: uint32_t mHeapSize; NV_FORCE_INLINE Element* data() { return (Element*)((char*)this + sizeof(FixedPriorityQueue<Element, Comparator>)); } NV_FORCE_INLINE Element* data() const { return (Element*)((char*)this + sizeof(FixedPriorityQueue<Element, Comparator>)); } bool compare(const Element& a, const Element& b) const { return Comparator::operator()(a,b); } static uint32_t left(uint32_t nodeIndex) { return (nodeIndex << 1) + 1; } static uint32_t parent(uint32_t nodeIndex) { return (nodeIndex - 1) >> 1; } FixedPriorityQueue<Element, Comparator>& operator = (const FixedPriorityQueue<Element, Comparator>); }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTFIXEDPRIORITYQUEUE_H
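// --- Illustrative usage sketch (not part of the SDK) -------------------------
// A minimal sketch of the placement-new pattern described in the class comment
// above: reserve a block sized by requiredMemorySize(), construct the queue in
// place, then push/pop. Capacity, element type and values are arbitrary.

#include "NvBlastFixedPriorityQueue.h"
#include <cstdint>
#include <new>

static void examplePriorityQueueUsage()
{
    typedef Nv::Blast::FixedPriorityQueue<uint32_t> Queue;   // uses Less<uint32_t> by default

    const uint32_t capacity = 32;
    char* buf = new char[Queue::requiredMemorySize(capacity)];
    Queue* queue = new (buf) Queue();

    // Push no more than 'capacity' elements.
    queue->push(5);
    queue->push(1);
    queue->push(9);

    while (!queue->empty())
    {
        uint32_t smallest = queue->pop();   // elements come out in increasing order: 1, 5, 9
        (void)smallest;
    }

    delete[] buf;   // the queue itself holds no resources of its own
}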
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastTime.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastTime.h" #include "NvBlast.h" #include <cstring> namespace Nv { namespace Blast { const double Time::s_secondsPerTick = Time::getTickDuration(); } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastTime.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTIME_H #define NVBLASTTIME_H #include "NvBlastTypes.h" namespace Nv { namespace Blast { class Time { public: Time() : m_lastTickCount(getTimeTicks()) {} int64_t getElapsedTicks() { const int64_t lastTickCount = m_lastTickCount; m_lastTickCount = getTimeTicks(); return m_lastTickCount - lastTickCount; } int64_t peekElapsedTicks() const { return getTimeTicks() - m_lastTickCount; } int64_t getLastTickCount() const { return m_lastTickCount; } static double seconds(int64_t ticks) { return s_secondsPerTick * ticks; } private: int64_t getTimeTicks() const; static double getTickDuration(); int64_t m_lastTickCount; static const double s_secondsPerTick; }; } // namespace Blast } // namespace Nv //////// Time inline functions for various platforms //////// #if NV_MICROSOFT_FAMILY #include "NvBlastIncludeWindows.h" NV_INLINE int64_t Nv::Blast::Time::getTimeTicks() const { LARGE_INTEGER a; QueryPerformanceCounter(&a); return a.QuadPart; } NV_INLINE double Nv::Blast::Time::getTickDuration() { LARGE_INTEGER a; QueryPerformanceFrequency(&a); return 1.0 / (double)a.QuadPart; } #elif NV_UNIX_FAMILY #include <time.h> NV_INLINE int64_t Nv::Blast::Time::getTimeTicks() const { struct timespec mCurrTimeInt; clock_gettime(CLOCK_REALTIME, &mCurrTimeInt); return (static_cast<int64_t>(mCurrTimeInt.tv_sec) * 1000000000) + (static_cast<int64_t>(mCurrTimeInt.tv_nsec)); } NV_INLINE double Nv::Blast::Time::getTickDuration() { return 1.e-9; } #endif #endif // #ifndef NVBLASTTIME_H
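// --- Illustrative usage sketch (not part of the SDK) -------------------------
// A small sketch of how the Time helper is typically used to measure a section
// of work in seconds. doSomeWork() is a hypothetical placeholder.

#include "NvBlastTime.h"

static double exampleMeasureSeconds(void (*doSomeWork)())
{
    Nv::Blast::Time timer;              // captures the current tick count

    doSomeWork();

    const int64_t ticks = timer.getElapsedTicks();   // also restarts the timer
    return Nv::Blast::Time::seconds(ticks);
}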
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastFixedBoolArray.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTFIXEDBOOLARRAY_H #define NVBLASTFIXEDBOOLARRAY_H #include "NvBlastAssert.h" #include "NvBlastMemory.h" #include <cstring> namespace Nv { namespace Blast { /*! FixedBoolArray is an array of bools of fixed size, it's intended to be used with placement new on chunk of memory. It'll use following memory for data layout. As follows: // some memory char ​*buf = new char[64 *​ 1024]; const uint32_t size = 100; // placement new on this memory FixedBoolArray* arr = new (buf) FixedBoolArray(size); // you can get max requiredMemorySize by an bitMap to use memory left buf = buf + FixedBoolArray<SomeClass>::requiredMemorySize(size); buf: +------------------------------------------------------------+ | uint32_t | bool0 | bool1 | bool2 | ... | +------------------------------------------------------------+ */ class FixedBoolArray { public: explicit FixedBoolArray(uint32_t size) { m_size = size; } static size_t requiredMemorySize(uint32_t size) { return align16(sizeof(FixedBoolArray)) + align16(size); } void clear() { memset(data(), 0, m_size); } void fill() { memset(data(), 1, m_size); } int test(uint32_t index) const { NVBLAST_ASSERT(index < m_size); return data()[index]; } void set(uint32_t index) { NVBLAST_ASSERT(index < m_size); data()[index] = 1; } void setData(const char* newData, uint32_t newSize) { m_size = newSize; memcpy(data(), newData, m_size); } const char* getData() const { return data(); } uint32_t getSize() const { return m_size; } void reset(uint32_t index) { NVBLAST_ASSERT(index < m_size); data()[index] = 0; } private: uint32_t m_size; NV_FORCE_INLINE char* data() { return ((char*)this + sizeof(FixedBoolArray)); } NV_FORCE_INLINE const char* data() const { return ((char*)this + sizeof(FixedBoolArray)); } private: FixedBoolArray(const FixedBoolArray& that); }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTFIXEDBOOLARRAY_H
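// --- Illustrative usage sketch (not part of the SDK) -------------------------
// A minimal sketch of the placement-new pattern for FixedBoolArray: size the
// buffer with requiredMemorySize(), construct in place, then set/test flags.
// Note that the constructor only stores the size, so clear() (or fill()) should
// be called before the flags are read. The sizes below are arbitrary.

#include "NvBlastFixedBoolArray.h"
#include <cstdint>
#include <new>

static void exampleBoolArrayUsage()
{
    const uint32_t count = 100;
    char* buf = new char[Nv::Blast::FixedBoolArray::requiredMemorySize(count)];
    Nv::Blast::FixedBoolArray* flags = new (buf) Nv::Blast::FixedBoolArray(count);

    flags->clear();        // all false
    flags->set(42);        // mark one entry

    if (flags->test(42))
    {
        flags->reset(42);  // back to false
    }

    delete[] buf;
}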
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastMath.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTMATH_H #define NVBLASTMATH_H #include <math.h> namespace Nv { namespace Blast { namespace VecMath { NV_INLINE void div(float a[3], float divisor) { for (int i = 0; i < 3; i++) a[i] /= divisor; } NV_INLINE void mul(float a[3], float multiplier) { for (int i = 0; i < 3; i++) a[i] *= multiplier; } NV_INLINE void add(const float a[3], float b[3]) { for (int i = 0; i < 3; i++) b[i] = a[i] + b[i]; } NV_INLINE void add(const float a[3], const float b[3], float r[3]) { for (int i = 0; i < 3; i++) r[i] = a[i] + b[i]; } NV_INLINE void sub(const float a[3], const float b[3], float r[3]) { for (int i = 0; i < 3; i++) r[i] = a[i] - b[i]; } NV_INLINE float dot(const float a[3], const float b[3]) { float r = 0; for (int i = 0; i < 3; i++) r += a[i] * b[i]; return r; } NV_INLINE float length(const float a[3]) { return sqrtf(dot(a, a)); } NV_INLINE float dist(const float a[3], const float b[3]) { float v[3]; sub(a, b, v); return length(v); } NV_INLINE float normal(const float a[3], float r[3]) { float d = length(a); for (int i = 0; i < 3; i++) r[i] = a[i] / d; return d; } } // namespace VecMath } // namespace Blast } // namespace Nv #endif // #ifndef NVBLASTMATH_H
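// --- Illustrative usage sketch (not part of the SDK) -------------------------
// A small sketch of the raw float[3] helpers above: compute the direction and
// distance from point a to point b. The values are arbitrary.

#include "NvBlastMath.h"

static float exampleDirectionAndDistance()
{
    const float a[3] = { 0.0f, 0.0f, 0.0f };
    const float b[3] = { 3.0f, 4.0f, 0.0f };

    float ab[3];
    Nv::Blast::VecMath::sub(b, a, ab);                            // ab = b - a

    float dir[3];
    const float distance = Nv::Blast::VecMath::normal(ab, dir);   // dir = normalized ab, returns |ab|

    (void)dir;
    return distance;   // 5.0f for the values above
}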
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastFixedQueue.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTFIXEDQUEUE_H #define NVBLASTFIXEDQUEUE_H #include "NvBlastAssert.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { /*! FixedQueue is a queue container which is intended to be used with placement new on chunk of memory. It'll use following memory for data layout. As follows: // some memory char ​*buf = new char[64 *​ 1024]; // placement new on this memory FixedQueue<SomeClass>* arr = new (buf) FixedQueue<SomeClass>(); // you can get max requiredMemorySize by an array of 'capacity' elements count to use memory left buf = buf + FixedQueue<SomeClass>::requiredMemorySize(capacity); */ template <class T> class FixedQueue { public: explicit FixedQueue(uint32_t maxEntries) : m_num(0), m_head(0), m_tail(0), m_maxEntries(maxEntries) { } static size_t requiredMemorySize(uint32_t capacity) { return align16(sizeof(FixedQueue<T>)) + align16(capacity * sizeof(T)); } T popFront() { NVBLAST_ASSERT(m_num>0); m_num--; T& element = data()[m_tail]; m_tail = (m_tail+1) % (m_maxEntries); return element; } T front() { NVBLAST_ASSERT(m_num>0); return data()[m_tail]; } T popBack() { NVBLAST_ASSERT(m_num>0); m_num--; m_head = (m_head-1) % (m_maxEntries); return data()[m_head]; } T back() { NVBLAST_ASSERT(m_num>0); uint32_t headAccess = (m_head-1) % (m_maxEntries); return data()[headAccess]; } bool pushBack(const T& element) { if (m_num == m_maxEntries) return false; data()[m_head] = element; m_num++; m_head = (m_head+1) % (m_maxEntries); return true; } bool empty() const { return m_num == 0; } uint32_t size() const { return m_num; } private: uint32_t m_num; uint32_t m_head; uint32_t m_tail; uint32_t m_maxEntries; T* data() { return (T*)((char*)this + sizeof(FixedQueue<T>)); } private: FixedQueue(const FixedQueue& that); }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTFIXEDQUEUE_H
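// --- Illustrative usage sketch (not part of the SDK) -------------------------
// A minimal sketch of the placement-new pattern for FixedQueue: a bounded FIFO
// constructed in caller-provided memory. Capacity and values are arbitrary.

#include "NvBlastFixedQueue.h"
#include <cstdint>
#include <new>

static void exampleQueueUsage()
{
    typedef Nv::Blast::FixedQueue<uint32_t> Queue;

    const uint32_t capacity = 16;
    char* buf = new char[Queue::requiredMemorySize(capacity)];
    Queue* queue = new (buf) Queue(capacity);

    // pushBack() returns false once 'capacity' entries are stored.
    queue->pushBack(1u);
    queue->pushBack(2u);
    queue->pushBack(3u);

    while (!queue->empty())
    {
        uint32_t next = queue->popFront();   // FIFO order: 1, 2, 3
        (void)next;
    }

    delete[] buf;
}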
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastFixedArray.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTFIXEDARRAY_H #define NVBLASTFIXEDARRAY_H #include "NvBlastAssert.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { /*! FixedArray is a sequential container which is intended to be used with placement new on a chunk of memory. It uses the following memory layout: // some memory char *buf = new char[64 * 1024]; // placement new on this memory FixedArray<SomeClass>* arr = new (buf) FixedArray<SomeClass>(); // requiredMemorySize(capacity) returns the memory needed for 'capacity' elements, so the remaining memory starts at buf = buf + FixedArray<SomeClass>::requiredMemorySize(capacity); buf: +------------------------------------------------------------+ | uint32_t | T[0] | T[1] | T[2] | ... 
| +------------------------------------------------------------+ !!!TODO: - check ctor/dtor of elements calls */ template <class T> class FixedArray { public: explicit FixedArray() : m_size(0) { } static size_t requiredMemorySize(uint32_t capacity) { return align16(sizeof(FixedArray<T>)) + align16(capacity * sizeof(T)); } NV_FORCE_INLINE T& pushBack(T& t) { new (data() + m_size) T(t); return data()[m_size++]; } T popBack() { NVBLAST_ASSERT(m_size); T t = data()[m_size - 1]; data()[--m_size].~T(); return t; } void clear() { for(T* first = data(); first < data() + m_size; ++first) first->~T(); m_size = 0; } NV_FORCE_INLINE void forceSize_Unsafe(uint32_t s) { m_size = s; } NV_FORCE_INLINE T& operator[](uint32_t idx) { NVBLAST_ASSERT(idx < m_size); return data()[idx]; } NV_FORCE_INLINE const T& operator[](uint32_t idx) const { NVBLAST_ASSERT(idx < m_size); return data()[idx]; } NV_FORCE_INLINE T& at(uint32_t idx) { NVBLAST_ASSERT(idx < m_size); return data()[idx]; } NV_FORCE_INLINE const T& at(uint32_t idx) const { NVBLAST_ASSERT(idx < m_size); return data()[idx]; } NV_FORCE_INLINE uint32_t size() const { return m_size; } private: uint32_t m_size; NV_FORCE_INLINE T* data() { return (T*)((char*)this + sizeof(FixedArray<T>)); } private: FixedArray(const FixedArray& that); }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTFIXEDARRAY_H
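// --- Illustrative usage sketch (not part of the SDK) -------------------------
// A minimal sketch of the placement-new pattern for FixedArray. Note that
// pushBack() takes a non-const reference, so an lvalue is passed. Capacity and
// values are arbitrary.

#include "NvBlastFixedArray.h"
#include <cstdint>
#include <new>

static void exampleFixedArrayUsage()
{
    typedef Nv::Blast::FixedArray<uint32_t> IndexArray;

    const uint32_t capacity = 8;
    char* buf = new char[IndexArray::requiredMemorySize(capacity)];
    IndexArray* indices = new (buf) IndexArray();

    for (uint32_t i = 0; i < capacity; ++i)
    {
        indices->pushBack(i);                  // copy-constructs in place; do not exceed 'capacity'
    }

    const uint32_t last = indices->popBack();  // 7 for the loop above
    (void)last;

    indices->clear();                          // runs element destructors
    delete[] buf;
}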
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastHashSet.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTHASHSET_H #define NVBLASTHASHSET_H #include "NvBlastAllocator.h" #include "NsHashSet.h" namespace Nv { namespace Blast { /** Wrapped NvShared HashSet that uses NvBlastGlobals AllocatorCallback. */ template <class Key, class HashFn = nvidia::shdfnd::Hash<Key>> struct HashSet { typedef nvidia::shdfnd::HashSet<Key, HashFn, Allocator> type; }; } // namespace Blast } // namespace Nv #endif // #ifndef NVBLASTHASHSET_H
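// --- Illustrative usage sketch (not part of the SDK) -------------------------
// A small sketch of the wrapped HashSet, assuming the usual NvShared hash-set
// interface (insert/contains/erase). Allocations go through the NvBlastGlobals
// allocator.

#include "NvBlastHashSet.h"
#include <cstdint>

static bool exampleHashSetUsage()
{
    Nv::Blast::HashSet<uint32_t>::type visited;

    visited.insert(7u);
    visited.insert(11u);

    const bool found = visited.contains(7u);   // true
    visited.erase(7u);

    return found && !visited.contains(7u);
}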
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastIteratorBase.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTITERATORBASE_H #define NVBLASTITERATORBASE_H #include "NvBlastIndexFns.h" namespace Nv { namespace Blast { /** Common functionality and implementation for iterators over an index, using invalidIndex<T>() to indicate termination. Derived class needs to implement increment operators. */ template<typename T> class IteratorBase { public: /** Constructor sets m_curr value */ IteratorBase(T curr); /** Validity of current value. */ operator bool() const; /** Current value. */ operator T() const; protected: T m_curr; }; //////// IteratorBase<T> inline methods //////// template<typename T> NV_INLINE IteratorBase<T>::IteratorBase(T curr) : m_curr(curr) { } template<typename T> NV_INLINE IteratorBase<T>::operator bool() const { return !isInvalidIndex<T>(m_curr); } template<typename T> NV_INLINE IteratorBase<T>::operator T() const { return m_curr; } /** Common functionality and implementation for an indexed linked list iterator */ template<typename IndexType> class LListIt : public IteratorBase<IndexType> { public: LListIt(IndexType curr, IndexType* links); /** Pre-increment. Only use if valid() == true. */ uint32_t operator ++ (); protected: IndexType* m_links; }; //////// LListIt<IndexType> inline methods //////// template<typename IndexType> NV_INLINE LListIt<IndexType>::LListIt(IndexType curr, IndexType* links) : IteratorBase<IndexType>(curr), m_links(links) { } template<typename IndexType> NV_INLINE uint32_t LListIt<IndexType>::operator ++ () { NVBLAST_ASSERT((bool)(*this)); return (this->m_curr = m_links[this->m_curr]); } /** Common functionality and implementation for an IndexDList<IndexType> iterator */ template<typename IndexType> class DListIt : public IteratorBase<IndexType> { public: DListIt(IndexType curr, IndexDLink<IndexType>* links); /** Pre-increment. Only use if valid() == true. 
*/ uint32_t operator ++ (); protected: IndexDLink<IndexType>* m_links; }; //////// DListIt<IndexType> inline methods //////// template<typename IndexType> NV_INLINE DListIt<IndexType>::DListIt(IndexType curr, IndexDLink<IndexType>* links) : IteratorBase<IndexType>(curr), m_links(links) { } template<typename IndexType> NV_INLINE uint32_t DListIt<IndexType>::operator ++ () { NVBLAST_ASSERT((bool)(*this)); return (this->m_curr = m_links[this->m_curr].m_adj[1]); } } // end namespace Blast } // end namespace Nv #endif // #ifndef NVBLASTITERATORBASE_H
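// --- Illustrative usage sketch (not part of the SDK) -------------------------
// A small sketch of the linked-list iterator above, walking a tiny index list.
// It assumes invalidIndex<uint32_t>() from NvBlastIndexFns.h (included by this
// header) as the list terminator; the list contents are arbitrary.

#include "NvBlastIteratorBase.h"
#include <cstdint>

static uint32_t exampleCountListNodes()
{
    // links[i] is the index of the node after node i; node 2 is the last one.
    uint32_t links[3];
    links[0] = 1;
    links[1] = 2;
    links[2] = Nv::Blast::invalidIndex<uint32_t>();

    uint32_t count = 0;
    for (Nv::Blast::LListIt<uint32_t> it(0, links); (bool)it; ++it)
    {
        ++count;   // visits nodes 0, 1, 2
    }
    return count;  // 3
}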
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastTimers.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlast.h" #include "NvBlastTime.h" #include <cstring> extern "C" { void NvBlastTimersReset(NvBlastTimers* timers) { memset(timers, 0, sizeof(NvBlastTimers)); } double NvBlastTicksToSeconds(int64_t ticks) { return Nv::Blast::Time::seconds(ticks); } } // extern "C"
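// --- Illustrative usage sketch (not part of the SDK) -------------------------
// A small sketch of the C API above: zero an NvBlastTimers struct before
// passing it to Blast entry points that accept one, then convert an accumulated
// tick count to seconds for reporting. The tick count argument is hypothetical.

#include "NvBlast.h"

static double exampleReportTimer(int64_t accumulatedTicks)
{
    NvBlastTimers timers;
    NvBlastTimersReset(&timers);   // all timer fields start at zero

    // ... 'timers' would be passed to NvBlast* calls that take an NvBlastTimers* ...

    return NvBlastTicksToSeconds(accumulatedTicks);
}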
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastMemory.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTMEMORY_H #define NVBLASTMEMORY_H #include <math.h> namespace Nv { namespace Blast { /** Utility function to align the given value to the next 16-byte boundary. Returns the aligned value. */ template<typename T> NV_INLINE T align16(T value) { return (value + 0xF)&~(T)0xF; } /** Offset void* pointer by 'offset' bytes helper-functions */ template <typename T> NV_INLINE T pointerOffset(void* p, ptrdiff_t offset) { return reinterpret_cast<T>(reinterpret_cast<char*>(p)+offset); } template <typename T> NV_INLINE T pointerOffset(const void* p, ptrdiff_t offset) { return reinterpret_cast<T>(reinterpret_cast<const char*>(p)+offset); } NV_INLINE const void* pointerOffset(const void* p, ptrdiff_t offset) { return pointerOffset<const void*>(p, offset); } NV_INLINE void* pointerOffset(void* p, ptrdiff_t offset) { return pointerOffset<void*>(p, offset); } } // namespace Blast } // namespace Nv /** Block data offset and accessor macro. */ #define NvBlastBlockData(_dataType, _name, _accessor) \ _dataType* _accessor() const \ { \ return (_dataType*)((uintptr_t)this + _name); \ } \ uint32_t _name /** Block data offset and accessor macro for an array (includes an _accessor##ArraySize() function which returns the last expression). */ #define NvBlastBlockArrayData(_dataType, _name, _accessor, _sizeExpr) \ _dataType* _accessor() const \ { \ return (_dataType*)((uintptr_t)this + _name); \ } \ uint32_t _accessor##ArraySize() const \ { \ return _sizeExpr; \ } \ uint32_t _name /** Block data offset generation macros. */ /** Start offset generation with this. */ #define NvBlastCreateOffsetStart(_baseOffset) \ size_t _lastOffset = _baseOffset; \ size_t _lastSize = 0 /** Create the next offset generation with this. The value will be aligned to a 16-byte boundary. */ #define NvBlastCreateOffsetAlign16(_name, _size) \ _name = align16(_lastOffset + _lastSize); \ _lastOffset = _name; \ _lastSize = _size /** End offset generation with this. 
It evaluates to the (16-byte aligned) total size of the data block. */ #define NvBlastCreateOffsetEndAlign16() \ align16(_lastOffset + _lastSize) /** Stack allocation */ #if NV_WINDOWS_FAMILY #include <malloc.h> #define NvBlastAlloca(x) _alloca(x) #elif NV_LINUX || NV_ANDROID #include <alloca.h> #define NvBlastAlloca(x) alloca(x) #elif NV_APPLE_FAMILY #include <alloca.h> #define NvBlastAlloca(x) alloca(x) #endif #endif // #ifndef NVBLASTMEMORY_H
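// --- Illustrative usage sketch (not part of the SDK) -------------------------
// A hypothetical relocatable data block built with the macros above: a header
// immediately followed by a 16-byte-aligned float array that is addressed by an
// offset instead of a pointer. ExampleBlock and the helpers are illustrative
// only, not SDK types.

#include "NvBlastMemory.h"
#include <stdint.h>
#include <new>

using Nv::Blast::align16;   // the offset-generation macros call align16 unqualified

struct ExampleBlock
{
    uint32_t m_valueCount;
    NvBlastBlockArrayData(float, m_valuesOffset, getValues, m_valueCount);
};

// Returns the total 16-byte-aligned block size for 'valueCount' floats and
// optionally reports where the float array starts within the block.
static size_t exampleBlockSize(uint32_t valueCount, uint32_t* valuesOffset)
{
    uint32_t offset;
    NvBlastCreateOffsetStart(sizeof(ExampleBlock));
    NvBlastCreateOffsetAlign16(offset, valueCount * sizeof(float));
    if (valuesOffset != nullptr)
    {
        *valuesOffset = offset;
    }
    return NvBlastCreateOffsetEndAlign16();
}

// Constructs the header in caller-provided memory of at least
// exampleBlockSize(valueCount, nullptr) bytes.
static ExampleBlock* exampleCreateBlock(void* mem, uint32_t valueCount)
{
    ExampleBlock* block = new (mem) ExampleBlock();
    block->m_valueCount = valueCount;
    exampleBlockSize(valueCount, &block->m_valuesOffset);
    return block;   // block->getValues() now points inside the same allocation
}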
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastAssert.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastAssert.h" #include <stdio.h> #include <stdlib.h> #if NV_WINDOWS_FAMILY #include <crtdbg.h> #endif extern "C" { void NvBlastAssertHandler(const char* expr, const char* file, int line, bool& ignore) { NV_UNUSED(ignore); // is used only in debug windows config char buffer[1024]; #if NV_WINDOWS_FAMILY sprintf_s(buffer, 1024, "%s(%d) : Assertion failed: %s\n", file, line, expr); #else sprintf(buffer, "%s(%d) : Assertion failed: %s\n", file, line, expr); #endif puts(buffer); #if NV_WINDOWS_FAMILY && NV_DEBUG // _CrtDbgReport returns -1 on error, 1 on 'retry', 0 otherwise including 'ignore'. // Hitting 'abort' will terminate the process immediately. int result = _CrtDbgReport(_CRT_ASSERT, file, line, NULL, "%s", buffer); int mode = _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_REPORT_MODE); ignore = _CRTDBG_MODE_WNDW == mode && result == 0; if (ignore) return; __debugbreak(); #elif (NV_WINDOWS_FAMILY && NV_CHECKED) __debugbreak(); #else abort(); #endif } } // extern "C"
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastVolumeIntegrals.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTVOLUMEINTEGRALS_H #define NVBLASTVOLUMEINTEGRALS_H #include "NvBlastNvSharedHelpers.h" #include "NvCMath.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast{ /** Calculate the volume and centroid of a closed mesh with outward-pointing normals. \param[out] centroid the calculated centroid of the given mesh \param[in] mesh a class of templated type MeshQuery MeshQuery must support the following functions: size_t faceCount() size_t vertexCount(size_t faceIndex) NvcVec3 vertex(size_t faceIndex, size_t vertexIndex) \return the volume of the given mesh */ template<class MeshQuery> NV_INLINE float calculateMeshVolumeAndCentroid(NvcVec3& centroid, const MeshQuery& mesh) { centroid = { 0.0f, 0.0f, 0.0f }; // First find an approximate centroid for a more accurate calculation size_t N = 0; NvcVec3 disp = { 0.0f, 0.0f, 0.0f }; for (size_t i = 0; i < mesh.faceCount(); ++i) { const size_t faceVertexCount = mesh.vertexCount(i); for (size_t j = 0; j < faceVertexCount; ++j) { disp = disp + mesh.vertex(i, j); } N += faceVertexCount; } if (N == 0) { return 0.0f; } disp = disp / (float)N; float sixV = 0.0f; for (size_t i = 0; i < mesh.faceCount(); ++i) { const size_t faceVertexCount = mesh.vertexCount(i); if (faceVertexCount < 3) { continue; } const NvcVec3 a = mesh.vertex(i, 0) - disp; NvcVec3 b = mesh.vertex(i, 1) - disp; for (size_t j = 2; j < faceVertexCount; ++j) { const NvcVec3 c = mesh.vertex(i, j) - disp; const float sixTetV = a.x * b.y * c.z - a.x * b.z * c.y - a.y * b.x * c.z + a.y * b.z * c.x + a.z * b.x * c.y - a.z * b.y * c.x; sixV += sixTetV; centroid = centroid + sixTetV*(a + b + c); b = c; } } // Extra factor of four to average tet vertices centroid = centroid / (4.0f * sixV) + disp; return std::abs(sixV) / 6.0f; } } // namespace Blast } // namespace Nv #endif // NVBLASTVOLUMEINTEGRALS_H
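// --- Illustrative usage sketch (not part of the SDK) -------------------------
// A minimal MeshQuery implementation satisfying the interface documented above,
// describing a unit cube with outward-facing quads, plus a helper that runs the
// integral. ExampleBoxQuery is hypothetical; only faceCount(), vertexCount()
// and vertex() are required by calculateMeshVolumeAndCentroid().

#include "NvBlastVolumeIntegrals.h"

struct ExampleBoxQuery
{
    // 8 corners of an axis-aligned unit cube centered at the origin.
    NvcVec3 corner(size_t i) const
    {
        return { (i & 1) ? 0.5f : -0.5f, (i & 2) ? 0.5f : -0.5f, (i & 4) ? 0.5f : -0.5f };
    }

    size_t faceCount() const { return 6; }
    size_t vertexCount(size_t /*faceIndex*/) const { return 4; }

    NvcVec3 vertex(size_t faceIndex, size_t vertexIndex) const
    {
        // Corner indices for each face, wound consistently so normals face outward.
        static const size_t faces[6][4] =
        {
            { 0, 4, 6, 2 },  // -x
            { 1, 3, 7, 5 },  // +x
            { 0, 1, 5, 4 },  // -y
            { 2, 6, 7, 3 },  // +y
            { 0, 2, 3, 1 },  // -z
            { 4, 5, 7, 6 }   // +z
        };
        return corner(faces[faceIndex][vertexIndex]);
    }
};

static float exampleUnitCubeVolume()
{
    NvcVec3 centroid;
    const float volume = Nv::Blast::calculateMeshVolumeAndCentroid(centroid, ExampleBoxQuery());
    return volume;   // ~1.0f, with the centroid near the origin
}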
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastGeometry.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTGEOMETRY_H #define NVBLASTGEOMETRY_H #include "NvBlastTypes.h" #include "NvBlastMath.h" #include "NvBlastAssert.h" #include <limits> namespace Nv { namespace Blast{ /** Find the closest node to point in the graph. Uses primarily distance to chunk centroids. Bond normals are expected to be directed from the lower to higher node index. Cannot be used for graph actors with only the external chunk in the graph. \param[in] point the point to test against \param[in] firstGraphNodeIndex the entry point for familyGraphNodeIndexLinks \param[in] familyGraphNodeIndexLinks the list index links of the actor's graph \param[in] adjacencyPartition the actor's SupportGraph adjacency partition \param[in] adjacentNodeIndices the actor's SupportGraph adjacent node indices \param[in] adjacentBondIndices the actor's SupportGraph adjacent bond indices \param[in] assetBonds the actor's asset bonds \param[in] bondHealths the actor's bond healths \param[in] assetChunks the actor's asset chunks \param[in] supportChunkHealths the actor's graph chunks healths \param[in] chunkIndices maps node index to chunk index in SupportGraph \return the index of the node closest to point */ NV_FORCE_INLINE uint32_t findClosestNode(const float point[4], const uint32_t firstGraphNodeIndex, const uint32_t* familyGraphNodeIndexLinks, const uint32_t* adjacencyPartition, const uint32_t* adjacentNodeIndices, const uint32_t* adjacentBondIndices, const NvBlastBond* assetBonds, const float* bondHealths, const NvBlastChunk* assetChunks, const float* supportChunkHealths, const uint32_t* chunkIndices) { // firstGraphNodeIndex could still be the external chunk, however // there should be no way a single-node actor that is just the external chunk exists. uint32_t nodeIndex = firstGraphNodeIndex; // Since there should always be a regular chunk in the graph, it is possible to initialize closestNode // as external chunk index but it would always evaluate to some meaningful node index eventually. 
uint32_t closestNode = nodeIndex; float minDist = std::numeric_limits<float>().max(); // find the closest healthy chunk in the graph by its centroid to point distance while (!Nv::Blast::isInvalidIndex(nodeIndex)) { if (supportChunkHealths[nodeIndex] > 0.0f) { uint32_t chunkIndex = chunkIndices[nodeIndex]; if (!isInvalidIndex(chunkIndex)) // Invalid if this is the external chunk { const NvBlastChunk& chunk = assetChunks[chunkIndex]; const float* centroid = chunk.centroid; float d[3]; VecMath::sub(point, centroid, d); float dist = VecMath::dot(d, d); if (dist < minDist) { minDist = dist; closestNode = nodeIndex; } } } nodeIndex = familyGraphNodeIndexLinks[nodeIndex]; } // as long as the external chunk is not input as a single-node graph actor NVBLAST_ASSERT(!isInvalidIndex(chunkIndices[closestNode])); bool iterateOnBonds = true; if (iterateOnBonds) { // improve geometric accuracy by looking on which side of the closest bond the point lies // expects bond normals to point from the smaller to the larger node index nodeIndex = closestNode; minDist = std::numeric_limits<float>().max(); const uint32_t startIndex = adjacencyPartition[nodeIndex]; const uint32_t stopIndex = adjacencyPartition[nodeIndex + 1]; for (uint32_t adjacentIndex = startIndex; adjacentIndex < stopIndex; adjacentIndex++) { const uint32_t neighbourIndex = adjacentNodeIndices[adjacentIndex]; const uint32_t neighbourChunk = chunkIndices[neighbourIndex]; if (!isInvalidIndex(neighbourChunk)) // Invalid if neighbor is the external chunk { const uint32_t bondIndex = adjacentBondIndices[adjacentIndex]; // do not follow broken bonds, since it means that neighbor is not actually connected in the graph if (bondHealths[bondIndex] > 0.0f && supportChunkHealths[neighbourIndex] > 0.0f) { const NvBlastBond& bond = assetBonds[bondIndex]; const float* centroid = bond.centroid; float d[3]; VecMath::sub(point, centroid, d); float dist = VecMath::dot(d, d); if (dist < minDist) { minDist = dist; float s = VecMath::dot(d, bond.normal); if (nodeIndex < neighbourIndex) { closestNode = s < 0.0f ? nodeIndex : neighbourIndex; } else { closestNode = s < 0.0f ? neighbourIndex : nodeIndex; } } } } } } return closestNode; } /** Find the closest node to point in the graph. Uses primarily distance to bond centroids. Slower compared to chunk based lookup but may yield better accuracy in some cases. Bond normals are expected to be directed from the lower to higher node index. Cannot be used for graph actors with only the external chunk in the graph. 
\param[in] point the point to test against \param[in] firstGraphNodeIndex the entry point for familyGraphNodeIndexLinks \param[in] familyGraphNodeIndexLinks the list index links of the actor's graph \param[in] adjacencyPartition the actor's SupportGraph adjacency partition \param[in] adjacentNodeIndices the actor's SupportGraph adjacent node indices \param[in] adjacentBondIndices the actor's SupportGraph adjacent bond indices \param[in] assetBonds the actor's asset bonds \param[in] bondHealths the actor's bond healths \param[in] chunkIndices maps node index to chunk index in SupportGraph \return the index of the node closest to point */ NV_FORCE_INLINE uint32_t findClosestNode(const float point[4], const uint32_t firstGraphNodeIndex, const uint32_t* familyGraphNodeIndexLinks, const uint32_t* adjacencyPartition, const uint32_t* adjacentNodeIndices, const uint32_t* adjacentBondIndices, const NvBlastBond* bonds, const float* bondHealths, const uint32_t* chunkIndices) { // firstGraphNodeIndex could still be the external chunk, however // there should be no way a single-node actor that is just the external chunk exists. uint32_t nodeIndex = firstGraphNodeIndex; // Since there should always be a regular chunk in the graph, it is possible to initialize closestNode // as external chunk index but it would always evaluate to some meaningful node index eventually. uint32_t closestNode = nodeIndex; float minDist = std::numeric_limits<float>().max(); while (!Nv::Blast::isInvalidIndex(nodeIndex)) { const uint32_t startIndex = adjacencyPartition[nodeIndex]; const uint32_t stopIndex = adjacencyPartition[nodeIndex + 1]; for (uint32_t adjacentIndex = startIndex; adjacentIndex < stopIndex; adjacentIndex++) { const uint32_t neighbourIndex = adjacentNodeIndices[adjacentIndex]; if (nodeIndex < neighbourIndex) { const uint32_t bondIndex = adjacentBondIndices[adjacentIndex]; if (bondHealths[bondIndex] > 0.0f) { const NvBlastBond& bond = bonds[bondIndex]; const float* centroid = bond.centroid; float d[3]; VecMath::sub(point, centroid, d); float dist = VecMath::dot(d, d); if (dist < minDist) { minDist = dist; // if any of the nodes is the external chunk, use the valid one instead if (isInvalidIndex(chunkIndices[neighbourIndex])) { closestNode = nodeIndex; } else if (isInvalidIndex(chunkIndices[nodeIndex])) { closestNode = neighbourIndex; } else { float s = VecMath::dot(d, bond.normal); closestNode = s < 0 ? nodeIndex : neighbourIndex; } } } } } nodeIndex = familyGraphNodeIndexLinks[nodeIndex]; } // as long as the external chunk is not input as a single-node graph actor NVBLAST_ASSERT(!isInvalidIndex(chunkIndices[closestNode])); return closestNode; } } // namespace Blast } // namespace Nv #endif // NVBLASTGEOMETRY_H
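The two findClosestNode() overloads above operate on raw support-graph arrays rather than an actor handle, so the calling convention is easiest to see with hand-built data. The sketch below is hypothetical (not taken from the SDK): it assembles a two-node support graph and queries the chunk-centroid variant, assuming the NvBlastChunk field order (centroid, volume, parent/child indices, userData) and NvBlastBond field order (normal, area, centroid, userData) declared in NvBlastTypes.h.

// Hypothetical standalone usage sketch for the chunk-centroid findClosestNode() overload.
#include "NvBlastGeometry.h"
#include "NvBlastIndexFns.h"

uint32_t exampleClosestNode()
{
    using namespace Nv::Blast;

    // Two support chunks at x = 0 and x = 2, joined by one bond at x = 1 whose normal
    // points from node 0 toward node 1 (lower to higher node index, as documented above).
    const NvBlastChunk chunks[2] = {
        { { 0.0f, 0.0f, 0.0f }, 1.0f, invalidIndex<uint32_t>(), 0, 0, 0 },
        { { 2.0f, 0.0f, 0.0f }, 1.0f, invalidIndex<uint32_t>(), 0, 0, 1 }
    };
    const NvBlastBond bonds[1] = {
        { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }
    };

    // Support graph in adjacency-partition form: node 0 <-> node 1 through bond 0.
    const uint32_t chunkIndices[2]        = { 0, 1 };
    const uint32_t adjacencyPartition[3]  = { 0, 1, 2 };
    const uint32_t adjacentNodeIndices[2] = { 1, 0 };
    const uint32_t adjacentBondIndices[2] = { 0, 0 };

    // Single-actor node index list: 0 -> 1 -> end.
    const uint32_t nodeIndexLinks[2] = { 1, invalidIndex<uint32_t>() };

    const float bondHealths[1]  = { 1.0f };
    const float chunkHealths[2] = { 1.0f, 1.0f };

    const float point[4] = { 0.4f, 0.0f, 0.0f, 0.0f };

    // Expected to return node 0: its centroid is closest to the point, and the point
    // lies on the node-0 side in the bond refinement test.
    return findClosestNode(point, 0, nodeIndexLinks, adjacencyPartition, adjacentNodeIndices,
                           adjacentBondIndices, bonds, bondHealths, chunks, chunkHealths, chunkIndices);
}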
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastIncludeWindows.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTINCLUDEWINDOWS_H #define NVBLASTINCLUDEWINDOWS_H #ifndef _WINDOWS_ // windows already included if this is defined #include "NvPreprocessor.h" #ifndef _WIN32 #error "This file should only be included by Windows builds!!" #endif // We only support >= Windows XP, and we need this for critical section and #if !NV_WINRT #define _WIN32_WINNT 0x0501 #else #define _WIN32_WINNT 0x0602 #endif // turn off as much as we can for windows. All we really need is the thread functions(critical sections/Interlocked* // etc) #define NOGDICAPMASKS #define NOVIRTUALKEYCODES #define NOWINMESSAGES #define NOWINSTYLES #define NOSYSMETRICS #define NOMENUS #define NOICONS #define NOKEYSTATES #define NOSYSCOMMANDS #define NORASTEROPS #define NOSHOWWINDOW #define NOATOM #define NOCLIPBOARD #define NOCOLOR #define NOCTLMGR #define NODRAWTEXT #define NOGDI #define NOMB #define NOMEMMGR #define NOMETAFILE #define NOMINMAX #define NOOPENFILE #define NOSCROLL #define NOSERVICE #define NOSOUND #define NOTEXTMETRIC #define NOWH #define NOWINOFFSETS #define NOCOMM #define NOKANJI #define NOHELP #define NOPROFILER #define NODEFERWINDOWPOS #define NOMCX #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif #if !NV_WINRT #define NOUSER #define NONLS #define NOMSG #endif #pragma warning(push) #pragma warning(disable : 4668) //'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives' #include <windows.h> #pragma warning(pop) #if NV_SSE2 #include <xmmintrin.h> #endif #endif // #ifndef _WINDOWS_ #endif // #ifndef NVBLASTINCLUDEWINDOWS_H
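A sketch of the kind of consumer this header exists for: platform-guarded code that only needs the Win32 synchronization primitives and none of the GDI/USER machinery the NOxxx defines strip out. The wrapper class below is illustrative, not an SDK type.

// Illustrative only: wrap a Win32 critical section behind the trimmed-down windows.h include.
#include "NvPreprocessor.h"

#if NV_WINDOWS_FAMILY
#include "NvBlastIncludeWindows.h"

class SimpleCriticalSection
{
public:
    SimpleCriticalSection()  { InitializeCriticalSection(&m_cs); }
    ~SimpleCriticalSection() { DeleteCriticalSection(&m_cs); }

    void lock()   { EnterCriticalSection(&m_cs); }
    void unlock() { LeaveCriticalSection(&m_cs); }

private:
    CRITICAL_SECTION m_cs;
};
#endif // NV_WINDOWS_FAMILY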
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastAtomic.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTATOMIC_H #define NVBLASTATOMIC_H #include "NvBlastTypes.h" namespace Nv { namespace Blast { /* increment the specified location. Return the incremented value */ int32_t atomicIncrement(volatile int32_t* val); /* decrement the specified location. Return the decremented value */ int32_t atomicDecrement(volatile int32_t* val); } // namespace Blast } // namespace Nv #endif // #ifndef NVBLASTATOMIC_H
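A hypothetical reference-count sketch showing how the two declarations above are meant to be used; the class itself is illustrative and not part of the SDK.

#include "NvBlastAtomic.h"

class RefCounted
{
public:
    RefCounted() : m_refCount(1) {}

    void addReference() { Nv::Blast::atomicIncrement(&m_refCount); }

    // Returns true when the last reference is released.
    bool removeReference() { return Nv::Blast::atomicDecrement(&m_refCount) == 0; }

private:
    volatile int32_t m_refCount;
};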
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastPreprocessorInternal.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTPREPROCESSORINTERNAL_H #define NVBLASTPREPROCESSORINTERNAL_H #include "NvPreprocessor.h" /** Macros for more convenient logging */ #define NVBLASTLL_LOG_ERROR(_logFn, _msg) if (_logFn != nullptr) { _logFn(NvBlastMessage::Error, _msg, __FILE__, __LINE__); } ((void)0) #define NVBLASTLL_LOG_WARNING(_logFn, _msg) if (_logFn != nullptr) { _logFn(NvBlastMessage::Warning, _msg, __FILE__, __LINE__); } ((void)0) #define NVBLASTLL_LOG_INFO(_logFn, _msg) if (_logFn != nullptr) { _logFn(NvBlastMessage::Info, _msg, __FILE__, __LINE__); } ((void)0) #define NVBLASTLL_LOG_DEBUG(_logFn, _msg) if (_logFn != nullptr) { _logFn(NvBlastMessage::Debug, _msg, __FILE__, __LINE__); } ((void)0) /** Blast will check function parameters for debug and checked builds. */ #define NVBLASTLL_CHECK_PARAMS (NV_DEBUG || NV_CHECKED) #if NVBLASTLL_CHECK_PARAMS #define NVBLASTLL_CHECK(_expr, _logFn, _msg, _onFail) \ { \ if(!(_expr)) \ { \ if (_logFn) { _logFn(NvBlastMessage::Error, _msg, __FILE__, __LINE__); } \ { _onFail; }; \ } \ } #else #define NVBLASTLL_CHECK(_expr, _logFn, _msg, _onFail) NV_UNUSED(_logFn) #endif /** Convenience macro to replace deprecated UINT32_MAX */ #ifndef UINT32_MAX #include <limits> #define UINT32_MAX (std::numeric_limits<uint32_t>::max()) #endif #endif // ifndef NVBLASTPREPROCESSORINTERNAL_H
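A hypothetical helper showing the intended call pattern for these macros: hard parameter validation with NVBLASTLL_CHECK (which logs and executes the fallback when the expression fails, and compiles down to NV_UNUSED(logFn) in non-checked builds), and soft reporting with NVBLASTLL_LOG_WARNING.

#include "NvBlastPreprocessorInternal.h"
#include "NvBlastTypes.h"

uint32_t countHealthyBonds(const float* bondHealths, uint32_t bondCount, NvBlastLog logFn)
{
    NVBLASTLL_CHECK(bondHealths != nullptr, logFn, "countHealthyBonds: NULL bondHealths input.", return 0);

    if (bondCount == 0)
    {
        NVBLASTLL_LOG_WARNING(logFn, "countHealthyBonds: zero bonds supplied.");
    }

    uint32_t healthy = 0;
    for (uint32_t i = 0; i < bondCount; ++i)
    {
        healthy += (bondHealths[i] > 0.0f) ? 1 : 0;
    }
    return healthy;
}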
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastHashMap.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTHASHMAP_H #define NVBLASTHASHMAP_H #include "NvBlastAllocator.h" #include "NsHashMap.h" namespace Nv { namespace Blast { /** Wrapped NvShared HashMap that uses NvBlastGlobals AllocatorCallback. */ template <class Key, class Value, class HashFn = nvidia::shdfnd::Hash<Key>> struct HashMap { typedef nvidia::shdfnd::HashMap<Key, Value, HashFn, Allocator> type; }; } // namespace Blast } // namespace Nv #endif // #ifndef NVBLASTHASHMAP_H
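A minimal usage sketch. It assumes the wrapped nvidia::shdfnd::HashMap exposes the usual shared-foundation insert()/find() interface, and that the NvBlastGlobals allocator callback is installed, since the Allocator template argument routes every allocation through it.

#include "NvBlastHashMap.h"

void exampleHashMap()
{
    // Map chunk index -> damage multiplier (values are arbitrary for the example).
    Nv::Blast::HashMap<uint32_t, float>::type damageMultiplier;

    damageMultiplier.insert(7, 0.5f);

    const auto* entry = damageMultiplier.find(7);
    if (entry != nullptr)
    {
        float m = entry->second;    // 0.5f
        (void)m;
    }
}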
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastAssert.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTASSERT_H #define NVBLASTASSERT_H #include "NvPreprocessor.h" #if !NV_ENABLE_ASSERTS #define NVBLAST_ASSERT(exp) ((void)0) #define NVBLAST_ALWAYS_ASSERT_MESSAGE(message) ((void)0) #define NVBLAST_ASSERT_WITH_MESSAGE(condition, message) ((void)0) #else #if NV_VC #define NVBLAST_CODE_ANALYSIS_ASSUME(exp) \ __analysis_assume(!!(exp)) // This macro will be used to get rid of analysis warning messages if a NVBLAST_ASSERT is used // to "guard" illegal mem access, for example. #else #define NVBLAST_CODE_ANALYSIS_ASSUME(exp) #endif #define NVBLAST_ASSERT(exp) \ { \ static bool _ignore = false; \ if (!(exp) && !_ignore) NvBlastAssertHandler(#exp, __FILE__, __LINE__, _ignore); \ NVBLAST_CODE_ANALYSIS_ASSUME(exp); \ } ((void)0) #define NVBLAST_ALWAYS_ASSERT_MESSAGE(message) \ { \ static bool _ignore = false; \ if(!_ignore) \ { \ NvBlastAssertHandler(message, __FILE__, __LINE__, _ignore); \ } \ } ((void)0) #define NVBLAST_ASSERT_WITH_MESSAGE(exp, message) \ { \ static bool _ignore = false; \ if (!(exp) && !_ignore) NvBlastAssertHandler(message, __FILE__, __LINE__, _ignore); \ NVBLAST_CODE_ANALYSIS_ASSUME(exp); \ } ((void)0) #endif #define NVBLAST_ALWAYS_ASSERT() NVBLAST_ASSERT(0) extern "C" { NV_C_API void NvBlastAssertHandler(const char* expr, const char* file, int line, bool& ignore); } // extern "C" #endif // #ifndef NVBLASTASSERT_H
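A hypothetical helper demonstrating the assertion macros above. Because the checks compile away entirely when NV_ENABLE_ASSERTS is 0, the explicit range guard is kept so release builds stay safe.

#include "NvBlastAssert.h"
#include <cstdint>

inline float getBondHealthChecked(const float* bondHealths, uint32_t bondCount, uint32_t bondIndex)
{
    NVBLAST_ASSERT(bondHealths != nullptr);
    NVBLAST_ASSERT_WITH_MESSAGE(bondIndex < bondCount, "getBondHealthChecked: bondIndex out of range.");
    return (bondHealths != nullptr && bondIndex < bondCount) ? bondHealths[bondIndex] : 0.0f;
}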
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastAtomic.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastAtomic.h" #include <string.h> #include <stdlib.h> namespace Nv { namespace Blast { /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Windows Implementation /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #if NV_WINDOWS_FAMILY #include "NvBlastIncludeWindows.h" int32_t atomicIncrement(volatile int32_t* val) { return (int32_t)InterlockedIncrement((volatile LONG*)val); } int32_t atomicDecrement(volatile int32_t* val) { return (int32_t)InterlockedDecrement((volatile LONG*)val); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Unix Implementation /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #elif(NV_UNIX_FAMILY) int32_t atomicIncrement(volatile int32_t* val) { return __sync_add_and_fetch(val, 1); } int32_t atomicDecrement(volatile int32_t* val) { return __sync_sub_and_fetch(val, 1); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Unsupported Platforms /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #else #error "Platform not supported!" #endif } // namespace Blast } // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastDLink.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTDLINK_H #define NVBLASTDLINK_H #include "NvBlastAssert.h" #include "NvBlastIndexFns.h" namespace Nv { namespace Blast { template<typename IndexType> struct IndexDLink { IndexType m_adj[2]; }; template<typename IndexType> class IndexDList { public: void initLinksSolitary(IndexDLink<IndexType>* links, IndexType linkCount) { for (IndexType i = 0; i < linkCount; ++i) { links[i].m_adj[0] = invalidIndex<IndexType>(); links[i].m_adj[1] = invalidIndex<IndexType>(); } } void initLinksChain(IndexDLink<IndexType>* links, IndexType linkCount) { if (linkCount > 0) { links[0].m_adj[0] = invalidIndex<IndexType>(); for (IndexType i = 1; i < linkCount; ++i) { links[i - 1].m_adj[1] = i; links[i].m_adj[0] = i - 1; } links[linkCount - 1].m_adj[1] = invalidIndex<IndexType>(); } } IndexType getAdj(IndexDLink<IndexType>* links, IndexType linkIndex, int which) { return links[linkIndex].m_adj[which & 1]; } void remove(IndexDLink<IndexType>* links, IndexType linkIndex) { IndexDLink<IndexType>& link = links[linkIndex]; const IndexType adj0 = link.m_adj[0]; const IndexType adj1 = link.m_adj[1]; if (!isInvalidIndex(adj1)) { links[adj1].m_adj[0] = adj0; link.m_adj[1] = invalidIndex<IndexType>(); } if (!isInvalidIndex(adj0)) { links[adj0].m_adj[1] = adj1; link.m_adj[0] = invalidIndex<IndexType>(); } } bool isSolitary(IndexDLink<IndexType>* links, IndexType linkIndex) { const IndexDLink<IndexType>& link = links[linkIndex]; return isInvalidIndex(link.m_adj[0]) && isInvalidIndex(link.m_adj[1]); } void insertListHead(IndexType& listHead, IndexDLink<IndexType>* links, IndexType linkIndex) { NVBLAST_ASSERT(!isInvalidIndex(linkIndex)); if (!isInvalidIndex(listHead)) { links[listHead].m_adj[0] = linkIndex; } links[linkIndex].m_adj[1] = listHead; listHead = linkIndex; } IndexType removeListHead(IndexType& listHead, IndexDLink<IndexType>* links) { const IndexType linkIndex = listHead; if (!isInvalidIndex(linkIndex)) { listHead = links[linkIndex].m_adj[1]; if (!isInvalidIndex(listHead)) { 
links[listHead].m_adj[0] = invalidIndex<IndexType>(); } links[linkIndex].m_adj[1] = invalidIndex<IndexType>(); } return linkIndex; } void removeFromList(IndexType& listHead, IndexDLink<IndexType>* links, IndexType linkIndex) { NVBLAST_ASSERT(!isInvalidIndex(linkIndex)); if (listHead == linkIndex) { listHead = links[linkIndex].m_adj[1]; } remove(links, linkIndex); } }; struct DLink { DLink() : m_prev(nullptr), m_next(nullptr) {} DLink* getPrev() const { return m_prev; } DLink* getNext() const { return m_next; } private: DLink* m_prev; DLink* m_next; friend class DList; }; class DList { public: DList() : m_head(nullptr), m_tail(nullptr) {} bool isEmpty() const { NVBLAST_ASSERT((m_head == nullptr) == (m_tail == nullptr)); return m_head == nullptr; } bool isSolitary(const DLink& link) const { return link.m_prev == nullptr && link.m_next == nullptr && m_head != &link; } DLink* getHead() const { return m_head; } DLink* getTail() const { return m_tail; } bool insertHead(DLink& link) { NVBLAST_ASSERT(isSolitary(link)); if (!isSolitary(link)) { return false; } link.m_next = m_head; if (m_head != nullptr) { m_head->m_prev = &link; } m_head = &link; if (m_tail == nullptr) { m_tail = &link; } return true; } bool insertTail(DLink& link) { NVBLAST_ASSERT(isSolitary(link)); if (!isSolitary(link)) { return false; } link.m_prev = m_tail; if (m_tail != nullptr) { m_tail->m_next = &link; } m_tail = &link; if (m_head == nullptr) { m_head = &link; } return true; } void remove(DLink& link) { if (link.m_prev != nullptr) { link.m_prev->m_next = link.m_next; } else if (m_head == &link) { m_head = link.m_next; } if (link.m_next != nullptr) { link.m_next->m_prev = link.m_prev; } else if (m_tail == &link) { m_tail = link.m_prev; } link.m_next = link.m_prev = nullptr; } class It { public: enum Direction { Reverse, Forward }; It(const DList& list, Direction dir = Forward) : m_curr(dir == Forward ? list.getHead() : list.getTail()) {} /** Validity of current value. */ operator bool() const { return m_curr != nullptr; } /** Current value. */ operator const DLink*() const { return m_curr; } /** Pre-increment. */ const DLink* operator ++ () { return m_curr = m_curr->getNext(); } /** Pre-deccrement. */ const DLink* operator -- () { return m_curr = m_curr->getPrev(); } private: const DLink* m_curr; }; private: DLink* m_head; DLink* m_tail; }; } // end namespace Blast } // end namespace Nv #endif // #ifndef NVBLASTDLINK_H
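Illustrative only: an intrusive node type built on DLink, pushed onto a DList and walked with DList::It. The struct and function names are invented for the example.

#include "NvBlastDLink.h"

struct MyNode
{
    Nv::Blast::DLink link;   // intrusive link; kept as the first member for the cast below
    int              value;
};

int sumList()
{
    using namespace Nv::Blast;

    MyNode nodes[3] = {};
    nodes[0].value = 1;
    nodes[1].value = 2;
    nodes[2].value = 3;

    DList list;
    for (int i = 0; i < 3; ++i)
    {
        list.insertHead(nodes[i].link);     // resulting order: 3, 2, 1
    }

    int sum = 0;
    for (DList::It it(list); it; ++it)
    {
        // Recover the owning node from the link; valid because link is MyNode's first member.
        const MyNode* node = reinterpret_cast<const MyNode*>(static_cast<const DLink*>(it));
        sum += node->value;
    }

    list.remove(nodes[1].link);             // unlink the node holding 2
    return sum;                             // 6
}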
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastIndexFns.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTINDEXFNS_H #define NVBLASTINDEXFNS_H #include "NvBlastTypes.h" #include <cstring> namespace Nv { namespace Blast { /** Set to invalid index. */ template<typename T> NV_INLINE T invalidIndex() { return ~(T)0; } /** Test for invalid index (max representable integer). */ template<typename T> NV_INLINE bool isInvalidIndex(T index) { return index == invalidIndex<T>(); } /** Create a lookup table for data sorted by a templated index type. Note: when using this function with unsigned integer index types invalidIndex<T>() is treated as a value less than zero. On input: The indices must lie in the interval [indexBase, indexBase+indexRange]. indexSource must point to the first index in the data. indexCount must be set to the number of indices in the data. indexByteStride must be set to the distance, in bytes, between subequent indices. lookup must point to a T array of size indexRange+2. On return: lookup will be filled such that: lookup[i] = the position of first data element with index (i + indexBase) lookup[indexRange+1] = indexCount The last (indexRange+1) element is used so that one may always determine the number of data elements with the given index using: count = lookup[i+1] - lookup[i] Note, if an index (i + indexBase) is not present in the data then, lookup[i+1] = lookup[i], so the count (above) will correctly be zero. In this case, the actual value of lookup[i] is irrelevant. 
*/ template<typename T> void createIndexStartLookup(T* lookup, T indexBase, T indexRange, T* indexSource, T indexCount, T indexByteStride) { ++indexBase; // Ordering invalidIndex<T>() as lowest value T indexPos = 0; for (T i = 0; i <= indexRange; ++i) { for (; indexPos < indexCount; ++indexPos, indexSource = (T*)((uintptr_t)indexSource + indexByteStride)) { if (*indexSource + 1 >= i + indexBase) // +1 to order invalidIndex<T>() as lowest value { lookup[i] = indexPos; break; } } if (indexPos == indexCount) { lookup[i] = indexPos; } } lookup[indexRange + 1] = indexCount; } /** Creates the inverse of a map, such that inverseMap[map[i]] = i. Unmapped indices are set to invalidIndex<T>. \param[out] inverseMap inverse map space of given size \param[in] map original map of given size, unmapped entries must contain invalidIndex<T> \param[in] size size of the involved maps */ template<typename T> void invertMap(T* inverseMap, const T* map, const T size) { memset(inverseMap, invalidIndex<T>(), size*sizeof(T)); for (T i = 0; i < size; i++) { if (!isInvalidIndex(map[i])) { inverseMap[map[i]] = i; } } } } // end namespace Blast } // end namespace Nv #endif // #ifndef NVBLASTINDEXFNS_H
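A small worked example of createIndexStartLookup(): given data already sorted by index, the resulting table stores per-index start positions, so adjacent entries yield per-index counts (zero for indices that are absent). The data values are hypothetical.

#include "NvBlastIndexFns.h"

uint32_t exampleIndexLookup()
{
    using namespace Nv::Blast;

    // Elements sorted by index: two with index 0, one with index 1, one with index 3.
    uint32_t sortedIndices[4] = { 0, 0, 1, 3 };

    // Indices lie in [0, 3], so indexRange = 3 and the lookup needs indexRange + 2 = 5 entries.
    uint32_t lookup[5];
    createIndexStartLookup<uint32_t>(lookup, 0, 3, sortedIndices, 4, (uint32_t)sizeof(uint32_t));

    // lookup is now { 0, 2, 3, 3, 4 }; note lookup[2] == lookup[3] because index 2 is absent.
    return lookup[1] - lookup[0];   // count of elements with index 0 == 2
}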
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastFixedBitmap.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTFIXEDBITMAP_H #define NVBLASTFIXEDBITMAP_H #include "NvBlastAssert.h" #include "NvBlastMemory.h" #include <cstring> namespace Nv { namespace Blast { /*! FixedBitmap is a bitset (bitmap) of fixed size; it is intended to be used with placement new on a chunk of memory, and it lays out its data in that memory as follows: // some memory char* buf = new char[64 * 1024]; const uint32_t bitsCount = 100; // placement new on this memory FixedBitmap* arr = new (buf) FixedBitmap(bitsCount); // requiredMemorySize(bitsCount) gives the bitmap's footprint, so the remaining memory starts at buf = buf + FixedBitmap::requiredMemorySize(bitsCount); buf: +------------------------------------------------------------+ | uint32_t | word0 | word1 | word2 | ... 
| +------------------------------------------------------------+ */ class FixedBitmap { public: explicit FixedBitmap(uint32_t bitsCount) { m_bitsCount = bitsCount; } static uint32_t getWordsCount(uint32_t bitsCount) { return (bitsCount + 31) >> 5; } static size_t requiredMemorySize(uint32_t bitsCount) { return align16(sizeof(FixedBitmap)) + align16(getWordsCount(bitsCount) * sizeof(uint32_t)); } void clear() { memset(data(), 0, getWordsCount(m_bitsCount) * sizeof(uint32_t)); } void fill() { const uint32_t wordCount = getWordsCount(m_bitsCount); uint32_t* mem = data(); memset(mem, 0xFF, wordCount * sizeof(uint32_t)); const uint32_t bitsRemainder = m_bitsCount & 31; if (bitsRemainder > 0) { mem[wordCount - 1] &= ~(0xFFFFFFFF << bitsRemainder); } } int test(uint32_t index) const { NVBLAST_ASSERT(index < m_bitsCount); return data()[index >> 5] & (1 << (index & 31)); } void set(uint32_t index) { NVBLAST_ASSERT(index < m_bitsCount); data()[index >> 5] |= 1 << (index & 31); } void reset(uint32_t index) { NVBLAST_ASSERT(index < m_bitsCount); data()[index >> 5] &= ~(1 << (index & 31)); } private: uint32_t m_bitsCount; NV_FORCE_INLINE uint32_t* data() { return (uint32_t*)((char*)this + sizeof(FixedBitmap)); } NV_FORCE_INLINE const uint32_t* data() const { return (uint32_t*)((char*)this + sizeof(FixedBitmap)); } private: FixedBitmap(const FixedBitmap& that); }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTFIXEDBITMAP_H
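The sketch below completes the placement-new pattern described in the class comment; buffer ownership and sizing are illustrative, and the caller keeps responsibility for the backing memory since FixedBitmap itself allocates nothing.

#include "NvBlastFixedBitmap.h"
#include <new>

void exampleFixedBitmap()
{
    using namespace Nv::Blast;

    const uint32_t bitsCount = 100;

    // Back the bitmap with externally owned memory.
    char* buf = new char[FixedBitmap::requiredMemorySize(bitsCount)];
    FixedBitmap* visited = new (buf) FixedBitmap(bitsCount);

    visited->clear();
    visited->set(42);
    if (visited->test(42))
    {
        visited->reset(42);
    }

    delete[] buf;   // FixedBitmap is trivially destructible; just release the backing memory
}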
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastNvSharedHelpers.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTNVSHAREDSHELPERS_H #define NVBLASTNVSHAREDSHELPERS_H #include "NvCTypes.h" #include "NvVec2.h" #include "NvVec3.h" #include "NvVec4.h" #include "NvTransform.h" #include "NvPlane.h" #include "NvMat33.h" #include "NvMat44.h" #include "NvBounds3.h" using namespace nvidia; #define WCast(type, name) reinterpret_cast<type>(name) #define RCast(type, name) reinterpret_cast<const type>(name) #define CONVERT(BlastType, NvSharedType) \ static inline NvSharedType& toNvShared(BlastType& v) \ { \ return WCast(NvSharedType&, v); \ } \ static inline const NvSharedType& toNvShared(const BlastType& v) \ { \ return RCast(NvSharedType&, v); \ } \ static inline const BlastType& fromNvShared(const NvSharedType& v) \ { \ return RCast(BlastType&, v); \ } \ static inline BlastType& fromNvShared(NvSharedType& v) \ { \ return WCast(BlastType&, v); \ } \ static inline NvSharedType* toNvShared(BlastType* v) \ { \ return WCast(NvSharedType*, v); \ } \ static inline const NvSharedType* toNvShared(const BlastType* v) \ { \ return RCast(NvSharedType*, v); \ } \ static inline const BlastType* fromNvShared(const NvSharedType* v) \ { \ return RCast(BlastType*, v); \ } \ static inline BlastType* fromNvShared(NvSharedType* v) \ { \ return WCast(BlastType*, v); \ } CONVERT(NvcVec2, nvidia::NvVec2) CONVERT(NvcVec3, nvidia::NvVec3) CONVERT(NvcVec4, nvidia::NvVec4) CONVERT(NvcQuat, nvidia::NvQuat) CONVERT(NvcTransform, nvidia::NvTransform) CONVERT(NvcPlane, nvidia::NvPlane) CONVERT(NvcMat33, nvidia::NvMat33) CONVERT(NvcMat44, nvidia::NvMat44) CONVERT(NvcBounds3, nvidia::NvBounds3) NV_COMPILE_TIME_ASSERT(sizeof(NvcVec2) == sizeof(nvidia::NvVec2)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec2, x) == NV_OFFSET_OF(nvidia::NvVec2, x)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec2, y) == NV_OFFSET_OF(nvidia::NvVec2, y)); NV_COMPILE_TIME_ASSERT(sizeof(NvcVec3) == sizeof(nvidia::NvVec3)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec3, x) == NV_OFFSET_OF(nvidia::NvVec3, x)); 
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec3, y) == NV_OFFSET_OF(nvidia::NvVec3, y)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec3, z) == NV_OFFSET_OF(nvidia::NvVec3, z)); NV_COMPILE_TIME_ASSERT(sizeof(NvcVec4) == sizeof(nvidia::NvVec4)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, x) == NV_OFFSET_OF(nvidia::NvVec4, x)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, y) == NV_OFFSET_OF(nvidia::NvVec4, y)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, z) == NV_OFFSET_OF(nvidia::NvVec4, z)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, w) == NV_OFFSET_OF(nvidia::NvVec4, w)); NV_COMPILE_TIME_ASSERT(sizeof(NvcQuat) == sizeof(nvidia::NvQuat)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, x) == NV_OFFSET_OF(nvidia::NvQuat, x)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, y) == NV_OFFSET_OF(nvidia::NvQuat, y)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, z) == NV_OFFSET_OF(nvidia::NvQuat, z)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, w) == NV_OFFSET_OF(nvidia::NvQuat, w)); NV_COMPILE_TIME_ASSERT(sizeof(NvcTransform) == sizeof(nvidia::NvTransform)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcTransform, p) == NV_OFFSET_OF(nvidia::NvTransform, p)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcTransform, q) == NV_OFFSET_OF(nvidia::NvTransform, q)); NV_COMPILE_TIME_ASSERT(sizeof(NvcPlane) == sizeof(nvidia::NvPlane)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcPlane, n) == NV_OFFSET_OF(nvidia::NvPlane, n)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcPlane, d) == NV_OFFSET_OF(nvidia::NvPlane, d)); NV_COMPILE_TIME_ASSERT(sizeof(NvcMat33) == sizeof(nvidia::NvMat33)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcMat33, column0) == NV_OFFSET_OF(nvidia::NvMat33, column0)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcMat33, column1) == NV_OFFSET_OF(nvidia::NvMat33, column1)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcMat33, column2) == NV_OFFSET_OF(nvidia::NvMat33, column2)); NV_COMPILE_TIME_ASSERT(sizeof(NvcBounds3) == sizeof(nvidia::NvBounds3)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcBounds3, minimum) == NV_OFFSET_OF(nvidia::NvBounds3, minimum)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcBounds3, maximum) == NV_OFFSET_OF(nvidia::NvBounds3, maximum)); #endif // #ifndef NVBLASTNVSHAREDSHELPERS_H
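An illustrative round trip through the CONVERT() helpers above: view an NvcVec3 as an nvidia::NvVec3 to use its operators, then hand the result back as an NvcVec3. The function itself is hypothetical.

#include "NvBlastNvSharedHelpers.h"

NvcVec3 exampleOffsetCentroid(const NvcVec3& centroid, const NvcVec3& offset)
{
    const nvidia::NvVec3 sum = toNvShared(centroid) + toNvShared(offset);
    return fromNvShared(sum);
}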
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastFamily.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastTypes.h" #include "NvBlastFamily.h" #include "NvBlastFamilyGraph.h" #include "NvBlastIndexFns.h" #include "NvBlastTime.h" #include <new> namespace Nv { namespace Blast { //////// Global functions //////// struct FamilyDataOffsets { size_t m_actors; size_t m_visibleChunkIndexLinks; size_t m_chunkActorIndices; size_t m_graphNodeIndexLinks; size_t m_lowerSupportChunkHealths; size_t m_graphBondHealths; size_t m_graphCachedBondHealths; size_t m_familyGraph; }; static size_t createFamilyDataOffsets(FamilyDataOffsets& offsets, const NvBlastAssetMemSizeData& sizeData) { NvBlastCreateOffsetStart(sizeof(FamilyHeader)); NvBlastCreateOffsetAlign16(offsets.m_actors, sizeData.lowerSupportChunkCount * sizeof(Actor)); NvBlastCreateOffsetAlign16(offsets.m_visibleChunkIndexLinks, sizeData.chunkCount * sizeof(IndexDLink<uint32_t>)); NvBlastCreateOffsetAlign16(offsets.m_chunkActorIndices, sizeData.upperSupportChunkCount * sizeof(uint32_t)); NvBlastCreateOffsetAlign16(offsets.m_graphNodeIndexLinks, sizeData.nodeCount * sizeof(uint32_t)); NvBlastCreateOffsetAlign16(offsets.m_lowerSupportChunkHealths, sizeData.lowerSupportChunkCount * sizeof(float)); NvBlastCreateOffsetAlign16(offsets.m_graphBondHealths, sizeData.bondCount * sizeof(float)); NvBlastCreateOffsetAlign16(offsets.m_graphCachedBondHealths, sizeData.bondCount * sizeof(float)); NvBlastCreateOffsetAlign16(offsets.m_familyGraph, static_cast<size_t>(FamilyGraph::requiredMemorySize(sizeData.nodeCount, sizeData.bondCount))); return NvBlastCreateOffsetEndAlign16(); } size_t getFamilyMemorySize(const Asset* asset) { #if NVBLASTLL_CHECK_PARAMS if (asset == nullptr) { NVBLAST_ALWAYS_ASSERT(); return 0; } #endif const NvBlastAssetMemSizeData sizeData = NvBlastAssetMemSizeDataFromAsset(asset); return getFamilyMemorySize(sizeData); } size_t getFamilyMemorySize(const NvBlastAssetMemSizeData& sizeData) { FamilyDataOffsets offsets; return createFamilyDataOffsets(offsets, sizeData); } // this path is used by the serialization code // 
buffers are set up, but some parts (like asset ID) are left to the serialization code to fill in static NvBlastFamily* createFamily(void* mem, const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn) { NVBLASTLL_CHECK(mem != nullptr, logFn, "createFamily: NULL mem pointer input.", return nullptr); NVBLASTLL_CHECK((reinterpret_cast<uintptr_t>(mem) & 0xF) == 0, logFn, "createFamily: mem pointer not 16-byte aligned.", return nullptr); if (sizeData.chunkCount == 0) { NVBLASTLL_LOG_ERROR(logFn, "createFamily: Asset has no chunks. Family not created.\n"); return nullptr; } const uint32_t bondCount = sizeData.bondCount; // We need to keep this many actor representations around for our island indexing scheme. const uint32_t lowerSupportChunkCount = sizeData.lowerSupportChunkCount; // We need this many chunk actor indices. const uint32_t upperSupportChunkCount = sizeData.upperSupportChunkCount; // Family offsets FamilyDataOffsets offsets; const size_t dataSize = createFamilyDataOffsets(offsets, sizeData); // Restricting our data size to < 4GB so that we may use uint32_t offsets if (dataSize > (size_t)UINT32_MAX) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::instanceAllocate: Instance data block size will exceed 4GB. Instance not created.\n"); return nullptr; } // Allocate family NvBlastFamily* family = reinterpret_cast<NvBlastFamily*>(memset(mem, 0, dataSize)); // Fill in family header FamilyHeader* header = (FamilyHeader*)family; header->dataType = NvBlastDataBlock::FamilyDataBlock; header->formatVersion = 0; // Not currently using this field header->size = (uint32_t)dataSize; header->m_actorCount = 0; header->m_actorsOffset = (uint32_t)offsets.m_actors; header->m_visibleChunkIndexLinksOffset = (uint32_t)offsets.m_visibleChunkIndexLinks; header->m_chunkActorIndicesOffset = (uint32_t)offsets.m_chunkActorIndices; header->m_graphNodeIndexLinksOffset = (uint32_t)offsets.m_graphNodeIndexLinks; header->m_lowerSupportChunkHealthsOffset = (uint32_t)offsets.m_lowerSupportChunkHealths; header->m_graphBondHealthsOffset = (uint32_t)offsets.m_graphBondHealths; header->m_graphCachedBondHealthsOffset = (uint32_t)offsets.m_graphCachedBondHealths; header->m_familyGraphOffset = (uint32_t)offsets.m_familyGraph; // Initialize family header data: // Actors - initialize to defaults, with zero offset value (indicating inactive state) Actor* actors = header->getActors(); // This will get the subsupport actors too for (uint32_t i = 0; i < lowerSupportChunkCount; ++i) { new (actors + i) Actor(); } // Visible chunk index links - initialize to solitary links (0xFFFFFFFF fields) memset(header->getVisibleChunkIndexLinks(), 0xFF, sizeData.chunkCount*sizeof(IndexDLink<uint32_t>)); // Chunk actor IDs - initialize to invalid (0xFFFFFFFF) memset(header->getChunkActorIndices(), 0xFF, upperSupportChunkCount*sizeof(uint32_t)); // Graph node index links - initialize to solitary links memset(header->getGraphNodeIndexLinks(), 0xFF, sizeData.nodeCount*sizeof(uint32_t)); // Healths are initialized to 0 - the entire memory block is already set to 0 above // memset(header->getLowerSupportChunkHealths(), 0, lowerSupportChunkCount*sizeof(float)); // memset(header->getBondHealths(), 0, bondCount*sizeof(float)); // FamilyGraph ctor new (header->getFamilyGraph()) FamilyGraph(sizeData.nodeCount, sizeData.bondCount); return family; } // this path is taken when an asset already exists and a family is to be created from it directly static NvBlastFamily* createFamily(void* mem, const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset 
!= nullptr, logFn, "createFamily: NULL asset pointer input.", return nullptr); const Asset* solverAsset = static_cast<const Asset*>(asset); // pull count info from the asset and use that to initialize the family buffers NvBlastAssetMemSizeData sizeData = NvBlastAssetMemSizeDataFromAsset(solverAsset); NvBlastFamily* family = createFamily(mem, sizeData, logFn); if (family != nullptr) { // set the asset ID and pointer since we have them available FamilyHeader* header = reinterpret_cast<FamilyHeader*>(family); header->m_assetID = solverAsset->m_ID; header->m_asset = solverAsset; } return family; } //////// Family member methods //////// void FamilyHeader::fractureSubSupportNoEvents(uint32_t chunkIndex, uint32_t suboffset, float healthDamage, float* chunkHealths, const NvBlastChunk* chunks) { const NvBlastChunk& chunk = chunks[chunkIndex]; uint32_t numChildren = chunk.childIndexStop - chunk.firstChildIndex; if (numChildren > 0) { healthDamage /= numChildren; for (uint32_t childIndex = chunk.firstChildIndex; childIndex < chunk.childIndexStop; childIndex++) { float& health = chunkHealths[childIndex - suboffset]; if (canTakeDamage(health)) { float remainingDamage = healthDamage - health; health -= healthDamage; NVBLAST_ASSERT(chunks[childIndex].parentChunkIndex == chunkIndex); if (health <= 0.0f && remainingDamage > 0.0f) { fractureSubSupportNoEvents(childIndex, suboffset, remainingDamage, chunkHealths, chunks); } } } } } void FamilyHeader::fractureSubSupport(uint32_t chunkIndex, uint32_t suboffset, float healthDamage, float* chunkHealths, const NvBlastChunk* chunks, NvBlastChunkFractureData* outBuffer, uint32_t* currentIndex, const uint32_t maxCount) { const NvBlastChunk& chunk = chunks[chunkIndex]; uint32_t numChildren = chunk.childIndexStop - chunk.firstChildIndex; if (numChildren > 0) { healthDamage /= numChildren; for (uint32_t childIndex = chunk.firstChildIndex; childIndex < chunk.childIndexStop; childIndex++) { float& health = chunkHealths[childIndex - suboffset]; if (canTakeDamage(health)) { float remainingDamage = healthDamage - health; health -= healthDamage; NVBLAST_ASSERT(chunks[childIndex].parentChunkIndex == chunkIndex); if (*currentIndex < maxCount) { NvBlastChunkFractureData& event = outBuffer[*currentIndex]; event.userdata = chunks[childIndex].userData; event.chunkIndex = childIndex; event.health = health; } (*currentIndex)++; if (health <= 0.0f && remainingDamage > 0.0f) { fractureSubSupport(childIndex, suboffset, remainingDamage, chunkHealths, chunks, outBuffer, currentIndex, maxCount); } } } } } void FamilyHeader::fractureNoEvents(uint32_t chunkFractureCount, const NvBlastChunkFractureData* chunkFractures, Actor* filterActor, NvBlastLog logFn) { const SupportGraph& graph = m_asset->m_graph; const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition(); const uint32_t* adjacentBondIndices = graph.getAdjacentBondIndices(); float* bondHealths = getBondHealths(); float* chunkHealths = getLowerSupportChunkHealths(); float* subChunkHealths = getSubsupportChunkHealths(); const NvBlastChunk* chunks = m_asset->getChunks(); for (uint32_t i = 0; i < chunkFractureCount; ++i) { const NvBlastChunkFractureData& command = chunkFractures[i]; const uint32_t chunkIndex = command.chunkIndex; const uint32_t chunkHealthIndex = m_asset->getContiguousLowerSupportIndex(chunkIndex); NVBLAST_ASSERT(!isInvalidIndex(chunkHealthIndex)); if (isInvalidIndex(chunkHealthIndex)) { continue; } float& health = chunkHealths[chunkHealthIndex]; if (canTakeDamage(health) && command.health > 0.0f) { Actor* 
actor = getChunkActor(chunkIndex); if (filterActor && filterActor != actor) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: chunk fracture command corresponds to other actor, command is ignored."); } else if (actor) { const uint32_t nodeIndex = m_asset->getChunkToGraphNodeMap()[chunkIndex]; if (actor->getGraphNodeCount() > 1 && !isInvalidIndex(nodeIndex)) { for (uint32_t adjacentIndex = graphAdjacencyPartition[nodeIndex]; adjacentIndex < graphAdjacencyPartition[nodeIndex + 1]; adjacentIndex++) { const uint32_t bondIndex = adjacentBondIndices[adjacentIndex]; NVBLAST_ASSERT(!isInvalidIndex(bondIndex)); if (bondHealths[bondIndex] > 0.0f) { bondHealths[bondIndex] = 0.0f; } } getFamilyGraph()->notifyNodeRemoved(actor->getIndex(), nodeIndex, &graph); } health -= command.health; const float remainingDamage = -health; if (remainingDamage > 0.0f) // node chunk has been damaged beyond its health { fractureSubSupportNoEvents(chunkIndex, m_asset->m_firstSubsupportChunkIndex, remainingDamage, subChunkHealths, chunks); } } } } } void FamilyHeader::fractureWithEvents(uint32_t chunkFractureCount, const NvBlastChunkFractureData* commands, NvBlastChunkFractureData* events, uint32_t eventsSize, uint32_t* count, Actor* filterActor, NvBlastLog logFn) { const SupportGraph& graph = m_asset->m_graph; const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition(); const uint32_t* adjacentBondIndices = graph.getAdjacentBondIndices(); float* bondHealths = getBondHealths(); float* chunkHealths = getLowerSupportChunkHealths(); float* subChunkHealths = getSubsupportChunkHealths(); const NvBlastChunk* chunks = m_asset->getChunks(); for (uint32_t i = 0; i < chunkFractureCount; ++i) { const NvBlastChunkFractureData& command = commands[i]; const uint32_t chunkIndex = command.chunkIndex; const uint32_t chunkHealthIndex = m_asset->getContiguousLowerSupportIndex(chunkIndex); NVBLAST_ASSERT(!isInvalidIndex(chunkHealthIndex)); if (isInvalidIndex(chunkHealthIndex)) { continue; } float& health = chunkHealths[chunkHealthIndex]; if (canTakeDamage(health) && command.health > 0.0f) { Actor* actor = getChunkActor(chunkIndex); if (filterActor && filterActor != actor) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: chunk fracture command corresponds to other actor, command is ignored."); } else if (actor) { const uint32_t nodeIndex = m_asset->getChunkToGraphNodeMap()[chunkIndex]; if (actor->getGraphNodeCount() > 1 && !isInvalidIndex(nodeIndex)) { for (uint32_t adjacentIndex = graphAdjacencyPartition[nodeIndex]; adjacentIndex < graphAdjacencyPartition[nodeIndex + 1]; adjacentIndex++) { const uint32_t bondIndex = adjacentBondIndices[adjacentIndex]; NVBLAST_ASSERT(!isInvalidIndex(bondIndex)); if (bondHealths[bondIndex] > 0.0f) { bondHealths[bondIndex] = 0.0f; } } getFamilyGraph()->notifyNodeRemoved(actor->getIndex(), nodeIndex, &graph); } health -= command.health; if (*count < eventsSize) { NvBlastChunkFractureData& outEvent = events[*count]; outEvent.userdata = chunks[chunkIndex].userData; outEvent.chunkIndex = chunkIndex; outEvent.health = health; } (*count)++; const float remainingDamage = -health; if (remainingDamage > 0.0f) // node chunk has been damaged beyond its health { fractureSubSupport(chunkIndex, m_asset->m_firstSubsupportChunkIndex, remainingDamage, subChunkHealths, chunks, events, count, eventsSize); } } } } } void FamilyHeader::fractureInPlaceEvents(uint32_t chunkFractureCount, NvBlastChunkFractureData* inoutbuffer, uint32_t eventsSize, uint32_t* count, Actor* filterActor, NvBlastLog logFn) { 
const SupportGraph& graph = m_asset->m_graph; const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition(); const uint32_t* adjacentBondIndices = graph.getAdjacentBondIndices(); float* bondHealths = getBondHealths(); float* chunkHealths = getLowerSupportChunkHealths(); float* subChunkHealths = getSubsupportChunkHealths(); const NvBlastChunk* chunks = m_asset->getChunks(); // // First level Chunk Fractures // for (uint32_t i = 0; i < chunkFractureCount; ++i) { const NvBlastChunkFractureData& command = inoutbuffer[i]; const uint32_t chunkIndex = command.chunkIndex; const uint32_t chunkHealthIndex = m_asset->getContiguousLowerSupportIndex(chunkIndex); NVBLAST_ASSERT(!isInvalidIndex(chunkHealthIndex)); if (isInvalidIndex(chunkHealthIndex)) { continue; } float& health = chunkHealths[chunkHealthIndex]; if (canTakeDamage(health) && command.health > 0.0f) { Actor* actor = getChunkActor(chunkIndex); if (filterActor && filterActor != actor) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: chunk fracture command corresponds to other actor, command is ignored."); } else if (actor) { const uint32_t nodeIndex = m_asset->getChunkToGraphNodeMap()[chunkIndex]; if (actor->getGraphNodeCount() > 1 && !isInvalidIndex(nodeIndex)) { for (uint32_t adjacentIndex = graphAdjacencyPartition[nodeIndex]; adjacentIndex < graphAdjacencyPartition[nodeIndex + 1]; adjacentIndex++) { const uint32_t bondIndex = adjacentBondIndices[adjacentIndex]; NVBLAST_ASSERT(!isInvalidIndex(bondIndex)); if (bondHealths[bondIndex] > 0.0f) { bondHealths[bondIndex] = 0.0f; } } getFamilyGraph()->notifyNodeRemoved(actor->getIndex(), nodeIndex, &graph); } health -= command.health; NvBlastChunkFractureData& outEvent = inoutbuffer[(*count)++]; outEvent.userdata = chunks[chunkIndex].userData; outEvent.chunkIndex = chunkIndex; outEvent.health = health; } } } // // Hierarchical Chunk Fractures // uint32_t commandedChunkFractures = *count; for (uint32_t i = 0; i < commandedChunkFractures; ++i) { NvBlastChunkFractureData& event = inoutbuffer[i]; const uint32_t chunkIndex = event.chunkIndex; const float remainingDamage = -event.health; if (remainingDamage > 0.0f) // node chunk has been damaged beyond its health { fractureSubSupport(chunkIndex, m_asset->m_firstSubsupportChunkIndex, remainingDamage, subChunkHealths, chunks, inoutbuffer, count, eventsSize); } } } void FamilyHeader::applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands, Actor* filterActor, NvBlastLog logFn, NvBlastTimers* timers) { NVBLASTLL_CHECK(commands != nullptr, logFn, "NvBlastActorApplyFracture: NULL commands pointer input.", return); NVBLASTLL_CHECK(isValid(commands), logFn, "NvBlastActorApplyFracture: commands memory is NULL but size is > 0.", return); NVBLASTLL_CHECK(eventBuffers == nullptr || isValid(eventBuffers), logFn, "NvBlastActorApplyFracture: eventBuffers memory is NULL but size is > 0.", eventBuffers->bondFractureCount = 0; eventBuffers->chunkFractureCount = 0; return); #if NVBLASTLL_CHECK_PARAMS if (eventBuffers != nullptr && eventBuffers->bondFractureCount == 0 && eventBuffers->chunkFractureCount == 0) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: eventBuffers do not provide any space."); return; } #endif #if NV_PROFILE Time time; #else NV_UNUSED(timers); #endif // // Chunk Fracture // if (eventBuffers == nullptr || eventBuffers->chunkFractures == nullptr) { // immediate hierarchical fracture fractureNoEvents(commands->chunkFractureCount, commands->chunkFractures, filterActor, logFn); } else if 
(eventBuffers->chunkFractures != commands->chunkFractures) { // immediate hierarchical fracture uint32_t count = 0; fractureWithEvents(commands->chunkFractureCount, commands->chunkFractures, eventBuffers->chunkFractures, eventBuffers->chunkFractureCount, &count, filterActor, logFn); if (count > eventBuffers->chunkFractureCount) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: eventBuffers too small. Chunk events were lost."); } else { eventBuffers->chunkFractureCount = count; } } else if (eventBuffers->chunkFractures == commands->chunkFractures) { // compacting first uint32_t count = 0; fractureInPlaceEvents(commands->chunkFractureCount, commands->chunkFractures, eventBuffers->chunkFractureCount, &count, filterActor, logFn); if (count > eventBuffers->chunkFractureCount) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: eventBuffers too small. Chunk events were lost."); } else { eventBuffers->chunkFractureCount = count; } } // // Bond Fracture // uint32_t outCount = 0; const uint32_t eventBufferSize = eventBuffers ? eventBuffers->bondFractureCount : 0; NvBlastBond* bonds = m_asset->getBonds(); float* bondHealths = getBondHealths(); const uint32_t* graphChunkIndices = m_asset->m_graph.getChunkIndices(); for (uint32_t i = 0; i < commands->bondFractureCount; ++i) { const NvBlastBondFractureData& frac = commands->bondFractures[i]; NVBLAST_ASSERT(frac.nodeIndex0 < m_asset->m_graph.m_nodeCount); NVBLAST_ASSERT(frac.nodeIndex1 < m_asset->m_graph.m_nodeCount); uint32_t chunkIndex0 = graphChunkIndices[frac.nodeIndex0]; uint32_t chunkIndex1 = graphChunkIndices[frac.nodeIndex1]; NVBLAST_ASSERT(!isInvalidIndex(chunkIndex0) || !isInvalidIndex(chunkIndex1)); Actor* actor0 = !isInvalidIndex(chunkIndex0) ? getChunkActor(chunkIndex0) : nullptr; Actor* actor1 = !isInvalidIndex(chunkIndex1) ? getChunkActor(chunkIndex1) : nullptr; NVBLAST_ASSERT(actor0 != nullptr || actor1 != nullptr); // If actors are not nullptr and different then bond is already broken // One of actor can be nullptr which probably means it's 'world' node. if (actor0 == actor1 || actor0 == nullptr || actor1 == nullptr) { Actor* actor = actor0 ? actor0 : actor1; NVBLAST_ASSERT_WITH_MESSAGE(actor, "NvBlastActorApplyFracture: all actors in bond fracture command are nullptr, command will be safely ignored, but investigation is recommended."); if (filterActor && filterActor != actor) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: bond fracture command corresponds to other actor, command is ignored."); } else if (actor) { const uint32_t bondIndex = actor->damageBond(frac.nodeIndex0, frac.nodeIndex1, frac.health); if (!isInvalidIndex(bondIndex)) { if (eventBuffers && eventBuffers->bondFractures) { if (outCount < eventBufferSize) { NvBlastBondFractureData& outEvent = eventBuffers->bondFractures[outCount]; outEvent.userdata = bonds[bondIndex].userData; outEvent.nodeIndex0 = frac.nodeIndex0; outEvent.nodeIndex1 = frac.nodeIndex1; outEvent.health = bondHealths[bondIndex]; } } outCount++; } } } } if (eventBuffers && eventBuffers->bondFractures) { if (outCount > eventBufferSize) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: eventBuffers too small. 
Bond events were lost."); } else { eventBuffers->bondFractureCount = outCount; } } #if NV_PROFILE if (timers != nullptr) { timers->fracture += time.getElapsedTicks(); } #endif } } // namespace Blast } // namespace Nv // API implementation extern "C" { NvBlastAssetMemSizeData NvBlastAssetMemSizeDataFromAsset(const NvBlastAsset* asset) { const Nv::Blast::Asset* solverAsset = reinterpret_cast<const Nv::Blast::Asset*>(asset); NvBlastAssetMemSizeData sizeData; if (solverAsset) { sizeData.bondCount = solverAsset->getBondCount(); sizeData.chunkCount = solverAsset->m_chunkCount; sizeData.nodeCount = solverAsset->m_graph.m_nodeCount; sizeData.lowerSupportChunkCount = solverAsset->getLowerSupportChunkCount(); sizeData.upperSupportChunkCount = solverAsset->getUpperSupportChunkCount(); } else { memset(&sizeData, 0, sizeof(NvBlastAssetMemSizeData)); } return sizeData; } NvBlastFamily* NvBlastAssetCreateFamily(void* mem, const NvBlastAsset* asset, NvBlastLog logFn) { return Nv::Blast::createFamily(mem, asset, logFn); } NvBlastFamily* NvBlastAssetCreateFamilyFromSizeData(void* mem, const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn) { return Nv::Blast::createFamily(mem, sizeData, logFn); } uint32_t NvBlastFamilyGetFormatVersion(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetFormatVersion: NULL family pointer input.", return UINT32_MAX); return reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->formatVersion; } const NvBlastAsset* NvBlastFamilyGetAsset(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetAsset: NULL family pointer input.", return nullptr); return reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->m_asset; } void NvBlastFamilySetAsset(NvBlastFamily* family, const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilySetAsset: NULL family pointer input.", return); NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastFamilySetAsset: NULL asset pointer input.", return); Nv::Blast::FamilyHeader* header = reinterpret_cast<Nv::Blast::FamilyHeader*>(family); const Nv::Blast::Asset* solverAsset = reinterpret_cast<const Nv::Blast::Asset*>(asset); if (memcmp(&header->m_assetID, &solverAsset->m_ID, sizeof(NvBlastID))) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastFamilySetAsset: wrong asset. 
Passed asset ID doesn't match family asset ID."); return; } header->m_asset = solverAsset; } uint32_t NvBlastFamilyGetSize(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetSize: NULL family pointer input.", return 0); return reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->size; } NvBlastID NvBlastFamilyGetAssetID(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetAssetID: NULL family pointer input.", return NvBlastID()); return reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->m_assetID; } uint32_t NvBlastFamilyGetActorCount(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetActorCount: NULL family pointer input.", return 0); const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family); return header->m_actorCount; } uint32_t NvBlastFamilyGetActors(NvBlastActor** actors, uint32_t actorsSize, const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(actors != nullptr, logFn, "NvBlastFamilyGetActors: NULL actors pointer input.", return 0); NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetActors: NULL family pointer input.", return 0); const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family); // Iterate through active actors and write to supplied array const uint32_t familyActorCount = header->getActorsArraySize(); Nv::Blast::Actor* familyActor = header->getActors(); uint32_t actorCount = 0; for (uint32_t i = 0; actorCount < actorsSize && i < familyActorCount; ++i, ++familyActor) { if (familyActor->isActive()) { actors[actorCount++] = familyActor; } } return actorCount; } NvBlastActor* NvBlastFamilyGetActorByIndex(const NvBlastFamily* family, uint32_t actorIndex, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetActorByIndex: NULL family pointer input.", return nullptr); const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family); return header->getActorByIndex(actorIndex); } NvBlastActor* NvBlastFamilyGetChunkActor(const NvBlastFamily* family, uint32_t chunkIndex, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetChunkActor: NULL family pointer input.", return nullptr); const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family); NVBLASTLL_CHECK(header->m_asset != nullptr, logFn, "NvBlastFamilyGetChunkActor: NvBlastFamily has null asset set.", return nullptr); NVBLASTLL_CHECK(chunkIndex < header->m_asset->m_chunkCount, logFn, "NvBlastFamilyGetChunkActor: bad value of chunkIndex for the given family's asset.", return nullptr); return header->getChunkActor(chunkIndex); } uint32_t* NvBlastFamilyGetChunkActorIndices(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetChunkActorIndices: NULL family pointer input.", return nullptr); const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family); NVBLASTLL_CHECK(header->m_asset != nullptr, logFn, "NvBlastFamilyGetChunkActorIndices: NvBlastFamily has null asset set.", return nullptr); return header->getChunkActorIndices(); } uint32_t NvBlastFamilyGetMaxActorCount(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetMaxActorCount: NULL family pointer input.", return 0); const Nv::Blast::FamilyHeader* 
header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family); return header->getActorsArraySize(); } } // extern "C"
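// A minimal usage sketch of the family query API implemented above. Illustrative
// only: the helper name listActiveActors is an assumption, and the family/logFn
// arguments are assumed to be supplied by the caller. The actor array is sized
// from NvBlastFamilyGetMaxActorCount so that every active actor can be reported
// by NvBlastFamilyGetActors.

#include <vector>
#include "NvBlast.h"

static void listActiveActors(const NvBlastFamily* family, NvBlastLog logFn)
{
    // Upper bound on the number of actors the family can hold.
    const uint32_t maxActorCount = NvBlastFamilyGetMaxActorCount(family, logFn);
    std::vector<NvBlastActor*> actors(maxActorCount);

    // Writes up to maxActorCount active actor pointers and returns how many were written.
    const uint32_t activeCount = NvBlastFamilyGetActors(actors.data(), maxActorCount, family, logFn);
    for (uint32_t i = 0; i < activeCount; ++i)
    {
        NvBlastActor* actor = actors[i];
        (void)actor;    // e.g. generate/apply fractures or split this actor here
    }
}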
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastActorSerializationBlock.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastActor.h" #include "NvBlastActorSerializationBlock.h" #include "NvBlastFamilyGraph.h" #include <algorithm> namespace Nv { namespace Blast { //////// Actor static methods for serialization //////// Actor* Actor::deserialize(NvBlastFamily* family, const void* buffer, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "Actor::deserialize: NULL family pointer input.", return nullptr); const ActorSerializationHeader* serHeader = reinterpret_cast<const ActorSerializationHeader*>(buffer); if (serHeader->m_formatVersion != ActorSerializationFormat::Current) { NVBLASTLL_LOG_ERROR(logFn, "Actor::deserialize: wrong data format. Serialization data must be converted to current version."); return nullptr; } FamilyHeader* header = reinterpret_cast<FamilyHeader*>(family); const Asset* asset = header->m_asset; const SupportGraph& graph = asset->m_graph; const uint32_t* graphChunkIndices = graph.getChunkIndices(); const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition(); const uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices(); const uint32_t* graphAdjacentBondIndices = graph.getAdjacentBondIndices(); Actor* actor = nullptr; const uint32_t actorIndex = serHeader->m_index; if (serHeader->m_index < header->getActorsArraySize()) { if (!header->getActors()[actorIndex].isActive()) { actor = header->borrowActor(serHeader->m_index); } } if (actor == nullptr) { NVBLASTLL_LOG_ERROR(logFn, "Actor::deserialize: invalid actor index in serialized data. 
Actor not created."); return nullptr; } // Commonly used data uint32_t* chunkActorIndices = header->getChunkActorIndices(); FamilyGraph* familyGraph = header->getFamilyGraph(); #if NVBLASTLL_CHECK_PARAMS { const uint32_t* serVisibleChunkIndices = serHeader->getVisibleChunkIndices(); for (uint32_t i = 0; i < serHeader->m_visibleChunkCount; ++i) { const uint32_t visibleChunkIndex = serVisibleChunkIndices[i]; if (!isInvalidIndex(chunkActorIndices[visibleChunkIndex])) { NVBLASTLL_LOG_ERROR(logFn, "Actor::deserialize: visible chunk already has an actor in family. Actor not created."); header->returnActor(*actor); return nullptr; } } } #endif // Visible chunk indices and chunk actor indices { // Add visible chunks, set chunk subtree ownership const uint32_t* serVisibleChunkIndices = serHeader->getVisibleChunkIndices(); IndexDLink<uint32_t>* visibleChunkIndexLinks = header->getVisibleChunkIndexLinks(); for (uint32_t i = serHeader->m_visibleChunkCount; i--;) // Reverse-order, so the resulting linked list is in the original order { const uint32_t visibleChunkIndex = serVisibleChunkIndices[i]; NVBLAST_ASSERT(isInvalidIndex(visibleChunkIndexLinks[visibleChunkIndex].m_adj[0]) && isInvalidIndex(visibleChunkIndexLinks[visibleChunkIndex].m_adj[1])); IndexDList<uint32_t>().insertListHead(actor->m_firstVisibleChunkIndex, visibleChunkIndexLinks, visibleChunkIndex); for (Asset::DepthFirstIt j(*asset, visibleChunkIndex, true); (bool)j; ++j) { NVBLAST_ASSERT(isInvalidIndex(chunkActorIndices[(uint32_t)j])); chunkActorIndices[(uint32_t)j] = actorIndex; } } actor->m_visibleChunkCount = serHeader->m_visibleChunkCount; } // Graph node indices, leaf chunk count, and and island IDs { // Add graph nodes const uint32_t* serGraphNodeIndices = serHeader->getGraphNodeIndices(); uint32_t* graphNodeIndexLinks = header->getGraphNodeIndexLinks(); uint32_t* islandIDs = familyGraph->getIslandIds(); for (uint32_t i = serHeader->m_graphNodeCount; i--;) // Reverse-order, so the resulting linked list is in the original order { const uint32_t graphNodeIndex = serGraphNodeIndices[i]; NVBLAST_ASSERT(isInvalidIndex(graphNodeIndexLinks[graphNodeIndex])); graphNodeIndexLinks[graphNodeIndex] = actor->m_firstGraphNodeIndex; actor->m_firstGraphNodeIndex = graphNodeIndex; islandIDs[graphNodeIndex] = actorIndex; } actor->m_graphNodeCount = serHeader->m_graphNodeCount; actor->m_leafChunkCount = serHeader->m_leafChunkCount; } // Using this function after the family graph data has been set up, so that it will work correctly const bool hasExternalBonds = actor->hasExternalBonds(); // Lower support chunk healths { const float* serLowerSupportChunkHealths = serHeader->getLowerSupportChunkHealths(); float* subsupportHealths = header->getSubsupportChunkHealths(); const uint32_t subsupportChunkCount = asset->getUpperSupportChunkCount(); if (actor->m_graphNodeCount > 0) { uint32_t serLowerSupportChunkCount = 0; float* graphNodeHealths = header->getLowerSupportChunkHealths(); for (Actor::GraphNodeIt i = *actor; (bool)i; ++i) { const uint32_t graphNodeIndex = (uint32_t)i; const uint32_t supportChunkIndex = graphChunkIndices[graphNodeIndex]; if (isInvalidIndex(supportChunkIndex)) { continue; } graphNodeHealths[graphNodeIndex] = serLowerSupportChunkHealths[serLowerSupportChunkCount++]; Asset::DepthFirstIt j(*asset, supportChunkIndex); NVBLAST_ASSERT((bool)j); ++j; // Skip first (support) chunk, it's already been handled for (; (bool)j; ++j) { subsupportHealths[(uint32_t)j] = serLowerSupportChunkHealths[serLowerSupportChunkCount++]; } } } else // Single 
subsupport chunk if (!isInvalidIndex(actor->m_firstVisibleChunkIndex)) { NVBLAST_ASSERT(actor->m_firstVisibleChunkIndex >= subsupportChunkCount); subsupportHealths[actor->m_firstVisibleChunkIndex - subsupportChunkCount] = *serLowerSupportChunkHealths; } } // Bond healths uint32_t serBondCount = 0; { const float* serBondHealths = serHeader->getBondHealths(); float* bondHealths = header->getBondHealths(); for (Actor::GraphNodeIt i = *actor; (bool)i; ++i) { const uint32_t graphNodeIndex = (uint32_t)i; for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex) { const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex]; if (adjacentNodeIndex > graphNodeIndex) // So as not to double-count { // Only count if the adjacent node belongs to this actor const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex]; if ((hasExternalBonds && isInvalidIndex(adjacentChunkIndex)) || (!isInvalidIndex(adjacentChunkIndex) && chunkActorIndices[adjacentChunkIndex] == actorIndex)) { const uint32_t adjacentBondIndex = graphAdjacentBondIndices[adjacentIndex]; bondHealths[adjacentBondIndex] = serBondHealths[serBondCount++]; } } } } } // Fast routes { const uint32_t* serFastRoute = serHeader->getFastRoute(); uint32_t* fastRoute = header->getFamilyGraph()->getFastRoute(); for (Actor::GraphNodeIt i = *actor; (bool)i; ++i) { fastRoute[(uint32_t)i] = *serFastRoute++; } } // Hop counts { const uint32_t* serHopCounts = serHeader->getHopCounts(); uint32_t* hopCounts = header->getFamilyGraph()->getHopCounts(); for (Actor::GraphNodeIt i = *actor; (bool)i; ++i) { hopCounts[(uint32_t)i] = *serHopCounts++; } } // Edge removed array if (serBondCount > 0) { uint32_t serBondIndex = 0; const FixedBoolArray* serEdgeRemovedArray = serHeader->getEdgeRemovedArray(); FixedBoolArray* edgeRemovedArray = familyGraph->getIsEdgeRemoved(); for (Actor::GraphNodeIt i = *actor; (bool)i; ++i) { const uint32_t graphNodeIndex = (uint32_t)i; for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex) { const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex]; if (adjacentNodeIndex > graphNodeIndex) // So as not to double-count { // Only count if the adjacent node belongs to this actor const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex]; if ((hasExternalBonds && isInvalidIndex(adjacentChunkIndex)) || (!isInvalidIndex(adjacentChunkIndex) && chunkActorIndices[adjacentChunkIndex] == actorIndex)) { if (!serEdgeRemovedArray->test(serBondIndex)) { const uint32_t adjacentBondIndex = graphAdjacentBondIndices[adjacentIndex]; edgeRemovedArray->reset(adjacentBondIndex); } ++serBondIndex; } } } } } return actor; } //////// Actor member methods for serialization //////// uint32_t Actor::serialize(void* buffer, uint32_t bufferSize, NvBlastLog logFn) const { // Set up pointers and such const Asset* asset = getAsset(); const SupportGraph& graph = asset->m_graph; const uint32_t* graphChunkIndices = graph.getChunkIndices(); const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition(); const uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices(); const uint32_t* graphAdjacentBondIndices = graph.getAdjacentBondIndices(); const FamilyHeader* header = getFamilyHeader(); const uint32_t* chunkActorIndices = header->getChunkActorIndices(); const uint32_t thisActorIndex = getIndex(); const bool hasExternalBonds 
= this->hasExternalBonds(); // Make sure there are no dirty nodes if (m_graphNodeCount) { const uint32_t* firstDirtyNodeIndices = header->getFamilyGraph()->getFirstDirtyNodeIndices(); if (!isInvalidIndex(firstDirtyNodeIndices[thisActorIndex])) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: instance graph has dirty nodes. Call Nv::Blast::Actor::findIslands before serializing."); return 0; } } uint64_t offset = 0; // Header ActorSerializationHeader* serHeader = reinterpret_cast<ActorSerializationHeader*>(buffer); offset = align16(sizeof(ActorSerializationHeader)); if (offset > bufferSize) { return 0; // Buffer size insufficient } serHeader->m_formatVersion = ActorSerializationFormat::Current; serHeader->m_size = 0; // Will be updated below serHeader->m_index = thisActorIndex; serHeader->m_visibleChunkCount = m_visibleChunkCount; serHeader->m_graphNodeCount = m_graphNodeCount; serHeader->m_leafChunkCount = m_leafChunkCount; // Visible chunk indices { serHeader->m_visibleChunkIndicesOffset = (uint32_t)offset; offset = align16(offset + m_visibleChunkCount*sizeof(uint32_t)); if (offset > bufferSize) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::Actor::serialize: buffer size exceeded."); return 0; // Buffer size insufficient } uint32_t* serVisibleChunkIndices = serHeader->getVisibleChunkIndices(); uint32_t serVisibleChunkCount = 0; for (Actor::VisibleChunkIt i = *this; (bool)i; ++i) { NVBLAST_ASSERT(serVisibleChunkCount < m_visibleChunkCount); serVisibleChunkIndices[serVisibleChunkCount++] = (uint32_t)i; } NVBLAST_ASSERT(serVisibleChunkCount == m_visibleChunkCount); } // Graph node indices { serHeader->m_graphNodeIndicesOffset = (uint32_t)offset; offset = align16(offset + m_graphNodeCount*sizeof(uint32_t)); if (offset > bufferSize) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded."); return 0; // Buffer size insufficient } uint32_t* serGraphNodeIndices = serHeader->getGraphNodeIndices(); uint32_t serGraphNodeCount = 0; for (Actor::GraphNodeIt i = *this; (bool)i; ++i) { NVBLAST_ASSERT(serGraphNodeCount < m_graphNodeCount); serGraphNodeIndices[serGraphNodeCount++] = (uint32_t)i; } NVBLAST_ASSERT(serGraphNodeCount == m_graphNodeCount); } // Lower support chunk healths { serHeader->m_lowerSupportChunkHealthsOffset = (uint32_t)offset; float* serLowerSupportChunkHealths = serHeader->getLowerSupportChunkHealths(); const float* subsupportHealths = header->getSubsupportChunkHealths(); const uint32_t subsupportChunkCount = asset->getUpperSupportChunkCount(); if (m_graphNodeCount > 0) { uint32_t serLowerSupportChunkCount = 0; const float* graphNodeHealths = header->getLowerSupportChunkHealths(); for (Actor::GraphNodeIt i = *this; (bool)i; ++i) { const uint32_t graphNodeIndex = (uint32_t)i; const uint32_t supportChunkIndex = graphChunkIndices[graphNodeIndex]; if (isInvalidIndex(supportChunkIndex)) { continue; } serLowerSupportChunkHealths[serLowerSupportChunkCount++] = graphNodeHealths[graphNodeIndex]; offset += sizeof(float); Asset::DepthFirstIt j(*asset, supportChunkIndex); NVBLAST_ASSERT((bool)j); ++j; // Skip first (support) chunk, it's already been handled for (; (bool)j; ++j) { if (offset >= bufferSize) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded."); return 0; // Buffer size insufficient } serLowerSupportChunkHealths[serLowerSupportChunkCount++] = subsupportHealths[(uint32_t)j - subsupportChunkCount]; offset += sizeof(float); } } } else // Single subsupport chunk if (!isInvalidIndex(m_firstVisibleChunkIndex)) { 
NVBLAST_ASSERT(m_firstVisibleChunkIndex >= subsupportChunkCount); if (offset >= bufferSize) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded."); return 0; // Buffer size insufficient } *serLowerSupportChunkHealths = subsupportHealths[m_firstVisibleChunkIndex - subsupportChunkCount]; offset += sizeof(float); } } offset = align16(offset); // Bond healths uint32_t serBondCount = 0; { serHeader->m_bondHealthsOffset = (uint32_t)offset; float* serBondHealths = serHeader->getBondHealths(); const float* bondHealths = header->getBondHealths(); for (Actor::GraphNodeIt i = *this; (bool)i; ++i) { const uint32_t graphNodeIndex = (uint32_t)i; for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex) { const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex]; if (adjacentNodeIndex > graphNodeIndex) // So as not to double-count { // Only count if the adjacent node belongs to this actor const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex]; if ((hasExternalBonds && isInvalidIndex(adjacentChunkIndex)) || (!isInvalidIndex(adjacentChunkIndex) && chunkActorIndices[adjacentChunkIndex] == thisActorIndex)) { if (offset >= bufferSize) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded."); return 0; // Buffer size insufficient } const uint32_t adjacentBondIndex = graphAdjacentBondIndices[adjacentIndex]; serBondHealths[serBondCount++] = bondHealths[adjacentBondIndex]; offset += sizeof(float); } } } } } offset = align16(offset); // Fast routes { serHeader->m_fastRouteOffset = (uint32_t)offset; offset = align16(offset + m_graphNodeCount*sizeof(uint32_t)); if (offset > bufferSize) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded."); return 0; // Buffer size insufficient } uint32_t* serFastRoute = serHeader->getFastRoute(); const uint32_t* fastRoute = header->getFamilyGraph()->getFastRoute(); for (Actor::GraphNodeIt i = *this; (bool)i; ++i) { *serFastRoute++ = fastRoute[(uint32_t)i]; } } // Hop counts { serHeader->m_hopCountsOffset = (uint32_t)offset; offset = align16(offset + m_graphNodeCount*sizeof(uint32_t)); if (offset > bufferSize) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded."); return 0; // Buffer size insufficient } uint32_t* serHopCounts = serHeader->getHopCounts(); const uint32_t* hopCounts = header->getFamilyGraph()->getHopCounts(); for (Actor::GraphNodeIt i = *this; (bool)i; ++i) { *serHopCounts++ = hopCounts[(uint32_t)i]; } } // Edge removed array if (serBondCount > 0) { serHeader->m_edgeRemovedArrayOffset = (uint32_t)offset; offset = align16(offset + FixedBoolArray::requiredMemorySize(serBondCount)); if (offset > bufferSize) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded."); return 0; // Buffer size insufficient } uint32_t serBondIndex = 0; FixedBoolArray* serEdgeRemovedArray = serHeader->getEdgeRemovedArray(); new (serEdgeRemovedArray)FixedBoolArray(serBondCount); serEdgeRemovedArray->fill(); // Reset bits as we find bonds const FixedBoolArray* edgeRemovedArray = header->getFamilyGraph()->getIsEdgeRemoved(); for (Actor::GraphNodeIt i = *this; (bool)i; ++i) { const uint32_t graphNodeIndex = (uint32_t)i; for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex) { const uint32_t adjacentNodeIndex = 
graphAdjacentNodeIndices[adjacentIndex]; if (adjacentNodeIndex > graphNodeIndex) // So as not to double-count { // Only count if the adjacent node belongs to this actor const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex]; if ((hasExternalBonds && isInvalidIndex(adjacentChunkIndex)) || (!isInvalidIndex(adjacentChunkIndex) && chunkActorIndices[adjacentChunkIndex] == thisActorIndex)) { const uint32_t adjacentBondIndex = graphAdjacentBondIndices[adjacentIndex]; if (!edgeRemovedArray->test(adjacentBondIndex)) { serEdgeRemovedArray->reset(serBondIndex); } ++serBondIndex; } } } } } // Finally record size serHeader->m_size = static_cast<uint32_t>(offset); return serHeader->m_size; } uint32_t Actor::serializationRequiredStorage(NvBlastLog logFn) const { const Asset* asset = getAsset(); const SupportGraph& graph = asset->m_graph; const uint32_t* graphChunkIndices = graph.getChunkIndices(); const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition(); const uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices(); const uint32_t* graphNodeIndexLinks = getFamilyHeader()->getGraphNodeIndexLinks(); const uint32_t* chunkActorIndices = getFamilyHeader()->getChunkActorIndices(); const uint32_t thisActorIndex = getIndex(); const bool hasExternalBonds = this->hasExternalBonds(); // Lower-support chunk count and bond counts for this actor need to be calculated. Iterate over all support chunks to count these. uint32_t lowerSupportChunkCount = 0; uint32_t bondCount = 0; if (m_graphNodeCount > 0) { for (uint32_t graphNodeIndex = m_firstGraphNodeIndex; !isInvalidIndex(graphNodeIndex); graphNodeIndex = graphNodeIndexLinks[graphNodeIndex]) { // Update bond count for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex) { const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex]; if (adjacentNodeIndex > graphNodeIndex) // So as not to double-count { // Only count if the adjacent node belongs to this actor or the world const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex]; if ((hasExternalBonds && isInvalidIndex(adjacentChunkIndex)) || (!isInvalidIndex(adjacentChunkIndex) && chunkActorIndices[adjacentChunkIndex] == thisActorIndex)) { ++bondCount; } } } // Update lower-support chunk count const uint32_t supportChunkIndex = graphChunkIndices[graphNodeIndex]; if (isInvalidIndex(supportChunkIndex)) { continue; } for (Asset::DepthFirstIt i(*asset, supportChunkIndex); (bool)i; ++i) { ++lowerSupportChunkCount; } } } else // Subsupport chunk { ++lowerSupportChunkCount; } const uint64_t dataSize = getActorSerializationSize(m_visibleChunkCount, lowerSupportChunkCount, m_graphNodeCount, bondCount); if (dataSize > UINT32_MAX) { NVBLASTLL_LOG_WARNING(logFn, "Nv::Blast::Actor::serializationRequiredStorage: Serialization block size exceeds 4GB. 
Returning 0.\n"); return 0; } return static_cast<uint32_t>(dataSize); } } // namespace Blast } // namespace Nv // API implementation extern "C" { uint32_t NvBlastActorGetSerializationSize(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetSerializationSize: NULL actor pointer input.", return 0); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetSerializationSize: inactive actor pointer input."); return 0; } return a.serializationRequiredStorage(logFn); } uint32_t NvBlastActorSerialize(void* buffer, uint32_t bufferSize, const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(buffer != nullptr, logFn, "NvBlastActorSerialize: NULL buffer pointer input.", return 0); NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorSerialize: NULL actor pointer input.", return 0); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorSerialize: inactive actor pointer input."); return 0; } return a.serialize(buffer, bufferSize, logFn); } NvBlastActor* NvBlastFamilyDeserializeActor(NvBlastFamily* family, const void* buffer, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyDeserializeActor: NULL family input. No actor deserialized.", return nullptr); NVBLASTLL_CHECK(buffer != nullptr, logFn, "NvBlastFamilyDeserializeActor: NULL buffer pointer input. No actor deserialized.", return nullptr); return Nv::Blast::Actor::deserialize(family, buffer, logFn); } } // extern "C"
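// A minimal sketch of the single-actor serialization round trip exposed by the
// C API above. Illustrative only: roundTripActor is an assumed helper name, and
// the actor, target family, and logFn are assumed to come from the caller. The
// target family must be built from the same asset and must not already contain
// an actor that uses the same chunks.

#include <vector>
#include "NvBlast.h"

static NvBlastActor* roundTripActor(const NvBlastActor* actor, NvBlastFamily* targetFamily, NvBlastLog logFn)
{
    // Query the exact buffer size required for this actor.
    const uint32_t requiredSize = NvBlastActorGetSerializationSize(actor, logFn);
    if (requiredSize == 0)
    {
        return nullptr; // inactive actor, or serialization block would exceed 4GB
    }

    std::vector<char> buffer(requiredSize);
    if (NvBlastActorSerialize(buffer.data(), requiredSize, actor, logFn) == 0)
    {
        return nullptr; // serialization failed (e.g. dirty nodes or under-sized buffer)
    }

    // Recreate the actor inside the target family from the serialized block.
    return NvBlastFamilyDeserializeActor(targetFamily, buffer.data(), logFn);
}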
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastChunkHierarchy.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTCHUNKHIERARCHY_H #define NVBLASTCHUNKHIERARCHY_H #include "NvBlastIndexFns.h" #include "NvBlastDLink.h" #include "NvBlast.h" #include "NvBlastAssert.h" #include "NvBlastIteratorBase.h" namespace Nv { namespace Blast { /** Chunk hierarchy depth-first iterator. Traverses subtree with root given by startChunkIndex. Will not traverse chunks with index at or beyond chunkIndexLimit. */ class ChunkDepthFirstIt : public IteratorBase<uint32_t> { public: /** Constructed from a chunk array. */ ChunkDepthFirstIt(const NvBlastChunk* chunks, uint32_t startChunkIndex, uint32_t chunkIndexLimit) : IteratorBase<uint32_t>(startChunkIndex), m_chunks(chunks), m_stop(startChunkIndex), m_limit(chunkIndexLimit) { if (m_curr >= m_limit) { m_curr = invalidIndex<uint32_t>(); } } /** Pre-increment. Only use if valid() == true. */ uint32_t operator ++ () { NVBLAST_ASSERT(!isInvalidIndex(m_curr)); const NvBlastChunk* chunk = m_chunks + m_curr; if (chunk->childIndexStop > chunk->firstChildIndex && chunk->firstChildIndex < m_limit) { m_curr = chunk->firstChildIndex; } else { for (;;) { if (m_curr == m_stop) { m_curr = invalidIndex<uint32_t>(); break; } NVBLAST_ASSERT(!isInvalidIndex(chunk->parentChunkIndex)); // This should not be possible with this search const NvBlastChunk* parentChunk = m_chunks + chunk->parentChunkIndex; if (++m_curr < parentChunk->childIndexStop) { break; // Sibling chunk is valid, that's the next chunk } m_curr = chunk->parentChunkIndex; chunk = parentChunk; } } return m_curr; } private: const NvBlastChunk* m_chunks; uint32_t m_stop; uint32_t m_limit; }; /** Enumerates chunk indices in a subtree with root given by chunkIndex, in breadth-first order. Will not traverse chunks with index at or beyond chunkIndexLimit. 
Returns the number of indices written to the chunkIndex array */ NV_INLINE uint32_t enumerateChunkHierarchyBreadthFirst ( uint32_t* chunkIndices, uint32_t chunkIndicesSize, const NvBlastChunk* chunks, uint32_t chunkIndex, bool includeRoot = true, uint32_t chunkIndexLimit = invalidIndex<uint32_t>() ) { if (chunkIndicesSize == 0) { return 0; } uint32_t chunkIndexCount = 0; bool rootHandled = false; if (includeRoot) { chunkIndices[chunkIndexCount++] = chunkIndex; rootHandled = true; } for (uint32_t curr = 0; !rootHandled || curr < chunkIndexCount;) { const NvBlastChunk& chunk = chunks[rootHandled ? chunkIndices[curr] : chunkIndex]; if (chunk.firstChildIndex < chunkIndexLimit) { const uint32_t childIndexStop = chunk.childIndexStop < chunkIndexLimit ? chunk.childIndexStop : chunkIndexLimit; const uint32_t childIndexBufferStop = chunk.firstChildIndex + (chunkIndicesSize - chunkIndexCount); const uint32_t stop = childIndexStop < childIndexBufferStop ? childIndexStop : childIndexBufferStop; for (uint32_t childIndex = chunk.firstChildIndex; childIndex < stop; ++childIndex) { chunkIndices[chunkIndexCount++] = childIndex; } } if (rootHandled) { ++curr; } rootHandled = true; } return chunkIndexCount; } /** VisibilityRep must have m_firstVisibleChunkIndex and m_visibleChunkCount fields */ template<class VisibilityRep> void updateVisibleChunksFromSupportChunk ( VisibilityRep* actors, IndexDLink<uint32_t>* visibleChunkIndexLinks, uint32_t* chunkActorIndices, uint32_t actorIndex, uint32_t supportChunkIndex, const NvBlastChunk* chunks, uint32_t upperSupportChunkCount ) { uint32_t chunkIndex = supportChunkIndex; uint32_t chunkActorIndex = chunkActorIndices[supportChunkIndex]; uint32_t newChunkActorIndex = actorIndex; VisibilityRep& thisActor = actors[actorIndex]; do { if (chunkActorIndex == newChunkActorIndex) { break; // Nothing to do } const uint32_t parentChunkIndex = chunks[chunkIndex].parentChunkIndex; const uint32_t parentChunkActorIndex = parentChunkIndex != invalidIndex<uint32_t>() ? chunkActorIndices[parentChunkIndex] : invalidIndex<uint32_t>(); const bool chunkVisible = chunkActorIndex != parentChunkActorIndex; // If the chunk is visible, it needs to be removed from its old actor's visibility list if (chunkVisible && !isInvalidIndex(chunkActorIndex)) { VisibilityRep& chunkActor = actors[chunkActorIndex]; IndexDList<uint32_t>().removeFromList(chunkActor.m_firstVisibleChunkIndex, visibleChunkIndexLinks, chunkIndex); --chunkActor.m_visibleChunkCount; } // Now update the chunk's actor index const uint32_t oldChunkActorIndex = chunkActorIndices[chunkIndex]; chunkActorIndices[chunkIndex] = newChunkActorIndex; if (newChunkActorIndex != invalidIndex<uint32_t>() && parentChunkActorIndex != newChunkActorIndex) { // The chunk is now visible. 
Add it to this actor's visibility list IndexDList<uint32_t>().insertListHead(thisActor.m_firstVisibleChunkIndex, visibleChunkIndexLinks, chunkIndex); ++thisActor.m_visibleChunkCount; // Remove its children from this actor's visibility list if (actorIndex != oldChunkActorIndex) { const NvBlastChunk& chunk = chunks[chunkIndex]; if (chunk.firstChildIndex < upperSupportChunkCount) // Only need to deal with upper-support children { for (uint32_t childChunkIndex = chunk.firstChildIndex; childChunkIndex < chunk.childIndexStop; ++childChunkIndex) { if (chunkActorIndices[childChunkIndex] == actorIndex) { IndexDList<uint32_t>().removeFromList(thisActor.m_firstVisibleChunkIndex, visibleChunkIndexLinks, childChunkIndex); --thisActor.m_visibleChunkCount; } } } } } if (parentChunkIndex != invalidIndex<uint32_t>()) { // If all of its siblings have the same index, then the parent will too. Otherwise, the parent will have an invalid index and its children will be visible const NvBlastChunk& parentChunk = chunks[parentChunkIndex]; bool uniform = true; for (uint32_t childChunkIndex = parentChunk.firstChildIndex; uniform && childChunkIndex < parentChunk.childIndexStop; ++childChunkIndex) { uniform = (newChunkActorIndex == chunkActorIndices[childChunkIndex]); } if (!uniform) { newChunkActorIndex = invalidIndex<uint32_t>(); // no need to search if the parent index is invalid // the conditional in the loop could never be true in that case if (parentChunkActorIndex != invalidIndex<uint32_t>()) { for (uint32_t childChunkIndex = parentChunk.firstChildIndex; childChunkIndex < parentChunk.childIndexStop; ++childChunkIndex) { const uint32_t childChunkActorIndex = chunkActorIndices[childChunkIndex]; if (childChunkActorIndex != invalidIndex<uint32_t>() && childChunkActorIndex == parentChunkActorIndex) { // The child was invisible. Add it to its actor's visibility list VisibilityRep& childChunkActor = actors[childChunkActorIndex]; IndexDList<uint32_t>().insertListHead(childChunkActor.m_firstVisibleChunkIndex, visibleChunkIndexLinks, childChunkIndex); ++childChunkActor.m_visibleChunkCount; } } } } } // Climb the hierarchy chunkIndex = parentChunkIndex; chunkActorIndex = parentChunkActorIndex; } while (chunkIndex != invalidIndex<uint32_t>()); } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTCHUNKHIERARCHY_H
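// A minimal sketch showing how the breadth-first helper above can be used to
// collect a chunk subtree. Illustrative only: collectSubtree is an assumed
// helper name, and chunks/chunkCount/rootChunkIndex are assumed to describe an
// asset's chunk array. The output buffer is sized to the full chunk count,
// which always bounds the size of any subtree.

#include <vector>
#include "NvBlastChunkHierarchy.h"

static std::vector<uint32_t> collectSubtree(const NvBlastChunk* chunks, uint32_t chunkCount, uint32_t rootChunkIndex)
{
    std::vector<uint32_t> subtree(chunkCount);

    // Defaults apply: the root itself is included and no chunk index limit is imposed.
    const uint32_t written = Nv::Blast::enumerateChunkHierarchyBreadthFirst(
        subtree.data(), chunkCount, chunks, rootChunkIndex);

    subtree.resize(written);    // keep only the indices actually written
    return subtree;
}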
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastSupportGraph.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTSUPPORTGRAPH_H #define NVBLASTSUPPORTGRAPH_H #include "NvBlastIndexFns.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { /** Describes the connectivity between support chunks via bonds. Vertices in the support graph are termed "nodes," and represent particular chunks (NvBlastChunk) in an NvBlastAsset. The indexing for nodes is not the same as that for chunks. Only some chunks are represented by nodes in the graph, and these chunks are called "support chunks." Adjacent node indices and adjacent bond indices are stored for each node, and therefore each bond is represented twice in this graph, going from node[i] -> node[j] and from node[j] -> node[i]. Therefore the size of the getAdjacentNodeIndices() and getAdjacentBondIndices() arrays are twice the number of bonds stored in the corresponding NvBlastAsset. The graph is used as follows. Given a SupportGraph "graph" and node index i, (0 <= i < graph.nodeCount), one may find all adjacent bonds and nodes using: const uint32_t* adjacencyPartition = graph.getAdjacencyPartition(); const uint32_t* adjacentNodeIndices = graph.getAdjacentNodeIndices(); const uint32_t* adjacentBondIndices = graph.getAdjacentBondIndices(); // adj is the lookup value in adjacentNodeIndices and graph.getAdjacentBondIndices() for (uint32_t adj = adjacencyPartition[i]; adj < adjacencyPartition[i+1]; ++adj) { // An adjacent node: uint32_t adjacentNodeIndex = adjacentNodeIndices[adj]; // The corresponding bond (that connects node index i with node indexed adjacentNodeIndex: uint32_t adjacentBondIndex = adjacentBondIndices[adj]; } For a graph node with index i, the corresponding asset chunk index is found using graph.getChunkIndices()[i]. The reverse mapping (obtaining a graph node index from an asset chunk index) can be done using the NvBlastAssetGetChunkToGraphNodeMap(asset, logFn); function. See the documentation for its use. The returned "node index" for a non-support chunk is the invalid value 0xFFFFFFFF. 
*/ struct SupportGraph { /** Total number of nodes in the support graph. */ uint32_t m_nodeCount; /** Indices of chunks represented by the nodes. getChunkIndices returns an array of size m_nodeCount. */ NvBlastBlockArrayData(uint32_t, m_chunkIndicesOffset, getChunkIndices, m_nodeCount); /** Adjacency lookup table, of type uint32_t. Partitions both the getAdjacentNodeIndices() and the getAdjacentBondIndices() arrays into subsets corresponding to each node. The size of this array is nodeCount+1. For 0 <= i < nodeCount, getAdjacencyPartition()[i] is the index of the first element in getAdjacentNodeIndices() (or getAdjacentBondIndices()) for nodes adjacent to the node with index i. getAdjacencyPartition()[nodeCount] is the size of the getAdjacentNodeIndices() and getAdjacentBondIndices() arrays. This allows one to easily count the number of nodes adjacent to a node with index i, using getAdjacencyPartition()[i+1] - getAdjacencyPartition()[i]. getAdjacencyPartition returns an array of size m_nodeCount + 1. */ NvBlastBlockArrayData(uint32_t, m_adjacencyPartitionOffset, getAdjacencyPartition, m_nodeCount + 1); /** Array of uint32_t composed of subarrays holding the indices of nodes adjacent to a given node. The subarrays may be accessed through the getAdjacencyPartition() array. getAdjacentNodeIndices returns an array of size getAdjacencyPartition()[m_nodeCount]. */ NvBlastBlockArrayData(uint32_t, m_adjacentNodeIndicesOffset, getAdjacentNodeIndices, getAdjacencyPartition()[m_nodeCount]); /** Array of uint32_t composed of subarrays holding the indices of bonds (NvBlastBond) for a given node. The subarrays may be accessed through the getAdjacencyPartition() array. getAdjacentBondIndices returns an array of size getAdjacencyPartition()[m_nodeCount]. */ NvBlastBlockArrayData(uint32_t, m_adjacentBondIndicesOffset, getAdjacentBondIndices, getAdjacencyPartition()[m_nodeCount]); /** Finds the bond between two given graph nodes (if it exists) and returns the bond index. If no bond exists, returns invalidIndex<uint32_t>(). \return the index of the bond between the given nodes. */ uint32_t findBond(uint32_t nodeIndex0, uint32_t nodeIndex1) const; }; //////// SupportGraph inline member functions //////// NV_INLINE uint32_t SupportGraph::findBond(uint32_t nodeIndex0, uint32_t nodeIndex1) const { const uint32_t* adjacencyPartition = getAdjacencyPartition(); const uint32_t* adjacentNodeIndices = getAdjacentNodeIndices(); const uint32_t* adjacentBondIndices = getAdjacentBondIndices(); // Iterate through all neighbors of nodeIndex0 chunk for (uint32_t i = adjacencyPartition[nodeIndex0]; i < adjacencyPartition[nodeIndex0 + 1]; i++) { if (adjacentNodeIndices[i] == nodeIndex1) { return adjacentBondIndices[i]; } } return invalidIndex<uint32_t>(); } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTSUPPORTGRAPH_H
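// A minimal sketch of querying the support graph defined above. Illustrative
// only: the helper names neighborCount and nodesAreBonded are assumptions, and
// the graph reference is assumed to come from an asset. It uses the adjacency
// partition to count a node's neighbors and findBond to test whether a bond
// connects two nodes.

#include "NvBlastSupportGraph.h"
#include "NvBlastIndexFns.h"

static uint32_t neighborCount(const Nv::Blast::SupportGraph& graph, uint32_t nodeIndex)
{
    const uint32_t* adjacencyPartition = graph.getAdjacencyPartition();
    // Adjacent nodes/bonds of node i occupy the half-open range [partition[i], partition[i+1]).
    return adjacencyPartition[nodeIndex + 1] - adjacencyPartition[nodeIndex];
}

static bool nodesAreBonded(const Nv::Blast::SupportGraph& graph, uint32_t nodeIndex0, uint32_t nodeIndex1)
{
    // findBond returns invalidIndex<uint32_t>() when no bond connects the two nodes.
    return !Nv::Blast::isInvalidIndex(graph.findBond(nodeIndex0, nodeIndex1));
}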
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastActor.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTACTOR_H #define NVBLASTACTOR_H #include "NvBlastAsset.h" #include "NvBlastDLink.h" #include "NvBlastIteratorBase.h" #include "NvBlastSupportGraph.h" #include "NvBlastFamilyGraph.h" #include "NvBlastPreprocessorInternal.h" #include <cstring> namespace Nv { namespace Blast { // Forward declarations class FamilyGraph; struct FamilyHeader; /** Internal implementation of solver actor. These objects are stored within the family in a single array. A pointer to a Actor class will be given to the user through the NvBlastActor opaque type. */ class Actor : public NvBlastActor { friend struct FamilyHeader; friend void updateVisibleChunksFromSupportChunk<>(Actor*, IndexDLink<uint32_t>*, uint32_t*, uint32_t, uint32_t, const NvBlastChunk*, uint32_t); public: Actor() : m_familyOffset(0), m_firstVisibleChunkIndex(UINT32_MAX), m_visibleChunkCount(0), m_firstGraphNodeIndex(UINT32_MAX), m_graphNodeCount(0), m_leafChunkCount(0) {} //////// Accessors //////// /** Find the family (see FamilyHeader) that this actor belongs to. \return a pointer to the FamilyHeader for this actor. */ FamilyHeader* getFamilyHeader() const; /** Utility to get the asset this actor is associated with, through its family. \return the asset associated with this actor. */ const Asset* getAsset() const; /** Since this object is not deleted (unless the family is deleted), we use m_familyOffset to determine if the actor is valid, or "active." When no actors in an instance return isActive(), it should be safe to delete the family. \return true iff this actor is valid for use (active). */ bool isActive() const; /** Whether or not this actor represents a subsupport chunk. If the actor contains a subsupport chunk, then it can have only that chunk. \return true iff this actor contains a chunk which is a descendant of a support chunk. */ bool isSubSupportChunk() const; /** Whether or not this actor represents a single support chunk. 
If the actor contains a single support chunk, it can have no other chunks associated with it. \return true iff this actor contains exactly one support chunk. */ bool isSingleSupportChunk() const; /** Utility to calculate actor index. \return the index of this actor in the FamilyHeader's getActors() array. */ uint32_t getIndex() const; /** Offset to block of memory which holds the data associated with all actors in this actor's lineage \return the family offset. */ uint32_t getFamilyOffset() const; void setFamilyOffset(uint32_t familyOffset); /** The number of visible chunks. This is calculated from updateVisibleChunksFromGraphNodes(). See also getFirstVisibleChunkIndex. \return the number of chunks in the actor's visible chunk index list. */ uint32_t getVisibleChunkCount() const; void setVisibleChunkCount(uint32_t visibleChunkCount); /** Access to visible chunk linked list for this actor. The index returned is that of a link in the FamilyHeader's getVisibleChunkIndexLinks(). \return the index of the head of the visible chunk linked list. */ uint32_t getFirstVisibleChunkIndex() const; void setFirstVisibleChunkIndex(uint32_t firstVisibleChunkIndex); /** The number of graph nodes, corresponding to support chunks, for this actor. See also getFirstGraphNodeIndex. \return the number of graph nodes in the actor's graph node index list. */ uint32_t getGraphNodeCount() const; void setGraphNodeCount(uint32_t graphNodeCount); /** The number of leaf chunks for this actor. \return number of leaf chunks for this actor. */ uint32_t getLeafChunkCount() const; void setLeafChunkCount(uint32_t leafChunkCount); /** Access to graph node linked list for this actor. The index returned is that of a link in the FamilyHeader's getGraphNodeIndexLinks(). \return the index of the head of the graph node linked list. */ uint32_t getFirstGraphNodeIndex() const; void setFirstGraphNodeIndex(uint32_t firstGraphNodeIndex); /** Access to the index of the first subsupport chunk. \return the index of the first subsupport chunk. */ uint32_t getFirstSubsupportChunkIndex() const; /** Access to the support graph. \return the support graph associated with this actor. */ const SupportGraph* getGraph() const; /** Access the instance graph for islands searching. Return the dynamic data generated for the support graph. (See FamilyGraph.) This is used to store current connectivity information based upon bond and chunk healths, as well as cached intermediate data for faster incremental updates. */ FamilyGraph* getFamilyGraph() const; /** Access to the chunks, of type NvBlastChunk. \return an array of size m_chunkCount. */ NvBlastChunk* getChunks() const; /** Access to the bonds, of type NvBlastBond. \return an array of size m_bondCount. */ NvBlastBond* getBonds() const; /** Access to the health for each support chunk and subsupport chunk, of type float. Use getAsset()->getContiguousLowerSupportIndex() to map lower-support chunk indices into the range of indices valid for this array. \return a float array of chunk healths. */ float* getLowerSupportChunkHealths() const; /** Access to the start of the subsupport chunk health array. \return the array of health values associated with all descendants of support chunks. */ float* getSubsupportChunkHealths() const; /** Bond health for the interfaces between two chunks, of type float. Since the bond is shared by two chunks, the same bond health is used for chunk[i] -> chunk[j] as for chunk[j] -> chunk[i]. \return the array of healths associated with all bonds in the support graph. 
*/ float* getBondHealths() const; /** Graph node index links, of type uint32_t. The successor to index[i] is m_graphNodeIndexLinksOffset[i]. A value of invalidIndex<uint32_t>() indicates no successor. getGraphNodeIndexLinks returns an array of size m_asset->m_graphNodeCount. */ const uint32_t* getGraphNodeIndexLinks() const; //////// Iterators //////// /** Visible chunk iterator. Usage: Given a solver actor a, for (Actor::VisibleChunkIt i = a; (bool)i; ++i) { uint32_t visibleChunkIndex = (uint32_t)i; // visibleChunkIndex references the asset index list } */ class VisibleChunkIt : public DListIt<uint32_t> { public: /** Constructed from an actor. */ VisibleChunkIt(const Actor& actor); }; /** Graph node iterator. Usage: Given a solver actor a, for (Actor::GraphNodeIt i = a; (bool)i; ++i) { uint32_t graphNodeIndex = (uint32_t)i; // graphNodeIndex references the asset's graph node index list } */ class GraphNodeIt : public LListIt<uint32_t> { public: /** Constructed from an actor. */ GraphNodeIt(const Actor& actor); }; //////// Operations //////// /** Create an actor from a descriptor (creates a family). This actor will represent an unfractured instance of the asset. The asset must be in a valid state, for example each chunk hierarchy in it must contain at least one support chunk (a single support chunk in a hierarchy corresponds to the root chunk). This will always be the case for assets created by NvBlastCreateAsset. \param[in] family Family in which to create a new actor. The family must be valid and have no other actors in it. (See createFamily.) \param[in] desc Actor initialization data, must be a valid pointer. \param[in] scratch User-supplied scratch memory of size createRequiredScratch(desc) bytes. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the new actor if the input is valid (by the conditions described above), NULL otherwise. */ static Actor* create(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn); /** Returns the size of the scratch space (in bytes) required to be passed into the create function, based upon the family that will be passed to the create function. \param[in] family The family being instanced. \return the number of bytes required. */ static size_t createRequiredScratch(const NvBlastFamily* family, NvBlastLog logFn); /** Deserialize a single Actor from a buffer. An actor family must given, into which the actor will be inserted if it is compatible. That is, it must not share any chunks or internal IDs with the actors already present in the block. \param[in] family Family in which to deserialize the actor. \param[in] buffer Buffer containing the serialized actor data. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the deserialized actor if successful, NULL otherwise. */ static Actor* deserialize(NvBlastFamily* family, const void* buffer, NvBlastLog logFn); /** Serialize actor into single-actor buffer. \param[out] buffer User-supplied buffer, must be at least of size given by NvBlastActorGetSerializationSize(actor). \param[in] bufferSize The size of the user-supplied buffer. The buffer size must be less than 4GB. If NvBlastActorGetSerializationSize(actor) >= 4GB, this actor cannot be serialized with this method. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of bytes written to the buffer, or 0 if there is an error (such as an under-sized buffer). 
*/ uint32_t serialize(void* buffer, uint32_t bufferSize, NvBlastLog logFn) const; /** Calculate the space required to serialize this actor. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the required buffer size in bytes. */ uint32_t serializationRequiredStorage(NvBlastLog logFn) const; /** Release this actor's association with a family, if any. This actor should be considered deleted after this function is called. \return true if release was successful (actor was active). */ bool release(); //////// Damage and fracturing methods //////// /** See NvBlastActorGenerateFracture */ void generateFracture(NvBlastFractureBuffers* commandBuffers, const NvBlastDamageProgram& program, const void* programParams, NvBlastLog logFn, NvBlastTimers* timers) const; /** Damage bond between two chunks by health amount (instance graph also will be notified in case bond is broken after). */ uint32_t damageBond(uint32_t nodeIndex0, uint32_t nodeIndex1, float healthDamage); /** TODO: document */ void damageBond(uint32_t nodeIndex0, uint32_t nodeIndex1, uint32_t bondIndex, float healthDamage); /** TODO: document */ uint32_t damageBond(const NvBlastBondFractureData& cmd); /** See NvBlastActorApplyFracture */ void applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands, NvBlastLog logFn, NvBlastTimers* timers); /** The scratch space required to call the findIslands function, or the split function, in bytes. \return the number of bytes required. */ size_t splitRequiredScratch() const; /** See NvBlastActorSplit */ uint32_t split(NvBlastActorSplitEvent* result, uint32_t newActorsMaxCount, void* scratch, NvBlastLog logFn, NvBlastTimers* timers); /** Perform islands search. Bonds which are broken when their health values drop to zero (or below) may lead to new islands of chunks which need to be split into new actors. This function labels all nodes in the instance graph (see FamilyGraph) with a unique index per island that may be used as actor indices for new islands. \param[in] scratch User-supplied scratch memory of size splitRequiredScratch(). \return the number of new islands found. */ uint32_t findIslands(void* scratch); /** Partition this actor into smaller pieces. If this actor represents a single support or subsupport chunk, then after this operation this actor will released if child chunks are created (see Return value), and its pointer no longer valid for use (unless it appears in the newActors list). This function will not split a leaf chunk actor. In that case, the actor is not destroyed and this function returns 0. \param[in] newActors user-supplied array of actor pointers to hold the actors generated from this partitioning. This array must be of size equal to the number of leaf chunks in the asset, to guarantee that all actors are reported. (See AssetDataHeader::m_leafChunkCount.) \param[in] newActorsSize The size of the user-supplied newActors array. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of new actors created. If greater than newActorsSize, some actors are not reported in the newActors array. */ uint32_t partition(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn); /** Recalculate the visible chunk list for this actor based upon it graph node list (does not modify subsupport chunk actors) */ void updateVisibleChunksFromGraphNodes(); /** Partition this actor into smaller pieces if it is a single lower-support chunk actor. 
Use this function on single support or sub-support chunks. After this operation, if successful (child chunks created, see Return value), this actor will be released, and its pointer will no longer be valid for use. This function will not split a leaf chunk actor. In that case, the actor is not destroyed and this function returns 0. \param[in] newActors User-supplied array of actor pointers to hold the actors generated from this partitioning. Note: this actor will be released. This array must be of size equal to the lower-support chunk's child count, to guarantee that all actors are reported. \param[in] newActorsSize The size of the user-supplied newActors array. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of new actors created. */ uint32_t partitionSingleLowerSupportChunk(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn); /** Partition this actor into smaller pieces. Use this function if this actor contains more than one support chunk. After this operation, if successful, this actor will be released, and its pointer will no longer be valid for use (unless it appears in the newActors list). \param[in] newActors User-supplied array of actor pointers to hold the actors generated from this partitioning. Note: this actor will not be released, but will hold a subset of the graph nodes that it had before the function was called. This array must be of size equal to the number of graph nodes in the asset, to guarantee that all actors are reported. \param[in] newActorsSize The size of the user-supplied newActors array. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of new actors created. */ uint32_t partitionMultipleGraphNodes(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn); /** \return true iff this actor contains the "external" support graph node, created when a bond contains the invalidIndex<uint32_t>() value for one of its chunkIndices. */ bool hasExternalBonds() const; /** \return true iff this actor was damaged and a split() call is required. */ bool isSplitRequired() const; private: //////// Data //////// /** Offset to block of memory which holds the data associated with all actors in this actor's lineage. This offset is positive. The block address is this object's pointer _minus_ the m_familyOffset. This value is initialized to 0, which denotes an invalid actor. Actors should be obtained through the FamilyHeader::borrowActor API, which will create a valid offset, and the FamilyHeader::returnActor API, which will zero the offset. */ uint32_t m_familyOffset; /** The index of the head of a doubly-linked list of visible chunk indices. If m_firstVisibleChunkIndex == invalidIndex<uint32_t>(), then there are no visible chunks. */ uint32_t m_firstVisibleChunkIndex; /** The number of elements in the visible chunk list. */ uint32_t m_visibleChunkCount; /** The index of the head of a singly-linked list of graph node indices. If m_firstGraphNodeIndex == invalidIndex<uint32_t>(), then there are no graph nodes. */ uint32_t m_firstGraphNodeIndex; /** The number of elements in the graph node list. */ uint32_t m_graphNodeCount; /** The number of leaf chunks in this actor. */ uint32_t m_leafChunkCount; }; } // namespace Blast } // namespace Nv #include "NvBlastFamily.h" namespace Nv { namespace Blast { //////// Actor inline methods //////// NV_INLINE FamilyHeader* Actor::getFamilyHeader() const { NVBLAST_ASSERT(isActive()); return isActive() ?
(FamilyHeader*)((uintptr_t)this - (uintptr_t)m_familyOffset) : nullptr; } NV_INLINE const Asset* Actor::getAsset() const { return getFamilyHeader()->m_asset; } NV_INLINE bool Actor::isActive() const { return m_familyOffset != 0; } NV_INLINE bool Actor::isSubSupportChunk() const { return m_graphNodeCount == 0; } NV_INLINE bool Actor::isSingleSupportChunk() const { return m_graphNodeCount == 1; } NV_INLINE uint32_t Actor::getIndex() const { NVBLAST_ASSERT(isActive()); const FamilyHeader* header = getFamilyHeader(); NVBLAST_ASSERT(header != nullptr); const size_t index = this - header->getActors(); NVBLAST_ASSERT(index <= UINT32_MAX); return (uint32_t)index; } NV_INLINE uint32_t Actor::getFamilyOffset() const { return m_familyOffset; } NV_INLINE void Actor::setFamilyOffset(uint32_t familyOffset) { m_familyOffset = familyOffset; } NV_INLINE uint32_t Actor::getVisibleChunkCount() const { return m_visibleChunkCount; } NV_INLINE void Actor::setVisibleChunkCount(uint32_t visibleChunkCount) { m_visibleChunkCount = visibleChunkCount; } NV_INLINE uint32_t Actor::getFirstVisibleChunkIndex() const { return m_firstVisibleChunkIndex; } NV_INLINE void Actor::setFirstVisibleChunkIndex(uint32_t firstVisibleChunkIndex) { m_firstVisibleChunkIndex = firstVisibleChunkIndex; } NV_INLINE uint32_t Actor::getGraphNodeCount() const { return m_graphNodeCount; } NV_INLINE void Actor::setGraphNodeCount(uint32_t graphNodeCount) { m_graphNodeCount = graphNodeCount; } NV_INLINE uint32_t Actor::getLeafChunkCount() const { return m_leafChunkCount; } NV_INLINE void Actor::setLeafChunkCount(uint32_t leafChunkCount) { m_leafChunkCount = leafChunkCount; } NV_INLINE uint32_t Actor::getFirstGraphNodeIndex() const { return m_firstGraphNodeIndex; } NV_INLINE void Actor::setFirstGraphNodeIndex(uint32_t firstGraphNodeIndex) { m_firstGraphNodeIndex = firstGraphNodeIndex; } NV_INLINE uint32_t Actor::getFirstSubsupportChunkIndex() const { return getAsset()->m_firstSubsupportChunkIndex; } NV_INLINE const SupportGraph* Actor::getGraph() const { return &getAsset()->m_graph; } NV_INLINE FamilyGraph* Actor::getFamilyGraph() const { return getFamilyHeader()->getFamilyGraph(); } NV_INLINE NvBlastChunk* Actor::getChunks() const { return getAsset()->getChunks(); } NV_INLINE NvBlastBond* Actor::getBonds() const { return getAsset()->getBonds(); } NV_INLINE float* Actor::getLowerSupportChunkHealths() const { return getFamilyHeader()->getLowerSupportChunkHealths(); } NV_INLINE float* Actor::getSubsupportChunkHealths() const { return getFamilyHeader()->getSubsupportChunkHealths(); } NV_INLINE float* Actor::getBondHealths() const { return getFamilyHeader()->getBondHealths(); } NV_INLINE const uint32_t* Actor::getGraphNodeIndexLinks() const { return getFamilyHeader()->getGraphNodeIndexLinks(); } NV_INLINE bool Actor::release() { // Do nothing if this actor is not currently active. 
if (!isActive()) { return false; } FamilyHeader* header = getFamilyHeader(); // Clear the graph node list uint32_t* graphNodeIndexLinks = getFamilyHeader()->getGraphNodeIndexLinks(); while (!isInvalidIndex(m_firstGraphNodeIndex)) { const uint32_t graphNodeIndex = m_firstGraphNodeIndex; m_firstGraphNodeIndex = graphNodeIndexLinks[m_firstGraphNodeIndex]; graphNodeIndexLinks[graphNodeIndex] = invalidIndex<uint32_t>(); --m_graphNodeCount; } NVBLAST_ASSERT(m_graphNodeCount == 0); const Asset* asset = getAsset(); // Clear the visible chunk list IndexDLink<uint32_t>* visibleChunkIndexLinks = header->getVisibleChunkIndexLinks(); uint32_t* chunkActorIndices = header->getChunkActorIndices(); while (!isInvalidIndex(m_firstVisibleChunkIndex)) { // Descendants of the visible actor may be accessed again if the actor is deserialized. Clear subtree. for (Asset::DepthFirstIt i(*asset, m_firstVisibleChunkIndex, true); (bool)i; ++i) { chunkActorIndices[(uint32_t)i] = invalidIndex<uint32_t>(); } IndexDList<uint32_t>().removeListHead(m_firstVisibleChunkIndex, visibleChunkIndexLinks); --m_visibleChunkCount; } NVBLAST_ASSERT(m_visibleChunkCount == 0); // Clear the leaf chunk count m_leafChunkCount = 0; // This invalidates the actor and decrements the reference count header->returnActor(*this); return true; } NV_INLINE uint32_t Actor::partition(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn) { NVBLASTLL_CHECK(newActorsSize == 0 || newActors != nullptr, logFn, "Nv::Blast::Actor::partition: NULL newActors pointer array input with non-zero newActorCount.", return 0); // Call one of two partition functions depending on the actor's support status return m_graphNodeCount <= 1 ? partitionSingleLowerSupportChunk(newActors, newActorsSize, logFn) : // This actor will partition into subsupport chunks partitionMultipleGraphNodes(newActors, newActorsSize, logFn); // This actor will partition into support chunks } NV_INLINE bool Actor::hasExternalBonds() const { const SupportGraph& graph = *getGraph(); if (graph.m_nodeCount == 0) { return false; // This shouldn't happen } const uint32_t lastGraphChunkIndex = graph.getChunkIndices()[graph.m_nodeCount - 1]; if (!isInvalidIndex(lastGraphChunkIndex)) { return false; // There is no external node } return getFamilyGraph()->getIslandIds()[graph.m_nodeCount - 1] == getIndex(); } NV_INLINE bool Actor::isSplitRequired() const { NVBLAST_ASSERT(isActive()); if (getGraphNodeCount() <= 1) { uint32_t chunkHealthIndex = isSingleSupportChunk() ? getIndex() : getFirstVisibleChunkIndex() - getFirstSubsupportChunkIndex() + getGraph()->m_nodeCount; float* chunkHealths = getLowerSupportChunkHealths(); if (chunkHealths[chunkHealthIndex] <= 0.0f) { const uint32_t chunkIndex = m_graphNodeCount == 0 ? 
m_firstVisibleChunkIndex : getGraph()->getChunkIndices()[m_firstGraphNodeIndex]; if (!isInvalidIndex(chunkIndex)) { const NvBlastChunk& chunk = getChunks()[chunkIndex]; uint32_t childCount = chunk.childIndexStop - chunk.firstChildIndex; return childCount > 0; } } } else { uint32_t* firstDirtyNodeIndices = getFamilyGraph()->getFirstDirtyNodeIndices(); if (!isInvalidIndex(firstDirtyNodeIndices[getIndex()])) { return true; } } return false; } //////// Actor::VisibleChunkIt inline methods //////// NV_INLINE Actor::VisibleChunkIt::VisibleChunkIt(const Actor& actor) : DListIt<uint32_t>(actor.m_firstVisibleChunkIndex, actor.getFamilyHeader()->getVisibleChunkIndexLinks()) { } //////// Actor::GraphNodeIt inline methods //////// NV_INLINE Actor::GraphNodeIt::GraphNodeIt(const Actor& actor) : LListIt<uint32_t>(actor.m_firstGraphNodeIndex, actor.getFamilyHeader()->getGraphNodeIndexLinks()) { } //////// Helper functions //////// #if NVBLASTLL_CHECK_PARAMS /** Helper function to validate fracture buffer values being meaningful. */ static inline bool isValid(const NvBlastFractureBuffers* buffers) { if (buffers->chunkFractureCount != 0 && buffers->chunkFractures == nullptr) return false; if (buffers->bondFractureCount != 0 && buffers->bondFractures == nullptr) return false; return true; } #endif } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTACTOR_H
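/*
    Illustrative damage-and-split flow using the Actor member API declared above. This is a sketch only: the
    std::vector allocations, the choice of newActors capacity, and the NULL log/timer arguments are assumptions
    of this example, not requirements of the API.

        if (actor.isSplitRequired())
        {
            std::vector<char> scratch(actor.splitRequiredScratch());
            std::vector<NvBlastActor*> newActors(actor.getLeafChunkCount() + 1);    // capacity chosen as in NvBlastActorGetMaxActorCountForSplit
            NvBlastActorSplitEvent splitEvent;
            splitEvent.newActors = newActors.data();
            const uint32_t newActorCount =
                actor.split(&splitEvent, (uint32_t)newActors.size(), scratch.data(), nullptr, nullptr);
            // splitEvent.deletedActor is non-NULL if this actor was consumed by the split
        }
*/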
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastActor.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastActor.h" #include "NvBlastFamilyGraph.h" #include "NvBlastChunkHierarchy.h" #include "NvBlastIndexFns.h" #include "NvBlastDLink.h" #include "NvBlastGeometry.h" #include "NvBlastTime.h" #include <float.h> #include <algorithm> namespace Nv { namespace Blast { //////// Actor static methods //////// size_t Actor::createRequiredScratch(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr && reinterpret_cast<const FamilyHeader*>(family)->m_asset != nullptr, logFn, "Actor::createRequiredScratch: NULL family input or asset.", return 0); const Asset& solverAsset = *reinterpret_cast<const FamilyHeader*>(family)->m_asset; return FamilyGraph::findIslandsRequiredScratch(solverAsset.m_graph.m_nodeCount); } Actor* Actor::create(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "Actor::create: NULL family pointer input.", return nullptr); NVBLASTLL_CHECK(reinterpret_cast<FamilyHeader*>(family)->m_asset != nullptr, logFn, "Actor::create: family has NULL asset.", return nullptr); NVBLASTLL_CHECK(reinterpret_cast<FamilyHeader*>(family)->m_asset->m_graph.m_nodeCount != 0, logFn, "Actor::create: family's asset has no support chunks.", return nullptr); NVBLASTLL_CHECK(desc != nullptr, logFn, "Actor::create: NULL desc pointer input.", return nullptr); NVBLASTLL_CHECK(scratch != nullptr, logFn, "Actor::create: NULL scratch input.", return nullptr); FamilyHeader* header = reinterpret_cast<FamilyHeader*>(family); if (header->m_actorCount > 0) { NVBLASTLL_LOG_ERROR(logFn, "Actor::create: input family is not empty."); return nullptr; } const Asset& solverAsset = *static_cast<const Asset*>(header->m_asset); const SupportGraph& graph = solverAsset.m_graph; // Lower support chunk healths - initialize float* lowerSupportChunkHealths = header->getLowerSupportChunkHealths(); if (desc->initialSupportChunkHealths != nullptr) // Health array given { const uint32_t* supportChunkIndices = 
graph.getChunkIndices(); for (uint32_t supportChunkNum = 0; supportChunkNum < graph.m_nodeCount; ++supportChunkNum) { const float initialHealth = desc->initialSupportChunkHealths[supportChunkNum]; for (Asset::DepthFirstIt i(solverAsset, supportChunkIndices[supportChunkNum]); (bool)i; ++i) { lowerSupportChunkHealths[solverAsset.getContiguousLowerSupportIndex((uint32_t)i)] = initialHealth; } } } else // Use uniform initialization { const uint32_t lowerSupportChunkCount = solverAsset.getLowerSupportChunkCount(); for (uint32_t i = 0; i < lowerSupportChunkCount; ++i) { lowerSupportChunkHealths[i] = desc->uniformInitialLowerSupportChunkHealth; } } // Bond healths - initialize const uint32_t bondCount = solverAsset.getBondCount(); float* bondHealths = header->getBondHealths(); if (desc->initialBondHealths != nullptr) // Health array given { memcpy(bondHealths, desc->initialBondHealths, bondCount * sizeof(float)); } else // Use uniform initialization { for (uint32_t bondNum = 0; bondNum < bondCount; ++bondNum) { bondHealths[bondNum] = desc->uniformInitialBondHealth; } } // Get first actor - NOTE: we don't send an event for this! May need to do so for consistency. Actor* actor = header->borrowActor(0); // Using actor[0] // Fill in actor fields actor->m_firstGraphNodeIndex = 0; actor->m_graphNodeCount = graph.m_nodeCount; actor->m_leafChunkCount = solverAsset.m_leafChunkCount; // Graph node index links - initialize to chain uint32_t* graphNodeLinks = header->getGraphNodeIndexLinks(); for (uint32_t i = 0; i < graph.m_nodeCount - 1; ++i) { graphNodeLinks[i] = i + 1; } graphNodeLinks[graph.m_nodeCount - 1] = invalidIndex<uint32_t>(); // Update visible chunks (we assume that all chunks belong to one actor at the beginning) actor->updateVisibleChunksFromGraphNodes(); // Initialize instance graph with this actor header->getFamilyGraph()->initialize(actor->getIndex(), &graph); // Call findIslands to set up the internal instance graph data header->getFamilyGraph()->findIslands(actor->getIndex(), scratch, &graph); return actor; } //////// Actor member methods //////// uint32_t Actor::damageBond(uint32_t nodeIndex0, uint32_t nodeIndex1, float healthDamage) { const uint32_t bondIndex = getGraph()->findBond(nodeIndex0, nodeIndex1); damageBond(nodeIndex0, nodeIndex1, bondIndex, healthDamage); return bondIndex; } void Actor::damageBond(uint32_t nodeIndex0, uint32_t nodeIndex1, uint32_t bondIndex, float healthDamage) { if (bondIndex == invalidIndex<uint32_t>()) { NVBLAST_ALWAYS_ASSERT(); return; } float* bondHealths = getBondHealths(); if (canTakeDamage(bondHealths[bondIndex]) && healthDamage > 0.0f) { // Subtract health bondHealths[bondIndex] -= healthDamage; // Was removed? 
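// Note: when the bond health reaches zero below, the family graph is notified (notifyEdgeRemoved), which allows a
// later isSplitRequired()/findIslands()/split() to detect any new islands produced by removing this bond.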
if (bondHealths[bondIndex] <= 0.0f) { // Notify graph that bond was removed getFamilyGraph()->notifyEdgeRemoved(getIndex(), nodeIndex0, nodeIndex1, bondIndex, getGraph()); bondHealths[bondIndex] = 0.0f; // Doing this for single-actor serialization consistency; should not actually be necessary } } } uint32_t Actor::damageBond(const NvBlastBondFractureData& cmd) { NVBLAST_ASSERT(!isInvalidIndex(cmd.nodeIndex1)); return damageBond(cmd.nodeIndex0, cmd.nodeIndex1, cmd.health); } void Actor::generateFracture(NvBlastFractureBuffers* commandBuffers, const NvBlastDamageProgram& program, const void* programParams, NvBlastLog logFn, NvBlastTimers* timers) const { NVBLASTLL_CHECK(commandBuffers != nullptr, logFn, "Actor::generateFracture: NULL commandBuffers pointer input.", return); NVBLASTLL_CHECK(isValid(commandBuffers), logFn, "NvBlastActorGenerateFracture: commandBuffers memory is NULL but size is > 0.", commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = 0; return); #if NVBLASTLL_CHECK_PARAMS if (commandBuffers->bondFractureCount == 0 && commandBuffers->chunkFractureCount == 0) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorGenerateFracture: commandBuffers do not provide any space."); return; } #endif #if NV_PROFILE Time time; #else NV_UNUSED(timers); #endif const SupportGraph* graph = getGraph(); const uint32_t graphNodeCount = getGraphNodeCount(); if (graphNodeCount > 1 && program.graphShaderFunction != nullptr) { const NvBlastGraphShaderActor shaderActor = { getIndex(), getGraphNodeCount(), graph->m_nodeCount, getFirstGraphNodeIndex(), getGraphNodeIndexLinks(), graph->getChunkIndices(), graph->getAdjacencyPartition(), graph->getAdjacentNodeIndices(), graph->getAdjacentBondIndices(), getBonds(), getChunks(), getBondHealths(), getLowerSupportChunkHealths(), getFamilyHeader()->getFamilyGraph()->getIslandIds() }; program.graphShaderFunction(commandBuffers, &shaderActor, programParams); } else if (graphNodeCount <= 1 && program.subgraphShaderFunction != nullptr) { const NvBlastSubgraphShaderActor shaderActor = { // The conditional (visible vs. support chunk) is needed because we allow single-child chunk chains // This makes it possible that an actor with a single support chunk will have a different visible chunk (ancestor of the support chunk) graphNodeCount == 1 ? 
graph->getChunkIndices()[getFirstGraphNodeIndex()] : getFirstVisibleChunkIndex(), getChunks() }; program.subgraphShaderFunction(commandBuffers, &shaderActor, programParams); } else { commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = 0; } #if NV_PROFILE if (timers != nullptr) { timers->material += time.getElapsedTicks(); } #endif } size_t Actor::splitRequiredScratch() const { // Scratch is reused, just need the max of these two values return std::max(m_graphNodeCount * sizeof(uint32_t), static_cast<size_t>(FamilyGraph::findIslandsRequiredScratch(getGraph()->m_nodeCount))); } uint32_t Actor::split(NvBlastActorSplitEvent* result, uint32_t newActorsMaxCount, void* scratch, NvBlastLog logFn, NvBlastTimers* timers) { NVBLASTLL_CHECK(result != nullptr, logFn, "Actor::split: NULL result pointer input.", return 0); NVBLASTLL_CHECK(newActorsMaxCount > 0 && result->newActors != nullptr, logFn, "NvBlastActorSplit: no space for results provided.", return 0); NVBLASTLL_CHECK(scratch != nullptr, logFn, "Actor::split: NULL scratch pointer input.", return 0); #if NV_PROFILE Time time; #else NV_UNUSED(timers); #endif Actor** newActors = reinterpret_cast<Actor**>(result->newActors); uint32_t actorsCount = 0; if (getGraphNodeCount() <= 1) { uint32_t chunkHealthIndex = isSingleSupportChunk() ? getIndex() : getFirstVisibleChunkIndex() - getFirstSubsupportChunkIndex() + getGraph()->m_nodeCount; float* chunkHealths = getLowerSupportChunkHealths(); if (chunkHealths[chunkHealthIndex] <= 0.0f) { actorsCount = partitionSingleLowerSupportChunk(newActors, newActorsMaxCount, logFn); for (uint32_t i = 0; i < actorsCount; ++i) { Actor* newActor = newActors[i]; uint32_t firstVisible = newActor->getFirstVisibleChunkIndex(); uint32_t firstSub = newActor->getFirstSubsupportChunkIndex(); uint32_t nodeCount = newActor->getGraph()->m_nodeCount; uint32_t newActorIndex = newActor->getIndex(); uint32_t healthIndex = newActor->isSubSupportChunk() ? firstVisible - firstSub + nodeCount : newActorIndex; if (chunkHealths[healthIndex] <= 0.0f) { uint32_t brittleActors = newActors[i]->partitionSingleLowerSupportChunk(&newActors[actorsCount], newActorsMaxCount - actorsCount, logFn); actorsCount += brittleActors; if (brittleActors > 0) { actorsCount--; newActors[i] = newActors[actorsCount]; i--; } } } } #if NV_PROFILE if (timers != nullptr) { timers->partition += time.getElapsedTicks(); } #endif } else { findIslands(scratch); #if NV_PROFILE if (timers != nullptr) { timers->island += time.getElapsedTicks(); } #endif // Reuse scratch for node list uint32_t* graphNodeIndexList = reinterpret_cast<uint32_t*>(scratch); // Get the family header FamilyHeader* header = getFamilyHeader(); NVBLAST_ASSERT(header != nullptr); // If m_actorEntryDataIndex is valid, this should be too // Record nodes in this actor before splitting const uint32_t* graphNodeIndexLinks = header->getGraphNodeIndexLinks(); // Get the links for the graph nodes uint32_t graphNodeIndexCount = 0; for (uint32_t graphNodeIndex = m_firstGraphNodeIndex; !isInvalidIndex(graphNodeIndex); graphNodeIndex = graphNodeIndexLinks[graphNodeIndex]) { if (graphNodeIndexCount >= m_graphNodeCount) { // Safety, splitRequiredScratch() only guarantees m_graphNodeCount elements. In any case, this condition shouldn't happen. 
NVBLAST_ASSERT(graphNodeIndexCount < m_graphNodeCount); break; } graphNodeIndexList[graphNodeIndexCount++] = graphNodeIndex; } actorsCount = partitionMultipleGraphNodes(newActors, newActorsMaxCount, logFn); if (actorsCount > 1) { #if NV_PROFILE if (timers != nullptr) { timers->partition += time.getElapsedTicks(); } #endif // Get various pointers and values to iterate const Asset* asset = getAsset(); Actor* actors = header->getActors(); IndexDLink<uint32_t>* visibleChunkIndexLinks = header->getVisibleChunkIndexLinks(); uint32_t* chunkActorIndices = header->getChunkActorIndices(); const SupportGraph& graph = asset->m_graph; const uint32_t* graphChunkIndices = graph.getChunkIndices(); const NvBlastChunk* chunks = asset->getChunks(); const uint32_t upperSupportChunkCount = asset->getUpperSupportChunkCount(); const uint32_t* familyGraphIslandIDs = header->getFamilyGraph()->getIslandIds(); // Iterate over all graph nodes and update visible chunk lists for (uint32_t graphNodeNum = 0; graphNodeNum < graphNodeIndexCount; ++graphNodeNum) { const uint32_t graphNodeIndex = graphNodeIndexList[graphNodeNum]; const uint32_t supportChunkIndex = graphChunkIndices[graphNodeIndex]; if (!isInvalidIndex(supportChunkIndex)) // Invalid if this is the world chunk { updateVisibleChunksFromSupportChunk<Actor>(actors, visibleChunkIndexLinks, chunkActorIndices, familyGraphIslandIDs[graphNodeIndex], graphChunkIndices[graphNodeIndex], chunks, upperSupportChunkCount); } } // Remove actors with no visible chunks - this can happen if we've split such that the world node is by itself uint32_t actualActorsCount = 0; for (uint32_t i = 0; i < actorsCount; ++i) { newActors[actualActorsCount] = newActors[i]; if (newActors[actualActorsCount]->getVisibleChunkCount() > 0) { ++actualActorsCount; } else { header->returnActor(*newActors[actualActorsCount]); } } actorsCount = actualActorsCount; #if NV_PROFILE if (timers != nullptr) { timers->visibility += time.getElapsedTicks(); } #endif // NOTE: we MUST use header->getLowerSupportChunkHealths() instead of just getLowerSupportChunkHealths() here, // since this actor has been made inactive at this point. Therefore Actor::getLowerSupportChunkHealths() will return // garbage since it calls getFamilyHeader() which does not return a valid header if the actor is not active. const float* chunkHealths = header->getLowerSupportChunkHealths(); for (uint32_t i = 0; i < actorsCount; ++i) { Actor* newActor = newActors[i]; if (newActor->getGraphNodeCount() <= 1) { const uint32_t firstVisible = newActor->getFirstVisibleChunkIndex(); const uint32_t firstSub = newActor->getFirstSubsupportChunkIndex(); const uint32_t assetNodeCount = newActor->getGraph()->m_nodeCount; const uint32_t newActorIndex = newActor->getIndex(); const uint32_t healthIndex = newActor->isSubSupportChunk() ? firstVisible - firstSub + assetNodeCount : newActorIndex; // this relies on visibility updated, subsupport actors only have m_firstVisibleChunkIndex to identify the chunk if (chunkHealths[healthIndex] <= 0.0f) { const uint32_t brittleActors = newActor->partitionSingleLowerSupportChunk(&newActors[actorsCount], newActorsMaxCount - actorsCount, logFn); actorsCount += brittleActors; if (brittleActors > 0) { actorsCount--; newActors[i] = newActors[actorsCount]; i--; } } } } #if NV_PROFILE if (timers != nullptr) { timers->partition += time.getElapsedTicks(); } #endif } else { actorsCount = 0; } } result->deletedActor = actorsCount == 0 ? 
nullptr : this; return actorsCount; } uint32_t Actor::findIslands(void* scratch) { return getFamilyHeader()->getFamilyGraph()->findIslands(getIndex(), scratch, &getAsset()->m_graph); } uint32_t Actor::partitionMultipleGraphNodes(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn) { NVBLAST_ASSERT(newActorsSize == 0 || newActors != nullptr); // Check for single subsupport chunk, no partitioning if (m_graphNodeCount <= 1) { NVBLASTLL_LOG_WARNING(logFn, "Nv::Blast::Actor::partitionMultipleGraphNodes: actor is a single lower-support chunk, and cannot be partitioned by this function."); return 0; } FamilyHeader* header = getFamilyHeader(); NVBLAST_ASSERT(header != nullptr); // If m_actorEntryDataIndex is valid, this should be too // Get the links for the graph nodes uint32_t* graphNodeIndexLinks = header->getGraphNodeIndexLinks(); // Get the graph chunk indices and leaf chunk counts const Asset* asset = getAsset(); const uint32_t* graphChunkIndices = asset->m_graph.getChunkIndices(); const uint32_t* subtreeLeafChunkCounts = asset->getSubtreeLeafChunkCounts(); // Distribute graph nodes to new actors uint32_t newActorCount = 0; const uint32_t thisActorIndex = getIndex(); m_leafChunkCount = 0; const uint32_t* islandIDs = header->getFamilyGraph()->getIslandIds(); uint32_t lastGraphNodeIndex = invalidIndex<uint32_t>(); uint32_t nextGraphNodeIndex = invalidIndex<uint32_t>(); bool overflow = false; for (uint32_t graphNodeIndex = m_firstGraphNodeIndex; !isInvalidIndex(graphNodeIndex); graphNodeIndex = nextGraphNodeIndex) { nextGraphNodeIndex = graphNodeIndexLinks[graphNodeIndex]; const uint32_t islandID = islandIDs[graphNodeIndex]; if (islandID == thisActorIndex) { const uint32_t graphChunkIndex = graphChunkIndices[graphNodeIndex]; if (!isInvalidIndex(graphChunkIndex)) // Invalid if this is the world chunk { m_leafChunkCount += subtreeLeafChunkCounts[graphChunkIndex]; } lastGraphNodeIndex = graphNodeIndex; continue; // Leave the chunk in this actor } // Remove link from this actor if (isInvalidIndex(lastGraphNodeIndex)) { m_firstGraphNodeIndex = nextGraphNodeIndex; } else { graphNodeIndexLinks[lastGraphNodeIndex] = nextGraphNodeIndex; } graphNodeIndexLinks[graphNodeIndex] = invalidIndex<uint32_t>(); --m_graphNodeCount; // See if the chunk had been removed if (islandID == invalidIndex<uint32_t>()) { continue; } // Get new actor if the islandID is valid Actor* newActor = header->borrowActor(islandID); // Check new actor to see if we're adding the first chunk if (isInvalidIndex(newActor->m_firstGraphNodeIndex)) { // See if we can fit it in the output list if (newActorCount < newActorsSize) { newActors[newActorCount++] = newActor; } else { overflow = true; } } // Put link in new actor graphNodeIndexLinks[graphNodeIndex] = newActor->m_firstGraphNodeIndex; newActor->m_firstGraphNodeIndex = graphNodeIndex; ++newActor->m_graphNodeCount; // Add to the actor's leaf chunk count const uint32_t graphChunkIndex = graphChunkIndices[graphNodeIndex]; if (!isInvalidIndex(graphChunkIndex)) // Invalid if this is the world chunk { newActor->m_leafChunkCount += subtreeLeafChunkCounts[graphChunkIndex]; } } if (m_graphNodeCount > 0) { // There are still chunks in this actor. See if we can fit this in the output list. if (newActorCount < newActorsSize) { newActors[newActorCount++] = this; } else { overflow = true; } } else { // No more chunks; release this actor. 
release(); } if (overflow) { NVBLASTLL_LOG_WARNING(logFn, "Nv::Blast::Actor::partitionMultipleGraphNodes: input newActors array could not hold all actors generated."); } return newActorCount; } uint32_t Actor::partitionSingleLowerSupportChunk(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn) { NVBLAST_ASSERT(newActorsSize == 0 || newActors != nullptr); // Ensure this is a single subsupport chunk, no partitioning if (m_graphNodeCount > 1) { NVBLASTLL_LOG_WARNING(logFn, "Nv::Blast::Actor::partitionSingleLowerSupportChunk: actor is not a single lower-support chunk, and cannot be partitioned by this function."); return 0; } FamilyHeader* header = getFamilyHeader(); // The conditional (visible vs. support chunk) is needed because we allow single-child chunk chains // This makes it possible that an actor with a single support chunk will have a different visible chunk (ancestor of the support chunk) const uint32_t chunkIndex = m_graphNodeCount == 0 ? m_firstVisibleChunkIndex : getGraph()->getChunkIndices()[m_firstGraphNodeIndex]; if (isInvalidIndex(chunkIndex)) { return 0; // This actor has no chunks; only a graph node representing the world } NVBLAST_ASSERT(isInvalidIndex(header->getVisibleChunkIndexLinks()[chunkIndex].m_adj[1])); const NvBlastChunk& chunk = header->m_asset->getChunks()[chunkIndex]; uint32_t childCount = chunk.childIndexStop - chunk.firstChildIndex; // Warn if we cannot fit all child chunks in the output list if (childCount > newActorsSize) { NVBLASTLL_LOG_WARNING(logFn, "Nv::Blast::Actor::partitionSingleLowerSupportChunk: input newActors array will not hold all actors generated."); childCount = newActorsSize; } // Return if no chunks will be created. if (childCount == 0) { return 0; } // Activate a new actor for every child chunk const Asset* asset = getAsset(); const NvBlastChunk* chunks = asset->getChunks(); const uint32_t firstChildIndex = chunks[chunkIndex].firstChildIndex; for (uint32_t i = 0; i < childCount; ++i) { const uint32_t childIndex = firstChildIndex + i; NVBLAST_ASSERT(childIndex >= asset->m_firstSubsupportChunkIndex); const uint32_t actorIndex = asset->m_graph.m_nodeCount + (childIndex - asset->m_firstSubsupportChunkIndex); NVBLAST_ASSERT(!header->isActorActive(actorIndex)); newActors[i] = header->borrowActor(actorIndex); newActors[i]->m_firstVisibleChunkIndex = childIndex; newActors[i]->m_visibleChunkCount = 1; newActors[i]->m_leafChunkCount = asset->getSubtreeLeafChunkCounts()[childIndex]; } // Release this actor release(); return childCount; } void Actor::updateVisibleChunksFromGraphNodes() { // Only apply this to upper-support chunk actors if (m_graphNodeCount == 0) { return; } const Asset* asset = getAsset(); const uint32_t thisActorIndex = getIndex(); // Get various arrays FamilyHeader* header = getFamilyHeader(); Actor* actors = header->getActors(); IndexDLink<uint32_t>* visibleChunkIndexLinks = header->getVisibleChunkIndexLinks(); uint32_t* chunkActorIndices = header->getChunkActorIndices(); const SupportGraph& graph = asset->m_graph; const uint32_t* graphChunkIndices = graph.getChunkIndices(); const NvBlastChunk* chunks = asset->getChunks(); const uint32_t upperSupportChunkCount = asset->getUpperSupportChunkCount(); // Iterate over all graph nodes and update visible chunk list const uint32_t* graphNodeIndexLinks = header->getGraphNodeIndexLinks(); for (uint32_t graphNodeIndex = m_firstGraphNodeIndex; !isInvalidIndex(graphNodeIndex); graphNodeIndex = graphNodeIndexLinks[graphNodeIndex]) { const uint32_t supportChunkIndex = 
graphChunkIndices[graphNodeIndex]; if (!isInvalidIndex(supportChunkIndex)) // Invalid if this is the world chunk { updateVisibleChunksFromSupportChunk<Actor>(actors, visibleChunkIndexLinks, chunkActorIndices, thisActorIndex, graphChunkIndices[graphNodeIndex], chunks, upperSupportChunkCount); } } } } // namespace Blast } // namespace Nv // API implementation extern "C" { NvBlastActor* NvBlastFamilyCreateFirstActor(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyCreateFirstActor: NULL family input.", return nullptr); NVBLASTLL_CHECK(desc != nullptr, logFn, "NvBlastFamilyCreateFirstActor: NULL desc input.", return nullptr); NVBLASTLL_CHECK(scratch != nullptr, logFn, "NvBlastFamilyCreateFirstActor: NULL scratch input.", return nullptr); return Nv::Blast::Actor::create(family, desc, scratch, logFn); } size_t NvBlastFamilyGetRequiredScratchForCreateFirstActor(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetRequiredScratchForCreateFirstActor: NULL family input.", return 0); NVBLASTLL_CHECK(reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->m_asset != nullptr, logFn, "NvBlastFamilyGetRequiredScratchForCreateFirstActor: family has NULL asset.", return 0); return Nv::Blast::Actor::createRequiredScratch(family, logFn); } bool NvBlastActorDeactivate(NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorDeactivate: NULL actor input.", return false); Nv::Blast::Actor& a = *static_cast<Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorDeactivate: inactive actor input."); } return a.release(); } uint32_t NvBlastActorGetVisibleChunkCount(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetVisibleChunkCount: NULL actor input.", return 0); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetVisibleChunkCount: inactive actor input."); return 0; } return a.getVisibleChunkCount(); } uint32_t NvBlastActorGetVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize, const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(visibleChunkIndices != nullptr, logFn, "NvBlastActorGetVisibleChunkIndices: NULL visibleChunkIndices pointer input.", return 0); NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetVisibleChunkIndices: NULL actor pointer input.", return 0); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetVisibleChunkIndices: inactive actor pointer input."); return 0; } // Iterate through visible chunk list and write to supplied array uint32_t indexCount = 0; for (Nv::Blast::Actor::VisibleChunkIt i = a; indexCount < visibleChunkIndicesSize && (bool)i; ++i) { visibleChunkIndices[indexCount++] = (uint32_t)i; } return indexCount; } uint32_t NvBlastActorGetGraphNodeCount(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetGraphNodeCount: NULL actor pointer input.", return 0); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetGraphNodeCount: inactive actor pointer input."); return 0; } return a.getGraphNodeCount(); } uint32_t NvBlastActorGetGraphNodeIndices(uint32_t* 
graphNodeIndices, uint32_t graphNodeIndicesSize, const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(graphNodeIndices != nullptr, logFn, "NvBlastActorGetGraphNodeIndices: NULL graphNodeIndices pointer input.", return 0); NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetGraphNodeIndices: NULL actor pointer input.", return 0); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetGraphNodeIndices: inactive actor pointer input."); return 0; } // Iterate through graph node list and write to supplied array const uint32_t* graphChunkIndices = a.getAsset()->m_graph.getChunkIndices(); uint32_t indexCount = 0; for (Nv::Blast::Actor::GraphNodeIt i = a; indexCount < graphNodeIndicesSize && (bool)i; ++i) { const uint32_t graphNodeIndex = (uint32_t)i; if (!Nv::Blast::isInvalidIndex(graphChunkIndices[graphNodeIndex])) { graphNodeIndices[indexCount++] = graphNodeIndex; } } return indexCount; } const float* NvBlastActorGetBondHealths(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetBondHealths: NULL actor pointer input.", return nullptr); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetBondHealths: inactive actor pointer input."); return nullptr; } return a.getFamilyHeader()->getBondHealths(); } const float* NvBlastActorGetCachedBondHeaths(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetCachedBondHeaths: NULL actor pointer input.", return nullptr); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetCachedBondHeaths: inactive actor pointer input."); return nullptr; } return a.getFamilyHeader()->getCachedBondHealths(); } bool NvBlastActorCacheBondHeath(const NvBlastActor* actor, uint32_t bondIndex, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorCacheBondHeath: NULL actor pointer input.", return nullptr); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorCacheBondHeath: inactive actor pointer input."); return false; } // copy the value over from the current bond health Nv::Blast::FamilyHeader* familyHeader = a.getFamilyHeader(); const float curHealth = familyHeader->getBondHealths()[bondIndex]; familyHeader->getCachedBondHealths()[bondIndex] = curHealth; return true; } NvBlastFamily* NvBlastActorGetFamily(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetFamily: NULL actor pointer input.", return nullptr); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetFamily: inactive actor pointer input."); return nullptr; } return reinterpret_cast<NvBlastFamily*>(a.getFamilyHeader()); } uint32_t NvBlastActorGetIndex(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetIndex: NULL actor pointer input.", return Nv::Blast::invalidIndex<uint32_t>()); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetIndex: actor is not active."); return Nv::Blast::invalidIndex<uint32_t>(); } return a.getIndex(); } void NvBlastActorGenerateFracture ( 
NvBlastFractureBuffers* commandBuffers, const NvBlastActor* actor, const NvBlastDamageProgram program, const void* programParams, NvBlastLog logFn, NvBlastTimers* timers ) { NVBLASTLL_CHECK(commandBuffers != nullptr, logFn, "NvBlastActorGenerateFracture: NULL commandBuffers pointer input.", return); NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGenerateFracture: NULL actor pointer input.", return); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGenerateFracture: actor is not active."); commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = 0; return; } a.generateFracture(commandBuffers, program, programParams, logFn, timers); } void NvBlastActorApplyFracture ( NvBlastFractureBuffers* eventBuffers, NvBlastActor* actor, const NvBlastFractureBuffers* commands, NvBlastLog logFn, NvBlastTimers* timers ) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorApplyFracture: NULL actor pointer input.", return); NVBLASTLL_CHECK(commands != nullptr, logFn, "NvBlastActorApplyFracture: NULL commands pointer input.", return); NVBLASTLL_CHECK(Nv::Blast::isValid(commands), logFn, "NvBlastActorApplyFracture: commands memory is NULL but size is > 0.", return); Nv::Blast::Actor& a = *static_cast<Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorApplyFracture: actor is not active."); if (eventBuffers != nullptr) { eventBuffers->bondFractureCount = 0; eventBuffers->chunkFractureCount = 0; } return; } a.getFamilyHeader()->applyFracture(eventBuffers, commands, &a, logFn, timers); } size_t NvBlastActorGetRequiredScratchForSplit(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetRequiredScratchForSplit: NULL actor input.", return 0); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetRequiredScratchForSplit: actor is not active."); return 0; } return a.splitRequiredScratch(); } uint32_t NvBlastActorGetMaxActorCountForSplit(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetMaxActorCountForSplit: NULL actor input.", return 0); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetMaxActorCountForSplit: actor is not active."); return 0; } return a.getLeafChunkCount() + 1; // GWD-167 workaround (+1) } uint32_t NvBlastActorSplit ( NvBlastActorSplitEvent* result, NvBlastActor* actor, uint32_t newActorsMaxCount, void* scratch, NvBlastLog logFn, NvBlastTimers* timers ) { NVBLASTLL_CHECK(result != nullptr, logFn, "NvBlastActorSplit: NULL result pointer input.", return 0); NVBLASTLL_CHECK(newActorsMaxCount > 0 && result->newActors != nullptr, logFn, "NvBlastActorSplit: no space for results provided.", return 0); NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorSplit: NULL actor pointer input.", return 0); NVBLASTLL_CHECK(scratch != nullptr, logFn, "NvBlastActorSplit: NULL scratch pointer input.", return 0); Nv::Blast::Actor& a = *static_cast<Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetIndex: actor is not active."); return 0; } return a.split(result, newActorsMaxCount, scratch, logFn, timers); } bool NvBlastActorCanFracture(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorCanFracture: NULL actor 
input.", return false); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorCanFracture: actor is not active."); return false; } bool canFracture = true; uint32_t graphNodeCount = a.getGraphNodeCount(); if (graphNodeCount < 2) { uint32_t chunkHealthIndex = graphNodeCount == 0 ? a.getFirstVisibleChunkIndex() - a.getFirstSubsupportChunkIndex() + a.getGraph()->m_nodeCount : a.getFirstGraphNodeIndex(); canFracture = (a.getLowerSupportChunkHealths()[chunkHealthIndex] > 0.0f); } return canFracture; } bool NvBlastActorHasExternalBonds(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorHasExternalBonds: NULL actor input.", return false); return static_cast<const Nv::Blast::Actor*>(actor)->hasExternalBonds(); } bool NvBlastActorIsSplitRequired(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorIsSplitRequired: NULL actor input.", return false); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorIsSplitRequired: actor is not active."); return false; } return a.isSplitRequired(); } } // extern "C"
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastAssetHelper.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastAsset.h" #include "NvBlastIndexFns.h" #include "NvBlastAssert.h" #include "NvBlastMemory.h" #include "NvBlastMath.h" #include "NvBlastPreprocessorInternal.h" #include <algorithm> namespace Nv { namespace Blast { /** Class to hold chunk descriptor and annotation context for sorting a list of indices */ class ChunksOrdered { public: ChunksOrdered(const NvBlastChunkDesc* descs, const char* annotation) : m_descs(descs), m_annotation(annotation), m_chunkMap(nullptr), m_chunkInvMap(nullptr) {} // Map and inverse to apply to chunk descs bool setMap(const uint32_t* map, const uint32_t* inv) { if ((map == nullptr) != (inv == nullptr)) { return false; } m_chunkMap = map; m_chunkInvMap = inv; return true; } bool operator () (uint32_t ii0, uint32_t ii1) const { const uint32_t i0 = m_chunkMap ? m_chunkMap[ii0] : ii0; const uint32_t i1 = m_chunkMap ? m_chunkMap[ii1] : ii1; const bool upperSupport0 = (m_annotation[i0] & Asset::ChunkAnnotation::UpperSupport) != 0; const bool upperSupport1 = (m_annotation[i1] & Asset::ChunkAnnotation::UpperSupport) != 0; if (upperSupport0 != upperSupport1) { return upperSupport0; // If one is uppersupport and one is subsupport, uppersupport should come first } const uint32_t p0 = m_descs[i0].parentChunkDescIndex; const uint32_t p1 = m_descs[i1].parentChunkDescIndex; // Parent chunk index (+1 so that UINT32_MAX becomes the lowest value) const uint32_t pp0 = 1 + (m_chunkInvMap && !isInvalidIndex(p0) ? m_chunkInvMap[p0] : p0); const uint32_t pp1 = 1 + (m_chunkInvMap && !isInvalidIndex(p1) ? 
m_chunkInvMap[p1] : p1); return pp0 < pp1; // With the same support relationship, order by parent index } private: const NvBlastChunkDesc* m_descs; const char* m_annotation; const uint32_t* m_chunkMap; const uint32_t* m_chunkInvMap; }; } // namespace Blast } // namespace Nv using namespace Nv::Blast; extern "C" { bool NvBlastBuildAssetDescChunkReorderMap(uint32_t* chunkReorderMap, const NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, void* scratch, NvBlastLog logFn) { NVBLASTLL_CHECK(chunkCount == 0 || chunkDescs != nullptr, logFn, "NvBlastBuildAssetDescChunkReorderMap: NULL chunkDescs input with non-zero chunkCount", return false); NVBLASTLL_CHECK(chunkReorderMap == nullptr || chunkCount != 0, logFn, "NvBlastBuildAssetDescChunkReorderMap: NULL chunkReorderMap input with non-zero chunkCount", return false); NVBLASTLL_CHECK(chunkCount == 0 || scratch != nullptr, logFn, "NvBlastBuildAssetDescChunkReorderMap: NULL scratch input with non-zero chunkCount", return false); uint32_t* composedMap = static_cast<uint32_t*>(scratch); scratch = pointerOffset(scratch, chunkCount * sizeof(uint32_t)); uint32_t* chunkMap = static_cast<uint32_t*>(scratch); scratch = pointerOffset(scratch, chunkCount * sizeof(uint32_t)); char* chunkAnnotation = static_cast<char*>(scratch); scratch = pointerOffset(scratch, chunkCount * sizeof(char)); uint32_t supportChunkCount; uint32_t leafChunkCount; if (!Asset::ensureExactSupportCoverage(supportChunkCount, leafChunkCount, chunkAnnotation, chunkCount, const_cast<NvBlastChunkDesc*>(chunkDescs), true, logFn)) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastBuildAssetDescChunkReorderMap: chunk descriptors did not have exact coverage, map could not be built. Use NvBlastEnsureAssetExactSupportCoverage to fix descriptors."); return false; } // Initialize composedMap and its inverse to identity for (uint32_t i = 0; i < chunkCount; ++i) { composedMap[i] = i; chunkReorderMap[i] = i; } // Create a chunk ordering operator using the composedMap ChunksOrdered chunksOrdered(chunkDescs, chunkAnnotation); chunksOrdered.setMap(composedMap, chunkReorderMap); // Check initial order bool ordered = true; if (chunkCount > 1) { for (uint32_t i = chunkCount - 1; ordered && i--;) { ordered = !chunksOrdered(i + 1, i); } } if (ordered) { return true; // Initially ordered, return true } NVBLAST_ASSERT(chunkCount > 1); // Max depth is bounded by chunkCount, so that is the bound on the number of iterations uint32_t iter = chunkCount; do { // Reorder based on current composed map for (uint32_t i = 0; i < chunkCount; ++i) { chunkMap[i] = i; } std::stable_sort(chunkMap, chunkMap + chunkCount, chunksOrdered); // Fold chunkMap into composedMap for (uint32_t i = 0; i < chunkCount; ++i) { chunkMap[i] = composedMap[chunkMap[i]]; } for (uint32_t i = 0; i < chunkCount; ++i) { composedMap[i] = chunkMap[i]; chunkMap[i] = i; } invertMap(chunkReorderMap, composedMap, chunkCount); // Check order ordered = true; for (uint32_t i = chunkCount - 1; ordered && i--;) { ordered = !chunksOrdered(i + 1, i); } } while (!ordered && iter--); NVBLAST_ASSERT(ordered); return false; } void NvBlastApplyAssetDescChunkReorderMap ( NvBlastChunkDesc* reorderedChunkDescs, const NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, const uint32_t* chunkReorderMap, bool keepBondNormalChunkOrder, NvBlastLog logFn ) { NVBLASTLL_CHECK(chunkCount == 0 || chunkDescs != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL chunkDescs input with non-zero chunkCount", return);
NVBLASTLL_CHECK(reorderedChunkDescs == nullptr || chunkCount != 0, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL reorderedChunkDescs input with non-zero chunkCount", return); NVBLASTLL_CHECK(chunkReorderMap == nullptr || chunkCount != 0, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL chunkReorderMap input with non-zero chunkCount", return); NVBLASTLL_CHECK(bondCount == 0 || bondDescs != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL bondDescs input with non-zero bondCount", return); NVBLASTLL_CHECK(bondDescs == nullptr || chunkReorderMap != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL bondDescs input with NULL chunkReorderMap", return); // Copy chunk descs if (reorderedChunkDescs) { for (uint32_t i = 0; i < chunkCount; ++i) { reorderedChunkDescs[chunkReorderMap[i]] = chunkDescs[i]; uint32_t& parentIndex = reorderedChunkDescs[chunkReorderMap[i]].parentChunkDescIndex; if (parentIndex < chunkCount) { parentIndex = chunkReorderMap[parentIndex]; // If the parent index is valid, remap it too to reflect the new order } } } if (bondDescs) { for (uint32_t i = 0; i < bondCount; ++i) { NvBlastBondDesc& bondDesc = bondDescs[i]; uint32_t& index0 = bondDesc.chunkIndices[0]; uint32_t& index1 = bondDesc.chunkIndices[1]; const uint32_t newIndex0 = index0 < chunkCount ? chunkReorderMap[index0] : index0; const uint32_t newIndex1 = index1 < chunkCount ? chunkReorderMap[index1] : index1; if (keepBondNormalChunkOrder && (index0 < index1) != (newIndex0 < newIndex1)) { VecMath::mul(bondDesc.bond.normal, -1); } index0 = newIndex0; index1 = newIndex1; } } } void NvBlastApplyAssetDescChunkReorderMapInPlace ( NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, const uint32_t* chunkReorderMap, bool keepBondNormalChunkOrder, void* scratch, NvBlastLog logFn ) { NVBLASTLL_CHECK(chunkCount == 0 || chunkDescs != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMapInPlace: NULL chunkDescs input with non-zero chunkCount", return); NVBLASTLL_CHECK(chunkCount == 0 || scratch != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMapInPlace: NULL scratch input with non-zero chunkCount", return); NvBlastChunkDesc* chunksTemp = static_cast<NvBlastChunkDesc*>(scratch); memcpy(chunksTemp, chunkDescs, sizeof(NvBlastChunkDesc) * chunkCount); NvBlastApplyAssetDescChunkReorderMap(chunkDescs, chunksTemp, chunkCount, bondDescs, bondCount, chunkReorderMap, keepBondNormalChunkOrder, logFn); } bool NvBlastReorderAssetDescChunks ( NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap, bool keepBondNormalChunkOrder, void* scratch, NvBlastLog logFn ) { if (!NvBlastBuildAssetDescChunkReorderMap(chunkReorderMap, chunkDescs, chunkCount, scratch, logFn)) { NvBlastApplyAssetDescChunkReorderMapInPlace(chunkDescs, chunkCount, bondDescs, bondCount, chunkReorderMap, keepBondNormalChunkOrder, scratch, logFn); return false; } return true; } bool NvBlastEnsureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, void* scratch, NvBlastLog logFn) { NVBLASTLL_CHECK(chunkCount == 0 || chunkDescs != nullptr, logFn, "NvBlastEnsureAssetExactSupportCoverage: NULL chunkDescs input with non-zero chunkCount", return false); NVBLASTLL_CHECK(chunkCount == 0 || scratch != nullptr, logFn, "NvBlastEnsureAssetExactSupportCoverage: NULL scratch input with non-zero chunkCount", return false); uint32_t supportChunkCount; uint32_t leafChunkCount; return 
Asset::ensureExactSupportCoverage(supportChunkCount, leafChunkCount, static_cast<char*>(scratch), chunkCount, chunkDescs, false, logFn); } } // extern "C"
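/*
    Illustrative use of the helpers defined above to put chunk descriptors into the order expected by asset
    creation. This is a sketch only: the std::vector allocations are assumptions of this example, and the scratch
    size shown (chunkCount * sizeof(NvBlastChunkDesc)) is based on the in-place apply path above, which copies the
    chunk descriptors into scratch.

        std::vector<uint32_t> chunkReorderMap(chunkCount);
        std::vector<char> scratch(chunkCount * sizeof(NvBlastChunkDesc));
        const bool alreadyOrdered =
            NvBlastReorderAssetDescChunks(chunkDescs, chunkCount, bondDescs, bondCount,
                                          chunkReorderMap.data(), true, scratch.data(), logFn);
        // A return value of false means the descriptors were reordered in place;
        // chunkReorderMap[i] is the new position of the chunk originally at index i.
        // Passing keepBondNormalChunkOrder = true flips bond normals whose chunk index order was swapped by the reorder.
*/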
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastAsset.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastAssert.h" #include "NvBlastAsset.h" #include "NvBlastActor.h" #include "NvBlastMath.h" #include "NvBlastPreprocessorInternal.h" #include "NvBlastIndexFns.h" #include "NvBlastActorSerializationBlock.h" #include "NvBlastMemory.h" #include <algorithm> //#include <random> namespace Nv { namespace Blast { //////// Local helper functions //////// /** Helper function to validate the input parameters for NvBlastCreateAsset. See NvBlastCreateAsset for parameter definitions. 
*/ static bool solverAssetBuildValidateInput(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn) { if (mem == nullptr) { NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: NULL mem pointer input."); return false; } if (desc == nullptr) { NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: NULL desc pointer input."); return false; } if (desc->chunkCount == 0) { NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: Zero chunk count not allowed."); return false; } if (desc->chunkDescs == nullptr) { NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: NULL chunkDescs pointer input."); return false; } if (desc->bondCount != 0 && desc->bondDescs == nullptr) { NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: bondCount non-zero but NULL bondDescs pointer input."); return false; } if (scratch == nullptr) { NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: NULL scratch pointer input."); return false; } return true; } struct AssetDataOffsets { size_t m_chunks; size_t m_bonds; size_t m_subtreeLeafChunkCounts; size_t m_supportChunkIndices; size_t m_chunkToGraphNodeMap; size_t m_graphAdjacencyPartition; size_t m_graphAdjacentNodeIndices; size_t m_graphAdjacentBondIndices; }; static size_t createAssetDataOffsets(AssetDataOffsets& offsets, uint32_t chunkCount, uint32_t graphNodeCount, uint32_t bondCount) { NvBlastCreateOffsetStart(sizeof(Asset)); NvBlastCreateOffsetAlign16(offsets.m_chunks, chunkCount * sizeof(NvBlastChunk)); NvBlastCreateOffsetAlign16(offsets.m_bonds, bondCount * sizeof(NvBlastBond)); NvBlastCreateOffsetAlign16(offsets.m_subtreeLeafChunkCounts, chunkCount * sizeof(uint32_t)); NvBlastCreateOffsetAlign16(offsets.m_supportChunkIndices, graphNodeCount * sizeof(uint32_t)); NvBlastCreateOffsetAlign16(offsets.m_chunkToGraphNodeMap, chunkCount * sizeof(uint32_t)); NvBlastCreateOffsetAlign16(offsets.m_graphAdjacencyPartition, (graphNodeCount + 1) * sizeof(uint32_t)); NvBlastCreateOffsetAlign16(offsets.m_graphAdjacentNodeIndices, (2 * bondCount) * sizeof(uint32_t)); NvBlastCreateOffsetAlign16(offsets.m_graphAdjacentBondIndices, (2 * bondCount) * sizeof(uint32_t)); return NvBlastCreateOffsetEndAlign16(); } Asset* initializeAsset(void* mem, uint32_t chunkCount, uint32_t graphNodeCount, uint32_t leafChunkCount, uint32_t firstSubsupportChunkIndex, uint32_t bondCount, NvBlastLog logFn) { // Data offsets AssetDataOffsets offsets; const size_t dataSize = createAssetDataOffsets(offsets, chunkCount, graphNodeCount, bondCount); // Restricting our data size to < 4GB so that we may use uint32_t offsets if (dataSize > (size_t)UINT32_MAX) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::allocateAsset: Asset data size will exceed 4GB. 
Instance not created.\n"); return nullptr; } // Zero memory and cast to Asset Asset* asset = reinterpret_cast<Asset*>(memset(mem, 0, dataSize)); // Fill in fields const size_t graphOffset = NV_OFFSET_OF(Asset, m_graph); asset->m_header.dataType = NvBlastDataBlock::AssetDataBlock; asset->m_header.formatVersion = 0; // Not currently using this field asset->m_header.size = (uint32_t)dataSize; asset->m_header.reserved = 0; memset(&asset->m_ID, 0, sizeof(NvBlastID)); asset->m_chunkCount = chunkCount; asset->m_graph.m_nodeCount = graphNodeCount; asset->m_graph.m_chunkIndicesOffset = (uint32_t)(offsets.m_supportChunkIndices - graphOffset); asset->m_graph.m_adjacencyPartitionOffset = (uint32_t)(offsets.m_graphAdjacencyPartition - graphOffset); asset->m_graph.m_adjacentNodeIndicesOffset = (uint32_t)(offsets.m_graphAdjacentNodeIndices - graphOffset); asset->m_graph.m_adjacentBondIndicesOffset = (uint32_t)(offsets.m_graphAdjacentBondIndices - graphOffset); asset->m_leafChunkCount = leafChunkCount; asset->m_firstSubsupportChunkIndex = firstSubsupportChunkIndex; asset->m_bondCount = bondCount; asset->m_chunksOffset = (uint32_t)offsets.m_chunks; asset->m_bondsOffset = (uint32_t)offsets.m_bonds; asset->m_subtreeLeafChunkCountsOffset = (uint32_t)offsets.m_subtreeLeafChunkCounts; asset->m_chunkToGraphNodeMapOffset = (uint32_t)offsets.m_chunkToGraphNodeMap; // Ensure Bonds remain aligned NV_COMPILE_TIME_ASSERT((sizeof(NvBlastBond) & 0xf) == 0); // Ensure Bonds are aligned - note, this requires that the block be aligned NVBLAST_ASSERT((uintptr_t(asset->getBonds()) & 0xf) == 0); return asset; } /** Tests for a loop in a digraph starting at a given graph vertex. Using the implied digraph given by the chunkDescs' parentChunkIndex fields, the graph is walked from the chunk descriptor chunkDescs[chunkIndex], to determine if that walk leads to a loop. Input: chunkDescs - the chunk descriptors chunkDescIndex - the index of the starting chunk descriptor Return: true if a loop is found, false otherwise. */ NV_INLINE bool testForLoop(const NvBlastChunkDesc* chunkDescs, uint32_t chunkDescIndex) { NVBLAST_ASSERT(!isInvalidIndex(chunkDescIndex)); uint32_t chunkDescIndex1 = chunkDescs[chunkDescIndex].parentChunkDescIndex; if (isInvalidIndex(chunkDescIndex1)) { return false; } uint32_t chunkDescIndex2 = chunkDescs[chunkDescIndex1].parentChunkDescIndex; if (isInvalidIndex(chunkDescIndex2)) { return false; } do { // advance index 1 chunkDescIndex1 = chunkDescs[chunkDescIndex1].parentChunkDescIndex; // No need to check for termination here. index 2 would find it first. // advance index 2 twice and check for incidence with index 1 as well as termination if ((chunkDescIndex2 = chunkDescs[chunkDescIndex2].parentChunkDescIndex) == chunkDescIndex1) { return true; } if (isInvalidIndex(chunkDescIndex2)) { return false; } if ((chunkDescIndex2 = chunkDescs[chunkDescIndex2].parentChunkDescIndex) == chunkDescIndex1) { return true; } } while (!isInvalidIndex(chunkDescIndex2)); return false; } /** Tests a set of chunk descriptors to see if the implied hierarchy describes valid trees. A single tree implies that only one of the chunkDescs has an invalid (invalidIndex<uint32_t>()) parentChunkIndex, and all other chunks are descendents of that chunk. Passed set of chunk is checked to contain one or more single trees. Input: chunkCount - the number of chunk descriptors chunkDescs - an array of chunk descriptors of length chunkCount logFn - message function (see NvBlastLog definition). 
Return: true if the descriptors imply a valid trees, false otherwise. */ static bool testForValidTrees(uint32_t chunkCount, const NvBlastChunkDesc* chunkDescs, NvBlastLog logFn) { for (uint32_t i = 0; i < chunkCount; ++i) { // Ensure there are no loops if (testForLoop(chunkDescs, i)) { NVBLASTLL_LOG_WARNING(logFn, "testForValidTrees: loop found. Asset will not be created."); return false; } } return true; } #if 0 /** * Helper to generate random GUID */ static NvBlastID NvBlastExtCreateRandomID() { NvBlastID id; static std::default_random_engine re; *reinterpret_cast<uint32_t*>(&id.data[0]) = re(); *reinterpret_cast<uint32_t*>(&id.data[4]) = re(); *reinterpret_cast<uint32_t*>(&id.data[8]) = re(); *reinterpret_cast<uint32_t*>(&id.data[12]) = re(); return id; } #endif // CRC-32C (iSCSI) polynomial in reversed bit order. inline uint32_t crc32c(uint32_t crc, const char* buf, size_t len) { crc = ~crc; while (len--) { crc ^= *buf++; for (int k = 0; k < 8; k++) crc = (crc >> 1) ^ (-(int)(crc & 1) & 0x82f63b78); } return ~crc; } /** * Helper to generate GUID from NvBlastAsset memory */ static NvBlastID createIDFromAsset(const NvBlastAsset* asset, NvBlastLog logFn) { // Divide memory into quarters const char* m0 = reinterpret_cast<const char*>(asset); const char* m4 = m0 + NvBlastAssetGetSize(asset, logFn); const char* m2 = m0 + (m4 - m0) / 2; const char* m1 = m0 + (m2 - m0) / 2; const char* m3 = m2 + (m4 - m2) / 2; // CRC hash quarters const uint32_t a = crc32c(0, m0, m1 - m0); const uint32_t b = crc32c(a, m1, m2 - m1); const uint32_t c = crc32c(b, m2, m3 - m2); const uint32_t d = crc32c(c, m3, m4 - m3); // Build ID out of hashes NvBlastID id; *reinterpret_cast<uint32_t*>(&id.data[0x0]) = a; *reinterpret_cast<uint32_t*>(&id.data[0x4]) = b; *reinterpret_cast<uint32_t*>(&id.data[0x8]) = c; *reinterpret_cast<uint32_t*>(&id.data[0xc]) = d; return id; } /** Struct to hold chunk indices and bond index for sorting Utility struct used by NvBlastCreateAsset in order to arrange bond data in a lookup table, and also to easily identify redundant input. */ struct BondSortData { BondSortData(uint32_t c0, uint32_t c1, uint32_t b) : m_c0(c0), m_c1(c1), m_b(b) {} uint32_t m_c0; uint32_t m_c1; uint32_t m_b; }; /** Functional class for sorting a list of BondSortData */ class BondsOrdered { public: bool operator () (const BondSortData& bond0, const BondSortData& bond1) const { return (bond0.m_c0 != bond1.m_c0) ? (bond0.m_c0 < bond1.m_c0) : (bond0.m_c1 != bond1.m_c1 ? 
bond0.m_c1 < bond1.m_c1 : bond0.m_b < bond1.m_b); } }; //////// Asset static functions //////// size_t Asset::getMemorySize(const NvBlastAssetDesc* desc) { NVBLAST_ASSERT(desc != nullptr); // Count graph nodes uint32_t graphNodeCount = 0; for (uint32_t i = 0; i < desc->chunkCount; ++i) { graphNodeCount += (uint32_t)((desc->chunkDescs[i].flags & NvBlastChunkDesc::SupportFlag) != 0); } for (uint32_t i = 0; i < desc->bondCount; ++i) { const NvBlastBondDesc& bondDesc = desc->bondDescs[i]; const uint32_t chunkIndex0 = bondDesc.chunkIndices[0]; const uint32_t chunkIndex1 = bondDesc.chunkIndices[1]; if ((isInvalidIndex(chunkIndex0) && chunkIndex1 < desc->chunkCount) || (isInvalidIndex(chunkIndex1) && chunkIndex0 < desc->chunkCount)) { ++graphNodeCount; // world node break; } } AssetDataOffsets offsets; return createAssetDataOffsets(offsets, desc->chunkCount, graphNodeCount, desc->bondCount); } size_t Asset::createRequiredScratch(const NvBlastAssetDesc* desc, NvBlastLog logFn) { NVBLASTLL_CHECK(desc != nullptr, logFn, "Asset::createRequiredScratch: NULL desc.", return 0); // Aligned and padded return 16 + align16(desc->chunkCount*sizeof(char)) + align16(desc->chunkCount*sizeof(uint32_t)) + align16(2 * desc->bondCount*sizeof(BondSortData)) + align16(desc->bondCount*sizeof(uint32_t)); } Asset* Asset::create(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn) { #if NVBLASTLL_CHECK_PARAMS if (!solverAssetBuildValidateInput(mem, desc, scratch, logFn)) { return nullptr; } #else NV_UNUSED(solverAssetBuildValidateInput); #endif NVBLASTLL_CHECK((reinterpret_cast<uintptr_t>(mem) & 0xF) == 0, logFn, "NvBlastCreateAsset: mem pointer not 16-byte aligned.", return nullptr); // Make sure we have valid trees before proceeding if (!testForValidTrees(desc->chunkCount, desc->chunkDescs, logFn)) { return nullptr; } scratch = (void*)align16((size_t)scratch); // Bump to 16-byte alignment (see padding in NvBlastGetRequiredScratchForCreateAsset) // reserve chunkAnnotation on scratch char* chunkAnnotation = reinterpret_cast<char*>(scratch); scratch = pointerOffset(scratch, align16(desc->chunkCount)); // test for coverage, chunkAnnotation will be filled there. uint32_t leafChunkCount; uint32_t supportChunkCount; if (!ensureExactSupportCoverage(supportChunkCount, leafChunkCount, chunkAnnotation, desc->chunkCount, const_cast<NvBlastChunkDesc*>(desc->chunkDescs), true, logFn)) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastCreateAsset: support coverage is not exact. Asset will not be created. The Asset helper function NvBlastEnsureAssetExactSupportCoverage may be used to create exact coverage."); return nullptr; } // test for valid chunk order if (!testForValidChunkOrder(desc->chunkCount, desc->chunkDescs, chunkAnnotation, scratch)) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastCreateAsset: chunks order is invalid. Asset will not be created. 
Use Asset helper functions such as NvBlastBuildAssetDescChunkReorderMap to fix descriptor order."); return nullptr; } // Find first subsupport chunk uint32_t firstSubsupportChunkIndex = desc->chunkCount; // Set value to chunk count if no subsupport chunks are found for (uint32_t i = 0; i < desc->chunkCount; ++i) { if ((chunkAnnotation[i] & ChunkAnnotation::UpperSupport) == 0) { firstSubsupportChunkIndex = i; break; } } // Create map from global indices to graph node indices and initialize to invalid values uint32_t* graphNodeIndexMap = (uint32_t*)scratch; scratch = pointerOffset(scratch, align16(desc->chunkCount * sizeof(uint32_t))); memset(graphNodeIndexMap, 0xFF, desc->chunkCount*sizeof(uint32_t)); // Fill graphNodeIndexMap uint32_t graphNodeCount = 0; for (uint32_t i = 0; i < desc->chunkCount; ++i) { if ((chunkAnnotation[i] & ChunkAnnotation::Support) != 0) { graphNodeIndexMap[i] = graphNodeCount++; } } NVBLAST_ASSERT(graphNodeCount == supportChunkCount); // Scratch array for bond sorting, of size 2*desc->bondCount BondSortData* bondSortArray = (BondSortData*)scratch; scratch = pointerOffset(scratch, align16(2 * desc->bondCount*sizeof(BondSortData))); // Bond remapping array of size desc->bondCount uint32_t* bondMap = (uint32_t*)scratch; memset(bondMap, 0xFF, desc->bondCount*sizeof(uint32_t)); // Eliminate bad or redundant bonds, finding actual bond count uint32_t bondCount = 0; if (desc->bondCount > 0) { // Check for duplicates from input data as well as non-support chunk indices. All such bonds must be removed. bool invalidFound = false; bool duplicateFound = false; bool nonSupportFound = false; // Construct temp array of chunk index pairs and bond indices. This array is symmetrized to hold the reversed chunk indices as well. uint32_t bondSortArraySize = 0; BondSortData* t = bondSortArray; bool addWorldNode = false; for (uint32_t i = 0; i < desc->bondCount; ++i) { const NvBlastBondDesc& bondDesc = desc->bondDescs[i]; const uint32_t chunkIndex0 = bondDesc.chunkIndices[0]; const uint32_t chunkIndex1 = bondDesc.chunkIndices[1]; if ((chunkIndex0 >= desc->chunkCount && !isInvalidIndex(chunkIndex0)) || (chunkIndex1 >= desc->chunkCount && !isInvalidIndex(chunkIndex1)) || chunkIndex0 == chunkIndex1) { invalidFound = true; continue; } uint32_t graphIndex0; if (!isInvalidIndex(chunkIndex0)) { graphIndex0 = graphNodeIndexMap[chunkIndex0]; } else { addWorldNode = true; graphIndex0 = graphNodeCount; // Will set graphNodeCount = supportChunkCount + 1 } uint32_t graphIndex1; if (!isInvalidIndex(chunkIndex1)) { graphIndex1 = graphNodeIndexMap[chunkIndex1]; } else { addWorldNode = true; graphIndex1 = graphNodeCount; // Will set graphNodeCount = supportChunkCount + 1 } if (isInvalidIndex(graphIndex0) || isInvalidIndex(graphIndex1)) { nonSupportFound = true; continue; } t[bondSortArraySize++] = BondSortData(graphIndex0, graphIndex1, i); t[bondSortArraySize++] = BondSortData(graphIndex1, graphIndex0, i); } // Sort the temp array std::sort(bondSortArray, bondSortArray + bondSortArraySize, BondsOrdered()); uint32_t symmetrizedBondCount = 0; for (uint32_t i = 0; i < bondSortArraySize; ++i) { const bool duplicate = i > 0 && bondSortArray[i].m_c0 == bondSortArray[i - 1].m_c0 && bondSortArray[i].m_c1 == bondSortArray[i - 1].m_c1; // Since the array is sorted, uniqueness may be tested by only considering the previous element duplicateFound = duplicateFound || duplicate; if (!duplicate) { // Keep this bond if (symmetrizedBondCount != i) { bondSortArray[symmetrizedBondCount] = bondSortArray[i]; // Compact array 
if we've dropped bonds } ++symmetrizedBondCount; } } NVBLAST_ASSERT((symmetrizedBondCount & 1) == 0); // Because we symmetrized, there should be an even number bondCount = symmetrizedBondCount / 2; // World node references found in bonds; add a world node if (addWorldNode) { ++graphNodeCount; } // Report warnings if (invalidFound) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastCreateAsset: Invalid bonds found (non-existent or same chunks referenced) and removed from asset."); } if (duplicateFound) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastCreateAsset: Duplicate bonds found and removed from asset."); } if (nonSupportFound) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastCreateAsset: Bonds referencing non-support chunks found and removed from asset."); } } // Allocate memory for asset Asset* asset = initializeAsset(mem, desc->chunkCount, graphNodeCount, leafChunkCount, firstSubsupportChunkIndex, bondCount, logFn); // Asset data pointers SupportGraph& graph = asset->m_graph; NvBlastChunk* chunks = asset->getChunks(); NvBlastBond* bonds = asset->getBonds(); uint32_t* subtreeLeafChunkCounts = asset->getSubtreeLeafChunkCounts(); // Create chunks uint32_t* graphChunkIndices = graph.getChunkIndices(); memset(graphChunkIndices, 0xFF, graphNodeCount * sizeof(uint32_t)); // Ensures unmapped node indices go to invalidIndex - this is important for the world node, if added for (uint32_t i = 0; i < desc->chunkCount; ++i) { const NvBlastChunkDesc& chunkDesc = desc->chunkDescs[i]; NvBlastChunk& assetChunk = chunks[i]; memcpy(assetChunk.centroid, chunkDesc.centroid, 3 * sizeof(float)); assetChunk.volume = chunkDesc.volume; assetChunk.parentChunkIndex = chunkDesc.parentChunkDescIndex; assetChunk.firstChildIndex = invalidIndex<uint32_t>(); // Will be filled in below assetChunk.childIndexStop = assetChunk.firstChildIndex; assetChunk.userData = chunkDesc.userData; const uint32_t graphNodeIndex = graphNodeIndexMap[i]; if (!isInvalidIndex(graphNodeIndex)) { graphChunkIndices[graphNodeIndex] = i; } } // Copy chunkToGraphNodeMap memcpy(asset->getChunkToGraphNodeMap(), graphNodeIndexMap, desc->chunkCount * sizeof(uint32_t)); // Count chunk children for (uint32_t i = 0; i < desc->chunkCount; ++i) { const uint32_t parentChunkIndex = chunks[i].parentChunkIndex; if (!isInvalidIndex(parentChunkIndex)) { if (chunks[parentChunkIndex].childIndexStop == chunks[parentChunkIndex].firstChildIndex) { chunks[parentChunkIndex].childIndexStop = chunks[parentChunkIndex].firstChildIndex = i; } ++chunks[parentChunkIndex].childIndexStop; } } // Create bonds uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition(); uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices(); uint32_t* graphAdjacentBondIndices = graph.getAdjacentBondIndices(); if (bondCount > 0) { // Create the lookup table from the sorted array createIndexStartLookup<uint32_t>(graphAdjacencyPartition, 0, graphNodeCount - 1, &bondSortArray->m_c0, 2 * bondCount, sizeof(BondSortData)); // Write the adjacent chunk and bond index data uint32_t bondIndex = 0; for (uint32_t i = 0; i < 2 * bondCount; ++i) { const BondSortData& bondSortData = bondSortArray[i]; graphAdjacentNodeIndices[i] = bondSortData.m_c1; const uint32_t oldBondIndex = bondSortData.m_b; const NvBlastBondDesc& bondDesc = desc->bondDescs[oldBondIndex]; if (isInvalidIndex(bondMap[oldBondIndex])) { bonds[bondIndex] = bondDesc.bond; bondMap[oldBondIndex] = bondIndex++; } NVBLAST_ASSERT(bondMap[oldBondIndex] < bondCount); graphAdjacentBondIndices[i] = bondMap[oldBondIndex]; } } else { // No bonds - zero out all 
partition elements (including last one, to give zero size for adjacent data arrays) memset(graphAdjacencyPartition, 0, (graphNodeCount + 1)*sizeof(uint32_t)); } // Count subtree leaf chunks memset(subtreeLeafChunkCounts, 0, desc->chunkCount*sizeof(uint32_t)); uint32_t* breadthFirstChunkIndices = graphNodeIndexMap; // Reusing graphNodeIndexMap ... graphNodeIndexMap may no longer be used for (uint32_t startChunkIndex = 0; startChunkIndex < desc->chunkCount; ++startChunkIndex) { if (!isInvalidIndex(chunks[startChunkIndex].parentChunkIndex)) { break; // Only iterate through root chunks at this level } const uint32_t enumeratedChunkCount = enumerateChunkHierarchyBreadthFirst(breadthFirstChunkIndices, desc->chunkCount, chunks, startChunkIndex); for (uint32_t chunkNum = enumeratedChunkCount; chunkNum--;) { const uint32_t chunkIndex = breadthFirstChunkIndices[chunkNum]; const NvBlastChunk& chunk = chunks[chunkIndex]; if (chunk.childIndexStop <= chunk.firstChildIndex) { subtreeLeafChunkCounts[chunkIndex] = 1; } if (!isInvalidIndex(chunk.parentChunkIndex)) { subtreeLeafChunkCounts[chunk.parentChunkIndex] += subtreeLeafChunkCounts[chunkIndex]; } } } // Assign ID after data has been created asset->m_ID = createIDFromAsset(asset, logFn); return asset; } bool Asset::ensureExactSupportCoverage(uint32_t& supportChunkCount, uint32_t& leafChunkCount, char* chunkAnnotation, uint32_t chunkCount, NvBlastChunkDesc* chunkDescs, bool testOnly, NvBlastLog logFn) { // Clear leafChunkCount leafChunkCount = 0; memset(chunkAnnotation, 0, chunkCount); // Walk up the hierarchy from all chunks and mark all parents for (uint32_t i = 0; i < chunkCount; ++i) { if (chunkAnnotation[i] & Asset::ChunkAnnotation::Parent) { continue; } uint32_t chunkDescIndex = i; while (!isInvalidIndex(chunkDescIndex = chunkDescs[chunkDescIndex].parentChunkDescIndex)) { chunkAnnotation[chunkDescIndex] = Asset::ChunkAnnotation::Parent; // Note as non-leaf } } // Walk up the hierarchy from all leaves (counting them with leafChunkCount) and keep track of the support chunks found on each chain // Exactly one support chunk should be found on each walk. Remove all but the highest support markings if more than one are found. 
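// Note on the walk below: supportChunkDescIndex tracks the lowest support chunk found so far on
// the current leaf-to-root chain.  If another support chunk is encountered higher up, the lower
// chunk's Support annotation is cleared so that only the highest support chunk on the chain
// remains marked (redundantCoverage).  If the walk reaches the root without finding any support
// chunk, the chain is noted as having insufficient coverage and is repaired further below.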
bool redundantCoverage = false; bool insufficientCoverage = false; for (uint32_t i = 0; i < chunkCount; ++i) { if (chunkAnnotation[i] & Asset::ChunkAnnotation::Parent) { continue; } ++leafChunkCount; uint32_t supportChunkDescIndex; supportChunkDescIndex = invalidIndex<uint32_t>(); uint32_t chunkDescIndex = i; bool doneWithChain = false; do { if (chunkDescs[chunkDescIndex].flags & NvBlastChunkDesc::SupportFlag) { if (chunkAnnotation[chunkDescIndex] & Asset::ChunkAnnotation::Support) { // We've already been up this chain and marked this as support, so we have unique coverage already doneWithChain = true; } chunkAnnotation[chunkDescIndex] |= Asset::ChunkAnnotation::Support; // Note as support if (!isInvalidIndex(supportChunkDescIndex)) { if (testOnly) { return false; } redundantCoverage = true; chunkAnnotation[supportChunkDescIndex] &= ~Asset::ChunkAnnotation::Support; // Remove support marking do // Run up the hierarchy from supportChunkDescIndex to chunkDescIndex and remove the supersupport markings { supportChunkDescIndex = chunkDescs[supportChunkDescIndex].parentChunkDescIndex; chunkAnnotation[supportChunkDescIndex] &= ~Asset::ChunkAnnotation::SuperSupport; // Remove supersupport marking } while (supportChunkDescIndex != chunkDescIndex); } supportChunkDescIndex = chunkDescIndex; } else if (!isInvalidIndex(supportChunkDescIndex)) { chunkAnnotation[chunkDescIndex] |= Asset::ChunkAnnotation::SuperSupport; // Not a support chunk and we've already found a support chunk, so this is super-support } } while (!doneWithChain && !isInvalidIndex(chunkDescIndex = chunkDescs[chunkDescIndex].parentChunkDescIndex)); if (isInvalidIndex(supportChunkDescIndex)) { if (testOnly) { return false; } insufficientCoverage = true; } } if (redundantCoverage) { NVBLASTLL_LOG_INFO(logFn, "NvBlastCreateAsset: some leaf-to-root chains had more than one support chunk. Some support chunks removed."); } if (insufficientCoverage) { // If coverage was insufficient, then walk up the hierarchy again and mark all chunks that have a support descendant. // This will allow us to place support chunks at the highest possible level to obtain coverage. 
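// Illustrative example of the repair below: given a root R with children A and B, where only a
// descendant of A is marked support, the leaf-to-root walks from B's leaves find no support chunk
// and no ancestor with a support descendant until they reach R (which has one, via A).  Support is
// therefore added to B, the highest chunk on that chain whose subtree contains no support chunk.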
for (uint32_t i = 0; i < chunkCount; ++i) { if (chunkAnnotation[i] & Asset::ChunkAnnotation::Parent) { continue; } bool supportFound = false; uint32_t chunkDescIndex = i; do { if (chunkAnnotation[chunkDescIndex] & Asset::ChunkAnnotation::Support) { supportFound = true; } else if (supportFound) { chunkAnnotation[chunkDescIndex] |= Asset::ChunkAnnotation::SuperSupport; // Note that a descendant has support } } while (!isInvalidIndex(chunkDescIndex = chunkDescs[chunkDescIndex].parentChunkDescIndex)); } // Now walk up the hierarchy from each leaf one more time, and make sure there is coverage for (uint32_t i = 0; i < chunkCount; ++i) { if (chunkAnnotation[i] & Asset::ChunkAnnotation::Parent) { continue; } uint32_t previousChunkDescIndex; previousChunkDescIndex = invalidIndex<uint32_t>(); uint32_t chunkDescIndex = i; for (;;) { if (chunkAnnotation[chunkDescIndex] & Asset::ChunkAnnotation::Support) { break; // There is support along this chain } if (chunkAnnotation[chunkDescIndex] & Asset::ChunkAnnotation::SuperSupport) { NVBLAST_ASSERT(!isInvalidIndex(previousChunkDescIndex)); // This should be impossible chunkAnnotation[previousChunkDescIndex] |= Asset::ChunkAnnotation::Support; // There is no support along this chain, and this is the highest place where we can put support break; } previousChunkDescIndex = chunkDescIndex; chunkDescIndex = chunkDescs[chunkDescIndex].parentChunkDescIndex; if (isInvalidIndex(chunkDescIndex)) { chunkAnnotation[previousChunkDescIndex] |= Asset::ChunkAnnotation::Support; // There was no support found anywhere in the hierarchy, so we add it at the root break; } } } NVBLASTLL_LOG_INFO(logFn, "NvBlastCreateAsset: some leaf-to-root chains had no support chunks. Support chunks added."); } // Apply changes and count the number of support chunks supportChunkCount = 0; for (uint32_t i = 0; i < chunkCount; ++i) { const bool wasSupport = (chunkDescs[i].flags & NvBlastChunkDesc::SupportFlag) != 0; const bool nowSupport = (chunkAnnotation[i] & Asset::ChunkAnnotation::Support) != 0; if (wasSupport != nowSupport) { chunkDescs[i].flags ^= NvBlastChunkDesc::SupportFlag; } if ((chunkDescs[i].flags & NvBlastChunkDesc::SupportFlag) != 0) { ++supportChunkCount; } } return !redundantCoverage && !insufficientCoverage; } bool Asset::testForValidChunkOrder(uint32_t chunkCount, const NvBlastChunkDesc* chunkDescs, const char* chunkAnnotation, void* scratch) { char* chunkMarks = static_cast<char*>(memset(scratch, 0, chunkCount)); uint32_t currentParentChunkDescIndex = invalidIndex<uint32_t>(); for (uint32_t i = 0; i < chunkCount; ++i) { const uint32_t parentChunkDescIndex = chunkDescs[i].parentChunkDescIndex; if (!isInvalidIndex(parentChunkDescIndex) && parentChunkDescIndex >= i) // 'chunks should come after their parents' { return false; } if (parentChunkDescIndex != currentParentChunkDescIndex) { if (!isInvalidIndex(currentParentChunkDescIndex)) { chunkMarks[currentParentChunkDescIndex] = 1; } currentParentChunkDescIndex = parentChunkDescIndex; if (isInvalidIndex(currentParentChunkDescIndex)) // 'root chunks should go first' { return false; } else if (chunkMarks[currentParentChunkDescIndex] != 0) // 'all chunks with same parent index should go in a row' { return false; } } if (i < chunkCount - 1) { const bool upperSupport0 = (chunkAnnotation[i] & ChunkAnnotation::UpperSupport) != 0; const bool upperSupport1 = (chunkAnnotation[i + 1] & ChunkAnnotation::UpperSupport) != 0; if (!upperSupport0 && upperSupport1) // 'upper-support chunks should come before subsupport chunks' { return false; } } } 
return true; } } // namespace Blast } // namespace Nv // API implementation extern "C" { size_t NvBlastGetRequiredScratchForCreateAsset(const NvBlastAssetDesc* desc, NvBlastLog logFn) { NVBLASTLL_CHECK(desc != nullptr, logFn, "NvBlastGetRequiredScratchForCreateAsset: NULL desc pointer input.", return 0); return Nv::Blast::Asset::createRequiredScratch(desc, logFn); } size_t NvBlastGetAssetMemorySize(const NvBlastAssetDesc* desc, NvBlastLog logFn) { NVBLASTLL_CHECK(desc != nullptr, logFn, "NvBlastGetAssetMemorySize: NULL desc input.", return 0); return Nv::Blast::Asset::getMemorySize(desc); } size_t NvBlastGetAssetMemorySizeFromSizeData(const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn) { NV_UNUSED(logFn); Nv::Blast::AssetDataOffsets offsets; return Nv::Blast::createAssetDataOffsets(offsets, sizeData.chunkCount, sizeData.nodeCount, sizeData.bondCount); } NvBlastAsset* NvBlastCreateAsset(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn) { return Nv::Blast::Asset::create(mem, desc, scratch, logFn); } size_t NvBlastAssetGetFamilyMemorySize(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetFamilyMemorySize: NULL asset pointer input.", return 0); return Nv::Blast::getFamilyMemorySize(reinterpret_cast<const Nv::Blast::Asset*>(asset)); } size_t NvBlastAssetGetFamilyMemorySizeFromSizeData(const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn) { NV_UNUSED(logFn); return Nv::Blast::getFamilyMemorySize(sizeData); } NvBlastID NvBlastAssetGetID(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetID: NULL asset pointer input.", NvBlastID zero; memset(&zero, 0, sizeof(NvBlastID)); return zero); return ((Nv::Blast::Asset*)asset)->m_ID; } bool NvBlastAssetSetID(NvBlastAsset* asset, const NvBlastID* id, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetSetID: NULL asset pointer input.", return false); NVBLASTLL_CHECK(id != nullptr, logFn, "NvBlastAssetSetID: NULL id pointer input.", return false); ((Nv::Blast::Asset*)asset)->m_ID = *id; return true; } uint32_t NvBlastAssetGetFormatVersion(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetFormatVersion: NULL asset input.", return UINT32_MAX); return ((Nv::Blast::Asset*)asset)->m_header.formatVersion; } uint32_t NvBlastAssetGetSize(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetSize: NULL asset input.", return 0); return ((Nv::Blast::Asset*)asset)->m_header.size; } uint32_t NvBlastAssetGetChunkCount(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetChunkCount: NULL asset input.", return 0); return ((Nv::Blast::Asset*)asset)->m_chunkCount; } uint32_t NvBlastAssetGetSupportChunkCount(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetSupportChunkCount: NULL asset input.", return 0); const Nv::Blast::Asset* a = reinterpret_cast<const Nv::Blast::Asset*>(asset); const Nv::Blast::SupportGraph& graph = a->m_graph; if (graph.m_nodeCount == 0) { return 0; // This shouldn't happen } return Nv::Blast::isInvalidIndex(graph.getChunkIndices()[graph.m_nodeCount - 1]) ? 
graph.m_nodeCount - 1 : graph.m_nodeCount; } uint32_t NvBlastAssetGetLeafChunkCount(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetLeafChunkCount: NULL asset input.", return 0); return ((Nv::Blast::Asset*)asset)->m_leafChunkCount; } uint32_t NvBlastAssetGetFirstSubsupportChunkIndex(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetFirstSubsupportChunkIndex: NULL asset input.", return 0); return ((Nv::Blast::Asset*)asset)->m_firstSubsupportChunkIndex; } uint32_t NvBlastAssetGetBondCount(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetBondCount: NULL asset input.", return 0); return ((Nv::Blast::Asset*)asset)->m_bondCount; } const NvBlastSupportGraph NvBlastAssetGetSupportGraph(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetSupportGraph: NULL asset input.", NvBlastSupportGraph blank; blank.nodeCount = 0; blank.chunkIndices = blank.adjacencyPartition = blank.adjacentNodeIndices = blank.adjacentBondIndices = nullptr; return blank); const Nv::Blast::SupportGraph& supportGraph = static_cast<const Nv::Blast::Asset*>(asset)->m_graph; NvBlastSupportGraph graph; graph.nodeCount = supportGraph.m_nodeCount; graph.chunkIndices = supportGraph.getChunkIndices(); graph.adjacencyPartition = supportGraph.getAdjacencyPartition(); graph.adjacentNodeIndices = supportGraph.getAdjacentNodeIndices(); graph.adjacentBondIndices = supportGraph.getAdjacentBondIndices(); return graph; } const uint32_t* NvBlastAssetGetChunkToGraphNodeMap(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetChunkToGraphNodeMap: NULL asset input.", return nullptr); return static_cast<const Nv::Blast::Asset*>(asset)->getChunkToGraphNodeMap(); } const NvBlastChunk* NvBlastAssetGetChunks(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetChunks: NULL asset input.", return 0); return ((Nv::Blast::Asset*)asset)->getChunks(); } const NvBlastBond* NvBlastAssetGetBonds(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetBonds: NULL asset input.", return 0); return ((Nv::Blast::Asset*)asset)->getBonds(); } uint32_t NvBlastAssetGetActorSerializationSizeUpperBound(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetActorSerializationSizeUpperBound: NULL asset input.", return 0); const Nv::Blast::Asset& solverAsset = *(const Nv::Blast::Asset*)asset; const uint32_t graphNodeCount = solverAsset.m_graph.m_nodeCount; // Calculate serialization size for an actor with all graph nodes (and therefore all bonds), and somehow with all graph nodes visible (after all, this is an upper bound). const uint64_t upperBound = Nv::Blast::getActorSerializationSize(graphNodeCount, solverAsset.getLowerSupportChunkCount(), graphNodeCount, solverAsset.getBondCount()); if (upperBound > UINT32_MAX) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastAssetGetActorSerializationSizeUpperBound: Serialization block size exceeds 4GB. Returning 0.\n"); return 0; } return static_cast<uint32_t>(upperBound); } } // extern "C"
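// Illustrative usage sketch (not part of the SDK source): typical creation flow using the C API
// above.  The allocation calls below are assumptions for illustration; any allocator returning
// 16-byte-aligned memory is acceptable, since NvBlastCreateAsset requires 16-byte alignment.
#if 0
#include <cstdlib>

static NvBlastAsset* createAssetFromDesc(const NvBlastAssetDesc& desc, NvBlastLog logFn)
{
    const size_t scratchSize = NvBlastGetRequiredScratchForCreateAsset(&desc, logFn);
    const size_t assetSize = NvBlastGetAssetMemorySize(&desc, logFn);

    void* scratch = std::malloc(scratchSize);                               // needed only during creation
    void* mem = std::aligned_alloc(16, (assetSize + 15) & ~size_t(15));     // 16-byte-aligned asset block

    NvBlastAsset* asset = NvBlastCreateAsset(mem, &desc, scratch, logFn);

    std::free(scratch);
    if (asset == nullptr)
    {
        std::free(mem);                                                     // creation failed; release the block
    }
    return asset;                                                           // caller owns the asset memory
}
#endif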
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastFamily.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTFAMILY_H #define NVBLASTFAMILY_H #include "NvBlastAsset.h" #include "NvPreprocessor.h" #include "NvBlastDLink.h" #include "NvBlastAtomic.h" #include "NvBlastMemory.h" #include <cstring> struct NvBlastAsset; namespace Nv { namespace Blast { // Forward declarations class FamilyGraph; class Actor; class Asset; /** Data header at the beginning of every NvBlastActor family The block address may be cast to a valid FamilyHeader pointer. */ struct FamilyHeader : public NvBlastDataBlock { /** The ID for the asset. This will be resolved into a pointer in the runtime data. */ NvBlastID m_assetID; /** Actors, of type Actor. Actors with support chunks will use this array in the range [0, m_asset->m_graphNodeCount), while subsupport actors will be placed in the range [m_asset->m_graphNodeCount, m_asset->getLowerSupportChunkCount()). */ NvBlastBlockArrayData(Actor, m_actorsOffset, getActors, m_asset->getLowerSupportChunkCount()); /** Visible chunk index links, of type IndexDLink<uint32_t>. getVisibleChunkIndexLinks returns an array of size m_asset->m_chunkCount of IndexDLink<uint32_t> (see IndexDLink). */ NvBlastBlockArrayData(IndexDLink<uint32_t>, m_visibleChunkIndexLinksOffset, getVisibleChunkIndexLinks, m_asset->m_chunkCount); /** Chunk actor IDs, of type uint32_t. These correspond to the ID of the actor which owns each chunk. A value of invalidIndex<uint32_t>() indicates no owner. getChunkActorIndices returns an array of size m_asset->m_firstSubsupportChunkIndex. */ NvBlastBlockArrayData(uint32_t, m_chunkActorIndicesOffset, getChunkActorIndices, m_asset->m_firstSubsupportChunkIndex); /** Graph node index links, of type uint32_t. The successor to index[i] is m_graphNodeIndexLinksOffset[i]. A value of invalidIndex<uint32_t>() indicates no successor. getGraphNodeIndexLinks returns an array of size m_asset->m_graphNodeCount. 
*/ NvBlastBlockArrayData(uint32_t, m_graphNodeIndexLinksOffset, getGraphNodeIndexLinks, m_asset->m_graph.m_nodeCount); /** Health for each support chunk and subsupport chunk, of type float. To access support chunks, use the corresponding graph node index in the array returned by getLowerSupportChunkHealths. To access subsupport chunk healths, use getSubsupportChunkHealths (see documentation for details). */ NvBlastBlockArrayData(float, m_lowerSupportChunkHealthsOffset, getLowerSupportChunkHealths, m_asset->getLowerSupportChunkCount()); /** Utility function to get the start of the subsupport chunk health array. To access a subsupport chunk health indexed by i, use getSubsupportChunkHealths()[i - m_asset->m_firstSubsupportChunkIndex] \return the array of health values associated with all descendants of support chunks. */ float* getSubsupportChunkHealths() const { NVBLAST_ASSERT(m_asset != nullptr); return (float*)((uintptr_t)this + m_lowerSupportChunkHealthsOffset) + m_asset->m_graph.m_nodeCount; } /** Bond health for the interfaces between two chunks, of type float. Since the bond is shared by two chunks, the same bond health is used for chunk[i] -> chunk[j] as for chunk[j] -> chunk[i]. getBondHealths returns the array of healths associated with all bonds in the support graph. */ NvBlastBlockArrayData(float, m_graphBondHealthsOffset, getBondHealths, m_asset->getBondCount()); /** Bond health for the interfaces between two chunks, of type float. Since the bond is shared by two chunks, the same bond health is used for chunk[i] -> chunk[j] as for chunk[j] -> chunk[i]. getCachedBondHealths returns the array of manually cached healths associated with all bonds in the support graph. */ NvBlastBlockArrayData(float, m_graphCachedBondHealthsOffset, getCachedBondHealths, m_asset->getBondCount()); /** The instance graph for islands searching, of type FamilyGraph. Return the dynamic data generated for the support graph. (See FamilyGraph.) This is used to store current connectivity information based upon bond and chunk healths, as well as cached intermediate data for faster incremental updates. */ NvBlastBlockData(FamilyGraph, m_familyGraphOffset, getFamilyGraph); //////// Runtime data //////// /** The number of actors using this block. */ volatile uint32_t m_actorCount; /** The asset corresponding to all actors in this family. This is runtime data and will be resolved from m_assetID. */ union { const Asset* m_asset; uint64_t m_runtimePlaceholder; // Make sure we reserve enough room for an 8-byte pointer }; //////// Functions //////// /** Gets an actor from the actor array and validates it if it is not already valid. This increments the actor reference count. \param[in] index The index of the actor to borrow. Must be in the range [0, getActorsArraySize()). \return A pointer to the indexed Actor. */ Actor* borrowActor(uint32_t index); /** Invalidates the actor if it is not already invalid. This decrements the actor reference count, but does not free this block when the count goes to zero. \param[in] actor The actor to invalidate. */ void returnActor(Actor& actor); /** Returns a value to indicate whether or not the Actor with the given index is valid for use (active). \return true iff the indexed actor is active. */ bool isActorActive(uint32_t index) const; /** Retrieve the actor from an index. If actor is inactive nullptr is returned. \param[in] index The index of an actor. \return A pointer to the indexed actor if the actor is active, nullptr otherwise. 
*/ Actor* getActorByIndex(uint32_t index) const; /** Retrieve the index of an actor associated with the given chunk. \param[in] chunkIndex The index of chunk. \return the index of associated actor in the FamilyHeader's getActors() array. */ uint32_t getChunkActorIndex(uint32_t chunkIndex) const; /** Retrieve the index of an actor associated with the given node. \param[in] nodeIndex The index of node. \return the index of associated actor in the FamilyHeader's getActors() array. */ uint32_t getNodeActorIndex(uint32_t nodeIndex) const; /** Retrieve an actor associated with the given chunk. \param[in] chunkIndex The index of chunk. \return A pointer to the actor if the actor is active, nullptr otherwise. */ Actor* getChunkActor(uint32_t chunkIndex) const; /** Retrieve an actor associated with the given node. \param[in] nodeIndex The index of node. \return A pointer to the actor if the actor is active, nullptr otherwise. */ Actor* getNodeActor(uint32_t nodeIndex) const; //////// Fracturing methods //////// /** Hierarchically distribute damage to child chunks. \param chunkIndex asset chunk index to hierarchically damage \param suboffset index of the first sub-support health \param healthDamage damage strength to apply \param chunkHealths instance chunk healths \param chunks asset chunk collection */ void fractureSubSupportNoEvents(uint32_t chunkIndex, uint32_t suboffset, float healthDamage, float* chunkHealths, const NvBlastChunk* chunks); /** Hierarchically distribute damage to child chunks, recording a fracture event for each health damage applied. If outBuffer is too small, events are dropped but the chunks are still damaged. \param chunkIndex asset chunk index to hierarchically damage \param suboffset index of the first sub-support health \param healthDamage damage strength to apply \param chunkHealths instance chunk healths \param chunks asset chunk collection \param outBuffer target buffer for fracture events \param currentIndex current position in outBuffer - returns the number of damaged chunks \param maxCount capacity of outBuffer \param[in] filterActor pointer to the actor to filter commands that target other actors. May be NULL to apply all commands \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. */ void fractureSubSupport(uint32_t chunkIndex, uint32_t suboffset, float healthDamage, float* chunkHealths, const NvBlastChunk* chunks, NvBlastChunkFractureData* outBuffer, uint32_t* currentIndex, const uint32_t maxCount); /** Apply chunk fracture commands hierarchically. \param chunkFractureCount number of chunk fracture commands to apply \param chunkFractures array of chunk fracture commands \param filterActor pointer to the actor to filter commands corresponding to other actors. May be NULL to apply all commands \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. */ void fractureNoEvents(uint32_t chunkFractureCount, const NvBlastChunkFractureData* chunkFractures, Actor* filterActor, NvBlastLog logFn); /** Apply chunk fracture commands hierarchically, recording a fracture event for each health damage applied. If events array is too small, events are dropped but the chunks are still damaged. 
\param chunkFractureCount number of chunk fracture commands to apply \param commands array of chunk fracture commands \param events target buffer for fracture events \param eventsSize number of available entries in 'events' \param count returns the number of damaged chunks \param[in] filterActor pointer to the actor to filter commands that target other actors. May be NULL to apply all commands \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. */ void fractureWithEvents(uint32_t chunkFractureCount, const NvBlastChunkFractureData* commands, NvBlastChunkFractureData* events, uint32_t eventsSize, uint32_t* count, Actor* filterActor, NvBlastLog logFn); /** Apply chunk fracture commands hierarchically, recording a fracture event for each health damage applied. In-Place version: fracture commands are replaced by fracture events. If inoutbuffer array is too small, events are dropped but the chunks are still damaged. \param chunkFractureCount number of chunk fracture commands to apply \param inoutbuffer array of chunk fracture commands to be replaced by events \param eventsSize number of available entries in inoutbuffer \param count returns the number of damaged chunks \param[in] filterActor pointer to the actor to filter commands that target other actors. May be NULL to apply all commands \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. */ void fractureInPlaceEvents(uint32_t chunkFractureCount, NvBlastChunkFractureData* inoutbuffer, uint32_t eventsSize, uint32_t* count, Actor* filterActor, NvBlastLog logFn); /** See NvBlastActorApplyFracture \param[in,out] eventBuffers Target buffers to hold applied fracture events. May be NULL, in which case events are not reported. To avoid data loss, provide an entry for every lower-support chunk and every bond in the original actor. \param[in,out] actor The NvBlastActor to apply fracture to. \param[in] commands The fracture commands to process. \param[in] filterActor pointer to the actor to filter commands that target other actors. May be NULL to apply all commands \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \param[in,out] timers If non-NULL this struct will be filled out with profiling information for the step, in profile build configurations. */ void applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands, Actor* filterActor, NvBlastLog logFn, NvBlastTimers* timers); }; } // namespace Blast } // namespace Nv #include "NvBlastActor.h" namespace Nv { namespace Blast { //////// FamilyHeader inline methods //////// NV_INLINE Actor* FamilyHeader::borrowActor(uint32_t index) { NVBLAST_ASSERT(index < getActorsArraySize()); Actor& actor = getActors()[index]; if (actor.m_familyOffset == 0) { const uintptr_t offset = (uintptr_t)&actor - (uintptr_t)this; NVBLAST_ASSERT(offset <= UINT32_MAX); actor.m_familyOffset = (uint32_t)offset; atomicIncrement(reinterpret_cast<volatile int32_t*>(&m_actorCount)); } return &actor; } NV_INLINE void FamilyHeader::returnActor(Actor& actor) { if (actor.m_familyOffset != 0) { actor.m_familyOffset = 0; // The actor count should be positive since this actor was valid. Check to be safe. 
NVBLAST_ASSERT(m_actorCount > 0); atomicDecrement(reinterpret_cast<volatile int32_t*>(&m_actorCount)); } } NV_INLINE bool FamilyHeader::isActorActive(uint32_t index) const { NVBLAST_ASSERT(index < getActorsArraySize()); return getActors()[index].m_familyOffset != 0; } NV_INLINE Actor* FamilyHeader::getActorByIndex(uint32_t index) const { NVBLAST_ASSERT(index < getActorsArraySize()); Actor& actor = getActors()[index]; return actor.isActive() ? &actor : nullptr; } NV_INLINE uint32_t FamilyHeader::getChunkActorIndex(uint32_t chunkIndex) const { NVBLAST_ASSERT(m_asset); NVBLAST_ASSERT(chunkIndex < m_asset->m_chunkCount); if (chunkIndex < m_asset->getUpperSupportChunkCount()) { return getChunkActorIndices()[chunkIndex]; } else { return chunkIndex - (m_asset->getUpperSupportChunkCount() - m_asset->m_graph.m_nodeCount); } } NV_INLINE uint32_t FamilyHeader::getNodeActorIndex(uint32_t nodeIndex) const { NVBLAST_ASSERT(m_asset); NVBLAST_ASSERT(nodeIndex < m_asset->m_graph.m_nodeCount); const uint32_t chunkIndex = m_asset->m_graph.getChunkIndices()[nodeIndex]; return isInvalidIndex(chunkIndex) ? chunkIndex : getChunkActorIndices()[chunkIndex]; } NV_INLINE Actor* FamilyHeader::getChunkActor(uint32_t chunkIndex) const { uint32_t actorIndex = getChunkActorIndex(chunkIndex); return !isInvalidIndex(actorIndex) ? getActorByIndex(actorIndex) : nullptr; } NV_INLINE Actor* FamilyHeader::getNodeActor(uint32_t nodeIndex) const { uint32_t actorIndex = getNodeActorIndex(nodeIndex); return !isInvalidIndex(actorIndex) ? getActorByIndex(actorIndex) : nullptr; } //////// Global functions //////// /** Returns the number of bytes of memory that a family created using the given asset will require. A pointer to a block of memory of at least this size must be passed in as the mem argument of createFamily. \param[in] asset The asset that will be passed into NvBlastAssetCreateFamily. \param[in] sizeData Alternate version where the counts are known but there is not an existing asset. */ size_t getFamilyMemorySize(const Asset* asset); size_t getFamilyMemorySize(const NvBlastAssetMemSizeData& sizeData); } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTFAMILY_H
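// Illustrative note (not part of this header): worked example of the chunk-to-actor index mapping
// in FamilyHeader::getChunkActorIndex above, using hypothetical counts.  With an upper-support
// chunk count of 12 and a support graph node count of 8:
//   chunk index  5 (upper-support) -> getChunkActorIndices()[5]
//   chunk index 14 (subsupport)    -> 14 - (12 - 8) = 10, i.e. slot 10 in the getActors() array,
//                                     which lies in the subsupport actor range [8, lowerSupportChunkCount).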
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastActorSerializationBlock.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTACTORSERIALIZATIONBLOCK_H #define NVBLASTACTORSERIALIZATIONBLOCK_H #include "NvBlastFixedBoolArray.h" namespace Nv { namespace Blast { /** Struct-enum which keeps track of the actor serialization format. */ struct ActorSerializationFormat { enum Version { /** Initial version */ Initial, // New formats must come before Count. They should be given descriptive names with more information in comments. /** The number of serialization formats. */ Count, /** The current version. This should always be Count-1 */ Current = Count - 1 }; }; /** Data header at the beginning of a NvBlastActor serialization block The block address may be cast to a valid ActorSerializationHeader pointer. Serialization state is only valid if partition has been called since the last call to findIslands(). */ struct ActorSerializationHeader { /** A number which is incremented every time the data layout changes. */ uint32_t m_formatVersion; /** The size of the serialization block, including this header. Memory sizes are restricted to 32-bit representable values. */ uint32_t m_size; /** The index of the actor within its family. */ uint32_t m_index; /** The number of elements in the visible chunk indices list. */ uint32_t m_visibleChunkCount; /** The number of elements in the graph node indices list. */ uint32_t m_graphNodeCount; /** The number of leaf chunks in this actor. */ uint32_t m_leafChunkCount; /** Visible chunk indices, of type uint32_t. */ NvBlastBlockArrayData(uint32_t, m_visibleChunkIndicesOffset, getVisibleChunkIndices, m_visibleChunkCount); /** Graph node indices, of type uint32_t. */ NvBlastBlockArrayData(uint32_t, m_graphNodeIndicesOffset, getGraphNodeIndices, m_graphNodeCount); /** Healths for lower support chunks in this actor, in breadth-first order from the support chunks associated with the graph nodes. Type float. 
*/ NvBlastBlockData(float, m_lowerSupportChunkHealthsOffset, getLowerSupportChunkHealths); /** Healths for bonds associated with support chunks in this actor, in order of graph adjacency from associated graph nodes, i < j only. Type float. */ NvBlastBlockData(float, m_bondHealthsOffset, getBondHealths); /** Fast route in instance graph calculated for each graph node in this actor, of type uint32_t. */ NvBlastBlockArrayData(uint32_t, m_fastRouteOffset, getFastRoute, m_graphNodeCount); /** Hop counts in instance graph calculated for each graph node in this actor, of type uint32_t. */ NvBlastBlockArrayData(uint32_t, m_hopCountsOffset, getHopCounts, m_graphNodeCount); /** "Edge removed" bits for bonds associated with support chunks in this actor, in order of graph adjacency from associated graph nodes, i < j only. Type FixedBoolArray. */ NvBlastBlockData(FixedBoolArray, m_edgeRemovedArrayOffset, getEdgeRemovedArray); }; //////// Global functions //////// /** A buffer size sufficient to serialize an actor with a given visible chunk count, lower support chunk count, graph node count, and bond count. \param[in] visibleChunkCount The number of visible chunks \param[in] lowerSupportChunkCount The number of lower-support chunks in the asset. \param[in] graphNodeCount The number of graph nodes in the asset. \param[in] bondCount The number of graph bonds in the asset. \return the required buffer size in bytes. */ NV_INLINE size_t getActorSerializationSize(uint32_t visibleChunkCount, uint32_t lowerSupportChunkCount, uint32_t graphNodeCount, uint32_t bondCount) { // Family offsets const size_t visibleChunkIndicesOffset = align16(sizeof(ActorSerializationHeader)); // size = visibleChunkCount*sizeof(uint32_t) const size_t graphNodeIndicesOffset = align16(visibleChunkIndicesOffset + visibleChunkCount*sizeof(uint32_t)); // size = graphNodeCount*sizeof(uint32_t) const size_t lowerSupportHealthsOffset = align16(graphNodeIndicesOffset + graphNodeCount*sizeof(uint32_t)); // size = lowerSupportChunkCount*sizeof(float) const size_t bondHealthsOffset = align16(lowerSupportHealthsOffset + lowerSupportChunkCount*sizeof(float)); // size = bondCount*sizeof(float) const size_t fastRouteOffset = align16(bondHealthsOffset + bondCount*sizeof(float)); // size = graphNodeCount*sizeof(uint32_t) const size_t hopCountsOffset = align16(fastRouteOffset + graphNodeCount*sizeof(uint32_t)); // size = graphNodeCount*sizeof(uint32_t) const size_t edgeRemovedArrayOffset = align16(hopCountsOffset + graphNodeCount*sizeof(uint32_t)); // size = 0 or FixedBoolArray::requiredMemorySize(bondCount) return align16(edgeRemovedArrayOffset + (bondCount == 0 ? 0 : FixedBoolArray::requiredMemorySize(bondCount))); } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTACTORSERIALIZATIONBLOCK_H
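// Illustrative note (not part of this header): worked example of the layout computed by
// getActorSerializationSize above, using hypothetical counts visibleChunkCount = 3,
// lowerSupportChunkCount = 10, graphNodeCount = 6, bondCount = 5.  The header occupies
// align16(sizeof(ActorSerializationHeader)) bytes, and each following section starts at the
// previous offset plus the previous section's size, rounded up to a multiple of 16 bytes:
//   visible chunk indices        3 * 4 = 12 bytes -> padded to 16
//   graph node indices           6 * 4 = 24 bytes -> padded to 32
//   lower-support chunk healths 10 * 4 = 40 bytes -> padded to 48
//   bond healths                 5 * 4 = 20 bytes -> padded to 32
//   fast route / hop counts      6 * 4 = 24 bytes each -> padded to 32 each
//   edge-removed bits            FixedBoolArray::requiredMemorySize(5); the grand total is then
//                                rounded up to a multiple of 16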
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastFamilyGraph.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTFAMILYGRAPH_H #define NVBLASTFAMILYGRAPH_H #include "NvBlastSupportGraph.h" #include "NvBlastFixedArray.h" #include "NvBlastFixedBitmap.h" #include "NvBlastFixedBoolArray.h" #include "NvBlastMath.h" #include "NvBlastFixedPriorityQueue.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { typedef uint32_t NodeIndex; typedef NodeIndex IslandId; typedef uint32_t ActorIndex; /** Internal implementation of family graph stored on the family. It processes full NvBlastSupportGraph graph, stores additional information used for faster islands finding, keeps and provides access to current islandId for every node. */ class FamilyGraph { public: //////// ctor //////// /** Constructor. family graph is meant to be placed (with placement new) on family memory. \param[in] nodeCount The number of nodes in the support graph (see SupportGraph) \param[in] bondCount The number of bonds in the support graph (see SupportGraph) */ FamilyGraph(uint32_t nodeCount, const uint32_t bondCount); /** Returns memory needed for this class (see fillMemory). \param[in] nodeCount The number of nodes in the graph. \param[in] bondCount The number of bonds in the graph. \return the number of bytes required. */ static size_t requiredMemorySize(uint32_t nodeCount, uint32_t bondCount) { return fillMemory(nullptr, nodeCount, bondCount); } //////// API //////// /** Function to initialize graph (all nodes added to dirty list for this actor) \param[in] actorIndex The index of the actor to initialize graph with. Must be in the range [0, m_nodeCount). \param[in] graph The static graph data for this family. */ void initialize(ActorIndex actorIndex, const SupportGraph* graph); /** Function to notify graph about removed edges. These nodes will be added to dirty list for this actor. Returns true if bond as removed. \param[in] actorIndex The index of the actor from which the edge is removed. Must be in the range [0, m_nodeCount). \param[in] node0 The index of the first node of removed edge. 
Must be in the range [0, m_nodeCount). \param[in] node1 The index of the second node of removed edge. Must be in the range [0, m_nodeCount). \param[in] graph The static graph data for this family. */ bool notifyEdgeRemoved(ActorIndex actorIndex, NodeIndex node0, NodeIndex node1, const SupportGraph* graph); bool notifyEdgeRemoved(ActorIndex actorIndex, NodeIndex node0, NodeIndex node1, uint32_t bondIndex, const SupportGraph* graph); bool notifyNodeRemoved(ActorIndex actorIndex, NodeIndex nodeIndex, const SupportGraph* graph); /** Function to find new islands by examining dirty nodes associated with this actor (they can be associated with the actor if notifyEdgeRemoved() was previously called for it). \param[in] actorIndex The index of the actor on which graph part (edges + nodes) findIslands will be performed. Must be in the range [0, m_nodeCount). \param[in] scratch User-supplied scratch memory of size findIslandsRequiredScratch(graphNodeCount) bytes. \param[in] graph The static graph data for this family. \return the number of new islands found. */ uint32_t findIslands(ActorIndex actorIndex, void* scratch, const SupportGraph* graph); /** The scratch space required to call the findIslands function, in bytes. \param[in] graphNodeCount The number of nodes in the graph. \return the number of bytes required. */ static size_t findIslandsRequiredScratch(uint32_t graphNodeCount); //////// data getters //////// /** Utility function to get the start of the island ids array. This is an array of size nodeCount. Every islandId is the NodeIndex of the root node of its island; it is set for every node. \return the array of island ids. */ NvBlastBlockData(IslandId, m_islandIdsOffset, getIslandIds); /** Utility function to get the start of the dirty node links array. This is an array of size nodeCount. */ NvBlastBlockData(NodeIndex, m_dirtyNodeLinksOffset, getDirtyNodeLinks); /** Utility function to get the start of the first dirty node indices array. This is an array of size nodeCount. */ NvBlastBlockData(uint32_t, m_firstDirtyNodeIndicesOffset, getFirstDirtyNodeIndices); /** Utility function to get the start of the fast route array. This is an array of size nodeCount. */ NvBlastBlockData(NodeIndex, m_fastRouteOffset, getFastRoute); /** Utility function to get the start of the hop counts array. This is an array of size nodeCount. */ NvBlastBlockData(uint32_t, m_hopCountsOffset, getHopCounts); /** Utility function to get the pointer to the is-edge-removed bitmap. This is a bitmap of size bondCount. */ NvBlastBlockData(FixedBoolArray, m_isEdgeRemovedOffset, getIsEdgeRemoved); /** Utility function to get the pointer to the is-node-in-dirty-list bitmap. This is a bitmap of size nodeCount. */ NvBlastBlockData(FixedBoolArray, m_isNodeInDirtyListOffset, getIsNodeInDirtyList); //////// Debug/Test //////// uint32_t getEdgesCount(const SupportGraph* graph) const; bool hasEdge(NodeIndex node0, NodeIndex node1, const SupportGraph* graph) const; bool canFindRoot(NodeIndex startNode, NodeIndex targetNode, FixedArray<NodeIndex>* visitedNodes, const SupportGraph* graph); private: FamilyGraph& operator = (const FamilyGraph&); //////// internal types //////// /** Used to represent the current graph traversal state.
*/ struct TraversalState { NodeIndex mNodeIndex; uint32_t mCurrentIndex; uint32_t mPrevIndex; uint32_t mDepth; TraversalState() { } TraversalState(NodeIndex nodeIndex, uint32_t currentIndex, uint32_t prevIndex, uint32_t depth) : mNodeIndex(nodeIndex), mCurrentIndex(currentIndex), mPrevIndex(prevIndex), mDepth(depth) { } }; /** Queue element for graph traversal with priority queue. */ struct QueueElement { TraversalState* mState; uint32_t mHopCount; QueueElement() { } QueueElement(TraversalState* state, uint32_t hopCount) : mState(state), mHopCount(hopCount) { } }; /** Queue comparator for graph traversal with priority queue. */ struct NodeComparator { NodeComparator() { } bool operator() (const QueueElement& node0, const QueueElement& node1) const { return node0.mHopCount < node1.mHopCount; } private: NodeComparator& operator = (const NodeComparator&); }; /** PriorityQueue for graph traversal. The queue element with the smallest hop count will always be on top. */ typedef FixedPriorityQueue<QueueElement, NodeComparator> NodePriorityQueue; //////// internal operations //////// /** Function that calculates the needed memory and fills it if familyGraph is passed. FamilyGraph is designed to use the memory right after itself, so it should be constructed with placement new on a block of requiredMemorySize() bytes. \param[in] familyGraph The pointer to the actual FamilyGraph instance which will be filled. Can be nullptr, in which case the function only returns the required byte count and does nothing else. \param[in] nodeCount The number of nodes in the graph. \param[in] bondCount The number of bonds in the graph. \return the number of bytes required or filled */ static size_t fillMemory(FamilyGraph* familyGraph, uint32_t nodeCount, uint32_t bondCount); /** Function to find a route from one node to another. It tries the fast path first as an optimization and, if that fails, performs a brute-force traversal (guided by the hop count heuristic). */ bool findRoute(NodeIndex startNode, NodeIndex targetNode, IslandId islandId, FixedArray<TraversalState>* visitedNodes, FixedBitmap* isNodeWitness, NodePriorityQueue* priorityQueue, const SupportGraph* graph); /** Function to try finding targetNode (from startNode) with getFastRoute(). */ bool tryFastPath(NodeIndex startNode, NodeIndex targetNode, IslandId islandId, FixedArray<TraversalState>* visitedNodes, FixedBitmap* isNodeWitness, const SupportGraph* graph); /** Function to unwind route upon successful finding of root node or witness. We have found either a witness *or* the root node with this traversal. In the event of finding the root node, hopCount will be 0. In the event of finding a witness, hopCount will be the hopCount that witness reported as being the distance to the root. */ void unwindRoute(uint32_t traversalIndex, NodeIndex lastNode, uint32_t hopCount, IslandId id, FixedArray<TraversalState>* visitedNodes); /** Function to add a node to the dirty node list associated with the actor. */ void addToDirtyNodeList(ActorIndex actorIndex, NodeIndex node); /** Function used to get the adjacent node using an index into adjacencyPartition, with a check against the is-edge-removed bitmap (returns invalidIndex if the bond has already been removed). */ NodeIndex getAdjacentNode(uint32_t adjacencyIndex, const SupportGraph* graph) const { const uint32_t bondIndex = graph->getAdjacentBondIndices()[adjacencyIndex]; return getIsEdgeRemoved()->test(bondIndex) ? invalidIndex<uint32_t>() : graph->getAdjacentNodeIndices()[adjacencyIndex]; } }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTFAMILYGRAPH_H
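For orientation, the following stand-alone sketch shows how the API above fits together. It is not SDK code: the std::aligned_alloc allocation, the caller-supplied SupportGraph pointer, actor index 0 and the node pair (0, 1) are assumptions for illustration; inside Blast this block lives in family memory and is sized and placed by the family itself.

// Hypothetical usage sketch: place a FamilyGraph on its own block, break one bond,
// and recompute islands for a single actor.
#include <cstdlib>
#include <new>
#include <vector>
#include "NvBlastFamilyGraph.h"

using namespace Nv::Blast;

uint32_t exampleIslandUpdate(const SupportGraph* graph)   // 'graph' provided by the caller (assumption)
{
    const uint32_t nodeCount = graph->m_nodeCount;
    const uint32_t bondCount = graph->getAdjacencyPartition()[nodeCount] / 2;   // bidirectional adjacency -> /2

    // FamilyGraph uses the memory immediately following itself, so construct it with
    // placement new on a 16-byte aligned block of requiredMemorySize() bytes.
    const size_t size = FamilyGraph::requiredMemorySize(nodeCount, bondCount);
    void* mem = std::aligned_alloc(16, (size + 15) & ~size_t(15));
    FamilyGraph* familyGraph = new (mem) FamilyGraph(nodeCount, bondCount);

    const ActorIndex actor = 0;                  // single-actor setup (assumption)
    familyGraph->initialize(actor, graph);       // marks every node dirty for this actor

    // Removing the edge between nodes 0 and 1 dirties both endpoints and breaks fast routes.
    familyGraph->notifyEdgeRemoved(actor, 0, 1, graph);

    // findIslands() drains the dirty list and reports how many new islands were created.
    std::vector<char> scratch(FamilyGraph::findIslandsRequiredScratch(nodeCount));
    const uint32_t newIslands = familyGraph->findIslands(actor, scratch.data(), graph);

    // Per-node island ids (== root node index of each island) are now up to date.
    const IslandId* islandIds = familyGraph->getIslandIds();
    (void)islandIds;

    std::free(mem);
    return newIslands;
}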
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastFamilyGraph.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastFamilyGraph.h" #include "NvBlastAssert.h" #include <vector> #include <stack> #define SANITY_CHECKS 0 namespace Nv { namespace Blast { size_t FamilyGraph::fillMemory(FamilyGraph* familyGraph, uint32_t nodeCount, uint32_t bondCount) { // calculate all offsets, and dataSize as a result NvBlastCreateOffsetStart(sizeof(FamilyGraph)); const size_t NvBlastCreateOffsetAlign16(dirtyNodeLinksOffset, sizeof(NodeIndex) * nodeCount); const size_t NvBlastCreateOffsetAlign16(firstDirtyNodeIndicesOffset, sizeof(uint32_t) * nodeCount); const size_t NvBlastCreateOffsetAlign16(islandIdsOffset, sizeof(IslandId) * nodeCount); const size_t NvBlastCreateOffsetAlign16(fastRouteOffset, sizeof(NodeIndex) * nodeCount); const size_t NvBlastCreateOffsetAlign16(hopCountsOffset, sizeof(uint32_t) * nodeCount); const size_t NvBlastCreateOffsetAlign16(isEdgeRemovedOffset, FixedBoolArray::requiredMemorySize(bondCount)); const size_t NvBlastCreateOffsetAlign16(isNodeInDirtyListOffset, FixedBoolArray::requiredMemorySize(nodeCount)); const size_t dataSize = NvBlastCreateOffsetEndAlign16(); // fill only if familyGraph was passed (otherwise we just used this function to get dataSize) if (familyGraph) { familyGraph->m_dirtyNodeLinksOffset = static_cast<uint32_t>(dirtyNodeLinksOffset); familyGraph->m_firstDirtyNodeIndicesOffset = static_cast<uint32_t>(firstDirtyNodeIndicesOffset); familyGraph->m_islandIdsOffset = static_cast<uint32_t>(islandIdsOffset); familyGraph->m_fastRouteOffset = static_cast<uint32_t>(fastRouteOffset); familyGraph->m_hopCountsOffset = static_cast<uint32_t>(hopCountsOffset); familyGraph->m_isEdgeRemovedOffset = static_cast<uint32_t>(isEdgeRemovedOffset); familyGraph->m_isNodeInDirtyListOffset = static_cast<uint32_t>(isNodeInDirtyListOffset); new (familyGraph->getIsEdgeRemoved()) FixedBoolArray(bondCount); new (familyGraph->getIsNodeInDirtyList()) FixedBoolArray(nodeCount); } return dataSize; } FamilyGraph::FamilyGraph(uint32_t nodeCount, const uint32_t bondCount) { // fill memory 
with all internal data // we need chunks count for size calculation fillMemory(this, nodeCount, bondCount); // fill arrays with invalid indices / max value (0xFFFFFFFF) memset(getIslandIds(), 0xFF, nodeCount*sizeof(uint32_t)); memset(getFastRoute(), 0xFF, nodeCount*sizeof(uint32_t)); memset(getHopCounts(), 0xFF, nodeCount*sizeof(uint32_t)); // Initializing to large value memset(getDirtyNodeLinks(), 0xFF, nodeCount*sizeof(uint32_t)); // No dirty list initially memset(getFirstDirtyNodeIndices(), 0xFF, nodeCount*sizeof(uint32_t)); getIsNodeInDirtyList()->clear(); getIsEdgeRemoved()->fill(); } /** Graph initialization, reset all internal data to initial state. Marks all nodes dirty for this actor. First island search probably would be the longest one, as it has to traverse whole graph and set all the optimization stuff like fastRoute and hopCounts for all nodes. */ void FamilyGraph::initialize(ActorIndex actorIndex, const SupportGraph* graph) { // used internal data pointers NodeIndex* dirtyNodeLinks = getDirtyNodeLinks(); uint32_t* firstDirtyNodeIndices = getFirstDirtyNodeIndices(); // link dirty nodes for (NodeIndex node = 1; node < graph->m_nodeCount; node++) { dirtyNodeLinks[node-1] = node; } firstDirtyNodeIndices[actorIndex] = 0; getIsNodeInDirtyList()->fill(); getIsEdgeRemoved()->clear(); } void FamilyGraph::addToDirtyNodeList(ActorIndex actorIndex, NodeIndex node) { // used internal data pointers FixedBoolArray* isNodeInDirtyList = getIsNodeInDirtyList(); NodeIndex* dirtyNodeLinks = getDirtyNodeLinks(); uint32_t* firstDirtyNodeIndices = getFirstDirtyNodeIndices(); // check for bitmap first for avoid O(n) list search if (isNodeInDirtyList->test(node)) return; // add node to dirty node list head dirtyNodeLinks[node] = firstDirtyNodeIndices[actorIndex]; firstDirtyNodeIndices[actorIndex] = node; isNodeInDirtyList->set(node); } /** Removes fast routes and marks involved nodes as dirty */ bool FamilyGraph::notifyEdgeRemoved(ActorIndex actorIndex, NodeIndex node0, NodeIndex node1, const SupportGraph* graph) { NVBLAST_ASSERT(node0 < graph->m_nodeCount); NVBLAST_ASSERT(node1 < graph->m_nodeCount); // used internal data pointers NodeIndex* fastRoute = getFastRoute(); const uint32_t* adjacencyPartition = graph->getAdjacencyPartition(); const uint32_t* adjacentBondIndices = graph->getAdjacentBondIndices(); // search for bond for (uint32_t adjacencyIndex = adjacencyPartition[node0]; adjacencyIndex < adjacencyPartition[node0 + 1]; adjacencyIndex++) { if (getAdjacentNode(adjacencyIndex, graph) == node1) { // found bond const uint32_t bondIndex = adjacentBondIndices[adjacencyIndex]; // remove bond getIsEdgeRemoved()->set(bondIndex); // broke fast route if it goes through this edge: if (fastRoute[node0] == node1) fastRoute[node0] = invalidIndex<uint32_t>(); if (fastRoute[node1] == node0) fastRoute[node1] = invalidIndex<uint32_t>(); // mark nodes dirty (add to list if doesn't exist) addToDirtyNodeList(actorIndex, node0); addToDirtyNodeList(actorIndex, node1); // we don't expect to be more than one bond between 2 nodes return true; } } return false; } bool FamilyGraph::notifyEdgeRemoved(ActorIndex actorIndex, NodeIndex node0, NodeIndex node1, uint32_t bondIndex, const SupportGraph* graph) { NV_UNUSED(graph); NVBLAST_ASSERT(node0 < graph->m_nodeCount); NVBLAST_ASSERT(node1 < graph->m_nodeCount); getIsEdgeRemoved()->set(bondIndex); NodeIndex* fastRoute = getFastRoute(); // broke fast route if it goes through this edge: if (fastRoute[node0] == node1) fastRoute[node0] = invalidIndex<uint32_t>(); if 
(fastRoute[node1] == node0) fastRoute[node1] = invalidIndex<uint32_t>(); // mark nodes dirty (add to list if doesn't exist) addToDirtyNodeList(actorIndex, node0); addToDirtyNodeList(actorIndex, node1); return true; } bool FamilyGraph::notifyNodeRemoved(ActorIndex actorIndex, NodeIndex nodeIndex, const SupportGraph* graph) { NVBLAST_ASSERT(nodeIndex < graph->m_nodeCount); // used internal data pointers NodeIndex* fastRoute = getFastRoute(); const uint32_t* adjacencyPartition = graph->getAdjacencyPartition(); const uint32_t* adjacentBondIndices = graph->getAdjacentBondIndices(); // remove all edges leaving this node for (uint32_t adjacencyIndex = adjacencyPartition[nodeIndex]; adjacencyIndex < adjacencyPartition[nodeIndex + 1]; adjacencyIndex++) { const uint32_t adjacentNodeIndex = getAdjacentNode(adjacencyIndex, graph); if (!isInvalidIndex(adjacentNodeIndex)) { const uint32_t bondIndex = adjacentBondIndices[adjacencyIndex]; getIsEdgeRemoved()->set(bondIndex); if (fastRoute[adjacentNodeIndex] == nodeIndex) fastRoute[adjacentNodeIndex] = invalidIndex<uint32_t>(); if (fastRoute[nodeIndex] == adjacentNodeIndex) fastRoute[nodeIndex] = invalidIndex<uint32_t>(); addToDirtyNodeList(actorIndex, adjacentNodeIndex); } } addToDirtyNodeList(actorIndex, nodeIndex); // ignore this node in partition (only needed for "chunk deleted from graph") // getIslandIds()[nodeIndex] = invalidIndex<uint32_t>(); return true; } void FamilyGraph::unwindRoute(uint32_t traversalIndex, NodeIndex lastNode, uint32_t hopCount, IslandId id, FixedArray<TraversalState>* visitedNodes) { // used internal data pointers IslandId* islandIds = getIslandIds(); NodeIndex* fastRoute = getFastRoute(); uint32_t* hopCounts = getHopCounts(); uint32_t currIndex = traversalIndex; uint32_t hc = hopCount + 1; //Add on 1 for the hop to the witness/root node. do { TraversalState& state = visitedNodes->at(currIndex); hopCounts[state.mNodeIndex] = hc++; islandIds[state.mNodeIndex] = id; fastRoute[state.mNodeIndex] = lastNode; currIndex = state.mPrevIndex; lastNode = state.mNodeIndex; } while(currIndex != invalidIndex<uint32_t>()); } bool FamilyGraph::tryFastPath(NodeIndex startNode, NodeIndex targetNode, IslandId islandId, FixedArray<TraversalState>* visitedNodes, FixedBitmap* isNodeWitness, const SupportGraph* graph) { NV_UNUSED(graph); // used internal data pointers IslandId* islandIds = getIslandIds(); NodeIndex* fastRoute = getFastRoute(); // prepare for iterating path NodeIndex currentNode = startNode; uint32_t visitedNotesInitialSize = visitedNodes->size(); uint32_t depth = 0; bool found = false; do { // witness ? if (isNodeWitness->test(currentNode)) { // Already visited and not tagged with invalid island == a witness! found = islandIds[currentNode] != invalidIndex<uint32_t>(); break; } // reached targetNode ? 
if (currentNode == targetNode) { found = true; break; } TraversalState state(currentNode, visitedNodes->size(), visitedNodes->size() - 1, depth++); visitedNodes->pushBack(state); NVBLAST_ASSERT(isInvalidIndex(fastRoute[currentNode]) || hasEdge(currentNode, fastRoute[currentNode], graph)); islandIds[currentNode] = invalidIndex<uint32_t>(); isNodeWitness->set(currentNode); currentNode = fastRoute[currentNode]; } while (currentNode != invalidIndex<uint32_t>()); for (uint32_t a = visitedNotesInitialSize; a < visitedNodes->size(); ++a) { TraversalState& state = visitedNodes->at(a); islandIds[state.mNodeIndex] = islandId; } // if fast path failed we have to remove all isWitness marks on visited nodes and nodes from visited list if (!found) { for (uint32_t a = visitedNotesInitialSize; a < visitedNodes->size(); ++a) { TraversalState& state = visitedNodes->at(a); isNodeWitness->reset(state.mNodeIndex); } visitedNodes->forceSize_Unsafe(visitedNotesInitialSize); } return found; } bool FamilyGraph::findRoute(NodeIndex startNode, NodeIndex targetNode, IslandId islandId, FixedArray<TraversalState>* visitedNodes, FixedBitmap* isNodeWitness, NodePriorityQueue* priorityQueue, const SupportGraph* graph) { // used internal data pointers IslandId* islandIds = getIslandIds(); NodeIndex* fastRoute = getFastRoute(); uint32_t* hopCounts = getHopCounts(); const uint32_t* adjacencyPartition = graph->getAdjacencyPartition(); // Firstly, traverse the fast path and tag up witnesses. TryFastPath can fail. In that case, no witnesses are left but this node is permitted to report // that it is still part of the island. Whichever node lost its fast path will be tagged as dirty and will be responsible for recovering the fast path // and tagging up the visited nodes if (fastRoute[startNode] != invalidIndex<uint32_t>()) { if (tryFastPath(startNode, targetNode, islandId, visitedNodes, isNodeWitness, graph)) return true; } // If we got here, there was no fast path. Therefore, we need to fall back on searching for the root node. This is optimized by using "hop counts". // These are per-node counts that indicate the expected number of hops from this node to the root node. These are lazily evaluated and updated // as new edges are formed or when traversals occur to re-establish islands. As a result, they may be inaccurate but they still serve the purpose // of guiding our search to minimize the chances of us doing an exhaustive search to find the root node. islandIds[startNode] = invalidIndex<uint32_t>(); TraversalState startTraversal(startNode, visitedNodes->size(), invalidIndex<uint32_t>(), 0); isNodeWitness->set(startNode); QueueElement element(&visitedNodes->pushBack(startTraversal), hopCounts[startNode]); priorityQueue->push(element); do { QueueElement currentQE = priorityQueue->pop(); TraversalState& currentState = *currentQE.mState; NodeIndex& currentNode = currentState.mNodeIndex; // iterate all edges of currentNode for (uint32_t adjacencyIndex = adjacencyPartition[currentNode]; adjacencyIndex < adjacencyPartition[currentNode + 1]; adjacencyIndex++) { NodeIndex nextIndex = getAdjacentNode(adjacencyIndex, graph); if (nextIndex != invalidIndex<uint32_t>()) { if (nextIndex == targetNode) { // targetNode found! unwindRoute(currentState.mCurrentIndex, nextIndex, 0, islandId, visitedNodes); return true; } if (isNodeWitness->test(nextIndex)) { // We already visited this node. This means that it's either in the priority queue already or we // visited in on a previous pass. 
If it was visited on a previous pass, then it already knows what island it's in. // We now need to test the island id to find out if this node knows the root. // If it has a valid root id, that id *is* our new root. We can guesstimate our hop count based on the node's properties IslandId visitedIslandId = islandIds[nextIndex]; if (visitedIslandId != invalidIndex<uint32_t>()) { // If we get here, we must have found a node that knows a route to our root node. It must not be a different island // because that would caused me to have been visited already because totally separate islands trigger a full traversal on // the orphaned side. NVBLAST_ASSERT(visitedIslandId == islandId); unwindRoute(currentState.mCurrentIndex, nextIndex, hopCounts[nextIndex], islandId, visitedNodes); return true; } } else { // This node has not been visited yet, so we need to push it into the stack and continue traversing TraversalState state(nextIndex, visitedNodes->size(), currentState.mCurrentIndex, currentState.mDepth + 1); QueueElement qe(&visitedNodes->pushBack(state), hopCounts[nextIndex]); priorityQueue->push(qe); isNodeWitness->set(nextIndex); NVBLAST_ASSERT(islandIds[nextIndex] == islandId); islandIds[nextIndex] = invalidIndex<uint32_t>(); //Flag as invalid island until we know whether we can find root or an island id. } } } } while (priorityQueue->size()); return false; } size_t FamilyGraph::findIslandsRequiredScratch(uint32_t graphNodeCount) { const size_t visitedNodesSize = align16(FixedArray<TraversalState>::requiredMemorySize(graphNodeCount)); const size_t isNodeWitnessSize = align16(FixedBitmap::requiredMemorySize(graphNodeCount)); const size_t priorityQueueSize = align16(NodePriorityQueue::requiredMemorySize(graphNodeCount)); // Aligned and padded return 16 + visitedNodesSize + isNodeWitnessSize + priorityQueueSize; } uint32_t FamilyGraph::findIslands(ActorIndex actorIndex, void* scratch, const SupportGraph* graph) { // check if we have at least 1 dirty node for this actor before proceeding uint32_t* firstDirtyNodeIndices = getFirstDirtyNodeIndices(); if (isInvalidIndex(firstDirtyNodeIndices[actorIndex])) return 0; // used internal data pointers IslandId* islandIds = getIslandIds(); NodeIndex* fastRoute = getFastRoute(); uint32_t* hopCounts = getHopCounts(); NodeIndex* dirtyNodeLinks = getDirtyNodeLinks(); FixedBoolArray* isNodeInDirtyList = getIsNodeInDirtyList(); // prepare intermediate data on scratch scratch = (void*)align16((size_t)scratch); // Bump to 16-byte alignment (see padding in findIslandsRequiredScratch) const uint32_t nodeCount = graph->m_nodeCount; FixedArray<TraversalState>* visitedNodes = new (scratch)FixedArray<TraversalState>(); scratch = pointerOffset(scratch, align16(FixedArray<TraversalState>::requiredMemorySize(nodeCount))); FixedBitmap* isNodeWitness = new (scratch)FixedBitmap(nodeCount); scratch = pointerOffset(scratch, align16(FixedBitmap::requiredMemorySize(nodeCount))); NodePriorityQueue* priorityQueue = new (scratch)NodePriorityQueue(); scratch = pointerOffset(scratch, align16(NodePriorityQueue::requiredMemorySize(nodeCount))); // reset nodes visited bitmap isNodeWitness->clear(); uint32_t newIslandsCount = 0; while (!isInvalidIndex(firstDirtyNodeIndices[actorIndex])) { // Pop head off of dirty node's list const NodeIndex dirtyNode = firstDirtyNodeIndices[actorIndex]; firstDirtyNodeIndices[actorIndex] = dirtyNodeLinks[dirtyNode]; dirtyNodeLinks[dirtyNode] = invalidIndex<uint32_t>(); NVBLAST_ASSERT(isNodeInDirtyList->test(dirtyNode)); isNodeInDirtyList->reset(dirtyNode); 
// clear PriorityQueue priorityQueue->clear(); // if we already visited this node before in this loop it's not dirty anymore if (isNodeWitness->test(dirtyNode)) continue; const IslandId& islandRootNode = islandIds[dirtyNode]; IslandId islandId = islandRootNode; // the same in this implementation // if this node is island root node we don't need to do anything if (islandRootNode == dirtyNode) continue; // clear visited notes list (to fill during traverse) visitedNodes->clear(); // try finding island root node from this dirtyNode if (findRoute(dirtyNode, islandRootNode, islandId, visitedNodes, isNodeWitness, priorityQueue, graph)) { // We found the root node so let's let every visited node know that we found its root // and we can also update our hop counts because we recorded how many hops it took to reach this // node // We already filled in the path to the root/witness with accurate hop counts. Now we just need to fill in the estimates // for the remaining nodes and re-define their islandIds. We approximate their path to the root by just routing them through // the route we already found. // This loop works because visitedNodes are recorded in the order they were visited and we already filled in the critical path // so the remainder of the paths will just fork from that path. for (uint32_t b = 0; b < visitedNodes->size(); ++b) { TraversalState& state = visitedNodes->at(b); if (isInvalidIndex(islandIds[state.mNodeIndex])) { hopCounts[state.mNodeIndex] = hopCounts[visitedNodes->at(state.mPrevIndex).mNodeIndex] + 1; fastRoute[state.mNodeIndex] = visitedNodes->at(state.mPrevIndex).mNodeIndex; islandIds[state.mNodeIndex] = islandId; } } } else { // NEW ISLAND BORN! // If I traversed and could not find the root node, then I have established a new island. In this island, I am the root node // and I will point all my nodes towards me. Furthermore, I have established how many steps it took to reach all nodes in my island // OK. We need to separate the islands. We have a list of nodes that are part of the new island (visitedNodes) and we know that the // first node in that list is the root node. #if SANITY_CHECKS NVBLAST_ASSERT(!canFindRoot(dirtyNode, islandRootNode, NULL)); #endif IslandId newIsland = dirtyNode; newIslandsCount++; hopCounts[dirtyNode] = 0; fastRoute[dirtyNode] = invalidIndex<uint32_t>(); islandIds[dirtyNode] = newIsland; for (uint32_t a = 1; a < visitedNodes->size(); ++a) { NodeIndex visitedNode = visitedNodes->at(a).mNodeIndex; hopCounts[visitedNode] = visitedNodes->at(a).mDepth; //How many hops to root fastRoute[visitedNode] = visitedNodes->at(visitedNodes->at(a).mPrevIndex).mNodeIndex; islandIds[visitedNode] = newIsland; } } } // all dirty nodes processed return newIslandsCount; } /** !!! Debug/Test function. Function to check that root between nodes exists. 
*/ bool FamilyGraph::canFindRoot(NodeIndex startNode, NodeIndex targetNode, FixedArray<NodeIndex>* visitedNodes, const SupportGraph* graph) { if (visitedNodes) visitedNodes->pushBack(startNode); if (startNode == targetNode) return true; std::vector<bool> visitedState; visitedState.resize(graph->m_nodeCount); for (uint32_t i = 0; i < graph->m_nodeCount; i++) visitedState[i] = false; std::stack<NodeIndex> stack; stack.push(startNode); visitedState[startNode] = true; const uint32_t* adjacencyPartition = graph->getAdjacencyPartition(); do { NodeIndex currentNode = stack.top(); stack.pop(); for (uint32_t adjacencyIndex = adjacencyPartition[currentNode]; adjacencyIndex < adjacencyPartition[currentNode + 1]; adjacencyIndex++) { NodeIndex nextNode = getAdjacentNode(adjacencyIndex, graph); if (isInvalidIndex(nextNode)) continue; if (!visitedState[nextNode]) { if (nextNode == targetNode) { return true; } visitedState[nextNode] = true; stack.push(nextNode); if (visitedNodes) visitedNodes->pushBack(nextNode); } } } while (!stack.empty()); return false; } /** !!! Debug/Test function. Function to check if edge exists. */ bool FamilyGraph::hasEdge(NodeIndex node0, NodeIndex node1, const SupportGraph* graph) const { const uint32_t* adjacencyPartition = graph->getAdjacencyPartition(); uint32_t edges = 0; for (uint32_t adjacencyIndex = adjacencyPartition[node0]; adjacencyIndex < adjacencyPartition[node0 + 1]; adjacencyIndex++) { if (getAdjacentNode(adjacencyIndex, graph) == node1) { edges++; break; } } for (uint32_t adjacencyIndex = adjacencyPartition[node1]; adjacencyIndex < adjacencyPartition[node1 + 1]; adjacencyIndex++) { if (getAdjacentNode(adjacencyIndex, graph) == node0) { edges++; break; } } return edges > 0; } /** !!! Debug/Test function. Function to calculate and return edges count */ uint32_t FamilyGraph::getEdgesCount(const SupportGraph* graph) const { const uint32_t* adjacencyPartition = graph->getAdjacencyPartition(); uint32_t edges = 0; for (NodeIndex n = 0; n < graph->m_nodeCount; n++) { for (uint32_t adjacencyIndex = adjacencyPartition[n]; adjacencyIndex < adjacencyPartition[n + 1]; adjacencyIndex++) { if (getAdjacentNode(adjacencyIndex, graph) != invalidIndex<uint32_t>()) edges++; } } NVBLAST_ASSERT(edges % 2 == 0); return edges / 2; } } // namespace Nv } // namespace Blast
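To make the control flow of findIslands() easier to follow, here is a deliberately simplified stand-alone version of the per-dirty-node decision it makes. This is not the SDK algorithm: it drops the fast routes, hop-count priority queue and witness short-circuiting, and just does a breadth-first search from each dirty node toward its recorded island root, promoting the dirty node to a new root when the root cannot be reached.

// Simplified illustration (not SDK code) of the core island-splitting rule:
// if a dirty node can no longer reach its island root, the nodes it can reach
// form a new island rooted at that dirty node.
#include <cstdint>
#include <queue>
#include <vector>

using SimpleNodeIndex = uint32_t;

uint32_t recomputeIslandsNaive(const std::vector<std::vector<SimpleNodeIndex>>& adjacency,
                               std::vector<SimpleNodeIndex>& islandIds,      // islandIds[n] == root of n's island
                               const std::vector<SimpleNodeIndex>& dirtyNodes)
{
    uint32_t newIslands = 0;
    for (SimpleNodeIndex dirty : dirtyNodes)
    {
        const SimpleNodeIndex root = islandIds[dirty];
        if (root == dirty)
            continue;                             // already an island root, nothing to prove

        std::vector<bool> visited(adjacency.size(), false);
        std::vector<SimpleNodeIndex> component;
        std::queue<SimpleNodeIndex> queue;
        queue.push(dirty);
        visited[dirty] = true;
        bool foundRoot = false;

        while (!queue.empty() && !foundRoot)
        {
            const SimpleNodeIndex n = queue.front();
            queue.pop();
            component.push_back(n);
            for (SimpleNodeIndex next : adjacency[n])
            {
                if (next == root) { foundRoot = true; break; }
                if (!visited[next]) { visited[next] = true; queue.push(next); }
            }
        }

        if (!foundRoot)
        {
            ++newIslands;                         // new island born (cf. findIslands above)
            for (SimpleNodeIndex n : component)
                islandIds[n] = dirty;             // the dirty node becomes the new root
        }
    }
    return newIslands;
}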
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastAsset.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTASSET_H #define NVBLASTASSET_H #include "NvBlastSupportGraph.h" #include "NvBlast.h" #include "NvBlastAssert.h" #include "NvBlastIndexFns.h" #include "NvBlastChunkHierarchy.h" namespace Nv { namespace Blast { class Asset : public NvBlastAsset { public: /** Struct-enum which is used to mark chunk descriptors when building an asset. */ struct ChunkAnnotation { enum Enum { Parent = (1 << 0), Support = (1 << 1), SuperSupport = (1 << 2), // Combinations UpperSupport = Support | SuperSupport }; }; /** Create an asset from a descriptor. \param[in] mem Pointer to block of memory of at least the size given by getMemorySize(desc). Must be 16-byte aligned. \param[in] desc Asset descriptor (see NvBlastAssetDesc). \param[in] scratch User-supplied scratch memory of size createRequiredScratch(desc) bytes. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the pointer to the new asset, or nullptr if unsuccessful. */ static Asset* create(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn); /** Returns the number of bytes of memory that an asset created using the given descriptor will require. A pointer to a block of memory of at least this size must be passed in as the mem argument of create. \param[in] desc The asset descriptor that will be passed into NvBlastCreateAsset. */ static size_t getMemorySize(const NvBlastAssetDesc* desc); /** Returns the size of the scratch space (in bytes) required to be passed into the create function, based upon the input descriptor that will be passed to the create function. \param[in] desc The descriptor that will be passed to the create function. \return the number of bytes required. */ static size_t createRequiredScratch(const NvBlastAssetDesc* desc, NvBlastLog logFn); /** Returns the number of upper-support chunks in this asset.. \return the number of upper-support chunks. 
*/ uint32_t getUpperSupportChunkCount() const; /** Returns the number of lower-support chunks in this asset. This is the required actor buffer size for an Actor family. \return the number of lower-support chunks. */ uint32_t getLowerSupportChunkCount() const; /** Returns the number of bonds in this asset's support graph. \return the number of bonds in this asset's support graph. */ uint32_t getBondCount() const; /** Returns the number of separate chunk hierarchies in the asset. This will be the initial number of visible chunks in an actor instanced from this asset. \return the number of separate chunk hierarchies in the asset. */ uint32_t getHierarchyCount() const; /** Maps all lower-support chunk indices to a contiguous range [0, getLowerSupportChunkCount()). \param[in] chunkIndex Asset chunk index. \return an index in the range [0, getLowerSupportChunkCount()) if it is a lower-support chunk, invalidIndex<uint32_t>() otherwise. */ uint32_t getContiguousLowerSupportIndex(uint32_t chunkIndex) const; // Static functions /** Function to ensure support coverage of chunks. Support chunks (marked in the NvBlastChunkDesc struct) must provide full coverage over the asset. This means that from any leaf chunk to the root node, exactly one chunk must be support. If this condition is not met, the actual support chunks will be adjusted accordingly. Chunk order depends on support coverage, so this function should be called before chunk reordering. \param[out] supportChunkCount The number of support chunks. NOTE - this value is not meaningful if testOnly = true and the return value is false. \param[out] leafChunkCount The number of leaf chunks. NOTE - this value is not meaningful if testOnly = true and the return value is false. \param[out] chunkAnnotation User-supplied char array of size chunkCount. NOTE - these values are not meaningful if testOnly = true and the return value is false. \param[in] chunkCount The number of chunk descriptors. \param[in] chunkDescs Array of chunk descriptors of size chunkCount. It will be updated accordingly. \param[in] testOnly If true, this function early-outs if support coverage is not exact. If false, exact coverage is ensured by possibly modifying chunkDescs' flags. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return true iff coverage was already exact. */ static bool ensureExactSupportCoverage(uint32_t& supportChunkCount, uint32_t& leafChunkCount, char* chunkAnnotation, uint32_t chunkCount, NvBlastChunkDesc* chunkDescs, bool testOnly, NvBlastLog logFn); /** Tests a set of chunk descriptors to see if chunks are in valid chunk order. Chunk order conditions checked: 1. 'all chunks with same parent index should go in a row'. 2. 'chunks should come after their parents'. 3. 'root chunks should go first'. 4. 'upper-support chunks should come before subsupport chunks'. \param[in] chunkCount The number of chunk descriptors. \param[in] chunkDescs An array of chunk descriptors of length chunkCount. \param[in] chunkAnnotation Annotation generated from ensureExactSupportCoverage (see ensureExactSupportCoverage). \param[in] scratch User-supplied scratch memory of chunkCount bytes. \return true if the descriptors meet the ordering conditions, false otherwise. */ static bool testForValidChunkOrder(uint32_t chunkCount, const NvBlastChunkDesc* chunkDescs, const char* chunkAnnotation, void* scratch); //////// Data //////// /** Asset data block header. */ NvBlastDataBlock m_header; /** ID for this asset.
*/ NvBlastID m_ID; /** The total number of chunks in the asset, support and non-support. */ uint32_t m_chunkCount; /** The support graph. */ SupportGraph m_graph; /** The number of leaf chunks in the asset. */ uint32_t m_leafChunkCount; /** Chunks are sorted such that subsupport chunks come last. This is the first subsupport chunk index. Equals m_chunkCount if there are no subsupport chunks. */ uint32_t m_firstSubsupportChunkIndex; /** The number of bonds in the asset. */ uint32_t m_bondCount; /** Chunks, of type NvBlastChunk. getChunks returns an array of size m_chunkCount. */ NvBlastBlockArrayData(NvBlastChunk, m_chunksOffset, getChunks, m_chunkCount); /** Array of bond data for the interfaces between two chunks. Since the bond is shared by two chunks, the same bond data is used for chunk[i] -> chunk[j] as for chunk[j] -> chunk[i]. The size of the array is m_graph.adjacencyPartition[m_graph.m_nodeCount]/2. See NvBlastBond. getBonds returns an array of size m_bondCount. */ NvBlastBlockArrayData(NvBlastBond, m_bondsOffset, getBonds, m_bondCount); /** Caching the number of leaf chunks descended from each chunk (including the chunk itself). This data parallels the Chunks array, and is an array of the same size. getSubtreeLeafChunkCount returns a uint32_t array of size m_chunkCount. */ NvBlastBlockArrayData(uint32_t, m_subtreeLeafChunkCountsOffset, getSubtreeLeafChunkCounts, m_chunkCount); /** Mapping from chunk index to graph node index (inverse of m_graph.getChunkIndices(). getChunkToGraphNodeMap returns a uint32_t array of size m_chunkCount. */ NvBlastBlockArrayData(uint32_t, m_chunkToGraphNodeMapOffset, getChunkToGraphNodeMap, m_chunkCount); //////// Iterators //////// /** Chunk hierarchy depth-first iterator. Traverses subtree with root given by startChunkIndex. If upperSupportOnly == true, then the iterator will not traverse subsuppport chunks. */ class DepthFirstIt : public ChunkDepthFirstIt { public: /** Constructed from an asset. */ DepthFirstIt(const Asset& asset, uint32_t startChunkIndex, bool upperSupportOnly = false) : ChunkDepthFirstIt(asset.getChunks(), startChunkIndex, upperSupportOnly ? asset.getUpperSupportChunkCount() : asset.m_chunkCount) {} }; }; //////// Asset inline member functions //////// NV_INLINE uint32_t Asset::getUpperSupportChunkCount() const { return m_firstSubsupportChunkIndex; } NV_INLINE uint32_t Asset::getLowerSupportChunkCount() const { return m_graph.m_nodeCount + (m_chunkCount - m_firstSubsupportChunkIndex); } NV_INLINE uint32_t Asset::getBondCount() const { NVBLAST_ASSERT((m_graph.getAdjacencyPartition()[m_graph.m_nodeCount] & 1) == 0); // The bidirectional graph data should have an even number of edges return m_graph.getAdjacencyPartition()[m_graph.m_nodeCount] / 2; // Directional bonds, divide by two } NV_INLINE uint32_t Asset::getHierarchyCount() const { const NvBlastChunk* chunks = getChunks(); for (uint32_t i = 0; i < m_chunkCount; ++i) { if (!isInvalidIndex(chunks[i].parentChunkIndex)) { return i; } } return m_chunkCount; } NV_INLINE uint32_t Asset::getContiguousLowerSupportIndex(uint32_t chunkIndex) const { NVBLAST_ASSERT(chunkIndex < m_chunkCount); return chunkIndex < m_firstSubsupportChunkIndex ? getChunkToGraphNodeMap()[chunkIndex] : (chunkIndex - m_firstSubsupportChunkIndex + m_graph.m_nodeCount); } //JDM: Expose this so serialization layer can use it. 
NV_C_API Asset* initializeAsset(void* mem, uint32_t chunkCount, uint32_t graphNodeCount, uint32_t leafChunkCount, uint32_t firstSubsupportChunkIndex, uint32_t bondCount, NvBlastLog logFn); } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTASSET_H
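As a quick reference for the creation path declared above, here is a hedged sketch that sizes, allocates and creates an Asset with the static member functions from this header. The descriptor preparation is elided, the helper name and aligned allocation are assumptions, and application code would normally go through the public NvBlast C API rather than Nv::Blast::Asset directly.

// Hypothetical helper (not SDK code) showing the size/scratch/create sequence.
#include <cstdlib>
#include <vector>
#include "NvBlastAsset.h"

using namespace Nv::Blast;

Asset* buildAssetSketch(const NvBlastAssetDesc& desc, NvBlastLog logFn)
{
    // Query the memory and scratch requirements first; asset memory must be 16-byte aligned.
    const size_t memSize     = Asset::getMemorySize(&desc);
    const size_t scratchSize = Asset::createRequiredScratch(&desc, logFn);

    void* mem = std::aligned_alloc(16, (memSize + 15) & ~size_t(15));
    std::vector<char> scratch(scratchSize);

    // create() returns nullptr on failure (for example, when support coverage or chunk order is invalid).
    Asset* asset = Asset::create(mem, &desc, scratch.data(), logFn);
    if (!asset)
        std::free(mem);
    return asset;
}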
NVIDIA-Omniverse/PhysX/blast/source/shared/NvTask/include/NvGpuDispatcher.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_GPU_DISPATCHER_H #define NV_GPU_DISPATCHER_H #include "NvTaskDefine.h" #include "NvTask.h" /* forward decl to avoid including <cuda.h> */ typedef struct CUstream_st* CUstream; namespace nvidia { namespace cudamanager { struct NvGpuCopyDesc; class NvCudaContextManager; } namespace task { NV_PUSH_PACK_DEFAULT class NvTaskManager; /** \brief A GpuTask dispatcher * * A NvGpuDispatcher executes GpuTasks submitted by one or more TaskManagers (one * or more scenes). It maintains a CPU worker thread which waits on GpuTask * "groups" to be submitted. The submission API is explicitly sessioned so that * GpuTasks are dispatched together as a group whenever possible to improve * parallelism on the GPU. * * A NvGpuDispatcher cannot be allocated ad-hoc, they are created as a result of * creating a NvCudaContextManager. Every NvCudaContextManager has a NvGpuDispatcher * instance that can be queried. In this way, each NvGpuDispatcher is tied to * exactly one CUDA context. * * A scene will use CPU fallback Tasks for GpuTasks if the NvTaskManager provided * to it does not have a NvGpuDispatcher. For this reason, the NvGpuDispatcher must * be assigned to the NvTaskManager before the NvTaskManager is given to a scene. * * Multiple TaskManagers may safely share a single NvGpuDispatcher instance, thus * enabling scenes to share a CUDA context. * * Only failureDetected() is intended for use by the user. The rest of the * nvGpuDispatcher public methods are reserved for internal use by only both * TaskManagers and GpuTasks. */ class NvGpuDispatcher { public: /** \brief Record the start of a simulation step * * A NvTaskManager calls this function to record the beginning of a simulation * step. The NvGpuDispatcher uses this notification to initialize the * profiler state. 
*/ virtual void startSimulation() = 0; /** \brief Record the start of a GpuTask batch submission * * A NvTaskManager calls this function to notify the NvGpuDispatcher that one or * more GpuTasks are about to be submitted for execution. The NvGpuDispatcher * will not read the incoming task queue until it receives one finishGroup() * call for each startGroup() call. This is to ensure as many GpuTasks as * possible are executed together as a group, generating optimal parallelism * on the GPU. */ virtual void startGroup() = 0; /** \brief Submit a GpuTask for execution * * Submitted tasks are pushed onto an incoming queue. The NvGpuDispatcher * will take the contents of this queue every time the pending group count * reaches 0 and run the group of submitted GpuTasks as an interleaved * group. */ virtual void submitTask(NvTask& task) = 0; /** \brief Record the end of a GpuTask batch submission * * A NvTaskManager calls this function to notify the NvGpuDispatcher that it is * done submitting a group of GpuTasks (GpuTasks which were all make ready * to run by the same prerequisite dependency becoming resolved). If no * other group submissions are in progress, the NvGpuDispatcher will execute * the set of ready tasks. */ virtual void finishGroup() = 0; /** \brief Add a CUDA completion prerequisite dependency to a task * * A GpuTask calls this function to add a prerequisite dependency on another * task (usually a CpuTask) preventing that task from starting until all of * the CUDA kernels and copies already launched have been completed. The * NvGpuDispatcher will increment that task's reference count, blocking its * execution, until the CUDA work is complete. * * This is generally only required when a CPU task is expecting the results * of the CUDA kernels to have been copied into host memory. * * This mechanism is not at all not required to ensure CUDA kernels and * copies are issued in the correct order. Kernel issue order is determined * by normal task dependencies. The rule of thumb is to only use a blocking * completion prerequisite if the task in question depends on a completed * GPU->Host DMA. * * The NvGpuDispatcher issues a blocking event record to CUDA for the purposes * of tracking the already submitted CUDA work. When this event is * resolved, the NvGpuDispatcher manually decrements the reference count of * the specified task, allowing it to execute (assuming it does not have * other pending prerequisites). */ virtual void addCompletionPrereq(NvBaseTask& task) = 0; /** \brief Retrieve the NvCudaContextManager associated with this * NvGpuDispatcher * * Every NvCudaContextManager has one NvGpuDispatcher, and every NvGpuDispatcher * has one NvCudaContextManager. */ virtual cudamanager::NvCudaContextManager* getCudaContextManager() = 0; /** \brief Record the end of a simulation frame * * A NvTaskManager calls this function to record the completion of its * dependency graph. If profiling is enabled, the NvGpuDispatcher will * trigger the retrieval of profiling data from the GPU at this point. */ virtual void stopSimulation() = 0; /** \brief Returns true if a CUDA call has returned a non-recoverable error * * A return value of true indicates a fatal error has occurred. To protect * itself, the NvGpuDispatcher enters a fall through mode that allows GpuTasks * to complete without being executed. This allows simulations to continue * but leaves GPU content static or corrupted. * * The user may try to recover from these failures by deleting GPU content * so the visual artifacts are minimized. 
But there is no way to recover * the state of the GPU actors before the failure. Once a CUDA context is * in this state, the only recourse is to create a new CUDA context, a new * scene, and start over. * * This is our "Best Effort" attempt to not turn a soft failure into a hard * failure because continued use of a CUDA context after it has returned an * error will usually result in a driver reset. However if the initial * failure was serious enough, a reset may have already occurred by the time * we learn of it. */ virtual bool failureDetected() const = 0; /** \brief Force the NvGpuDispatcher into failure mode * * This API should be used if user code detects a non-recoverable CUDA * error. This ensures the NvGpuDispatcher does not launch any further * CUDA work. Subsequent calls to failureDetected() will return true. */ virtual void forceFailureMode() = 0; /** \brief Returns a pointer to the current in-use profile buffer * * The returned pointer should be passed to all kernel launches to enable * CTA/Warp level profiling. If a data collector is not attached, or CTA * profiling is not enabled, the pointer will be zero. */ virtual void* getCurrentProfileBuffer() const = 0; /** \brief Register kernel names with PlatformAnalyzer * * The returned uint16_t must be stored and used as a base offset for the ID * passed to the KERNEL_START|STOP_EVENT macros. */ virtual uint16_t registerKernelNames(const char**, uint16_t count) = 0; /** \brief Launch a copy kernel with arbitrary number of copy commands * * This method is intended to be called from Kernel GpuTasks, but it can * function outside of that context as well. * * If count is 1, the descriptor is passed to the kernel as arguments, so it * may be declared on the stack. * * If count is greater than 1, the kernel will read the descriptors out of * host memory. Because of this, the descriptor array must be located in * page locked (pinned) memory. The provided descriptors may be modified by * this method (converting host pointers to their GPU mapped equivalents) * and should be considered *owned* by CUDA until the current batch of work * has completed, so descriptor arrays should not be freed or modified until * you have received a completion notification. * * If your GPU does not support mapping of page locked memory (SM>=1.1), * this function degrades to calling CUDA copy methods. */ virtual void launchCopyKernel(cudamanager::NvGpuCopyDesc* desc, uint32_t count, CUstream stream) = 0; /** \brief Query pre launch task that runs before launching gpu kernels. * * This is part of an optional feature to schedule multiple gpu features * at the same time to get kernels to run in parallel. * \note Do *not* set the continuation on the returned task, but use addPreLaunchDependent(). */ virtual NvBaseTask& getPreLaunchTask() = 0; /** \brief Adds a gpu launch task that gets executed after the pre launch task. * * This is part of an optional feature to schedule multiple gpu features * at the same time to get kernels to run in parallel. * \note Each call adds a reference to the pre-launch task. */ virtual void addPreLaunchDependent(NvBaseTask& dependent) = 0; /** \brief Query post launch task that runs after the gpu is done. * * This is part of an optional feature to schedule multiple gpu features * at the same time to get kernels to run in parallel. * \note Do *not* set the continuation on the returned task, but use addPostLaunchDependent(). */ virtual NvBaseTask& getPostLaunchTask() = 0; /** \brief Adds a task that gets executed after the post launch task. 
* * This is part of an optional feature to schedule multiple gpu features * at the same time to get kernels to run in parallel. * \note Each call adds a reference to the pre-launch task. */ virtual void addPostLaunchDependent(NvBaseTask& dependent) = 0; protected: /** \brief protected destructor * * GpuDispatchers are allocated and freed by their NvCudaContextManager. */ virtual ~NvGpuDispatcher() {} }; NV_POP_PACK } } // end nvidia namespace #endif
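The comments above describe a bracketed submission protocol (startGroup/submitTask/finishGroup). The fragment below is only a hedged illustration of that call order from the point of view of a TaskManager-like caller; NvGpuDispatcher instances come from a NvCudaContextManager, and per the class notes these methods are driven by the SDK's TaskManagers and GpuTasks rather than by user code.

// Hypothetical illustration of the batched submission protocol (not user-facing SDK code).
#include <vector>
#include "NvGpuDispatcher.h"

using namespace nvidia::task;

void dispatchGpuBatch(NvGpuDispatcher& dispatcher, const std::vector<NvTask*>& readyGpuTasks)
{
    dispatcher.startGroup();                 // open the batch; the incoming queue is not read yet
    for (NvTask* task : readyGpuTasks)
        dispatcher.submitTask(*task);        // push onto the incoming queue
    dispatcher.finishGroup();                // once every startGroup() is matched, the batch runs interleaved on the GPU
}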
NVIDIA-Omniverse/PhysX/blast/source/shared/NvTask/include/NvTaskDefine.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_TASK_DEFINE_H #define NV_TASK_DEFINE_H #include "NvPreprocessor.h" #define NV_SUPPORT_GPU ((NV_WINDOWS_FAMILY && !NV_WINRT) || NV_LINUX) namespace nvidia { namespace task { #ifndef NV_SUPPORT_NVTASK_PROFILING #define NV_SUPPORT_NVTASK_PROFILING 1 #endif } } // end nvidia namespace #endif
NVIDIA-Omniverse/PhysX/blast/source/shared/NvTask/include/NvTaskManager.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_TASK_MANAGER_H #define NV_TASK_MANAGER_H #include "NvTaskDefine.h" #include "NvSimpleTypes.h" #include "NvErrorCallback.h" namespace nvidia { namespace task { NV_PUSH_PACK_DEFAULT class NvBaseTask; class NvTask; class NvLightCpuTask; typedef unsigned int NvTaskID; /** \brief Identifies the type of each heavyweight NvTask object \note This enum type is only used by NvTask and GpuTask objects, LightCpuTasks do not use this enum. @see NvTask @see NvLightCpuTask */ struct NvTaskType { /** * \brief Identifies the type of each heavyweight NvTask object */ enum Enum { TT_CPU, //!< NvTask will be run on the CPU TT_GPU, //!< NvTask will be run on the GPU TT_NOT_PRESENT, //!< Return code when attempting to find a task that does not exist TT_COMPLETED //!< NvTask execution has been completed }; }; class NvCpuDispatcher; class NvGpuDispatcher; /** \brief The NvTaskManager interface A NvTaskManager instance holds references to user-provided dispatcher objects, when tasks are submitted the NvTaskManager routes them to the appropriate dispatcher and handles task profiling if enabled. @see CpuDispatcher @see NvGpuDispatcher */ class NvTaskManager { public: /** \brief Set the user-provided dispatcher object for CPU tasks \param[in] ref The dispatcher object. @see CpuDispatcher */ virtual void setCpuDispatcher(NvCpuDispatcher& ref) = 0; /** \brief Set the user-provided dispatcher object for GPU tasks \param[in] ref The dispatcher object. @see NvGpuDispatcher */ virtual void setGpuDispatcher(NvGpuDispatcher& ref) = 0; /** \brief Get the user-provided dispatcher object for CPU tasks \return The CPU dispatcher object. @see CpuDispatcher */ virtual NvCpuDispatcher* getCpuDispatcher() const = 0; /** \brief Get the user-provided dispatcher object for GPU tasks \return The GPU dispatcher object. 
@see NvGpuDispatcher */ virtual NvGpuDispatcher* getGpuDispatcher() const = 0; /** \brief Reset any dependencies between Tasks \note Will be called at the start of every frame before tasks are submitted. @see NvTask */ virtual void resetDependencies() = 0; /** \brief Called by the owning scene to start the task graph. \note All tasks with with ref count of 1 will be dispatched. @see NvTask */ virtual void startSimulation() = 0; /** \brief Called by the owning scene at the end of a simulation step to synchronize the NvGpuDispatcher @see NvGpuDispatcher */ virtual void stopSimulation() = 0; /** \brief Called by the worker threads to inform the NvTaskManager that a task has completed processing \param[in] task The task which has been completed */ virtual void taskCompleted(NvTask& task) = 0; /** \brief Retrieve a task by name \param[in] name The unique name of a task \return The ID of the task with that name, or TT_NOT_PRESENT if not found */ virtual NvTaskID getNamedTask(const char* name) = 0; /** \brief Submit a task with a unique name. \param[in] task The task to be executed \param[in] name The unique name of a task \param[in] type The type of the task (default TT_CPU) \return The ID of the task with that name, or TT_NOT_PRESENT if not found */ virtual NvTaskID submitNamedTask(NvTask* task, const char* name, NvTaskType::Enum type = NvTaskType::TT_CPU) = 0; /** \brief Submit an unnamed task. \param[in] task The task to be executed \param[in] type The type of the task (default TT_CPU) \return The ID of the task with that name, or TT_NOT_PRESENT if not found */ virtual NvTaskID submitUnnamedTask(NvTask& task, NvTaskType::Enum type = NvTaskType::TT_CPU) = 0; /** \brief Retrieve a task given a task ID \param[in] id The ID of the task to return, a valid ID must be passed or results are undefined \return The task associated with the ID */ virtual NvTask* getTaskFromID(NvTaskID id) = 0; /** \brief Release the NvTaskManager object, referenced dispatchers will not be released */ virtual void release() = 0; /** \brief Construct a new NvTaskManager instance with the given [optional] dispatchers */ static NvTaskManager* createTaskManager(NvErrorCallback& errorCallback, NvCpuDispatcher* = 0, NvGpuDispatcher* = 0); protected: virtual ~NvTaskManager() {} /*! \cond PRIVATE */ virtual void finishBefore(NvTask& task, NvTaskID taskID) = 0; virtual void startAfter(NvTask& task, NvTaskID taskID) = 0; virtual void addReference(NvTaskID taskID) = 0; virtual void decrReference(NvTaskID taskID) = 0; virtual int32_t getReference(NvTaskID taskID) const = 0; virtual void decrReference(NvLightCpuTask&) = 0; virtual void addReference(NvLightCpuTask&) = 0; virtual void emitStartEvent(NvBaseTask&, uint32_t threadId=0) = 0; virtual void emitStopEvent(NvBaseTask&, uint32_t threadId=0) = 0; /*! \endcond */ friend class NvBaseTask; friend class NvTask; friend class NvLightCpuTask; friend class NvGpuWorkerThread; }; NV_POP_PACK } } // end nvidia namespace #endif
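A minimal setup sketch for the interface above, under stated assumptions: the error callback, CPU dispatcher and root task are created elsewhere by the application, the helper name is hypothetical, and in the SDK the owning scene (not user code) normally drives resetDependencies()/startSimulation() each frame.

// Hypothetical configuration sketch for NvTaskManager.
#include "NvTaskManager.h"
#include "NvCpuDispatcher.h"

using namespace nvidia;
using namespace nvidia::task;

NvTaskManager* createConfiguredTaskManager(NvErrorCallback& errorCallback,
                                           NvCpuDispatcher& cpuDispatcher,
                                           NvTask& rootTask)
{
    NvTaskManager* tm = NvTaskManager::createTaskManager(errorCallback, &cpuDispatcher);
    if (!tm)
        return nullptr;

    tm->resetDependencies();                                              // start of a frame
    const NvTaskID rootId = tm->submitNamedTask(&rootTask, "root");       // defaults to TT_CPU
    (void)rootId;                                                         // could later be looked up via getNamedTask("root")
    tm->startSimulation();                                                // dispatches tasks with ref count 1
    return tm;
}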
NVIDIA-Omniverse/PhysX/blast/source/shared/NvTask/include/NvCpuDispatcher.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_CPU_DISPATCHER_H #define NV_CPU_DISPATCHER_H #include "NvTaskDefine.h" #include "NvSimpleTypes.h" namespace nvidia { namespace task { class NvBaseTask; /** \brief A CpuDispatcher is responsible for scheduling the execution of tasks passed to it by the SDK. A typical implementation would for example use a thread pool with the dispatcher pushing tasks onto worker thread queues or a global queue. @see NvBaseTask @see NvTask @see NvTaskManager */ class NvCpuDispatcher { public: /** \brief Called by the TaskManager when a task is to be queued for execution. Upon receiving a task, the dispatcher should schedule the task to run when resource is available. After the task has been run, it should call the release() method and discard it's pointer. \param[in] task The task to be run. @see NvBaseTask */ virtual void submitTask( NvBaseTask& task ) = 0; /** \brief Returns the number of available worker threads for this dispatcher. The SDK will use this count to control how many tasks are submitted. By matching the number of tasks with the number of execution units task overhead can be reduced. */ virtual uint32_t getWorkerCount() const = 0; virtual ~NvCpuDispatcher() {} }; } } // end nvidia namespace #endif
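// --- Illustrative sketch (not part of the original NvCpuDispatcher.h) --------
// A minimal, single-threaded dispatcher that satisfies the contract described
// above by running each submitted task inline and then releasing it. A real
// implementation would queue tasks onto a worker thread pool instead; the
// class name is hypothetical.
#include "NvCpuDispatcher.h"
#include "NvTask.h"   // full definition of NvBaseTask (run()/release())

class InlineCpuDispatcher : public nvidia::task::NvCpuDispatcher
{
public:
    virtual void submitTask(nvidia::task::NvBaseTask& task)
    {
        task.run();       // execute immediately on the calling thread
        task.release();   // then release it, as required by the contract above
    }

    virtual uint32_t getWorkerCount() const
    {
        return 1;         // a single (the calling) execution unit
    }
};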
NVIDIA-Omniverse/PhysX/blast/source/shared/NvTask/include/NvTask.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_TASK_H #define NV_TASK_H #include "NvTaskDefine.h" #include "NvTaskManager.h" #include "NvAssert.h" namespace nvidia { namespace task { /** * \brief Base class of all task types * * NvBaseTask defines a runnable reference counted task with built-in profiling. */ class NvBaseTask { public: NvBaseTask() : mEventID(0xFFFF), mProfileStat(0), mTm(0) {} virtual ~NvBaseTask() {} /** * \brief The user-implemented run method where the task's work should be performed * * run() methods must be thread safe, stack friendly (no alloca, etc), and * must never block. */ virtual void run() = 0; /** * \brief Return a user-provided task name for profiling purposes. * * It does not have to be unique, but unique names are helpful. * * \return The name of this task */ virtual const char* getName() const = 0; //! \brief Implemented by derived implementation classes virtual void addReference() = 0; //! \brief Implemented by derived implementation classes virtual void removeReference() = 0; //! \brief Implemented by derived implementation classes virtual int32_t getReference() const = 0; /** \brief Implemented by derived implementation classes * * A task may assume in its release() method that the task system no longer holds * references to it - so it may safely run its destructor, recycle itself, etc. * provided no additional user references to the task exist */ virtual void release() = 0; /** * \brief Execute user run method with wrapping profiling events. * * Optional entry point for use by CpuDispatchers. * * \param[in] threadId The threadId of the thread that executed the task. 
*/ NV_INLINE void runProfiled(uint32_t threadId=0) { mTm->emitStartEvent(*this, threadId); run(); mTm->emitStopEvent(*this, threadId); } /** * \brief Specify stop event statistic * * If called before or while the task is executing, the given value * will appear in the task's event bar in the profile viewer * * \param[in] stat The stat to signal when the task is finished */ NV_INLINE void setProfileStat( uint16_t stat ) { mProfileStat = stat; } /** * \brief Return NvTaskManager to which this task was submitted * * Note, can return NULL if task was not submitted, or has been * completed. */ NV_INLINE NvTaskManager* getTaskManager() const { return mTm; } protected: uint16_t mEventID; //!< Registered profile event ID uint16_t mProfileStat; //!< Profiling statistic NvTaskManager* mTm; //!< Owning NvTaskManager instance friend class NvTaskMgr; }; /** * \brief A NvBaseTask implementation with deferred execution and full dependencies * * A NvTask must be submitted to a NvTaskManager to to be executed, Tasks may * optionally be named when they are submitted. */ class NvTask : public NvBaseTask { public: NvTask() : mTaskID(0) {} virtual ~NvTask() {} //! \brief Release method implementation virtual void release() { NV_ASSERT(mTm); // clear mTm before calling taskCompleted() for safety NvTaskManager* save = mTm; mTm = NULL; save->taskCompleted( *this ); } //! \brief Inform the NvTaskManager this task must finish before the given // task is allowed to start. NV_INLINE void finishBefore( NvTaskID taskID ) { NV_ASSERT(mTm); mTm->finishBefore( *this, taskID); } //! \brief Inform the NvTaskManager this task cannot start until the given // task has completed. NV_INLINE void startAfter( NvTaskID taskID ) { NV_ASSERT(mTm); mTm->startAfter( *this, taskID ); } /** * \brief Manually increment this task's reference count. The task will * not be allowed to run until removeReference() is called. */ NV_INLINE void addReference() { NV_ASSERT(mTm); mTm->addReference( mTaskID ); } /** * \brief Manually decrement this task's reference count. If the reference * count reaches zero, the task will be dispatched. */ NV_INLINE void removeReference() { NV_ASSERT(mTm); mTm->decrReference( mTaskID ); } /** * \brief Return the ref-count for this task */ NV_INLINE int32_t getReference() const { return mTm->getReference( mTaskID ); } /** * \brief Return the unique ID for this task */ NV_INLINE NvTaskID getTaskID() const { return mTaskID; } /** * \brief Called by NvTaskManager at submission time for initialization * * Perform simulation step initialization here. */ virtual void submitted() { mStreamIndex = 0; mPreSyncRequired = false; mProfileStat = 0; } /** * \brief Specify that the GpuTask sync flag be set */ NV_INLINE void requestSyncPoint() { mPreSyncRequired = true; } protected: NvTaskID mTaskID; //!< ID assigned at submission uint32_t mStreamIndex; //!< GpuTask CUDA stream index bool mPreSyncRequired; //!< GpuTask sync flag friend class NvTaskMgr; friend class NvGpuWorkerThread; }; /** * \brief A NvBaseTask implementation with immediate execution and simple dependencies * * A NvLightCpuTask bypasses the NvTaskManager launch dependencies and will be * submitted directly to your scene's CpuDispatcher. When the run() function * completes, it will decrement the reference count of the specified * continuation task. 
* * You must use a full-blown NvTask if you want your task to be resolved * by another NvTask, or you need more than a single dependency to be * resolved when your task completes, or your task will not run on the * CpuDispatcher. */ class NvLightCpuTask : public NvBaseTask { public: NvLightCpuTask() : mCont( NULL ) , mRefCount( 0 ) { } virtual ~NvLightCpuTask() { mTm = NULL; } /** * \brief Initialize this task and specify the task that will have its ref count decremented on completion. * * Submission is deferred until the task's mRefCount is decremented to zero. * Note that we only use the NvTaskManager to query the appropriate dispatcher. * * \param[in] tm The NvTaskManager this task is managed by * \param[in] c The task to be executed when this task has finished running */ NV_INLINE void setContinuation(NvTaskManager& tm, NvBaseTask* c) { NV_ASSERT( mRefCount == 0 ); mRefCount = 1; mCont = c; mTm = &tm; if( mCont ) { mCont->addReference(); } } /** * \brief Initialize this task and specify the task that will have its ref count decremented on completion. * * This overload of setContinuation() queries the NvTaskManager from the continuation * task, which cannot be NULL. * \param[in] c The task to be executed after this task has finished running */ NV_INLINE void setContinuation( NvBaseTask* c ) { NV_ASSERT( c ); NV_ASSERT( mRefCount == 0 ); mRefCount = 1; mCont = c; if( mCont ) { mCont->addReference(); mTm = mCont->getTaskManager(); NV_ASSERT( mTm ); } } /** * \brief Retrieves continuation task */ NV_INLINE NvBaseTask* getContinuation() const { return mCont; } /** * \brief Manually decrement this task's reference count. If the reference * count reaches zero, the task will be dispatched. */ NV_INLINE void removeReference() { mTm->decrReference(*this); } /** \brief Return the ref-count for this task */ NV_INLINE int32_t getReference() const { return mRefCount; } /** * \brief Manually increment this task's reference count. The task will * not be allowed to run until removeReference() is called. */ NV_INLINE void addReference() { mTm->addReference(*this); } /** * \brief called by CpuDispatcher after run method has completed * * Decrements the continuation task's reference count, if specified. */ NV_INLINE void release() { if( mCont ) { mCont->removeReference(); } } protected: NvBaseTask* mCont; //!< Continuation task, can be NULL volatile int32_t mRefCount; //!< NvTask is dispatched when reaches 0 friend class NvTaskMgr; }; } }// end physx namespace #endif
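// --- Illustrative usage sketch (not part of the original NvTask.h) -----------
// Demonstrates the NvLightCpuTask continuation pattern described above: two
// chunk tasks share the same continuation and are kicked off by dropping their
// initial reference. The ChunkTask class and the caller-owned gatherTask are
// hypothetical; gatherTask's own reference count is assumed to be primed so
// that it runs once it reaches zero.
#include "NvTask.h"

class ChunkTask : public nvidia::task::NvLightCpuTask
{
public:
    virtual void run() { /* process one chunk of work */ }
    virtual const char* getName() const { return "Chunk"; }
};

void kickChunks(nvidia::task::NvTaskManager& tm,
                ChunkTask& chunkA, ChunkTask& chunkB,
                nvidia::task::NvBaseTask& gatherTask)
{
    chunkA.setContinuation(tm, &gatherTask);  // adds one reference to gatherTask
    chunkB.setContinuation(tm, &gatherTask);  // adds another

    chunkA.removeReference();                 // ref count 1 -> 0: submitted to the CpuDispatcher
    chunkB.removeReference();
    // As each chunk completes, its release() removes one reference from
    // gatherTask, so gatherTask is dispatched only after both chunks finish.
}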
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/buffer.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvPreprocessor.h" #include <assert.h> #include <stdio.h> #include <vector> #if NV_WINDOWS_FAMILY #define POD_Buffer std::vector #else template<typename T, int Alignment = sizeof(T)> class POD_Buffer { public: POD_Buffer() : _size(0), _capacity(0), _data(nullptr) {} ~POD_Buffer() { deallocate(); } size_t size() const { return _size; } void resize(size_t new_size) { if (new_size > _capacity) { reserve(new_size); } _size = new_size; } void reserve(size_t min_capacity) { if (min_capacity > _capacity) { void* new_data = allocate(min_capacity); if (!!_size) { memcpy(new_data, _data, _size*sizeof(T)); } deallocate(); _capacity = min_capacity; _data = reinterpret_cast<T*>(new_data); } } void push_back(const T& e) { if (_size >= _capacity) { reserve(!!_size ? 2*_size : (size_t)16); } _data[_size++] = e; } void pop_back() { if (!!_size) --_size; } T* data() { return _data; } const T* data() const { return _data; } T& operator [] (size_t index) { assert(_size > index); return _data[index]; } const T& operator [] (size_t index) const { assert(_size > index); return _data[index]; } T& back() { return (*this)[_size-1]; } const T& back() const { return (*this)[_size-1]; } private: void* allocate(size_t buffer_size) { const size_t mem_size = sizeof(T)*buffer_size; unsigned char* mem = (unsigned char*)malloc(mem_size + Alignment); const unsigned char offset = (unsigned char)((uintptr_t)Alignment - (uintptr_t)mem % Alignment - 1); mem += offset; *mem++ = offset; return mem; } void deallocate() { if (!!_data) { unsigned char* cmem = (unsigned char*)_data; const unsigned char offset = *--cmem; ::free(cmem - offset); } _size = 0; _capacity = 0; _data = nullptr; } size_t _size; size_t _capacity; T* _data; }; #endif
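// --- Illustrative usage sketch (not part of the original buffer.h) -----------
// Basic use of POD_Buffer; on Windows this is simply std::vector, elsewhere it
// exercises the aligned implementation above. The element values are arbitrary.
#include "buffer.h"

void exampleBuffer()
{
    POD_Buffer<float> values;        // default Alignment = sizeof(T) on non-Windows builds
    values.reserve(16);
    for (int i = 0; i < 4; ++i)
    {
        values.push_back(float(i));  // grows capacity geometrically when needed
    }
    values[2] = 42.0f;
    float last = values.back();
    values.pop_back();
    (void)last;
    // The stress solver stores SIMD_ALIGN_32 element types (e.g. AngLin6) in
    // POD_Buffer, so the default Alignment = sizeof(T) = 32 yields 32-byte
    // aligned storage suitable for _mm256 loads and stores.
}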
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/coupling.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "solver_types.h" #include "anglin6.h" #include "NvCMath.h" /** * Bond coupling data used as a representation of a block column of a "coupling matrix" C, * which has exactly two non-zero blocks. The non-zero blocks are of the form * * / 1 ~r_ij \ * C_ij = s_ij | |. * \ 0 1 / * * This represents the coupling of node i by bond j. The scalar s_ij is +/-1, and for each * bond (column j of C) s_ij must take on both signs. The matrix factor is again composed * of blocks, each element a 3x3 matrix. The 0 and 1's are just multiples of the unit matrix, * and ~r_ij is the 3x3 antisymmetric matrix representing "crossing with the vector r_ij on the * left" (i.e. (~u)*v = (u) x (v)). The vector r_ij represents the displacement from node i's * CoM to bond j's centroid. */ SIMD_ALIGN_32 ( struct Coupling { NvcVec3 offset0; uint32_t node0; NvcVec3 offset1; uint32_t node1; } ); template <typename Elem, typename Scalar = Float_Scalar> struct CouplingMatrixOps { /** * Sparse matrix-vector multiply y = C*x, where C is a "coupling matrix" represented by columns * of type Coupling (see the comments for Coupling). * * \param[out] y Resulting column Elem vector of length M. * \param[in] C Input M x N coupling matrix. * \param[in] x Input column Elem vector of length N. * \param[in] M The number of rows in y and C. * \param[in] N The number of rows in x and columns in C. */ inline void rmul(Elem* y, const Coupling* C, const Elem* x, uint32_t M, uint32_t N) { memset(y, 0, sizeof(AngLin6)*M); for (uint32_t j = 0 ; j < N; ++j) { const Coupling& c = C[j]; const AngLin6& x_j = x[j]; AngLin6& y0 = y[c.node0]; AngLin6& y1 = y[c.node1]; y0.ang += x_j.ang - (c.offset0^x_j.lin); y0.lin += x_j.lin; y1.ang -= x_j.ang - (c.offset1^x_j.lin); y1.lin -= x_j.lin; } } /** * Sparse matrix-vector multiply y = x*C, where C is a "coupling matrix" represented by columns * of type Coupling (see the comments for Coupling). * * \param[out] y Resulting row Elem vector of length N. 
* \param[in] x Input row Elem vector, must be long enough to be indexed by all values in B's representation. * \param[in] C Input M x N couping matrix. * \param[in] M The number of columns in x and rows in C. * \param[in] N The number of columns in y and C. */ inline void lmul(Elem* y, const Elem* x, const Coupling* C, uint32_t M, uint32_t N) { NV_UNUSED(M); for (uint32_t j = 0; j < N; ++j) { const Coupling& c = C[j]; const AngLin6& x0 = x[c.node0]; const AngLin6& x1 = x[c.node1]; AngLin6& y_j = y[j]; y_j.ang = x0.ang - x1.ang; y_j.lin = x0.lin - x1.lin + (c.offset0^x0.ang) - (c.offset1^x1.ang); } } }; template <typename Elem> struct CouplingMatrixOps<Elem, SIMD_Scalar> { /** * Sparse matrix-vector multiply y = C*x, where C is a "coupling matrix" represented by columns * of type Coupling (see the comments for Coupling). * * \param[out] y Resulting column Elem vector of length M. * \param[in] C Input M x N coupling matrix. * \param[in] x Input column Elem vector of length N. * \param[in] M The number of rows in y and C. * \param[in] N The number of rows in x and columns in C. */ inline void rmul(Elem* y, const Coupling* C, const Elem* x, uint32_t M, uint32_t N) { memset(y, 0, sizeof(AngLin6)*M); for (uint32_t j = 0 ; j < N; ++j) { const Coupling& c = C[j]; const AngLin6& x_j = x[j]; AngLin6& y0 = y[c.node0]; AngLin6& y1 = y[c.node1]; __m256 _x = _mm256_load_ps(&x_j.ang.x); __m256 _y0 = _mm256_load_ps(&y0.ang.x); __m256 _y1 = _mm256_load_ps(&y1.ang.x); __m256 _c = _mm256_load_ps(&c.offset0.x); _y0 = _mm256_add_ps(_y0, _x); _y1 = _mm256_sub_ps(_y1, _x); __m128 _xl = _mm256_extractf128_ps(_x, 1); __m256 _a = pair_cross3(_mm256_set_m128(_xl, _xl), _c); _y0 = _mm256_add_ps(_y0, _mm256_set_m128(_mm_setzero_ps(), _mm256_castps256_ps128(_a))); _y1 = _mm256_sub_ps(_y1, _mm256_set_m128(_mm_setzero_ps(), _mm256_extractf128_ps(_a, 1))); _mm256_store_ps(&y0.ang.x, _y0); _mm256_store_ps(&y1.ang.x, _y1); } } /** * Sparse matrix-vector multiply y = x*C, where C is a "coupling matrix" represented by columns * of type Coupling (see the comments for Coupling). * * \param[out] y Resulting row Elem vector of length N. * \param[in] x Input row Elem vector, must be long enough to be indexed by all values in B's representation. * \param[in] C Input M x N couping matrix. * \param[in] M The number of columns in x and rows in C. * \param[in] N The number of columns in y and C. */ inline void lmul(Elem* y, const Elem* x, const Coupling* C, uint32_t M, uint32_t N) { NV_UNUSED(M); for (uint32_t j = 0; j < N; ++j) { const Coupling& c = C[j]; const AngLin6& x0 = x[c.node0]; const AngLin6& x1 = x[c.node1]; AngLin6& y_j = y[j]; __m256 _x0 = _mm256_load_ps(&x0.ang.x); __m256 _x1 = _mm256_load_ps(&x1.ang.x); __m256 _c = _mm256_load_ps(&c.offset0.x); __m256 _y = _mm256_sub_ps(_x0, _x1); __m256 _a = pair_cross3(_c, _mm256_set_m128(_mm256_castps256_ps128(_x1), _mm256_castps256_ps128(_x0))); _y = _mm256_add_ps(_y, _mm256_set_m128(_mm_sub_ps(_mm256_castps256_ps128(_a), _mm256_extractf128_ps(_a, 1)), _mm_setzero_ps())); _mm256_store_ps(&y_j.ang.x, _y); } } private: inline __m256 pair_cross3(const __m256& v0, const __m256& v1) { __m256 prep0 = _mm256_shuffle_ps(v0, v0, 0xc9); __m256 prep1 = _mm256_shuffle_ps(v1, v1, 0xc9); __m256 res_shuffled = _mm256_fmsub_ps(v0, prep1, _mm256_mul_ps(prep0, v1)); return _mm256_shuffle_ps(res_shuffled, res_shuffled, 0xc9); } };
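// --- Illustrative usage sketch (not part of the original coupling.h) ---------
// Builds the single column of C for a bond joining nodes 0 and 1 and applies it
// with the scalar (Float_Scalar) path. The offsets run from each node's CoM to
// the bond centroid, following the convention documented above; all values are
// arbitrary.
#include "coupling.h"

void exampleCouplingApply()
{
    Coupling C[1];
    C[0].offset0 = { 0.0f,  0.5f, 0.0f };   // r_0j: node 0 CoM -> bond centroid
    C[0].node0   = 0;
    C[0].offset1 = { 0.0f, -0.5f, 0.0f };   // r_1j: node 1 CoM -> bond centroid
    C[0].node1   = 1;

    AngLin6 x[1] = {};                      // one element per bond (N = 1)
    x[0].lin = { 1.0f, 0.0f, 0.0f };

    AngLin6 y[2];                           // one element per node (M = 2)
    CouplingMatrixOps<AngLin6, Float_Scalar>().rmul(y, C, x, 2, 1);
    // y[0].lin == +x[0].lin and y[1].lin == -x[0].lin (the +/-1 signs s_ij),
    // while the angular parts pick up the ~r_ij cross-product terms.
}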
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/bond.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "coupling.h" #include "inertia.h" #include "anglin6.h" /** * BondMatrix * * Given a BondMatrix B, when (B^T)*B is applied to a vector of bond impulses, the result * is a vector of the differences between the the resulting accelerations of the nodes * joined by each bond. * * This is done in block form, so a vector is composed of vector elements. Each element * is a 6-dimensional vector, composed of a linear part followed by an angular part. * Matrix blocks are likewise 6x6. * * This matrix is composed of two sparse matrices: * An M x M block diagonal matrix I, where the i^th diagonal block is the 6x6 matrix: * * / I_i 0 \ * I_ii = | | * \ 0 m_i / * * Except for possibly I_i, each "element" in I_ii is a multiple of the 3x3 unit matrix. I_i is a * 3x3 symmetric inertia tensor. See the definition of Inertia<TensorType> for its representation. * * The second component is the coupling matrix C, see documentation for Coupling. * * The matrix represented by this object is (I^-1/2)*C, an M x N matrix. * * NOTE: I and C are _not_ stored as described above, for efficiency. */ template <typename TensorType> struct BondMatrix { /** Constructor clears member data. */ BondMatrix() : C(nullptr), sqrt_I_inv(nullptr), scratch(nullptr), M(0), N(0) {} /** * Set fields (shallow pointer copy). * * \param[in] _C Coupling matrix, see the documentation for Coupling. * \param[in] _sqrt_I_inv The inverse of the square root of the diagonal mass and inertia tensor, represented by a * vector of _M Inertia structs for the diagonal values. The i^th element is the reciprocal * of the square root of the mass and inertia tensor of node i. * \param[in] _scratch Scratch memory required to carry out a multiply. Must be at least _M*sizeof(AngLin6) bytes. * \param[in] _M The number of nodes. * \param[in] _N The number of bonds. 
*/ void set(const Coupling* _C, const Inertia<TensorType>* _sqrt_I_inv, void* _scratch, uint32_t _M, uint32_t _N) { C = _C; sqrt_I_inv = _sqrt_I_inv; scratch = _scratch; M = _M; N = _N; } const Coupling* C; const Inertia<TensorType>* sqrt_I_inv; void* scratch; uint32_t M, N; }; typedef BondMatrix<float> BondMatrixS; typedef BondMatrix<NvcVec3> BondMatrixD; typedef BondMatrix<NvcMat33> BondMatrixG; template<typename TensorType, typename Scalar> struct BondMatrixOps { /** * Matrix-vector multiply y = B*x. * * \param[out] y Resulting column vector of length M. * \param[in] B Input MxN matrix representation. * \param[in] x Input column vector of length N. * \param[in] M Number of rows in B. * \param[in] N Number of columns in B. */ inline void rmul(AngLin6* y, const BondMatrix<TensorType>& B, const AngLin6* x, uint32_t M, uint32_t N) const { NV_UNUSED(M); // BondMatrix stores these NV_UNUSED(N); // Calculate y = C*x (apply C) CouplingMatrixOps<AngLin6, Scalar>().rmul(y, B.C, x, B.M, B.N); // Calculate y = (I^-1/2)*C*x (apply I^-1/2) InertiaMatrixOps<Scalar>().mul(y, B.sqrt_I_inv, y, B.M); } /** * Matrix-vector multiply y = x*B. * * \param[out] y Resulting row vector of length B.N. * \param[in] x Input row vector of length B.M. * \param[in] B Input matrix representation. * \param[in] M Number of rows in B. * \param[in] N Number of columns in B. */ inline void lmul(AngLin6* y, const AngLin6* x, const BondMatrix<TensorType>& B, uint32_t M, uint32_t N) const { NV_UNUSED(M); // BondMatrix stores these NV_UNUSED(N); AngLin6* s = (AngLin6*)B.scratch; // M-sized scratch s // Calculate s = (I^-1/2)*x (apply I^-1/2) InertiaMatrixOps<Scalar>().mul(s, B.sqrt_I_inv, x, B.M); // Calculate y = (C^T)*(I^-1/2)*x (apply C^T) CouplingMatrixOps<AngLin6, Scalar>().lmul(y, s, B.C, B.M, B.N); } }; template<typename Scalar> using BondMatrixOpsS = BondMatrixOps<float, Scalar>; template<typename Scalar> using BondMatrixOpsD = BondMatrixOps<NvcVec3, Scalar>; template<typename Scalar> using BondMatrixOpsG = BondMatrixOps<NvcMat33, Scalar>;
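// --- Worked relation (summarizing the comments above and in stress.cpp) ------
// With the block-diagonal inertia matrix I and the coupling matrix C,
//
//     B = (I^-1/2)*C,   so   (B^T)*B = (C^T)*(I^-1)*C.
//
// Solving B*J = b with b = -(I^1/2)*v via the normal equations (as CGNR does)
// therefore solves (C^T)*(I^-1)*C*J = -(C^T)*v for the bond impulses J.
// rmul() applies B (C first, then I^-1/2); lmul() applies B^T ((I^-1/2) into
// scratch first, then C^T).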
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/solver_types.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvCTypes.h" #include "simd/simd.h" /** * Scalar types for SIMD and non-SIMD calculations. * Currently also used as a template argument to distinguish code paths. May need a different * scheme if two codepaths use the same scalar type. */ typedef __m128 SIMD_Scalar; typedef float Float_Scalar; /** * Holds the components of a rigid body description that are necessary for the stress solver. */ template<typename InertiaType> struct SolverNode { NvcVec3 CoM; float mass; InertiaType inertia; }; typedef SolverNode<float> SolverNodeS; typedef SolverNode<NvcVec3> SolverNodeD; typedef SolverNode<NvcMat33> SolverNodeG; /** * Holds the components of a rigid body bond description that are necessary for the stress solver. */ struct SolverBond { NvcVec3 centroid; uint32_t nodes[2]; // Index into accompanying SolverNode<InertiaType> array. };
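// --- Illustrative sketch (not part of the original solver_types.h) -----------
// Filling the solver input structs for a two-node, one-bond network with scalar
// inertia; the numeric values are arbitrary. StressProcessor::prepare() (see
// stress.h) consumes arrays of exactly this form.
#include "solver_types.h"

static const SolverNodeS g_nodes[2] =
{
    { { 0.0f, 0.0f, 0.0f }, 1.0f, 1.0f },   // CoM, mass, scalar inertia
    { { 1.0f, 0.0f, 0.0f }, 1.0f, 1.0f }
};

static const SolverBond g_bonds[1] =
{
    { { 0.5f, 0.0f, 0.0f }, { 0, 1 } }      // bond centroid, joined node indices
};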
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/stress.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "bond.h" #include "buffer.h" class StressProcessor { public: /** Constructor clears member data. */ StressProcessor() : m_mass_scale(0.0f), m_length_scale(0.0f), m_can_resume(false) {} /** Parameters controlling the data preparation. */ struct DataParams { bool equalizeMasses = false; // Use the geometric mean of the nodes' masses instead of the individual masses. bool centerBonds = false; // Place the bond position halfway between adjoining nodes' CoMs. }; /** Parameters controlling the solver behavior. */ struct SolverParams { uint32_t maxIter = 0; // The maximum number of iterations. If 0, use CGNR for default value. float tolerance = 1.e-6f; // The relative tolerance threshold for convergence. Iteration will stop when this is reached. bool warmStart = false; // Whether or not to use the solve function's 'impulses' parameter as a starting input vector. }; /** * Build the internal representation of the stress network from nodes and bonds. * This only needs to be called initially, and any time the nodes or bonds change. * * \param[in] nodes Array of SolverNodeS (scalar inertia). * \param[in] N_nodes Number of elements in the nodes array. * \param[in] bonds Array of SolverBond. The node indices in each bond entry correspond to the ordering of the nodes array. * \param[in] N_bonds Number of elements in the bonds array. * \param[in] params Parameters affecting the processing of the input data (see DataParams). */ void prepare(const SolverNodeS* nodes, uint32_t N_nodes, const SolverBond* bonds, uint32_t N_bonds, const DataParams& params); /** * Solve for the bond impulses given the velocities of each node. The function prepare(...) must be called * before this can be used, but then solve(...) may be called multiple times. * * The vector elements (impulses and velocities) hold linear and angular parts. * * \param[out] impulses Output array of impulses exerted by each bond. For a warm or hot start, this is also used as an input. 
* Must be of length N_bonds passed into the prepare(...) function. * \param[in] velocities Input array of external velocities on each node. Must be of length N_nodes passed into the prepare(...) function. * \param[in] params Parameters affecting the solver characteristics (see SolverParams). * \param[out] error_sq (Optional) If not NULL, *error_sq will be filled with the angular and linear square errors (solver residuals). Default = NULL. * \param[in] resume (Optional) Set to true if impulses and velocities have not changed since last call, to resume solving. Default = false. * * \return the number of iterations taken to converge, if it converges. Otherwise, returns minus the number of iterations before exiting. */ int solve(AngLin6* impulses, const AngLin6* velocities, const SolverParams& params, AngLin6ErrorSq* error_sq = nullptr, bool resume = false); /** * Removes the indexed bond from the solver. * * \param[in] bondIndex The index of the bond to remove. Must be less than getBondCount(). * * \return true iff successful. */ bool removeBond(uint32_t bondIndex); /** * \return the number of nodes in the stress network. (Set by prepare(...).) */ uint32_t getNodeCount() const { return (uint32_t)m_recip_sqrt_I.size(); } /** * \return the number of bonds in the stress network. (Set by prepare(...), possibly reduced by removeBond(...).) */ uint32_t getBondCount() const { return (uint32_t)m_couplings.size(); } /** * \return whether or not the solver uses SIMD. If the device and OS support SSE, AVX, and FMA instruction sets, SIMD is used. */ static bool usingSIMD() { return s_use_simd; } protected: float m_mass_scale; float m_length_scale; POD_Buffer<InertiaS> m_recip_sqrt_I; POD_Buffer<Coupling> m_couplings; BondMatrixS m_B; POD_Buffer<AngLin6> m_rhs; POD_Buffer<AngLin6> m_B_scratch; POD_Buffer<AngLin6> m_solver_cache; bool m_can_resume; static const bool s_use_simd; };
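// --- Illustrative usage sketch (not part of the original stress.h) -----------
// End-to-end use of StressProcessor: prepare a two-node network, apply a
// relative velocity, solve for the bond impulse, and (optionally) remove an
// overstressed bond. All numeric values are arbitrary placeholders.
#include "stress.h"

void exampleStressSolve()
{
    const SolverNodeS nodes[2] =
    {
        { { 0.0f, 0.0f, 0.0f }, 1.0f, 1.0f },
        { { 1.0f, 0.0f, 0.0f }, 1.0f, 1.0f }
    };
    const SolverBond bonds[1] = { { { 0.5f, 0.0f, 0.0f }, { 0, 1 } } };

    StressProcessor processor;
    StressProcessor::DataParams dataParams;       // defaults: no equalization/centering
    processor.prepare(nodes, 2, bonds, 1, dataParams);

    AngLin6 velocities[2] = {};                   // external velocity per node
    velocities[1].lin = { 0.0f, -1.0f, 0.0f };    // pull node 1 downward

    AngLin6 impulses[1] = {};                     // one impulse per bond (output)
    StressProcessor::SolverParams solverParams;   // defaults: auto maxIter, tol = 1e-6
    AngLin6ErrorSq errorSq;
    const int iters = processor.solve(impulses, velocities, solverParams, &errorSq);

    if (iters >= 0)   // converged
    {
        // A caller would typically compare impulses[0].lin and impulses[0].ang
        // against per-bond strength limits and break overstressed bonds, e.g.:
        // processor.removeBond(0);
    }
}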
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/inertia.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "solver_types.h" #include "NvCMath.h" /** * Holds an inertia component and a mass component. * The inertial component is represented by a TensorType, which may be a float (representing a multiple of * the unit matrix), an NvcVec3 (representing the non-zero components of a diagonal inertia tensor), or a * 3x3 symmetric matrix representing a general inertia tensor. * * This structure might also be used to store reciprocals, or powers (e.g. square roots) of these quantities. */ template <typename TensorType> struct Inertia { TensorType I; float m; }; typedef Inertia<float> InertiaS; typedef Inertia<NvcVec3> InertiaD; typedef Inertia<NvcMat33> InertiaG; template<typename Scalar = Float_Scalar> struct InertiaMatrixOps { /** * Matrix-vector multiply y = I*x. * * Apply a block-diagonal inertia matrix I to a vector of AngLin6 elements. * x and y may be the same vector. * * \param[out] y Resulting column vector of length N. * \param[in] I Input inertia matrix representation. * \param[in] x Input column vector of length N. * \param[in] N Number of columns in x and y, and the square size of I. * * x and y may be the same vector. */ inline void mul(AngLin6* y, const InertiaS* I, const AngLin6* x, uint32_t N) { for (uint32_t i = 0; i < N; ++i) { const InertiaS& I_i = I[i]; const AngLin6& x_i = x[i]; AngLin6& y_i = y[i]; y_i.ang = I_i.I*x_i.ang; y_i.lin = I_i.m*x_i.lin; } } }; template<> struct InertiaMatrixOps<SIMD_Scalar> { /** * Matrix-vector multiply y = I*x. * * Apply a block-diagonal inertia matrix I to a vector of AngLin6 elements. * * \param[out] y Resulting column vector of length N. * \param[in] I Input inertia matrix representation. * \param[in] x Input column vector of length N. * \param[in] N Number of columns in x and y, and the square size of I. * * x and y may be the same vector. 
*/ inline void mul(AngLin6* y, const InertiaS* I, const AngLin6* x, uint32_t N) { for (uint32_t i = 0; i < N; ++i) { const InertiaS& I_i = I[i]; const AngLin6& x_i = x[i]; AngLin6& y_i = y[i]; __m256 _x = _mm256_load_ps(&x_i.ang.x); __m128 _Il = _mm_load1_ps(&I_i.I); __m128 _Ih = _mm_load1_ps(&I_i.m); __m256 _I = _mm256_set_m128(_Ih,_Il); __m256 _y = _mm256_mul_ps(_I, _x); _mm256_store_ps(&y_i.ang.x, _y); } } };
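// --- Illustrative usage sketch (not part of the original inertia.h) ----------
// Applies the block-diagonal inertia matrix to one AngLin6 element with the
// scalar path; values are arbitrary. AngLin6 comes from anglin6.h, which is
// included ahead of this header by bond.h in the original code.
#include "anglin6.h"
#include "inertia.h"

void exampleInertiaApply()
{
    const InertiaS I[1] = { { 2.0f, 4.0f } };   // scalar inertia tensor I = 2, mass m = 4
    AngLin6 v[1] = {};
    v[0].ang = { 1.0f, 0.0f, 0.0f };
    v[0].lin = { 0.0f, 1.0f, 0.0f };

    AngLin6 r[1];
    InertiaMatrixOps<Float_Scalar>().mul(r, I, v, 1);
    // r[0].ang == I*v[0].ang == (2, 0, 0),  r[0].lin == m*v[0].lin == (0, 4, 0)
}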
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/anglin6.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvCMath.h" #include "simd/simd.h" /** * Holds an angular and linear component, for angular and linear velocities, accelerations, impulses, torques and forces, etc. */ SIMD_ALIGN_32( struct AngLin6 { SIMD_ALIGN_16(NvcVec3 ang); SIMD_ALIGN_16(NvcVec3 lin); } ); /** * Holds the angular and linear components of the calculated error. */ struct AngLin6ErrorSq { float ang, lin; }; /** * SISD AngLin6 operations. */ template<typename Scalar = float> struct AngLin6Ops { /** r = x + y */ inline void add(AngLin6& r, const AngLin6& x, const AngLin6& y) { r.ang = x.ang + y.ang; r.lin = x.lin + y.lin; } /** r = x - y */ inline void sub(AngLin6& r, const AngLin6& x, const AngLin6& y) { r.ang = x.ang - y.ang; r.lin = x.lin - y.lin; } /** r = c*x + y */ inline void madd(AngLin6& r, float c, const AngLin6& x, const AngLin6& y) { r.ang = c*x.ang + y.ang; r.lin = c*x.lin + y.lin; } /** r = -c*x + y */ inline void nmadd(AngLin6& r, float c, const AngLin6& x, const AngLin6& y) { r.ang = y.ang - c*x.ang; r.lin = y.lin - c*x.lin; } /** Vector add */ inline void vadd(AngLin6* r, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) add(*r++, *x++, *y++); } /** Vector sub */ inline void vsub(AngLin6* r, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) sub(*r++, *x++, *y++); } /** Vector madd */ inline void vmadd(AngLin6* r, float c, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) madd(*r++, c, *x++, *y++); } /** Vector nmadd */ inline void vnmadd(AngLin6* r, float c, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) nmadd(*r++, c, *x++, *y++); } /** * Vector-of-vectors dot product. * * \param[in] v Vector of AngLin6, of length N. * \param[in] w Vector of AngLin6, of length N. * \param[in] N Number of elements in v and w. * * return (v|w). 
*/ inline float dot(const AngLin6* v, const AngLin6* w, uint32_t N) { float result = 0.0f; for (uint32_t i = 0; i < N; ++i) { const AngLin6& v_i = v[i]; const AngLin6& w_i = w[i]; result += (v_i.ang|w_i.ang) + (v_i.lin|w_i.lin); } return result; } /** * Vector-of-vectors length squared. * * Equivalent to dot(v, v N), but could be faster in some cases * * \param[in] v Vector of AngLin6, of length N. * \param[in] N Number of elements in v. * * return |v|^2. */ inline float length_sq(const AngLin6* v, uint32_t N) { float result = 0.0f; for (uint32_t i = 0; i < N; ++i) { const AngLin6& v_i = v[i]; result += (v_i.ang|v_i.ang) + (v_i.lin|v_i.lin); } return result; } /** * Vector-of-vectors length squared, split into angular and linear contributions. * * \param[out] error_sq Sum of the squared angular and linear parts of v. * \param[in] v Vector of AngLin6, of length N. * \param[in] N Number of elements in v. * * \return the sum of the squared angular and linear errors, error.ang + error.lin. */ inline float calculate_error(AngLin6ErrorSq& error_sq, const AngLin6* v, uint32_t N) { error_sq.ang = error_sq.lin = 0.0f; for (uint32_t i = 0; i < N; ++i) { const AngLin6& v_i = v[i]; error_sq.ang += v_i.ang|v_i.ang; error_sq.lin += v_i.lin|v_i.lin; } return error_sq.ang + error_sq.lin; } }; /** * SIMD AngLin6 operations. */ template<> struct AngLin6Ops<__m128> { /** r = x + y */ inline void add(AngLin6& r, const AngLin6& x, const AngLin6& y) { __m256 _x = _mm256_load_ps(&x.ang.x); __m256 _y = _mm256_load_ps(&y.ang.x); __m256 _r = _mm256_add_ps(_x, _y); _mm256_store_ps(&r.ang.x, _r); } /** r = x - y */ inline void sub(AngLin6& r, const AngLin6& x, const AngLin6& y) { __m256 _x = _mm256_load_ps(&x.ang.x); __m256 _y = _mm256_load_ps(&y.ang.x); __m256 _r = _mm256_sub_ps(_x, _y); _mm256_store_ps(&r.ang.x, _r); } /** r = c*x + y */ inline void madd(AngLin6& r, __m128 c, const AngLin6& x, const AngLin6& y) { __m256 _c = _mm256_set_m128(c, c); __m256 _x = _mm256_load_ps(&x.ang.x); __m256 _y = _mm256_load_ps(&y.ang.x); __m256 _r = _mm256_fmadd_ps(_c, _x, _y); _mm256_store_ps(&r.ang.x, _r); } /** r = -c*x + y */ inline void nmadd(AngLin6& r, __m128 c, const AngLin6& x, const AngLin6& y) { __m256 _c = _mm256_set_m128(c, c); __m256 _x = _mm256_load_ps(&x.ang.x); __m256 _y = _mm256_load_ps(&y.ang.x); __m256 _r = _mm256_fnmadd_ps(_c, _x, _y); _mm256_store_ps(&r.ang.x, _r); } /** Vector add */ inline void vadd(AngLin6* r, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) add(*r++, *x++, *y++); } /** Vector sub */ inline void vsub(AngLin6* r, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) sub(*r++, *x++, *y++); } /** Vector madd */ inline void vmadd(AngLin6* r, __m128 c, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) madd(*r++, c, *x++, *y++); } /** Vector nmadd */ inline void vnmadd(AngLin6* r, __m128 c, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) nmadd(*r++, c, *x++, *y++); } /** * Vector-of-vectors dot product. * * \param[in] v Vector of AngLin6, of length N. * \param[in] w Vector of AngLin6, of length N. * \param[in] N Number of elements in v and w. * * return (v|w). 
*/ inline __m128 dot(const AngLin6* v, const AngLin6* w, uint32_t N) { __m256 _res = _mm256_setzero_ps(); for (uint32_t i = 0; i < N; ++i) { __m256 _v = _mm256_load_ps((const float*)(v+i)); __m256 _w = _mm256_load_ps((const float*)(w+i)); _res = _mm256_add_ps(_res, _mm256_dp_ps(_v, _w, 0x7f)); } return _mm_add_ps(_mm256_castps256_ps128(_res), _mm256_extractf128_ps(_res, 1)); } /** * Vector-of-vectors length squared. * * Equivalent to dot(v, v N), but could be faster in some cases * * \param[in] v Vector of AngLin6, of length N. * \param[in] N Number of elements in v. * * return |v|^2. */ inline __m128 length_sq(const AngLin6* v, uint32_t N) { __m256 _res = _mm256_setzero_ps(); for (uint32_t i = 0; i < N; ++i) { __m256 _v = _mm256_load_ps((const float*)(v+i)); _res = _mm256_add_ps(_res, _mm256_dp_ps(_v, _v, 0x7f)); } return _mm_add_ps(_mm256_castps256_ps128(_res), _mm256_extractf128_ps(_res, 1)); } /** * Vector-of-vectors length squared, split into angular and linear contributions. * * \param[out] error_sq Sum of the squared angular and linear parts of v. * \param[in] v Vector of AngLin6, of length N. * \param[in] N Number of elements in v. * * \return the sum of the squared angular and linear errors, error.ang + error.lin. */ inline __m128 calculate_error(AngLin6ErrorSq& error_sq, const AngLin6* v, uint32_t N) { __m256 _res = _mm256_setzero_ps(); for (uint32_t i = 0; i < N; ++i) { __m256 _v = _mm256_load_ps((const float*)(v+i)); _res = _mm256_add_ps(_res, _mm256_dp_ps(_v, _v, 0x7f)); } __m128 _ang_sq = _mm256_castps256_ps128(_res); __m128 _lin_sq = _mm256_extractf128_ps(_res, 1); _mm_store_ss(&error_sq.ang, _ang_sq); _mm_store_ss(&error_sq.lin, _lin_sq); return _mm_add_ps(_ang_sq, _lin_sq); } };
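// --- Illustrative usage sketch (not part of the original anglin6.h) ----------
// Exercises the scalar (SISD) AngLin6Ops path on two-element vectors; all
// values are arbitrary.
#include "anglin6.h"

void exampleAngLin6Ops()
{
    AngLin6 a[2] = {}, b[2] = {}, r[2];
    a[0].lin = { 1.0f, 0.0f, 0.0f };
    b[0].lin = { 0.0f, 2.0f, 0.0f };
    a[1].ang = { 0.0f, 0.0f, 3.0f };
    b[1].ang = { 0.0f, 0.0f, 1.0f };

    AngLin6Ops<float> ops;
    ops.vmadd(r, 2.0f, a, b, 2);                 // r = 2*a + b, element-wise

    const float d  = ops.dot(a, b, 2);           // (0 + 0) + (3*1 + 0) = 3
    const float l2 = ops.length_sq(a, 2);        // 1 + 9 = 10

    AngLin6ErrorSq errSq;
    const float total = ops.calculate_error(errSq, a, 2);   // errSq.ang = 9, errSq.lin = 1
    (void)d; (void)l2; (void)total;
}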
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/stress.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "stress.h" #include "math/cgnr.h" #include "simd/simd_device_query.h" #include <algorithm> #include <cmath> #define MASS_AND_LENGTH_SCALING 1 typedef CGNR<AngLin6, AngLin6Ops<Float_Scalar>, BondMatrixS, BondMatrixOpsS<Float_Scalar>, Float_Scalar, AngLin6ErrorSq> CGNR_SISD; typedef CGNR<AngLin6, AngLin6Ops<SIMD_Scalar>, BondMatrixS, BondMatrixOpsS<SIMD_Scalar>, SIMD_Scalar, AngLin6ErrorSq> CGNR_SIMD; /** * StressProcessor static members */ // Check for SSE, FMA3, and AVX support const bool StressProcessor::s_use_simd = device_supports_instruction_set(InstructionSet::SSE) && // Basic SSE device_supports_instruction_set(InstructionSet::FMA3) && // Fused Multiply-Add instructions device_supports_instruction_set(InstructionSet::OSXSAVE) && // OS uses XSAVE and XRSTORE instructions allowing saving YMM registers on context switch device_supports_instruction_set(InstructionSet::AVX) && // Advanced Vector Extensions (256 bit operations) os_supports_avx_restore(); // OS has enabled the required extended state for AVX /** * StressProcessor methods */ void StressProcessor::prepare(const SolverNodeS* nodes, uint32_t N_nodes, const SolverBond* bonds, uint32_t N_bonds, const DataParams& params) { m_recip_sqrt_I.resize(N_nodes); m_couplings.resize(N_bonds); m_rhs.resize(N_nodes); m_B_scratch.resize(N_nodes); m_solver_cache.resize(s_use_simd ? CGNR_SIMD().required_cache_size(N_nodes, N_bonds) : CGNR_SISD().required_cache_size(N_nodes, N_bonds)); m_can_resume = false; // Calculate bond offsets and length scale uint32_t offsets_to_scale = 0; m_length_scale = 0.0f; for (uint32_t i = 0; i < N_bonds; ++i) { const SolverBond& bond = bonds[i]; const uint32_t b0 = bond.nodes[0]; const uint32_t b1 = bond.nodes[1]; Coupling& c = m_couplings[i]; NvcVec3 offset0, offset1; if (!params.centerBonds) { offset0 = nodes[b0].mass > 0 ? bond.centroid - nodes[b0].CoM : nodes[b1].CoM - bond.centroid; offset1 = nodes[b1].mass > 0 ? 
bond.centroid - nodes[b1].CoM : nodes[b0].CoM - bond.centroid; } else { if (nodes[b0].mass <= 0) { offset1 = bond.centroid - nodes[b1].CoM; offset0 = -offset1; } else if (nodes[b1].mass <= 0) { offset0 = bond.centroid - nodes[b0].CoM; offset1 = -offset0; } else { offset0 = 0.5f*(nodes[b1].CoM - nodes[b0].CoM); offset1 = -offset0; } } if (nodes[b0].mass > 0.0f) { ++offsets_to_scale; m_length_scale += std::sqrt(offset0|offset0); } if (nodes[b1].mass > 0.0f) { ++offsets_to_scale; m_length_scale += std::sqrt(offset1|offset1); } c.offset0 = offset0; c.node0 = bond.nodes[0]; c.offset1 = offset1; c.node1 = bond.nodes[1]; } #if MASS_AND_LENGTH_SCALING m_length_scale = offsets_to_scale ? m_length_scale / offsets_to_scale : 1.0f; #else m_length_scale = 1.0f; #endif // Scale offsets by length scale const float recip_length_scale = 1.0f/m_length_scale; for (uint32_t j = 0; j < N_bonds; ++j) { Coupling& coupling = m_couplings[j]; coupling.offset0 *= recip_length_scale; coupling.offset1 *= recip_length_scale; } // Set mass scale to geometric mean of the masses m_mass_scale = 0.0f; uint32_t nonzero_mass_count = 0; for (uint32_t i = 0; i < N_nodes; ++i) { if (nodes[i].mass > 0.0f) { m_mass_scale += std::log(nodes[i].mass); ++nonzero_mass_count; } } #if MASS_AND_LENGTH_SCALING m_mass_scale = nonzero_mass_count ? std::exp(m_mass_scale / nonzero_mass_count) : 1.0f; #else m_mass_scale = 1.0f; #endif // Generate I^-1/2 std::vector<InertiaS> invI(N_nodes); const float inertia_scale = m_mass_scale*m_length_scale*m_length_scale; if (!params.equalizeMasses) { for (uint32_t i = 0; i < N_nodes; ++i) { invI[i] = { nodes[i].inertia > 0.0f ? inertia_scale/nodes[i].inertia : 0.0f, nodes[i].mass > 0.0f ? m_mass_scale/nodes[i].mass : 0.0f }; m_recip_sqrt_I[i] = { std::sqrt(invI[i].I), std::sqrt(invI[i].m) }; } } else { for (uint32_t i = 0; i < N_nodes; ++i) { invI[i] = { nodes[i].inertia > 0.0f ? 1.0f : 0.0f, nodes[i].mass > 0.0f ? 1.0f : 0.0f }; m_recip_sqrt_I[i] = { std::sqrt(invI[i].I), std::sqrt(invI[i].m) }; } } // Create sparse matrix representation for B = (I^-1/2)*C m_B.set(m_couplings.data(), m_recip_sqrt_I.data(), m_B_scratch.data(), N_nodes, N_bonds); } int StressProcessor::solve(AngLin6* impulses, const AngLin6* velocities, const SolverParams& params, AngLin6ErrorSq* error_sq /* = nullptr */, bool resume /* = false */) { const InertiaS* sqrt_I_inv = m_recip_sqrt_I.data(); const uint32_t N_nodes = getNodeCount(); const uint32_t N_bonds = getBondCount(); void* cache = m_solver_cache.data(); const float recip_length_scale = 1.0f/m_length_scale; // Apply length and mass scaling to impulses if warm-starting if (params.warmStart) { const float recip_mass_scale = 1.0f/m_mass_scale; const float recip_linear_impulse_scale = recip_length_scale*recip_mass_scale; const float recip_angular_impulse_scale = recip_length_scale*recip_linear_impulse_scale; for (uint32_t j = 0; j < N_bonds; ++j) { impulses[j].ang *= recip_angular_impulse_scale; impulses[j].lin *= recip_linear_impulse_scale; } } // Calculate r.h.s. vector b = -(I^1/2)*velocities AngLin6* b = m_rhs.data(); for (uint32_t i = 0; i < N_nodes; ++i) { const InertiaS& I_i = sqrt_I_inv[i]; const AngLin6& v_i = velocities[i]; AngLin6& b_i = b[i]; b_i.ang = v_i.ang/(-(I_i.I > 0 ? I_i.I : 1.0f)); b_i.lin = (-recip_length_scale/(I_i.m > 0 ? I_i.m : 1.0f))*v_i.lin; } // Solve B*J = b for J, where B = (I^-1/2)*C and b = -(I^1/2)*v. 
// Since CGNR does this by solving (B^T)*B*J = (B^T)*b, this actually solves // (C^T)*(I^-1)*C*J = -(C^T)*v for J, which is the equation we really wanted to solve. const uint32_t maxIter = params.maxIter ? params.maxIter : 6*std::max(N_nodes, N_bonds); // Set solver warmth const unsigned warmth = params.warmStart ? (m_can_resume && resume ? 2 : 1) : 0; // Choose solver based on parameters const int result = s_use_simd ? CGNR_SIMD().solve(impulses, m_B, b, N_nodes, N_bonds, cache, error_sq, params.tolerance, maxIter, warmth) : CGNR_SISD().solve(impulses, m_B, b, N_nodes, N_bonds, cache, error_sq, params.tolerance, maxIter, warmth); // Undo length and mass scaling const float linear_impulse_scale = m_length_scale*m_mass_scale; const float angular_impulse_scale = m_length_scale*linear_impulse_scale; for (uint32_t j = 0; j < N_bonds; ++j) { impulses[j].ang *= angular_impulse_scale; impulses[j].lin *= linear_impulse_scale; } m_can_resume = true; return result; } bool StressProcessor::removeBond(uint32_t bondIndex) { if (bondIndex >= getBondCount()) return false; m_couplings[bondIndex] = m_couplings.back(); m_couplings.pop_back(); --m_B.N; m_can_resume = false; return true; }
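// --- Scaling summary (restating the conditioning scheme used above) ----------
// With MASS_AND_LENGTH_SCALING enabled, prepare() computes
//
//     m_length_scale = mean |offset|        (over offsets of dynamic nodes)
//     m_mass_scale   = exp(mean log(mass))  (geometric mean of dynamic masses)
//
// then divides the bond offsets by m_length_scale and multiplies the inverse
// inertias by m_mass_scale*m_length_scale^2 (inverse masses by m_mass_scale),
// so the solver iterates on a normalized system. solve() maps the result back
// with
//
//     linear impulse  *= m_length_scale * m_mass_scale
//     angular impulse *= m_length_scale^2 * m_mass_scale
//
// which is the inverse of the scaling applied to warm-start impulses on entry.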
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/math/cgnr.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include <stdint.h> #include <cstring> // for memcpy, memset #include "simd/simd.h" template<typename Elem, typename ElemOps, typename Mat, typename MatOps, typename Scalar = float, typename Error = float> struct CGNR { /** * Conjugate Gradient Normal Equation Residual (CGNR) solver for systems of M equations and N unknowns. * * Based on Matrix Computations (4th ed.) by Golub and Van Loan, section 11.3.9. * * Solves A*x = b. * * Template arguments: * Elem: the type of element used in the vectors x and b, and (implicitly) in the matrix A. * * ElemOps: a class which defines various functions on Elem type and vectors of Elem type. * * Mat: the explicit type used to represent the matrix, allowing e.g. for sparse representations. * * MatOps: a class which defines the functions rmul and lmul, which multiply a matrix of type Mat * by an Elem-typed vector on the right and left, respectively. The function signatures must be: * * void rmul(Elem* y, const Mat& A, const Elem* x, uint32_t M, uint32_t N); // y = A*x * void lmul(Elem* y, const Elem* x, const Mat& A, uint32_t M, uint32_t N); // y = x*A * * Scalar: set to float by default. May be used to keep all operations in a particular representation, e.g. SIMD registers. * * \param[out] x User-supplied Elem vector of length N, filled with the solution upon exit (if successful). * \param[in] A System M x N matrix of type Mat. * \param[in] b Right hand side of equation to be solved, an Elem vector of length M. * \param[in] M The number of rows in A and elements in b. * \param[in] N The number of columns in A and elements in x. * \param[in] cache Cache memory provided by the user, must be at least required_cache_size(M, N) bytes, and sizeof(Elem)-byte aligned. * \param[out] error_ptr If not null, returns the square magnitude error calculated from residual. * \param[in] tol (Optional) relative convergence threshold for |(A^T)*(A*x-b)|/|b|. Default value is 10^-6. 
* \param[in] max_it (Optional) the maximum number of internal iterations. If set to 0, the maximum is N. Default value is 0. * \param[in] warmth (Optional) valid values are 0, 1, and 2. 0 => cold, clears the x vector and ignores the cache. * 1 => warm, uses the x vector as a starting solution, but still ignores the cache. 2 => hot, uses the x * vector as a starting solution, and the cache must be valid. Default value is 0. * N.B. if warmth == 2, then this function must have been called previously, and the equation values * (x, A, b, M, and N) as well as the cache must not have been changed since the last call. * * return the number of iterations taken to converge, if it converges. Otherwise, returns minus the number of iterations before exiting. */ int solve ( Elem* x, const Mat& A, const Elem* b, uint32_t M, uint32_t N, void* cache, Error* error_ptr = nullptr, float tol = 1.e-6f, uint32_t max_it = 0, unsigned warmth = 0 ) { // Cache and temporary storage static_assert(sizeof(Elem) >= sizeof(Scalar), "sizeof(Elem) must be at least as great as sizeof(Scalar)."); float* z_last_sq_mem = (float*)cache; cache = (Elem*)z_last_sq_mem + 1; // Elem-sized storage float* delta_sq_mem = (float*)cache; cache = (Elem*)delta_sq_mem + 1; // Elem-sized storage Elem* z = (Elem*)cache; cache = z + N; // Array of length N Elem* p = (Elem*)cache; cache = p + N; // Array of length N Elem* r = (Elem*)cache; cache = r + M; // Array of length M Elem* s = (Elem*)cache; // Array of length M Scalar z_last_sq, delta_sq; load_float(z_last_sq, z_last_sq_mem); load_float(delta_sq, delta_sq_mem); if (warmth < 2) // Not hot { delta_sq = mul(tol*tol, ElemOps().length_sq(b, M)); // Calculate allowed residual length squared and cache it store_float(delta_sq_mem, delta_sq); memcpy(r, b, sizeof(Elem)*M); // Initialize residual r = b if (warmth) // Warm start, r = b - A*x { MatOps().rmul(s, A, x, M, N); ElemOps().vsub(r, r, s, M); } else memset(x, 0, sizeof(Elem)*N); // Cold start, x = 0 so r = b warmth = 0; // This lets p be initialized in the loop below } Error error; // Iterate if (!max_it) max_it = N; // Default to a maximum of N iterations uint32_t it = 0; do { MatOps().lmul(z, r, A, M, N); // Set z = (A^T)*r const Scalar z_sq = ElemOps().calculate_error(error, z, N); // Calculate residual (of modified equation) length squared if (le(z_sq, delta_sq)) break; // Terminate (convergence) if within tolerance if (warmth || warmth++) ElemOps().vmadd(p, div(z_sq, z_last_sq), p, z, N); // If not cold set p = z + (|z|^2/|z_last|^2)*p, and make warm hereafter else memcpy(p, z, sizeof(Elem)*N); // If cold set p = z z_last_sq = z_sq; MatOps().rmul(s, A, p, M, N); // Calculate s = A*p const Scalar mu = div(z_sq, ElemOps().length_sq(s, M)); // mu = |z|^2 / |A*p|^2 ElemOps().vmadd(x, mu, p, x, N); // x += mu*p ElemOps().vnmadd(r, mu, s, r, M); // r -= mu*s } while (++it < max_it); // Store off remainder of state (the rest was maintained in memory with array operations) store_float(z_last_sq_mem, z_last_sq); // Store off the error if requested if (error_ptr) *error_ptr = error; // Return the number of iterations used if successful. Otherwise return minus the number of iterations performed return it < max_it ? (int)it : -(int)it; } /** * \param[in] M See solve(...) for a description. * \param[in] N See solve(...) for a description. * * \return the required cache size (in bytes) for the given values of M and N. */ size_t required_cache_size(uint32_t M, uint32_t N) { return 2*(M+N+1)*sizeof(Elem); } };
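A minimal, hypothetical instantiation of the CGNR template above, assuming Elem = float and a dense row-major matrix. DenseMat, DenseElemOps, DenseMatOps, and cgnr_example are illustration-only names (the stress solver's real policies are SIMD-based); their signatures are inferred from how solve() calls them, so treat this as a sketch rather than the SDK's implementation.

// Hypothetical sketch: dense, scalar policies for the CGNR solver above.
#include <vector>
#include <cstdio>

struct DenseMat { const float* a; };   // row-major M x N storage (assumed layout)

struct DenseElemOps
{
    float length_sq(const float* v, uint32_t n) const
    { float s = 0.0f; for (uint32_t i = 0; i < n; ++i) s += v[i]*v[i]; return s; }
    float calculate_error(float& err, const float* v, uint32_t n) const
    { err = length_sq(v, n); return err; }                           // error = |v|^2
    void vsub(float* d, const float* a, const float* b, uint32_t n) const
    { for (uint32_t i = 0; i < n; ++i) d[i] = a[i] - b[i]; }         // d = a - b
    void vmadd(float* d, float c, const float* a, const float* b, uint32_t n) const
    { for (uint32_t i = 0; i < n; ++i) d[i] = c*a[i] + b[i]; }       // d = c*a + b
    void vnmadd(float* d, float c, const float* a, const float* b, uint32_t n) const
    { for (uint32_t i = 0; i < n; ++i) d[i] = b[i] - c*a[i]; }       // d = b - c*a
};

struct DenseMatOps
{
    void rmul(float* y, const DenseMat& A, const float* x, uint32_t M, uint32_t N) const
    {   // y = A*x
        for (uint32_t i = 0; i < M; ++i)
        { float s = 0.0f; for (uint32_t j = 0; j < N; ++j) s += A.a[i*N + j]*x[j]; y[i] = s; }
    }
    void lmul(float* y, const float* x, const DenseMat& A, uint32_t M, uint32_t N) const
    {   // y = x*A, i.e. (A^T)*x
        for (uint32_t j = 0; j < N; ++j)
        { float s = 0.0f; for (uint32_t i = 0; i < M; ++i) s += x[i]*A.a[i*N + j]; y[j] = s; }
    }
};

inline void cgnr_example()
{
    const uint32_t M = 3, N = 2;
    const float a[M*N] = { 1, 0,   0, 1,   1, 1 };  // over-determined 3x2 system
    const float b[M]   = { 1, 2, 3 };               // least-squares solution is x = (1, 2)
    DenseMat A = { a };
    float x[N];

    CGNR<float, DenseElemOps, DenseMat, DenseMatOps> solver;
    std::vector<float> cache(solver.required_cache_size(M, N) / sizeof(float)); // user-provided cache
    float err = 0.0f;
    int iters = solver.solve(x, A, b, M, N, cache.data(), &err);    // cold start by default
    std::printf("iters=%d x=(%f, %f) err=%g\n", iters, x[0], x[1], err);
}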
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/simd/simd.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include <xmmintrin.h> #include <emmintrin.h> #include <immintrin.h> #if defined(__GNUC__) // missing with gcc #define _mm256_set_m128(vh, vl) _mm256_insertf128_ps(_mm256_castps128_ps256(vl), (vh), 1) #endif #define SIMD_ALIGN_16(code) NV_ALIGN_PREFIX(16) code NV_ALIGN_SUFFIX(16) #define SIMD_ALIGN_32(code) NV_ALIGN_PREFIX(32) code NV_ALIGN_SUFFIX(32) inline __m128 add(const __m128& a, const __m128& b) { return _mm_add_ps(a, b); } inline __m128 add(float a, const __m128& b) { return _mm_add_ps(_mm_load1_ps(&a), b); } inline __m128 add(const __m128& a, float b) { return _mm_add_ps(a, _mm_load1_ps(&b)); } inline float add(float a, float b) { return a + b; } inline __m128 sub(const __m128& a, const __m128& b) { return _mm_sub_ps(a, b); } inline __m128 sub(float a, const __m128& b) { return _mm_sub_ps(_mm_load1_ps(&a), b); } inline __m128 sub(const __m128& a, float b) { return _mm_sub_ps(a, _mm_load1_ps(&b)); } inline float sub(float a, float b) { return a - b; } inline __m128 mul(const __m128& a, const __m128& b) { return _mm_mul_ps(a, b); } inline __m128 mul(float a, const __m128& b) { return _mm_mul_ps(_mm_load1_ps(&a), b); } inline __m128 mul(const __m128& a, float b) { return _mm_mul_ps(a, _mm_load1_ps(&b)); } inline float mul(float a, float b) { return a * b; } inline __m128 div(const __m128& a, const __m128& b) { return _mm_div_ps(a, b); } inline __m128 div(float a, const __m128& b) { return _mm_div_ps(_mm_load1_ps(&a), b); } inline __m128 div(const __m128& a, float b) { return _mm_div_ps(a, _mm_load1_ps(&b)); } inline float div(float a, float b) { return a / b; } inline bool lt(const __m128& a, const __m128& b) { return !!_mm_comilt_ss(a, b); } inline bool gt(const __m128& a, const __m128& b) { return !!_mm_comigt_ss(a, b); } inline bool le(const __m128& a, const __m128& b) { return !!_mm_comile_ss(a, b); } inline bool ge(const __m128& a, const __m128& b) { return !!_mm_comige_ss(a, b); } inline bool eq(const __m128& a, const __m128& b) { return 
!!_mm_comieq_ss(a, b); } inline bool ne(const __m128& a, const __m128& b) { return !!_mm_comineq_ss(a, b); } inline bool lt(const float a, const float b) { return a < b; } inline bool gt(const float a, const float b) { return a > b; } inline bool le(const float a, const float b) { return a <= b; } inline bool ge(const float a, const float b) { return a >= b; } inline bool eq(const float a, const float b) { return a == b; } inline bool ne(const float a, const float b) { return a != b; } inline float to_float(const __m128& x) { float f; _mm_store_ss(&f, x); return f; } inline float to_float(float x) { return x; } inline void from_float(__m128& x, float y) { x = _mm_load1_ps(&y); } inline void from_float(float& x, float y) { x = y; } inline void set_zero(__m128& x) { x = _mm_setzero_ps(); } inline void set_zero(float& x) { x = 0.0f; } inline void store_float(float* mem, const __m128& f) { _mm_store_ps(mem, f); } inline void store_float(float* mem, float f) { *mem = f; } inline void load_float(__m128& f, const float* mem) { f = _mm_load_ps(mem); } inline void load_float(float& f, const float* mem) { f = *mem; } inline __m128 prep_cross3(const __m128& v) { return _mm_shuffle_ps(v, v, 0xc9); } // w z y x -> w x z y inline __m128 cross3(const __m128& v0, const __m128& v1) { __m128 prep0 = prep_cross3(v0); __m128 prep1 = prep_cross3(v1); __m128 res_shuffled = _mm_sub_ps(_mm_mul_ps(v0, prep1), _mm_mul_ps(prep0, v1)); return _mm_shuffle_ps(res_shuffled, res_shuffled, 0xc9); } inline __m128 cross3_prep0(const __m128& v0, const __m128& prep0, const __m128& v1) { __m128 prep1 = prep_cross3(v1); __m128 res_shuffled = _mm_sub_ps(_mm_mul_ps(v0, prep1), _mm_mul_ps(prep0, v1)); return _mm_shuffle_ps(res_shuffled, res_shuffled, 0xc9); } inline __m128 cross3_prep1(const __m128& v0, const __m128& v1, const __m128& prep1) { __m128 prep0 = prep_cross3(v0); __m128 res_shuffled = _mm_sub_ps(_mm_mul_ps(v0, prep1), _mm_mul_ps(prep0, v1)); return _mm_shuffle_ps(res_shuffled, res_shuffled, 0xc9); }
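A hypothetical usage sketch (not part of the original header) exercising the overloaded scalar/__m128 helpers and the cross3 routine above. It assumes vectors store x, y, z in the low three lanes, which is what prep_cross3's shuffle expects.

#include <cstdio>

inline void simd_helpers_example()
{
    __m128 a = _mm_set_ps(0.0f, 0.0f, 0.0f, 1.0f);   // (x=1, y=0, z=0, w=0)
    __m128 b = _mm_set_ps(0.0f, 0.0f, 1.0f, 0.0f);   // (x=0, y=1, z=0, w=0)

    __m128 c = cross3(a, b);                          // expected (0, 0, 1)
    float out[4];
    _mm_storeu_ps(out, c);
    std::printf("cross = (%g, %g, %g)\n", out[0], out[1], out[2]);

    // The same generic names also have plain-float overloads, so templated code
    // (such as the CGNR solver) can be instantiated with either representation.
    float s = mul(add(2.0f, 3.0f), 4.0f);             // (2 + 3) * 4 = 20
    std::printf("scalar path = %g\n", s);
}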
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/simd/simd_device_query.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include <utility> #include <stdio.h> inline static constexpr uint32_t instSetCode(uint8_t fn, uint8_t bitset, uint8_t bit) { return (uint32_t)fn << 16 | (uint32_t)bitset << 8 | (uint32_t)bit; } inline static void extractInstSetBitsetAndBit(int& fn, int& bitset, int& bit, uint32_t code) { fn = (int)(code >> 16); bitset = (int)(code >> 8)&0xff; bit = (int)(code & 0xff); } struct InstructionSet { enum Enum { MMX = instSetCode(1, 3, 23), SSE = instSetCode(1, 3, 25), SSE2 = instSetCode(1, 3, 26), SSE3 = instSetCode(1, 2, 0), SSSE3 = instSetCode(1, 2, 9), SSE4_1 = instSetCode(1, 2, 19), SSE4_2 = instSetCode(1, 2, 20), OSXSAVE = instSetCode(1, 2, 27), AVX = instSetCode(1, 2, 28), AVX2 = instSetCode(7, 1, 5), FMA3 = instSetCode(1, 2, 12), AVX512F = instSetCode(7, 1, 16), AVX512PF = instSetCode(7, 1, 26), AVX512ER = instSetCode(7, 1, 27), AVX512CD = instSetCode(7, 1, 28) }; }; #define InstructionSetEntry(_name) { #_name, InstructionSet::_name } constexpr std::pair<const char*, uint32_t> sInstructionSetLookup[] = { InstructionSetEntry(MMX), InstructionSetEntry(SSE), InstructionSetEntry(SSE2), InstructionSetEntry(SSE3), InstructionSetEntry(SSSE3), InstructionSetEntry(SSE4_1), InstructionSetEntry(SSE4_2), InstructionSetEntry(OSXSAVE), InstructionSetEntry(AVX), InstructionSetEntry(AVX2), InstructionSetEntry(FMA3), InstructionSetEntry(AVX512F), InstructionSetEntry(AVX512PF), InstructionSetEntry(AVX512ER), InstructionSetEntry(AVX512CD), }; #if NV_WINDOWS_FAMILY #include <intrin.h> // for __cpuidex inline void cpuid(int cpui[4], int fn) { __cpuidex(cpui, fn, 0); } inline bool os_supports_avx_restore() { return ((uint32_t)_xgetbv(0) & 6) == 6; } #else #include <cpuid.h> // for __cpuid_count inline void cpuid(int cpui[4], int fn) { __cpuid_count(fn, 0, cpui[0], cpui[1], cpui[2], cpui[3]); } inline bool os_supports_avx_restore() { uint32_t xcr0; __asm__("xgetbv" : "=a" (xcr0) : "c" (0) : "%edx"); return (xcr0 & 6) == 6; } #endif static bool device_supports_instruction_set(uint32_t 
inst_set) { int fn, bitset, bit; extractInstSetBitsetAndBit(fn, bitset, bit, inst_set); int cpui[4]; cpuid(cpui, 0); if (cpui[0] < fn) return false; cpuid(cpui, fn); return !!((cpui[bitset] >> bit) & 1); } static void print_supported_instruction_sets() { printf("Supported instruction sets:\n"); for (std::pair<const char*, uint32_t> entry : sInstructionSetLookup) { printf("%s: %s\n", entry.first, device_supports_instruction_set(entry.second) ? "yes" : "no"); } }
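A hypothetical dispatch sketch (not part of the original header) showing how the CPUID queries above might be combined: AVX use requires the CPU feature bit, OSXSAVE, and OS support for saving the YMM registers. choose_solver_width is an illustration-only name.

#include <cstdio>

inline int choose_solver_width()
{
    // AVX needs both the CPU flag and OS-level state save/restore support.
    const bool avx  = device_supports_instruction_set(InstructionSet::AVX) &&
                      device_supports_instruction_set(InstructionSet::OSXSAVE) &&
                      os_supports_avx_restore();
    const bool sse2 = device_supports_instruction_set(InstructionSet::SSE2);

    print_supported_instruction_sets();   // diagnostic dump of every entry in sInstructionSetLookup
    return avx ? 8 : (sse2 ? 4 : 1);      // float lanes per register for the chosen path
}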
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFileBuffer/include/NsMemoryBuffer.h
/* * Copyright 2009-2011 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. 
*/ #ifndef NS_MEMORY_BUFFER_H #define NS_MEMORY_BUFFER_H #include "Ns.h" #include "NsUserAllocated.h" #include "NsAlignedMalloc.h" #include "NvFileBuf.h" #include "NvAssert.h" namespace nvidia { namespace general_NvIOStream2 { using namespace shdfnd; const uint32_t BUFFER_SIZE_DEFAULT = 4096; //Use this class if you want to use your own allocator template<class Allocator> class NvMemoryBufferBase : public NvFileBuf, public Allocator { NV_NOCOPY(NvMemoryBufferBase) void init(const void *readMem, uint32_t readLen) { mAllocator = this; mReadBuffer = mReadLoc = static_cast<const uint8_t *>(readMem); mReadStop = &mReadLoc[readLen]; mWriteBuffer = mWriteLoc = mWriteStop = NULL; mWriteBufferSize = 0; mDefaultWriteBufferSize = BUFFER_SIZE_DEFAULT; mOpenMode = OPEN_READ_ONLY; mSeekType = SEEKABLE_READ; } void init(uint32_t defaultWriteBufferSize) { mAllocator = this; mReadBuffer = mReadLoc = mReadStop = NULL; mWriteBuffer = mWriteLoc = mWriteStop = NULL; mWriteBufferSize = 0; mDefaultWriteBufferSize = defaultWriteBufferSize; mOpenMode = OPEN_READ_WRITE_NEW; mSeekType = SEEKABLE_READWRITE; } public: NvMemoryBufferBase(const void *readMem,uint32_t readLen) { init(readMem, readLen); } NvMemoryBufferBase(const void *readMem,uint32_t readLen, const Allocator &alloc): Allocator(alloc) { init(readMem, readLen); } NvMemoryBufferBase(uint32_t defaultWriteBufferSize = BUFFER_SIZE_DEFAULT) { init(defaultWriteBufferSize); } NvMemoryBufferBase(uint32_t defaultWriteBufferSize, const Allocator &alloc): Allocator(alloc) { init(defaultWriteBufferSize); } virtual ~NvMemoryBufferBase(void) { reset(); } void setAllocator(Allocator *allocator) { mAllocator = allocator; } void initWriteBuffer(uint32_t size) { if ( mWriteBuffer == NULL ) { if ( size < mDefaultWriteBufferSize ) size = mDefaultWriteBufferSize; mWriteBuffer = static_cast<uint8_t *>(mAllocator->allocate(size)); NV_ASSERT( mWriteBuffer ); mWriteLoc = mWriteBuffer; mWriteStop = &mWriteBuffer[size]; mWriteBufferSize = size; mReadBuffer = mWriteBuffer; mReadStop = &mWriteBuffer[size]; mReadLoc = mWriteBuffer; } } void reset(void) { mAllocator->deallocate(mWriteBuffer); mWriteBuffer = NULL; mWriteBufferSize = 0; mWriteLoc = NULL; mWriteStop = NULL; mReadBuffer = NULL; mReadStop = NULL; mReadLoc = NULL; } virtual OpenMode getOpenMode(void) const { return mOpenMode; } SeekType isSeekable(void) const { return mSeekType; } virtual uint32_t read(void* buffer, uint32_t size) { if ( (mReadLoc+size) > mReadStop ) { size = uint32_t(mReadStop - mReadLoc); } if ( size != 0 ) { memmove(buffer,mReadLoc,size); mReadLoc+=size; } return size; } virtual uint32_t peek(void* buffer, uint32_t size) { if ( (mReadLoc+size) > mReadStop ) { size = uint32_t(mReadStop - mReadLoc); } if ( size != 0 ) { memmove(buffer,mReadLoc,size); } return size; } virtual uint32_t write(const void* buffer, uint32_t size) { NV_ASSERT( mOpenMode == OPEN_READ_WRITE_NEW ); if ( mOpenMode == OPEN_READ_WRITE_NEW ) { if ( (mWriteLoc+size) > mWriteStop ) growWriteBuffer(size); memmove(mWriteLoc,buffer,size); mWriteLoc+=size; mReadStop = mWriteLoc; } else { size = 0; } return size; } NV_INLINE const uint8_t * getReadLoc(void) const { return mReadLoc; } NV_INLINE void advanceReadLoc(uint32_t len) { NV_ASSERT(mReadBuffer); if ( mReadBuffer ) { mReadLoc+=len; if ( mReadLoc >= mReadStop ) { mReadLoc = mReadStop; } } } virtual uint32_t tellRead(void) const { uint32_t ret=0; if ( mReadBuffer ) { ret = uint32_t(mReadLoc-mReadBuffer); } return ret; } virtual uint32_t tellWrite(void) const { return 
uint32_t(mWriteLoc-mWriteBuffer); } virtual uint32_t seekRead(uint32_t loc) { uint32_t ret = 0; NV_ASSERT(mReadBuffer); if ( mReadBuffer ) { mReadLoc = &mReadBuffer[loc]; if ( mReadLoc >= mReadStop ) { mReadLoc = mReadStop; } ret = uint32_t(mReadLoc-mReadBuffer); } return ret; } virtual uint32_t seekWrite(uint32_t loc) { uint32_t ret = 0; NV_ASSERT( mOpenMode == OPEN_READ_WRITE_NEW ); if ( mWriteBuffer ) { if ( loc > mWriteBufferSize ) { mWriteLoc = mWriteStop; growWriteBuffer(loc - mWriteBufferSize); } mWriteLoc = &mWriteBuffer[loc]; ret = uint32_t(mWriteLoc-mWriteBuffer); } return ret; } virtual void flush(void) { } virtual uint32_t getFileLength(void) const { uint32_t ret = 0; if ( mReadBuffer ) { ret = uint32_t(mReadStop-mReadBuffer); } else if ( mWriteBuffer ) { ret = uint32_t(mWriteLoc-mWriteBuffer); } return ret; } uint32_t getWriteBufferSize(void) const { return uint32_t(mWriteLoc-mWriteBuffer); } void setWriteLoc(uint8_t *writeLoc) { NV_ASSERT(writeLoc >= mWriteBuffer && writeLoc < mWriteStop ); mWriteLoc = writeLoc; mReadStop = mWriteLoc; } const uint8_t * getWriteBuffer(void) const { return mWriteBuffer; } /** * Attention: if you use aligned allocator you cannot free memory with NV_FREE macros instead use deallocate method from base */ uint8_t * getWriteBufferOwnership(uint32_t &dataLen) // return the write buffer, and zero it out, the caller is taking ownership of the memory { uint8_t *ret = mWriteBuffer; dataLen = uint32_t(mWriteLoc-mWriteBuffer); mWriteBuffer = NULL; mWriteLoc = NULL; mWriteStop = NULL; mWriteBufferSize = 0; return ret; } void alignRead(uint32_t a) { uint32_t loc = tellRead(); uint32_t aloc = ((loc+(a-1))/a)*a; if ( aloc != loc ) { seekRead(aloc); } } void alignWrite(uint32_t a) { uint32_t loc = tellWrite(); uint32_t aloc = ((loc+(a-1))/a)*a; if ( aloc != loc ) { seekWrite(aloc); } } private: // double the size of the write buffer or at least as large as the 'size' value passed in. 
void growWriteBuffer(uint32_t size) { if ( mWriteBuffer == NULL ) { if ( size < mDefaultWriteBufferSize ) size = mDefaultWriteBufferSize; initWriteBuffer(size); } else { uint32_t oldWriteIndex = uint32_t(mWriteLoc - mWriteBuffer); uint32_t newSize = mWriteBufferSize*2; uint32_t avail = newSize-oldWriteIndex; if ( size >= avail ) newSize = newSize+size; uint8_t *writeBuffer = static_cast<uint8_t *>(mAllocator->allocate(newSize)); NV_ASSERT( writeBuffer ); memmove(writeBuffer,mWriteBuffer,mWriteBufferSize); mAllocator->deallocate(mWriteBuffer); mWriteBuffer = writeBuffer; mWriteBufferSize = newSize; mWriteLoc = &mWriteBuffer[oldWriteIndex]; mWriteStop = &mWriteBuffer[mWriteBufferSize]; uint32_t oldReadLoc = uint32_t(mReadLoc-mReadBuffer); mReadBuffer = mWriteBuffer; mReadStop = mWriteLoc; mReadLoc = &mReadBuffer[oldReadLoc]; } } const uint8_t *mReadBuffer; const uint8_t *mReadLoc; const uint8_t *mReadStop; uint8_t *mWriteBuffer; uint8_t *mWriteLoc; uint8_t *mWriteStop; uint32_t mWriteBufferSize; uint32_t mDefaultWriteBufferSize; Allocator *mAllocator; OpenMode mOpenMode; SeekType mSeekType; }; class NvMemoryBufferAllocator { public: NvMemoryBufferAllocator(uint32_t a = 0) : alignment(a) {} virtual void * allocate(uint32_t size) { switch(alignment) { case 0: return NV_ALLOC(size, NV_DEBUG_EXP("NvMemoryBufferAllocator")); case 16 : return nvidia::AlignedAllocator<16>().allocate(size, __FILE__, __LINE__); case 32 : return nvidia::AlignedAllocator<32>().allocate(size, __FILE__, __LINE__); case 64 : return nvidia::AlignedAllocator<64>().allocate(size, __FILE__, __LINE__); case 128 : return nvidia::AlignedAllocator<128>().allocate(size, __FILE__, __LINE__); default : NV_ASSERT(0); } return NULL; } virtual void deallocate(void *mem) { switch(alignment) { case 0: NV_FREE(mem); break; case 16 : nvidia::AlignedAllocator<16>().deallocate(mem); break; case 32 : nvidia::AlignedAllocator<32>().deallocate(mem); break; case 64 : nvidia::AlignedAllocator<64>().deallocate(mem); break; case 128 : nvidia::AlignedAllocator<128>().deallocate(mem); break; default : NV_ASSERT(0); } } virtual ~NvMemoryBufferAllocator(void) {} private: NvMemoryBufferAllocator& operator=(const NvMemoryBufferAllocator&); const uint32_t alignment; }; //Use this class if you want to use PhysX memory allocator class NsMemoryBuffer: public NvMemoryBufferBase<NvMemoryBufferAllocator>, public UserAllocated { NV_NOCOPY(NsMemoryBuffer) typedef NvMemoryBufferBase<NvMemoryBufferAllocator> BaseClass; public: NsMemoryBuffer(const void *readMem,uint32_t readLen): BaseClass(readMem, readLen) {} NsMemoryBuffer(const void *readMem,uint32_t readLen, uint32_t alignment): BaseClass(readMem, readLen, NvMemoryBufferAllocator(alignment)) {} NsMemoryBuffer(uint32_t defaultWriteBufferSize=BUFFER_SIZE_DEFAULT): BaseClass(defaultWriteBufferSize) {} NsMemoryBuffer(uint32_t defaultWriteBufferSize,uint32_t alignment): BaseClass(defaultWriteBufferSize, NvMemoryBufferAllocator(alignment)) {} }; } using namespace general_NvIOStream2; } #endif // NV_MEMORY_BUFFER_H
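A hypothetical round-trip sketch (not part of the original header), assuming the foundation allocator has been initialized: values are written into a growable NsMemoryBuffer, then read back through a second, read-only buffer constructed over the written bytes.

#include <cstdio>

inline void memory_buffer_example()
{
    nvidia::NsMemoryBuffer wb;          // OPEN_READ_WRITE_NEW, default 4096-byte write buffer
    wb.storeDword(42);                  // endian-aware helpers inherited from NvFileBuf
    wb.storeFloat(3.5f);

    // Read-only view over the bytes written so far (OPEN_READ_ONLY).
    nvidia::NsMemoryBuffer rb(wb.getWriteBuffer(), wb.getWriteBufferSize());
    uint32_t i = rb.readDword();
    float    f = rb.readFloat();
    std::printf("read back %u and %g\n", i, f);
}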
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFileBuffer/include/NvFileBuf.h
/* * Copyright 2009-2011 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ #ifndef NV_FILE_BUF_H #define NV_FILE_BUF_H #include "NvCTypes.h" /** \addtogroup foundation @{ */ namespace nvidia { namespace general_NvIOStream2 { NV_PUSH_PACK_DEFAULT /** \brief Callback class for data serialization. The user needs to supply an NvFileBuf implementation to a number of methods to allow the SDK to read or write chunks of binary data. This allows flexibility for the source/destination of the data. For example the NvFileBuf could store data in a file, memory buffer or custom file format. \note It is the users responsibility to ensure that the data is written to the appropriate offset. */ class NvFileBuf { public: enum EndianMode { ENDIAN_NONE = 0, // do no conversion for endian mode ENDIAN_BIG = 1, // always read/write data as natively big endian (Power PC, etc.) ENDIAN_LITTLE = 2 // always read/write data as natively little endian (Intel, etc.) Default Behavior! }; NvFileBuf(EndianMode mode=ENDIAN_LITTLE) { setEndianMode(mode); } virtual ~NvFileBuf(void) { } /** \brief Declares a constant to seek to the end of the stream. 
* * Does not support streams longer than 32 bits */ static const uint32_t STREAM_SEEK_END=0xFFFFFFFF; enum OpenMode { OPEN_FILE_NOT_FOUND, OPEN_READ_ONLY, // open file buffer stream for read only access OPEN_WRITE_ONLY, // open file buffer stream for write only access OPEN_READ_WRITE_NEW, // open a new file for both read/write access OPEN_READ_WRITE_EXISTING // open an existing file for both read/write access }; virtual OpenMode getOpenMode(void) const = 0; bool isOpen(void) const { return getOpenMode()!=OPEN_FILE_NOT_FOUND; } enum SeekType { SEEKABLE_NO = 0, SEEKABLE_READ = 0x1, SEEKABLE_WRITE = 0x2, SEEKABLE_READWRITE = 0x3 }; virtual SeekType isSeekable(void) const = 0; void setEndianMode(EndianMode e) { mEndianMode = e; if ( (e==ENDIAN_BIG && !isBigEndian() ) || (e==ENDIAN_LITTLE && isBigEndian() ) ) { mEndianSwap = true; } else { mEndianSwap = false; } } EndianMode getEndianMode(void) const { return mEndianMode; } virtual uint32_t getFileLength(void) const = 0; /** \brief Seeks the stream to a particular location for reading * * If the location passed exceeds the length of the stream, then it will seek to the end. * Returns the location it ended up at (useful if you seek to the end) to get the file position */ virtual uint32_t seekRead(uint32_t loc) = 0; /** \brief Seeks the stream to a particular location for writing * * If the location passed exceeds the length of the stream, then it will seek to the end. * Returns the location it ended up at (useful if you seek to the end) to get the file position */ virtual uint32_t seekWrite(uint32_t loc) = 0; /** \brief Reads from the stream into a buffer. \param[out] mem The buffer to read the stream into. \param[in] len The number of bytes to stream into the buffer \return Returns the actual number of bytes read. If not equal to the length requested, then reached end of stream. */ virtual uint32_t read(void *mem,uint32_t len) = 0; /** \brief Reads from the stream into a buffer but does not advance the read location. \param[out] mem The buffer to read the stream into. \param[in] len The number of bytes to stream into the buffer \return Returns the actual number of bytes read. If not equal to the length requested, then reached end of stream. */ virtual uint32_t peek(void *mem,uint32_t len) = 0; /** \brief Writes a buffer of memory to the stream \param[in] mem The address of a buffer of memory to send to the stream. \param[in] len The number of bytes to send to the stream. \return Returns the actual number of bytes sent to the stream. If not equal to the length specific, then the stream is full or unable to write for some reason. */ virtual uint32_t write(const void *mem,uint32_t len) = 0; /** \brief Reports the current stream location read aqccess. \return Returns the current stream read location. */ virtual uint32_t tellRead(void) const = 0; /** \brief Reports the current stream location for write access. \return Returns the current stream write location. */ virtual uint32_t tellWrite(void) const = 0; /** \brief Causes any temporarily cached data to be flushed to the stream. */ virtual void flush(void) = 0; /** \brief Close the stream. 
*/ virtual void close(void) {} void release(void) { delete this; } static NV_INLINE bool isBigEndian() { int32_t i = 1; return *(reinterpret_cast<char*>(&i))==0; } NV_INLINE void swap2Bytes(void* _data) const { char *data = static_cast<char *>(_data); char one_byte; one_byte = data[0]; data[0] = data[1]; data[1] = one_byte; } NV_INLINE void swap4Bytes(void* _data) const { char *data = static_cast<char *>(_data); char one_byte; one_byte = data[0]; data[0] = data[3]; data[3] = one_byte; one_byte = data[1]; data[1] = data[2]; data[2] = one_byte; } NV_INLINE void swap8Bytes(void *_data) const { char *data = static_cast<char *>(_data); char one_byte; one_byte = data[0]; data[0] = data[7]; data[7] = one_byte; one_byte = data[1]; data[1] = data[6]; data[6] = one_byte; one_byte = data[2]; data[2] = data[5]; data[5] = one_byte; one_byte = data[3]; data[3] = data[4]; data[4] = one_byte; } NV_INLINE void storeDword(uint32_t v) { if ( mEndianSwap ) swap4Bytes(&v); write(&v,sizeof(v)); } NV_INLINE void storeFloat(float v) { if ( mEndianSwap ) swap4Bytes(&v); write(&v,sizeof(v)); } NV_INLINE void storeDouble(double v) { if ( mEndianSwap ) swap8Bytes(&v); write(&v,sizeof(v)); } NV_INLINE void storeByte(uint8_t b) { write(&b,sizeof(b)); } NV_INLINE void storeWord(uint16_t w) { if ( mEndianSwap ) swap2Bytes(&w); write(&w,sizeof(w)); } uint8_t readByte(void) { uint8_t v=0; read(&v,sizeof(v)); return v; } uint16_t readWord(void) { uint16_t v=0; read(&v,sizeof(v)); if ( mEndianSwap ) swap2Bytes(&v); return v; } uint32_t readDword(void) { uint32_t v=0; read(&v,sizeof(v)); if ( mEndianSwap ) swap4Bytes(&v); return v; } float readFloat(void) { float v=0; read(&v,sizeof(v)); if ( mEndianSwap ) swap4Bytes(&v); return v; } double readDouble(void) { double v=0; read(&v,sizeof(v)); if ( mEndianSwap ) swap8Bytes(&v); return v; } private: bool mEndianSwap; // whether or not the endian should be swapped on the current platform EndianMode mEndianMode; // the current endian mode behavior for the stream }; NV_POP_PACK } // end of namespace using namespace general_NvIOStream2; namespace general_NvIOStream = general_NvIOStream2; } // end of namespace #endif // NV_FILE_BUF_H
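A hypothetical serialization helper (not part of the original header) written against the abstract NvFileBuf interface. Any concrete stream, such as the memory buffer above or a file-backed implementation, can be passed in; the endian-aware helpers handle byte swapping when the stream's endian mode differs from the host. JointRecord is a made-up struct for illustration.

struct JointRecord { uint32_t nodeA; uint32_t nodeB; float breakForce; };

inline void writeJoint(nvidia::NvFileBuf& stream, const JointRecord& j)
{
    stream.storeDword(j.nodeA);        // 4 bytes, swapped if endian modes differ
    stream.storeDword(j.nodeB);
    stream.storeFloat(j.breakForce);
}

inline JointRecord readJoint(nvidia::NvFileBuf& stream)
{
    JointRecord j;
    j.nodeA      = stream.readDword(); // read back in the same order
    j.nodeB      = stream.readDword();
    j.breakForce = stream.readFloat();
    return j;
}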
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsArray.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NSFOUNDATION_NSARRAY_H #define NV_NSFOUNDATION_NSARRAY_H #include "NvAssert.h" #include "NsAllocator.h" #include "NsBasicTemplates.h" #include "NvIntrinsics.h" #if NV_LINUX || NV_ANDROID || (NV_IOS && !NV_A64) || NV_OSX || NV_PS3 || NV_PSP2 || NV_WIIU #include <tr1/type_traits> #elif NV_WINRT || NV_XBOXONE || (NV_IOS && NV_A64) || NV_WIN64 || NV_X360 || NV_WIN32 || NV_PS4 #include <type_traits> #if NV_IOS && NV_A64 namespace std { namespace tr1 { using std::is_pod; } } #endif #else #error "OS with no defined path to type_traits.h" #endif #if NV_VC == 9 || NV_VC == 10 #pragma warning(push) #pragma warning(disable : 4347) // behavior change: 'function template' is called instead of 'function' #endif namespace nvidia { namespace shdfnd { template <class Serializer> void exportArray(Serializer& stream, const void* data, uint32_t size, uint32_t sizeOfElement, uint32_t capacity); char* importArray(char* address, void** data, uint32_t size, uint32_t sizeOfElement, uint32_t capacity); /*! An array is a sequential container. Implementation note * entries between 0 and size are valid objects * we use inheritance to build this because the array is included inline in a lot of objects and we want the allocator to take no space if it's not stateful, which aggregation doesn't allow. Also, we want the metadata at the front for the inline case where the allocator contains some inline storage space */ template <class T, class Alloc = typename AllocatorTraits<T>::Type> class Array : protected Alloc { public: typedef T* Iterator; typedef const T* ConstIterator; explicit Array(const NvEMPTY v) : Alloc(v) { if(mData) mCapacity |= NV_SIGN_BITMASK; } /*! Default array constructor. Initialize an empty array */ NV_INLINE explicit Array(const Alloc& alloc = Alloc()) : Alloc(alloc), mData(0), mSize(0), mCapacity(0) { } /*! 
Initialize array with given capacity */ NV_INLINE explicit Array(uint32_t size, const T& a = T(), const Alloc& alloc = Alloc()) : Alloc(alloc), mData(0), mSize(0), mCapacity(0) { resize(size, a); } /*! Copy-constructor. Copy all entries from other array */ template <class A> NV_INLINE explicit Array(const Array<T, A>& other, const Alloc& alloc = Alloc()) : Alloc(alloc) { copy(other); } // This is necessary else the basic default copy constructor is used in the case of both arrays being of the same // template instance // The C++ standard clearly states that a template constructor is never a copy constructor [2]. In other words, // the presence of a template constructor does not suppress the implicit declaration of the copy constructor. // Also never make a copy constructor explicit, or copy-initialization* will no longer work. This is because // 'binding an rvalue to a const reference requires an accessible copy constructor' (http://gcc.gnu.org/bugs/) // *http://stackoverflow.com/questions/1051379/is-there-a-difference-in-c-between-copy-initialization-and-assignment-initializ NV_INLINE Array(const Array& other, const Alloc& alloc = Alloc()) : Alloc(alloc) { copy(other); } /*! Initialize array with given length */ NV_INLINE explicit Array(const T* first, const T* last, const Alloc& alloc = Alloc()) : Alloc(alloc), mSize(last < first ? 0 : uint32_t(last - first)), mCapacity(mSize) { mData = allocate(mSize); copy(mData, mData + mSize, first); } /*! Destructor */ NV_INLINE ~Array() { destroy(mData, mData + mSize); if(capacity() && !isInUserMemory()) deallocate(mData); } /*! Assignment operator. Copy content (deep-copy) */ template <class A> NV_INLINE Array& operator=(const Array<T, A>& rhs) { if(&rhs == this) return *this; clear(); reserve(rhs.mSize); copy(mData, mData + rhs.mSize, rhs.mData); mSize = rhs.mSize; return *this; } NV_INLINE Array& operator=(const Array& t) // Needs to be declared, see comment at copy-constructor { return operator=<Alloc>(t); } NV_FORCE_INLINE static bool isArrayOfPOD() { #if NV_VC>=14 return std::is_trivially_copyable<T>::value; #else return std::tr1::is_pod<T>::value; #endif } /*! Array indexing operator. \param i The index of the element that will be returned. \return The element i in the array. */ NV_FORCE_INLINE const T& operator[](uint32_t i) const { NV_ASSERT(i < mSize); return mData[i]; } /*! Array indexing operator. \param i The index of the element that will be returned. \return The element i in the array. */ NV_FORCE_INLINE T& operator[](uint32_t i) { NV_ASSERT(i < mSize); return mData[i]; } /*! Returns a pointer to the initial element of the array. \return a pointer to the initial element of the array. */ NV_FORCE_INLINE ConstIterator begin() const { return mData; } NV_FORCE_INLINE Iterator begin() { return mData; } /*! Returns an iterator beyond the last element of the array. Do not dereference. \return a pointer to the element beyond the last element of the array. */ NV_FORCE_INLINE ConstIterator end() const { return mData + mSize; } NV_FORCE_INLINE Iterator end() { return mData + mSize; } /*! Returns a reference to the first element of the array. Undefined if the array is empty. \return a reference to the first element of the array */ NV_FORCE_INLINE const T& front() const { NV_ASSERT(mSize); return mData[0]; } NV_FORCE_INLINE T& front() { NV_ASSERT(mSize); return mData[0]; } /*! Returns a reference to the last element of the array. 
Undefined if the array is empty \return a reference to the last element of the array */ NV_FORCE_INLINE const T& back() const { NV_ASSERT(mSize); return mData[mSize - 1]; } NV_FORCE_INLINE T& back() { NV_ASSERT(mSize); return mData[mSize - 1]; } /*! Returns the number of entries in the array. This can, and probably will, differ from the array capacity. \return The number of of entries in the array. */ NV_FORCE_INLINE uint32_t size() const { return mSize; } /*! Clears the array. */ NV_INLINE void clear() { destroy(mData, mData + mSize); mSize = 0; } /*! Returns whether the array is empty (i.e. whether its size is 0). \return true if the array is empty */ NV_FORCE_INLINE bool empty() const { return mSize == 0; } /*! Finds the first occurrence of an element in the array. \param a The element to find. */ NV_INLINE Iterator find(const T& a) { uint32_t index; for(index = 0; index < mSize && mData[index] != a; index++) ; return mData + index; } NV_INLINE ConstIterator find(const T& a) const { uint32_t index; for(index = 0; index < mSize && mData[index] != a; index++) ; return mData + index; } ///////////////////////////////////////////////////////////////////////// /*! Adds one element to the end of the array. Operation is O(1). \param a The element that will be added to this array. */ ///////////////////////////////////////////////////////////////////////// NV_FORCE_INLINE T& pushBack(const T& a) { if(capacity() <= mSize) return growAndPushBack(a); NV_PLACEMENT_NEW(reinterpret_cast<void*>(mData + mSize), T)(a); return mData[mSize++]; } ///////////////////////////////////////////////////////////////////////// /*! Returns the element at the end of the array. Only legal if the array is non-empty. */ ///////////////////////////////////////////////////////////////////////// NV_INLINE T popBack() { NV_ASSERT(mSize); T t = mData[mSize - 1]; if (!isArrayOfPOD()) { mData[--mSize].~T(); } else { --mSize; } return t; } ///////////////////////////////////////////////////////////////////////// /*! Construct one element at the end of the array. Operation is O(1). */ ///////////////////////////////////////////////////////////////////////// NV_INLINE T& insert() { if(capacity() <= mSize) grow(capacityIncrement()); T* ptr = mData + mSize++; new (ptr) T; // not 'T()' because PODs should not get default-initialized. return *ptr; } ///////////////////////////////////////////////////////////////////////// /*! Subtracts the element on position i from the array and replace it with the last element. Operation is O(1) \param i The position of the element that will be subtracted from this array. */ ///////////////////////////////////////////////////////////////////////// NV_INLINE void replaceWithLast(uint32_t i) { NV_ASSERT(i < mSize); mData[i] = mData[--mSize]; if (!isArrayOfPOD()) { mData[mSize].~T(); } } NV_INLINE void replaceWithLast(Iterator i) { replaceWithLast(static_cast<uint32_t>(i - mData)); } ///////////////////////////////////////////////////////////////////////// /*! Replaces the first occurrence of the element a with the last element Operation is O(n) \param a The position of the element that will be subtracted from this array. \return true if the element has been removed. 
*/ ///////////////////////////////////////////////////////////////////////// NV_INLINE bool findAndReplaceWithLast(const T& a) { uint32_t index = 0; while(index < mSize && mData[index] != a) ++index; if(index == mSize) return false; replaceWithLast(index); return true; } ///////////////////////////////////////////////////////////////////////// /*! Subtracts the element on position i from the array. Shift the entire array one step. Operation is O(n) \param i The position of the element that will be subtracted from this array. */ ///////////////////////////////////////////////////////////////////////// NV_INLINE void remove(uint32_t i) { NV_ASSERT(i < mSize); if (isArrayOfPOD()) { if (i + 1 != mSize) { nvidia::intrinsics::memMove(mData + i, mData + i + 1, (mSize - i - 1) * sizeof(T)); } } else { for(T* it = mData + i; it->~T(), ++i < mSize; ++it) new (it) T(mData[i]); } --mSize; } ///////////////////////////////////////////////////////////////////////// /*! Removes a range from the array. Shifts the array so order is maintained. Operation is O(n) \param begin The starting position of the element that will be subtracted from this array. \param count The number of elments that will be subtracted from this array. */ ///////////////////////////////////////////////////////////////////////// NV_INLINE void removeRange(uint32_t begin, uint32_t count) { NV_ASSERT(begin < mSize); NV_ASSERT((begin + count) <= mSize); if (!isArrayOfPOD()) { for(uint32_t i = 0; i < count; i++) { mData[begin + i].~T(); // call the destructor on the ones being removed first. } } T* dest = &mData[begin]; // location we are copying the tail end objects to T* src = &mData[begin + count]; // start of tail objects uint32_t move_count = mSize - (begin + count); // compute remainder that needs to be copied down if (isArrayOfPOD()) { nvidia::intrinsics::memMove(dest, src, move_count * sizeof(T)); } else { for(uint32_t i = 0; i < move_count; i++) { new (dest) T(*src); // copy the old one to the new location src->~T(); // call the destructor on the old location dest++; src++; } } mSize -= count; } ////////////////////////////////////////////////////////////////////////// /*! Resize array */ ////////////////////////////////////////////////////////////////////////// NV_NOINLINE void resize(const uint32_t size, const T& a = T()); NV_NOINLINE void resizeUninitialized(const uint32_t size); ////////////////////////////////////////////////////////////////////////// /*! Resize array such that only as much memory is allocated to hold the existing elements */ ////////////////////////////////////////////////////////////////////////// NV_INLINE void shrink() { recreate(mSize); } ////////////////////////////////////////////////////////////////////////// /*! Deletes all array elements and frees memory. */ ////////////////////////////////////////////////////////////////////////// NV_INLINE void reset() { resize(0); shrink(); } ////////////////////////////////////////////////////////////////////////// /*! Ensure that the array has at least size capacity. */ ////////////////////////////////////////////////////////////////////////// NV_INLINE void reserve(const uint32_t capacity) { if(capacity > this->capacity()) grow(capacity); } ////////////////////////////////////////////////////////////////////////// /*! Query the capacity(allocated mem) for the array. 
*/ ////////////////////////////////////////////////////////////////////////// NV_FORCE_INLINE uint32_t capacity() const { return mCapacity & ~NV_SIGN_BITMASK; } ////////////////////////////////////////////////////////////////////////// /*! Unsafe function to force the size of the array */ ////////////////////////////////////////////////////////////////////////// NV_FORCE_INLINE void forceSize_Unsafe(uint32_t size) { NV_ASSERT(size <= mCapacity); mSize = size; } ////////////////////////////////////////////////////////////////////////// /*! Swap contents of an array without allocating temporary storage */ ////////////////////////////////////////////////////////////////////////// NV_INLINE void swap(Array<T, Alloc>& other) { shdfnd::swap(mData, other.mData); shdfnd::swap(mSize, other.mSize); shdfnd::swap(mCapacity, other.mCapacity); } ////////////////////////////////////////////////////////////////////////// /*! Assign a range of values to this vector (resizes to length of range) */ ////////////////////////////////////////////////////////////////////////// NV_INLINE void assign(const T* first, const T* last) { resizeUninitialized(uint32_t(last - first)); copy(begin(), end(), first); } // We need one bit to mark arrays that have been deserialized from a user-provided memory block. // For alignment & memory saving purpose we store that bit in the rarely used capacity member. NV_FORCE_INLINE uint32_t isInUserMemory() const { return mCapacity & NV_SIGN_BITMASK; } /// return reference to allocator NV_INLINE Alloc& getAllocator() { return *this; } protected: // constructor for where we don't own the memory Array(T* memory, uint32_t size, uint32_t capacity, const Alloc& alloc = Alloc()) : Alloc(alloc), mData(memory), mSize(size), mCapacity(capacity | NV_SIGN_BITMASK) { } template <class A> NV_NOINLINE void copy(const Array<T, A>& other); NV_INLINE T* allocate(uint32_t size) { if(size > 0) { T* p = reinterpret_cast<T*>(Alloc::allocate(sizeof(T) * size, __FILE__, __LINE__)); /** Mark a specified amount of memory with 0xcd pattern. This is used to check that the meta data definition for serialized classes is complete in checked builds. */ #if NV_CHECKED if(p) { for(uint32_t i = 0; i < (sizeof(T) * size); ++i) reinterpret_cast<uint8_t*>(p)[i] = 0xcd; } #endif return p; } return 0; } NV_INLINE void deallocate(void* mem) { Alloc::deallocate(mem); } static NV_INLINE bool isZeroInit(const T& object) { char ZeroBuffOnStack[sizeof(object)] = {}; return memcmp(&object, ZeroBuffOnStack, sizeof(object)) == 0; } static NV_INLINE void create(T* first, T* last, const T& a) { if (isArrayOfPOD() && isZeroInit(a)) { if(last>first) nvidia::intrinsics::memZero(first, uint32_t((last-first) * sizeof(T))); } else { for(; first<last; ++first) ::new(first)T(a); } } static NV_INLINE void copy(T* first, T* last, const T* src) { if (last <= first) return; if (isArrayOfPOD()) { nvidia::intrinsics::memCopy(first, src, uint32_t((last-first) * sizeof(T))); } else { for(; first<last; ++first, ++src) ::new (first)T(*src); } } static NV_INLINE void destroy(T* first, T* last) { if (!isArrayOfPOD()) { for(; first < last; ++first) first->~T(); } } /*! Called when pushBack() needs to grow the array. \param a The element that will be added to this array. */ NV_NOINLINE T& growAndPushBack(const T& a); /*! Resizes the available memory for the array. \param capacity The number of entries that the set should be able to hold. */ NV_INLINE void grow(uint32_t capacity) { NV_ASSERT(this->capacity() < capacity); recreate(capacity); } /*! 
Creates a new memory block, copies all entries to the new block and destroys old entries. \param capacity The number of entries that the set should be able to hold. */ NV_NOINLINE void recreate(uint32_t capacity); // The idea here is to prevent accidental bugs with pushBack or insert. Unfortunately // it interacts badly with InlineArrays with smaller inline allocations. // TODO(dsequeira): policy template arg, this is exactly what they're for. NV_INLINE uint32_t capacityIncrement() const { const uint32_t capacity = this->capacity(); return capacity == 0 ? 1 : capacity * 2; } T* mData; uint32_t mSize; uint32_t mCapacity; }; template <class T, class Alloc> NV_NOINLINE void Array<T, Alloc>::resize(const uint32_t size, const T& a) { reserve(size); create(mData + mSize, mData + size, a); destroy(mData + size, mData + mSize); mSize = size; } template <class T, class Alloc> template <class A> NV_NOINLINE void Array<T, Alloc>::copy(const Array<T, A>& other) { if(!other.empty()) { mData = allocate(mSize = mCapacity = other.size()); copy(mData, mData + mSize, other.begin()); } else { mData = NULL; mSize = 0; mCapacity = 0; } // mData = allocate(other.mSize); // mSize = other.mSize; // mCapacity = other.mSize; // copy(mData, mData + mSize, other.mData); } template <class T, class Alloc> NV_NOINLINE void Array<T, Alloc>::resizeUninitialized(const uint32_t size) { reserve(size); mSize = size; } template <class T, class Alloc> NV_NOINLINE T& Array<T, Alloc>::growAndPushBack(const T& a) { uint32_t capacity = capacityIncrement(); T* newData = allocate(capacity); NV_ASSERT((!capacity) || (newData && (newData != mData))); copy(newData, newData + mSize, mData); // inserting element before destroying old array // avoids referencing destroyed object when duplicating array element. NV_PLACEMENT_NEW(reinterpret_cast<void*>(newData + mSize), T)(a); destroy(mData, mData + mSize); if(!isInUserMemory()) deallocate(mData); mData = newData; mCapacity = capacity; return mData[mSize++]; } template <class T, class Alloc> NV_NOINLINE void Array<T, Alloc>::recreate(uint32_t capacity) { T* newData = allocate(capacity); NV_ASSERT((!capacity) || (newData && (newData != mData))); copy(newData, newData + mSize, mData); destroy(mData, mData + mSize); if(!isInUserMemory()) deallocate(mData); mData = newData; mCapacity = capacity; } template <class T, class Alloc> NV_INLINE void swap(Array<T, Alloc>& x, Array<T, Alloc>& y) { x.swap(y); } } // namespace shdfnd } // namespace nvidia #if NV_VC == 9 || NV_VC == 10 #pragma warning(pop) #endif #endif // #ifndef NV_NSFOUNDATION_NSARRAY_H
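A hypothetical usage sketch (not part of the original header) of the basic Array operations above. It assumes the foundation allocator has been set up, since the default allocator template argument routes allocations through it.

#include <cstdio>

inline void array_example()
{
    nvidia::shdfnd::Array<uint32_t> ids;
    ids.reserve(8);                     // one allocation up front
    for (uint32_t i = 0; i < 5; ++i)
        ids.pushBack(i * 10);           // 0, 10, 20, 30, 40

    ids.replaceWithLast(1);             // O(1) unordered removal: 0, 40, 20, 30
    const bool found = ids.find(20) != ids.end();

    std::printf("size=%u capacity=%u found=%d back=%u\n",
                ids.size(), ids.capacity(), int(found), ids.back());
}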
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsBasicTemplates.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NSFOUNDATION_NSBASICTEMPLATES_H #define NV_NSFOUNDATION_NSBASICTEMPLATES_H #include "Ns.h" namespace nvidia { namespace shdfnd { template <typename A> struct Equal { bool operator()(const A& a, const A& b) const { return a == b; } }; template <typename A> struct Less { bool operator()(const A& a, const A& b) const { return a < b; } }; template <typename A> struct Greater { bool operator()(const A& a, const A& b) const { return a > b; } }; template <class F, class S> class Pair { public: F first; S second; Pair() : first(F()), second(S()) { } Pair(const F& f, const S& s) : first(f), second(s) { } Pair(const Pair& p) : first(p.first), second(p.second) { } // CN - fix for /.../NsBasicTemplates.h(61) : warning C4512: 'nvidia::shdfnd::Pair<F,S>' : assignment operator could // not be generated Pair& operator=(const Pair& p) { first = p.first; second = p.second; return *this; } bool operator==(const Pair& p) const { return first == p.first && second == p.second; } bool operator<(const Pair& p) const { if(first < p.first) return true; else return !(p.first < first) && (second < p.second); } }; template <unsigned int A> struct LogTwo { static const unsigned int value = LogTwo<(A >> 1)>::value + 1; }; template <> struct LogTwo<1> { static const unsigned int value = 0; }; template <typename T> struct UnConst { typedef T Type; }; template <typename T> struct UnConst<const T> { typedef T Type; }; template <typename T> T pointerOffset(void* p, ptrdiff_t offset) { return reinterpret_cast<T>(reinterpret_cast<char*>(p) + offset); } template <typename T> T pointerOffset(const void* p, ptrdiff_t offset) { return reinterpret_cast<T>(reinterpret_cast<const char*>(p) + offset); } template <class T> NV_CUDA_CALLABLE NV_INLINE void swap(T& x, T& y) { const T tmp = x; x = y; y = tmp; } } // namespace shdfnd } // namespace nvidia #endif // #ifndef 
NV_NSFOUNDATION_NSBASICTEMPLATES_H
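A hypothetical usage sketch (not part of the original header) of the small utility templates above: lexicographic Pair comparison, compile-time LogTwo, and the by-value swap.

#include <cstdio>

inline void basic_templates_example()
{
    nvidia::shdfnd::Pair<uint32_t, float> a(1, 2.5f), b(1, 3.5f);
    const bool less = a < b;                             // equal first members, 2.5 < 3.5 => true

    // LogTwo is evaluated at compile time; LogTwo<64>::value == 6.
    const unsigned log64 = nvidia::shdfnd::LogTwo<64>::value;

    nvidia::shdfnd::swap(a, b);                          // swap via a temporary copy
    std::printf("less=%d log64=%u a.second=%g\n", int(less), log64, a.second);
}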
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsUserAllocated.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NSFOUNDATION_NSUSERALLOCATED_H #define NV_NSFOUNDATION_NSUSERALLOCATED_H #include "NsAllocator.h" namespace nvidia { namespace shdfnd { /** Provides new and delete using a UserAllocator. Guarantees that 'delete x;' uses the UserAllocator too. */ class UserAllocated { public: // NV_SERIALIZATION NV_INLINE void* operator new(size_t, void* address) { return address; } //~NV_SERIALIZATION // Matching operator delete to the above operator new. Don't ask me // how this makes any sense - Nuernberger. NV_INLINE void operator delete(void*, void*) { } template <typename Alloc> NV_INLINE void* operator new(size_t size, Alloc alloc, const char* fileName, int line) { return alloc.allocate(size, fileName, line); } template <typename Alloc> NV_INLINE void* operator new [](size_t size, Alloc alloc, const char* fileName, int line) { return alloc.allocate(size, fileName, line); } // placement delete template <typename Alloc> NV_INLINE void operator delete(void* ptr, Alloc alloc, const char* fileName, int line) { NV_UNUSED(fileName); NV_UNUSED(line); alloc.deallocate(ptr); } template <typename Alloc> NV_INLINE void operator delete [](void* ptr, Alloc alloc, const char* fileName, int line) { NV_UNUSED(fileName); NV_UNUSED(line); alloc.deallocate(ptr); } NV_INLINE void operator delete(void* ptr) { NonTrackingAllocator().deallocate(ptr); } NV_INLINE void operator delete [](void* ptr) { NonTrackingAllocator().deallocate(ptr); } }; } // namespace shdfnd } // namespace nvidia #endif // #ifndef NV_NSFOUNDATION_NSUSERALLOCATED_H
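A hypothetical sketch (not part of the original header) of a class that routes its heap allocations through the foundation allocator by deriving from UserAllocated. BondBuffer is a made-up name, and the explicit placement-new form below assumes NonTrackingAllocator (from NsAllocator.h) exposes an allocate(size, file, line) overload matching the templated operator new above; SDK code normally wraps this in an allocation macro.

class BondBuffer : public nvidia::shdfnd::UserAllocated
{
public:
    explicit BondBuffer(uint32_t count) : mCount(count) {}
    uint32_t count() const { return mCount; }
private:
    uint32_t mCount;
};

inline void user_allocated_example()
{
    // Allocate through the allocator-aware operator new declared in UserAllocated...
    BondBuffer* buf =
        new (nvidia::shdfnd::NonTrackingAllocator(), __FILE__, __LINE__) BondBuffer(16);
    // ...and free with plain delete, which UserAllocated routes back to the same allocator.
    delete buf;
}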
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsIntrinsics.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NSFOUNDATION_NSINTRINSICS_H #define NV_NSFOUNDATION_NSINTRINSICS_H #include "NvPreprocessor.h" #if(NV_WINDOWS_FAMILY || NV_WINRT) #include "platform/windows/NsWindowsIntrinsics.h" #elif NV_X360 #include "xbox360/NsXbox360Intrinsics.h" #elif(NV_LINUX || NV_ANDROID || NV_APPLE_FAMILY || NV_PS4) #include "platform/unix/NsUnixIntrinsics.h" #elif NV_PS3 #include "ps3/NsPS3Intrinsics.h" #elif NV_PSP2 #include "psp2/NsPSP2Intrinsics.h" #elif NV_WIIU #include "wiiu/NsWiiUIntrinsics.h" #elif NV_XBOXONE #include "XboxOne/NsXboxOneIntrinsics.h" #else #error "Platform not supported!" #endif #endif // #ifndef NV_NSFOUNDATION_NSINTRINSICS_H
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsHashInternals.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NSFOUNDATION_NSHASHINTERNALS_H #define NV_NSFOUNDATION_NSHASHINTERNALS_H #include "NsBasicTemplates.h" #include "NsArray.h" #include "NsBitUtils.h" #include "NsHash.h" #include "NvIntrinsics.h" #if NV_VC #pragma warning(push) #pragma warning(disable : 4127) // conditional expression is constant #endif namespace nvidia { namespace shdfnd { namespace internal { template <class Entry, class Key, class HashFn, class GetKey, class Allocator, bool compacting> class HashBase : private Allocator { void init(uint32_t initialTableSize, float loadFactor) { mBuffer = NULL; mEntries = NULL; mEntriesNext = NULL; mHash = NULL; mEntriesCapacity = 0; mHashSize = 0; mLoadFactor = loadFactor; mFreeList = uint32_t(EOL); mTimestamp = 0; mEntriesCount = 0; if(initialTableSize) reserveInternal(initialTableSize); } public: typedef Entry EntryType; HashBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : Allocator(NV_DEBUG_EXP("hashBase")) { init(initialTableSize, loadFactor); } HashBase(uint32_t initialTableSize, float loadFactor, const Allocator& alloc) : Allocator(alloc) { init(initialTableSize, loadFactor); } HashBase(const Allocator& alloc) : Allocator(alloc) { init(64, 0.75f); } ~HashBase() { destroy(); // No need to clear() if(mBuffer) Allocator::deallocate(mBuffer); } static const uint32_t EOL = 0xffffffff; NV_INLINE Entry* create(const Key& k, bool& exists) { uint32_t h = 0; if(mHashSize) { h = hash(k); uint32_t index = mHash[h]; while(index != EOL && !HashFn().equal(GetKey()(mEntries[index]), k)) index = mEntriesNext[index]; exists = index != EOL; if(exists) return mEntries + index; } else exists = false; if(freeListEmpty()) { grow(); h = hash(k); } uint32_t entryIndex = freeListGetNext(); mEntriesNext[entryIndex] = mHash[h]; mHash[h] = entryIndex; mEntriesCount++; mTimestamp++; return mEntries + entryIndex; } NV_INLINE const Entry* 
find(const Key& k) const { if(!mHashSize) return NULL; const uint32_t h = hash(k); uint32_t index = mHash[h]; while(index != EOL && !HashFn().equal(GetKey()(mEntries[index]), k)) index = mEntriesNext[index]; return index != EOL ? mEntries + index : NULL; } NV_INLINE bool erase(const Key& k) { if(!mHashSize) return false; const uint32_t h = hash(k); uint32_t* ptr = mHash + h; while(*ptr != EOL && !HashFn().equal(GetKey()(mEntries[*ptr]), k)) ptr = mEntriesNext + *ptr; if(*ptr == EOL) return false; const uint32_t index = *ptr; *ptr = mEntriesNext[index]; mEntries[index].~Entry(); mEntriesCount--; mTimestamp++; if(compacting && index != mEntriesCount) replaceWithLast(index); freeListAdd(index); return true; } NV_INLINE uint32_t size() const { return mEntriesCount; } NV_INLINE uint32_t capacity() const { return mHashSize; } void clear() { if(!mHashSize || mEntriesCount == 0) return; destroy(); intrinsics::memSet(mHash, EOL, mHashSize * sizeof(uint32_t)); const uint32_t sizeMinus1 = mEntriesCapacity - 1; for(uint32_t i = 0; i < sizeMinus1; i++) { prefetchLine(mEntriesNext + i, 128); mEntriesNext[i] = i + 1; } mEntriesNext[mEntriesCapacity - 1] = uint32_t(EOL); mFreeList = 0; mEntriesCount = 0; } void reserve(uint32_t size) { if(size > mHashSize) reserveInternal(size); } NV_INLINE const Entry* getEntries() const { return mEntries; } NV_INLINE Entry* insertUnique(const Key& k) { NV_ASSERT(find(k) == NULL); uint32_t h = hash(k); uint32_t entryIndex = freeListGetNext(); mEntriesNext[entryIndex] = mHash[h]; mHash[h] = entryIndex; mEntriesCount++; mTimestamp++; return mEntries + entryIndex; } private: void destroy() { for(uint32_t i = 0; i < mHashSize; i++) { for(uint32_t j = mHash[i]; j != EOL; j = mEntriesNext[j]) mEntries[j].~Entry(); } } template <typename HK, typename GK, class A, bool comp> NV_NOINLINE void copy(const HashBase<Entry, Key, HK, GK, A, comp>& other); // free list management - if we're coalescing, then we use mFreeList to hold // the top of the free list and it should always be equal to size(). Otherwise, // we build a free list in the next() pointers. 
NV_INLINE void freeListAdd(uint32_t index) { if(compacting) { mFreeList--; NV_ASSERT(mFreeList == mEntriesCount); } else { mEntriesNext[index] = mFreeList; mFreeList = index; } } NV_INLINE void freeListAdd(uint32_t start, uint32_t end) { if(!compacting) { for(uint32_t i = start; i < end - 1; i++) // add the new entries to the free list mEntriesNext[i] = i + 1; // link in old free list mEntriesNext[end - 1] = mFreeList; NV_ASSERT(mFreeList != end - 1); mFreeList = start; } else if(mFreeList == EOL) // don't reset the free ptr for the compacting hash unless it's empty mFreeList = start; } NV_INLINE uint32_t freeListGetNext() { NV_ASSERT(!freeListEmpty()); if(compacting) { NV_ASSERT(mFreeList == mEntriesCount); return mFreeList++; } else { uint32_t entryIndex = mFreeList; mFreeList = mEntriesNext[mFreeList]; return entryIndex; } } NV_INLINE bool freeListEmpty() const { if(compacting) return mEntriesCount == mEntriesCapacity; else return mFreeList == EOL; } NV_INLINE void replaceWithLast(uint32_t index) { NV_PLACEMENT_NEW(mEntries + index, Entry)(mEntries[mEntriesCount]); mEntries[mEntriesCount].~Entry(); mEntriesNext[index] = mEntriesNext[mEntriesCount]; uint32_t h = hash(GetKey()(mEntries[index])); uint32_t* ptr; for(ptr = mHash + h; *ptr != mEntriesCount; ptr = mEntriesNext + *ptr) NV_ASSERT(*ptr != EOL); *ptr = index; } NV_INLINE uint32_t hash(const Key& k, uint32_t hashSize) const { return HashFn()(k) & (hashSize - 1); } NV_INLINE uint32_t hash(const Key& k) const { return hash(k, mHashSize); } void reserveInternal(uint32_t size) { if(!isPowerOfTwo(size)) size = nextPowerOfTwo(size); NV_ASSERT(!(size & (size - 1))); // decide whether iteration can be done on the entries directly bool resizeCompact = compacting || freeListEmpty(); // define new table sizes uint32_t oldEntriesCapacity = mEntriesCapacity; uint32_t newEntriesCapacity = uint32_t(float(size) * mLoadFactor); uint32_t newHashSize = size; // allocate new common buffer and setup pointers to new tables uint8_t* newBuffer; uint32_t* newHash; uint32_t* newEntriesNext; Entry* newEntries; { uint32_t newHashByteOffset = 0; uint32_t newEntriesNextBytesOffset = newHashByteOffset + newHashSize * sizeof(uint32_t); uint32_t newEntriesByteOffset = newEntriesNextBytesOffset + newEntriesCapacity * sizeof(uint32_t); newEntriesByteOffset += (16 - (newEntriesByteOffset & 15)) & 15; uint32_t newBufferByteSize = newEntriesByteOffset + newEntriesCapacity * sizeof(Entry); newBuffer = reinterpret_cast<uint8_t*>(Allocator::allocate(newBufferByteSize, __FILE__, __LINE__)); NV_ASSERT(newBuffer); newHash = reinterpret_cast<uint32_t*>(newBuffer + newHashByteOffset); newEntriesNext = reinterpret_cast<uint32_t*>(newBuffer + newEntriesNextBytesOffset); newEntries = reinterpret_cast<Entry*>(newBuffer + newEntriesByteOffset); } // initialize new hash table intrinsics::memSet(newHash, uint32_t(EOL), newHashSize * sizeof(uint32_t)); // iterate over old entries, re-hash and create new entries if(resizeCompact) { // check that old free list is empty - we don't need to copy the next entries NV_ASSERT(compacting || mFreeList == EOL); for(uint32_t index = 0; index < mEntriesCount; ++index) { uint32_t h = hash(GetKey()(mEntries[index]), newHashSize); newEntriesNext[index] = newHash[h]; newHash[h] = index; NV_PLACEMENT_NEW(newEntries + index, Entry)(mEntries[index]); mEntries[index].~Entry(); } } else { // copy old free list, only required for non compact resizing intrinsics::memCopy(newEntriesNext, mEntriesNext, mEntriesCapacity * sizeof(uint32_t)); for(uint32_t bucket = 
0; bucket < mHashSize; bucket++) { uint32_t index = mHash[bucket]; while(index != EOL) { uint32_t h = hash(GetKey()(mEntries[index]), newHashSize); newEntriesNext[index] = newHash[h]; NV_ASSERT(index != newHash[h]); newHash[h] = index; NV_PLACEMENT_NEW(newEntries + index, Entry)(mEntries[index]); mEntries[index].~Entry(); index = mEntriesNext[index]; } } } // swap buffer and pointers Allocator::deallocate(mBuffer); mBuffer = newBuffer; mHash = newHash; mHashSize = newHashSize; mEntriesNext = newEntriesNext; mEntries = newEntries; mEntriesCapacity = newEntriesCapacity; freeListAdd(oldEntriesCapacity, newEntriesCapacity); } void grow() { NV_ASSERT((mFreeList == EOL) || (compacting && (mEntriesCount == mEntriesCapacity))); uint32_t size = mHashSize == 0 ? 16 : mHashSize * 2; reserve(size); } uint8_t* mBuffer; Entry* mEntries; uint32_t* mEntriesNext; // same size as mEntries uint32_t* mHash; uint32_t mEntriesCapacity; uint32_t mHashSize; float mLoadFactor; uint32_t mFreeList; uint32_t mTimestamp; uint32_t mEntriesCount; // number of entries public: class Iter { public: NV_INLINE Iter(HashBase& b) : mBucket(0), mEntry(uint32_t(b.EOL)), mTimestamp(b.mTimestamp), mBase(b) { if(mBase.mEntriesCapacity > 0) { mEntry = mBase.mHash[0]; skip(); } } NV_INLINE void check() const { NV_ASSERT(mTimestamp == mBase.mTimestamp); } NV_INLINE Entry operator*() const { check(); return mBase.mEntries[mEntry]; } NV_INLINE Entry* operator->() const { check(); return mBase.mEntries + mEntry; } NV_INLINE Iter operator++() { check(); advance(); return *this; } NV_INLINE Iter operator++(int) { check(); Iter i = *this; advance(); return i; } NV_INLINE bool done() const { check(); return mEntry == mBase.EOL; } private: NV_INLINE void advance() { mEntry = mBase.mEntriesNext[mEntry]; skip(); } NV_INLINE void skip() { while(mEntry == mBase.EOL) { if(++mBucket == mBase.mHashSize) break; mEntry = mBase.mHash[mBucket]; } } Iter& operator=(const Iter&); uint32_t mBucket; uint32_t mEntry; uint32_t mTimestamp; HashBase& mBase; }; }; template <class Entry, class Key, class HashFn, class GetKey, class Allocator, bool compacting> template <typename HK, typename GK, class A, bool comp> NV_NOINLINE void HashBase<Entry, Key, HashFn, GetKey, Allocator, compacting>::copy(const HashBase<Entry, Key, HK, GK, A, comp>& other) { reserve(other.mEntriesCount); for(uint32_t i = 0; i < other.mEntriesCount; i++) { for(uint32_t j = other.mHash[i]; j != EOL; j = other.mEntriesNext[j]) { const Entry& otherEntry = other.mEntries[j]; bool exists; Entry* newEntry = create(GK()(otherEntry), exists); NV_ASSERT(!exists); NV_PLACEMENT_NEW(newEntry, Entry)(otherEntry); } } } template <class Key, class HashFn, class Allocator = typename AllocatorTraits<Key>::Type, bool Coalesced = false> class HashSetBase { NV_NOCOPY(HashSetBase) public: struct GetKey { NV_INLINE const Key& operator()(const Key& e) { return e; } }; typedef HashBase<Key, Key, HashFn, GetKey, Allocator, Coalesced> BaseMap; typedef typename BaseMap::Iter Iterator; HashSetBase(uint32_t initialTableSize, float loadFactor, const Allocator& alloc) : mBase(initialTableSize, loadFactor, alloc) { } HashSetBase(const Allocator& alloc) : mBase(64, 0.75f, alloc) { } HashSetBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : mBase(initialTableSize, loadFactor) { } bool insert(const Key& k) { bool exists; Key* e = mBase.create(k, exists); if(!exists) NV_PLACEMENT_NEW(e, Key)(k); return !exists; } NV_INLINE bool contains(const Key& k) const { return mBase.find(k) != 0; } NV_INLINE bool 
erase(const Key& k) { return mBase.erase(k); } NV_INLINE uint32_t size() const { return mBase.size(); } NV_INLINE uint32_t capacity() const { return mBase.capacity(); } NV_INLINE void reserve(uint32_t size) { mBase.reserve(size); } NV_INLINE void clear() { mBase.clear(); } protected: BaseMap mBase; }; template <class Key, class Value, class HashFn, class Allocator = typename AllocatorTraits<Pair<const Key, Value> >::Type> class HashMapBase { NV_NOCOPY(HashMapBase) public: typedef Pair<const Key, Value> Entry; struct GetKey { NV_INLINE const Key& operator()(const Entry& e) { return e.first; } }; typedef HashBase<Entry, Key, HashFn, GetKey, Allocator, true> BaseMap; typedef typename BaseMap::Iter Iterator; HashMapBase(uint32_t initialTableSize, float loadFactor, const Allocator& alloc) : mBase(initialTableSize, loadFactor, alloc) { } HashMapBase(const Allocator& alloc) : mBase(64, 0.75f, alloc) { } HashMapBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : mBase(initialTableSize, loadFactor) { } bool insert(const Key /*&*/ k, const Value /*&*/ v) { bool exists; Entry* e = mBase.create(k, exists); if(!exists) NV_PLACEMENT_NEW(e, Entry)(k, v); return !exists; } Value& operator[](const Key& k) { bool exists; Entry* e = mBase.create(k, exists); if(!exists) NV_PLACEMENT_NEW(e, Entry)(k, Value()); return e->second; } NV_INLINE const Entry* find(const Key& k) const { return mBase.find(k); } NV_INLINE bool erase(const Key& k) { return mBase.erase(k); } NV_INLINE uint32_t size() const { return mBase.size(); } NV_INLINE uint32_t capacity() const { return mBase.capacity(); } NV_INLINE Iterator getIterator() { return Iterator(mBase); } NV_INLINE void reserve(uint32_t size) { mBase.reserve(size); } NV_INLINE void clear() { mBase.clear(); } protected: BaseMap mBase; }; } } // namespace shdfnd } // namespace nvidia #if NV_VC #pragma warning(pop) #endif #endif // #ifndef NV_NSFOUNDATION_NSHASHINTERNALS_H
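A standalone sketch (not the SDK code) of the two ideas HashBase builds on: a power-of-two table indexed with a bit mask instead of '%', and a free list threaded through the same 'next' array that also forms the per-bucket chains. It is deliberately miniature and omits resizing, destruction and compaction.

#include <cassert>
#include <cstdint>
#include <vector>

static const uint32_t EOL = 0xffffffff;

struct MiniTable
{
    std::vector<uint32_t> hash;   // bucket heads, size is a power of two
    std::vector<uint32_t> next;   // chain links / free list links
    std::vector<uint32_t> keys;   // entry storage
    uint32_t freeList;

    explicit MiniTable(uint32_t pow2Size)
        : hash(pow2Size, EOL), next(pow2Size), keys(pow2Size), freeList(0)
    {
        for (uint32_t i = 0; i + 1 < pow2Size; ++i) next[i] = i + 1;
        next[pow2Size - 1] = EOL;                 // free list initially spans all slots
    }

    uint32_t bucket(uint32_t key) const { return key & (uint32_t(hash.size()) - 1); }

    void insert(uint32_t key)
    {
        uint32_t slot = freeList;                 // pop a slot from the free list
        freeList = next[slot];
        uint32_t b = bucket(key);
        next[slot] = hash[b];                     // push onto the bucket chain
        hash[b] = slot;
        keys[slot] = key;
    }

    bool contains(uint32_t key) const
    {
        for (uint32_t i = hash[bucket(key)]; i != EOL; i = next[i])
            if (keys[i] == key) return true;
        return false;
    }
};

int main()
{
    MiniTable t(8);
    t.insert(3); t.insert(11);                    // 3 and 11 collide: 11 & 7 == 3
    assert(t.contains(3) && t.contains(11) && !t.contains(4));
    return 0;
}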
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/Ns.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NSFOUNDATION_NS_H #define NV_NSFOUNDATION_NS_H /*! \file top level include file for shared foundation */ #include "Nv.h" /** Platform specific defines */ #if NV_WINDOWS_FAMILY || NV_XBOXONE #pragma intrinsic(memcmp) #pragma intrinsic(memcpy) #pragma intrinsic(memset) #pragma intrinsic(abs) #pragma intrinsic(labs) #endif // An expression that should expand to nothing in non NV_CHECKED builds. // We currently use this only for tagging the purpose of containers for memory use tracking. #if NV_CHECKED #define NV_DEBUG_EXP(x) (x) #else #define NV_DEBUG_EXP(x) #endif #define NV_SIGN_BITMASK 0x80000000 namespace nvidia { namespace shdfnd { // Int-as-bool type - has some uses for efficiency and with SIMD typedef int IntBool; static const IntBool IntFalse = 0; static const IntBool IntTrue = 1; } } // namespace nvidia #endif // #ifndef NV_NSFOUNDATION_NS_H
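A small hedged sketch of the IntBool convention above: it is just an int used as a boolean, so comparison-style helpers can return it directly and callers test it like any integer. 'isAboveThreshold' is purely illustrative, not an SDK function.

#include "Ns.h"

static nvidia::shdfnd::IntBool isAboveThreshold(float v, float threshold)
{
    return v > threshold ? nvidia::shdfnd::IntTrue : nvidia::shdfnd::IntFalse;
}

void example()
{
    if (isAboveThreshold(2.0f, 1.0f))
    {
        // taken: IntTrue is nonzero
    }
}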
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NvUnionCast.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_FOUNDATION_NV_UNION_CAST_H #define NV_FOUNDATION_NV_UNION_CAST_H #include "NvPreprocessor.h" /** \addtogroup foundation @{ */ #if !NV_DOXYGEN namespace nvidia { #endif template<class A, class B> NV_FORCE_INLINE A NvUnionCast(B b) { union AB { AB(B bb) : _b(bb) { } B _b; A _a; } u(b); return u._a; } #if !NV_DOXYGEN } // namespace nvidia #endif /** @} */ #endif
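A hedged usage sketch of NvUnionCast: it reinterprets the bits of one value as another type through a union, as defined above. It assumes NvUnionCast.h is on the include path and that the two types have compatible sizes.

#include "NvUnionCast.h"
#include <cassert>
#include <cstdint>

int main()
{
    // 0x3f800000 is the IEEE-754 bit pattern of 1.0f
    float f = nvidia::NvUnionCast<float>(uint32_t(0x3f800000u));
    assert(f == 1.0f);

    // Round-trip back to the integer representation
    uint32_t bits = nvidia::NvUnionCast<uint32_t>(f);
    assert(bits == 0x3f800000u);
    return 0;
}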
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsInlineAllocator.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NSFOUNDATION_NSINLINEALLOCATOR_H #define NV_NSFOUNDATION_NSINLINEALLOCATOR_H #include "NsUserAllocated.h" namespace nvidia { namespace shdfnd { // this is used by the array class to allocate some space for a small number // of objects along with the metadata template <uint32_t N, typename BaseAllocator> class InlineAllocator : private BaseAllocator { public: InlineAllocator(const NvEMPTY v) : BaseAllocator(v) { } InlineAllocator(const BaseAllocator& alloc = BaseAllocator()) : BaseAllocator(alloc), mBufferUsed(false) { } InlineAllocator(const InlineAllocator& aloc) : BaseAllocator(aloc), mBufferUsed(false) { } void* allocate(uint32_t size, const char* filename, int line) { if(!mBufferUsed && size <= N) { mBufferUsed = true; return mBuffer; } return BaseAllocator::allocate(size, filename, line); } void deallocate(void* ptr) { if(ptr == mBuffer) mBufferUsed = false; else BaseAllocator::deallocate(ptr); } NV_FORCE_INLINE uint8_t* getInlineBuffer() { return mBuffer; } NV_FORCE_INLINE bool isBufferUsed() const { return mBufferUsed; } protected: uint8_t mBuffer[N]; bool mBufferUsed; }; } // namespace shdfnd } // namespace nvidia #endif // #ifndef NV_NSFOUNDATION_NSINLINEALLOCATOR_H
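A hedged usage sketch of InlineAllocator: the first small request is served from the N-byte internal buffer, and everything else falls back to the base allocator. RawAllocator (malloc/free, defined in NsAllocator.h) is used as the base here so the snippet does not depend on the foundation globals being initialized.

#include "NsInlineAllocator.h"
#include "NsAllocator.h"
#include <cassert>

int main()
{
    using namespace nvidia::shdfnd;
    InlineAllocator<64, RawAllocator> alloc;

    void* a = alloc.allocate(32, __FILE__, __LINE__);   // fits: served from the inline buffer
    assert(a == alloc.getInlineBuffer() && alloc.isBufferUsed());

    void* b = alloc.allocate(32, __FILE__, __LINE__);   // buffer already in use: falls back to malloc
    assert(b != alloc.getInlineBuffer());

    alloc.deallocate(b);                                // released through the base allocator
    alloc.deallocate(a);                                // just marks the inline buffer free again
    assert(!alloc.isBufferUsed());
    return 0;
}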
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsHashMap.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NSFOUNDATION_NSHASHMAP_H #define NV_NSFOUNDATION_NSHASHMAP_H #include "NsHashInternals.h" // TODO: make this doxy-format // // This header defines two hash maps. Hash maps // * support custom initial table sizes (rounded up internally to power-of-2) // * support custom static allocator objects // * auto-resize, based on a load factor (i.e. a 64-entry .75 load factor hash will resize // when the 49th element is inserted) // * are based on open hashing // * have O(1) contains, erase // // Maps have STL-like copying semantics, and properly initialize and destruct copies of objects // // There are two forms of map: coalesced and uncoalesced. Coalesced maps keep the entries in the // initial segment of an array, so are fast to iterate over; however deletion is approximately // twice as expensive. // // HashMap<T>: // bool insert(const Key& k, const Value& v) O(1) amortized (exponential resize policy) // Value & operator[](const Key& k) O(1) for existing objects, else O(1) amortized // const Entry * find(const Key& k); O(1) // bool erase(const T& k); O(1) // uint32_t size(); constant // void reserve(uint32_t size); O(MAX(currentOccupancy,size)) // void clear(); O(currentOccupancy) (with zero constant for objects // without // destructors) // Iterator getIterator(); // // operator[] creates an entry if one does not exist, initializing with the default constructor. 
// CoalescedHashMap<T> does not support getIterator, but instead supports // const Key *getEntries(); // // Use of iterators: // // for(HashMap::Iterator iter = test.getIterator(); !iter.done(); ++iter) // myFunction(iter->first, iter->second); namespace nvidia { namespace shdfnd { template <class Key, class Value, class HashFn = Hash<Key>, class Allocator = NonTrackingAllocator> class HashMap : public internal::HashMapBase<Key, Value, HashFn, Allocator> { public: typedef internal::HashMapBase<Key, Value, HashFn, Allocator> HashMapBase; typedef typename HashMapBase::Iterator Iterator; HashMap(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : HashMapBase(initialTableSize, loadFactor) { } HashMap(uint32_t initialTableSize, float loadFactor, const Allocator& alloc) : HashMapBase(initialTableSize, loadFactor, alloc) { } HashMap(const Allocator& alloc) : HashMapBase(64, 0.75f, alloc) { } Iterator getIterator() { return Iterator(HashMapBase::mBase); } }; template <class Key, class Value, class HashFn = Hash<Key>, class Allocator = NonTrackingAllocator> class CoalescedHashMap : public internal::HashMapBase<Key, Value, HashFn, Allocator> { public: typedef internal::HashMapBase<Key, Value, HashFn, Allocator> HashMapBase; CoalescedHashMap(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : HashMapBase(initialTableSize, loadFactor) { } const Pair<const Key, Value>* getEntries() const { return HashMapBase::mBase.getEntries(); } }; } // namespace shdfnd } // namespace nvidia #endif // #ifndef NV_NSFOUNDATION_NSHASHMAP_H
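A hedged usage sketch following the iteration idiom described in the comment above. It assumes the foundation allocator has been initialized, since the default NonTrackingAllocator routes every allocation through getAllocator().

#include "NsHashMap.h"

void example()
{
    using namespace nvidia::shdfnd;

    HashMap<int, float> map;          // 64 buckets, 0.75 load factor by default
    map.insert(1, 10.0f);             // returns false if the key already exists
    map[2] = 20.0f;                   // creates the entry with Value() first, then assigns

    if (const Pair<const int, float>* e = map.find(1))
    {
        (void)e->second;              // 10.0f
    }

    for (HashMap<int, float>::Iterator iter = map.getIterator(); !iter.done(); ++iter)
    {
        (void)iter->first;
        (void)iter->second;
    }

    map.erase(2);
}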
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsHash.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NSFOUNDATION_NSHASH_H #define NV_NSFOUNDATION_NSHASH_H #include "Ns.h" #include "NsBasicTemplates.h" #if NV_VC #pragma warning(push) #pragma warning(disable : 4302) #endif #if NV_LINUX #include "NvSimpleTypes.h" #endif /*! 
Central definition of hash functions */ namespace nvidia { namespace shdfnd { // Hash functions // Thomas Wang's 32 bit mix // http://www.cris.com/~Ttwang/tech/inthash.htm NV_FORCE_INLINE uint32_t hash(const uint32_t key) { uint32_t k = key; k += ~(k << 15); k ^= (k >> 10); k += (k << 3); k ^= (k >> 6); k += ~(k << 11); k ^= (k >> 16); return uint32_t(k); } NV_FORCE_INLINE uint32_t hash(const int32_t key) { return hash(uint32_t(key)); } // Thomas Wang's 64 bit mix // http://www.cris.com/~Ttwang/tech/inthash.htm NV_FORCE_INLINE uint32_t hash(const uint64_t key) { uint64_t k = key; k += ~(k << 32); k ^= (k >> 22); k += ~(k << 13); k ^= (k >> 8); k += (k << 3); k ^= (k >> 15); k += ~(k << 27); k ^= (k >> 31); return uint32_t(UINT32_MAX & k); } #if NV_APPLE_FAMILY // hash for size_t, to make gcc happy NV_INLINE uint32_t hash(const size_t key) { #if NV_P64_FAMILY return hash(uint64_t(key)); #else return hash(uint32_t(key)); #endif } #endif // Hash function for pointers NV_INLINE uint32_t hash(const void* ptr) { #if NV_P64_FAMILY return hash(uint64_t(ptr)); #else return hash(uint32_t(UINT32_MAX & size_t(ptr))); #endif } // Hash function for pairs template <typename F, typename S> NV_INLINE uint32_t hash(const Pair<F, S>& p) { uint32_t seed = 0x876543; uint32_t m = 1000007; return hash(p.second) ^ (m * (hash(p.first) ^ (m * seed))); } // hash object for hash map template parameter template <class Key> struct Hash { uint32_t operator()(const Key& k) const { return hash(k); } bool equal(const Key& k0, const Key& k1) const { return k0 == k1; } }; // specialization for strings template <> struct Hash<const char*> { public: uint32_t operator()(const char* _string) const { // "DJB" string hash const uint8_t* string = reinterpret_cast<const uint8_t*>(_string); uint32_t h = 5381; for(const uint8_t* ptr = string; *ptr; ptr++) h = ((h << 5) + h) ^ uint32_t(*ptr); return h; } bool equal(const char* string0, const char* string1) const { return !strcmp(string0, string1); } }; } // namespace shdfnd } // namespace nvidia #if NV_VC #pragma warning(pop) #endif #endif // #ifndef NV_NSFOUNDATION_NSHASH_H
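A standalone sketch that mirrors two of the hash functions above (Thomas Wang's 32-bit integer mix and the DJB string hash) so their behavior can be poked at from a small test program without the SDK headers.

#include <cstdint>
#include <cstdio>

static uint32_t wangHash(uint32_t k)
{
    k += ~(k << 15); k ^= (k >> 10);
    k += (k << 3);   k ^= (k >> 6);
    k += ~(k << 11); k ^= (k >> 16);
    return k;
}

static uint32_t djbHash(const char* s)
{
    uint32_t h = 5381;
    for (const unsigned char* p = reinterpret_cast<const unsigned char*>(s); *p; ++p)
        h = ((h << 5) + h) ^ uint32_t(*p);   // h * 33 ^ c
    return h;
}

int main()
{
    // Nearby integer keys land in very different buckets of a power-of-two table
    const uint32_t mask = 64 - 1;
    std::printf("%u %u %u\n",
                unsigned(wangHash(1) & mask),
                unsigned(wangHash(2) & mask),
                unsigned(wangHash(3) & mask));
    std::printf("%u %u\n", unsigned(djbHash("asset")), unsigned(djbHash("assets")));
    return 0;
}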
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsAoS.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NS_AOS_H #define NS_AOS_H #include "NvPreprocessor.h" #if NV_WINDOWS_FAMILY && !NV_NEON #include "platform/windows/NsWindowsAoS.h" #elif NV_X360 #include "xbox360/NsXbox360AoS.h" #elif (NV_LINUX || NV_ANDROID || NV_APPLE || NV_PS4 || (NV_WINRT && NV_NEON)) #include "platform/unix/NsUnixAoS.h" #elif NV_PS3 #include "ps3/NsPS3AoS.h" #elif NV_PSP2 #include "psp2/NsPSP2AoS.h" #elif NV_XBOXONE #include "XboxOne/NsXboxOneAoS.h" #else #error "Platform not supported!" #endif #endif
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsFPU.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NSFOUNDATION_NSFPU_H #define NV_NSFOUNDATION_NSFPU_H #include "Ns.h" #include "NsIntrinsics.h" // unsigned integer representation of a floating-point value. #if NV_PS3 NV_FORCE_INLINE unsigned int NV_IR(const float x) { union { int i; float f; } u; u.f = x; return u.i; } NV_FORCE_INLINE int NV_SIR(const float x) { union { int i; float f; } u; u.f = x; return u.i; } NV_FORCE_INLINE float NV_FR(const unsigned int x) { union { unsigned int i; float f; } u; u.i = x; return u.f; } #else #define NV_IR(x) ((uint32_t&)(x)) #define NV_SIR(x) ((int32_t&)(x)) #define NV_FR(x) ((float&)(x)) #endif // signed integer representation of a floating-point value. // Floating-point representation of a integer value. 
#define NV_SIGN_BITMASK 0x80000000 #define NV_FPU_GUARD shdfnd::FPUGuard scopedFpGuard; #define NV_SIMD_GUARD shdfnd::SIMDGuard scopedFpGuard; #define NV_SUPPORT_GUARDS (NV_WINDOWS_FAMILY || NV_XBOXONE || NV_LINUX || NV_PS4 || NV_OSX) namespace nvidia { namespace shdfnd { // sets the default SDK state for scalar and SIMD units class NV_FOUNDATION_API FPUGuard { public: FPUGuard(); // set fpu control word for PhysX ~FPUGuard(); // restore fpu control word private: uint32_t mControlWords[8]; }; // sets default SDK state for simd unit only, lighter weight than FPUGuard class SIMDGuard { public: NV_INLINE SIMDGuard(); // set simd control word for PhysX NV_INLINE ~SIMDGuard(); // restore simd control word private: #if NV_SUPPORT_GUARDS uint32_t mControlWord; #endif }; /** \brief Enables floating point exceptions for the scalar and SIMD unit */ NV_FOUNDATION_API void enableFPExceptions(); /** \brief Disables floating point exceptions for the scalar and SIMD unit */ NV_FOUNDATION_API void disableFPExceptions(); } // namespace shdfnd } // namespace nvidia #if NV_WINDOWS_FAMILY || NV_XBOXONE #include "platform/windows/NsWindowsFPU.h" #elif NV_LINUX || NV_PS4 || NV_OSX #include "platform/unix/NsUnixFPU.h" #else NV_INLINE nvidia::shdfnd::SIMDGuard::SIMDGuard() { } NV_INLINE nvidia::shdfnd::SIMDGuard::~SIMDGuard() { } #endif #endif // #ifndef NV_NSFOUNDATION_NSFPU_H
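A hedged usage sketch of the guard macros above: FPUGuard and SIMDGuard are RAII objects, so placing the macro at the top of a scope sets the SDK's preferred floating-point control state for that scope and restores the previous state on exit. It assumes the platform is covered by NV_SUPPORT_GUARDS and the foundation library providing the guard implementations is linked; note the macros use an unqualified 'shdfnd::', so a using-directive for nvidia is needed.

#include "NsFPU.h"

using namespace nvidia;

float accumulate(const float* values, int count)
{
    NV_SIMD_GUARD;                 // sets the SIMD control word, restored when the scope ends

    float sum = 0.0f;
    for (int i = 0; i < count; ++i)
        sum += values[i];
    return sum;
}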
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsInlineArray.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NSFOUNDATION_NSINLINEARRAY_H #define NV_NSFOUNDATION_NSINLINEARRAY_H #include "NsArray.h" #include "NsInlineAllocator.h" namespace nvidia { namespace shdfnd { // array that pre-allocates for N elements template <typename T, uint32_t N, typename Alloc = typename AllocatorTraits<T>::Type> class InlineArray : public Array<T, InlineAllocator<N * sizeof(T), Alloc> > { typedef InlineAllocator<N * sizeof(T), Alloc> Allocator; public: InlineArray(const NvEMPTY v) : Array<T, Allocator>(v) { if(isInlined()) this->mData = reinterpret_cast<T*>(Array<T, Allocator>::getInlineBuffer()); } NV_INLINE bool isInlined() const { return Allocator::isBufferUsed(); } NV_INLINE explicit InlineArray(const Alloc& alloc = Alloc()) : Array<T, Allocator>(alloc) { this->mData = this->allocate(N); this->mCapacity = N; } }; } // namespace shdfnd } // namespace nvidia #endif // #ifndef NV_NSFOUNDATION_NSINLINEARRAY_H
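A hedged usage sketch of InlineArray: up to N elements live in an embedded buffer, and the heap is touched only when the array grows past that. RawAllocator is substituted for the default allocator so the snippet does not depend on the foundation globals; pushBack() is assumed from NsArray.h, which is not reproduced in this dump.

#include "NsInlineArray.h"
#include "NsAllocator.h"
#include <cassert>

void example()
{
    using namespace nvidia::shdfnd;

    InlineArray<int, 4, RawAllocator> a;
    assert(a.isInlined());            // storage is the embedded 4-element buffer

    for (int i = 0; i < 4; ++i)
        a.pushBack(i);                // still no heap allocation
    assert(a.isInlined());

    a.pushBack(4);                    // exceeds N: reallocates through RawAllocator
}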
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFoundation/include/NsAllocator.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_NSFOUNDATION_NSALLOCATOR_H #define NV_NSFOUNDATION_NSALLOCATOR_H #include "NvAllocatorCallback.h" #include "Ns.h" #include "NsGlobals.h" #if(NV_WINDOWS_FAMILY || NV_WINRT || NV_X360 || NV_XBOXONE) #include <exception> #include <typeinfo.h> #endif #if(NV_APPLE_FAMILY) #include <typeinfo> #endif #if NV_WIIU #pragma ghs nowarning 193 // warning #193-D: zero used for undefined preprocessing identifier #endif #include <new> #if NV_WIIU #pragma ghs endnowarning #endif // Allocation macros going through user allocator #if NV_CHECKED #define NV_ALLOC(n, name) nvidia::shdfnd::NamedAllocator(name).allocate(n, __FILE__, __LINE__) #else #define NV_ALLOC(n, name) nvidia::shdfnd::NonTrackingAllocator().allocate(n, __FILE__, __LINE__) #endif #define NV_ALLOC_TEMP(n, name) NV_ALLOC(n, name) #define NV_FREE(x) nvidia::shdfnd::NonTrackingAllocator().deallocate(x) #define NV_FREE_AND_RESET(x) \ { \ NV_FREE(x); \ x = 0; \ } // The following macros support plain-old-types and classes derived from UserAllocated. #define NV_NEW(T) new (nvidia::shdfnd::ReflectionAllocator<T>(), __FILE__, __LINE__) T #define NV_NEW_TEMP(T) NV_NEW(T) #define NV_DELETE(x) delete x #define NV_DELETE_AND_RESET(x) \ { \ NV_DELETE(x); \ x = 0; \ } #define NV_DELETE_POD(x) \ { \ NV_FREE(x); \ x = 0; \ } #define NV_DELETE_ARRAY(x) \ { \ NV_DELETE([] x); \ x = 0; \ } // aligned allocation #define NV_ALIGNED16_ALLOC(n) nvidia::shdfnd::AlignedAllocator<16>().allocate(n, __FILE__, __LINE__) #define NV_ALIGNED16_FREE(x) nvidia::shdfnd::AlignedAllocator<16>().deallocate(x) //! placement new macro to make it easy to spot bad use of 'new' #define NV_PLACEMENT_NEW(p, T) new (p) T #if NV_DEBUG || NV_CHECKED #define NV_USE_NAMED_ALLOCATOR 1 #else #define NV_USE_NAMED_ALLOCATOR 0 #endif // Don't use inline for alloca !!! 
#if NV_WINDOWS_FAMILY || NV_WINRT #include <malloc.h> #define NvAlloca(x) _alloca(x) #elif NV_LINUX || NV_ANDROID #include <malloc.h> #define NvAlloca(x) alloca(x) #elif NV_PSP2 #include <alloca.h> #define NvAlloca(x) alloca(x) #elif NV_APPLE_FAMILY #include <alloca.h> #define NvAlloca(x) alloca(x) #elif NV_PS3 #include <alloca.h> #define NvAlloca(x) alloca(x) #elif NV_X360 #include <malloc.h> #define NvAlloca(x) _alloca(x) #elif NV_WIIU #include <alloca.h> #define NvAlloca(x) alloca(x) #elif NV_PS4 #include <memory.h> #define NvAlloca(x) alloca(x) #elif NV_XBOXONE #include <malloc.h> #define NvAlloca(x) alloca(x) #endif #define NvAllocaAligned(x, alignment) ((size_t(NvAlloca(x + alignment)) + (alignment - 1)) & ~size_t(alignment - 1)) namespace nvidia { namespace shdfnd { /* * Bootstrap allocator using malloc/free. * Don't use unless your objects get allocated before foundation is initialized. */ class RawAllocator { public: RawAllocator(const char* = 0) { } void* allocate(size_t size, const char*, int) { // malloc returns valid pointer for size==0, no need to check return ::malloc(size); } void deallocate(void* ptr) { // free(0) is guaranteed to have no side effect, no need to check ::free(ptr); } }; /* * Allocator that simply calls straight back to the application without tracking. * This is used by the heap (Foundation::mNamedAllocMap) that tracks allocations * because it needs to be able to grow as a result of an allocation. * Making the hash table re-entrant to deal with this may not make sense. */ class NonTrackingAllocator { public: NV_FORCE_INLINE NonTrackingAllocator(const char* = 0) { } NV_FORCE_INLINE void* allocate(size_t size, const char* file, int line) { return !size ? 0 : getAllocator().allocate(size, "NonTrackedAlloc", file, line); } NV_FORCE_INLINE void deallocate(void* ptr) { if(ptr) getAllocator().deallocate(ptr); } }; /** Allocator used to access the global NvAllocatorCallback instance using a dynamic name. */ void initializeNamedAllocatorGlobals(); void terminateNamedAllocatorGlobals(); #if NV_USE_NAMED_ALLOCATOR // can be slow, so only use in debug/checked class NV_FOUNDATION_API NamedAllocator { public: NamedAllocator(const NvEMPTY); NamedAllocator(const char* name = 0); // todo: should not have default argument! NamedAllocator(const NamedAllocator&); ~NamedAllocator(); NamedAllocator& operator=(const NamedAllocator&); void* allocate(size_t size, const char* filename, int line); void deallocate(void* ptr); }; #else class NamedAllocator; #endif // NV_DEBUG /** Allocator used to access the global NvAllocatorCallback instance using a static name derived from T. */ template <typename T> class ReflectionAllocator { static const char* getName() { if(!getReflectionAllocatorReportsNames()) return "<allocation names disabled>"; #if NV_GCC_FAMILY return __PRETTY_FUNCTION__; #else // name() calls malloc(), raw_name() wouldn't return typeid(T).name(); #endif } public: ReflectionAllocator(const NvEMPTY) { } ReflectionAllocator(const char* = 0) { } inline ReflectionAllocator(const ReflectionAllocator&) { } void* allocate(size_t size, const char* filename, int line) { return size ? 
getAllocator().allocate(size, getName(), filename, line) : 0; } void deallocate(void* ptr) { if(ptr) getAllocator().deallocate(ptr); } }; template <typename T> struct AllocatorTraits { #if NV_USE_NAMED_ALLOCATOR typedef NamedAllocator Type; #else typedef ReflectionAllocator<T> Type; #endif }; // if you get a build error here, you are trying to NV_NEW a class // that is neither plain-old-type nor derived from UserAllocated template <typename T, typename X> union EnableIfPod { int i; T t; typedef X Type; }; } // namespace shdfnd } // namespace nvidia // Global placement new for ReflectionAllocator templated by // plain-old-type. Allows using NV_NEW for pointers and built-in-types. // // ATTENTION: You need to use NV_DELETE_POD or NV_FREE to deallocate // memory, not NV_DELETE. NV_DELETE_POD redirects to NV_FREE. // // Rationale: NV_DELETE uses global operator delete(void*), which we dont' want to overload. // Any other definition of NV_DELETE couldn't support array syntax 'NV_DELETE([]a);'. // NV_DELETE_POD was preferred over NV_DELETE_ARRAY because it is used // less often and applies to both single instances and arrays. template <typename T> NV_INLINE void* operator new(size_t size, nvidia::shdfnd::ReflectionAllocator<T> alloc, const char* fileName, typename nvidia::shdfnd::EnableIfPod<T, int>::Type line) { return alloc.allocate(size, fileName, line); } template <typename T> NV_INLINE void* operator new [](size_t size, nvidia::shdfnd::ReflectionAllocator<T> alloc, const char* fileName, typename nvidia::shdfnd::EnableIfPod<T, int>::Type line) { return alloc.allocate(size, fileName, line); } // If construction after placement new throws, this placement delete is being called. template <typename T> NV_INLINE void operator delete(void* ptr, nvidia::shdfnd::ReflectionAllocator<T> alloc, const char* fileName, typename nvidia::shdfnd::EnableIfPod<T, int>::Type line) { NV_UNUSED(fileName); NV_UNUSED(line); alloc.deallocate(ptr); } // If construction after placement new throws, this placement delete is being called. template <typename T> NV_INLINE void operator delete [](void* ptr, nvidia::shdfnd::ReflectionAllocator<T> alloc, const char* fileName, typename nvidia::shdfnd::EnableIfPod<T, int>::Type line) { NV_UNUSED(fileName); NV_UNUSED(line); alloc.deallocate(ptr); } #endif // #ifndef NV_NSFOUNDATION_NSALLOCATOR_H
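A hedged usage sketch of the allocation macros above. It assumes the foundation allocator globals are initialized so getAllocator() is valid; 'Particle' is a hypothetical POD type used only for illustration.

#include "NsAllocator.h"

struct Particle { float x, y, z; };    // plain-old-data, not derived from UserAllocated

void example()
{
    // Raw, untyped allocation paired with NV_FREE
    void* scratch = NV_ALLOC(256, "scratchBuffer");
    NV_FREE(scratch);

    // NV_NEW on a POD goes through ReflectionAllocator<Particle>; per the note
    // above it must be released with NV_DELETE_POD (or NV_FREE), not NV_DELETE.
    Particle* p = NV_NEW(Particle);
    p->x = p->y = p->z = 0.0f;
    NV_DELETE_POD(p);
}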