file_path
stringlengths
21
207
content
stringlengths
5
1.02M
size
int64
5
1.02M
lang
stringclasses
9 values
avg_line_length
float64
1.33
100
max_line_length
int64
4
993
alphanum_fraction
float64
0.27
0.93
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetMeasure.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @author Ken Museth /// /// @file LevelSetMeasure.h #ifndef OPENVDB_TOOLS_LEVELSETMEASURE_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_LEVELSETMEASURE_HAS_BEEN_INCLUDED #include <openvdb/math/Math.h> #include <openvdb/Types.h> #include <openvdb/Grid.h> #include <openvdb/tree/LeafManager.h> #include <openvdb/tree/ValueAccessor.h> #include <openvdb/math/FiniteDifference.h> #include <openvdb/math/Operators.h> #include <openvdb/math/Stencils.h> #include <openvdb/util/NullInterrupter.h> #include <tbb/parallel_for.h> #include <tbb/parallel_sort.h> #include <tbb/parallel_invoke.h> #include <type_traits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Return the surface area of a narrow-band level set. /// /// @param grid a scalar, floating-point grid with one or more disjoint, /// closed level set surfaces /// @param useWorldSpace if true the area is computed in /// world space units, else in voxel units. /// /// @throw TypeError if @a grid is not scalar or not floating-point or not a level set or empty. template<class GridType> inline Real levelSetArea(const GridType& grid, bool useWorldSpace = true); /// @brief Return the volume of a narrow-band level set surface. /// /// @param grid a scalar, floating-point grid with one or more disjoint, /// closed level set surfaces /// @param useWorldSpace if true the volume is computed in /// world space units, else in voxel units. /// /// @throw TypeError if @a grid is not scalar or not floating-point or not a level set or empty. template<class GridType> inline Real levelSetVolume(const GridType& grid, bool useWorldSpace = true); /// @brief Return the Euler Characteristics of a narrow-band level set surface (possibly disconnected). 
/// /// @param grid a scalar, floating-point grid with one or more disjoint, /// closed level set surfaces /// /// @throw TypeError if @a grid is not scalar or not floating-point or not a level set or empty. template<class GridType> inline int levelSetEulerCharacteristic(const GridType& grid); /// @brief Return the genus of a narrow-band level set surface. /// /// @param grid a scalar, floating-point grid with one or more disjoint, /// closed level set surfaces /// @warning The genus is only well defined for a single connected surface /// /// @throw TypeError if @a grid is not scalar or not floating-point or not a level set or empty. template<class GridType> inline int levelSetGenus(const GridType& grid); //////////////////////////////////////////////////////////////////////////////////////// /// @brief Smeared-out and continuous Dirac Delta function. template<typename RealT> class DiracDelta { public: // eps is the half-width of the dirac delta function in units of phi DiracDelta(RealT eps) : mC(0.5/eps), mD(2*math::pi<RealT>()*mC), mE(eps) {} // values of the dirac delta function are in units of one over the units of phi inline RealT operator()(RealT phi) const { return math::Abs(phi) > mE ? 0 : mC*(1+cos(mD*phi)); } private: const RealT mC, mD, mE; };// DiracDelta functor /// @brief Multi-threaded computation of surface area, volume and /// average mean-curvature for narrow band level sets. /// /// @details To reduce the risk of round-off errors (primarily due to /// catastrophic cancellation) and guarantee determinism during /// multi-threading this class is implemented using parallel_for, and /// delayed reduction of a sorted list. 
template<typename GridT, typename InterruptT = util::NullInterrupter> class LevelSetMeasure { public: using GridType = GridT; using TreeType = typename GridType::TreeType; using ValueType = typename TreeType::ValueType; using ManagerType = typename tree::LeafManager<const TreeType>; static_assert(std::is_floating_point<ValueType>::value, "level set measure is supported only for scalar, floating-point grids"); /// @brief Main constructor from a grid /// @param grid The level set to be measured. /// @param interrupt Optional interrupter. /// @throw RuntimeError if the grid is not a level set or if it's empty. LevelSetMeasure(const GridType& grid, InterruptT* interrupt = nullptr); /// @brief Re-initialize using the specified grid. /// @param grid The level set to be measured. /// @throw RuntimeError if the grid is not a level set or if it's empty. void init(const GridType& grid); /// @brief Destructor virtual ~LevelSetMeasure() {} /// @return the grain-size used for multi-threading int getGrainSize() const { return mGrainSize; } /// @brief Set the grain-size used for multi-threading. /// @note A grain size of 0 or less disables multi-threading! void setGrainSize(int grainsize) { mGrainSize = grainsize; } /// @brief Compute the surface area of the level set. /// @param useWorldUnits Specifies if the result is in world or voxel units. /// @note Performs internal caching so only the initial call incurs actual computation. Real area(bool useWorldUnits = true); /// @brief Compute the volume of the level set surface. /// @param useWorldUnits Specifies if the result is in world or voxel units. /// @note Performs internal caching so only the initial call incurs actual computation. Real volume(bool useWorldUnits = true); /// @brief Compute the total mean curvature of the level set surface. /// @param useWorldUnits Specifies if the result is in world or voxel units. /// @note Performs internal caching so only the initial call incurs actual computation. 
Real totMeanCurvature(bool useWorldUnits = true); /// @brief Compute the total gaussian curvature of the level set surface. /// @param useWorldUnits Specifies if the result is in world or voxel units. /// @note Performs internal caching so only the initial call incurs actual computation. Real totGaussianCurvature(bool useWorldUnits = true); /// @brief Compute the average mean curvature of the level set surface. /// @param useWorldUnits Specifies if the result is in world or voxel units. /// @note Performs internal caching so only the initial call incurs actual computation. Real avgMeanCurvature(bool useWorldUnits = true) {return this->totMeanCurvature(useWorldUnits) / this->area(useWorldUnits);} /// @brief Compute the average gaussian curvature of the level set surface. /// @param useWorldUnits Specifies if the result is in world or voxel units. /// @note Performs internal caching so only the initial call incurs actual computation. Real avgGaussianCurvature(bool useWorldUnits = true) {return this->totGaussianCurvature(useWorldUnits) / this->area(useWorldUnits); } /// @brief Compute the Euler characteristic of the level set surface. /// @note Performs internal caching so only the initial call incurs actual computation. int eulerCharacteristic(); /// @brief Compute the genus of the level set surface. /// @warning The genus is only well defined for a single connected surface. /// @note Performs internal caching so only the initial call incurs actual computation. int genus() { return 1 - this->eulerCharacteristic()/2;} private: using LeafT = typename TreeType::LeafNodeType; using VoxelCIterT = typename LeafT::ValueOnCIter; using LeafRange = typename ManagerType::LeafRange; using LeafIterT = typename LeafRange::Iterator; using ManagerPtr = std::unique_ptr<ManagerType>; using BufferPtr = std::unique_ptr<double[]>; // disallow copy construction and copy by assignment! 
LevelSetMeasure(const LevelSetMeasure&);// not implemented LevelSetMeasure& operator=(const LevelSetMeasure&);// not implemented const GridType *mGrid; ManagerPtr mLeafs; BufferPtr mBuffer; InterruptT *mInterrupter; double mDx, mArea, mVolume, mTotMeanCurvature, mTotGausCurvature; int mGrainSize; bool mUpdateArea, mUpdateCurvature; // @brief Return false if the process was interrupted bool checkInterrupter(); struct MeasureArea { MeasureArea(LevelSetMeasure* parent) : mParent(parent), mStencil(*mParent->mGrid) { if (parent->mInterrupter) parent->mInterrupter->start("Measuring area and volume of level set"); if (parent->mGrainSize>0) { tbb::parallel_for(parent->mLeafs->leafRange(parent->mGrainSize), *this); } else { (*this)(parent->mLeafs->leafRange()); } tbb::parallel_invoke([&](){parent->mArea = parent->reduce(0);}, [&](){parent->mVolume = parent->reduce(1)/3.0;}); parent->mUpdateArea = false; if (parent->mInterrupter) parent->mInterrupter->end(); } MeasureArea(const MeasureArea& other) : mParent(other.mParent), mStencil(*mParent->mGrid) {} void operator()(const LeafRange& range) const; LevelSetMeasure* mParent; mutable math::GradStencil<GridT, false> mStencil; };// MeasureArea struct MeasureCurvatures { MeasureCurvatures(LevelSetMeasure* parent) : mParent(parent), mStencil(*mParent->mGrid) { if (parent->mInterrupter) parent->mInterrupter->start("Measuring curvatures of level set"); if (parent->mGrainSize>0) { tbb::parallel_for(parent->mLeafs->leafRange(parent->mGrainSize), *this); } else { (*this)(parent->mLeafs->leafRange()); } tbb::parallel_invoke([&](){parent->mTotMeanCurvature = parent->reduce(0);}, [&](){parent->mTotGausCurvature = parent->reduce(1);}); parent->mUpdateCurvature = false; if (parent->mInterrupter) parent->mInterrupter->end(); } MeasureCurvatures(const MeasureCurvatures& other) : mParent(other.mParent), mStencil(*mParent->mGrid) {} void operator()(const LeafRange& range) const; LevelSetMeasure* mParent; mutable math::CurvatureStencil<GridT, 
false> mStencil; };// MeasureCurvatures double reduce(int offset) { double *first = mBuffer.get() + offset*mLeafs->leafCount(), *last = first + mLeafs->leafCount(); tbb::parallel_sort(first, last);// mitigates catastrophic cancellation Real sum = 0.0; while(first != last) sum += *first++; return sum; } }; // end of LevelSetMeasure class template<typename GridT, typename InterruptT> inline LevelSetMeasure<GridT, InterruptT>::LevelSetMeasure(const GridType& grid, InterruptT* interrupt) : mInterrupter(interrupt) , mGrainSize(1) { this->init(grid); } template<typename GridT, typename InterruptT> inline void LevelSetMeasure<GridT, InterruptT>::init(const GridType& grid) { if (!grid.hasUniformVoxels()) { OPENVDB_THROW(RuntimeError, "The transform must have uniform scale for the LevelSetMeasure to function"); } if (grid.getGridClass() != GRID_LEVEL_SET) { OPENVDB_THROW(RuntimeError, "LevelSetMeasure only supports level sets;" " try setting the grid class to \"level set\""); } if (grid.empty()) { OPENVDB_THROW(RuntimeError, "LevelSetMeasure does not support empty grids;"); } mGrid = &grid; mDx = grid.voxelSize()[0]; mLeafs = std::make_unique<ManagerType>(mGrid->tree()); mBuffer = std::make_unique<double[]>(2*mLeafs->leafCount()); mUpdateArea = mUpdateCurvature = true; } template<typename GridT, typename InterruptT> inline Real LevelSetMeasure<GridT, InterruptT>::area(bool useWorldUnits) { if (mUpdateArea) {MeasureArea m(this);}; double area = mArea; if (useWorldUnits) area *= math::Pow2(mDx); return area; } template<typename GridT, typename InterruptT> inline Real LevelSetMeasure<GridT, InterruptT>::volume(bool useWorldUnits) { if (mUpdateArea) {MeasureArea m(this);}; double volume = mVolume; if (useWorldUnits) volume *= math::Pow3(mDx) ; return volume; } template<typename GridT, typename InterruptT> inline Real LevelSetMeasure<GridT, InterruptT>::totMeanCurvature(bool useWorldUnits) { if (mUpdateCurvature) {MeasureCurvatures m(this);}; return mTotMeanCurvature * 
(useWorldUnits ? mDx : 1); } template<typename GridT, typename InterruptT> inline Real LevelSetMeasure<GridT, InterruptT>::totGaussianCurvature(bool) { if (mUpdateCurvature) {MeasureCurvatures m(this);}; return mTotGausCurvature; } template<typename GridT, typename InterruptT> inline int LevelSetMeasure<GridT, InterruptT>::eulerCharacteristic() { const Real x = this->totGaussianCurvature(true) / (2.0*math::pi<Real>()); return int(math::Round( x )); } ///////////////////////// PRIVATE METHODS ////////////////////// template<typename GridT, typename InterruptT> inline bool LevelSetMeasure<GridT, InterruptT>::checkInterrupter() { if (util::wasInterrupted(mInterrupter)) { tbb::task::self().cancel_group_execution(); return false; } return true; } template<typename GridT, typename InterruptT> inline void LevelSetMeasure<GridT, InterruptT>:: MeasureArea::operator()(const LeafRange& range) const { using Vec3T = math::Vec3<ValueType>; // computations are performed in index space where dV = 1 mParent->checkInterrupter(); const Real invDx = 1.0/mParent->mDx; const DiracDelta<Real> DD(1.5);// dirac delta function is 3 voxel units wide const size_t leafCount = mParent->mLeafs->leafCount(); for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { Real sumA = 0, sumV = 0;//reduce risk of catastrophic cancellation for (VoxelCIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) { const Real dd = DD(invDx * (*voxelIter)); if (dd > 0.0) { mStencil.moveTo(voxelIter); const Coord& p = mStencil.getCenterCoord();// in voxel units const Vec3T g = mStencil.gradient();// in world units sumA += dd*g.length();// \delta(\phi)*|\nabla\phi| sumV += dd*(g[0]*Real(p[0]) + g[1]*Real(p[1]) + g[2]*Real(p[2]));// \delta(\phi)\vec{x}\cdot\nabla\phi } } double* ptr = mParent->mBuffer.get() + leafIter.pos(); *ptr = sumA; ptr += leafCount; *ptr = sumV; } } template<typename GridT, typename InterruptT> inline void LevelSetMeasure<GridT, InterruptT>:: 
MeasureCurvatures::operator()(const LeafRange& range) const { using Vec3T = math::Vec3<ValueType>; // computations are performed in index space where dV = 1 mParent->checkInterrupter(); const Real dx = mParent->mDx, dx2=dx*dx, invDx = 1.0/dx; const DiracDelta<Real> DD(1.5);// dirac delta function is 3 voxel units wide ValueType mean, gauss; const size_t leafCount = mParent->mLeafs->leafCount(); for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { Real sumM = 0, sumG = 0;//reduce risk of catastrophic cancellation for (VoxelCIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) { const Real dd = DD(invDx * (*voxelIter)); if (dd > 0.0) { mStencil.moveTo(voxelIter); const Vec3T g = mStencil.gradient(); const Real dA = dd*g.length();// \delta(\phi)*\delta(\phi) mStencil.curvatures(mean, gauss); sumM += dA*mean*dx;// \delta(\phi)*\delta(\phi)*MeanCurvature sumG += dA*gauss*dx2;// \delta(\phi)*\delta(\phi)*GaussCurvature } } double* ptr = mParent->mBuffer.get() + leafIter.pos(); *ptr = sumM; ptr += leafCount; *ptr = sumG; } } //////////////////////////////////////// //{ /// @cond OPENVDB_LEVEL_SET_MEASURE_INTERNAL template<class GridT> inline typename std::enable_if<std::is_floating_point<typename GridT::ValueType>::value, Real>::type doLevelSetArea(const GridT& grid, bool useWorldUnits) { LevelSetMeasure<GridT> m(grid); return m.area(useWorldUnits); } template<class GridT> inline typename std::enable_if<!std::is_floating_point<typename GridT::ValueType>::value, Real>::type doLevelSetArea(const GridT&, bool) { OPENVDB_THROW(TypeError, "level set area is supported only for scalar, floating-point grids"); } /// @endcond //} template<class GridT> inline Real levelSetArea(const GridT& grid, bool useWorldUnits) { return doLevelSetArea<GridT>(grid, useWorldUnits); } //////////////////////////////////////// //{ /// @cond OPENVDB_LEVEL_SET_MEASURE_INTERNAL template<class GridT> inline typename std::enable_if<std::is_floating_point<typename 
GridT::ValueType>::value, Real>::type doLevelSetVolume(const GridT& grid, bool useWorldUnits) { LevelSetMeasure<GridT> m(grid); return m.volume(useWorldUnits); } template<class GridT> inline typename std::enable_if<!std::is_floating_point<typename GridT::ValueType>::value, Real>::type doLevelSetVolume(const GridT&, bool) { OPENVDB_THROW(TypeError, "level set volume is supported only for scalar, floating-point grids"); } /// @endcond //} template<class GridT> inline Real levelSetVolume(const GridT& grid, bool useWorldUnits) { return doLevelSetVolume<GridT>(grid, useWorldUnits); } //////////////////////////////////////// //{ /// @cond OPENVDB_LEVEL_SET_MEASURE_INTERNAL template<class GridT> inline typename std::enable_if<std::is_floating_point<typename GridT::ValueType>::value, int>::type doLevelSetEulerCharacteristic(const GridT& grid) { LevelSetMeasure<GridT> m(grid); return m.eulerCharacteristic(); } template<class GridT> inline typename std::enable_if<!std::is_floating_point<typename GridT::ValueType>::value, int>::type doLevelSetEulerCharacteristic(const GridT&) { OPENVDB_THROW(TypeError, "level set euler characteristic is supported only for scalar, floating-point grids"); } /// @endcond //} template<class GridT> inline int levelSetEulerCharacteristic(const GridT& grid) { return doLevelSetEulerCharacteristic(grid); } //////////////////////////////////////// //{ /// @cond OPENVDB_LEVEL_SET_MEASURE_INTERNAL template<class GridT> inline typename std::enable_if<std::is_floating_point<typename GridT::ValueType>::value, int>::type doLevelSetEuler(const GridT& grid) { LevelSetMeasure<GridT> m(grid); return m.eulerCharacteristics(); } template<class GridT> inline typename std::enable_if<std::is_floating_point<typename GridT::ValueType>::value, int>::type doLevelSetGenus(const GridT& grid) { LevelSetMeasure<GridT> m(grid); return m.genus(); } template<class GridT> inline typename std::enable_if<!std::is_floating_point<typename GridT::ValueType>::value, int>::type 
doLevelSetGenus(const GridT&) { OPENVDB_THROW(TypeError, "level set genus is supported only for scalar, floating-point grids"); } /// @endcond //} template<class GridT> inline int levelSetGenus(const GridT& grid) { return doLevelSetGenus(grid); } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_LEVELSETMEASURE_HAS_BEEN_INCLUDED
19,524
C
34.629562
137
0.673171
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetTracker.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

/// @author Ken Museth
///
/// @file tools/LevelSetTracker.h
///
/// @brief Performs multi-threaded interface tracking of narrow band
/// level sets. This is the building-block for most level set
/// computations that involve dynamic topology, e.g. advection.

#ifndef OPENVDB_TOOLS_LEVEL_SET_TRACKER_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_LEVEL_SET_TRACKER_HAS_BEEN_INCLUDED

#include <tbb/parallel_for.h>
#include <openvdb/Types.h>
#include <openvdb/math/Math.h>
#include <openvdb/math/FiniteDifference.h>
#include <openvdb/math/Operators.h>
#include <openvdb/math/Stencils.h>
#include <openvdb/math/Transform.h>
#include <openvdb/Grid.h>
#include <openvdb/util/NullInterrupter.h>
#include <openvdb/tree/ValueAccessor.h>
#include <openvdb/tree/LeafManager.h>
#include "ChangeBackground.h"// for changeLevelSetBackground
#include "Morphology.h"//for dilateActiveValues
#include "Prune.h"// for pruneLevelSet
#include <functional>
#include <type_traits>

namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {

namespace lstrack {

/// @brief How to handle voxels that fall outside the narrow band
/// @sa @link LevelSetTracker::trimming() trimming@endlink,
///     @link LevelSetTracker::setTrimming() setTrimming@endlink
enum class TrimMode {
    kNone,     ///< Leave out-of-band voxels intact
    kInterior, ///< Set out-of-band interior voxels to the background value
    kExterior, ///< Set out-of-band exterior voxels to the background value
    kAll       ///< Set all out-of-band voxels to the background value
};

} // namespace lstrack


/// @brief Performs multi-threaded interface tracking of narrow band level sets
template<typename GridT, typename InterruptT = util::NullInterrupter>
class LevelSetTracker
{
public:
    using TrimMode = lstrack::TrimMode;

    using GridType = GridT;
    using TreeType = typename GridT::TreeType;
    using LeafType = typename TreeType::LeafNodeType;
    using ValueType = typename TreeType::ValueType;
    using LeafManagerType = typename tree::LeafManager<TreeType>; // leafs + buffers
    using LeafRange = typename LeafManagerType::LeafRange;
    using BufferType = typename LeafManagerType::BufferType;
    using MaskTreeType = typename TreeType::template ValueConverter<ValueMask>::Type;

    static_assert(std::is_floating_point<ValueType>::value,
        "LevelSetTracker requires a level set grid with floating-point values");

    /// Lightweight struct that stores the state of the LevelSetTracker
    struct State {
        State(math::BiasedGradientScheme s = math::HJWENO5_BIAS,
              math::TemporalIntegrationScheme t = math::TVD_RK1,
              int n = static_cast<int>(LEVEL_SET_HALF_WIDTH), int g = 1)
            : spatialScheme(s), temporalScheme(t), normCount(n), grainSize(g) {}
        math::BiasedGradientScheme spatialScheme;
        math::TemporalIntegrationScheme temporalScheme;
        int normCount;// Number of iterations of normalization
        int grainSize;
    };

    /// @brief Main constructor
    /// @throw RuntimeError if the grid is not a level set
    LevelSetTracker(GridT& grid, InterruptT* interrupt = nullptr);

    virtual ~LevelSetTracker() { delete mLeafs; }

    /// @brief Iterative normalization, i.e. solving the Eikonal equation
    /// @note The mask is optional and by default it is ignored.
    template <typename MaskType>
    void normalize(const MaskType* mask);

    /// @brief Iterative normalization, i.e. solving the Eikonal equation
    void normalize() { this->normalize<MaskTreeType>(nullptr); }

    /// @brief Track the level set interface, i.e. rebuild and normalize the
    /// narrow band of the level set.
    void track();

    /// @brief Set voxels that are outside the narrow band to the background value
    /// (if trimming is enabled) and prune the grid.
    /// @details Pruning is done automatically as a step in tracking.
    /// @sa @link setTrimming() setTrimming@endlink, @link trimming() trimming@endlink
    void prune();

    /// @brief Fast but approximate dilation of the narrow band - one
    /// layer at a time. Normally we recommend using the resize method below
    /// which internally calls dilate (or erode) with the correct
    /// number of @a iterations to achieve the desired half voxel width
    /// of the narrow band (3 is recommended for most level set applications).
    ///
    /// @note Since many level set applications perform
    /// interface-tracking, which in turn rebuilds the narrow-band
    /// accurately, this dilate method can often be used with a
    /// single iterations of low-order re-normalization. This
    /// effectively allows very narrow bands to be created from points
    /// or polygons (e.g. with a half voxel width of 1), followed by a
    /// fast but approximate dilation (typically with a half voxel
    /// width of 3). This can be significantly faster than generating
    /// the final width of the narrow band from points or polygons.
    void dilate(int iterations = 1);

    /// @brief Erodes the width of the narrow-band and update the background values
    /// @throw ValueError if @a iterations is larger than the current half-width.
    void erode(int iterations = 1);

    /// @brief Resize the width of the narrow band, i.e. perform
    /// dilation and renormalization or erosion as required.
    bool resize(Index halfWidth = static_cast<Index>(LEVEL_SET_HALF_WIDTH));

    /// @brief Return the half width of the narrow band in floating-point voxel units.
    ValueType getHalfWidth() const { return mGrid->background()/mDx; }

    /// @brief Return the state of the tracker (see struct defined above)
    State getState() const { return mState; }

    /// @brief Set the state of the tracker (see struct defined above)
    void setState(const State& s) { mState = s; }

    /// @return the spatial finite difference scheme
    math::BiasedGradientScheme getSpatialScheme() const { return mState.spatialScheme; }

    /// @brief Set the spatial finite difference scheme
    void setSpatialScheme(math::BiasedGradientScheme s) { mState.spatialScheme = s; }

    /// @return the temporal integration scheme
    math::TemporalIntegrationScheme getTemporalScheme() const { return mState.temporalScheme; }

    /// @brief Set the temporal integration scheme
    void setTemporalScheme(math::TemporalIntegrationScheme s) { mState.temporalScheme = s;}

    /// @return The number of normalizations performed per track or
    /// normalize call.
    int getNormCount() const { return mState.normCount; }

    /// @brief Set the number of normalizations performed per track or
    /// normalize call.
    void setNormCount(int n) { mState.normCount = n; }

    /// @return the grain-size used for multi-threading
    int getGrainSize() const { return mState.grainSize; }

    /// @brief Set the grain-size used for multi-threading.
    /// @note A grainsize of 0 or less disables multi-threading!
    void setGrainSize(int grainsize) { mState.grainSize = grainsize; }

    /// @brief Return the trimming mode for voxels outside the narrow band.
    /// @details Trimming is enabled by default and is applied automatically prior to pruning.
    /// @sa @link setTrimming() setTrimming@endlink, @link prune() prune@endlink
    TrimMode trimming() const { return mTrimMode; }

    /// @brief Specify whether to trim voxels outside the narrow band prior to pruning.
    /// @sa @link trimming() trimming@endlink, @link prune() prune@endlink
    void setTrimming(TrimMode mode) { mTrimMode = mode; }

    /// @brief Return the size of a voxel in world units.
    ValueType voxelSize() const { return mDx; }

    void startInterrupter(const char* msg);

    void endInterrupter();

    /// @return false if the process was interrupted
    bool checkInterrupter();

    const GridType& grid() const { return *mGrid; }

    LeafManagerType& leafs() { return *mLeafs; }

    const LeafManagerType& leafs() const { return *mLeafs; }

private:
    // disallow copy construction and copy by assignment!
    LevelSetTracker(const LevelSetTracker&);// not implemented
    LevelSetTracker& operator=(const LevelSetTracker&);// not implemented

    // Private class to perform multi-threaded trimming of
    // voxels that are too far away from the zero-crossing.
    template<TrimMode Trimming>
    struct Trim
    {
        Trim(LevelSetTracker& tracker) : mTracker(tracker) {}
        void trim();
        void operator()(const LeafRange& r) const;
        LevelSetTracker& mTracker;
    };// Trim

    // Private struct to perform multi-threaded normalization
    template<math::BiasedGradientScheme      SpatialScheme,
             math::TemporalIntegrationScheme TemporalScheme,
             typename MaskT>
    struct Normalizer
    {
        using SchemeT = math::BIAS_SCHEME<SpatialScheme>;
        using StencilT = typename SchemeT::template ISStencil<GridType>::StencilType;
        using MaskLeafT = typename MaskT::LeafNodeType;
        using MaskIterT = typename MaskLeafT::ValueOnCIter;
        using VoxelIterT = typename LeafType::ValueOnCIter;

        Normalizer(LevelSetTracker& tracker, const MaskT* mask);
        void normalize();
        void operator()(const LeafRange& r) const {mTask(const_cast<Normalizer*>(this), r);}
        void cook(const char* msg, int swapBuffer=0);
        template <int Nominator, int Denominator>
        void euler(const LeafRange& range, Index phiBuffer, Index resultBuffer);
        // Dispatchers for the TVD Runge-Kutta sub-steps (weights as template args)
        inline void euler01(const LeafRange& r) {this->euler<0,1>(r, 0, 1);}
        inline void euler12(const LeafRange& r) {this->euler<1,2>(r, 1, 1);}
        inline void euler34(const LeafRange& r) {this->euler<3,4>(r, 1, 2);}
        inline void euler13(const LeafRange& r) {this->euler<1,3>(r, 1, 2);}
        template <int Nominator, int Denominator>
        void eval(StencilT& stencil, const ValueType* phi, ValueType* result, Index n) const;
        LevelSetTracker& mTracker;
        const MaskT*     mMask;
        const ValueType  mDt, mInvDx;
        typename std::function<void (Normalizer*, const LeafRange&)> mTask;
    }; // Normalizer struct

    // Dispatch on the spatial scheme (resolved at compile time downstream)
    template<math::BiasedGradientScheme SpatialScheme, typename MaskT>
    void normalize1(const MaskT* mask);

    // Dispatch on the temporal scheme (resolved at compile time downstream)
    template<math::BiasedGradientScheme      SpatialScheme,
             math::TemporalIntegrationScheme TemporalScheme,
             typename MaskT>
    void normalize2(const MaskT* mask);

    // Throughout the methods below mLeafs is always assumed to contain
    // a list of the current LeafNodes! The auxiliary buffers on the
    // other hand always have to be allocated locally, since some
    // methods need them and others don't!
    GridType*        mGrid;
    LeafManagerType* mLeafs;
    InterruptT*      mInterrupter;
    const ValueType  mDx;
    State            mState;
    TrimMode         mTrimMode = TrimMode::kAll;
}; // end of LevelSetTracker class

template<typename GridT, typename InterruptT>
LevelSetTracker<GridT, InterruptT>::
LevelSetTracker(GridT& grid, InterruptT* interrupt):
    mGrid(&grid),
    mLeafs(new LeafManagerType(grid.tree())),
    mInterrupter(interrupt),
    mDx(static_cast<ValueType>(grid.voxelSize()[0])),
    mState()
{
    if ( !grid.hasUniformVoxels() ) {
         OPENVDB_THROW(RuntimeError,
             "The transform must have uniform scale for the LevelSetTracker to function");
    }
    if ( grid.getGridClass() != GRID_LEVEL_SET) {
        OPENVDB_THROW(RuntimeError,
            "LevelSetTracker expected a level set, got a grid of class \""
            + grid.gridClassToString(grid.getGridClass())
            + "\" [hint: Grid::setGridClass(openvdb::GRID_LEVEL_SET)]");
    }
}

template<typename GridT, typename InterruptT>
inline void
LevelSetTracker<GridT, InterruptT>::
prune()
{
    this->startInterrupter("Pruning Level Set");

    // Set voxels that are too far away from the zero crossing to the background value.
    switch (mTrimMode) {
        case TrimMode::kNone:     break;
        case TrimMode::kInterior: Trim<TrimMode::kInterior>(*this).trim(); break;
        case TrimMode::kExterior: Trim<TrimMode::kExterior>(*this).trim(); break;
        case TrimMode::kAll:      Trim<TrimMode::kAll>(*this).trim(); break;
    }

    // Remove inactive nodes from tree
    tools::pruneLevelSet(mGrid->tree());

    // The tree topology has changed so rebuild the list of leafs
    mLeafs->rebuildLeafArray();
    this->endInterrupter();
}

template<typename GridT, typename InterruptT>
inline void
LevelSetTracker<GridT, InterruptT>::
track()
{
    // Dilate narrow-band (this also rebuilds the leaf array!)
    tools::dilateActiveValues( *mLeafs, 1, tools::NN_FACE, tools::IGNORE_TILES);

    // Compute signed distances in dilated narrow-band
    this->normalize();

    // Remove voxels that are outside the narrow band
    this->prune();
}

template<typename GridT, typename InterruptT>
inline void
LevelSetTracker<GridT, InterruptT>::
dilate(int iterations)
{
    if (this->getNormCount() == 0) {
        // No re-normalization requested: just grow the band and widen the background
        for (int i=0; i < iterations; ++i) {
            tools::dilateActiveValues( *mLeafs, 1, tools::NN_FACE, tools::IGNORE_TILES);
            tools::changeLevelSetBackground(this->leafs(), mDx + mGrid->background());
        }
    } else {
        // Re-normalize only the newly added voxels (masked by the topology difference)
        for (int i=0; i < iterations; ++i) {
            MaskTreeType mask0(mGrid->tree(), false, TopologyCopy());
            tools::dilateActiveValues( *mLeafs, 1, tools::NN_FACE, tools::IGNORE_TILES);
            tools::changeLevelSetBackground(this->leafs(), mDx + mGrid->background());
            MaskTreeType mask(mGrid->tree(), false, TopologyCopy());
            mask.topologyDifference(mask0);
            this->normalize(&mask);
        }
    }
}

template<typename GridT, typename InterruptT>
inline void
LevelSetTracker<GridT, InterruptT>::
erode(int iterations)
{
    tools::erodeVoxels(*mLeafs, iterations);
    mLeafs->rebuildLeafArray();
    // Shrink the background value to match the narrower band
    const ValueType background = mGrid->background() - ValueType(iterations) * mDx;
    tools::changeLevelSetBackground(this->leafs(), background);
}

template<typename GridT, typename InterruptT>
inline bool
LevelSetTracker<GridT, InterruptT>::
resize(Index halfWidth)
{
    const int wOld = static_cast<int>(math::RoundDown(this->getHalfWidth()));
    const int wNew = static_cast<int>(halfWidth);
    if (wOld < wNew) {
        this->dilate(wNew - wOld);
    } else if (wOld > wNew) {
        this->erode(wOld - wNew);
    }
    return wOld != wNew;// true if the band width actually changed
}

template<typename GridT, typename InterruptT>
inline void
LevelSetTracker<GridT, InterruptT>::
startInterrupter(const char* msg)
{
    if (mInterrupter) mInterrupter->start(msg);
}

template<typename GridT, typename InterruptT>
inline void
LevelSetTracker<GridT, InterruptT>::
endInterrupter()
{
    if (mInterrupter) mInterrupter->end();
}

template<typename GridT, typename InterruptT>
inline bool
LevelSetTracker<GridT, InterruptT>::
checkInterrupter()
{
    if (util::wasInterrupted(mInterrupter)) {
        tbb::task::self().cancel_group_execution();
        return false;
    }
    return true;
}

template<typename GridT, typename InterruptT>
template<typename MaskT>
inline void
LevelSetTracker<GridT, InterruptT>::
normalize(const MaskT* mask)
{
    switch (this->getSpatialScheme()) {
    case math::FIRST_BIAS:
        this->normalize1<math::FIRST_BIAS ,  MaskT>(mask); break;
    case math::SECOND_BIAS:
        this->normalize1<math::SECOND_BIAS,  MaskT>(mask); break;
    case math::THIRD_BIAS:
        this->normalize1<math::THIRD_BIAS,   MaskT>(mask); break;
    case math::WENO5_BIAS:
        this->normalize1<math::WENO5_BIAS,   MaskT>(mask); break;
    case math::HJWENO5_BIAS:
        this->normalize1<math::HJWENO5_BIAS, MaskT>(mask); break;
    case math::UNKNOWN_BIAS:
    default:
        OPENVDB_THROW(ValueError, "Spatial difference scheme not supported!");
    }
}

template<typename GridT, typename InterruptT>
template<math::BiasedGradientScheme SpatialScheme, typename MaskT>
inline void
LevelSetTracker<GridT, InterruptT>::
normalize1(const MaskT* mask)
{
    switch (this->getTemporalScheme()) {
    case math::TVD_RK1:
        this->normalize2<SpatialScheme, math::TVD_RK1, MaskT>(mask); break;
    case math::TVD_RK2:
        this->normalize2<SpatialScheme, math::TVD_RK2, MaskT>(mask); break;
    case math::TVD_RK3:
        this->normalize2<SpatialScheme, math::TVD_RK3, MaskT>(mask); break;
    case math::UNKNOWN_TIS:
    default:
        OPENVDB_THROW(ValueError, "Temporal integration scheme not supported!");
    }
}

template<typename GridT, typename InterruptT>
template<math::BiasedGradientScheme SpatialScheme,
         math::TemporalIntegrationScheme TemporalScheme,
         typename MaskT>
inline void
LevelSetTracker<GridT, InterruptT>::
normalize2(const MaskT* mask)
{
    Normalizer<SpatialScheme, TemporalScheme, MaskT> tmp(*this, mask);
    tmp.normalize();
}

////////////////////////////////////////////////////////////////////////////

template<typename GridT, typename InterruptT>
template<lstrack::TrimMode Trimming>
inline void
LevelSetTracker<GridT, InterruptT>::Trim<Trimming>::trim()
{
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    if (Trimming != TrimMode::kNone) {
        const int grainSize = mTracker.getGrainSize();
        const LeafRange range = mTracker.leafs().leafRange(grainSize);
        if (grainSize>0) {
            tbb::parallel_for(range, *this);
        } else {
            (*this)(range);
        }
    }
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}

/// Trim away voxels that have moved outside the narrow band
template<typename GridT, typename InterruptT>
template<lstrack::TrimMode Trimming>
inline void
LevelSetTracker<GridT, InterruptT>::Trim<Trimming>::operator()(const LeafRange& range) const
{
    mTracker.checkInterrupter();
    const ValueType gamma = mTracker.mGrid->background();

    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    for (auto leafIter = range.begin(); leafIter; ++leafIter) {
        auto& leaf = *leafIter;
        for (auto iter = leaf.beginValueOn(); iter; ++iter) {
            const auto val = *iter;
            switch (Trimming) { // resolved at compile time
                case TrimMode::kNone:
                    break;
                case TrimMode::kInterior:
                    if (val <= -gamma) { leaf.setValueOff(iter.pos(), -gamma); }
                    break;
                case TrimMode::kExterior:
                    if (val >= gamma) { leaf.setValueOff(iter.pos(), gamma); }
                    break;
                case TrimMode::kAll:
                    if (val <= -gamma) {
                        leaf.setValueOff(iter.pos(), -gamma);
                    } else if (val >= gamma) {
                        leaf.setValueOff(iter.pos(), gamma);
                    }
                    break;
            }
        }
    }
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}
//////////////////////////////////////////////////////////////////////////// template<typename GridT, typename InterruptT> template<math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme, typename MaskT> inline LevelSetTracker<GridT, InterruptT>:: Normalizer<SpatialScheme, TemporalScheme, MaskT>:: Normalizer(LevelSetTracker& tracker, const MaskT* mask) : mTracker(tracker) , mMask(mask) , mDt(tracker.voxelSize()*(TemporalScheme == math::TVD_RK1 ? 0.3f : TemporalScheme == math::TVD_RK2 ? 0.9f : 1.0f)) , mInvDx(1.0f/tracker.voxelSize()) , mTask(nullptr) { } template<typename GridT, typename InterruptT> template<math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme, typename MaskT> inline void LevelSetTracker<GridT, InterruptT>:: Normalizer<SpatialScheme, TemporalScheme, MaskT>:: normalize() { namespace ph = std::placeholders; /// Make sure we have enough temporal auxiliary buffers mTracker.mLeafs->rebuildAuxBuffers(TemporalScheme == math::TVD_RK3 ? 
2 : 1); for (int n=0, e=mTracker.getNormCount(); n < e; ++n) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN switch(TemporalScheme) {//switch is resolved at compile-time case math::TVD_RK1: // Perform one explicit Euler step: t1 = t0 + dt // Phi_t1(0) = Phi_t0(0) - dt * VdotG_t0(1) mTask = std::bind(&Normalizer::euler01, ph::_1, ph::_2); // Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1) this->cook("Normalizing level set using TVD_RK1", 1); break; case math::TVD_RK2: // Perform one explicit Euler step: t1 = t0 + dt // Phi_t1(1) = Phi_t0(0) - dt * VdotG_t0(1) mTask = std::bind(&Normalizer::euler01, ph::_1, ph::_2); // Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1) this->cook("Normalizing level set using TVD_RK1 (step 1 of 2)", 1); // Convex combine explicit Euler step: t2 = t0 + dt // Phi_t2(1) = 1/2 * Phi_t0(1) + 1/2 * (Phi_t1(0) - dt * V.Grad_t1(0)) mTask = std::bind(&Normalizer::euler12, ph::_1, ph::_2); // Cook and swap buffer 0 and 1 such that Phi_t2(0) and Phi_t1(1) this->cook("Normalizing level set using TVD_RK1 (step 2 of 2)", 1); break; case math::TVD_RK3: // Perform one explicit Euler step: t1 = t0 + dt // Phi_t1(1) = Phi_t0(0) - dt * VdotG_t0(1) mTask = std::bind(&Normalizer::euler01, ph::_1, ph::_2); // Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1) this->cook("Normalizing level set using TVD_RK3 (step 1 of 3)", 1); // Convex combine explicit Euler step: t2 = t0 + dt/2 // Phi_t2(2) = 3/4 * Phi_t0(1) + 1/4 * (Phi_t1(0) - dt * V.Grad_t1(0)) mTask = std::bind(&Normalizer::euler34, ph::_1, ph::_2); // Cook and swap buffer 0 and 2 such that Phi_t2(0) and Phi_t1(2) this->cook("Normalizing level set using TVD_RK3 (step 2 of 3)", 2); // Convex combine explicit Euler step: t3 = t0 + dt // Phi_t3(2) = 1/3 * Phi_t0(1) + 2/3 * (Phi_t2(0) - dt * V.Grad_t2(0) mTask = std::bind(&Normalizer::euler13, ph::_1, ph::_2); // Cook and swap buffer 0 and 2 such that Phi_t3(0) and Phi_t2(2) this->cook("Normalizing level set using 
TVD_RK3 (step 3 of 3)", 2); break; case math::UNKNOWN_TIS: default: OPENVDB_THROW(ValueError, "Temporal integration scheme not supported!"); } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } mTracker.mLeafs->removeAuxBuffers(); } /// Private method to perform the task (serial or threaded) and /// subsequently swap the leaf buffers. template<typename GridT, typename InterruptT> template<math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme, typename MaskT> inline void LevelSetTracker<GridT, InterruptT>:: Normalizer<SpatialScheme, TemporalScheme, MaskT>:: cook(const char* msg, int swapBuffer) { mTracker.startInterrupter( msg ); const int grainSize = mTracker.getGrainSize(); const LeafRange range = mTracker.leafs().leafRange(grainSize); grainSize>0 ? tbb::parallel_for(range, *this) : (*this)(range); mTracker.leafs().swapLeafBuffer(swapBuffer, grainSize==0); mTracker.endInterrupter(); } template<typename GridT, typename InterruptT> template<math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme, typename MaskT> template <int Nominator, int Denominator> inline void LevelSetTracker<GridT, InterruptT>:: Normalizer<SpatialScheme, TemporalScheme, MaskT>:: eval(StencilT& stencil, const ValueType* phi, ValueType* result, Index n) const { using GradientT = typename math::ISGradientNormSqrd<SpatialScheme>; static const ValueType alpha = ValueType(Nominator)/ValueType(Denominator); static const ValueType beta = ValueType(1) - alpha; const ValueType normSqGradPhi = GradientT::result(stencil); const ValueType phi0 = stencil.getValue(); ValueType v = phi0 / ( math::Sqrt(math::Pow2(phi0) + normSqGradPhi) + math::Tolerance<ValueType>::value() ); v = phi0 - mDt * v * (math::Sqrt(normSqGradPhi) * mInvDx - 1.0f); result[n] = Nominator ? 
alpha * phi[n] + beta * v : v; } template<typename GridT, typename InterruptT> template<math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme, typename MaskT> template <int Nominator, int Denominator> inline void LevelSetTracker<GridT,InterruptT>:: Normalizer<SpatialScheme, TemporalScheme, MaskT>:: euler(const LeafRange& range, Index phiBuffer, Index resultBuffer) { mTracker.checkInterrupter(); StencilT stencil(mTracker.grid()); for (typename LeafRange::Iterator leafIter = range.begin(); leafIter; ++leafIter) { const ValueType* phi = leafIter.buffer(phiBuffer).data(); ValueType* result = leafIter.buffer(resultBuffer).data(); if (mMask == nullptr) { for (auto iter = leafIter->cbeginValueOn(); iter; ++iter) { stencil.moveTo(iter); this->eval<Nominator, Denominator>(stencil, phi, result, iter.pos()); }//loop over active voxels in the leaf of the level set } else if (const MaskLeafT* mask = mMask->probeLeaf(leafIter->origin())) { const ValueType* phi0 = leafIter->buffer().data(); for (MaskIterT iter = mask->cbeginValueOn(); iter; ++iter) { const Index i = iter.pos(); stencil.moveTo(iter.getCoord(), phi0[i]); this->eval<Nominator, Denominator>(stencil, phi, result, i); }//loop over active voxels in the leaf of the mask } }//loop over leafs of the level set } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_LEVEL_SET_TRACKER_HAS_BEEN_INCLUDED
26,222
C
37.563235
95
0.661658
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/PointScatter.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @author Ken Museth /// /// @file tools/PointScatter.h /// /// @brief We offer three different algorithms (each in its own class) /// for scattering of points in active voxels: /// /// 1) UniformPointScatter. Has two modes: Either randomly distributes /// a fixed number of points into the active voxels, or the user can /// specify a fixed probability of having a points per unit of volume. /// /// 2) DenseUniformPointScatter. Randomly distributes points into active /// voxels using a fixed number of points per voxel. /// /// 3) NonIniformPointScatter. Define the local probability of having /// a point in a voxel as the product of a global density and the /// value of the voxel itself. #ifndef OPENVDB_TOOLS_POINT_SCATTER_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_POINT_SCATTER_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/Grid.h> #include <openvdb/math/Math.h> #include <openvdb/util/NullInterrupter.h> #include <tbb/parallel_sort.h> #include <tbb/parallel_for.h> #include <iostream> #include <memory> #include <string> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// Forward declaration of base class template<typename PointAccessorType, typename RandomGenerator, typename InterruptType = util::NullInterrupter> class BasePointScatter; /// @brief The two point scatters UniformPointScatter and /// NonUniformPointScatter depend on the following two classes: /// /// The @c PointAccessorType template argument below refers to any class /// with the following interface: /// @code /// class PointAccessor { /// ... /// public: /// void add(const openvdb::Vec3R &pos);// appends point with world positions pos /// }; /// @endcode /// /// /// The @c InterruptType template argument below refers to any class /// with the following interface: /// @code /// class Interrupter { /// ... 
/// public: /// void start(const char* name = nullptr) // called when computations begin /// void end() // called when computations end /// bool wasInterrupted(int percent=-1) // return true to break computation ///}; /// @endcode /// /// @note If no template argument is provided for this InterruptType /// the util::NullInterrupter is used which implies that all /// interrupter calls are no-ops (i.e. incurs no computational overhead). /// @brief Uniformly scatters points in the active voxels. /// The point count is either explicitly defined or implicitly /// through the specification of a global density (=points-per-volume) /// /// @note This uniform scattering technique assumes that the number of /// points is generally smaller than the number of active voxels /// (including virtual active voxels in active tiles). template<typename PointAccessorType, typename RandomGenerator, typename InterruptType = util::NullInterrupter> class UniformPointScatter : public BasePointScatter<PointAccessorType, RandomGenerator, InterruptType> { public: using BaseT = BasePointScatter<PointAccessorType, RandomGenerator, InterruptType>; UniformPointScatter(PointAccessorType& points, Index64 pointCount, RandomGenerator& randGen, double spread = 1.0, InterruptType* interrupt = nullptr) : BaseT(points, randGen, spread, interrupt) , mTargetPointCount(pointCount) , mPointsPerVolume(0.0f) { } UniformPointScatter(PointAccessorType& points, float pointsPerVolume, RandomGenerator& randGen, double spread = 1.0, InterruptType* interrupt = nullptr) : BaseT(points, randGen, spread, interrupt) , mTargetPointCount(0) , mPointsPerVolume(pointsPerVolume) { } /// This is the main functor method implementing the actual scattering of points. 
template<typename GridT> bool operator()(const GridT& grid) { mVoxelCount = grid.activeVoxelCount(); if (mVoxelCount == 0) return false; const auto voxelVolume = grid.transform().voxelVolume(); if (mPointsPerVolume > 0) { BaseT::start("Uniform scattering with fixed point density"); mTargetPointCount = Index64(mPointsPerVolume * voxelVolume * double(mVoxelCount)); } else if (mTargetPointCount > 0) { BaseT::start("Uniform scattering with fixed point count"); mPointsPerVolume = float(mTargetPointCount) / float(voxelVolume * double(mVoxelCount)); } else { return false; } std::unique_ptr<Index64[]> idList{new Index64[mTargetPointCount]}; math::RandInt<Index64, RandomGenerator> rand(BaseT::mRand01.engine(), 0, mVoxelCount-1); for (Index64 i=0; i<mTargetPointCount; ++i) idList[i] = rand(); tbb::parallel_sort(idList.get(), idList.get() + mTargetPointCount); CoordBBox bbox; const Vec3R offset(0.5, 0.5, 0.5); typename GridT::ValueOnCIter valueIter = grid.cbeginValueOn(); for (Index64 i=0, n=valueIter.getVoxelCount() ; i != mTargetPointCount; ++i) { if (BaseT::interrupt()) return false; const Index64 voxelId = idList[i]; while ( n <= voxelId ) { ++valueIter; n += valueIter.getVoxelCount(); } if (valueIter.isVoxelValue()) {// a majority is expected to be voxels BaseT::addPoint(grid, valueIter.getCoord() - offset); } else {// tiles contain multiple (virtual) voxels valueIter.getBoundingBox(bbox); BaseT::addPoint(grid, bbox.min() - offset, bbox.extents()); } }//loop over all the active voxels and tiles //} BaseT::end(); return true; } // The following methods should only be called after the // the operator() method was called void print(const std::string &name, std::ostream& os = std::cout) const { os << "Uniformly scattered " << mPointCount << " points into " << mVoxelCount << " active voxels in \"" << name << "\" corresponding to " << mPointsPerVolume << " points per volume." 
<< std::endl; } float getPointsPerVolume() const { return mPointsPerVolume; } Index64 getTargetPointCount() const { return mTargetPointCount; } private: using BaseT::mPointCount; using BaseT::mVoxelCount; Index64 mTargetPointCount; float mPointsPerVolume; }; // class UniformPointScatter /// @brief Scatters a fixed (and integer) number of points in all /// active voxels and tiles. template<typename PointAccessorType, typename RandomGenerator, typename InterruptType = util::NullInterrupter> class DenseUniformPointScatter : public BasePointScatter<PointAccessorType, RandomGenerator, InterruptType> { public: using BaseT = BasePointScatter<PointAccessorType, RandomGenerator, InterruptType>; DenseUniformPointScatter(PointAccessorType& points, float pointsPerVoxel, RandomGenerator& randGen, double spread = 1.0, InterruptType* interrupt = nullptr) : BaseT(points, randGen, spread, interrupt) , mPointsPerVoxel(pointsPerVoxel) { } /// This is the main functor method implementing the actual scattering of points. 
template<typename GridT> bool operator()(const GridT& grid) { using ValueIter = typename GridT::ValueOnCIter; if (mPointsPerVoxel < 1.0e-6) return false; mVoxelCount = grid.activeVoxelCount(); if (mVoxelCount == 0) return false; BaseT::start("Dense uniform scattering with fixed point count"); CoordBBox bbox; const Vec3R offset(0.5, 0.5, 0.5); const int ppv = math::Floor(mPointsPerVoxel); const double delta = mPointsPerVoxel - float(ppv); const bool fractional = !math::isApproxZero(delta, 1.0e-6); for (ValueIter iter = grid.cbeginValueOn(); iter; ++iter) { if (BaseT::interrupt()) return false; if (iter.isVoxelValue()) {// a majority is expected to be voxels const Vec3R dmin = iter.getCoord() - offset; for (int n = 0; n != ppv; ++n) BaseT::addPoint(grid, dmin); if (fractional && BaseT::getRand01() < delta) BaseT::addPoint(grid, dmin); } else {// tiles contain multiple (virtual) voxels iter.getBoundingBox(bbox); const Coord size(bbox.extents()); const Vec3R dmin = bbox.min() - offset; const double d = mPointsPerVoxel * float(iter.getVoxelCount()); const int m = math::Floor(d); for (int n = 0; n != m; ++n) BaseT::addPoint(grid, dmin, size); if (BaseT::getRand01() < d - m) BaseT::addPoint(grid, dmin, size); } }//loop over all the active voxels and tiles //} BaseT::end(); return true; } // The following methods should only be called after the // the operator() method was called void print(const std::string &name, std::ostream& os = std::cout) const { os << "Dense uniformly scattered " << mPointCount << " points into " << mVoxelCount << " active voxels in \"" << name << "\" corresponding to " << mPointsPerVoxel << " points per voxel." << std::endl; } float getPointsPerVoxel() const { return mPointsPerVoxel; } private: using BaseT::mPointCount; using BaseT::mVoxelCount; float mPointsPerVoxel; }; // class DenseUniformPointScatter /// @brief Non-uniform scatters of point in the active voxels. 
/// The local point count is implicitly defined as a product of /// of a global density (called pointsPerVolume) and the local voxel /// (or tile) value. /// /// @note This scattering technique can be significantly slower /// than a uniform scattering since its computational complexity /// is proportional to the active voxel (and tile) count. template<typename PointAccessorType, typename RandomGenerator, typename InterruptType = util::NullInterrupter> class NonUniformPointScatter : public BasePointScatter<PointAccessorType, RandomGenerator, InterruptType> { public: using BaseT = BasePointScatter<PointAccessorType, RandomGenerator, InterruptType>; NonUniformPointScatter(PointAccessorType& points, float pointsPerVolume, RandomGenerator& randGen, double spread = 1.0, InterruptType* interrupt = nullptr) : BaseT(points, randGen, spread, interrupt) , mPointsPerVolume(pointsPerVolume)//note this is merely a //multiplier for the local point density { } /// This is the main functor method implementing the actual scattering of points. 
template<typename GridT> bool operator()(const GridT& grid) { if (mPointsPerVolume <= 0.0f) return false; mVoxelCount = grid.activeVoxelCount(); if (mVoxelCount == 0) return false; BaseT::start("Non-uniform scattering with local point density"); const Vec3d dim = grid.voxelSize(); const double volumePerVoxel = dim[0]*dim[1]*dim[2], pointsPerVoxel = mPointsPerVolume * volumePerVoxel; CoordBBox bbox; const Vec3R offset(0.5, 0.5, 0.5); for (typename GridT::ValueOnCIter iter = grid.cbeginValueOn(); iter; ++iter) { if (BaseT::interrupt()) return false; const double d = double(*iter) * pointsPerVoxel * double(iter.getVoxelCount()); const int n = int(d); if (iter.isVoxelValue()) { // a majority is expected to be voxels const Vec3R dmin =iter.getCoord() - offset; for (int i = 0; i < n; ++i) BaseT::addPoint(grid, dmin); if (BaseT::getRand01() < (d - n)) BaseT::addPoint(grid, dmin); } else { // tiles contain multiple (virtual) voxels iter.getBoundingBox(bbox); const Coord size(bbox.extents()); const Vec3R dmin = bbox.min() - offset; for (int i = 0; i < n; ++i) BaseT::addPoint(grid, dmin, size); if (BaseT::getRand01() < (d - n)) BaseT::addPoint(grid, dmin, size); } }//loop over all the active voxels and tiles BaseT::end(); return true; } // The following methods should only be called after the // the operator() method was called void print(const std::string &name, std::ostream& os = std::cout) const { os << "Non-uniformly scattered " << mPointCount << " points into " << mVoxelCount << " active voxels in \"" << name << "\"." 
<< std::endl; } float getPointPerVolume() const { return mPointsPerVolume; } private: using BaseT::mPointCount; using BaseT::mVoxelCount; float mPointsPerVolume; }; // class NonUniformPointScatter /// Base class of all the point scattering classes defined above template<typename PointAccessorType, typename RandomGenerator, typename InterruptType> class BasePointScatter { public: Index64 getPointCount() const { return mPointCount; } Index64 getVoxelCount() const { return mVoxelCount; } protected: PointAccessorType& mPoints; InterruptType* mInterrupter; Index64 mPointCount; Index64 mVoxelCount; Index64 mInterruptCount; const double mSpread; math::Rand01<double, RandomGenerator> mRand01; /// This is a base class so the constructor is protected BasePointScatter(PointAccessorType& points, RandomGenerator& randGen, double spread, InterruptType* interrupt = nullptr) : mPoints(points) , mInterrupter(interrupt) , mPointCount(0) , mVoxelCount(0) , mInterruptCount(0) , mSpread(math::Clamp01(spread)) , mRand01(randGen) { } inline void start(const char* name) { if (mInterrupter) mInterrupter->start(name); } inline void end() { if (mInterrupter) mInterrupter->end(); } inline bool interrupt() { //only check interrupter for every 32'th call return !(mInterruptCount++ & ((1<<5)-1)) && util::wasInterrupted(mInterrupter); } /// @brief Return a random floating point number between zero and one inline double getRand01() { return mRand01(); } /// @brief Return a random floating point number between 0.5 -+ mSpread/2 inline double getRand() { return 0.5 + mSpread * (mRand01() - 0.5); } template <typename GridT> inline void addPoint(const GridT &grid, const Vec3R &dmin) { const Vec3R pos(dmin[0] + this->getRand(), dmin[1] + this->getRand(), dmin[2] + this->getRand()); mPoints.add(grid.indexToWorld(pos)); ++mPointCount; } template <typename GridT> inline void addPoint(const GridT &grid, const Vec3R &dmin, const Coord &size) { const Vec3R pos(dmin[0] + size[0]*this->getRand(), dmin[1] + 
size[1]*this->getRand(), dmin[2] + size[2]*this->getRand()); mPoints.add(grid.indexToWorld(pos)); ++mPointCount; } };// class BasePointScatter } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_POINT_SCATTER_HAS_BEEN_INCLUDED
16,293
C
37.611374
99
0.61241
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/ChangeBackground.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file ChangeBackground.h /// /// @brief Efficient multi-threaded replacement of the background /// values in tree. /// /// @author Ken Museth #ifndef OPENVDB_TOOLS_ChangeBACKGROUND_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_ChangeBACKGROUND_HAS_BEEN_INCLUDED #include <openvdb/math/Math.h> // for isNegative and negative #include <openvdb/Types.h> // for Index typedef #include <openvdb/tree/NodeManager.h> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Replace the background value in all the nodes of a tree. /// @details The sign of the background value is preserved, and only /// inactive values equal to the old background value are replaced. /// /// @note If a LeafManager is used the cached leaf nodes are reused, /// resulting in slightly better overall performance. /// /// @param tree Tree (or LeafManager) that will have its background value changed /// @param background the new background value /// @param threaded enable or disable threading (threading is enabled by default) /// @param grainSize used to control the threading granularity (default is 32) template<typename TreeOrLeafManagerT> inline void changeBackground( TreeOrLeafManagerT& tree, const typename TreeOrLeafManagerT::ValueType& background, bool threaded = true, size_t grainSize = 32); /// @brief Replace the background value in all the nodes of a floating-point tree /// containing a symmetric narrow-band level set. /// @details All inactive values will be set to +| @a halfWidth | if outside /// and -| @a halfWidth | if inside, where @a halfWidth is half the width /// of the symmetric narrow band. /// /// @note This method is faster than changeBackground since it does not /// perform tests to see if inactive values are equal to the old background value. 
/// @note If a LeafManager is used the cached leaf nodes are reused, /// resulting in slightly better overall performance. /// /// @param tree Tree (or LeafManager) that will have its background value changed /// @param halfWidth half of the width of the symmetric narrow band /// @param threaded enable or disable threading (threading is enabled by default) /// @param grainSize used to control the threading granularity (default is 32) /// /// @throw ValueError if @a halfWidth is negative (as defined by math::isNegative) template<typename TreeOrLeafManagerT> inline void changeLevelSetBackground( TreeOrLeafManagerT& tree, const typename TreeOrLeafManagerT::ValueType& halfWidth, bool threaded = true, size_t grainSize = 32); /// @brief Replace the background values in all the nodes of a floating-point tree /// containing a possibly asymmetric narrow-band level set. /// @details All inactive values will be set to +| @a outsideWidth | if outside /// and -| @a insideWidth | if inside, where @a outsideWidth is the outside /// width of the narrow band and @a insideWidth is its inside width. /// /// @note This method is faster than changeBackground since it does not /// perform tests to see if inactive values are equal to the old background value. /// @note If a LeafManager is used the cached leaf nodes are reused, /// resulting in slightly better overall performance. 
/// /// @param tree Tree (or LeafManager) that will have its background value changed /// @param outsideWidth The width of the outside of the narrow band /// @param insideWidth The width of the inside of the narrow band /// @param threaded enable or disable threading (threading is enabled by default) /// @param grainSize used to control the threading granularity (default is 32) /// /// @throw ValueError if @a outsideWidth is negative or @a insideWidth is /// not negative (as defined by math::isNegative) template<typename TreeOrLeafManagerT> inline void changeAsymmetricLevelSetBackground( TreeOrLeafManagerT& tree, const typename TreeOrLeafManagerT::ValueType& outsideWidth, const typename TreeOrLeafManagerT::ValueType& insideWidth, bool threaded = true, size_t grainSize = 32); ////////////////////////////////////////////////////// // Replaces the background value in a Tree of any type. template<typename TreeOrLeafManagerT> class ChangeBackgroundOp { public: typedef typename TreeOrLeafManagerT::ValueType ValueT; typedef typename TreeOrLeafManagerT::RootNodeType RootT; typedef typename TreeOrLeafManagerT::LeafNodeType LeafT; ChangeBackgroundOp(const TreeOrLeafManagerT& tree, const ValueT& newValue) : mOldValue(tree.root().background()) , mNewValue(newValue) { } void operator()(RootT& root) const { for (typename RootT::ValueOffIter it = root.beginValueOff(); it; ++it) this->set(it); root.setBackground(mNewValue, false); } void operator()(LeafT& node) const { for (typename LeafT::ValueOffIter it = node.beginValueOff(); it; ++it) this->set(it); } template<typename NodeT> void operator()(NodeT& node) const { typename NodeT::NodeMaskType mask = node.getValueOffMask(); for (typename NodeT::ValueOnIter it(mask.beginOn(), &node); it; ++it) this->set(it); } private: template<typename IterT> inline void set(IterT& iter) const { if (math::isApproxEqual(*iter, mOldValue)) { iter.setValue(mNewValue); } else if (math::isApproxEqual(*iter, math::negative(mOldValue))) { 
iter.setValue(math::negative(mNewValue)); } } const ValueT mOldValue, mNewValue; };// ChangeBackgroundOp // Replaces the background value in a Tree assumed to represent a // level set. It is generally faster than ChangeBackgroundOp. // Note that is follows the sign-convention that outside is positive // and inside is negative! template<typename TreeOrLeafManagerT> class ChangeLevelSetBackgroundOp { public: typedef typename TreeOrLeafManagerT::ValueType ValueT; typedef typename TreeOrLeafManagerT::RootNodeType RootT; typedef typename TreeOrLeafManagerT::LeafNodeType LeafT; /// @brief Constructor for asymmetric narrow-bands ChangeLevelSetBackgroundOp(const ValueT& outside, const ValueT& inside) : mOutside(outside) , mInside(inside) { if (math::isNegative(mOutside)) { OPENVDB_THROW(ValueError, "ChangeLevelSetBackgroundOp: the outside value cannot be negative!"); } if (!math::isNegative(mInside)) { OPENVDB_THROW(ValueError, "ChangeLevelSetBackgroundOp: the inside value must be negative!"); } } void operator()(RootT& root) const { for (typename RootT::ValueOffIter it = root.beginValueOff(); it; ++it) this->set(it); root.setBackground(mOutside, false); } void operator()(LeafT& node) const { for(typename LeafT::ValueOffIter it = node.beginValueOff(); it; ++it) this->set(it); } template<typename NodeT> void operator()(NodeT& node) const { typedef typename NodeT::ValueOffIter IterT; for (IterT it(node.getChildMask().beginOff(), &node); it; ++it) this->set(it); } private: template<typename IterT> inline void set(IterT& iter) const { //this is safe since we know ValueType is_floating_point ValueT& v = const_cast<ValueT&>(*iter); v = v < 0 ? 
mInside : mOutside; } const ValueT mOutside, mInside; };// ChangeLevelSetBackgroundOp template<typename TreeOrLeafManagerT> inline void changeBackground( TreeOrLeafManagerT& tree, const typename TreeOrLeafManagerT::ValueType& background, bool threaded, size_t grainSize) { tree::NodeManager<TreeOrLeafManagerT> linearTree(tree); ChangeBackgroundOp<TreeOrLeafManagerT> op(tree, background); linearTree.foreachTopDown(op, threaded, grainSize); } template<typename TreeOrLeafManagerT> inline void changeAsymmetricLevelSetBackground( TreeOrLeafManagerT& tree, const typename TreeOrLeafManagerT::ValueType& outsideValue, const typename TreeOrLeafManagerT::ValueType& insideValue, bool threaded, size_t grainSize) { tree::NodeManager<TreeOrLeafManagerT> linearTree(tree); ChangeLevelSetBackgroundOp<TreeOrLeafManagerT> op(outsideValue, insideValue); linearTree.foreachTopDown(op, threaded, grainSize); } // If the narrow-band is symmetric only one background value is required template<typename TreeOrLeafManagerT> inline void changeLevelSetBackground( TreeOrLeafManagerT& tree, const typename TreeOrLeafManagerT::ValueType& background, bool threaded, size_t grainSize) { changeAsymmetricLevelSetBackground( tree, background, math::negative(background), threaded, grainSize); } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_CHANGEBACKGROUND_HAS_BEEN_INCLUDED
9,036
C
35.439516
95
0.711598
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Statistics.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file Statistics.h /// /// @brief Functions to efficiently compute histograms, extremas /// (min/max) and statistics (mean, variance, etc.) of grid values #ifndef OPENVDB_TOOLS_STATISTICS_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_STATISTICS_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/Exceptions.h> #include <openvdb/math/Stats.h> #include "ValueTransformer.h" namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Iterate over a scalar grid and compute a histogram of the values /// of the voxels that are visited, or iterate over a vector-valued grid /// and compute a histogram of the magnitudes of the vectors. /// @param iter an iterator over the values of a grid or its tree /// (@c Grid::ValueOnCIter, @c Tree::ValueOffIter, etc.) /// @param minVal the smallest value that can be added to the histogram /// @param maxVal the largest value that can be added to the histogram /// @param numBins the number of histogram bins /// @param threaded if true, iterate over the grid in parallel template<typename IterT> inline math::Histogram histogram(const IterT& iter, double minVal, double maxVal, size_t numBins = 10, bool threaded = true); /// @brief Iterate over a scalar grid and compute extrema (min/max) of the /// values of the voxels that are visited, or iterate over a vector-valued grid /// and compute extrema of the magnitudes of the vectors. /// @param iter an iterator over the values of a grid or its tree /// (@c Grid::ValueOnCIter, @c Tree::ValueOffIter, etc.) /// @param threaded if true, iterate over the grid in parallel template<typename IterT> inline math::Extrema extrema(const IterT& iter, bool threaded = true); /// @brief Iterate over a scalar grid and compute statistics (mean, variance, etc.) 
/// of the values of the voxels that are visited, or iterate over a vector-valued grid /// and compute statistics of the magnitudes of the vectors. /// @param iter an iterator over the values of a grid or its tree /// (@c Grid::ValueOnCIter, @c Tree::ValueOffIter, etc.) /// @param threaded if true, iterate over the grid in parallel template<typename IterT> inline math::Stats statistics(const IterT& iter, bool threaded = true); /// @brief Iterate over a grid and compute extrema (min/max) of /// the values produced by applying the given functor at each voxel that is visited. /// @param iter an iterator over the values of a grid or its tree /// (@c Grid::ValueOnCIter, @c Tree::ValueOffIter, etc.) /// @param op a functor of the form <tt>void op(const IterT&, math::Stats&)</tt>, /// where @c IterT is the type of @a iter, that inserts zero or more /// floating-point values into the provided @c math::Stats object /// @param threaded if true, iterate over the grid in parallel /// @note When @a threaded is true, each thread gets its own copy of the functor. /// /// @par Example: /// Compute statistics of just the active and positive-valued voxels of a scalar, /// floating-point grid. /// @code /// struct Local { /// static inline /// void addIfPositive(const FloatGrid::ValueOnCIter& iter, math::Extrema& ex) /// { /// const float f = *iter; /// if (f > 0.0) { /// if (iter.isVoxelValue()) ex.add(f); /// else ex.add(f, iter.getVoxelCount()); /// } /// } /// }; /// FloatGrid grid = ...; /// math::Extrema stats = /// tools::extrema(grid.cbeginValueOn(), Local::addIfPositive, /*threaded=*/true); /// @endcode template<typename IterT, typename ValueOp> inline math::Extrema extrema(const IterT& iter, const ValueOp& op, bool threaded); /// @brief Iterate over a grid and compute statistics (mean, variance, etc.) of /// the values produced by applying the given functor at each voxel that is visited. 
/// @param iter an iterator over the values of a grid or its tree /// (@c Grid::ValueOnCIter, @c Tree::ValueOffIter, etc.) /// @param op a functor of the form <tt>void op(const IterT&, math::Stats&)</tt>, /// where @c IterT is the type of @a iter, that inserts zero or more /// floating-point values into the provided @c math::Stats object /// @param threaded if true, iterate over the grid in parallel /// @note When @a threaded is true, each thread gets its own copy of the functor. /// /// @par Example: /// Compute statistics of just the active and positive-valued voxels of a scalar, /// floating-point grid. /// @code /// struct Local { /// static inline /// void addIfPositive(const FloatGrid::ValueOnCIter& iter, math::Stats& stats) /// { /// const float f = *iter; /// if (f > 0.0) { /// if (iter.isVoxelValue()) stats.add(f); /// else stats.add(f, iter.getVoxelCount()); /// } /// } /// }; /// FloatGrid grid = ...; /// math::Stats stats = /// tools::statistics(grid.cbeginValueOn(), Local::addIfPositive, /*threaded=*/true); /// @endcode template<typename IterT, typename ValueOp> inline math::Stats statistics(const IterT& iter, const ValueOp& op, bool threaded); /// @brief Iterate over a grid and compute statistics (mean, variance, etc.) /// of the values produced by applying a given operator (see math/Operators.h) /// at each voxel that is visited. /// @param iter an iterator over the values of a grid or its tree /// (@c Grid::ValueOnCIter, @c Tree::ValueOffIter, etc.) /// @param op an operator object with a method of the form /// <tt>double result(Accessor&, const Coord&)</tt> /// @param threaded if true, iterate over the grid in parallel /// @note World-space operators, whose @c result() methods are of the form /// <tt>double result(const Map&, Accessor&, const Coord&)</tt>, must be wrapped /// in a math::MapAdapter. /// @note Vector-valued operators like math::Gradient must be wrapped in an adapter /// such as math::OpMagnitude. 
/// /// @par Example: /// Compute statistics of the magnitude of the gradient at the active voxels of /// a scalar, floating-point grid. (Note the use of the math::MapAdapter and /// math::OpMagnitude adapters.) /// @code /// FloatGrid grid = ...; /// /// // Assume that we know that the grid has a uniform scale map. /// using MapType = math::UniformScaleMap; /// // Specify a world-space gradient operator that uses first-order differencing. /// using GradientOp = math::Gradient<MapType, math::FD_1ST>; /// // Wrap the operator with an adapter that computes the magnitude of the gradient. /// using MagnitudeOp = math::OpMagnitude<GradientOp, MapType>; /// // Wrap the operator with an adapter that associates a map with it. /// using CompoundOp = math::MapAdapter<MapType, GradientOp, double>; /// /// if (MapType::Ptr map = grid.constTransform().constMap<MapType>()) { /// math::Stats stats = tools::opStatistics(grid.cbeginValueOn(), CompoundOp(*map)); /// } /// @endcode /// /// @par Example: /// Compute statistics of the divergence at the active voxels of a vector-valued grid. /// @code /// Vec3SGrid grid = ...; /// /// // Assume that we know that the grid has a uniform scale map. /// using MapType = math::UniformScaleMap; /// // Specify a world-space divergence operator that uses first-order differencing. /// using DivergenceOp = math::Divergence<MapType, math::FD_1ST>; /// // Wrap the operator with an adapter that associates a map with it. /// using CompoundOp = math::MapAdapter<MapType, DivergenceOp, double>; /// /// if (MapType::Ptr map = grid.constTransform().constMap<MapType>()) { /// math::Stats stats = tools::opStatistics(grid.cbeginValueOn(), CompoundOp(*map)); /// } /// @endcode /// /// @par Example: /// As above, but computing the divergence in index space. /// @code /// Vec3SGrid grid = ...; /// /// // Specify an index-space divergence operator that uses first-order differencing. 
/// using DivergenceOp = math::ISDivergence<math::FD_1ST>; /// /// math::Stats stats = tools::opStatistics(grid.cbeginValueOn(), DivergenceOp()); /// @endcode template<typename OperatorT, typename IterT> inline math::Stats opStatistics(const IterT& iter, const OperatorT& op = OperatorT(), bool threaded = true); /// @brief Same as opStatistics except it returns a math::Extrema vs a math::Stats template<typename OperatorT, typename IterT> inline math::Extrema opExtrema(const IterT& iter, const OperatorT& op = OperatorT(), bool threaded = true); //////////////////////////////////////// namespace stats_internal { /// @todo This traits class is needed because tree::TreeValueIteratorBase uses /// the name ValueT for the type of the value to which the iterator points, /// whereas node-level iterators use the name ValueType. template<typename IterT, typename AuxT = void> struct IterTraits { using ValueType = typename IterT::ValueType; }; template<typename TreeT, typename ValueIterT> struct IterTraits<tree::TreeValueIteratorBase<TreeT, ValueIterT> > { using ValueType = typename tree::TreeValueIteratorBase<TreeT, ValueIterT>::ValueT; }; // Helper class to compute a scalar value from either a scalar or a vector value // (the latter by computing the vector's magnitude) template<typename T, bool IsVector> struct GetValImpl; template<typename T> struct GetValImpl<T, /*IsVector=*/false> { static inline double get(const T& val) { return double(val); } }; template<typename T> struct GetValImpl<T, /*IsVector=*/true> { static inline double get(const T& val) { return val.length(); } }; // Helper class to compute a scalar value from a tree or node iterator // that points to a value in either a scalar or a vector grid, and to // add that value to a math::Stats object. 
template<typename IterT, typename StatsT> struct GetVal { using ValueT = typename IterTraits<IterT>::ValueType; using ImplT = GetValImpl<ValueT, VecTraits<ValueT>::IsVec>; inline void operator()(const IterT& iter, StatsT& stats) const { if (iter.isVoxelValue()) stats.add(ImplT::get(*iter)); else stats.add(ImplT::get(*iter), iter.getVoxelCount()); } }; // Helper class to accumulate scalar voxel values or vector voxel magnitudes // into a math::Stats object template<typename IterT, typename ValueOp, typename StatsT> struct StatsOp { StatsOp(const ValueOp& op): getValue(op) {} // Accumulate voxel and tile values into this functor's Stats object. inline void operator()(const IterT& iter) { getValue(iter, stats); } // Accumulate another functor's Stats object into this functor's. inline void join(StatsOp& other) { stats.add(other.stats); } StatsT stats; ValueOp getValue; }; // Helper class to accumulate scalar voxel values or vector voxel magnitudes // into a math::Histogram object template<typename IterT, typename ValueOp> struct HistOp { HistOp(const ValueOp& op, double vmin, double vmax, size_t bins): hist(vmin, vmax, bins), getValue(op) {} // Accumulate voxel and tile values into this functor's Histogram object. inline void operator()(const IterT& iter) { getValue(iter, hist); } // Accumulate another functor's Histogram object into this functor's. inline void join(HistOp& other) { hist.add(other.hist); } math::Histogram hist; ValueOp getValue; }; // Helper class to apply an operator such as math::Gradient or math::Laplacian // to voxels and accumulate the scalar results or the magnitudes of vector results // into a math::Stats object template<typename IterT, typename OpT, typename StatsT> struct MathOp { using TreeT = typename IterT::TreeT; using ValueT = typename TreeT::ValueType; using ConstAccessor = typename tree::ValueAccessor<const TreeT>; // Each thread gets its own accessor and its own copy of the operator. 
ConstAccessor mAcc; OpT mOp; StatsT mStats; template<typename TreeT> static inline TreeT* THROW_IF_NULL(TreeT* ptr) { if (ptr == nullptr) OPENVDB_THROW(ValueError, "iterator references a null tree"); return ptr; } MathOp(const IterT& iter, const OpT& op): mAcc(*THROW_IF_NULL(iter.getTree())), mOp(op) {} // Accumulate voxel and tile values into this functor's Stats object. void operator()(const IterT& it) { if (it.isVoxelValue()) { // Add the magnitude of the gradient at a single voxel. mStats.add(mOp.result(mAcc, it.getCoord())); } else { // Iterate over the voxels enclosed by a tile and add the results // of applying the operator at each voxel. /// @todo This could be specialized to be done more efficiently for some operators. /// For example, all voxels in the interior of a tile (i.e., not on the borders) /// have gradient zero, so there's no need to apply the operator to every voxel. CoordBBox bbox = it.getBoundingBox(); Coord xyz; int &x = xyz.x(), &y = xyz.y(), &z = xyz.z(); for (x = bbox.min().x(); x <= bbox.max().x(); ++x) { for (y = bbox.min().y(); y <= bbox.max().y(); ++y) { for (z = bbox.min().z(); z <= bbox.max().z(); ++z) { mStats.add(mOp.result(mAcc, it.getCoord())); } } } } } // Accumulate another functor's Stats object into this functor's. 
inline void join(MathOp& other) { mStats.add(other.mStats); } }; // struct MathOp } // namespace stats_internal template<typename IterT> inline math::Histogram histogram(const IterT& iter, double vmin, double vmax, size_t numBins, bool threaded) { using ValueOp = stats_internal::GetVal<IterT, math::Histogram>; ValueOp valOp; stats_internal::HistOp<IterT, ValueOp> op(valOp, vmin, vmax, numBins); tools::accumulate(iter, op, threaded); return op.hist; } template<typename IterT> inline math::Extrema extrema(const IterT& iter, bool threaded) { stats_internal::GetVal<IterT, math::Extrema> valOp; return extrema(iter, valOp, threaded); } template<typename IterT> inline math::Stats statistics(const IterT& iter, bool threaded) { stats_internal::GetVal<IterT, math::Stats> valOp; return statistics(iter, valOp, threaded); } template<typename IterT, typename ValueOp> inline math::Extrema extrema(const IterT& iter, const ValueOp& valOp, bool threaded) { stats_internal::StatsOp<IterT, const ValueOp, math::Extrema> op(valOp); tools::accumulate(iter, op, threaded); return op.stats; } template<typename IterT, typename ValueOp> inline math::Stats statistics(const IterT& iter, const ValueOp& valOp, bool threaded) { stats_internal::StatsOp<IterT, const ValueOp, math::Stats> op(valOp); tools::accumulate(iter, op, threaded); return op.stats; } template<typename OperatorT, typename IterT> inline math::Extrema opExtrema(const IterT& iter, const OperatorT& op, bool threaded) { stats_internal::MathOp<IterT, OperatorT, math::Extrema> func(iter, op); tools::accumulate(iter, func, threaded); return func.mStats; } template<typename OperatorT, typename IterT> inline math::Stats opStatistics(const IterT& iter, const OperatorT& op, bool threaded) { stats_internal::MathOp<IterT, OperatorT, math::Stats> func(iter, op); tools::accumulate(iter, func, threaded); return func.mStats; } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // 
OPENVDB_TOOLS_STATISTICS_HAS_BEEN_INCLUDED
15,710
C
37.507353
95
0.67823
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/SignedFloodFill.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SignedFloodFill.h /// /// @brief Propagate the signs of distance values from the active voxels /// in the narrow band to the inactive values outside the narrow band. /// /// @author Ken Museth #ifndef OPENVDB_TOOLS_SIGNEDFLOODFILL_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_SIGNEDFLOODFILL_HAS_BEEN_INCLUDED #include <openvdb/version.h> #include <openvdb/Types.h> // for Index typedef #include <openvdb/math/Math.h> // for math::negative #include <openvdb/tree/NodeManager.h> #include <map> #include <type_traits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Set the values of all inactive voxels and tiles of a narrow-band /// level set from the signs of the active voxels, setting outside values to /// +background and inside values to -background. /// /// @warning This method should only be used on closed, symmetric narrow-band level sets. /// /// @note If a LeafManager is used the cached leaf nodes are reused, /// resulting in slightly better overall performance. /// /// @param tree Tree or LeafManager that will be flood filled. /// @param threaded enable or disable threading (threading is enabled by default) /// @param grainSize used to control the threading granularity (default is 1) /// @param minLevel Specify the lowest tree level to process (leafnode level = 0) /// /// @throw TypeError if the ValueType of @a tree is not floating-point. template<typename TreeOrLeafManagerT> inline void signedFloodFill(TreeOrLeafManagerT& tree, bool threaded = true, size_t grainSize = 1, Index minLevel = 0); /// @brief Set the values of all inactive voxels and tiles of a narrow-band /// level set from the signs of the active voxels, setting exterior values to /// @a outsideWidth and interior values to @a insideWidth. Set the background value /// of this tree to @a outsideWidth. 
/// /// @warning This method should only be used on closed, narrow-band level sets. /// /// @note If a LeafManager is used the cached leaf nodes are reused /// resulting in slightly better overall performance. /// /// @param tree Tree or LeafManager that will be flood filled /// @param outsideWidth the width of the outside of the narrow band /// @param insideWidth the width of the inside of the narrow band /// @param threaded enable or disable threading (threading is enabled by default) /// @param grainSize used to control the threading granularity (default is 1) /// @param minLevel Specify the lowest tree level to process (leafnode level = 0) /// /// @throw TypeError if the ValueType of @a tree is not floating-point. template<typename TreeOrLeafManagerT> inline void signedFloodFillWithValues( TreeOrLeafManagerT& tree, const typename TreeOrLeafManagerT::ValueType& outsideWidth, const typename TreeOrLeafManagerT::ValueType& insideWidth, bool threaded = true, size_t grainSize = 1, Index minLevel = 0); ////////////////////////// Implementation of SignedFloodFill //////////////////////////// template<typename TreeOrLeafManagerT> class SignedFloodFillOp { public: using ValueT = typename TreeOrLeafManagerT::ValueType; using RootT = typename TreeOrLeafManagerT::RootNodeType; using LeafT = typename TreeOrLeafManagerT::LeafNodeType; static_assert(std::is_signed<ValueT>::value, "signed flood fill is supported only for signed value grids"); SignedFloodFillOp(const TreeOrLeafManagerT& tree, Index minLevel = 0) : mOutside(ValueT(math::Abs(tree.background()))) , mInside(ValueT(math::negative(mOutside))) , mMinLevel(minLevel) { } SignedFloodFillOp(ValueT outsideValue, ValueT insideValue, Index minLevel = 0) : mOutside(ValueT(math::Abs(outsideValue))) , mInside(ValueT(math::negative(math::Abs(insideValue)))) , mMinLevel(minLevel) { } // Nothing to do at the leaf node level void operator()(LeafT& leaf) const { if (LeafT::LEVEL < mMinLevel) return; if (!leaf.allocate()) return; // 
this assures that the buffer is allocated and in-memory const typename LeafT::NodeMaskType& valueMask = leaf.getValueMask(); // WARNING: "Never do what you're about to see at home, we're what you call experts!" typename LeafT::ValueType* buffer = const_cast<typename LeafT::ValueType*>(&(leaf.getFirstValue())); const Index first = valueMask.findFirstOn(); if (first < LeafT::SIZE) { bool xInside = buffer[first]<0, yInside = xInside, zInside = xInside; for (Index x = 0; x != (1 << LeafT::LOG2DIM); ++x) { const Index x00 = x << (2 * LeafT::LOG2DIM); if (valueMask.isOn(x00)) xInside = buffer[x00] < 0; // element(x, 0, 0) yInside = xInside; for (Index y = 0; y != (1 << LeafT::LOG2DIM); ++y) { const Index xy0 = x00 + (y << LeafT::LOG2DIM); if (valueMask.isOn(xy0)) yInside = buffer[xy0] < 0; // element(x, y, 0) zInside = yInside; for (Index z = 0; z != (1 << LeafT::LOG2DIM); ++z) { const Index xyz = xy0 + z; // element(x, y, z) if (valueMask.isOn(xyz)) { zInside = buffer[xyz] < 0; } else { buffer[xyz] = zInside ? mInside : mOutside; } } } } } else {// if no active voxels exist simply use the sign of the first value leaf.fill(buffer[0] < 0 ? mInside : mOutside); } } // Prune the child nodes of the internal nodes template<typename NodeT> void operator()(NodeT& node) const { if (NodeT::LEVEL < mMinLevel) return; // We assume the child nodes have already been flood filled! const typename NodeT::NodeMaskType& childMask = node.getChildMask(); // WARNING: "Never do what you're about to see at home, we're what you call experts!" 
typename NodeT::UnionType* table = const_cast<typename NodeT::UnionType*>(node.getTable()); const Index first = childMask.findFirstOn(); if (first < NodeT::NUM_VALUES) { bool xInside = table[first].getChild()->getFirstValue()<0; bool yInside = xInside, zInside = xInside; for (Index x = 0; x != (1 << NodeT::LOG2DIM); ++x) { const int x00 = x << (2 * NodeT::LOG2DIM); // offset for block(x, 0, 0) if (childMask.isOn(x00)) xInside = table[x00].getChild()->getLastValue()<0; yInside = xInside; for (Index y = 0; y != (1 << NodeT::LOG2DIM); ++y) { const Index xy0 = x00 + (y << NodeT::LOG2DIM); // offset for block(x, y, 0) if (childMask.isOn(xy0)) yInside = table[xy0].getChild()->getLastValue()<0; zInside = yInside; for (Index z = 0; z != (1 << NodeT::LOG2DIM); ++z) { const Index xyz = xy0 + z; // offset for block(x, y, z) if (childMask.isOn(xyz)) { zInside = table[xyz].getChild()->getLastValue()<0; } else { table[xyz].setValue(zInside ? mInside : mOutside); } } } } } else {//no child nodes exist simply use the sign of the first tile value. const ValueT v = table[0].getValue()<0 ? mInside : mOutside; for (Index i = 0; i < NodeT::NUM_VALUES; ++i) table[i].setValue(v); } } // Prune the child nodes of the root node void operator()(RootT& root) const { if (RootT::LEVEL < mMinLevel) return; using ChildT = typename RootT::ChildNodeType; // Insert the child nodes into a map sorted according to their origin std::map<Coord, ChildT*> nodeKeys; typename RootT::ChildOnIter it = root.beginChildOn(); for (; it; ++it) nodeKeys.insert(std::pair<Coord, ChildT*>(it.getCoord(), &(*it))); static const Index DIM = RootT::ChildNodeType::DIM; // We employ a simple z-scanline algorithm that inserts inactive tiles with // the inside value if they are sandwiched between inside child nodes only! 
typename std::map<Coord, ChildT*>::const_iterator b = nodeKeys.begin(), e = nodeKeys.end(); if ( b == e ) return; for (typename std::map<Coord, ChildT*>::const_iterator a = b++; b != e; ++a, ++b) { Coord d = b->first - a->first; // delta of neighboring coordinates if (d[0]!=0 || d[1]!=0 || d[2]==Int32(DIM)) continue;// not same z-scanline or neighbors const ValueT fill[] = { a->second->getLastValue(), b->second->getFirstValue() }; if (!(fill[0] < 0) || !(fill[1] < 0)) continue; // scanline isn't inside Coord c = a->first + Coord(0u, 0u, DIM); for (; c[2] != b->first[2]; c[2] += DIM) root.addTile(c, mInside, false); } root.setBackground(mOutside, /*updateChildNodes=*/false); } private: const ValueT mOutside, mInside; const Index mMinLevel; };// SignedFloodFillOp //{ /// @cond OPENVDB_SIGNED_FLOOD_FILL_INTERNAL template<typename TreeOrLeafManagerT> inline typename std::enable_if<std::is_signed<typename TreeOrLeafManagerT::ValueType>::value, void>::type doSignedFloodFill(TreeOrLeafManagerT& tree, typename TreeOrLeafManagerT::ValueType outsideValue, typename TreeOrLeafManagerT::ValueType insideValue, bool threaded, size_t grainSize, Index minLevel) { tree::NodeManager<TreeOrLeafManagerT> nodes(tree); SignedFloodFillOp<TreeOrLeafManagerT> op(outsideValue, insideValue, minLevel); nodes.foreachBottomUp(op, threaded, grainSize); } // Dummy (no-op) implementation for unsigned types template <typename TreeOrLeafManagerT> inline typename std::enable_if<!std::is_signed<typename TreeOrLeafManagerT::ValueType>::value, void>::type doSignedFloodFill(TreeOrLeafManagerT&, const typename TreeOrLeafManagerT::ValueType&, const typename TreeOrLeafManagerT::ValueType&, bool, size_t, Index) { OPENVDB_THROW(TypeError, "signedFloodFill is supported only for signed value grids"); } /// @endcond //} // If the narrow-band is symmetric and unchanged template <typename TreeOrLeafManagerT> inline void signedFloodFillWithValues( TreeOrLeafManagerT& tree, const typename 
TreeOrLeafManagerT::ValueType& outsideValue, const typename TreeOrLeafManagerT::ValueType& insideValue, bool threaded, size_t grainSize, Index minLevel) { doSignedFloodFill(tree, outsideValue, insideValue, threaded, grainSize, minLevel); } template <typename TreeOrLeafManagerT> inline void signedFloodFill(TreeOrLeafManagerT& tree, bool threaded, size_t grainSize, Index minLevel) { const typename TreeOrLeafManagerT::ValueType v = tree.root().background(); doSignedFloodFill(tree, v, math::negative(v), threaded, grainSize, minLevel); } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_RESETBACKGROUND_HAS_BEEN_INCLUDED
11,608
C
40.460714
100
0.625603
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/MultiResGrid.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file MultiResGrid.h /// /// @author Ken Museth /// /// @warning This class is fairly new and as such has not seen a lot of /// use in production. Please report any issues or request for new /// features directly to [email protected]. /// /// @brief Multi-resolution grid that contains LoD sequences of trees /// with powers of two refinements. /// /// @note While this class can arguably be used to implement a sparse /// Multi-Grid solver it is currently intended as a means to /// efficiently compute LoD levels for applications like rendering /// /// @note Prolongation means interpolation from coarse -> fine /// @note Restriction means interpolation (or remapping) from fine -> coarse /// /// @todo Add option to define the level of the input grid (currenlty /// 0) so as to allow for super-sampling. #ifndef OPENVDB_TOOLS_MULTIRESGRID_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_MULTIRESGRID_HAS_BEEN_INCLUDED #include <openvdb/Grid.h> #include <openvdb/math/FiniteDifference.h> #include <openvdb/math/Math.h> #include <openvdb/math/Operators.h> #include <openvdb/math/Stencils.h> #include <openvdb/Metadata.h> #include <openvdb/tree/LeafManager.h> #include <openvdb/tree/NodeManager.h> #include "Interpolation.h" #include "Morphology.h" #include "Prune.h" #include "SignedFloodFill.h" #include "ValueTransformer.h" #include <tbb/blocked_range.h> #include <tbb/enumerable_thread_specific.h> #include <tbb/parallel_for.h> #include <iostream> #include <sstream> #include <string> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { template<typename TreeType> class MultiResGrid: public MetaMap { public: using Ptr = SharedPtr<MultiResGrid>; using ConstPtr = SharedPtr<const MultiResGrid>; using ValueType = typename TreeType::ValueType; using ValueOnCIter = typename TreeType::ValueOnCIter; using ValueOnIter = typename 
TreeType::ValueOnIter; using TreePtr = typename TreeType::Ptr; using ConstTreePtr = typename TreeType::ConstPtr; using GridPtr = typename Grid<TreeType>::Ptr; using ConstGridPtr = typename Grid<TreeType>::ConstPtr; ////////////////////////////////////////////////////////////////////// /// @brief Constructor of empty grids /// @param levels The number of trees in this MultiResGrid /// @param background Background value /// @param voxelSize Size of a (uniform voxel). Defaults to one. /// @note The multiple grids are all empty. MultiResGrid(size_t levels, ValueType background, double voxelSize = 1.0); /// @brief Given an initial high-resolution grid this constructor /// generates all the coarser grids by means of restriction. /// @param levels The number of trees in this MultiResGrid /// @param grid High-resolution input grid /// @param useInjection Use restriction by injection, vs /// full-weighting. It defaults to false and should rarely be used. /// @note This constructor will perform a deep copy of the input /// grid and use it as the highest level grid. MultiResGrid(size_t levels, const Grid<TreeType> &grid, bool useInjection = false); /// @brief Given an initial high-resolution grid this constructor /// generates all the coarser grids by means of restriction. /// @param levels The number of trees in this MultiResGrid /// @param grid High-resolution input grid /// @param useInjection Use restriction by injection, vs /// full-weighting. It defaults to false and should rarely be used. /// @note This constructor will steal the input grid and use it /// as the highest level grid. On output the grid is empty. MultiResGrid(size_t levels, GridPtr grid, bool useInjection = false); ////////////////////////////////////////////////////////////////////// /// @brief Return the number of levels, i.e. trees, in this MultiResGrid /// @note level 0 is the finest level and numLevels()-1 is the coarsest /// level. 
size_t numLevels() const { return mTrees.size(); } /// @brief Return the level of the finest grid (always 0) static size_t finestLevel() { return 0; } /// @brief Return the level of the coarsest grid, i.e. numLevels()-1 size_t coarsestLevel() const { return mTrees.size()-1; } ////////////////////////////////////////////////////////////////////// /// @brief Return a reference to the tree at the specified level /// @param level The level of the tree to be returned /// @note Level 0 is by definition the finest tree. TreeType& tree(size_t level); /// @brief Return a const reference to the tree at the specified level /// @param level The level of the tree to be returned /// @note Level 0 is by definition the finest tree. const TreeType& constTree(size_t level) const; /// @brief Return a shared pointer to the tree at the specified level /// @param level The level of the tree to be returned /// @note Level 0 is by definition the finest tree. TreePtr treePtr(size_t level); /// @brief Return a const shared pointer to the tree at the specified level /// @param level The level of the tree to be returned /// @note Level 0 is by definition the finest tree. 
ConstTreePtr constTreePtr(size_t level) const; /// @brief Return a reference to the tree at the finest level TreeType& finestTree() { return *mTrees.front(); } /// @brief Return a const reference to the tree at the finest level const TreeType& finestConstTree() const { return *mTrees.front(); } /// @brief Return a shared pointer to the tree at the finest level TreePtr finestTreePtr() { return mTrees.front(); } /// @brief Return a const shared pointer to the tree at the finest level ConstTreePtr finestConstTreePtr() const { return mTrees.front(); } /// @brief Return a reference to the tree at the coarsest level TreeType& coarsestTree() { return *mTrees.back(); } /// @brief Return a const reference to the tree at the coarsest level const TreeType& coarsestConstTree() const { return *mTrees.back(); } /// @brief Return a shared pointer to the tree at the coarsest level TreePtr coarsestTreePtr() { return mTrees.back(); } /// @brief Return a const shared pointer to the tree at the coarsest level ConstTreePtr coarsestConstTreePtr() const { return mTrees.back(); } ////////////////////////////////////////////////////////////////////// /// @brief Return a shared pointer to the grid at the specified integer level /// @param level Integer level of the grid to be returned /// @note Level 0 is by definition the finest grid. GridPtr grid(size_t level); /// @brief Return a const shared pointer to the grid at the specified level /// @param level The level of the grid to be returned /// @note Level 0 is by definition the finest grid. ConstGridPtr grid(size_t level) const; /// @brief Return a shared pointer to a new grid at the specified /// floating-point level. /// @param level Floating-point level of the grid to be returned /// @param grainSize Grain size for the multi-threading /// @details Interpolation of the specified order is performed /// between the bracketing integer levels. /// @note Level 0 is by definition the finest grid. 
template<Index Order> GridPtr createGrid(float level, size_t grainSize = 1) const; /// @brief Return a shared pointer to a vector of all the base /// grids in this instance of the MultiResGrid. /// @brief This method is useful for I/O GridPtrVecPtr grids(); /// @brief Return a const shared pointer to a vector of all the base /// grids in this instance of the MultiResGrid. /// @brief This method is useful for I/O GridCPtrVecPtr grids() const; ////////////////////////////////////////////////////////////////////// //@{ /// @brief Return a reference to the finest grid's transform, which might be /// shared with other grids. /// @note Calling setTransform() on this grid invalidates all references /// previously returned by this method. /// @warning The transform is relative to the finest level (=0) grid! math::Transform& transform() { return *mTransform; } const math::Transform& transform() const { return *mTransform; } const math::Transform& constTransform() const { return *mTransform; } //@} ////////////////////////////////////////////////////////////////////// //@{ /// @brief Return the floating-point index coordinate at out_level given /// the index coordinate in_xyz at in_level. static Vec3R xyz(const Coord& in_ijk, size_t in_level, size_t out_level); static Vec3R xyz(const Vec3R& in_xyz, size_t in_level, size_t out_level); static Vec3R xyz(const Vec3R& in_xyz, double in_level, double out_level); //@} ////////////////////////////////////////////////////////////////////// //@{ /// @brief Return the value at the specified coordinate position using /// interpolation of the specified order into the tree at the out_level. /// /// @details First in_ijk is mapped from index space at in_level to /// out_level, and then a value is interpolated from the tree at out_level. 
/// /// @param in_ijk Index coordinate position relative to tree at in_level /// @param in_level Integer level of the input coordinate in_ijk /// @param out_level Integer level of the interpolated value template<Index Order> ValueType sampleValue(const Coord& in_ijk, size_t in_level, size_t out_level) const; template<Index Order> ValueType sampleValue(const Vec3R& in_ijk, size_t in_level, size_t out_level) const; //@} /// @brief Return the value at the specified integer coordinate position /// and level using interpolation of the specified order. /// @param ijk Integer coordinate position relative to the highest level (=0) grid /// @param level Floating-point level from which to interpolate the value. /// @brief Non-integer values of the level will use linear-interpolation /// between the neighboring integer levels. template<Index Order> ValueType sampleValue(const Coord& ijk, double level) const; /// @brief Return the value at the specified floating-point coordinate position /// and level using interpolation of the specified order. /// @param xyz Floating-point coordinate position relative to the highest level grid /// @param level Floating-point level from which to interpolate /// the value. /// @brief Non-integer values of the level will use linear-interpolation /// between the neighboring integer levels. 
template<Index Order> ValueType sampleValue(const Vec3R& xyz, double level) const; ////////////////////////////////////////////////////////////////////// /// @brief Return the value at coordinate location in @a level tree /// from the coarser tree at @a level+1 using trilinear interpolation /// @param coords input coords relative to the fine tree at level /// @param level The fine level to receive values from the coarser /// level-1 /// @note Prolongation means to interpolation from coarse -> fine ValueType prolongateVoxel(const Coord& coords, const size_t level) const; /// (coarse->fine) Populates all the active voxel values in a fine (@a level) tree /// from the coarse (@a level+1) tree using linear interpolation /// This transforms multiple values of the tree in parallel void prolongateActiveVoxels(size_t destlevel, size_t grainSize = 1); ////////////////////////////////////////////////////////////////////// /// Populate a coordinate location in @a level (coarse) tree /// from the @a level-1 (fine) tree using trilinear interpolation /// input coords are relative to the mTree[level] (coarse) /// @note Restriction means remapping from fine -> coarse ValueType restrictVoxel(Coord ijk, const size_t level, bool useInjection = false) const; /// (fine->coarse) Populates all the active voxel values in the coarse (@a level) tree /// from the fine (@a level-1) tree using trilinear interpolation. /// For cell-centered data, this is equivalent to an average /// For vertex-centered data this is equivalent to transferring the data /// from the fine vertex directly above the coarse vertex. 
/// This transforms multiple values of the tree in parallel void restrictActiveVoxels(size_t destlevel, size_t grainSize = 1); /// Output a human-readable description of this MultiResGrid void print(std::ostream& = std::cout, int verboseLevel = 1) const; /// @brief Return a string with the name of this MultiResGrid std::string getName() const { if (Metadata::ConstPtr meta = (*this)[GridBase::META_GRID_NAME]) return meta->str(); return ""; } /// @brief Set the name of this MultiResGrid void setName(const std::string& name) { this->removeMeta(GridBase::META_GRID_NAME); this->insertMeta(GridBase::META_GRID_NAME, StringMetadata(name)); } /// Return the class of volumetric data (level set, fog volume, etc.) stored in this grid. GridClass getGridClass() const { typename StringMetadata::ConstPtr s = this->getMetadata<StringMetadata>(GridBase::META_GRID_CLASS); return s ? GridBase::stringToGridClass(s->value()) : GRID_UNKNOWN; } /// Specify the class of volumetric data (level set, fog volume, etc.) stored in this grid. void setGridClass(GridClass cls) { this->insertMeta(GridBase::META_GRID_CLASS, StringMetadata(GridBase::gridClassToString(cls))); } /// Remove the setting specifying the class of this grid's volumetric data. 
void clearGridClass() { this->removeMeta(GridBase::META_GRID_CLASS); } private: MultiResGrid(const MultiResGrid& other);//disallow copy construction MultiResGrid& operator=(const MultiResGrid& other);//disallow copy assignment // For optimal performance we disable registration of the ValueAccessor using Accessor = tree::ValueAccessor<TreeType, false>; using ConstAccessor = tree::ValueAccessor<const TreeType, false>; void topDownRestrict(bool useInjection); inline void initMeta(); // Private struct that concurrently creates a mask of active voxel // in a coarse tree from the active voxels in a fine tree struct MaskOp; /// Private struct that performs multi-threaded restriction struct RestrictOp; /// Private struct that performs multi-threaded prolongation struct ProlongateOp; // Private struct that performs multi-threaded computation of grids a fraction levels template<Index Order> struct FractionOp; /// Private template struct that performs the actual multi-threading template<typename OpType> struct CookOp; // Array of shared pointer to trees, level 0 has the highest resolution. 
std::vector<TreePtr> mTrees; // Shared pointer to a transform associated with the finest level grid typename math::Transform::Ptr mTransform; };// MultiResGrid template<typename TreeType> MultiResGrid<TreeType>:: MultiResGrid(size_t levels, ValueType background, double voxelSize) : mTrees(levels) , mTransform(math::Transform::createLinearTransform( voxelSize )) { this->initMeta(); for (size_t i=0; i<levels; ++i) mTrees[i] = TreePtr(new TreeType(background)); } template<typename TreeType> MultiResGrid<TreeType>:: MultiResGrid(size_t levels, const Grid<TreeType> &grid, bool useInjection) : MetaMap(grid) , mTrees(levels) , mTransform( grid.transform().copy() ) { this->initMeta(); mTrees[0].reset( new TreeType( grid.tree() ) );// deep copy input tree mTrees[0]->voxelizeActiveTiles(); this->topDownRestrict(useInjection); } template<typename TreeType> MultiResGrid<TreeType>:: MultiResGrid(size_t levels, GridPtr grid, bool useInjection) : MetaMap(*grid) , mTrees(levels) , mTransform( grid->transform().copy() ) { this->initMeta(); mTrees[0] = grid->treePtr();// steal tree from input grid mTrees[0]->voxelizeActiveTiles(); grid->newTree(); this->topDownRestrict(useInjection); } template<typename TreeType> inline TreeType& MultiResGrid<TreeType>:: tree(size_t level) { assert( level < mTrees.size() ); return *mTrees[level]; } template<typename TreeType> inline const TreeType& MultiResGrid<TreeType>:: constTree(size_t level) const { assert( level < mTrees.size() ); return *mTrees[level]; } template<typename TreeType> inline typename TreeType::Ptr MultiResGrid<TreeType>:: treePtr(size_t level) { assert( level < mTrees.size() ); return mTrees[level]; } template<typename TreeType> inline typename TreeType::ConstPtr MultiResGrid<TreeType>:: constTreePtr(size_t level) const { assert( level < mTrees.size() ); return mTrees[level]; } template<typename TreeType> typename Grid<TreeType>::Ptr MultiResGrid<TreeType>:: grid(size_t level) { typename Grid<TreeType>::Ptr grid = 
Grid<TreeType>::create(this->treePtr(level)); math::Transform::Ptr xform = mTransform->copy(); if (level>0) xform->preScale( Real(1 << level) ); grid->setTransform( xform ); grid->insertMeta( *this->copyMeta() ); grid->insertMeta( "MultiResGrid_Level", Int64Metadata(level)); std::stringstream ss; ss << this->getName() << "_level_" << level; grid->setName( ss.str() ); return grid; } template<typename TreeType> inline typename Grid<TreeType>::ConstPtr MultiResGrid<TreeType>:: grid(size_t level) const { return const_cast<MultiResGrid*>(this)->grid(level); } template<typename TreeType> template<Index Order> typename Grid<TreeType>::Ptr MultiResGrid<TreeType>:: createGrid(float level, size_t grainSize) const { assert( level >= 0.0f && level <= float(mTrees.size()-1) ); typename Grid<TreeType>::Ptr grid(new Grid<TreeType>(this->constTree(0).background())); math::Transform::Ptr xform = mTransform->copy(); xform->preScale( math::Pow(2.0f, level) ); grid->setTransform( xform ); grid->insertMeta( *(this->copyMeta()) ); grid->insertMeta( "MultiResGrid_Level", FloatMetadata(level) ); std::stringstream ss; ss << this->getName() << "_level_" << level; grid->setName( ss.str() ); if ( size_t(floorf(level)) == size_t(ceilf(level)) ) { grid->setTree( this->constTree( size_t(floorf(level))).copy() ); } else { FractionOp<Order> tmp(*this, grid->tree(), level, grainSize); if ( grid->getGridClass() == GRID_LEVEL_SET ) { signedFloodFill( grid->tree() ); pruneLevelSet( grid->tree() );//only creates inactive tiles } } return grid; } template<typename TreeType> GridPtrVecPtr MultiResGrid<TreeType>:: grids() { GridPtrVecPtr grids( new GridPtrVec ); for (size_t level=0; level<mTrees.size(); ++level) grids->push_back(this->grid(level)); return grids; } template<typename TreeType> GridCPtrVecPtr MultiResGrid<TreeType>:: grids() const { GridCPtrVecPtr grids( new GridCPtrVec ); for (size_t level=0; level<mTrees.size(); ++level) grids->push_back(this->grid(level)); return grids; } 
template<typename TreeType> Vec3R MultiResGrid<TreeType>:: xyz(const Coord& in_ijk, size_t in_level, size_t out_level) { return Vec3R( in_ijk.data() ) * Real(1 << in_level) / Real(1 << out_level); } template<typename TreeType> Vec3R MultiResGrid<TreeType>:: xyz(const Vec3R& in_xyz, size_t in_level, size_t out_level) { return in_xyz * Real(1 << in_level) / Real(1 << out_level); } template<typename TreeType> Vec3R MultiResGrid<TreeType>:: xyz(const Vec3R& in_xyz, double in_level, double out_level) { return in_xyz * math::Pow(2.0, in_level - out_level); } template<typename TreeType> template<Index Order> typename TreeType::ValueType MultiResGrid<TreeType>:: sampleValue(const Coord& in_ijk, size_t in_level, size_t out_level) const { assert( in_level >= 0 && in_level < mTrees.size() ); assert( out_level >= 0 && out_level < mTrees.size() ); const ConstAccessor acc(*mTrees[out_level]);// has disabled registration! return tools::Sampler<Order>::sample( acc, this->xyz(in_ijk, in_level, out_level) ); } template<typename TreeType> template<Index Order> typename TreeType::ValueType MultiResGrid<TreeType>:: sampleValue(const Vec3R& in_xyz, size_t in_level, size_t out_level) const { assert( in_level >= 0 && in_level < mTrees.size() ); assert( out_level >= 0 && out_level < mTrees.size() ); const ConstAccessor acc(*mTrees[out_level]);// has disabled registration! 
return tools::Sampler<Order>::sample( acc, this->xyz(in_xyz, in_level, out_level) ); } template<typename TreeType> template<Index Order> typename TreeType::ValueType MultiResGrid<TreeType>:: sampleValue(const Coord& ijk, double level) const { assert( level >= 0.0 && level <= double(mTrees.size()-1) ); const size_t level0 = size_t(floor(level)), level1 = size_t(ceil(level)); const ValueType v0 = this->template sampleValue<Order>( ijk, 0, level0 ); if ( level0 == level1 ) return v0; assert( level1 - level0 == 1 ); const ValueType v1 = this->template sampleValue<Order>( ijk, 0, level1 ); OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN const ValueType a = ValueType(level1 - level); OPENVDB_NO_TYPE_CONVERSION_WARNING_END return a * v0 + (ValueType(1) - a) * v1; } template<typename TreeType> template<Index Order> typename TreeType::ValueType MultiResGrid<TreeType>:: sampleValue(const Vec3R& xyz, double level) const { assert( level >= 0.0 && level <= double(mTrees.size()-1) ); const size_t level0 = size_t(floor(level)), level1 = size_t(ceil(level)); const ValueType v0 = this->template sampleValue<Order>( xyz, 0, level0 ); if ( level0 == level1 ) return v0; assert( level1 - level0 == 1 ); const ValueType v1 = this->template sampleValue<Order>( xyz, 0, level1 ); OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN const ValueType a = ValueType(level1 - level); OPENVDB_NO_TYPE_CONVERSION_WARNING_END return a * v0 + (ValueType(1) - a) * v1; } template<typename TreeType> typename TreeType::ValueType MultiResGrid<TreeType>:: prolongateVoxel(const Coord& ijk, const size_t level) const { assert( level+1 < mTrees.size() ); const ConstAccessor acc(*mTrees[level + 1]);// has disabled registration! 
return ProlongateOp::run(ijk, acc); } template<typename TreeType> void MultiResGrid<TreeType>:: prolongateActiveVoxels(size_t destlevel, size_t grainSize) { assert( destlevel < mTrees.size()-1 ); TreeType &fineTree = *mTrees[ destlevel ]; const TreeType &coarseTree = *mTrees[ destlevel+1 ]; CookOp<ProlongateOp> tmp( coarseTree, fineTree, grainSize ); } template<typename TreeType> typename TreeType::ValueType MultiResGrid<TreeType>:: restrictVoxel(Coord ijk, const size_t destlevel, bool useInjection) const { assert( destlevel > 0 && destlevel < mTrees.size() ); const TreeType &fineTree = *mTrees[ destlevel-1 ]; if ( useInjection ) return fineTree.getValue(ijk<<1); const ConstAccessor acc( fineTree );// has disabled registration! return RestrictOp::run( ijk, acc); } template<typename TreeType> void MultiResGrid<TreeType>:: restrictActiveVoxels(size_t destlevel, size_t grainSize) { assert( destlevel > 0 && destlevel < mTrees.size() ); const TreeType &fineTree = *mTrees[ destlevel-1 ]; TreeType &coarseTree = *mTrees[ destlevel ]; CookOp<RestrictOp> tmp( fineTree, coarseTree, grainSize ); } template<typename TreeType> void MultiResGrid<TreeType>:: print(std::ostream& os, int verboseLevel) const { os << "MultiResGrid with " << mTrees.size() << " levels\n"; for (size_t i=0; i<mTrees.size(); ++i) { os << "Level " << i << ": "; mTrees[i]->print(os, verboseLevel); } if ( MetaMap::metaCount() > 0) { os << "Additional metadata:" << std::endl; for (ConstMetaIterator it = beginMeta(), end = endMeta(); it != end; ++it) { os << " " << it->first; if (it->second) { const std::string value = it->second->str(); if (!value.empty()) os << ": " << value; } os << "\n"; } } os << "Transform:" << std::endl; transform().print(os, /*indent=*/" "); os << std::endl; } template<typename TreeType> void MultiResGrid<TreeType>:: initMeta() { const size_t levels = this->numLevels(); if (levels < 2) { OPENVDB_THROW(ValueError, "MultiResGrid: at least two levels are required"); } 
this->insertMeta("MultiResGrid_Levels", Int64Metadata( levels ) ); } template<typename TreeType> void MultiResGrid<TreeType>:: topDownRestrict(bool useInjection) { const bool isLevelSet = this->getGridClass() == GRID_LEVEL_SET; for (size_t n=1; n<mTrees.size(); ++n) { const TreeType &fineTree = *mTrees[n-1]; mTrees[n] = TreePtr(new TreeType( fineTree.background() ) );// empty tree TreeType &coarseTree = *mTrees[n]; if (useInjection) {// Restriction by injection for (ValueOnCIter it = fineTree.cbeginValueOn(); it; ++it) { const Coord ijk = it.getCoord(); if ( (ijk[0] & 1) || (ijk[1] & 1) || (ijk[2] & 1) ) continue; coarseTree.setValue( ijk >> 1, *it ); } } else {// Restriction by full-weighting MaskOp tmp(fineTree, coarseTree, 128); this->restrictActiveVoxels(n, 64); } if ( isLevelSet ) { tools::signedFloodFill( coarseTree ); tools::pruneLevelSet( coarseTree );//only creates inactive tiles } }// loop over grid levels } template<typename TreeType> struct MultiResGrid<TreeType>::MaskOp { using MaskT = typename TreeType::template ValueConverter<ValueMask>::Type; using PoolType = tbb::enumerable_thread_specific<TreeType>; using ManagerT = tree::LeafManager<const MaskT>; using RangeT = typename ManagerT::LeafRange; using VoxelIterT = typename ManagerT::LeafNodeType::ValueOnCIter; MaskOp(const TreeType& fineTree, TreeType& coarseTree, size_t grainSize = 1) : mPool(new PoolType( coarseTree ) )// empty coarse tree acts as examplar { assert( coarseTree.empty() ); // Create Mask of restruction performed on fineTree MaskT mask(fineTree, false, true, TopologyCopy() ); // Muli-threaded dilation which also linearizes the tree to leaf nodes tools::dilateActiveValues(mask, 1, NN_FACE_EDGE_VERTEX, EXPAND_TILES); // Restriction by injection using thread-local storage of coarse tree masks ManagerT leafs( mask ); tbb::parallel_for(leafs.leafRange( grainSize ), *this); // multithreaded union of thread-local coarse tree masks with the coarse tree using IterT = typename 
PoolType::const_iterator; for (IterT it=mPool->begin(); it!=mPool->end(); ++it) coarseTree.topologyUnion( *it ); delete mPool; } void operator()(const RangeT& range) const { Accessor coarseAcc( mPool->local() );// disabled registration for (typename RangeT::Iterator leafIter = range.begin(); leafIter; ++leafIter) { for (VoxelIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) { Coord ijk = voxelIter.getCoord(); if ( (ijk[2] & 1) || (ijk[1] & 1) || (ijk[0] & 1) ) continue;//no overlap coarseAcc.setValueOn( ijk >> 1 );//injection from fine to coarse level }//loop over active voxels in the fine tree }// loop over leaf nodes in the fine tree } PoolType* mPool; };// MaskOp template<typename TreeType> template<Index Order> struct MultiResGrid<TreeType>::FractionOp { using MaskT = typename TreeType::template ValueConverter<ValueMask>::Type; using PoolType = tbb::enumerable_thread_specific<MaskT>; using PoolIterT = typename PoolType::iterator; using Manager1 = tree::LeafManager<const TreeType>; using Manager2 = tree::LeafManager<TreeType>; using Range1 = typename Manager1::LeafRange; using Range2 = typename Manager2::LeafRange; FractionOp(const MultiResGrid& parent, TreeType& midTree, float level, size_t grainSize = 1) : mLevel( level ) , mPool(nullptr) , mTree0( &*(parent.mTrees[size_t(floorf(level))]) )//high-resolution , mTree1( &*(parent.mTrees[size_t(ceilf(level))]) ) //low-resolution { assert( midTree.empty() ); assert( mTree0 != mTree1 ); // Create a pool of thread-local masks MaskT examplar( false ); mPool = new PoolType( examplar ); {// create mask from re-mapping coarse tree to mid-level tree tree::LeafManager<const TreeType> manager( *mTree1 ); tbb::parallel_for( manager.leafRange(grainSize), *this ); } // Multi-threaded dilation of mask tbb::parallel_for(tbb::blocked_range<PoolIterT>(mPool->begin(),mPool->end(),1), *this); // Union thread-local coarse tree masks into the coarse tree for (PoolIterT it=mPool->begin(); it!=mPool->end(); ++it) 
midTree.topologyUnion( *it ); delete mPool; {// Interpolate values into the static mid level tree Manager2 manager( midTree ); tbb::parallel_for(manager.leafRange(grainSize), *this); } } void operator()(const Range1& range) const { using VoxelIter = typename Manager1::LeafNodeType::ValueOnCIter; // Let mLevel = level + frac, where // level is integer part of mLevel and frac is the fractional part // low-res voxel size in world units = dx1 = 2^(level + 1) // mid-res voxel size in world units = dx = 2^(mLevel) = 2^(level + frac) // low-res index -> world: ijk * dx1 // world -> mid-res index: world / dx // low-res index -> mid-res index: (ijk * dx1) / dx = ijk * scale where // scale = dx1/dx = 2^(level+1)/2^(level+frac) = 2^(1-frac) const float scale = math::Pow(2.0f, 1.0f - math::FractionalPart(mLevel)); tree::ValueAccessor<MaskT, false> acc( mPool->local() );// disabled registration for (typename Range1::Iterator leafIter = range.begin(); leafIter; ++leafIter) { for (VoxelIter voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) { Coord ijk = voxelIter.getCoord(); OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN const auto value0 = ijk[0] * scale; const auto value1 = ijk[1] * scale; const auto value2 = ijk[2] * scale; OPENVDB_NO_TYPE_CONVERSION_WARNING_END ijk[0] = int(math::Round(value0)); ijk[1] = int(math::Round(value1)); ijk[2] = int(math::Round(value2)); acc.setValueOn( ijk ); }//loop over active voxels in the fine tree }// loop over leaf nodes in the fine tree } void operator()(const tbb::blocked_range<PoolIterT>& range) const { for (PoolIterT it=range.begin(); it!=range.end(); ++it) { tools::dilateVoxels( *it, 1, NN_FACE_EDGE_VERTEX); } } void operator()(const Range2 &r) const { using VoxelIter = typename TreeType::LeafNodeType::ValueOnIter; // Let mLevel = level + frac, where // level is integer part of mLevel and frac is the fractional part // high-res voxel size in world units = dx0 = 2^(level) // low-res voxel size in world units = dx1 = 2^(level+1) // 
mid-res voxel size in world units = dx = 2^(mLevel) = 2^(level + frac) // mid-res index -> world: ijk * dx // world -> high-res index: world / dx0 // world -> low-res index: world / dx1 // mid-res index -> high-res index: (ijk * dx) / dx0 = ijk * scale0 where // scale0 = dx/dx0 = 2^(level+frac)/2^(level) = 2^(frac) // mid-res index -> low-res index: (ijk * dx) / dx1 = ijk * scale1 where // scale1 = dx/dx1 = 2^(level+frac)/2^(level+1) = 2^(frac-1) const float b = math::FractionalPart(mLevel), a = 1.0f - b; const float scale0 = math::Pow( 2.0f, b ); const float scale1 = math::Pow( 2.0f,-a ); ConstAccessor acc0( *mTree0 ), acc1( *mTree1 ); for (typename Range2::Iterator leafIter = r.begin(); leafIter; ++leafIter) { for (VoxelIter voxelIter = leafIter->beginValueOn(); voxelIter; ++voxelIter) { const Vec3R xyz = Vec3R( voxelIter.getCoord().data() );// mid level coord const ValueType v0 = tools::Sampler<Order>::sample( acc0, xyz * scale0 ); const ValueType v1 = tools::Sampler<Order>::sample( acc1, xyz * scale1 ); OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN const auto value0 = a*v0; const auto value1 = b*v1; OPENVDB_NO_TYPE_CONVERSION_WARNING_END voxelIter.setValue( ValueType(value0 + value1) ); } } } const float mLevel; PoolType* mPool; const TreeType *mTree0, *mTree1; };// FractionOp template<typename TreeType> template<typename OperatorType> struct MultiResGrid<TreeType>::CookOp { using ManagerT = tree::LeafManager<TreeType>; using RangeT = typename ManagerT::LeafRange; CookOp(const TreeType& srcTree, TreeType& dstTree, size_t grainSize): acc(srcTree) { ManagerT leafs(dstTree); tbb::parallel_for(leafs.leafRange(grainSize), *this); } CookOp(const CookOp &other): acc(other.acc.tree()) {} void operator()(const RangeT& range) const { for (auto leafIt = range.begin(); leafIt; ++leafIt) { auto& phi = leafIt.buffer(0); for (auto voxelIt = leafIt->beginValueOn(); voxelIt; ++voxelIt) { phi.setValue(voxelIt.pos(), OperatorType::run(voxelIt.getCoord(), acc)); } } } const 
ConstAccessor acc;
};// CookOp


template<typename TreeType>
struct MultiResGrid<TreeType>::RestrictOp
{
    /// @brief Static method that performs restriction by full weighting
    /// @param ijk Coordinate location on the coarse tree
    /// @param acc ValueAccessor to the fine tree
    /// @return the weighted average of the 27 fine-grid values surrounding
    ///         the fine-grid point that overlaps coarse-grid point @a ijk
    static ValueType run(Coord ijk, const ConstAccessor &acc)
    {
        ijk <<= 1;// coarse index -> overlapping fine index (doubling in each axis)
        // Overlapping grid point
        ValueType v = 8*acc.getValue(ijk);
        // neighbors in one axial direction
        v += 4*(acc.getValue(ijk.offsetBy(-1, 0, 0)) + acc.getValue(ijk.offsetBy( 1, 0, 0)) +// x
                acc.getValue(ijk.offsetBy( 0,-1, 0)) + acc.getValue(ijk.offsetBy( 0, 1, 0)) +// y
                acc.getValue(ijk.offsetBy( 0, 0,-1)) + acc.getValue(ijk.offsetBy( 0, 0, 1)));// z
        // neighbors in two axial directions
        v += 2*(acc.getValue(ijk.offsetBy(-1,-1, 0)) + acc.getValue(ijk.offsetBy(-1, 1, 0)) +// xy
                acc.getValue(ijk.offsetBy( 1,-1, 0)) + acc.getValue(ijk.offsetBy( 1, 1, 0)) +// xy
                acc.getValue(ijk.offsetBy(-1, 0,-1)) + acc.getValue(ijk.offsetBy(-1, 0, 1)) +// xz
                acc.getValue(ijk.offsetBy( 1, 0,-1)) + acc.getValue(ijk.offsetBy( 1, 0, 1)) +// xz
                acc.getValue(ijk.offsetBy( 0,-1,-1)) + acc.getValue(ijk.offsetBy( 0,-1, 1)) +// yz
                acc.getValue(ijk.offsetBy( 0, 1,-1)) + acc.getValue(ijk.offsetBy( 0, 1, 1)));// yz
        // neighbors in three axial directions
        for (int i=-1; i<=1; i+=2) {
            for (int j=-1; j<=1; j+=2) {
                for (int k=-1; k<=1; k+=2) v += acc.getValue(ijk.offsetBy(i,j,k));// xyz
            }
        }
        // Normalize: weights are 8 + 6*4 + 12*2 + 8*1 = 64
        v *= ValueType(1.0f/64.0f);
        return v;
    }
};// RestrictOp


template<typename TreeType>
struct MultiResGrid<TreeType>::ProlongateOp
{
    /// @brief Interpolate values from a coarse grid (acc) into the index space (ijk) of a fine grid
    /// @param ijk Coordinate location on the fine tree
    /// @param acc ValueAccessor to the coarse tree
    /// @return the tri-linearly interpolated coarse-grid value at fine-grid point @a ijk
    static ValueType run(const Coord& ijk, const ConstAccessor &acc)
    {
        // The switch key encodes the parity (odd/even) of each axis of ijk:
        // an even axis is collocated with a coarse grid point, an odd axis
        // lies halfway between two coarse grid points and must be averaged.
        switch ( (ijk[0] & 1) | ((ijk[1] & 1) << 1) | ((ijk[2] & 1) << 2) ) {
        case 0:// all even
            return acc.getValue(ijk>>1);
        case 1:// x is odd
            return ValueType(0.5)*(acc.getValue(ijk.offsetBy(-1,0,0)>>1) +
                                   acc.getValue(ijk.offsetBy( 1,0,0)>>1));
        case 2:// y is odd
            return ValueType(0.5)*(acc.getValue(ijk.offsetBy(0,-1,0)>>1) +
                                   acc.getValue(ijk.offsetBy(0, 1,0)>>1));
        case 3:// x&y are odd
            return ValueType(0.25)*(acc.getValue(ijk.offsetBy(-1,-1,0)>>1) +
                                    acc.getValue(ijk.offsetBy(-1, 1,0)>>1) +
                                    acc.getValue(ijk.offsetBy( 1,-1,0)>>1) +
                                    acc.getValue(ijk.offsetBy( 1, 1,0)>>1));
        case 4:// z is odd
            return ValueType(0.5)*(acc.getValue(ijk.offsetBy(0,0,-1)>>1) +
                                   acc.getValue(ijk.offsetBy(0,0, 1)>>1));
        case 5:// x&z are odd
            return ValueType(0.25)*(acc.getValue(ijk.offsetBy(-1,0,-1)>>1) +
                                    acc.getValue(ijk.offsetBy(-1,0, 1)>>1) +
                                    acc.getValue(ijk.offsetBy( 1,0,-1)>>1) +
                                    acc.getValue(ijk.offsetBy( 1,0, 1)>>1));
        case 6:// y&z are odd
            return ValueType(0.25)*(acc.getValue(ijk.offsetBy(0,-1,-1)>>1) +
                                    acc.getValue(ijk.offsetBy(0,-1, 1)>>1) +
                                    acc.getValue(ijk.offsetBy(0, 1,-1)>>1) +
                                    acc.getValue(ijk.offsetBy(0, 1, 1)>>1));
        }
        // all are odd: average the eight surrounding coarse values
        ValueType v = zeroVal<ValueType>();
        for (int i=-1; i<=1; i+=2) {
            for (int j=-1; j<=1; j+=2) {
                for (int k=-1; k<=1; k+=2) v += acc.getValue(ijk.offsetBy(i,j,k)>>1);// xyz
            }
        }
        return ValueType(0.125) * v;
    }
};// ProlongateOp

} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TOOLS_MULTIRESGRID_HAS_BEEN_INCLUDED
38,573
C
39.518908
102
0.636637
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Interpolation.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

/// @file Interpolation.h
///
/// Sampler classes such as PointSampler and BoxSampler that are intended for use
/// with tools::GridTransformer should operate in voxel space and must adhere to
/// the interface described in the example below:
/// @code
/// struct MySampler
/// {
///     // Return a short name that can be used to identify this sampler
///     // in error messages and elsewhere.
///     const char* name() { return "mysampler"; }
///
///     // Return the radius of the sampling kernel in voxels, not including
///     // the center voxel.  This is the number of voxels of padding that
///     // are added to all sides of a volume as a result of resampling.
///     int radius() { return 2; }
///
///     // Return true if scaling by a factor smaller than 0.5 (along any axis)
///     // should be handled via a mipmapping-like scheme of successive halvings
///     // of a grid's resolution, until the remaining scale factor is
///     // greater than or equal to 1/2.  Set this to false only when high-quality
///     // scaling is not required.
///     bool mipmap() { return true; }
///
///     // Specify if sampling at a location that is collocated with a grid point
///     // is guaranteed to return the exact value at that grid point.
///     // For most sampling kernels, this should be false.
///     bool consistent() { return false; }
///
///     // Sample the tree at the given coordinates and return the result in val.
///     // Return true if the sampled value is active.
///     template<class TreeT>
///     bool sample(const TreeT& tree, const Vec3R& coord, typename TreeT::ValueType& val);
/// };
/// @endcode

#ifndef OPENVDB_TOOLS_INTERPOLATION_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_INTERPOLATION_HAS_BEEN_INCLUDED

#include <openvdb/version.h> // for OPENVDB_VERSION_NAME
#include <openvdb/Platform.h> // for round()
#include <openvdb/math/Math.h>// for SmoothUnitStep
#include <openvdb/math/Transform.h> // for Transform
#include <openvdb/Grid.h>
#include <openvdb/tree/ValueAccessor.h>
#include <cmath>
#include <type_traits>

namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {

/// @brief Provides a unified interface for sampling, i.e. interpolation.
/// @details Order = 0: closest point
///          Order = 1: tri-linear
///          Order = 2: tri-quadratic
///          Staggered: Set to true for MAC grids
template <size_t Order, bool Staggered = false>
struct Sampler
{
    static_assert(Order < 3, "Samplers of order higher than 2 are not supported");
    static const char* name();
    static int radius();
    static bool mipmap();
    static bool consistent();
    static bool staggered();
    static size_t order();

    /// @brief Sample @a inTree at the floating-point index coordinate @a inCoord
    /// and store the result in @a result.
    ///
    /// @return @c true if the sampled value is active.
    template<class TreeT>
    static bool sample(const TreeT& inTree, const Vec3R& inCoord,
        typename TreeT::ValueType& result);

    /// @brief Sample @a inTree at the floating-point index coordinate @a inCoord.
    ///
    /// @return the reconstructed value
    template<class TreeT>
    static typename TreeT::ValueType sample(const TreeT& inTree, const Vec3R& inCoord);
};

//////////////////////////////////////// Non-Staggered Samplers

// The following samplers operate in voxel space.
// When the samplers are applied to grids holding vector or other non-scalar data,
// the data is assumed to be collocated.  For example, using the BoxSampler on a grid
// with ValueType Vec3f assumes that all three elements in a vector can be assigned
// the same physical location.  Consider using the GridSampler below instead.

struct PointSampler
{
    static const char* name() { return "point"; }
    static int radius() { return 0; }
    static bool mipmap() { return false; }
    static bool consistent() { return true; }
    static bool staggered() { return false; }
    static size_t order() { return 0; }

    /// @brief Sample @a inTree at the nearest neighbor to @a inCoord
    /// and store the result in @a result.
    /// @return @c true if the sampled value is active.
    template<class TreeT>
    static bool sample(const TreeT& inTree, const Vec3R& inCoord,
        typename TreeT::ValueType& result);

    /// @brief Sample @a inTree at the nearest neighbor to @a inCoord
    /// @return the reconstructed value
    template<class TreeT>
    static typename TreeT::ValueType sample(const TreeT& inTree, const Vec3R& inCoord);
};


struct BoxSampler
{
    static const char* name() { return "box"; }
    static int radius() { return 1; }
    static bool mipmap() { return true; }
    static bool consistent() { return true; }
    static bool staggered() { return false; }
    static size_t order() { return 1; }

    /// @brief Trilinearly reconstruct @a inTree at @a inCoord
    /// and store the result in @a result.
    /// @return @c true if any one of the sampled values is active.
    template<class TreeT>
    static bool sample(const TreeT& inTree, const Vec3R& inCoord,
        typename TreeT::ValueType& result);

    /// @brief Trilinearly reconstruct @a inTree at @a inCoord.
    /// @return the reconstructed value
    template<class TreeT>
    static typename TreeT::ValueType sample(const TreeT& inTree, const Vec3R& inCoord);

    /// @brief Import all eight values from @a inTree to support
    /// tri-linear interpolation.
    template<class ValueT, class TreeT, size_t N>
    static inline void getValues(ValueT (&data)[N][N][N], const TreeT& inTree, Coord ijk);

    /// @brief Import all eight values from @a inTree to support
    /// tri-linear interpolation.
    /// @return @c true if any of the eight values are active
    template<class ValueT, class TreeT, size_t N>
    static inline bool probeValues(ValueT (&data)[N][N][N], const TreeT& inTree, Coord ijk);

    /// @brief Find the minimum and maximum values of the eight cell
    /// values in @a data.
    template<class ValueT, size_t N>
    static inline void extrema(ValueT (&data)[N][N][N], ValueT& vMin, ValueT& vMax);

    /// @return the tri-linear interpolation with the unit cell coordinates @a uvw
    template<class ValueT, size_t N>
    static inline ValueT trilinearInterpolation(ValueT (&data)[N][N][N], const Vec3R& uvw);
};


struct QuadraticSampler
{
    static const char* name() { return "quadratic"; }
    static int radius() { return 1; }
    static bool mipmap() { return true; }
    static bool consistent() { return false; }
    static bool staggered() { return false; }
    static size_t order() { return 2; }

    /// @brief Triquadratically reconstruct @a inTree at @a inCoord
    /// and store the result in @a result.
    /// @return @c true if any one of the sampled values is active.
    template<class TreeT>
    static bool sample(const TreeT& inTree, const Vec3R& inCoord,
        typename TreeT::ValueType& result);

    /// @brief Triquadratically reconstruct @a inTree at @a inCoord.
    /// @return the reconstructed value
    template<class TreeT>
    static typename TreeT::ValueType sample(const TreeT& inTree, const Vec3R& inCoord);

    template<class ValueT, size_t N>
    static inline ValueT triquadraticInterpolation(ValueT (&data)[N][N][N], const Vec3R& uvw);
};

//////////////////////////////////////// Staggered Samplers

// The following samplers operate in voxel space and are designed for Vec3
// staggered grid data (e.g., fluid simulations using the Marker-and-Cell approach
// associate elements of the velocity vector with different physical locations:
// the faces of a cube).

struct StaggeredPointSampler
{
    static const char* name() { return "point"; }
    static int radius() { return 0; }
    static bool mipmap() { return false; }
    static bool consistent() { return false; }
    static bool staggered() { return true; }
    static size_t order() { return 0; }

    /// @brief Sample @a inTree at the nearest neighbor to @a inCoord
    /// and store the result in @a result.
    /// @return true if the sampled value is active.
    template<class TreeT>
    static bool sample(const TreeT& inTree, const Vec3R& inCoord,
        typename TreeT::ValueType& result);

    /// @brief Sample @a inTree at the nearest neighbor to @a inCoord
    /// @return the reconstructed value
    template<class TreeT>
    static typename TreeT::ValueType sample(const TreeT& inTree, const Vec3R& inCoord);
};


struct StaggeredBoxSampler
{
    static const char* name() { return "box"; }
    static int radius() { return 1; }
    static bool mipmap() { return true; }
    static bool consistent() { return false; }
    static bool staggered() { return true; }
    static size_t order() { return 1; }

    /// @brief Trilinearly reconstruct @a inTree at @a inCoord
    /// and store the result in @a result.
    /// @return true if any one of the sampled value is active.
    template<class TreeT>
    static bool sample(const TreeT& inTree, const Vec3R& inCoord,
        typename TreeT::ValueType& result);

    /// @brief Trilinearly reconstruct @a inTree at @a inCoord.
/// @return the reconstructed value template<class TreeT> static typename TreeT::ValueType sample(const TreeT& inTree, const Vec3R& inCoord); }; struct StaggeredQuadraticSampler { static const char* name() { return "quadratic"; } static int radius() { return 1; } static bool mipmap() { return true; } static bool consistent() { return false; } static bool staggered() { return true; } static size_t order() { return 2; } /// @brief Triquadratically reconstruct @a inTree at @a inCoord /// and store the result in @a result. /// @return true if any one of the sampled values is active. template<class TreeT> static bool sample(const TreeT& inTree, const Vec3R& inCoord, typename TreeT::ValueType& result); /// @brief Triquadratically reconstruct @a inTree at to @a inCoord. /// @return the reconstructed value template<class TreeT> static typename TreeT::ValueType sample(const TreeT& inTree, const Vec3R& inCoord); }; //////////////////////////////////////// GridSampler /// @brief Class that provides the interface for continuous sampling /// of values in a tree. /// /// @details Since trees support only discrete voxel sampling, TreeSampler /// must be used to sample arbitrary continuous points in (world or /// index) space. /// /// @warning This implementation of the GridSampler stores a pointer /// to a Tree for value access. While this is thread-safe it is /// uncached and hence slow compared to using a /// ValueAccessor. Consequently it is normally advisable to use the /// template specialization below that employs a /// ValueAccessor. However, care must be taken when dealing with /// multi-threading (see warning below). 
template<typename GridOrTreeType, typename SamplerType>
class GridSampler
{
public:
    using Ptr = SharedPtr<GridSampler>;
    using ValueType = typename GridOrTreeType::ValueType;
    using GridType = typename TreeAdapter<GridOrTreeType>::GridType;
    using TreeType = typename TreeAdapter<GridOrTreeType>::TreeType;
    using AccessorType = typename TreeAdapter<GridOrTreeType>::AccessorType;

    /// @param grid  a grid to be sampled
    explicit GridSampler(const GridType& grid)
        : mTree(&(grid.tree())), mTransform(&(grid.transform())) {}

    /// @param tree  a tree to be sampled, or a ValueAccessor for the tree
    /// @param transform is used when sampling world space locations.
    GridSampler(const TreeType& tree, const math::Transform& transform)
        : mTree(&tree), mTransform(&transform) {}

    const math::Transform& transform() const { return *mTransform; }

    /// @brief Sample a point in index space in the grid.
    /// @param x Fractional x-coordinate of point in index-coordinates of grid
    /// @param y Fractional y-coordinate of point in index-coordinates of grid
    /// @param z Fractional z-coordinate of point in index-coordinates of grid
    template<typename RealType>
    ValueType sampleVoxel(const RealType& x, const RealType& y, const RealType& z) const
    {
        return this->isSample(Vec3d(x,y,z));
    }

    /// @brief Sample value in integer index space
    /// @param i Integer x-coordinate in index space
    /// @param j Integer y-coordinate in index space
    /// @param k Integer z-coordinate in index space
    ValueType sampleVoxel(typename Coord::ValueType i,
                          typename Coord::ValueType j,
                          typename Coord::ValueType k) const
    {
        return this->isSample(Coord(i,j,k));
    }

    /// @brief Sample value in integer index space
    /// @param ijk the location in index space
    ValueType isSample(const Coord& ijk) const { return mTree->getValue(ijk); }

    /// @brief Sample in fractional index space
    /// @param ispoint the location in index space
    ValueType isSample(const Vec3d& ispoint) const
    {
        ValueType result = zeroVal<ValueType>();
        SamplerType::sample(*mTree, ispoint, result);
        return result;
    }

    /// @brief Sample in world space
    /// @param wspoint the location in world space
    ValueType wsSample(const Vec3d& wspoint) const
    {
        ValueType result = zeroVal<ValueType>();
        SamplerType::sample(*mTree, mTransform->worldToIndex(wspoint), result);
        return result;
    }

private:
    const TreeType* mTree;
    const math::Transform* mTransform;
}; // class GridSampler


/// @brief Specialization of GridSampler for construction from a ValueAccessor type
///
/// @note  This version should normally be favored over the one above
/// that takes a Grid or Tree. The reason is this version uses a
/// ValueAccessor that performs fast (cached) access where the
/// tree-based flavor performs slower (uncached) access.
///
/// @warning Since this version stores a pointer to an (externally
/// allocated) value accessor it is not threadsafe. Hence each thread
/// should have its own instance of a GridSampler constructed from a
/// local ValueAccessor. Alternatively the Grid/Tree-based GridSampler
/// is threadsafe, but also slower.
template<typename TreeT, typename SamplerType>
class GridSampler<tree::ValueAccessor<TreeT>, SamplerType>
{
public:
    using Ptr = SharedPtr<GridSampler>;
    using ValueType = typename TreeT::ValueType;
    using TreeType = TreeT;
    using GridType = Grid<TreeType>;
    using AccessorType = typename tree::ValueAccessor<TreeT>;

    /// @param acc  a ValueAccessor to be sampled
    /// @param transform is used when sampling world space locations.
    GridSampler(const AccessorType& acc,
                const math::Transform& transform)
        : mAccessor(&acc), mTransform(&transform) {}

    const math::Transform& transform() const { return *mTransform; }

    /// @brief Sample a point in index space in the grid.
    /// @param x Fractional x-coordinate of point in index-coordinates of grid
    /// @param y Fractional y-coordinate of point in index-coordinates of grid
    /// @param z Fractional z-coordinate of point in index-coordinates of grid
    template<typename RealType>
    ValueType sampleVoxel(const RealType& x, const RealType& y, const RealType& z) const
    {
        return this->isSample(Vec3d(x,y,z));
    }

    /// @brief Sample value in integer index space
    /// @param i Integer x-coordinate in index space
    /// @param j Integer y-coordinate in index space
    /// @param k Integer z-coordinate in index space
    ValueType sampleVoxel(typename Coord::ValueType i,
                          typename Coord::ValueType j,
                          typename Coord::ValueType k) const
    {
        return this->isSample(Coord(i,j,k));
    }

    /// @brief Sample value in integer index space
    /// @param ijk the location in index space
    ValueType isSample(const Coord& ijk) const { return mAccessor->getValue(ijk); }

    /// @brief Sample in fractional index space
    /// @param ispoint the location in index space
    ValueType isSample(const Vec3d& ispoint) const
    {
        ValueType result = zeroVal<ValueType>();
        SamplerType::sample(*mAccessor, ispoint, result);
        return result;
    }

    /// @brief Sample in world space
    /// @param wspoint the location in world space
    ValueType wsSample(const Vec3d& wspoint) const
    {
        ValueType result = zeroVal<ValueType>();
        SamplerType::sample(*mAccessor, mTransform->worldToIndex(wspoint), result);
        return result;
    }

private:
    const AccessorType* mAccessor;//not thread-safe!
    const math::Transform* mTransform;
};//Specialization of GridSampler

//////////////////////////////////////// DualGridSampler

/// @brief This is a simple convenience class that allows for sampling
/// from a source grid into the index space of a target grid. At
/// construction the source and target grids are checked for alignment
/// which potentially renders interpolation unnecessary. Else
/// interpolation is performed according to the templated Sampler
/// type.
///
/// @warning For performance reasons the check for alignment of the
/// two grids is only performed at construction time!
template<typename GridOrTreeT,
         typename SamplerT>
class DualGridSampler
{
public:
    using ValueType = typename GridOrTreeT::ValueType;
    using GridType = typename TreeAdapter<GridOrTreeT>::GridType;
    using TreeType = typename TreeAdapter<GridOrTreeT>::TreeType;
    using AccessorType = typename TreeAdapter<GridType>::AccessorType;

    /// @brief Grid and transform constructor.
    /// @param sourceGrid Source grid.
    /// @param targetXform Transform of the target grid.
    DualGridSampler(const GridType& sourceGrid,
                    const math::Transform& targetXform)
        : mSourceTree(&(sourceGrid.tree()))
        , mSourceXform(&(sourceGrid.transform()))
        , mTargetXform(&targetXform)
        , mAligned(targetXform == *mSourceXform)
    {
    }
    /// @brief Tree and transform constructor.
    /// @param sourceTree Source tree.
    /// @param sourceXform Transform of the source grid.
    /// @param targetXform Transform of the target grid.
    DualGridSampler(const TreeType& sourceTree,
                    const math::Transform& sourceXform,
                    const math::Transform& targetXform)
        : mSourceTree(&sourceTree)
        , mSourceXform(&sourceXform)
        , mTargetXform(&targetXform)
        , mAligned(targetXform == sourceXform)
    {
    }
    /// @brief Return the value of the source grid at the index
    /// coordinates, ijk, relative to the target grid (or its transform).
    inline ValueType operator()(const Coord& ijk) const
    {
        if (mAligned) return mSourceTree->getValue(ijk);
        const Vec3R world = mTargetXform->indexToWorld(ijk);
        return SamplerT::sample(*mSourceTree, mSourceXform->worldToIndex(world));
    }
    /// @brief Return true if the two grids are aligned.
    inline bool isAligned() const { return mAligned; }
private:
    const TreeType* mSourceTree;
    const math::Transform* mSourceXform;
    const math::Transform* mTargetXform;
    const bool mAligned;
};// DualGridSampler

/// @brief Specialization of DualGridSampler for construction from a ValueAccessor type.
template<typename TreeT,
         typename SamplerT>
class DualGridSampler<tree::ValueAccessor<TreeT>, SamplerT>
{
public:
    using ValueType = typename TreeT::ValueType;
    using TreeType = TreeT;
    using GridType = Grid<TreeType>;
    using AccessorType = typename tree::ValueAccessor<TreeT>;

    /// @brief ValueAccessor and transform constructor.
    /// @param sourceAccessor ValueAccessor into the source grid.
    /// @param sourceXform Transform for the source grid.
    /// @param targetXform Transform for the target grid.
    DualGridSampler(const AccessorType& sourceAccessor,
                    const math::Transform& sourceXform,
                    const math::Transform& targetXform)
        : mSourceAcc(&sourceAccessor)
        , mSourceXform(&sourceXform)
        , mTargetXform(&targetXform)
        , mAligned(targetXform == sourceXform)
    {
    }
    /// @brief Return the value of the source grid at the index
    /// coordinates, ijk, relative to the target grid.
    inline ValueType operator()(const Coord& ijk) const
    {
        if (mAligned) return mSourceAcc->getValue(ijk);
        const Vec3R world = mTargetXform->indexToWorld(ijk);
        return SamplerT::sample(*mSourceAcc, mSourceXform->worldToIndex(world));
    }
    /// @brief Return true if the two grids are aligned.
inline bool isAligned() const { return mAligned; } private: const AccessorType* mSourceAcc; const math::Transform* mSourceXform; const math::Transform* mTargetXform; const bool mAligned; };//Specialization of DualGridSampler //////////////////////////////////////// AlphaMask // Class to derive the normalized alpha mask template <typename GridT, typename MaskT, typename SamplerT = tools::BoxSampler, typename FloatT = float> class AlphaMask { public: static_assert(std::is_floating_point<FloatT>::value, "AlphaMask requires a floating-point value type"); using GridType = GridT; using MaskType = MaskT; using SamlerType = SamplerT; using FloatType = FloatT; AlphaMask(const GridT& grid, const MaskT& mask, FloatT min, FloatT max, bool invert) : mAcc(mask.tree()) , mSampler(mAcc, mask.transform() , grid.transform()) , mMin(min) , mInvNorm(1/(max-min)) , mInvert(invert) { assert(min < max); } inline bool operator()(const Coord& xyz, FloatT& a, FloatT& b) const { a = math::SmoothUnitStep( (mSampler(xyz) - mMin) * mInvNorm );//smooth mapping to 0->1 b = 1 - a; if (mInvert) std::swap(a,b); return a>0; } protected: using AccT = typename MaskType::ConstAccessor; AccT mAcc; tools::DualGridSampler<AccT, SamplerT> mSampler; const FloatT mMin, mInvNorm; const bool mInvert; };// AlphaMask //////////////////////////////////////// namespace local_util { inline Vec3i floorVec3(const Vec3R& v) { return Vec3i(int(std::floor(v(0))), int(std::floor(v(1))), int(std::floor(v(2)))); } inline Vec3i ceilVec3(const Vec3R& v) { return Vec3i(int(std::ceil(v(0))), int(std::ceil(v(1))), int(std::ceil(v(2)))); } inline Vec3i roundVec3(const Vec3R& v) { return Vec3i(int(::round(v(0))), int(::round(v(1))), int(::round(v(2)))); } } // namespace local_util //////////////////////////////////////// PointSampler template<class TreeT> inline bool PointSampler::sample(const TreeT& inTree, const Vec3R& inCoord, typename TreeT::ValueType& result) { return inTree.probeValue(Coord(local_util::roundVec3(inCoord)), 
result); } template<class TreeT> inline typename TreeT::ValueType PointSampler::sample(const TreeT& inTree, const Vec3R& inCoord) { return inTree.getValue(Coord(local_util::roundVec3(inCoord))); } //////////////////////////////////////// BoxSampler template<class ValueT, class TreeT, size_t N> inline void BoxSampler::getValues(ValueT (&data)[N][N][N], const TreeT& inTree, Coord ijk) { data[0][0][0] = inTree.getValue(ijk); // i, j, k ijk[2] += 1; data[0][0][1] = inTree.getValue(ijk); // i, j, k + 1 ijk[1] += 1; data[0][1][1] = inTree.getValue(ijk); // i, j+1, k + 1 ijk[2] -= 1; data[0][1][0] = inTree.getValue(ijk); // i, j+1, k ijk[0] += 1; ijk[1] -= 1; data[1][0][0] = inTree.getValue(ijk); // i+1, j, k ijk[2] += 1; data[1][0][1] = inTree.getValue(ijk); // i+1, j, k + 1 ijk[1] += 1; data[1][1][1] = inTree.getValue(ijk); // i+1, j+1, k + 1 ijk[2] -= 1; data[1][1][0] = inTree.getValue(ijk); // i+1, j+1, k } template<class ValueT, class TreeT, size_t N> inline bool BoxSampler::probeValues(ValueT (&data)[N][N][N], const TreeT& inTree, Coord ijk) { bool hasActiveValues = false; hasActiveValues |= inTree.probeValue(ijk, data[0][0][0]); // i, j, k ijk[2] += 1; hasActiveValues |= inTree.probeValue(ijk, data[0][0][1]); // i, j, k + 1 ijk[1] += 1; hasActiveValues |= inTree.probeValue(ijk, data[0][1][1]); // i, j+1, k + 1 ijk[2] -= 1; hasActiveValues |= inTree.probeValue(ijk, data[0][1][0]); // i, j+1, k ijk[0] += 1; ijk[1] -= 1; hasActiveValues |= inTree.probeValue(ijk, data[1][0][0]); // i+1, j, k ijk[2] += 1; hasActiveValues |= inTree.probeValue(ijk, data[1][0][1]); // i+1, j, k + 1 ijk[1] += 1; hasActiveValues |= inTree.probeValue(ijk, data[1][1][1]); // i+1, j+1, k + 1 ijk[2] -= 1; hasActiveValues |= inTree.probeValue(ijk, data[1][1][0]); // i+1, j+1, k return hasActiveValues; } template<class ValueT, size_t N> inline void BoxSampler::extrema(ValueT (&data)[N][N][N], ValueT& vMin, ValueT &vMax) { vMin = vMax = data[0][0][0]; vMin = math::Min(vMin, data[0][0][1]); vMax = 
math::Max(vMax, data[0][0][1]); vMin = math::Min(vMin, data[0][1][0]); vMax = math::Max(vMax, data[0][1][0]); vMin = math::Min(vMin, data[0][1][1]); vMax = math::Max(vMax, data[0][1][1]); vMin = math::Min(vMin, data[1][0][0]); vMax = math::Max(vMax, data[1][0][0]); vMin = math::Min(vMin, data[1][0][1]); vMax = math::Max(vMax, data[1][0][1]); vMin = math::Min(vMin, data[1][1][0]); vMax = math::Max(vMax, data[1][1][0]); vMin = math::Min(vMin, data[1][1][1]); vMax = math::Max(vMax, data[1][1][1]); } template<class ValueT, size_t N> inline ValueT BoxSampler::trilinearInterpolation(ValueT (&data)[N][N][N], const Vec3R& uvw) { auto _interpolate = [](const ValueT& a, const ValueT& b, double weight) { OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN const auto temp = (b - a) * weight; OPENVDB_NO_TYPE_CONVERSION_WARNING_END return static_cast<ValueT>(a + ValueT(temp)); }; // Trilinear interpolation: // The eight surrounding latice values are used to construct the result. \n // result(x,y,z) = // v000 (1-x)(1-y)(1-z) + v001 (1-x)(1-y)z + v010 (1-x)y(1-z) + v011 (1-x)yz // + v100 x(1-y)(1-z) + v101 x(1-y)z + v110 xy(1-z) + v111 xyz return _interpolate( _interpolate( _interpolate(data[0][0][0], data[0][0][1], uvw[2]), _interpolate(data[0][1][0], data[0][1][1], uvw[2]), uvw[1]), _interpolate( _interpolate(data[1][0][0], data[1][0][1], uvw[2]), _interpolate(data[1][1][0], data[1][1][1], uvw[2]), uvw[1]), uvw[0]); } template<class TreeT> inline bool BoxSampler::sample(const TreeT& inTree, const Vec3R& inCoord, typename TreeT::ValueType& result) { using ValueT = typename TreeT::ValueType; const Vec3i inIdx = local_util::floorVec3(inCoord); const Vec3R uvw = inCoord - inIdx; // Retrieve the values of the eight voxels surrounding the // fractional source coordinates. 
    ValueT data[2][2][2];

    const bool hasActiveValues = BoxSampler::probeValues(data, inTree, Coord(inIdx));

    result = BoxSampler::trilinearInterpolation(data, uvw);

    return hasActiveValues;
}


template<class TreeT>
inline typename TreeT::ValueType
BoxSampler::sample(const TreeT& inTree, const Vec3R& inCoord)
{
    using ValueT = typename TreeT::ValueType;

    // uvw holds the fractional position of inCoord within its cell.
    const Vec3i inIdx = local_util::floorVec3(inCoord);
    const Vec3R uvw = inCoord - inIdx;

    // Retrieve the values of the eight voxels surrounding the
    // fractional source coordinates.
    ValueT data[2][2][2];

    BoxSampler::getValues(data, inTree, Coord(inIdx));

    return BoxSampler::trilinearInterpolation(data, uvw);
}


//////////////////////////////////////// QuadraticSampler

template<class ValueT, size_t N>
inline ValueT
QuadraticSampler::triquadraticInterpolation(ValueT (&data)[N][N][N], const Vec3R& uvw)
{
    // Evaluate at `weight` the parabola fitted through value[0..2]
    // (sampled at -1, 0 and 1 on the local axis).
    auto _interpolate = [](const ValueT* value, double weight)
    {
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        const ValueT
            a = static_cast<ValueT>(0.5 * (value[0] + value[2]) - value[1]),
            b = static_cast<ValueT>(0.5 * (value[2] - value[0])),
            c = static_cast<ValueT>(value[1]);
        const auto temp = weight * (weight * a + b) + c;
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
        return static_cast<ValueT>(temp);
    };

    /// @todo For vector types, interpolate over each component independently.
    ValueT vx[3];
    for (int dx = 0; dx < 3; ++dx) {
        ValueT vy[3];
        for (int dy = 0; dy < 3; ++dy) {
            // Fit a parabola to three contiguous samples in z
            // (at z=-1, z=0 and z=1), then evaluate the parabola at z',
            // where z' is the fractional part of inCoord.z, i.e.,
            // inCoord.z - inIdx.z.  The coefficients come from solving
            //
            // | (-1)^2  -1  1 || a |   | v0 |
            // |    0     0  1 || b | = | v1 |
            // |   1^2    1  1 || c |   | v2 |
            //
            // for a, b and c.
            const ValueT* vz = &data[dx][dy][0];
            vy[dy] = _interpolate(vz, uvw.z());
        }//loop over y
        // Fit a parabola to three interpolated samples in y, then
        // evaluate the parabola at y', where y' is the fractional
        // part of inCoord.y.
        vx[dx] = _interpolate(vy, uvw.y());
    }//loop over x
    // Fit a parabola to three interpolated samples in x, then
    // evaluate the parabola at the fractional part of inCoord.x.
    return _interpolate(vx, uvw.x());
}

template<class TreeT>
inline bool
QuadraticSampler::sample(const TreeT& inTree, const Vec3R& inCoord,
    typename TreeT::ValueType& result)
{
    using ValueT = typename TreeT::ValueType;

    const Vec3i inIdx = local_util::floorVec3(inCoord), inLoIdx = inIdx - Vec3i(1, 1, 1);
    const Vec3R uvw = inCoord - inIdx;

    // Retrieve the values of the 27 voxels surrounding the
    // fractional source coordinates.
    bool active = false;
    ValueT data[3][3][3];
    for (int dx = 0, ix = inLoIdx.x(); dx < 3; ++dx, ++ix) {
        for (int dy = 0, iy = inLoIdx.y(); dy < 3; ++dy, ++iy) {
            for (int dz = 0, iz = inLoIdx.z(); dz < 3; ++dz, ++iz) {
                if (inTree.probeValue(Coord(ix, iy, iz), data[dx][dy][dz])) active = true;
            }
        }
    }

    result = QuadraticSampler::triquadraticInterpolation(data, uvw);

    return active;
}

template<class TreeT>
inline typename TreeT::ValueType
QuadraticSampler::sample(const TreeT& inTree, const Vec3R& inCoord)
{
    using ValueT = typename TreeT::ValueType;

    const Vec3i inIdx = local_util::floorVec3(inCoord), inLoIdx = inIdx - Vec3i(1, 1, 1);
    const Vec3R uvw = inCoord - inIdx;

    // Retrieve the values of the 27 voxels surrounding the
    // fractional source coordinates.
    ValueT data[3][3][3];
    for (int dx = 0, ix = inLoIdx.x(); dx < 3; ++dx, ++ix) {
        for (int dy = 0, iy = inLoIdx.y(); dy < 3; ++dy, ++iy) {
            for (int dz = 0, iz = inLoIdx.z(); dz < 3; ++dz, ++iz) {
                data[dx][dy][dz] = inTree.getValue(Coord(ix, iy, iz));
            }
        }
    }

    return QuadraticSampler::triquadraticInterpolation(data, uvw);
}

//////////////////////////////////////// StaggeredPointSampler

template<class TreeT>
inline bool
StaggeredPointSampler::sample(const TreeT& inTree, const Vec3R& inCoord,
                              typename TreeT::ValueType& result)
{
    using ValueType = typename TreeT::ValueType;

    ValueType tempX, tempY, tempZ;
    bool active = false;

    // Each component is stored on a different cell face, so sample each
    // component at its own half-voxel offset.
    active = PointSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.5, 0, 0), tempX) || active;
    active = PointSampler::sample<TreeT>(inTree, inCoord + Vec3R(0, 0.5, 0), tempY) || active;
    active = PointSampler::sample<TreeT>(inTree, inCoord + Vec3R(0, 0, 0.5), tempZ) || active;

    result.x() = tempX.x();
    result.y() = tempY.y();
    result.z() = tempZ.z();

    return active;
}

template<class TreeT>
inline typename TreeT::ValueType
StaggeredPointSampler::sample(const TreeT& inTree, const Vec3R& inCoord)
{
    using ValueT = typename TreeT::ValueType;

    const ValueT tempX = PointSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.5, 0.0, 0.0));
    const ValueT tempY = PointSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.0, 0.5, 0.0));
    const ValueT tempZ = PointSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.0, 0.0, 0.5));

    return ValueT(tempX.x(), tempY.y(), tempZ.z());
}

//////////////////////////////////////// StaggeredBoxSampler

template<class TreeT>
inline bool
StaggeredBoxSampler::sample(const TreeT& inTree, const Vec3R& inCoord,
                            typename TreeT::ValueType& result)
{
    using ValueType = typename TreeT::ValueType;

    ValueType tempX, tempY, tempZ;
    tempX = tempY = tempZ = zeroVal<ValueType>();
    bool active = false;

    // Sample each vector component at its own face-centered offset.
    active = BoxSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.5, 0, 0), tempX) || active;
    active = BoxSampler::sample<TreeT>(inTree, inCoord + Vec3R(0, 0.5, 0), tempY) || active;
    active = BoxSampler::sample<TreeT>(inTree, inCoord + Vec3R(0, 0, 0.5), tempZ) || active;

    result.x() = tempX.x();
    result.y() = tempY.y();
    result.z() = tempZ.z();

    return active;
}

template<class TreeT>
inline typename TreeT::ValueType
StaggeredBoxSampler::sample(const TreeT& inTree, const Vec3R& inCoord)
{
    using ValueT = typename TreeT::ValueType;

    const ValueT tempX = BoxSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.5, 0.0, 0.0));
    const ValueT tempY = BoxSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.0, 0.5, 0.0));
    const ValueT tempZ = BoxSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.0, 0.0, 0.5));

    return ValueT(tempX.x(), tempY.y(), tempZ.z());
}

//////////////////////////////////////// StaggeredQuadraticSampler

template<class TreeT>
inline bool
StaggeredQuadraticSampler::sample(const TreeT& inTree, const Vec3R& inCoord,
    typename TreeT::ValueType& result)
{
    using ValueType = typename TreeT::ValueType;

    ValueType tempX, tempY, tempZ;
    bool active = false;

    // Sample each vector component at its own face-centered offset.
    active = QuadraticSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.5, 0, 0), tempX) || active;
    active = QuadraticSampler::sample<TreeT>(inTree, inCoord + Vec3R(0, 0.5, 0), tempY) || active;
    active = QuadraticSampler::sample<TreeT>(inTree, inCoord + Vec3R(0, 0, 0.5), tempZ) || active;

    result.x() = tempX.x();
    result.y() = tempY.y();
    result.z() = tempZ.z();

    return active;
}

template<class TreeT>
inline typename TreeT::ValueType
StaggeredQuadraticSampler::sample(const TreeT& inTree, const Vec3R& inCoord)
{
    using ValueT = typename TreeT::ValueType;

    const ValueT tempX = QuadraticSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.5, 0.0, 0.0));
    const ValueT tempY = QuadraticSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.0, 0.5, 0.0));
    const ValueT tempZ = QuadraticSampler::sample<TreeT>(inTree, inCoord + Vec3R(0.0, 0.0, 0.5));

    return ValueT(tempX.x(), tempY.y(), tempZ.z());
}

//////////////////////////////////////// Sampler

// Map the (Order, Staggered) template parameters onto the concrete samplers above.

template <>
struct Sampler<0, false> : public PointSampler {};

template <>
struct Sampler<1, false> : public BoxSampler {};

template <>
struct Sampler<2, false> : public QuadraticSampler {};

template <>
struct Sampler<0, true> : public StaggeredPointSampler {};

template <>
struct Sampler<1, true> : public StaggeredBoxSampler {};

template <>
struct Sampler<2, true> : public StaggeredQuadraticSampler {};

} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TOOLS_INTERPOLATION_HAS_BEEN_INCLUDED
36,124
C
34.486248
98
0.642398
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/VolumeAdvect.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /////////////////////////////////////////////////////////////////////////// // /// @author Ken Museth /// /// @file tools/VolumeAdvect.h /// /// @brief Sparse hyperbolic advection of volumes, e.g. a density or /// velocity (vs a level set interface). #ifndef OPENVDB_TOOLS_VOLUME_ADVECT_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_VOLUME_ADVECT_HAS_BEEN_INCLUDED #include <tbb/parallel_for.h> #include <openvdb/Types.h> #include <openvdb/math/Math.h> #include <openvdb/util/NullInterrupter.h> #include "Interpolation.h"// for Sampler #include "VelocityFields.h" // for VelocityIntegrator #include "Morphology.h"//for dilateActiveValues and dilateVoxels #include "Prune.h"// for prune #include "Statistics.h" // for extrema #include <functional> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { namespace Scheme { /// @brief Numerical advections schemes. enum SemiLagrangian { SEMI, MID, RK3, RK4, MAC, BFECC }; /// @brief Flux-limiters employed to stabalize the second-order /// advection schemes MacCormack and BFECC. enum Limiter { NO_LIMITER, CLAMP, REVERT }; } /// @brief Performs advections of an arbitrary type of volume in a /// static velocity field. The advections are performed by means /// of various derivatives of Semi-Lagrangian integration, i.e. /// backwards tracking along the hyperbolic characteristics /// followed by interpolation. /// /// @note Optionally a limiter can be combined with the higher-order /// integration schemes MacCormack and BFECC. There are two /// types of limiters (CLAMP and REVERT) that supress /// non-physical oscillations by means of either claminging or /// reverting to a first-order schemes when the function is not /// bounded by the cell values used for tri-linear interpolation. 
///
/// @verbatim The supported integration schemes:
///
/// ================================================================
/// |  Label | Accuracy |  Integration Scheme   |  Interpolations  |
/// |        |Time/Space|                       |  velocity/volume |
/// ================================================================
/// |  SEMI  |   1/1    | Semi-Lagrangian       |        1/1       |
/// |  MID   |   2/1    | Mid-Point             |        2/1       |
/// |  RK3   |   3/1    | 3rd Order Runge-Kutta |        3/1       |
/// |  RK4   |   4/1    | 4th Order Runge-Kutta |        4/1       |
/// |  MAC   |   2/2    | MacCormack            |        2/2       |
/// |  BFECC |   2/2    | BFECC                 |        3/2       |
/// ================================================================
/// @endverbatim
template<typename VelocityGridT = Vec3fGrid,
         bool StaggeredVelocity = false,
         typename InterrupterType = util::NullInterrupter>
class VolumeAdvection
{
public:
    /// @brief Constructor
    ///
    /// @param velGrid     Velocity grid responsible for the (passive) advection.
    /// @param interrupter Optional interrupter used to prematurely end computations.
    ///
    /// @note The velocity field is assumed to be constant for the duration of the
    ///       advection.
    VolumeAdvection(const VelocityGridT& velGrid, InterrupterType* interrupter = nullptr)
        : mVelGrid(velGrid)
        , mInterrupter(interrupter)
        , mIntegrator( Scheme::SEMI )
        , mLimiter( Scheme::CLAMP )
        , mGrainSize( 128 )
        , mSubSteps( 1 )
    {
        // Cache the maximum velocity magnitude (including the background value)
        // so getMaxDistance() can bound how far material can travel per step.
        math::Extrema e = extrema(velGrid.cbeginValueAll(), /*threading*/true);
        e.add(velGrid.background().length());
        mMaxVelocity = e.max();
    }

    virtual ~VolumeAdvection()
    {
    }

    /// @brief Return the spatial order of accuracy of the advection scheme
    ///
    /// @note This is the optimal order in smooth regions. In
    /// non-smooth regions the flux-limiter will drop the order of
    /// accuracy to add numerical dissipation.
    int spatialOrder() const {
        return (mIntegrator == Scheme::MAC ||
                mIntegrator == Scheme::BFECC) ? 2 : 1;
    }

    /// @brief Return the temporal order of accuracy of the advection scheme
    ///
    /// @note This is the optimal order in smooth regions. In
    /// non-smooth regions the flux-limiter will drop the order of
    /// accuracy to add numerical dissipation.
    int temporalOrder() const {
        switch (mIntegrator) {
        case Scheme::SEMI: return 1;
        case Scheme::MID:  return 2;
        case Scheme::RK3:  return 3;
        case Scheme::RK4:  return 4;
        case Scheme::BFECC:return 2;
        case Scheme::MAC:  return 2;
        }
        return 0;//should never reach this point
    }

    /// @brief Set the integrator (see details in the table above)
    void setIntegrator(Scheme::SemiLagrangian integrator) { mIntegrator = integrator; }

    /// @brief Return the integrator (see details in the table above)
    Scheme::SemiLagrangian getIntegrator() const { return mIntegrator; }

    /// @brief Set the limiter (see details above)
    void setLimiter(Scheme::Limiter limiter) { mLimiter = limiter; }

    /// @brief Return the limiter (see details above)
    Scheme::Limiter getLimiter() const { return mLimiter; }

    /// @brief Return @c true if a limiter will be applied based on
    /// the current settings.
    bool isLimiterOn() const { return this->spatialOrder()>1 &&
                                      mLimiter != Scheme::NO_LIMITER; }

    /// @return the grain-size used for multi-threading
    /// @note A grainsize of 0 implies serial execution
    size_t getGrainSize() const { return mGrainSize; }

    /// @brief Set the grain-size used for multi-threading
    /// @note A grainsize of 0 disables multi-threading
    /// @warning A small grainsize can degrade performance,
    ///          both in terms of time and memory footprint!
    void setGrainSize(size_t grainsize) { mGrainSize = grainsize; }

    /// @return the number of sub-steps per integration (always larger
    /// than or equal to 1).
    int getSubSteps() const { return mSubSteps; }

    /// @brief Set the number of sub-steps per integration.
    /// @note The only reason to increase the sub-step above its
    ///       default value of one is to reduce the memory footprint
    ///       due to significant dilation. Values smaller than 1 will
    ///       be clamped to 1!
    void setSubSteps(int substeps) { mSubSteps = math::Max(1, substeps); }

    /// @brief Return the maximum magnitude of the velocity in the
    /// advection velocity field defined during construction.
    double getMaxVelocity() const { return mMaxVelocity; }

    /// @return Returns the maximum distance in voxel units of @a inGrid
    /// that a particle can travel in the time-step @a dt when advected
    /// in the velocity field defined during construction.
    ///
    /// @details This method is useful when dilating sparse volume
    /// grids to pad boundary regions. Excessive dilation can be
    /// computationally expensive so use this method to prevent
    /// or warn against run-away computation.
    ///
    /// @throw RuntimeError if @a inGrid does not have uniform voxels.
    template<typename VolumeGridT>
    int getMaxDistance(const VolumeGridT& inGrid, double dt) const
    {
        if (!inGrid.hasUniformVoxels()) {
            OPENVDB_THROW(RuntimeError, "Volume grid does not have uniform voxels!");
        }
        // Conservative CFL-style bound: max speed * |dt| expressed in voxels.
        const double d = mMaxVelocity*math::Abs(dt)/inGrid.voxelSize()[0];
        return static_cast<int>( math::RoundUp(d) );
    }

    /// @return Returns a new grid that is the result of passive advection
    ///         of all the active values the input grid by @a timeStep.
    ///
    /// @param inGrid   The input grid to be advected (unmodified)
    /// @param timeStep Time-step of the Runge-Kutta integrator.
    ///
    /// @details This method will advect all of the active values in
    ///          the input @a inGrid. To achieve this a
    ///          deep-copy is dilated to account for the material
    ///          transport. This dilation step can be slow for large
    ///          time steps @a dt or a velocity field with large magnitudes.
    ///
    /// @warning If the VolumeSamplerT is of higher order than one
    ///          (i.e. tri-linear interpolation) instabilities are
    ///          known to occur. To suppress those monotonicity
    ///          constraints or flux-limiters need to be applied.
    ///
    /// @throw RuntimeError if @a inGrid does not have uniform voxels.
    template<typename VolumeGridT,
             typename VolumeSamplerT>//only C++11 allows for a default argument
    typename VolumeGridT::Ptr advect(const VolumeGridT& inGrid, double timeStep)
    {
        typename VolumeGridT::Ptr outGrid = inGrid.deepCopy();
        const double dt = timeStep/mSubSteps;
        const int n = this->getMaxDistance(inGrid, dt);
        // Dilate so the narrow band can hold material transported during this step.
        dilateActiveValues( outGrid->tree(), n, NN_FACE, EXPAND_TILES);
        this->template cook<VolumeGridT, VolumeSamplerT>(*outGrid, inGrid, dt);
        // Optional sub-stepping: repeat dilate + cook, ping-ponging grids.
        for (int step = 1; step < mSubSteps; ++step) {
            typename VolumeGridT::Ptr tmpGrid = outGrid->deepCopy();
            dilateActiveValues( tmpGrid->tree(), n, NN_FACE, EXPAND_TILES);
            this->template cook<VolumeGridT, VolumeSamplerT>(*tmpGrid, *outGrid, dt);
            outGrid.swap( tmpGrid );
        }

        return outGrid;
    }

    /// @return Returns a new grid that is the result of
    ///         passive advection of the active values in @a inGrid
    ///         that intersect the active values in @c mask. The time
    ///         of the output grid is incremented by @a timeStep.
    ///
    /// @param inGrid   The input grid to be advected (unmodified).
    /// @param mask     The mask of active values defining the active voxels
    ///                 in @c inGrid on which to perform advection. Only
    ///                 if a value is active in both grids will it be modified.
    /// @param timeStep Time-step for a single Runge-Kutta integration step.
    ///
    /// @details This method will advect all of the active values in
    ///          the input @a inGrid that intersects with the
    ///          active values in @a mask. To achieve this a
    ///          deep-copy is dilated to account for the material
    ///          transport and finally cropped to the intersection
    ///          with @a mask. The dilation step can be slow for large
    ///          time steps @a dt or fast moving velocity fields.
    ///
    /// @warning If the VolumeSamplerT is of higher order than one
    ///          (i.e. tri-linear interpolation) instabilities are
    ///          known to occur. To suppress those monotonicity
    ///          constraints or flux-limiters need to be applied.
    ///
    /// @throw RuntimeError if @a inGrid is not aligned with @a mask
    ///        or if its voxels are not uniform.
    template<typename VolumeGridT,
             typename MaskGridT,
             typename VolumeSamplerT>//only C++11 allows for a default argument
    typename VolumeGridT::Ptr advect(const VolumeGridT& inGrid, const MaskGridT& mask, double timeStep)
    {
        if (inGrid.transform() != mask.transform()) {
            OPENVDB_THROW(RuntimeError, "Volume grid and mask grid are misaligned! Consider "
                "resampling either of the two grids into the index space of the other.");
        }
        typename VolumeGridT::Ptr outGrid = inGrid.deepCopy();
        const double dt = timeStep/mSubSteps;
        const int n = this->getMaxDistance(inGrid, dt);
        // Dilate, then crop to the mask and prune before cooking each sub-step.
        dilateActiveValues( outGrid->tree(), n, NN_FACE, EXPAND_TILES);
        outGrid->topologyIntersection( mask );
        pruneInactive( outGrid->tree(), mGrainSize>0, mGrainSize );
        this->template cook<VolumeGridT, VolumeSamplerT>(*outGrid, inGrid, dt);
        outGrid->topologyUnion( inGrid );

        for (int step = 1; step < mSubSteps; ++step) {
            typename VolumeGridT::Ptr tmpGrid = outGrid->deepCopy();
            dilateActiveValues( tmpGrid->tree(), n, NN_FACE, EXPAND_TILES);
            tmpGrid->topologyIntersection( mask );
            pruneInactive( tmpGrid->tree(), mGrainSize>0, mGrainSize );
            this->template cook<VolumeGridT, VolumeSamplerT>(*tmpGrid, *outGrid, dt);
            tmpGrid->topologyUnion( inGrid );
            outGrid.swap( tmpGrid );
        }
        return outGrid;
    }

private:
    // disallow copy construction and copy by assignment!
    VolumeAdvection(const VolumeAdvection&);// not implemented
    VolumeAdvection& operator=(const VolumeAdvection&);// not implemented

    void start(const char* str) const { if (mInterrupter) mInterrupter->start(str); }
    void stop() const { if (mInterrupter) mInterrupter->end(); }
    bool interrupt() const
    {
        if (mInterrupter && util::wasInterrupted(mInterrupter)) {
            tbb::task::self().cancel_group_execution();
            return true;
        }
        return false;
    }

    // Dispatch on the selected integrator: the Runge-Kutta order is a
    // compile-time parameter of Advect, so each scheme needs its own instantiation.
    // MAC and BFECC use first-order steps internally (order 1).
    template<typename VolumeGridT, typename VolumeSamplerT>
    void cook(VolumeGridT& outGrid, const VolumeGridT& inGrid, double dt)
    {
        switch (mIntegrator) {
        case Scheme::SEMI: {
            Advect<VolumeGridT, 1, VolumeSamplerT> adv(inGrid, *this);
            adv.cook(outGrid, dt);
            break;
        }
        case Scheme::MID: {
            Advect<VolumeGridT, 2, VolumeSamplerT> adv(inGrid, *this);
            adv.cook(outGrid, dt);
            break;
        }
        case Scheme::RK3: {
            Advect<VolumeGridT, 3, VolumeSamplerT> adv(inGrid, *this);
            adv.cook(outGrid, dt);
            break;
        }
        case Scheme::RK4: {
            Advect<VolumeGridT, 4, VolumeSamplerT> adv(inGrid, *this);
            adv.cook(outGrid, dt);
            break;
        }
        case Scheme::BFECC: {
            Advect<VolumeGridT, 1, VolumeSamplerT> adv(inGrid, *this);
            adv.cook(outGrid, dt);
            break;
        }
        case Scheme::MAC: {
            Advect<VolumeGridT, 1, VolumeSamplerT> adv(inGrid, *this);
            adv.cook(outGrid, dt);
            break;
        }
        default:
            OPENVDB_THROW(ValueError, "Spatial difference scheme not supported!");
        }
        pruneInactive(outGrid.tree(), mGrainSize>0, mGrainSize);
    }

    // Private class that implements the multi-threaded advection
    template<typename VolumeGridT, size_t OrderRK, typename SamplerT>
    struct Advect;

    // Private member data of VolumeAdvection
    const VelocityGridT&   mVelGrid;
    double                 mMaxVelocity;
    InterrupterType*       mInterrupter;
    Scheme::SemiLagrangian mIntegrator;
    Scheme::Limiter        mLimiter;
    size_t                 mGrainSize;
    int                    mSubSteps;
};//end of VolumeAdvection class

// Private class that implements the multi-threaded advection
template<typename VelocityGridT, bool StaggeredVelocity, typename InterrupterType>
template<typename VolumeGridT, size_t OrderRK, typename SamplerT>
struct VolumeAdvection<VelocityGridT, StaggeredVelocity, InterrupterType>::Advect
{
    using TreeT = typename VolumeGridT::TreeType;
    using AccT  = typename VolumeGridT::ConstAccessor;
    using ValueT = typename TreeT::ValueType;
    using LeafManagerT = typename tree::LeafManager<TreeT>;
    using LeafNodeT = typename LeafManagerT::LeafNodeType;
    using LeafRangeT = typename LeafManagerT::LeafRange;
    using VelocityIntegratorT = VelocityIntegrator<VelocityGridT, StaggeredVelocity>;
    using RealT = typename VelocityIntegratorT::ElementType;
    using VoxelIterT = typename TreeT::LeafNodeType::ValueOnIter;

    Advect(const VolumeGridT& inGrid, const VolumeAdvection& parent)
        : mTask(nullptr)
        , mInGrid(&inGrid)
        , mVelocityInt(parent.mVelGrid)
        , mParent(&parent)
    {
    }

    // Run the currently bound mTask over the leaf range, threaded when the
    // parent's grain size is non-zero.
    inline void cook(const LeafRangeT& range)
    {
        if (mParent->mGrainSize > 0) {
            tbb::parallel_for(range, *this);
        } else {
            (*this)(range);
        }
    }

    // TBB body: forwards to whichever member-function task is currently bound.
    void operator()(const LeafRangeT& range) const
    {
        assert(mTask);
        mTask(const_cast<Advect*>(this), range);
    }

    // Orchestrate one advection step: binds the per-leaf kernels (rk, mac,
    // bfecc, limiter) in the order required by the selected scheme.
    // Aux buffer 1 is only allocated for the second-order schemes.
    void cook(VolumeGridT& outGrid, double time_step)
    {
        namespace ph = std::placeholders;
        mParent->start("Advecting volume");
        LeafManagerT manager(outGrid.tree(), mParent->spatialOrder()==2 ? 1 : 0);
        const LeafRangeT range = manager.leafRange(mParent->mGrainSize);
        const RealT dt = static_cast<RealT>(-time_step);//method of characteristics backtracks
        if (mParent->mIntegrator == Scheme::MAC) {
            mTask = std::bind(&Advect::rk, ph::_1, ph::_2, dt, 0, mInGrid);//out[0]=forward
            this->cook(range);
            mTask = std::bind(&Advect::rk, ph::_1, ph::_2,-dt, 1, &outGrid);//out[1]=backward
            this->cook(range);
            mTask = std::bind(&Advect::mac, ph::_1, ph::_2);//out[0] = out[0] + (in[0] - out[1])/2
            this->cook(range);
        } else if (mParent->mIntegrator == Scheme::BFECC) {
            mTask = std::bind(&Advect::rk, ph::_1, ph::_2, dt, 0, mInGrid);//out[0]=forward
            this->cook(range);
            mTask = std::bind(&Advect::rk, ph::_1, ph::_2,-dt, 1, &outGrid);//out[1]=backward
            this->cook(range);
            mTask = std::bind(&Advect::bfecc, ph::_1, ph::_2);//out[0] = (3*in[0] - out[1])/2
            this->cook(range);
            mTask = std::bind(&Advect::rk, ph::_1, ph::_2, dt, 1, &outGrid);//out[1]=forward
            this->cook(range);
            manager.swapLeafBuffer(1);// out[0] = out[1]
        } else {// SEMI, MID, RK3 and RK4
            mTask = std::bind(&Advect::rk, ph::_1, ph::_2, dt, 0, mInGrid);//forward
            this->cook(range);
        }

        if (mParent->spatialOrder()==2) manager.removeAuxBuffers();

        mTask = std::bind(&Advect::limiter, ph::_1, ph::_2, dt);// out[0] = limiter( out[0] )
        this->cook(range);

        mParent->stop();
    }

    // Last step of the MacCormack scheme: out[0] = out[0] + (in[0] - out[1])/2
    void mac(const LeafRangeT& range) const
    {
        if (mParent->interrupt()) return;
        assert( mParent->mIntegrator == Scheme::MAC );
        AccT acc = mInGrid->getAccessor();
        for (typename LeafRangeT::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
            ValueT* out0 = leafIter.buffer( 0 ).data();// forward
            const ValueT* out1 = leafIter.buffer( 1 ).data();// backward
            const LeafNodeT* leaf = acc.probeConstLeaf( leafIter->origin() );
            if (leaf != nullptr) {
                // Fast path: input leaf exists, read values directly from its buffer.
                const ValueT* in0 = leaf->buffer().data();
                for (VoxelIterT voxelIter = leafIter->beginValueOn(); voxelIter; ++voxelIter) {
                    const Index i = voxelIter.pos();
                    out0[i] += RealT(0.5) * ( in0[i] - out1[i] );
                }
            } else {
                // Slow path: fall back to accessor lookups per voxel.
                for (VoxelIterT voxelIter = leafIter->beginValueOn(); voxelIter; ++voxelIter) {
                    const Index i = voxelIter.pos();
                    out0[i] += RealT(0.5) * ( acc.getValue(voxelIter.getCoord()) - out1[i] );
                }//loop over active voxels
            }
        }//loop over leaf nodes
    }

    // Intermediate step in the BFECC scheme: out[0] = (3*in[0] - out[1])/2
    void bfecc(const LeafRangeT& range) const
    {
        if (mParent->interrupt()) return;
        assert( mParent->mIntegrator == Scheme::BFECC );
        AccT acc = mInGrid->getAccessor();
        for (typename LeafRangeT::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
            ValueT* out0 = leafIter.buffer( 0 ).data();// forward
            const ValueT* out1 = leafIter.buffer( 1 ).data();// backward
            const LeafNodeT* leaf = acc.probeConstLeaf(leafIter->origin());
            if (leaf != nullptr) {
                const ValueT* in0 = leaf->buffer().data();
                for (VoxelIterT voxelIter = leafIter->beginValueOn(); voxelIter; ++voxelIter) {
                    const Index i = voxelIter.pos();
                    out0[i] = RealT(0.5)*( RealT(3)*in0[i] - out1[i] );
                }//loop over active voxels
            } else {
                for (VoxelIterT voxelIter = leafIter->beginValueOn(); voxelIter; ++voxelIter) {
                    const Index i = voxelIter.pos();
                    out0[i] = RealT(0.5)*( RealT(3)*acc.getValue(voxelIter.getCoord()) - out1[i] );
                }//loop over active voxels
            }
        }//loop over leaf nodes
    }

    // Semi-Lagrangian integration with Runge-Kutta of various orders (1->4):
    // backtrack each active voxel along the characteristics and sample 'grid'
    // at the departure point, writing into leaf buffer 'n'.
    void rk(const LeafRangeT& range, RealT dt, size_t n, const VolumeGridT* grid) const
    {
        if (mParent->interrupt()) return;
        const math::Transform& xform = mInGrid->transform();
        AccT acc = grid->getAccessor();
        for (typename LeafRangeT::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
            ValueT* phi = leafIter.buffer( n ).data();
            for (VoxelIterT voxelIter = leafIter->beginValueOn(); voxelIter; ++voxelIter) {
                ValueT& value = phi[voxelIter.pos()];
                Vec3d wPos = xform.indexToWorld(voxelIter.getCoord());
                mVelocityInt.template rungeKutta<OrderRK, Vec3d>(dt, wPos);
                value = SamplerT::sample(acc, xform.worldToIndex(wPos));
            }//loop over active voxels
        }//loop over leaf nodes
    }

    // Apply the optional flux-limiter (CLAMP or REVERT) and deactivate voxels
    // whose value has returned (approximately) to the background.
    void limiter(const LeafRangeT& range, RealT dt) const
    {
        if (mParent->interrupt()) return;
        const bool doLimiter = mParent->isLimiterOn();
        const bool doClamp = mParent->mLimiter == Scheme::CLAMP;
        ValueT data[2][2][2], vMin, vMax;
        const math::Transform& xform = mInGrid->transform();
        AccT acc = mInGrid->getAccessor();
        const ValueT backg = mInGrid->background();
        for (typename LeafRangeT::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
            ValueT* phi = leafIter.buffer( 0 ).data();
            for (VoxelIterT voxelIter = leafIter->beginValueOn(); voxelIter; ++voxelIter) {
                ValueT& value = phi[voxelIter.pos()];

                if ( doLimiter ) {
                    assert(OrderRK == 1);
                    Vec3d wPos = xform.indexToWorld(voxelIter.getCoord());
                    mVelocityInt.template rungeKutta<1, Vec3d>(dt, wPos);// Explicit Euler
                    Vec3d iPos = xform.worldToIndex(wPos);
                    Coord ijk  = Coord::floor( iPos );
                    BoxSampler::getValues(data, acc, ijk);
                    BoxSampler::extrema(data, vMin, vMax);
                    if ( doClamp ) {
                        value = math::Clamp( value, vMin, vMax);
                    } else if (value < vMin || value > vMax ) {
                        // REVERT: fall back to first-order tri-linear interpolation.
                        iPos -= Vec3R(ijk[0], ijk[1], ijk[2]);//unit coordinates
                        value = BoxSampler::trilinearInterpolation( data, iPos );
                    }
                }

                if (math::isApproxEqual(value, backg, math::Delta<ValueT>::value())) {
                    value = backg;
                    leafIter->setValueOff( voxelIter.pos() );
                }
            }//loop over active voxels
        }//loop over leaf nodes
    }

    // Public member data of the private Advect class
    typename std::function<void (Advect*, const LeafRangeT&)> mTask;
    const VolumeGridT*        mInGrid;
    const VelocityIntegratorT mVelocityInt;// lightweight!
    const VolumeAdvection*    mParent;
};// end of private member class Advect

} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TOOLS_VOLUME_ADVECT_HAS_BEEN_INCLUDED
24,026
C
43.330258
103
0.595688
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/VolumeToSpheres.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file tools/VolumeToSpheres.h /// /// @brief Fill a closed level set or fog volume with adaptively-sized spheres. #ifndef OPENVDB_TOOLS_VOLUME_TO_SPHERES_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_VOLUME_TO_SPHERES_HAS_BEEN_INCLUDED #include <openvdb/tree/LeafManager.h> #include <openvdb/math/Math.h> #include "Morphology.h" // for erodeVoxels() #include "PointScatter.h" #include "LevelSetRebuild.h" #include "LevelSetUtil.h" #include "VolumeToMesh.h" #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <algorithm> // for std::min(), std::max() #include <cmath> // for std::sqrt() #include <limits> // for std::numeric_limits #include <memory> #include <random> #include <utility> // for std::pair #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Fill a closed level set or fog volume with adaptively-sized spheres. /// /// @param grid a scalar grid that defines the surface to be filled with spheres /// @param spheres an output array of 4-tuples representing the fitted spheres<BR> /// The first three components of each tuple specify the sphere center, /// and the fourth specifies the radius. /// The spheres are ordered by radius, from largest to smallest. /// @param sphereCount lower and upper bounds on the number of spheres to be generated<BR> /// The actual number will be somewhere within the bounds. /// @param overlapping toggle to allow spheres to overlap/intersect /// @param minRadius the smallest allowable sphere size, in voxel units<BR> /// @param maxRadius the largest allowable sphere size, in voxel units /// @param isovalue the voxel value that determines the surface of the volume<BR> /// The default value of zero works for signed distance fields, /// while fog volumes require a larger positive value /// (0.5 is a good initial guess). 
/// @param instanceCount the number of interior points to consider for the sphere placement<BR>
///                      Increasing this count increases the chances of finding optimal
///                      sphere sizes.
/// @param interrupter   pointer to an object adhering to the util::NullInterrupter interface
///
/// @note The minimum sphere count takes precedence over the minimum radius.
template<typename GridT, typename InterrupterT = util::NullInterrupter>
inline void
fillWithSpheres(
    const GridT& grid,
    std::vector<openvdb::Vec4s>& spheres,
    const Vec2i& sphereCount = Vec2i(1, 50),
    bool overlapping = false,
    float minRadius = 1.0,
    float maxRadius = std::numeric_limits<float>::max(),
    float isovalue = 0.0,
    int instanceCount = 10000,
    InterrupterT* interrupter = nullptr);


////////////////////////////////////////


/// @brief Accelerated closest surface point queries for narrow band level sets
/// @details Supports queries that originate at arbitrary world-space locations,
///          is not confined to the narrow band region of the input volume geometry.
template<typename GridT>
class ClosestSurfacePoint
{
public:
    using Ptr = std::unique_ptr<ClosestSurfacePoint>;
    using TreeT = typename GridT::TreeType;
    using BoolTreeT = typename TreeT::template ValueConverter<bool>::Type;
    using Index32TreeT = typename TreeT::template ValueConverter<Index32>::Type;
    using Int16TreeT = typename TreeT::template ValueConverter<Int16>::Type;

    /// @brief Extract surface points and construct a spatial acceleration structure.
    ///
    /// @return a null pointer if the initialization fails for any reason,
    ///         otherwise a unique pointer to a newly-allocated ClosestSurfacePoint object.
    ///
    /// @param grid     a scalar level set or fog volume
    /// @param isovalue the voxel value that determines the surface of the volume
    ///                 The default value of zero works for signed distance fields,
    ///                 while fog volumes require a larger positive value
    ///                 (0.5 is a good initial guess).
    /// @param interrupter pointer to an object adhering to the util::NullInterrupter interface.
    template<typename InterrupterT = util::NullInterrupter>
    static inline Ptr create(const GridT& grid, float isovalue = 0.0,
        InterrupterT* interrupter = nullptr);

    /// @brief Compute the distance from each input point to its closest surface point.
    /// @param points    input list of points in world space
    /// @param distances output list of closest surface point distances
    inline bool search(const std::vector<Vec3R>& points, std::vector<float>& distances);

    /// @brief Overwrite each input point with its closest surface point.
    /// @param points    input/output list of points in world space
    /// @param distances output list of closest surface point distances
    inline bool searchAndReplace(std::vector<Vec3R>& points, std::vector<float>& distances);

    /// @brief Tree accessor
    const Index32TreeT& indexTree() const { return *mIdxTreePt; }
    /// @brief Tree accessor
    const Int16TreeT& signTree() const { return *mSignTreePt; }

private:
    using Index32LeafT = typename Index32TreeT::LeafNodeType;
    using IndexRange = std::pair<size_t, size_t>;

    // Bounding-sphere hierarchy over surface-point leaf nodes and internal nodes.
    std::vector<Vec4R> mLeafBoundingSpheres, mNodeBoundingSpheres;
    std::vector<IndexRange> mLeafRanges;
    std::vector<const Index32LeafT*> mLeafNodes;
    PointList mSurfacePointList;
    size_t mPointListSize = 0, mMaxNodeLeafs = 0;
    typename Index32TreeT::Ptr mIdxTreePt;
    typename Int16TreeT::Ptr mSignTreePt;

    ClosestSurfacePoint() = default;
    template<typename InterrupterT = util::NullInterrupter>
    inline bool initialize(const GridT&, float isovalue, InterrupterT*);
    inline bool search(std::vector<Vec3R>&, std::vector<float>&, bool transformPoints);
};


////////////////////////////////////////

// Internal utility methods

namespace v2s_internal {

// Thread-friendly sink used to collect scattered points.
struct PointAccessor
{
    PointAccessor(std::vector<Vec3R>& points)
        : mPoints(points)
    {
    }

    void add(const Vec3R &pos)
    {
        mPoints.push_back(pos);
    }
private:
    std::vector<Vec3R>& mPoints;
};


// Computes a bounding sphere for each leaf node's surface points.
template<typename Index32LeafT>
class LeafOp
{
public:

    LeafOp(std::vector<Vec4R>& leafBoundingSpheres,
        const std::vector<const Index32LeafT*>& leafNodes,
        const math::Transform& transform,
        const PointList& surfacePointList);

    void run(bool threaded = true);

    void operator()(const tbb::blocked_range<size_t>&) const;

private:
    std::vector<Vec4R>& mLeafBoundingSpheres;
    const std::vector<const Index32LeafT*>& mLeafNodes;
    const math::Transform& mTransform;
    const PointList& mSurfacePointList;
};

template<typename Index32LeafT>
LeafOp<Index32LeafT>::LeafOp(
    std::vector<Vec4R>& leafBoundingSpheres,
    const std::vector<const Index32LeafT*>& leafNodes,
    const math::Transform& transform,
    const PointList& surfacePointList)
    : mLeafBoundingSpheres(leafBoundingSpheres)
    , mLeafNodes(leafNodes)
    , mTransform(transform)
    , mSurfacePointList(surfacePointList)
{
}

template<typename Index32LeafT>
void
LeafOp<Index32LeafT>::run(bool threaded)
{
    if (threaded) {
        tbb::parallel_for(tbb::blocked_range<size_t>(0, mLeafNodes.size()), *this);
    } else {
        (*this)(tbb::blocked_range<size_t>(0, mLeafNodes.size()));
    }
}

template<typename Index32LeafT>
void
LeafOp<Index32LeafT>::operator()(const tbb::blocked_range<size_t>& range) const
{
    typename Index32LeafT::ValueOnCIter iter;
    Vec3s avg;

    for (size_t n = range.begin(); n != range.end(); ++n) {
        // Centroid of the surface points referenced by this leaf.
        avg[0] = 0.0;
        avg[1] = 0.0;
        avg[2] = 0.0;

        int count = 0;
        for (iter = mLeafNodes[n]->cbeginValueOn(); iter; ++iter) {
            avg += mSurfacePointList[iter.getValue()];
            ++count;
        }
        if (count > 1) avg *= float(1.0 / double(count));

        // Largest squared distance from the centroid to any referenced point.
        float maxDist = 0.0;
        for (iter = mLeafNodes[n]->cbeginValueOn(); iter; ++iter) {
            float tmpDist = (mSurfacePointList[iter.getValue()] - avg).lengthSqr();
            if (tmpDist > maxDist) maxDist = tmpDist;
        }

        Vec4R& sphere = mLeafBoundingSpheres[n];
        sphere[0] = avg[0];
        sphere[1] = avg[1];
        sphere[2] = avg[2];
        // NOTE(review): maxDist is a *squared* length here, so the stored
        // "padded radius" is 2x the squared distance — presumably consumers
        // compare against squared radii; confirm against the search code.
        sphere[3] = maxDist * 2.0; // padded radius
    }
}


// Computes a bounding sphere per internal node from its leaves' spheres.
// (Definition continues beyond this chunk.)
class NodeOp
{
public:
    using IndexRange = std::pair<size_t, size_t>;

    NodeOp(std::vector<Vec4R>& nodeBoundingSpheres,
        const std::vector<IndexRange>& leafRanges,
        const
std::vector<Vec4R>& leafBoundingSpheres); inline void run(bool threaded = true); inline void operator()(const tbb::blocked_range<size_t>&) const; private: std::vector<Vec4R>& mNodeBoundingSpheres; const std::vector<IndexRange>& mLeafRanges; const std::vector<Vec4R>& mLeafBoundingSpheres; }; inline NodeOp::NodeOp(std::vector<Vec4R>& nodeBoundingSpheres, const std::vector<IndexRange>& leafRanges, const std::vector<Vec4R>& leafBoundingSpheres) : mNodeBoundingSpheres(nodeBoundingSpheres) , mLeafRanges(leafRanges) , mLeafBoundingSpheres(leafBoundingSpheres) { } inline void NodeOp::run(bool threaded) { if (threaded) { tbb::parallel_for(tbb::blocked_range<size_t>(0, mLeafRanges.size()), *this); } else { (*this)(tbb::blocked_range<size_t>(0, mLeafRanges.size())); } } inline void NodeOp::operator()(const tbb::blocked_range<size_t>& range) const { Vec3d avg, pos; for (size_t n = range.begin(); n != range.end(); ++n) { avg[0] = 0.0; avg[1] = 0.0; avg[2] = 0.0; int count = int(mLeafRanges[n].second) - int(mLeafRanges[n].first); for (size_t i = mLeafRanges[n].first; i < mLeafRanges[n].second; ++i) { avg[0] += mLeafBoundingSpheres[i][0]; avg[1] += mLeafBoundingSpheres[i][1]; avg[2] += mLeafBoundingSpheres[i][2]; } if (count > 1) avg *= float(1.0 / double(count)); double maxDist = 0.0; for (size_t i = mLeafRanges[n].first; i < mLeafRanges[n].second; ++i) { pos[0] = mLeafBoundingSpheres[i][0]; pos[1] = mLeafBoundingSpheres[i][1]; pos[2] = mLeafBoundingSpheres[i][2]; const auto radiusSqr = mLeafBoundingSpheres[i][3]; double tmpDist = (pos - avg).lengthSqr() + radiusSqr; if (tmpDist > maxDist) maxDist = tmpDist; } Vec4R& sphere = mNodeBoundingSpheres[n]; sphere[0] = avg[0]; sphere[1] = avg[1]; sphere[2] = avg[2]; sphere[3] = maxDist * 2.0; // padded radius } } //////////////////////////////////////// template<typename Index32LeafT> class ClosestPointDist { public: using IndexRange = std::pair<size_t, size_t>; ClosestPointDist( std::vector<Vec3R>& instancePoints, std::vector<float>& 
instanceDistances, const PointList& surfacePointList, const std::vector<const Index32LeafT*>& leafNodes, const std::vector<IndexRange>& leafRanges, const std::vector<Vec4R>& leafBoundingSpheres, const std::vector<Vec4R>& nodeBoundingSpheres, size_t maxNodeLeafs, bool transformPoints = false); void run(bool threaded = true); void operator()(const tbb::blocked_range<size_t>&) const; private: void evalLeaf(size_t index, const Index32LeafT& leaf) const; void evalNode(size_t pointIndex, size_t nodeIndex) const; std::vector<Vec3R>& mInstancePoints; std::vector<float>& mInstanceDistances; const PointList& mSurfacePointList; const std::vector<const Index32LeafT*>& mLeafNodes; const std::vector<IndexRange>& mLeafRanges; const std::vector<Vec4R>& mLeafBoundingSpheres; const std::vector<Vec4R>& mNodeBoundingSpheres; std::vector<float> mLeafDistances, mNodeDistances; const bool mTransformPoints; size_t mClosestPointIndex; };// ClosestPointDist template<typename Index32LeafT> ClosestPointDist<Index32LeafT>::ClosestPointDist( std::vector<Vec3R>& instancePoints, std::vector<float>& instanceDistances, const PointList& surfacePointList, const std::vector<const Index32LeafT*>& leafNodes, const std::vector<IndexRange>& leafRanges, const std::vector<Vec4R>& leafBoundingSpheres, const std::vector<Vec4R>& nodeBoundingSpheres, size_t maxNodeLeafs, bool transformPoints) : mInstancePoints(instancePoints) , mInstanceDistances(instanceDistances) , mSurfacePointList(surfacePointList) , mLeafNodes(leafNodes) , mLeafRanges(leafRanges) , mLeafBoundingSpheres(leafBoundingSpheres) , mNodeBoundingSpheres(nodeBoundingSpheres) , mLeafDistances(maxNodeLeafs, 0.0) , mNodeDistances(leafRanges.size(), 0.0) , mTransformPoints(transformPoints) , mClosestPointIndex(0) { } template<typename Index32LeafT> void ClosestPointDist<Index32LeafT>::run(bool threaded) { if (threaded) { tbb::parallel_for(tbb::blocked_range<size_t>(0, mInstancePoints.size()), *this); } else { (*this)(tbb::blocked_range<size_t>(0, 
mInstancePoints.size())); } } template<typename Index32LeafT> void ClosestPointDist<Index32LeafT>::evalLeaf(size_t index, const Index32LeafT& leaf) const { typename Index32LeafT::ValueOnCIter iter; const Vec3s center = mInstancePoints[index]; size_t& closestPointIndex = const_cast<size_t&>(mClosestPointIndex); for (iter = leaf.cbeginValueOn(); iter; ++iter) { const Vec3s& point = mSurfacePointList[iter.getValue()]; float tmpDist = (point - center).lengthSqr(); if (tmpDist < mInstanceDistances[index]) { mInstanceDistances[index] = tmpDist; closestPointIndex = iter.getValue(); } } } template<typename Index32LeafT> void ClosestPointDist<Index32LeafT>::evalNode(size_t pointIndex, size_t nodeIndex) const { if (nodeIndex >= mLeafRanges.size()) return; const Vec3R& pos = mInstancePoints[pointIndex]; float minDist = mInstanceDistances[pointIndex]; size_t minDistIdx = 0; Vec3R center; bool updatedDist = false; for (size_t i = mLeafRanges[nodeIndex].first, n = 0; i < mLeafRanges[nodeIndex].second; ++i, ++n) { float& distToLeaf = const_cast<float&>(mLeafDistances[n]); center[0] = mLeafBoundingSpheres[i][0]; center[1] = mLeafBoundingSpheres[i][1]; center[2] = mLeafBoundingSpheres[i][2]; const auto radiusSqr = mLeafBoundingSpheres[i][3]; distToLeaf = float(std::max(0.0, (pos - center).lengthSqr() - radiusSqr)); if (distToLeaf < minDist) { minDist = distToLeaf; minDistIdx = i; updatedDist = true; } } if (!updatedDist) return; evalLeaf(pointIndex, *mLeafNodes[minDistIdx]); for (size_t i = mLeafRanges[nodeIndex].first, n = 0; i < mLeafRanges[nodeIndex].second; ++i, ++n) { if (mLeafDistances[n] < mInstanceDistances[pointIndex] && i != minDistIdx) { evalLeaf(pointIndex, *mLeafNodes[i]); } } } template<typename Index32LeafT> void ClosestPointDist<Index32LeafT>::operator()(const tbb::blocked_range<size_t>& range) const { Vec3R center; for (size_t n = range.begin(); n != range.end(); ++n) { const Vec3R& pos = mInstancePoints[n]; float minDist = mInstanceDistances[n]; size_t minDistIdx 
= 0; for (size_t i = 0, I = mNodeDistances.size(); i < I; ++i) { float& distToNode = const_cast<float&>(mNodeDistances[i]); center[0] = mNodeBoundingSpheres[i][0]; center[1] = mNodeBoundingSpheres[i][1]; center[2] = mNodeBoundingSpheres[i][2]; const auto radiusSqr = mNodeBoundingSpheres[i][3]; distToNode = float(std::max(0.0, (pos - center).lengthSqr() - radiusSqr)); if (distToNode < minDist) { minDist = distToNode; minDistIdx = i; } } evalNode(n, minDistIdx); for (size_t i = 0, I = mNodeDistances.size(); i < I; ++i) { if (mNodeDistances[i] < mInstanceDistances[n] && i != minDistIdx) { evalNode(n, i); } } mInstanceDistances[n] = std::sqrt(mInstanceDistances[n]); if (mTransformPoints) mInstancePoints[n] = mSurfacePointList[mClosestPointIndex]; } } class UpdatePoints { public: UpdatePoints( const Vec4s& sphere, const std::vector<Vec3R>& points, std::vector<float>& distances, std::vector<unsigned char>& mask, bool overlapping); float radius() const { return mRadius; } int index() const { return mIndex; } inline void run(bool threaded = true); UpdatePoints(UpdatePoints&, tbb::split); inline void operator()(const tbb::blocked_range<size_t>& range); void join(const UpdatePoints& rhs) { if (rhs.mRadius > mRadius) { mRadius = rhs.mRadius; mIndex = rhs.mIndex; } } private: const Vec4s& mSphere; const std::vector<Vec3R>& mPoints; std::vector<float>& mDistances; std::vector<unsigned char>& mMask; bool mOverlapping; float mRadius; int mIndex; }; inline UpdatePoints::UpdatePoints( const Vec4s& sphere, const std::vector<Vec3R>& points, std::vector<float>& distances, std::vector<unsigned char>& mask, bool overlapping) : mSphere(sphere) , mPoints(points) , mDistances(distances) , mMask(mask) , mOverlapping(overlapping) , mRadius(0.0) , mIndex(0) { } inline UpdatePoints::UpdatePoints(UpdatePoints& rhs, tbb::split) : mSphere(rhs.mSphere) , mPoints(rhs.mPoints) , mDistances(rhs.mDistances) , mMask(rhs.mMask) , mOverlapping(rhs.mOverlapping) , mRadius(rhs.mRadius) , mIndex(rhs.mIndex) 
{ } inline void UpdatePoints::run(bool threaded) { if (threaded) { tbb::parallel_reduce(tbb::blocked_range<size_t>(0, mPoints.size()), *this); } else { (*this)(tbb::blocked_range<size_t>(0, mPoints.size())); } } inline void UpdatePoints::operator()(const tbb::blocked_range<size_t>& range) { Vec3s pos; for (size_t n = range.begin(); n != range.end(); ++n) { if (mMask[n]) continue; pos.x() = float(mPoints[n].x()) - mSphere[0]; pos.y() = float(mPoints[n].y()) - mSphere[1]; pos.z() = float(mPoints[n].z()) - mSphere[2]; float dist = pos.length(); if (dist < mSphere[3]) { mMask[n] = 1; continue; } if (!mOverlapping) { mDistances[n] = std::min(mDistances[n], (dist - mSphere[3])); } if (mDistances[n] > mRadius) { mRadius = mDistances[n]; mIndex = int(n); } } } } // namespace v2s_internal //////////////////////////////////////// template<typename GridT, typename InterrupterT> inline void fillWithSpheres( const GridT& grid, std::vector<openvdb::Vec4s>& spheres, const Vec2i& sphereCount, bool overlapping, float minRadius, float maxRadius, float isovalue, int instanceCount, InterrupterT* interrupter) { spheres.clear(); if (grid.empty()) return; const int minSphereCount = sphereCount[0], maxSphereCount = sphereCount[1]; if ((minSphereCount > maxSphereCount) || (maxSphereCount < 1)) { OPENVDB_LOG_WARN("fillWithSpheres: minimum sphere count (" << minSphereCount << ") exceeds maximum count (" << maxSphereCount << ")"); return; } spheres.reserve(maxSphereCount); auto gridPtr = grid.copy(); // shallow copy if (gridPtr->getGridClass() == GRID_LEVEL_SET) { // Clamp the isovalue to the level set's background value minus epsilon. // (In a valid narrow-band level set, all voxels, including background voxels, // have values less than or equal to the background value, so an isovalue // greater than or equal to the background value would produce a mask with // effectively infinite extent.) 
isovalue = std::min(isovalue, static_cast<float>(gridPtr->background() - math::Tolerance<float>::value())); } else if (gridPtr->getGridClass() == GRID_FOG_VOLUME) { // Clamp the isovalue of a fog volume between epsilon and one, // again to avoid a mask with infinite extent. (Recall that // fog volume voxel values vary from zero outside to one inside.) isovalue = math::Clamp(isovalue, math::Tolerance<float>::value(), 1.f); } // ClosestSurfacePoint is inaccurate for small grids. // Resample the input grid if it is too small. auto numVoxels = gridPtr->activeVoxelCount(); if (numVoxels < 10000) { const auto scale = 1.0 / math::Cbrt(2.0 * 10000.0 / double(numVoxels)); auto scaledXform = gridPtr->transform().copy(); scaledXform->preScale(scale); auto newGridPtr = levelSetRebuild(*gridPtr, isovalue, LEVEL_SET_HALF_WIDTH, LEVEL_SET_HALF_WIDTH, scaledXform.get(), interrupter); const auto newNumVoxels = newGridPtr->activeVoxelCount(); if (newNumVoxels > numVoxels) { OPENVDB_LOG_DEBUG_RUNTIME("fillWithSpheres: resampled input grid from " << numVoxels << " voxel" << (numVoxels == 1 ? "" : "s") << " to " << newNumVoxels << " voxel" << (newNumVoxels == 1 ? "" : "s")); gridPtr = newGridPtr; numVoxels = newNumVoxels; } } const bool addNarrowBandPoints = (numVoxels < 10000); int instances = std::max(instanceCount, maxSphereCount); using TreeT = typename GridT::TreeType; using BoolTreeT = typename TreeT::template ValueConverter<bool>::Type; using Int16TreeT = typename TreeT::template ValueConverter<Int16>::Type; using RandGen = std::mersenne_twister_engine<uint32_t, 32, 351, 175, 19, 0xccab8ee7, 11, 0xffffffff, 7, 0x31b6ab00, 15, 0xffe50000, 17, 1812433253>; // mt11213b RandGen mtRand(/*seed=*/0); const TreeT& tree = gridPtr->tree(); math::Transform transform = gridPtr->transform(); std::vector<Vec3R> instancePoints; { // Compute a mask of the voxels enclosed by the isosurface. 
typename Grid<BoolTreeT>::Ptr interiorMaskPtr; if (gridPtr->getGridClass() == GRID_LEVEL_SET) { interiorMaskPtr = sdfInteriorMask(*gridPtr, isovalue); } else { // For non-level-set grids, the interior mask comprises the active voxels. interiorMaskPtr = typename Grid<BoolTreeT>::Ptr(Grid<BoolTreeT>::create(false)); interiorMaskPtr->setTransform(transform.copy()); interiorMaskPtr->tree().topologyUnion(tree); } if (interrupter && interrupter->wasInterrupted()) return; // If the interior mask is small and eroding it results in an empty grid, // use the uneroded mask instead. (But if the minimum sphere count is zero, // then eroding away the mask is acceptable.) if (!addNarrowBandPoints || (minSphereCount <= 0)) { erodeVoxels(interiorMaskPtr->tree(), 1); } else { auto& maskTree = interiorMaskPtr->tree(); auto copyOfTree = StaticPtrCast<BoolTreeT>(maskTree.copy()); erodeVoxels(maskTree, 1); if (maskTree.empty()) { interiorMaskPtr->setTree(copyOfTree); } } // Scatter candidate sphere centroids (instancePoints) instancePoints.reserve(instances); v2s_internal::PointAccessor ptnAcc(instancePoints); const auto scatterCount = Index64(addNarrowBandPoints ? (instances / 2) : instances); UniformPointScatter<v2s_internal::PointAccessor, RandGen, InterrupterT> scatter( ptnAcc, scatterCount, mtRand, 1.0, interrupter); scatter(*interiorMaskPtr); } if (interrupter && interrupter->wasInterrupted()) return; auto csp = ClosestSurfacePoint<GridT>::create(*gridPtr, isovalue, interrupter); if (!csp) return; // Add extra instance points in the interior narrow band. 
if (instancePoints.size() < size_t(instances)) { const Int16TreeT& signTree = csp->signTree(); for (auto leafIt = signTree.cbeginLeaf(); leafIt; ++leafIt) { for (auto it = leafIt->cbeginValueOn(); it; ++it) { const int flags = int(it.getValue()); if (!(volume_to_mesh_internal::EDGES & flags) && (volume_to_mesh_internal::INSIDE & flags)) { instancePoints.push_back(transform.indexToWorld(it.getCoord())); } if (instancePoints.size() == size_t(instances)) break; } if (instancePoints.size() == size_t(instances)) break; } } if (interrupter && interrupter->wasInterrupted()) return; // Assign a radius to each candidate sphere. The radius is the world-space // distance from the sphere's center to the closest surface point. std::vector<float> instanceRadius; if (!csp->search(instancePoints, instanceRadius)) return; float largestRadius = 0.0; int largestRadiusIdx = 0; for (size_t n = 0, N = instancePoints.size(); n < N; ++n) { if (instanceRadius[n] > largestRadius) { largestRadius = instanceRadius[n]; largestRadiusIdx = int(n); } } std::vector<unsigned char> instanceMask(instancePoints.size(), 0); minRadius = float(minRadius * transform.voxelSize()[0]); maxRadius = float(maxRadius * transform.voxelSize()[0]); for (size_t s = 0, S = std::min(size_t(maxSphereCount), instancePoints.size()); s < S; ++s) { if (interrupter && interrupter->wasInterrupted()) return; largestRadius = std::min(maxRadius, largestRadius); if ((int(s) >= minSphereCount) && (largestRadius < minRadius)) break; const Vec4s sphere( float(instancePoints[largestRadiusIdx].x()), float(instancePoints[largestRadiusIdx].y()), float(instancePoints[largestRadiusIdx].z()), largestRadius); spheres.push_back(sphere); instanceMask[largestRadiusIdx] = 1; v2s_internal::UpdatePoints op( sphere, instancePoints, instanceRadius, instanceMask, overlapping); op.run(); largestRadius = op.radius(); largestRadiusIdx = op.index(); } } // fillWithSpheres //////////////////////////////////////// template<typename GridT> 
template<typename InterrupterT> inline typename ClosestSurfacePoint<GridT>::Ptr ClosestSurfacePoint<GridT>::create(const GridT& grid, float isovalue, InterrupterT* interrupter) { auto csp = Ptr{new ClosestSurfacePoint}; if (!csp->initialize(grid, isovalue, interrupter)) csp.reset(); return csp; } template<typename GridT> template<typename InterrupterT> inline bool ClosestSurfacePoint<GridT>::initialize( const GridT& grid, float isovalue, InterrupterT* interrupter) { using Index32LeafManagerT = tree::LeafManager<Index32TreeT>; using ValueT = typename GridT::ValueType; const TreeT& tree = grid.tree(); const math::Transform& transform = grid.transform(); { // Extract surface point cloud BoolTreeT mask(false); volume_to_mesh_internal::identifySurfaceIntersectingVoxels(mask, tree, ValueT(isovalue)); mSignTreePt.reset(new Int16TreeT(0)); mIdxTreePt.reset(new Index32TreeT(std::numeric_limits<Index32>::max())); volume_to_mesh_internal::computeAuxiliaryData( *mSignTreePt, *mIdxTreePt, mask, tree, ValueT(isovalue)); if (interrupter && interrupter->wasInterrupted()) return false; // count unique points using Int16LeafNodeType = typename Int16TreeT::LeafNodeType; using Index32LeafNodeType = typename Index32TreeT::LeafNodeType; std::vector<Int16LeafNodeType*> signFlagsLeafNodes; mSignTreePt->getNodes(signFlagsLeafNodes); const tbb::blocked_range<size_t> auxiliaryLeafNodeRange(0, signFlagsLeafNodes.size()); std::unique_ptr<Index32[]> leafNodeOffsets(new Index32[signFlagsLeafNodes.size()]); tbb::parallel_for(auxiliaryLeafNodeRange, volume_to_mesh_internal::LeafNodePointCount<Int16LeafNodeType::LOG2DIM> (signFlagsLeafNodes, leafNodeOffsets)); { Index32 pointCount = 0; for (size_t n = 0, N = signFlagsLeafNodes.size(); n < N; ++n) { const Index32 tmp = leafNodeOffsets[n]; leafNodeOffsets[n] = pointCount; pointCount += tmp; } mPointListSize = size_t(pointCount); mSurfacePointList.reset(new Vec3s[mPointListSize]); } std::vector<Index32LeafNodeType*> pointIndexLeafNodes; 
mIdxTreePt->getNodes(pointIndexLeafNodes); tbb::parallel_for(auxiliaryLeafNodeRange, volume_to_mesh_internal::ComputePoints<TreeT>( mSurfacePointList.get(), tree, pointIndexLeafNodes, signFlagsLeafNodes, leafNodeOffsets, transform, ValueT(isovalue))); } if (interrupter && interrupter->wasInterrupted()) return false; Index32LeafManagerT idxLeafs(*mIdxTreePt); using Index32RootNodeT = typename Index32TreeT::RootNodeType; using Index32NodeChainT = typename Index32RootNodeT::NodeChainType; static_assert(Index32NodeChainT::Size > 1, "expected tree depth greater than one"); using Index32InternalNodeT = typename Index32NodeChainT::template Get<1>; typename Index32TreeT::NodeCIter nIt = mIdxTreePt->cbeginNode(); nIt.setMinDepth(Index32TreeT::NodeCIter::LEAF_DEPTH - 1); nIt.setMaxDepth(Index32TreeT::NodeCIter::LEAF_DEPTH - 1); std::vector<const Index32InternalNodeT*> internalNodes; const Index32InternalNodeT* node = nullptr; for (; nIt; ++nIt) { nIt.getNode(node); if (node) internalNodes.push_back(node); } std::vector<IndexRange>().swap(mLeafRanges); mLeafRanges.resize(internalNodes.size()); std::vector<const Index32LeafT*>().swap(mLeafNodes); mLeafNodes.reserve(idxLeafs.leafCount()); typename Index32InternalNodeT::ChildOnCIter leafIt; mMaxNodeLeafs = 0; for (size_t n = 0, N = internalNodes.size(); n < N; ++n) { mLeafRanges[n].first = mLeafNodes.size(); size_t leafCount = 0; for (leafIt = internalNodes[n]->cbeginChildOn(); leafIt; ++leafIt) { mLeafNodes.push_back(&(*leafIt)); ++leafCount; } mMaxNodeLeafs = std::max(leafCount, mMaxNodeLeafs); mLeafRanges[n].second = mLeafNodes.size(); } std::vector<Vec4R>().swap(mLeafBoundingSpheres); mLeafBoundingSpheres.resize(mLeafNodes.size()); v2s_internal::LeafOp<Index32LeafT> leafBS( mLeafBoundingSpheres, mLeafNodes, transform, mSurfacePointList); leafBS.run(); std::vector<Vec4R>().swap(mNodeBoundingSpheres); mNodeBoundingSpheres.resize(internalNodes.size()); v2s_internal::NodeOp nodeBS(mNodeBoundingSpheres, mLeafRanges, 
mLeafBoundingSpheres); nodeBS.run(); return true; } // ClosestSurfacePoint::initialize template<typename GridT> inline bool ClosestSurfacePoint<GridT>::search(std::vector<Vec3R>& points, std::vector<float>& distances, bool transformPoints) { distances.clear(); distances.resize(points.size(), std::numeric_limits<float>::infinity()); v2s_internal::ClosestPointDist<Index32LeafT> cpd(points, distances, mSurfacePointList, mLeafNodes, mLeafRanges, mLeafBoundingSpheres, mNodeBoundingSpheres, mMaxNodeLeafs, transformPoints); cpd.run(); return true; } template<typename GridT> inline bool ClosestSurfacePoint<GridT>::search(const std::vector<Vec3R>& points, std::vector<float>& distances) { return search(const_cast<std::vector<Vec3R>& >(points), distances, false); } template<typename GridT> inline bool ClosestSurfacePoint<GridT>::searchAndReplace(std::vector<Vec3R>& points, std::vector<float>& distances) { return search(points, distances, true); } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_VOLUME_TO_MESH_HAS_BEEN_INCLUDED
32,716
C
31.914487
99
0.645861
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetUtil.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file tools/LevelSetUtil.h /// /// @brief Miscellaneous utility methods that operate primarily /// or exclusively on level set grids. /// /// @author Mihai Alden #ifndef OPENVDB_TOOLS_LEVEL_SET_UTIL_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_LEVEL_SET_UTIL_HAS_BEEN_INCLUDED #include "MeshToVolume.h" // for traceExteriorBoundaries #include "SignedFloodFill.h" // for signedFloodFillWithValues #include <openvdb/Types.h> #include <openvdb/Grid.h> #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <tbb/parallel_sort.h> #include <algorithm> #include <cmath> #include <cstdlib> #include <deque> #include <limits> #include <memory> #include <set> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { // MS Visual C++ requires this extra level of indirection in order to compile // THIS MUST EXIST IN AN UNNAMED NAMESPACE IN ORDER TO COMPILE ON WINDOWS namespace { template<typename GridType> inline typename GridType::ValueType lsutilGridMax() { return std::numeric_limits<typename GridType::ValueType>::max(); } template<typename GridType> inline typename GridType::ValueType lsutilGridZero() { return zeroVal<typename GridType::ValueType>(); } } // unnamed namespace //////////////////////////////////////// /// @brief Threaded method to convert a sparse level set/SDF into a sparse fog volume /// /// @details For a level set, the active and negative-valued interior half of the /// narrow band becomes a linear ramp from 0 to 1; the inactive interior becomes /// active with a constant value of 1; and the exterior, including the background /// and the active exterior half of the narrow band, becomes inactive with a constant /// value of 0. The interior, though active, remains sparse. 
/// @details For a generic SDF, a specified cutoff distance determines the width /// of the ramp, but otherwise the result is the same as for a level set. /// /// @param grid level set/SDF grid to transform /// @param cutoffDistance optional world space cutoff distance for the ramp /// (automatically clamped if greater than the interior /// narrow band width) template<class GridType> inline void sdfToFogVolume( GridType& grid, typename GridType::ValueType cutoffDistance = lsutilGridMax<GridType>()); /// @brief Threaded method to construct a boolean mask that represents interior regions /// in a signed distance field. /// /// @return A shared pointer to either a boolean grid or tree with the same tree /// configuration and potentially transform as the input @c volume and whose active /// and @c true values correspond to the interior of the input signed distance field. /// /// @param volume Signed distance field / level set volume. /// @param isovalue Threshold below which values are considered part of the /// interior region. template<class GridOrTreeType> inline typename GridOrTreeType::template ValueConverter<bool>::Type::Ptr sdfInteriorMask( const GridOrTreeType& volume, typename GridOrTreeType::ValueType isovalue = lsutilGridZero<GridOrTreeType>()); /// @brief Extracts the interior regions of a signed distance field and topologically enclosed /// (watertight) regions of value greater than the @a isovalue (cavities) that can arise /// as the result of CSG union operations between different shapes where at least one of /// the shapes has a concavity that is capped. /// /// For example the enclosed region of a capped bottle would include the walls and /// the interior cavity. /// /// @return A shared pointer to either a boolean grid or tree with the same tree configuration /// and potentially transform as the input @c volume and whose active and @c true values /// correspond to the interior and enclosed regions in the input signed distance field. 
/// /// @param volume Signed distance field / level set volume. /// @param isovalue Threshold below which values are considered part of the interior region. /// @param fillMask Optional boolean tree, when provided enclosed cavity regions that are not /// completely filled by this mask are ignored. /// /// For instance if the fill mask does not completely fill the bottle in the /// previous example only the walls and cap are returned and the interior /// cavity will be ignored. template<typename GridOrTreeType> inline typename GridOrTreeType::template ValueConverter<bool>::Type::Ptr extractEnclosedRegion( const GridOrTreeType& volume, typename GridOrTreeType::ValueType isovalue = lsutilGridZero<GridOrTreeType>(), const typename TreeAdapter<GridOrTreeType>::TreeType::template ValueConverter<bool>::Type* fillMask = nullptr); /// @brief Return a mask of the voxels that intersect the implicit surface with /// the given @a isovalue. /// /// @param volume Signed distance field / level set volume. /// @param isovalue The crossing point that is considered the surface. template<typename GridOrTreeType> inline typename GridOrTreeType::template ValueConverter<bool>::Type::Ptr extractIsosurfaceMask(const GridOrTreeType& volume, typename GridOrTreeType::ValueType isovalue); /// @brief Return a mask for each connected component of the given grid's active voxels. /// /// @param volume Input grid or tree /// @param masks Output set of disjoint active topology masks sorted in descending order /// based on the active voxel count. template<typename GridOrTreeType> inline void extractActiveVoxelSegmentMasks(const GridOrTreeType& volume, std::vector<typename GridOrTreeType::template ValueConverter<bool>::Type::Ptr>& masks); /// @brief Separates disjoint active topology components into distinct grids or trees. /// /// @details Supports volumes with active tiles. 
///
/// @param volume    Input grid or tree
/// @param segments  Output set of disjoint active topology components sorted in
///                  descending order based on the active voxel count.
template<typename GridOrTreeType>
inline void
segmentActiveVoxels(const GridOrTreeType& volume,
    std::vector<typename GridOrTreeType::Ptr>& segments);


/// @brief Separates disjoint SDF surfaces into distinct grids or trees.
///
/// @details Supports asymmetric interior / exterior narrowband widths and
///          SDF volumes with dense interior regions.
///
/// @param volume    Input signed distance field / level set volume
/// @param segments  Output set of disjoint SDF surfaces found in @a volume sorted in
///                  descending order based on the surface intersecting voxel count.
template<typename GridOrTreeType>
inline void
segmentSDF(const GridOrTreeType& volume, std::vector<typename GridOrTreeType::Ptr>& segments);


////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

// Internal utility objects and implementation details

namespace level_set_util_internal {


/// @brief TBB body: for each input leaf node, construct a boolean leaf node whose
///        on voxels mark the input voxels with values below the isovalue (interior).
///        Output slots receive @c nullptr when no voxel qualifies.
template<typename LeafNodeType>
struct MaskInteriorVoxels {

    using ValueType = typename LeafNodeType::ValueType;
    using BoolLeafNodeType = tree::LeafNode<bool, LeafNodeType::LOG2DIM>;

    MaskInteriorVoxels(
        ValueType isovalue, const LeafNodeType ** nodes, BoolLeafNodeType ** maskNodes)
        : mNodes(nodes), mMaskNodes(maskNodes), mIsovalue(isovalue)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        BoolLeafNodeType * maskNodePt = nullptr;

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            mMaskNodes[n] = nullptr;
            const LeafNodeType& node = *mNodes[n];

            // Reuse the scratch node across iterations; it is only handed off
            // to the output array once it actually received on values.
            if (!maskNodePt) {
                maskNodePt = new BoolLeafNodeType(node.origin(), false);
            } else {
                maskNodePt->setOrigin(node.origin());
            }

            const ValueType* values = &node.getValue(0);
            for (Index i = 0; i < LeafNodeType::SIZE; ++i) {
                if (values[i] < mIsovalue) maskNodePt->setValueOn(i, true);
            }

            if (maskNodePt->onVoxelCount() > 0) {
                mMaskNodes[n] = maskNodePt;
                maskNodePt = nullptr; // ownership transferred to mMaskNodes
            }
        }

        if (maskNodePt) delete maskNodePt; // unused scratch node
    }

    LeafNodeType const * const * const mNodes;
    BoolLeafNodeType ** const mMaskNodes;
    ValueType const mIsovalue;
}; // MaskInteriorVoxels


/// @brief TBB body: turn on (and set to @c true) every tile of the given mask
///        internal nodes whose corresponding coordinate in the source tree has
///        a value below the isovalue.
template<typename TreeType, typename InternalNodeType>
struct MaskInteriorTiles {

    using ValueType = typename TreeType::ValueType;

    MaskInteriorTiles(ValueType isovalue, const TreeType& tree, InternalNodeType ** maskNodes)
        : mTree(&tree), mMaskNodes(maskNodes), mIsovalue(isovalue) { }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        tree::ValueAccessor<const TreeType> acc(*mTree);
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            typename InternalNodeType::ValueAllIter it = mMaskNodes[n]->beginValueAll();
            for (; it; ++it) {
                if (acc.getValue(it.getCoord()) < mIsovalue) {
                    it.setValue(true);
                    it.setValueOn(true);
                }
            }
        }
    }

    TreeType const * const mTree;
    InternalNodeType ** const mMaskNodes;
    ValueType const mIsovalue;
}; // MaskInteriorTiles


/// @brief tbb::parallel_reduce body that inserts pre-built leaf nodes into a tree.
///        Each split works on its own local tree (@c mNewTree) and partial results
///        are combined with @c merge() on join. A null @c mNodeIndexMap means the
///        node array is addressed directly by range index.
template<typename TreeType>
struct PopulateTree {

    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;

    PopulateTree(TreeType& tree, LeafNodeType** leafnodes,
        const size_t * nodexIndexMap, ValueType background)
        : mNewTree(background)
        , mTreePt(&tree)
        , mNodes(leafnodes)
        , mNodeIndexMap(nodexIndexMap)
    {
    }

    PopulateTree(PopulateTree& rhs, tbb::split)
        : mNewTree(rhs.mNewTree.background())
        , mTreePt(&mNewTree) // splits insert into their private tree
        , mNodes(rhs.mNodes)
        , mNodeIndexMap(rhs.mNodeIndexMap)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range)
    {
        tree::ValueAccessor<TreeType> acc(*mTreePt);

        if (mNodeIndexMap) {
            // range indexes internal nodes; the map gives each internal node's
            // [begin, end) slice of the leaf-node array (prefix sums).
            for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
                for (size_t i = mNodeIndexMap[n], I = mNodeIndexMap[n + 1]; i < I; ++i) {
                    if (mNodes[i] != nullptr) acc.addLeaf(mNodes[i]);
                }
            }
        } else {
            for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
                acc.addLeaf(mNodes[n]);
            }
        }
    }

    void join(PopulateTree& rhs) { mTreePt->merge(*rhs.mTreePt); }

private:
    TreeType mNewTree;
    TreeType * const mTreePt;
    LeafNodeType ** const mNodes;
    size_t const * const mNodeIndexMap;
}; // PopulateTree


/// @brief Negative active values are set @c 0, everything else is set to @c 1.
template<typename LeafNodeType>
struct LabelBoundaryVoxels {

    using ValueType = typename LeafNodeType::ValueType;
    using CharLeafNodeType = tree::LeafNode<char, LeafNodeType::LOG2DIM>;

    LabelBoundaryVoxels(
        ValueType isovalue, const LeafNodeType ** nodes, CharLeafNodeType ** maskNodes)
        : mNodes(nodes), mMaskNodes(maskNodes), mIsovalue(isovalue)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        CharLeafNodeType * maskNodePt = nullptr;
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            mMaskNodes[n] = nullptr;
            const LeafNodeType& node = *mNodes[n];

            // Scratch node is reused until it collects on values (see MaskInteriorVoxels).
            if (!maskNodePt) {
                maskNodePt = new CharLeafNodeType(node.origin(), 1);
            } else {
                maskNodePt->setOrigin(node.origin());
            }

            typename LeafNodeType::ValueOnCIter it;
            for (it = node.cbeginValueOn(); it; ++it) {
                maskNodePt->setValueOn(it.pos(), ((*it - mIsovalue) < 0.0) ? 0 : 1);
            }

            if (maskNodePt->onVoxelCount() > 0) {
                mMaskNodes[n] = maskNodePt;
                maskNodePt = nullptr;
            }
        }

        if (maskNodePt) delete maskNodePt;
    }

    LeafNodeType const * const * const mNodes;
    CharLeafNodeType ** const mMaskNodes;
    ValueType const mIsovalue;
}; // LabelBoundaryVoxels


/// @brief TBB body: in-place remap of every voxel value in the given leaf nodes,
///        negative values become @c 1 and non-negative values become @c -1.
template<typename LeafNodeType>
struct FlipRegionSign {
    using ValueType = typename LeafNodeType::ValueType;

    FlipRegionSign(LeafNodeType ** nodes) : mNodes(nodes) { }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            // const_cast to write through getValue's const buffer pointer.
            ValueType* values = const_cast<ValueType*>(&mNodes[n]->getValue(0));
            for (Index i = 0; i < LeafNodeType::SIZE; ++i) {
                values[i] = values[i] < 0 ? 1 : -1;
            }
        }
    }

    LeafNodeType ** const mNodes;
}; // FlipRegionSign


/// @brief tbb::parallel_reduce body computing the minimum voxel value over a
///        set of leaf nodes (scans all SIZE voxels, active or not).
template<typename LeafNodeType>
struct FindMinVoxelValue {

    using ValueType = typename LeafNodeType::ValueType;

    FindMinVoxelValue(LeafNodeType const * const * const leafnodes)
        : minValue(std::numeric_limits<ValueType>::max())
        , mNodes(leafnodes)
    {
    }

    FindMinVoxelValue(FindMinVoxelValue& rhs, tbb::split)
        : minValue(std::numeric_limits<ValueType>::max())
        , mNodes(rhs.mNodes)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) {
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            const ValueType* data = mNodes[n]->buffer().data();
            for (Index i = 0; i < LeafNodeType::SIZE; ++i) {
                minValue = std::min(minValue, data[i]);
            }
        }
    }

    void join(FindMinVoxelValue& rhs) { minValue = std::min(minValue, rhs.minValue); }

    ValueType minValue;

    LeafNodeType const * const * const mNodes;
}; // FindMinVoxelValue


/// @brief tbb::parallel_reduce body computing the minimum tile value over a
///        set of internal nodes (iterates all tile values, active or not).
template<typename InternalNodeType>
struct FindMinTileValue {

    using ValueType = typename InternalNodeType::ValueType;

    FindMinTileValue(InternalNodeType const * const * const nodes)
        : minValue(std::numeric_limits<ValueType>::max())
        , mNodes(nodes)
    {
    }

    FindMinTileValue(FindMinTileValue& rhs, tbb::split)
        : minValue(std::numeric_limits<ValueType>::max())
        , mNodes(rhs.mNodes)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) {
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            typename InternalNodeType::ValueAllCIter it = mNodes[n]->beginValueAll();
            for (; it; ++it) {
                minValue = std::min(minValue, *it);
            }
        }
    }

    void join(FindMinTileValue& rhs) { minValue = std::min(minValue, rhs.minValue); }

    ValueType minValue;

    InternalNodeType const * const * const mNodes;
}; // FindMinTileValue


/// @brief TBB body: convert SDF leaf values to fog density in place. Positive
///        (exterior) values become 0; other values are scaled by 1/cutoffDistance,
///        and voxels whose result is positive are activated. Leafs that end up
///        with no active voxels are deleted and their slot nulled.
///
/// NOTE(review): interior SDF values are negative, so the scaled result is only
/// positive when mWeight is negative — i.e. the caller apparently passes a
/// negative cutoffDistance. Confirm at the call site (sdfToFogVolume).
template<typename LeafNodeType>
struct SDFVoxelsToFogVolume {

    using ValueType = typename LeafNodeType::ValueType;

    SDFVoxelsToFogVolume(LeafNodeType ** nodes, ValueType cutoffDistance)
        : mNodes(nodes), mWeight(ValueType(1.0) / cutoffDistance)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            LeafNodeType& node = *mNodes[n];
            node.setValuesOff();

            ValueType* values = node.buffer().data();
            for (Index i = 0; i < LeafNodeType::SIZE; ++i) {
                values[i] = values[i] > ValueType(0.0) ? ValueType(0.0) : values[i] * mWeight;
                if (values[i] > ValueType(0.0)) node.setValueOn(i);
            }

            if (node.onVoxelCount() == 0) {
                delete mNodes[n];
                mNodes[n] = nullptr;
            }
        }
    }

    LeafNodeType ** const mNodes;
    ValueType const mWeight;
}; // SDFVoxelsToFogVolume


/// @brief TBB body: set every tile of the given mask internal nodes to 1 and
///        activate it when the source SDF value at that coordinate is negative
///        (interior).
template<typename TreeType, typename InternalNodeType>
struct SDFTilesToFogVolume {

    SDFTilesToFogVolume(const TreeType& tree, InternalNodeType ** nodes)
        : mTree(&tree), mNodes(nodes) { }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        using ValueType = typename TreeType::ValueType;
        tree::ValueAccessor<const TreeType> acc(*mTree);

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            typename InternalNodeType::ValueAllIter it = mNodes[n]->beginValueAll();
            for (; it; ++it) {
                if (acc.getValue(it.getCoord()) < ValueType(0.0)) {
                    it.setValue(ValueType(1.0));
                    it.setValueOn(true);
                }
            }
        }
    }

    TreeType const * const mTree;
    InternalNodeType ** const mNodes;
}; // SDFTilesToFogVolume


/// @brief TBB body: for each fill-mask leaf node, build a new boolean leaf node
///        marking the active mask voxels that border a voxel outside the mask
///        whose distance value exceeds the isovalue (i.e. the mask/exterior
///        boundary). Output slots receive @c nullptr when no voxel qualifies;
///        non-null outputs are owned by the caller.
template<typename TreeType>
struct FillMaskBoundary {

    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    FillMaskBoundary(const TreeType& tree, ValueType isovalue, const BoolTreeType& fillMask,
        const BoolLeafNodeType ** fillNodes, BoolLeafNodeType ** newNodes)
        : mTree(&tree)
        , mFillMask(&fillMask)
        , mFillNodes(fillNodes)
        , mNewNodes(newNodes)
        , mIsovalue(isovalue)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        tree::ValueAccessor<const BoolTreeType> maskAcc(*mFillMask);
        tree::ValueAccessor<const TreeType> distAcc(*mTree);

        // Per-voxel scratch flags: 1 marks a boundary voxel.
        std::unique_ptr<char[]> valueMask(new char[BoolLeafNodeType::SIZE]);

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            mNewNodes[n] = nullptr;
            const BoolLeafNodeType& node = *mFillNodes[n];
            const Coord& origin = node.origin();

            const bool denseNode = node.isDense();

            // possible early out if the fill mask is dense
            if (denseNode) {

                int denseNeighbors = 0;

                const BoolLeafNodeType* neighborNode =
                    maskAcc.probeConstLeaf(origin.offsetBy(-1, 0, 0));
                if (neighborNode && neighborNode->isDense()) ++denseNeighbors;

                neighborNode = maskAcc.probeConstLeaf(origin.offsetBy(BoolLeafNodeType::DIM, 0, 0));
                if (neighborNode && neighborNode->isDense()) ++denseNeighbors;

                neighborNode = maskAcc.probeConstLeaf(origin.offsetBy(0, -1, 0));
                if (neighborNode && neighborNode->isDense()) ++denseNeighbors;

                neighborNode = maskAcc.probeConstLeaf(origin.offsetBy(0, BoolLeafNodeType::DIM, 0));
                if (neighborNode && neighborNode->isDense()) ++denseNeighbors;

                neighborNode = maskAcc.probeConstLeaf(origin.offsetBy(0, 0, -1));
                if (neighborNode && neighborNode->isDense()) ++denseNeighbors;

                neighborNode = maskAcc.probeConstLeaf(origin.offsetBy(0, 0, BoolLeafNodeType::DIM));
                if (neighborNode && neighborNode->isDense()) ++denseNeighbors;

                // Fully surrounded by dense mask nodes -> no boundary possible here.
                if (denseNeighbors == 6) continue;
            }

            // reset value mask
            memset(valueMask.get(), 0, sizeof(char) * BoolLeafNodeType::SIZE);

            const typename TreeType::LeafNodeType* distNode = distAcc.probeConstLeaf(origin);

            // check internal voxel neighbors

            bool earlyTermination = false;

            if (!denseNode) {
                if (distNode) {
                    evalInternalNeighborsP(valueMask.get(), node, *distNode);
                    evalInternalNeighborsN(valueMask.get(), node, *distNode);
                } else if (distAcc.getValue(origin) > mIsovalue) {
                    // Constant exterior distance region: any off-neighbor is a boundary,
                    // so a single hit suffices.
                    earlyTermination = evalInternalNeighborsP(valueMask.get(), node);
                    if (!earlyTermination) {
                        earlyTermination = evalInternalNeighborsN(valueMask.get(), node);
                    }
                }
            }

            // check external voxel neighbors

            if (!earlyTermination) {
                evalExternalNeighborsX<true>(valueMask.get(), node, maskAcc, distAcc);
                evalExternalNeighborsX<false>(valueMask.get(), node, maskAcc, distAcc);
                evalExternalNeighborsY<true>(valueMask.get(), node, maskAcc, distAcc);
                evalExternalNeighborsY<false>(valueMask.get(), node, maskAcc, distAcc);
                evalExternalNeighborsZ<true>(valueMask.get(), node, maskAcc, distAcc);
                evalExternalNeighborsZ<false>(valueMask.get(), node, maskAcc, distAcc);
            }

            // Export marked boundary voxels.

            int numBoundaryValues = 0;
            for (Index i = 0, I = BoolLeafNodeType::SIZE; i < I; ++i) {
                numBoundaryValues += valueMask[i] == 1;
            }

            if (numBoundaryValues > 0) {
                mNewNodes[n] = new BoolLeafNodeType(origin, false);
                for (Index i = 0, I = BoolLeafNodeType::SIZE; i < I; ++i) {
                    if (valueMask[i] == 1) mNewNodes[n]->setValueOn(i);
                }
            }
        }
    }

private:
    // Check internal voxel neighbors in positive {x, y, z} directions.
    // (Linear offsets: +1 is +z, +DIM is +y, +DIM*DIM is +x.)

    void evalInternalNeighborsP(char* valueMask, const BoolLeafNodeType& node,
        const LeafNodeType& distNode) const
    {
        for (Index x = 0; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM - 1; ++z) {
                    const Index pos = yPos + z;
                    if (valueMask[pos] != 0 || !node.isValueOn(pos)) continue;
                    if (!node.isValueOn(pos + 1) && distNode.getValue(pos + 1) > mIsovalue) {
                        valueMask[pos] = 1;
                    }
                }
            }
        }

        for (Index x = 0; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM - 1; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (valueMask[pos] != 0 || !node.isValueOn(pos)) continue;
                    if (!node.isValueOn(pos + BoolLeafNodeType::DIM)
                        && distNode.getValue(pos + BoolLeafNodeType::DIM) > mIsovalue) {
                        valueMask[pos] = 1;
                    }
                }
            }
        }

        for (Index x = 0; x < BoolLeafNodeType::DIM - 1; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (valueMask[pos] != 0 || !node.isValueOn(pos)) continue;
                    if (!node.isValueOn(pos + BoolLeafNodeType::DIM * BoolLeafNodeType::DIM)
                        && (distNode.getValue(pos + BoolLeafNodeType::DIM * BoolLeafNodeType::DIM)
                            > mIsovalue)) {
                        valueMask[pos] = 1;
                    }
                }
            }
        }
    }

    // Variant for constant exterior regions (no distance leaf): returns true on
    // the first boundary voxel found, allowing the caller to terminate early.
    bool evalInternalNeighborsP(char* valueMask, const BoolLeafNodeType& node) const {

        for (Index x = 0; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM - 1; ++z) {
                    const Index pos = yPos + z;
                    if (node.isValueOn(pos) && !node.isValueOn(pos + 1)) {
                        valueMask[pos] = 1;
                        return true;
                    }
                }
            }
        }

        for (Index x = 0; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM - 1; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (node.isValueOn(pos) && !node.isValueOn(pos + BoolLeafNodeType::DIM)) {
                        valueMask[pos] = 1;
                        return true;
                    }
                }
            }
        }

        for (Index x = 0; x < BoolLeafNodeType::DIM - 1; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (node.isValueOn(pos)
                        && !node.isValueOn(pos + BoolLeafNodeType::DIM * BoolLeafNodeType::DIM)) {
                        valueMask[pos] = 1;
                        return true;
                    }
                }
            }
        }

        return false;
    }

    // Check internal voxel neighbors in negative {x, y, z} directions.

    void evalInternalNeighborsN(char* valueMask, const BoolLeafNodeType& node,
        const LeafNodeType& distNode) const
    {
        for (Index x = 0; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 1; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (valueMask[pos] != 0 || !node.isValueOn(pos)) continue;
                    if (!node.isValueOn(pos - 1) && distNode.getValue(pos - 1) > mIsovalue) {
                        valueMask[pos] = 1;
                    }
                }
            }
        }

        for (Index x = 0; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 1; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (valueMask[pos] != 0 || !node.isValueOn(pos)) continue;
                    if (!node.isValueOn(pos - BoolLeafNodeType::DIM)
                        && distNode.getValue(pos - BoolLeafNodeType::DIM) > mIsovalue) {
                        valueMask[pos] = 1;
                    }
                }
            }
        }

        for (Index x = 1; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (valueMask[pos] != 0 || !node.isValueOn(pos)) continue;
                    if (!node.isValueOn(pos - BoolLeafNodeType::DIM * BoolLeafNodeType::DIM)
                        && (distNode.getValue(pos - BoolLeafNodeType::DIM * BoolLeafNodeType::DIM)
                            > mIsovalue)) {
                        valueMask[pos] = 1;
                    }
                }
            }
        }
    }

    // Early-out variant for constant exterior regions (see positive-direction overload).
    bool evalInternalNeighborsN(char* valueMask, const BoolLeafNodeType& node) const {

        for (Index x = 0; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 1; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (node.isValueOn(pos) && !node.isValueOn(pos - 1)) {
                        valueMask[pos] = 1;
                        return true;
                    }
                }
            }
        }

        for (Index x = 0; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 1; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (node.isValueOn(pos) && !node.isValueOn(pos - BoolLeafNodeType::DIM)) {
                        valueMask[pos] = 1;
                        return true;
                    }
                }
            }
        }

        for (Index x = 1; x < BoolLeafNodeType::DIM; ++x) {
            const Index xPos = x << (2 * BoolLeafNodeType::LOG2DIM);
            for (Index y = 0; y < BoolLeafNodeType::DIM; ++y) {
                const Index yPos = xPos + (y << BoolLeafNodeType::LOG2DIM);
                for (Index z = 0; z < BoolLeafNodeType::DIM; ++z) {
                    const Index pos = yPos + z;
                    if (node.isValueOn(pos)
                        && !node.isValueOn(pos - BoolLeafNodeType::DIM * BoolLeafNodeType::DIM)) {
                        valueMask[pos] = 1;
                        return true;
                    }
                }
            }
        }

        return false;
    }

    // Check external voxel neighbors

    // If UpWind is true check the X+ oriented node face, else the X- oriented face.
    template<bool UpWind>
    void evalExternalNeighborsX(char* valueMask, const BoolLeafNodeType& node,
        const tree::ValueAccessor<const BoolTreeType>& maskAcc,
        const tree::ValueAccessor<const TreeType>& distAcc) const {

        const Coord& origin = node.origin();
        Coord ijk(0, 0, 0), nijk;

        int step = -1;
        if (UpWind) {
            step = 1;
            ijk[0] = int(BoolLeafNodeType::DIM) - 1;
        }

        const Index xPos = ijk[0] << (2 * int(BoolLeafNodeType::LOG2DIM));

        for (ijk[1] = 0; ijk[1] < int(BoolLeafNodeType::DIM); ++ijk[1]) {
            const Index yPos = xPos + (ijk[1] << int(BoolLeafNodeType::LOG2DIM));

            for (ijk[2] = 0; ijk[2] < int(BoolLeafNodeType::DIM); ++ijk[2]) {
                const Index pos = yPos + ijk[2];

                if (valueMask[pos] == 0 && node.isValueOn(pos)) {

                    nijk = origin + ijk.offsetBy(step, 0, 0);

                    if (!maskAcc.isValueOn(nijk) && distAcc.getValue(nijk) > mIsovalue) {
                        valueMask[pos] = 1;
                    }
                }
            }
        }
    }

    // If UpWind is true check the Y+ oriented node face, else the Y- oriented face.
    template<bool UpWind>
    void evalExternalNeighborsY(char* valueMask, const BoolLeafNodeType& node,
        const tree::ValueAccessor<const BoolTreeType>& maskAcc,
        const tree::ValueAccessor<const TreeType>& distAcc) const {

        const Coord& origin = node.origin();
        Coord ijk(0, 0, 0), nijk;

        int step = -1;
        if (UpWind) {
            step = 1;
            ijk[1] = int(BoolLeafNodeType::DIM) - 1;
        }

        const Index yPos = ijk[1] << int(BoolLeafNodeType::LOG2DIM);

        for (ijk[0] = 0; ijk[0] < int(BoolLeafNodeType::DIM); ++ijk[0]) {
            const Index xPos = yPos + (ijk[0] << (2 * int(BoolLeafNodeType::LOG2DIM)));

            for (ijk[2] = 0; ijk[2] < int(BoolLeafNodeType::DIM); ++ijk[2]) {
                const Index pos = xPos + ijk[2];

                if (valueMask[pos] == 0 && node.isValueOn(pos)) {

                    nijk = origin + ijk.offsetBy(0, step, 0);
                    if (!maskAcc.isValueOn(nijk) && distAcc.getValue(nijk) > mIsovalue) {
                        valueMask[pos] = 1;
                    }
                }
            }
        }
    }

    // If UpWind is true check the Z+ oriented node face, else the Z- oriented face.
    template<bool UpWind>
    void evalExternalNeighborsZ(char* valueMask, const BoolLeafNodeType& node,
        const tree::ValueAccessor<const BoolTreeType>& maskAcc,
        const tree::ValueAccessor<const TreeType>& distAcc) const {

        const Coord& origin = node.origin();
        Coord ijk(0, 0, 0), nijk;

        int step = -1;
        if (UpWind) {
            step = 1;
            ijk[2] = int(BoolLeafNodeType::DIM) - 1;
        }

        for (ijk[0] = 0; ijk[0] < int(BoolLeafNodeType::DIM); ++ijk[0]) {
            const Index xPos = ijk[0] << (2 * int(BoolLeafNodeType::LOG2DIM));

            for (ijk[1] = 0; ijk[1] < int(BoolLeafNodeType::DIM); ++ijk[1]) {
                const Index pos = ijk[2] + xPos + (ijk[1] << int(BoolLeafNodeType::LOG2DIM));

                if (valueMask[pos] == 0 && node.isValueOn(pos)) {

                    nijk = origin + ijk.offsetBy(0, 0, step);
                    if (!maskAcc.isValueOn(nijk) && distAcc.getValue(nijk) > mIsovalue) {
                        valueMask[pos] = 1;
                    }
                }
            }
        }
    }

    //////////

    TreeType const * const mTree;
    BoolTreeType const * const mFillMask;
    BoolLeafNodeType const * const * const mFillNodes;
    BoolLeafNodeType ** const mNewNodes;
    ValueType const mIsovalue;
}; // FillMaskBoundary


/// @brief Constructs a memory light char tree that represents the exterior region with @c +1
/// and the interior regions with @c -1.
template <class TreeType>
inline typename TreeType::template ValueConverter<char>::Type::Ptr
computeEnclosedRegionMask(const TreeType& tree, typename TreeType::ValueType isovalue,
    const typename TreeType::template ValueConverter<bool>::Type* fillMask)
{
    using LeafNodeType = typename TreeType::LeafNodeType;
    using RootNodeType = typename TreeType::RootNodeType;
    using NodeChainType = typename RootNodeType::NodeChainType;
    using InternalNodeType = typename NodeChainType::template Get<1>;

    using CharTreeType = typename TreeType::template ValueConverter<char>::Type;
    using CharLeafNodeType = typename CharTreeType::LeafNodeType;

    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    const TreeType* treePt = &tree;

    size_t numLeafNodes = 0, numInternalNodes = 0;

    std::vector<const LeafNodeType*> nodes;
    std::vector<size_t> leafnodeCount;

    {
        // compute the prefix sum of the leafnode count in each internal node.
        std::vector<const InternalNodeType*> internalNodes;
        treePt->getNodes(internalNodes);

        numInternalNodes = internalNodes.size();

        leafnodeCount.push_back(0);
        for (size_t n = 0; n < numInternalNodes; ++n) {
            leafnodeCount.push_back(leafnodeCount.back() + internalNodes[n]->leafCount());
        }

        numLeafNodes = leafnodeCount.back();

        // extract all leafnodes
        nodes.reserve(numLeafNodes);

        for (size_t n = 0; n < numInternalNodes; ++n) {
            internalNodes[n]->getNodes(nodes);
        }
    }

    // create mask leafnodes (0 = below isovalue, 1 = everything else)
    std::unique_ptr<CharLeafNodeType*[]> maskNodes(new CharLeafNodeType*[numLeafNodes]);

    tbb::parallel_for(tbb::blocked_range<size_t>(0, numLeafNodes),
        LabelBoundaryVoxels<LeafNodeType>(isovalue, nodes.data(), maskNodes.get()));

    // create mask grid
    typename CharTreeType::Ptr maskTree(new CharTreeType(1));

    PopulateTree<CharTreeType> populate(*maskTree, maskNodes.get(), leafnodeCount.data(), 1);
    tbb::parallel_reduce(tbb::blocked_range<size_t>(0, numInternalNodes), populate);

    // optionally evaluate the fill mask

    std::vector<CharLeafNodeType*> extraMaskNodes;

    if (fillMask) {

        std::vector<const BoolLeafNodeType*> fillMaskNodes;
        fillMask->getNodes(fillMaskNodes);

        std::unique_ptr<BoolLeafNodeType*[]> boundaryMaskNodes(
            new BoolLeafNodeType*[fillMaskNodes.size()]);

        tbb::parallel_for(tbb::blocked_range<size_t>(0, fillMaskNodes.size()),
            FillMaskBoundary<TreeType>(tree, isovalue, *fillMask, fillMaskNodes.data(),
                boundaryMaskNodes.get()));

        tree::ValueAccessor<CharTreeType> maskAcc(*maskTree);

        for (size_t n = 0, N = fillMaskNodes.size(); n < N; ++n) {

            if (boundaryMaskNodes[n] == nullptr) continue;

            const BoolLeafNodeType& boundaryNode = *boundaryMaskNodes[n];
            const Coord& origin = boundaryNode.origin();

            CharLeafNodeType* maskNodePt = maskAcc.probeLeaf(origin);

            // Track leafs created here so their signs can be flipped later, too.
            if (!maskNodePt) {
                maskNodePt = maskAcc.touchLeaf(origin);
                extraMaskNodes.push_back(maskNodePt);
            }

            char* data = maskNodePt->buffer().data();

            typename BoolLeafNodeType::ValueOnCIter it = boundaryNode.cbeginValueOn();
            for (; it; ++it) {
                if (data[it.pos()] != 0) data[it.pos()] = -1;
            }

            delete boundaryMaskNodes[n];
        }
    }

    // eliminate enclosed regions
    tools::traceExteriorBoundaries(*maskTree);

    // flip voxel sign to negative inside and positive outside.
    tbb::parallel_for(tbb::blocked_range<size_t>(0, numLeafNodes),
        FlipRegionSign<CharLeafNodeType>(maskNodes.get()));

    if (!extraMaskNodes.empty()) {
        tbb::parallel_for(tbb::blocked_range<size_t>(0, extraMaskNodes.size()),
            FlipRegionSign<CharLeafNodeType>(extraMaskNodes.data()));
    }

    // propagate sign information into tile region
    tools::signedFloodFill(*maskTree);

    return maskTree;
} // computeEnclosedRegionMask()


template <class TreeType>
inline typename TreeType::template ValueConverter<bool>::Type::Ptr
computeInteriorMask(const TreeType& tree, typename TreeType::ValueType iso)
{
    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using RootNodeType = typename TreeType::RootNodeType;
    using NodeChainType = typename RootNodeType::NodeChainType;
    using InternalNodeType = typename NodeChainType::template Get<1>;

    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;
    using BoolRootNodeType = typename BoolTreeType::RootNodeType;
    using BoolNodeChainType = typename BoolRootNodeType::NodeChainType;
    using BoolInternalNodeType = typename BoolNodeChainType::template Get<1>;

    /////

    // Clamp the isovalue to the level set's background value minus epsilon.
    // (In a valid narrow-band level set, all voxels, including background voxels,
    // have values less than or equal to the background value, so an isovalue
    // greater than or equal to the background value would produce a mask with
    // effectively infinite extent.)
    iso = std::min(iso,
        static_cast<ValueType>(tree.background() - math::Tolerance<ValueType>::value()));

    size_t numLeafNodes = 0, numInternalNodes = 0;

    std::vector<const LeafNodeType*> nodes;
    std::vector<size_t> leafnodeCount;

    {
        // compute the prefix sum of the leafnode count in each internal node.
        std::vector<const InternalNodeType*> internalNodes;
        tree.getNodes(internalNodes);

        numInternalNodes = internalNodes.size();

        leafnodeCount.push_back(0);
        for (size_t n = 0; n < numInternalNodes; ++n) {
            leafnodeCount.push_back(leafnodeCount.back() + internalNodes[n]->leafCount());
        }

        numLeafNodes = leafnodeCount.back();

        // extract all leafnodes
        nodes.reserve(numLeafNodes);

        for (size_t n = 0; n < numInternalNodes; ++n) {
            internalNodes[n]->getNodes(nodes);
        }
    }

    // create mask leafnodes
    std::unique_ptr<BoolLeafNodeType*[]> maskNodes(new BoolLeafNodeType*[numLeafNodes]);

    tbb::parallel_for(tbb::blocked_range<size_t>(0, numLeafNodes),
        MaskInteriorVoxels<LeafNodeType>(iso, nodes.data(), maskNodes.get()));

    // create mask grid
    typename BoolTreeType::Ptr maskTree(new BoolTreeType(false));

    PopulateTree<BoolTreeType> populate(*maskTree, maskNodes.get(), leafnodeCount.data(), false);
    tbb::parallel_reduce(tbb::blocked_range<size_t>(0, numInternalNodes), populate);

    // evaluate tile values
    std::vector<BoolInternalNodeType*> internalMaskNodes;
    maskTree->getNodes(internalMaskNodes);

    tbb::parallel_for(tbb::blocked_range<size_t>(0, internalMaskNodes.size()),
        MaskInteriorTiles<TreeType, BoolInternalNodeType>(iso, tree, internalMaskNodes.data()));

    // evaluate the remaining upper-level tiles serially
    tree::ValueAccessor<const TreeType> acc(tree);

    typename BoolTreeType::ValueAllIter it(*maskTree);
    it.setMaxDepth(BoolTreeType::ValueAllIter::LEAF_DEPTH - 2);

    for ( ; it; ++it) {
        if (acc.getValue(it.getCoord()) < iso) {
            it.setValue(true);
            it.setActiveState(true);
        }
    }

    return maskTree;
} // computeInteriorMask()


/// @brief tbb::parallel_reduce body: build a boolean mask of the active input
///        voxels whose 6-neighborhood straddles the isovalue (surface-crossing
///        voxels). Partial masks are merged on join.
template<typename InputTreeType>
struct MaskIsovalueCrossingVoxels {

    using InputValueType = typename InputTreeType::ValueType;
    using InputLeafNodeType = typename InputTreeType::LeafNodeType;
    using BoolTreeType = typename InputTreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    MaskIsovalueCrossingVoxels(
        const InputTreeType& inputTree,
        const std::vector<const InputLeafNodeType*>& inputLeafNodes,
        BoolTreeType& maskTree,
        InputValueType iso)
        : mInputAccessor(inputTree)
        , mInputNodes(!inputLeafNodes.empty() ? &inputLeafNodes.front() : nullptr)
        , mMaskTree(false)
        , mMaskAccessor(maskTree)
        , mIsovalue(iso)
    {
    }

    MaskIsovalueCrossingVoxels(MaskIsovalueCrossingVoxels& rhs, tbb::split)
        : mInputAccessor(rhs.mInputAccessor.tree())
        , mInputNodes(rhs.mInputNodes)
        , mMaskTree(false)
        , mMaskAccessor(mMaskTree) // splits collect into their private tree
        , mIsovalue(rhs.mIsovalue)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range)
    {
        const InputValueType iso = mIsovalue;
        Coord ijk(0, 0, 0);

        BoolLeafNodeType* maskNodePt = nullptr;

        for (size_t n = range.begin(); mInputNodes && (n != range.end()); ++n) {

            const InputLeafNodeType& node = *mInputNodes[n];

            if (!maskNodePt) maskNodePt = new BoolLeafNodeType(node.origin(), false);
            else maskNodePt->setOrigin(node.origin());

            bool collectedData = false;

            for (typename InputLeafNodeType::ValueOnCIter it = node.cbeginValueOn(); it; ++it) {

                bool isUnder = *it < iso;

                ijk = it.getCoord();

                // Probe the six face neighbors, stopping at the first sign change.
                ++ijk[2];
                bool signChange = isUnder != (mInputAccessor.getValue(ijk) < iso); // +z edge
                --ijk[2];

                if (!signChange) {
                    --ijk[2];
                    signChange = isUnder != (mInputAccessor.getValue(ijk) < iso); // -z edge
                    ++ijk[2];
                }

                if (!signChange) {
                    ++ijk[1];
                    signChange = isUnder != (mInputAccessor.getValue(ijk) < iso); // +y edge
                    --ijk[1];
                }

                if (!signChange) {
                    --ijk[1];
                    signChange = isUnder != (mInputAccessor.getValue(ijk) < iso); // -y edge
                    ++ijk[1];
                }

                if (!signChange) {
                    ++ijk[0];
                    signChange = isUnder != (mInputAccessor.getValue(ijk) < iso); // +x edge
                    --ijk[0];
                }

                if (!signChange) {
                    --ijk[0];
                    signChange = isUnder != (mInputAccessor.getValue(ijk) < iso); // -x edge
                    ++ijk[0];
                }

                if (signChange) {
                    collectedData = true;
                    maskNodePt->setValueOn(it.pos(), true);
                }
            }

            if (collectedData) {
                mMaskAccessor.addLeaf(maskNodePt);
                maskNodePt = nullptr; // ownership transferred to the mask tree
            }
        }

        if (maskNodePt) delete maskNodePt;
    }

    void join(MaskIsovalueCrossingVoxels& rhs) {
        mMaskAccessor.tree().merge(rhs.mMaskAccessor.tree());
    }

private:
    tree::ValueAccessor<const InputTreeType> mInputAccessor;
    InputLeafNodeType const * const * const mInputNodes;

    BoolTreeType mMaskTree;
    tree::ValueAccessor<BoolTreeType> mMaskAccessor;

    InputValueType mIsovalue;
}; // MaskIsovalueCrossingVoxels


////////////////////////////////////////


/// @brief One connected component of a node's value mask, with links to
///        touching segments in neighboring nodes (used for segmentation).
template<typename NodeType>
struct NodeMaskSegment
{
    using Ptr = SharedPtr<NodeMaskSegment>;
    using NodeMaskType = typename NodeType::NodeMaskType;

    NodeMaskSegment() : connections(), mask(false), origin(0,0,0), visited(false) {}

    std::vector<NodeMaskSegment*> connections;
    NodeMaskType mask;
    Coord origin;
    bool visited;
}; // struct NodeMaskSegment


/// @brief Splits the given node's value mask into connected components
///        (6-neighborhood flood fill over the on bits) and appends one
///        NodeMaskSegment per component to @a segments.
template<typename NodeType>
inline void
nodeMaskSegmentation(const NodeType& node,
    std::vector<typename NodeMaskSegment<NodeType>::Ptr>& segments)
{
    using NodeMaskType = typename NodeType::NodeMaskType;
    using NodeMaskSegmentType = NodeMaskSegment<NodeType>;
    using NodeMaskSegmentTypePtr = typename NodeMaskSegmentType::Ptr;

    NodeMaskType nodeMask(node.getValueMask());
    std::deque<Index> indexList;

    while (!nodeMask.isOff()) {

        NodeMaskSegmentTypePtr segment(new NodeMaskSegmentType());
        segment->origin = node.origin();

        NodeMaskType& mask = segment->mask;

        indexList.push_back(nodeMask.findFirstOn());
        nodeMask.setOff(indexList.back()); // mark as visited
        Coord ijk(0, 0, 0);

        while (!indexList.empty()) {

            const Index pos = indexList.back();
            indexList.pop_back();

            if (mask.isOn(pos)) continue;
            mask.setOn(pos);

            ijk = NodeType::offsetToLocalCoord(pos);

            // Visit the six face neighbors within this node
            // (linear offsets: +/-1 is z, +/-DIM is y, +/-DIM*DIM is x).
            Index npos = pos - 1;
            if (ijk[2] != 0 && nodeMask.isOn(npos)) {
                nodeMask.setOff(npos);
                indexList.push_back(npos);
            }

            npos = pos + 1;
            if (ijk[2] != (NodeType::DIM - 1) && nodeMask.isOn(npos)) {
                nodeMask.setOff(npos);
                indexList.push_back(npos);
            }

            npos = pos - NodeType::DIM;
            if (ijk[1] != 0 && nodeMask.isOn(npos)) {
                nodeMask.setOff(npos);
                indexList.push_back(npos);
            }

            npos = pos + NodeType::DIM;
            if (ijk[1] != (NodeType::DIM - 1) && nodeMask.isOn(npos)) {
                nodeMask.setOff(npos);
                indexList.push_back(npos);
            }

            npos = pos - NodeType::DIM * NodeType::DIM;
            if (ijk[0] != 0 && nodeMask.isOn(npos)) {
                nodeMask.setOff(npos);
                indexList.push_back(npos);
            }

            npos = pos + NodeType::DIM * NodeType::DIM;
            if (ijk[0] != (NodeType::DIM - 1) && nodeMask.isOn(npos)) {
                nodeMask.setOff(npos);
                indexList.push_back(npos);
            }
        }

        segments.push_back(segment);
    }
}


/// @brief TBB body: run nodeMaskSegmentation() on each node and record the
///        node's array offset in its origin's x component (a deliberate hack
///        used by later passes to map a node back to its segment list).
template<typename NodeType>
struct SegmentNodeMask {

    using NodeMaskSegmentType = NodeMaskSegment<NodeType>;
    using NodeMaskSegmentTypePtr = typename NodeMaskSegmentType::Ptr;
    using NodeMaskSegmentVector = typename std::vector<NodeMaskSegmentTypePtr>;

    SegmentNodeMask(std::vector<NodeType*>& nodes, NodeMaskSegmentVector* nodeMaskArray)
        : mNodes(!nodes.empty() ? &nodes.front() : nullptr)
        , mNodeMaskArray(nodeMaskArray)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            NodeType& node = *mNodes[n];
            nodeMaskSegmentation(node, mNodeMaskArray[n]);

            // hack origin data to store array offset
            Coord& origin = const_cast<Coord&>(node.origin());
            origin[0] = static_cast<int>(n);
        }
    }

    NodeType * const * const mNodes;
    NodeMaskSegmentVector * const mNodeMaskArray;
}; // struct SegmentNodeMask


/// @brief TBB body: link NodeMaskSegments that touch across node faces.
template<typename TreeType, typename NodeType>
struct ConnectNodeMaskSegments {

    using NodeMaskType = typename NodeType::NodeMaskType;
    using NodeMaskSegmentType = NodeMaskSegment<NodeType>;
    using NodeMaskSegmentTypePtr = typename NodeMaskSegmentType::Ptr;
    using NodeMaskSegmentVector = typename std::vector<NodeMaskSegmentTypePtr>;

    ConnectNodeMaskSegments(const TreeType& tree, NodeMaskSegmentVector* nodeMaskArray)
        : mTree(&tree)
        , mNodeMaskArray(nodeMaskArray)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        tree::ValueAccessor<const TreeType> acc(*mTree);

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            NodeMaskSegmentVector& segments = mNodeMaskArray[n];
            if (segments.empty()) continue;

            std::vector<std::set<NodeMaskSegmentType*> > connections(segments.size());

            Coord ijk =
segments[0]->origin; const NodeType* node = acc.template probeConstNode<NodeType>(ijk); if (!node) continue; // get neighbour nodes ijk[2] += NodeType::DIM; const NodeType* nodeZUp = acc.template probeConstNode<NodeType>(ijk); ijk[2] -= (NodeType::DIM + NodeType::DIM); const NodeType* nodeZDown = acc.template probeConstNode<NodeType>(ijk); ijk[2] += NodeType::DIM; ijk[1] += NodeType::DIM; const NodeType* nodeYUp = acc.template probeConstNode<NodeType>(ijk); ijk[1] -= (NodeType::DIM + NodeType::DIM); const NodeType* nodeYDown = acc.template probeConstNode<NodeType>(ijk); ijk[1] += NodeType::DIM; ijk[0] += NodeType::DIM; const NodeType* nodeXUp = acc.template probeConstNode<NodeType>(ijk); ijk[0] -= (NodeType::DIM + NodeType::DIM); const NodeType* nodeXDown = acc.template probeConstNode<NodeType>(ijk); ijk[0] += NodeType::DIM; const Index startPos = node->getValueMask().findFirstOn(); for (Index pos = startPos; pos < NodeMaskType::SIZE; ++pos) { if (!node->isValueOn(pos)) continue; ijk = NodeType::offsetToLocalCoord(pos); #ifdef _MSC_FULL_VER #if _MSC_FULL_VER >= 190000000 && _MSC_FULL_VER < 190024210 // Visual Studio 2015 had a codegen bug that wasn't fixed until Update 3 volatile Index npos = 0; #else Index npos = 0; #endif #else Index npos = 0; #endif if (ijk[2] == 0) { npos = pos + (NodeType::DIM - 1); if (nodeZDown && nodeZDown->isValueOn(npos)) { NodeMaskSegmentType* nsegment = findNodeMaskSegment(mNodeMaskArray[getNodeOffset(*nodeZDown)], npos); const Index idx = findNodeMaskSegmentIndex(segments, pos); connections[idx].insert(nsegment); } } else if (ijk[2] == (NodeType::DIM - 1)) { npos = pos - (NodeType::DIM - 1); if (nodeZUp && nodeZUp->isValueOn(npos)) { NodeMaskSegmentType* nsegment = findNodeMaskSegment(mNodeMaskArray[getNodeOffset(*nodeZUp)], npos); const Index idx = findNodeMaskSegmentIndex(segments, pos); connections[idx].insert(nsegment); } } if (ijk[1] == 0) { npos = pos + (NodeType::DIM - 1) * NodeType::DIM; if (nodeYDown && 
nodeYDown->isValueOn(npos)) { NodeMaskSegmentType* nsegment = findNodeMaskSegment(mNodeMaskArray[getNodeOffset(*nodeYDown)], npos); const Index idx = findNodeMaskSegmentIndex(segments, pos); connections[idx].insert(nsegment); } } else if (ijk[1] == (NodeType::DIM - 1)) { npos = pos - (NodeType::DIM - 1) * NodeType::DIM; if (nodeYUp && nodeYUp->isValueOn(npos)) { NodeMaskSegmentType* nsegment = findNodeMaskSegment(mNodeMaskArray[getNodeOffset(*nodeYUp)], npos); const Index idx = findNodeMaskSegmentIndex(segments, pos); connections[idx].insert(nsegment); } } if (ijk[0] == 0) { npos = pos + (NodeType::DIM - 1) * NodeType::DIM * NodeType::DIM; if (nodeXDown && nodeXDown->isValueOn(npos)) { NodeMaskSegmentType* nsegment = findNodeMaskSegment(mNodeMaskArray[getNodeOffset(*nodeXDown)], npos); const Index idx = findNodeMaskSegmentIndex(segments, pos); connections[idx].insert(nsegment); } } else if (ijk[0] == (NodeType::DIM - 1)) { npos = pos - (NodeType::DIM - 1) * NodeType::DIM * NodeType::DIM; if (nodeXUp && nodeXUp->isValueOn(npos)) { NodeMaskSegmentType* nsegment = findNodeMaskSegment(mNodeMaskArray[getNodeOffset(*nodeXUp)], npos); const Index idx = findNodeMaskSegmentIndex(segments, pos); connections[idx].insert(nsegment); } } } for (size_t i = 0, I = connections.size(); i < I; ++i) { typename std::set<NodeMaskSegmentType*>::iterator it = connections[i].begin(), end = connections[i].end(); std::vector<NodeMaskSegmentType*>& segmentConnections = segments[i]->connections; segmentConnections.reserve(connections.size()); for (; it != end; ++it) { segmentConnections.push_back(*it); } } } // end range loop } private: static inline size_t getNodeOffset(const NodeType& node) { return static_cast<size_t>(node.origin()[0]); } static inline NodeMaskSegmentType* findNodeMaskSegment(NodeMaskSegmentVector& segments, Index pos) { NodeMaskSegmentType* segment = nullptr; for (size_t n = 0, N = segments.size(); n < N; ++n) { if (segments[n]->mask.isOn(pos)) { segment = 
segments[n].get();
                break;
            }
        }
        return segment;
    }

    // Return the index of the segment in @a segments whose mask contains
    // voxel offset @a pos, or Index(-1) if no segment claims it.
    static inline Index
    findNodeMaskSegmentIndex(NodeMaskSegmentVector&  segments, Index pos)
    {
        for (Index n = 0, N = Index(segments.size()); n < N; ++n) {
            if (segments[n]->mask.isOn(pos)) return n;
        }
        return Index(-1);
    }

    TreeType            const * const mTree;
    NodeMaskSegmentVector     * const mNodeMaskArray;
}; // struct ConnectNodeMaskSegments


/// @brief tbb::parallel_reduce body that unions a group of NodeMaskSegments
/// into a single bool tree (one leaf node touched per segment origin).
template<typename TreeType>
struct MaskSegmentGroup
{
    using LeafNodeType = typename TreeType::LeafNodeType;
    using TreeTypePtr = typename TreeType::Ptr;
    using NodeMaskSegmentType = NodeMaskSegment<LeafNodeType>;

    MaskSegmentGroup(const std::vector<NodeMaskSegmentType*>& segments)
        : mSegments(!segments.empty() ? &segments.front() : nullptr)
        , mTree(new TreeType(false))
    {
    }

    // Splitting constructor: each task reduces into its own private tree.
    MaskSegmentGroup(const MaskSegmentGroup& rhs, tbb::split)
        : mSegments(rhs.mSegments)
        , mTree(new TreeType(false))
    {
    }

    TreeTypePtr& mask() { return mTree; }

    void join(MaskSegmentGroup& rhs) { mTree->merge(*rhs.mTree); }

    void operator()(const tbb::blocked_range<size_t>& range)
    {
        tree::ValueAccessor<TreeType> acc(*mTree);

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            NodeMaskSegmentType& segment = *mSegments[n];
            // touchLeaf creates the leaf on demand; OR in the segment's voxels.
            LeafNodeType* node = acc.touchLeaf(segment.origin);
            node->getValueMask() |= segment.mask;
        }
    }

private:
    NodeMaskSegmentType * const * const mSegments;
    TreeTypePtr mTree;
}; // struct MaskSegmentGroup


////////////////////////////////////////


/// @brief tbb::parallel_reduce body that grows a narrow-band mask across leaf
/// node boundaries: a face neighbour in an adjacent leaf is added to the new
/// mask when its absolute distance value exceeds the local voxel's.
template<typename TreeType>
struct ExpandLeafNodeRegion
{
    using ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using NodeMaskType = typename LeafNodeType::NodeMaskType;

    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    /////

    ExpandLeafNodeRegion(const TreeType& distTree, BoolTreeType& maskTree,
        std::vector<BoolLeafNodeType*>& maskNodes)
        : mDistTree(&distTree)
        , mMaskTree(&maskTree)
        , mMaskNodes(!maskNodes.empty() ?
&maskNodes.front() : nullptr) , mNewMaskTree(false) { } ExpandLeafNodeRegion(const ExpandLeafNodeRegion& rhs, tbb::split) : mDistTree(rhs.mDistTree) , mMaskTree(rhs.mMaskTree) , mMaskNodes(rhs.mMaskNodes) , mNewMaskTree(false) { } BoolTreeType& newMaskTree() { return mNewMaskTree; } void join(ExpandLeafNodeRegion& rhs) { mNewMaskTree.merge(rhs.mNewMaskTree); } void operator()(const tbb::blocked_range<size_t>& range) { using NodeType = LeafNodeType; tree::ValueAccessor<const TreeType> distAcc(*mDistTree); tree::ValueAccessor<const BoolTreeType> maskAcc(*mMaskTree); tree::ValueAccessor<BoolTreeType> newMaskAcc(mNewMaskTree); NodeMaskType maskZUp, maskZDown, maskYUp, maskYDown, maskXUp, maskXDown; for (size_t n = range.begin(), N = range.end(); n < N; ++n) { BoolLeafNodeType& maskNode = *mMaskNodes[n]; if (maskNode.isEmpty()) continue; Coord ijk = maskNode.origin(), nijk; const LeafNodeType* distNode = distAcc.probeConstLeaf(ijk); if (!distNode) continue; const ValueType *dataZUp = nullptr, *dataZDown = nullptr, *dataYUp = nullptr, *dataYDown = nullptr, *dataXUp = nullptr, *dataXDown = nullptr; ijk[2] += NodeType::DIM; getData(ijk, distAcc, maskAcc, maskZUp, dataZUp); ijk[2] -= (NodeType::DIM + NodeType::DIM); getData(ijk, distAcc, maskAcc, maskZDown, dataZDown); ijk[2] += NodeType::DIM; ijk[1] += NodeType::DIM; getData(ijk, distAcc, maskAcc, maskYUp, dataYUp); ijk[1] -= (NodeType::DIM + NodeType::DIM); getData(ijk, distAcc, maskAcc, maskYDown, dataYDown); ijk[1] += NodeType::DIM; ijk[0] += NodeType::DIM; getData(ijk, distAcc, maskAcc, maskXUp, dataXUp); ijk[0] -= (NodeType::DIM + NodeType::DIM); getData(ijk, distAcc, maskAcc, maskXDown, dataXDown); ijk[0] += NodeType::DIM; for (typename BoolLeafNodeType::ValueOnIter it = maskNode.beginValueOn(); it; ++it) { const Index pos = it.pos(); const ValueType val = std::abs(distNode->getValue(pos)); ijk = BoolLeafNodeType::offsetToLocalCoord(pos); nijk = ijk + maskNode.origin(); if (dataZUp && ijk[2] == (BoolLeafNodeType::DIM 
- 1)) { const Index npos = pos - (NodeType::DIM - 1); if (maskZUp.isOn(npos) && std::abs(dataZUp[npos]) > val) { newMaskAcc.setValueOn(nijk.offsetBy(0, 0, 1)); } } else if (dataZDown && ijk[2] == 0) { const Index npos = pos + (NodeType::DIM - 1); if (maskZDown.isOn(npos) && std::abs(dataZDown[npos]) > val) { newMaskAcc.setValueOn(nijk.offsetBy(0, 0, -1)); } } if (dataYUp && ijk[1] == (BoolLeafNodeType::DIM - 1)) { const Index npos = pos - (NodeType::DIM - 1) * NodeType::DIM; if (maskYUp.isOn(npos) && std::abs(dataYUp[npos]) > val) { newMaskAcc.setValueOn(nijk.offsetBy(0, 1, 0)); } } else if (dataYDown && ijk[1] == 0) { const Index npos = pos + (NodeType::DIM - 1) * NodeType::DIM; if (maskYDown.isOn(npos) && std::abs(dataYDown[npos]) > val) { newMaskAcc.setValueOn(nijk.offsetBy(0, -1, 0)); } } if (dataXUp && ijk[0] == (BoolLeafNodeType::DIM - 1)) { const Index npos = pos - (NodeType::DIM - 1) * NodeType::DIM * NodeType::DIM; if (maskXUp.isOn(npos) && std::abs(dataXUp[npos]) > val) { newMaskAcc.setValueOn(nijk.offsetBy(1, 0, 0)); } } else if (dataXDown && ijk[0] == 0) { const Index npos = pos + (NodeType::DIM - 1) * NodeType::DIM * NodeType::DIM; if (maskXDown.isOn(npos) && std::abs(dataXDown[npos]) > val) { newMaskAcc.setValueOn(nijk.offsetBy(-1, 0, 0)); } } } // end value on loop } // end range loop } private: static inline void getData(const Coord& ijk, tree::ValueAccessor<const TreeType>& distAcc, tree::ValueAccessor<const BoolTreeType>& maskAcc, NodeMaskType& mask, const ValueType*& data) { const LeafNodeType* node = distAcc.probeConstLeaf(ijk); if (node) { data = node->buffer().data(); mask = node->getValueMask(); const BoolLeafNodeType* maskNodePt = maskAcc.probeConstLeaf(ijk); if (maskNodePt) mask -= maskNodePt->getValueMask(); } } TreeType const * const mDistTree; BoolTreeType * const mMaskTree; BoolLeafNodeType ** const mMaskNodes; BoolTreeType mNewMaskTree; }; // struct ExpandLeafNodeRegion template<typename TreeType> struct FillLeafNodeVoxels { using 
ValueType = typename TreeType::ValueType;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using NodeMaskType = typename LeafNodeType::NodeMaskType;
    using BoolLeafNodeType = tree::LeafNode<bool, LeafNodeType::LOG2DIM>;

    FillLeafNodeVoxels(const TreeType& tree, std::vector<BoolLeafNodeType*>& maskNodes)
        : mTree(&tree), mMaskNodes(!maskNodes.empty() ? &maskNodes.front() : nullptr)
    {
    }

    // For each mask leaf, flood fill within the corresponding distance leaf:
    // starting from the voxels already in the narrow-band mask, absorb active
    // neighbours whose absolute distance value strictly increases.
    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        tree::ValueAccessor<const TreeType> distAcc(*mTree);

        std::vector<Index> indexList;
        indexList.reserve(NodeMaskType::SIZE);

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            BoolLeafNodeType& maskNode = *mMaskNodes[n];

            const LeafNodeType * distNode = distAcc.probeConstLeaf(maskNode.origin());
            if (!distNode) continue;

            // 'mask' holds candidate voxels not yet claimed by the narrow band.
            NodeMaskType mask(distNode->getValueMask());
            NodeMaskType& narrowbandMask = maskNode.getValueMask();

            // Seed the fill with every voxel already in the narrow-band mask.
            for (Index pos = narrowbandMask.findFirstOn(); pos < NodeMaskType::SIZE; ++pos) {
                if (narrowbandMask.isOn(pos)) indexList.push_back(pos);
            }

            mask -= narrowbandMask; // bitwise difference
            narrowbandMask.setOff();

            const ValueType* data = distNode->buffer().data();
            Coord ijk(0, 0, 0);

            while (!indexList.empty()) {

                const Index pos = indexList.back();
                indexList.pop_back();

                if (narrowbandMask.isOn(pos)) continue;
                narrowbandMask.setOn(pos);

                const ValueType dist = std::abs(data[pos]);

                ijk = LeafNodeType::offsetToLocalCoord(pos);

                // Face neighbours (z stride 1, y stride DIM); absorb only if
                // the neighbour lies farther from the isosurface.
                Index npos = pos - 1;
                if (ijk[2] != 0 && mask.isOn(npos) && std::abs(data[npos]) > dist) {
                    mask.setOff(npos);
                    indexList.push_back(npos);
                }

                npos = pos + 1;
                if ((ijk[2] != (LeafNodeType::DIM - 1))
                    && mask.isOn(npos) && std::abs(data[npos]) > dist)
                {
                    mask.setOff(npos);
                    indexList.push_back(npos);
                }

                npos = pos - LeafNodeType::DIM;
                if (ijk[1] != 0 && mask.isOn(npos) && std::abs(data[npos]) > dist) {
                    mask.setOff(npos);
                    indexList.push_back(npos);
                }

                npos = pos + LeafNodeType::DIM;
                if ((ijk[1] != (LeafNodeType::DIM - 1))
                    && mask.isOn(npos) && std::abs(data[npos]) > dist)
                {
                    mask.setOff(npos);
indexList.push_back(npos); } npos = pos - LeafNodeType::DIM * LeafNodeType::DIM; if (ijk[0] != 0 && mask.isOn(npos) && std::abs(data[npos]) > dist) { mask.setOff(npos); indexList.push_back(npos); } npos = pos + LeafNodeType::DIM * LeafNodeType::DIM; if ((ijk[0] != (LeafNodeType::DIM - 1)) && mask.isOn(npos) && std::abs(data[npos]) > dist) { mask.setOff(npos); indexList.push_back(npos); } } // end flood fill loop } // end range loop } TreeType const * const mTree; BoolLeafNodeType ** const mMaskNodes; }; // FillLeafNodeVoxels template<typename TreeType> struct ExpandNarrowbandMask { using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type; using BoolLeafNodeType = typename BoolTreeType::LeafNodeType; using BoolTreeTypePtr = typename BoolTreeType::Ptr; ExpandNarrowbandMask(const TreeType& tree, std::vector<BoolTreeTypePtr>& segments) : mTree(&tree), mSegments(!segments.empty() ? &segments.front() : nullptr) { } void operator()(const tbb::blocked_range<size_t>& range) const { const TreeType& distTree = *mTree; std::vector<BoolLeafNodeType*> nodes; for (size_t n = range.begin(), N = range.end(); n < N; ++n) { BoolTreeType& narrowBandMask = *mSegments[n]; BoolTreeType candidateMask(narrowBandMask, false, TopologyCopy()); while (true) { nodes.clear(); candidateMask.getNodes(nodes); if (nodes.empty()) break; const tbb::blocked_range<size_t> nodeRange(0, nodes.size()); tbb::parallel_for(nodeRange, FillLeafNodeVoxels<TreeType>(distTree, nodes)); narrowBandMask.topologyUnion(candidateMask); ExpandLeafNodeRegion<TreeType> op(distTree, narrowBandMask, nodes); tbb::parallel_reduce(nodeRange, op); if (op.newMaskTree().empty()) break; candidateMask.clear(); candidateMask.merge(op.newMaskTree()); } // end expand loop } // end range loop } TreeType const * const mTree; BoolTreeTypePtr * const mSegments; }; // ExpandNarrowbandMask template<typename TreeType> struct FloodFillSign { using TreeTypePtr = typename TreeType::Ptr; using ValueType = typename 
TreeType::ValueType; using LeafNodeType = typename TreeType::LeafNodeType; using RootNodeType = typename TreeType::RootNodeType; using NodeChainType = typename RootNodeType::NodeChainType; using InternalNodeType = typename NodeChainType::template Get<1>; FloodFillSign(const TreeType& tree, std::vector<TreeTypePtr>& segments) : mTree(&tree) , mSegments(!segments.empty() ? &segments.front() : nullptr) , mMinValue(ValueType(0.0)) { ValueType minSDFValue = std::numeric_limits<ValueType>::max(); { std::vector<const InternalNodeType*> nodes; tree.getNodes(nodes); if (!nodes.empty()) { FindMinTileValue<InternalNodeType> minOp(nodes.data()); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), minOp); minSDFValue = std::min(minSDFValue, minOp.minValue); } } if (minSDFValue > ValueType(0.0)) { std::vector<const LeafNodeType*> nodes; tree.getNodes(nodes); if (!nodes.empty()) { FindMinVoxelValue<LeafNodeType> minOp(nodes.data()); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), minOp); minSDFValue = std::min(minSDFValue, minOp.minValue); } } mMinValue = minSDFValue; } void operator()(const tbb::blocked_range<size_t>& range) const { const ValueType interiorValue = -std::abs(mMinValue); const ValueType exteriorValue = std::abs(mTree->background()); for (size_t n = range.begin(), N = range.end(); n < N; ++n) { tools::signedFloodFillWithValues(*mSegments[n], exteriorValue, interiorValue); } } private: TreeType const * const mTree; TreeTypePtr * const mSegments; ValueType mMinValue; }; // FloodFillSign template<typename TreeType> struct MaskedCopy { using TreeTypePtr = typename TreeType::Ptr; using ValueType = typename TreeType::ValueType; using LeafNodeType = typename TreeType::LeafNodeType; using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type; using BoolTreeTypePtr = typename BoolTreeType::Ptr; using BoolLeafNodeType = typename BoolTreeType::LeafNodeType; MaskedCopy(const TreeType& tree, std::vector<TreeTypePtr>& segments, 
std::vector<BoolTreeTypePtr>& masks) : mTree(&tree) , mSegments(!segments.empty() ? &segments.front() : nullptr) , mMasks(!masks.empty() ? &masks.front() : nullptr) { } void operator()(const tbb::blocked_range<size_t>& range) const { std::vector<const BoolLeafNodeType*> nodes; for (size_t n = range.begin(), N = range.end(); n < N; ++n) { const BoolTreeType& mask = *mMasks[n]; nodes.clear(); mask.getNodes(nodes); Copy op(*mTree, nodes); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), op); mSegments[n] = op.outputTree(); } } private: struct Copy { Copy(const TreeType& inputTree, std::vector<const BoolLeafNodeType*>& maskNodes) : mInputTree(&inputTree) , mMaskNodes(!maskNodes.empty() ? &maskNodes.front() : nullptr) , mOutputTreePtr(new TreeType(inputTree.background())) { } Copy(const Copy& rhs, tbb::split) : mInputTree(rhs.mInputTree) , mMaskNodes(rhs.mMaskNodes) , mOutputTreePtr(new TreeType(mInputTree->background())) { } TreeTypePtr& outputTree() { return mOutputTreePtr; } void join(Copy& rhs) { mOutputTreePtr->merge(*rhs.mOutputTreePtr); } void operator()(const tbb::blocked_range<size_t>& range) { tree::ValueAccessor<const TreeType> inputAcc(*mInputTree); tree::ValueAccessor<TreeType> outputAcc(*mOutputTreePtr); for (size_t n = range.begin(), N = range.end(); n < N; ++n) { const BoolLeafNodeType& maskNode = *mMaskNodes[n]; if (maskNode.isEmpty()) continue; const Coord& ijk = maskNode.origin(); const LeafNodeType* inputNode = inputAcc.probeConstLeaf(ijk); if (inputNode) { LeafNodeType* outputNode = outputAcc.touchLeaf(ijk); for (typename BoolLeafNodeType::ValueOnCIter it = maskNode.cbeginValueOn(); it; ++it) { const Index idx = it.pos(); outputNode->setValueOn(idx, inputNode->getValue(idx)); } } else { const int valueDepth = inputAcc.getValueDepth(ijk); if (valueDepth >= 0) { outputAcc.addTile(TreeType::RootNodeType::LEVEL - valueDepth, ijk, inputAcc.getValue(ijk), true); } } } } private: TreeType const * const mInputTree; BoolLeafNodeType const * 
const * const mMaskNodes;
        TreeTypePtr mOutputTreePtr;
    }; // struct Copy

    TreeType        const * const mTree;
    TreeTypePtr           * const mSegments;
    BoolTreeTypePtr       * const mMasks;
}; // MaskedCopy


////////////////////////////////////////


/// @brief tbb::parallel_for body that records the active voxel count of each
/// segment into a flat array (used below to sort segments by size).
template<typename VolumePtrType>
struct ComputeActiveVoxelCount
{
    ComputeActiveVoxelCount(std::vector<VolumePtrType>& segments, size_t *countArray)
        : mSegments(!segments.empty() ? &segments.front() : nullptr)
        , mCountArray(countArray)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            mCountArray[n] = mSegments[n]->activeVoxelCount();
        }
    }

    VolumePtrType * const mSegments;
    size_t        * const mCountArray;
};


/// @brief Comparator over indices into a voxel-count array; orders indices so
/// that larger counts come first (descending sort).
struct GreaterCount
{
    GreaterCount(const size_t *countArray) : mCountArray(countArray) {}

    inline bool operator() (const size_t& lhs, const size_t& rhs) const {
        return (mCountArray[lhs] > mCountArray[rhs]);
    }

    size_t const * const mCountArray;
};

////////////////////////////////////////


/// @brief Adapter that returns tree-typed results unchanged when the public
/// API was called with a tree; specialized below for Grid inputs.
template<typename TreeType>
struct GridOrTreeConstructor
{
    using TreeTypePtr = typename TreeType::Ptr;
    using BoolTreePtrType = typename TreeType::template ValueConverter<bool>::Type::Ptr;

    static BoolTreePtrType constructMask(const TreeType&, BoolTreePtrType& maskTree) { return maskTree; }
    static TreeTypePtr construct(const TreeType&, TreeTypePtr& tree) { return tree; }
};


/// @brief Grid specialization: wraps result trees in grids that inherit the
/// input grid's transform (and, for construct(), its metadata).
template<typename TreeType>
struct GridOrTreeConstructor<Grid<TreeType> >
{
    using GridType = Grid<TreeType>;
    using GridTypePtr = typename Grid<TreeType>::Ptr;
    using TreeTypePtr = typename TreeType::Ptr;

    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;
    using BoolTreePtrType = typename BoolTreeType::Ptr;
    using BoolGridType = Grid<BoolTreeType>;
    using BoolGridPtrType = typename BoolGridType::Ptr;

    static BoolGridPtrType constructMask(const GridType& grid, BoolTreePtrType& maskTree) {
        BoolGridPtrType maskGrid(BoolGridType::create(maskTree));
        maskGrid->setTransform(grid.transform().copy());
        return
maskGrid; } static GridTypePtr construct(const GridType& grid, TreeTypePtr& maskTree) { GridTypePtr maskGrid(GridType::create(maskTree)); maskGrid->setTransform(grid.transform().copy()); maskGrid->insertMeta(grid); return maskGrid; } }; } // namespace level_set_util_internal //////////////////////////////////////// template <class GridType> inline void sdfToFogVolume(GridType& grid, typename GridType::ValueType cutoffDistance) { using ValueType = typename GridType::ValueType; using TreeType = typename GridType::TreeType; using LeafNodeType = typename TreeType::LeafNodeType; using RootNodeType = typename TreeType::RootNodeType; using NodeChainType = typename RootNodeType::NodeChainType; using InternalNodeType = typename NodeChainType::template Get<1>; ////////// TreeType& tree = grid.tree(); size_t numLeafNodes = 0, numInternalNodes = 0; std::vector<LeafNodeType*> nodes; std::vector<size_t> leafnodeCount; { // Compute the prefix sum of the leafnode count in each internal node. std::vector<InternalNodeType*> internalNodes; tree.getNodes(internalNodes); numInternalNodes = internalNodes.size(); leafnodeCount.push_back(0); for (size_t n = 0; n < numInternalNodes; ++n) { leafnodeCount.push_back(leafnodeCount.back() + internalNodes[n]->leafCount()); } numLeafNodes = leafnodeCount.back(); // Steal all leafnodes (Removes them from the tree and transfers ownership.) 
nodes.reserve(numLeafNodes);
        // Remove the leaf nodes from the tree; 'nodes' now owns them.
        for (size_t n = 0; n < numInternalNodes; ++n) {
            internalNodes[n]->stealNodes(nodes, tree.background(), false);
        }

        // Clamp cutoffDistance to min sdf value
        ValueType minSDFValue = std::numeric_limits<ValueType>::max();

        {
            // Minimum over the internal-node tile values, reduced in parallel.
            level_set_util_internal::FindMinTileValue<InternalNodeType> minOp(internalNodes.data());
            tbb::parallel_reduce(tbb::blocked_range<size_t>(0, internalNodes.size()), minOp);
            minSDFValue = std::min(minSDFValue, minOp.minValue);
        }

        if (minSDFValue > ValueType(0.0)) {
            // No negative tile values: fall back to scanning the voxel data.
            level_set_util_internal::FindMinVoxelValue<LeafNodeType> minOp(nodes.data());
            tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), minOp);
            minSDFValue = std::min(minSDFValue, minOp.minValue);
        }

        // The cutoff is interior (negative); never cut deeper than the SDF reaches.
        cutoffDistance = -std::abs(cutoffDistance);
        cutoffDistance = minSDFValue > cutoffDistance ? minSDFValue : cutoffDistance;
    }

    // Transform voxel values and delete leafnodes that are uniformly zero after the transformation.
    // (Positive values are set to zero with inactive state and negative values are remapped
    // from zero to one with active state.)
    tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()),
        level_set_util_internal::SDFVoxelsToFogVolume<LeafNodeType>(nodes.data(), cutoffDistance));

    // Populate a new tree with the remaining leafnodes
    typename TreeType::Ptr newTree(new TreeType(ValueType(0.0)));

    level_set_util_internal::PopulateTree<TreeType> populate(
        *newTree, nodes.data(), leafnodeCount.data(), 0);
    tbb::parallel_reduce(tbb::blocked_range<size_t>(0, numInternalNodes), populate);

    // Transform tile values (Negative valued tiles are set to 1.0 with active state.)
std::vector<InternalNodeType*> internalNodes; newTree->getNodes(internalNodes); tbb::parallel_for(tbb::blocked_range<size_t>(0, internalNodes.size()), level_set_util_internal::SDFTilesToFogVolume<TreeType, InternalNodeType>( tree, internalNodes.data())); { tree::ValueAccessor<const TreeType> acc(tree); typename TreeType::ValueAllIter it(*newTree); it.setMaxDepth(TreeType::ValueAllIter::LEAF_DEPTH - 2); for ( ; it; ++it) { if (acc.getValue(it.getCoord()) < ValueType(0.0)) { it.setValue(ValueType(1.0)); it.setActiveState(true); } } } // Insert missing root level tiles. (The new tree is constructed from the remaining leafnodes // and will therefore not contain any root level tiles that may exist in the original tree.) { typename TreeType::ValueAllIter it(tree); it.setMaxDepth(TreeType::ValueAllIter::ROOT_DEPTH); for ( ; it; ++it) { if (it.getValue() < ValueType(0.0)) { newTree->addTile(TreeType::ValueAllIter::ROOT_LEVEL, it.getCoord(), ValueType(1.0), true); } } } grid.setTree(newTree); grid.setGridClass(GRID_FOG_VOLUME); } //////////////////////////////////////// template <class GridOrTreeType> inline typename GridOrTreeType::template ValueConverter<bool>::Type::Ptr sdfInteriorMask(const GridOrTreeType& volume, typename GridOrTreeType::ValueType isovalue) { using TreeType = typename TreeAdapter<GridOrTreeType>::TreeType; const TreeType& tree = TreeAdapter<GridOrTreeType>::tree(volume); using BoolTreePtrType = typename TreeType::template ValueConverter<bool>::Type::Ptr; BoolTreePtrType mask = level_set_util_internal::computeInteriorMask(tree, isovalue); return level_set_util_internal::GridOrTreeConstructor<GridOrTreeType>::constructMask( volume, mask); } template<typename GridOrTreeType> inline typename GridOrTreeType::template ValueConverter<bool>::Type::Ptr extractEnclosedRegion(const GridOrTreeType& volume, typename GridOrTreeType::ValueType isovalue, const typename TreeAdapter<GridOrTreeType>::TreeType::template ValueConverter<bool>::Type* fillMask) { using 
TreeType = typename TreeAdapter<GridOrTreeType>::TreeType; const TreeType& tree = TreeAdapter<GridOrTreeType>::tree(volume); using CharTreePtrType = typename TreeType::template ValueConverter<char>::Type::Ptr; CharTreePtrType regionMask = level_set_util_internal::computeEnclosedRegionMask( tree, isovalue, fillMask); using BoolTreePtrType = typename TreeType::template ValueConverter<bool>::Type::Ptr; BoolTreePtrType mask = level_set_util_internal::computeInteriorMask(*regionMask, 0); return level_set_util_internal::GridOrTreeConstructor<GridOrTreeType>::constructMask( volume, mask); } //////////////////////////////////////// template<typename GridOrTreeType> inline typename GridOrTreeType::template ValueConverter<bool>::Type::Ptr extractIsosurfaceMask(const GridOrTreeType& volume, typename GridOrTreeType::ValueType isovalue) { using TreeType = typename TreeAdapter<GridOrTreeType>::TreeType; const TreeType& tree = TreeAdapter<GridOrTreeType>::tree(volume); std::vector<const typename TreeType::LeafNodeType*> nodes; tree.getNodes(nodes); using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type; typename BoolTreeType::Ptr mask(new BoolTreeType(false)); level_set_util_internal::MaskIsovalueCrossingVoxels<TreeType> op(tree, nodes, *mask, isovalue); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), op); return level_set_util_internal::GridOrTreeConstructor<GridOrTreeType>::constructMask( volume, mask); } //////////////////////////////////////// template<typename GridOrTreeType> inline void extractActiveVoxelSegmentMasks(const GridOrTreeType& volume, std::vector<typename GridOrTreeType::template ValueConverter<bool>::Type::Ptr>& masks) { using TreeType = typename TreeAdapter<GridOrTreeType>::TreeType; using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type; using BoolTreePtrType = typename BoolTreeType::Ptr; using BoolLeafNodeType = typename BoolTreeType::LeafNodeType; using NodeMaskSegmentType = 
level_set_util_internal::NodeMaskSegment<BoolLeafNodeType>; using NodeMaskSegmentPtrType = typename NodeMaskSegmentType::Ptr; using NodeMaskSegmentPtrVector = typename std::vector<NodeMaskSegmentPtrType>; using NodeMaskSegmentRawPtrVector = typename std::vector<NodeMaskSegmentType*>; ///// const TreeType& tree = TreeAdapter<GridOrTreeType>::tree(volume); BoolTreeType topologyMask(tree, false, TopologyCopy()); // prune out any inactive leaf nodes or inactive tiles tools::pruneInactive(topologyMask); if (topologyMask.hasActiveTiles()) { topologyMask.voxelizeActiveTiles(); } std::vector<BoolLeafNodeType*> leafnodes; topologyMask.getNodes(leafnodes); if (leafnodes.empty()) return; // 1. Split node masks into disjoint segments // Note: The LeafNode origin coord is modified to record the 'leafnodes' array offset. std::unique_ptr<NodeMaskSegmentPtrVector[]> nodeSegmentArray( new NodeMaskSegmentPtrVector[leafnodes.size()]); tbb::parallel_for(tbb::blocked_range<size_t>(0, leafnodes.size()), level_set_util_internal::SegmentNodeMask<BoolLeafNodeType>( leafnodes, nodeSegmentArray.get())); // 2. Compute segment connectivity tbb::parallel_for(tbb::blocked_range<size_t>(0, leafnodes.size()), level_set_util_internal::ConnectNodeMaskSegments<BoolTreeType, BoolLeafNodeType>( topologyMask, nodeSegmentArray.get())); topologyMask.clear(); size_t nodeSegmentCount = 0; for (size_t n = 0, N = leafnodes.size(); n < N; ++n) { nodeSegmentCount += nodeSegmentArray[n].size(); } // 3. 
Group connected segments std::deque<NodeMaskSegmentRawPtrVector> nodeSegmentGroups; NodeMaskSegmentType* nextSegment = nodeSegmentArray[0][0].get(); while (nextSegment) { nodeSegmentGroups.push_back(NodeMaskSegmentRawPtrVector()); std::vector<NodeMaskSegmentType*>& segmentGroup = nodeSegmentGroups.back(); segmentGroup.reserve(nodeSegmentCount); std::deque<NodeMaskSegmentType*> segmentQueue; segmentQueue.push_back(nextSegment); nextSegment = nullptr; while (!segmentQueue.empty()) { NodeMaskSegmentType* segment = segmentQueue.back(); segmentQueue.pop_back(); if (segment->visited) continue; segment->visited = true; segmentGroup.push_back(segment); // queue connected segments std::vector<NodeMaskSegmentType*>& connections = segment->connections; for (size_t n = 0, N = connections.size(); n < N; ++n) { if (!connections[n]->visited) segmentQueue.push_back(connections[n]); } } // find first unvisited segment for (size_t n = 0, N = leafnodes.size(); n < N; ++n) { NodeMaskSegmentPtrVector& nodeSegments = nodeSegmentArray[n]; for (size_t i = 0, I = nodeSegments.size(); i < I; ++i) { if (!nodeSegments[i]->visited) nextSegment = nodeSegments[i].get(); } } } // 4. Mask segment groups if (nodeSegmentGroups.size() == 1) { BoolTreePtrType mask(new BoolTreeType(tree, false, TopologyCopy())); tools::pruneInactive(*mask); if (mask->hasActiveTiles()) { mask->voxelizeActiveTiles(); } masks.push_back( level_set_util_internal::GridOrTreeConstructor<GridOrTreeType>::constructMask( volume, mask)); } else if (nodeSegmentGroups.size() > 1) { for (size_t n = 0, N = nodeSegmentGroups.size(); n < N; ++n) { NodeMaskSegmentRawPtrVector& segmentGroup = nodeSegmentGroups[n]; level_set_util_internal::MaskSegmentGroup<BoolTreeType> op(segmentGroup); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, segmentGroup.size()), op); masks.push_back( level_set_util_internal::GridOrTreeConstructor<GridOrTreeType>::constructMask( volume, op.mask())); } } // 5. 
Sort segments in descending order based on the active voxel count. if (masks.size() > 1) { const size_t segmentCount = masks.size(); std::unique_ptr<size_t[]> segmentOrderArray(new size_t[segmentCount]); std::unique_ptr<size_t[]> voxelCountArray(new size_t[segmentCount]); for (size_t n = 0; n < segmentCount; ++n) { segmentOrderArray[n] = n; } tbb::parallel_for(tbb::blocked_range<size_t>(0, segmentCount), level_set_util_internal::ComputeActiveVoxelCount<BoolTreePtrType>( masks, voxelCountArray.get())); size_t *begin = segmentOrderArray.get(); tbb::parallel_sort(begin, begin + masks.size(), level_set_util_internal::GreaterCount( voxelCountArray.get())); std::vector<BoolTreePtrType> orderedMasks; orderedMasks.reserve(masks.size()); for (size_t n = 0; n < segmentCount; ++n) { orderedMasks.push_back(masks[segmentOrderArray[n]]); } masks.swap(orderedMasks); } } // extractActiveVoxelSegmentMasks() template<typename GridOrTreeType> inline void segmentActiveVoxels(const GridOrTreeType& volume, std::vector<typename GridOrTreeType::Ptr>& segments) { using TreeType = typename TreeAdapter<GridOrTreeType>::TreeType; using TreePtrType = typename TreeType::Ptr; using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type; using BoolTreePtrType = typename BoolTreeType::Ptr; const TreeType& inputTree = TreeAdapter<GridOrTreeType>::tree(volume); // 1. Segment active topology mask std::vector<BoolTreePtrType> maskSegmentArray; extractActiveVoxelSegmentMasks(inputTree, maskSegmentArray); // 2. 
Export segments const size_t numSegments = std::max(size_t(1), maskSegmentArray.size()); std::vector<TreePtrType> outputSegmentArray(numSegments); if (maskSegmentArray.empty()) { // if no active voxels in the original volume, copy just the background // value of the input tree outputSegmentArray[0] = TreePtrType(new TreeType(inputTree.background())); } else if (numSegments == 1) { // if there's only one segment with active voxels, copy the input tree TreePtrType segment(new TreeType(inputTree)); // however, if the leaf counts do not match due to the pruning of inactive leaf // nodes in the mask, do a topology intersection to drop these inactive leafs if (segment->leafCount() != inputTree.leafCount()) { segment->topologyIntersection(*maskSegmentArray[0]); } outputSegmentArray[0] = segment; } else { const tbb::blocked_range<size_t> segmentRange(0, numSegments); tbb::parallel_for(segmentRange, level_set_util_internal::MaskedCopy<TreeType>(inputTree, outputSegmentArray, maskSegmentArray)); } for (auto& segment : outputSegmentArray) { segments.push_back( level_set_util_internal::GridOrTreeConstructor<GridOrTreeType>::construct( volume, segment)); } } template<typename GridOrTreeType> inline void segmentSDF(const GridOrTreeType& volume, std::vector<typename GridOrTreeType::Ptr>& segments) { using TreeType = typename TreeAdapter<GridOrTreeType>::TreeType; using TreePtrType = typename TreeType::Ptr; using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type; using BoolTreePtrType = typename BoolTreeType::Ptr; const TreeType& inputTree = TreeAdapter<GridOrTreeType>::tree(volume); // 1. Mask zero crossing voxels BoolTreePtrType mask = extractIsosurfaceMask(inputTree, lsutilGridZero<GridOrTreeType>()); // 2. 
Segment the zero crossing mask std::vector<BoolTreePtrType> maskSegmentArray; extractActiveVoxelSegmentMasks(*mask, maskSegmentArray); const size_t numSegments = std::max(size_t(1), maskSegmentArray.size()); std::vector<TreePtrType> outputSegmentArray(numSegments); if (maskSegmentArray.empty()) { // if no active voxels in the original volume, copy just the background // value of the input tree outputSegmentArray[0] = TreePtrType(new TreeType(inputTree.background())); } else { const tbb::blocked_range<size_t> segmentRange(0, numSegments); // 3. Expand zero crossing mask to capture sdf narrow band tbb::parallel_for(segmentRange, level_set_util_internal::ExpandNarrowbandMask<TreeType>(inputTree, maskSegmentArray)); // 4. Export sdf segments tbb::parallel_for(segmentRange, level_set_util_internal::MaskedCopy<TreeType>( inputTree, outputSegmentArray, maskSegmentArray)); tbb::parallel_for(segmentRange, level_set_util_internal::FloodFillSign<TreeType>(inputTree, outputSegmentArray)); } for (auto& segment : outputSegmentArray) { segments.push_back( level_set_util_internal::GridOrTreeConstructor<GridOrTreeType>::construct( volume, segment)); } } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_LEVEL_SET_UTIL_HAS_BEEN_INCLUDED
94,368
C
35.337697
100
0.589151
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Filter.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @author Ken Museth /// /// @file tools/Filter.h /// /// @brief Filtering of VDB volumes. Note that only the values in the /// grid are changed, not its topology! All operations can optionally /// be masked with another grid that acts as an alpha-mask. #ifndef OPENVDB_TOOLS_FILTER_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_FILTER_HAS_BEEN_INCLUDED #include <tbb/parallel_for.h> #include <openvdb/Types.h> #include <openvdb/math/Math.h> #include <openvdb/math/Stencils.h> #include <openvdb/math/Transform.h> #include <openvdb/tree/LeafManager.h> #include <openvdb/util/NullInterrupter.h> #include <openvdb/Grid.h> #include "Interpolation.h" #include <algorithm> // for std::max() #include <functional> #include <type_traits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Volume filtering (e.g., diffusion) with optional alpha masking /// /// @note Only the values in the grid are changed, not its topology! template<typename GridT, typename MaskT = typename GridT::template ValueConverter<float>::Type, typename InterruptT = util::NullInterrupter> class Filter { public: using GridType = GridT; using MaskType = MaskT; using TreeType = typename GridType::TreeType; using LeafType = typename TreeType::LeafNodeType; using ValueType = typename GridType::ValueType; using AlphaType = typename MaskType::ValueType; using LeafManagerType = typename tree::LeafManager<TreeType>; using RangeType = typename LeafManagerType::LeafRange; using BufferType = typename LeafManagerType::BufferType; static_assert(std::is_floating_point<AlphaType>::value, "openvdb::tools::Filter requires a mask grid with floating-point values"); /// Constructor /// @param grid Grid to be filtered. /// @param interrupt Optional interrupter. 
Filter(GridT& grid, InterruptT* interrupt = nullptr) : mGrid(&grid) , mTask(nullptr) , mInterrupter(interrupt) , mMask(nullptr) , mGrainSize(1) , mMinMask(0) , mMaxMask(1) , mInvertMask(false) { } /// @brief Shallow copy constructor called by tbb::parallel_for() /// threads during filtering. /// @param other The other Filter from which to copy. Filter(const Filter& other) : mGrid(other.mGrid) , mTask(other.mTask) , mInterrupter(other.mInterrupter) , mMask(other.mMask) , mGrainSize(other.mGrainSize) , mMinMask(other.mMinMask) , mMaxMask(other.mMaxMask) , mInvertMask(other.mInvertMask) { } /// @return the grain-size used for multi-threading int getGrainSize() const { return mGrainSize; } /// @brief Set the grain-size used for multi-threading. /// @note A grain size of 0 or less disables multi-threading! void setGrainSize(int grainsize) { mGrainSize = grainsize; } /// @brief Return the minimum value of the mask to be used for the /// derivation of a smooth alpha value. AlphaType minMask() const { return mMinMask; } /// @brief Return the maximum value of the mask to be used for the /// derivation of a smooth alpha value. AlphaType maxMask() const { return mMaxMask; } /// @brief Define the range for the (optional) scalar mask. /// @param min Minimum value of the range. /// @param max Maximum value of the range. /// @details Mask values outside the range are clamped to zero or one, and /// values inside the range map smoothly to 0->1 (unless the mask is inverted). /// @throw ValueError if @a min is not smaller than @a max. void setMaskRange(AlphaType min, AlphaType max) { if (!(min < max)) OPENVDB_THROW(ValueError, "Invalid mask range (expects min < max)"); mMinMask = min; mMaxMask = max; } /// @brief Return true if the mask is inverted, i.e. min->max in the /// original mask maps to 1->0 in the inverted alpha mask. bool isMaskInverted() const { return mInvertMask; } /// @brief Invert the optional mask, i.e. 
min->max in the original /// mask maps to 1->0 in the inverted alpha mask. void invertMask(bool invert=true) { mInvertMask = invert; } /// @brief One iteration of a fast separable mean-value (i.e. box) filter. /// @param width The width of the mean-value filter is 2*width+1 voxels. /// @param iterations Number of times the mean-value filter is applied. /// @param mask Optional alpha mask. void mean(int width = 1, int iterations = 1, const MaskType* mask = nullptr); /// @brief One iteration of a fast separable Gaussian filter. /// /// @note This is approximated as 4 iterations of a separable mean filter /// which typically leads an approximation that's better than 95%! /// @param width The width of the mean-value filter is 2*width+1 voxels. /// @param iterations Number of times the mean-value filter is applied. /// @param mask Optional alpha mask. void gaussian(int width = 1, int iterations = 1, const MaskType* mask = nullptr); /// @brief One iteration of a median-value filter /// /// @note This filter is not separable and is hence relatively slow! /// @param width The width of the mean-value filter is 2*width+1 voxels. /// @param iterations Number of times the mean-value filter is applied. /// @param mask Optional alpha mask. void median(int width = 1, int iterations = 1, const MaskType* mask = nullptr); /// Offsets (i.e. adds) a constant value to all active voxels. /// @param offset Offset in the same units as the grid. /// @param mask Optional alpha mask. void offset(ValueType offset, const MaskType* mask = nullptr); /// @brief Used internally by tbb::parallel_for() /// @param range Range of LeafNodes over which to multi-thread. /// /// @warning Never call this method directly! 
void operator()(const RangeType& range) const { if (mTask) mTask(const_cast<Filter*>(this), range); else OPENVDB_THROW(ValueError, "task is undefined - call median(), mean(), etc."); } private: using LeafT = typename TreeType::LeafNodeType; using VoxelIterT = typename LeafT::ValueOnIter; using VoxelCIterT = typename LeafT::ValueOnCIter; using BufferT = typename tree::LeafManager<TreeType>::BufferType; using LeafIterT = typename RangeType::Iterator; using AlphaMaskT = tools::AlphaMask<GridT, MaskT>; void cook(LeafManagerType& leafs); template<size_t Axis> struct Avg { Avg(const GridT* grid, Int32 w): acc(grid->tree()), width(w), frac(1.f/float(2*w+1)) {} inline ValueType operator()(Coord xyz); typename GridT::ConstAccessor acc; const Int32 width; const float frac; }; // Private filter methods called by tbb::parallel_for threads template <typename AvgT> void doBox( const RangeType& r, Int32 w); void doBoxX(const RangeType& r, Int32 w) { this->doBox<Avg<0> >(r,w); } void doBoxY(const RangeType& r, Int32 w) { this->doBox<Avg<1> >(r,w); } void doBoxZ(const RangeType& r, Int32 w) { this->doBox<Avg<2> >(r,w); } void doMedian(const RangeType&, int); void doOffset(const RangeType&, ValueType); /// @return true if the process was interrupted bool wasInterrupted(); GridType* mGrid; typename std::function<void (Filter*, const RangeType&)> mTask; InterruptT* mInterrupter; const MaskType* mMask; int mGrainSize; AlphaType mMinMask, mMaxMask; bool mInvertMask; }; // end of Filter class //////////////////////////////////////// namespace filter_internal { // Helper function for Filter::Avg::operator() template<typename T> static inline void accum(T& sum, T addend) { sum += addend; } // Overload for bool ValueType inline void accum(bool& sum, bool addend) { sum = sum || addend; } } template<typename GridT, typename MaskT, typename InterruptT> template<size_t Axis> inline typename GridT::ValueType Filter<GridT, MaskT, InterruptT>::Avg<Axis>::operator()(Coord xyz) { ValueType sum = 
zeroVal<ValueType>(); Int32 &i = xyz[Axis], j = i + width; for (i -= width; i <= j; ++i) filter_internal::accum(sum, acc.getValue(xyz)); OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN ValueType value = static_cast<ValueType>(sum * frac); OPENVDB_NO_TYPE_CONVERSION_WARNING_END return value; } //////////////////////////////////////// template<typename GridT, typename MaskT, typename InterruptT> inline void Filter<GridT, MaskT, InterruptT>::mean(int width, int iterations, const MaskType* mask) { mMask = mask; if (mInterrupter) mInterrupter->start("Applying mean filter"); const int w = std::max(1, width); LeafManagerType leafs(mGrid->tree(), 1, mGrainSize==0); for (int i=0; i<iterations && !this->wasInterrupted(); ++i) { mTask = std::bind(&Filter::doBoxX, std::placeholders::_1, std::placeholders::_2, w); this->cook(leafs); // note that the order of the YZ passes are flipped to maintain backwards-compatibility // with an indexing typo in the original logic mTask = std::bind(&Filter::doBoxZ, std::placeholders::_1, std::placeholders::_2, w); this->cook(leafs); mTask = std::bind(&Filter::doBoxY, std::placeholders::_1, std::placeholders::_2, w); this->cook(leafs); } if (mInterrupter) mInterrupter->end(); } template<typename GridT, typename MaskT, typename InterruptT> inline void Filter<GridT, MaskT, InterruptT>::gaussian(int width, int iterations, const MaskType* mask) { mMask = mask; if (mInterrupter) mInterrupter->start("Applying Gaussian filter"); const int w = std::max(1, width); LeafManagerType leafs(mGrid->tree(), 1, mGrainSize==0); for (int i=0; i<iterations; ++i) { for (int n=0; n<4 && !this->wasInterrupted(); ++n) { mTask = std::bind(&Filter::doBoxX, std::placeholders::_1, std::placeholders::_2, w); this->cook(leafs); // note that the order of the YZ passes are flipped to maintain backwards-compatibility // with an indexing typo in the original logic mTask = std::bind(&Filter::doBoxZ, std::placeholders::_1, std::placeholders::_2, w); this->cook(leafs); mTask = 
std::bind(&Filter::doBoxY, std::placeholders::_1, std::placeholders::_2, w); this->cook(leafs); } } if (mInterrupter) mInterrupter->end(); } template<typename GridT, typename MaskT, typename InterruptT> inline void Filter<GridT, MaskT, InterruptT>::median(int width, int iterations, const MaskType* mask) { mMask = mask; if (mInterrupter) mInterrupter->start("Applying median filter"); LeafManagerType leafs(mGrid->tree(), 1, mGrainSize==0); mTask = std::bind(&Filter::doMedian, std::placeholders::_1, std::placeholders::_2, std::max(1, width)); for (int i=0; i<iterations && !this->wasInterrupted(); ++i) this->cook(leafs); if (mInterrupter) mInterrupter->end(); } template<typename GridT, typename MaskT, typename InterruptT> inline void Filter<GridT, MaskT, InterruptT>::offset(ValueType value, const MaskType* mask) { mMask = mask; if (mInterrupter) mInterrupter->start("Applying offset"); LeafManagerType leafs(mGrid->tree(), 0, mGrainSize==0); mTask = std::bind(&Filter::doOffset, std::placeholders::_1, std::placeholders::_2, value); this->cook(leafs); if (mInterrupter) mInterrupter->end(); } //////////////////////////////////////// /// Private method to perform the task (serial or threaded) and /// subsequently swap the leaf buffers. 
template<typename GridT, typename MaskT, typename InterruptT> inline void Filter<GridT, MaskT, InterruptT>::cook(LeafManagerType& leafs) { if (mGrainSize>0) { tbb::parallel_for(leafs.leafRange(mGrainSize), *this); } else { (*this)(leafs.leafRange()); } leafs.swapLeafBuffer(1, mGrainSize==0); } /// One dimensional convolution of a separable box filter template<typename GridT, typename MaskT, typename InterruptT> template <typename AvgT> inline void Filter<GridT, MaskT, InterruptT>::doBox(const RangeType& range, Int32 w) { this->wasInterrupted(); AvgT avg(mGrid, w); if (mMask) { typename AlphaMaskT::FloatType a, b; AlphaMaskT alpha(*mGrid, *mMask, mMinMask, mMaxMask, mInvertMask); for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { BufferT& buffer = leafIter.buffer(1); for (VoxelCIterT iter = leafIter->cbeginValueOn(); iter; ++iter) { const Coord xyz = iter.getCoord(); if (alpha(xyz, a, b)) { OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN const ValueType value(b*(*iter) + a*avg(xyz)); OPENVDB_NO_TYPE_CONVERSION_WARNING_END buffer.setValue(iter.pos(), value); } } } } else { for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { BufferT& buffer = leafIter.buffer(1); for (VoxelCIterT iter = leafIter->cbeginValueOn(); iter; ++iter) { buffer.setValue(iter.pos(), avg(iter.getCoord())); } } } } /// Performs simple but slow median-value diffusion template<typename GridT, typename MaskT, typename InterruptT> inline void Filter<GridT, MaskT, InterruptT>::doMedian(const RangeType& range, int width) { this->wasInterrupted(); typename math::DenseStencil<GridType> stencil(*mGrid, width);//creates local cache! 
if (mMask) { typename AlphaMaskT::FloatType a, b; AlphaMaskT alpha(*mGrid, *mMask, mMinMask, mMaxMask, mInvertMask); for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { BufferT& buffer = leafIter.buffer(1); for (VoxelCIterT iter = leafIter->cbeginValueOn(); iter; ++iter) { if (alpha(iter.getCoord(), a, b)) { stencil.moveTo(iter); OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN ValueType value(b*(*iter) + a*stencil.median()); OPENVDB_NO_TYPE_CONVERSION_WARNING_END buffer.setValue(iter.pos(), value); } } } } else { for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { BufferT& buffer = leafIter.buffer(1); for (VoxelCIterT iter = leafIter->cbeginValueOn(); iter; ++iter) { stencil.moveTo(iter); buffer.setValue(iter.pos(), stencil.median()); } } } } /// Offsets the values by a constant template<typename GridT, typename MaskT, typename InterruptT> inline void Filter<GridT, MaskT, InterruptT>::doOffset(const RangeType& range, ValueType offset) { this->wasInterrupted(); if (mMask) { typename AlphaMaskT::FloatType a, b; AlphaMaskT alpha(*mGrid, *mMask, mMinMask, mMaxMask, mInvertMask); for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { for (VoxelIterT iter = leafIter->beginValueOn(); iter; ++iter) { if (alpha(iter.getCoord(), a, b)) { OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN ValueType value(*iter + a*offset); OPENVDB_NO_TYPE_CONVERSION_WARNING_END iter.setValue(value); } } } } else { for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { for (VoxelIterT iter = leafIter->beginValueOn(); iter; ++iter) { iter.setValue(*iter + offset); } } } } template<typename GridT, typename MaskT, typename InterruptT> inline bool Filter<GridT, MaskT, InterruptT>::wasInterrupted() { if (util::wasInterrupted(mInterrupter)) { tbb::task::self().cancel_group_execution(); return true; } return false; } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_FILTER_HAS_BEEN_INCLUDED
16,322
C
35.112832
99
0.641037
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetMorph.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @author Ken Museth /// /// @file tools/LevelSetMorph.h /// /// @brief Shape morphology of level sets. Morphing from a source /// narrow-band level sets to a target narrow-band level set. #ifndef OPENVDB_TOOLS_LEVEL_SET_MORPH_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_LEVEL_SET_MORPH_HAS_BEEN_INCLUDED #include "LevelSetTracker.h" #include "Interpolation.h" // for BoxSampler, etc. #include <openvdb/math/FiniteDifference.h> #include <functional> #include <limits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Shape morphology of level sets. Morphing from a source /// narrow-band level sets to a target narrow-band level set. /// /// @details /// The @c InterruptType template argument below refers to any class /// with the following interface: /// @code /// class Interrupter { /// ... /// public: /// void start(const char* name = nullptr) // called when computations begin /// void end() // called when computations end /// bool wasInterrupted(int percent=-1) // return true to break computation /// }; /// @endcode /// /// @note If no template argument is provided for this InterruptType, /// the util::NullInterrupter is used, which implies that all interrupter /// calls are no-ops (i.e., they incur no computational overhead). 
template<typename GridT, typename InterruptT = util::NullInterrupter> class LevelSetMorphing { public: using GridType = GridT; using TreeType = typename GridT::TreeType; using TrackerT = LevelSetTracker<GridT, InterruptT>; using LeafRange = typename TrackerT::LeafRange; using LeafType = typename TrackerT::LeafType; using BufferType = typename TrackerT::BufferType; using ValueType = typename TrackerT::ValueType; /// Main constructor LevelSetMorphing(GridT& sourceGrid, const GridT& targetGrid, InterruptT* interrupt = nullptr) : mTracker(sourceGrid, interrupt) , mTarget(&targetGrid) , mMask(nullptr) , mSpatialScheme(math::HJWENO5_BIAS) , mTemporalScheme(math::TVD_RK2) , mMinMask(0) , mDeltaMask(1) , mInvertMask(false) { } virtual ~LevelSetMorphing() {} /// Redefine the target level set void setTarget(const GridT& targetGrid) { mTarget = &targetGrid; } /// Define the alpha mask void setAlphaMask(const GridT& maskGrid) { mMask = &maskGrid; } /// Return the spatial finite-difference scheme math::BiasedGradientScheme getSpatialScheme() const { return mSpatialScheme; } /// Set the spatial finite-difference scheme void setSpatialScheme(math::BiasedGradientScheme scheme) { mSpatialScheme = scheme; } /// Return the temporal integration scheme math::TemporalIntegrationScheme getTemporalScheme() const { return mTemporalScheme; } /// Set the temporal integration scheme void setTemporalScheme(math::TemporalIntegrationScheme scheme) { mTemporalScheme = scheme; } /// Return the spatial finite-difference scheme math::BiasedGradientScheme getTrackerSpatialScheme() const { return mTracker.getSpatialScheme(); } /// Set the spatial finite-difference scheme void setTrackerSpatialScheme(math::BiasedGradientScheme scheme) { mTracker.setSpatialScheme(scheme); } /// Return the temporal integration scheme math::TemporalIntegrationScheme getTrackerTemporalScheme() const { return mTracker.getTemporalScheme(); } /// Set the temporal integration scheme void 
setTrackerTemporalScheme(math::TemporalIntegrationScheme scheme) { mTracker.setTemporalScheme(scheme); } /// Return the number of normalizations performed per track or normalize call. int getNormCount() const { return mTracker.getNormCount(); } /// Set the number of normalizations performed per track or normalize call. void setNormCount(int n) { mTracker.setNormCount(n); } /// Return the grain size used for multithreading int getGrainSize() const { return mTracker.getGrainSize(); } /// @brief Set the grain size used for multithreading. /// @note A grain size of 0 or less disables multithreading! void setGrainSize(int grainsize) { mTracker.setGrainSize(grainsize); } /// @brief Return the minimum value of the mask to be used for the /// derivation of a smooth alpha value. ValueType minMask() const { return mMinMask; } /// @brief Return the maximum value of the mask to be used for the /// derivation of a smooth alpha value. ValueType maxMask() const { return mDeltaMask + mMinMask; } /// @brief Define the range for the (optional) scalar mask. /// @param min Minimum value of the range. /// @param max Maximum value of the range. /// @details Mask values outside the range maps to alpha values of /// respectfully zero and one, and values inside the range maps /// smoothly to 0->1 (unless of course the mask is inverted). /// @throw ValueError if @a min is not smaller than @a max. void setMaskRange(ValueType min, ValueType max) { if (!(min < max)) OPENVDB_THROW(ValueError, "Invalid mask range (expects min < max)"); mMinMask = min; mDeltaMask = max-min; } /// @brief Return true if the mask is inverted, i.e. min->max in the /// original mask maps to 1->0 in the inverted alpha mask. bool isMaskInverted() const { return mInvertMask; } /// @brief Invert the optional mask, i.e. min->max in the original /// mask maps to 1->0 in the inverted alpha mask. 
void invertMask(bool invert=true) { mInvertMask = invert; } /// @brief Advect the level set from its current time, @a time0, to its /// final time, @a time1. If @a time0 > @a time1, perform backward advection. /// /// @return the number of CFL iterations used to advect from @a time0 to @a time1 size_t advect(ValueType time0, ValueType time1); private: // disallow copy construction and copy by assignment! LevelSetMorphing(const LevelSetMorphing&);// not implemented LevelSetMorphing& operator=(const LevelSetMorphing&);// not implemented template<math::BiasedGradientScheme SpatialScheme> size_t advect1(ValueType time0, ValueType time1); template<math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme> size_t advect2(ValueType time0, ValueType time1); template<math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme, typename MapType> size_t advect3(ValueType time0, ValueType time1); TrackerT mTracker; const GridT *mTarget, *mMask; math::BiasedGradientScheme mSpatialScheme; math::TemporalIntegrationScheme mTemporalScheme; ValueType mMinMask, mDeltaMask; bool mInvertMask; // This templated private class implements all the level set magic. template<typename MapT, math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme> struct Morph { /// Main constructor Morph(LevelSetMorphing<GridT, InterruptT>& parent); /// Shallow copy constructor called by tbb::parallel_for() threads Morph(const Morph& other); /// Shallow copy constructor called by tbb::parallel_reduce() threads Morph(Morph& other, tbb::split); /// destructor virtual ~Morph() {} /// Advect the level set from its current time, time0, to its final time, time1. 
/// @return number of CFL iterations size_t advect(ValueType time0, ValueType time1); /// Used internally by tbb::parallel_for() void operator()(const LeafRange& r) const { if (mTask) mTask(const_cast<Morph*>(this), r); else OPENVDB_THROW(ValueError, "task is undefined - don\'t call this method directly"); } /// Used internally by tbb::parallel_reduce() void operator()(const LeafRange& r) { if (mTask) mTask(this, r); else OPENVDB_THROW(ValueError, "task is undefined - don\'t call this method directly"); } /// This is only called by tbb::parallel_reduce() threads void join(const Morph& other) { mMaxAbsS = math::Max(mMaxAbsS, other.mMaxAbsS); } /// Enum to define the type of multithreading enum ThreadingMode { PARALLEL_FOR, PARALLEL_REDUCE }; // for internal use // method calling tbb void cook(ThreadingMode mode, size_t swapBuffer = 0); /// Sample field and return the CFT time step typename GridT::ValueType sampleSpeed(ValueType time0, ValueType time1, Index speedBuffer); void sampleXformedSpeed(const LeafRange& r, Index speedBuffer); void sampleAlignedSpeed(const LeafRange& r, Index speedBuffer); // Convex combination of Phi and a forward Euler advection steps: // Phi(result) = alpha * Phi(phi) + (1-alpha) * (Phi(0) - dt * Speed(speed)*|Grad[Phi(0)]|); template <int Nominator, int Denominator> void euler(const LeafRange&, ValueType, Index, Index, Index); inline void euler01(const LeafRange& r, ValueType t, Index s) {this->euler<0,1>(r,t,0,1,s);} inline void euler12(const LeafRange& r, ValueType t) {this->euler<1,2>(r, t, 1, 1, 2);} inline void euler34(const LeafRange& r, ValueType t) {this->euler<3,4>(r, t, 1, 2, 3);} inline void euler13(const LeafRange& r, ValueType t) {this->euler<1,3>(r, t, 1, 2, 3);} using FuncType = typename std::function<void (Morph*, const LeafRange&)>; LevelSetMorphing* mParent; ValueType mMinAbsS, mMaxAbsS; const MapT* mMap; FuncType mTask; }; // end of private Morph struct };//end of LevelSetMorphing template<typename GridT, typename 
InterruptT> inline size_t LevelSetMorphing<GridT, InterruptT>::advect(ValueType time0, ValueType time1) { switch (mSpatialScheme) { case math::FIRST_BIAS: return this->advect1<math::FIRST_BIAS >(time0, time1); //case math::SECOND_BIAS: //return this->advect1<math::SECOND_BIAS >(time0, time1); //case math::THIRD_BIAS: //return this->advect1<math::THIRD_BIAS >(time0, time1); //case math::WENO5_BIAS: //return this->advect1<math::WENO5_BIAS >(time0, time1); case math::HJWENO5_BIAS: return this->advect1<math::HJWENO5_BIAS>(time0, time1); case math::SECOND_BIAS: case math::THIRD_BIAS: case math::WENO5_BIAS: case math::UNKNOWN_BIAS: default: OPENVDB_THROW(ValueError, "Spatial difference scheme not supported!"); } return 0; } template<typename GridT, typename InterruptT> template<math::BiasedGradientScheme SpatialScheme> inline size_t LevelSetMorphing<GridT, InterruptT>::advect1(ValueType time0, ValueType time1) { switch (mTemporalScheme) { case math::TVD_RK1: return this->advect2<SpatialScheme, math::TVD_RK1>(time0, time1); case math::TVD_RK2: return this->advect2<SpatialScheme, math::TVD_RK2>(time0, time1); case math::TVD_RK3: return this->advect2<SpatialScheme, math::TVD_RK3>(time0, time1); case math::UNKNOWN_TIS: default: OPENVDB_THROW(ValueError, "Temporal integration scheme not supported!"); } return 0; } template<typename GridT, typename InterruptT> template<math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme> inline size_t LevelSetMorphing<GridT, InterruptT>::advect2(ValueType time0, ValueType time1) { const math::Transform& trans = mTracker.grid().transform(); if (trans.mapType() == math::UniformScaleMap::mapType()) { return this->advect3<SpatialScheme, TemporalScheme, math::UniformScaleMap>(time0, time1); } else if (trans.mapType() == math::UniformScaleTranslateMap::mapType()) { return this->advect3<SpatialScheme, TemporalScheme, math::UniformScaleTranslateMap>( time0, time1); } else if (trans.mapType() == 
math::UnitaryMap::mapType()) { return this->advect3<SpatialScheme, TemporalScheme, math::UnitaryMap >(time0, time1); } else if (trans.mapType() == math::TranslationMap::mapType()) { return this->advect3<SpatialScheme, TemporalScheme, math::TranslationMap>(time0, time1); } else { OPENVDB_THROW(ValueError, "MapType not supported!"); } return 0; } template<typename GridT, typename InterruptT> template<math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme, typename MapT> inline size_t LevelSetMorphing<GridT, InterruptT>::advect3(ValueType time0, ValueType time1) { Morph<MapT, SpatialScheme, TemporalScheme> tmp(*this); return tmp.advect(time0, time1); } /////////////////////////////////////////////////////////////////////// template<typename GridT, typename InterruptT> template <typename MapT, math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme> inline LevelSetMorphing<GridT, InterruptT>:: Morph<MapT, SpatialScheme, TemporalScheme>:: Morph(LevelSetMorphing<GridT, InterruptT>& parent) : mParent(&parent) , mMinAbsS(ValueType(1e-6)) , mMap(parent.mTracker.grid().transform().template constMap<MapT>().get()) , mTask(nullptr) { } template<typename GridT, typename InterruptT> template <typename MapT, math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme> inline LevelSetMorphing<GridT, InterruptT>:: Morph<MapT, SpatialScheme, TemporalScheme>:: Morph(const Morph& other) : mParent(other.mParent) , mMinAbsS(other.mMinAbsS) , mMaxAbsS(other.mMaxAbsS) , mMap(other.mMap) , mTask(other.mTask) { } template<typename GridT, typename InterruptT> template <typename MapT, math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme> inline LevelSetMorphing<GridT, InterruptT>:: Morph<MapT, SpatialScheme, TemporalScheme>:: Morph(Morph& other, tbb::split) : mParent(other.mParent) , mMinAbsS(other.mMinAbsS) , mMaxAbsS(other.mMaxAbsS) , mMap(other.mMap) , 
mTask(other.mTask) { } template<typename GridT, typename InterruptT> template <typename MapT, math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme> inline size_t LevelSetMorphing<GridT, InterruptT>:: Morph<MapT, SpatialScheme, TemporalScheme>:: advect(ValueType time0, ValueType time1) { namespace ph = std::placeholders; // Make sure we have enough temporal auxiliary buffers for the time // integration AS WELL AS an extra buffer with the speed function! static const Index auxBuffers = 1 + (TemporalScheme == math::TVD_RK3 ? 2 : 1); size_t countCFL = 0; while (time0 < time1 && mParent->mTracker.checkInterrupter()) { mParent->mTracker.leafs().rebuildAuxBuffers(auxBuffers); const ValueType dt = this->sampleSpeed(time0, time1, auxBuffers); if ( math::isZero(dt) ) break;//V is essentially zero so terminate OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN //switch is resolved at compile-time switch(TemporalScheme) { case math::TVD_RK1: // Perform one explicit Euler step: t1 = t0 + dt // Phi_t1(1) = Phi_t0(0) - dt * Speed(2) * |Grad[Phi(0)]| mTask = std::bind(&Morph::euler01, ph::_1, ph::_2, dt, /*speed*/2); // Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1) this->cook(PARALLEL_FOR, 1); break; case math::TVD_RK2: // Perform one explicit Euler step: t1 = t0 + dt // Phi_t1(1) = Phi_t0(0) - dt * Speed(2) * |Grad[Phi(0)]| mTask = std::bind(&Morph::euler01, ph::_1, ph::_2, dt, /*speed*/2); // Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1) this->cook(PARALLEL_FOR, 1); // Convex combine explict Euler step: t2 = t0 + dt // Phi_t2(1) = 1/2 * Phi_t0(1) + 1/2 * (Phi_t1(0) - dt * Speed(2) * |Grad[Phi(0)]|) mTask = std::bind(&Morph::euler12, ph::_1, ph::_2, dt); // Cook and swap buffer 0 and 1 such that Phi_t2(0) and Phi_t1(1) this->cook(PARALLEL_FOR, 1); break; case math::TVD_RK3: // Perform one explicit Euler step: t1 = t0 + dt // Phi_t1(1) = Phi_t0(0) - dt * Speed(3) * |Grad[Phi(0)]| mTask = std::bind(&Morph::euler01, 
ph::_1, ph::_2, dt, /*speed*/3); // Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1) this->cook(PARALLEL_FOR, 1); // Convex combine explict Euler step: t2 = t0 + dt/2 // Phi_t2(2) = 3/4 * Phi_t0(1) + 1/4 * (Phi_t1(0) - dt * Speed(3) * |Grad[Phi(0)]|) mTask = std::bind(&Morph::euler34, ph::_1, ph::_2, dt); // Cook and swap buffer 0 and 2 such that Phi_t2(0) and Phi_t1(2) this->cook(PARALLEL_FOR, 2); // Convex combine explict Euler step: t3 = t0 + dt // Phi_t3(2) = 1/3 * Phi_t0(1) + 2/3 * (Phi_t2(0) - dt * Speed(3) * |Grad[Phi(0)]|) mTask = std::bind(&Morph::euler13, ph::_1, ph::_2, dt); // Cook and swap buffer 0 and 2 such that Phi_t3(0) and Phi_t2(2) this->cook(PARALLEL_FOR, 2); break; case math::UNKNOWN_TIS: default: OPENVDB_THROW(ValueError, "Temporal integration scheme not supported!"); }//end of compile-time resolved switch OPENVDB_NO_UNREACHABLE_CODE_WARNING_END time0 += dt; ++countCFL; mParent->mTracker.leafs().removeAuxBuffers(); // Track the narrow band mParent->mTracker.track(); }//end wile-loop over time return countCFL;//number of CLF propagation steps } template<typename GridT, typename InterruptT> template<typename MapT, math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme> inline typename GridT::ValueType LevelSetMorphing<GridT, InterruptT>:: Morph<MapT, SpatialScheme, TemporalScheme>:: sampleSpeed(ValueType time0, ValueType time1, Index speedBuffer) { namespace ph = std::placeholders; mMaxAbsS = mMinAbsS; const size_t leafCount = mParent->mTracker.leafs().leafCount(); if (leafCount==0 || time0 >= time1) return ValueType(0); const math::Transform& xform = mParent->mTracker.grid().transform(); if (mParent->mTarget->transform() == xform && (mParent->mMask == nullptr || mParent->mMask->transform() == xform)) { mTask = std::bind(&Morph::sampleAlignedSpeed, ph::_1, ph::_2, speedBuffer); } else { mTask = std::bind(&Morph::sampleXformedSpeed, ph::_1, ph::_2, speedBuffer); } this->cook(PARALLEL_REDUCE); if 
(math::isApproxEqual(mMinAbsS, mMaxAbsS)) return ValueType(0);//speed is essentially zero static const ValueType CFL = (TemporalScheme == math::TVD_RK1 ? ValueType(0.3) : TemporalScheme == math::TVD_RK2 ? ValueType(0.9) : ValueType(1.0))/math::Sqrt(ValueType(3.0)); const ValueType dt = math::Abs(time1 - time0), dx = mParent->mTracker.voxelSize(); return math::Min(dt, ValueType(CFL*dx/mMaxAbsS)); } template<typename GridT, typename InterruptT> template <typename MapT, math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme> inline void LevelSetMorphing<GridT, InterruptT>:: Morph<MapT, SpatialScheme, TemporalScheme>:: sampleXformedSpeed(const LeafRange& range, Index speedBuffer) { using VoxelIterT = typename LeafType::ValueOnCIter; using SamplerT = tools::GridSampler<typename GridT::ConstAccessor, tools::BoxSampler>; const MapT& map = *mMap; mParent->mTracker.checkInterrupter(); typename GridT::ConstAccessor targetAcc = mParent->mTarget->getAccessor(); SamplerT target(targetAcc, mParent->mTarget->transform()); if (mParent->mMask == nullptr) { for (typename LeafRange::Iterator leafIter = range.begin(); leafIter; ++leafIter) { ValueType* speed = leafIter.buffer(speedBuffer).data(); bool isZero = true; for (VoxelIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) { ValueType& s = speed[voxelIter.pos()]; s -= target.wsSample(map.applyMap(voxelIter.getCoord().asVec3d())); if (!math::isApproxZero(s)) isZero = false; mMaxAbsS = math::Max(mMaxAbsS, math::Abs(s)); } if (isZero) speed[0] = std::numeric_limits<ValueType>::max();//tag first voxel } } else { const ValueType min = mParent->mMinMask, invNorm = 1.0f/(mParent->mDeltaMask); const bool invMask = mParent->isMaskInverted(); typename GridT::ConstAccessor maskAcc = mParent->mMask->getAccessor(); SamplerT mask(maskAcc, mParent->mMask->transform()); for (typename LeafRange::Iterator leafIter = range.begin(); leafIter; ++leafIter) { ValueType* speed = 
leafIter.buffer(speedBuffer).data(); bool isZero = true; for (VoxelIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) { const Vec3R xyz = map.applyMap(voxelIter.getCoord().asVec3d());//world space const ValueType a = math::SmoothUnitStep((mask.wsSample(xyz)-min)*invNorm); ValueType& s = speed[voxelIter.pos()]; s -= target.wsSample(xyz); s *= invMask ? 1 - a : a; if (!math::isApproxZero(s)) isZero = false; mMaxAbsS = math::Max(mMaxAbsS, math::Abs(s)); } if (isZero) speed[0] = std::numeric_limits<ValueType>::max();//tag first voxel } } } template<typename GridT, typename InterruptT> template <typename MapT, math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme> inline void LevelSetMorphing<GridT, InterruptT>:: Morph<MapT, SpatialScheme, TemporalScheme>:: sampleAlignedSpeed(const LeafRange& range, Index speedBuffer) { using VoxelIterT = typename LeafType::ValueOnCIter; mParent->mTracker.checkInterrupter(); typename GridT::ConstAccessor target = mParent->mTarget->getAccessor(); if (mParent->mMask == nullptr) { for (typename LeafRange::Iterator leafIter = range.begin(); leafIter; ++leafIter) { ValueType* speed = leafIter.buffer(speedBuffer).data(); bool isZero = true; for (VoxelIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) { ValueType& s = speed[voxelIter.pos()]; s -= target.getValue(voxelIter.getCoord()); if (!math::isApproxZero(s)) isZero = false; mMaxAbsS = math::Max(mMaxAbsS, math::Abs(s)); } if (isZero) speed[0] = std::numeric_limits<ValueType>::max();//tag first voxel } } else { const ValueType min = mParent->mMinMask, invNorm = 1.0f/(mParent->mDeltaMask); const bool invMask = mParent->isMaskInverted(); typename GridT::ConstAccessor mask = mParent->mMask->getAccessor(); for (typename LeafRange::Iterator leafIter = range.begin(); leafIter; ++leafIter) { ValueType* speed = leafIter.buffer(speedBuffer).data(); bool isZero = true; for (VoxelIterT voxelIter = leafIter->cbeginValueOn(); 
voxelIter; ++voxelIter) { const Coord ijk = voxelIter.getCoord();//index space const ValueType a = math::SmoothUnitStep((mask.getValue(ijk)-min)*invNorm); ValueType& s = speed[voxelIter.pos()]; s -= target.getValue(ijk); s *= invMask ? 1 - a : a; if (!math::isApproxZero(s)) isZero = false; mMaxAbsS = math::Max(mMaxAbsS, math::Abs(s)); } if (isZero) speed[0] = std::numeric_limits<ValueType>::max();//tag first voxel } } } template<typename GridT, typename InterruptT> template <typename MapT, math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme> inline void LevelSetMorphing<GridT, InterruptT>:: Morph<MapT, SpatialScheme, TemporalScheme>:: cook(ThreadingMode mode, size_t swapBuffer) { mParent->mTracker.startInterrupter("Morphing level set"); const int grainSize = mParent->mTracker.getGrainSize(); const LeafRange range = mParent->mTracker.leafs().leafRange(grainSize); if (mParent->mTracker.getGrainSize()==0) { (*this)(range); } else if (mode == PARALLEL_FOR) { tbb::parallel_for(range, *this); } else if (mode == PARALLEL_REDUCE) { tbb::parallel_reduce(range, *this); } else { OPENVDB_THROW(ValueError, "expected threading mode " << int(PARALLEL_FOR) << " or " << int(PARALLEL_REDUCE) << ", got " << int(mode)); } mParent->mTracker.leafs().swapLeafBuffer(swapBuffer, grainSize == 0); mParent->mTracker.endInterrupter(); } template<typename GridT, typename InterruptT> template<typename MapT, math::BiasedGradientScheme SpatialScheme, math::TemporalIntegrationScheme TemporalScheme> template <int Nominator, int Denominator> inline void LevelSetMorphing<GridT,InterruptT>:: Morph<MapT, SpatialScheme, TemporalScheme>:: euler(const LeafRange& range, ValueType dt, Index phiBuffer, Index resultBuffer, Index speedBuffer) { using SchemeT = math::BIAS_SCHEME<SpatialScheme>; using StencilT = typename SchemeT::template ISStencil<GridType>::StencilType; using VoxelIterT = typename LeafType::ValueOnCIter; using NumGrad = math::GradientNormSqrd<MapT, 
SpatialScheme>; static const ValueType Alpha = ValueType(Nominator)/ValueType(Denominator); static const ValueType Beta = ValueType(1) - Alpha; mParent->mTracker.checkInterrupter(); const MapT& map = *mMap; StencilT stencil(mParent->mTracker.grid()); for (typename LeafRange::Iterator leafIter = range.begin(); leafIter; ++leafIter) { const ValueType* speed = leafIter.buffer(speedBuffer).data(); if (math::isExactlyEqual(speed[0], std::numeric_limits<ValueType>::max())) continue; const ValueType* phi = leafIter.buffer(phiBuffer).data(); ValueType* result = leafIter.buffer(resultBuffer).data(); for (VoxelIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter) { const Index n = voxelIter.pos(); if (math::isApproxZero(speed[n])) continue; stencil.moveTo(voxelIter); const ValueType v = stencil.getValue() - dt * speed[n] * NumGrad::result(map, stencil); result[n] = Nominator ? Alpha * phi[n] + Beta * v : v; }//loop over active voxels in the leaf of the mask }//loop over leafs of the level set } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_LEVEL_SET_MORPH_HAS_BEEN_INCLUDED
27,359
C
41.418605
100
0.656128
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/GridTransformer.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file GridTransformer.h /// @author Peter Cucka #ifndef OPENVDB_TOOLS_GRIDTRANSFORMER_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_GRIDTRANSFORMER_HAS_BEEN_INCLUDED #include <openvdb/Grid.h> #include <openvdb/Types.h> #include <openvdb/math/Math.h> // for isApproxEqual() #include <openvdb/util/NullInterrupter.h> #include "ChangeBackground.h" #include "Interpolation.h" #include "LevelSetRebuild.h" // for doLevelSetRebuild() #include "SignedFloodFill.h" // for signedFloodFill #include "Prune.h" // for pruneLevelSet #include <tbb/blocked_range.h> #include <tbb/parallel_reduce.h> #include <cmath> #include <functional> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Resample an input grid into an output grid of the same type such that, /// after resampling, the input and output grids coincide (apart from sampling /// artifacts), but the output grid's transform is unchanged. /// @details Specifically, this function resamples the input grid into the output /// grid's index space, using a sampling kernel like PointSampler, BoxSampler, /// or QuadraticSampler. /// @param inGrid the grid to be resampled /// @param outGrid the grid into which to write the resampled voxel data /// @param interrupter an object adhering to the util::NullInterrupter interface /// @par Example: /// @code /// // Create an input grid with the default identity transform /// // and populate it with a level-set sphere. /// FloatGrid::ConstPtr src = tools::makeSphere(...); /// // Create an output grid and give it a uniform-scale transform. /// FloatGrid::Ptr dest = FloatGrid::create(); /// const float voxelSize = 0.5; /// dest->setTransform(math::Transform::createLinearTransform(voxelSize)); /// // Resample the input grid into the output grid, reproducing /// // the level-set sphere at a smaller voxel size. 
/// MyInterrupter interrupter = ...; /// tools::resampleToMatch<tools::QuadraticSampler>(*src, *dest, interrupter); /// @endcode template<typename Sampler, typename Interrupter, typename GridType> inline void resampleToMatch(const GridType& inGrid, GridType& outGrid, Interrupter& interrupter); /// @brief Resample an input grid into an output grid of the same type such that, /// after resampling, the input and output grids coincide (apart from sampling /// artifacts), but the output grid's transform is unchanged. /// @details Specifically, this function resamples the input grid into the output /// grid's index space, using a sampling kernel like PointSampler, BoxSampler, /// or QuadraticSampler. /// @param inGrid the grid to be resampled /// @param outGrid the grid into which to write the resampled voxel data /// @par Example: /// @code /// // Create an input grid with the default identity transform /// // and populate it with a level-set sphere. /// FloatGrid::ConstPtr src = tools::makeSphere(...); /// // Create an output grid and give it a uniform-scale transform. /// FloatGrid::Ptr dest = FloatGrid::create(); /// const float voxelSize = 0.5; /// dest->setTransform(math::Transform::createLinearTransform(voxelSize)); /// // Resample the input grid into the output grid, reproducing /// // the level-set sphere at a smaller voxel size. /// tools::resampleToMatch<tools::QuadraticSampler>(*src, *dest); /// @endcode template<typename Sampler, typename GridType> inline void resampleToMatch(const GridType& inGrid, GridType& outGrid); //////////////////////////////////////// namespace internal { /// @brief A TileSampler wraps a grid sampler of another type (BoxSampler, /// QuadraticSampler, etc.), and for samples that fall within a given tile /// of the grid, it returns a cached tile value instead of accessing the grid. 
template<typename Sampler, typename TreeT> class TileSampler: public Sampler { public: using ValueT = typename TreeT::ValueType; /// @param b the index-space bounding box of a particular grid tile /// @param tileVal the tile's value /// @param on the tile's active state TileSampler(const CoordBBox& b, const ValueT& tileVal, bool on): mBBox(b.min().asVec3d(), b.max().asVec3d()), mVal(tileVal), mActive(on), mEmpty(false) { mBBox.expand(-this->radius()); // shrink the bounding box by the sample radius mEmpty = mBBox.empty(); } bool sample(const TreeT& inTree, const Vec3R& inCoord, ValueT& result) const { if (!mEmpty && mBBox.isInside(inCoord)) { result = mVal; return mActive; } return Sampler::sample(inTree, inCoord, result); } protected: BBoxd mBBox; ValueT mVal; bool mActive, mEmpty; }; /// @brief For point sampling, tree traversal is less expensive than testing /// bounding box membership. template<typename TreeT> class TileSampler<PointSampler, TreeT>: public PointSampler { public: TileSampler(const CoordBBox&, const typename TreeT::ValueType&, bool) {} }; /// @brief For point sampling, tree traversal is less expensive than testing /// bounding box membership. template<typename TreeT> class TileSampler<StaggeredPointSampler, TreeT>: public StaggeredPointSampler { public: TileSampler(const CoordBBox&, const typename TreeT::ValueType&, bool) {} }; } // namespace internal //////////////////////////////////////// /// A GridResampler applies a geometric transformation to an /// input grid using one of several sampling schemes, and stores /// the result in an output grid. 
/// /// Usage: /// @code /// GridResampler resampler(); /// resampler.transformGrid<BoxSampler>(xform, inGrid, outGrid); /// @endcode /// where @c xform is a functor that implements the following methods: /// @code /// bool isAffine() const /// openvdb::Vec3d transform(const openvdb::Vec3d&) const /// openvdb::Vec3d invTransform(const openvdb::Vec3d&) const /// @endcode /// @note When the transform is affine and can be expressed as a 4 x 4 matrix, /// a GridTransformer is much more efficient than a GridResampler. class GridResampler { public: using Ptr = SharedPtr<GridResampler>; using InterruptFunc = std::function<bool (void)>; GridResampler(): mThreaded(true), mTransformTiles(true) {} virtual ~GridResampler() {} GridResampler(const GridResampler&) = default; GridResampler& operator=(const GridResampler&) = default; /// Enable or disable threading. (Threading is enabled by default.) void setThreaded(bool b) { mThreaded = b; } /// Return @c true if threading is enabled. bool threaded() const { return mThreaded; } /// Enable or disable processing of tiles. (Enabled by default, except for level set grids.) void setTransformTiles(bool b) { mTransformTiles = b; } /// Return @c true if tile processing is enabled. bool transformTiles() const { return mTransformTiles; } /// @brief Allow processing to be aborted by providing an interrupter object. /// The interrupter will be queried periodically during processing. /// @see util/NullInterrupter.h for interrupter interface requirements. 
template<typename InterrupterType> void setInterrupter(InterrupterType&); template<typename Sampler, typename GridT, typename Transformer> void transformGrid(const Transformer&, const GridT& inGrid, GridT& outGrid) const; protected: template<typename Sampler, typename GridT, typename Transformer> void applyTransform(const Transformer&, const GridT& inGrid, GridT& outGrid) const; bool interrupt() const { return mInterrupt && mInterrupt(); } private: template<typename Sampler, typename InTreeT, typename OutTreeT, typename Transformer> static void transformBBox(const Transformer&, const CoordBBox& inBBox, const InTreeT& inTree, OutTreeT& outTree, const InterruptFunc&, const Sampler& = Sampler()); template<typename Sampler, typename TreeT, typename Transformer> class RangeProcessor; bool mThreaded, mTransformTiles; InterruptFunc mInterrupt; }; //////////////////////////////////////// /// @brief A GridTransformer applies a geometric transformation to an /// input grid using one of several sampling schemes, and stores /// the result in an output grid. /// /// @note GridTransformer is optimized for affine transformations. 
/// /// Usage: /// @code /// Mat4R xform = ...; /// GridTransformer transformer(xform); /// transformer.transformGrid<BoxSampler>(inGrid, outGrid); /// @endcode /// or /// @code /// Vec3R pivot = ..., scale = ..., rotate = ..., translate = ...; /// GridTransformer transformer(pivot, scale, rotate, translate); /// transformer.transformGrid<QuadraticSampler>(inGrid, outGrid); /// @endcode class GridTransformer: public GridResampler { public: using Ptr = SharedPtr<GridTransformer>; GridTransformer(const Mat4R& xform); GridTransformer( const Vec3R& pivot, const Vec3R& scale, const Vec3R& rotate, const Vec3R& translate, const std::string& xformOrder = "tsr", const std::string& rotationOrder = "zyx"); ~GridTransformer() override = default; GridTransformer(const GridTransformer&) = default; GridTransformer& operator=(const GridTransformer&) = default; const Mat4R& getTransform() const { return mTransform; } template<class Sampler, class GridT> void transformGrid(const GridT& inGrid, GridT& outGrid) const; private: struct MatrixTransform; inline void init(const Vec3R& pivot, const Vec3R& scale, const Vec3R& rotate, const Vec3R& translate, const std::string& xformOrder, const std::string& rotOrder); Vec3R mPivot; Vec3i mMipLevels; Mat4R mTransform, mPreScaleTransform, mPostScaleTransform; }; //////////////////////////////////////// namespace local_util { enum { DECOMP_INVALID = 0, DECOMP_VALID = 1, DECOMP_UNIQUE = 2 }; /// @brief Decompose an affine transform into scale, rotation (XYZ order), /// and translation components. /// @return DECOMP_INVALID if the given matrix is not affine or cannot /// be decomposed, DECOMP_UNIQUE if the matrix has a unique decomposition, /// DECOMP_VALID otherwise template<typename T> inline int decompose(const math::Mat4<T>& m, math::Vec3<T>& scale, math::Vec3<T>& rotate, math::Vec3<T>& translate) { if (!math::isAffine(m)) return DECOMP_INVALID; // This is the translation in world space translate = m.getTranslation(); // Extract translation. 
const math::Mat3<T> xform = m.getMat3(); const math::Vec3<T> unsignedScale( (math::Vec3<T>(1, 0, 0) * xform).length(), (math::Vec3<T>(0, 1, 0) * xform).length(), (math::Vec3<T>(0, 0, 1) * xform).length()); const bool hasUniformScale = unsignedScale.eq(math::Vec3<T>(unsignedScale[0])); bool hasRotation = false; bool validDecomposition = false; T minAngle = std::numeric_limits<T>::max(); // If the transformation matrix contains a reflection, test different negative scales // to find a decomposition that favors the optimal resampling algorithm. for (size_t n = 0; n < 8; ++n) { const math::Vec3<T> signedScale( n & 0x1 ? -unsignedScale.x() : unsignedScale.x(), n & 0x2 ? -unsignedScale.y() : unsignedScale.y(), n & 0x4 ? -unsignedScale.z() : unsignedScale.z()); // Extract scale and potentially reflection. const math::Mat3<T> mat = xform * math::scale<math::Mat3<T> >(signedScale).inverse(); if (mat.det() < T(0.0)) continue; // Skip if mat contains a reflection. const math::Vec3<T> tmpAngle = math::eulerAngles(mat, math::XYZ_ROTATION); const math::Mat3<T> rebuild = math::rotation<math::Mat3<T> >(math::Vec3<T>(0, 0, 1), tmpAngle.z()) * math::rotation<math::Mat3<T> >(math::Vec3<T>(0, 1, 0), tmpAngle.y()) * math::rotation<math::Mat3<T> >(math::Vec3<T>(1, 0, 0), tmpAngle.x()) * math::scale<math::Mat3<T> >(signedScale); if (xform.eq(rebuild)) { const T maxAngle = std::max(std::abs(tmpAngle[0]), std::max(std::abs(tmpAngle[1]), std::abs(tmpAngle[2]))); if (!(minAngle < maxAngle)) { // Update if less or equal. minAngle = maxAngle; rotate = tmpAngle; scale = signedScale; hasRotation = !rotate.eq(math::Vec3<T>::zero()); validDecomposition = true; if (hasUniformScale || !hasRotation) { // Current decomposition is optimal. break; } } } } if (!validDecomposition) { // The decomposition is invalid if the transformation matrix contains shear. return DECOMP_INVALID; } if (hasRotation && !hasUniformScale) { // No unique decomposition if scale is nonuniform and rotation is nonzero. 
return DECOMP_VALID; } return DECOMP_UNIQUE; } } // namespace local_util //////////////////////////////////////// /// This class implements the Transformer functor interface (specifically, /// the isAffine(), transform() and invTransform() methods) for a transform /// that is expressed as a 4 x 4 matrix. struct GridTransformer::MatrixTransform { MatrixTransform(): mat(Mat4R::identity()), invMat(Mat4R::identity()) {} MatrixTransform(const Mat4R& xform): mat(xform), invMat(xform.inverse()) {} bool isAffine() const { return math::isAffine(mat); } Vec3R transform(const Vec3R& pos) const { return mat.transformH(pos); } Vec3R invTransform(const Vec3R& pos) const { return invMat.transformH(pos); } Mat4R mat, invMat; }; //////////////////////////////////////// /// @brief This class implements the Transformer functor interface (specifically, /// the isAffine(), transform() and invTransform() methods) for a transform /// that maps an A grid into a B grid's index space such that, after resampling, /// A's index space and transform match B's index space and transform. 
class ABTransform { public: /// @param aXform the A grid's transform /// @param bXform the B grid's transform ABTransform(const math::Transform& aXform, const math::Transform& bXform): mAXform(aXform), mBXform(bXform), mIsAffine(mAXform.isLinear() && mBXform.isLinear()), mIsIdentity(mIsAffine && mAXform == mBXform) {} bool isAffine() const { return mIsAffine; } bool isIdentity() const { return mIsIdentity; } openvdb::Vec3R transform(const openvdb::Vec3R& pos) const { return mBXform.worldToIndex(mAXform.indexToWorld(pos)); } openvdb::Vec3R invTransform(const openvdb::Vec3R& pos) const { return mAXform.worldToIndex(mBXform.indexToWorld(pos)); } const math::Transform& getA() const { return mAXform; } const math::Transform& getB() const { return mBXform; } private: const math::Transform &mAXform, &mBXform; const bool mIsAffine; const bool mIsIdentity; }; /// The normal entry points for resampling are the resampleToMatch() functions, /// which correctly handle level set grids under scaling and shearing. /// doResampleToMatch() is mainly for internal use but is typically faster /// for level sets, and correct provided that no scaling or shearing is needed. /// /// @warning Do not use this function to scale or shear a level set grid. template<typename Sampler, typename Interrupter, typename GridType> inline void doResampleToMatch(const GridType& inGrid, GridType& outGrid, Interrupter& interrupter) { ABTransform xform(inGrid.transform(), outGrid.transform()); if (Sampler::consistent() && xform.isIdentity()) { // If the transforms of the input and output are identical, the // output tree is simply a deep copy of the input tree. outGrid.setTree(inGrid.tree().copy()); } else if (xform.isAffine()) { // If the input and output transforms are both affine, create an // input to output transform (in:index-to-world * out:world-to-index) // and use the fast GridTransformer API. 
Mat4R mat = xform.getA().baseMap()->getAffineMap()->getMat4() *
            ( xform.getB().baseMap()->getAffineMap()->getMat4().inverse() );

        GridTransformer transformer(mat);
        transformer.setInterrupter(interrupter);

        // Transform the input grid and store the result in the output grid.
        transformer.transformGrid<Sampler>(inGrid, outGrid);
    } else {
        // If either the input or the output transform is non-affine,
        // use the slower GridResampler API.
        GridResampler resampler;
        resampler.setInterrupter(interrupter);

        resampler.transformGrid<Sampler>(xform, inGrid, outGrid);
    }
}


// Compute a level set's narrow-band half width (in voxels) from its
// background value and voxel size.
template<typename ValueType>
struct HalfWidthOp {
    static ValueType eval(const ValueType& background, const Vec3d& voxelSize)
    {
        OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
        // NOTE(review): only the x component of the voxel size is used,
        // i.e., voxels are presumably assumed to be uniform — confirm.
        ValueType result(background * (1.0 / voxelSize[0]));
        OPENVDB_NO_TYPE_CONVERSION_WARNING_END
        return result;
    }
}; // struct HalfWidthOp

// For bool grids the background value is not a distance, so pass it through unchanged.
template<>
struct HalfWidthOp<bool> {
    static bool eval(const bool& background, const Vec3d& /*voxelSize*/)
    {
        return background;
    }
}; // struct HalfWidthOp<bool>


template<typename Sampler, typename Interrupter, typename GridType>
inline void
resampleToMatch(const GridType& inGrid, GridType& outGrid, Interrupter& interrupter)
{
    if (inGrid.getGridClass() == GRID_LEVEL_SET) {
        // If the input grid is a level set, resample it using the level set rebuild tool.

        if (inGrid.constTransform() == outGrid.constTransform()) {
            // If the transforms of the input and output grids are identical,
            // the output tree is simply a deep copy of the input tree.
            outGrid.setTree(inGrid.tree().copy());
            return;
        }

        // If the output grid is a level set, resample the input grid to have the output grid's
        // background value.  Otherwise, preserve the input grid's background value.
        using ValueT = typename GridType::ValueType;
        const bool outIsLevelSet = outGrid.getGridClass() == openvdb::GRID_LEVEL_SET;

        const ValueT halfWidth = outIsLevelSet ?
HalfWidthOp<ValueT>::eval(outGrid.background(), outGrid.voxelSize()) :
            HalfWidthOp<ValueT>::eval(inGrid.background(), inGrid.voxelSize());

        typename GridType::Ptr tempGrid;
        try {
            // Rebuild the level set at the output grid's transform and half width.
            tempGrid = doLevelSetRebuild(inGrid, /*iso=*/zeroVal<ValueT>(),
                /*exWidth=*/halfWidth, /*inWidth=*/halfWidth,
                &outGrid.constTransform(), &interrupter);
        } catch (TypeError&) {
            // The input grid is classified as a level set, but it has a value type
            // that is not supported by the level set rebuild tool. Fall back to
            // using the generic resampler.
            tempGrid.reset();
        }
        if (tempGrid) {
            // The rebuilt tree is shared with (not copied into) the output grid.
            outGrid.setTree(tempGrid->treePtr());
            return;
        }
    }

    // If the input grid is not a level set, use the generic resampler.
    doResampleToMatch<Sampler>(inGrid, outGrid, interrupter);
}


// Convenience overload that supplies a null interrupter.
template<typename Sampler, typename GridType>
inline void
resampleToMatch(const GridType& inGrid, GridType& outGrid)
{
    util::NullInterrupter interrupter;
    resampleToMatch<Sampler>(inGrid, outGrid, interrupter);
}


////////////////////////////////////////


inline
GridTransformer::GridTransformer(const Mat4R& xform):
    mPivot(0, 0, 0),
    mMipLevels(0, 0, 0),
    mTransform(xform),
    mPreScaleTransform(Mat4R::identity()),
    mPostScaleTransform(Mat4R::identity())
{
    Vec3R scale, rotate, translate;
    if (local_util::decompose(mTransform, scale, rotate, translate)) {
        // If the transform can be decomposed into affine components,
        // use them to set up a mipmapping-like scheme for downsampling.
init(mPivot, scale, rotate, translate, "rst", "zyx");
    }
}


inline
GridTransformer::GridTransformer(
    const Vec3R& pivot, const Vec3R& scale,
    const Vec3R& rotate, const Vec3R& translate,
    const std::string& xformOrder, const std::string& rotOrder):
    mPivot(0, 0, 0),
    mMipLevels(0, 0, 0),
    mPreScaleTransform(Mat4R::identity()),
    mPostScaleTransform(Mat4R::identity())
{
    init(pivot, scale, rotate, translate, xformOrder, rotOrder);
}


////////////////////////////////////////


// Shared constructor logic: validate the order strings, set up the
// mipmapping scheme for downsampling, and compose the transform matrices.
inline void
GridTransformer::init(
    const Vec3R& pivot, const Vec3R& scale,
    const Vec3R& rotate, const Vec3R& translate,
    const std::string& xformOrder, const std::string& rotOrder)
{
    if (xformOrder.size() != 3) {
        OPENVDB_THROW(ValueError, "invalid transform order (" + xformOrder + ")");
    }
    if (rotOrder.size() != 3) {
        OPENVDB_THROW(ValueError, "invalid rotation order (" + rotOrder + ")");
    }

    mPivot = pivot;

    // Scaling is handled via a mipmapping-like scheme of successive
    // halvings of the tree resolution, until the remaining scale
    // factor is greater than or equal to 1/2.
    Vec3R scaleRemainder = scale;
    for (int i = 0; i < 3; ++i) {
        double s = std::fabs(scale(i));
        if (s < 0.5) {
            // Number of halvings needed to bring |scale| up to at least 1/2.
            mMipLevels(i) = int(std::floor(-std::log(s)/std::log(2.0)));
            // Each halving of the tree resolution accounts for a factor of two
            // of the requested scale.
            scaleRemainder(i) = scale(i) * (1 << mMipLevels(i));
        }
    }

    // Build pre-scale and post-scale transform matrices based on
    // the user-specified order of operations.
    // Note that we iterate over the transform order string in reverse order
    // (e.g., "t", "r", "s", given "srt"). This is because math::Mat matrices
    // postmultiply row vectors rather than premultiplying column vectors.
mTransform = mPreScaleTransform = mPostScaleTransform = Mat4R::identity();
    // Operations that precede the scale in the order string accumulate into
    // the pre-scale matrix; those that follow it go into the post-scale matrix.
    Mat4R* remainder = &mPostScaleTransform;
    int rpos, spos, tpos;
    rpos = spos = tpos = 3;
    for (int ix = 2; ix >= 0; --ix) { // reverse iteration
        switch (xformOrder[ix]) {

        case 'r':
            rpos = ix;
            // Rotate about the pivot point.
            mTransform.preTranslate(pivot);
            remainder->preTranslate(pivot);

            int xpos, ypos, zpos;
            xpos = ypos = zpos = 3;
            for (int ir = 2; ir >= 0; --ir) {
                switch (rotOrder[ir]) {
                case 'x':
                    xpos = ir;
                    mTransform.preRotate(math::X_AXIS, rotate.x());
                    remainder->preRotate(math::X_AXIS, rotate.x());
                    break;
                case 'y':
                    ypos = ir;
                    mTransform.preRotate(math::Y_AXIS, rotate.y());
                    remainder->preRotate(math::Y_AXIS, rotate.y());
                    break;
                case 'z':
                    zpos = ir;
                    mTransform.preRotate(math::Z_AXIS, rotate.z());
                    remainder->preRotate(math::Z_AXIS, rotate.z());
                    break;
                }
            }
            // Reject rotation order strings that don't contain exactly one
            // instance of "x", "y" and "z".
            if (xpos > 2 || ypos > 2 || zpos > 2) {
                OPENVDB_THROW(ValueError, "invalid rotation order (" + rotOrder + ")");
            }
            mTransform.preTranslate(-pivot);
            remainder->preTranslate(-pivot);
            break;

        case 's':
            spos = ix;
            // Scale about the pivot point.  The full scale goes into mTransform;
            // only the scale left over after mipmapping goes into the remainder.
            mTransform.preTranslate(pivot);
            mTransform.preScale(scale);
            mTransform.preTranslate(-pivot);

            remainder->preTranslate(pivot);
            remainder->preScale(scaleRemainder);
            remainder->preTranslate(-pivot);
            // Everything earlier in the order string precedes the scale.
            remainder = &mPreScaleTransform;
            break;

        case 't':
            tpos = ix;
            mTransform.preTranslate(translate);
            remainder->preTranslate(translate);
            break;
        }
    }
    // Reject transform order strings that don't contain exactly one
    // instance of "t", "r" and "s".
if (tpos > 2 || rpos > 2 || spos > 2) {
        OPENVDB_THROW(ValueError, "invalid transform order (" + xformOrder + ")");
    }
}


////////////////////////////////////////


// NOTE(review): the interrupter is captured by pointer, so it must
// outlive this resampler — confirm at call sites.
template<typename InterrupterType>
void
GridResampler::setInterrupter(InterrupterType& interrupter)
{
    mInterrupt = std::bind(&InterrupterType::wasInterrupted,
        /*this=*/&interrupter, /*percent=*/-1);
}


template<typename Sampler, typename GridT, typename Transformer>
void
GridResampler::transformGrid(const Transformer& xform,
    const GridT& inGrid, GridT& outGrid) const
{
    tools::changeBackground(outGrid.tree(), inGrid.background());
    applyTransform<Sampler>(xform, inGrid, outGrid);
}


template<class Sampler, class GridT>
void
GridTransformer::transformGrid(const GridT& inGrid, GridT& outGrid) const
{
    tools::changeBackground(outGrid.tree(), inGrid.background());

    if (!Sampler::mipmap() || mMipLevels == Vec3i::zero()) {
        // Skip the mipmapping step.
        const MatrixTransform xform(mTransform);
        applyTransform<Sampler>(xform, inGrid, outGrid);

    } else {
        bool firstPass = true;
        const typename GridT::ValueType background = inGrid.background();
        typename GridT::Ptr tempGrid = GridT::create(background);

        if (!mPreScaleTransform.eq(Mat4R::identity())) {
            firstPass = false;
            // Apply the pre-scale transform to the input grid
            // and store the result in a temporary grid.
            const MatrixTransform xform(mPreScaleTransform);
            applyTransform<Sampler>(xform, inGrid, *tempGrid);
        }

        // While the scale factor along one or more axes is less than 1/2,
        // scale the grid by half along those axes.
        Vec3i count = mMipLevels; // # of halvings remaining per axis
        while (count != Vec3i::zero()) {
            MatrixTransform xform;
            // Scale by half, about the pivot, along each axis that still needs halving.
            xform.mat.setTranslation(mPivot);
            xform.mat.preScale(Vec3R(
                count.x() ? .5 : 1, count.y() ? .5 : 1, count.z() ? .5 : 1));
            xform.mat.preTranslate(-mPivot);
            xform.invMat = xform.mat.inverse();

            if (firstPass) {
                firstPass = false;
                // Scale the input grid and store the result in a temporary grid.
applyTransform<Sampler>(xform, inGrid, *tempGrid);
            } else {
                // Scale the temporary grid and store the result in a transient grid,
                // then swap the two and discard the transient grid.
                typename GridT::Ptr destGrid = GridT::create(background);
                applyTransform<Sampler>(xform, *tempGrid, *destGrid);
                tempGrid.swap(destGrid);
            }
            // (3, 2, 1) -> (2, 1, 0) -> (1, 0, 0) -> (0, 0, 0), etc.
            count = math::maxComponent(count - 1, Vec3i::zero());
        }

        // Apply the post-scale transform and store the result in the output grid.
        if (!mPostScaleTransform.eq(Mat4R::identity())) {
            const MatrixTransform xform(mPostScaleTransform);
            applyTransform<Sampler>(xform, *tempGrid, outGrid);
        } else {
            // No post-scale step: share the temporary grid's tree with the output grid.
            outGrid.setTree(tempGrid->treePtr());
        }
    }
}


////////////////////////////////////////


// TBB-style reduction functor that transforms ranges of leaf nodes or tiles
// of an input tree and merges per-thread results into an output tree.
template<class Sampler, class TreeT, typename Transformer>
class GridResampler::RangeProcessor
{
public:
    using LeafIterT = typename TreeT::LeafCIter;
    using TileIterT = typename TreeT::ValueAllCIter;
    using LeafRange = typename tree::IteratorRange<LeafIterT>;
    using TileRange = typename tree::IteratorRange<TileIterT>;
    using InTreeAccessor = typename tree::ValueAccessor<const TreeT>;
    using OutTreeAccessor = typename tree::ValueAccessor<TreeT>;

    // Root processor: writes directly into the caller-owned output tree.
    RangeProcessor(const Transformer& xform, const CoordBBox& b, const TreeT& inT, TreeT& outT):
        mIsRoot(true), mXform(xform), mBBox(b),
        mInTree(inT), mOutTree(&outT), mInAcc(mInTree), mOutAcc(*mOutTree)
    {}

    // Non-root processor: owns its output tree (deleted in the destructor).
    RangeProcessor(const Transformer& xform, const CoordBBox& b, const TreeT& inTree):
        mIsRoot(false), mXform(xform), mBBox(b), mInTree(inTree),
        mOutTree(new TreeT(inTree.background())),
        mInAcc(mInTree), mOutAcc(*mOutTree)
    {}

    ~RangeProcessor() { if (!mIsRoot) delete mOutTree; }

    /// Splitting constructor: don't copy the original processor's output tree
    RangeProcessor(RangeProcessor& other, tbb::split):
        mIsRoot(false),
        mXform(other.mXform),
        mBBox(other.mBBox),
        mInTree(other.mInTree),
        mOutTree(new TreeT(mInTree.background())),
        mInAcc(mInTree),
        mOutAcc(*mOutTree),
        mInterrupt(other.mInterrupt)
    {}
void setInterrupt(const InterruptFunc& f) { mInterrupt = f; }

    /// Transform each leaf node in the given range.
    void operator()(LeafRange& r)
    {
        for ( ; r; ++r) {
            if (interrupt()) break;

            LeafIterT i = r.iterator();
            // Bounding box of this leaf node's voxels.
            CoordBBox bbox(i->origin(), i->origin() + Coord(i->dim()));
            if (!mBBox.empty()) {
                // Intersect the leaf node's bounding box with mBBox.
                bbox = CoordBBox(
                    Coord::maxComponent(bbox.min(), mBBox.min()),
                    Coord::minComponent(bbox.max(), mBBox.max()));
            }
            if (!bbox.empty()) {
                transformBBox<Sampler>(mXform, bbox, mInAcc, mOutAcc, mInterrupt);
            }
        }
    }

    /// Transform each non-background tile in the given range.
    void operator()(TileRange& r)
    {
        for ( ; r; ++r) {
            if (interrupt()) break;

            TileIterT i = r.iterator();
            // Skip voxels and background tiles.
            if (!i.isTileValue()) continue;
            if (!i.isValueOn() && math::isApproxEqual(*i, mOutTree->background())) continue;

            CoordBBox bbox;
            i.getBoundingBox(bbox);
            if (!mBBox.empty()) {
                // Intersect the tile's bounding box with mBBox.
                bbox = CoordBBox(
                    Coord::maxComponent(bbox.min(), mBBox.min()),
                    Coord::minComponent(bbox.max(), mBBox.max()));
            }
            if (!bbox.empty()) {
                /// @todo This samples the tile voxel-by-voxel, which is much too slow.
                /// Instead, compute the largest axis-aligned bounding box that is
                /// contained in the transformed tile (adjusted for the sampler radius)
                /// and fill it with the tile value. Then transform the remaining voxels.
                internal::TileSampler<Sampler, InTreeAccessor>
                    sampler(bbox, i.getValue(), i.isValueOn());
                transformBBox(mXform, bbox, mInAcc, mOutAcc, mInterrupt, sampler);
            }
        }
    }

    /// Merge another processor's output tree into this processor's tree.
void join(RangeProcessor& other)
    {
        if (!interrupt()) mOutTree->merge(*other.mOutTree);
    }

private:
    bool interrupt() const { return mInterrupt && mInterrupt(); }

    const bool mIsRoot; // true if mOutTree is the top-level tree
    Transformer mXform;
    CoordBBox mBBox;
    const TreeT& mInTree;
    TreeT* mOutTree;
    InTreeAccessor mInAcc;
    OutTreeAccessor mOutAcc;
    InterruptFunc mInterrupt;
};


////////////////////////////////////////


template<class Sampler, class GridT, typename Transformer>
void
GridResampler::applyTransform(const Transformer& xform,
    const GridT& inGrid, GridT& outGrid) const
{
    using TreeT = typename GridT::TreeType;
    const TreeT& inTree = inGrid.tree();
    TreeT& outTree = outGrid.tree();

    using RangeProc = RangeProcessor<Sampler, TreeT, Transformer>;

    const GridClass gridClass = inGrid.getGridClass();

    if (gridClass != GRID_LEVEL_SET && mTransformTiles) {
        // Independently transform the tiles of the input grid.
        // Note: Tiles in level sets can only be background tiles, and they
        // are handled more efficiently with a signed flood fill (see below).

        RangeProc proc(xform, CoordBBox(), inTree, outTree);
        proc.setInterrupt(mInterrupt);

        // Iterate over tiles only: limit the iterator's depth to one level
        // above the leaf nodes.
        typename RangeProc::TileIterT tileIter = inTree.cbeginValueAll();
        tileIter.setMaxDepth(tileIter.getLeafDepth() - 1); // skip leaf nodes
        typename RangeProc::TileRange tileRange(tileIter);

        if (mThreaded) {
            tbb::parallel_reduce(tileRange, proc);
        } else {
            proc(tileRange);
        }
    }

    CoordBBox clipBBox;
    if (gridClass == GRID_LEVEL_SET) {
        // Inactive voxels in level sets can only be background voxels, and they
        // are handled more efficiently with a signed flood fill (see below).
        clipBBox = inGrid.evalActiveVoxelBoundingBox();
    }

    // Independently transform the leaf nodes of the input grid.
RangeProc proc(xform, clipBBox, inTree, outTree);
    proc.setInterrupt(mInterrupt);

    typename RangeProc::LeafRange leafRange(inTree.cbeginLeaf());

    if (mThreaded) {
        tbb::parallel_reduce(leafRange, proc);
    } else {
        proc(leafRange);
    }

    // If the grid is a level set, mark inactive voxels as inside or outside.
    if (gridClass == GRID_LEVEL_SET) {
        tools::pruneLevelSet(outTree);
        tools::signedFloodFill(outTree);
    }
}


////////////////////////////////////////


//static
template<class Sampler, class InTreeT, class OutTreeT, class Transformer>
void
GridResampler::transformBBox(
    const Transformer& xform,
    const CoordBBox& bbox,
    const InTreeT& inTree,
    OutTreeT& outTree,
    const InterruptFunc& interrupt,
    const Sampler& sampler)
{
    using ValueT = typename OutTreeT::ValueType;

    // Transform the corners of the input tree's bounding box
    // and compute the enclosing bounding box in the output tree.
    Vec3R
        inRMin(bbox.min().x(), bbox.min().y(), bbox.min().z()),
        inRMax(bbox.max().x()+1, bbox.max().y()+1, bbox.max().z()+1),
        outRMin = math::minComponent(xform.transform(inRMin), xform.transform(inRMax)),
        outRMax = math::maxComponent(xform.transform(inRMin), xform.transform(inRMax));
    for (int i = 0; i < 8; ++i) {
        // Visit each of the eight corners of the input bounding box.
        Vec3R corner(
            i & 1 ? inRMax.x() : inRMin.x(),
            i & 2 ? inRMax.y() : inRMin.y(),
            i & 4 ? inRMax.z() : inRMin.z());
        outRMin = math::minComponent(outRMin, xform.transform(corner));
        outRMax = math::maxComponent(outRMax, xform.transform(corner));
    }
    // Pad by the sampler's radius so all contributing input voxels are visited.
    Vec3i
        outMin = local_util::floorVec3(outRMin) - Sampler::radius(),
        outMax = local_util::ceilVec3(outRMax) + Sampler::radius();

    if (!xform.isAffine()) {
        // If the transform is not affine, back-project each output voxel
        // into the input tree.
Vec3R xyz, inXYZ;
        Coord outXYZ;
        // Aliases so that incrementing x/y/z advances the output coordinate in place.
        int &x = outXYZ.x(), &y = outXYZ.y(), &z = outXYZ.z();
        for (x = outMin.x(); x <= outMax.x(); ++x) {
            if (interrupt && interrupt()) break;
            xyz.x() = x;
            for (y = outMin.y(); y <= outMax.y(); ++y) {
                if (interrupt && interrupt()) break;
                xyz.y() = y;
                for (z = outMin.z(); z <= outMax.z(); ++z) {
                    xyz.z() = z;
                    // Back-project the output voxel into the input tree and sample there.
                    inXYZ = xform.invTransform(xyz);
                    ValueT result;
                    if (sampler.sample(inTree, inXYZ, result)) {
                        outTree.setValueOn(outXYZ, result);
                    } else {
                        // Note: Don't overwrite existing active values with inactive values.
                        if (!outTree.isValueOn(outXYZ)) {
                            outTree.setValueOff(outXYZ, result);
                        }
                    }
                }
            }
        }
    } else { // affine
        // Compute step sizes in the input tree that correspond to
        // unit steps in x, y and z in the output tree.
        const Vec3R
            translation = xform.invTransform(Vec3R(0, 0, 0)),
            deltaX = xform.invTransform(Vec3R(1, 0, 0)) - translation,
            deltaY = xform.invTransform(Vec3R(0, 1, 0)) - translation,
            deltaZ = xform.invTransform(Vec3R(0, 0, 1)) - translation;

#if defined(__ICC)
        /// @todo The following line is a workaround for bad code generation
        /// in opt-icc11.1_64 (but not debug or gcc) builds. It should be
        /// removed once the problem has been addressed at its source.
        const Vec3R dummy = deltaX;
#endif

        // Step by whole voxels through the output tree, sampling the
        // corresponding fractional voxels of the input tree.
        Vec3R inStartX = xform.invTransform(Vec3R(outMin));
        Coord outXYZ;
        int &x = outXYZ.x(), &y = outXYZ.y(), &z = outXYZ.z();
        for (x = outMin.x(); x <= outMax.x(); ++x, inStartX += deltaX) {
            if (interrupt && interrupt()) break;
            Vec3R inStartY = inStartX;
            for (y = outMin.y(); y <= outMax.y(); ++y, inStartY += deltaY) {
                if (interrupt && interrupt()) break;
                Vec3R inXYZ = inStartY;
                for (z = outMin.z(); z <= outMax.z(); ++z, inXYZ += deltaZ) {
                    ValueT result;
                    if (sampler.sample(inTree, inXYZ, result)) {
                        outTree.setValueOn(outXYZ, result);
                    } else {
                        // Note: Don't overwrite existing active values with inactive values.
if (!outTree.isValueOn(outXYZ)) {
                            outTree.setValueOff(outXYZ, result);
                        }
                    }
                }
            }
        }
    }
} // GridResampler::transformBBox()

} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TOOLS_GRIDTRANSFORMER_HAS_BEEN_INCLUDED
37,980
C
35.520192
97
0.623223
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/PoissonSolver.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file PoissonSolver.h /// /// @authors D.J. Hill, Peter Cucka /// /// @brief Solve Poisson's equation &nabla;<sup><small>2</small></sup><i>x</i> = <i>b</i> /// for <i>x</i>, where @e b is a vector comprising the values of all of the active voxels /// in a grid. /// /// @par Example: /// Solve for the pressure in a cubic tank of liquid, assuming uniform boundary conditions: /// @code /// FloatTree source(/*background=*/0.0f); /// // Activate voxels to indicate that they contain liquid. /// source.fill(CoordBBox(Coord(0, -10, 0), Coord(10, 0, 10)), /*value=*/0.0f); /// /// math::pcg::State state = math::pcg::terminationDefaults<float>(); /// FloatTree::Ptr solution = tools::poisson::solve(source, state); /// @endcode /// /// @par Example: /// Solve for the pressure, <i>P</i>, in a cubic tank of liquid that is open at the top. /// Boundary conditions are <i>P</i>&nbsp;=&nbsp;0 at the top, /// &part;<i>P</i>/&part;<i>y</i>&nbsp;=&nbsp;&minus;1 at the bottom /// and &part;<i>P</i>/&part;<i>x</i>&nbsp;=&nbsp;0 at the sides: /// <pre> /// P = 0 /// +--------+ (N,0,N) /// /| /| /// (0,0,0) +--------+ | /// | | | | dP/dx = 0 /// dP/dx = 0 | +------|-+ /// |/ |/ /// (0,-N,0) +--------+ (N,-N,0) /// dP/dy = -1 /// </pre> /// @code /// const int N = 10; /// DoubleTree source(/*background=*/0.0); /// // Activate voxels to indicate that they contain liquid. 
/// source.fill(CoordBBox(Coord(0, -N, 0), Coord(N, 0, N)), /*value=*/0.0); /// /// auto boundary = [](const openvdb::Coord& ijk, const openvdb::Coord& neighbor, /// double& source, double& diagonal) /// { /// if (neighbor.x() == ijk.x() && neighbor.z() == ijk.z()) { /// if (neighbor.y() < ijk.y()) source -= 1.0; /// else diagonal -= 1.0; /// } /// }; /// /// math::pcg::State state = math::pcg::terminationDefaults<double>(); /// util::NullInterrupter interrupter; /// /// DoubleTree::Ptr solution = tools::poisson::solveWithBoundaryConditions( /// source, boundary, state, interrupter); /// @endcode #ifndef OPENVDB_TOOLS_POISSONSOLVER_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_POISSONSOLVER_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/math/ConjGradient.h> #include <openvdb/tree/LeafManager.h> #include <openvdb/tree/Tree.h> #include <openvdb/util/NullInterrupter.h> #include "Morphology.h" // for erodeVoxels namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { namespace poisson { // This type should be at least as wide as math::pcg::SizeType. using VIndex = Int32; /// The type of a matrix used to represent a three-dimensional %Laplacian operator using LaplacianMatrix = math::pcg::SparseStencilMatrix<double, 7>; //@{ /// @brief Solve &nabla;<sup><small>2</small></sup><i>x</i> = <i>b</i> for <i>x</i>, /// where @e b is a vector comprising the values of all of the active voxels /// in the input tree. /// @return a new tree, with the same active voxel topology as the input tree, /// whose voxel values are the elements of the solution vector <i>x</i>. /// @details On input, the State object should specify convergence criteria /// (minimum error and maximum number of iterations); on output, it gives /// the actual termination conditions. 
/// @details The solution is computed using the conjugate gradient method /// with (where possible) incomplete Cholesky preconditioning, falling back /// to Jacobi preconditioning. /// @sa solveWithBoundaryConditions template<typename TreeType> inline typename TreeType::Ptr solve(const TreeType&, math::pcg::State&, bool staggered = false); template<typename TreeType, typename Interrupter> inline typename TreeType::Ptr solve(const TreeType&, math::pcg::State&, Interrupter&, bool staggered = false); //@} //@{ /// @brief Solve &nabla;<sup><small>2</small></sup><i>x</i> = <i>b</i> for <i>x</i> /// with user-specified boundary conditions, where @e b is a vector comprising /// the values of all of the active voxels in the input tree or domain mask if provided /// @return a new tree, with the same active voxel topology as the input tree, /// whose voxel values are the elements of the solution vector <i>x</i>. /// @details On input, the State object should specify convergence criteria /// (minimum error and maximum number of iterations); on output, it gives /// the actual termination conditions. /// @details The solution is computed using the conjugate gradient method with /// the specified type of preconditioner (default: incomplete Cholesky), /// falling back to Jacobi preconditioning if necessary. 
/// @details Each thread gets its own copy of the BoundaryOp, which should be /// a functor of the form /// @code /// struct BoundaryOp { /// using ValueType = LaplacianMatrix::ValueType; /// void operator()( /// const Coord& ijk, // coordinates of a boundary voxel /// const Coord& ijkNeighbor, // coordinates of an exterior neighbor of ijk /// ValueType& source, // element of b corresponding to ijk /// ValueType& diagonal // element of Laplacian matrix corresponding to ijk /// ) const; /// }; /// @endcode /// The functor is called for each of the exterior neighbors of each boundary voxel @ijk, /// and it must specify a boundary condition for @ijk by modifying one or both of two /// provided values: the entry in the source vector @e b corresponding to @ijk and /// the weighting coefficient for @ijk in the Laplacian operator matrix. /// /// @sa solve template<typename TreeType, typename BoundaryOp, typename Interrupter> inline typename TreeType::Ptr solveWithBoundaryConditions( const TreeType&, const BoundaryOp&, math::pcg::State&, Interrupter&, bool staggered = false); template< typename PreconditionerType, typename TreeType, typename BoundaryOp, typename Interrupter> inline typename TreeType::Ptr solveWithBoundaryConditionsAndPreconditioner( const TreeType&, const BoundaryOp&, math::pcg::State&, Interrupter&, bool staggered = false); template< typename PreconditionerType, typename TreeType, typename DomainTreeType, typename BoundaryOp, typename Interrupter> inline typename TreeType::Ptr solveWithBoundaryConditionsAndPreconditioner( const TreeType&, const DomainTreeType&, const BoundaryOp&, math::pcg::State&, Interrupter&, bool staggered = false); //@} /// @name Low-level functions //@{ // The following are low-level routines that can be used to assemble custom solvers. /// @brief Overwrite each active voxel in the given scalar tree /// with a sequential index, starting from zero. 
template<typename VIndexTreeType> inline void populateIndexTree(VIndexTreeType&); /// @brief Iterate over the active voxels of the input tree and for each one /// assign its index in the iteration sequence to the corresponding voxel /// of an integer-valued output tree. template<typename TreeType> inline typename TreeType::template ValueConverter<VIndex>::Type::Ptr createIndexTree(const TreeType&); /// @brief Return a vector of the active voxel values of the scalar-valued @a source tree. /// @details The <i>n</i>th element of the vector corresponds to the voxel whose value /// in the @a index tree is @e n. /// @param source a tree with a scalar value type /// @param index a tree of the same configuration as @a source but with /// value type VIndex that maps voxels to elements of the output vector template<typename VectorValueType, typename SourceTreeType> inline typename math::pcg::Vector<VectorValueType>::Ptr createVectorFromTree( const SourceTreeType& source, const typename SourceTreeType::template ValueConverter<VIndex>::Type& index); /// @brief Return a tree with the same active voxel topology as the @a index tree /// but whose voxel values are taken from the the given vector. /// @details The voxel whose value in the @a index tree is @e n gets assigned /// the <i>n</i>th element of the vector. 
/// @param index a tree with value type VIndex that maps voxels to elements of @a values /// @param values a vector of values with which to populate the active voxels of the output tree /// @param background the value for the inactive voxels of the output tree template<typename TreeValueType, typename VIndexTreeType, typename VectorValueType> inline typename VIndexTreeType::template ValueConverter<TreeValueType>::Type::Ptr createTreeFromVector( const math::pcg::Vector<VectorValueType>& values, const VIndexTreeType& index, const TreeValueType& background); /// @brief Generate a sparse matrix of the index-space (&Delta;<i>x</i> = 1) %Laplacian operator /// using second-order finite differences. /// @details This construction assumes homogeneous Dirichlet boundary conditions /// (exterior grid points are zero). template<typename BoolTreeType> inline LaplacianMatrix::Ptr createISLaplacian( const typename BoolTreeType::template ValueConverter<VIndex>::Type& vectorIndexTree, const BoolTreeType& interiorMask, bool staggered = false); /// @brief Generate a sparse matrix of the index-space (&Delta;<i>x</i> = 1) %Laplacian operator /// with user-specified boundary conditions using second-order finite differences. 
/// @details Each thread gets its own copy of @a boundaryOp, which should be /// a functor of the form /// @code /// struct BoundaryOp { /// using ValueType = LaplacianMatrix::ValueType; /// void operator()( /// const Coord& ijk, // coordinates of a boundary voxel /// const Coord& ijkNeighbor, // coordinates of an exterior neighbor of ijk /// ValueType& source, // element of source vector corresponding to ijk /// ValueType& diagonal // element of Laplacian matrix corresponding to ijk /// ) const; /// }; /// @endcode /// The functor is called for each of the exterior neighbors of each boundary voxel @ijk, /// and it must specify a boundary condition for @ijk by modifying one or both of two /// provided values: an entry in the given @a source vector corresponding to @ijk and /// the weighting coefficient for @ijk in the %Laplacian matrix. template<typename BoolTreeType, typename BoundaryOp> inline LaplacianMatrix::Ptr createISLaplacianWithBoundaryConditions( const typename BoolTreeType::template ValueConverter<VIndex>::Type& vectorIndexTree, const BoolTreeType& interiorMask, const BoundaryOp& boundaryOp, typename math::pcg::Vector<LaplacianMatrix::ValueType>& source, bool staggered = false); /// @brief Dirichlet boundary condition functor /// @details This is useful in describing fluid/air interfaces, where the pressure /// of the air is assumed to be zero. template<typename ValueType> struct DirichletBoundaryOp { inline void operator()(const Coord&, const Coord&, ValueType&, ValueType& diag) const { // Exterior neighbors are empty, so decrement the weighting coefficient // as for interior neighbors but leave the source vector unchanged. 
diag -= 1;
    }
};

//@}


////////////////////////////////////////


namespace internal {

/// @brief Functor for use with LeafManager::foreach() to populate an array
/// with per-leaf active voxel counts
template<typename LeafType>
struct LeafCountOp
{
    VIndex* count;
    LeafCountOp(VIndex* count_): count(count_) {}
    void operator()(const LeafType& leaf, size_t leafIdx) const {
        count[leafIdx] = static_cast<VIndex>(leaf.onVoxelCount());
    }
};


/// @brief Functor for use with LeafManager::foreach() to populate
/// active leaf voxels with sequential indices
template<typename LeafType>
struct LeafIndexOp
{
    // count[] holds cumulative (prefix-summed) active voxel counts, so a
    // leaf's starting index is the running total of all preceding leaves.
    const VIndex* count;
    LeafIndexOp(const VIndex* count_): count(count_) {}
    void operator()(LeafType& leaf, size_t leafIdx) const {
        VIndex idx = (leafIdx == 0) ? 0 : count[leafIdx - 1];
        for (typename LeafType::ValueOnIter it = leaf.beginValueOn(); it; ++it) {
            it.setValue(idx++);
        }
    }
};

} // namespace internal


template<typename VIndexTreeType>
inline void
populateIndexTree(VIndexTreeType& result)
{
    using LeafT = typename VIndexTreeType::LeafNodeType;
    using LeafMgrT = typename tree::LeafManager<VIndexTreeType>;

    // Linearize the tree.
    LeafMgrT leafManager(result);
    const size_t leafCount = leafManager.leafCount();
    if (leafCount == 0) return;

    // Count the number of active voxels in each leaf node.
    std::unique_ptr<VIndex[]> perLeafCount(new VIndex[leafCount]);
    VIndex* perLeafCountPtr = perLeafCount.get();
    leafManager.foreach(internal::LeafCountOp<LeafT>(perLeafCountPtr));

    // The starting index for each leaf node is the total number
    // of active voxels in all preceding leaf nodes.
    for (size_t i = 1; i < leafCount; ++i) {
        perLeafCount[i] += perLeafCount[i - 1];
    }

    // The last accumulated value should be the total of all active voxels.
    assert(Index64(perLeafCount[leafCount-1]) == result.activeVoxelCount());

    // Parallelize over the leaf nodes of the tree, storing a unique index
    // in each active voxel.
leafManager.foreach(internal::LeafIndexOp<LeafT>(perLeafCountPtr));
}


template<typename TreeType>
inline typename TreeType::template ValueConverter<VIndex>::Type::Ptr
createIndexTree(const TreeType& tree)
{
    using VIdxTreeT = typename TreeType::template ValueConverter<VIndex>::Type;

    // Construct an output tree with the same active voxel topology as the input tree.
    const VIndex invalidIdx = -1;
    typename VIdxTreeT::Ptr result(
        new VIdxTreeT(tree, /*background=*/invalidIdx, TopologyCopy()));

    // All active voxels are degrees of freedom, including voxels contained in active tiles.
    result->voxelizeActiveTiles();

    populateIndexTree(*result);

    return result;
}


////////////////////////////////////////


namespace internal {

/// @brief Functor for use with LeafManager::foreach() to populate a vector
/// with the values of a tree's active voxels
template<typename VectorValueType, typename SourceTreeType>
struct CopyToVecOp
{
    using VIdxTreeT = typename SourceTreeType::template ValueConverter<VIndex>::Type;
    using VIdxLeafT = typename VIdxTreeT::LeafNodeType;
    using LeafT = typename SourceTreeType::LeafNodeType;
    using TreeValueT = typename SourceTreeType::ValueType;
    using VectorT = typename math::pcg::Vector<VectorValueType>;

    const SourceTreeType* tree;
    VectorT* vector;

    CopyToVecOp(const SourceTreeType& t, VectorT& v): tree(&t), vector(&v) {}

    void operator()(const VIdxLeafT& idxLeaf, size_t /*leafIdx*/) const
    {
        VectorT& vec = *vector;
        if (const LeafT* leaf = tree->probeLeaf(idxLeaf.origin())) {
            // If a corresponding leaf node exists in the source tree,
            // copy voxel values from the source node to the output vector.
            for (typename VIdxLeafT::ValueOnCIter it = idxLeaf.cbeginValueOn(); it; ++it) {
                // *it is the voxel's position in the output vector.
                vec[*it] = leaf->getValue(it.pos());
            }
        } else {
            // If no corresponding leaf exists in the source tree,
            // fill the vector with a uniform value.
const TreeValueT& value = tree->getValue(idxLeaf.origin()); for (typename VIdxLeafT::ValueOnCIter it = idxLeaf.cbeginValueOn(); it; ++it) { vec[*it] = value; } } } }; } // namespace internal template<typename VectorValueType, typename SourceTreeType> inline typename math::pcg::Vector<VectorValueType>::Ptr createVectorFromTree(const SourceTreeType& tree, const typename SourceTreeType::template ValueConverter<VIndex>::Type& idxTree) { using VIdxTreeT = typename SourceTreeType::template ValueConverter<VIndex>::Type; using VIdxLeafMgrT = tree::LeafManager<const VIdxTreeT>; using VectorT = typename math::pcg::Vector<VectorValueType>; // Allocate the vector. const size_t numVoxels = idxTree.activeVoxelCount(); typename VectorT::Ptr result(new VectorT(static_cast<math::pcg::SizeType>(numVoxels))); // Parallelize over the leaf nodes of the index tree, filling the output vector // with values from corresponding voxels of the source tree. VIdxLeafMgrT leafManager(idxTree); leafManager.foreach(internal::CopyToVecOp<VectorValueType, SourceTreeType>(tree, *result)); return result; } //////////////////////////////////////// namespace internal { /// @brief Functor for use with LeafManager::foreach() to populate a tree /// with values from a vector template<typename TreeValueType, typename VIndexTreeType, typename VectorValueType> struct CopyFromVecOp { using OutTreeT = typename VIndexTreeType::template ValueConverter<TreeValueType>::Type; using OutLeafT = typename OutTreeT::LeafNodeType; using VIdxLeafT = typename VIndexTreeType::LeafNodeType; using VectorT = typename math::pcg::Vector<VectorValueType>; const VectorT* vector; OutTreeT* tree; CopyFromVecOp(const VectorT& v, OutTreeT& t): vector(&v), tree(&t) {} void operator()(const VIdxLeafT& idxLeaf, size_t /*leafIdx*/) const { const VectorT& vec = *vector; OutLeafT* leaf = tree->probeLeaf(idxLeaf.origin()); assert(leaf != nullptr); for (typename VIdxLeafT::ValueOnCIter it = idxLeaf.cbeginValueOn(); it; ++it) { 
leaf->setValueOnly(it.pos(), static_cast<TreeValueType>(vec[*it])); } } }; } // namespace internal template<typename TreeValueType, typename VIndexTreeType, typename VectorValueType> inline typename VIndexTreeType::template ValueConverter<TreeValueType>::Type::Ptr createTreeFromVector( const math::pcg::Vector<VectorValueType>& vector, const VIndexTreeType& idxTree, const TreeValueType& background) { using OutTreeT = typename VIndexTreeType::template ValueConverter<TreeValueType>::Type; using VIdxLeafMgrT = typename tree::LeafManager<const VIndexTreeType>; // Construct an output tree with the same active voxel topology as the index tree. typename OutTreeT::Ptr result(new OutTreeT(idxTree, background, TopologyCopy())); OutTreeT& tree = *result; // Parallelize over the leaf nodes of the index tree, populating voxels // of the output tree with values from the input vector. VIdxLeafMgrT leafManager(idxTree); leafManager.foreach( internal::CopyFromVecOp<TreeValueType, VIndexTreeType, VectorValueType>(vector, tree)); return result; } //////////////////////////////////////// namespace internal { /// Functor for use with LeafManager::foreach() to populate a sparse %Laplacian matrix template<typename BoolTreeType, typename BoundaryOp> struct ISStaggeredLaplacianOp { using VIdxTreeT = typename BoolTreeType::template ValueConverter<VIndex>::Type; using VIdxLeafT = typename VIdxTreeT::LeafNodeType; using ValueT = LaplacianMatrix::ValueType; using VectorT = typename math::pcg::Vector<ValueT>; LaplacianMatrix* laplacian; const VIdxTreeT* idxTree; const BoolTreeType* interiorMask; const BoundaryOp boundaryOp; VectorT* source; ISStaggeredLaplacianOp(LaplacianMatrix& m, const VIdxTreeT& idx, const BoolTreeType& mask, const BoundaryOp& op, VectorT& src): laplacian(&m), idxTree(&idx), interiorMask(&mask), boundaryOp(op), source(&src) {} void operator()(const VIdxLeafT& idxLeaf, size_t /*leafIdx*/) const { // Local accessors typename tree::ValueAccessor<const BoolTreeType> 
interior(*interiorMask); typename tree::ValueAccessor<const VIdxTreeT> vectorIdx(*idxTree); Coord ijk; VIndex column; const ValueT diagonal = -6.f, offDiagonal = 1.f; // Loop over active voxels in this leaf. for (typename VIdxLeafT::ValueOnCIter it = idxLeaf.cbeginValueOn(); it; ++it) { assert(it.getValue() > -1); const math::pcg::SizeType rowNum = static_cast<math::pcg::SizeType>(it.getValue()); LaplacianMatrix::RowEditor row = laplacian->getRowEditor(rowNum); ijk = it.getCoord(); if (interior.isValueOn(ijk)) { // The current voxel is an interior voxel. // All of its neighbors are in the solution domain. // -x direction row.setValue(vectorIdx.getValue(ijk.offsetBy(-1, 0, 0)), offDiagonal); // -y direction row.setValue(vectorIdx.getValue(ijk.offsetBy(0, -1, 0)), offDiagonal); // -z direction row.setValue(vectorIdx.getValue(ijk.offsetBy(0, 0, -1)), offDiagonal); // diagonal row.setValue(rowNum, diagonal); // +z direction row.setValue(vectorIdx.getValue(ijk.offsetBy(0, 0, 1)), offDiagonal); // +y direction row.setValue(vectorIdx.getValue(ijk.offsetBy(0, 1, 0)), offDiagonal); // +x direction row.setValue(vectorIdx.getValue(ijk.offsetBy(1, 0, 0)), offDiagonal); } else { // The current voxel is a boundary voxel. // At least one of its neighbors is outside the solution domain. 
ValueT modifiedDiagonal = 0.f; // -x direction if (vectorIdx.probeValue(ijk.offsetBy(-1, 0, 0), column)) { row.setValue(column, offDiagonal); modifiedDiagonal -= 1; } else { boundaryOp(ijk, ijk.offsetBy(-1, 0, 0), source->at(rowNum), modifiedDiagonal); } // -y direction if (vectorIdx.probeValue(ijk.offsetBy(0, -1, 0), column)) { row.setValue(column, offDiagonal); modifiedDiagonal -= 1; } else { boundaryOp(ijk, ijk.offsetBy(0, -1, 0), source->at(rowNum), modifiedDiagonal); } // -z direction if (vectorIdx.probeValue(ijk.offsetBy(0, 0, -1), column)) { row.setValue(column, offDiagonal); modifiedDiagonal -= 1; } else { boundaryOp(ijk, ijk.offsetBy(0, 0, -1), source->at(rowNum), modifiedDiagonal); } // +z direction if (vectorIdx.probeValue(ijk.offsetBy(0, 0, 1), column)) { row.setValue(column, offDiagonal); modifiedDiagonal -= 1; } else { boundaryOp(ijk, ijk.offsetBy(0, 0, 1), source->at(rowNum), modifiedDiagonal); } // +y direction if (vectorIdx.probeValue(ijk.offsetBy(0, 1, 0), column)) { row.setValue(column, offDiagonal); modifiedDiagonal -= 1; } else { boundaryOp(ijk, ijk.offsetBy(0, 1, 0), source->at(rowNum), modifiedDiagonal); } // +x direction if (vectorIdx.probeValue(ijk.offsetBy(1, 0, 0), column)) { row.setValue(column, offDiagonal); modifiedDiagonal -= 1; } else { boundaryOp(ijk, ijk.offsetBy(1, 0, 0), source->at(rowNum), modifiedDiagonal); } // diagonal row.setValue(rowNum, modifiedDiagonal); } } // end loop over voxels } }; // Stencil 1 is the correct stencil, but stencil 2 requires // half as many comparisons and produces smoother results at boundaries. 
//#define OPENVDB_TOOLS_POISSON_LAPLACIAN_STENCIL 1 #define OPENVDB_TOOLS_POISSON_LAPLACIAN_STENCIL 2 /// Functor for use with LeafManager::foreach() to populate a sparse %Laplacian matrix template<typename VIdxTreeT, typename BoundaryOp> struct ISLaplacianOp { using VIdxLeafT = typename VIdxTreeT::LeafNodeType; using ValueT = LaplacianMatrix::ValueType; using VectorT = typename math::pcg::Vector<ValueT>; LaplacianMatrix* laplacian; const VIdxTreeT* idxTree; const BoundaryOp boundaryOp; VectorT* source; ISLaplacianOp(LaplacianMatrix& m, const VIdxTreeT& idx, const BoundaryOp& op, VectorT& src): laplacian(&m), idxTree(&idx), boundaryOp(op), source(&src) {} void operator()(const VIdxLeafT& idxLeaf, size_t /*leafIdx*/) const { typename tree::ValueAccessor<const VIdxTreeT> vectorIdx(*idxTree); const int kNumOffsets = 6; const Coord ijkOffset[kNumOffsets] = { #if OPENVDB_TOOLS_POISSON_LAPLACIAN_STENCIL == 1 Coord(-1,0,0), Coord(1,0,0), Coord(0,-1,0), Coord(0,1,0), Coord(0,0,-1), Coord(0,0,1) #else Coord(-2,0,0), Coord(2,0,0), Coord(0,-2,0), Coord(0,2,0), Coord(0,0,-2), Coord(0,0,2) #endif }; // For each active voxel in this leaf... for (typename VIdxLeafT::ValueOnCIter it = idxLeaf.cbeginValueOn(); it; ++it) { assert(it.getValue() > -1); const Coord ijk = it.getCoord(); const math::pcg::SizeType rowNum = static_cast<math::pcg::SizeType>(it.getValue()); LaplacianMatrix::RowEditor row = laplacian->getRowEditor(rowNum); ValueT modifiedDiagonal = 0.f; // For each of the neighbors of the voxel at (i,j,k)... for (int dir = 0; dir < kNumOffsets; ++dir) { const Coord neighbor = ijk + ijkOffset[dir]; VIndex column; // For collocated vector grids, the central differencing stencil requires // access to neighbors at a distance of two voxels in each direction // (-x, +x, -y, +y, -z, +z). 
#if OPENVDB_TOOLS_POISSON_LAPLACIAN_STENCIL == 1 const bool ijkIsInterior = (vectorIdx.probeValue(neighbor + ijkOffset[dir], column) && vectorIdx.isValueOn(neighbor)); #else const bool ijkIsInterior = vectorIdx.probeValue(neighbor, column); #endif if (ijkIsInterior) { // If (i,j,k) is sufficiently far away from the exterior, // set its weight to one and adjust the center weight accordingly. row.setValue(column, 1.f); modifiedDiagonal -= 1.f; } else { // If (i,j,k) is adjacent to or one voxel away from the exterior, // invoke the boundary condition functor. boundaryOp(ijk, neighbor, source->at(rowNum), modifiedDiagonal); } } // Set the (possibly modified) weight for the voxel at (i,j,k). row.setValue(rowNum, modifiedDiagonal); } } }; } // namespace internal template<typename BoolTreeType> inline LaplacianMatrix::Ptr createISLaplacian(const typename BoolTreeType::template ValueConverter<VIndex>::Type& idxTree, const BoolTreeType& interiorMask, bool staggered) { using ValueT = LaplacianMatrix::ValueType; math::pcg::Vector<ValueT> unused( static_cast<math::pcg::SizeType>(idxTree.activeVoxelCount())); DirichletBoundaryOp<ValueT> op; return createISLaplacianWithBoundaryConditions(idxTree, interiorMask, op, unused, staggered); } template<typename BoolTreeType, typename BoundaryOp> inline LaplacianMatrix::Ptr createISLaplacianWithBoundaryConditions( const typename BoolTreeType::template ValueConverter<VIndex>::Type& idxTree, const BoolTreeType& interiorMask, const BoundaryOp& boundaryOp, typename math::pcg::Vector<LaplacianMatrix::ValueType>& source, bool staggered) { using VIdxTreeT = typename BoolTreeType::template ValueConverter<VIndex>::Type; using VIdxLeafMgrT = typename tree::LeafManager<const VIdxTreeT>; // The number of active voxels is the number of degrees of freedom. const Index64 numDoF = idxTree.activeVoxelCount(); // Construct the matrix. 
LaplacianMatrix::Ptr laplacianPtr( new LaplacianMatrix(static_cast<math::pcg::SizeType>(numDoF))); LaplacianMatrix& laplacian = *laplacianPtr; // Populate the matrix using a second-order, 7-point CD stencil. VIdxLeafMgrT idxLeafManager(idxTree); if (staggered) { idxLeafManager.foreach(internal::ISStaggeredLaplacianOp<BoolTreeType, BoundaryOp>( laplacian, idxTree, interiorMask, boundaryOp, source)); } else { idxLeafManager.foreach(internal::ISLaplacianOp<VIdxTreeT, BoundaryOp>( laplacian, idxTree, boundaryOp, source)); } return laplacianPtr; } //////////////////////////////////////// template<typename TreeType> inline typename TreeType::Ptr solve(const TreeType& inTree, math::pcg::State& state, bool staggered) { util::NullInterrupter interrupter; return solve(inTree, state, interrupter, staggered); } template<typename TreeType, typename Interrupter> inline typename TreeType::Ptr solve(const TreeType& inTree, math::pcg::State& state, Interrupter& interrupter, bool staggered) { DirichletBoundaryOp<LaplacianMatrix::ValueType> boundaryOp; return solveWithBoundaryConditions(inTree, boundaryOp, state, interrupter, staggered); } template<typename TreeType, typename BoundaryOp, typename Interrupter> inline typename TreeType::Ptr solveWithBoundaryConditions(const TreeType& inTree, const BoundaryOp& boundaryOp, math::pcg::State& state, Interrupter& interrupter, bool staggered) { using DefaultPrecondT = math::pcg::IncompleteCholeskyPreconditioner<LaplacianMatrix>; return solveWithBoundaryConditionsAndPreconditioner<DefaultPrecondT>( inTree, boundaryOp, state, interrupter, staggered); } template< typename PreconditionerType, typename TreeType, typename BoundaryOp, typename Interrupter> inline typename TreeType::Ptr solveWithBoundaryConditionsAndPreconditioner( const TreeType& inTree, const BoundaryOp& boundaryOp, math::pcg::State& state, Interrupter& interrupter, bool staggered) { return solveWithBoundaryConditionsAndPreconditioner<PreconditionerType>( /*source=*/inTree, 
/*domain mask=*/inTree, boundaryOp, state, interrupter, staggered); } template< typename PreconditionerType, typename TreeType, typename DomainTreeType, typename BoundaryOp, typename Interrupter> inline typename TreeType::Ptr solveWithBoundaryConditionsAndPreconditioner( const TreeType& inTree, const DomainTreeType& domainMask, const BoundaryOp& boundaryOp, math::pcg::State& state, Interrupter& interrupter, bool staggered) { using TreeValueT = typename TreeType::ValueType; using VecValueT = LaplacianMatrix::ValueType; using VectorT = typename math::pcg::Vector<VecValueT>; using VIdxTreeT = typename TreeType::template ValueConverter<VIndex>::Type; using MaskTreeT = typename TreeType::template ValueConverter<bool>::Type; // 1. Create a mapping from active voxels of the input tree to elements of a vector. typename VIdxTreeT::ConstPtr idxTree = createIndexTree(domainMask); // 2. Populate a vector with values from the input tree. typename VectorT::Ptr b = createVectorFromTree<VecValueT>(inTree, *idxTree); // 3. Create a mask of the interior voxels of the input tree (from the densified index tree). /// @todo Is this really needed? typename MaskTreeT::Ptr interiorMask( new MaskTreeT(*idxTree, /*background=*/false, TopologyCopy())); tools::erodeVoxels(*interiorMask, /*iterations=*/1, tools::NN_FACE); // 4. Create the Laplacian matrix. LaplacianMatrix::Ptr laplacian = createISLaplacianWithBoundaryConditions( *idxTree, *interiorMask, boundaryOp, *b, staggered); // 5. Solve the Poisson equation. laplacian->scale(-1.0); // matrix is negative-definite; solve -M x = -b b->scale(-1.0); typename VectorT::Ptr x(new VectorT(b->size(), zeroVal<VecValueT>())); typename math::pcg::Preconditioner<VecValueT>::Ptr precond( new PreconditionerType(*laplacian)); if (!precond->isValid()) { precond.reset(new math::pcg::JacobiPreconditioner<LaplacianMatrix>(*laplacian)); } state = math::pcg::solve(*laplacian, *b, *x, *precond, interrupter, state); // 6. 
Populate the output tree with values from the solution vector. /// @todo if (state.success) ... ? return createTreeFromVector<TreeValueT>(*x, *idxTree, /*background=*/zeroVal<TreeValueT>()); } } // namespace poisson } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_POISSONSOLVER_HAS_BEEN_INCLUDED
32,786
C
37.893238
99
0.669371
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Mask.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file Mask.h /// /// @brief Construct boolean mask grids from grids of arbitrary type #ifndef OPENVDB_TOOLS_MASK_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_MASK_HAS_BEEN_INCLUDED #include <openvdb/Grid.h> #include "LevelSetUtil.h" // for tools::sdfInteriorMask() #include <type_traits> // for std::enable_if, std::is_floating_point namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Given an input grid of any type, return a new, boolean grid /// whose active voxel topology matches the input grid's or, /// if the input grid is a level set, matches the input grid's interior. /// @param grid the grid from which to construct a mask /// @param isovalue for a level set grid, the isovalue that defines the grid's interior /// @sa tools::sdfInteriorMask() template<typename GridType> inline typename GridType::template ValueConverter<bool>::Type::Ptr interiorMask(const GridType& grid, const double isovalue = 0.0); //////////////////////////////////////// namespace mask_internal { /// @private template<typename GridType> struct Traits { static const bool isBool = std::is_same<typename GridType::ValueType, bool>::value; using BoolGridType = typename GridType::template ValueConverter<bool>::Type; using BoolGridPtrType = typename BoolGridType::Ptr; }; /// @private template<typename GridType> inline typename std::enable_if<std::is_floating_point<typename GridType::ValueType>::value, typename mask_internal::Traits<GridType>::BoolGridPtrType>::type doLevelSetInteriorMask(const GridType& grid, const double isovalue) { using GridValueT = typename GridType::ValueType; using MaskGridPtrT = typename mask_internal::Traits<GridType>::BoolGridPtrType; // If the input grid is a level set (and floating-point), return a mask of its interior. 
if (grid.getGridClass() == GRID_LEVEL_SET) { return tools::sdfInteriorMask(grid, static_cast<GridValueT>(isovalue)); } return MaskGridPtrT{}; } /// @private // No-op specialization for non-floating-point grids template<typename GridType> inline typename std::enable_if<!std::is_floating_point<typename GridType::ValueType>::value, typename mask_internal::Traits<GridType>::BoolGridPtrType>::type doLevelSetInteriorMask(const GridType&, const double /*isovalue*/) { using MaskGridPtrT = typename mask_internal::Traits<GridType>::BoolGridPtrType; return MaskGridPtrT{}; } /// @private template<typename GridType> inline typename std::enable_if<mask_internal::Traits<GridType>::isBool, typename mask_internal::Traits<GridType>::BoolGridPtrType>::type doInteriorMask(const GridType& grid, const double /*isovalue*/) { // If the input grid is already boolean, return a copy of it. return grid.deepCopy(); } /// @private template<typename GridType> inline typename std::enable_if<!(mask_internal::Traits<GridType>::isBool), typename mask_internal::Traits<GridType>::BoolGridPtrType>::type doInteriorMask(const GridType& grid, const double isovalue) { using MaskGridT = typename mask_internal::Traits<GridType>::BoolGridType; // If the input grid is a level set, return a mask of its interior. if (auto maskGridPtr = doLevelSetInteriorMask(grid, isovalue)) { return maskGridPtr; } // For any other grid type, return a mask of its active voxels. 
auto maskGridPtr = MaskGridT::create(/*background=*/false); maskGridPtr->setTransform(grid.transform().copy()); maskGridPtr->topologyUnion(grid); return maskGridPtr; } } // namespace mask_internal template<typename GridType> inline typename GridType::template ValueConverter<bool>::Type::Ptr interiorMask(const GridType& grid, const double isovalue) { return mask_internal::doInteriorMask(grid, isovalue); } //////////////////////////////////////// } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_MASK_HAS_BEEN_INCLUDED
4,060
C
31.75
92
0.72734
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/PointAdvect.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @author Ken Museth, D.J. Hill (openvdb port, added staggered grid support) /// /// @file tools/PointAdvect.h /// /// @brief Class PointAdvect advects points (with position) in a static velocity field #ifndef OPENVDB_TOOLS_POINT_ADVECT_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_POINT_ADVECT_HAS_BEEN_INCLUDED #include <openvdb/openvdb.h> #include <openvdb/math/Math.h> // min #include <openvdb/Types.h> // Vec3 types and version number #include <openvdb/Grid.h> // grid #include <openvdb/util/NullInterrupter.h> #include "Interpolation.h" // sampling #include "VelocityFields.h" // VelocityIntegrator #include <tbb/blocked_range.h> // threading #include <tbb/parallel_for.h> // threading #include <tbb/task.h> // for cancel #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// Class that holds a Vec3 grid, to be interpreted as the closest point to a constraint /// surface. Supports a method to allow a point to be projected onto the closest point /// on the constraint surface. Uses Caching. 
template<typename CptGridT = Vec3fGrid> class ClosestPointProjector { public: using CptGridType = CptGridT; using CptAccessor = typename CptGridType::ConstAccessor; using CptValueType = typename CptGridType::ValueType; ClosestPointProjector(): mCptIterations(0) { } ClosestPointProjector(const CptGridType& cptGrid, int n): mCptGrid(&cptGrid), mCptAccessor(cptGrid.getAccessor()), mCptIterations(n) { } ClosestPointProjector(const ClosestPointProjector &other): mCptGrid(other.mCptGrid), mCptAccessor(mCptGrid->getAccessor()), mCptIterations(other.mCptIterations) { } void setConstraintIterations(unsigned int cptIterations) { mCptIterations = cptIterations; } unsigned int numIterations() { return mCptIterations; } // point constraint template <typename LocationType> inline void projectToConstraintSurface(LocationType& W) const { /// Entries in the CPT tree are the closest point to the constraint surface. /// The interpolation step in sample introduces error so that the result /// of a single sample may not lie exactly on the surface. The iterations /// in the loop exist to minimize this error. CptValueType result(W[0], W[1],W[2]); for (unsigned int i = 0; i < mCptIterations; ++i) { const Vec3R location = mCptGrid->worldToIndex(Vec3R(result[0], result[1], result[2])); BoxSampler::sample<CptAccessor>(mCptAccessor, location, result); } W[0] = result[0]; W[1] = result[1]; W[2] = result[2]; } private: const CptGridType* mCptGrid; // Closest-Point-Transform vector field CptAccessor mCptAccessor; unsigned int mCptIterations; };// end of ClosestPointProjector class //////////////////////////////////////// /// Performs passive or constrained advection of points in a velocity field /// represented by an OpenVDB grid and an optional closest-point-transform (CPT) /// represented in another OpenVDB grid. Note the CPT is assumed to be /// in world coordinates and NOT index coordinates! 
/// Supports both collocated velocity grids and staggered velocity grids /// /// The @c PointListT template argument refers to any class with the following /// interface (e.g., std::vector<openvdb::Vec3f>): /// @code /// class PointList { /// ... /// public: /// using value_type = internal_vector3_type; // must support [] component access /// openvdb::Index size() const; // number of points in list /// value_type& operator[](int n); // world space position of nth point /// }; /// @endcode /// /// @note All methods (except size) are assumed to be thread-safe and /// the positions are returned as non-const references since the /// advection method needs to modify them! template<typename GridT = Vec3fGrid, typename PointListT = std::vector<typename GridT::ValueType>, bool StaggeredVelocity = false, typename InterrupterType = util::NullInterrupter> class PointAdvect { public: using GridType = GridT; using PointListType = PointListT; using LocationType = typename PointListT::value_type; using VelocityFieldIntegrator = VelocityIntegrator<GridT, StaggeredVelocity>; PointAdvect(const GridT& velGrid, InterrupterType* interrupter = nullptr): mVelGrid(&velGrid), mPoints(nullptr), mIntegrationOrder(1), mThreaded(true), mInterrupter(interrupter) { } PointAdvect(const PointAdvect &other) : mVelGrid(other.mVelGrid), mPoints(other.mPoints), mDt(other.mDt), mAdvIterations(other.mAdvIterations), mIntegrationOrder(other.mIntegrationOrder), mThreaded(other.mThreaded), mInterrupter(other.mInterrupter) { } virtual ~PointAdvect() { } /// If the order of the integration is set to zero no advection is performed bool earlyOut() const { return (mIntegrationOrder==0);} /// get & set void setThreaded(bool threaded) { mThreaded = threaded; } bool getThreaded() { return mThreaded; } void setIntegrationOrder(unsigned int order) {mIntegrationOrder = order;} /// Constrained advection of a list of points over a time = dt * advIterations void advect(PointListT& points, float dt, unsigned int 
advIterations = 1) { if (this->earlyOut()) return; // nothing to do! mPoints = &points; mDt = dt; mAdvIterations = advIterations; if (mInterrupter) mInterrupter->start("Advecting points by OpenVDB velocity field: "); if (mThreaded) { tbb::parallel_for(tbb::blocked_range<size_t>(0, mPoints->size()), *this); } else { (*this)(tbb::blocked_range<size_t>(0, mPoints->size())); } if (mInterrupter) mInterrupter->end(); } /// Never call this method directly - it is use by TBB and has to be public! void operator() (const tbb::blocked_range<size_t> &range) const { if (mInterrupter && mInterrupter->wasInterrupted()) { tbb::task::self().cancel_group_execution(); } VelocityFieldIntegrator velField(*mVelGrid); switch (mIntegrationOrder) { case 1: { for (size_t n = range.begin(); n != range.end(); ++n) { LocationType& X0 = (*mPoints)[n]; // loop over number of time steps for (unsigned int i = 0; i < mAdvIterations; ++i) { velField.template rungeKutta<1>(mDt, X0); } } } break; case 2: { for (size_t n = range.begin(); n != range.end(); ++n) { LocationType& X0 = (*mPoints)[n]; // loop over number of time steps for (unsigned int i = 0; i < mAdvIterations; ++i) { velField.template rungeKutta<2>(mDt, X0); } } } break; case 3: { for (size_t n = range.begin(); n != range.end(); ++n) { LocationType& X0 = (*mPoints)[n]; // loop over number of time steps for (unsigned int i = 0; i < mAdvIterations; ++i) { velField.template rungeKutta<3>(mDt, X0); } } } break; case 4: { for (size_t n = range.begin(); n != range.end(); ++n) { LocationType& X0 = (*mPoints)[n]; // loop over number of time steps for (unsigned int i = 0; i < mAdvIterations; ++i) { velField.template rungeKutta<4>(mDt, X0); } } } break; } } private: // the velocity field const GridType* mVelGrid; // vertex list of all the points PointListT* mPoints; // time integration parameters float mDt; // time step unsigned int mAdvIterations; // number of time steps unsigned int mIntegrationOrder; // operational parameters bool mThreaded; 
InterrupterType* mInterrupter; };//end of PointAdvect class template<typename GridT = Vec3fGrid, typename PointListT = std::vector<typename GridT::ValueType>, bool StaggeredVelocity = false, typename CptGridType = GridT, typename InterrupterType = util::NullInterrupter> class ConstrainedPointAdvect { public: using GridType = GridT; using LocationType = typename PointListT::value_type; using VelocityIntegratorType = VelocityIntegrator<GridT, StaggeredVelocity>; using ClosestPointProjectorType = ClosestPointProjector<CptGridType>; using PointListType = PointListT; ConstrainedPointAdvect(const GridType& velGrid, const GridType& cptGrid, int cptn, InterrupterType* interrupter = nullptr): mVelGrid(&velGrid), mCptGrid(&cptGrid), mCptIter(cptn), mInterrupter(interrupter) { } ConstrainedPointAdvect(const ConstrainedPointAdvect& other): mVelGrid(other.mVelGrid), mCptGrid(other.mCptGrid), mCptIter(other.mCptIter), mPoints(other.mPoints), mDt(other.mDt), mAdvIterations(other.mAdvIterations), mIntegrationOrder(other.mIntegrationOrder), mThreaded(other.mThreaded), mInterrupter(other.mInterrupter) { } virtual ~ConstrainedPointAdvect(){} void setConstraintIterations(unsigned int cptIter) {mCptIter = cptIter;} void setIntegrationOrder(unsigned int order) {mIntegrationOrder = order;} void setThreaded(bool threaded) { mThreaded = threaded; } bool getThreaded() { return mThreaded; } /// Constrained Advection a list of points over a time = dt * advIterations void advect(PointListT& points, float dt, unsigned int advIterations = 1) { mPoints = &points; mDt = dt; if (mIntegrationOrder==0 && mCptIter == 0) { return; // nothing to do! } (mIntegrationOrder>0) ? 
mAdvIterations = advIterations : mAdvIterations = 1; if (mInterrupter) mInterrupter->start("Advecting points by OpenVDB velocity field: "); const size_t N = mPoints->size(); if (mThreaded) { tbb::parallel_for(tbb::blocked_range<size_t>(0, N), *this); } else { (*this)(tbb::blocked_range<size_t>(0, N)); } if (mInterrupter) mInterrupter->end(); } /// Never call this method directly - it is use by TBB and has to be public! void operator() (const tbb::blocked_range<size_t> &range) const { if (mInterrupter && mInterrupter->wasInterrupted()) { tbb::task::self().cancel_group_execution(); } VelocityIntegratorType velField(*mVelGrid); ClosestPointProjectorType cptField(*mCptGrid, mCptIter); switch (mIntegrationOrder) { case 0://pure CPT projection { for (size_t n = range.begin(); n != range.end(); ++n) { LocationType& X0 = (*mPoints)[n]; for (unsigned int i = 0; i < mAdvIterations; ++i) { cptField.projectToConstraintSurface(X0); } } } break; case 1://1'th order advection and CPT projection { for (size_t n = range.begin(); n != range.end(); ++n) { LocationType& X0 = (*mPoints)[n]; for (unsigned int i = 0; i < mAdvIterations; ++i) { velField.template rungeKutta<1>(mDt, X0); cptField.projectToConstraintSurface(X0); } } } break; case 2://2'nd order advection and CPT projection { for (size_t n = range.begin(); n != range.end(); ++n) { LocationType& X0 = (*mPoints)[n]; for (unsigned int i = 0; i < mAdvIterations; ++i) { velField.template rungeKutta<2>(mDt, X0); cptField.projectToConstraintSurface(X0); } } } break; case 3://3'rd order advection and CPT projection { for (size_t n = range.begin(); n != range.end(); ++n) { LocationType& X0 = (*mPoints)[n]; for (unsigned int i = 0; i < mAdvIterations; ++i) { velField.template rungeKutta<3>(mDt, X0); cptField.projectToConstraintSurface(X0); } } } break; case 4://4'th order advection and CPT projection { for (size_t n = range.begin(); n != range.end(); ++n) { LocationType& X0 = (*mPoints)[n]; for (unsigned int i = 0; i < mAdvIterations; 
++i) { velField.template rungeKutta<4>(mDt, X0); cptField.projectToConstraintSurface(X0); } } } break; } } private: const GridType* mVelGrid; // the velocity field const GridType* mCptGrid; int mCptIter; PointListT* mPoints; // vertex list of all the points // time integration parameters float mDt; // time step unsigned int mAdvIterations; // number of time steps unsigned int mIntegrationOrder; // order of Runge-Kutta integration // operational parameters bool mThreaded; InterrupterType* mInterrupter; };// end of ConstrainedPointAdvect class } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_POINT_ADVECT_HAS_BEEN_INCLUDED
14,639
C
36.15736
98
0.574151
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Dense.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file Dense.h /// /// @brief This file defines a simple dense grid and efficient /// converters to and from VDB grids. #ifndef OPENVDB_TOOLS_DENSE_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_DENSE_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/Grid.h> #include <openvdb/tree/ValueAccessor.h> #include <openvdb/Exceptions.h> #include <openvdb/util/Formats.h> #include "Prune.h" #include <tbb/parallel_for.h> #include <iostream> #include <memory> #include <string> #include <utility> // for std::pair #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Populate a dense grid with the values of voxels from a sparse grid, /// where the sparse grid intersects the dense grid. /// @param sparse an OpenVDB grid or tree from which to copy values /// @param dense the dense grid into which to copy values /// @param serial if false, process voxels in parallel template<typename DenseT, typename GridOrTreeT> void copyToDense( const GridOrTreeT& sparse, DenseT& dense, bool serial = false); /// @brief Populate a sparse grid with the values of all of the voxels of a dense grid. /// @param dense the dense grid from which to copy values /// @param sparse an OpenVDB grid or tree into which to copy values /// @param tolerance values in the dense grid that are within this tolerance of the sparse /// grid's background value become inactive background voxels or tiles in the sparse grid /// @param serial if false, process voxels in parallel template<typename DenseT, typename GridOrTreeT> void copyFromDense( const DenseT& dense, GridOrTreeT& sparse, const typename GridOrTreeT::ValueType& tolerance, bool serial = false); //////////////////////////////////////// /// We currently support the following two 3D memory layouts for dense /// volumes: XYZ, i.e. x is the fastest moving index, and ZYX, i.e. 
z /// is the fastest moving index. The ZYX memory layout leads to nested /// for-loops of the order x, y, z, which we find to be the most /// intuitive. Hence, ZYX is the layout used throughout VDB. However, /// other data structures, e.g. Houdini and Maya, employ the XYZ /// layout. Clearly a dense volume with the ZYX layout converts more /// efficiently to a VDB, but we support both for convenience. enum MemoryLayout { LayoutXYZ, LayoutZYX }; /// @brief Base class for Dense which is defined below. /// @note The constructor of this class is protected to prevent direct /// instantiation. template<typename ValueT, MemoryLayout Layout> class DenseBase; /// @brief Partial template specialization of DenseBase. /// @note ZYX is the memory-layout in VDB. It leads to nested /// for-loops of the order x, y, z which we find to be the most intuitive. template<typename ValueT> class DenseBase<ValueT, LayoutZYX> { public: /// @brief Return the linear offset into this grid's value array given by /// unsigned coordinates (i, j, k), i.e., coordinates relative to /// the origin of this grid's bounding box. /// /// @warning The input coordinates are assume to be relative to /// the grid's origin, i.e. minimum of its index bounding box! inline size_t coordToOffset(size_t i, size_t j, size_t k) const { return i*mX + j*mY + k; } /// @brief Return the local coordinate corresponding to the specified linear offset. /// /// @warning The returned coordinate is relative to the origin of this /// grid's bounding box so add dense.origin() to get absolute coordinates. inline Coord offsetToLocalCoord(size_t n) const { const size_t x = n / mX; n -= mX*x; const size_t y = n / mY; return Coord(Coord::ValueType(x), Coord::ValueType(y), Coord::ValueType(n - mY*y)); } /// @brief Return the stride of the array in the x direction ( = dimY*dimZ). /// @note This method is required by both CopyToDense and CopyFromDense. 
inline size_t xStride() const { return mX; } /// @brief Return the stride of the array in the y direction ( = dimZ). /// @note This method is required by both CopyToDense and CopyFromDense. inline size_t yStride() const { return mY; } /// @brief Return the stride of the array in the z direction ( = 1). /// @note This method is required by both CopyToDense and CopyFromDense. static size_t zStride() { return 1; } protected: /// Protected constructor so as to prevent direct instantiation DenseBase(const CoordBBox& bbox) : mBBox(bbox), mY(bbox.dim()[2]), mX(mY*bbox.dim()[1]) {} const CoordBBox mBBox;//signed coordinates of the domain represented by the grid const size_t mY, mX;//strides in the y and x direction };// end of DenseBase<ValueT, LayoutZYX> /// @brief Partial template specialization of DenseBase. /// @note This is the memory-layout employed in Houdini and Maya. It leads /// to nested for-loops of the order z, y, x. template<typename ValueT> class DenseBase<ValueT, LayoutXYZ> { public: /// @brief Return the linear offset into this grid's value array given by /// unsigned coordinates (i, j, k), i.e., coordinates relative to /// the origin of this grid's bounding box. /// /// @warning The input coordinates are assume to be relative to /// the grid's origin, i.e. minimum of its index bounding box! inline size_t coordToOffset(size_t i, size_t j, size_t k) const { return i + j*mY + k*mZ; } /// @brief Return the index coordinate corresponding to the specified linear offset. /// /// @warning The returned coordinate is relative to the origin of this /// grid's bounding box so add dense.origin() to get absolute coordinates. inline Coord offsetToLocalCoord(size_t n) const { const size_t z = n / mZ; n -= mZ*z; const size_t y = n / mY; return Coord(Coord::ValueType(n - mY*y), Coord::ValueType(y), Coord::ValueType(z)); } /// @brief Return the stride of the array in the x direction ( = 1). /// @note This method is required by both CopyToDense and CopyFromDense. 
static size_t xStride() { return 1; } /// @brief Return the stride of the array in the y direction ( = dimX). /// @note This method is required by both CopyToDense and CopyFromDense. inline size_t yStride() const { return mY; } /// @brief Return the stride of the array in the y direction ( = dimX*dimY). /// @note This method is required by both CopyToDense and CopyFromDense. inline size_t zStride() const { return mZ; } protected: /// Protected constructor so as to prevent direct instantiation DenseBase(const CoordBBox& bbox) : mBBox(bbox), mY(bbox.dim()[0]), mZ(mY*bbox.dim()[1]) {} const CoordBBox mBBox;//signed coordinates of the domain represented by the grid const size_t mY, mZ;//strides in the y and z direction };// end of DenseBase<ValueT, LayoutXYZ> /// @brief Dense is a simple dense grid API used by the CopyToDense and /// CopyFromDense classes defined below. /// @details Use the Dense class to efficiently produce a dense in-memory /// representation of an OpenVDB grid. However, be aware that a dense grid /// could have a memory footprint that is orders of magnitude larger than /// the sparse grid from which it originates. /// /// @note This class can be used as a simple wrapper for existing dense grid /// classes if they provide access to the raw data array. /// @note This implementation allows for the 3D memory layout to be /// defined by the MemoryLayout template parameter (see above for definition). /// The default memory layout is ZYX since that's the layout used by OpenVDB grids. template<typename ValueT, MemoryLayout Layout = LayoutZYX> class Dense : public DenseBase<ValueT, Layout> { public: using ValueType = ValueT; using BaseT = DenseBase<ValueT, Layout>; using Ptr = SharedPtr<Dense>; using ConstPtr = SharedPtr<const Dense>; /// @brief Construct a dense grid with a given range of coordinates. /// /// @param bbox the bounding box of the (signed) coordinate range of this grid /// @throw ValueError if the bounding box is empty. 
/// @note The min and max coordinates of the bounding box are inclusive. Dense(const CoordBBox& bbox) : BaseT(bbox) { this->init(); } /// @brief Construct a dense grid with a given range of coordinates and initial value /// /// @param bbox the bounding box of the (signed) coordinate range of this grid /// @param value the initial value of the grid. /// @throw ValueError if the bounding box is empty. /// @note The min and max coordinates of the bounding box are inclusive. Dense(const CoordBBox& bbox, const ValueT& value) : BaseT(bbox) { this->init(); this->fill(value); } /// @brief Construct a dense grid that wraps an external array. /// /// @param bbox the bounding box of the (signed) coordinate range of this grid /// @param data a raw C-style array whose size is commensurate with /// the coordinate domain of @a bbox /// /// @note The data array is assumed to have a stride of one in the @e z direction. /// @throw ValueError if the bounding box is empty. /// @note The min and max coordinates of the bounding box are inclusive. Dense(const CoordBBox& bbox, ValueT* data) : BaseT(bbox), mData(data) { if (BaseT::mBBox.empty()) { OPENVDB_THROW(ValueError, "can't construct a dense grid with an empty bounding box"); } } /// @brief Construct a dense grid with a given origin and dimensions. /// /// @param dim the desired dimensions of the grid /// @param min the signed coordinates of the first voxel in the dense grid /// @throw ValueError if any of the dimensions are zero. /// @note The @a min coordinate is inclusive, and the max coordinate will be /// @a min + @a dim - 1. Dense(const Coord& dim, const Coord& min = Coord(0)) : BaseT(CoordBBox(min, min+dim.offsetBy(-1))) { this->init(); } /// @brief Return the memory layout for this grid (see above for definitions). static MemoryLayout memoryLayout() { return Layout; } /// @brief Return a raw pointer to this grid's value array. /// @note This method is required by CopyToDense. 
inline ValueT* data() { return mData; } /// @brief Return a raw pointer to this grid's value array. /// @note This method is required by CopyFromDense. inline const ValueT* data() const { return mData; } /// @brief Return the bounding box of the signed index domain of this grid. /// @note This method is required by both CopyToDense and CopyFromDense. inline const CoordBBox& bbox() const { return BaseT::mBBox; } /// Return the grid's origin in index coordinates. inline const Coord& origin() const { return BaseT::mBBox.min(); } /// @brief Return the number of voxels contained in this grid. inline Index64 valueCount() const { return BaseT::mBBox.volume(); } /// @brief Set the value of the voxel at the given array offset. inline void setValue(size_t offset, const ValueT& value) { mData[offset] = value; } /// @brief Return a const reference to the value of the voxel at the given array offset. const ValueT& getValue(size_t offset) const { return mData[offset]; } /// @brief Return a non-const reference to the value of the voxel at the given array offset. ValueT& getValue(size_t offset) { return mData[offset]; } /// @brief Set the value of the voxel at unsigned index coordinates (i, j, k). /// @note This is somewhat slower than using an array offset. inline void setValue(size_t i, size_t j, size_t k, const ValueT& value) { mData[BaseT::coordToOffset(i,j,k)] = value; } /// @brief Return a const reference to the value of the voxel /// at unsigned index coordinates (i, j, k). /// @note This is somewhat slower than using an array offset. inline const ValueT& getValue(size_t i, size_t j, size_t k) const { return mData[BaseT::coordToOffset(i,j,k)]; } /// @brief Return a non-const reference to the value of the voxel /// at unsigned index coordinates (i, j, k). /// @note This is somewhat slower than using an array offset. 
inline ValueT& getValue(size_t i, size_t j, size_t k) { return mData[BaseT::coordToOffset(i,j,k)]; } /// @brief Set the value of the voxel at the given signed coordinates. /// @note This is slower than using either an array offset or unsigned index coordinates. inline void setValue(const Coord& xyz, const ValueT& value) { mData[this->coordToOffset(xyz)] = value; } /// @brief Return a const reference to the value of the voxel at the given signed coordinates. /// @note This is slower than using either an array offset or unsigned index coordinates. inline const ValueT& getValue(const Coord& xyz) const { return mData[this->coordToOffset(xyz)]; } /// @brief Return a non-const reference to the value of the voxel /// at the given signed coordinates. /// @note This is slower than using either an array offset or unsigned index coordinates. inline ValueT& getValue(const Coord& xyz) { return mData[this->coordToOffset(xyz)]; } /// @brief Fill this grid with a constant value. inline void fill(const ValueT& value) { size_t size = this->valueCount(); ValueT* a = mData; while(size--) *a++ = value; } /// @brief Return the linear offset into this grid's value array given by /// the specified signed coordinates, i.e., coordinates in the space of /// this grid's bounding box. /// /// @note This method reflects the fact that we assume the same /// layout of values as an OpenVDB grid, i.e., the fastest coordinate is @e z. inline size_t coordToOffset(const Coord& xyz) const { assert(BaseT::mBBox.isInside(xyz)); return BaseT::coordToOffset(size_t(xyz[0]-BaseT::mBBox.min()[0]), size_t(xyz[1]-BaseT::mBBox.min()[1]), size_t(xyz[2]-BaseT::mBBox.min()[2])); } /// @brief Return the global coordinate corresponding to the specified linear offset. inline Coord offsetToCoord(size_t n) const { return this->offsetToLocalCoord(n) + BaseT::mBBox.min(); } /// @brief Return the memory footprint of this Dense grid in bytes. 
inline Index64 memUsage() const { return sizeof(*this) + BaseT::mBBox.volume() * sizeof(ValueType); } /// @brief Output a human-readable description of this grid to the /// specified stream. void print(const std::string& name = "", std::ostream& os = std::cout) const { const Coord dim = BaseT::mBBox.dim(); os << "Dense Grid"; if (!name.empty()) os << " \"" << name << "\""; util::printBytes(os, this->memUsage(), ":\n Memory footprint: "); os << " Dimensions of grid : " << dim[0] << " x " << dim[1] << " x " << dim[2] << "\n"; os << " Number of voxels: " << util::formattedInt(this->valueCount()) << "\n"; os << " Bounding box of voxels: " << BaseT::mBBox << "\n"; os << " Memory layout: " << (Layout == LayoutZYX ? "ZYX (" : "XYZ (dis") << "similar to VDB)\n"; } private: /// @brief Private method to initialize the dense value array. void init() { if (BaseT::mBBox.empty()) { OPENVDB_THROW(ValueError, "can't construct a dense grid with an empty bounding box"); } mArray.reset(new ValueT[BaseT::mBBox.volume()]); mData = mArray.get(); } std::unique_ptr<ValueT[]> mArray; ValueT* mData;//raw c-style pointer to values };// end of Dense //////////////////////////////////////// /// @brief Copy an OpenVDB tree into an existing dense grid. /// /// @note Only voxels that intersect the dense grid's bounding box are copied /// from the OpenVDB tree. But both active and inactive voxels are copied, /// so all existing values in the dense grid are overwritten, regardless of /// the OpenVDB tree's topology. 
template<typename _TreeT, typename _DenseT = Dense<typename _TreeT::ValueType> > class CopyToDense { public: using DenseT = _DenseT; using TreeT = _TreeT; using ValueT = typename TreeT::ValueType; CopyToDense(const TreeT& tree, DenseT& dense) : mRoot(&(tree.root())), mDense(&dense) {} void copy(bool serial = false) const { if (serial) { mRoot->copyToDense(mDense->bbox(), *mDense); } else { tbb::parallel_for(mDense->bbox(), *this); } } /// @brief Public method called by tbb::parallel_for void operator()(const CoordBBox& bbox) const { mRoot->copyToDense(bbox, *mDense); } private: const typename TreeT::RootNodeType* mRoot; DenseT* mDense; };// CopyToDense // Convenient wrapper function for the CopyToDense class template<typename DenseT, typename GridOrTreeT> void copyToDense(const GridOrTreeT& sparse, DenseT& dense, bool serial) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; CopyToDense<TreeT, DenseT> op(Adapter::constTree(sparse), dense); op.copy(serial); } //////////////////////////////////////// /// @brief Copy the values from a dense grid into an OpenVDB tree. /// /// @details Values in the dense grid that are within a tolerance of /// the background value are truncated to inactive background voxels or tiles. /// This allows the tree to form a sparse representation of the dense grid. /// /// @note Since this class allocates leaf nodes concurrently it is recommended /// to use a scalable implementation of @c new like the one provided by TBB, /// rather than the mutex-protected standard library @c new. 
template<typename _TreeT, typename _DenseT = Dense<typename _TreeT::ValueType> > class CopyFromDense { public: using DenseT = _DenseT; using TreeT = _TreeT; using ValueT = typename TreeT::ValueType; using LeafT = typename TreeT::LeafNodeType; using AccessorT = tree::ValueAccessor<TreeT>; CopyFromDense(const DenseT& dense, TreeT& tree, const ValueT& tolerance) : mDense(&dense), mTree(&tree), mBlocks(nullptr), mTolerance(tolerance), mAccessor(tree.empty() ? nullptr : new AccessorT(tree)) { } CopyFromDense(const CopyFromDense& other) : mDense(other.mDense), mTree(other.mTree), mBlocks(other.mBlocks), mTolerance(other.mTolerance), mAccessor(other.mAccessor.get() == nullptr ? nullptr : new AccessorT(*mTree)) { } /// @brief Copy values from the dense grid to the sparse tree. void copy(bool serial = false) { mBlocks = new std::vector<Block>(); const CoordBBox& bbox = mDense->bbox(); // Pre-process: Construct a list of blocks aligned with (potential) leaf nodes for (CoordBBox sub=bbox; sub.min()[0] <= bbox.max()[0]; sub.min()[0] = sub.max()[0] + 1) { for (sub.min()[1] = bbox.min()[1]; sub.min()[1] <= bbox.max()[1]; sub.min()[1] = sub.max()[1] + 1) { for (sub.min()[2] = bbox.min()[2]; sub.min()[2] <= bbox.max()[2]; sub.min()[2] = sub.max()[2] + 1) { sub.max() = Coord::minComponent(bbox.max(), (sub.min()&(~(LeafT::DIM-1u))).offsetBy(LeafT::DIM-1u)); mBlocks->push_back(Block(sub)); } } } // Multi-threaded process: Convert dense grid into leaf nodes and tiles if (serial) { (*this)(tbb::blocked_range<size_t>(0, mBlocks->size())); } else { tbb::parallel_for(tbb::blocked_range<size_t>(0, mBlocks->size()), *this); } // Post-process: Insert leaf nodes and tiles into the tree, and prune the tiles only! 
tree::ValueAccessor<TreeT> acc(*mTree); for (size_t m=0, size = mBlocks->size(); m<size; ++m) { Block& block = (*mBlocks)[m]; if (block.leaf) { acc.addLeaf(block.leaf); } else if (block.tile.second) {//only background tiles are inactive acc.addTile(1, block.bbox.min(), block.tile.first, true);//leaf tile } } delete mBlocks; mBlocks = nullptr; tools::pruneTiles(*mTree, mTolerance);//multi-threaded } /// @brief Public method called by tbb::parallel_for /// @warning Never call this method directly! void operator()(const tbb::blocked_range<size_t> &r) const { assert(mBlocks); LeafT* leaf = new LeafT(); for (size_t m=r.begin(), n=0, end = r.end(); m != end; ++m, ++n) { Block& block = (*mBlocks)[m]; const CoordBBox &bbox = block.bbox; if (mAccessor.get() == nullptr) {//i.e. empty target tree leaf->fill(mTree->background(), false); } else {//account for existing leaf nodes in the target tree if (const LeafT* target = mAccessor->probeConstLeaf(bbox.min())) { (*leaf) = (*target); } else { ValueT value = zeroVal<ValueT>(); bool state = mAccessor->probeValue(bbox.min(), value); leaf->fill(value, state); } } leaf->copyFromDense(bbox, *mDense, mTree->background(), mTolerance); if (!leaf->isConstant(block.tile.first, block.tile.second, mTolerance)) { leaf->setOrigin(bbox.min() & (~(LeafT::DIM - 1))); block.leaf = leaf; leaf = new LeafT(); } }// loop over blocks delete leaf; } private: struct Block { CoordBBox bbox; LeafT* leaf; std::pair<ValueT, bool> tile; Block(const CoordBBox& b) : bbox(b), leaf(nullptr) {} }; const DenseT* mDense; TreeT* mTree; std::vector<Block>* mBlocks; ValueT mTolerance; std::unique_ptr<AccessorT> mAccessor; };// CopyFromDense // Convenient wrapper function for the CopyFromDense class template<typename DenseT, typename GridOrTreeT> void copyFromDense(const DenseT& dense, GridOrTreeT& sparse, const typename GridOrTreeT::ValueType& tolerance, bool serial) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; 
CopyFromDense<TreeT, DenseT> op(dense, Adapter::tree(sparse), tolerance); op.copy(serial); } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_DENSE_HAS_BEEN_INCLUDED
23,080
C
38.590051
99
0.639688
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/ParticlesToLevelSet.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @author Ken Museth /// /// @file tools/ParticlesToLevelSet.h /// /// @brief Rasterize particles with position, radius and velocity /// into either a boolean mask grid or a narrow-band level set grid. /// /// @details Optionally, arbitrary attributes on the particles can be transferred, /// resulting in additional output grids with the same topology as the main grid. /// /// @note Particle to level set conversion is intended to be combined with /// some kind of surface postprocessing, using /// @vdblink::tools::LevelSetFilter LevelSetFilter@endlink, for example. /// Without such postprocessing the generated surface is typically too noisy and blobby. /// However, it serves as a great and fast starting point for subsequent /// level set surface processing and convolution. /// /// @details For particle access, any class with the following interface may be used /// (see the unit test or the From Particles Houdini SOP for practical examples): /// @code /// struct ParticleList /// { /// // Return the total number of particles in the list. /// // Always required! /// size_t size() const; /// /// // Get the world-space position of the nth particle. /// // Required by rasterizeSpheres(). /// void getPos(size_t n, Vec3R& xyz) const; /// /// // Get the world-space position and radius of the nth particle. /// // Required by rasterizeSpheres(). /// void getPosRad(size_t n, Vec3R& xyz, Real& radius) const; /// /// // Get the world-space position, radius and velocity of the nth particle. /// // Required by rasterizeTrails(). /// void getPosRadVel(size_t n, Vec3R& xyz, Real& radius, Vec3R& velocity) const; /// /// // Get the value of the nth particle's user-defined attribute (of type @c AttributeType). /// // Required only if attribute transfer is enabled in ParticlesToLevelSet. 
/// void getAtt(size_t n, AttributeType& att) const; /// }; /// @endcode /// /// Some functions accept an interrupter argument. This refers to any class /// with the following interface: /// @code /// struct Interrupter /// { /// void start(const char* name = nullptr) // called when computations begin /// void end() // called when computations end /// bool wasInterrupted(int percent=-1) // return true to abort computation /// }; /// @endcode /// /// The default interrupter is @vdblink::util::NullInterrupter NullInterrupter@endlink, /// for which all calls are no-ops that incur no computational overhead. #ifndef OPENVDB_TOOLS_PARTICLES_TO_LEVELSET_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_PARTICLES_TO_LEVELSET_HAS_BEEN_INCLUDED #include <tbb/parallel_reduce.h> #include <tbb/blocked_range.h> #include <openvdb/Types.h> #include <openvdb/Grid.h> #include <openvdb/math/Math.h> #include <openvdb/math/Transform.h> #include <openvdb/tree/LeafManager.h> #include <openvdb/util/logging.h> #include <openvdb/util/NullInterrupter.h> #include "Composite.h" // for csgUnion() #include "PointPartitioner.h" #include "Prune.h" #include "SignedFloodFill.h" #include <functional> #include <iostream> #include <type_traits> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Populate a scalar, floating-point grid with CSG-unioned level set spheres /// described by the given particle positions and radii. /// @details For more control over the output, including attribute transfer, /// use the ParticlesToLevelSet class directly. template<typename GridT, typename ParticleListT, typename InterrupterT = util::NullInterrupter> inline void particlesToSdf(const ParticleListT&, GridT&, InterrupterT* = nullptr); /// @brief Populate a scalar, floating-point grid with fixed-size, CSG-unioned /// level set spheres described by the given particle positions and the specified radius. 
/// @details For more control over the output, including attribute transfer, /// use the ParticlesToLevelSet class directly. template<typename GridT, typename ParticleListT, typename InterrupterT = util::NullInterrupter> inline void particlesToSdf(const ParticleListT&, GridT&, Real radius, InterrupterT* = nullptr); /// @brief Populate a scalar, floating-point grid with CSG-unioned trails /// of level set spheres with decreasing radius, where the starting position and radius /// and the direction of each trail is given by particle attributes. /// @details For more control over the output, including attribute transfer, /// use the ParticlesToLevelSet class directly. /// @note The @a delta parameter controls the distance between spheres in a trail. /// Be careful not to use too small a value. template<typename GridT, typename ParticleListT, typename InterrupterT = util::NullInterrupter> inline void particleTrailsToSdf(const ParticleListT&, GridT&, Real delta=1, InterrupterT* =nullptr); /// @brief Activate a boolean grid wherever it intersects the spheres /// described by the given particle positions and radii. /// @details For more control over the output, including attribute transfer, /// use the ParticlesToLevelSet class directly. template<typename GridT, typename ParticleListT, typename InterrupterT = util::NullInterrupter> inline void particlesToMask(const ParticleListT&, GridT&, InterrupterT* = nullptr); /// @brief Activate a boolean grid wherever it intersects the fixed-size spheres /// described by the given particle positions and the specified radius. /// @details For more control over the output, including attribute transfer, /// use the ParticlesToLevelSet class directly. 
template<typename GridT, typename ParticleListT, typename InterrupterT = util::NullInterrupter> inline void particlesToMask(const ParticleListT&, GridT&, Real radius, InterrupterT* = nullptr); /// @brief Activate a boolean grid wherever it intersects trails of spheres /// with decreasing radius, where the starting position and radius and the direction /// of each trail is given by particle attributes. /// @details For more control over the output, including attribute transfer, /// use the ParticlesToLevelSet class directly. /// @note The @a delta parameter controls the distance between spheres in a trail. /// Be careful not to use too small a value. template<typename GridT, typename ParticleListT, typename InterrupterT = util::NullInterrupter> inline void particleTrailsToMask(const ParticleListT&, GridT&,Real delta=1,InterrupterT* =nullptr); //////////////////////////////////////// namespace p2ls_internal { // This is a simple type that combines a distance value and a particle // attribute. It's required for attribute transfer which is performed // in the ParticlesToLevelSet::Raster member class defined below. 
/// @private template<typename VisibleT, typename BlindT> class BlindData; } template<typename SdfGridT, typename AttributeT = void, typename InterrupterT = util::NullInterrupter> class ParticlesToLevelSet { public: using DisableT = typename std::is_void<AttributeT>::type; using InterrupterType = InterrupterT; using SdfGridType = SdfGridT; using SdfType = typename SdfGridT::ValueType; using AttType = typename std::conditional<DisableT::value, size_t, AttributeT>::type; using AttGridType = typename SdfGridT::template ValueConverter<AttType>::Type; static const bool OutputIsMask = std::is_same<SdfType, bool>::value; /// @brief Constructor using an existing boolean or narrow-band level set grid /// /// @param grid grid into which particles are rasterized /// @param interrupt callback to interrupt a long-running process /// /// @details If the input grid is already populated with signed distances, /// particles are unioned onto the existing level set surface. /// /// @details The width in voxel units of the generated narrow band level set /// is given by 2&times;<I>background</I>/<I>dx</I>, where @a background /// is the background value stored in the grid and @a dx is the voxel size /// derived from the transform associated with the grid. /// Also note that &minus;<I>background</I> corresponds to the constant value /// inside the generated narrow-band level set. /// /// @note If attribute transfer is enabled, i.e., if @c AttributeT is not @c void, /// attributes are generated only for voxels that overlap with particles, /// not for any other preexisting voxels (for which no attributes exist!). explicit ParticlesToLevelSet(SdfGridT& grid, InterrupterT* interrupt = nullptr); ~ParticlesToLevelSet() { delete mBlindGrid; } /// @brief This method syncs up the level set and attribute grids /// and therefore needs to be called before any of those grids are /// used and after the last call to any of the rasterizer methods. 
/// @details It has no effect or overhead if attribute transfer is disabled /// (i.e., if @c AttributeT is @c void) and @a prune is @c false. /// /// @note Avoid calling this method more than once, and call it only after /// all the particles have been rasterized. void finalize(bool prune = false); /// @brief Return a pointer to the grid containing the optional user-defined attribute. /// @warning If attribute transfer is disabled (i.e., if @c AttributeT is @c void) /// or if @link finalize() finalize@endlink is not called, the pointer will be null. typename AttGridType::Ptr attributeGrid() { return mAttGrid; } /// @brief Return the size of a voxel in world units. Real getVoxelSize() const { return mDx; } /// @brief Return the half-width of the narrow band in voxel units. Real getHalfWidth() const { return mHalfWidth; } /// @brief Return the smallest radius allowed in voxel units. Real getRmin() const { return mRmin; } /// @brief Set the smallest radius allowed in voxel units. void setRmin(Real Rmin) { mRmin = math::Max(Real(0),Rmin); } /// @brief Return the largest radius allowed in voxel units. Real getRmax() const { return mRmax; } /// @brief Set the largest radius allowed in voxel units. void setRmax(Real Rmax) { mRmax = math::Max(mRmin,Rmax); } /// @brief Return @c true if any particles were ignored due to their size. bool ignoredParticles() const { return mMinCount>0 || mMaxCount>0; } /// @brief Return the number of particles that were ignored because they were /// smaller than the minimum radius. size_t getMinCount() const { return mMinCount; } /// @brief Return the number of particles that were ignored because they were /// larger than the maximum radius. size_t getMaxCount() const { return mMaxCount; } /// @brief Return the grain size used for threading int getGrainSize() const { return mGrainSize; } /// @brief Set the grain size used for threading. /// @note A grain size of zero or less disables threading. 
void setGrainSize(int grainSize) { mGrainSize = grainSize; } /// @brief Rasterize each particle as a sphere with the particle's position and radius. /// @details For level set output, all spheres are CSG-unioned. template<typename ParticleListT> void rasterizeSpheres(const ParticleListT& pa); /// @brief Rasterize each particle as a sphere with the particle's position /// and a fixed radius. /// @details For level set output, all spheres are CSG-unioned. /// /// @param pa particles with positions /// @param radius fixed sphere radius in world units. template<typename ParticleListT> void rasterizeSpheres(const ParticleListT& pa, Real radius); /// @brief Rasterize each particle as a trail comprising the CSG union /// of spheres of decreasing radius. /// /// @param pa particles with position, radius and velocity. /// @param delta controls the distance between sphere instances /// /// @warning Be careful not to use too small values for @a delta, /// since this can lead to excessive computation per trail (which the /// interrupter can't stop). /// /// @note The direction of a trail is opposite to that of the velocity vector, /// and its length is given by the magnitude of the velocity. /// The radius at the head of the trail is given by the radius of the particle, /// and the radius at the tail is @a Rmin voxel units, which has /// a default value of 1.5 corresponding to the Nyquist frequency! 
template<typename ParticleListT> void rasterizeTrails(const ParticleListT& pa, Real delta=1.0); private: using BlindType = p2ls_internal::BlindData<SdfType, AttType>; using BlindGridType = typename SdfGridT::template ValueConverter<BlindType>::Type; /// Class with multi-threaded implementation of particle rasterization template<typename ParticleListT, typename GridT> struct Raster; SdfGridType* mSdfGrid; typename AttGridType::Ptr mAttGrid; BlindGridType* mBlindGrid; InterrupterT* mInterrupter; Real mDx, mHalfWidth; Real mRmin, mRmax; // ignore particles outside this range of radii in voxel size_t mMinCount, mMaxCount; // counters for ignored particles int mGrainSize; }; // class ParticlesToLevelSet template<typename SdfGridT, typename AttributeT, typename InterrupterT> inline ParticlesToLevelSet<SdfGridT, AttributeT, InterrupterT>:: ParticlesToLevelSet(SdfGridT& grid, InterrupterT* interrupter) : mSdfGrid(&grid), mBlindGrid(nullptr), mInterrupter(interrupter), mDx(grid.voxelSize()[0]), mHalfWidth(grid.background()/mDx), mRmin(1.5),// corresponds to the Nyquist grid sampling frequency mRmax(100.0),// corresponds to a huge particle (probably too large!) 
mMinCount(0), mMaxCount(0), mGrainSize(1) { if (!mSdfGrid->hasUniformVoxels()) { OPENVDB_THROW(RuntimeError, "ParticlesToLevelSet only supports uniform voxels!"); } if (!DisableT::value) { mBlindGrid = new BlindGridType(BlindType(grid.background())); mBlindGrid->setTransform(mSdfGrid->transform().copy()); } } template<typename SdfGridT, typename AttributeT, typename InterrupterT> template<typename ParticleListT> inline void ParticlesToLevelSet<SdfGridT, AttributeT, InterrupterT>:: rasterizeSpheres(const ParticleListT& pa) { if (DisableT::value) { Raster<ParticleListT, SdfGridT> r(*this, mSdfGrid, pa); r.rasterizeSpheres(); } else { Raster<ParticleListT, BlindGridType> r(*this, mBlindGrid, pa); r.rasterizeSpheres(); } } template<typename SdfGridT, typename AttributeT, typename InterrupterT> template<typename ParticleListT> inline void ParticlesToLevelSet<SdfGridT, AttributeT, InterrupterT>:: rasterizeSpheres(const ParticleListT& pa, Real radius) { if (DisableT::value) { Raster<ParticleListT, SdfGridT> r(*this, mSdfGrid, pa); r.rasterizeSpheres(radius/mDx); } else { Raster<ParticleListT, BlindGridType> r(*this, mBlindGrid, pa); r.rasterizeSpheres(radius/mDx); } } template<typename SdfGridT, typename AttributeT, typename InterrupterT> template<typename ParticleListT> inline void ParticlesToLevelSet<SdfGridT, AttributeT, InterrupterT>:: rasterizeTrails(const ParticleListT& pa, Real delta) { if (DisableT::value) { Raster<ParticleListT, SdfGridT> r(*this, mSdfGrid, pa); r.rasterizeTrails(delta); } else { Raster<ParticleListT, BlindGridType> r(*this, mBlindGrid, pa); r.rasterizeTrails(delta); } } template<typename SdfGridT, typename AttributeT, typename InterrupterT> inline void ParticlesToLevelSet<SdfGridT, AttributeT, InterrupterT>::finalize(bool prune) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (!mBlindGrid) { if (prune) { if (OutputIsMask) { tools::prune(mSdfGrid->tree()); } else { tools::pruneLevelSet(mSdfGrid->tree()); } } return; } if (prune) 
tools::prune(mBlindGrid->tree()); using AttTreeT = typename AttGridType::TreeType; using AttLeafT = typename AttTreeT::LeafNodeType; using BlindTreeT = typename BlindGridType::TreeType; using BlindLeafIterT = typename BlindTreeT::LeafCIter; using BlindLeafT = typename BlindTreeT::LeafNodeType; using SdfTreeT = typename SdfGridType::TreeType; using SdfLeafT = typename SdfTreeT::LeafNodeType; // Use topology copy constructors since output grids have the same topology as mBlindDataGrid const BlindTreeT& blindTree = mBlindGrid->tree(); // Create the output attribute grid. typename AttTreeT::Ptr attTree(new AttTreeT( blindTree, blindTree.background().blind(), openvdb::TopologyCopy())); // Note this overwrites any existing attribute grids! mAttGrid = typename AttGridType::Ptr(new AttGridType(attTree)); mAttGrid->setTransform(mBlindGrid->transform().copy()); typename SdfTreeT::Ptr sdfTree; // the output mask or level set tree // Extract the attribute grid and the mask or level set grid from mBlindDataGrid. if (OutputIsMask) { sdfTree.reset(new SdfTreeT(blindTree, /*off=*/SdfType(0), /*on=*/SdfType(1), TopologyCopy())); // Copy leaf voxels in parallel. tree::LeafManager<AttTreeT> leafNodes(*attTree); leafNodes.foreach([&](AttLeafT& attLeaf, size_t /*leafIndex*/) { if (const auto* blindLeaf = blindTree.probeConstLeaf(attLeaf.origin())) { for (auto iter = attLeaf.beginValueOn(); iter; ++iter) { const auto pos = iter.pos(); attLeaf.setValueOnly(pos, blindLeaf->getValue(pos).blind()); } } }); // Copy tiles serially. const auto blindAcc = mBlindGrid->getConstAccessor(); auto iter = attTree->beginValueOn(); iter.setMaxDepth(AttTreeT::ValueOnIter::LEAF_DEPTH - 1); for ( ; iter; ++iter) { iter.modifyValue([&](AttType& v) { v = blindAcc.getValue(iter.getCoord()).blind(); }); } } else { // Here we exploit the fact that by design level sets have no active tiles. // Only leaf voxels can be active. 
sdfTree.reset(new SdfTreeT(blindTree, blindTree.background().visible(), TopologyCopy())); for (BlindLeafIterT n = blindTree.cbeginLeaf(); n; ++n) { const BlindLeafT& leaf = *n; const openvdb::Coord xyz = leaf.origin(); // Get leafnodes that were allocated during topology construction! SdfLeafT* sdfLeaf = sdfTree->probeLeaf(xyz); AttLeafT* attLeaf = attTree->probeLeaf(xyz); // Use linear offset (vs coordinate) access for better performance! typename BlindLeafT::ValueOnCIter m=leaf.cbeginValueOn(); if (!m) {//no active values in leaf node so copy everything for (openvdb::Index k = 0; k!=BlindLeafT::SIZE; ++k) { const BlindType& v = leaf.getValue(k); sdfLeaf->setValueOnly(k, v.visible()); attLeaf->setValueOnly(k, v.blind()); } } else {//only copy active values (using flood fill for the inactive values) for(; m; ++m) { const openvdb::Index k = m.pos(); const BlindType& v = *m; sdfLeaf->setValueOnly(k, v.visible()); attLeaf->setValueOnly(k, v.blind()); } } } tools::signedFloodFill(*sdfTree);//required since we only transferred active voxels! 
} if (mSdfGrid->empty()) { mSdfGrid->setTree(sdfTree); } else { if (OutputIsMask) { mSdfGrid->tree().topologyUnion(*sdfTree); tools::prune(mSdfGrid->tree()); } else { tools::csgUnion(mSdfGrid->tree(), *sdfTree, /*prune=*/true); } } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } /////////////////////////////////////////////////////////// template<typename SdfGridT, typename AttributeT, typename InterrupterT> template<typename ParticleListT, typename GridT> struct ParticlesToLevelSet<SdfGridT, AttributeT, InterrupterT>::Raster { using DisableT = typename std::is_void<AttributeT>::type; using ParticlesToLevelSetT = ParticlesToLevelSet<SdfGridT, AttributeT, InterrupterT>; using SdfT = typename ParticlesToLevelSetT::SdfType; // type of signed distance values using AttT = typename ParticlesToLevelSetT::AttType; // type of particle attribute using ValueT = typename GridT::ValueType; using AccessorT = typename GridT::Accessor; using TreeT = typename GridT::TreeType; using LeafNodeT = typename TreeT::LeafNodeType; using PointPartitionerT = PointPartitioner<Index32, LeafNodeT::LOG2DIM>; static const bool OutputIsMask = std::is_same<SdfT, bool>::value, DoAttrXfer = !DisableT::value; /// @brief Main constructor Raster(ParticlesToLevelSetT& parent, GridT* grid, const ParticleListT& particles) : mParent(parent) , mParticles(particles) , mGrid(grid) , mMap(*(mGrid->transform().baseMap())) , mMinCount(0) , mMaxCount(0) , mIsCopy(false) { mPointPartitioner = new PointPartitionerT; mPointPartitioner->construct(particles, mGrid->transform()); } /// @brief Copy constructor called by tbb threads Raster(Raster& other, tbb::split) : mParent(other.mParent) , mParticles(other.mParticles) , mGrid(new GridT(*other.mGrid, openvdb::ShallowCopy())) , mMap(other.mMap) , mMinCount(0) , mMaxCount(0) , mTask(other.mTask) , mIsCopy(true) , mPointPartitioner(other.mPointPartitioner) { mGrid->newTree(); } virtual ~Raster() { // Copy-constructed Rasters own temporary grids that have to be deleted, // while 
the original has ownership of the bucket array. if (mIsCopy) { delete mGrid; } else { delete mPointPartitioner; } } void rasterizeSpheres() { mMinCount = mMaxCount = 0; if (mParent.mInterrupter) { mParent.mInterrupter->start("Rasterizing particles to level set using spheres"); } mTask = std::bind(&Raster::rasterSpheres, std::placeholders::_1, std::placeholders::_2); this->cook(); if (mParent.mInterrupter) mParent.mInterrupter->end(); } void rasterizeSpheres(Real radius) { mMinCount = radius < mParent.mRmin ? mParticles.size() : 0; mMaxCount = radius > mParent.mRmax ? mParticles.size() : 0; if (mMinCount>0 || mMaxCount>0) {//skipping all particles! mParent.mMinCount = mMinCount; mParent.mMaxCount = mMaxCount; } else { if (mParent.mInterrupter) { mParent.mInterrupter->start( "Rasterizing particles to level set using const spheres"); } mTask = std::bind(&Raster::rasterFixedSpheres, std::placeholders::_1, std::placeholders::_2, radius); this->cook(); if (mParent.mInterrupter) mParent.mInterrupter->end(); } } void rasterizeTrails(Real delta=1.0) { mMinCount = mMaxCount = 0; if (mParent.mInterrupter) { mParent.mInterrupter->start("Rasterizing particles to level set using trails"); } mTask = std::bind(&Raster::rasterTrails, std::placeholders::_1, std::placeholders::_2, delta); this->cook(); if (mParent.mInterrupter) mParent.mInterrupter->end(); } /// @brief Kick off the optionally multithreaded computation. 
void operator()(const tbb::blocked_range<size_t>& r) { assert(mTask); mTask(this, r); mParent.mMinCount = mMinCount; mParent.mMaxCount = mMaxCount; } /// @brief Required by tbb::parallel_reduce void join(Raster& other) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (OutputIsMask) { if (DoAttrXfer) { tools::compMax(*mGrid, *other.mGrid); } else { mGrid->topologyUnion(*other.mGrid); } } else { tools::csgUnion(*mGrid, *other.mGrid, /*prune=*/true); } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END mMinCount += other.mMinCount; mMaxCount += other.mMaxCount; } private: /// Disallow assignment since some of the members are references Raster& operator=(const Raster&) { return *this; } /// @return true if the particle is too small or too large bool ignoreParticle(Real R) { if (R < mParent.mRmin) {// below the cutoff radius ++mMinCount; return true; } if (R > mParent.mRmax) {// above the cutoff radius ++mMaxCount; return true; } return false; } /// @brief Threaded rasterization of particles as spheres with variable radius /// @param r range of indices into the list of particles void rasterSpheres(const tbb::blocked_range<size_t>& r) { AccessorT acc = mGrid->getAccessor(); // local accessor bool run = true; const Real invDx = 1 / mParent.mDx; AttT att; Vec3R pos; Real rad; // Loop over buckets for (size_t n = r.begin(), N = r.end(); n < N; ++n) { // Loop over particles in bucket n. 
typename PointPartitionerT::IndexIterator iter = mPointPartitioner->indices(n); for ( ; run && iter; ++iter) { const Index32& id = *iter; mParticles.getPosRad(id, pos, rad); const Real R = invDx * rad;// in voxel units if (this->ignoreParticle(R)) continue; const Vec3R P = mMap.applyInverseMap(pos); this->getAtt<DisableT>(id, att); run = this->makeSphere(P, R, att, acc); }//end loop over particles }//end loop over buckets } /// @brief Threaded rasterization of particles as spheres with a fixed radius /// @param r range of indices into the list of particles /// @param R radius of fixed-size spheres void rasterFixedSpheres(const tbb::blocked_range<size_t>& r, Real R) { AccessorT acc = mGrid->getAccessor(); // local accessor AttT att; Vec3R pos; // Loop over buckets for (size_t n = r.begin(), N = r.end(); n < N; ++n) { // Loop over particles in bucket n. for (auto iter = mPointPartitioner->indices(n); iter; ++iter) { const Index32& id = *iter; this->getAtt<DisableT>(id, att); mParticles.getPos(id, pos); const Vec3R P = mMap.applyInverseMap(pos); this->makeSphere(P, R, att, acc); } } } /// @brief Threaded rasterization of particles as spheres with velocity trails /// @param r range of indices into the list of particles /// @param delta inter-sphere spacing void rasterTrails(const tbb::blocked_range<size_t>& r, Real delta) { AccessorT acc = mGrid->getAccessor(); // local accessor bool run = true; AttT att; Vec3R pos, vel; Real rad; const Vec3R origin = mMap.applyInverseMap(Vec3R(0,0,0)); const Real Rmin = mParent.mRmin, invDx = 1 / mParent.mDx; // Loop over buckets for (size_t n = r.begin(), N = r.end(); n < N; ++n) { // Loop over particles in bucket n. 
typename PointPartitionerT::IndexIterator iter = mPointPartitioner->indices(n); for ( ; run && iter; ++iter) { const Index32& id = *iter; mParticles.getPosRadVel(id, pos, rad, vel); const Real R0 = invDx * rad; if (this->ignoreParticle(R0)) continue; this->getAtt<DisableT>(id, att); const Vec3R P0 = mMap.applyInverseMap(pos); const Vec3R V = mMap.applyInverseMap(vel) - origin; // exclude translation const Real speed = V.length(), invSpeed = 1.0 / speed; const Vec3R Nrml = -V * invSpeed; // inverse normalized direction Vec3R P = P0; // local position of instance Real R = R0, d = 0; // local radius and length of trail for (size_t m = 0; run && d <= speed ; ++m) { run = this->makeSphere(P, R, att, acc); P += 0.5 * delta * R * Nrml; // adaptive offset along inverse velocity direction d = (P - P0).length(); // current length of trail R = R0 - (R0 - Rmin) * d * invSpeed; // R = R0 -> mRmin(e.g. 1.5) }//end loop over sphere instances }//end loop over particles }//end loop over buckets } void cook() { // parallelize over the point buckets const Index32 bucketCount = Index32(mPointPartitioner->size()); if (mParent.mGrainSize>0) { tbb::parallel_reduce( tbb::blocked_range<size_t>(0, bucketCount, mParent.mGrainSize), *this); } else { (*this)(tbb::blocked_range<size_t>(0, bucketCount)); } } /// @brief Rasterize sphere at position P and radius R into /// a narrow-band level set with half-width, mHalfWidth. /// @return @c false if rasterization was interrupted /// /// @param P coordinates of the particle position in voxel units /// @param R radius of particle in voxel units /// @param att an optional user-defined attribute value to be associated with voxels /// @param acc grid accessor with a private copy of the grid /// /// @note For best performance all computations are performed in voxel space, /// with the important exception of the final level set value that is converted /// to world units (the grid stores the closest Euclidean signed distances /// measured in world units). 
Also note we use the convention of positive distances /// outside the surface and negative distances inside the surface. template <bool IsMaskT = OutputIsMask> typename std::enable_if<!IsMaskT, bool>::type makeSphere(const Vec3R& P, Real R, const AttT& att, AccessorT& acc) { const Real dx = mParent.mDx, w = mParent.mHalfWidth, max = R + w, // maximum distance in voxel units max2 = math::Pow2(max), // square of maximum distance in voxel units min2 = math::Pow2(math::Max(Real(0), R - w)); // square of minimum distance // Bounding box of the sphere const Coord lo(math::Floor(P[0]-max),math::Floor(P[1]-max),math::Floor(P[2]-max)), hi(math::Ceil( P[0]+max),math::Ceil( P[1]+max),math::Ceil( P[2]+max)); const ValueT inside = -mGrid->background(); ValueT v; size_t count = 0; for (Coord c = lo; c.x() <= hi.x(); ++c.x()) { //only check interrupter every 32'th scan in x if (!(count++ & ((1<<5)-1)) && util::wasInterrupted(mParent.mInterrupter)) { tbb::task::self().cancel_group_execution(); return false; } const Real x2 = math::Pow2(c.x() - P[0]); for (c.y() = lo.y(); c.y() <= hi.y(); ++c.y()) { const Real x2y2 = x2 + math::Pow2(c.y() - P[1]); for (c.z() = lo.z(); c.z() <= hi.z(); ++c.z()) { const Real x2y2z2 = x2y2 + math::Pow2(c.z()-P[2]); // squared dist from c to P #if defined __INTEL_COMPILER _Pragma("warning (push)") _Pragma("warning (disable:186)") // "pointless comparison of unsigned integer with zero" #endif if (x2y2z2 >= max2 || (!acc.probeValue(c, v) && (v < ValueT(0)))) continue;//outside narrow band of the particle or inside existing level set #if defined __INTEL_COMPILER _Pragma("warning (pop)") #endif if (x2y2z2 <= min2) {//inside narrow band of the particle. 
acc.setValueOff(c, inside); continue; } // convert signed distance from voxel units to world units //const ValueT d=dx*(math::Sqrt(x2y2z2) - R); const ValueT d = Merge(static_cast<SdfT>(dx*(math::Sqrt(x2y2z2)-R)), att); if (d < v) acc.setValue(c, d);//CSG union }//end loop over z }//end loop over y }//end loop over x return true; } /// @brief Rasterize a sphere of radius @a r at position @a p into a boolean mask grid. /// @return @c false if rasterization was interrupted template <bool IsMaskT = OutputIsMask> typename std::enable_if<IsMaskT, bool>::type makeSphere(const Vec3R& p, Real r, const AttT& att, AccessorT& acc) { const Real rSquared = r * r, // sphere radius squared, in voxel units inW = r / math::Sqrt(6.0); // half the width in voxel units of an inscribed cube const Coord // Bounding box of the sphere outLo(math::Floor(p[0] - r), math::Floor(p[1] - r), math::Floor(p[2] - r)), outHi(math::Ceil(p[0] + r), math::Ceil(p[1] + r), math::Ceil(p[2] + r)), // Bounds of the inscribed cube inLo(math::Ceil(p[0] - inW), math::Ceil(p[1] - inW), math::Ceil(p[2] - inW)), inHi(math::Floor(p[0] + inW), math::Floor(p[1] + inW), math::Floor(p[2] + inW)); // Bounding boxes of regions comprising out - in /// @todo These could be divided further into sparsely- and densely-filled subregions. const std::vector<CoordBBox> padding{ CoordBBox(outLo.x(), outLo.y(), outLo.z(), inLo.x()-1, outHi.y(), outHi.z()), CoordBBox(inHi.x()+1, outLo.y(), outLo.z(), outHi.x(), outHi.y(), outHi.z()), CoordBBox(outLo.x(), outLo.y(), outLo.z(), outHi.x(), inLo.y()-1, outHi.z()), CoordBBox(outLo.x(), inHi.y()+1, outLo.z(), outHi.x(), outHi.y(), outHi.z()), CoordBBox(outLo.x(), outLo.y(), outLo.z(), outHi.x(), outHi.y(), inLo.z()-1), CoordBBox(outLo.x(), outLo.y(), inHi.z()+1, outHi.x(), outHi.y(), outHi.z()), }; const ValueT onValue = Merge(SdfT(1), att); // Sparsely fill the inscribed cube. /// @todo Use sparse fill only if 2r > leaf width? 
acc.tree().sparseFill(CoordBBox(inLo, inHi), onValue); // Densely fill the remaining regions. for (const auto& bbox: padding) { if (util::wasInterrupted(mParent.mInterrupter)) { tbb::task::self().cancel_group_execution(); return false; } const Coord &bmin = bbox.min(), &bmax = bbox.max(); Coord c; Real cx, cy, cz; for (c = bmin, cx = c.x(); c.x() <= bmax.x(); ++c.x(), cx += 1) { const Real x2 = math::Pow2(cx - p[0]); for (c.y() = bmin.y(), cy = c.y(); c.y() <= bmax.y(); ++c.y(), cy += 1) { const Real x2y2 = x2 + math::Pow2(cy - p[1]); for (c.z() = bmin.z(), cz = c.z(); c.z() <= bmax.z(); ++c.z(), cz += 1) { const Real x2y2z2 = x2y2 + math::Pow2(cz - p[2]); if (x2y2z2 < rSquared) { acc.setValue(c, onValue); } } } } } return true; } using FuncType = typename std::function<void (Raster*, const tbb::blocked_range<size_t>&)>; template<typename DisableType> typename std::enable_if<DisableType::value>::type getAtt(size_t, AttT&) const {} template<typename DisableType> typename std::enable_if<!DisableType::value>::type getAtt(size_t n, AttT& a) const { mParticles.getAtt(n, a); } template<typename T> typename std::enable_if<std::is_same<T, ValueT>::value, ValueT>::type Merge(T s, const AttT&) const { return s; } template<typename T> typename std::enable_if<!std::is_same<T, ValueT>::value, ValueT>::type Merge(T s, const AttT& a) const { return ValueT(s,a); } ParticlesToLevelSetT& mParent; const ParticleListT& mParticles;//list of particles GridT* mGrid; const math::MapBase& mMap; size_t mMinCount, mMaxCount;//counters for ignored particles! FuncType mTask; const bool mIsCopy; PointPartitionerT* mPointPartitioner; }; // struct ParticlesToLevelSet::Raster ///////////////////// YOU CAN SAFELY IGNORE THIS SECTION ///////////////////// namespace p2ls_internal { // This is a simple type that combines a distance value and a particle // attribute. It's required for attribute transfer which is defined in the // Raster class above. 
/// @private template<typename VisibleT, typename BlindT> class BlindData { public: using type = VisibleT; using VisibleType = VisibleT; using BlindType = BlindT; BlindData() {} explicit BlindData(VisibleT v) : mVisible(v), mBlind(zeroVal<BlindType>()) {} BlindData(VisibleT v, BlindT b) : mVisible(v), mBlind(b) {} BlindData(const BlindData&) = default; BlindData& operator=(const BlindData&) = default; const VisibleT& visible() const { return mVisible; } const BlindT& blind() const { return mBlind; } OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN bool operator==(const BlindData& rhs) const { return mVisible == rhs.mVisible; } OPENVDB_NO_FP_EQUALITY_WARNING_END bool operator< (const BlindData& rhs) const { return mVisible < rhs.mVisible; } bool operator> (const BlindData& rhs) const { return mVisible > rhs.mVisible; } BlindData operator+(const BlindData& rhs) const { return BlindData(mVisible + rhs.mVisible); } BlindData operator-(const BlindData& rhs) const { return BlindData(mVisible - rhs.mVisible); } BlindData operator-() const { return BlindData(-mVisible, mBlind); } protected: VisibleT mVisible; BlindT mBlind; }; /// @private // Required by several of the tree nodes template<typename VisibleT, typename BlindT> inline std::ostream& operator<<(std::ostream& ostr, const BlindData<VisibleT, BlindT>& rhs) { ostr << rhs.visible(); return ostr; } /// @private // Required by math::Abs template<typename VisibleT, typename BlindT> inline BlindData<VisibleT, BlindT> Abs(const BlindData<VisibleT, BlindT>& x) { return BlindData<VisibleT, BlindT>(math::Abs(x.visible()), x.blind()); } /// @private // Required to support the (zeroVal<BlindData>() + val) idiom. 
template<typename VisibleT, typename BlindT, typename T> inline BlindData<VisibleT, BlindT> operator+(const BlindData<VisibleT, BlindT>& x, const T& rhs) { return BlindData<VisibleT, BlindT>(x.visible() + static_cast<VisibleT>(rhs), x.blind()); } } // namespace p2ls_internal ////////////////////////////////////////////////////////////////////////////// // The following are convenience functions for common use cases. template<typename GridT, typename ParticleListT, typename InterrupterT> inline void particlesToSdf(const ParticleListT& plist, GridT& grid, InterrupterT* interrupt) { static_assert(std::is_floating_point<typename GridT::ValueType>::value, "particlesToSdf requires an SDF grid with floating-point values"); if (grid.getGridClass() != GRID_LEVEL_SET) { OPENVDB_LOG_WARN("particlesToSdf requires a level set grid;" " try Grid::setGridClass(openvdb::GRID_LEVEL_SET)"); } ParticlesToLevelSet<GridT> p2ls(grid, interrupt); p2ls.rasterizeSpheres(plist); tools::pruneLevelSet(grid.tree()); } template<typename GridT, typename ParticleListT, typename InterrupterT> inline void particlesToSdf(const ParticleListT& plist, GridT& grid, Real radius, InterrupterT* interrupt) { static_assert(std::is_floating_point<typename GridT::ValueType>::value, "particlesToSdf requires an SDF grid with floating-point values"); if (grid.getGridClass() != GRID_LEVEL_SET) { OPENVDB_LOG_WARN("particlesToSdf requires a level set grid;" " try Grid::setGridClass(openvdb::GRID_LEVEL_SET)"); } ParticlesToLevelSet<GridT> p2ls(grid, interrupt); p2ls.rasterizeSpheres(plist, radius); tools::pruneLevelSet(grid.tree()); } template<typename GridT, typename ParticleListT, typename InterrupterT> inline void particleTrailsToSdf(const ParticleListT& plist, GridT& grid, Real delta, InterrupterT* interrupt) { static_assert(std::is_floating_point<typename GridT::ValueType>::value, "particleTrailsToSdf requires an SDF grid with floating-point values"); if (grid.getGridClass() != GRID_LEVEL_SET) { 
OPENVDB_LOG_WARN("particlesToSdf requires a level set grid;" " try Grid::setGridClass(openvdb::GRID_LEVEL_SET)"); } ParticlesToLevelSet<GridT> p2ls(grid, interrupt); p2ls.rasterizeTrails(plist, delta); tools::pruneLevelSet(grid.tree()); } template<typename GridT, typename ParticleListT, typename InterrupterT> inline void particlesToMask(const ParticleListT& plist, GridT& grid, InterrupterT* interrupt) { static_assert(std::is_same<bool, typename GridT::ValueType>::value, "particlesToMask requires a boolean-valued grid"); ParticlesToLevelSet<GridT> p2ls(grid, interrupt); p2ls.rasterizeSpheres(plist); tools::prune(grid.tree()); } template<typename GridT, typename ParticleListT, typename InterrupterT> inline void particlesToMask(const ParticleListT& plist, GridT& grid, Real radius, InterrupterT* interrupt) { static_assert(std::is_same<bool, typename GridT::ValueType>::value, "particlesToMask requires a boolean-valued grid"); ParticlesToLevelSet<GridT> p2ls(grid, interrupt); p2ls.rasterizeSpheres(plist, radius); tools::prune(grid.tree()); } template<typename GridT, typename ParticleListT, typename InterrupterT> inline void particleTrailsToMask(const ParticleListT& plist, GridT& grid, Real delta, InterrupterT* interrupt) { static_assert(std::is_same<bool, typename GridT::ValueType>::value, "particleTrailsToMask requires a boolean-valued grid"); ParticlesToLevelSet<GridT> p2ls(grid, interrupt); p2ls.rasterizeTrails(plist, delta); tools::prune(grid.tree()); } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_PARTICLES_TO_LEVELSET_HAS_BEEN_INCLUDED
42,950
C
41.150147
100
0.64163
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/PointIndexGrid.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file PointIndexGrid.h /// /// @brief Space-partitioning acceleration structure for points. Partitions /// the points into voxels to accelerate range and nearest neighbor /// searches. /// /// @note Leaf nodes store a single point-index array and the voxels are only /// integer offsets into that array. The actual points are never stored /// in the acceleration structure, only offsets into an external array. /// /// @author Mihai Alden #ifndef OPENVDB_TOOLS_POINT_INDEX_GRID_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_POINT_INDEX_GRID_HAS_BEEN_INCLUDED #include "PointPartitioner.h" #include <openvdb/version.h> #include <openvdb/Exceptions.h> #include <openvdb/Grid.h> #include <openvdb/Types.h> #include <openvdb/math/Transform.h> #include <openvdb/tree/LeafManager.h> #include <openvdb/tree/LeafNode.h> #include <openvdb/tree/Tree.h> #include <tbb/atomic.h> #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <algorithm> // for std::min(), std::max() #include <cmath> // for std::sqrt() #include <deque> #include <iostream> #include <type_traits> // for std::is_same #include <utility> // for std::pair #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { template<Index, typename> struct SameLeafConfig; // forward declaration } namespace tools { template<typename T, Index Log2Dim> struct PointIndexLeafNode; // forward declaration /// Point index tree configured to match the default OpenVDB tree configuration using PointIndexTree = tree::Tree<tree::RootNode<tree::InternalNode<tree::InternalNode <PointIndexLeafNode<PointIndex32, 3>, 4>, 5>>>; /// Point index grid using PointIndexGrid = Grid<PointIndexTree>; //////////////////////////////////////// /// @interface PointArray /// Expected interface for the PointArray container: /// @code /// template<typename VectorType> /// struct PointArray /// { /// // The type 
used to represent world-space point positions /// using PosType = VectorType; /// /// // Return the number of points in the array /// size_t size() const; /// /// // Return the world-space position of the nth point in the array. /// void getPos(size_t n, PosType& xyz) const; /// }; /// @endcode //////////////////////////////////////// /// @brief Partition points into a point index grid to accelerate range and /// nearest-neighbor searches. /// /// @param points world-space point array conforming to the PointArray interface /// @param voxelSize voxel size in world units template<typename GridT, typename PointArrayT> inline typename GridT::Ptr createPointIndexGrid(const PointArrayT& points, double voxelSize); /// @brief Partition points into a point index grid to accelerate range and /// nearest-neighbor searches. /// /// @param points world-space point array conforming to the PointArray interface /// @param xform world-to-index-space transform template<typename GridT, typename PointArrayT> inline typename GridT::Ptr createPointIndexGrid(const PointArrayT& points, const math::Transform& xform); /// @brief Return @c true if the given point index grid represents a valid partitioning /// of the given point array. /// /// @param points world-space point array conforming to the PointArray interface /// @param grid point index grid to validate template<typename PointArrayT, typename GridT> inline bool isValidPartition(const PointArrayT& points, const GridT& grid); /// Repartition the @a points if needed, otherwise return the input @a grid. template<typename GridT, typename PointArrayT> inline typename GridT::ConstPtr getValidPointIndexGrid(const PointArrayT& points, const typename GridT::ConstPtr& grid); /// Repartition the @a points if needed, otherwise return the input @a grid. 
template<typename GridT, typename PointArrayT>
inline typename GridT::Ptr
getValidPointIndexGrid(const PointArrayT& points, const typename GridT::Ptr& grid);


////////////////////////////////////////


/// Accelerated range and nearest-neighbor searches for point index grids
template<typename TreeType = PointIndexTree>
struct PointIndexIterator
{
    using ConstAccessor = tree::ValueAccessor<const TreeType>;
    using LeafNodeType = typename TreeType::LeafNodeType;
    using ValueType = typename TreeType::ValueType;

    PointIndexIterator();
    PointIndexIterator(const PointIndexIterator& rhs);
    PointIndexIterator& operator=(const PointIndexIterator& rhs);

    /// @brief Construct an iterator over the indices of the points contained in voxel (i, j, k).
    /// @param ijk the voxel containing the points over which to iterate
    /// @param acc an accessor for the grid or tree that holds the point indices
    PointIndexIterator(const Coord& ijk, ConstAccessor& acc);

    /// @brief Construct an iterator over the indices of the points contained in
    ///        the given bounding box.
    /// @param bbox the bounding box of the voxels containing the points over which to iterate
    /// @param acc an accessor for the grid or tree that holds the point indices
    /// @note The range of the @a bbox is inclusive. Thus, a bounding box with
    ///       min = max is not empty but rather encloses a single voxel.
    PointIndexIterator(const CoordBBox& bbox, ConstAccessor& acc);

    /// @brief Clear the iterator and update it with the result of the given voxel query.
    /// @param ijk the voxel containing the points over which to iterate
    /// @param acc an accessor for the grid or tree that holds the point indices
    void searchAndUpdate(const Coord& ijk, ConstAccessor& acc);

    /// @brief Clear the iterator and update it with the result of the given voxel region query.
    /// @param bbox the bounding box of the voxels containing the points over which to iterate
    /// @param acc an accessor for the grid or tree that holds the point indices
    /// @note The range of the @a bbox is inclusive. Thus, a bounding box with
    ///       min = max is not empty but rather encloses a single voxel.
    void searchAndUpdate(const CoordBBox& bbox, ConstAccessor& acc);

    /// @brief Clear the iterator and update it with the result of the given
    ///        index-space bounding box query.
    /// @param bbox index-space bounding box
    /// @param acc an accessor for the grid or tree that holds the point indices
    /// @param points world-space point array conforming to the PointArray interface
    /// @param xform linear, uniform-scale transform (i.e., cubical voxels)
    template<typename PointArray>
    void searchAndUpdate(const BBoxd& bbox, ConstAccessor& acc,
        const PointArray& points, const math::Transform& xform);

    /// @brief Clear the iterator and update it with the result of the given
    ///        index-space radial query.
    /// @param center index-space center
    /// @param radius index-space radius
    /// @param acc an accessor for the grid or tree that holds the point indices
    /// @param points world-space point array conforming to the PointArray interface
    /// @param xform linear, uniform-scale transform (i.e., cubical voxels)
    /// @param subvoxelAccuracy if true, check individual points against the search region,
    ///        otherwise return all points that reside in voxels that are inside
    ///        or intersect the search region
    template<typename PointArray>
    void searchAndUpdate(const Vec3d& center, double radius, ConstAccessor& acc,
        const PointArray& points, const math::Transform& xform, bool subvoxelAccuracy = true);

    /// @brief Clear the iterator and update it with the result of the given
    ///        world-space bounding box query.
    /// @param bbox world-space bounding box
    /// @param acc an accessor for the grid or tree that holds the point indices
    /// @param points world-space point array conforming to the PointArray interface
    /// @param xform linear, uniform-scale transform (i.e., cubical voxels)
    template<typename PointArray>
    void worldSpaceSearchAndUpdate(const BBoxd& bbox, ConstAccessor& acc,
        const PointArray& points, const math::Transform& xform);

    /// @brief Clear the iterator and update it with the result of the given
    ///        world-space radial query.
    /// @param center world-space center
    /// @param radius world-space radius
    /// @param acc an accessor for the grid or tree that holds the point indices
    /// @param points world-space point array conforming to the PointArray interface
    /// @param xform linear, uniform-scale transform (i.e., cubical voxels)
    /// @param subvoxelAccuracy if true, check individual points against the search region,
    ///        otherwise return all points that reside in voxels that are inside
    ///        or intersect the search region
    template<typename PointArray>
    void worldSpaceSearchAndUpdate(const Vec3d& center, double radius, ConstAccessor& acc,
        const PointArray& points, const math::Transform& xform, bool subvoxelAccuracy = true);

    /// Reset the iterator to point to the first item.
    void reset();

    /// Return a const reference to the item to which this iterator is pointing.
    const ValueType& operator*() const { return *mRange.first; }

    /// @{
    /// @brief Return @c true if this iterator is not yet exhausted.
    bool test() const { return mRange.first < mRange.second || mIter != mRangeList.end(); }
    operator bool() const { return this->test(); }
    /// @}

    /// Advance iterator to next item.
    void increment();

    /// Advance iterator to next item.
    void operator++() { this->increment(); }

    /// @brief Advance iterator to next item.
    /// @return @c true if this iterator is not yet exhausted.
    bool next();

    /// Return the number of point indices in the iterator range.
    size_t size() const;

    /// Return @c true if both iterators point to the same element.
    bool operator==(const PointIndexIterator& p) const { return mRange.first == p.mRange.first; }
    bool operator!=(const PointIndexIterator& p) const { return !this->operator==(p); }


private:
    using Range = std::pair<const ValueType*, const ValueType*>;
    using RangeDeque = std::deque<Range>;
    using RangeDequeCIter = typename RangeDeque::const_iterator;
    using IndexArray = std::unique_ptr<ValueType[]>;

    void clear();

    // Primary index collection
    Range           mRange;
    RangeDeque      mRangeList;
    RangeDequeCIter mIter;
    // Secondary index collection
    IndexArray      mIndexArray;
    size_t          mIndexArraySize;
}; // struct PointIndexIterator


/// @brief Selectively extract and filter point data using a custom filter operator.
///
/// @par FilterType example:
/// @interface FilterType
/// @code
/// template<typename T>
/// struct WeightedAverageAccumulator {
///     using ValueType = T;
///
///     WeightedAverageAccumulator(T const * const array, const T radius)
///         : mValues(array), mInvRadius(1.0/radius), mWeightSum(0.0), mValueSum(0.0) {}
///
///     void reset() { mWeightSum = mValueSum = T(0.0); }
///
///     // the following method is invoked by the PointIndexFilter
///     void operator()(const T distSqr, const size_t pointIndex) {
///         const T weight = T(1.0) - openvdb::math::Sqrt(distSqr) * mInvRadius;
///         mWeightSum += weight;
///         mValueSum += weight * mValues[pointIndex];
///     }
///
///     T result() const { return mWeightSum > T(0.0) ?
/// mValueSum / mWeightSum : T(0.0); }
///
/// private:
///     T const * const mValues;
///     const T mInvRadius;
///     T mWeightSum, mValueSum;
/// }; // struct WeightedAverageAccumulator
/// @endcode
template<typename PointArray, typename TreeType = PointIndexTree>
struct PointIndexFilter
{
    using PosType = typename PointArray::PosType;
    using ScalarType = typename PosType::value_type;
    using ConstAccessor = tree::ValueAccessor<const TreeType>;

    /// @brief Constructor
    /// @param points world-space point array conforming to the PointArray interface
    /// @param tree a point index tree
    /// @param xform linear, uniform-scale transform (i.e., cubical voxels)
    PointIndexFilter(const PointArray& points, const TreeType& tree, const math::Transform& xform);

    /// Thread safe copy constructor
    PointIndexFilter(const PointIndexFilter& rhs);

    /// @brief Perform a radial search query and apply the given filter
    ///        operator to the selected points.
    /// @param center world-space center
    /// @param radius world-space radius
    /// @param op custom filter operator (see the FilterType example for interface details)
    template<typename FilterType>
    void searchAndApply(const PosType& center, ScalarType radius, FilterType& op);

private:
    PointArray const * const mPoints;
    ConstAccessor mAcc;
    const math::Transform mXform;
    const ScalarType mInvVoxelSize;
    PointIndexIterator<TreeType> mIter;
}; // struct PointIndexFilter


////////////////////////////////////////

// Internal operators and implementation details


namespace point_index_grid_internal {

// Checks, in parallel over leaf nodes, whether the stored point indices still
// map every point to the voxel that contains it; sets @a hasChanged and cancels
// the remaining tasks as soon as the first mismatch is found.
template<typename PointArrayT>
struct ValidPartitioningOp
{
    ValidPartitioningOp(tbb::atomic<bool>& hasChanged,
        const PointArrayT& points, const math::Transform& xform)
        : mPoints(&points)
        , mTransform(&xform)
        , mHasChanged(&hasChanged)
    {
    }

    template <typename LeafT>
    void operator()(LeafT &leaf, size_t /*leafIndex*/) const
    {
        if ((*mHasChanged)) {
            // another task already found a mismatch; stop the whole group early.
            tbb::task::self().cancel_group_execution();
            return;
        }

        using IndexArrayT = typename LeafT::IndexArray;
        using IndexT = typename IndexArrayT::value_type;
        using PosType = typename PointArrayT::PosType;

        typename LeafT::ValueOnCIter iter;
        Coord voxelCoord;
        PosType point;

        const IndexT
            *begin = static_cast<IndexT*>(nullptr),
            *end = static_cast<IndexT*>(nullptr);

        for (iter = leaf.cbeginValueOn(); iter; ++iter) {

            if ((*mHasChanged)) break;

            voxelCoord = iter.getCoord();
            leaf.getIndices(iter.pos(), begin, end);

            while (begin < end) {

                mPoints->getPos(*begin, point);
                if (voxelCoord != mTransform->worldToIndexCellCentered(point)) {
                    mHasChanged->fetch_and_store(true);
                    break;
                }

                ++begin;
            }
        }
    }

private:
    PointArrayT         const * const mPoints;
    math::Transform     const * const mTransform;
    tbb::atomic<bool>         * const mHasChanged;
};


// Builds the leaf nodes of a PointIndexTree from a PointPartitioner: one leaf
// per partitioner bucket, with per-voxel point-index ranges encoded as an
// offset table in the leaf buffer.
template<typename LeafNodeT>
struct PopulateLeafNodesOp
{
    using IndexT = uint32_t;
    using Partitioner = PointPartitioner<IndexT, LeafNodeT::LOG2DIM>;

    PopulateLeafNodesOp(std::unique_ptr<LeafNodeT*[]>& leafNodes,
        const Partitioner& partitioner)
        : mLeafNodes(leafNodes.get())
        , mPartitioner(&partitioner)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        using VoxelOffsetT = typename Partitioner::VoxelOffsetType;

        // size scratch buffers for the largest bucket in this range.
        size_t maxPointCount = 0;
        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {
            maxPointCount = std::max(maxPointCount, mPartitioner->indices(n).size());
        }

        const IndexT voxelCount = LeafNodeT::SIZE;

        // allocate histogram buffers
        std::unique_ptr<VoxelOffsetT[]> offsets{new VoxelOffsetT[maxPointCount]};
        std::unique_ptr<IndexT[]> histogram{new IndexT[voxelCount]};

        VoxelOffsetT const * const voxelOffsets = mPartitioner->voxelOffsets().get();

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            LeafNodeT* node = new LeafNodeT();
            node->setOrigin(mPartitioner->origin(n));

            typename Partitioner::IndexIterator it = mPartitioner->indices(n);

            const size_t pointCount = it.size();
            IndexT const * const indices = &*it;

            // local copy of voxel offsets.
for (IndexT i = 0; i < pointCount; ++i) {
                offsets[i] = voxelOffsets[ indices[i] ];
            }

            // compute voxel-offset histogram
            memset(&histogram[0], 0, voxelCount * sizeof(IndexT));
            for (IndexT i = 0; i < pointCount; ++i) {
                ++histogram[ offsets[i] ];
            }

            typename LeafNodeT::NodeMaskType& mask = node->getValueMask();
            typename LeafNodeT::Buffer& buffer = node->buffer();

            // scan histogram (all-prefix-sums)
            IndexT count = 0, startOffset;
            for (int i = 0; i < int(voxelCount); ++i) {
                if (histogram[i] > 0) {
                    startOffset = count;
                    count += histogram[i];
                    histogram[i] = startOffset;
                    mask.setOn(i);
                }
                buffer.setValue(i, count);
            }

            // allocate point-index array
            node->indices().resize(pointCount);
            typename LeafNodeT::ValueType * const orderedIndices = node->indices().data();

            // rank and permute
            for (IndexT i = 0; i < pointCount; ++i) {
                orderedIndices[ histogram[ offsets[i] ]++ ] = indices[i];
            }

            mLeafNodes[n] = node;
        }
    }

    //////////

    LeafNodeT*        * const mLeafNodes;
    Partitioner const * const mPartitioner;
};


/// Construct a @c PointIndexTree
template<typename TreeType, typename PointArray>
inline void
constructPointTree(TreeType& tree, const math::Transform& xform, const PointArray& points)
{
    using LeafType = typename TreeType::LeafNodeType;

    std::unique_ptr<LeafType*[]> leafNodes;
    size_t leafNodeCount = 0;

    {
        // Important: Do not disable the cell-centered transform in the PointPartitioner.
        //            This interpretation is assumed in the PointIndexGrid and all related
        //            search algorithms.
        PointPartitioner<uint32_t, LeafType::LOG2DIM> partitioner;
        partitioner.construct(points, xform, /*voxelOrder=*/false, /*recordVoxelOffsets=*/true);

        if (!partitioner.usingCellCenteredTransform()) {
            OPENVDB_THROW(LookupError, "The PointIndexGrid requires a "
                "cell-centered transform.");
        }

        leafNodeCount = partitioner.size();
        leafNodes.reset(new LeafType*[leafNodeCount]);

        const tbb::blocked_range<size_t> range(0, leafNodeCount);
        tbb::parallel_for(range, PopulateLeafNodesOp<LeafType>(leafNodes, partitioner));
    }

    tree::ValueAccessor<TreeType> acc(tree);
    for (size_t n = 0; n < leafNodeCount; ++n) {
        acc.addLeaf(leafNodes[n]);
    }
}


////////////////////////////////////////


// Copies the contents of a deque into a freshly allocated contiguous array.
template<typename T>
inline void
dequeToArray(const std::deque<T>& d, std::unique_ptr<T[]>& a, size_t& size)
{
    size = d.size();
    a.reset(new T[size]);
    typename std::deque<T>::const_iterator it = d.begin(), itEnd = d.end();
    T* item = a.get();
    for ( ; it != itEnd; ++it, ++item) *item = *it;
}


// Splits the shell between the outer box @a bbox and the interior box @a ibox
// into six non-overlapping face regions.
inline void
constructExclusiveRegions(std::vector<CoordBBox>& regions,
    const CoordBBox& bbox, const CoordBBox& ibox)
{
    regions.clear();
    regions.reserve(6);
    Coord cmin = ibox.min();
    Coord cmax = ibox.max();

    // left-face bbox
    regions.push_back(bbox);
    regions.back().max().z() = cmin.z();

    // right-face bbox
    regions.push_back(bbox);
    regions.back().min().z() = cmax.z();

    --cmax.z(); // accounting for cell centered bucketing.
    ++cmin.z();

    // front-face bbox
    regions.push_back(bbox);
    CoordBBox* lastRegion = &regions.back();
    lastRegion->min().z() = cmin.z();
    lastRegion->max().z() = cmax.z();
    lastRegion->max().x() = cmin.x();

    // back-face bbox
    regions.push_back(*lastRegion);
    lastRegion = &regions.back();
    lastRegion->min().x() = cmax.x();
    lastRegion->max().x() = bbox.max().x();

    --cmax.x();
    ++cmin.x();

    // bottom-face bbox
    regions.push_back(*lastRegion);
    lastRegion = &regions.back();
    lastRegion->min().x() = cmin.x();
    lastRegion->max().x() = cmax.x();
    lastRegion->max().y() = cmin.y();

    // top-face bbox
    regions.push_back(*lastRegion);
    lastRegion = &regions.back();
    lastRegion->min().y() = cmax.y();
    lastRegion->max().y() = bbox.max().y();
}


// Collects the indices of all points that fall inside a world-space
// bounding box.
template<typename PointArray, typename IndexT>
struct BBoxFilter
{
    using PosType = typename PointArray::PosType;
    using ScalarType = typename PosType::value_type;
    using Range = std::pair<const IndexT*, const IndexT*>;
    using RangeDeque = std::deque<Range>;
    using IndexDeque = std::deque<IndexT>;

    BBoxFilter(RangeDeque& ranges, IndexDeque& indices, const BBoxd& bbox,
        const PointArray& points, const math::Transform& xform)
        : mRanges(ranges)
        , mIndices(indices)
        , mRegion(bbox)
        , mPoints(points)
        , mMap(*xform.baseMap())
    {
    }

    template <typename LeafNodeType>
    void filterLeafNode(const LeafNodeType& leaf)
    {
        typename LeafNodeType::ValueOnCIter iter;
        const IndexT
            *begin = static_cast<IndexT*>(nullptr),
            *end = static_cast<IndexT*>(nullptr);
        for (iter = leaf.cbeginValueOn(); iter; ++iter) {
            leaf.getIndices(iter.pos(), begin, end);
            filterVoxel(iter.getCoord(), begin, end);
        }
    }

    void filterVoxel(const Coord&, const IndexT* begin, const IndexT* end)
    {
        PosType vec;

        for (; begin < end; ++begin) {
            mPoints.getPos(*begin, vec);

            if (mRegion.isInside(mMap.applyInverseMap(vec))) {
                mIndices.push_back(*begin);
            }
        }
    }

private:
    RangeDeque& mRanges;
    IndexDeque& mIndices;
    const BBoxd mRegion;
    const PointArray& mPoints;
    const math::MapBase& mMap;
};


template<typename PointArray, typename IndexT>
struct RadialRangeFilter
{
    using PosType = typename PointArray::PosType;
    using ScalarType = typename PosType::value_type;
    using Range = std::pair<const IndexT*, const IndexT*>;
    using RangeDeque = std::deque<Range>;
    using IndexDeque = std::deque<IndexT>;

    RadialRangeFilter(RangeDeque& ranges, IndexDeque& indices, const Vec3d& xyz, double radius,
        const PointArray& points, const math::Transform& xform,
        const double leafNodeDim, const bool subvoxelAccuracy)
        : mRanges(ranges)
        , mIndices(indices)
        , mCenter(xyz)
        , mWSCenter(xform.indexToWorld(xyz))
        , mVoxelDist1(ScalarType(0.0))
        , mVoxelDist2(ScalarType(0.0))
        , mLeafNodeDist1(ScalarType(0.0))
        , mLeafNodeDist2(ScalarType(0.0))
        , mWSRadiusSqr(ScalarType(radius * xform.voxelSize()[0]))
        , mPoints(points)
        , mSubvoxelAccuracy(subvoxelAccuracy)
    {
        // Precompute squared inclusion/exclusion distances for whole voxels
        // and whole leaf nodes (bounding-sphere tests).
        const ScalarType voxelRadius = ScalarType(std::sqrt(3.0) * 0.5);

        mVoxelDist1 = voxelRadius + ScalarType(radius);
        mVoxelDist1 *= mVoxelDist1;

        if (radius > voxelRadius) {
            mVoxelDist2 = ScalarType(radius) - voxelRadius;
            mVoxelDist2 *= mVoxelDist2;
        }

        const ScalarType leafNodeRadius = ScalarType(leafNodeDim * std::sqrt(3.0) * 0.5);

        mLeafNodeDist1 = leafNodeRadius + ScalarType(radius);
        mLeafNodeDist1 *= mLeafNodeDist1;

        if (radius > leafNodeRadius) {
            mLeafNodeDist2 = ScalarType(radius) - leafNodeRadius;
            mLeafNodeDist2 *= mLeafNodeDist2;
        }

        mWSRadiusSqr *= mWSRadiusSqr;
    }

    template <typename LeafNodeType>
    void filterLeafNode(const LeafNodeType& leaf)
    {
        {
            const Coord& ijk = leaf.origin();
            PosType vec;
            vec[0] = ScalarType(ijk[0]);
            vec[1] = ScalarType(ijk[1]);
            vec[2] = ScalarType(ijk[2]);
            vec += ScalarType(LeafNodeType::DIM - 1) * 0.5;
            vec -= mCenter;

            const ScalarType dist = vec.lengthSqr();
            // leaf entirely outside the search sphere
            if (dist > mLeafNodeDist1) return;

            // leaf entirely inside the search sphere: take all of its indices
            if (mLeafNodeDist2 > 0.0 && dist < mLeafNodeDist2) {
                const IndexT* begin = &leaf.indices().front();
                mRanges.push_back(Range(begin, begin + leaf.indices().size()));
                return;
            }
        }

        typename LeafNodeType::ValueOnCIter iter;
        const IndexT
            *begin = static_cast<IndexT*>(nullptr),
            *end = static_cast<IndexT*>(nullptr);
        for (iter = leaf.cbeginValueOn(); iter; ++iter) {
            leaf.getIndices(iter.pos(), begin, end);
            filterVoxel(iter.getCoord(), begin, end);
        }
    }

    void filterVoxel(const Coord& ijk, const IndexT* begin, const IndexT* end)
    {
        PosType vec;

        {
            vec[0] = mCenter[0] - ScalarType(ijk[0]);
            vec[1] = mCenter[1] - ScalarType(ijk[1]);
            vec[2] = mCenter[2] - ScalarType(ijk[2]);

            const ScalarType dist = vec.lengthSqr();
            if (dist > mVoxelDist1) return;

            if (!mSubvoxelAccuracy || (mVoxelDist2 > 0.0 && dist < mVoxelDist2)) {
                // merge with the previous range when contiguous
                if (!mRanges.empty() && mRanges.back().second == begin) {
                    mRanges.back().second = end;
                } else {
                    mRanges.push_back(Range(begin, end));
                }
                return;
            }
        }

        // per-point world-space distance test
        while (begin < end) {
            mPoints.getPos(*begin, vec);
            vec = mWSCenter - vec;

            if (vec.lengthSqr() < mWSRadiusSqr) {
                mIndices.push_back(*begin);
            }
            ++begin;
        }
    }

private:
    RangeDeque& mRanges;
    IndexDeque& mIndices;
    const PosType mCenter, mWSCenter;
    ScalarType mVoxelDist1, mVoxelDist2, mLeafNodeDist1, mLeafNodeDist2, mWSRadiusSqr;
    const PointArray& mPoints;
    const bool mSubvoxelAccuracy;
}; // struct RadialRangeFilter


////////////////////////////////////////


// Applies @a filter to every voxel of @a leaf that intersects the inclusive
// index range [min, max].
template<typename RangeFilterType, typename LeafNodeType>
inline void
filteredPointIndexSearchVoxels(RangeFilterType& filter,
    const LeafNodeType& leaf, const Coord& min, const Coord& max)
{
    using PointIndexT = typename LeafNodeType::ValueType;
    Index xPos(0), yPos(0), pos(0);
    Coord ijk(0);

    const PointIndexT* dataPtr = &leaf.indices().front();
    PointIndexT beginOffset, endOffset;

    for (ijk[0] = min[0]; ijk[0] <= max[0]; ++ijk[0]) {
        xPos = (ijk[0] & (LeafNodeType::DIM - 1u)) << (2 * LeafNodeType::LOG2DIM);
        for (ijk[1] = min[1]; ijk[1] <= max[1]; ++ijk[1]) {
            yPos = xPos + ((ijk[1] & (LeafNodeType::DIM - 1u)) << LeafNodeType::LOG2DIM);
            for (ijk[2] = min[2]; ijk[2] <= max[2]; ++ijk[2]) {
                pos = yPos + (ijk[2] & (LeafNodeType::DIM - 1u));

                beginOffset = (pos == 0 ? PointIndexT(0) : leaf.getValue(pos - 1));
                endOffset = leaf.getValue(pos);

                if (endOffset > beginOffset) {
                    filter.filterVoxel(ijk, dataPtr + beginOffset, dataPtr + endOffset);
                }
            }
        }
    }
}


// Visits every leaf node intersecting @a bbox and dispatches either a
// whole-leaf or a per-voxel filter pass, depending on the overlap.
template<typename RangeFilterType, typename ConstAccessor>
inline void
filteredPointIndexSearch(RangeFilterType& filter, ConstAccessor& acc, const CoordBBox& bbox)
{
    using LeafNodeType = typename ConstAccessor::TreeType::LeafNodeType;
    Coord ijk(0), ijkMax(0), ijkA(0), ijkB(0);
    const Coord leafMin = bbox.min() & ~(LeafNodeType::DIM - 1);
    const Coord leafMax = bbox.max() & ~(LeafNodeType::DIM - 1);

    for (ijk[0] = leafMin[0]; ijk[0] <= leafMax[0]; ijk[0] += LeafNodeType::DIM) {
        for (ijk[1] = leafMin[1]; ijk[1] <= leafMax[1]; ijk[1] += LeafNodeType::DIM) {
            for (ijk[2] = leafMin[2]; ijk[2] <= leafMax[2]; ijk[2] += LeafNodeType::DIM) {

                if (const LeafNodeType* leaf = acc.probeConstLeaf(ijk)) {
                    ijkMax = ijk;
                    ijkMax.offset(LeafNodeType::DIM - 1);

                    // intersect leaf bbox with search region.
                    ijkA = Coord::maxComponent(bbox.min(), ijk);
                    ijkB = Coord::minComponent(bbox.max(), ijkMax);

                    if (ijkA != ijk || ijkB != ijkMax) {
                        filteredPointIndexSearchVoxels(filter, *leaf, ijkA, ijkB);
                    } else { // leaf bbox is inside the search region
                        filter.filterLeafNode(*leaf);
                    }
                }
            }
        }
    }
}


////////////////////////////////////////


// Collects, as contiguous ranges, the indices of all points in the voxels of
// @a leaf that lie inside the inclusive index range [min, max]; the z extent
// is handled in one lookup via the offset table (zStride).
template<typename RangeDeque, typename LeafNodeType>
inline void
pointIndexSearchVoxels(RangeDeque& rangeList,
    const LeafNodeType& leaf, const Coord& min, const Coord& max)
{
    using PointIndexT = typename LeafNodeType::ValueType;
    using IntT = typename PointIndexT::IntType;
    using Range = typename RangeDeque::value_type;

    Index xPos(0), pos(0), zStride = Index(max[2] - min[2]);
    const PointIndexT* dataPtr = &leaf.indices().front();
    PointIndexT beginOffset(0), endOffset(0),
        previousOffset(static_cast<IntT>(leaf.indices().size() + 1u));
    Coord ijk(0);

    for (ijk[0] = min[0]; ijk[0] <= max[0]; ++ijk[0]) {
        xPos = (ijk[0] & (LeafNodeType::DIM - 1u)) << (2 * LeafNodeType::LOG2DIM);

        for (ijk[1] = min[1]; ijk[1] <= max[1]; ++ijk[1]) {
            pos = xPos + ((ijk[1] & (LeafNodeType::DIM - 1u)) << LeafNodeType::LOG2DIM);
            pos += (min[2] & (LeafNodeType::DIM - 1u));

            beginOffset = (pos == 0 ? PointIndexT(0) : leaf.getValue(pos - 1));
            endOffset = leaf.getValue(pos+zStride);

            if (endOffset > beginOffset) {
                // extend the previous range when the offsets are contiguous
                if (beginOffset == previousOffset) {
                    rangeList.back().second = dataPtr + endOffset;
                } else {
                    rangeList.push_back(Range(dataPtr + beginOffset, dataPtr + endOffset));
                }

                previousOffset = endOffset;
            }
        }
    }
}


// Visits every leaf node intersecting @a bbox and collects the index ranges
// of all points inside the search region.
template<typename RangeDeque, typename ConstAccessor>
inline void
pointIndexSearch(RangeDeque& rangeList, ConstAccessor& acc, const CoordBBox& bbox)
{
    using LeafNodeType = typename ConstAccessor::TreeType::LeafNodeType;
    using PointIndexT = typename LeafNodeType::ValueType;
    using Range = typename RangeDeque::value_type;

    Coord ijk(0), ijkMax(0), ijkA(0), ijkB(0);
    const Coord leafMin = bbox.min() & ~(LeafNodeType::DIM - 1);
    const Coord leafMax = bbox.max() & ~(LeafNodeType::DIM - 1);

    for (ijk[0] = leafMin[0]; ijk[0] <= leafMax[0]; ijk[0] += LeafNodeType::DIM) {
        for (ijk[1] = leafMin[1]; ijk[1] <= leafMax[1]; ijk[1] += LeafNodeType::DIM) {
            for (ijk[2] = leafMin[2]; ijk[2] <= leafMax[2]; ijk[2] += LeafNodeType::DIM) {

                if (const LeafNodeType* leaf = acc.probeConstLeaf(ijk)) {
                    ijkMax = ijk;
                    ijkMax.offset(LeafNodeType::DIM - 1);

                    // intersect leaf bbox with search region.
                    ijkA = Coord::maxComponent(bbox.min(), ijk);
                    ijkB = Coord::minComponent(bbox.max(), ijkMax);

                    if (ijkA != ijk || ijkB != ijkMax) {
                        pointIndexSearchVoxels(rangeList, *leaf, ijkA, ijkB);
                    } else {
                        // leaf bbox is inside the search region, add all indices.
const PointIndexT* begin = &leaf->indices().front();
                        rangeList.push_back(Range(begin, (begin + leaf->indices().size())));
                    }
                }
            }
        }
    }
}


} // namespace point_index_grid_internal


// PointIndexIterator implementation

template<typename TreeType>
inline
PointIndexIterator<TreeType>::PointIndexIterator()
    : mRange(static_cast<ValueType*>(nullptr), static_cast<ValueType*>(nullptr))
    , mRangeList()
    , mIter(mRangeList.begin())
    , mIndexArray()
    , mIndexArraySize(0)
{
}


template<typename TreeType>
inline
PointIndexIterator<TreeType>::PointIndexIterator(const PointIndexIterator& rhs)
    : mRange(rhs.mRange)
    , mRangeList(rhs.mRangeList)
    , mIter(mRangeList.begin())
    , mIndexArray()
    , mIndexArraySize(rhs.mIndexArraySize)
{
    // deep-copy the secondary (filtered) index array
    if (rhs.mIndexArray) {
        mIndexArray.reset(new ValueType[mIndexArraySize]);
        memcpy(mIndexArray.get(), rhs.mIndexArray.get(), mIndexArraySize * sizeof(ValueType));
    }
}


template<typename TreeType>
inline PointIndexIterator<TreeType>&
PointIndexIterator<TreeType>::operator=(const PointIndexIterator& rhs)
{
    if (&rhs != this) {
        mRange = rhs.mRange;
        mRangeList = rhs.mRangeList;
        mIter = mRangeList.begin();
        mIndexArray.reset();
        mIndexArraySize = rhs.mIndexArraySize;

        if (rhs.mIndexArray) {
            mIndexArray.reset(new ValueType[mIndexArraySize]);
            memcpy(mIndexArray.get(), rhs.mIndexArray.get(), mIndexArraySize * sizeof(ValueType));
        }
    }
    return *this;
}


template<typename TreeType>
inline
PointIndexIterator<TreeType>::PointIndexIterator(const Coord& ijk, ConstAccessor& acc)
    : mRange(static_cast<ValueType*>(nullptr), static_cast<ValueType*>(nullptr))
    , mRangeList()
    , mIter(mRangeList.begin())
    , mIndexArray()
    , mIndexArraySize(0)
{
    const LeafNodeType* leaf = acc.probeConstLeaf(ijk);
    if (leaf && leaf->getIndices(ijk, mRange.first, mRange.second)) {
        mRangeList.push_back(mRange);
        mIter = mRangeList.begin();
    }
}


template<typename TreeType>
inline
PointIndexIterator<TreeType>::PointIndexIterator(const CoordBBox& bbox, ConstAccessor& acc)
    : mRange(static_cast<ValueType*>(nullptr), static_cast<ValueType*>(nullptr))
    , mRangeList()
    , mIter(mRangeList.begin())
    , mIndexArray()
    , mIndexArraySize(0)
{
    point_index_grid_internal::pointIndexSearch(mRangeList, acc, bbox);

    if (!mRangeList.empty()) {
        mIter = mRangeList.begin();
        mRange = mRangeList.front();
    }
}


template<typename TreeType>
inline void
PointIndexIterator<TreeType>::reset()
{
    mIter = mRangeList.begin();
    if (!mRangeList.empty()) {
        mRange = mRangeList.front();
    } else if (mIndexArray) {
        // no ranges: iterate over the secondary index array only
        mRange.first = mIndexArray.get();
        mRange.second = mRange.first + mIndexArraySize;
    } else {
        mRange.first = static_cast<ValueType*>(nullptr);
        mRange.second = static_cast<ValueType*>(nullptr);
    }
}


template<typename TreeType>
inline void
PointIndexIterator<TreeType>::increment()
{
    ++mRange.first;
    if (mRange.first >= mRange.second && mIter != mRangeList.end()) {
        ++mIter;
        if (mIter != mRangeList.end()) {
            mRange = *mIter;
        } else if (mIndexArray) {
            // all ranges exhausted: fall through to the secondary index array
            mRange.first = mIndexArray.get();
            mRange.second = mRange.first + mIndexArraySize;
        }
    }
}


template<typename TreeType>
inline bool
PointIndexIterator<TreeType>::next()
{
    if (!this->test()) return false;
    this->increment();
    return this->test();
}


template<typename TreeType>
inline size_t
PointIndexIterator<TreeType>::size() const
{
    size_t count = 0;
    typename RangeDeque::const_iterator it = mRangeList.begin();

    for ( ; it != mRangeList.end(); ++it) {
        count += it->second - it->first;
    }

    return count + mIndexArraySize;
}


template<typename TreeType>
inline void
PointIndexIterator<TreeType>::clear()
{
    mRange.first = static_cast<ValueType*>(nullptr);
    mRange.second = static_cast<ValueType*>(nullptr);
    mRangeList.clear();
    mIter = mRangeList.end();
    mIndexArray.reset();
    mIndexArraySize = 0;
}


template<typename TreeType>
inline void
PointIndexIterator<TreeType>::searchAndUpdate(const Coord& ijk, ConstAccessor& acc)
{
    this->clear();
    const LeafNodeType* leaf = acc.probeConstLeaf(ijk);
    if (leaf && leaf->getIndices(ijk, mRange.first, mRange.second)) {
        mRangeList.push_back(mRange);
        mIter = mRangeList.begin();
    }
}


template<typename TreeType>
inline void
PointIndexIterator<TreeType>::searchAndUpdate(const CoordBBox& bbox, ConstAccessor& acc)
{
    this->clear();
    point_index_grid_internal::pointIndexSearch(mRangeList, acc, bbox);

    if (!mRangeList.empty()) {
        mIter = mRangeList.begin();
        mRange = mRangeList.front();
    }
}


template<typename TreeType>
template<typename PointArray>
inline void
PointIndexIterator<TreeType>::searchAndUpdate(const BBoxd& bbox, ConstAccessor& acc,
    const PointArray& points, const math::Transform& xform)
{
    this->clear();

    std::vector<CoordBBox> searchRegions;
    CoordBBox region(Coord::round(bbox.min()), Coord::round(bbox.max()));

    const Coord dim = region.dim();
    const int minExtent = std::min(dim[0], std::min(dim[1], dim[2]));

    if (minExtent > 2) {
        // collect indices that don't need to be tested
        CoordBBox ibox = region;
        ibox.expand(-1);

        point_index_grid_internal::pointIndexSearch(mRangeList, acc, ibox);

        // define regions for the filtered search
        ibox.expand(1);
        point_index_grid_internal::constructExclusiveRegions(searchRegions, region, ibox);
    } else {
        searchRegions.push_back(region);
    }

    // filtered search
    std::deque<ValueType> filteredIndices;
    point_index_grid_internal::BBoxFilter<PointArray, ValueType>
        filter(mRangeList, filteredIndices, bbox, points, xform);

    for (size_t n = 0, N = searchRegions.size(); n < N; ++n) {
        point_index_grid_internal::filteredPointIndexSearch(filter, acc, searchRegions[n]);
    }

    point_index_grid_internal::dequeToArray(filteredIndices, mIndexArray, mIndexArraySize);

    this->reset();
}


template<typename TreeType>
template<typename PointArray>
inline void
PointIndexIterator<TreeType>::searchAndUpdate(const Vec3d& center, double radius,
    ConstAccessor& acc, const PointArray& points, const math::Transform& xform,
    bool subvoxelAccuracy)
{
    this->clear();
    std::vector<CoordBBox> searchRegions;

    // bounding box
    CoordBBox bbox(
        Coord::round(Vec3d(center[0] - radius, center[1] - radius, center[2] - radius)),
        Coord::round(Vec3d(center[0] + radius, center[1] + radius, center[2] + radius)));
    bbox.expand(1);

    const double iRadius = radius * double(1.0 / std::sqrt(3.0));
    if (iRadius > 2.0) {
        // inscribed box
        CoordBBox ibox(
            Coord::round(Vec3d(center[0] - iRadius, center[1] - iRadius, center[2] - iRadius)),
            Coord::round(Vec3d(center[0] + iRadius, center[1] + iRadius, center[2] + iRadius)));
        ibox.expand(-1);

        // collect indices that don't need to be tested
        point_index_grid_internal::pointIndexSearch(mRangeList, acc, ibox);

        ibox.expand(1);
        point_index_grid_internal::constructExclusiveRegions(searchRegions, bbox, ibox);
    } else {
        searchRegions.push_back(bbox);
    }

    // filtered search
    std::deque<ValueType> filteredIndices;
    const double leafNodeDim = double(TreeType::LeafNodeType::DIM);

    using FilterT = point_index_grid_internal::RadialRangeFilter<PointArray, ValueType>;

    FilterT filter(mRangeList, filteredIndices,
        center, radius, points, xform, leafNodeDim, subvoxelAccuracy);

    for (size_t n = 0, N = searchRegions.size(); n < N; ++n) {
        point_index_grid_internal::filteredPointIndexSearch(filter, acc, searchRegions[n]);
    }

    point_index_grid_internal::dequeToArray(filteredIndices, mIndexArray, mIndexArraySize);

    this->reset();
}


template<typename TreeType>
template<typename PointArray>
inline void
PointIndexIterator<TreeType>::worldSpaceSearchAndUpdate(const BBoxd& bbox, ConstAccessor& acc,
    const PointArray& points, const math::Transform& xform)
{
    this->searchAndUpdate(
        BBoxd(xform.worldToIndex(bbox.min()), xform.worldToIndex(bbox.max())), acc, points, xform);
}


template<typename TreeType>
template<typename PointArray>
inline void
PointIndexIterator<TreeType>::worldSpaceSearchAndUpdate(const Vec3d& center, double radius,
    ConstAccessor& acc, const PointArray& points, const math::Transform& xform,
    bool subvoxelAccuracy)
{
    this->searchAndUpdate(xform.worldToIndex(center),
        (radius / xform.voxelSize()[0]), acc, points, xform, subvoxelAccuracy);
}


////////////////////////////////////////

// PointIndexFilter implementation
template<typename PointArray, typename TreeType> inline PointIndexFilter<PointArray, TreeType>::PointIndexFilter( const PointArray& points, const TreeType& tree, const math::Transform& xform) : mPoints(&points), mAcc(tree), mXform(xform), mInvVoxelSize(1.0/xform.voxelSize()[0]) { } template<typename PointArray, typename TreeType> inline PointIndexFilter<PointArray, TreeType>::PointIndexFilter(const PointIndexFilter& rhs) : mPoints(rhs.mPoints) , mAcc(rhs.mAcc.tree()) , mXform(rhs.mXform) , mInvVoxelSize(rhs.mInvVoxelSize) { } template<typename PointArray, typename TreeType> template<typename FilterType> inline void PointIndexFilter<PointArray, TreeType>::searchAndApply( const PosType& center, ScalarType radius, FilterType& op) { if (radius * mInvVoxelSize < ScalarType(8.0)) { mIter.searchAndUpdate(openvdb::CoordBBox( mXform.worldToIndexCellCentered(center - radius), mXform.worldToIndexCellCentered(center + radius)), mAcc); } else { mIter.worldSpaceSearchAndUpdate( center, radius, mAcc, *mPoints, mXform, /*subvoxelAccuracy=*/false); } const ScalarType radiusSqr = radius * radius; ScalarType distSqr = 0.0; PosType pos; for (; mIter; ++mIter) { mPoints->getPos(*mIter, pos); pos -= center; distSqr = pos.lengthSqr(); if (distSqr < radiusSqr) { op(distSqr, *mIter); } } } //////////////////////////////////////// template<typename GridT, typename PointArrayT> inline typename GridT::Ptr createPointIndexGrid(const PointArrayT& points, const math::Transform& xform) { typename GridT::Ptr grid = GridT::create(typename GridT::ValueType(0)); grid->setTransform(xform.copy()); if (points.size() > 0) { point_index_grid_internal::constructPointTree( grid->tree(), grid->transform(), points); } return grid; } template<typename GridT, typename PointArrayT> inline typename GridT::Ptr createPointIndexGrid(const PointArrayT& points, double voxelSize) { math::Transform::Ptr xform = math::Transform::createLinearTransform(voxelSize); return createPointIndexGrid<GridT>(points, *xform); } 
template<typename PointArrayT, typename GridT>
inline bool
isValidPartition(const PointArrayT& points, const GridT& grid)
{
    tree::LeafManager<const typename GridT::TreeType> leafs(grid.tree());

    size_t pointCount = 0;
    for (size_t n = 0, N = leafs.leafCount(); n < N; ++n) {
        pointCount += leafs.leaf(n).indices().size();
    }

    // quick rejection: the grid must index exactly the given number of points.
    if (points.size() != pointCount) {
        return false;
    }

    tbb::atomic<bool> changed;
    changed = false;

    point_index_grid_internal::ValidPartitioningOp<PointArrayT>
        op(changed, points, grid.transform());

    leafs.foreach(op);

    return !bool(changed);
}


template<typename GridT, typename PointArrayT>
inline typename GridT::ConstPtr
getValidPointIndexGrid(const PointArrayT& points, const typename GridT::ConstPtr& grid)
{
    if (isValidPartition(points, *grid)) {
        return grid;
    }

    return createPointIndexGrid<GridT>(points, grid->transform());
}


template<typename GridT, typename PointArrayT>
inline typename GridT::Ptr
getValidPointIndexGrid(const PointArrayT& points, const typename GridT::Ptr& grid)
{
    if (isValidPartition(points, *grid)) {
        return grid;
    }

    return createPointIndexGrid<GridT>(points, grid->transform());
}


////////////////////////////////////////


template<typename T, Index Log2Dim>
struct PointIndexLeafNode : public tree::LeafNode<T, Log2Dim>
{
    using LeafNodeType = PointIndexLeafNode<T, Log2Dim>;
    using Ptr = SharedPtr<PointIndexLeafNode>;

    using ValueType = T;
    using IndexArray = std::vector<ValueType>;


    /// @{
    /// @brief Access the point-index array stored in this leaf node.
    IndexArray& indices() { return mIndices; }
    const IndexArray& indices() const { return mIndices; }
    /// @}

    bool getIndices(const Coord& ijk, const ValueType*& begin, const ValueType*& end) const;
    bool getIndices(Index offset, const ValueType*& begin, const ValueType*& end) const;

    void setOffsetOn(Index offset, const ValueType& val);
    void setOffsetOnly(Index offset, const ValueType& val);

    bool isEmpty(const CoordBBox& bbox) const;

private:
    IndexArray mIndices;

    ////////////////////////////////////////

    // The following methods had to be copied from the LeafNode class
    // to make the derived PointIndexLeafNode class compatible with the tree structure.

public:
    using BaseLeaf = tree::LeafNode<T, Log2Dim>;
    using NodeMaskType = util::NodeMask<Log2Dim>;

    using BaseLeaf::LOG2DIM;
    using BaseLeaf::TOTAL;
    using BaseLeaf::DIM;
    using BaseLeaf::NUM_VALUES;
    using BaseLeaf::NUM_VOXELS;
    using BaseLeaf::SIZE;
    using BaseLeaf::LEVEL;

    /// Default constructor
    PointIndexLeafNode() : BaseLeaf(), mIndices() {}

    explicit
    PointIndexLeafNode(const Coord& coords, const T& value = zeroVal<T>(), bool active = false)
        : BaseLeaf(coords, value, active)
        , mIndices()
    {
    }

    PointIndexLeafNode(PartialCreate, const Coord& coords,
        const T& value = zeroVal<T>(), bool active = false)
        : BaseLeaf(PartialCreate(), coords, value, active)
        , mIndices()
    {
    }

    /// Deep copy constructor
    PointIndexLeafNode(const PointIndexLeafNode& rhs) : BaseLeaf(rhs), mIndices(rhs.mIndices) {}

    /// @brief Return @c true if the given node (which may have a different @c ValueType
    ///        than this node) has the same active value topology as this node.
    template<typename OtherType, Index OtherLog2Dim>
    bool hasSameTopology(const PointIndexLeafNode<OtherType, OtherLog2Dim>* other) const {
        return BaseLeaf::hasSameTopology(other);
    }

    /// Check for buffer, state and origin equivalence.
    bool operator==(const PointIndexLeafNode& other) const { return BaseLeaf::operator==(other); }
    bool operator!=(const PointIndexLeafNode& other) const { return !(other == *this); }

    template<MergePolicy Policy>
    void merge(const PointIndexLeafNode& rhs) {
        BaseLeaf::merge<Policy>(rhs);
    }

    template<MergePolicy Policy>
    void merge(const ValueType& tileValue, bool tileActive) {
        BaseLeaf::template merge<Policy>(tileValue, tileActive);
    }

    template<MergePolicy Policy>
    void merge(const PointIndexLeafNode& other,
        const ValueType& /*bg*/, const ValueType& /*otherBG*/)
    {
        BaseLeaf::template merge<Policy>(other);
    }

    void addLeaf(PointIndexLeafNode*) {}
    template<typename AccessorT>
    void addLeafAndCache(PointIndexLeafNode*, AccessorT&) {}

    //@{
    /// @brief Return a pointer to this node.
    PointIndexLeafNode* touchLeaf(const Coord&) { return this; }
    template<typename AccessorT>
    PointIndexLeafNode* touchLeafAndCache(const Coord&, AccessorT&) { return this; }

    template<typename NodeT, typename AccessorT>
    NodeT* probeNodeAndCache(const Coord&, AccessorT&)
    {
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        if (!(std::is_same<NodeT, PointIndexLeafNode>::value)) return nullptr;
        return reinterpret_cast<NodeT*>(this);
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }

    PointIndexLeafNode* probeLeaf(const Coord&) { return this; }
    template<typename AccessorT>
    PointIndexLeafNode* probeLeafAndCache(const Coord&, AccessorT&) { return this; }
    //@}

    //@{
    /// @brief Return a @const pointer to this node.
    const PointIndexLeafNode* probeConstLeaf(const Coord&) const { return this; }
    template<typename AccessorT>
    const PointIndexLeafNode* probeConstLeafAndCache(const Coord&, AccessorT&) const {return this;}
    template<typename AccessorT>
    const PointIndexLeafNode* probeLeafAndCache(const Coord&, AccessorT&) const { return this; }
    const PointIndexLeafNode* probeLeaf(const Coord&) const { return this; }

    template<typename NodeT, typename AccessorT>
    const NodeT* probeConstNodeAndCache(const Coord&, AccessorT&) const
    {
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        if (!(std::is_same<NodeT, PointIndexLeafNode>::value)) return nullptr;
        return reinterpret_cast<const NodeT*>(this);
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }
    //@}


    // I/O methods

    void readBuffers(std::istream& is, bool fromHalf = false);
    void readBuffers(std::istream& is, const CoordBBox&, bool fromHalf = false);
    void writeBuffers(std::ostream& os, bool toHalf = false) const;


    Index64 memUsage() const;


    ////////////////////////////////////////

    // Disable all write methods to avoid unintentional changes
    // to the point-array offsets.
void assertNonmodifiable() { assert(false && "Cannot modify voxel values in a PointIndexTree."); } void setActiveState(const Coord&, bool) { assertNonmodifiable(); } void setActiveState(Index, bool) { assertNonmodifiable(); } void setValueOnly(const Coord&, const ValueType&) { assertNonmodifiable(); } void setValueOnly(Index, const ValueType&) { assertNonmodifiable(); } void setValueOff(const Coord&) { assertNonmodifiable(); } void setValueOff(Index) { assertNonmodifiable(); } void setValueOff(const Coord&, const ValueType&) { assertNonmodifiable(); } void setValueOff(Index, const ValueType&) { assertNonmodifiable(); } void setValueOn(const Coord&) { assertNonmodifiable(); } void setValueOn(Index) { assertNonmodifiable(); } void setValueOn(const Coord&, const ValueType&) { assertNonmodifiable(); } void setValueOn(Index, const ValueType&) { assertNonmodifiable(); } void setValue(const Coord&, const ValueType&) { assertNonmodifiable(); } void setValuesOn() { assertNonmodifiable(); } void setValuesOff() { assertNonmodifiable(); } template<typename ModifyOp> void modifyValue(Index, const ModifyOp&) { assertNonmodifiable(); } template<typename ModifyOp> void modifyValue(const Coord&, const ModifyOp&) { assertNonmodifiable(); } template<typename ModifyOp> void modifyValueAndActiveState(const Coord&, const ModifyOp&) { assertNonmodifiable(); } void clip(const CoordBBox&, const ValueType&) { assertNonmodifiable(); } void fill(const CoordBBox&, const ValueType&, bool) { assertNonmodifiable(); } void fill(const ValueType&) {} void fill(const ValueType&, bool) { assertNonmodifiable(); } template<typename AccessorT> void setValueOnlyAndCache(const Coord&, const ValueType&, AccessorT&) {assertNonmodifiable();} template<typename ModifyOp, typename AccessorT> void modifyValueAndActiveStateAndCache(const Coord&, const ModifyOp&, AccessorT&) { assertNonmodifiable(); } template<typename AccessorT> void setValueOffAndCache(const Coord&, const ValueType&, AccessorT&) { 
assertNonmodifiable(); } template<typename AccessorT> void setActiveStateAndCache(const Coord&, bool, AccessorT&) { assertNonmodifiable(); } void resetBackground(const ValueType&, const ValueType&) { assertNonmodifiable(); } void signedFloodFill(const ValueType&) { assertNonmodifiable(); } void signedFloodFill(const ValueType&, const ValueType&) { assertNonmodifiable(); } void negate() { assertNonmodifiable(); } protected: using ValueOn = typename BaseLeaf::ValueOn; using ValueOff = typename BaseLeaf::ValueOff; using ValueAll = typename BaseLeaf::ValueAll; using ChildOn = typename BaseLeaf::ChildOn; using ChildOff = typename BaseLeaf::ChildOff; using ChildAll = typename BaseLeaf::ChildAll; using MaskOnIterator = typename NodeMaskType::OnIterator; using MaskOffIterator = typename NodeMaskType::OffIterator; using MaskDenseIterator = typename NodeMaskType::DenseIterator; // During topology-only construction, access is needed // to protected/private members of other template instances. template<typename, Index> friend struct PointIndexLeafNode; friend class tree::IteratorBase<MaskOnIterator, PointIndexLeafNode>; friend class tree::IteratorBase<MaskOffIterator, PointIndexLeafNode>; friend class tree::IteratorBase<MaskDenseIterator, PointIndexLeafNode>; public: using ValueOnIter = typename BaseLeaf::template ValueIter< MaskOnIterator, PointIndexLeafNode, const ValueType, ValueOn>; using ValueOnCIter = typename BaseLeaf::template ValueIter< MaskOnIterator, const PointIndexLeafNode, const ValueType, ValueOn>; using ValueOffIter = typename BaseLeaf::template ValueIter< MaskOffIterator, PointIndexLeafNode, const ValueType, ValueOff>; using ValueOffCIter = typename BaseLeaf::template ValueIter< MaskOffIterator,const PointIndexLeafNode,const ValueType, ValueOff>; using ValueAllIter = typename BaseLeaf::template ValueIter< MaskDenseIterator, PointIndexLeafNode, const ValueType, ValueAll>; using ValueAllCIter = typename BaseLeaf::template ValueIter< MaskDenseIterator,const 
PointIndexLeafNode,const ValueType, ValueAll>; using ChildOnIter = typename BaseLeaf::template ChildIter< MaskOnIterator, PointIndexLeafNode, ChildOn>; using ChildOnCIter = typename BaseLeaf::template ChildIter< MaskOnIterator, const PointIndexLeafNode, ChildOn>; using ChildOffIter = typename BaseLeaf::template ChildIter< MaskOffIterator, PointIndexLeafNode, ChildOff>; using ChildOffCIter = typename BaseLeaf::template ChildIter< MaskOffIterator, const PointIndexLeafNode, ChildOff>; using ChildAllIter = typename BaseLeaf::template DenseIter< PointIndexLeafNode, ValueType, ChildAll>; using ChildAllCIter = typename BaseLeaf::template DenseIter< const PointIndexLeafNode, const ValueType, ChildAll>; #define VMASK_ this->getValueMask() ValueOnCIter cbeginValueOn() const { return ValueOnCIter(VMASK_.beginOn(), this); } ValueOnCIter beginValueOn() const { return ValueOnCIter(VMASK_.beginOn(), this); } ValueOnIter beginValueOn() { return ValueOnIter(VMASK_.beginOn(), this); } ValueOffCIter cbeginValueOff() const { return ValueOffCIter(VMASK_.beginOff(), this); } ValueOffCIter beginValueOff() const { return ValueOffCIter(VMASK_.beginOff(), this); } ValueOffIter beginValueOff() { return ValueOffIter(VMASK_.beginOff(), this); } ValueAllCIter cbeginValueAll() const { return ValueAllCIter(VMASK_.beginDense(), this); } ValueAllCIter beginValueAll() const { return ValueAllCIter(VMASK_.beginDense(), this); } ValueAllIter beginValueAll() { return ValueAllIter(VMASK_.beginDense(), this); } ValueOnCIter cendValueOn() const { return ValueOnCIter(VMASK_.endOn(), this); } ValueOnCIter endValueOn() const { return ValueOnCIter(VMASK_.endOn(), this); } ValueOnIter endValueOn() { return ValueOnIter(VMASK_.endOn(), this); } ValueOffCIter cendValueOff() const { return ValueOffCIter(VMASK_.endOff(), this); } ValueOffCIter endValueOff() const { return ValueOffCIter(VMASK_.endOff(), this); } ValueOffIter endValueOff() { return ValueOffIter(VMASK_.endOff(), this); } ValueAllCIter cendValueAll() 
const { return ValueAllCIter(VMASK_.endDense(), this); } ValueAllCIter endValueAll() const { return ValueAllCIter(VMASK_.endDense(), this); } ValueAllIter endValueAll() { return ValueAllIter(VMASK_.endDense(), this); } ChildOnCIter cbeginChildOn() const { return ChildOnCIter(VMASK_.endOn(), this); } ChildOnCIter beginChildOn() const { return ChildOnCIter(VMASK_.endOn(), this); } ChildOnIter beginChildOn() { return ChildOnIter(VMASK_.endOn(), this); } ChildOffCIter cbeginChildOff() const { return ChildOffCIter(VMASK_.endOff(), this); } ChildOffCIter beginChildOff() const { return ChildOffCIter(VMASK_.endOff(), this); } ChildOffIter beginChildOff() { return ChildOffIter(VMASK_.endOff(), this); } ChildAllCIter cbeginChildAll() const { return ChildAllCIter(VMASK_.beginDense(), this); } ChildAllCIter beginChildAll() const { return ChildAllCIter(VMASK_.beginDense(), this); } ChildAllIter beginChildAll() { return ChildAllIter(VMASK_.beginDense(), this); } ChildOnCIter cendChildOn() const { return ChildOnCIter(VMASK_.endOn(), this); } ChildOnCIter endChildOn() const { return ChildOnCIter(VMASK_.endOn(), this); } ChildOnIter endChildOn() { return ChildOnIter(VMASK_.endOn(), this); } ChildOffCIter cendChildOff() const { return ChildOffCIter(VMASK_.endOff(), this); } ChildOffCIter endChildOff() const { return ChildOffCIter(VMASK_.endOff(), this); } ChildOffIter endChildOff() { return ChildOffIter(VMASK_.endOff(), this); } ChildAllCIter cendChildAll() const { return ChildAllCIter(VMASK_.endDense(), this); } ChildAllCIter endChildAll() const { return ChildAllCIter(VMASK_.endDense(), this); } ChildAllIter endChildAll() { return ChildAllIter(VMASK_.endDense(), this); } #undef VMASK_ }; // struct PointIndexLeafNode template<typename T, Index Log2Dim> inline bool PointIndexLeafNode<T, Log2Dim>::getIndices(const Coord& ijk, const ValueType*& begin, const ValueType*& end) const { return getIndices(LeafNodeType::coordToOffset(ijk), begin, end); } template<typename T, Index Log2Dim> 
inline bool PointIndexLeafNode<T, Log2Dim>::getIndices(Index offset, const ValueType*& begin, const ValueType*& end) const { if (this->isValueMaskOn(offset)) { const ValueType* dataPtr = &mIndices.front(); begin = dataPtr + (offset == 0 ? ValueType(0) : this->buffer()[offset - 1]); end = dataPtr + this->buffer()[offset]; return true; } return false; } template<typename T, Index Log2Dim> inline void PointIndexLeafNode<T, Log2Dim>::setOffsetOn(Index offset, const ValueType& val) { this->buffer().setValue(offset, val); this->setValueMaskOn(offset); } template<typename T, Index Log2Dim> inline void PointIndexLeafNode<T, Log2Dim>::setOffsetOnly(Index offset, const ValueType& val) { this->buffer().setValue(offset, val); } template<typename T, Index Log2Dim> inline bool PointIndexLeafNode<T, Log2Dim>::isEmpty(const CoordBBox& bbox) const { Index xPos, pos, zStride = Index(bbox.max()[2] - bbox.min()[2]); Coord ijk; for (ijk[0] = bbox.min()[0]; ijk[0] <= bbox.max()[0]; ++ijk[0]) { xPos = (ijk[0] & (DIM - 1u)) << (2 * LOG2DIM); for (ijk[1] = bbox.min()[1]; ijk[1] <= bbox.max()[1]; ++ijk[1]) { pos = xPos + ((ijk[1] & (DIM - 1u)) << LOG2DIM); pos += (bbox.min()[2] & (DIM - 1u)); if (this->buffer()[pos+zStride] > (pos == 0 ? T(0) : this->buffer()[pos - 1])) { return false; } } } return true; } template<typename T, Index Log2Dim> inline void PointIndexLeafNode<T, Log2Dim>::readBuffers(std::istream& is, bool fromHalf) { BaseLeaf::readBuffers(is, fromHalf); Index64 numIndices = Index64(0); is.read(reinterpret_cast<char*>(&numIndices), sizeof(Index64)); mIndices.resize(size_t(numIndices)); is.read(reinterpret_cast<char*>(mIndices.data()), numIndices * sizeof(T)); } template<typename T, Index Log2Dim> inline void PointIndexLeafNode<T, Log2Dim>::readBuffers(std::istream& is, const CoordBBox& bbox, bool fromHalf) { // Read and clip voxel values. 
BaseLeaf::readBuffers(is, bbox, fromHalf); Index64 numIndices = Index64(0); is.read(reinterpret_cast<char*>(&numIndices), sizeof(Index64)); const Index64 numBytes = numIndices * sizeof(T); if (bbox.hasOverlap(this->getNodeBoundingBox())) { mIndices.resize(size_t(numIndices)); is.read(reinterpret_cast<char*>(mIndices.data()), numBytes); /// @todo If any voxels were deactivated as a result of clipping in the call to /// BaseLeaf::readBuffers(), the point index list will need to be regenerated. } else { // Read and discard voxel values. std::unique_ptr<char[]> buf{new char[numBytes]}; is.read(buf.get(), numBytes); } // Reserved for future use Index64 auxDataBytes = Index64(0); is.read(reinterpret_cast<char*>(&auxDataBytes), sizeof(Index64)); if (auxDataBytes > 0) { // For now, read and discard any auxiliary data. std::unique_ptr<char[]> auxData{new char[auxDataBytes]}; is.read(auxData.get(), auxDataBytes); } } template<typename T, Index Log2Dim> inline void PointIndexLeafNode<T, Log2Dim>::writeBuffers(std::ostream& os, bool toHalf) const { BaseLeaf::writeBuffers(os, toHalf); Index64 numIndices = Index64(mIndices.size()); os.write(reinterpret_cast<const char*>(&numIndices), sizeof(Index64)); os.write(reinterpret_cast<const char*>(mIndices.data()), numIndices * sizeof(T)); // Reserved for future use const Index64 auxDataBytes = Index64(0); os.write(reinterpret_cast<const char*>(&auxDataBytes), sizeof(Index64)); } template<typename T, Index Log2Dim> inline Index64 PointIndexLeafNode<T, Log2Dim>::memUsage() const { return BaseLeaf::memUsage() + Index64((sizeof(T)*mIndices.capacity()) + sizeof(mIndices)); } } // namespace tools //////////////////////////////////////// namespace tree { /// Helper metafunction used to implement LeafNode::SameConfiguration /// (which, as an inner class, can't be independently specialized) template<Index Dim1, typename T2> struct SameLeafConfig<Dim1, openvdb::tools::PointIndexLeafNode<T2, Dim1> > { static const bool value = true; }; } // 
namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_POINT_INDEX_GRID_HAS_BEEN_INCLUDED
62,544
C
33.593473
99
0.650614
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetAdvect.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

/// @author Ken Museth
///
/// @file tools/LevelSetAdvect.h
///
/// @brief Hyperbolic advection of narrow-band level sets

#ifndef OPENVDB_TOOLS_LEVEL_SET_ADVECT_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_LEVEL_SET_ADVECT_HAS_BEEN_INCLUDED

#include <tbb/parallel_for.h>
#include <tbb/parallel_reduce.h>
#include <openvdb/Platform.h>
#include "LevelSetTracker.h"
#include "VelocityFields.h" // for EnrightField
#include <openvdb/math/FiniteDifference.h>
//#include <openvdb/util/CpuTimer.h>
#include <functional>


namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {

/// @brief Hyperbolic advection of narrow-band level sets in an
/// external velocity field
///
/// The @c FieldType template argument below refers to any functor
/// with the following interface (see tools/VelocityFields.h
/// for examples):
///
/// @code
/// class VelocityField {
///   ...
/// public:
///   openvdb::VectorType operator() (const openvdb::Coord& xyz, ValueType time) const;
///   ...
/// };
/// @endcode
///
/// @note The functor method returns the velocity field at coordinate
/// position xyz of the advection grid, and for the specified
/// time. Note that since the velocity is returned in the local
/// coordinate space of the grid that is being advected, the functor
/// typically depends on the transformation of that grid. This design
/// is chosen for performance reasons. Finally we will assume that the
/// functor method is NOT threadsafe (typically uses a ValueAccessor)
/// and that its lightweight enough that we can copy it per thread.
///
/// The @c InterruptType template argument below refers to any class
/// with the following interface:
/// @code
/// class Interrupter {
///   ...
/// public:
///   void start(const char* name = nullptr) // called when computations begin
///   void end()                             // called when computations end
///   bool wasInterrupted(int percent=-1)    // return true to break computation
///};
/// @endcode
///
/// @note If no template argument is provided for this InterruptType
/// the util::NullInterrupter is used which implies that all
/// interrupter calls are no-ops (i.e. incurs no computational overhead).
///
template<typename GridT,
         typename FieldT = EnrightField<typename GridT::ValueType>,
         typename InterruptT = util::NullInterrupter>
class LevelSetAdvection
{
public:
    using GridType = GridT;
    using TrackerT = LevelSetTracker<GridT, InterruptT>;
    using LeafRange = typename TrackerT::LeafRange;
    using LeafType = typename TrackerT::LeafType;
    using BufferType = typename TrackerT::BufferType;
    using ValueType = typename TrackerT::ValueType;
    using VectorType = typename FieldT::VectorType;

    /// Main constructor
    LevelSetAdvection(GridT& grid, const FieldT& field, InterruptT* interrupt = nullptr):
        mTracker(grid, interrupt), mField(field),
        mSpatialScheme(math::HJWENO5_BIAS),
        mTemporalScheme(math::TVD_RK2) {}

    virtual ~LevelSetAdvection() {}

    /// @brief Return the spatial finite difference scheme
    math::BiasedGradientScheme getSpatialScheme() const { return mSpatialScheme; }
    /// @brief Set the spatial finite difference scheme
    void setSpatialScheme(math::BiasedGradientScheme scheme) { mSpatialScheme = scheme; }

    /// @brief Return the temporal integration scheme
    math::TemporalIntegrationScheme getTemporalScheme() const { return mTemporalScheme; }
    /// @brief Set the temporal integration scheme
    void setTemporalScheme(math::TemporalIntegrationScheme scheme) { mTemporalScheme = scheme; }

    /// @brief Return the spatial finite difference scheme used by the renormalization tracker
    math::BiasedGradientScheme getTrackerSpatialScheme() const {
        return mTracker.getSpatialScheme();
    }
    /// @brief Set the spatial finite difference scheme used by the renormalization tracker
    void setTrackerSpatialScheme(math::BiasedGradientScheme scheme) {
        mTracker.setSpatialScheme(scheme);
    }

    /// @brief Return the temporal integration scheme used by the renormalization tracker
    math::TemporalIntegrationScheme getTrackerTemporalScheme() const {
        return mTracker.getTemporalScheme();
    }
    /// @brief Set the temporal integration scheme used by the renormalization tracker
    void setTrackerTemporalScheme(math::TemporalIntegrationScheme scheme) {
        mTracker.setTemporalScheme(scheme);
    }

    /// @brief Return The number of normalizations performed per track or
    /// normalize call.
    int getNormCount() const { return mTracker.getNormCount(); }
    /// @brief Set the number of normalizations performed per track or
    /// normalize call.
    void setNormCount(int n) { mTracker.setNormCount(n); }

    /// @brief Return the grain-size used for multi-threading
    int getGrainSize() const { return mTracker.getGrainSize(); }
    /// @brief Set the grain-size used for multi-threading.
    /// @note A grain size of 0 or less disables multi-threading!
    void setGrainSize(int grainsize) { mTracker.setGrainSize(grainsize); }

    /// Advect the level set from its current time, time0, to its
    /// final time, time1. If time0>time1 backward advection is performed.
    ///
    /// @return number of CFL iterations used to advect from time0 to time1
    size_t advect(ValueType time0, ValueType time1);

private:
    // disallow copy construction and copy by assignment!
    LevelSetAdvection(const LevelSetAdvection&);// not implemented
    LevelSetAdvection& operator=(const LevelSetAdvection&);// not implemented

    // This templated private struct implements all the level set magic.
    template<typename MapT, math::BiasedGradientScheme SpatialScheme,
             math::TemporalIntegrationScheme TemporalScheme>
    struct Advect
    {
        /// Main constructor
        Advect(LevelSetAdvection& parent);
        /// Shallow copy constructor called by tbb::parallel_for() threads
        Advect(const Advect& other);
        /// Destructor: only the master instance owns (and frees) the shared
        /// velocity/offset arrays.
        virtual ~Advect() { if (mIsMaster) this->clearField(); }
        /// Advect the level set from its current time, time0, to its final time, time1.
        /// @return number of CFL iterations
        size_t advect(ValueType time0, ValueType time1);
        /// Used internally by tbb::parallel_for()
        void operator()(const LeafRange& r) const
        {
            if (mTask) mTask(const_cast<Advect*>(this), r);
            else OPENVDB_THROW(ValueError, "task is undefined - don\'t call this method directly");
        }
        /// method calling tbb
        void cook(const char* msg, size_t swapBuffer = 0);
        /// Sample field and return the CFL time step
        typename GridT::ValueType sampleField(ValueType time0, ValueType time1);
        // Aligned=true: the field shares the grid's transform, so it can be
        // sampled directly at index coordinates; otherwise coordinates are
        // mapped to world space first.
        template <bool Aligned> void sample(const LeafRange& r, ValueType t0, ValueType t1);
        inline void sampleXformed(const LeafRange& r, ValueType t0, ValueType t1)
        {
            this->sample<false>(r, t0, t1);
        }
        inline void sampleAligned(const LeafRange& r, ValueType t0, ValueType t1)
        {
            this->sample<true>(r, t0, t1);
        }
        /// Free the cached velocity samples and prefix-sum offsets.
        void clearField();
        // Convex combination of Phi and a forward Euler advection steps:
        // Phi(result) = alpha * Phi(phi) + (1-alpha) * (Phi(0) - dt * Speed(speed)*|Grad[Phi(0)]|);
        template <int Nominator, int Denominator>
        void euler(const LeafRange&, ValueType, Index, Index);
        inline void euler01(const LeafRange& r, ValueType t) {this->euler<0,1>(r, t, 0, 1);}
        inline void euler12(const LeafRange& r, ValueType t) {this->euler<1,2>(r, t, 1, 1);}
        inline void euler34(const LeafRange& r, ValueType t) {this->euler<3,4>(r, t, 1, 2);}
        inline void euler13(const LeafRange& r, ValueType t) {this->euler<1,3>(r, t, 1, 2);}

        LevelSetAdvection& mParent;
        VectorType*        mVelocity; // one cached velocity sample per active voxel
        size_t*            mOffsets;  // per-leaf prefix sum into mVelocity
        const MapT*        mMap;
        typename std::function<void (Advect*, const LeafRange&)> mTask;
        const bool         mIsMaster;
    }; // end of private Advect struct

    // The advect1/2/3 helpers successively resolve the runtime spatial scheme,
    // temporal scheme and map type into compile-time template arguments.
    template<math::BiasedGradientScheme SpatialScheme>
    size_t advect1(ValueType time0, ValueType time1);

    template<math::BiasedGradientScheme SpatialScheme,
             math::TemporalIntegrationScheme TemporalScheme>
    size_t advect2(ValueType time0, ValueType time1);

    template<math::BiasedGradientScheme SpatialScheme,
             math::TemporalIntegrationScheme TemporalScheme,
             typename MapType>
    size_t advect3(ValueType time0, ValueType time1);

    TrackerT mTracker;
    //each thread needs a deep copy of the field since it might contain a ValueAccessor
    const FieldT mField;
    math::BiasedGradientScheme mSpatialScheme;
    math::TemporalIntegrationScheme mTemporalScheme;

};//end of LevelSetAdvection


template<typename GridT, typename FieldT, typename InterruptT>
inline size_t
LevelSetAdvection<GridT, FieldT, InterruptT>::advect(ValueType time0, ValueType time1)
{
    // Dispatch on the runtime spatial scheme.
    switch (mSpatialScheme) {
    case math::FIRST_BIAS:
        return this->advect1<math::FIRST_BIAS  >(time0, time1);
    case math::SECOND_BIAS:
        return this->advect1<math::SECOND_BIAS >(time0, time1);
    case math::THIRD_BIAS:
        return this->advect1<math::THIRD_BIAS  >(time0, time1);
    case math::WENO5_BIAS:
        return this->advect1<math::WENO5_BIAS  >(time0, time1);
    case math::HJWENO5_BIAS:
        return this->advect1<math::HJWENO5_BIAS>(time0, time1);
    default:
        OPENVDB_THROW(ValueError, "Spatial difference scheme not supported!");
    }
    return 0;
}


template<typename GridT, typename FieldT, typename InterruptT>
template<math::BiasedGradientScheme SpatialScheme>
inline size_t
LevelSetAdvection<GridT, FieldT, InterruptT>::advect1(ValueType time0, ValueType time1)
{
    // Dispatch on the runtime temporal integration scheme.
    switch (mTemporalScheme) {
    case math::TVD_RK1:
        return this->advect2<SpatialScheme, math::TVD_RK1>(time0, time1);
    case math::TVD_RK2:
        return this->advect2<SpatialScheme, math::TVD_RK2>(time0, time1);
    case math::TVD_RK3:
        return this->advect2<SpatialScheme, math::TVD_RK3>(time0, time1);
    default:
        OPENVDB_THROW(ValueError, "Temporal integration scheme not supported!");
    }
    return 0;
}


template<typename GridT, typename FieldT, typename InterruptT>
template<math::BiasedGradientScheme SpatialScheme,
         math::TemporalIntegrationScheme TemporalScheme>
inline size_t
LevelSetAdvection<GridT, FieldT, InterruptT>::advect2(ValueType time0, ValueType time1)
{
    // Dispatch on the grid's map (transform) type.
    const math::Transform& trans = mTracker.grid().transform();
    if (trans.mapType() == math::UniformScaleMap::mapType()) {
        return this->advect3<SpatialScheme, TemporalScheme, math::UniformScaleMap>(time0, time1);
    } else if (trans.mapType() == math::UniformScaleTranslateMap::mapType()) {
        return this->advect3<SpatialScheme, TemporalScheme, math::UniformScaleTranslateMap>(
            time0, time1);
    } else if (trans.mapType() == math::UnitaryMap::mapType()) {
        return this->advect3<SpatialScheme, TemporalScheme, math::UnitaryMap    >(time0, time1);
    } else if (trans.mapType() == math::TranslationMap::mapType()) {
        return this->advect3<SpatialScheme, TemporalScheme, math::TranslationMap>(time0, time1);
    } else {
        OPENVDB_THROW(ValueError, "MapType not supported!");
    }
    return 0;
}


template<typename GridT, typename FieldT, typename InterruptT>
template<
    math::BiasedGradientScheme SpatialScheme,
    math::TemporalIntegrationScheme TemporalScheme,
    typename MapT>
inline size_t
LevelSetAdvection<GridT, FieldT, InterruptT>::advect3(ValueType time0, ValueType time1)
{
    Advect<MapT, SpatialScheme, TemporalScheme> tmp(*this);
    return tmp.advect(time0, time1);
}


///////////////////////////////////////////////////////////////////////


template<typename GridT, typename FieldT, typename InterruptT>
template<
    typename MapT,
    math::BiasedGradientScheme SpatialScheme,
    math::TemporalIntegrationScheme TemporalScheme>
inline
LevelSetAdvection<GridT, FieldT, InterruptT>::
Advect<MapT, SpatialScheme, TemporalScheme>::
Advect(LevelSetAdvection& parent)
    : mParent(parent)
    , mVelocity(nullptr)
    , mOffsets(nullptr)
    , mMap(parent.mTracker.grid().transform().template constMap<MapT>().get())
    , mTask(0)
    , mIsMaster(true)
{
}


template<typename GridT, typename FieldT, typename InterruptT>
template<
    typename MapT,
    math::BiasedGradientScheme SpatialScheme,
    math::TemporalIntegrationScheme TemporalScheme>
inline
LevelSetAdvection<GridT, FieldT, InterruptT>::
Advect<MapT, SpatialScheme, TemporalScheme>::
Advect(const Advect& other)
    : mParent(other.mParent)
    , mVelocity(other.mVelocity)
    , mOffsets(other.mOffsets)
    , mMap(other.mMap)
    , mTask(other.mTask)
    , mIsMaster(false)
{
}


template<typename GridT, typename FieldT, typename InterruptT>
template<
    typename MapT,
    math::BiasedGradientScheme SpatialScheme,
    math::TemporalIntegrationScheme TemporalScheme>
inline size_t
LevelSetAdvection<GridT, FieldT, InterruptT>::
Advect<MapT, SpatialScheme, TemporalScheme>::
advect(ValueType time0, ValueType time1)
{
    namespace ph = std::placeholders;

    //util::CpuTimer timer;
    size_t countCFL = 0;
    if ( math::isZero(time0 - time1) ) return countCFL;
    const bool isForward = time0 < time1;
    // Advance in CFL-limited sub-steps until the target time is reached
    // (or the interrupter fires).
    while ((isForward ? time0<time1 : time0>time1) && mParent.mTracker.checkInterrupter()) {
        /// Make sure we have enough temporal auxiliary buffers
        //timer.start( "\nallocate buffers" );
        mParent.mTracker.leafs().rebuildAuxBuffers(TemporalScheme == math::TVD_RK3 ? 2 : 1);
        //timer.stop();

        const ValueType dt = this->sampleField(time0, time1);
        if ( math::isZero(dt) ) break;//V is essentially zero so terminate

        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN //switch is resolved at compile-time
        switch(TemporalScheme) {
        case math::TVD_RK1:
            // Perform one explicit Euler step: t1 = t0 + dt
            // Phi_t1(1) = Phi_t0(0) - dt * VdotG_t0(0)
            mTask = std::bind(&Advect::euler01, ph::_1, ph::_2, dt);

            // Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1)
            this->cook("Advecting level set using TVD_RK1", 1);
            break;
        case math::TVD_RK2:
            // Perform one explicit Euler step: t1 = t0 + dt
            // Phi_t1(1) = Phi_t0(0) - dt * VdotG_t0(0)
            mTask = std::bind(&Advect::euler01, ph::_1, ph::_2, dt);

            // Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1)
            this->cook("Advecting level set using TVD_RK1 (step 1 of 2)", 1);

            // Convex combine explict Euler step: t2 = t0 + dt
            // Phi_t2(1) = 1/2 * Phi_t0(1) + 1/2 * (Phi_t1(0) - dt * V.Grad_t1(0))
            mTask = std::bind(&Advect::euler12, ph::_1, ph::_2, dt);

            // Cook and swap buffer 0 and 1 such that Phi_t2(0) and Phi_t1(1)
            this->cook("Advecting level set using TVD_RK1 (step 2 of 2)", 1);
            break;
        case math::TVD_RK3:
            // Perform one explicit Euler step: t1 = t0 + dt
            // Phi_t1(1) = Phi_t0(0) - dt * VdotG_t0(0)
            mTask = std::bind(&Advect::euler01, ph::_1, ph::_2, dt);

            // Cook and swap buffer 0 and 1 such that Phi_t1(0) and Phi_t0(1)
            this->cook("Advecting level set using TVD_RK3 (step 1 of 3)", 1);

            // Convex combine explict Euler step: t2 = t0 + dt/2
            // Phi_t2(2) = 3/4 * Phi_t0(1) + 1/4 * (Phi_t1(0) - dt * V.Grad_t1(0))
            mTask = std::bind(&Advect::euler34, ph::_1, ph::_2, dt);

            // Cook and swap buffer 0 and 2 such that Phi_t2(0) and Phi_t1(2)
            this->cook("Advecting level set using TVD_RK3 (step 2 of 3)", 2);

            // Convex combine explict Euler step: t3 = t0 + dt
            // Phi_t3(2) = 1/3 * Phi_t0(1) + 2/3 * (Phi_t2(0) - dt * V.Grad_t2(0)
            mTask = std::bind(&Advect::euler13, ph::_1, ph::_2, dt);

            // Cook and swap buffer 0 and 2 such that Phi_t3(0) and Phi_t2(2)
            this->cook("Advecting level set using TVD_RK3 (step 3 of 3)", 2);
            break;
        default:
            OPENVDB_THROW(ValueError, "Temporal integration scheme not supported!");
        }//end of compile-time resolved switch
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END

        time0 += isForward ? dt : -dt;
        ++countCFL;
        mParent.mTracker.leafs().removeAuxBuffers();
        this->clearField();
        /// Track the narrow band
        mParent.mTracker.track();
    }//end while-loop over time

    return countCFL;//number of CLF propagation steps
}


template<typename GridT, typename FieldT, typename InterruptT>
template<
    typename MapT,
    math::BiasedGradientScheme SpatialScheme,
    math::TemporalIntegrationScheme TemporalScheme>
inline typename GridT::ValueType
LevelSetAdvection<GridT, FieldT, InterruptT>::
Advect<MapT, SpatialScheme, TemporalScheme>::
sampleField(ValueType time0, ValueType time1)
{
    namespace ph = std::placeholders;

    const int grainSize = mParent.mTracker.getGrainSize();
    const size_t leafCount = mParent.mTracker.leafs().leafCount();
    if (leafCount==0) return ValueType(0.0);

    // Compute the prefix sum of offsets to active voxels
    size_t size=0, voxelCount=mParent.mTracker.leafs().getPrefixSum(mOffsets, size, grainSize);

    // Sample the velocity field
    if (mParent.mField.transform() == mParent.mTracker.grid().transform()) {
        mTask = std::bind(&Advect::sampleAligned, ph::_1, ph::_2, time0, time1);
    } else {
        mTask = std::bind(&Advect::sampleXformed, ph::_1, ph::_2, time0, time1);
    }
    assert(voxelCount == mParent.mTracker.grid().activeVoxelCount());
    mVelocity = new VectorType[ voxelCount ];
    this->cook("Sampling advection field");

    // Find the extrema of the magnitude of the velocities
    // (maxAbsV accumulates the maximum SQUARED length; the Sqrt below
    // converts it back to a magnitude).
    ValueType maxAbsV = 0;
    VectorType* v = mVelocity;
    for (size_t i = 0; i < voxelCount; ++i, ++v) {
        maxAbsV = math::Max(maxAbsV, ValueType(v->lengthSqr()));
    }

    // Compute the CFL number
    if (math::isApproxZero(maxAbsV, math::Delta<ValueType>::value())) return ValueType(0);
    static const ValueType CFL = (TemporalScheme == math::TVD_RK1 ? ValueType(0.3) :
                                  TemporalScheme == math::TVD_RK2 ? ValueType(0.9) :
                                  ValueType(1.0))/math::Sqrt(ValueType(3.0));
    const ValueType dt = math::Abs(time1 - time0), dx = mParent.mTracker.voxelSize();
    return math::Min(dt, ValueType(CFL*dx/math::Sqrt(maxAbsV)));
}


template<typename GridT, typename FieldT, typename InterruptT>
template<
    typename MapT,
    math::BiasedGradientScheme SpatialScheme,
    math::TemporalIntegrationScheme TemporalScheme>
template<bool Aligned>
inline void
LevelSetAdvection<GridT, FieldT, InterruptT>::
Advect<MapT, SpatialScheme, TemporalScheme>::
sample(const LeafRange& range, ValueType time0, ValueType time1)
{
    const bool isForward = time0 < time1;
    using VoxelIterT = typename LeafType::ValueOnCIter;
    const MapT& map = *mMap;
    // Per-thread deep copy of the field (it may hold a non-threadsafe accessor).
    const FieldT field( mParent.mField );
    mParent.mTracker.checkInterrupter();
    for (typename LeafRange::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
        VectorType* vel = mVelocity + mOffsets[ leafIter.pos() ];
        for (VoxelIterT iter = leafIter->cbeginValueOn(); iter; ++iter, ++vel) {
            const VectorType v = Aligned ? field(iter.getCoord(), time0) ://resolved at compile time
                                 field(map.applyMap(iter.getCoord().asVec3d()), time0);
            // Negate the velocity for backward advection.
            *vel = isForward ? v : -v;
        }
    }
}


template<typename GridT, typename FieldT, typename InterruptT>
template<
    typename MapT,
    math::BiasedGradientScheme SpatialScheme,
    math::TemporalIntegrationScheme TemporalScheme>
inline void
LevelSetAdvection<GridT, FieldT, InterruptT>::
Advect<MapT, SpatialScheme, TemporalScheme>::
clearField()
{
    delete [] mOffsets;
    delete [] mVelocity;
    mOffsets = nullptr;
    mVelocity = nullptr;
}


template<typename GridT, typename FieldT, typename InterruptT>
template<
    typename MapT,
    math::BiasedGradientScheme SpatialScheme,
    math::TemporalIntegrationScheme TemporalScheme>
inline void
LevelSetAdvection<GridT, FieldT, InterruptT>::
Advect<MapT, SpatialScheme, TemporalScheme>::
cook(const char* msg, size_t swapBuffer)
{
    mParent.mTracker.startInterrupter( msg );

    const int grainSize = mParent.mTracker.getGrainSize();
    const LeafRange range = mParent.mTracker.leafs().leafRange(grainSize);

    // A grain size of zero means run serially on this thread.
    grainSize == 0 ? (*this)(range) : tbb::parallel_for(range, *this);

    mParent.mTracker.leafs().swapLeafBuffer(swapBuffer, grainSize == 0);

    mParent.mTracker.endInterrupter();
}


// Convex combination of Phi and a forward Euler advection steps:
// Phi(result) = alpha * Phi(phi) + (1-alpha) * (Phi(0) - dt * V.Grad(0));
template<typename GridT, typename FieldT, typename InterruptT>
template<
    typename MapT,
    math::BiasedGradientScheme SpatialScheme,
    math::TemporalIntegrationScheme TemporalScheme>
template <int Nominator, int Denominator>
inline void
LevelSetAdvection<GridT, FieldT, InterruptT>::
Advect<MapT, SpatialScheme, TemporalScheme>::
euler(const LeafRange& range, ValueType dt, Index phiBuffer, Index resultBuffer)
{
    using SchemeT = math::BIAS_SCHEME<SpatialScheme>;
    using StencilT = typename SchemeT::template ISStencil<GridType>::StencilType;
    using VoxelIterT = typename LeafType::ValueOnCIter;
    using GradT = math::GradientBiased<MapT, SpatialScheme>;

    static const ValueType Alpha = ValueType(Nominator)/ValueType(Denominator);
    static const ValueType Beta  = ValueType(1) - Alpha;

    mParent.mTracker.checkInterrupter();
    const MapT& map = *mMap;
    StencilT stencil(mParent.mTracker.grid());
    for (typename LeafRange::Iterator leafIter = range.begin(); leafIter; ++leafIter) {
        const VectorType* vel = mVelocity + mOffsets[ leafIter.pos() ];
        const ValueType* phi = leafIter.buffer(phiBuffer).data();
        ValueType* result = leafIter.buffer(resultBuffer).data();
        for (VoxelIterT voxelIter = leafIter->cbeginValueOn(); voxelIter; ++voxelIter, ++vel) {
            const Index i = voxelIter.pos();
            stencil.moveTo(voxelIter);
            // Upwind-biased semi-Lagrangian update: phi - dt * (V . grad phi)
            const ValueType a =
                stencil.getValue() - dt * vel->dot(GradT::result(map, stencil, *vel));
            // Nominator == 0 selects the pure Euler step at compile time.
            result[i] = Nominator ? Alpha * phi[i] + Beta * a : a;
        }//loop over active voxels in the leaf of the mask
    }//loop over leafs of the level set
}

} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TOOLS_LEVEL_SET_ADVECT_HAS_BEEN_INCLUDED
22,887
C
38.736111
100
0.673439
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/PotentialFlow.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file tools/PotentialFlow.h /// /// @brief Tools for creating potential flow fields through solving Laplace's equation /// /// @authors Todd Keeler, Dan Bailey #ifndef OPENVDB_TOOLS_POTENTIAL_FLOW_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_POTENTIAL_FLOW_HAS_BEEN_INCLUDED #include <openvdb/openvdb.h> #include "GridOperators.h" #include "GridTransformer.h" #include "Mask.h" // interiorMask #include "Morphology.h" // dilateVoxels, erodeVoxels #include "PoissonSolver.h" namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Metafunction to convert a vector-valued grid type to a scalar grid type template<typename VecGridT> struct VectorToScalarGrid { using Type = typename VecGridT::template ValueConverter<typename VecGridT::ValueType::value_type>::Type; using Ptr = typename Type::Ptr; using ConstPtr = typename Type::ConstPtr; }; /// @brief Construct a mask for the Potential Flow domain. /// @details For a level set, this represents a rebuilt exterior narrow band. /// For any other grid it is a new region that surrounds the active voxels. /// @param grid source grid to use for computing the mask /// @param dilation dilation in voxels of the source grid to form the new potential flow mask template<typename GridT, typename MaskT = typename GridT::template ValueConverter<ValueMask>::Type> inline typename MaskT::Ptr createPotentialFlowMask(const GridT& grid, int dilation = 5); /// @brief Create a Potential Flow velocities grid for the Neumann boundary. 
/// @param collider a level set that represents the boundary /// @param domain a mask to represent the potential flow domain /// @param boundaryVelocity an optional grid pointer to stores the velocities of the boundary /// @param backgroundVelocity a background velocity value /// @details Typically this method involves supplying a velocity grid for the /// collider boundary, however it can also be used for a global wind field /// around the collider by supplying an empty boundary Velocity and a /// non-zero background velocity. template<typename Vec3T, typename GridT, typename MaskT> inline typename GridT::template ValueConverter<Vec3T>::Type::Ptr createPotentialFlowNeumannVelocities(const GridT& collider, const MaskT& domain, const typename GridT::template ValueConverter<Vec3T>::Type::ConstPtr boundaryVelocity, const Vec3T& backgroundVelocity); /// @brief Compute the Potential on the domain using the Neumann boundary conditions on /// solid boundaries /// @param domain a mask to represent the domain in which to perform the solve /// @param neumann the topology of this grid defines where the solid boundaries are and grid /// values give the Neumann boundaries that should be applied there /// @param state the solver parameters for computing the solution /// @param interrupter pointer to an optional interrupter adhering to the /// util::NullInterrupter interface /// @details On input, the State object should specify convergence criteria /// (minimum error and maximum number of iterations); on output, it gives /// the actual termination conditions. 
template<typename Vec3GridT, typename MaskT, typename InterrupterT = util::NullInterrupter> inline typename VectorToScalarGrid<Vec3GridT>::Ptr computeScalarPotential(const MaskT& domain, const Vec3GridT& neumann, math::pcg::State& state, InterrupterT* interrupter = nullptr); /// @brief Compute a vector Flow Field comprising the gradient of the potential with Neumann /// boundary conditions applied /// @param potential scalar potential, typically computed from computeScalarPotential() /// @param neumann the topology of this grid defines where the solid boundaries are and grid /// values give the Neumann boundaries that should be applied there /// @param backgroundVelocity a background velocity value template<typename Vec3GridT> inline typename Vec3GridT::Ptr computePotentialFlow(const typename VectorToScalarGrid<Vec3GridT>::Type& potential, const Vec3GridT& neumann, const typename Vec3GridT::ValueType backgroundVelocity = zeroVal<typename Vec3GridT::TreeType::ValueType>()); ////////////////////////////////////////////////////////// namespace potential_flow_internal { /// @private // helper function for retrieving a mask that comprises the outer-most layer of voxels template<typename GridT> inline typename GridT::TreeType::template ValueConverter<ValueMask>::Type::Ptr extractOuterVoxelMask(GridT& inGrid) { using MaskTreeT = typename GridT::TreeType::template ValueConverter<ValueMask>::Type; typename MaskTreeT::Ptr interiorMask(new MaskTreeT(inGrid.tree(), false, TopologyCopy())); typename MaskTreeT::Ptr boundaryMask(new MaskTreeT(inGrid.tree(), false, TopologyCopy())); erodeVoxels(*interiorMask, 1, NN_FACE); boundaryMask->topologyDifference(*interiorMask); return boundaryMask; } // computes Neumann velocities through sampling the gradient and velocities template<typename Vec3GridT, typename GradientT> struct ComputeNeumannVelocityOp { using ValueT = typename Vec3GridT::ValueType; using VelocityAccessor = typename Vec3GridT::ConstAccessor; using VelocitySamplerT = 
GridSampler< typename Vec3GridT::ConstAccessor, BoxSampler>; using GradientValueT = typename GradientT::TreeType::ValueType; ComputeNeumannVelocityOp( const GradientT& gradient, const Vec3GridT& velocity, const ValueT& backgroundVelocity) : mGradient(gradient) , mVelocity(&velocity) , mBackgroundVelocity(backgroundVelocity) { } ComputeNeumannVelocityOp( const GradientT& gradient, const ValueT& backgroundVelocity) : mGradient(gradient) , mBackgroundVelocity(backgroundVelocity) { } void operator()(typename Vec3GridT::TreeType::LeafNodeType& leaf, size_t) const { auto gradientAccessor = mGradient.getConstAccessor(); std::unique_ptr<VelocityAccessor> velocityAccessor; std::unique_ptr<VelocitySamplerT> velocitySampler; if (mVelocity) { velocityAccessor.reset(new VelocityAccessor(mVelocity->getConstAccessor())); velocitySampler.reset(new VelocitySamplerT(*velocityAccessor, mVelocity->transform())); } for (auto it = leaf.beginValueOn(); it; ++it) { Coord ijk = it.getCoord(); auto gradient = gradientAccessor.getValue(ijk); if (gradient.normalize()) { const Vec3d xyz = mGradient.transform().indexToWorld(ijk); const ValueT sampledVelocity = velocitySampler ? 
velocitySampler->wsSample(xyz) : zeroVal<ValueT>(); auto velocity = sampledVelocity + mBackgroundVelocity; auto value = gradient.dot(velocity) * gradient; it.setValue(value); } else { it.setValueOff(); } } } private: const GradientT& mGradient; const Vec3GridT* mVelocity = nullptr; const ValueT& mBackgroundVelocity; }; // struct ComputeNeumannVelocityOp // initalizes the boundary conditions for use in the Poisson Solver template<typename Vec3GridT, typename MaskT> struct SolveBoundaryOp { SolveBoundaryOp(const Vec3GridT& velGrid, const MaskT& domainGrid) : mVoxelSize(domainGrid.voxelSize()[0]) , mVelGrid(velGrid) , mDomainGrid(domainGrid) { } void operator()(const Coord& ijk, const Coord& neighbor, double& source, double& diagonal) const { typename Vec3GridT::ConstAccessor velGridAccessor = mVelGrid.getAccessor(); const Coord diff = (ijk - neighbor); if (velGridAccessor.isValueOn(ijk)) { // Neumann const typename Vec3GridT::ValueType& sampleVel = velGridAccessor.getValue(ijk); source += mVoxelSize*diff[0]*sampleVel[0]; source += mVoxelSize*diff[1]*sampleVel[1]; source += mVoxelSize*diff[2]*sampleVel[2]; } else { diagonal -= 1; // Zero Dirichlet } } const double mVoxelSize; const Vec3GridT& mVelGrid; const MaskT& mDomainGrid; }; // struct SolveBoundaryOp } // namespace potential_flow_internal //////////////////////////////////////////////////////////////////////////// template<typename GridT, typename MaskT> inline typename MaskT::Ptr createPotentialFlowMask(const GridT& grid, int dilation) { using MaskTreeT = typename MaskT::TreeType; if (!grid.hasUniformVoxels()) { OPENVDB_THROW(ValueError, "Transform must have uniform voxels for Potential Flow mask."); } // construct a new mask grid representing the interior region auto interior = interiorMask(grid); // create a new mask grid from the interior topology typename MaskTreeT::Ptr maskTree(new MaskTreeT(interior->tree(), false, TopologyCopy())); typename MaskT::Ptr mask = MaskT::create(maskTree); 
mask->setTransform(grid.transform().copy()); dilateActiveValues(*maskTree, dilation, NN_FACE_EDGE); // subtract the interior region from the mask to leave just the exterior narrow band mask->tree().topologyDifference(interior->tree()); return mask; } template<typename Vec3T, typename GridT, typename MaskT> typename GridT::template ValueConverter<Vec3T>::Type::Ptr createPotentialFlowNeumannVelocities( const GridT& collider, const MaskT& domain, const typename GridT::template ValueConverter<Vec3T>::Type::ConstPtr boundaryVelocity, const Vec3T& backgroundVelocity) { using Vec3GridT = typename GridT::template ValueConverter<Vec3T>::Type; using TreeT = typename Vec3GridT::TreeType; using ValueT = typename TreeT::ValueType; using GradientT = typename ScalarToVectorConverter<GridT>::Type; using potential_flow_internal::ComputeNeumannVelocityOp; // this method requires the collider to be a level set to generate the gradient // use the tools::topologyToLevelset() method if you need to convert a mask into a level set if (collider.getGridClass() != GRID_LEVEL_SET || !std::is_floating_point<typename GridT::TreeType::ValueType>::value) { OPENVDB_THROW(TypeError, "Potential Flow expecting the collider to be a level set."); } // empty grid if there are no velocities if (backgroundVelocity == zeroVal<Vec3T>() && (!boundaryVelocity || boundaryVelocity->empty())) { auto neumann = Vec3GridT::create(); neumann->setTransform(collider.transform().copy()); return neumann; } // extract the intersection between the collider and the domain using MaskTreeT = typename GridT::TreeType::template ValueConverter<ValueMask>::Type; typename MaskTreeT::Ptr boundary(new MaskTreeT(domain.tree(), false, TopologyCopy())); boundary->topologyIntersection(collider.tree()); typename TreeT::Ptr neumannTree(new TreeT(*boundary, zeroVal<ValueT>(), TopologyCopy())); neumannTree->voxelizeActiveTiles(); // compute the gradient from the collider const typename GradientT::Ptr gradient = tools::gradient(collider); 
typename tree::LeafManager<TreeT> leafManager(*neumannTree); if (boundaryVelocity && !boundaryVelocity->empty()) { ComputeNeumannVelocityOp<Vec3GridT, GradientT> neumannOp(*gradient, *boundaryVelocity, backgroundVelocity); leafManager.foreach(neumannOp, false); } else { ComputeNeumannVelocityOp<Vec3GridT, GradientT> neumannOp(*gradient, backgroundVelocity); leafManager.foreach(neumannOp, false); } // prune any inactive values tools::pruneInactive(*neumannTree); typename Vec3GridT::Ptr neumann(Vec3GridT::create(neumannTree)); neumann->setTransform(collider.transform().copy()); return neumann; } template<typename Vec3GridT, typename MaskT, typename InterrupterT> inline typename VectorToScalarGrid<Vec3GridT>::Ptr computeScalarPotential(const MaskT& domain, const Vec3GridT& neumann, math::pcg::State& state, InterrupterT* interrupter) { using ScalarT = typename Vec3GridT::ValueType::value_type; using ScalarTreeT = typename Vec3GridT::TreeType::template ValueConverter<ScalarT>::Type; using ScalarGridT = typename Vec3GridT::template ValueConverter<ScalarT>::Type; using potential_flow_internal::SolveBoundaryOp; // create the solution tree and activate using domain topology ScalarTreeT solveTree(domain.tree(), zeroVal<ScalarT>(), TopologyCopy()); solveTree.voxelizeActiveTiles(); util::NullInterrupter nullInterrupt; if (!interrupter) interrupter = &nullInterrupt; // solve for scalar potential SolveBoundaryOp<Vec3GridT, MaskT> solve(neumann, domain); typename ScalarTreeT::Ptr potentialTree = poisson::solveWithBoundaryConditions(solveTree, solve, state, *interrupter, true); auto potential = ScalarGridT::create(potentialTree); potential->setTransform(domain.transform().copy()); return potential; } template<typename Vec3GridT> inline typename Vec3GridT::Ptr computePotentialFlow(const typename VectorToScalarGrid<Vec3GridT>::Type& potential, const Vec3GridT& neumann, const typename Vec3GridT::ValueType backgroundVelocity) { using Vec3T = const typename Vec3GridT::ValueType; using 
potential_flow_internal::extractOuterVoxelMask; // The VDB gradient op uses the background grid value, which is zero by default, when // computing the gradient at the boundary. This works at the zero-dirichlet boundaries, but // give spurious values at Neumann ones as the potential should be non-zero there. To avoid // the extra error, we just substitute the Neumann condition on the boundaries. // Technically, we should allow for some tangential velocity, coming from the gradient of // potential. However, considering the voxelized nature of our solve, a decent approximation // to a tangential derivative isn't probably worth our time. Any tangential component will be // found in the next interior ring of voxels. auto gradient = tools::gradient(potential); // apply Neumann values to the gradient auto applyNeumann = [&gradient, &neumann] ( const MaskGrid::TreeType::LeafNodeType& leaf, size_t) { typename Vec3GridT::Accessor gradientAccessor = gradient->getAccessor(); typename Vec3GridT::ConstAccessor neumannAccessor = neumann.getAccessor(); for (auto it = leaf.beginValueOn(); it; ++it) { const Coord ijk = it.getCoord(); typename Vec3GridT::ValueType value; if (neumannAccessor.probeValue(ijk, value)) { gradientAccessor.setValue(ijk, value); } } }; const MaskGrid::TreeType::Ptr boundary = extractOuterVoxelMask(*gradient); typename tree::LeafManager<const typename MaskGrid::TreeType> leafManager(*boundary); leafManager.foreach(applyNeumann); // apply the background value to the gradient if supplied if (backgroundVelocity != zeroVal<Vec3T>()) { auto applyBackgroundVelocity = [&backgroundVelocity] ( typename Vec3GridT::TreeType::LeafNodeType& leaf, size_t) { for (auto it = leaf.beginValueOn(); it; ++it) { it.setValue(it.getValue() - backgroundVelocity); } }; typename tree::LeafManager<typename Vec3GridT::TreeType> leafManager2(gradient->tree()); leafManager2.foreach(applyBackgroundVelocity); } return gradient; } //////////////////////////////////////// } // namespace tools 
} // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_POTENTIAL_FLOW_HAS_BEEN_INCLUDED
16,024
C
39.467172
99
0.704631
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/RayTracer.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file RayTracer.h /// /// @author Ken Museth /// /// @brief Defines two simple but multithreaded renders, a level-set /// ray tracer and a volume render. To support these renders we also define /// perspective and orthographic cameras (both designed to mimic a Houdini camera), /// a Film class and some rather naive shaders. /// /// @note These classes are included mainly as reference implementations for /// ray-tracing of OpenVDB volumes. In other words they are not intended for /// production-quality rendering, but could be used for fast pre-visualization /// or as a starting point for a more serious render. #ifndef OPENVDB_TOOLS_RAYTRACER_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_RAYTRACER_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/math/BBox.h> #include <openvdb/math/Ray.h> #include <openvdb/math/Math.h> #include <openvdb/tools/RayIntersector.h> #include <openvdb/tools/Interpolation.h> #include <deque> #include <iostream> #include <fstream> #include <limits> #include <memory> #include <string> #include <type_traits> #include <vector> #ifdef OPENVDB_TOOLS_RAYTRACER_USE_EXR #include <OpenEXR/ImfPixelType.h> #include <OpenEXR/ImfChannelList.h> #include <OpenEXR/ImfOutputFile.h> #include <OpenEXR/ImfHeader.h> #include <OpenEXR/ImfFrameBuffer.h> #endif namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { // Forward declarations class BaseCamera; class BaseShader; /// @brief Ray-trace a volume. template<typename GridT> inline void rayTrace(const GridT&, const BaseShader&, BaseCamera&, size_t pixelSamples = 1, unsigned int seed = 0, bool threaded = true); /// @brief Ray-trace a volume using a given ray intersector. 
template<typename GridT, typename IntersectorT> inline void rayTrace(const GridT&, const IntersectorT&, const BaseShader&, BaseCamera&, size_t pixelSamples = 1, unsigned int seed = 0, bool threaded = true); ///////////////////////////////LEVEL SET RAY TRACER /////////////////////////////////////// /// @brief A (very) simple multithreaded ray tracer specifically for narrow-band level sets. /// @details Included primarily as a reference implementation. template<typename GridT, typename IntersectorT = tools::LevelSetRayIntersector<GridT> > class LevelSetRayTracer { public: using GridType = GridT; using Vec3Type = typename IntersectorT::Vec3Type; using RayType = typename IntersectorT::RayType; /// @brief Constructor based on an instance of the grid to be rendered. LevelSetRayTracer(const GridT& grid, const BaseShader& shader, BaseCamera& camera, size_t pixelSamples = 1, unsigned int seed = 0); /// @brief Constructor based on an instance of the intersector /// performing the ray-intersections. LevelSetRayTracer(const IntersectorT& inter, const BaseShader& shader, BaseCamera& camera, size_t pixelSamples = 1, unsigned int seed = 0); /// @brief Copy constructor LevelSetRayTracer(const LevelSetRayTracer& other); /// @brief Destructor ~LevelSetRayTracer(); /// @brief Set the level set grid to be ray-traced void setGrid(const GridT& grid); /// @brief Set the intersector that performs the actual /// intersection of the rays against the narrow-band level set. void setIntersector(const IntersectorT& inter); /// @brief Set the shader derived from the abstract BaseShader class. /// /// @note The shader is not assumed to be thread-safe so each /// thread will get its only deep copy. For instance it could /// contains a ValueAccessor into another grid with auxiliary /// shading information. Thus, make sure it is relatively /// light-weight and efficient to copy (which is the case for ValueAccesors). 
void setShader(const BaseShader& shader); /// @brief Set the camera derived from the abstract BaseCamera class. void setCamera(BaseCamera& camera); /// @brief Set the number of pixel samples and the seed for /// jittered sub-rays. A value larger than one implies /// anti-aliasing by jittered super-sampling. /// @throw ValueError if pixelSamples is equal to zero. void setPixelSamples(size_t pixelSamples, unsigned int seed = 0); /// @brief Perform the actual (potentially multithreaded) ray-tracing. void render(bool threaded = true) const; /// @brief Public method required by tbb::parallel_for. /// @warning Never call it directly. void operator()(const tbb::blocked_range<size_t>& range) const; private: const bool mIsMaster; double* mRand; IntersectorT mInter; std::unique_ptr<const BaseShader> mShader; BaseCamera* mCamera; size_t mSubPixels; };// LevelSetRayTracer ///////////////////////////////VOLUME RENDER /////////////////////////////////////// /// @brief A (very) simple multithreaded volume render specifically for scalar density. /// @details Included primarily as a reference implementation. /// @note It will only compile if the IntersectorT is templated on a Grid with a /// floating-point voxel type. template <typename IntersectorT, typename SamplerT = tools::BoxSampler> class VolumeRender { public: using GridType = typename IntersectorT::GridType; using RayType = typename IntersectorT::RayType; using ValueType = typename GridType::ValueType; using AccessorType = typename GridType::ConstAccessor; using SamplerType = tools::GridSampler<AccessorType, SamplerT>; static_assert(std::is_floating_point<ValueType>::value, "VolumeRender requires a floating-point-valued grid"); /// @brief Constructor taking an intersector and a base camera. 
VolumeRender(const IntersectorT& inter, BaseCamera& camera); /// @brief Copy constructor which creates a thread-safe clone VolumeRender(const VolumeRender& other); /// @brief Perform the actual (potentially multithreaded) volume rendering. void render(bool threaded=true) const; /// @brief Set the camera derived from the abstract BaseCamera class. void setCamera(BaseCamera& camera) { mCamera = &camera; } /// @brief Set the intersector that performs the actual /// intersection of the rays against the volume. void setIntersector(const IntersectorT& inter); /// @brief Set the vector components of a directional light source /// @throw ArithmeticError if input is a null vector. void setLightDir(Real x, Real y, Real z) { mLightDir = Vec3R(x,y,z).unit(); } /// @brief Set the color of the directional light source. void setLightColor(Real r, Real g, Real b) { mLightColor = Vec3R(r,g,b); } /// @brief Set the integration step-size in voxel units for the primay ray. void setPrimaryStep(Real primaryStep) { mPrimaryStep = primaryStep; } /// @brief Set the integration step-size in voxel units for the primay ray. void setShadowStep(Real shadowStep) { mShadowStep = shadowStep; } /// @brief Set Scattering coefficients. void setScattering(Real x, Real y, Real z) { mScattering = Vec3R(x,y,z); } /// @brief Set absorption coefficients. void setAbsorption(Real x, Real y, Real z) { mAbsorption = Vec3R(x,y,z); } /// @brief Set parameter that imitates multi-scattering. A value /// of zero implies no multi-scattering. void setLightGain(Real gain) { mLightGain = gain; } /// @brief Set the cut-off value for density and transmittance. void setCutOff(Real cutOff) { mCutOff = cutOff; } /// @brief Print parameters, statistics, memory usage and other information. 
/// @param os a stream to which to write textual information /// @param verboseLevel 1: print parameters only; 2: include grid /// statistics; 3: include memory usage void print(std::ostream& os = std::cout, int verboseLevel = 1); /// @brief Public method required by tbb::parallel_for. /// @warning Never call it directly. void operator()(const tbb::blocked_range<size_t>& range) const; private: AccessorType mAccessor; BaseCamera* mCamera; std::unique_ptr<IntersectorT> mPrimary, mShadow; Real mPrimaryStep, mShadowStep, mCutOff, mLightGain; Vec3R mLightDir, mLightColor, mAbsorption, mScattering; };//VolumeRender //////////////////////////////////////// FILM //////////////////////////////////////// /// @brief A simple class that allows for concurrent writes to pixels in an image, /// background initialization of the image, and PPM or EXR file output. class Film { public: /// @brief Floating-point RGBA components in the range [0, 1]. /// @details This is our preferred representation for color processing. 
struct RGBA { using ValueT = float; RGBA() : r(0), g(0), b(0), a(1) {} explicit RGBA(ValueT intensity) : r(intensity), g(intensity), b(intensity), a(1) {} RGBA(ValueT _r, ValueT _g, ValueT _b, ValueT _a = static_cast<ValueT>(1.0)): r(_r), g(_g), b(_b), a(_a) {} RGBA(double _r, double _g, double _b, double _a = 1.0) : r(static_cast<ValueT>(_r)) , g(static_cast<ValueT>(_g)) , b(static_cast<ValueT>(_b)) , a(static_cast<ValueT>(_a)) {} RGBA operator* (ValueT scale) const { return RGBA(r*scale, g*scale, b*scale);} RGBA operator+ (const RGBA& rhs) const { return RGBA(r+rhs.r, g+rhs.g, b+rhs.b);} RGBA operator* (const RGBA& rhs) const { return RGBA(r*rhs.r, g*rhs.g, b*rhs.b);} RGBA& operator+=(const RGBA& rhs) { r+=rhs.r; g+=rhs.g; b+=rhs.b; a+=rhs.a; return *this;} void over(const RGBA& rhs) { const float s = rhs.a*(1.0f-a); r = a*r+s*rhs.r; g = a*g+s*rhs.g; b = a*b+s*rhs.b; a = a + s; } ValueT r, g, b, a; }; Film(size_t width, size_t height) : mWidth(width), mHeight(height), mSize(width*height), mPixels(new RGBA[mSize]) { } Film(size_t width, size_t height, const RGBA& bg) : mWidth(width), mHeight(height), mSize(width*height), mPixels(new RGBA[mSize]) { this->fill(bg); } const RGBA& pixel(size_t w, size_t h) const { assert(w < mWidth); assert(h < mHeight); return mPixels[w + h*mWidth]; } RGBA& pixel(size_t w, size_t h) { assert(w < mWidth); assert(h < mHeight); return mPixels[w + h*mWidth]; } void fill(const RGBA& rgb=RGBA(0)) { for (size_t i=0; i<mSize; ++i) mPixels[i] = rgb; } void checkerboard(const RGBA& c1=RGBA(0.3f), const RGBA& c2=RGBA(0.6f), size_t size=32) { RGBA *p = mPixels.get(); for (size_t j = 0; j < mHeight; ++j) { for (size_t i = 0; i < mWidth; ++i, ++p) { *p = ((i & size) ^ (j & size)) ? 
c1 : c2; } } } void savePPM(const std::string& fileName) { std::string name(fileName); if (name.find_last_of(".") == std::string::npos) name.append(".ppm"); std::unique_ptr<unsigned char[]> buffer(new unsigned char[3*mSize]); unsigned char *tmp = buffer.get(), *q = tmp; RGBA* p = mPixels.get(); size_t n = mSize; while (n--) { *q++ = static_cast<unsigned char>(255.0f*(*p ).r); *q++ = static_cast<unsigned char>(255.0f*(*p ).g); *q++ = static_cast<unsigned char>(255.0f*(*p++).b); } std::ofstream os(name.c_str(), std::ios_base::binary); if (!os.is_open()) { std::cerr << "Error opening PPM file \"" << name << "\"" << std::endl; return; } os << "P6\n" << mWidth << " " << mHeight << "\n255\n"; os.write(reinterpret_cast<const char*>(&(*tmp)), 3 * mSize * sizeof(unsigned char)); } #ifdef OPENVDB_TOOLS_RAYTRACER_USE_EXR void saveEXR(const std::string& fileName, size_t compression = 2, size_t threads = 8) { std::string name(fileName); if (name.find_last_of(".") == std::string::npos) name.append(".exr"); if (threads>0) Imf::setGlobalThreadCount(threads); Imf::Header header(mWidth, mHeight); if (compression==0) header.compression() = Imf::NO_COMPRESSION; if (compression==1) header.compression() = Imf::RLE_COMPRESSION; if (compression>=2) header.compression() = Imf::ZIP_COMPRESSION; header.channels().insert("R", Imf::Channel(Imf::FLOAT)); header.channels().insert("G", Imf::Channel(Imf::FLOAT)); header.channels().insert("B", Imf::Channel(Imf::FLOAT)); header.channels().insert("A", Imf::Channel(Imf::FLOAT)); Imf::FrameBuffer framebuffer; framebuffer.insert("R", Imf::Slice( Imf::FLOAT, (char *) &(mPixels[0].r), sizeof (RGBA), sizeof (RGBA) * mWidth)); framebuffer.insert("G", Imf::Slice( Imf::FLOAT, (char *) &(mPixels[0].g), sizeof (RGBA), sizeof (RGBA) * mWidth)); framebuffer.insert("B", Imf::Slice( Imf::FLOAT, (char *) &(mPixels[0].b), sizeof (RGBA), sizeof (RGBA) * mWidth)); framebuffer.insert("A", Imf::Slice( Imf::FLOAT, (char *) &(mPixels[0].a), sizeof (RGBA), sizeof (RGBA) * 
mWidth)); Imf::OutputFile file(name.c_str(), header); file.setFrameBuffer(framebuffer); file.writePixels(mHeight); } #endif size_t width() const { return mWidth; } size_t height() const { return mHeight; } size_t numPixels() const { return mSize; } const RGBA* pixels() const { return mPixels.get(); } private: size_t mWidth, mHeight, mSize; std::unique_ptr<RGBA[]> mPixels; };// Film //////////////////////////////////////// CAMERAS //////////////////////////////////////// /// Abstract base class for the perspective and orthographic cameras class BaseCamera { public: BaseCamera(Film& film, const Vec3R& rotation, const Vec3R& translation, double frameWidth, double nearPlane, double farPlane) : mFilm(&film) , mScaleWidth(frameWidth) , mScaleHeight(frameWidth * double(film.height()) / double(film.width())) { assert(nearPlane > 0 && farPlane > nearPlane); mScreenToWorld.accumPostRotation(math::X_AXIS, rotation[0] * M_PI / 180.0); mScreenToWorld.accumPostRotation(math::Y_AXIS, rotation[1] * M_PI / 180.0); mScreenToWorld.accumPostRotation(math::Z_AXIS, rotation[2] * M_PI / 180.0); mScreenToWorld.accumPostTranslation(translation); this->initRay(nearPlane, farPlane); } virtual ~BaseCamera() {} Film::RGBA& pixel(size_t i, size_t j) { return mFilm->pixel(i, j); } size_t width() const { return mFilm->width(); } size_t height() const { return mFilm->height(); } /// Rotate the camera so its negative z-axis points at xyz and its /// y axis is in the plane of the xyz and up vectors. In other /// words the camera will look at xyz and use up as the /// horizontal direction. void lookAt(const Vec3R& xyz, const Vec3R& up = Vec3R(0.0, 1.0, 0.0)) { const Vec3R orig = mScreenToWorld.applyMap(Vec3R(0.0)); const Vec3R dir = orig - xyz; try { Mat4d xform = math::aim<Mat4d>(dir, up); xform.postTranslate(orig); mScreenToWorld = math::AffineMap(xform); this->initRay(mRay.t0(), mRay.t1()); } catch (...) 
{} } Vec3R rasterToScreen(double i, double j, double z) const { return Vec3R( (2 * i / double(mFilm->width()) - 1) * mScaleWidth, (1 - 2 * j / double(mFilm->height())) * mScaleHeight, z ); } /// @brief Return a Ray in world space given the pixel indices and /// optional offsets in the range [0, 1]. An offset of 0.5 corresponds /// to the center of the pixel. virtual math::Ray<double> getRay( size_t i, size_t j, double iOffset = 0.5, double jOffset = 0.5) const = 0; protected: void initRay(double t0, double t1) { mRay.setTimes(t0, t1); mRay.setEye(mScreenToWorld.applyMap(Vec3R(0.0))); mRay.setDir(mScreenToWorld.applyJacobian(Vec3R(0.0, 0.0, -1.0))); } Film* mFilm; double mScaleWidth, mScaleHeight; math::Ray<double> mRay; math::AffineMap mScreenToWorld; };// BaseCamera class PerspectiveCamera: public BaseCamera { public: /// @brief Constructor /// @param film film (i.e. image) defining the pixel resolution /// @param rotation rotation in degrees of the camera in world space /// (applied in x, y, z order) /// @param translation translation of the camera in world-space units, /// applied after rotation /// @param focalLength focal length of the camera in mm /// (the default of 50mm corresponds to Houdini's default camera) /// @param aperture width in mm of the frame, i.e., the visible field /// (the default 41.2136 mm corresponds to Houdini's default camera) /// @param nearPlane depth of the near clipping plane in world-space units /// @param farPlane depth of the far clipping plane in world-space units /// /// @details If no rotation or translation is provided, the camera is placed /// at (0,0,0) in world space and points in the direction of the negative z axis. 
PerspectiveCamera(Film& film, const Vec3R& rotation = Vec3R(0.0), const Vec3R& translation = Vec3R(0.0), double focalLength = 50.0, double aperture = 41.2136, double nearPlane = 1e-3, double farPlane = std::numeric_limits<double>::max()) : BaseCamera(film, rotation, translation, 0.5*aperture/focalLength, nearPlane, farPlane) { } ~PerspectiveCamera() override = default; /// @brief Return a Ray in world space given the pixel indices and /// optional offsets in the range [0,1]. An offset of 0.5 corresponds /// to the center of the pixel. math::Ray<double> getRay( size_t i, size_t j, double iOffset = 0.5, double jOffset = 0.5) const override { math::Ray<double> ray(mRay); Vec3R dir = BaseCamera::rasterToScreen(Real(i) + iOffset, Real(j) + jOffset, -1.0); dir = BaseCamera::mScreenToWorld.applyJacobian(dir); dir.normalize(); ray.scaleTimes(1.0/dir.dot(ray.dir())); ray.setDir(dir); return ray; } /// @brief Return the horizontal field of view in degrees given a /// focal lenth in mm and the specified aperture in mm. static double focalLengthToFieldOfView(double length, double aperture) { return 360.0 / M_PI * atan(aperture/(2.0*length)); } /// @brief Return the focal length in mm given a horizontal field of /// view in degrees and the specified aperture in mm. static double fieldOfViewToFocalLength(double fov, double aperture) { return aperture/(2.0*(tan(fov * M_PI / 360.0))); } };// PerspectiveCamera class OrthographicCamera: public BaseCamera { public: /// @brief Constructor /// @param film film (i.e. 
image) defining the pixel resolution /// @param rotation rotation in degrees of the camera in world space /// (applied in x, y, z order) /// @param translation translation of the camera in world-space units, /// applied after rotation /// @param frameWidth width in of the frame in world-space units /// @param nearPlane depth of the near clipping plane in world-space units /// @param farPlane depth of the far clipping plane in world-space units /// /// @details If no rotation or translation is provided, the camera is placed /// at (0,0,0) in world space and points in the direction of the negative z axis. OrthographicCamera(Film& film, const Vec3R& rotation = Vec3R(0.0), const Vec3R& translation = Vec3R(0.0), double frameWidth = 1.0, double nearPlane = 1e-3, double farPlane = std::numeric_limits<double>::max()) : BaseCamera(film, rotation, translation, 0.5*frameWidth, nearPlane, farPlane) { } ~OrthographicCamera() override = default; math::Ray<double> getRay( size_t i, size_t j, double iOffset = 0.5, double jOffset = 0.5) const override { math::Ray<double> ray(mRay); Vec3R eye = BaseCamera::rasterToScreen(Real(i) + iOffset, Real(j) + jOffset, 0.0); ray.setEye(BaseCamera::mScreenToWorld.applyMap(eye)); return ray; } };// OrthographicCamera //////////////////////////////////////// SHADERS //////////////////////////////////////// /// Abstract base class for the shaders class BaseShader { public: using RayT = math::Ray<Real>; BaseShader() {} BaseShader(const BaseShader&) = default; virtual ~BaseShader() = default; /// @brief Defines the interface of the virtual function that returns a RGB color. /// @param xyz World position of the intersection point. /// @param nml Normal in world space at the intersection point. /// @param dir Direction of the ray in world space. virtual Film::RGBA operator()(const Vec3R& xyz, const Vec3R& nml, const Vec3R& dir) const = 0; virtual BaseShader* copy() const = 0; }; /// @brief Shader that produces a simple matte. 
/// /// @details The color can either be constant (if GridT = /// Film::RGBA which is the default) or defined in a separate Vec3 /// color grid. Use SamplerType to define the order of interpolation /// (default is zero order, i.e. closes-point). template<typename GridT = Film::RGBA, typename SamplerType = tools::PointSampler> class MatteShader: public BaseShader { public: MatteShader(const GridT& grid) : mAcc(grid.getAccessor()), mXform(&grid.transform()) {} MatteShader(const MatteShader&) = default; ~MatteShader() override = default; Film::RGBA operator()(const Vec3R& xyz, const Vec3R&, const Vec3R&) const override { typename GridT::ValueType v = zeroVal<typename GridT::ValueType>(); SamplerType::sample(mAcc, mXform->worldToIndex(xyz), v); return Film::RGBA(v[0], v[1], v[2]); } BaseShader* copy() const override { return new MatteShader<GridT, SamplerType>(*this); } private: typename GridT::ConstAccessor mAcc; const math::Transform* mXform; }; // Template specialization using a constant color of the material. template<typename SamplerType> class MatteShader<Film::RGBA, SamplerType>: public BaseShader { public: MatteShader(const Film::RGBA& c = Film::RGBA(1.0f)): mRGBA(c) {} MatteShader(const MatteShader&) = default; ~MatteShader() override = default; Film::RGBA operator()(const Vec3R&, const Vec3R&, const Vec3R&) const override { return mRGBA; } BaseShader* copy() const override { return new MatteShader<Film::RGBA, SamplerType>(*this); } private: const Film::RGBA mRGBA; }; /// @brief Color shader that treats the surface normal (x, y, z) as an /// RGB color. /// /// @details The color can either be constant (if GridT = /// Film::RGBA which is the default) or defined in a separate Vec3 /// color grid. Use SamplerType to define the order of interpolation /// (default is zero order, i.e. closes-point). 
template<typename GridT = Film::RGBA, typename SamplerType = tools::PointSampler> class NormalShader: public BaseShader { public: NormalShader(const GridT& grid) : mAcc(grid.getAccessor()), mXform(&grid.transform()) {} NormalShader(const NormalShader&) = default; ~NormalShader() override = default; Film::RGBA operator()(const Vec3R& xyz, const Vec3R& normal, const Vec3R&) const override { typename GridT::ValueType v = zeroVal<typename GridT::ValueType>(); SamplerType::sample(mAcc, mXform->worldToIndex(xyz), v); return Film::RGBA(v[0]*(normal[0]+1.0), v[1]*(normal[1]+1.0), v[2]*(normal[2]+1.0)); } BaseShader* copy() const override { return new NormalShader<GridT, SamplerType>(*this); } private: typename GridT::ConstAccessor mAcc; const math::Transform* mXform; }; // Template specialization using a constant color of the material. template<typename SamplerType> class NormalShader<Film::RGBA, SamplerType>: public BaseShader { public: NormalShader(const Film::RGBA& c = Film::RGBA(1.0f)) : mRGBA(c*0.5f) {} NormalShader(const NormalShader&) = default; ~NormalShader() override = default; Film::RGBA operator()(const Vec3R&, const Vec3R& normal, const Vec3R&) const override { return mRGBA * Film::RGBA(normal[0] + 1.0, normal[1] + 1.0, normal[2] + 1.0); } BaseShader* copy() const override { return new NormalShader<Film::RGBA, SamplerType>(*this); } private: const Film::RGBA mRGBA; }; /// @brief Color shader that treats position (x, y, z) as an RGB color in a /// cube defined from an axis-aligned bounding box in world space. /// /// @details The color can either be constant (if GridT = /// Film::RGBA which is the default) or defined in a separate Vec3 /// color grid. Use SamplerType to define the order of interpolation /// (default is zero order, i.e. closes-point). 
template<typename GridT = Film::RGBA, typename SamplerType = tools::PointSampler> class PositionShader: public BaseShader { public: PositionShader(const math::BBox<Vec3R>& bbox, const GridT& grid) : mMin(bbox.min()) , mInvDim(1.0/bbox.extents()) , mAcc(grid.getAccessor()) , mXform(&grid.transform()) { } PositionShader(const PositionShader&) = default; ~PositionShader() override = default; Film::RGBA operator()(const Vec3R& xyz, const Vec3R&, const Vec3R&) const override { typename GridT::ValueType v = zeroVal<typename GridT::ValueType>(); SamplerType::sample(mAcc, mXform->worldToIndex(xyz), v); const Vec3R rgb = (xyz - mMin) * mInvDim; return Film::RGBA(v[0],v[1],v[2]) * Film::RGBA(rgb[0], rgb[1], rgb[2]); } BaseShader* copy() const override { return new PositionShader<GridT, SamplerType>(*this); } private: const Vec3R mMin, mInvDim; typename GridT::ConstAccessor mAcc; const math::Transform* mXform; }; // Template specialization using a constant color of the material. template<typename SamplerType> class PositionShader<Film::RGBA, SamplerType>: public BaseShader { public: PositionShader(const math::BBox<Vec3R>& bbox, const Film::RGBA& c = Film::RGBA(1.0f)) : mMin(bbox.min()), mInvDim(1.0/bbox.extents()), mRGBA(c) {} PositionShader(const PositionShader&) = default; ~PositionShader() override = default; Film::RGBA operator()(const Vec3R& xyz, const Vec3R&, const Vec3R&) const override { const Vec3R rgb = (xyz - mMin)*mInvDim; return mRGBA*Film::RGBA(rgb[0], rgb[1], rgb[2]); } BaseShader* copy() const override { return new PositionShader<Film::RGBA, SamplerType>(*this); } private: const Vec3R mMin, mInvDim; const Film::RGBA mRGBA; }; /// @brief Simple diffuse Lambertian surface shader. /// /// @details The diffuse color can either be constant (if GridT = /// Film::RGBA which is the default) or defined in a separate Vec3 /// color grid. 
/// Lambertian implies that the (radiant) intensity is
/// directly proportional to the cosine of the angle between the
/// surface normal and the direction of the light source. Use
/// SamplerType to define the order of interpolation (default is
/// zero order, i.e. closest-point).
template<typename GridT = Film::RGBA,
         typename SamplerType = tools::PointSampler>
class DiffuseShader: public BaseShader
{
public:
    /// Construct from a Vec3 diffuse color grid sampled at every hit point.
    DiffuseShader(const GridT& grid): mAcc(grid.getAccessor()), mXform(&grid.transform()) {}
    DiffuseShader(const DiffuseShader&) = default;
    ~DiffuseShader() override = default;
    /// Sample the diffuse color at @a xyz and scale it by |normal . rayDir|.
    Film::RGBA operator()(const Vec3R& xyz, const Vec3R& normal, const Vec3R& rayDir) const override
    {
        typename GridT::ValueType v = zeroVal<typename GridT::ValueType>();
        SamplerType::sample(mAcc, mXform->worldToIndex(xyz), v);
        // We take the abs of the dot product corresponding to having
        // light sources at +/- rayDir, i.e., two-sided shading.
        return Film::RGBA(v[0],v[1],v[2])
            * static_cast<Film::RGBA::ValueT>(math::Abs(normal.dot(rayDir)));
    }
    /// Return a heap-allocated clone of this shader.
    BaseShader* copy() const override { return new DiffuseShader<GridT, SamplerType>(*this); }

private:
    typename GridT::ConstAccessor mAcc;
    const math::Transform* mXform;
};

// Template specialization using a constant color of the material.
template <typename SamplerType>
class DiffuseShader<Film::RGBA, SamplerType>: public BaseShader
{
public:
    /// Construct from a constant diffuse color (defaults to white).
    DiffuseShader(const Film::RGBA& d = Film::RGBA(1.0f)): mRGBA(d) {}
    DiffuseShader(const DiffuseShader&) = default;
    ~DiffuseShader() override = default;
    /// Return the constant diffuse color scaled by |normal . rayDir|.
    Film::RGBA operator()(const Vec3R&, const Vec3R& normal, const Vec3R& rayDir) const override
    {
        // We assume a single directional light source at the camera,
        // so the cosine of the angle between the surface normal and the
        // direction of the light source becomes the dot product of the
        // surface normal and inverse direction of the ray. We also ignore
        // negative dot products, corresponding to strict one-sided shading.
        //return mRGBA * math::Max(0.0, normal.dot(-rayDir));

        // We take the abs of the dot product corresponding to having
        // light sources at +/- rayDir, i.e., two-sided shading.
        return mRGBA * static_cast<Film::RGBA::ValueT>(math::Abs(normal.dot(rayDir)));
    }
    /// Return a heap-allocated clone of this shader.
    BaseShader* copy() const override { return new DiffuseShader<Film::RGBA, SamplerType>(*this); }

private:
    const Film::RGBA mRGBA;
};


//////////////////////////////////////// RAYTRACER ////////////////////////////////////////

/// @brief Convenience function that ray-traces a level set grid with a
/// default LevelSetRayIntersector built from @a grid.
template<typename GridT>
inline void rayTrace(const GridT& grid,
                     const BaseShader& shader,
                     BaseCamera& camera,
                     size_t pixelSamples,
                     unsigned int seed,
                     bool threaded)
{
    LevelSetRayTracer<GridT, tools::LevelSetRayIntersector<GridT> >
        tracer(grid, shader, camera, pixelSamples, seed);
    tracer.render(threaded);
}

/// @brief Convenience function that ray-traces with a caller-supplied
/// intersector; the grid argument is unused and serves only for type deduction.
template<typename GridT, typename IntersectorT>
inline void rayTrace(const GridT&,
                     const IntersectorT& inter,
                     const BaseShader& shader,
                     BaseCamera& camera,
                     size_t pixelSamples,
                     unsigned int seed,
                     bool threaded)
{
    LevelSetRayTracer<GridT, IntersectorT> tracer(inter, shader, camera, pixelSamples, seed);
    tracer.render(threaded);
}


//////////////////////////////////////// LevelSetRayTracer ////////////////////////////////////////

/// Construct a master tracer from a grid; builds an intersector internally.
template<typename GridT, typename IntersectorT>
inline LevelSetRayTracer<GridT, IntersectorT>::
LevelSetRayTracer(const GridT& grid,
                  const BaseShader& shader,
                  BaseCamera& camera,
                  size_t pixelSamples,
                  unsigned int seed)
    : mIsMaster(true),
      mRand(nullptr),
      mInter(grid),
      mShader(shader.copy()),
      mCamera(&camera)
{
    this->setPixelSamples(pixelSamples, seed);
}

/// Construct a master tracer from an existing intersector.
template<typename GridT, typename IntersectorT>
inline LevelSetRayTracer<GridT, IntersectorT>::
LevelSetRayTracer(const IntersectorT& inter,
                  const BaseShader& shader,
                  BaseCamera& camera,
                  size_t pixelSamples,
                  unsigned int seed)
    : mIsMaster(true),
      mRand(nullptr),
      mInter(inter),
      mShader(shader.copy()),
      mCamera(&camera)
{
    this->setPixelSamples(pixelSamples, seed);
}

/// Copy constructor used by TBB for worker copies: shares the master's
/// jitter table (mRand) and marks the copy as non-master so it won't delete it.
template<typename GridT, typename IntersectorT>
inline LevelSetRayTracer<GridT, IntersectorT>::
LevelSetRayTracer(const LevelSetRayTracer& other) :
    mIsMaster(false),
    mRand(other.mRand),
    mInter(other.mInter),
    mShader(other.mShader->copy()),
    mCamera(other.mCamera),
    mSubPixels(other.mSubPixels)
{
}

/// Only the master instance owns (and frees) the shared jitter table.
template<typename GridT, typename IntersectorT>
inline LevelSetRayTracer<GridT, IntersectorT>::
~LevelSetRayTracer()
{
    if (mIsMaster) delete [] mRand;
}

/// Replace the intersector with one built from @a grid (master only).
template<typename GridT, typename IntersectorT>
inline void LevelSetRayTracer<GridT, IntersectorT>::
setGrid(const GridT& grid)
{
    assert(mIsMaster);
    mInter = IntersectorT(grid);
}

/// Replace the intersector (master only).
template<typename GridT, typename IntersectorT>
inline void LevelSetRayTracer<GridT, IntersectorT>::
setIntersector(const IntersectorT& inter)
{
    assert(mIsMaster);
    mInter = inter;
}

/// Replace the shader with a clone of @a shader (master only).
template<typename GridT, typename IntersectorT>
inline void LevelSetRayTracer<GridT, IntersectorT>::
setShader(const BaseShader& shader)
{
    assert(mIsMaster);
    mShader.reset(shader.copy());
}

/// Point the tracer at a different camera (master only).
template<typename GridT, typename IntersectorT>
inline void LevelSetRayTracer<GridT, IntersectorT>::
setCamera(BaseCamera& camera)
{
    assert(mIsMaster);
    mCamera = &camera;
}

/// @brief Set the number of samples per pixel; values > 1 enable jittered
/// super-sampling and (re)build the 16-entry random-offset table.
/// @throw ValueError if @a pixelSamples is zero.
template<typename GridT, typename IntersectorT>
inline void LevelSetRayTracer<GridT, IntersectorT>::
setPixelSamples(size_t pixelSamples, unsigned int seed)
{
    assert(mIsMaster);
    if (pixelSamples == 0) {
        OPENVDB_THROW(ValueError, "pixelSamples must be larger than zero!");
    }
    mSubPixels = pixelSamples - 1;
    delete [] mRand;
    if (mSubPixels > 0) {
        mRand = new double[16];
        math::Rand01<double> rand(seed);//offsets for anti-aliasing by jittered super-sampling
        for (size_t i=0; i<16; ++i) mRand[i] = rand();
    } else {
        mRand = nullptr;
    }
}

/// Render the image, one TBB task per block of scanlines when threaded.
template<typename GridT, typename IntersectorT>
inline void LevelSetRayTracer<GridT, IntersectorT>::
render(bool threaded) const
{
    tbb::blocked_range<size_t> range(0, mCamera->height());
    threaded ? tbb::parallel_for(range, *this) : (*this)(range);
}

/// TBB kernel: shade every pixel in the given range of scanlines,
/// averaging the primary ray with any jittered sub-pixel rays.
template<typename GridT, typename IntersectorT>
inline void LevelSetRayTracer<GridT, IntersectorT>::
operator()(const tbb::blocked_range<size_t>& range) const
{
    const BaseShader& shader = *mShader;
    Vec3Type xyz, nml;
    const float frac = 1.0f / (1.0f + float(mSubPixels));
    for (size_t j=range.begin(), n=0, je = range.end(); j<je; ++j) {
        for (size_t i=0, ie = mCamera->width(); i<ie; ++i) {
            Film::RGBA& bg = mCamera->pixel(i,j);
            RayType ray = mCamera->getRay(i, j);//primary ray
            Film::RGBA c = mInter.intersectsWS(ray, xyz, nml) ? shader(xyz, nml, ray.dir()) : bg;
            for (size_t k=0; k<mSubPixels; ++k, n +=2 ) {
                ray = mCamera->getRay(i, j, mRand[n & 15], mRand[(n+1) & 15]);
                c += mInter.intersectsWS(ray, xyz, nml) ? shader(xyz, nml, ray.dir()) : bg;
            }//loop over sub-pixels
            bg = c*frac;
        }//loop over image width
    }//loop over image height
}

//////////////////////////////////////// VolumeRender ////////////////////////////////////////

/// Construct a volume renderer; both the primary and shadow intersectors
/// are private copies of @a inter.
template<typename IntersectorT, typename SampleT>
inline VolumeRender<IntersectorT, SampleT>::
VolumeRender(const IntersectorT& inter, BaseCamera& camera)
    : mAccessor(inter.grid().getConstAccessor())
    , mCamera(&camera)
    , mPrimary(new IntersectorT(inter))
    , mShadow(new IntersectorT(inter))
    , mPrimaryStep(1.0)
    , mShadowStep(3.0)
    , mCutOff(0.005)
    , mLightGain(0.2)
    , mLightDir(Vec3R(0.3, 0.3, 0).unit())
    , mLightColor(0.7, 0.7, 0.7)
    , mAbsorption(0.1)
    , mScattering(1.5)
{
}

/// Copy constructor used by TBB for worker copies: deep-copies both
/// intersectors so each thread marches independently.
template<typename IntersectorT, typename SampleT>
inline VolumeRender<IntersectorT, SampleT>::
VolumeRender(const VolumeRender& other)
    : mAccessor(other.mAccessor)
    , mCamera(other.mCamera)
    , mPrimary(new IntersectorT(*(other.mPrimary)))
    , mShadow(new IntersectorT(*(other.mShadow)))
    , mPrimaryStep(other.mPrimaryStep)
    , mShadowStep(other.mShadowStep)
    , mCutOff(other.mCutOff)
    , mLightGain(other.mLightGain)
    , mLightDir(other.mLightDir)
    , mLightColor(other.mLightColor)
    , mAbsorption(other.mAbsorption)
    , mScattering(other.mScattering)
{
}
/// Print the render parameters, then forward to the primary intersector's print().
template<typename IntersectorT, typename SampleT>
inline void VolumeRender<IntersectorT, SampleT>::
print(std::ostream& os, int verboseLevel)
{
    if (verboseLevel>0) {
        os << "\nPrimary step: " << mPrimaryStep
           << "\nShadow step: " << mShadowStep
           << "\nCutoff: " << mCutOff
           << "\nLightGain: " << mLightGain
           << "\nLightDir: " << mLightDir
           << "\nLightColor: " << mLightColor
           << "\nAbsorption: " << mAbsorption
           << "\nScattering: " << mScattering << std::endl;
    }
    mPrimary->print(os, verboseLevel);
}

/// Replace both the primary and shadow intersectors with copies of @a inter.
template<typename IntersectorT, typename SampleT>
inline void VolumeRender<IntersectorT, SampleT>::
setIntersector(const IntersectorT& inter)
{
    mPrimary.reset(new IntersectorT(inter));
    mShadow.reset(new IntersectorT(inter));
}

/// Render the image, one TBB task per block of scanlines when threaded.
template<typename IntersectorT, typename SampleT>
inline void VolumeRender<IntersectorT, SampleT>::
render(bool threaded) const
{
    tbb::blocked_range<size_t> range(0, mCamera->height());
    threaded ? tbb::parallel_for(range, *this) : (*this)(range);
}

/// TBB kernel: single-scattering volume integration. For every pixel a
/// primary ray is marched through the active voxels; at each sample a
/// shadow ray is marched toward the light to attenuate the in-scattered
/// radiance. Luminance is accumulated front to back and both marches
/// terminate early once transmittance drops below the cutoff.
template<typename IntersectorT, typename SampleT>
inline void VolumeRender<IntersectorT, SampleT>::
operator()(const tbb::blocked_range<size_t>& range) const
{
    SamplerType sampler(mAccessor, mShadow->grid().transform());//light-weight wrapper

    // Any variable prefixed with p (or s) means it's associated with a primary (or shadow) ray
    const Vec3R extinction = -mScattering-mAbsorption, One(1.0);
    const Vec3R albedo = mLightColor*mScattering/(mScattering+mAbsorption);//single scattering
    const Real sGain = mLightGain;//in-scattering along shadow ray
    const Real pStep = mPrimaryStep;//Integration step along primary ray in voxel units
    const Real sStep = mShadowStep;//Integration step along shadow ray in voxel units
    const Real cutoff = mCutOff;//Cutoff for density and transmittance

    // For the sake of completeness we show how to use two different
    // methods (hits/march) in VolumeRayIntersector that produce
    // segments along the ray that intersects active values. Comment out
    // the line below to use VolumeRayIntersector::march instead of
    // VolumeRayIntersector::hits.
#define USE_HITS
#ifdef USE_HITS
    std::vector<typename RayType::TimeSpan> pTS, sTS;
    //std::deque<typename RayType::TimeSpan> pTS, sTS;
#endif

    RayType sRay(Vec3R(0), mLightDir);//Shadow ray
    for (size_t j=range.begin(), je = range.end(); j<je; ++j) {
        for (size_t i=0, ie = mCamera->width(); i<ie; ++i) {
            Film::RGBA& bg = mCamera->pixel(i, j);
            bg.a = bg.r = bg.g = bg.b = 0;
            RayType pRay = mCamera->getRay(i, j);// Primary ray
            if( !mPrimary->setWorldRay(pRay)) continue;
            Vec3R pTrans(1.0), pLumi(0.0);
#ifndef USE_HITS
            Real pT0, pT1;
            while (mPrimary->march(pT0, pT1)) {
                for (Real pT = pStep*ceil(pT0/pStep); pT <= pT1; pT += pStep) {
#else
            mPrimary->hits(pTS);
            for (size_t k=0; k<pTS.size(); ++k) {
                Real pT = pStep*ceil(pTS[k].t0/pStep), pT1=pTS[k].t1;
                for (; pT <= pT1; pT += pStep) {
#endif
                    Vec3R pPos = mPrimary->getWorldPos(pT);
                    const Real density = sampler.wsSample(pPos);
                    if (density < cutoff) continue;
                    // Transmittance of this primary-ray step (Beer-Lambert).
                    const Vec3R dT = math::Exp(extinction * density * pStep);
                    Vec3R sTrans(1.0);
                    sRay.setEye(pPos);
                    if( !mShadow->setWorldRay(sRay)) continue;
#ifndef USE_HITS
                    Real sT0, sT1;
                    while (mShadow->march(sT0, sT1)) {
                        for (Real sT = sStep*ceil(sT0/sStep); sT <= sT1; sT+= sStep) {
#else
                    mShadow->hits(sTS);
                    for (size_t l=0; l<sTS.size(); ++l) {
                        Real sT = sStep*ceil(sTS[l].t0/sStep), sT1=sTS[l].t1;
                        for (; sT <= sT1; sT+= sStep) {
#endif
                            const Real d = sampler.wsSample(mShadow->getWorldPos(sT));
                            if (d < cutoff) continue;
                            sTrans *= math::Exp(extinction * d * sStep/(1.0+sT*sGain));
                            if (sTrans.lengthSqr()<cutoff) goto Luminance;//Terminate sRay
                        }//Integration over shadow segment
                    }// Shadow ray march
                Luminance:
                    pLumi += albedo * sTrans * pTrans * (One-dT);
                    pTrans *= dT;
                    if (pTrans.lengthSqr()<cutoff) goto Pixel; // Terminate Ray
                }//Integration over primary segment
            }// Primary ray march
        Pixel:
            bg.r = static_cast<Film::RGBA::ValueT>(pLumi[0]);
            bg.g = static_cast<Film::RGBA::ValueT>(pLumi[1]);
            bg.b = static_cast<Film::RGBA::ValueT>(pLumi[2]);
            bg.a = static_cast<Film::RGBA::ValueT>(1.0f - pTrans.sum()/3.0f);
        }//Horizontal pixel scan
    }//Vertical pixel scan
}

} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TOOLS_RAYTRACER_HAS_BEEN_INCLUDED
42,232
C
37.56895
100
0.623129
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Merge.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file Merge.h /// /// @brief Functions to efficiently merge grids /// /// @author Dan Bailey #ifndef OPENVDB_TOOLS_MERGE_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_MERGE_HAS_BEEN_INCLUDED #include <openvdb/Platform.h> #include <openvdb/Exceptions.h> #include <openvdb/Types.h> #include <openvdb/Grid.h> #include <openvdb/tree/NodeManager.h> #include <unordered_map> #include <unordered_set> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Convenience class that contains a pointer to a tree to be stolen or /// deep copied depending on the tag dispatch class used and a subset of /// methods to retrieve data from the tree. /// /// @details The primary purpose of this class is to be able to create an array /// of TreeToMerge objects that each store a tree to be stolen or a tree to be /// deep-copied in an arbitrary order. Certain operations such as floating-point /// addition are non-associative so the order in which they are merged is /// important for the operation to remain deterministic regardless of how the /// data is being extracted from the tree. /// /// @note Stealing data requires a non-const tree pointer. There is a constructor /// to pass in a tree shared pointer for cases where it is desirable for this class /// to maintain shared ownership. template <typename TreeT> struct TreeToMerge { using TreeType = std::remove_const_t<TreeT>; using RootNodeType = typename TreeType::RootNodeType; using ValueType = typename TreeType::ValueType; using MaskTreeType = typename TreeT::template ValueConverter<ValueMask>::Type; TreeToMerge() = delete; /// @brief Non-const pointer tree constructor for stealing data. TreeToMerge(TreeType& tree, Steal) : mTree(&tree), mSteal(true) { } /// @brief Non-const shared pointer tree constructor for stealing data. 
TreeToMerge(typename TreeType::Ptr treePtr, Steal) : mTreePtr(treePtr), mTree(mTreePtr.get()), mSteal(true) { } /// @brief Const tree pointer constructor for deep-copying data. As the /// tree is not mutable and thus cannot be pruned, a lightweight mask tree /// with the same topology is created that can be pruned to use as a /// reference. Initialization of this mask tree can optionally be disabled /// for delayed construction. TreeToMerge(const TreeType& tree, DeepCopy, bool initialize = true) : mTree(&tree), mSteal(false) { if (mTree && initialize) this->initializeMask(); } /// @brief Non-const tree pointer constructor for deep-copying data. The /// tree is not intended to be modified so is not pruned, instead a /// lightweight mask tree with the same topology is created that can be /// pruned to use as a reference. Initialization of this mask tree can /// optionally be disabled for delayed construction. TreeToMerge(TreeType& tree, DeepCopy tag, bool initialize = true) : TreeToMerge(static_cast<const TreeType&>(tree), tag, initialize) { } /// @brief Reset the non-const tree shared pointer. This is primarily /// used to preserve the order of trees to merge in a container but have /// the data in the tree be lazily loaded or resampled. void reset(typename TreeType::Ptr treePtr, Steal); /// @brief Return a pointer to the tree to be stolen. TreeType* treeToSteal() { return mSteal ? const_cast<TreeType*>(mTree) : nullptr; } /// @brief Return a pointer to the tree to be deep-copied. const TreeType* treeToDeepCopy() { return mSteal ? nullptr : mTree; } /// @brief Retrieve a const pointer to the root node. const RootNodeType* rootPtr() const; /// @brief Return a pointer to the node of type @c NodeT that contains /// voxel (x, y, z). If no such node exists, return @c nullptr. template<typename NodeT> const NodeT* probeConstNode(const Coord& ijk) const; /// @brief Return a pointer to the node of type @c NodeT that contains voxel (x, y, z). 
/// If the tree is non-const, steal the node and replace it with an inactive /// background-value tile. /// If the tree is const, deep-copy the node and modify the mask tree to prune the node. template <typename NodeT> std::unique_ptr<NodeT> stealOrDeepCopyNode(const Coord& ijk); /// @brief Add a tile containing voxel (x, y, z) at the level of NodeT, /// deleting the existing branch if necessary. template <typename NodeT> void addTile(const Coord& ijk, const ValueType& value, bool active); // build a lightweight mask using a union of the const tree where leaf nodes // are converted into active tiles void initializeMask(); // returns true if mask has been initialized bool hasMask() const; // returns MaskTree pointer or nullptr MaskTreeType* mask() { return mMaskTree.ptr.get(); } const MaskTreeType* mask() const { return mMaskTree.ptr.get(); } private: struct MaskPtr; struct MaskUnionOp; typename TreeType::Ptr mTreePtr; const TreeType* mTree; MaskPtr mMaskTree; bool mSteal; }; // struct TreeToMerge /// @brief Wrapper around unique_ptr that deep-copies mask on copy construction template <typename TreeT> struct TreeToMerge<TreeT>::MaskPtr { std::unique_ptr<MaskTreeType> ptr; MaskPtr() = default; ~MaskPtr() = default; MaskPtr(MaskPtr&& other) = default; MaskPtr& operator=(MaskPtr&& other) = default; MaskPtr(const MaskPtr& other) : ptr(bool(other.ptr) ? std::make_unique<MaskTreeType>(*other.ptr) : nullptr) { } MaskPtr& operator=(const MaskPtr& other) { ptr.reset(bool(other.ptr) ? 
std::make_unique<MaskTreeType>(*other.ptr) : nullptr); return *this; } }; /// @brief DynamicNodeManager operator used to generate a mask of the input /// tree, but with dense leaf nodes replaced with active tiles for compactness template <typename TreeT> struct TreeToMerge<TreeT>::MaskUnionOp { using MaskT = MaskTreeType; using RootT = typename MaskT::RootNodeType; using LeafT = typename MaskT::LeafNodeType; explicit MaskUnionOp(const TreeT& tree) : mTree(tree) { } bool operator()(RootT& root, size_t) const; template<typename NodeT> bool operator()(NodeT& node, size_t) const; bool operator()(LeafT&, size_t) const { return false; } private: const TreeT& mTree; }; // struct TreeToMerge<TreeT>::MaskUnionOp //////////////////////////////////////// /// @brief DynamicNodeManager operator to merge trees using a CSG union or intersection. /// @note This class modifies the topology of the tree so is designed to be used /// from DynamicNodeManager::foreachTopDown(). /// @details A union and an intersection are opposite operations to each other so /// implemented in a combined class. Use the CsgUnionOp and CsgIntersectionOp aliases /// for convenience. template<typename TreeT, bool Union> struct CsgUnionOrIntersectionOp { using ValueT = typename TreeT::ValueType; using RootT = typename TreeT::RootNodeType; using LeafT = typename TreeT::LeafNodeType; /// @brief Convenience constructor to CSG union or intersect a single /// non-const tree with another. This constructor takes a Steal or DeepCopy /// tag dispatch class. template <typename TagT> CsgUnionOrIntersectionOp(TreeT& tree, TagT tag) { mTreesToMerge.emplace_back(tree, tag); } /// @brief Convenience constructor to CSG union or intersect a single /// const tree with another. This constructor requires a DeepCopy tag /// dispatch class. 
CsgUnionOrIntersectionOp(const TreeT& tree, DeepCopy tag) { mTreesToMerge.emplace_back(tree, tag); } /// @brief Constructor to CSG union or intersect a container of multiple /// const or non-const tree pointers. A Steal tag requires a container of /// non-const trees, a DeepCopy tag will accept either const or non-const /// trees. template <typename TreesT, typename TagT> CsgUnionOrIntersectionOp(TreesT& trees, TagT tag) { for (auto* tree : trees) { if (tree) { mTreesToMerge.emplace_back(*tree, tag); } } } /// @brief Constructor to accept a vector of TreeToMerge objects, primarily /// used when mixing const/non-const trees. /// @note Union/intersection order is preserved. explicit CsgUnionOrIntersectionOp(const std::vector<TreeToMerge<TreeT>>& trees) : mTreesToMerge(trees) { } /// @brief Constructor to accept a deque of TreeToMerge objects, primarily /// used when mixing const/non-const trees. /// @note Union/intersection order is preserved. explicit CsgUnionOrIntersectionOp(const std::deque<TreeToMerge<TreeT>>& trees) : mTreesToMerge(trees.cbegin(), trees.cend()) { } /// @brief Return true if no trees being merged bool empty() const { return mTreesToMerge.empty(); } /// @brief Return the number of trees being merged size_t size() const { return mTreesToMerge.size(); } // Processes the root node. Required by the NodeManager bool operator()(RootT& root, size_t idx) const; // Processes the internal nodes. Required by the NodeManager template<typename NodeT> bool operator()(NodeT& node, size_t idx) const; // Processes the leaf nodes. 
Required by the NodeManager bool operator()(LeafT& leaf, size_t idx) const; private: // on processing the root node, the background value is stored, retrieve it // and check that the root node has already been processed const ValueT& background() const; mutable std::vector<TreeToMerge<TreeT>> mTreesToMerge; mutable const ValueT* mBackground = nullptr; }; // struct CsgUnionOrIntersectionOp template <typename TreeT> using CsgUnionOp = CsgUnionOrIntersectionOp<TreeT, /*Union=*/true>; template <typename TreeT> using CsgIntersectionOp = CsgUnionOrIntersectionOp<TreeT, /*Union=*/false>; /// @brief DynamicNodeManager operator to merge two trees using a CSG difference. /// @note This class modifies the topology of the tree so is designed to be used /// from DynamicNodeManager::foreachTopDown(). template<typename TreeT> struct CsgDifferenceOp { using ValueT = typename TreeT::ValueType; using RootT = typename TreeT::RootNodeType; using LeafT = typename TreeT::LeafNodeType; /// @brief Convenience constructor to CSG difference a single non-const /// tree from another. This constructor takes a Steal or DeepCopy tag /// dispatch class. template <typename TagT> CsgDifferenceOp(TreeT& tree, TagT tag) : mTree(tree, tag) { } /// @brief Convenience constructor to CSG difference a single const /// tree from another. This constructor requires an explicit DeepCopy tag /// dispatch class. CsgDifferenceOp(const TreeT& tree, DeepCopy tag) : mTree(tree, tag) { } /// @brief Constructor to CSG difference the tree in a TreeToMerge object /// from another. explicit CsgDifferenceOp(TreeToMerge<TreeT>& tree) : mTree(tree) { } /// @brief Return the number of trees being merged (only ever 1) size_t size() const { return 1; } // Processes the root node. Required by the NodeManager bool operator()(RootT& root, size_t idx) const; // Processes the internal nodes. Required by the NodeManager template<typename NodeT> bool operator()(NodeT& node, size_t idx) const; // Processes the leaf nodes. 
Required by the NodeManager bool operator()(LeafT& leaf, size_t idx) const; private: // on processing the root node, the background values are stored, retrieve them // and check that the root nodes have already been processed const ValueT& background() const; const ValueT& otherBackground() const; // note that this vector is copied in NodeTransformer every time a foreach call is made, // however in typical use cases this cost will be dwarfed by the actual merge algorithm mutable TreeToMerge<TreeT> mTree; mutable const ValueT* mBackground = nullptr; mutable const ValueT* mOtherBackground = nullptr; }; // struct CsgDifferenceOp //////////////////////////////////////// template<typename TreeT> void TreeToMerge<TreeT>::initializeMask() { if (mSteal) return; mMaskTree.ptr.reset(new MaskTreeType); MaskUnionOp op(*mTree); tree::DynamicNodeManager<MaskTreeType, MaskTreeType::RootNodeType::LEVEL-1> manager(*this->mask()); manager.foreachTopDown(op); } template<typename TreeT> bool TreeToMerge<TreeT>::hasMask() const { return bool(mMaskTree.ptr); } template<typename TreeT> void TreeToMerge<TreeT>::reset(typename TreeType::Ptr treePtr, Steal) { if (!treePtr) { OPENVDB_THROW(RuntimeError, "Cannot reset with empty Tree shared pointer."); } mSteal = true; mTreePtr = treePtr; mTree = mTreePtr.get(); } template<typename TreeT> const typename TreeToMerge<TreeT>::RootNodeType* TreeToMerge<TreeT>::rootPtr() const { return &mTree->root(); } template<typename TreeT> template<typename NodeT> const NodeT* TreeToMerge<TreeT>::probeConstNode(const Coord& ijk) const { // test mutable mask first, node may have already been pruned if (!mSteal && !this->mask()->isValueOn(ijk)) return nullptr; return mTree->template probeConstNode<NodeT>(ijk); } template<typename TreeT> template<typename NodeT> std::unique_ptr<NodeT> TreeToMerge<TreeT>::stealOrDeepCopyNode(const Coord& ijk) { if (mSteal) { TreeType* tree = const_cast<TreeType*>(mTree); return std::unique_ptr<NodeT>( tree->root().template 
stealNode<NodeT>(ijk, mTree->root().background(), false) ); } else { auto* child = this->probeConstNode<NodeT>(ijk); if (child) { assert(this->hasMask()); auto result = std::make_unique<NodeT>(*child); // prune mask tree this->mask()->addTile(NodeT::LEVEL, ijk, false, false); return result; } } return std::unique_ptr<NodeT>(); } template<typename TreeT> template<typename NodeT> void TreeToMerge<TreeT>::addTile(const Coord& ijk, const ValueType& value, bool active) { // ignore leaf node tiles (values) if (NodeT::LEVEL == 0) return; if (mSteal) { TreeType* tree = const_cast<TreeType*>(mTree); auto* node = tree->template probeNode<NodeT>(ijk); if (node) { const Index pos = NodeT::coordToOffset(ijk); node->addTile(pos, value, active); } } else { auto* node = mTree->template probeConstNode<NodeT>(ijk); // prune mask tree if (node) { assert(this->hasMask()); this->mask()->addTile(NodeT::LEVEL, ijk, false, false); } } } //////////////////////////////////////// template <typename TreeT> bool TreeToMerge<TreeT>::MaskUnionOp::operator()(RootT& root, size_t /*idx*/) const { using ChildT = typename RootT::ChildNodeType; const Index count = mTree.root().childCount(); std::vector<std::unique_ptr<ChildT>> children(count); // allocate new root children tbb::parallel_for( tbb::blocked_range<Index>(0, count), [&](tbb::blocked_range<Index>& range) { for (Index i = range.begin(); i < range.end(); i++) { children[i] = std::make_unique<ChildT>(Coord::max(), true, true); } } ); // apply origins and add root children to new root node size_t i = 0; for (auto iter = mTree.root().cbeginChildOn(); iter; ++iter) { children[i]->setOrigin(iter->origin()); root.addChild(children[i].release()); i++; } return true; } template <typename TreeT> template <typename NodeT> bool TreeToMerge<TreeT>::MaskUnionOp::operator()(NodeT& node, size_t /*idx*/) const { using ChildT = typename NodeT::ChildNodeType; const auto* otherNode = mTree.template probeConstNode<NodeT>(node.origin()); if (!otherNode) return 
false; // this mask tree stores active tiles in place of leaf nodes for compactness if (NodeT::LEVEL == 1) { for (auto iter = otherNode->cbeginChildOn(); iter; ++iter) { node.addTile(iter.pos(), true, true); } } else { for (auto iter = otherNode->cbeginChildOn(); iter; ++iter) { auto* child = new ChildT(iter->origin(), true, true); node.addChild(child); } } return true; } //////////////////////////////////////// namespace merge_internal { template <typename BufferT, typename ValueT> struct UnallocatedBuffer { static void allocateAndFill(BufferT& buffer, const ValueT& background) { if (!buffer.isOutOfCore() && buffer.empty()) { buffer.allocate(); buffer.fill(background); } } static bool isPartiallyConstructed(const BufferT& buffer) { return !buffer.isOutOfCore() && buffer.empty(); } }; // struct AllocateAndFillBuffer template <typename BufferT> struct UnallocatedBuffer<BufferT, bool> { // do nothing for bool buffers as they cannot be unallocated static void allocateAndFill(BufferT&, const bool&) { } static bool isPartiallyConstructed(const BufferT&) { return false; } }; // struct AllocateAndFillBuffer } // namespace merge_internal //////////////////////////////////////// template <typename TreeT, bool Union> bool CsgUnionOrIntersectionOp<TreeT, Union>::operator()(RootT& root, size_t) const { const bool Intersect = !Union; if (this->empty()) return false; // store the background value if (!mBackground) mBackground = &root.background(); // does the key exist in the root node? auto keyExistsInRoot = [&](const Coord& key) -> bool { return root.getValueDepth(key) > -1; }; // does the key exist in all merge tree root nodes? 
auto keyExistsInAllTrees = [&](const Coord& key) -> bool { for (TreeToMerge<TreeT>& mergeTree : mTreesToMerge) { const auto* mergeRoot = mergeTree.rootPtr(); if (!mergeRoot) return false; if (mergeRoot->getValueDepth(key) == -1) return false; } return true; }; // delete any background tiles root.eraseBackgroundTiles(); // for intersection, delete any root node keys that are not present in all trees if (Intersect) { // find all tile coordinates to delete std::vector<Coord> toDelete; for (auto valueIter = root.cbeginValueAll(); valueIter; ++valueIter) { const Coord& key = valueIter.getCoord(); if (!keyExistsInAllTrees(key)) toDelete.push_back(key); } // find all child coordinates to delete for (auto childIter = root.cbeginChildOn(); childIter; ++childIter) { const Coord& key = childIter.getCoord(); if (!keyExistsInAllTrees(key)) toDelete.push_back(key); } // only mechanism to delete elements in root node is to delete background tiles, // so insert background tiles (which will replace any child nodes) and then delete for (Coord& key : toDelete) root.addTile(key, *mBackground, false); root.eraseBackgroundTiles(); } // find all tile values in this root and track inside/outside and active state // note that level sets should never contain active tiles, but we handle them anyway constexpr uint8_t ACTIVE_TILE = 0x1; constexpr uint8_t INSIDE_TILE = 0x2; constexpr uint8_t OUTSIDE_TILE = 0x4; constexpr uint8_t INSIDE_STATE = Union ? INSIDE_TILE : OUTSIDE_TILE; constexpr uint8_t OUTSIDE_STATE = Union ? OUTSIDE_TILE : INSIDE_TILE; const ValueT insideBackground = Union ? 
-this->background() : this->background(); const ValueT outsideBackground = -insideBackground; auto getTileFlag = [&](auto& valueIter) -> uint8_t { uint8_t flag(0); const ValueT& value = valueIter.getValue(); if (value < zeroVal<ValueT>()) flag |= INSIDE_TILE; else if (value > zeroVal<ValueT>()) flag |= OUTSIDE_TILE; if (valueIter.isValueOn()) flag |= ACTIVE_TILE; return flag; }; std::unordered_map<Coord, /*flags*/uint8_t> tiles; if (root.getTableSize() > 0) { for (auto valueIter = root.cbeginValueAll(); valueIter; ++valueIter) { const Coord& key = valueIter.getCoord(); tiles.insert({key, getTileFlag(valueIter)}); } } // find all tiles values in other roots and replace outside tiles with inside tiles for (TreeToMerge<TreeT>& mergeTree : mTreesToMerge) { const auto* mergeRoot = mergeTree.rootPtr(); if (!mergeRoot) continue; for (auto valueIter = mergeRoot->cbeginValueAll(); valueIter; ++valueIter) { const Coord& key = valueIter.getCoord(); auto it = tiles.find(key); if (it == tiles.end()) { // if no tile with this key, insert it tiles.insert({key, getTileFlag(valueIter)}); } else { // replace an outside tile with an inside tile const uint8_t flag = it->second; if (flag & OUTSIDE_STATE) { const uint8_t newFlag = getTileFlag(valueIter); if (newFlag & INSIDE_STATE) { it->second = newFlag; } } } } } // insert all inside tiles for (auto it : tiles) { const uint8_t flag = it.second; if (flag & INSIDE_STATE) { const Coord& key = it.first; const bool state = flag & ACTIVE_TILE; // for intersection, only add the tile if the key already exists in the tree if (Union || keyExistsInRoot(key)) { root.addTile(key, insideBackground, state); } } } std::unordered_set<Coord> children; if (root.getTableSize() > 0) { for (auto childIter = root.cbeginChildOn(); childIter; ++childIter) { const Coord& key = childIter.getCoord(); children.insert(key); } } bool continueRecurse = false; // find all children in other roots and insert them if a child or tile with this key // does not already 
exist or if the child will replace an outside tile for (TreeToMerge<TreeT>& mergeTree : mTreesToMerge) { const auto* mergeRoot = mergeTree.rootPtr(); if (!mergeRoot) continue; for (auto childIter = mergeRoot->cbeginChildOn(); childIter; ++childIter) { const Coord& key = childIter.getCoord(); // for intersection, only add child nodes if the key already exists in the tree if (Intersect && !keyExistsInRoot(key)) continue; // if child already exists, merge recursion will need to continue to resolve conflict if (children.count(key)) { continueRecurse = true; continue; } // if an inside tile exists, do nothing auto it = tiles.find(key); if (it != tiles.end() && it->second == INSIDE_STATE) continue; auto childPtr = mergeTree.template stealOrDeepCopyNode<typename RootT::ChildNodeType>(key); childPtr->resetBackground(mergeRoot->background(), root.background()); if (childPtr) root.addChild(childPtr.release()); children.insert(key); } } // insert all outside tiles that don't replace an inside tile or a child node for (auto it : tiles) { const uint8_t flag = it.second; if (flag & OUTSIDE_STATE) { const Coord& key = it.first; if (!children.count(key)) { const bool state = flag & ACTIVE_TILE; // for intersection, only add the tile if the key already exists in the tree if (Union || keyExistsInRoot(key)) { root.addTile(key, outsideBackground, state); } } } } // finish by removing any background tiles root.eraseBackgroundTiles(); return continueRecurse; } template<typename TreeT, bool Union> template<typename NodeT> bool CsgUnionOrIntersectionOp<TreeT, Union>::operator()(NodeT& node, size_t) const { using NonConstNodeT = typename std::remove_const<NodeT>::type; if (this->empty()) return false; const ValueT insideBackground = Union ? 
-this->background() : this->background(); const ValueT outsideBackground = -insideBackground; using NodeMaskT = typename NodeT::NodeMaskType; // store temporary masks to track inside and outside tile states NodeMaskT validTile; NodeMaskT invalidTile; auto isValid = [](const ValueT& value) { return Union ? value < zeroVal<ValueT>() : value > zeroVal<ValueT>(); }; auto isInvalid = [](const ValueT& value) { return Union ? value > zeroVal<ValueT>() : value < zeroVal<ValueT>(); }; for (auto iter = node.cbeginValueAll(); iter; ++iter) { if (isValid(iter.getValue())) { validTile.setOn(iter.pos()); } else if (isInvalid(iter.getValue())) { invalidTile.setOn(iter.pos()); } } bool continueRecurse = false; for (TreeToMerge<TreeT>& mergeTree : mTreesToMerge) { auto* mergeNode = mergeTree.template probeConstNode<NonConstNodeT>(node.origin()); if (!mergeNode) continue; // iterate over all tiles for (auto iter = mergeNode->cbeginValueAll(); iter; ++iter) { Index pos = iter.pos(); // source node contains an inside tile, so ignore if (validTile.isOn(pos)) continue; // this node contains an inside tile, so turn into an inside tile if (isValid(iter.getValue())) { node.addTile(pos, insideBackground, iter.isValueOn()); validTile.setOn(pos); } } // iterate over all child nodes for (auto iter = mergeNode->cbeginChildOn(); iter; ++iter) { Index pos = iter.pos(); const Coord& ijk = iter.getCoord(); // source node contains an inside tile, so ensure other node has no child if (validTile.isOn(pos)) { mergeTree.template addTile<NonConstNodeT>(ijk, outsideBackground, false); } else if (invalidTile.isOn(pos)) { auto childPtr = mergeTree.template stealOrDeepCopyNode<typename NodeT::ChildNodeType>(ijk); if (childPtr) { childPtr->resetBackground(mergeTree.rootPtr()->background(), this->background()); node.addChild(childPtr.release()); } invalidTile.setOff(pos); } else { // if both source and target are child nodes, merge recursion needs to continue // along this branch to resolve the conflict 
continueRecurse = true; } } } return continueRecurse; } template <typename TreeT, bool Union> bool CsgUnionOrIntersectionOp<TreeT, Union>::operator()(LeafT& leaf, size_t) const { using LeafT = typename TreeT::LeafNodeType; using ValueT = typename LeafT::ValueType; using BufferT = typename LeafT::Buffer; if (this->empty()) return false; const ValueT background = Union ? this->background() : -this->background(); // if buffer is not out-of-core and empty, leaf node must have only been // partially constructed, so allocate and fill with background value merge_internal::UnallocatedBuffer<BufferT, ValueT>::allocateAndFill( leaf.buffer(), background); for (TreeToMerge<TreeT>& mergeTree : mTreesToMerge) { const LeafT* mergeLeaf = mergeTree.template probeConstNode<LeafT>(leaf.origin()); if (!mergeLeaf) continue; // if buffer is not out-of-core yet empty, leaf node must have only been // partially constructed, so skip merge if (merge_internal::UnallocatedBuffer<BufferT, ValueT>::isPartiallyConstructed( mergeLeaf->buffer())) { continue; } for (Index i = 0 ; i < LeafT::SIZE; i++) { const ValueT& newValue = mergeLeaf->getValue(i); const bool doMerge = Union ? 
newValue < leaf.getValue(i) : newValue > leaf.getValue(i); if (doMerge) { leaf.setValueOnly(i, newValue); leaf.setActiveState(i, mergeLeaf->isValueOn(i)); } } } return false; } template <typename TreeT, bool Union> const typename CsgUnionOrIntersectionOp<TreeT, Union>::ValueT& CsgUnionOrIntersectionOp<TreeT, Union>::background() const { // this operator is only intended to be used with foreachTopDown() assert(mBackground); return *mBackground; } //////////////////////////////////////// template <typename TreeT> bool CsgDifferenceOp<TreeT>::operator()(RootT& root, size_t) const { // store the background values if (!mBackground) mBackground = &root.background(); if (!mOtherBackground) mOtherBackground = &mTree.rootPtr()->background(); // find all tile values in this root and track inside/outside and active state // note that level sets should never contain active tiles, but we handle them anyway constexpr uint8_t ACTIVE_TILE = 0x1; constexpr uint8_t INSIDE_TILE = 0x2; constexpr uint8_t CHILD = 0x4; auto getTileFlag = [&](auto& valueIter) -> uint8_t { uint8_t flag(0); const ValueT& value = valueIter.getValue(); if (value < zeroVal<ValueT>()) flag |= INSIDE_TILE; if (valueIter.isValueOn()) flag |= ACTIVE_TILE; return flag; }; // delete any background tiles root.eraseBackgroundTiles(); std::unordered_map<Coord, /*flags*/uint8_t> flags; if (root.getTableSize() > 0) { for (auto valueIter = root.cbeginValueAll(); valueIter; ++valueIter) { const Coord& key = valueIter.getCoord(); const uint8_t flag = getTileFlag(valueIter); if (flag & INSIDE_TILE) { flags.insert({key, getTileFlag(valueIter)}); } } for (auto childIter = root.cbeginChildOn(); childIter; ++childIter) { const Coord& key = childIter.getCoord(); flags.insert({key, CHILD}); } } bool continueRecurse = false; const auto* mergeRoot = mTree.rootPtr(); if (mergeRoot) { for (auto valueIter = mergeRoot->cbeginValueAll(); valueIter; ++valueIter) { const Coord& key = valueIter.getCoord(); const uint8_t flag = 
getTileFlag(valueIter); if (flag & INSIDE_TILE) { auto it = flags.find(key); if (it != flags.end()) { const bool state = flag & ACTIVE_TILE; root.addTile(key, this->background(), state); } } } for (auto childIter = mergeRoot->cbeginChildOn(); childIter; ++childIter) { const Coord& key = childIter.getCoord(); auto it = flags.find(key); if (it != flags.end()) { const uint8_t otherFlag = it->second; if (otherFlag & CHILD) { // if child already exists, merge recursion will need to continue to resolve conflict continueRecurse = true; } else if (otherFlag & INSIDE_TILE) { auto childPtr = mTree.template stealOrDeepCopyNode<typename RootT::ChildNodeType>(key); if (childPtr) { childPtr->resetBackground(this->otherBackground(), this->background()); childPtr->negate(); root.addChild(childPtr.release()); } } } } } // finish by removing any background tiles root.eraseBackgroundTiles(); return continueRecurse; } template<typename TreeT> template<typename NodeT> bool CsgDifferenceOp<TreeT>::operator()(NodeT& node, size_t) const { using NonConstNodeT = typename std::remove_const<NodeT>::type; using NodeMaskT = typename NodeT::NodeMaskType; // store temporary mask to track inside tile state NodeMaskT insideTile; for (auto iter = node.cbeginValueAll(); iter; ++iter) { if (iter.getValue() < zeroVal<ValueT>()) { insideTile.setOn(iter.pos()); } } bool continueRecurse = false; auto* mergeNode = mTree.template probeConstNode<NonConstNodeT>(node.origin()); if (!mergeNode) return continueRecurse; // iterate over all tiles for (auto iter = mergeNode->cbeginValueAll(); iter; ++iter) { Index pos = iter.pos(); if (iter.getValue() < zeroVal<ValueT>()) { if (insideTile.isOn(pos) || node.isChildMaskOn(pos)) { node.addTile(pos, this->background(), iter.isValueOn()); } } } // iterate over all children for (auto iter = mergeNode->cbeginChildOn(); iter; ++iter) { Index pos = iter.pos(); const Coord& ijk = iter.getCoord(); if (insideTile.isOn(pos)) { auto childPtr = mTree.template 
stealOrDeepCopyNode<typename NodeT::ChildNodeType>(ijk); if (childPtr) { childPtr->resetBackground(this->otherBackground(), this->background()); childPtr->negate(); node.addChild(childPtr.release()); } } else if (node.isChildMaskOn(pos)) { // if both source and target are child nodes, merge recursion needs to continue // along this branch to resolve the conflict continueRecurse = true; } } return continueRecurse; } template <typename TreeT> bool CsgDifferenceOp<TreeT>::operator()(LeafT& leaf, size_t) const { using LeafT = typename TreeT::LeafNodeType; using ValueT = typename LeafT::ValueType; using BufferT = typename LeafT::Buffer; // if buffer is not out-of-core and empty, leaf node must have only been // partially constructed, so allocate and fill with background value merge_internal::UnallocatedBuffer<BufferT, ValueT>::allocateAndFill( leaf.buffer(), this->background()); const LeafT* mergeLeaf = mTree.template probeConstNode<LeafT>(leaf.origin()); if (!mergeLeaf) return false; // if buffer is not out-of-core yet empty, leaf node must have only been // partially constructed, so skip merge if (merge_internal::UnallocatedBuffer<BufferT, ValueT>::isPartiallyConstructed( mergeLeaf->buffer())) { return false; } for (Index i = 0 ; i < LeafT::SIZE; i++) { const ValueT& aValue = leaf.getValue(i); ValueT bValue = math::negative(mergeLeaf->getValue(i)); if (aValue < bValue) { // a = max(a, -b) leaf.setValueOnly(i, bValue); leaf.setActiveState(i, mergeLeaf->isValueOn(i)); } } return false; } template <typename TreeT> const typename CsgDifferenceOp<TreeT>::ValueT& CsgDifferenceOp<TreeT>::background() const { // this operator is only intended to be used with foreachTopDown() assert(mBackground); return *mBackground; } template <typename TreeT> const typename CsgDifferenceOp<TreeT>::ValueT& CsgDifferenceOp<TreeT>::otherBackground() const { // this operator is only intended to be used with foreachTopDown() assert(mOtherBackground); return *mOtherBackground; } } // namespace 
tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_MERGE_HAS_BEEN_INCLUDED
35,978
C
34.273529
107
0.635666
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetRebuild.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_TOOLS_LEVELSETREBUILD_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_LEVELSETREBUILD_HAS_BEEN_INCLUDED #include <openvdb/Grid.h> #include <openvdb/Exceptions.h> #include <openvdb/math/Math.h> #include <openvdb/math/Transform.h> #include <openvdb/tools/VolumeToMesh.h> #include <openvdb/tools/MeshToVolume.h> #include <openvdb/util/NullInterrupter.h> #include <openvdb/util/Util.h> #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <type_traits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Return a new grid of type @c GridType that contains a narrow-band level set /// representation of an isosurface of a given grid. /// /// @param grid a scalar, floating-point grid with one or more disjoint, /// closed isosurfaces at the given @a isovalue /// @param isovalue the isovalue that defines the implicit surface (defaults to zero, /// which is typical if the input grid is already a level set or a SDF). /// @param halfWidth half the width of the narrow band, in voxel units /// (defaults to 3 voxels, which is required for some level set operations) /// @param xform optional transform for the output grid /// (if not provided, the transform of the input @a grid will be matched) /// /// @throw TypeError if @a grid is not scalar or not floating-point /// /// @note If the input grid contains overlapping isosurfaces, interior edges will be lost. template<class GridType> inline typename GridType::Ptr levelSetRebuild(const GridType& grid, float isovalue = 0, float halfWidth = float(LEVEL_SET_HALF_WIDTH), const math::Transform* xform = nullptr); /// @brief Return a new grid of type @c GridType that contains a narrow-band level set /// representation of an isosurface of a given grid. 
/// /// @param grid a scalar, floating-point grid with one or more disjoint, /// closed isosurfaces at the given @a isovalue /// @param isovalue the isovalue that defines the implicit surface /// @param exBandWidth the exterior narrow-band width in voxel units /// @param inBandWidth the interior narrow-band width in voxel units /// @param xform optional transform for the output grid /// (if not provided, the transform of the input @a grid will be matched) /// /// @throw TypeError if @a grid is not scalar or not floating-point /// /// @note If the input grid contains overlapping isosurfaces, interior edges will be lost. template<class GridType> inline typename GridType::Ptr levelSetRebuild(const GridType& grid, float isovalue, float exBandWidth, float inBandWidth, const math::Transform* xform = nullptr); /// @brief Return a new grid of type @c GridType that contains a narrow-band level set /// representation of an isosurface of a given grid. /// /// @param grid a scalar, floating-point grid with one or more disjoint, /// closed isosurfaces at the given @a isovalue /// @param isovalue the isovalue that defines the implicit surface /// @param exBandWidth the exterior narrow-band width in voxel units /// @param inBandWidth the interior narrow-band width in voxel units /// @param xform optional transform for the output grid /// (if not provided, the transform of the input @a grid will be matched) /// @param interrupter optional interrupter object /// /// @throw TypeError if @a grid is not scalar or not floating-point /// /// @note If the input grid contains overlapping isosurfaces, interior edges will be lost. 
template<class GridType, typename InterruptT> inline typename GridType::Ptr levelSetRebuild(const GridType& grid, float isovalue, float exBandWidth, float inBandWidth, const math::Transform* xform = nullptr, InterruptT* interrupter = nullptr); //////////////////////////////////////// // Internal utility objects and implementation details namespace internal { class PointListTransform { public: PointListTransform(const PointList& pointsIn, std::vector<Vec3s>& pointsOut, const math::Transform& xform) : mPointsIn(pointsIn) , mPointsOut(&pointsOut) , mXform(xform) { } void runParallel() { tbb::parallel_for(tbb::blocked_range<size_t>(0, mPointsOut->size()), *this); } void runSerial() { (*this)(tbb::blocked_range<size_t>(0, mPointsOut->size())); } inline void operator()(const tbb::blocked_range<size_t>& range) const { for (size_t n = range.begin(); n < range.end(); ++n) { (*mPointsOut)[n] = Vec3s(mXform.worldToIndex(mPointsIn[n])); } } private: const PointList& mPointsIn; std::vector<Vec3s> * const mPointsOut; const math::Transform& mXform; }; class PrimCpy { public: PrimCpy(const PolygonPoolList& primsIn, const std::vector<size_t>& indexList, std::vector<Vec4I>& primsOut) : mPrimsIn(primsIn) , mIndexList(indexList) , mPrimsOut(&primsOut) { } void runParallel() { tbb::parallel_for(tbb::blocked_range<size_t>(0, mIndexList.size()), *this); } void runSerial() { (*this)(tbb::blocked_range<size_t>(0, mIndexList.size())); } inline void operator()(const tbb::blocked_range<size_t>& range) const { openvdb::Vec4I quad; quad[3] = openvdb::util::INVALID_IDX; std::vector<Vec4I>& primsOut = *mPrimsOut; for (size_t n = range.begin(); n < range.end(); ++n) { size_t index = mIndexList[n]; PolygonPool& polygons = mPrimsIn[n]; // Copy quads for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) { primsOut[index++] = polygons.quad(i); } polygons.clearQuads(); // Copy triangles (adaptive mesh) for (size_t i = 0, I = polygons.numTriangles(); i < I; ++i) { const openvdb::Vec3I& triangle = 
polygons.triangle(i); quad[0] = triangle[0]; quad[1] = triangle[1]; quad[2] = triangle[2]; primsOut[index++] = quad; } polygons.clearTriangles(); } } private: const PolygonPoolList& mPrimsIn; const std::vector<size_t>& mIndexList; std::vector<Vec4I> * const mPrimsOut; }; } // namespace internal //////////////////////////////////////// //{ /// @cond OPENVDB_LEVEL_SET_REBUILD_INTERNAL /// The normal entry points for level set rebuild are the levelSetRebuild() functions. /// doLevelSetRebuild() is mainly for internal use, but when the isovalue and half band /// widths are given in ValueType units (for example, if they are queried from /// a grid), it might be more convenient to call this function directly. /// /// @internal This overload is enabled only for grids with a scalar, floating-point ValueType. template<class GridType, typename InterruptT> inline typename std::enable_if< std::is_floating_point<typename GridType::ValueType>::value, typename GridType::Ptr>::type doLevelSetRebuild(const GridType& grid, typename GridType::ValueType iso, typename GridType::ValueType exWidth, typename GridType::ValueType inWidth, const math::Transform* xform, InterruptT* interrupter) { const float isovalue = float(iso), exBandWidth = float(exWidth), inBandWidth = float(inWidth); tools::VolumeToMesh mesher(isovalue); mesher(grid); math::Transform::Ptr transform = (xform != nullptr) ? xform->copy() : grid.transform().copy(); std::vector<Vec3s> points(mesher.pointListSize()); { // Copy and transform (required for MeshToVolume) points to grid space. internal::PointListTransform ptnXForm(mesher.pointList(), points, *transform); ptnXForm.runParallel(); mesher.pointList().reset(nullptr); } std::vector<Vec4I> primitives; { // Copy primitives. 
PolygonPoolList& polygonPoolList = mesher.polygonPoolList(); size_t numPrimitives = 0; std::vector<size_t> indexlist(mesher.polygonPoolListSize()); for (size_t n = 0, N = mesher.polygonPoolListSize(); n < N; ++n) { const openvdb::tools::PolygonPool& polygons = polygonPoolList[n]; indexlist[n] = numPrimitives; numPrimitives += polygons.numQuads(); numPrimitives += polygons.numTriangles(); } primitives.resize(numPrimitives); internal::PrimCpy primCpy(polygonPoolList, indexlist, primitives); primCpy.runParallel(); } QuadAndTriangleDataAdapter<Vec3s, Vec4I> mesh(points, primitives); if (interrupter) { return meshToVolume<GridType>(*interrupter, mesh, *transform, exBandWidth, inBandWidth, DISABLE_RENORMALIZATION, nullptr); } return meshToVolume<GridType>(mesh, *transform, exBandWidth, inBandWidth, DISABLE_RENORMALIZATION, nullptr); } /// @internal This overload is enabled only for grids that do not have a scalar, /// floating-point ValueType. template<class GridType, typename InterruptT> inline typename std::enable_if< !std::is_floating_point<typename GridType::ValueType>::value, typename GridType::Ptr>::type doLevelSetRebuild(const GridType&, typename GridType::ValueType /*isovalue*/, typename GridType::ValueType /*exWidth*/, typename GridType::ValueType /*inWidth*/, const math::Transform*, InterruptT*) { OPENVDB_THROW(TypeError, "level set rebuild is supported only for scalar, floating-point grids"); } /// @endcond //} //////////////////////////////////////// template<class GridType, typename InterruptT> inline typename GridType::Ptr levelSetRebuild(const GridType& grid, float iso, float exWidth, float inWidth, const math::Transform* xform, InterruptT* interrupter) { using ValueT = typename GridType::ValueType; ValueT isovalue(zeroVal<ValueT>() + ValueT(iso)), exBandWidth(zeroVal<ValueT>() + ValueT(exWidth)), inBandWidth(zeroVal<ValueT>() + ValueT(inWidth)); return doLevelSetRebuild(grid, isovalue, exBandWidth, inBandWidth, xform, interrupter); } template<class 
GridType> inline typename GridType::Ptr levelSetRebuild(const GridType& grid, float iso, float exWidth, float inWidth, const math::Transform* xform) { using ValueT = typename GridType::ValueType; ValueT isovalue(zeroVal<ValueT>() + ValueT(iso)), exBandWidth(zeroVal<ValueT>() + ValueT(exWidth)), inBandWidth(zeroVal<ValueT>() + ValueT(inWidth)); return doLevelSetRebuild<GridType, util::NullInterrupter>( grid, isovalue, exBandWidth, inBandWidth, xform, nullptr); } template<class GridType> inline typename GridType::Ptr levelSetRebuild(const GridType& grid, float iso, float halfVal, const math::Transform* xform) { using ValueT = typename GridType::ValueType; ValueT isovalue(zeroVal<ValueT>() + ValueT(iso)), halfWidth(zeroVal<ValueT>() + ValueT(halfVal)); return doLevelSetRebuild<GridType, util::NullInterrupter>( grid, isovalue, halfWidth, halfWidth, xform, nullptr); } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_LEVELSETREBUILD_HAS_BEEN_INCLUDED
11,442
C
33.887195
98
0.668852
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/GridOperators.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file tools/GridOperators.h /// /// @brief Apply an operator to an input grid to produce an output grid /// with the same active voxel topology but a potentially different value type. #ifndef OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED #include <openvdb/Grid.h> #include <openvdb/math/Operators.h> #include <openvdb/util/NullInterrupter.h> #include <openvdb/tree/LeafManager.h> #include <openvdb/tree/ValueAccessor.h> #include "ValueTransformer.h" // for tools::foreach() #include <tbb/parallel_for.h> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief VectorToScalarConverter<VectorGridType>::Type is the type of a grid /// having the same tree configuration as VectorGridType but a scalar value type, T, /// where T is the type of the original vector components. /// @details For example, VectorToScalarConverter<Vec3DGrid>::Type is equivalent to DoubleGrid. template<typename VectorGridType> struct VectorToScalarConverter { typedef typename VectorGridType::ValueType::value_type VecComponentValueT; typedef typename VectorGridType::template ValueConverter<VecComponentValueT>::Type Type; }; /// @brief ScalarToVectorConverter<ScalarGridType>::Type is the type of a grid /// having the same tree configuration as ScalarGridType but value type Vec3<T> /// where T is ScalarGridType::ValueType. /// @details For example, ScalarToVectorConverter<DoubleGrid>::Type is equivalent to Vec3DGrid. template<typename ScalarGridType> struct ScalarToVectorConverter { typedef math::Vec3<typename ScalarGridType::ValueType> VectorValueT; typedef typename ScalarGridType::template ValueConverter<VectorValueT>::Type Type; }; /// @brief Compute the Closest-Point Transform (CPT) from a distance field. 
/// @return a new vector-valued grid with the same numerical precision as the input grid /// (for example, if the input grid is a DoubleGrid, the output grid will be a Vec3DGrid) /// @details When a mask grid is specified, the solution is calculated only in /// the intersection of the mask active topology and the input active topology /// independent of the transforms associated with either grid. template<typename GridType, typename InterruptT> inline typename ScalarToVectorConverter<GridType>::Type::Ptr cpt(const GridType& grid, bool threaded, InterruptT* interrupt); template<typename GridType, typename MaskT, typename InterruptT> inline typename ScalarToVectorConverter<GridType>::Type::Ptr cpt(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt); template<typename GridType> inline typename ScalarToVectorConverter<GridType>::Type::Ptr cpt(const GridType& grid, bool threaded = true) { return cpt<GridType, util::NullInterrupter>(grid, threaded, nullptr); } template<typename GridType, typename MaskT> inline typename ScalarToVectorConverter<GridType>::Type::Ptr cpt(const GridType& grid, const MaskT& mask, bool threaded = true) { return cpt<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr); } /// @brief Compute the curl of the given vector-valued grid. /// @return a new vector-valued grid /// @details When a mask grid is specified, the solution is calculated only in /// the intersection of the mask active topology and the input active topology /// independent of the transforms associated with either grid. 
template<typename GridType, typename InterruptT> inline typename GridType::Ptr curl(const GridType& grid, bool threaded, InterruptT* interrupt); template<typename GridType, typename MaskT, typename InterruptT> inline typename GridType::Ptr curl(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt); template<typename GridType> inline typename GridType::Ptr curl(const GridType& grid, bool threaded = true) { return curl<GridType, util::NullInterrupter>(grid, threaded, nullptr); } template<typename GridType, typename MaskT> inline typename GridType::Ptr curl(const GridType& grid, const MaskT& mask, bool threaded = true) { return curl<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr); } /// @brief Compute the divergence of the given vector-valued grid. /// @return a new scalar-valued grid with the same numerical precision as the input grid /// (for example, if the input grid is a Vec3DGrid, the output grid will be a DoubleGrid) /// @details When a mask grid is specified, the solution is calculated only in /// the intersection of the mask active topology and the input active topology /// independent of the transforms associated with either grid. 
template<typename GridType, typename InterruptT> inline typename VectorToScalarConverter<GridType>::Type::Ptr divergence(const GridType& grid, bool threaded, InterruptT* interrupt); template<typename GridType, typename MaskT, typename InterruptT> inline typename VectorToScalarConverter<GridType>::Type::Ptr divergence(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt); template<typename GridType> inline typename VectorToScalarConverter<GridType>::Type::Ptr divergence(const GridType& grid, bool threaded = true) { return divergence<GridType, util::NullInterrupter>(grid, threaded, nullptr); } template<typename GridType, typename MaskT> inline typename VectorToScalarConverter<GridType>::Type::Ptr divergence(const GridType& grid, const MaskT& mask, bool threaded = true) { return divergence<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr); } /// @brief Compute the gradient of the given scalar grid. /// @return a new vector-valued grid with the same numerical precision as the input grid /// (for example, if the input grid is a DoubleGrid, the output grid will be a Vec3DGrid) /// @details When a mask grid is specified, the solution is calculated only in /// the intersection of the mask active topology and the input active topology /// independent of the transforms associated with either grid. 
template<typename GridType, typename InterruptT> inline typename ScalarToVectorConverter<GridType>::Type::Ptr gradient(const GridType& grid, bool threaded, InterruptT* interrupt); template<typename GridType, typename MaskT, typename InterruptT> inline typename ScalarToVectorConverter<GridType>::Type::Ptr gradient(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt); template<typename GridType> inline typename ScalarToVectorConverter<GridType>::Type::Ptr gradient(const GridType& grid, bool threaded = true) { return gradient<GridType, util::NullInterrupter>(grid, threaded, nullptr); } template<typename GridType, typename MaskT> inline typename ScalarToVectorConverter<GridType>::Type::Ptr gradient(const GridType& grid, const MaskT& mask, bool threaded = true) { return gradient<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr); } /// @brief Compute the Laplacian of the given scalar grid. /// @return a new scalar grid /// @details When a mask grid is specified, the solution is calculated only in /// the intersection of the mask active topology and the input active topology /// independent of the transforms associated with either grid. 
template<typename GridType, typename InterruptT> inline typename GridType::Ptr laplacian(const GridType& grid, bool threaded, InterruptT* interrupt); template<typename GridType, typename MaskT, typename InterruptT> inline typename GridType::Ptr laplacian(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt); template<typename GridType> inline typename GridType::Ptr laplacian(const GridType& grid, bool threaded = true) { return laplacian<GridType, util::NullInterrupter>(grid, threaded, nullptr); } template<typename GridType, typename MaskT> inline typename GridType::Ptr laplacian(const GridType& grid, const MaskT mask, bool threaded = true) { return laplacian<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr); } /// @brief Compute the mean curvature of the given grid. /// @return a new grid /// @details When a mask grid is specified, the solution is calculated only in /// the intersection of the mask active topology and the input active topology /// independent of the transforms associated with either grid. template<typename GridType, typename InterruptT> inline typename GridType::Ptr meanCurvature(const GridType& grid, bool threaded, InterruptT* interrupt); template<typename GridType, typename MaskT, typename InterruptT> inline typename GridType::Ptr meanCurvature(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt); template<typename GridType> inline typename GridType::Ptr meanCurvature(const GridType& grid, bool threaded = true) { return meanCurvature<GridType, util::NullInterrupter>(grid, threaded, nullptr); } template<typename GridType, typename MaskT> inline typename GridType::Ptr meanCurvature(const GridType& grid, const MaskT& mask, bool threaded = true) { return meanCurvature<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr); } /// @brief Compute the magnitudes of the vectors of the given vector-valued grid. 
/// @return a new scalar-valued grid with the same numerical precision as the input grid /// (for example, if the input grid is a Vec3DGrid, the output grid will be a DoubleGrid) /// @details When a mask grid is specified, the solution is calculated only in /// the intersection of the mask active topology and the input active topology /// independent of the transforms associated with either grid. template<typename GridType, typename InterruptT> inline typename VectorToScalarConverter<GridType>::Type::Ptr magnitude(const GridType& grid, bool threaded, InterruptT* interrupt); template<typename GridType, typename MaskT, typename InterruptT> inline typename VectorToScalarConverter<GridType>::Type::Ptr magnitude(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt); template<typename GridType> inline typename VectorToScalarConverter<GridType>::Type::Ptr magnitude(const GridType& grid, bool threaded = true) { return magnitude<GridType, util::NullInterrupter>(grid, threaded, nullptr); } template<typename GridType, typename MaskT> inline typename VectorToScalarConverter<GridType>::Type::Ptr magnitude(const GridType& grid, const MaskT& mask, bool threaded = true) { return magnitude<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr); } /// @brief Normalize the vectors of the given vector-valued grid. /// @return a new vector-valued grid /// @details When a mask grid is specified, the solution is calculated only in /// the intersection of the mask active topology and the input active topology /// independent of the transforms associated with either grid. 
template<typename GridType, typename InterruptT> inline typename GridType::Ptr normalize(const GridType& grid, bool threaded, InterruptT* interrupt); template<typename GridType, typename MaskT, typename InterruptT> inline typename GridType::Ptr normalize(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt); template<typename GridType> inline typename GridType::Ptr normalize(const GridType& grid, bool threaded = true) { return normalize<GridType, util::NullInterrupter>(grid, threaded, nullptr); } template<typename GridType, typename MaskT> inline typename GridType::Ptr normalize(const GridType& grid, const MaskT& mask, bool threaded = true) { return normalize<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr); } //////////////////////////////////////// namespace gridop { /// @brief ToMaskGrid<T>::Type is the type of a grid having the same /// tree hierarchy as grid type T but a value equal to its active state. /// @details For example, ToMaskGrid<FloatGrid>::Type is equivalent to MaskGrid. template<typename GridType> struct ToMaskGrid { typedef Grid<typename GridType::TreeType::template ValueConverter<ValueMask>::Type> Type; }; /// @brief Apply an operator to an input grid to produce an output grid /// with the same active voxel topology but a potentially different value type. /// @details To facilitate inlining, this class is also templated on a Map type. /// /// @note This is a helper class and should never be used directly. 
template< typename InGridT, typename MaskGridType, typename OutGridT, typename MapT, typename OperatorT, typename InterruptT = util::NullInterrupter> class GridOperator { public: typedef typename OutGridT::TreeType OutTreeT; typedef typename OutTreeT::LeafNodeType OutLeafT; typedef typename tree::LeafManager<OutTreeT> LeafManagerT; GridOperator(const InGridT& grid, const MaskGridType* mask, const MapT& map, InterruptT* interrupt = nullptr, bool densify = true) : mAcc(grid.getConstAccessor()) , mMap(map) , mInterrupt(interrupt) , mMask(mask) , mDensify(densify) ///< @todo consider adding a "NeedsDensification" operator trait { } GridOperator(const GridOperator&) = default; GridOperator& operator=(const GridOperator&) = default; virtual ~GridOperator() = default; typename OutGridT::Ptr process(bool threaded = true) { if (mInterrupt) mInterrupt->start("Processing grid"); // Derive background value of the output grid typename InGridT::TreeType tmp(mAcc.tree().background()); typename OutGridT::ValueType backg = OperatorT::result(mMap, tmp, math::Coord(0)); // The output tree is topology copy, optionally densified, of the input tree. // (Densification is necessary for some operators because applying the operator to // a constant tile produces distinct output values, particularly along tile borders.) /// @todo Can tiles be handled correctly without densification, or by densifying /// only to the width of the operator stencil? typename OutTreeT::Ptr tree(new OutTreeT(mAcc.tree(), backg, TopologyCopy())); if (mDensify) tree->voxelizeActiveTiles(); // create grid with output tree and unit transform typename OutGridT::Ptr result(new OutGridT(tree)); // Modify the solution area if a mask was supplied. 
if (mMask) { result->topologyIntersection(*mMask); } // transform of output grid = transform of input grid result->setTransform(math::Transform::Ptr(new math::Transform( mMap.copy() ))); LeafManagerT leafManager(*tree); if (threaded) { tbb::parallel_for(leafManager.leafRange(), *this); } else { (*this)(leafManager.leafRange()); } // If the tree wasn't densified, it might have active tiles that need to be processed. if (!mDensify) { using TileIter = typename OutTreeT::ValueOnIter; TileIter tileIter = tree->beginValueOn(); tileIter.setMaxDepth(tileIter.getLeafDepth() - 1); // skip leaf values (i.e., voxels) AccessorT inAcc = mAcc; // each thread needs its own accessor, captured by value auto tileOp = [this, inAcc](const TileIter& it) { // Apply the operator to the input grid's tile value at the iterator's // current coordinates, and set the output tile's value to the result. it.setValue(OperatorT::result(this->mMap, inAcc, it.getCoord())); }; // Apply the operator to tile values, optionally in parallel. // (But don't share the functor; each thread needs its own accessor.) tools::foreach(tileIter, tileOp, threaded, /*shareFunctor=*/false); } if (mDensify) tree->prune(); if (mInterrupt) mInterrupt->end(); return result; } /// @brief Iterate sequentially over LeafNodes and voxels in the output /// grid and apply the operator using a value accessor for the input grid. /// /// @note Never call this public method directly - it is called by /// TBB threads only! 
void operator()(const typename LeafManagerT::LeafRange& range) const { if (util::wasInterrupted(mInterrupt)) tbb::task::self().cancel_group_execution(); for (typename LeafManagerT::LeafRange::Iterator leaf=range.begin(); leaf; ++leaf) { for (typename OutLeafT::ValueOnIter value=leaf->beginValueOn(); value; ++value) { value.setValue(OperatorT::result(mMap, mAcc, value.getCoord())); } } } protected: typedef typename InGridT::ConstAccessor AccessorT; mutable AccessorT mAcc; const MapT& mMap; InterruptT* mInterrupt; const MaskGridType* mMask; const bool mDensify; }; // end of GridOperator class } // namespace gridop //////////////////////////////////////// /// @brief Compute the closest-point transform of a scalar grid. template< typename InGridT, typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type, typename InterruptT = util::NullInterrupter> class Cpt { public: typedef InGridT InGridType; typedef typename ScalarToVectorConverter<InGridT>::Type OutGridType; Cpt(const InGridType& grid, InterruptT* interrupt = nullptr): mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr) { } Cpt(const InGridType& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr): mInputGrid(grid), mInterrupt(interrupt), mMask(&mask) { } typename OutGridType::Ptr process(bool threaded = true, bool useWorldTransform = true) { Functor functor(mInputGrid, mMask, threaded, useWorldTransform, mInterrupt); processTypedMap(mInputGrid.transform(), functor); if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_CONTRAVARIANT_ABSOLUTE); return functor.mOutputGrid; } private: struct IsOpT { template<typename MapT, typename AccT> static typename OutGridType::ValueType result(const MapT& map, const AccT& acc, const Coord& xyz) { return math::CPT<MapT, math::CD_2ND>::result(map, acc, xyz); } }; struct WsOpT { template<typename MapT, typename AccT> static typename OutGridType::ValueType result(const MapT& map, const AccT& acc, const Coord& xyz) { return math::CPT_RANGE<MapT, 
math::CD_2ND>::result(map, acc, xyz); } }; struct Functor { Functor(const InGridType& grid, const MaskGridType* mask, bool threaded, bool worldspace, InterruptT* interrupt) : mThreaded(threaded) , mWorldSpace(worldspace) , mInputGrid(grid) , mInterrupt(interrupt) , mMask(mask) {} template<typename MapT> void operator()(const MapT& map) { if (mWorldSpace) { gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, WsOpT, InterruptT> op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false); mOutputGrid = op.process(mThreaded); // cache the result } else { gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, IsOpT, InterruptT> op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false); mOutputGrid = op.process(mThreaded); // cache the result } } const bool mThreaded; const bool mWorldSpace; const InGridType& mInputGrid; typename OutGridType::Ptr mOutputGrid; InterruptT* mInterrupt; const MaskGridType* mMask; }; const InGridType& mInputGrid; InterruptT* mInterrupt; const MaskGridType* mMask; }; // end of Cpt class //////////////////////////////////////// /// @brief Compute the curl of a vector grid. 
template< typename GridT, typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type, typename InterruptT = util::NullInterrupter> class Curl { public: typedef GridT InGridType; typedef GridT OutGridType; Curl(const GridT& grid, InterruptT* interrupt = nullptr): mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr) { } Curl(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr): mInputGrid(grid), mInterrupt(interrupt), mMask(&mask) { } typename GridT::Ptr process(bool threaded = true) { Functor functor(mInputGrid, mMask, threaded, mInterrupt); processTypedMap(mInputGrid.transform(), functor); if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT); return functor.mOutputGrid; } private: struct Functor { Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt): mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {} template<typename MapT> void operator()(const MapT& map) { typedef math::Curl<MapT, math::CD_2ND> OpT; gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT> op(mInputGrid, mMask, map, mInterrupt); mOutputGrid = op.process(mThreaded); // cache the result } const bool mThreaded; const GridT& mInputGrid; typename GridT::Ptr mOutputGrid; InterruptT* mInterrupt; const MaskGridType* mMask; }; // Private Functor const GridT& mInputGrid; InterruptT* mInterrupt; const MaskGridType* mMask; }; // end of Curl class //////////////////////////////////////// /// @brief Compute the divergence of a vector grid. 
template< typename InGridT, typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type, typename InterruptT = util::NullInterrupter> class Divergence { public: typedef InGridT InGridType; typedef typename VectorToScalarConverter<InGridT>::Type OutGridType; Divergence(const InGridT& grid, InterruptT* interrupt = nullptr): mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr) { } Divergence(const InGridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr): mInputGrid(grid), mInterrupt(interrupt), mMask(&mask) { } typename OutGridType::Ptr process(bool threaded = true) { if (mInputGrid.getGridClass() == GRID_STAGGERED) { Functor<math::FD_1ST> functor(mInputGrid, mMask, threaded, mInterrupt); processTypedMap(mInputGrid.transform(), functor); return functor.mOutputGrid; } else { Functor<math::CD_2ND> functor(mInputGrid, mMask, threaded, mInterrupt); processTypedMap(mInputGrid.transform(), functor); return functor.mOutputGrid; } } protected: template<math::DScheme DiffScheme> struct Functor { Functor(const InGridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt): mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {} template<typename MapT> void operator()(const MapT& map) { typedef math::Divergence<MapT, DiffScheme> OpT; gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, OpT, InterruptT> op(mInputGrid, mMask, map, mInterrupt); mOutputGrid = op.process(mThreaded); // cache the result } const bool mThreaded; const InGridType& mInputGrid; typename OutGridType::Ptr mOutputGrid; InterruptT* mInterrupt; const MaskGridType* mMask; }; // Private Functor const InGridType& mInputGrid; InterruptT* mInterrupt; const MaskGridType* mMask; }; // end of Divergence class //////////////////////////////////////// /// @brief Compute the gradient of a scalar grid. 
template< typename InGridT, typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type, typename InterruptT = util::NullInterrupter> class Gradient { public: typedef InGridT InGridType; typedef typename ScalarToVectorConverter<InGridT>::Type OutGridType; Gradient(const InGridT& grid, InterruptT* interrupt = nullptr): mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr) { } Gradient(const InGridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr): mInputGrid(grid), mInterrupt(interrupt), mMask(&mask) { } typename OutGridType::Ptr process(bool threaded = true) { Functor functor(mInputGrid, mMask, threaded, mInterrupt); processTypedMap(mInputGrid.transform(), functor); if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT); return functor.mOutputGrid; } protected: struct Functor { Functor(const InGridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt): mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {} template<typename MapT> void operator()(const MapT& map) { typedef math::Gradient<MapT, math::CD_2ND> OpT; gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, OpT, InterruptT> op(mInputGrid, mMask, map, mInterrupt); mOutputGrid = op.process(mThreaded); // cache the result } const bool mThreaded; const InGridT& mInputGrid; typename OutGridType::Ptr mOutputGrid; InterruptT* mInterrupt; const MaskGridType* mMask; }; // Private Functor const InGridT& mInputGrid; InterruptT* mInterrupt; const MaskGridType* mMask; }; // end of Gradient class //////////////////////////////////////// template< typename GridT, typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type, typename InterruptT = util::NullInterrupter> class Laplacian { public: typedef GridT InGridType; typedef GridT OutGridType; Laplacian(const GridT& grid, InterruptT* interrupt = nullptr): mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr) { } Laplacian(const GridT& grid, const MaskGridType& mask, 
InterruptT* interrupt = nullptr): mInputGrid(grid), mInterrupt(interrupt), mMask(&mask) { } typename GridT::Ptr process(bool threaded = true) { Functor functor(mInputGrid, mMask, threaded, mInterrupt); processTypedMap(mInputGrid.transform(), functor); if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT); return functor.mOutputGrid; } protected: struct Functor { Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt): mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {} template<typename MapT> void operator()(const MapT& map) { typedef math::Laplacian<MapT, math::CD_SECOND> OpT; gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT> op(mInputGrid, mMask, map, mInterrupt); mOutputGrid = op.process(mThreaded); // cache the result } const bool mThreaded; const GridT& mInputGrid; typename GridT::Ptr mOutputGrid; InterruptT* mInterrupt; const MaskGridType* mMask; }; // Private Functor const GridT& mInputGrid; InterruptT* mInterrupt; const MaskGridType* mMask; }; // end of Laplacian class //////////////////////////////////////// template< typename GridT, typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type, typename InterruptT = util::NullInterrupter> class MeanCurvature { public: typedef GridT InGridType; typedef GridT OutGridType; MeanCurvature(const GridT& grid, InterruptT* interrupt = nullptr): mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr) { } MeanCurvature(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr): mInputGrid(grid), mInterrupt(interrupt), mMask(&mask) { } typename GridT::Ptr process(bool threaded = true) { Functor functor(mInputGrid, mMask, threaded, mInterrupt); processTypedMap(mInputGrid.transform(), functor); if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT); return functor.mOutputGrid; } protected: struct Functor { Functor(const GridT& grid, const MaskGridType* mask, bool threaded, 
InterruptT* interrupt): mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {} template<typename MapT> void operator()(const MapT& map) { typedef math::MeanCurvature<MapT, math::CD_SECOND, math::CD_2ND> OpT; gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT> op(mInputGrid, mMask, map, mInterrupt); mOutputGrid = op.process(mThreaded); // cache the result } const bool mThreaded; const GridT& mInputGrid; typename GridT::Ptr mOutputGrid; InterruptT* mInterrupt; const MaskGridType* mMask; }; // Private Functor const GridT& mInputGrid; InterruptT* mInterrupt; const MaskGridType* mMask; }; // end of MeanCurvature class //////////////////////////////////////// template< typename InGridT, typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type, typename InterruptT = util::NullInterrupter> class Magnitude { public: typedef InGridT InGridType; typedef typename VectorToScalarConverter<InGridT>::Type OutGridType; Magnitude(const InGridType& grid, InterruptT* interrupt = nullptr): mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr) { } Magnitude(const InGridType& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr): mInputGrid(grid), mInterrupt(interrupt), mMask(&mask) { } typename OutGridType::Ptr process(bool threaded = true) { Functor functor(mInputGrid, mMask, threaded, mInterrupt); processTypedMap(mInputGrid.transform(), functor); return functor.mOutputGrid; } protected: struct OpT { template<typename MapT, typename AccT> static typename OutGridType::ValueType result(const MapT&, const AccT& acc, const Coord& xyz) { return acc.getValue(xyz).length();} }; struct Functor { Functor(const InGridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt): mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {} template<typename MapT> void operator()(const MapT& map) { gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, OpT, InterruptT> op(mInputGrid, mMask, map, 
mInterrupt, /*densify=*/false); mOutputGrid = op.process(mThreaded); // cache the result } const bool mThreaded; const InGridType& mInputGrid; typename OutGridType::Ptr mOutputGrid; InterruptT* mInterrupt; const MaskGridType* mMask; }; // Private Functor const InGridType& mInputGrid; InterruptT* mInterrupt; const MaskGridType* mMask; }; // end of Magnitude class //////////////////////////////////////// template< typename GridT, typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type, typename InterruptT = util::NullInterrupter> class Normalize { public: typedef GridT InGridType; typedef GridT OutGridType; Normalize(const GridT& grid, InterruptT* interrupt = nullptr): mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr) { } Normalize(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr): mInputGrid(grid), mInterrupt(interrupt), mMask(&mask) { } typename GridT::Ptr process(bool threaded = true) { Functor functor(mInputGrid, mMask, threaded, mInterrupt); processTypedMap(mInputGrid.transform(), functor); if (typename GridT::Ptr outGrid = functor.mOutputGrid) { const VecType vecType = mInputGrid.getVectorType(); if (vecType == VEC_COVARIANT) { outGrid->setVectorType(VEC_COVARIANT_NORMALIZE); } else { outGrid->setVectorType(vecType); } } return functor.mOutputGrid; } protected: struct OpT { template<typename MapT, typename AccT> static typename OutGridType::ValueType result(const MapT&, const AccT& acc, const Coord& xyz) { typename OutGridType::ValueType vec = acc.getValue(xyz); if ( !vec.normalize() ) vec.setZero(); return vec; } }; struct Functor { Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt): mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {} template<typename MapT> void operator()(const MapT& map) { gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT> op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false); mOutputGrid = 
op.process(mThreaded); // cache the result } const bool mThreaded; const GridT& mInputGrid; typename GridT::Ptr mOutputGrid; InterruptT* mInterrupt; const MaskGridType* mMask; }; // Private Functor const GridT& mInputGrid; InterruptT* mInterrupt; const MaskGridType* mMask; }; // end of Normalize class //////////////////////////////////////// template<typename GridType, typename InterruptT> inline typename ScalarToVectorConverter<GridType>::Type::Ptr cpt(const GridType& grid, bool threaded, InterruptT* interrupt) { Cpt<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt); return op.process(threaded); } template<typename GridType, typename MaskT, typename InterruptT> inline typename ScalarToVectorConverter<GridType>::Type::Ptr cpt(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt) { Cpt<GridType, MaskT, InterruptT> op(grid, mask, interrupt); return op.process(threaded); } template<typename GridType, typename InterruptT> inline typename GridType::Ptr curl(const GridType& grid, bool threaded, InterruptT* interrupt) { Curl<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt); return op.process(threaded); } template<typename GridType, typename MaskT, typename InterruptT> inline typename GridType::Ptr curl(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt) { Curl<GridType, MaskT, InterruptT> op(grid, mask, interrupt); return op.process(threaded); } template<typename GridType, typename InterruptT> inline typename VectorToScalarConverter<GridType>::Type::Ptr divergence(const GridType& grid, bool threaded, InterruptT* interrupt) { Divergence<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt); return op.process(threaded); } template<typename GridType, typename MaskT, typename InterruptT> inline typename VectorToScalarConverter<GridType>::Type::Ptr divergence(const GridType& grid, const MaskT& mask, bool threaded, 
InterruptT* interrupt) { Divergence<GridType, MaskT, InterruptT> op(grid, mask, interrupt); return op.process(threaded); } template<typename GridType, typename InterruptT> inline typename ScalarToVectorConverter<GridType>::Type::Ptr gradient(const GridType& grid, bool threaded, InterruptT* interrupt) { Gradient<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt); return op.process(threaded); } template<typename GridType, typename MaskT, typename InterruptT> inline typename ScalarToVectorConverter<GridType>::Type::Ptr gradient(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt) { Gradient<GridType, MaskT, InterruptT> op(grid, mask, interrupt); return op.process(threaded); } template<typename GridType, typename InterruptT> inline typename GridType::Ptr laplacian(const GridType& grid, bool threaded, InterruptT* interrupt) { Laplacian<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt); return op.process(threaded); } template<typename GridType, typename MaskT, typename InterruptT> inline typename GridType::Ptr laplacian(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt) { Laplacian<GridType, MaskT, InterruptT> op(grid, mask, interrupt); return op.process(threaded); } template<typename GridType, typename InterruptT> inline typename GridType::Ptr meanCurvature(const GridType& grid, bool threaded, InterruptT* interrupt) { MeanCurvature<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt); return op.process(threaded); } template<typename GridType, typename MaskT, typename InterruptT> inline typename GridType::Ptr meanCurvature(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt) { MeanCurvature<GridType, MaskT, InterruptT> op(grid, mask, interrupt); return op.process(threaded); } template<typename GridType, typename InterruptT> inline typename VectorToScalarConverter<GridType>::Type::Ptr 
magnitude(const GridType& grid, bool threaded, InterruptT* interrupt) { Magnitude<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt); return op.process(threaded); } template<typename GridType, typename MaskT, typename InterruptT> inline typename VectorToScalarConverter<GridType>::Type::Ptr magnitude(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt) { Magnitude<GridType, MaskT, InterruptT> op(grid, mask, interrupt); return op.process(threaded); } template<typename GridType, typename InterruptT> inline typename GridType::Ptr normalize(const GridType& grid, bool threaded, InterruptT* interrupt) { Normalize<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt); return op.process(threaded); } template<typename GridType, typename MaskT, typename InterruptT> inline typename GridType::Ptr normalize(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt) { Normalize<GridType, MaskT, InterruptT> op(grid, mask, interrupt); return op.process(threaded); } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
39,697
C
35.621771
100
0.687709
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/VolumeToMesh.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

/// @file VolumeToMesh.h
///
/// @brief Extract polygonal surfaces from scalar volumes.
///
/// @author Mihai Alden

#ifndef OPENVDB_TOOLS_VOLUME_TO_MESH_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_VOLUME_TO_MESH_HAS_BEEN_INCLUDED

#include <openvdb/Platform.h>
#include <openvdb/math/Operators.h> // for ISGradient
#include <openvdb/tree/ValueAccessor.h>
#include <openvdb/util/Util.h> // for INVALID_IDX

#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <tbb/parallel_reduce.h>
// NOTE(review): tbb::task_scheduler_init is deprecated in oneTBB; only used
// below to pick a parallel grain size — confirm against the project's TBB version.
#include <tbb/task_scheduler_init.h>

#include <cmath> // for std::isfinite()
#include <map>
#include <memory>
#include <set>
#include <type_traits>
#include <vector>


namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {


////////////////////////////////////////


// Wrapper functions for the VolumeToMesh converter


/// @brief Uniformly mesh any scalar grid that has a continuous isosurface.
///
/// @param grid a scalar grid to mesh
/// @param points output list of world space points
/// @param quads output quad index list
/// @param isovalue determines which isosurface to mesh
///
/// @throw TypeError if @a grid does not have a scalar value type
template<typename GridType>
inline void
volumeToMesh(
    const GridType& grid,
    std::vector<Vec3s>& points,
    std::vector<Vec4I>& quads,
    double isovalue = 0.0);


/// @brief Adaptively mesh any scalar grid that has a continuous isosurface.
///
/// @param grid a scalar grid to mesh
/// @param points output list of world space points
/// @param triangles output triangle index list
/// @param quads output quad index list
/// @param isovalue determines which isosurface to mesh
/// @param adaptivity surface adaptivity threshold [0 to 1]
/// @param relaxDisorientedTriangles toggle relaxing disoriented triangles during
///                                  adaptive meshing.
///
/// @throw TypeError if @a grid does not have a scalar value type
template<typename GridType>
inline void
volumeToMesh(
    const GridType& grid,
    std::vector<Vec3s>& points,
    std::vector<Vec3I>& triangles,
    std::vector<Vec4I>& quads,
    double isovalue = 0.0,
    double adaptivity = 0.0,
    bool relaxDisorientedTriangles = true);


////////////////////////////////////////


/// @brief Polygon flags, used for reference based meshing.
enum { POLYFLAG_EXTERIOR = 0x1, POLYFLAG_FRACTURE_SEAM = 0x2, POLYFLAG_SUBDIVIDED = 0x4 };


/// @brief Collection of quads and triangles
class PolygonPool
{
public:

    inline PolygonPool();
    inline PolygonPool(const size_t numQuads, const size_t numTriangles);

    inline void copy(const PolygonPool& rhs);

    inline void resetQuads(size_t size);
    inline void clearQuads();

    inline void resetTriangles(size_t size);
    inline void clearTriangles();


    // polygon accessor methods

    const size_t& numQuads() const { return mNumQuads; }

    openvdb::Vec4I& quad(size_t n) { return mQuads[n]; }
    const openvdb::Vec4I& quad(size_t n) const { return mQuads[n]; }


    const size_t& numTriangles() const { return mNumTriangles; }

    openvdb::Vec3I& triangle(size_t n) { return mTriangles[n]; }
    const openvdb::Vec3I& triangle(size_t n) const { return mTriangles[n]; }


    // polygon flags accessor methods

    char& quadFlags(size_t n) { return mQuadFlags[n]; }
    const char& quadFlags(size_t n) const { return mQuadFlags[n]; }

    char& triangleFlags(size_t n) { return mTriangleFlags[n]; }
    const char& triangleFlags(size_t n) const { return mTriangleFlags[n]; }


    // reduce the polygon containers, n has to
    // be smaller than the current container size.

    inline bool trimQuads(const size_t n, bool reallocate = false);
    // NOTE(review): "Trinagles" is a long-standing misspelling of "Triangles";
    // it is part of the public API, so the name must not be corrected here.
    inline bool trimTrinagles(const size_t n, bool reallocate = false);

private:
    // disallow copy by assignment
    void operator=(const PolygonPool&) {}

    size_t mNumQuads, mNumTriangles;
    std::unique_ptr<openvdb::Vec4I[]> mQuads;
    std::unique_ptr<openvdb::Vec3I[]> mTriangles;
    std::unique_ptr<char[]> mQuadFlags, mTriangleFlags;
};


/// @{
/// @brief Point and primitive list types.
using PointList = std::unique_ptr<openvdb::Vec3s[]>;
using PolygonPoolList = std::unique_ptr<PolygonPool[]>;
/// @}


////////////////////////////////////////


/// @brief Mesh any scalar grid that has a continuous isosurface.
struct VolumeToMesh
{

    /// @param isovalue                   Determines which isosurface to mesh.
    /// @param adaptivity                 Adaptivity threshold [0 to 1]
    /// @param relaxDisorientedTriangles  Toggle relaxing disoriented triangles during
    ///                                   adaptive meshing.
    VolumeToMesh(double isovalue = 0, double adaptivity = 0,
        bool relaxDisorientedTriangles = true);

    //////////

    /// @{
    // Mesh data accessors

    size_t pointListSize() const { return mPointListSize; }
    PointList& pointList() { return mPoints; }
    const PointList& pointList() const { return mPoints; }

    size_t polygonPoolListSize() const { return mPolygonPoolListSize; }
    PolygonPoolList& polygonPoolList() { return mPolygons; }
    const PolygonPoolList& polygonPoolList() const { return mPolygons; }

    std::vector<uint8_t>& pointFlags() { return mPointFlags; }
    const std::vector<uint8_t>& pointFlags() const { return mPointFlags; }
    /// @}

    //////////

    /// @brief Main call
    /// @note Call with scalar typed grid.
    template<typename InputGridType>
    void operator()(const InputGridType&);

    //////////

    /// @brief  When surfacing fractured SDF fragments, the original unfractured
    ///         SDF grid can be used to eliminate seam lines and tag polygons that are
    ///         coincident with the reference surface with the @c POLYFLAG_EXTERIOR
    ///         flag and polygons that are in proximity to the seam lines with the
    ///         @c POLYFLAG_FRACTURE_SEAM flag. (The performance cost for using this
    ///         reference based scheme compared to the regular meshing scheme is
    ///         approximately 15% for the first fragment and neglect-able for
    ///         subsequent fragments.)
    ///
    /// @note   Attributes from the original asset such as uv coordinates, normals etc.
    ///         are typically transfered to polygons that are marked with the
    ///         @c POLYFLAG_EXTERIOR flag. Polygons that are not marked with this flag
    ///         are interior to reference surface and might need projected UV coordinates
    ///         or a different material. Polygons marked as @c POLYFLAG_FRACTURE_SEAM can
    ///         be used to drive secondary elements such as debris and dust in a FX pipeline.
    ///
    /// @param grid           reference surface grid of @c GridT type.
    /// @param secAdaptivity  Secondary adaptivity threshold [0 to 1]. Used in regions
    ///                       that do not exist in the reference grid. (Parts of the
    ///                       fragment surface that are not coincident with the
    ///                       reference surface.)
    void setRefGrid(const GridBase::ConstPtr& grid, double secAdaptivity = 0);

    /// @param mask A boolean grid whose active topology defines the region to mesh.
    /// @param invertMask Toggle to mesh the complement of the mask.
    /// @note The mask's tree configuration has to match @c GridT's tree configuration.
    void setSurfaceMask(const GridBase::ConstPtr& mask, bool invertMask = false);

    /// @param grid A scalar grid used as a spatial multiplier for the adaptivity threshold.
    /// @note The grid's tree configuration has to match @c GridT's tree configuration.
    void setSpatialAdaptivity(const GridBase::ConstPtr& grid);

    /// @param tree A boolean tree whose active topology defines the adaptivity mask.
    /// @note The tree configuration has to match @c GridT's tree configuration.
    void setAdaptivityMask(const TreeBase::ConstPtr& tree);

private:
    // Disallow copying
    VolumeToMesh(const VolumeToMesh&);
    VolumeToMesh& operator=(const VolumeToMesh&);


    PointList mPoints;
    PolygonPoolList mPolygons;

    size_t mPointListSize, mSeamPointListSize, mPolygonPoolListSize;
    double mIsovalue, mPrimAdaptivity, mSecAdaptivity;

    GridBase::ConstPtr mRefGrid, mSurfaceMaskGrid, mAdaptivityGrid;
    TreeBase::ConstPtr mAdaptivityMaskTree;

    TreeBase::Ptr mRefSignTree, mRefIdxTree;

    bool mInvertSurfaceMask, mRelaxDisorientedTriangles;

    std::unique_ptr<uint32_t[]> mQuantizedSeamPoints;
    std::vector<uint8_t> mPointFlags;
}; // struct VolumeToMesh


////////////////////////////////////////


/// @brief  Given a set of tangent elements, @c points with corresponding @c normals,
///         this method returns the intersection point of all tangent elements.
///
/// @note   Used to extract surfaces with sharp edges and corners from volume data,
///         see the following paper for details: "Feature Sensitive Surface
///         Extraction from Volume Data, Kobbelt et al. 2001".
inline Vec3d findFeaturePoint(
    const std::vector<Vec3d>& points,
    const std::vector<Vec3d>& normals)
{
    using Mat3d = math::Mat3d;

    Vec3d avgPos(0.0);

    // Empty input: return the (zero) centroid rather than dividing by zero.
    if (points.empty()) return avgPos;

    for (size_t n = 0, N = points.size(); n < N; ++n) {
        avgPos += points[n];
    }

    avgPos /= double(points.size());

    // Unique components of the 3x3 A^TA matrix, where A is
    // the matrix of normals.
    double m00=0,m01=0,m02=0,
           m11=0,m12=0,
           m22=0;

    // The rhs vector, A^Tb, where b = n dot p
    Vec3d rhs(0.0);

    for (size_t n = 0, N = points.size(); n < N; ++n) {

        const Vec3d& n_ref = normals[n];

        // A^TA
        m00 += n_ref[0] * n_ref[0]; // diagonal
        m11 += n_ref[1] * n_ref[1];
        m22 += n_ref[2] * n_ref[2];

        m01 += n_ref[0] * n_ref[1]; // Upper-tri
        m02 += n_ref[0] * n_ref[2];
        m12 += n_ref[1] * n_ref[2];

        // A^Tb (centered around the origin)
        rhs += n_ref * n_ref.dot(points[n] - avgPos);
    }

    Mat3d A(m00,m01,m02,
            m01,m11,m12,
            m02,m12,m22);

    // NOTE(review): direct-inverse path kept disabled in favor of the more
    // robust pseudo-inverse below — presumably because a near-singular A
    // (coplanar normals) makes the determinant test unreliable.
    /*
    // Inverse
    const double det = A.det();
    if (det > 0.01) {
        Mat3d A_inv = A.adjoint();
        A_inv *= (1.0 / det);

        return avgPos + A_inv * rhs;
    }
    */

    // Compute the pseudo inverse

    math::Mat3d eigenVectors;
    Vec3d eigenValues;

    diagonalizeSymmetricMatrix(A, eigenVectors, eigenValues, 300);

    Mat3d D = Mat3d::identity();


    double tolerance = std::max(std::abs(eigenValues[0]), std::abs(eigenValues[1]));
    tolerance = std::max(tolerance, std::abs(eigenValues[2]));
    tolerance *= 0.01;

    int clamped = 0;
    for (int i = 0; i < 3; ++i ) {
        // Clamp (zero out) eigenvalues that are negligible relative to the
        // largest one; their directions are unconstrained by the input normals.
        if (std::abs(eigenValues[i]) < tolerance) {
            D[i][i] = 0.0;
            ++clamped;
        } else {
            D[i][i] = 1.0 / eigenValues[i];
        }
    }

    // Assemble the pseudo inverse and calc. the intersection point
    if (clamped < 3) {
        Mat3d pseudoInv = eigenVectors * D * eigenVectors.transpose();
        return avgPos + pseudoInv * rhs;
    }

    // All eigenvalues clamped: no constraint at all, fall back to the centroid.
    return avgPos;
}


////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////


// Internal utility objects and implementation details

namespace volume_to_mesh_internal {

/// @brief TBB body that fills a contiguous array with a constant value.
template<typename ValueType>
struct FillArray
{
    FillArray(ValueType* array, const ValueType& v) : mArray(array), mValue(v) { }

    void operator()(const tbb::blocked_range<size_t>& range) const {
        const ValueType v = mValue;
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            mArray[n] = v;
        }
    }

    ValueType * const mArray;
    const ValueType mValue;
};


/// @brief Fill @a array[0, length) with @a val in parallel.
template<typename ValueType>
inline void
fillArray(ValueType* array, const ValueType& val, const size_t length)
{
    // Grain size of at least 1024 keeps per-task overhead low for small arrays.
    const auto grainSize = std::max<size_t>(
        length / tbb::task_scheduler_init::default_num_threads(), 1024);
    const tbb::blocked_range<size_t> range(0, length, grainSize);
    tbb::parallel_for(range, FillArray<ValueType>(array, val), tbb::simple_partitioner());
}


/// @brief  Bit-flags used to classify cells.
enum { SIGNS = 0xFF, EDGES = 0xE00, INSIDE = 0x100,
       XEDGE = 0x200, YEDGE = 0x400, ZEDGE = 0x800, SEAM = 0x1000};


/// @brief Used to quickly determine if a given cell is adaptable.
/// Indexed by the 8-bit corner-sign configuration produced by evalCellSigns().
const bool sAdaptable[256] = {
    1,1,1,1,1,0,1,1,1,1,0,1,1,1,1,1,1,1,0,1,0,0,0,1,0,1,0,1,0,1,0,1,
    1,0,1,1,0,0,1,1,0,0,0,1,0,0,1,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,0,1,
    1,0,0,0,1,0,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    1,0,1,1,1,0,1,1,0,0,0,0,1,0,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,1,
    1,0,0,0,0,0,0,0,1,1,0,1,1,1,1,1,1,1,0,1,0,0,0,0,1,1,0,1,1,1,0,1,
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,0,1,0,0,0,1,
    1,0,0,0,1,0,1,0,1,1,0,0,1,1,1,1,1,1,0,0,1,0,0,0,1,1,0,0,1,1,0,1,
    1,0,1,0,1,0,1,0,1,0,0,0,1,0,1,1,1,1,1,1,1,0,1,1,1,1,0,1,1,1,1,1};


/// @brief Contains the ambiguous face index for certain cell configuration.
const unsigned char sAmbiguousFace[256] = { 0,0,0,0,0,5,0,0,0,0,5,0,0,0,0,0,0,0,1,0,0,5,1,0,4,0,0,0,4,0,0,0, 0,1,0,0,2,0,0,0,0,1,5,0,2,0,0,0,0,0,0,0,2,0,0,0,4,0,0,0,0,0,0,0, 0,0,2,2,0,5,0,0,3,3,0,0,0,0,0,0,6,6,0,0,6,0,0,0,0,0,0,0,0,0,0,0, 0,1,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,4,0,4,3,0,3,0,0,0,5,0,0,0,0,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0, 6,0,6,0,0,0,0,0,6,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,4,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; /// @brief Lookup table for different cell sign configurations. The first entry specifies /// the total number of points that need to be generated inside a cell and the /// remaining 12 entries indicate different edge groups. const unsigned char sEdgeGroupTable[256][13] = { {0,0,0,0,0,0,0,0,0,0,0,0,0},{1,1,0,0,1,0,0,0,0,1,0,0,0},{1,1,1,0,0,0,0,0,0,0,1,0,0}, {1,0,1,0,1,0,0,0,0,1,1,0,0},{1,0,1,1,0,0,0,0,0,0,0,1,0},{1,1,1,1,1,0,0,0,0,1,0,1,0}, {1,1,0,1,0,0,0,0,0,0,1,1,0},{1,0,0,1,1,0,0,0,0,1,1,1,0},{1,0,0,1,1,0,0,0,0,0,0,0,1}, {1,1,0,1,0,0,0,0,0,1,0,0,1},{1,1,1,1,1,0,0,0,0,0,1,0,1},{1,0,1,1,0,0,0,0,0,1,1,0,1}, {1,0,1,0,1,0,0,0,0,0,0,1,1},{1,1,1,0,0,0,0,0,0,1,0,1,1},{1,1,0,0,1,0,0,0,0,0,1,1,1}, {1,0,0,0,0,0,0,0,0,1,1,1,1},{1,0,0,0,0,1,0,0,1,1,0,0,0},{1,1,0,0,1,1,0,0,1,0,0,0,0}, {1,1,1,0,0,1,0,0,1,1,1,0,0},{1,0,1,0,1,1,0,0,1,0,1,0,0},{2,0,1,1,0,2,0,0,2,2,0,1,0}, {1,1,1,1,1,1,0,0,1,0,0,1,0},{1,1,0,1,0,1,0,0,1,1,1,1,0},{1,0,0,1,1,1,0,0,1,0,1,1,0}, {1,0,0,1,1,1,0,0,1,1,0,0,1},{1,1,0,1,0,1,0,0,1,0,0,0,1},{2,2,1,1,2,1,0,0,1,2,1,0,1}, {1,0,1,1,0,1,0,0,1,0,1,0,1},{1,0,1,0,1,1,0,0,1,1,0,1,1},{1,1,1,0,0,1,0,0,1,0,0,1,1}, {2,1,0,0,1,2,0,0,2,1,2,2,2},{1,0,0,0,0,1,0,0,1,0,1,1,1},{1,0,0,0,0,1,1,0,0,0,1,0,0}, {1,1,0,0,1,1,1,0,0,1,1,0,0},{1,1,1,0,0,1,1,0,0,0,0,0,0},{1,0,1,0,1,1,1,0,0,1,0,0,0}, {1,0,1,1,0,1,1,0,0,0,1,1,0},{2,2,2,1,1,1,1,0,0,1,2,1,0},{1,1,0,1,0,1,1,0,0,0,0,1,0}, 
{1,0,0,1,1,1,1,0,0,1,0,1,0},{2,0,0,2,2,1,1,0,0,0,1,0,2},{1,1,0,1,0,1,1,0,0,1,1,0,1}, {1,1,1,1,1,1,1,0,0,0,0,0,1},{1,0,1,1,0,1,1,0,0,1,0,0,1},{1,0,1,0,1,1,1,0,0,0,1,1,1}, {2,1,1,0,0,2,2,0,0,2,1,2,2},{1,1,0,0,1,1,1,0,0,0,0,1,1},{1,0,0,0,0,1,1,0,0,1,0,1,1}, {1,0,0,0,0,0,1,0,1,1,1,0,0},{1,1,0,0,1,0,1,0,1,0,1,0,0},{1,1,1,0,0,0,1,0,1,1,0,0,0}, {1,0,1,0,1,0,1,0,1,0,0,0,0},{1,0,1,1,0,0,1,0,1,1,1,1,0},{2,1,1,2,2,0,2,0,2,0,1,2,0}, {1,1,0,1,0,0,1,0,1,1,0,1,0},{1,0,0,1,1,0,1,0,1,0,0,1,0},{1,0,0,1,1,0,1,0,1,1,1,0,1}, {1,1,0,1,0,0,1,0,1,0,1,0,1},{2,1,2,2,1,0,2,0,2,1,0,0,2},{1,0,1,1,0,0,1,0,1,0,0,0,1}, {2,0,2,0,2,0,1,0,1,2,2,1,1},{2,2,2,0,0,0,1,0,1,0,2,1,1},{2,2,0,0,2,0,1,0,1,2,0,1,1}, {1,0,0,0,0,0,1,0,1,0,0,1,1},{1,0,0,0,0,0,1,1,0,0,0,1,0},{2,1,0,0,1,0,2,2,0,1,0,2,0}, {1,1,1,0,0,0,1,1,0,0,1,1,0},{1,0,1,0,1,0,1,1,0,1,1,1,0},{1,0,1,1,0,0,1,1,0,0,0,0,0}, {1,1,1,1,1,0,1,1,0,1,0,0,0},{1,1,0,1,0,0,1,1,0,0,1,0,0},{1,0,0,1,1,0,1,1,0,1,1,0,0}, {1,0,0,1,1,0,1,1,0,0,0,1,1},{1,1,0,1,0,0,1,1,0,1,0,1,1},{2,1,2,2,1,0,1,1,0,0,1,2,1}, {2,0,1,1,0,0,2,2,0,2,2,1,2},{1,0,1,0,1,0,1,1,0,0,0,0,1},{1,1,1,0,0,0,1,1,0,1,0,0,1}, {1,1,0,0,1,0,1,1,0,0,1,0,1},{1,0,0,0,0,0,1,1,0,1,1,0,1},{1,0,0,0,0,1,1,1,1,1,0,1,0}, {1,1,0,0,1,1,1,1,1,0,0,1,0},{2,1,1,0,0,2,2,1,1,1,2,1,0},{2,0,2,0,2,1,1,2,2,0,1,2,0}, {1,0,1,1,0,1,1,1,1,1,0,0,0},{2,2,2,1,1,2,2,1,1,0,0,0,0},{2,2,0,2,0,1,1,2,2,2,1,0,0}, {2,0,0,1,1,2,2,1,1,0,2,0,0},{2,0,0,1,1,1,1,2,2,1,0,1,2},{2,2,0,2,0,2,2,1,1,0,0,2,1}, {4,3,2,2,3,4,4,1,1,3,4,2,1},{3,0,2,2,0,1,1,3,3,0,1,2,3},{2,0,2,0,2,2,2,1,1,2,0,0,1}, {2,1,1,0,0,1,1,2,2,0,0,0,2},{3,1,0,0,1,2,2,3,3,1,2,0,3},{2,0,0,0,0,1,1,2,2,0,1,0,2}, {1,0,0,0,0,1,0,1,0,0,1,1,0},{1,1,0,0,1,1,0,1,0,1,1,1,0},{1,1,1,0,0,1,0,1,0,0,0,1,0}, {1,0,1,0,1,1,0,1,0,1,0,1,0},{1,0,1,1,0,1,0,1,0,0,1,0,0},{2,1,1,2,2,2,0,2,0,2,1,0,0}, {1,1,0,1,0,1,0,1,0,0,0,0,0},{1,0,0,1,1,1,0,1,0,1,0,0,0},{1,0,0,1,1,1,0,1,0,0,1,1,1}, {2,2,0,2,0,1,0,1,0,1,2,2,1},{2,2,1,1,2,2,0,2,0,0,0,1,2},{2,0,2,2,0,1,0,1,0,1,0,2,1}, 
{1,0,1,0,1,1,0,1,0,0,1,0,1},{2,2,2,0,0,1,0,1,0,1,2,0,1},{1,1,0,0,1,1,0,1,0,0,0,0,1}, {1,0,0,0,0,1,0,1,0,1,0,0,1},{1,0,0,0,0,0,0,1,1,1,1,1,0},{1,1,0,0,1,0,0,1,1,0,1,1,0}, {1,1,1,0,0,0,0,1,1,1,0,1,0},{1,0,1,0,1,0,0,1,1,0,0,1,0},{1,0,1,1,0,0,0,1,1,1,1,0,0}, {2,2,2,1,1,0,0,1,1,0,2,0,0},{1,1,0,1,0,0,0,1,1,1,0,0,0},{1,0,0,1,1,0,0,1,1,0,0,0,0}, {2,0,0,2,2,0,0,1,1,2,2,2,1},{2,1,0,1,0,0,0,2,2,0,1,1,2},{3,2,1,1,2,0,0,3,3,2,0,1,3}, {2,0,1,1,0,0,0,2,2,0,0,1,2},{2,0,1,0,1,0,0,2,2,1,1,0,2},{2,1,1,0,0,0,0,2,2,0,1,0,2}, {2,1,0,0,1,0,0,2,2,1,0,0,2},{1,0,0,0,0,0,0,1,1,0,0,0,1},{1,0,0,0,0,0,0,1,1,0,0,0,1}, {1,1,0,0,1,0,0,1,1,1,0,0,1},{2,1,1,0,0,0,0,2,2,0,1,0,2},{1,0,1,0,1,0,0,1,1,1,1,0,1}, {1,0,1,1,0,0,0,1,1,0,0,1,1},{2,1,1,2,2,0,0,1,1,1,0,1,2},{1,1,0,1,0,0,0,1,1,0,1,1,1}, {2,0,0,1,1,0,0,2,2,2,2,2,1},{1,0,0,1,1,0,0,1,1,0,0,0,0},{1,1,0,1,0,0,0,1,1,1,0,0,0}, {1,1,1,1,1,0,0,1,1,0,1,0,0},{1,0,1,1,0,0,0,1,1,1,1,0,0},{1,0,1,0,1,0,0,1,1,0,0,1,0}, {1,1,1,0,0,0,0,1,1,1,0,1,0},{1,1,0,0,1,0,0,1,1,0,1,1,0},{1,0,0,0,0,0,0,1,1,1,1,1,0}, {1,0,0,0,0,1,0,1,0,1,0,0,1},{1,1,0,0,1,1,0,1,0,0,0,0,1},{1,1,1,0,0,1,0,1,0,1,1,0,1}, {1,0,1,0,1,1,0,1,0,0,1,0,1},{1,0,1,1,0,1,0,1,0,1,0,1,1},{2,2,2,1,1,2,0,2,0,0,0,2,1}, {2,1,0,1,0,2,0,2,0,1,2,2,1},{2,0,0,2,2,1,0,1,0,0,1,1,2},{1,0,0,1,1,1,0,1,0,1,0,0,0}, {1,1,0,1,0,1,0,1,0,0,0,0,0},{2,1,2,2,1,2,0,2,0,1,2,0,0},{1,0,1,1,0,1,0,1,0,0,1,0,0}, {1,0,1,0,1,1,0,1,0,1,0,1,0},{1,1,1,0,0,1,0,1,0,0,0,1,0},{2,2,0,0,2,1,0,1,0,2,1,1,0}, {1,0,0,0,0,1,0,1,0,0,1,1,0},{1,0,0,0,0,1,1,1,1,0,1,0,1},{2,1,0,0,1,2,1,1,2,2,1,0,1}, {1,1,1,0,0,1,1,1,1,0,0,0,1},{2,0,2,0,2,1,2,2,1,1,0,0,2},{2,0,1,1,0,1,2,2,1,0,1,2,1}, {4,1,1,3,3,2,4,4,2,2,1,4,3},{2,2,0,2,0,2,1,1,2,0,0,1,2},{3,0,0,1,1,2,3,3,2,2,0,3,1}, {1,0,0,1,1,1,1,1,1,0,1,0,0},{2,2,0,2,0,1,2,2,1,1,2,0,0},{2,2,1,1,2,2,1,1,2,0,0,0,0}, {2,0,1,1,0,2,1,1,2,2,0,0,0},{2,0,2,0,2,2,1,1,2,0,2,1,0},{3,1,1,0,0,3,2,2,3,3,1,2,0}, {2,1,0,0,1,1,2,2,1,0,0,2,0},{2,0,0,0,0,2,1,1,2,2,0,1,0},{1,0,0,0,0,0,1,1,0,1,1,0,1}, 
{1,1,0,0,1,0,1,1,0,0,1,0,1},{1,1,1,0,0,0,1,1,0,1,0,0,1},{1,0,1,0,1,0,1,1,0,0,0,0,1}, {2,0,2,2,0,0,1,1,0,2,2,1,2},{3,1,1,2,2,0,3,3,0,0,1,3,2},{2,1,0,1,0,0,2,2,0,1,0,2,1}, {2,0,0,1,1,0,2,2,0,0,0,2,1},{1,0,0,1,1,0,1,1,0,1,1,0,0},{1,1,0,1,0,0,1,1,0,0,1,0,0}, {2,2,1,1,2,0,1,1,0,2,0,0,0},{1,0,1,1,0,0,1,1,0,0,0,0,0},{2,0,1,0,1,0,2,2,0,1,1,2,0}, {2,1,1,0,0,0,2,2,0,0,1,2,0},{2,1,0,0,1,0,2,2,0,1,0,2,0},{1,0,0,0,0,0,1,1,0,0,0,1,0}, {1,0,0,0,0,0,1,0,1,0,0,1,1},{1,1,0,0,1,0,1,0,1,1,0,1,1},{1,1,1,0,0,0,1,0,1,0,1,1,1}, {2,0,2,0,2,0,1,0,1,1,1,2,2},{1,0,1,1,0,0,1,0,1,0,0,0,1},{2,2,2,1,1,0,2,0,2,2,0,0,1}, {1,1,0,1,0,0,1,0,1,0,1,0,1},{2,0,0,2,2,0,1,0,1,1,1,0,2},{1,0,0,1,1,0,1,0,1,0,0,1,0}, {1,1,0,1,0,0,1,0,1,1,0,1,0},{2,2,1,1,2,0,2,0,2,0,2,1,0},{2,0,2,2,0,0,1,0,1,1,1,2,0}, {1,0,1,0,1,0,1,0,1,0,0,0,0},{1,1,1,0,0,0,1,0,1,1,0,0,0},{1,1,0,0,1,0,1,0,1,0,1,0,0}, {1,0,0,0,0,0,1,0,1,1,1,0,0},{1,0,0,0,0,1,1,0,0,1,0,1,1},{1,1,0,0,1,1,1,0,0,0,0,1,1}, {2,2,2,0,0,1,1,0,0,2,1,2,2},{2,0,1,0,1,2,2,0,0,0,2,1,1},{1,0,1,1,0,1,1,0,0,1,0,0,1}, {2,1,1,2,2,1,1,0,0,0,0,0,2},{2,1,0,1,0,2,2,0,0,1,2,0,1},{2,0,0,2,2,1,1,0,0,0,1,0,2}, {1,0,0,1,1,1,1,0,0,1,0,1,0},{1,1,0,1,0,1,1,0,0,0,0,1,0},{3,1,2,2,1,3,3,0,0,1,3,2,0}, {2,0,1,1,0,2,2,0,0,0,2,1,0},{1,0,1,0,1,1,1,0,0,1,0,0,0},{1,1,1,0,0,1,1,0,0,0,0,0,0}, {2,2,0,0,2,1,1,0,0,2,1,0,0},{1,0,0,0,0,1,1,0,0,0,1,0,0},{1,0,0,0,0,1,0,0,1,0,1,1,1}, {2,2,0,0,2,1,0,0,1,1,2,2,2},{1,1,1,0,0,1,0,0,1,0,0,1,1},{2,0,1,0,1,2,0,0,2,2,0,1,1}, {1,0,1,1,0,1,0,0,1,0,1,0,1},{3,1,1,3,3,2,0,0,2,2,1,0,3},{1,1,0,1,0,1,0,0,1,0,0,0,1}, {2,0,0,2,2,1,0,0,1,1,0,0,2},{1,0,0,1,1,1,0,0,1,0,1,1,0},{2,1,0,1,0,2,0,0,2,2,1,1,0}, {2,1,2,2,1,1,0,0,1,0,0,2,0},{2,0,1,1,0,2,0,0,2,2,0,1,0},{1,0,1,0,1,1,0,0,1,0,1,0,0}, {2,1,1,0,0,2,0,0,2,2,1,0,0},{1,1,0,0,1,1,0,0,1,0,0,0,0},{1,0,0,0,0,1,0,0,1,1,0,0,0}, {1,0,0,0,0,0,0,0,0,1,1,1,1},{1,1,0,0,1,0,0,0,0,0,1,1,1},{1,1,1,0,0,0,0,0,0,1,0,1,1}, {1,0,1,0,1,0,0,0,0,0,0,1,1},{1,0,1,1,0,0,0,0,0,1,1,0,1},{2,1,1,2,2,0,0,0,0,0,1,0,2}, 
{1,1,0,1,0,0,0,0,0,1,0,0,1},{1,0,0,1,1,0,0,0,0,0,0,0,1},{1,0,0,1,1,0,0,0,0,1,1,1,0}, {1,1,0,1,0,0,0,0,0,0,1,1,0},{2,1,2,2,1,0,0,0,0,1,0,2,0},{1,0,1,1,0,0,0,0,0,0,0,1,0}, {1,0,1,0,1,0,0,0,0,1,1,0,0},{1,1,1,0,0,0,0,0,0,0,1,0,0},{1,1,0,0,1,0,0,0,0,1,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0}}; //////////////////////////////////////// inline bool isPlanarQuad( const Vec3d& p0, const Vec3d& p1, const Vec3d& p2, const Vec3d& p3, double epsilon = 0.001) { // compute representative plane Vec3d normal = (p2-p0).cross(p1-p3); normal.normalize(); const Vec3d centroid = (p0 + p1 + p2 + p3); const double d = centroid.dot(normal) * 0.25; // test vertice distance to plane double absDist = std::abs(p0.dot(normal) - d); if (absDist > epsilon) return false; absDist = std::abs(p1.dot(normal) - d); if (absDist > epsilon) return false; absDist = std::abs(p2.dot(normal) - d); if (absDist > epsilon) return false; absDist = std::abs(p3.dot(normal) - d); if (absDist > epsilon) return false; return true; } //////////////////////////////////////// /// @{ /// @brief Utility methods for point quantization. enum { MASK_FIRST_10_BITS = 0x000003FF, MASK_DIRTY_BIT = 0x80000000, MASK_INVALID_BIT = 0x40000000 }; inline uint32_t packPoint(const Vec3d& v) { uint32_t data = 0; // values are expected to be in the [0.0 to 1.0] range. 
assert(!(v.x() > 1.0) && !(v.y() > 1.0) && !(v.z() > 1.0)); assert(!(v.x() < 0.0) && !(v.y() < 0.0) && !(v.z() < 0.0)); data |= (uint32_t(v.x() * 1023.0) & MASK_FIRST_10_BITS) << 20; data |= (uint32_t(v.y() * 1023.0) & MASK_FIRST_10_BITS) << 10; data |= (uint32_t(v.z() * 1023.0) & MASK_FIRST_10_BITS); return data; } inline Vec3d unpackPoint(uint32_t data) { Vec3d v; v.z() = double(data & MASK_FIRST_10_BITS) * 0.0009775171; data = data >> 10; v.y() = double(data & MASK_FIRST_10_BITS) * 0.0009775171; data = data >> 10; v.x() = double(data & MASK_FIRST_10_BITS) * 0.0009775171; return v; } /// @} //////////////////////////////////////// template<typename T> inline bool isBoolValue() { return false; } template<> inline bool isBoolValue<bool>() { return true; } template<typename T> inline bool isInsideValue(T value, T isovalue) { return value < isovalue; } template<> inline bool isInsideValue<bool>(bool value, bool /*isovalue*/) { return value; } template<typename AccessorT> inline void getCellVertexValues(const AccessorT& accessor, Coord ijk, math::Tuple<8, typename AccessorT::ValueType>& values) { values[0] = accessor.getValue(ijk); // i, j, k ++ijk[0]; values[1] = accessor.getValue(ijk); // i+1, j, k ++ijk[2]; values[2] = accessor.getValue(ijk); // i+1, j, k+1 --ijk[0]; values[3] = accessor.getValue(ijk); // i, j, k+1 --ijk[2]; ++ijk[1]; values[4] = accessor.getValue(ijk); // i, j+1, k ++ijk[0]; values[5] = accessor.getValue(ijk); // i+1, j+1, k ++ijk[2]; values[6] = accessor.getValue(ijk); // i+1, j+1, k+1 --ijk[0]; values[7] = accessor.getValue(ijk); // i, j+1, k+1 } template<typename LeafT> inline void getCellVertexValues(const LeafT& leaf, const Index offset, math::Tuple<8, typename LeafT::ValueType>& values) { values[0] = leaf.getValue(offset); // i, j, k values[3] = leaf.getValue(offset + 1); // i, j, k+1 values[4] = leaf.getValue(offset + LeafT::DIM); // i, j+1, k values[7] = leaf.getValue(offset + LeafT::DIM + 1); // i, j+1, k+1 values[1] = 
leaf.getValue(offset + (LeafT::DIM * LeafT::DIM)); // i+1, j, k values[2] = leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + 1); // i+1, j, k+1 values[5] = leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + LeafT::DIM); // i+1, j+1, k values[6] = leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + LeafT::DIM + 1); // i+1, j+1, k+1 } template<typename ValueType> inline uint8_t computeSignFlags(const math::Tuple<8, ValueType>& values, const ValueType iso) { unsigned signs = 0; signs |= isInsideValue(values[0], iso) ? 1u : 0u; signs |= isInsideValue(values[1], iso) ? 2u : 0u; signs |= isInsideValue(values[2], iso) ? 4u : 0u; signs |= isInsideValue(values[3], iso) ? 8u : 0u; signs |= isInsideValue(values[4], iso) ? 16u : 0u; signs |= isInsideValue(values[5], iso) ? 32u : 0u; signs |= isInsideValue(values[6], iso) ? 64u : 0u; signs |= isInsideValue(values[7], iso) ? 128u : 0u; return uint8_t(signs); } /// @brief General method that computes the cell-sign configuration at the given /// @c ijk coordinate. 
template<typename AccessorT>
inline uint8_t
evalCellSigns(const AccessorT& accessor, const Coord& ijk, typename AccessorT::ValueType iso)
{
    unsigned signs = 0;
    Coord coord = ijk; // i, j, k
    // Each corner contributes one bit; bit n is set when corner n is inside
    // the isosurface. The resulting byte indexes the cell-configuration tables.
    if (isInsideValue(accessor.getValue(coord), iso)) signs |= 1u;
    coord[0] += 1; // i+1, j, k
    if (isInsideValue(accessor.getValue(coord), iso)) signs |= 2u;
    coord[2] += 1; // i+1, j, k+1
    if (isInsideValue(accessor.getValue(coord), iso)) signs |= 4u;
    coord[0] = ijk[0]; // i, j, k+1
    if (isInsideValue(accessor.getValue(coord), iso)) signs |= 8u;
    coord[1] += 1; coord[2] = ijk[2]; // i, j+1, k
    if (isInsideValue(accessor.getValue(coord), iso)) signs |= 16u;
    coord[0] += 1; // i+1, j+1, k
    if (isInsideValue(accessor.getValue(coord), iso)) signs |= 32u;
    coord[2] += 1; // i+1, j+1, k+1
    if (isInsideValue(accessor.getValue(coord), iso)) signs |= 64u;
    coord[0] = ijk[0]; // i, j+1, k+1
    if (isInsideValue(accessor.getValue(coord), iso)) signs |= 128u;
    return uint8_t(signs);
}


/// @brief Leaf node optimized method that computes the cell-sign configuration
///        at the given local @c offset
template<typename LeafT>
inline uint8_t
evalCellSigns(const LeafT& leaf, const Index offset, typename LeafT::ValueType iso)
{
    unsigned signs = 0;

    // Same bit layout as the accessor-based overload above; the corner values
    // are addressed by linear buffer offsets (+1 = k, +DIM = j, +DIM*DIM = i).

    // i, j, k
    if (isInsideValue(leaf.getValue(offset), iso)) signs |= 1u;

    // i, j, k+1
    if (isInsideValue(leaf.getValue(offset + 1), iso)) signs |= 8u;

    // i, j+1, k
    if (isInsideValue(leaf.getValue(offset + LeafT::DIM), iso)) signs |= 16u;

    // i, j+1, k+1
    if (isInsideValue(leaf.getValue(offset + LeafT::DIM + 1), iso)) signs |= 128u;

    // i+1, j, k
    if (isInsideValue(leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) ), iso)) signs |= 2u;

    // i+1, j, k+1
    if (isInsideValue(leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + 1), iso)) signs |= 4u;

    // i+1, j+1, k
    if (isInsideValue(leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + LeafT::DIM), iso)) signs |= 32u;

    // i+1, j+1, k+1
    if (isInsideValue(leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + LeafT::DIM + 1), iso)) signs |= 64u;

    return uint8_t(signs);
}


/// @brief  Used to correct topological ambiguities related to two adjacent cells
///         that share an ambiguous face.
template<class AccessorT>
inline void
correctCellSigns(uint8_t& signs, uint8_t face,
    const AccessorT& acc, Coord ijk, typename AccessorT::ValueType iso)
{
    // For each face, step to the neighbouring cell across that face and, if the
    // neighbour's configuration marks the complementary face as ambiguous,
    // invert this cell's sign configuration so the two cells agree.
    switch (int(face)) {
        case 1:
            ijk[2] -= 1;
            if (sAmbiguousFace[evalCellSigns(acc, ijk, iso)] == 3) signs = uint8_t(~signs);
            break;
        case 2:
            ijk[0] += 1;
            if (sAmbiguousFace[evalCellSigns(acc, ijk, iso)] == 4) signs = uint8_t(~signs);
            break;
        case 3:
            ijk[2] += 1;
            if (sAmbiguousFace[evalCellSigns(acc, ijk, iso)] == 1) signs = uint8_t(~signs);
            break;
        case 4:
            ijk[0] -= 1;
            if (sAmbiguousFace[evalCellSigns(acc, ijk, iso)] == 2) signs = uint8_t(~signs);
            break;
        case 5:
            ijk[1] -= 1;
            if (sAmbiguousFace[evalCellSigns(acc, ijk, iso)] == 6) signs = uint8_t(~signs);
            break;
        case 6:
            ijk[1] += 1;
            if (sAmbiguousFace[evalCellSigns(acc, ijk, iso)] == 5) signs = uint8_t(~signs);
            break;
        default:
            break;
    }
}


/// @brief  Returns true when the (dim^3) region anchored at @a ijk cannot be
///         represented by a single coarse cell without changing surface topology.
///         Samples the region's corners, edge midpoints, face centers and the
///         cube center against the isovalue.
template<class AccessorT>
inline bool
isNonManifold(const AccessorT& accessor, const Coord& ijk,
    typename AccessorT::ValueType isovalue, const int dim)
{
    int hDim = dim >> 1;
    bool m, p[8]; // Corner signs

    Coord coord = ijk; // i, j, k
    p[0] = isInsideValue(accessor.getValue(coord), isovalue);
    coord[0] += dim; // i+dim, j, k
    p[1] = isInsideValue(accessor.getValue(coord), isovalue);
    coord[2] += dim; // i+dim, j, k+dim
    p[2] = isInsideValue(accessor.getValue(coord), isovalue);
    coord[0] = ijk[0]; // i, j, k+dim
    p[3] = isInsideValue(accessor.getValue(coord), isovalue);
    coord[1] += dim; coord[2] = ijk[2]; // i, j+dim, k
    p[4] = isInsideValue(accessor.getValue(coord), isovalue);
    coord[0] += dim; // i+dim, j+dim, k
    p[5] = isInsideValue(accessor.getValue(coord), isovalue);
    coord[2] += dim; // i+dim, j+dim, k+dim
    p[6] = isInsideValue(accessor.getValue(coord), isovalue);
    coord[0] = ijk[0]; // i, j+dim, k+dim
    p[7] = isInsideValue(accessor.getValue(coord), isovalue);

    // Check if the corner sign configuration is ambiguous
    unsigned signs = 0;
    if (p[0]) signs |= 1u;
    if (p[1]) signs |= 2u;
    if (p[2]) signs |= 4u;
    if (p[3]) signs |= 8u;
    if (p[4]) signs |= 16u;
    if (p[5]) signs |= 32u;
    if (p[6]) signs |= 64u;
    if (p[7]) signs |= 128u;
    if (!sAdaptable[signs]) return true;

    // Manifold check
    // Evaluate edges
    // A mid-edge sample that disagrees with both of the edge's corner signs
    // means the surface crosses the edge more than once -> non-manifold.
    int i = ijk[0], ip = ijk[0] + hDim, ipp = ijk[0] + dim;
    int j = ijk[1], jp = ijk[1] + hDim, jpp = ijk[1] + dim;
    int k = ijk[2], kp = ijk[2] + hDim, kpp = ijk[2] + dim;

    // edge 1
    coord.reset(ip, j, k);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[0] != m && p[1] != m) return true;

    // edge 2
    coord.reset(ipp, j, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[1] != m && p[2] != m) return true;

    // edge 3
    coord.reset(ip, j, kpp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[2] != m && p[3] != m) return true;

    // edge 4
    coord.reset(i, j, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[0] != m && p[3] != m) return true;

    // edge 5
    coord.reset(ip, jpp, k);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[4] != m && p[5] != m) return true;

    // edge 6
    coord.reset(ipp, jpp, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[5] != m && p[6] != m) return true;

    // edge 7
    coord.reset(ip, jpp, kpp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[6] != m && p[7] != m) return true;

    // edge 8
    coord.reset(i, jpp, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[7] != m && p[4] != m) return true;

    // edge 9
    coord.reset(i, jp, k);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[0] != m && p[4] != m) return true;

    // edge 10
    coord.reset(ipp, jp, k);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[1] != m && p[5] != m) return true;

    // edge 11
    coord.reset(ipp, jp, kpp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[2] != m && p[6] != m) return true;

    // edge 12
    coord.reset(i, jp, kpp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[3] != m && p[7] != m) return true;

    // Evaluate faces
    // Likewise, a face-center sample that disagrees with all four of the
    // face's corner signs indicates detail inside the face.

    // face 1
    coord.reset(ip, jp, k);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[0] != m && p[1] != m && p[4] != m && p[5] != m) return true;

    // face 2
    coord.reset(ipp, jp, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[1] != m && p[2] != m && p[5] != m && p[6] != m) return true;

    // face 3
    coord.reset(ip, jp, kpp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[2] != m && p[3] != m && p[6] != m && p[7] != m) return true;

    // face 4
    coord.reset(i, jp, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[0] != m && p[3] != m && p[4] != m && p[7] != m) return true;

    // face 5
    coord.reset(ip, j, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[0] != m && p[1] != m && p[2] != m && p[3] != m) return true;

    // face 6
    coord.reset(ip, jpp, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[4] != m && p[5] != m && p[6] != m && p[7] != m) return true;

    // test cube center
    coord.reset(ip, jp, kp);
    m = isInsideValue(accessor.getValue(coord), isovalue);
    if (p[0] != m && p[1] != m && p[2] != m && p[3] != m &&
        p[4] != m && p[5] != m && p[6] != m && p[7] != m) return true;

    return false;
}


////////////////////////////////////////


/// @brief Tag every voxel of the (dim^3) region anchored at @a start with
///        @a regionId (values only; active states are left untouched).
template <class LeafType>
inline void
mergeVoxels(LeafType& leaf, const Coord& start, int dim, int regionId)
{
    Coord ijk, end = start;
    end[0] += dim;
    end[1] += dim;
    end[2] += dim;

    for (ijk[0] = start[0]; ijk[0] < end[0]; ++ijk[0]) {
        for (ijk[1] = start[1]; ijk[1] < end[1]; ++ijk[1]) {
            for (ijk[2] = start[2]; ijk[2] < end[2]; ++ijk[2]) {
                leaf.setValueOnly(ijk, regionId);
            }
        }
    }
}


// Note that we must use ValueType::value_type or else Visual C++ gets confused
// thinking that it is a constructor.
template <class LeafType> inline bool isMergable(LeafType& leaf, const Coord& start, int dim, typename LeafType::ValueType::value_type adaptivity) { if (adaptivity < 1e-6) return false; using VecT = typename LeafType::ValueType; Coord ijk, end = start; end[0] += dim; end[1] += dim; end[2] += dim; std::vector<VecT> norms; for (ijk[0] = start[0]; ijk[0] < end[0]; ++ijk[0]) { for (ijk[1] = start[1]; ijk[1] < end[1]; ++ijk[1]) { for (ijk[2] = start[2]; ijk[2] < end[2]; ++ijk[2]) { if(!leaf.isValueOn(ijk)) continue; norms.push_back(leaf.getValue(ijk)); } } } size_t N = norms.size(); for (size_t ni = 0; ni < N; ++ni) { VecT n_i = norms[ni]; for (size_t nj = 0; nj < N; ++nj) { VecT n_j = norms[nj]; if ((1.0 - n_i.dot(n_j)) > adaptivity) return false; } } return true; } //////////////////////////////////////// /// linear interpolation. inline double evalZeroCrossing(double v0, double v1, double iso) { return (iso - v0) / (v1 - v0); } /// @brief Extracts the eight corner values for leaf inclusive cells. template<typename LeafT> inline void collectCornerValues(const LeafT& leaf, const Index offset, std::vector<double>& values) { values[0] = double(leaf.getValue(offset)); // i, j, k values[3] = double(leaf.getValue(offset + 1)); // i, j, k+1 values[4] = double(leaf.getValue(offset + LeafT::DIM)); // i, j+1, k values[7] = double(leaf.getValue(offset + LeafT::DIM + 1)); // i, j+1, k+1 values[1] = double(leaf.getValue(offset + (LeafT::DIM * LeafT::DIM))); // i+1, j, k values[2] = double(leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + 1)); // i+1, j, k+1 values[5] = double(leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + LeafT::DIM)); // i+1, j+1, k values[6] = double(leaf.getValue(offset + (LeafT::DIM * LeafT::DIM) + LeafT::DIM + 1)); // i+1, j+1, k+1 } /// @brief Extracts the eight corner values for a cell starting at the given @ijk coordinate. 
template<typename AccessorT> inline void collectCornerValues(const AccessorT& acc, const Coord& ijk, std::vector<double>& values) { Coord coord = ijk; values[0] = double(acc.getValue(coord)); // i, j, k coord[0] += 1; values[1] = double(acc.getValue(coord)); // i+1, j, k coord[2] += 1; values[2] = double(acc.getValue(coord)); // i+i, j, k+1 coord[0] = ijk[0]; values[3] = double(acc.getValue(coord)); // i, j, k+1 coord[1] += 1; coord[2] = ijk[2]; values[4] = double(acc.getValue(coord)); // i, j+1, k coord[0] += 1; values[5] = double(acc.getValue(coord)); // i+1, j+1, k coord[2] += 1; values[6] = double(acc.getValue(coord)); // i+1, j+1, k+1 coord[0] = ijk[0]; values[7] = double(acc.getValue(coord)); // i, j+1, k+1 } /// @brief Computes the average cell point for a given edge group. inline Vec3d computePoint(const std::vector<double>& values, unsigned char signs, unsigned char edgeGroup, double iso) { Vec3d avg(0.0, 0.0, 0.0); int samples = 0; if (sEdgeGroupTable[signs][1] == edgeGroup) { // Edged: 0 - 1 avg[0] += evalZeroCrossing(values[0], values[1], iso); ++samples; } if (sEdgeGroupTable[signs][2] == edgeGroup) { // Edged: 1 - 2 avg[0] += 1.0; avg[2] += evalZeroCrossing(values[1], values[2], iso); ++samples; } if (sEdgeGroupTable[signs][3] == edgeGroup) { // Edged: 3 - 2 avg[0] += evalZeroCrossing(values[3], values[2], iso); avg[2] += 1.0; ++samples; } if (sEdgeGroupTable[signs][4] == edgeGroup) { // Edged: 0 - 3 avg[2] += evalZeroCrossing(values[0], values[3], iso); ++samples; } if (sEdgeGroupTable[signs][5] == edgeGroup) { // Edged: 4 - 5 avg[0] += evalZeroCrossing(values[4], values[5], iso); avg[1] += 1.0; ++samples; } if (sEdgeGroupTable[signs][6] == edgeGroup) { // Edged: 5 - 6 avg[0] += 1.0; avg[1] += 1.0; avg[2] += evalZeroCrossing(values[5], values[6], iso); ++samples; } if (sEdgeGroupTable[signs][7] == edgeGroup) { // Edged: 7 - 6 avg[0] += evalZeroCrossing(values[7], values[6], iso); avg[1] += 1.0; avg[2] += 1.0; ++samples; } if 
(sEdgeGroupTable[signs][8] == edgeGroup) { // Edged: 4 - 7 avg[1] += 1.0; avg[2] += evalZeroCrossing(values[4], values[7], iso); ++samples; } if (sEdgeGroupTable[signs][9] == edgeGroup) { // Edged: 0 - 4 avg[1] += evalZeroCrossing(values[0], values[4], iso); ++samples; } if (sEdgeGroupTable[signs][10] == edgeGroup) { // Edged: 1 - 5 avg[0] += 1.0; avg[1] += evalZeroCrossing(values[1], values[5], iso); ++samples; } if (sEdgeGroupTable[signs][11] == edgeGroup) { // Edged: 2 - 6 avg[0] += 1.0; avg[1] += evalZeroCrossing(values[2], values[6], iso); avg[2] += 1.0; ++samples; } if (sEdgeGroupTable[signs][12] == edgeGroup) { // Edged: 3 - 7 avg[1] += evalZeroCrossing(values[3], values[7], iso); avg[2] += 1.0; ++samples; } if (samples > 1) { double w = 1.0 / double(samples); avg[0] *= w; avg[1] *= w; avg[2] *= w; } return avg; } /// @brief Computes the average cell point for a given edge group, ignoring edge /// samples present in the @c signsMask configuration. inline int computeMaskedPoint(Vec3d& avg, const std::vector<double>& values, unsigned char signs, unsigned char signsMask, unsigned char edgeGroup, double iso) { avg = Vec3d(0.0, 0.0, 0.0); int samples = 0; if (sEdgeGroupTable[signs][1] == edgeGroup && sEdgeGroupTable[signsMask][1] == 0) { // Edged: 0 - 1 avg[0] += evalZeroCrossing(values[0], values[1], iso); ++samples; } if (sEdgeGroupTable[signs][2] == edgeGroup && sEdgeGroupTable[signsMask][2] == 0) { // Edged: 1 - 2 avg[0] += 1.0; avg[2] += evalZeroCrossing(values[1], values[2], iso); ++samples; } if (sEdgeGroupTable[signs][3] == edgeGroup && sEdgeGroupTable[signsMask][3] == 0) { // Edged: 3 - 2 avg[0] += evalZeroCrossing(values[3], values[2], iso); avg[2] += 1.0; ++samples; } if (sEdgeGroupTable[signs][4] == edgeGroup && sEdgeGroupTable[signsMask][4] == 0) { // Edged: 0 - 3 avg[2] += evalZeroCrossing(values[0], values[3], iso); ++samples; } if (sEdgeGroupTable[signs][5] == edgeGroup && sEdgeGroupTable[signsMask][5] == 0) { // Edged: 4 - 5 avg[0] += 
evalZeroCrossing(values[4], values[5], iso); avg[1] += 1.0; ++samples; } if (sEdgeGroupTable[signs][6] == edgeGroup && sEdgeGroupTable[signsMask][6] == 0) { // Edged: 5 - 6 avg[0] += 1.0; avg[1] += 1.0; avg[2] += evalZeroCrossing(values[5], values[6], iso); ++samples; } if (sEdgeGroupTable[signs][7] == edgeGroup && sEdgeGroupTable[signsMask][7] == 0) { // Edged: 7 - 6 avg[0] += evalZeroCrossing(values[7], values[6], iso); avg[1] += 1.0; avg[2] += 1.0; ++samples; } if (sEdgeGroupTable[signs][8] == edgeGroup && sEdgeGroupTable[signsMask][8] == 0) { // Edged: 4 - 7 avg[1] += 1.0; avg[2] += evalZeroCrossing(values[4], values[7], iso); ++samples; } if (sEdgeGroupTable[signs][9] == edgeGroup && sEdgeGroupTable[signsMask][9] == 0) { // Edged: 0 - 4 avg[1] += evalZeroCrossing(values[0], values[4], iso); ++samples; } if (sEdgeGroupTable[signs][10] == edgeGroup && sEdgeGroupTable[signsMask][10] == 0) { // Edged: 1 - 5 avg[0] += 1.0; avg[1] += evalZeroCrossing(values[1], values[5], iso); ++samples; } if (sEdgeGroupTable[signs][11] == edgeGroup && sEdgeGroupTable[signsMask][11] == 0) { // Edged: 2 - 6 avg[0] += 1.0; avg[1] += evalZeroCrossing(values[2], values[6], iso); avg[2] += 1.0; ++samples; } if (sEdgeGroupTable[signs][12] == edgeGroup && sEdgeGroupTable[signsMask][12] == 0) { // Edged: 3 - 7 avg[1] += evalZeroCrossing(values[3], values[7], iso); avg[2] += 1.0; ++samples; } if (samples > 1) { double w = 1.0 / double(samples); avg[0] *= w; avg[1] *= w; avg[2] *= w; } return samples; } /// @brief Computes the average cell point for a given edge group, by computing /// convex weights based on the distance from the sample point @c p. 
inline Vec3d
computeWeightedPoint(const Vec3d& p, const std::vector<double>& values,
    unsigned char signs, unsigned char edgeGroup, double iso)
{
    // Cell edge descriptors, in the same order as rows 1-12 of
    // sEdgeGroupTable: the two corner indices spanning the edge, the axis
    // (0=x, 1=y, 2=z) along which the edge runs, and the fixed cell-local
    // offsets of the edge on the remaining two axes.
    struct CellEdge { int v0, v1, axis; double dx, dy, dz; };
    static const CellEdge edges[12] = {
        {0, 1, 0,  0.0, 0.0, 0.0},  // edge 0 - 1
        {1, 2, 2,  1.0, 0.0, 0.0},  // edge 1 - 2
        {3, 2, 0,  0.0, 0.0, 1.0},  // edge 3 - 2
        {0, 3, 2,  0.0, 0.0, 0.0},  // edge 0 - 3
        {4, 5, 0,  0.0, 1.0, 0.0},  // edge 4 - 5
        {5, 6, 2,  1.0, 1.0, 0.0},  // edge 5 - 6
        {7, 6, 0,  0.0, 1.0, 1.0},  // edge 7 - 6
        {4, 7, 2,  0.0, 1.0, 0.0},  // edge 4 - 7
        {0, 4, 1,  0.0, 0.0, 0.0},  // edge 0 - 4
        {1, 5, 1,  1.0, 0.0, 0.0},  // edge 1 - 5
        {2, 6, 1,  1.0, 0.0, 1.0},  // edge 2 - 6
        {3, 7, 1,  0.0, 0.0, 1.0}   // edge 3 - 7
    };

    std::vector<Vec3d> candidates;
    candidates.reserve(8);
    std::vector<double> weights;
    weights.reserve(8);

    // Gather every edge sample belonging to the requested group, together
    // with its squared distance to the reference point @a p.
    for (int n = 0; n < 12; ++n) {
        if (sEdgeGroupTable[signs][n + 1] != edgeGroup) continue;
        const CellEdge& edge = edges[n];
        Vec3d sample(edge.dx, edge.dy, edge.dz);
        sample[edge.axis] = evalZeroCrossing(values[edge.v0], values[edge.v1], iso);
        candidates.push_back(sample);
        weights.push_back((sample - p).lengthSqr());
    }

    double minDist = std::numeric_limits<double>::max();
    double maxDist = -std::numeric_limits<double>::max();

    for (size_t i = 0, I = weights.size(); i < I; ++i) {
        minDist = std::min(minDist, weights[i]);
        maxDist = std::max(maxDist, weights[i]);
    }

    // Turn squared distances into convex weights: samples closer to @a p
    // receive larger weights.
    const double offset = maxDist + minDist * 0.1;
    double weightSum = 0.0;
    for (size_t i = 0, I = weights.size(); i < I; ++i) {
        weights[i] = offset - weights[i];
        weightSum += weights[i];
    }

    Vec3d result(0.0, 0.0, 0.0);

    if (candidates.size() > 1) {
        for (size_t i = 0, I = candidates.size(); i < I; ++i) {
            result += candidates[i] * (weights[i] / weightSum);
        }
    } else {
        // NOTE(review): assumes at least one edge belongs to the group —
        // the callers guarantee this via sEdgeGroupTable.
        result = candidates.front();
    }

    return result;
}


/// @brief Computes the average cell points defined by the sign configuration
///        @c signs and the given corner values @c values.
inline void computeCellPoints(std::vector<Vec3d>& points, const std::vector<double>& values, unsigned char signs, double iso) { for (size_t n = 1, N = sEdgeGroupTable[signs][0] + 1; n < N; ++n) { points.push_back(computePoint(values, signs, uint8_t(n), iso)); } } /// @brief Given a sign configuration @c lhsSigns and an edge group @c groupId, /// finds the corresponding edge group in a different sign configuration /// @c rhsSigns. Returns -1 if no match is found. inline int matchEdgeGroup(unsigned char groupId, unsigned char lhsSigns, unsigned char rhsSigns) { int id = -1; for (size_t i = 1; i <= 12; ++i) { if (sEdgeGroupTable[lhsSigns][i] == groupId && sEdgeGroupTable[rhsSigns][i] != 0) { id = sEdgeGroupTable[rhsSigns][i]; break; } } return id; } /// @brief Computes the average cell points defined by the sign configuration /// @c signs and the given corner values @c values. Combines data from /// two different level sets to eliminate seam lines when meshing /// fractured segments. inline void computeCellPoints(std::vector<Vec3d>& points, std::vector<bool>& weightedPointMask, const std::vector<double>& lhsValues, const std::vector<double>& rhsValues, unsigned char lhsSigns, unsigned char rhsSigns, double iso, size_t pointIdx, const uint32_t * seamPointArray) { for (size_t n = 1, N = sEdgeGroupTable[lhsSigns][0] + 1; n < N; ++n) { int id = matchEdgeGroup(uint8_t(n), lhsSigns, rhsSigns); if (id != -1) { const unsigned char e = uint8_t(id); const uint32_t& quantizedPoint = seamPointArray[pointIdx + (id - 1)]; if ((quantizedPoint & MASK_DIRTY_BIT) && !(quantizedPoint & MASK_INVALID_BIT)) { Vec3d p = unpackPoint(quantizedPoint); points.push_back(computeWeightedPoint(p, rhsValues, rhsSigns, e, iso)); weightedPointMask.push_back(true); } else { points.push_back(computePoint(rhsValues, rhsSigns, e, iso)); weightedPointMask.push_back(false); } } else { points.push_back(computePoint(lhsValues, lhsSigns, uint8_t(n), iso)); weightedPointMask.push_back(false); } } } template 
<typename InputTreeType>
struct ComputePoints
{
    using InputLeafNodeType = typename InputTreeType::LeafNodeType;
    using InputValueType = typename InputLeafNodeType::ValueType;

    using Int16TreeType = typename InputTreeType::template ValueConverter<Int16>::Type;
    using Int16LeafNodeType = typename Int16TreeType::LeafNodeType;

    using Index32TreeType = typename InputTreeType::template ValueConverter<Index32>::Type;
    using Index32LeafNodeType = typename Index32TreeType::LeafNodeType;

    // TBB body object: computes world-space vertex positions from the sign
    // flags and point-index trees, writing into the preallocated pointArray.
    ComputePoints(Vec3s * pointArray,
        const InputTreeType& inputTree,
        const std::vector<Index32LeafNodeType*>& pointIndexLeafNodes,
        const std::vector<Int16LeafNodeType*>& signFlagsLeafNodes,
        const std::unique_ptr<Index32[]>& leafNodeOffsets,
        const math::Transform& xform,
        double iso);

    // Optional reference (seam-line) meshing data used when meshing
    // fractured segments.
    void setRefData(const InputTreeType& refInputTree,
        const Index32TreeType& refPointIndexTree,
        const Int16TreeType& refSignFlagsTree,
        const uint32_t * quantizedSeamLinePoints,
        uint8_t * seamLinePointsFlags);

    void operator()(const tbb::blocked_range<size_t>&) const;

private:
    Vec3s * const mPoints;
    InputTreeType const * const mInputTree;
    Index32LeafNodeType * const * const mPointIndexNodes;
    Int16LeafNodeType const * const * const mSignFlagsNodes;
    Index32 const * const mNodeOffsets;
    math::Transform const mTransform;
    double const mIsovalue;

    // reference meshing data
    InputTreeType const * mRefInputTree;
    Index32TreeType const * mRefPointIndexTree;
    Int16TreeType const * mRefSignFlagsTree;
    uint32_t const * mQuantizedSeamLinePoints;
    uint8_t * mSeamLinePointsFlags;
}; // struct ComputePoints


template <typename InputTreeType>
ComputePoints<InputTreeType>::ComputePoints(
    Vec3s * pointArray,
    const InputTreeType& inputTree,
    const std::vector<Index32LeafNodeType*>& pointIndexLeafNodes,
    const std::vector<Int16LeafNodeType*>& signFlagsLeafNodes,
    const std::unique_ptr<Index32[]>& leafNodeOffsets,
    const math::Transform& xform,
    double iso)
    : mPoints(pointArray)
    , mInputTree(&inputTree)
    , mPointIndexNodes(pointIndexLeafNodes.empty() ?
        nullptr : &pointIndexLeafNodes.front())
    , mSignFlagsNodes(signFlagsLeafNodes.empty() ? nullptr : &signFlagsLeafNodes.front())
    , mNodeOffsets(leafNodeOffsets.get())
    , mTransform(xform)
    , mIsovalue(iso)
    , mRefInputTree(nullptr)
    , mRefPointIndexTree(nullptr)
    , mRefSignFlagsTree(nullptr)
    , mQuantizedSeamLinePoints(nullptr)
    , mSeamLinePointsFlags(nullptr)
{
}

template <typename InputTreeType>
void
ComputePoints<InputTreeType>::setRefData(
    const InputTreeType& refInputTree,
    const Index32TreeType& refPointIndexTree,
    const Int16TreeType& refSignFlagsTree,
    const uint32_t * quantizedSeamLinePoints,
    uint8_t * seamLinePointsFlags)
{
    mRefInputTree = &refInputTree;
    mRefPointIndexTree = &refPointIndexTree;
    mRefSignFlagsTree = &refSignFlagsTree;
    mQuantizedSeamLinePoints = quantizedSeamLinePoints;
    mSeamLinePointsFlags = seamLinePointsFlags;
}

template <typename InputTreeType>
void
ComputePoints<InputTreeType>::operator()(const tbb::blocked_range<size_t>& range) const
{
    using InputTreeAccessor = tree::ValueAccessor<const InputTreeType>;
    using Index32TreeAccessor = tree::ValueAccessor<const Index32TreeType>;
    using Int16TreeAccessor = tree::ValueAccessor<const Int16TreeType>;

    using IndexType = typename Index32TreeType::ValueType;

    using IndexArray = std::vector<Index>;
    using IndexArrayMap = std::map<IndexType, IndexArray>;

    InputTreeAccessor inputAcc(*mInputTree);

    Vec3d xyz;
    Coord ijk;
    std::vector<Vec3d> points(4);
    std::vector<bool> weightedPointMask(4);
    std::vector<double> values(8), refValues(8);
    const double iso = mIsovalue;

    // reference data accessors
    std::unique_ptr<InputTreeAccessor> refInputAcc;
    std::unique_ptr<Index32TreeAccessor> refPointIndexAcc;
    std::unique_ptr<Int16TreeAccessor> refSignFlagsAcc;

    const bool hasReferenceData = mRefInputTree && mRefPointIndexTree && mRefSignFlagsTree;

    if (hasReferenceData) {
        refInputAcc.reset(new InputTreeAccessor(*mRefInputTree));
        refPointIndexAcc.reset(new Index32TreeAccessor(*mRefPointIndexTree));
        refSignFlagsAcc.reset(new
Int16TreeAccessor(*mRefSignFlagsTree));
    }

    for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

        Index32LeafNodeType& pointIndexNode = *mPointIndexNodes[n];
        const Coord& origin = pointIndexNode.origin();

        const Int16LeafNodeType& signFlagsNode = *mSignFlagsNodes[n];
        const InputLeafNodeType * inputNode = inputAcc.probeConstLeaf(origin);

        // get reference data
        const InputLeafNodeType * refInputNode = nullptr;
        const Index32LeafNodeType * refPointIndexNode = nullptr;
        const Int16LeafNodeType * refSignFlagsNode = nullptr;

        if (hasReferenceData) {
            refInputNode = refInputAcc->probeConstLeaf(origin);
            refPointIndexNode = refPointIndexAcc->probeConstLeaf(origin);
            refSignFlagsNode = refSignFlagsAcc->probeConstLeaf(origin);
        }

        // Points for this leaf are laid out sequentially starting at the
        // precomputed per-node offset.
        IndexType pointOffset = IndexType(mNodeOffsets[n]);
        IndexArrayMap regions;

        for (auto it = pointIndexNode.beginValueOn(); it; ++it) {
            const Index offset = it.pos();

            const IndexType id = it.getValue();
            if (id != 0) {
                // Nonzero ids mark voxels that belong to a collapsed
                // (adaptively merged) region; they are handled below.
                if (id != IndexType(util::INVALID_IDX)) {
                    regions[id].push_back(offset);
                }
                continue;
            }

            pointIndexNode.setValueOnly(offset, pointOffset);

            const Int16 flags = signFlagsNode.getValue(offset);
            uint8_t signs = uint8_t(SIGNS & flags);
            uint8_t refSigns = 0;

            if ((flags & SEAM) && refPointIndexNode && refSignFlagsNode) {
                if (refSignFlagsNode->isValueOn(offset)) {
                    refSigns = uint8_t(SIGNS & refSignFlagsNode->getValue(offset));
                }
            }

            ijk = Index32LeafNodeType::offsetToLocalCoord(offset);

            // Cells entirely inside the leaf can read corner values straight
            // from the leaf buffer; boundary cells go through the accessor.
            const bool inclusiveCell = inputNode &&
                ijk[0] < int(Index32LeafNodeType::DIM - 1) &&
                ijk[1] < int(Index32LeafNodeType::DIM - 1) &&
                ijk[2] < int(Index32LeafNodeType::DIM - 1);

            ijk += origin;

            if (inclusiveCell) collectCornerValues(*inputNode, offset, values);
            else collectCornerValues(inputAcc, ijk, values);

            points.clear();
            weightedPointMask.clear();

            if (refSigns == 0) {
                computeCellPoints(points, values, signs, iso);
            } else {
                if (inclusiveCell && refInputNode) {
                    collectCornerValues(*refInputNode, offset, refValues);
                } else {
                    collectCornerValues(*refInputAcc, ijk,
refValues);
                }
                // Seam cell: combine primary and reference level set data.
                computeCellPoints(points, weightedPointMask, values, refValues,
                    signs, refSigns, iso, refPointIndexNode->getValue(offset),
                    mQuantizedSeamLinePoints);
            }

            xyz[0] = double(ijk[0]);
            xyz[1] = double(ijk[1]);
            xyz[2] = double(ijk[2]);

            for (size_t i = 0, I = points.size(); i < I; ++i) {

                Vec3d& point = points[i];

                // Checks for both NaN and inf vertex positions, i.e. any value that is not finite.
                if (!std::isfinite(point[0]) ||
                    !std::isfinite(point[1]) ||
                    !std::isfinite(point[2]))
                {
                    OPENVDB_THROW(ValueError,
                        "VolumeToMesh encountered NaNs or infs in the input VDB!"
                        " Hint: Check the input and consider using the \"Diagnostics\" tool "
                        "to detect and resolve the NaNs.");
                }

                // Shift the cell-local point to index space, then map it to
                // world space.
                point += xyz;
                point = mTransform.indexToWorld(point);

                Vec3s& pos = mPoints[pointOffset];
                pos[0] = float(point[0]);
                pos[1] = float(point[1]);
                pos[2] = float(point[2]);

                if (mSeamLinePointsFlags && !weightedPointMask.empty() && weightedPointMask[i]) {
                    mSeamLinePointsFlags[pointOffset] = uint8_t(1);
                }

                ++pointOffset;
            }
        }

        // generate collapsed region points
        for (typename IndexArrayMap::iterator it = regions.begin(); it != regions.end(); ++it) {

            Vec3d avg(0.0), point(0.0);
            int count = 0;

            const IndexArray& voxels = it->second;
            for (size_t i = 0, I = voxels.size(); i < I; ++i) {

                const Index offset = voxels[i];
                ijk = Index32LeafNodeType::offsetToLocalCoord(offset);

                const bool inclusiveCell = inputNode &&
                    ijk[0] < int(Index32LeafNodeType::DIM - 1) &&
                    ijk[1] < int(Index32LeafNodeType::DIM - 1) &&
                    ijk[2] < int(Index32LeafNodeType::DIM - 1);

                ijk += origin;

                pointIndexNode.setValueOnly(offset, pointOffset);

                uint8_t signs = uint8_t(SIGNS & signFlagsNode.getValue(offset));

                if (inclusiveCell) collectCornerValues(*inputNode, offset, values);
                else collectCornerValues(inputAcc, ijk, values);

                points.clear();
                computeCellPoints(points, values, signs, iso);

                // Accumulate the first (primary) cell point of each voxel;
                // the region is represented by their average.
                avg[0] += double(ijk[0]) + points[0][0];
                avg[1] += double(ijk[1]) + points[0][1];
                avg[2] += double(ijk[2]) + points[0][2];

                ++count;
            }

            if (count > 1) {
                double w = 1.0
/ double(count);
                avg[0] *= w;
                avg[1] *= w;
                avg[2] *= w;
            }

            avg = mTransform.indexToWorld(avg);

            Vec3s& pos = mPoints[pointOffset];
            pos[0] = float(avg[0]);
            pos[1] = float(avg[1]);
            pos[2] = float(avg[2]);

            ++pointOffset;
        }
    }
} // ComputePoints::operator()


////////////////////////////////////////


// TBB body object: for every seam-flagged cell that also exists in the
// reference sign configuration, records a quantized masked cell point in
// mQuantizedPoints (one slot per reference edge group).
template <typename InputTreeType>
struct SeamLineWeights
{
    using InputLeafNodeType = typename InputTreeType::LeafNodeType;
    using InputValueType = typename InputLeafNodeType::ValueType;

    using Int16TreeType = typename InputTreeType::template ValueConverter<Int16>::Type;
    using Int16LeafNodeType = typename Int16TreeType::LeafNodeType;

    using Index32TreeType = typename InputTreeType::template ValueConverter<Index32>::Type;
    using Index32LeafNodeType = typename Index32TreeType::LeafNodeType;

    SeamLineWeights(const std::vector<Int16LeafNodeType*>& signFlagsLeafNodes,
        const InputTreeType& inputTree,
        const Index32TreeType& refPointIndexTree,
        const Int16TreeType& refSignFlagsTree,
        uint32_t * quantizedPoints,
        InputValueType iso)
        : mSignFlagsNodes(signFlagsLeafNodes.empty() ?
        nullptr : &signFlagsLeafNodes.front())
        , mInputTree(&inputTree)
        , mRefPointIndexTree(&refPointIndexTree)
        , mRefSignFlagsTree(&refSignFlagsTree)
        , mQuantizedPoints(quantizedPoints)
        , mIsovalue(iso)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        tree::ValueAccessor<const InputTreeType> inputTreeAcc(*mInputTree);
        tree::ValueAccessor<const Index32TreeType> pointIndexTreeAcc(*mRefPointIndexTree);
        tree::ValueAccessor<const Int16TreeType> signFlagsTreeAcc(*mRefSignFlagsTree);

        std::vector<double> values(8);
        const double iso = double(mIsovalue);
        Coord ijk;
        Vec3d pos;

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            const Int16LeafNodeType& signFlagsNode = *mSignFlagsNodes[n];
            const Coord& origin = signFlagsNode.origin();

            // Only leafs that exist in all three reference trees contribute.
            const Int16LeafNodeType * refSignNode = signFlagsTreeAcc.probeConstLeaf(origin);
            if (!refSignNode) continue;

            const Index32LeafNodeType* refPointIndexNode =
                pointIndexTreeAcc.probeConstLeaf(origin);
            if (!refPointIndexNode) continue;

            const InputLeafNodeType * inputNode = inputTreeAcc.probeConstLeaf(origin);

            for (typename Int16LeafNodeType::ValueOnCIter it = signFlagsNode.cbeginValueOn();
                it; ++it)
            {
                const Index offset = it.pos();

                ijk = Index32LeafNodeType::offsetToLocalCoord(offset);

                const bool inclusiveCell = inputNode &&
                    ijk[0] < int(Index32LeafNodeType::DIM - 1) &&
                    ijk[1] < int(Index32LeafNodeType::DIM - 1) &&
                    ijk[2] < int(Index32LeafNodeType::DIM - 1);

                ijk += origin;

                if ((it.getValue() & SEAM) && refSignNode->isValueOn(offset)) {

                    uint8_t lhsSigns = uint8_t(SIGNS & it.getValue());
                    uint8_t rhsSigns = uint8_t(SIGNS & refSignNode->getValue(offset));

                    if (inclusiveCell) {
                        collectCornerValues(*inputNode, offset, values);
                    } else {
                        collectCornerValues(inputTreeAcc, ijk, values);
                    }

                    for (unsigned i = 1, I = sEdgeGroupTable[lhsSigns][0] + 1; i < I; ++i) {

                        int id = matchEdgeGroup(uint8_t(i), lhsSigns, rhsSigns);

                        if (id != -1) {

                            uint32_t& data = mQuantizedPoints[
                                refPointIndexNode->getValue(offset) + (id - 1)];

                            // Only compute each seam point once.
                            if (!(data &
MASK_DIRTY_BIT)) {

                                // NOTE(review): "smaples" is a typo for
                                // "samples" (local variable only; behavior
                                // is unaffected).
                                int smaples = computeMaskedPoint(
                                    pos, values, lhsSigns, rhsSigns, uint8_t(i), iso);

                                if (smaples > 0) data = packPoint(pos);
                                else data = MASK_INVALID_BIT;

                                data |= MASK_DIRTY_BIT;
                            }
                        }
                    } // end point group loop
                }

            } // end value on loop

        } // end leaf node loop
    }

private:
    Int16LeafNodeType const * const * const mSignFlagsNodes;
    InputTreeType const * const mInputTree;
    Index32TreeType const * const mRefPointIndexTree;
    Int16TreeType const * const mRefSignFlagsTree;
    uint32_t * const mQuantizedPoints;
    InputValueType const mIsovalue;
}; // struct SeamLineWeights


// TBB body object: tags voxels whose sign configuration differs from the
// reference tree's with the SEAM flag.
template <typename TreeType>
struct SetSeamLineFlags
{
    using LeafNodeType = typename TreeType::LeafNodeType;

    SetSeamLineFlags(const std::vector<LeafNodeType*>& signFlagsLeafNodes,
        const TreeType& refSignFlagsTree)
        : mSignFlagsNodes(signFlagsLeafNodes.empty() ? nullptr : &signFlagsLeafNodes.front())
        , mRefSignFlagsTree(&refSignFlagsTree)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        tree::ValueAccessor<const TreeType> refSignFlagsTreeAcc(*mRefSignFlagsTree);

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            LeafNodeType& signFlagsNode = *mSignFlagsNodes[n];
            const Coord& origin = signFlagsNode.origin();

            const LeafNodeType * refSignNode = refSignFlagsTreeAcc.probeConstLeaf(origin);
            if (!refSignNode) continue;

            for (auto it = signFlagsNode.cbeginValueOn(); it; ++it) {
                const Index offset = it.pos();

                uint8_t rhsSigns = uint8_t(refSignNode->getValue(offset) & SIGNS);

                // Only cells that intersect the reference surface matter.
                if (sEdgeGroupTable[rhsSigns][0] > 0) {

                    const typename LeafNodeType::ValueType value = it.getValue();
                    uint8_t lhsSigns = uint8_t(value & SIGNS);

                    if (rhsSigns != lhsSigns) {
                        signFlagsNode.setValueOnly(offset, value | SEAM);
                    }
                }

            } // end value on loop

        } // end leaf node loop
    }

private:
    LeafNodeType * const * const mSignFlagsNodes;
    TreeType const * const mRefSignFlagsTree;
}; // struct SetSeamLineFlags


// TBB body object: copies SEAM flags into the sign-flag nodes for every voxel
// that is active in the given mask tree.
template <typename BoolTreeType, typename SignDataType>
struct TransferSeamLineFlags
{
    using BoolLeafNodeType
= typename BoolTreeType::LeafNodeType;

    using SignDataTreeType = typename BoolTreeType::template ValueConverter<SignDataType>::Type;
    using SignDataLeafNodeType = typename SignDataTreeType::LeafNodeType;

    TransferSeamLineFlags(const std::vector<SignDataLeafNodeType*>& signFlagsLeafNodes,
        const BoolTreeType& maskTree)
        : mSignFlagsNodes(signFlagsLeafNodes.empty() ? nullptr : &signFlagsLeafNodes.front())
        , mMaskTree(&maskTree)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        tree::ValueAccessor<const BoolTreeType> maskAcc(*mMaskTree);

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            SignDataLeafNodeType& signFlagsNode = *mSignFlagsNodes[n];
            const Coord& origin = signFlagsNode.origin();

            const BoolLeafNodeType * maskNode = maskAcc.probeConstLeaf(origin);
            if (!maskNode) continue;

            using ValueOnCIter = typename SignDataLeafNodeType::ValueOnCIter;

            for (ValueOnCIter it = signFlagsNode.cbeginValueOn(); it; ++it) {
                const Index offset = it.pos();

                if (maskNode->isValueOn(offset)) {
                    signFlagsNode.setValueOnly(offset, it.getValue() | SEAM);
                }

            } // end value on loop

        } // end leaf node loop
    }

private:
    SignDataLeafNodeType * const * const mSignFlagsNodes;
    BoolTreeType const * const mMaskTree;
}; // struct TransferSeamLineFlags


// TBB reduction body: builds a bool mask of voxels that share a surface edge
// with a SEAM-flagged neighbor (so seam flags can later be dilated onto them).
template <typename TreeType>
struct MaskSeamLineVoxels
{
    using LeafNodeType = typename TreeType::LeafNodeType;
    using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type;

    MaskSeamLineVoxels(const std::vector<LeafNodeType*>& signFlagsLeafNodes,
        const TreeType& signFlagsTree,
        BoolTreeType& mask)
        : mSignFlagsNodes(signFlagsLeafNodes.empty() ?
nullptr : &signFlagsLeafNodes.front())
        , mSignFlagsTree(&signFlagsTree)
        , mTempMask(false)
        , mMask(&mask)
    {
    }

    // Splitting constructor: each split accumulates into its own temporary
    // mask, which join() merges back together.
    MaskSeamLineVoxels(MaskSeamLineVoxels& rhs, tbb::split)
        : mSignFlagsNodes(rhs.mSignFlagsNodes)
        , mSignFlagsTree(rhs.mSignFlagsTree)
        , mTempMask(false)
        , mMask(&mTempMask)
    {
    }

    void join(MaskSeamLineVoxels& rhs) { mMask->merge(*rhs.mMask); }

    void operator()(const tbb::blocked_range<size_t>& range)
    {
        using ValueOnCIter = typename LeafNodeType::ValueOnCIter;
        using ValueType = typename LeafNodeType::ValueType;

        tree::ValueAccessor<const TreeType> signFlagsAcc(*mSignFlagsTree);
        tree::ValueAccessor<BoolTreeType> maskAcc(*mMask);
        Coord ijk(0, 0, 0);

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            LeafNodeType& signFlagsNode = *mSignFlagsNodes[n];

            for (ValueOnCIter it = signFlagsNode.cbeginValueOn(); it; ++it) {

                const ValueType flags = it.getValue();

                // Unflagged voxels with surface edges: check whether any of
                // the neighbors sharing those edges carries the SEAM flag.
                if (!(flags & SEAM) && (flags & EDGES)) {

                    ijk = it.getCoord();

                    bool isSeamLineVoxel = false;

                    if (flags & XEDGE) {
                        ijk[1] -= 1;
                        isSeamLineVoxel = (signFlagsAcc.getValue(ijk) & SEAM);
                        ijk[2] -= 1;
                        isSeamLineVoxel = isSeamLineVoxel || (signFlagsAcc.getValue(ijk) & SEAM);
                        ijk[1] += 1;
                        isSeamLineVoxel = isSeamLineVoxel || (signFlagsAcc.getValue(ijk) & SEAM);
                        ijk[2] += 1;
                    }

                    if (!isSeamLineVoxel && flags & YEDGE) {
                        ijk[2] -= 1;
                        isSeamLineVoxel = isSeamLineVoxel || (signFlagsAcc.getValue(ijk) & SEAM);
                        ijk[0] -= 1;
                        isSeamLineVoxel = isSeamLineVoxel || (signFlagsAcc.getValue(ijk) & SEAM);
                        ijk[2] += 1;
                        isSeamLineVoxel = isSeamLineVoxel || (signFlagsAcc.getValue(ijk) & SEAM);
                        ijk[0] += 1;
                    }

                    if (!isSeamLineVoxel && flags & ZEDGE) {
                        ijk[1] -= 1;
                        isSeamLineVoxel = isSeamLineVoxel || (signFlagsAcc.getValue(ijk) & SEAM);
                        ijk[0] -= 1;
                        isSeamLineVoxel = isSeamLineVoxel || (signFlagsAcc.getValue(ijk) & SEAM);
                        ijk[1] += 1;
                        isSeamLineVoxel = isSeamLineVoxel || (signFlagsAcc.getValue(ijk) & SEAM);
                        ijk[0] += 1;
                    }

                    if (isSeamLineVoxel) {
                        maskAcc.setValue(it.getCoord(), true);
                    }
                }
            } // end value on loop

        } // end leaf node loop
    }

private:
    LeafNodeType * const * const mSignFlagsNodes;
    TreeType const * const mSignFlagsTree;
    BoolTreeType mTempMask;
    BoolTreeType * const mMask;
}; // struct MaskSeamLineVoxels


/// @brief Marks voxels in @a signFlagsTree whose sign configuration differs
///        from @a refSignFlagsTree with the SEAM flag, then dilates those
///        flags onto edge-sharing neighbors.
template<typename SignDataTreeType>
inline void
markSeamLineData(SignDataTreeType& signFlagsTree, const SignDataTreeType& refSignFlagsTree)
{
    using SignDataType = typename SignDataTreeType::ValueType;
    using SignDataLeafNodeType = typename SignDataTreeType::LeafNodeType;
    using BoolTreeType = typename SignDataTreeType::template ValueConverter<bool>::Type;

    std::vector<SignDataLeafNodeType*> signFlagsLeafNodes;
    signFlagsTree.getNodes(signFlagsLeafNodes);

    const tbb::blocked_range<size_t> nodeRange(0, signFlagsLeafNodes.size());

    tbb::parallel_for(nodeRange,
        SetSeamLineFlags<SignDataTreeType>(signFlagsLeafNodes, refSignFlagsTree));

    BoolTreeType seamLineMaskTree(false);

    MaskSeamLineVoxels<SignDataTreeType>
        maskSeamLine(signFlagsLeafNodes, signFlagsTree, seamLineMaskTree);

    tbb::parallel_reduce(nodeRange, maskSeamLine);

    tbb::parallel_for(nodeRange,
        TransferSeamLineFlags<BoolTreeType, SignDataType>(signFlagsLeafNodes, seamLineMaskTree));
}


////////////////////////////////////////


// TBB body object: groups adjacent voxels into mergeable regions for
// adaptive meshing, writing region ids into the point-index nodes.
template <typename InputGridType>
struct MergeVoxelRegions
{
    using InputTreeType = typename InputGridType::TreeType;
    using InputLeafNodeType = typename InputTreeType::LeafNodeType;
    using InputValueType = typename InputLeafNodeType::ValueType;

    using FloatTreeType = typename InputTreeType::template ValueConverter<float>::Type;
    using FloatLeafNodeType = typename FloatTreeType::LeafNodeType;
    using FloatGridType = Grid<FloatTreeType>;

    using Int16TreeType = typename InputTreeType::template ValueConverter<Int16>::Type;
    using Int16LeafNodeType = typename Int16TreeType::LeafNodeType;

    using Index32TreeType = typename InputTreeType::template ValueConverter<Index32>::Type;
    using Index32LeafNodeType = typename Index32TreeType::LeafNodeType;

    using BoolTreeType = typename InputTreeType::template ValueConverter<bool>::Type;
    using
BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    MergeVoxelRegions(const InputGridType& inputGrid,
        const Index32TreeType& pointIndexTree,
        const std::vector<Index32LeafNodeType*>& pointIndexLeafNodes,
        const std::vector<Int16LeafNodeType*>& signFlagsLeafNodes,
        InputValueType iso,
        float adaptivity,
        bool invertSurfaceOrientation);

    // Optional grid that scales the adaptivity per voxel (world space).
    void setSpatialAdaptivity(const FloatGridType& grid)
    {
        mSpatialAdaptivityTree = &grid.tree();
        mSpatialAdaptivityTransform = &grid.transform();
    }

    // Optional mask of voxels that must not be merged.
    void setAdaptivityMask(const BoolTreeType& mask)
    {
        mMaskTree = &mask;
    }

    // Optional reference sign data; leafs absent from it use the internal
    // adaptivity instead of the surface adaptivity.
    void setRefSignFlagsData(const Int16TreeType& signFlagsData, float internalAdaptivity)
    {
        mRefSignFlagsTree = &signFlagsData;
        mInternalAdaptivity = internalAdaptivity;
    }

    void operator()(const tbb::blocked_range<size_t>&) const;

private:
    InputTreeType const * const mInputTree;
    math::Transform const * const mInputTransform;

    Index32TreeType const * const mPointIndexTree;
    Index32LeafNodeType * const * const mPointIndexNodes;

    Int16LeafNodeType const * const * const mSignFlagsNodes;

    InputValueType mIsovalue;
    float mSurfaceAdaptivity, mInternalAdaptivity;
    bool mInvertSurfaceOrientation;

    FloatTreeType const * mSpatialAdaptivityTree;
    BoolTreeType const * mMaskTree;
    Int16TreeType const * mRefSignFlagsTree;
    math::Transform const * mSpatialAdaptivityTransform;
}; // struct MergeVoxelRegions


template <typename InputGridType>
MergeVoxelRegions<InputGridType>::MergeVoxelRegions(
    const InputGridType& inputGrid,
    const Index32TreeType& pointIndexTree,
    const std::vector<Index32LeafNodeType*>& pointIndexLeafNodes,
    const std::vector<Int16LeafNodeType*>& signFlagsLeafNodes,
    InputValueType iso,
    float adaptivity,
    bool invertSurfaceOrientation)
    : mInputTree(&inputGrid.tree())
    , mInputTransform(&inputGrid.transform())
    , mPointIndexTree(&pointIndexTree)
    , mPointIndexNodes(pointIndexLeafNodes.empty() ? nullptr : &pointIndexLeafNodes.front())
    , mSignFlagsNodes(signFlagsLeafNodes.empty() ?
nullptr : &signFlagsLeafNodes.front())
    , mIsovalue(iso)
    , mSurfaceAdaptivity(adaptivity)
    , mInternalAdaptivity(adaptivity)
    , mInvertSurfaceOrientation(invertSurfaceOrientation)
    , mSpatialAdaptivityTree(nullptr)
    , mMaskTree(nullptr)
    , mRefSignFlagsTree(nullptr)
    , mSpatialAdaptivityTransform(nullptr)
{
}


template <typename InputGridType>
void
MergeVoxelRegions<InputGridType>::operator()(const tbb::blocked_range<size_t>& range) const
{
    using Vec3sType = math::Vec3<float>;
    using Vec3sLeafNodeType =
        typename InputLeafNodeType::template ValueConverter<Vec3sType>::Type;

    using InputTreeAccessor = tree::ValueAccessor<const InputTreeType>;
    using FloatTreeAccessor = tree::ValueAccessor<const FloatTreeType>;
    using Index32TreeAccessor = tree::ValueAccessor<const Index32TreeType>;
    using Int16TreeAccessor = tree::ValueAccessor<const Int16TreeType>;
    using BoolTreeAccessor = tree::ValueAccessor<const BoolTreeType>;

    std::unique_ptr<FloatTreeAccessor> spatialAdaptivityAcc;
    if (mSpatialAdaptivityTree && mSpatialAdaptivityTransform) {
        spatialAdaptivityAcc.reset(new FloatTreeAccessor(*mSpatialAdaptivityTree));
    }

    std::unique_ptr<BoolTreeAccessor> maskAcc;
    if (mMaskTree) {
        maskAcc.reset(new BoolTreeAccessor(*mMaskTree));
    }

    std::unique_ptr<Int16TreeAccessor> refSignFlagsAcc;
    if (mRefSignFlagsTree) {
        refSignFlagsAcc.reset(new Int16TreeAccessor(*mRefSignFlagsTree));
    }

    InputTreeAccessor inputAcc(*mInputTree);
    Index32TreeAccessor pointIndexAcc(*mPointIndexTree);

    BoolLeafNodeType mask;

    const bool invertGradientDir = mInvertSurfaceOrientation || isBoolValue<InputValueType>();
    std::unique_ptr<Vec3sLeafNodeType> gradientNode;

    Coord ijk, end;
    const int LeafDim = InputLeafNodeType::DIM;

    for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

        mask.setValuesOff();

        const Int16LeafNodeType& signFlagsNode = *mSignFlagsNodes[n];
        Index32LeafNodeType& pointIndexNode = *mPointIndexNodes[n];

        const Coord& origin = pointIndexNode.origin();

        end[0] = origin[0] + LeafDim;
        end[1] = origin[1] + LeafDim;
        end[2] = origin[2] + LeafDim;

        // Mask off seam line adjacent voxels
        if (maskAcc) {
            const BoolLeafNodeType* maskLeaf = maskAcc->probeConstLeaf(origin);
            if (maskLeaf != nullptr) {
                for (typename BoolLeafNodeType::ValueOnCIter it = maskLeaf->cbeginValueOn();
                    it; ++it)
                {
                    // "& ~1u" snaps the coordinate to its even-indexed 2^3
                    // merge-cell corner.
                    mask.setActiveState(it.getCoord() & ~1u, true);
                }
            }
        }

        float adaptivity = (refSignFlagsAcc && !refSignFlagsAcc->probeConstLeaf(origin)) ?
            mInternalAdaptivity : mSurfaceAdaptivity;

        bool useGradients = adaptivity < 1.0f;

        // Set region adaptivity
        FloatLeafNodeType adaptivityLeaf(origin, adaptivity);

        if (spatialAdaptivityAcc) {
            useGradients = false;
            for (Index offset = 0; offset < FloatLeafNodeType::NUM_VALUES; ++offset) {
                ijk = adaptivityLeaf.offsetToGlobalCoord(offset);
                // Sample the spatial adaptivity grid at this voxel's world
                // position.
                ijk = mSpatialAdaptivityTransform->worldToIndexCellCentered(
                    mInputTransform->indexToWorld(ijk));
                float weight = spatialAdaptivityAcc->getValue(ijk);
                float adaptivityValue = weight * adaptivity;
                if (adaptivityValue < 1.0f) useGradients = true;
                adaptivityLeaf.setValueOnly(offset, adaptivityValue);
            }
        }

        // Mask off ambiguous voxels
        for (auto it = signFlagsNode.cbeginValueOn(); it; ++it) {
            const Int16 flags = it.getValue();
            const unsigned char signs = static_cast<unsigned char>(SIGNS & int(flags));

            if ((flags & SEAM) || !sAdaptable[signs] || sEdgeGroupTable[signs][0] > 1) {
                mask.setActiveState(it.getCoord() & ~1u, true);
            } else if (flags & EDGES) {

                bool maskRegion = false;

                ijk = it.getCoord();
                if (!pointIndexAcc.isValueOn(ijk)) maskRegion = true;

                if (!maskRegion && flags & XEDGE) {
                    ijk[1] -= 1;
                    if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true;
                    ijk[2] -= 1;
                    if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true;
                    ijk[1] += 1;
                    if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true;
                    ijk[2] += 1;
                }

                if (!maskRegion && flags & YEDGE) {
                    ijk[2] -= 1;
                    if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true;
                    ijk[0] -= 1;
                    if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true;
                    ijk[2] +=
1; if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true; ijk[0] += 1; } if (!maskRegion && flags & ZEDGE) { ijk[1] -= 1; if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true; ijk[0] -= 1; if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true; ijk[1] += 1; if (!maskRegion && !pointIndexAcc.isValueOn(ijk)) maskRegion = true; ijk[0] += 1; } if (maskRegion) { mask.setActiveState(it.getCoord() & ~1u, true); } } } // Mask off topologically ambiguous 2x2x2 voxel sub-blocks int dim = 2; for (ijk[0] = origin[0]; ijk[0] < end[0]; ijk[0] += dim) { for (ijk[1] = origin[1]; ijk[1] < end[1]; ijk[1] += dim) { for (ijk[2] = origin[2]; ijk[2] < end[2]; ijk[2] += dim) { if (!mask.isValueOn(ijk) && isNonManifold(inputAcc, ijk, mIsovalue, dim)) { mask.setActiveState(ijk, true); } } } } // Compute the gradient for the remaining voxels if (useGradients) { if (gradientNode) { gradientNode->setValuesOff(); } else { gradientNode.reset(new Vec3sLeafNodeType()); } for (auto it = signFlagsNode.cbeginValueOn(); it; ++it) { ijk = it.getCoord(); if (!mask.isValueOn(ijk & ~1u)) { Vec3sType dir(math::ISGradient<math::CD_2ND>::result(inputAcc, ijk)); dir.normalize(); if (invertGradientDir) { dir = -dir; } gradientNode->setValueOn(it.pos(), dir); } } } // Merge regions int regionId = 1; for ( ; dim <= LeafDim; dim = dim << 1) { const unsigned coordMask = ~((dim << 1) - 1); for (ijk[0] = origin[0]; ijk[0] < end[0]; ijk[0] += dim) { for (ijk[1] = origin[1]; ijk[1] < end[1]; ijk[1] += dim) { for (ijk[2] = origin[2]; ijk[2] < end[2]; ijk[2] += dim) { adaptivity = adaptivityLeaf.getValue(ijk); if (mask.isValueOn(ijk) || isNonManifold(inputAcc, ijk, mIsovalue, dim) || (useGradients && !isMergable(*gradientNode, ijk, dim, adaptivity))) { mask.setActiveState(ijk & coordMask, true); } else { mergeVoxels(pointIndexNode, ijk, dim, regionId++); } } } } } } } // MergeVoxelRegions::operator() //////////////////////////////////////// // Constructs qudas struct 
struct UniformPrimBuilder
{
    UniformPrimBuilder(): mIdx(0), mPolygonPool(nullptr) {}

    /// Bind the output pool and pre-size its quad storage to @a upperBound.
    void init(const size_t upperBound, PolygonPool& quadPool)
    {
        mPolygonPool = &quadPool;
        mPolygonPool->resetQuads(upperBound);
        mIdx = 0;
    }

    /// Append one quad; @a reverse flips the winding order (3,2,1,0).
    template<typename IndexType>
    void addPrim(const math::Vec4<IndexType>& verts, bool reverse, char flags = 0)
    {
        if (!reverse) {
            mPolygonPool->quad(mIdx) = verts;
        } else {
            Vec4I& quad = mPolygonPool->quad(mIdx);
            quad[0] = verts[3];
            quad[1] = verts[2];
            quad[2] = verts[1];
            quad[3] = verts[0];
        }
        mPolygonPool->quadFlags(mIdx) = flags;
        ++mIdx;
    }

    /// Shrink the pool to the number of quads actually emitted.
    void done()
    {
        mPolygonPool->trimQuads(mIdx);
    }

private:
    size_t mIdx;
    PolygonPool* mPolygonPool;
};


// Constructs quads and triangles
struct AdaptivePrimBuilder
{
    AdaptivePrimBuilder() : mQuadIdx(0), mTriangleIdx(0), mPolygonPool(nullptr) {}

    /// Bind the output pool and pre-size both quad and triangle storage.
    void init(const size_t upperBound, PolygonPool& polygonPool)
    {
        mPolygonPool = &polygonPool;
        mPolygonPool->resetQuads(upperBound);
        mPolygonPool->resetTriangles(upperBound);

        mQuadIdx = 0;
        mTriangleIdx = 0;
    }

    /// Append a quad when all four indices are distinct; degenerate quads
    /// (one repeated vertex) collapse to a triangle; anything more degenerate
    /// is dropped. @a reverse flips the winding order.
    template<typename IndexType>
    void addPrim(const math::Vec4<IndexType>& verts, bool reverse, char flags = 0)
    {
        if (verts[0] != verts[1] && verts[0] != verts[2] && verts[0] != verts[3]
            && verts[1] != verts[2] && verts[1] != verts[3] && verts[2] != verts[3]) {
            mPolygonPool->quadFlags(mQuadIdx) = flags;
            addQuad(verts, reverse);
        } else if (
            verts[0] == verts[3] &&
            verts[1] != verts[2] &&
            verts[1] != verts[0] &&
            verts[2] != verts[0]) {
            mPolygonPool->triangleFlags(mTriangleIdx) = flags;
            addTriangle(verts[0], verts[1], verts[2], reverse);
        } else if (
            verts[1] == verts[2] &&
            verts[0] != verts[3] &&
            verts[0] != verts[1] &&
            verts[3] != verts[1]) {
            mPolygonPool->triangleFlags(mTriangleIdx) = flags;
            addTriangle(verts[0], verts[1], verts[3], reverse);
        } else if (
            verts[0] == verts[1] &&
            verts[2] != verts[3] &&
            verts[2] != verts[0] &&
            verts[3] != verts[0]) {
            mPolygonPool->triangleFlags(mTriangleIdx) = flags;
            addTriangle(verts[0], verts[2], verts[3], reverse);
        } else if (
            verts[2] == verts[3] &&
            verts[0] != verts[1] &&
            verts[0] != verts[2] &&
            verts[1] != verts[2]) {
            mPolygonPool->triangleFlags(mTriangleIdx) = flags;
            addTriangle(verts[0], verts[1], verts[2], reverse);
        }
    }

    /// Shrink both pools to the counts actually emitted.
    /// (NOTE(review): "trimTrinagles" is the PolygonPool API's own spelling.)
    void done()
    {
        mPolygonPool->trimQuads(mQuadIdx, /*reallocate=*/true);
        mPolygonPool->trimTrinagles(mTriangleIdx, /*reallocate=*/true);
    }

private:

    template<typename IndexType>
    void addQuad(const math::Vec4<IndexType>& verts, bool reverse)
    {
        if (!reverse) {
            mPolygonPool->quad(mQuadIdx) = verts;
        } else {
            Vec4I& quad = mPolygonPool->quad(mQuadIdx);
            quad[0] = verts[3];
            quad[1] = verts[2];
            quad[2] = verts[1];
            quad[3] = verts[0];
        }
        ++mQuadIdx;
    }

    void addTriangle(unsigned v0, unsigned v1, unsigned v2, bool reverse)
    {
        Vec3I& prim = mPolygonPool->triangle(mTriangleIdx);

        prim[1] = v1;

        if (!reverse) {
            prim[0] = v0;
            prim[2] = v2;
        } else {
            prim[0] = v2;
            prim[2] = v0;
        }
        ++mTriangleIdx;
    }

    size_t mQuadIdx, mTriangleIdx;
    PolygonPool *mPolygonPool;
};


/// Emit the polygons spanning the active edges of the cell at @a ijk.
/// For each flagged edge (X/Y/Z) the quad is assembled from the point indices
/// of the four cells sharing that edge; sEdgeGroupTable re-targets indices for
/// cells that host more than one edge group.
template<typename SignAccT, typename IdxAccT, typename PrimBuilder>
inline void
constructPolygons(
    bool invertSurfaceOrientation,
    Int16 flags,
    Int16 refFlags,
    const Vec3i& offsets,
    const Coord& ijk,
    const SignAccT& signAcc,
    const IdxAccT& idxAcc,
    PrimBuilder& mesher)
{
    using IndexType = typename IdxAccT::ValueType;

    IndexType v0 = IndexType(util::INVALID_IDX);
    const bool isActive = idxAcc.probeValue(ijk, v0);
    if (isActive == false || v0 == IndexType(util::INVALID_IDX)) return;

    char tag[2];
    tag[0] = (flags & SEAM) ? POLYFLAG_FRACTURE_SEAM : 0;
    tag[1] = tag[0] | char(POLYFLAG_EXTERIOR);

    bool isInside = flags & INSIDE;
    isInside = invertSurfaceOrientation ? !isInside : isInside;

    Coord coord = ijk;
    math::Vec4<IndexType> quad(0,0,0,0);

    if (flags & XEDGE) {

        quad[0] = v0 + offsets[0];

        // i, j-1, k
        coord[1]--;
        bool activeValues = idxAcc.probeValue(coord, quad[1]);
        uint8_t cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[1] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][5] - 1 : 0;

        // i, j-1, k-1
        coord[2]--;
        activeValues = activeValues && idxAcc.probeValue(coord, quad[2]);
        cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[2] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][7] - 1 : 0;

        // i, j, k-1
        coord[1]++;
        activeValues = activeValues && idxAcc.probeValue(coord, quad[3]);
        cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[3] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][3] - 1 : 0;

        if (activeValues) {
            mesher.addPrim(quad, isInside, tag[bool(refFlags & XEDGE)]);
        }

        coord[2]++; // i, j, k
    }


    if (flags & YEDGE) {

        quad[0] = v0 + offsets[1];

        // i, j, k-1
        coord[2]--;
        bool activeValues = idxAcc.probeValue(coord, quad[1]);
        uint8_t cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[1] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][12] - 1 : 0;

        // i-1, j, k-1
        coord[0]--;
        activeValues = activeValues && idxAcc.probeValue(coord, quad[2]);
        cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[2] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][11] - 1 : 0;

        // i-1, j, k
        coord[2]++;
        activeValues = activeValues && idxAcc.probeValue(coord, quad[3]);
        cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[3] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][10] - 1 : 0;

        if (activeValues) {
            mesher.addPrim(quad, isInside, tag[bool(refFlags & YEDGE)]);
        }

        coord[0]++; // i, j, k
    }


    if (flags & ZEDGE) {

        quad[0] = v0 + offsets[2];

        // i, j-1, k
        coord[1]--;
        bool activeValues = idxAcc.probeValue(coord, quad[1]);
        uint8_t cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[1] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][8] - 1 : 0;

        // i-1, j-1, k
        coord[0]--;
        activeValues = activeValues && idxAcc.probeValue(coord, quad[2]);
        cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[2] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][6] - 1 : 0;

        // i-1, j, k
        coord[1]++;
        activeValues = activeValues && idxAcc.probeValue(coord, quad[3]);
        cell = uint8_t(SIGNS & signAcc.getValue(coord));
        quad[3] += sEdgeGroupTable[cell][0] > 1 ? sEdgeGroupTable[cell][2] - 1 : 0;

        if (activeValues) {
            // Z edges use the opposite winding relative to X/Y.
            mesher.addPrim(quad, !isInside, tag[bool(refFlags & ZEDGE)]);
        }
    }
}


////////////////////////////////////////


/// TBB reduction body that activates, in the output mask, the voxels bordering
/// active tiles whose neighbours change inside/outside state across the isovalue.
template<typename InputTreeType>
struct MaskTileBorders
{
    using InputValueType = typename InputTreeType::ValueType;
    using BoolTreeType = typename InputTreeType::template ValueConverter<bool>::Type;


    MaskTileBorders(const InputTreeType& inputTree, InputValueType iso,
        BoolTreeType& mask, const Vec4i* tileArray)
        : mInputTree(&inputTree)
        , mIsovalue(iso)
        , mTempMask(false)
        , mMask(&mask)
        , mTileArray(tileArray)
    {
    }

    // Split constructor: writes go to a private mask, merged in join().
    MaskTileBorders(MaskTileBorders& rhs, tbb::split)
        : mInputTree(rhs.mInputTree)
        , mIsovalue(rhs.mIsovalue)
        , mTempMask(false)
        , mMask(&mTempMask)
        , mTileArray(rhs.mTileArray)
    {
    }

    void join(MaskTileBorders& rhs) { mMask->merge(*rhs.mMask); }

    void operator()(const tbb::blocked_range<size_t>&);

private:
    InputTreeType const * const mInputTree;
    InputValueType const mIsovalue;

    BoolTreeType mTempMask;
    BoolTreeType * const mMask;
    Vec4i const * const mTileArray; // (min x, min y, min z, edge length) per tile
}; // MaskTileBorders


template<typename InputTreeType>
void
MaskTileBorders<InputTreeType>::operator()(const tbb::blocked_range<size_t>& range)
{
    tree::ValueAccessor<const InputTreeType> inputTreeAcc(*mInputTree);

    CoordBBox region, bbox;
    Coord ijk, nijk;

    for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

        const Vec4i& tile = mTileArray[n];

        bbox.min()[0] = tile[0];
        bbox.min()[1] = tile[1];
        bbox.min()[2] = tile[2];
        bbox.max() = bbox.min();
        bbox.max().offset(tile[3]);

        InputValueType value = mInputTree->background();

        const bool isInside = isInsideValue(inputTreeAcc.getValue(bbox.min()), mIsovalue);
        const int valueDepth = inputTreeAcc.getValueDepth(bbox.min());

        // eval x-edges

        ijk = bbox.max();
        nijk = ijk;
        ++nijk[0];

        bool
processRegion = true;
        if (valueDepth >= inputTreeAcc.getValueDepth(nijk)) {
            // Only compare against neighbours at the same or coarser tile depth.
            processRegion = isInside != isInsideValue(inputTreeAcc.getValue(nijk), mIsovalue);
        }

        if (processRegion) {
            region = bbox;
            region.expand(1);
            region.min()[0] = region.max()[0] = ijk[0];
            mMask->fill(region, false);
        }


        ijk = bbox.min();
        --ijk[0];

        processRegion = true;
        if (valueDepth >= inputTreeAcc.getValueDepth(ijk)) {
            processRegion = (!inputTreeAcc.probeValue(ijk, value)
                && isInside != isInsideValue(value, mIsovalue));
        }

        if (processRegion) {
            region = bbox;
            region.expand(1);
            region.min()[0] = region.max()[0] = ijk[0];
            mMask->fill(region, false);
        }


        // eval y-edges

        ijk = bbox.max();
        nijk = ijk;
        ++nijk[1];

        processRegion = true;
        if (valueDepth >= inputTreeAcc.getValueDepth(nijk)) {
            processRegion = isInside != isInsideValue(inputTreeAcc.getValue(nijk), mIsovalue);
        }

        if (processRegion) {
            region = bbox;
            region.expand(1);
            region.min()[1] = region.max()[1] = ijk[1];
            mMask->fill(region, false);
        }


        ijk = bbox.min();
        --ijk[1];

        processRegion = true;
        if (valueDepth >= inputTreeAcc.getValueDepth(ijk)) {
            processRegion = (!inputTreeAcc.probeValue(ijk, value)
                && isInside != isInsideValue(value, mIsovalue));
        }

        if (processRegion) {
            region = bbox;
            region.expand(1);
            region.min()[1] = region.max()[1] = ijk[1];
            mMask->fill(region, false);
        }


        // eval z-edges

        ijk = bbox.max();
        nijk = ijk;
        ++nijk[2];

        processRegion = true;
        if (valueDepth >= inputTreeAcc.getValueDepth(nijk)) {
            processRegion = isInside != isInsideValue(inputTreeAcc.getValue(nijk), mIsovalue);
        }

        if (processRegion) {
            region = bbox;
            region.expand(1);
            region.min()[2] = region.max()[2] = ijk[2];
            mMask->fill(region, false);
        }

        ijk = bbox.min();
        --ijk[2];

        processRegion = true;
        if (valueDepth >= inputTreeAcc.getValueDepth(ijk)) {
            processRegion = (!inputTreeAcc.probeValue(ijk, value)
                && isInside != isInsideValue(value, mIsovalue));
        }

        if (processRegion) {
            region = bbox;
            region.expand(1);
            region.min()[2] = region.max()[2] = ijk[2];
            mMask->fill(region, false);
        }
    }
} // MaskTileBorders::operator()


/// Collect all active tiles (nodes above leaf depth) of @a inputTree and mask
/// their isovalue-crossing borders into @a mask via a parallel reduction.
template<typename InputTreeType>
inline void
maskActiveTileBorders(const InputTreeType& inputTree,
    typename InputTreeType::ValueType iso,
    typename InputTreeType::template ValueConverter<bool>::Type& mask)
{
    typename InputTreeType::ValueOnCIter tileIter(inputTree);
    tileIter.setMaxDepth(InputTreeType::ValueOnCIter::LEAF_DEPTH - 1);

    size_t tileCount = 0;
    for ( ; tileIter; ++tileIter) {
        ++tileCount;
    }

    if (tileCount > 0) {
        std::unique_ptr<Vec4i[]> tiles(new Vec4i[tileCount]);

        CoordBBox bbox;
        size_t index = 0;

        tileIter = inputTree.cbeginValueOn();
        tileIter.setMaxDepth(InputTreeType::ValueOnCIter::LEAF_DEPTH - 1);

        // Pack each tile as (min, edge length); tiles are cubes so one extent suffices.
        for (; tileIter; ++tileIter) {
            Vec4i& tile = tiles[index++];
            tileIter.getBoundingBox(bbox);
            tile[0] = bbox.min()[0];
            tile[1] = bbox.min()[1];
            tile[2] = bbox.min()[2];
            tile[3] = bbox.max()[0] - bbox.min()[0];
        }

        MaskTileBorders<InputTreeType> op(inputTree, iso, mask, tiles.get());
        tbb::parallel_reduce(tbb::blocked_range<size_t>(0, tileCount), op);
    }
}


////////////////////////////////////////


// Utility class for the volumeToMesh wrapper
class PointListCopy
{
public:
    PointListCopy(const PointList& pointsIn, std::vector<Vec3s>& pointsOut)
        : mPointsIn(pointsIn) , mPointsOut(pointsOut)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        for (size_t n = range.begin(); n < range.end(); ++n) {
            mPointsOut[n] = mPointsIn[n];
        }
    }

private:
    const PointList& mPointsIn;
    std::vector<Vec3s>& mPointsOut;
};


////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////


/// Precomputed linear-offset lists for the voxels of a leaf node, grouped by
/// position (interior core, the six faces, and the voxels that have an
/// in-node neighbour along each positive axis).
struct LeafNodeVoxelOffsets
{
    using IndexVector = std::vector<Index>;

    template<typename LeafNodeType>
    void constructOffsetList();

    /// Return internal core voxel offsets.
    const IndexVector& core() const { return mCore; }

    /// Return front face voxel offsets.
    const IndexVector& minX() const { return mMinX; }

    /// Return back face voxel offsets.
const IndexVector& maxX() const { return mMaxX; }

    /// Return bottom face voxel offsets.
    const IndexVector& minY() const { return mMinY; }

    /// Return top face voxel offsets.
    const IndexVector& maxY() const { return mMaxY; }

    /// Return left face voxel offsets.
    const IndexVector& minZ() const { return mMinZ; }

    /// Return right face voxel offsets.
    const IndexVector& maxZ() const { return mMaxZ; }

    /// Return voxel offsets with internal neighbours in x + 1.
    const IndexVector& internalNeighborsX() const { return mInternalNeighborsX; }

    /// Return voxel offsets with internal neighbours in y + 1.
    const IndexVector& internalNeighborsY() const { return mInternalNeighborsY; }

    /// Return voxel offsets with internal neighbours in z + 1.
    const IndexVector& internalNeighborsZ() const { return mInternalNeighborsZ; }

private:
    IndexVector mCore, mMinX, mMaxX, mMinY, mMaxY, mMinZ, mMaxZ,
        mInternalNeighborsX, mInternalNeighborsY, mInternalNeighborsZ;
}; // struct LeafNodeVoxelOffsets


/// Populate every offset list for a leaf of type @a LeafNodeType.
/// Offsets are linear indices: (x << 2*LOG2DIM) + (y << LOG2DIM) + z.
template<typename LeafNodeType>
inline void
LeafNodeVoxelOffsets::constructOffsetList()
{
    // internal core voxels
    mCore.clear();
    mCore.reserve((LeafNodeType::DIM - 2) * (LeafNodeType::DIM - 2));

    for (Index x = 1; x < (LeafNodeType::DIM - 1); ++x) {
        const Index offsetX = x << (2 * LeafNodeType::LOG2DIM);
        for (Index y = 1; y < (LeafNodeType::DIM - 1); ++y) {
            const Index offsetXY = offsetX + (y << LeafNodeType::LOG2DIM);
            for (Index z = 1; z < (LeafNodeType::DIM - 1); ++z) {
                mCore.push_back(offsetXY + z);
            }
        }
    }

    // internal neighbors in x + 1
    mInternalNeighborsX.clear();
    mInternalNeighborsX.reserve(LeafNodeType::SIZE - (LeafNodeType::DIM * LeafNodeType::DIM));

    for (Index x = 0; x < (LeafNodeType::DIM - 1); ++x) {
        const Index offsetX = x << (2 * LeafNodeType::LOG2DIM);
        for (Index y = 0; y < LeafNodeType::DIM; ++y) {
            const Index offsetXY = offsetX + (y << LeafNodeType::LOG2DIM);
            for (Index z = 0; z < LeafNodeType::DIM; ++z) {
                mInternalNeighborsX.push_back(offsetXY + z);
            }
        }
    }

    // internal neighbors in y + 1
    mInternalNeighborsY.clear();
    mInternalNeighborsY.reserve(LeafNodeType::SIZE - (LeafNodeType::DIM * LeafNodeType::DIM));

    for (Index x = 0; x < LeafNodeType::DIM; ++x) {
        const Index offsetX = x << (2 * LeafNodeType::LOG2DIM);
        for (Index y = 0; y < (LeafNodeType::DIM - 1); ++y) {
            const Index offsetXY = offsetX + (y << LeafNodeType::LOG2DIM);
            for (Index z = 0; z < LeafNodeType::DIM; ++z) {
                mInternalNeighborsY.push_back(offsetXY + z);
            }
        }
    }

    // internal neighbors in z + 1
    mInternalNeighborsZ.clear();
    mInternalNeighborsZ.reserve(LeafNodeType::SIZE - (LeafNodeType::DIM * LeafNodeType::DIM));

    for (Index x = 0; x < LeafNodeType::DIM; ++x) {
        const Index offsetX = x << (2 * LeafNodeType::LOG2DIM);
        for (Index y = 0; y < LeafNodeType::DIM; ++y) {
            const Index offsetXY = offsetX + (y << LeafNodeType::LOG2DIM);
            for (Index z = 0; z < (LeafNodeType::DIM - 1); ++z) {
                mInternalNeighborsZ.push_back(offsetXY + z);
            }
        }
    }

    // min x
    mMinX.clear();
    mMinX.reserve(LeafNodeType::DIM * LeafNodeType::DIM);
    {
        for (Index y = 0; y < LeafNodeType::DIM; ++y) {
            const Index offsetXY = (y << LeafNodeType::LOG2DIM);
            for (Index z = 0; z < LeafNodeType::DIM; ++z) {
                mMinX.push_back(offsetXY + z);
            }
        }
    }

    // max x
    mMaxX.clear();
    mMaxX.reserve(LeafNodeType::DIM * LeafNodeType::DIM);
    {
        const Index offsetX = (LeafNodeType::DIM - 1) << (2 * LeafNodeType::LOG2DIM);
        for (Index y = 0; y < LeafNodeType::DIM; ++y) {
            const Index offsetXY = offsetX + (y << LeafNodeType::LOG2DIM);
            for (Index z = 0; z < LeafNodeType::DIM; ++z) {
                mMaxX.push_back(offsetXY + z);
            }
        }
    }

    // min y
    mMinY.clear();
    mMinY.reserve(LeafNodeType::DIM * LeafNodeType::DIM);
    {
        for (Index x = 0; x < LeafNodeType::DIM; ++x) {
            const Index offsetX = x << (2 * LeafNodeType::LOG2DIM);
            for (Index z = 0; z < (LeafNodeType::DIM - 1); ++z) {
                mMinY.push_back(offsetX + z);
            }
        }
    }

    // max y
    mMaxY.clear();
    mMaxY.reserve(LeafNodeType::DIM * LeafNodeType::DIM);
    {
        const Index offsetY = (LeafNodeType::DIM - 1) << LeafNodeType::LOG2DIM;
        for (Index x = 0; x < LeafNodeType::DIM; ++x) {
            const Index offsetX = x << (2 * LeafNodeType::LOG2DIM);
            for (Index z = 0; z < (LeafNodeType::DIM - 1); ++z) {
                mMaxY.push_back(offsetX + offsetY + z);
            }
        }
    }

    // min z
    mMinZ.clear();
    mMinZ.reserve(LeafNodeType::DIM * LeafNodeType::DIM);
    {
        for (Index x = 0; x < LeafNodeType::DIM; ++x) {
            const Index offsetX = x << (2 * LeafNodeType::LOG2DIM);
            for (Index y = 0; y < LeafNodeType::DIM; ++y) {
                const Index offsetXY = offsetX + (y << LeafNodeType::LOG2DIM);
                mMinZ.push_back(offsetXY);
            }
        }
    }

    // max z
    mMaxZ.clear();
    mMaxZ.reserve(LeafNodeType::DIM * LeafNodeType::DIM);
    {
        for (Index x = 0; x < LeafNodeType::DIM; ++x) {
            const Index offsetX = x << (2 * LeafNodeType::LOG2DIM);
            for (Index y = 0; y < LeafNodeType::DIM; ++y) {
                const Index offsetXY = offsetX + (y << LeafNodeType::LOG2DIM);
                mMaxZ.push_back(offsetXY + (LeafNodeType::DIM - 1));
            }
        }
    }
}


////////////////////////////////////////


/// Utility method to mark all voxels that share an edge.
/// Activates, in the wrapped accessor, the four voxels surrounding the
/// positive edge of axis @a _AXIS at @a ijk.
template<typename AccessorT, int _AXIS>
struct VoxelEdgeAccessor {

    enum { AXIS = _AXIS };
    AccessorT& acc;

    VoxelEdgeAccessor(AccessorT& _acc) : acc(_acc) {}

    void set(Coord ijk) {
        if (_AXIS == 0) { // x + 1 edge
            acc.setActiveState(ijk);
            --ijk[1]; // set i, j-1, k
            acc.setActiveState(ijk);
            --ijk[2]; // set i, j-1, k-1
            acc.setActiveState(ijk);
            ++ijk[1]; // set i, j, k-1
            acc.setActiveState(ijk);
        } else if (_AXIS == 1) { // y + 1 edge
            acc.setActiveState(ijk);
            --ijk[2]; // set i, j, k-1
            acc.setActiveState(ijk);
            --ijk[0]; // set i-1, j, k-1
            acc.setActiveState(ijk);
            ++ijk[2]; // set i-1, j, k
            acc.setActiveState(ijk);
        } else { // z + 1 edge
            acc.setActiveState(ijk);
            --ijk[1]; // set i, j-1, k
            acc.setActiveState(ijk);
            --ijk[0]; // set i-1, j-1, k
            acc.setActiveState(ijk);
            ++ijk[1]; // set i-1, j, k
            acc.setActiveState(ijk);
        }
    }
};


/// Utility method to check for sign changes along the x + 1, y + 1 or z + 1 directions.
/// The direction is determined by the @a edgeAcc parameter. Only voxels that have internal
/// neighbours are evaluated.
template<typename VoxelEdgeAcc, typename LeafNode>
void
evalInternalVoxelEdges(VoxelEdgeAcc& edgeAcc, const LeafNode& leafnode,
    const LeafNodeVoxelOffsets& voxels, const typename LeafNode::ValueType iso)
{
    Index nvo = 1; // neighbour voxel offset, z + 1 direction assumed initially.
    const std::vector<Index>* offsets = &voxels.internalNeighborsZ();

    if (VoxelEdgeAcc::AXIS == 0) { // x + 1 direction
        nvo = LeafNode::DIM * LeafNode::DIM;
        offsets = &voxels.internalNeighborsX();
    } else if (VoxelEdgeAcc::AXIS == 1) { // y + 1 direction
        nvo = LeafNode::DIM;
        offsets = &voxels.internalNeighborsY();
    }

    for (size_t n = 0, N = offsets->size(); n < N; ++n) {
        const Index& pos = (*offsets)[n];
        // The edge counts as intersecting only if at least one endpoint is an
        // active voxel and the two endpoints lie on opposite sides of the isovalue.
        bool isActive = leafnode.isValueOn(pos) || leafnode.isValueOn(pos + nvo);
        if (isActive && (isInsideValue(leafnode.getValue(pos), iso)
            != isInsideValue(leafnode.getValue(pos + nvo), iso))) {
            edgeAcc.set(leafnode.offsetToGlobalCoord(pos));
        }
    }
}


/// Utility method to check for sign changes along the x + 1, y + 1 or z + 1 directions.
/// The direction is determined by the @a edgeAcc parameter. All voxels that reside in the
/// specified leafnode face: back, top or right are evaluated.
// NOTE(review): "Extrenal" is a long-standing spelling in this identifier;
// it is kept because the callers below use this exact name.
template<typename LeafNode, typename TreeAcc, typename VoxelEdgeAcc>
void
evalExtrenalVoxelEdges(VoxelEdgeAcc& edgeAcc, TreeAcc& acc, const LeafNode& lhsNode,
    const LeafNodeVoxelOffsets& voxels, const typename LeafNode::ValueType iso)
{
    const std::vector<Index>* lhsOffsets = &voxels.maxX();
    const std::vector<Index>* rhsOffsets = &voxels.minX();
    Coord ijk = lhsNode.origin();

    if (VoxelEdgeAcc::AXIS == 0) { // back leafnode face
        ijk[0] += LeafNode::DIM;
    } else if (VoxelEdgeAcc::AXIS == 1) { // top leafnode face
        ijk[1] += LeafNode::DIM;
        lhsOffsets = &voxels.maxY();
        rhsOffsets = &voxels.minY();
    } else if (VoxelEdgeAcc::AXIS == 2) { // right leafnode face
        ijk[2] += LeafNode::DIM;
        lhsOffsets = &voxels.maxZ();
        rhsOffsets = &voxels.minZ();
    }

    typename LeafNode::ValueType value;
    const LeafNode* rhsNodePt = acc.probeConstLeaf(ijk);

    if (rhsNodePt) {
        // Neighbour leaf exists: compare matching face voxels pairwise.
        for (size_t n = 0, N = lhsOffsets->size(); n < N; ++n) {

            const Index& pos = (*lhsOffsets)[n];
            bool isActive = lhsNode.isValueOn(pos) || rhsNodePt->isValueOn((*rhsOffsets)[n]);

            if (isActive && (isInsideValue(lhsNode.getValue(pos), iso)
                != isInsideValue(rhsNodePt->getValue((*rhsOffsets)[n]), iso))) {
                edgeAcc.set(lhsNode.offsetToGlobalCoord(pos));
            }
        }
    } else if (!acc.probeValue(ijk, value)) {
        // Neighbour is an inactive tile: compare the whole face against its value.
        const bool inside = isInsideValue(value, iso);
        for (size_t n = 0, N = lhsOffsets->size(); n < N; ++n) {

            const Index& pos = (*lhsOffsets)[n];
            if (lhsNode.isValueOn(pos)
                && (inside != isInsideValue(lhsNode.getValue(pos), iso))) {
                edgeAcc.set(lhsNode.offsetToGlobalCoord(pos));
            }
        }
    }
}


/// Utility method to check for sign changes along the x - 1, y - 1 or z - 1 directions.
/// The direction is determined by the @a edgeAcc parameter. All voxels that reside in the
/// specified leafnode face: front, bottom or left are evaluated.
template<typename LeafNode, typename TreeAcc, typename VoxelEdgeAcc>
void
evalExtrenalVoxelEdgesInv(VoxelEdgeAcc& edgeAcc, TreeAcc& acc, const LeafNode& leafnode,
    const LeafNodeVoxelOffsets& voxels, const typename LeafNode::ValueType iso)
{
    Coord ijk = leafnode.origin();
    if (VoxelEdgeAcc::AXIS == 0)        --ijk[0]; // front leafnode face
    else if (VoxelEdgeAcc::AXIS == 1)   --ijk[1]; // bottom leafnode face
    else if (VoxelEdgeAcc::AXIS == 2)   --ijk[2]; // left leafnode face

    typename LeafNode::ValueType value;
    // Only handles the inactive-tile neighbour case; a leaf neighbour is
    // covered by that leaf's own evalExtrenalVoxelEdges pass.
    if (!acc.probeConstLeaf(ijk) && !acc.probeValue(ijk, value)) {

        const std::vector<Index>* offsets = &voxels.internalNeighborsX();
        if (VoxelEdgeAcc::AXIS == 1)        offsets = &voxels.internalNeighborsY();
        else if (VoxelEdgeAcc::AXIS == 2)   offsets = &voxels.internalNeighborsZ();

        const bool inside = isInsideValue(value, iso);
        for (size_t n = 0, N = offsets->size(); n < N; ++n) {

            const Index& pos = (*offsets)[n];
            if (leafnode.isValueOn(pos)
                && (inside != isInsideValue(leafnode.getValue(pos), iso))) {

                // The edge is anchored in the neighbouring (negative) cell.
                ijk = leafnode.offsetToGlobalCoord(pos);
                if (VoxelEdgeAcc::AXIS == 0)        --ijk[0];
                else if (VoxelEdgeAcc::AXIS == 1)   --ijk[1];
                else if (VoxelEdgeAcc::AXIS == 2)   --ijk[2];

                edgeAcc.set(ijk);
            }
        }
    }
}


/// TBB reduction body that activates, in a bool tree, every voxel whose
/// positive-axis edges cross the isovalue.
template<typename InputTreeType>
struct IdentifyIntersectingVoxels
{
    using InputLeafNodeType = typename InputTreeType::LeafNodeType;
    using InputValueType = typename InputLeafNodeType::ValueType;

    using BoolTreeType = typename InputTreeType::template ValueConverter<bool>::Type;

    IdentifyIntersectingVoxels(
        const InputTreeType& inputTree,
        const std::vector<const InputLeafNodeType*>& inputLeafNodes,
        BoolTreeType& intersectionTree,
        InputValueType iso);

    IdentifyIntersectingVoxels(IdentifyIntersectingVoxels&, tbb::split);
    void operator()(const tbb::blocked_range<size_t>&);
    void join(const IdentifyIntersectingVoxels& rhs) {
        mIntersectionAccessor.tree().merge(rhs.mIntersectionAccessor.tree());
    }

private:
    tree::ValueAccessor<const InputTreeType> mInputAccessor;
    InputLeafNodeType const * const * const mInputNodes;

    BoolTreeType mIntersectionTree;
    tree::ValueAccessor<BoolTreeType> mIntersectionAccessor;

    LeafNodeVoxelOffsets mOffsetData;
    const LeafNodeVoxelOffsets* mOffsets;

    InputValueType mIsovalue;
}; // struct IdentifyIntersectingVoxels


template<typename InputTreeType>
IdentifyIntersectingVoxels<InputTreeType>::IdentifyIntersectingVoxels(
    const InputTreeType& inputTree,
    const std::vector<const InputLeafNodeType*>& inputLeafNodes,
    BoolTreeType& intersectionTree,
    InputValueType iso)
    : mInputAccessor(inputTree)
    , mInputNodes(inputLeafNodes.empty() ? nullptr : &inputLeafNodes.front())
    , mIntersectionTree(false)
    , mIntersectionAccessor(intersectionTree)
    , mOffsetData()
    , mOffsets(&mOffsetData)
    , mIsovalue(iso)
{
    mOffsetData.constructOffsetList<InputLeafNodeType>();
}


template<typename InputTreeType>
IdentifyIntersectingVoxels<InputTreeType>::IdentifyIntersectingVoxels(
    IdentifyIntersectingVoxels& rhs, tbb::split)
    : mInputAccessor(rhs.mInputAccessor.tree())
    , mInputNodes(rhs.mInputNodes)
    , mIntersectionTree(false)
    , mIntersectionAccessor(mIntersectionTree) // use local tree.
    , mOffsetData()
    , mOffsets(rhs.mOffsets) // reference data from main instance.
, mIsovalue(rhs.mIsovalue)
{
}


template<typename InputTreeType>
void
IdentifyIntersectingVoxels<InputTreeType>::operator()(const tbb::blocked_range<size_t>& range)
{
    VoxelEdgeAccessor<tree::ValueAccessor<BoolTreeType>, 0> xEdgeAcc(mIntersectionAccessor);
    VoxelEdgeAccessor<tree::ValueAccessor<BoolTreeType>, 1> yEdgeAcc(mIntersectionAccessor);
    VoxelEdgeAccessor<tree::ValueAccessor<BoolTreeType>, 2> zEdgeAcc(mIntersectionAccessor);

    for (size_t n = range.begin(); n != range.end(); ++n) {

        const InputLeafNodeType& node = *mInputNodes[n];

        // internal x + 1 voxel edges
        evalInternalVoxelEdges(xEdgeAcc, node, *mOffsets, mIsovalue);
        // internal y + 1 voxel edges
        evalInternalVoxelEdges(yEdgeAcc, node, *mOffsets, mIsovalue);
        // internal z + 1 voxel edges
        evalInternalVoxelEdges(zEdgeAcc, node, *mOffsets, mIsovalue);

        // external x + 1 voxels edges (back face)
        evalExtrenalVoxelEdges(xEdgeAcc, mInputAccessor, node, *mOffsets, mIsovalue);
        // external y + 1 voxels edges (top face)
        evalExtrenalVoxelEdges(yEdgeAcc, mInputAccessor, node, *mOffsets, mIsovalue);
        // external z + 1 voxels edges (right face)
        evalExtrenalVoxelEdges(zEdgeAcc, mInputAccessor, node, *mOffsets, mIsovalue);

        // The remaining edges are only checked if the leafnode neighbour, in the
        // corresponding direction, is an inactive tile.

        // external x - 1 voxels edges (front face)
        evalExtrenalVoxelEdgesInv(xEdgeAcc, mInputAccessor, node, *mOffsets, mIsovalue);
        // external y - 1 voxels edges (bottom face)
        evalExtrenalVoxelEdgesInv(yEdgeAcc, mInputAccessor, node, *mOffsets, mIsovalue);
        // external z - 1 voxels edges (left face)
        evalExtrenalVoxelEdgesInv(zEdgeAcc, mInputAccessor, node, *mOffsets, mIsovalue);
    }
} // IdentifyIntersectingVoxels::operator()


/// Activate in @a intersectionTree every voxel of @a inputTree whose edges
/// cross @a isovalue, including edges bordering active tiles.
template<typename InputTreeType>
inline void
identifySurfaceIntersectingVoxels(
    typename InputTreeType::template ValueConverter<bool>::Type& intersectionTree,
    const InputTreeType& inputTree,
    typename InputTreeType::ValueType isovalue)
{
    using InputLeafNodeType = typename InputTreeType::LeafNodeType;

    std::vector<const InputLeafNodeType*> inputLeafNodes;
    inputTree.getNodes(inputLeafNodes);

    IdentifyIntersectingVoxels<InputTreeType> op(
        inputTree, inputLeafNodes, intersectionTree, isovalue);

    tbb::parallel_reduce(tbb::blocked_range<size_t>(0, inputLeafNodes.size()), op);

    maskActiveTileBorders(inputTree, isovalue, intersectionTree);
}


////////////////////////////////////////


/// TBB reduction body that re-evaluates edge intersections for the inactive
/// (masked-off) voxels of the given bool leaf nodes.
template<typename InputTreeType>
struct MaskIntersectingVoxels
{
    using InputLeafNodeType = typename InputTreeType::LeafNodeType;
    using InputValueType = typename InputLeafNodeType::ValueType;

    using BoolTreeType = typename InputTreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    MaskIntersectingVoxels(
        const InputTreeType& inputTree,
        const std::vector<BoolLeafNodeType*>& nodes,
        BoolTreeType& intersectionTree,
        InputValueType iso);

    MaskIntersectingVoxels(MaskIntersectingVoxels&, tbb::split);
    void operator()(const tbb::blocked_range<size_t>&);
    void join(const MaskIntersectingVoxels& rhs) {
        mIntersectionAccessor.tree().merge(rhs.mIntersectionAccessor.tree());
    }

private:
    tree::ValueAccessor<const InputTreeType> mInputAccessor;
    BoolLeafNodeType const * const * const mNodes;

    BoolTreeType mIntersectionTree;
    tree::ValueAccessor<BoolTreeType>
mIntersectionAccessor;

    InputValueType mIsovalue;
}; // struct MaskIntersectingVoxels


template<typename InputTreeType>
MaskIntersectingVoxels<InputTreeType>::MaskIntersectingVoxels(
    const InputTreeType& inputTree,
    const std::vector<BoolLeafNodeType*>& nodes,
    BoolTreeType& intersectionTree,
    InputValueType iso)
    : mInputAccessor(inputTree)
    , mNodes(nodes.empty() ? nullptr : &nodes.front())
    , mIntersectionTree(false)
    , mIntersectionAccessor(intersectionTree)
    , mIsovalue(iso)
{
}


template<typename InputTreeType>
MaskIntersectingVoxels<InputTreeType>::MaskIntersectingVoxels(
    MaskIntersectingVoxels& rhs, tbb::split)
    : mInputAccessor(rhs.mInputAccessor.tree())
    , mNodes(rhs.mNodes)
    , mIntersectionTree(false)
    , mIntersectionAccessor(mIntersectionTree) // use local tree.
    , mIsovalue(rhs.mIsovalue)
{
}


template<typename InputTreeType>
void
MaskIntersectingVoxels<InputTreeType>::operator()(const tbb::blocked_range<size_t>& range)
{
    VoxelEdgeAccessor<tree::ValueAccessor<BoolTreeType>, 0> xEdgeAcc(mIntersectionAccessor);
    VoxelEdgeAccessor<tree::ValueAccessor<BoolTreeType>, 1> yEdgeAcc(mIntersectionAccessor);
    VoxelEdgeAccessor<tree::ValueAccessor<BoolTreeType>, 2> zEdgeAcc(mIntersectionAccessor);

    Coord ijk(0, 0, 0);
    InputValueType iso(mIsovalue);

    for (size_t n = range.begin(); n != range.end(); ++n) {

        const BoolLeafNodeType& node = *mNodes[n];

        for (typename BoolLeafNodeType::ValueOnCIter it = node.cbeginValueOn(); it; ++it) {

            // Only the voxels whose stored value is false are re-evaluated.
            if (!it.getValue()) {

                ijk = it.getCoord();

                const bool inside = isInsideValue(mInputAccessor.getValue(ijk), iso);

                if (inside != isInsideValue(mInputAccessor.getValue(ijk.offsetBy(1, 0, 0)), iso)) {
                    xEdgeAcc.set(ijk);
                }

                if (inside != isInsideValue(mInputAccessor.getValue(ijk.offsetBy(0, 1, 0)), iso)) {
                    yEdgeAcc.set(ijk);
                }

                if (inside != isInsideValue(mInputAccessor.getValue(ijk.offsetBy(0, 0, 1)), iso)) {
                    zEdgeAcc.set(ijk);
                }
            }
        }
    }
} // MaskIntersectingVoxels::operator()


/// TBB reduction body that marks, in a border tree, every active mask voxel
/// whose value differs from one of its seven positive-corner neighbours.
template<typename BoolTreeType>
struct MaskBorderVoxels
{
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    MaskBorderVoxels(const BoolTreeType& maskTree,
        const std::vector<BoolLeafNodeType*>& maskNodes,
        BoolTreeType& borderTree)
        : mMaskTree(&maskTree)
        , mMaskNodes(maskNodes.empty() ? nullptr : &maskNodes.front())
        , mTmpBorderTree(false)
        , mBorderTree(&borderTree)
    {
    }

    // Split constructor: writes go to a private tree, merged in join().
    MaskBorderVoxels(MaskBorderVoxels& rhs, tbb::split)
        : mMaskTree(rhs.mMaskTree)
        , mMaskNodes(rhs.mMaskNodes)
        , mTmpBorderTree(false)
        , mBorderTree(&mTmpBorderTree)
    {
    }

    void join(MaskBorderVoxels& rhs) { mBorderTree->merge(*rhs.mBorderTree); }

    void operator()(const tbb::blocked_range<size_t>& range)
    {
        tree::ValueAccessor<const BoolTreeType> maskAcc(*mMaskTree);
        tree::ValueAccessor<BoolTreeType> borderAcc(*mBorderTree);
        Coord ijk(0, 0, 0);

        for (size_t n = range.begin(); n != range.end(); ++n) {

            const BoolLeafNodeType& node = *mMaskNodes[n];

            for (typename BoolLeafNodeType::ValueOnCIter it = node.cbeginValueOn(); it; ++it) {

                ijk = it.getCoord();

                const bool lhs = it.getValue();
                bool rhs = lhs;

                bool isEdgeVoxel = false;

                // Walk the seven neighbours in the positive octant and look
                // for a value transition among the active ones.
                ijk[2] += 1; // i, j, k+1
                isEdgeVoxel = (maskAcc.probeValue(ijk, rhs) && lhs != rhs);

                ijk[1] += 1; // i, j+1, k+1
                isEdgeVoxel = isEdgeVoxel || (maskAcc.probeValue(ijk, rhs) && lhs != rhs);

                ijk[0] += 1; // i+1, j+1, k+1
                isEdgeVoxel = isEdgeVoxel || (maskAcc.probeValue(ijk, rhs) && lhs != rhs);

                ijk[1] -= 1; // i+1, j, k+1
                isEdgeVoxel = isEdgeVoxel || (maskAcc.probeValue(ijk, rhs) && lhs != rhs);

                ijk[2] -= 1; // i+1, j, k
                isEdgeVoxel = isEdgeVoxel || (maskAcc.probeValue(ijk, rhs) && lhs != rhs);

                ijk[1] += 1; // i+1, j+1, k
                isEdgeVoxel = isEdgeVoxel || (maskAcc.probeValue(ijk, rhs) && lhs != rhs);

                ijk[0] -= 1; // i, j+1, k
                isEdgeVoxel = isEdgeVoxel || (maskAcc.probeValue(ijk, rhs) && lhs != rhs);

                if (isEdgeVoxel) {
                    ijk[1] -= 1; // i, j, k
                    borderAcc.setValue(ijk, true);
                }
            }
        }
    }

private:
    BoolTreeType const * const mMaskTree;
    BoolLeafNodeType const * const * const mMaskNodes;

    BoolTreeType mTmpBorderTree;
    BoolTreeType * const mBorderTree;
}; // struct MaskBorderVoxels
template<typename BoolTreeType>
struct SyncMaskValues
{
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    SyncMaskValues(const std::vector<BoolLeafNodeType*>& nodes, const BoolTreeType& mask)
        : mNodes(nodes.empty() ? nullptr : &nodes.front())
        , mMaskTree(&mask)
    {
    }

    // Copies the 'true' states of the corresponding @a mask leaf into the
    // active voxels of each node (tbb::parallel_for functor).
    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        using ValueOnIter = typename BoolLeafNodeType::ValueOnIter;

        tree::ValueAccessor<const BoolTreeType> maskTreeAcc(*mMaskTree);

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            BoolLeafNodeType& node = *mNodes[n];

            const BoolLeafNodeType * maskNode = maskTreeAcc.probeConstLeaf(node.origin());

            if (maskNode) {
                for (ValueOnIter it = node.beginValueOn(); it; ++it) {
                    const Index pos = it.pos();
                    if (maskNode->getValue(pos)) {
                        node.setValueOnly(pos, true);
                    }
                }
            }
        }
    }

private:
    BoolLeafNodeType * const * const mNodes;
    BoolTreeType const * const mMaskTree;
}; // struct SyncMaskValues


////////////////////////////////////////


// Tags voxels whose mask-active state equals the (possibly inverted) mask
// state; tagged voxels are later excluded when the intersection mask is
// recomputed (tbb::parallel_for functor).
template<typename BoolTreeType>
struct MaskSurface
{
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    MaskSurface(const std::vector<BoolLeafNodeType*>& nodes, const BoolTreeType& mask,
        const math::Transform& inputTransform, const math::Transform& maskTransform,
        bool invert)
        : mNodes(nodes.empty() ? nullptr : &nodes.front())
        , mMaskTree(&mask)
        , mInputTransform(inputTransform)
        , mMaskTransform(maskTransform)
        , mInvertMask(invert)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        using ValueOnIter = typename BoolLeafNodeType::ValueOnIter;

        tree::ValueAccessor<const BoolTreeType> maskTreeAcc(*mMaskTree);

        const bool matchingTransforms = mInputTransform == mMaskTransform;

        // Voxels whose mask activity equals maskState get tagged 'true'.
        const bool maskState = mInvertMask;

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            BoolLeafNodeType& node = *mNodes[n];

            if (matchingTransforms) {

                // Fast path: identical transforms, leaf-to-leaf comparison.
                const BoolLeafNodeType * maskNode = maskTreeAcc.probeConstLeaf(node.origin());

                if (maskNode) {

                    for (ValueOnIter it = node.beginValueOn(); it; ++it) {
                        const Index pos = it.pos();
                        if (maskNode->isValueOn(pos) == maskState) {
                            node.setValueOnly(pos, true);
                        }
                    }

                } else {

                    // No mask leaf here: a single tile state covers the node.
                    if (maskTreeAcc.isValueOn(node.origin()) == maskState) {
                        for (ValueOnIter it = node.beginValueOn(); it; ++it) {
                            node.setValueOnly(it.pos(), true);
                        }
                    }

                }

            } else {

                // Different transforms: map each voxel through world space.
                Coord ijk(0, 0, 0);

                for (ValueOnIter it = node.beginValueOn(); it; ++it) {

                    ijk = mMaskTransform.worldToIndexCellCentered(
                        mInputTransform.indexToWorld(it.getCoord()));

                    if (maskTreeAcc.isValueOn(ijk) == maskState) {
                        node.setValueOnly(it.pos(), true);
                    }
                }
            }
        }
    }

private:
    BoolLeafNodeType * const * const mNodes;
    BoolTreeType const * const mMaskTree;
    math::Transform const mInputTransform;
    math::Transform const mMaskTransform;
    bool const mInvertMask;
}; // struct MaskSurface


// Restricts the surface-intersecting-voxel mask to the region selected by
// @a maskGrid (only applied when the mask is a bool grid) and records the
// mask-border voxels in @a borderTree for seam handling.
template<typename InputGridType>
inline void
applySurfaceMask(
    typename InputGridType::TreeType::template ValueConverter<bool>::Type& intersectionTree,
    typename InputGridType::TreeType::template ValueConverter<bool>::Type& borderTree,
    const InputGridType& inputGrid,
    const GridBase::ConstPtr& maskGrid,
    bool invertMask,
    typename InputGridType::ValueType isovalue)
{
    using InputTreeType = typename InputGridType::TreeType;
    using BoolTreeType = typename InputTreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename
BoolTreeType::LeafNodeType;
    using BoolGridType = Grid<BoolTreeType>;

    // The mask is only honored when it is a boolean grid; other types are ignored.
    if (maskGrid && maskGrid->type() == BoolGridType::gridType()) {

        const math::Transform& transform = inputGrid.transform();
        const InputTreeType& inputTree = inputGrid.tree();

        const BoolGridType * surfaceMask = static_cast<const BoolGridType*>(maskGrid.get());

        const BoolTreeType& maskTree = surfaceMask->tree();
        const math::Transform& maskTransform = surfaceMask->transform();

        // mark masked voxels
        std::vector<BoolLeafNodeType*> intersectionLeafNodes;
        intersectionTree.getNodes(intersectionLeafNodes);

        tbb::parallel_for(tbb::blocked_range<size_t>(0, intersectionLeafNodes.size()),
            MaskSurface<BoolTreeType>(
                intersectionLeafNodes, maskTree, transform, maskTransform, invertMask));

        // mask surface-mask border
        MaskBorderVoxels<BoolTreeType> borderOp(
            intersectionTree, intersectionLeafNodes, borderTree);
        tbb::parallel_reduce(tbb::blocked_range<size_t>(0, intersectionLeafNodes.size()),
            borderOp);

        // recompute isosurface intersection mask (masked-out voxels are skipped)
        BoolTreeType tmpIntersectionTree(false);

        MaskIntersectingVoxels<InputTreeType> op(
            inputTree, intersectionLeafNodes, tmpIntersectionTree, isovalue);
        tbb::parallel_reduce(tbb::blocked_range<size_t>(0, intersectionLeafNodes.size()), op);

        std::vector<BoolLeafNodeType*> tmpIntersectionLeafNodes;
        tmpIntersectionTree.getNodes(tmpIntersectionLeafNodes);

        tbb::parallel_for(tbb::blocked_range<size_t>(0, tmpIntersectionLeafNodes.size()),
            SyncMaskValues<BoolTreeType>(tmpIntersectionLeafNodes, intersectionTree));

        // replace the original mask with the recomputed, masked version
        intersectionTree.clear();
        intersectionTree.merge(tmpIntersectionTree);
    }
}


////////////////////////////////////////


// tbb::parallel_reduce functor that builds the marching-cubes auxiliary data:
// an Int16 sign/edge-flag tree and a point-index tree with matching topology.
template<typename InputTreeType>
struct ComputeAuxiliaryData
{
    using InputLeafNodeType = typename InputTreeType::LeafNodeType;
    using InputValueType = typename InputLeafNodeType::ValueType;

    using BoolLeafNodeType = tree::LeafNode<bool, InputLeafNodeType::LOG2DIM>;

    using Int16TreeType = typename InputTreeType::template ValueConverter<Int16>::Type;
    using Index32TreeType
= typename InputTreeType::template ValueConverter<Index32>::Type;

    ComputeAuxiliaryData(const InputTreeType& inputTree,
        const std::vector<const BoolLeafNodeType*>& intersectionLeafNodes,
        Int16TreeType& signFlagsTree,
        Index32TreeType& pointIndexTree,
        InputValueType iso);

    ComputeAuxiliaryData(ComputeAuxiliaryData&, tbb::split);

    void operator()(const tbb::blocked_range<size_t>&);

    // Merge the per-branch result trees after a parallel reduction.
    void join(const ComputeAuxiliaryData& rhs) {
        mSignFlagsAccessor.tree().merge(rhs.mSignFlagsAccessor.tree());
        mPointIndexAccessor.tree().merge(rhs.mPointIndexAccessor.tree());
    }

private:
    tree::ValueAccessor<const InputTreeType> mInputAccessor;
    BoolLeafNodeType const * const * const mIntersectionNodes;

    Int16TreeType mSignFlagsTree;
    tree::ValueAccessor<Int16TreeType> mSignFlagsAccessor;

    Index32TreeType mPointIndexTree;
    tree::ValueAccessor<Index32TreeType> mPointIndexAccessor;

    const InputValueType mIsovalue;
};


// Main constructor: results go to the caller-supplied trees.
// NOTE(review): unconditionally takes intersectionLeafNodes.front() --
// presumably callers guarantee a non-empty node list; verify at call sites.
template<typename InputTreeType>
ComputeAuxiliaryData<InputTreeType>::ComputeAuxiliaryData(
    const InputTreeType& inputTree,
    const std::vector<const BoolLeafNodeType*>& intersectionLeafNodes,
    Int16TreeType& signFlagsTree,
    Index32TreeType& pointIndexTree,
    InputValueType iso)
    : mInputAccessor(inputTree)
    , mIntersectionNodes(&intersectionLeafNodes.front())
    , mSignFlagsTree(0)
    , mSignFlagsAccessor(signFlagsTree)
    , mPointIndexTree(std::numeric_limits<Index32>::max())
    , mPointIndexAccessor(pointIndexTree)
    , mIsovalue(iso)
{
    pointIndexTree.root().setBackground(std::numeric_limits<Index32>::max(), false);
}


// Split constructor: each branch accumulates into its own member trees.
template<typename InputTreeType>
ComputeAuxiliaryData<InputTreeType>::ComputeAuxiliaryData(ComputeAuxiliaryData& rhs, tbb::split)
    : mInputAccessor(rhs.mInputAccessor.tree())
    , mIntersectionNodes(rhs.mIntersectionNodes)
    , mSignFlagsTree(0)
    , mSignFlagsAccessor(mSignFlagsTree)
    , mPointIndexTree(std::numeric_limits<Index32>::max())
    , mPointIndexAccessor(mPointIndexTree)
    , mIsovalue(rhs.mIsovalue)
{
}


template<typename InputTreeType>
void
ComputeAuxiliaryData<InputTreeType>::operator()(const tbb::blocked_range<size_t>& range)
{
    using Int16LeafNodeType = typename Int16TreeType::LeafNodeType;

    Coord ijk;
    math::Tuple<8, InputValueType> cellVertexValues;
    // Scratch leaf node, reused across iterations until it is handed off to
    // the sign-flags tree (released below).
    typename std::unique_ptr<Int16LeafNodeType> signsNodePt(new Int16LeafNodeType(ijk, 0));

    for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

        const BoolLeafNodeType& maskNode = *mIntersectionNodes[n];
        const Coord& origin = maskNode.origin();

        const InputLeafNodeType *leafPt = mInputAccessor.probeConstLeaf(origin);

        if (!signsNodePt.get()) signsNodePt.reset(new Int16LeafNodeType(origin, 0));
        else signsNodePt->setOrigin(origin);

        bool updatedNode = false;

        for (typename BoolLeafNodeType::ValueOnCIter it = maskNode.cbeginValueOn(); it; ++it) {

            const Index pos = it.pos();
            ijk = BoolLeafNodeType::offsetToLocalCoord(pos);

            // Cells fully interior to the leaf can gather their eight corner
            // values from the leaf buffer; boundary cells go through the accessor.
            if (leafPt &&
                ijk[0] < int(BoolLeafNodeType::DIM - 1) &&
                ijk[1] < int(BoolLeafNodeType::DIM - 1) &&
                ijk[2] < int(BoolLeafNodeType::DIM - 1) ) {
                getCellVertexValues(*leafPt, pos, cellVertexValues);
            } else {
                getCellVertexValues(mInputAccessor, origin + ijk, cellVertexValues);
            }

            uint8_t signFlags = computeSignFlags(cellVertexValues, mIsovalue);

            // Cells with uniform sign (all inside or all outside) produce no geometry.
            if (signFlags != 0 && signFlags != 0xFF) {

                const bool inside = signFlags & 0x1;

                int edgeFlags = inside ? INSIDE : 0;

                // 'true' mask values mark excluded voxels; they contribute
                // sign information only, no edge intersections.
                if (!it.getValue()) {
                    edgeFlags |= inside != ((signFlags & 0x02) != 0) ? XEDGE : 0;
                    edgeFlags |= inside != ((signFlags & 0x10) != 0) ? YEDGE : 0;
                    edgeFlags |= inside != ((signFlags & 0x08) != 0) ? ZEDGE : 0;
                }

                const uint8_t ambiguousCellFlags = sAmbiguousFace[signFlags];
                if (ambiguousCellFlags != 0) {
                    correctCellSigns(signFlags, ambiguousCellFlags, mInputAccessor,
                        origin + ijk, mIsovalue);
                }

                edgeFlags |= int(signFlags);

                signsNodePt->setValueOn(pos, Int16(edgeFlags));
                updatedNode = true;
            }
        }

        if (updatedNode) {

            // Mirror the sign-flag topology into the point-index tree.
            typename Index32TreeType::LeafNodeType* idxNode =
                mPointIndexAccessor.touchLeaf(origin);
            idxNode->topologyUnion(*signsNodePt);

            // zero fill
            for (auto it = idxNode->beginValueOn(); it; ++it) {
                idxNode->setValueOnly(it.pos(), 0);
            }

            mSignFlagsAccessor.addLeaf(signsNodePt.release());
        }
    }
} // ComputeAuxiliaryData::operator()


// Convenience driver: gathers the intersection leaf nodes and runs the
// ComputeAuxiliaryData reduction over them.
template<typename InputTreeType>
inline void
computeAuxiliaryData(
    typename InputTreeType::template ValueConverter<Int16>::Type& signFlagsTree,
    typename InputTreeType::template ValueConverter<Index32>::Type& pointIndexTree,
    const typename InputTreeType::template ValueConverter<bool>::Type& intersectionTree,
    const InputTreeType& inputTree,
    typename InputTreeType::ValueType isovalue)
{
    using BoolTreeType = typename InputTreeType::template ValueConverter<bool>::Type;
    using BoolLeafNodeType = typename BoolTreeType::LeafNodeType;

    std::vector<const BoolLeafNodeType*> intersectionLeafNodes;
    intersectionTree.getNodes(intersectionLeafNodes);

    ComputeAuxiliaryData<InputTreeType> op(
        inputTree, intersectionLeafNodes, signFlagsTree, pointIndexTree, isovalue);

    tbb::parallel_reduce(tbb::blocked_range<size_t>(0, intersectionLeafNodes.size()), op);
}


////////////////////////////////////////


// Computes, per sign-flag leaf node, the number of mesh points that will be
// generated (uniform, non-adaptive meshing).
template<Index32 LeafNodeLog2Dim>
struct LeafNodePointCount
{
    using Int16LeafNodeType = tree::LeafNode<Int16, LeafNodeLog2Dim>;

    LeafNodePointCount(const std::vector<Int16LeafNodeType*>& leafNodes,
        std::unique_ptr<Index32[]>& leafNodeCount)
        : mLeafNodes(leafNodes.empty() ?
nullptr : &leafNodes.front())
        , mData(leafNodeCount.get())
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const {

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            Index32 count = 0;

            // Sum the per-cell point counts from the edge-group table,
            // scanning the leaf buffer directly.
            Int16 const * p = mLeafNodes[n]->buffer().data();
            Int16 const * const endP = p + Int16LeafNodeType::SIZE;

            while (p < endP) {
                count += Index32(sEdgeGroupTable[(SIGNS & *p)][0]);
                ++p;
            }

            mData[n] = count;
        }
    }

private:
    Int16LeafNodeType * const * const mLeafNodes;
    Index32 *mData;
}; // struct LeafNodePointCount


// Per-leaf point count for the adaptive case: voxels collapsed into a shared
// region (nonzero index) contribute one point per unique region.
template<typename PointIndexLeafNode>
struct AdaptiveLeafNodePointCount
{
    using Int16LeafNodeType = tree::LeafNode<Int16, PointIndexLeafNode::LOG2DIM>;

    AdaptiveLeafNodePointCount(const std::vector<PointIndexLeafNode*>& pointIndexNodes,
        const std::vector<Int16LeafNodeType*>& signDataNodes,
        std::unique_ptr<Index32[]>& leafNodeCount)
        : mPointIndexNodes(pointIndexNodes.empty() ? nullptr : &pointIndexNodes.front())
        , mSignDataNodes(signDataNodes.empty() ? nullptr : &signDataNodes.front())
        , mData(leafNodeCount.get())
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        using IndexType = typename PointIndexLeafNode::ValueType;

        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            const PointIndexLeafNode& node = *mPointIndexNodes[n];
            const Int16LeafNodeType& signNode = *mSignDataNodes[n];

            size_t count = 0;

            std::set<IndexType> uniqueRegions;

            for (typename PointIndexLeafNode::ValueOnCIter it = node.cbeginValueOn();
                it; ++it)
            {
                IndexType id = it.getValue();

                if (id == 0) {
                    // non-collapsed voxel: count its own cell points
                    count += size_t(sEdgeGroupTable[(SIGNS & signNode.getValue(it.pos()))][0]);
                } else if (id != IndexType(util::INVALID_IDX)) {
                    uniqueRegions.insert(id);
                }
            }

            mData[n] = Index32(count + uniqueRegions.size());
        }
    }

private:
    PointIndexLeafNode const * const * const mPointIndexNodes;
    Int16LeafNodeType const * const * const mSignDataNodes;
    Index32 *mData;
}; // struct AdaptiveLeafNodePointCount


// Converts per-leaf point counts (prefix sums in mData) into absolute point
// indices stored in the point-index leaf nodes.
template<typename PointIndexLeafNode>
struct MapPoints
{
    using Int16LeafNodeType = tree::LeafNode<Int16, PointIndexLeafNode::LOG2DIM>;

    MapPoints(const std::vector<PointIndexLeafNode*>& pointIndexNodes,
        const std::vector<Int16LeafNodeType*>& signDataNodes,
        std::unique_ptr<Index32[]>& leafNodeCount)
        : mPointIndexNodes(pointIndexNodes.empty() ? nullptr : &pointIndexNodes.front())
        , mSignDataNodes(signDataNodes.empty() ? nullptr : &signDataNodes.front())
        , mData(leafNodeCount.get())
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        for (size_t n = range.begin(), N = range.end(); n != N; ++n) {

            const Int16LeafNodeType& signNode = *mSignDataNodes[n];
            PointIndexLeafNode& indexNode = *mPointIndexNodes[n];

            // Starting offset for this leaf, then advance per cell.
            Index32 pointOffset = mData[n];

            for (auto it = indexNode.beginValueOn(); it; ++it) {
                const Index pos = it.pos();
                indexNode.setValueOnly(pos, pointOffset);
                const int signs = SIGNS & int(signNode.getValue(pos));
                pointOffset += Index32(sEdgeGroupTable[signs][0]);
            }
        }
    }

private:
    PointIndexLeafNode * const * const mPointIndexNodes;
    Int16LeafNodeType const * const * const mSignDataNodes;
    Index32 * const mData;
}; // struct MapPoints


// tbb::parallel_for functor that emits the polygons for each sign-flag leaf
// node into the corresponding PolygonPool.
template<typename TreeType, typename PrimBuilder>
struct ComputePolygons
{
    using Int16TreeType = typename TreeType::template ValueConverter<Int16>::Type;
    using Int16LeafNodeType = typename Int16TreeType::LeafNodeType;

    using Index32TreeType = typename TreeType::template ValueConverter<Index32>::Type;
    using Index32LeafNodeType = typename Index32TreeType::LeafNodeType;

    ComputePolygons(
        const std::vector<Int16LeafNodeType*>& signFlagsLeafNodes,
        const Int16TreeType& signFlagsTree,
        const Index32TreeType& idxTree,
        PolygonPoolList& polygons,
        bool invertSurfaceOrientation);

    // Optional reference sign tree used for seam-aware meshing.
    void setRefSignTree(const Int16TreeType * r) { mRefSignFlagsTree = r; }

    void operator()(const tbb::blocked_range<size_t>&) const;

private:
    Int16LeafNodeType * const * const mSignFlagsLeafNodes;
    Int16TreeType const * const mSignFlagsTree;
    Int16TreeType const * mRefSignFlagsTree;
    Index32TreeType const * const mIndexTree;
    PolygonPoolList * const
mPolygonPoolList;
    bool const mInvertSurfaceOrientation;
}; // struct ComputePolygons


template<typename TreeType, typename PrimBuilder>
ComputePolygons<TreeType, PrimBuilder>::ComputePolygons(
    const std::vector<Int16LeafNodeType*>& signFlagsLeafNodes,
    const Int16TreeType& signFlagsTree,
    const Index32TreeType& idxTree,
    PolygonPoolList& polygons,
    bool invertSurfaceOrientation)
    : mSignFlagsLeafNodes(signFlagsLeafNodes.empty() ? nullptr : &signFlagsLeafNodes.front())
    , mSignFlagsTree(&signFlagsTree)
    , mRefSignFlagsTree(nullptr)
    , mIndexTree(&idxTree)
    , mPolygonPoolList(&polygons)
    , mInvertSurfaceOrientation(invertSurfaceOrientation)
{
}


template<typename InputTreeType, typename PrimBuilder>
void
ComputePolygons<InputTreeType, PrimBuilder>::operator()(const tbb::blocked_range<size_t>& range) const
{
    using Int16ValueAccessor = tree::ValueAccessor<const Int16TreeType>;
    Int16ValueAccessor signAcc(*mSignFlagsTree);

    tree::ValueAccessor<const Index32TreeType> idxAcc(*mIndexTree);

    const bool invertSurfaceOrientation = mInvertSurfaceOrientation;

    PrimBuilder mesher;
    size_t edgeCount;
    Coord ijk, origin;

    // reference data
    std::unique_ptr<Int16ValueAccessor> refSignAcc;
    if (mRefSignFlagsTree) refSignAcc.reset(new Int16ValueAccessor(*mRefSignFlagsTree));

    for (size_t n = range.begin(); n != range.end(); ++n) {

        const Int16LeafNodeType& node = *mSignFlagsLeafNodes[n];
        origin = node.origin();

        // Get an upper bound on the number of primitives.
        edgeCount = 0;
        typename Int16LeafNodeType::ValueOnCIter iter = node.cbeginValueOn();
        for (; iter; ++iter) {
            if (iter.getValue() & XEDGE) ++edgeCount;
            if (iter.getValue() & YEDGE) ++edgeCount;
            if (iter.getValue() & ZEDGE) ++edgeCount;
        }

        if(edgeCount == 0) continue;

        mesher.init(edgeCount, (*mPolygonPoolList)[n]);

        const Int16LeafNodeType *signleafPt = signAcc.probeConstLeaf(origin);
        const Index32LeafNodeType *idxLeafPt = idxAcc.probeConstLeaf(origin);

        if (!signleafPt || !idxLeafPt) continue;

        const Int16LeafNodeType *refSignLeafPt = nullptr;
        if (refSignAcc) refSignLeafPt = refSignAcc->probeConstLeaf(origin);

        Vec3i offsets;

        for (iter = node.cbeginValueOn(); iter; ++iter) {

            ijk = iter.getCoord();

            Int16 flags = iter.getValue();

            // Only voxels with at least one edge intersection produce polygons.
            if (!(flags & 0xE00)) continue;

            Int16 refFlags = 0;
            if (refSignLeafPt) {
                refFlags = refSignLeafPt->getValue(iter.pos());
            }

            offsets[0] = 0;
            offsets[1] = 0;
            offsets[2] = 0;

            // Multi-point cells: select the edge-group offset per axis.
            const uint8_t cell = uint8_t(SIGNS & flags);

            if (sEdgeGroupTable[cell][0] > 1) {
                offsets[0] = (sEdgeGroupTable[cell][1] - 1);
                offsets[1] = (sEdgeGroupTable[cell][9] - 1);
                offsets[2] = (sEdgeGroupTable[cell][4] - 1);
            }

            // Interior voxels can use the cached leaf pointers; voxels on the
            // leaf boundary need tree accessors to reach neighboring nodes.
            if (ijk[0] > origin[0] && ijk[1] > origin[1] && ijk[2] > origin[2]) {
                constructPolygons(invertSurfaceOrientation,
                    flags, refFlags, offsets, ijk, *signleafPt, *idxLeafPt, mesher);
            } else {
                constructPolygons(invertSurfaceOrientation,
                    flags, refFlags, offsets, ijk, signAcc, idxAcc, mesher);
            }
        }

        mesher.done();
    }

} // ComputePolygons::operator()


////////////////////////////////////////


// Copies @a inputArray into @a outputArray at @a outputOffset
// (tbb::parallel_for functor over the input range).
template<typename T>
struct CopyArray
{
    CopyArray(T * outputArray, const T * inputArray, size_t outputOffset = 0)
        : mOutputArray(outputArray), mInputArray(inputArray), mOutputOffset(outputOffset)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& inputArrayRange) const
    {
        const size_t offset = mOutputOffset;
        for (size_t n = inputArrayRange.begin(), N = inputArrayRange.end(); n < N; ++n) {
            mOutputArray[offset + n] = mInputArray[n];
        }
    }

private:
    T * const mOutputArray;
    T const * const
mInputArray;
    size_t const mOutputOffset;
}; // struct CopyArray


// Tags nonplanar seam-line quads with POLYFLAG_SUBDIVIDED and counts, per
// polygon pool, how many quads will be split into triangles.
struct FlagAndCountQuadsToSubdivide
{
    FlagAndCountQuadsToSubdivide(PolygonPoolList& polygons,
        const std::vector<uint8_t>& pointFlags,
        std::unique_ptr<openvdb::Vec3s[]>& points,
        std::unique_ptr<unsigned[]>& numQuadsToDivide)
        : mPolygonPoolList(&polygons)
        , mPointFlags(pointFlags.empty() ? nullptr : &pointFlags.front())
        , mPoints(points.get())
        , mNumQuadsToDivide(numQuadsToDivide.get())
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            PolygonPool& polygons = (*mPolygonPoolList)[n];

            unsigned count = 0;

            // count and tag nonplanar seam line quads.
            for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) {

                char& flags = polygons.quadFlags(i);

                if ((flags & POLYFLAG_FRACTURE_SEAM) && !(flags & POLYFLAG_EXTERIOR)) {

                    Vec4I& quad = polygons.quad(i);

                    // Only quads that touch a flagged (seam-line) point qualify.
                    const bool edgePoly = mPointFlags[quad[0]] || mPointFlags[quad[1]]
                        || mPointFlags[quad[2]] || mPointFlags[quad[3]];

                    if (!edgePoly) continue;

                    const Vec3s& p0 = mPoints[quad[0]];
                    const Vec3s& p1 = mPoints[quad[1]];
                    const Vec3s& p2 = mPoints[quad[2]];
                    const Vec3s& p3 = mPoints[quad[3]];

                    if (!isPlanarQuad(p0, p1, p2, p3, 1e-6f)) {
                        flags |= POLYFLAG_SUBDIVIDED;
                        count++;
                    }
                }
            }

            mNumQuadsToDivide[n] = count;
        }
    }

private:
    PolygonPoolList * const mPolygonPoolList;
    uint8_t const * const mPointFlags;
    Vec3s const * const mPoints;
    unsigned * const mNumQuadsToDivide;
}; // struct FlagAndCountQuadsToSubdivide


// Splits each tagged quad into four triangles around a new centroid point.
// Centroid indices start at mPointCount and are offset per pool via
// mCentroidOffsets.
struct SubdivideQuads
{
    SubdivideQuads(PolygonPoolList& polygons,
        const std::unique_ptr<openvdb::Vec3s[]>& points,
        size_t pointCount,
        std::unique_ptr<openvdb::Vec3s[]>& centroids,
        std::unique_ptr<unsigned[]>& numQuadsToDivide,
        std::unique_ptr<unsigned[]>& centroidOffsets)
        : mPolygonPoolList(&polygons)
        , mPoints(points.get())
        , mCentroids(centroids.get())
        , mNumQuadsToDivide(numQuadsToDivide.get())
        , mCentroidOffsets(centroidOffsets.get())
        , mPointCount(pointCount)
    {
    }

    void operator()(const
tbb::blocked_range<size_t>& range) const
    {
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            PolygonPool& polygons = (*mPolygonPoolList)[n];

            const size_t nonplanarCount = size_t(mNumQuadsToDivide[n]);

            if (nonplanarCount > 0) {

                // Each subdivided quad is removed from the quad list and
                // replaced by four triangles.
                PolygonPool tmpPolygons;
                tmpPolygons.resetQuads(polygons.numQuads() - nonplanarCount);
                tmpPolygons.resetTriangles(polygons.numTriangles() + size_t(4) * nonplanarCount);

                size_t offset = mCentroidOffsets[n];

                size_t triangleIdx = 0;

                for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) {

                    const char quadFlags = polygons.quadFlags(i);
                    if (!(quadFlags & POLYFLAG_SUBDIVIDED)) continue;

                    unsigned newPointIdx = unsigned(offset + mPointCount);

                    openvdb::Vec4I& quad = polygons.quad(i);

                    mCentroids[offset] = (mPoints[quad[0]] + mPoints[quad[1]] +
                        mPoints[quad[2]] + mPoints[quad[3]]) * 0.25f;

                    ++offset;

                    // Fan the quad into four triangles around the centroid.
                    {
                        Vec3I& triangle = tmpPolygons.triangle(triangleIdx);

                        triangle[0] = quad[0];
                        triangle[1] = newPointIdx;
                        triangle[2] = quad[3];

                        tmpPolygons.triangleFlags(triangleIdx) = quadFlags;
                    }

                    ++triangleIdx;

                    {
                        Vec3I& triangle = tmpPolygons.triangle(triangleIdx);

                        triangle[0] = quad[0];
                        triangle[1] = quad[1];
                        triangle[2] = newPointIdx;

                        tmpPolygons.triangleFlags(triangleIdx) = quadFlags;
                    }

                    ++triangleIdx;

                    {
                        Vec3I& triangle = tmpPolygons.triangle(triangleIdx);

                        triangle[0] = quad[1];
                        triangle[1] = quad[2];
                        triangle[2] = newPointIdx;

                        tmpPolygons.triangleFlags(triangleIdx) = quadFlags;
                    }


                    ++triangleIdx;

                    {
                        Vec3I& triangle = tmpPolygons.triangle(triangleIdx);

                        triangle[0] = quad[2];
                        triangle[1] = quad[3];
                        triangle[2] = newPointIdx;

                        tmpPolygons.triangleFlags(triangleIdx) = quadFlags;
                    }

                    ++triangleIdx;

                    quad[0] = util::INVALID_IDX; // mark for deletion
                }

                for (size_t i = 0, I = polygons.numTriangles(); i < I; ++i) {
                    tmpPolygons.triangle(triangleIdx) = polygons.triangle(i);
                    tmpPolygons.triangleFlags(triangleIdx) = polygons.triangleFlags(i);
                    ++triangleIdx;
                }

                size_t quadIdx = 0;
                for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) {
                    openvdb::Vec4I& quad = polygons.quad(i);

                    if (quad[0] != util::INVALID_IDX) { // ignore invalid quads

                        tmpPolygons.quad(quadIdx) = quad;

                        tmpPolygons.quadFlags(quadIdx) = polygons.quadFlags(i);
                        ++quadIdx;
                    }
                }

                polygons.copy(tmpPolygons);
            }
        }
    }

private:
    PolygonPoolList * const mPolygonPoolList;
    Vec3s const * const mPoints;
    Vec3s * const mCentroids;
    unsigned * const mNumQuadsToDivide;
    unsigned * const mCentroidOffsets;
    size_t const mPointCount;
}; // struct SubdivideQuads


// Replaces nonplanar seam-line quads with centroid-fanned triangles and
// appends the new centroid points to @a pointList (resizing it in place).
inline void
subdivideNonplanarSeamLineQuads(
    PolygonPoolList& polygonPoolList,
    size_t polygonPoolListSize,
    PointList& pointList,
    size_t& pointListSize,
    std::vector<uint8_t>& pointFlags)
{
    const tbb::blocked_range<size_t> polygonPoolListRange(0, polygonPoolListSize);

    // count and tag nonplanar seam line quads.
    std::unique_ptr<unsigned[]> numQuadsToDivide(new unsigned[polygonPoolListSize]);

    tbb::parallel_for(polygonPoolListRange,
        FlagAndCountQuadsToSubdivide(polygonPoolList, pointFlags, pointList, numQuadsToDivide));

    // Serial prefix sum over per-pool counts -> centroid offsets.
    std::unique_ptr<unsigned[]> centroidOffsets(new unsigned[polygonPoolListSize]);

    size_t centroidCount = 0;

    {
        unsigned sum = 0;
        for (size_t n = 0, N = polygonPoolListSize; n < N; ++n) {

            centroidOffsets[n] = sum;
            sum += numQuadsToDivide[n];
        }
        centroidCount = size_t(sum);
    }

    // subdivide nonplanar quads.
    std::unique_ptr<Vec3s[]> centroidList(new Vec3s[centroidCount]);

    tbb::parallel_for(polygonPoolListRange,
        SubdivideQuads(polygonPoolList, pointList, pointListSize,
            centroidList, numQuadsToDivide, centroidOffsets));

    if (centroidCount > 0) {

        // Grow the point list and append the centroids at the end.
        const size_t newPointListSize = centroidCount + pointListSize;

        std::unique_ptr<openvdb::Vec3s[]> newPointList(new openvdb::Vec3s[newPointListSize]);

        tbb::parallel_for(tbb::blocked_range<size_t>(0, pointListSize),
            CopyArray<Vec3s>(newPointList.get(), pointList.get()));

        tbb::parallel_for(tbb::blocked_range<size_t>(0, newPointListSize - pointListSize),
            CopyArray<Vec3s>(newPointList.get(), centroidList.get(), pointListSize));

        pointListSize = newPointListSize;
        pointList.swap(newPointList);
        pointFlags.resize(pointListSize, 0);
    }
}


// Clears the POLYFLAG_FRACTURE_SEAM flag from polygons that no longer touch a
// flagged seam-line point.
struct ReviseSeamLineFlags
{
ReviseSeamLineFlags(PolygonPoolList& polygons,
        const std::vector<uint8_t>& pointFlags)
        : mPolygonPoolList(&polygons)
        , mPointFlags(pointFlags.empty() ? nullptr : &pointFlags.front())
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            PolygonPool& polygons = (*mPolygonPoolList)[n];

            for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) {

                char& flags = polygons.quadFlags(i);

                if (flags & POLYFLAG_FRACTURE_SEAM) {

                    openvdb::Vec4I& verts = polygons.quad(i);

                    const bool hasSeamLinePoint =
                        mPointFlags[verts[0]] || mPointFlags[verts[1]] ||
                        mPointFlags[verts[2]] || mPointFlags[verts[3]];

                    if (!hasSeamLinePoint) {
                        flags &= ~POLYFLAG_FRACTURE_SEAM;
                    }
                }
            } // end quad loop

            for (size_t i = 0, I = polygons.numTriangles(); i < I; ++i) {

                char& flags = polygons.triangleFlags(i);

                if (flags & POLYFLAG_FRACTURE_SEAM) {

                    openvdb::Vec3I& verts = polygons.triangle(i);

                    const bool hasSeamLinePoint =
                        mPointFlags[verts[0]] || mPointFlags[verts[1]] || mPointFlags[verts[2]];

                    if (!hasSeamLinePoint) {
                        flags &= ~POLYFLAG_FRACTURE_SEAM;
                    }
                }
            } // end triangle loop
        } // end polygon pool loop
    }

private:
    PolygonPoolList * const mPolygonPoolList;
    uint8_t const * const mPointFlags;
}; // struct ReviseSeamLineFlags


// Parallel driver for ReviseSeamLineFlags over all polygon pools.
inline void
reviseSeamLineFlags(PolygonPoolList& polygonPoolList, size_t polygonPoolListSize,
    std::vector<uint8_t>& pointFlags)
{
    tbb::parallel_for(tbb::blocked_range<size_t>(0, polygonPoolListSize),
        ReviseSeamLineFlags(polygonPoolList, pointFlags));
}


////////////////////////////////////////


// Flags the points of triangles whose normal strongly opposes the local input
// gradient (disoriented triangles), for later relaxation.
template<typename InputTreeType>
struct MaskDisorientedTrianglePoints
{
    MaskDisorientedTrianglePoints(const InputTreeType& inputTree, const PolygonPoolList& polygons,
        const PointList& pointList, std::unique_ptr<uint8_t[]>& pointMask,
        const math::Transform& transform, bool invertSurfaceOrientation)
        : mInputTree(&inputTree)
        , mPolygonPoolList(&polygons)
        , mPointList(&pointList)
        , mPointMask(pointMask.get())
        , mTransform(transform)
        ,
mInvertSurfaceOrientation(invertSurfaceOrientation)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        using ValueType = typename InputTreeType::LeafNodeType::ValueType;

        tree::ValueAccessor<const InputTreeType> inputAcc(*mInputTree);
        Vec3s centroid, normal;
        Coord ijk;

        // Bool grids flip the expected gradient direction as well.
        const bool invertGradientDir = mInvertSurfaceOrientation || isBoolValue<ValueType>();

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            const PolygonPool& polygons = (*mPolygonPoolList)[n];

            for (size_t i = 0, I = polygons.numTriangles(); i < I; ++i) {

                const Vec3I& verts = polygons.triangle(i);

                const Vec3s& v0 = (*mPointList)[verts[0]];
                const Vec3s& v1 = (*mPointList)[verts[1]];
                const Vec3s& v2 = (*mPointList)[verts[2]];

                normal = (v2 - v0).cross((v1 - v0));
                normal.normalize();

                centroid = (v0 + v1 + v2) * (1.0f / 3.0f);
                ijk = mTransform.worldToIndexCellCentered(centroid);

                // Compare the triangle normal with the field gradient at the centroid.
                Vec3s dir( math::ISGradient<math::CD_2ND>::result(inputAcc, ijk) );
                dir.normalize();

                if (invertGradientDir) {
                    dir = -dir;
                }

                // check if the angle is obtuse
                if (dir.dot(normal) < -0.5f) {
                    // Concurrent writes to same memory address can occur, but
                    // all threads are writing the same value and char is atomic.
                    // (It is extremely rare that disoriented triangles share points,
                    // false sharing related performance impacts are not a concern.)
                    mPointMask[verts[0]] = 1;
                    mPointMask[verts[1]] = 1;
                    mPointMask[verts[2]] = 1;
                }

            } // end triangle loop

        } // end polygon pool loop
    }

private:
    InputTreeType const * const mInputTree;
    PolygonPoolList const * const mPolygonPoolList;
    PointList const * const mPointList;
    uint8_t * const mPointMask;
    math::Transform const mTransform;
    bool const mInvertSurfaceOrientation;
}; // struct MaskDisorientedTrianglePoints


// Smooths points belonging to disoriented triangles by averaging the corner
// positions of all polygons incident on each flagged point.
template<typename InputTree>
inline void
relaxDisorientedTriangles(
    bool invertSurfaceOrientation,
    const InputTree& inputTree,
    const math::Transform& transform,
    PolygonPoolList& polygonPoolList,
    size_t polygonPoolListSize,
    PointList& pointList,
    const size_t pointListSize)
{
    const tbb::blocked_range<size_t> polygonPoolListRange(0, polygonPoolListSize);

    // Identify the points that need relaxing.
    std::unique_ptr<uint8_t[]> pointMask(new uint8_t[pointListSize]);
    fillArray(pointMask.get(), uint8_t(0), pointListSize);

    tbb::parallel_for(polygonPoolListRange,
        MaskDisorientedTrianglePoints<InputTree>(
            inputTree, polygonPoolList, pointList, pointMask,
            transform, invertSurfaceOrientation));

    // Accumulate, per flagged point, the sum of incident polygon corner
    // positions (newPoints) and the number of contributions (pointUpdates).
    std::unique_ptr<uint8_t[]> pointUpdates(new uint8_t[pointListSize]);
    fillArray(pointUpdates.get(), uint8_t(0), pointListSize);

    std::unique_ptr<Vec3s[]> newPoints(new Vec3s[pointListSize]);
    fillArray(newPoints.get(), Vec3s(0.0f, 0.0f, 0.0f), pointListSize);

    for (size_t n = 0, N = polygonPoolListSize; n < N; ++n) {

        PolygonPool& polygons = polygonPoolList[n];

        for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) {
            openvdb::Vec4I& verts = polygons.quad(i);

            for (int v = 0; v < 4; ++v) {

                const unsigned pointIdx = verts[v];

                if (pointMask[pointIdx] == 1) {

                    newPoints[pointIdx] +=
                        pointList[verts[0]] + pointList[verts[1]] +
                        pointList[verts[2]] + pointList[verts[3]];

                    pointUpdates[pointIdx] = uint8_t(pointUpdates[pointIdx] + 4);
                }
            }
        }

        for (size_t i = 0, I = polygons.numTriangles(); i < I; ++i) {
            openvdb::Vec3I& verts = polygons.triangle(i);

            for (int v = 0; v < 3; ++v) {

                const unsigned pointIdx = verts[v];

                if (pointMask[pointIdx] == 1) {
newPoints[pointIdx] += pointList[verts[0]] + pointList[verts[1]] + pointList[verts[2]]; pointUpdates[pointIdx] = uint8_t(pointUpdates[pointIdx] + 3); } } } } for (size_t n = 0, N = pointListSize; n < N; ++n) { if (pointUpdates[n] > 0) { const double weight = 1.0 / double(pointUpdates[n]); pointList[n] = newPoints[n] * float(weight); } } } } // volume_to_mesh_internal namespace //////////////////////////////////////// inline PolygonPool::PolygonPool() : mNumQuads(0) , mNumTriangles(0) , mQuads(nullptr) , mTriangles(nullptr) , mQuadFlags(nullptr) , mTriangleFlags(nullptr) { } inline PolygonPool::PolygonPool(const size_t numQuads, const size_t numTriangles) : mNumQuads(numQuads) , mNumTriangles(numTriangles) , mQuads(new openvdb::Vec4I[mNumQuads]) , mTriangles(new openvdb::Vec3I[mNumTriangles]) , mQuadFlags(new char[mNumQuads]) , mTriangleFlags(new char[mNumTriangles]) { } inline void PolygonPool::copy(const PolygonPool& rhs) { resetQuads(rhs.numQuads()); resetTriangles(rhs.numTriangles()); for (size_t i = 0; i < mNumQuads; ++i) { mQuads[i] = rhs.mQuads[i]; mQuadFlags[i] = rhs.mQuadFlags[i]; } for (size_t i = 0; i < mNumTriangles; ++i) { mTriangles[i] = rhs.mTriangles[i]; mTriangleFlags[i] = rhs.mTriangleFlags[i]; } } inline void PolygonPool::resetQuads(size_t size) { mNumQuads = size; mQuads.reset(new openvdb::Vec4I[mNumQuads]); mQuadFlags.reset(new char[mNumQuads]); } inline void PolygonPool::clearQuads() { mNumQuads = 0; mQuads.reset(nullptr); mQuadFlags.reset(nullptr); } inline void PolygonPool::resetTriangles(size_t size) { mNumTriangles = size; mTriangles.reset(new openvdb::Vec3I[mNumTriangles]); mTriangleFlags.reset(new char[mNumTriangles]); } inline void PolygonPool::clearTriangles() { mNumTriangles = 0; mTriangles.reset(nullptr); mTriangleFlags.reset(nullptr); } inline bool PolygonPool::trimQuads(const size_t n, bool reallocate) { if (!(n < mNumQuads)) return false; if (reallocate) { if (n == 0) { mQuads.reset(nullptr); } else { 
std::unique_ptr<openvdb::Vec4I[]> quads(new openvdb::Vec4I[n]); std::unique_ptr<char[]> flags(new char[n]); for (size_t i = 0; i < n; ++i) { quads[i] = mQuads[i]; flags[i] = mQuadFlags[i]; } mQuads.swap(quads); mQuadFlags.swap(flags); } } mNumQuads = n; return true; } inline bool PolygonPool::trimTrinagles(const size_t n, bool reallocate) { if (!(n < mNumTriangles)) return false; if (reallocate) { if (n == 0) { mTriangles.reset(nullptr); } else { std::unique_ptr<openvdb::Vec3I[]> triangles(new openvdb::Vec3I[n]); std::unique_ptr<char[]> flags(new char[n]); for (size_t i = 0; i < n; ++i) { triangles[i] = mTriangles[i]; flags[i] = mTriangleFlags[i]; } mTriangles.swap(triangles); mTriangleFlags.swap(flags); } } mNumTriangles = n; return true; } //////////////////////////////////////// inline VolumeToMesh::VolumeToMesh(double isovalue, double adaptivity, bool relaxDisorientedTriangles) : mPoints(nullptr) , mPolygons() , mPointListSize(0) , mSeamPointListSize(0) , mPolygonPoolListSize(0) , mIsovalue(isovalue) , mPrimAdaptivity(adaptivity) , mSecAdaptivity(0.0) , mRefGrid(GridBase::ConstPtr()) , mSurfaceMaskGrid(GridBase::ConstPtr()) , mAdaptivityGrid(GridBase::ConstPtr()) , mAdaptivityMaskTree(TreeBase::ConstPtr()) , mRefSignTree(TreeBase::Ptr()) , mRefIdxTree(TreeBase::Ptr()) , mInvertSurfaceMask(false) , mRelaxDisorientedTriangles(relaxDisorientedTriangles) , mQuantizedSeamPoints(nullptr) , mPointFlags(0) { } inline void VolumeToMesh::setRefGrid(const GridBase::ConstPtr& grid, double secAdaptivity) { mRefGrid = grid; mSecAdaptivity = secAdaptivity; // Clear out old auxiliary data mRefSignTree = TreeBase::Ptr(); mRefIdxTree = TreeBase::Ptr(); mSeamPointListSize = 0; mQuantizedSeamPoints.reset(nullptr); } inline void VolumeToMesh::setSurfaceMask(const GridBase::ConstPtr& mask, bool invertMask) { mSurfaceMaskGrid = mask; mInvertSurfaceMask = invertMask; } inline void VolumeToMesh::setSpatialAdaptivity(const GridBase::ConstPtr& grid) { mAdaptivityGrid = grid; } inline 
void VolumeToMesh::setAdaptivityMask(const TreeBase::ConstPtr& tree) { mAdaptivityMaskTree = tree; } template<typename InputGridType> inline void VolumeToMesh::operator()(const InputGridType& inputGrid) { // input data types using InputTreeType = typename InputGridType::TreeType; using InputLeafNodeType = typename InputTreeType::LeafNodeType; using InputValueType = typename InputLeafNodeType::ValueType; // auxiliary data types using FloatTreeType = typename InputTreeType::template ValueConverter<float>::Type; using FloatGridType = Grid<FloatTreeType>; using BoolTreeType = typename InputTreeType::template ValueConverter<bool>::Type; using Int16TreeType = typename InputTreeType::template ValueConverter<Int16>::Type; using Int16LeafNodeType = typename Int16TreeType::LeafNodeType; using Index32TreeType = typename InputTreeType::template ValueConverter<Index32>::Type; using Index32LeafNodeType = typename Index32TreeType::LeafNodeType; // clear old data mPointListSize = 0; mPoints.reset(); mPolygonPoolListSize = 0; mPolygons.reset(); mPointFlags.clear(); // settings const math::Transform& transform = inputGrid.transform(); const InputValueType isovalue = InputValueType(mIsovalue); const float adaptivityThreshold = float(mPrimAdaptivity); const bool adaptive = mPrimAdaptivity > 1e-7 || mSecAdaptivity > 1e-7; // The default surface orientation is setup for level set and bool/mask grids. // Boolean grids are handled correctly by their value type. Signed distance fields, // unsigned distance fields and fog volumes have the same value type but use different // inside value classifications. 
const bool invertSurfaceOrientation = (!volume_to_mesh_internal::isBoolValue<InputValueType>() && (inputGrid.getGridClass() != openvdb::GRID_LEVEL_SET)); // references, masks and auxiliary data const InputTreeType& inputTree = inputGrid.tree(); BoolTreeType intersectionTree(false), adaptivityMask(false); if (mAdaptivityMaskTree && mAdaptivityMaskTree->type() == BoolTreeType::treeType()) { const BoolTreeType *refAdaptivityMask= static_cast<const BoolTreeType*>(mAdaptivityMaskTree.get()); adaptivityMask.topologyUnion(*refAdaptivityMask); } Int16TreeType signFlagsTree(0); Index32TreeType pointIndexTree(std::numeric_limits<Index32>::max()); // collect auxiliary data volume_to_mesh_internal::identifySurfaceIntersectingVoxels( intersectionTree, inputTree, isovalue); volume_to_mesh_internal::applySurfaceMask(intersectionTree, adaptivityMask, inputGrid, mSurfaceMaskGrid, mInvertSurfaceMask, isovalue); if (intersectionTree.empty()) return; volume_to_mesh_internal::computeAuxiliaryData( signFlagsTree, pointIndexTree, intersectionTree, inputTree, isovalue); intersectionTree.clear(); std::vector<Index32LeafNodeType*> pointIndexLeafNodes; pointIndexTree.getNodes(pointIndexLeafNodes); std::vector<Int16LeafNodeType*> signFlagsLeafNodes; signFlagsTree.getNodes(signFlagsLeafNodes); const tbb::blocked_range<size_t> auxiliaryLeafNodeRange(0, signFlagsLeafNodes.size()); // optionally collect auxiliary data from a reference volume. Int16TreeType* refSignFlagsTree = nullptr; Index32TreeType* refPointIndexTree = nullptr; InputTreeType const* refInputTree = nullptr; if (mRefGrid && mRefGrid->type() == InputGridType::gridType()) { const InputGridType* refGrid = static_cast<const InputGridType*>(mRefGrid.get()); refInputTree = &refGrid->tree(); if (!mRefSignTree && !mRefIdxTree) { // first time, collect and cache auxiliary data. 
typename Int16TreeType::Ptr refSignFlagsTreePt(new Int16TreeType(0)); typename Index32TreeType::Ptr refPointIndexTreePt( new Index32TreeType(std::numeric_limits<Index32>::max())); BoolTreeType refIntersectionTree(false); volume_to_mesh_internal::identifySurfaceIntersectingVoxels( refIntersectionTree, *refInputTree, isovalue); volume_to_mesh_internal::computeAuxiliaryData(*refSignFlagsTreePt, *refPointIndexTreePt, refIntersectionTree, *refInputTree, isovalue); mRefSignTree = refSignFlagsTreePt; mRefIdxTree = refPointIndexTreePt; } if (mRefSignTree && mRefIdxTree) { // get cached auxiliary data refSignFlagsTree = static_cast<Int16TreeType*>(mRefSignTree.get()); refPointIndexTree = static_cast<Index32TreeType*>(mRefIdxTree.get()); } if (refSignFlagsTree && refPointIndexTree) { // generate seam line sample points volume_to_mesh_internal::markSeamLineData(signFlagsTree, *refSignFlagsTree); if (mSeamPointListSize == 0) { // count unique points on reference surface std::vector<Int16LeafNodeType*> refSignFlagsLeafNodes; refSignFlagsTree->getNodes(refSignFlagsLeafNodes); std::unique_ptr<Index32[]> leafNodeOffsets( new Index32[refSignFlagsLeafNodes.size()]); tbb::parallel_for(tbb::blocked_range<size_t>(0, refSignFlagsLeafNodes.size()), volume_to_mesh_internal::LeafNodePointCount<Int16LeafNodeType::LOG2DIM>( refSignFlagsLeafNodes, leafNodeOffsets)); { Index32 count = 0; for (size_t n = 0, N = refSignFlagsLeafNodes.size(); n < N; ++n) { const Index32 tmp = leafNodeOffsets[n]; leafNodeOffsets[n] = count; count += tmp; } mSeamPointListSize = size_t(count); } if (mSeamPointListSize != 0) { mQuantizedSeamPoints.reset(new uint32_t[mSeamPointListSize]); memset(mQuantizedSeamPoints.get(), 0, sizeof(uint32_t) * mSeamPointListSize); std::vector<Index32LeafNodeType*> refPointIndexLeafNodes; refPointIndexTree->getNodes(refPointIndexLeafNodes); tbb::parallel_for(tbb::blocked_range<size_t>(0, refPointIndexLeafNodes.size()), volume_to_mesh_internal::MapPoints<Index32LeafNodeType>( 
refPointIndexLeafNodes, refSignFlagsLeafNodes, leafNodeOffsets)); } } if (mSeamPointListSize != 0) { tbb::parallel_for(auxiliaryLeafNodeRange, volume_to_mesh_internal::SeamLineWeights<InputTreeType>( signFlagsLeafNodes, inputTree, *refPointIndexTree, *refSignFlagsTree, mQuantizedSeamPoints.get(), isovalue)); } } } const bool referenceMeshing = refSignFlagsTree && refPointIndexTree && refInputTree; // adapt and count unique points std::unique_ptr<Index32[]> leafNodeOffsets(new Index32[signFlagsLeafNodes.size()]); if (adaptive) { volume_to_mesh_internal::MergeVoxelRegions<InputGridType> mergeOp( inputGrid, pointIndexTree, pointIndexLeafNodes, signFlagsLeafNodes, isovalue, adaptivityThreshold, invertSurfaceOrientation); if (mAdaptivityGrid && mAdaptivityGrid->type() == FloatGridType::gridType()) { const FloatGridType* adaptivityGrid = static_cast<const FloatGridType*>(mAdaptivityGrid.get()); mergeOp.setSpatialAdaptivity(*adaptivityGrid); } if (!adaptivityMask.empty()) { mergeOp.setAdaptivityMask(adaptivityMask); } if (referenceMeshing) { mergeOp.setRefSignFlagsData(*refSignFlagsTree, float(mSecAdaptivity)); } tbb::parallel_for(auxiliaryLeafNodeRange, mergeOp); volume_to_mesh_internal::AdaptiveLeafNodePointCount<Index32LeafNodeType> op(pointIndexLeafNodes, signFlagsLeafNodes, leafNodeOffsets); tbb::parallel_for(auxiliaryLeafNodeRange, op); } else { volume_to_mesh_internal::LeafNodePointCount<Int16LeafNodeType::LOG2DIM> op(signFlagsLeafNodes, leafNodeOffsets); tbb::parallel_for(auxiliaryLeafNodeRange, op); } { Index32 pointCount = 0; for (size_t n = 0, N = signFlagsLeafNodes.size(); n < N; ++n) { const Index32 tmp = leafNodeOffsets[n]; leafNodeOffsets[n] = pointCount; pointCount += tmp; } mPointListSize = size_t(pointCount); mPoints.reset(new openvdb::Vec3s[mPointListSize]); mPointFlags.clear(); } // compute points { volume_to_mesh_internal::ComputePoints<InputTreeType> op(mPoints.get(), inputTree, pointIndexLeafNodes, signFlagsLeafNodes, leafNodeOffsets, transform, 
mIsovalue); if (referenceMeshing) { mPointFlags.resize(mPointListSize); op.setRefData(*refInputTree, *refPointIndexTree, *refSignFlagsTree, mQuantizedSeamPoints.get(), &mPointFlags.front()); } tbb::parallel_for(auxiliaryLeafNodeRange, op); } // compute polygons mPolygonPoolListSize = signFlagsLeafNodes.size(); mPolygons.reset(new PolygonPool[mPolygonPoolListSize]); if (adaptive) { using PrimBuilder = volume_to_mesh_internal::AdaptivePrimBuilder; volume_to_mesh_internal::ComputePolygons<Int16TreeType, PrimBuilder> op(signFlagsLeafNodes, signFlagsTree, pointIndexTree, mPolygons, invertSurfaceOrientation); if (referenceMeshing) { op.setRefSignTree(refSignFlagsTree); } tbb::parallel_for(auxiliaryLeafNodeRange, op); } else { using PrimBuilder = volume_to_mesh_internal::UniformPrimBuilder; volume_to_mesh_internal::ComputePolygons<Int16TreeType, PrimBuilder> op(signFlagsLeafNodes, signFlagsTree, pointIndexTree, mPolygons, invertSurfaceOrientation); if (referenceMeshing) { op.setRefSignTree(refSignFlagsTree); } tbb::parallel_for(auxiliaryLeafNodeRange, op); } signFlagsTree.clear(); pointIndexTree.clear(); if (adaptive && mRelaxDisorientedTriangles) { volume_to_mesh_internal::relaxDisorientedTriangles(invertSurfaceOrientation, inputTree, transform, mPolygons, mPolygonPoolListSize, mPoints, mPointListSize); } if (referenceMeshing) { volume_to_mesh_internal::subdivideNonplanarSeamLineQuads( mPolygons, mPolygonPoolListSize, mPoints, mPointListSize, mPointFlags); volume_to_mesh_internal::reviseSeamLineFlags(mPolygons, mPolygonPoolListSize, mPointFlags); } } //////////////////////////////////////// //{ /// @cond OPENVDB_VOLUME_TO_MESH_INTERNAL /// @internal This overload is enabled only for grids with a scalar ValueType. 
template<typename GridType> inline typename std::enable_if<std::is_scalar<typename GridType::ValueType>::value, void>::type doVolumeToMesh( const GridType& grid, std::vector<Vec3s>& points, std::vector<Vec3I>& triangles, std::vector<Vec4I>& quads, double isovalue, double adaptivity, bool relaxDisorientedTriangles) { VolumeToMesh mesher(isovalue, adaptivity, relaxDisorientedTriangles); mesher(grid); // Preallocate the point list points.clear(); points.resize(mesher.pointListSize()); { // Copy points volume_to_mesh_internal::PointListCopy ptnCpy(mesher.pointList(), points); tbb::parallel_for(tbb::blocked_range<size_t>(0, points.size()), ptnCpy); mesher.pointList().reset(nullptr); } PolygonPoolList& polygonPoolList = mesher.polygonPoolList(); { // Preallocate primitive lists size_t numQuads = 0, numTriangles = 0; for (size_t n = 0, N = mesher.polygonPoolListSize(); n < N; ++n) { openvdb::tools::PolygonPool& polygons = polygonPoolList[n]; numTriangles += polygons.numTriangles(); numQuads += polygons.numQuads(); } triangles.clear(); triangles.resize(numTriangles); quads.clear(); quads.resize(numQuads); } // Copy primitives size_t qIdx = 0, tIdx = 0; for (size_t n = 0, N = mesher.polygonPoolListSize(); n < N; ++n) { openvdb::tools::PolygonPool& polygons = polygonPoolList[n]; for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) { quads[qIdx++] = polygons.quad(i); } for (size_t i = 0, I = polygons.numTriangles(); i < I; ++i) { triangles[tIdx++] = polygons.triangle(i); } } } /// @internal This overload is enabled only for grids that do not have a scalar ValueType. 
template<typename GridType> inline typename std::enable_if<!std::is_scalar<typename GridType::ValueType>::value, void>::type doVolumeToMesh( const GridType&, std::vector<Vec3s>&, std::vector<Vec3I>&, std::vector<Vec4I>&, double, double, bool) { OPENVDB_THROW(TypeError, "volume to mesh conversion is supported only for scalar grids"); } /// @endcond //} template<typename GridType> inline void volumeToMesh( const GridType& grid, std::vector<Vec3s>& points, std::vector<Vec3I>& triangles, std::vector<Vec4I>& quads, double isovalue, double adaptivity, bool relaxDisorientedTriangles) { doVolumeToMesh(grid, points, triangles, quads, isovalue, adaptivity, relaxDisorientedTriangles); } template<typename GridType> inline void volumeToMesh( const GridType& grid, std::vector<Vec3s>& points, std::vector<Vec4I>& quads, double isovalue) { std::vector<Vec3I> triangles; doVolumeToMesh(grid, points, triangles, quads, isovalue, 0.0, true); } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_VOLUME_TO_MESH_HAS_BEEN_INCLUDED
179,054
C
33.021471
109
0.585237
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetFilter.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @author Ken Museth /// /// @file tools/LevelSetFilter.h /// /// @brief Performs various types of level set deformations with /// interface tracking. These unrestricted deformations include /// surface smoothing (e.g., Laplacian flow), filtering (e.g., mean /// value) and morphological operations (e.g., morphological opening). /// All these operations can optionally be masked with another grid that /// acts as an alpha-mask. #ifndef OPENVDB_TOOLS_LEVELSETFILTER_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_LEVELSETFILTER_HAS_BEEN_INCLUDED #include "LevelSetTracker.h" #include "Interpolation.h" #include <algorithm> // for std::max() #include <functional> #include <type_traits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Filtering (e.g. diffusion) of narrow-band level sets. An /// optional scalar field can be used to produce a (smooth) alpha mask /// for the filtering. /// /// @note This class performs proper interface tracking which allows /// for unrestricted surface deformations template<typename GridT, typename MaskT = typename GridT::template ValueConverter<float>::Type, typename InterruptT = util::NullInterrupter> class LevelSetFilter : public LevelSetTracker<GridT, InterruptT> { public: using BaseType = LevelSetTracker<GridT, InterruptT>; using GridType = GridT; using MaskType = MaskT; using TreeType = typename GridType::TreeType; using ValueType = typename TreeType::ValueType; using AlphaType = typename MaskType::ValueType; static_assert(std::is_floating_point<AlphaType>::value, "LevelSetFilter requires a mask grid with floating-point values"); /// @brief Main constructor from a grid /// @param grid The level set to be filtered. /// @param interrupt Optional interrupter. 
LevelSetFilter(GridType& grid, InterruptT* interrupt = nullptr) : BaseType(grid, interrupt) , mMinMask(0) , mMaxMask(1) , mInvertMask(false) { } /// @brief Default destructor ~LevelSetFilter() override {} /// @brief Return the minimum value of the mask to be used for the /// derivation of a smooth alpha value. AlphaType minMask() const { return mMinMask; } /// @brief Return the maximum value of the mask to be used for the /// derivation of a smooth alpha value. AlphaType maxMask() const { return mMaxMask; } /// @brief Define the range for the (optional) scalar mask. /// @param min Minimum value of the range. /// @param max Maximum value of the range. /// @details Mask values outside the range maps to alpha values of /// respectfully zero and one, and values inside the range maps /// smoothly to 0->1 (unless of course the mask is inverted). /// @throw ValueError if @a min is not smaller than @a max. void setMaskRange(AlphaType min, AlphaType max) { if (!(min < max)) OPENVDB_THROW(ValueError, "Invalid mask range (expects min < max)"); mMinMask = min; mMaxMask = max; } /// @brief Return true if the mask is inverted, i.e. min->max in the /// original mask maps to 1->0 in the inverted alpha mask. bool isMaskInverted() const { return mInvertMask; } /// @brief Invert the optional mask, i.e. min->max in the original /// mask maps to 1->0 in the inverted alpha mask. void invertMask(bool invert=true) { mInvertMask = invert; } /// @brief One iteration of mean-curvature flow of the level set. /// @param mask Optional alpha mask. void meanCurvature(const MaskType* mask = nullptr) { Filter f(this, mask); f.meanCurvature(); } /// @brief One iteration of Laplacian flow of the level set. /// @param mask Optional alpha mask. void laplacian(const MaskType* mask = nullptr) { Filter f(this, mask); f.laplacian(); } /// @brief One iteration of a fast separable Gaussian filter. /// @param width Width of the Gaussian kernel in voxel units. /// @param mask Optional alpha mask. 
/// /// @note This is approximated as 4 iterations of a separable mean filter /// which typically leads an approximation that's better than 95%! void gaussian(int width = 1, const MaskType* mask = nullptr) { Filter f(this, mask); f.gaussian(width); } /// @brief Offset the level set by the specified (world) distance. /// @param offset Value of the offset. /// @param mask Optional alpha mask. void offset(ValueType offset, const MaskType* mask = nullptr) { Filter f(this, mask); f.offset(offset); } /// @brief One iteration of median-value flow of the level set. /// @param width Width of the median-value kernel in voxel units. /// @param mask Optional alpha mask. /// /// @warning This filter is not separable and is hence relatively /// slow! void median(int width = 1, const MaskType* mask = nullptr) { Filter f(this, mask); f.median(width); } /// @brief One iteration of mean-value flow of the level set. /// @param width Width of the mean-value kernel in voxel units. /// @param mask Optional alpha mask. /// /// @note This filter is separable so it's fast! void mean(int width = 1, const MaskType* mask = nullptr) { Filter f(this, mask); f.mean(width); } private: // disallow copy construction and copy by assignment! LevelSetFilter(const LevelSetFilter&);// not implemented LevelSetFilter& operator=(const LevelSetFilter&);// not implemented // Private struct that implements all the filtering. 
struct Filter { using LeafT = typename TreeType::LeafNodeType; using VoxelIterT = typename LeafT::ValueOnIter; using VoxelCIterT = typename LeafT::ValueOnCIter; using BufferT = typename tree::LeafManager<TreeType>::BufferType; using LeafRange = typename tree::LeafManager<TreeType>::LeafRange; using LeafIterT = typename LeafRange::Iterator; using AlphaMaskT = tools::AlphaMask<GridT, MaskT>; Filter(LevelSetFilter* parent, const MaskType* mask) : mParent(parent), mMask(mask) {} Filter(const Filter&) = default; virtual ~Filter() {} void box(int width); void median(int width); void mean(int width); void gaussian(int width); void laplacian(); void meanCurvature(); void offset(ValueType value); void operator()(const LeafRange& r) const { if (mTask) mTask(const_cast<Filter*>(this), r); else OPENVDB_THROW(ValueError, "task is undefined - don\'t call this method directly"); } void cook(bool swap) { const int n = mParent->getGrainSize(); if (n>0) { tbb::parallel_for(mParent->leafs().leafRange(n), *this); } else { (*this)(mParent->leafs().leafRange()); } if (swap) mParent->leafs().swapLeafBuffer(1, n==0); } template <size_t Axis> struct Avg { Avg(const GridT& grid, Int32 w) : acc(grid.tree()), width(w), frac(1/ValueType(2*w+1)) {} inline ValueType operator()(Coord xyz) { ValueType sum = zeroVal<ValueType>(); Int32& i = xyz[Axis], j = i + width; for (i -= width; i <= j; ++i) sum += acc.getValue(xyz); return sum*frac; } typename GridT::ConstAccessor acc; const Int32 width; const ValueType frac; }; template<typename AvgT> void boxImpl(const LeafRange& r, Int32 w); void boxXImpl(const LeafRange& r, Int32 w) { this->boxImpl<Avg<0> >(r,w); } void boxZImpl(const LeafRange& r, Int32 w) { this->boxImpl<Avg<1> >(r,w); } void boxYImpl(const LeafRange& r, Int32 w) { this->boxImpl<Avg<2> >(r,w); } void medianImpl(const LeafRange&, int); void meanCurvatureImpl(const LeafRange&); void laplacianImpl(const LeafRange&); void offsetImpl(const LeafRange&, ValueType); LevelSetFilter* mParent; 
const MaskType* mMask; typename std::function<void (Filter*, const LeafRange&)> mTask; }; // end of private Filter struct AlphaType mMinMask, mMaxMask; bool mInvertMask; }; // end of LevelSetFilter class //////////////////////////////////////// template<typename GridT, typename MaskT, typename InterruptT> inline void LevelSetFilter<GridT, MaskT, InterruptT>::Filter::median(int width) { mParent->startInterrupter("Median-value flow of level set"); mParent->leafs().rebuildAuxBuffers(1, mParent->getGrainSize()==0); mTask = std::bind(&Filter::medianImpl, std::placeholders::_1, std::placeholders::_2, std::max(1, width)); this->cook(true); mParent->track(); mParent->endInterrupter(); } template<typename GridT, typename MaskT, typename InterruptT> inline void LevelSetFilter<GridT, MaskT, InterruptT>::Filter::mean(int width) { mParent->startInterrupter("Mean-value flow of level set"); this->box(width); mParent->endInterrupter(); } template<typename GridT, typename MaskT, typename InterruptT> inline void LevelSetFilter<GridT, MaskT, InterruptT>::Filter::gaussian(int width) { mParent->startInterrupter("Gaussian flow of level set"); for (int n=0; n<4; ++n) this->box(width); mParent->endInterrupter(); } template<typename GridT, typename MaskT, typename InterruptT> inline void LevelSetFilter<GridT, MaskT, InterruptT>::Filter::box(int width) { mParent->leafs().rebuildAuxBuffers(1, mParent->getGrainSize()==0); width = std::max(1, width); mTask = std::bind(&Filter::boxXImpl, std::placeholders::_1, std::placeholders::_2, width); this->cook(true); mTask = std::bind(&Filter::boxYImpl, std::placeholders::_1, std::placeholders::_2, width); this->cook(true); mTask = std::bind(&Filter::boxZImpl, std::placeholders::_1, std::placeholders::_2, width); this->cook(true); mParent->track(); } template<typename GridT, typename MaskT, typename InterruptT> inline void LevelSetFilter<GridT, MaskT, InterruptT>::Filter::meanCurvature() { mParent->startInterrupter("Mean-curvature flow of level set"); 
mParent->leafs().rebuildAuxBuffers(1, mParent->getGrainSize()==0); mTask = std::bind(&Filter::meanCurvatureImpl, std::placeholders::_1, std::placeholders::_2); this->cook(true); mParent->track(); mParent->endInterrupter(); } template<typename GridT, typename MaskT, typename InterruptT> inline void LevelSetFilter<GridT, MaskT, InterruptT>::Filter::laplacian() { mParent->startInterrupter("Laplacian flow of level set"); mParent->leafs().rebuildAuxBuffers(1, mParent->getGrainSize()==0); mTask = std::bind(&Filter::laplacianImpl, std::placeholders::_1, std::placeholders::_2); this->cook(true); mParent->track(); mParent->endInterrupter(); } template<typename GridT, typename MaskT, typename InterruptT> inline void LevelSetFilter<GridT, MaskT, InterruptT>::Filter::offset(ValueType value) { mParent->startInterrupter("Offsetting level set"); mParent->leafs().removeAuxBuffers();// no auxiliary buffers required const ValueType CFL = ValueType(0.5) * mParent->voxelSize(), offset = openvdb::math::Abs(value); ValueType dist = 0.0; while (offset-dist > ValueType(0.001)*CFL && mParent->checkInterrupter()) { const ValueType delta = openvdb::math::Min(offset-dist, CFL); dist += delta; mTask = std::bind(&Filter::offsetImpl, std::placeholders::_1, std::placeholders::_2, copysign(delta, value)); this->cook(false); mParent->track(); } mParent->endInterrupter(); } ///////////////////////// PRIVATE METHODS ////////////////////// /// Performs parabolic mean-curvature diffusion template<typename GridT, typename MaskT, typename InterruptT> inline void LevelSetFilter<GridT, MaskT, InterruptT>::Filter::meanCurvatureImpl(const LeafRange& range) { mParent->checkInterrupter(); //const float CFL = 0.9f, dt = CFL * mDx * mDx / 6.0f; const ValueType dx = mParent->voxelSize(), dt = math::Pow2(dx) / ValueType(3.0); math::CurvatureStencil<GridType> stencil(mParent->grid(), dx); if (mMask) { typename AlphaMaskT::FloatType a, b; AlphaMaskT alpha(mParent->grid(), *mMask, mParent->minMask(), 
mParent->maxMask(), mParent->isMaskInverted()); for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { ValueType* buffer = leafIter.buffer(1).data(); for (VoxelCIterT iter = leafIter->cbeginValueOn(); iter; ++iter) { if (alpha(iter.getCoord(), a, b)) { stencil.moveTo(iter); const ValueType phi0 = *iter, phi1 = phi0 + dt*stencil.meanCurvatureNormGrad(); buffer[iter.pos()] = b * phi0 + a * phi1; } } } } else { for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { ValueType* buffer = leafIter.buffer(1).data(); for (VoxelCIterT iter = leafIter->cbeginValueOn(); iter; ++iter) { stencil.moveTo(iter); buffer[iter.pos()] = *iter + dt*stencil.meanCurvatureNormGrad(); } } } } /// Performs Laplacian diffusion. Note if the grids contains a true /// signed distance field (e.g. a solution to the Eikonal equation) /// Laplacian diffusions (e.g. geometric heat equation) is actually /// identical to mean curvature diffusion, yet less computationally /// expensive! In other words if you're performing renormalization /// anyway (e.g. rebuilding the narrow-band) you should consider /// performing Laplacian diffusion over mean curvature flow! 
template<typename GridT, typename MaskT, typename InterruptT> inline void LevelSetFilter<GridT, MaskT, InterruptT>::Filter::laplacianImpl(const LeafRange& range) { mParent->checkInterrupter(); //const float CFL = 0.9f, half_dt = CFL * mDx * mDx / 12.0f; const ValueType dx = mParent->voxelSize(), dt = math::Pow2(dx) / ValueType(6.0); math::GradStencil<GridType> stencil(mParent->grid(), dx); if (mMask) { typename AlphaMaskT::FloatType a, b; AlphaMaskT alpha(mParent->grid(), *mMask, mParent->minMask(), mParent->maxMask(), mParent->isMaskInverted()); for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { ValueType* buffer = leafIter.buffer(1).data(); for (VoxelCIterT iter = leafIter->cbeginValueOn(); iter; ++iter) { if (alpha(iter.getCoord(), a, b)) { stencil.moveTo(iter); const ValueType phi0 = *iter, phi1 = phi0 + dt*stencil.laplacian(); buffer[iter.pos()] = b * phi0 + a * phi1; } } } } else { for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { ValueType* buffer = leafIter.buffer(1).data(); for (VoxelCIterT iter = leafIter->cbeginValueOn(); iter; ++iter) { stencil.moveTo(iter); buffer[iter.pos()] = *iter + dt*stencil.laplacian(); } } } } /// Offsets the values by a constant template<typename GridT, typename MaskT, typename InterruptT> inline void LevelSetFilter<GridT, MaskT, InterruptT>::Filter::offsetImpl( const LeafRange& range, ValueType offset) { mParent->checkInterrupter(); if (mMask) { typename AlphaMaskT::FloatType a, b; AlphaMaskT alpha(mParent->grid(), *mMask, mParent->minMask(), mParent->maxMask(), mParent->isMaskInverted()); for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { for (VoxelIterT iter = leafIter->beginValueOn(); iter; ++iter) { if (alpha(iter.getCoord(), a, b)) iter.setValue(*iter + a*offset); } } } else { for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { for (VoxelIterT iter = leafIter->beginValueOn(); iter; ++iter) { iter.setValue(*iter + offset); } } } } /// Performs simple but slow median-value 
diffusion template<typename GridT, typename MaskT, typename InterruptT> inline void LevelSetFilter<GridT, MaskT, InterruptT>::Filter::medianImpl(const LeafRange& range, int width) { mParent->checkInterrupter(); typename math::DenseStencil<GridType> stencil(mParent->grid(), width);//creates local cache! if (mMask) { typename AlphaMaskT::FloatType a, b; AlphaMaskT alpha(mParent->grid(), *mMask, mParent->minMask(), mParent->maxMask(), mParent->isMaskInverted()); for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { ValueType* buffer = leafIter.buffer(1).data(); for (VoxelCIterT iter = leafIter->cbeginValueOn(); iter; ++iter) { if (alpha(iter.getCoord(), a, b)) { stencil.moveTo(iter); buffer[iter.pos()] = b * (*iter) + a * stencil.median(); } } } } else { for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { ValueType* buffer = leafIter.buffer(1).data(); for (VoxelCIterT iter = leafIter->cbeginValueOn(); iter; ++iter) { stencil.moveTo(iter); buffer[iter.pos()] = stencil.median(); } } } } /// One dimensional convolution of a separable box filter template<typename GridT, typename MaskT, typename InterruptT> template <typename AvgT> inline void LevelSetFilter<GridT, MaskT, InterruptT>::Filter::boxImpl(const LeafRange& range, Int32 w) { mParent->checkInterrupter(); AvgT avg(mParent->grid(), w); if (mMask) { typename AlphaMaskT::FloatType a, b; AlphaMaskT alpha(mParent->grid(), *mMask, mParent->minMask(), mParent->maxMask(), mParent->isMaskInverted()); for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { ValueType* buffer = leafIter.buffer(1).data(); for (VoxelCIterT iter = leafIter->cbeginValueOn(); iter; ++iter) { const Coord xyz = iter.getCoord(); if (alpha(xyz, a, b)) buffer[iter.pos()] = b * (*iter)+ a * avg(xyz); } } } else { for (LeafIterT leafIter=range.begin(); leafIter; ++leafIter) { ValueType* buffer = leafIter.buffer(1).data(); for (VoxelCIterT iter = leafIter->cbeginValueOn(); iter; ++iter) { buffer[iter.pos()] = 
avg(iter.getCoord()); } } } } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_LEVELSETFILTER_HAS_BEEN_INCLUDED
19,133
C
36.444227
100
0.629122
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Composite.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file Composite.h /// /// @brief Functions to efficiently perform various compositing operations on grids /// /// @authors Peter Cucka, Mihai Alden, Ken Museth #ifndef OPENVDB_TOOLS_COMPOSITE_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_COMPOSITE_HAS_BEEN_INCLUDED #include <openvdb/Platform.h> #include <openvdb/Exceptions.h> #include <openvdb/Types.h> #include <openvdb/Grid.h> #include <openvdb/math/Math.h> // for isExactlyEqual() #include "Merge.h" #include "ValueTransformer.h" // for transformValues() #include "Prune.h"// for prune #include "SignedFloodFill.h" // for signedFloodFill() #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <tbb/task_group.h> #include <tbb/task_scheduler_init.h> #include <type_traits> #include <functional> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Given two level set grids, replace the A grid with the union of A and B. /// @throw ValueError if the background value of either grid is not greater than zero. /// @note This operation always leaves the B grid empty. template<typename GridOrTreeT> inline void csgUnion(GridOrTreeT& a, GridOrTreeT& b, bool prune = true); /// @brief Given two level set grids, replace the A grid with the intersection of A and B. /// @throw ValueError if the background value of either grid is not greater than zero. /// @note This operation always leaves the B grid empty. template<typename GridOrTreeT> inline void csgIntersection(GridOrTreeT& a, GridOrTreeT& b, bool prune = true); /// @brief Given two level set grids, replace the A grid with the difference A / B. /// @throw ValueError if the background value of either grid is not greater than zero. /// @note This operation always leaves the B grid empty. 
template<typename GridOrTreeT> inline void csgDifference(GridOrTreeT& a, GridOrTreeT& b, bool prune = true); /// @brief Threaded CSG union operation that produces a new grid or tree from /// immutable inputs. /// @return The CSG union of the @a and @b level set inputs. template<typename GridOrTreeT> inline typename GridOrTreeT::Ptr csgUnionCopy(const GridOrTreeT& a, const GridOrTreeT& b); /// @brief Threaded CSG intersection operation that produces a new grid or tree from /// immutable inputs. /// @return The CSG intersection of the @a and @b level set inputs. template<typename GridOrTreeT> inline typename GridOrTreeT::Ptr csgIntersectionCopy(const GridOrTreeT& a, const GridOrTreeT& b); /// @brief Threaded CSG difference operation that produces a new grid or tree from /// immutable inputs. /// @return The CSG difference of the @a and @b level set inputs. template<typename GridOrTreeT> inline typename GridOrTreeT::Ptr csgDifferenceCopy(const GridOrTreeT& a, const GridOrTreeT& b); /// @brief Given grids A and B, compute max(a, b) per voxel (using sparse traversal). /// Store the result in the A grid and leave the B grid empty. template<typename GridOrTreeT> inline void compMax(GridOrTreeT& a, GridOrTreeT& b); /// @brief Given grids A and B, compute min(a, b) per voxel (using sparse traversal). /// Store the result in the A grid and leave the B grid empty. template<typename GridOrTreeT> inline void compMin(GridOrTreeT& a, GridOrTreeT& b); /// @brief Given grids A and B, compute a + b per voxel (using sparse traversal). /// Store the result in the A grid and leave the B grid empty. template<typename GridOrTreeT> inline void compSum(GridOrTreeT& a, GridOrTreeT& b); /// @brief Given grids A and B, compute a * b per voxel (using sparse traversal). /// Store the result in the A grid and leave the B grid empty. template<typename GridOrTreeT> inline void compMul(GridOrTreeT& a, GridOrTreeT& b); /// @brief Given grids A and B, compute a / b per voxel (using sparse traversal). 
/// Store the result in the A grid and leave the B grid empty. template<typename GridOrTreeT> inline void compDiv(GridOrTreeT& a, GridOrTreeT& b); /// Copy the active voxels of B into A. template<typename GridOrTreeT> inline void compReplace(GridOrTreeT& a, const GridOrTreeT& b); //////////////////////////////////////// namespace composite { // composite::min() and composite::max() for non-vector types compare with operator<(). template<typename T> inline const typename std::enable_if<!VecTraits<T>::IsVec, T>::type& // = T if T is not a vector type min(const T& a, const T& b) { return std::min(a, b); } template<typename T> inline const typename std::enable_if<!VecTraits<T>::IsVec, T>::type& max(const T& a, const T& b) { return std::max(a, b); } // composite::min() and composite::max() for OpenVDB vector types compare by magnitude. template<typename T> inline const typename std::enable_if<VecTraits<T>::IsVec, T>::type& // = T if T is a vector type min(const T& a, const T& b) { const typename T::ValueType aMag = a.lengthSqr(), bMag = b.lengthSqr(); return (aMag < bMag ? a : (bMag < aMag ? b : std::min(a, b))); } template<typename T> inline const typename std::enable_if<VecTraits<T>::IsVec, T>::type& max(const T& a, const T& b) { const typename T::ValueType aMag = a.lengthSqr(), bMag = b.lengthSqr(); return (aMag < bMag ? b : (bMag < aMag ? a : std::max(a, b))); } template<typename T> inline typename std::enable_if<!std::is_integral<T>::value, T>::type // = T if T is not an integer type divide(const T& a, const T& b) { return a / b; } template<typename T> inline typename std::enable_if<std::is_integral<T>::value, T>::type // = T if T is an integer type divide(const T& a, const T& b) { const T zero(0); if (b != zero) return a / b; if (a == zero) return 0; return (a > 0 ? std::numeric_limits<T>::max() : -std::numeric_limits<T>::max()); } // If b is true, return a / 1 = a. // If b is false and a is true, return 1 / 0 = inf = MAX_BOOL = 1 = a. 
// If b is false and a is false, return 0 / 0 = NaN = 0 = a. inline bool divide(bool a, bool /*b*/) { return a; } enum CSGOperation { CSG_UNION, CSG_INTERSECTION, CSG_DIFFERENCE }; template<typename TreeType, CSGOperation Operation> struct BuildPrimarySegment { using ValueType = typename TreeType::ValueType; using TreePtrType = typename TreeType::Ptr; using LeafNodeType = typename TreeType::LeafNodeType; using NodeMaskType = typename LeafNodeType::NodeMaskType; using RootNodeType = typename TreeType::RootNodeType; using NodeChainType = typename RootNodeType::NodeChainType; using InternalNodeType = typename NodeChainType::template Get<1>; BuildPrimarySegment(const TreeType& lhs, const TreeType& rhs) : mSegment(new TreeType(lhs.background())) , mLhsTree(&lhs) , mRhsTree(&rhs) { } void operator()() const { std::vector<const LeafNodeType*> leafNodes; { std::vector<const InternalNodeType*> internalNodes; mLhsTree->getNodes(internalNodes); ProcessInternalNodes op(internalNodes, *mRhsTree, *mSegment, leafNodes); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, internalNodes.size()), op); } ProcessLeafNodes op(leafNodes, *mRhsTree, *mSegment); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, leafNodes.size()), op); } TreePtrType& segment() { return mSegment; } private: struct ProcessInternalNodes { ProcessInternalNodes(std::vector<const InternalNodeType*>& lhsNodes, const TreeType& rhsTree, TreeType& outputTree, std::vector<const LeafNodeType*>& outputLeafNodes) : mLhsNodes(lhsNodes.empty() ? 
nullptr : &lhsNodes.front()) , mRhsTree(&rhsTree) , mLocalTree(mRhsTree->background()) , mOutputTree(&outputTree) , mLocalLeafNodes() , mOutputLeafNodes(&outputLeafNodes) { } ProcessInternalNodes(ProcessInternalNodes& other, tbb::split) : mLhsNodes(other.mLhsNodes) , mRhsTree(other.mRhsTree) , mLocalTree(mRhsTree->background()) , mOutputTree(&mLocalTree) , mLocalLeafNodes() , mOutputLeafNodes(&mLocalLeafNodes) { } void join(ProcessInternalNodes& other) { mOutputTree->merge(*other.mOutputTree); mOutputLeafNodes->insert(mOutputLeafNodes->end(), other.mOutputLeafNodes->begin(), other.mOutputLeafNodes->end()); } void operator()(const tbb::blocked_range<size_t>& range) { tree::ValueAccessor<const TreeType> rhsAcc(*mRhsTree); tree::ValueAccessor<TreeType> outputAcc(*mOutputTree); std::vector<const LeafNodeType*> tmpLeafNodes; for (size_t n = range.begin(), N = range.end(); n < N; ++n) { const InternalNodeType& lhsNode = *mLhsNodes[n]; const Coord& ijk = lhsNode.origin(); const InternalNodeType * rhsNode = rhsAcc.template probeConstNode<InternalNodeType>(ijk); if (rhsNode) { lhsNode.getNodes(*mOutputLeafNodes); } else { if (Operation == CSG_INTERSECTION) { if (rhsAcc.getValue(ijk) < ValueType(0.0)) { tmpLeafNodes.clear(); lhsNode.getNodes(tmpLeafNodes); for (size_t i = 0, I = tmpLeafNodes.size(); i < I; ++i) { outputAcc.addLeaf(new LeafNodeType(*tmpLeafNodes[i])); } } } else { // Union & Difference if (!(rhsAcc.getValue(ijk) < ValueType(0.0))) { tmpLeafNodes.clear(); lhsNode.getNodes(tmpLeafNodes); for (size_t i = 0, I = tmpLeafNodes.size(); i < I; ++i) { outputAcc.addLeaf(new LeafNodeType(*tmpLeafNodes[i])); } } } } } // end range loop } InternalNodeType const * const * const mLhsNodes; TreeType const * const mRhsTree; TreeType mLocalTree; TreeType * const mOutputTree; std::vector<const LeafNodeType*> mLocalLeafNodes; std::vector<const LeafNodeType*> * const mOutputLeafNodes; }; // struct ProcessInternalNodes struct ProcessLeafNodes { ProcessLeafNodes(std::vector<const 
LeafNodeType*>& lhsNodes, const TreeType& rhsTree, TreeType& output) : mLhsNodes(lhsNodes.empty() ? nullptr : &lhsNodes.front()) , mRhsTree(&rhsTree) , mLocalTree(mRhsTree->background()) , mOutputTree(&output) { } ProcessLeafNodes(ProcessLeafNodes& other, tbb::split) : mLhsNodes(other.mLhsNodes) , mRhsTree(other.mRhsTree) , mLocalTree(mRhsTree->background()) , mOutputTree(&mLocalTree) { } void join(ProcessLeafNodes& rhs) { mOutputTree->merge(*rhs.mOutputTree); } void operator()(const tbb::blocked_range<size_t>& range) { tree::ValueAccessor<const TreeType> rhsAcc(*mRhsTree); tree::ValueAccessor<TreeType> outputAcc(*mOutputTree); for (size_t n = range.begin(), N = range.end(); n < N; ++n) { const LeafNodeType& lhsNode = *mLhsNodes[n]; const Coord& ijk = lhsNode.origin(); const LeafNodeType* rhsNodePt = rhsAcc.probeConstLeaf(ijk); if (rhsNodePt) { // combine overlapping nodes LeafNodeType* outputNode = outputAcc.touchLeaf(ijk); ValueType * outputData = outputNode->buffer().data(); NodeMaskType& outputMask = outputNode->getValueMask(); const ValueType * lhsData = lhsNode.buffer().data(); const NodeMaskType& lhsMask = lhsNode.getValueMask(); const ValueType * rhsData = rhsNodePt->buffer().data(); const NodeMaskType& rhsMask = rhsNodePt->getValueMask(); if (Operation == CSG_INTERSECTION) { for (Index pos = 0; pos < LeafNodeType::SIZE; ++pos) { const bool fromRhs = lhsData[pos] < rhsData[pos]; outputData[pos] = fromRhs ? rhsData[pos] : lhsData[pos]; outputMask.set(pos, fromRhs ? rhsMask.isOn(pos) : lhsMask.isOn(pos)); } } else if (Operation == CSG_DIFFERENCE){ for (Index pos = 0; pos < LeafNodeType::SIZE; ++pos) { const ValueType rhsVal = math::negative(rhsData[pos]); const bool fromRhs = lhsData[pos] < rhsVal; outputData[pos] = fromRhs ? rhsVal : lhsData[pos]; outputMask.set(pos, fromRhs ? 
rhsMask.isOn(pos) : lhsMask.isOn(pos)); } } else { // Union for (Index pos = 0; pos < LeafNodeType::SIZE; ++pos) { const bool fromRhs = lhsData[pos] > rhsData[pos]; outputData[pos] = fromRhs ? rhsData[pos] : lhsData[pos]; outputMask.set(pos, fromRhs ? rhsMask.isOn(pos) : lhsMask.isOn(pos)); } } } else { if (Operation == CSG_INTERSECTION) { if (rhsAcc.getValue(ijk) < ValueType(0.0)) { outputAcc.addLeaf(new LeafNodeType(lhsNode)); } } else { // Union & Difference if (!(rhsAcc.getValue(ijk) < ValueType(0.0))) { outputAcc.addLeaf(new LeafNodeType(lhsNode)); } } } } // end range loop } LeafNodeType const * const * const mLhsNodes; TreeType const * const mRhsTree; TreeType mLocalTree; TreeType * const mOutputTree; }; // struct ProcessLeafNodes TreePtrType mSegment; TreeType const * const mLhsTree; TreeType const * const mRhsTree; }; // struct BuildPrimarySegment template<typename TreeType, CSGOperation Operation> struct BuildSecondarySegment { using ValueType = typename TreeType::ValueType; using TreePtrType = typename TreeType::Ptr; using LeafNodeType = typename TreeType::LeafNodeType; using NodeMaskType = typename LeafNodeType::NodeMaskType; using RootNodeType = typename TreeType::RootNodeType; using NodeChainType = typename RootNodeType::NodeChainType; using InternalNodeType = typename NodeChainType::template Get<1>; BuildSecondarySegment(const TreeType& lhs, const TreeType& rhs) : mSegment(new TreeType(lhs.background())) , mLhsTree(&lhs) , mRhsTree(&rhs) { } void operator()() const { std::vector<const LeafNodeType*> leafNodes; { std::vector<const InternalNodeType*> internalNodes; mRhsTree->getNodes(internalNodes); ProcessInternalNodes op(internalNodes, *mLhsTree, *mSegment, leafNodes); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, internalNodes.size()), op); } ProcessLeafNodes op(leafNodes, *mLhsTree, *mSegment); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, leafNodes.size()), op); } TreePtrType& segment() { return mSegment; } private: struct 
ProcessInternalNodes { ProcessInternalNodes(std::vector<const InternalNodeType*>& rhsNodes, const TreeType& lhsTree, TreeType& outputTree, std::vector<const LeafNodeType*>& outputLeafNodes) : mRhsNodes(rhsNodes.empty() ? nullptr : &rhsNodes.front()) , mLhsTree(&lhsTree) , mLocalTree(mLhsTree->background()) , mOutputTree(&outputTree) , mLocalLeafNodes() , mOutputLeafNodes(&outputLeafNodes) { } ProcessInternalNodes(ProcessInternalNodes& other, tbb::split) : mRhsNodes(other.mRhsNodes) , mLhsTree(other.mLhsTree) , mLocalTree(mLhsTree->background()) , mOutputTree(&mLocalTree) , mLocalLeafNodes() , mOutputLeafNodes(&mLocalLeafNodes) { } void join(ProcessInternalNodes& other) { mOutputTree->merge(*other.mOutputTree); mOutputLeafNodes->insert(mOutputLeafNodes->end(), other.mOutputLeafNodes->begin(), other.mOutputLeafNodes->end()); } void operator()(const tbb::blocked_range<size_t>& range) { tree::ValueAccessor<const TreeType> lhsAcc(*mLhsTree); tree::ValueAccessor<TreeType> outputAcc(*mOutputTree); std::vector<const LeafNodeType*> tmpLeafNodes; for (size_t n = range.begin(), N = range.end(); n < N; ++n) { const InternalNodeType& rhsNode = *mRhsNodes[n]; const Coord& ijk = rhsNode.origin(); const InternalNodeType * lhsNode = lhsAcc.template probeConstNode<InternalNodeType>(ijk); if (lhsNode) { rhsNode.getNodes(*mOutputLeafNodes); } else { if (Operation == CSG_INTERSECTION) { if (lhsAcc.getValue(ijk) < ValueType(0.0)) { tmpLeafNodes.clear(); rhsNode.getNodes(tmpLeafNodes); for (size_t i = 0, I = tmpLeafNodes.size(); i < I; ++i) { outputAcc.addLeaf(new LeafNodeType(*tmpLeafNodes[i])); } } } else if (Operation == CSG_DIFFERENCE) { if (lhsAcc.getValue(ijk) < ValueType(0.0)) { tmpLeafNodes.clear(); rhsNode.getNodes(tmpLeafNodes); for (size_t i = 0, I = tmpLeafNodes.size(); i < I; ++i) { LeafNodeType* outputNode = new LeafNodeType(*tmpLeafNodes[i]); outputNode->negate(); outputAcc.addLeaf(outputNode); } } } else { // Union if (!(lhsAcc.getValue(ijk) < ValueType(0.0))) { 
tmpLeafNodes.clear(); rhsNode.getNodes(tmpLeafNodes); for (size_t i = 0, I = tmpLeafNodes.size(); i < I; ++i) { outputAcc.addLeaf(new LeafNodeType(*tmpLeafNodes[i])); } } } } } // end range loop } InternalNodeType const * const * const mRhsNodes; TreeType const * const mLhsTree; TreeType mLocalTree; TreeType * const mOutputTree; std::vector<const LeafNodeType*> mLocalLeafNodes; std::vector<const LeafNodeType*> * const mOutputLeafNodes; }; // struct ProcessInternalNodes struct ProcessLeafNodes { ProcessLeafNodes(std::vector<const LeafNodeType*>& rhsNodes, const TreeType& lhsTree, TreeType& output) : mRhsNodes(rhsNodes.empty() ? nullptr : &rhsNodes.front()) , mLhsTree(&lhsTree) , mLocalTree(mLhsTree->background()) , mOutputTree(&output) { } ProcessLeafNodes(ProcessLeafNodes& rhs, tbb::split) : mRhsNodes(rhs.mRhsNodes) , mLhsTree(rhs.mLhsTree) , mLocalTree(mLhsTree->background()) , mOutputTree(&mLocalTree) { } void join(ProcessLeafNodes& rhs) { mOutputTree->merge(*rhs.mOutputTree); } void operator()(const tbb::blocked_range<size_t>& range) { tree::ValueAccessor<const TreeType> lhsAcc(*mLhsTree); tree::ValueAccessor<TreeType> outputAcc(*mOutputTree); for (size_t n = range.begin(), N = range.end(); n < N; ++n) { const LeafNodeType& rhsNode = *mRhsNodes[n]; const Coord& ijk = rhsNode.origin(); const LeafNodeType* lhsNode = lhsAcc.probeConstLeaf(ijk); if (!lhsNode) { if (Operation == CSG_INTERSECTION) { if (lhsAcc.getValue(ijk) < ValueType(0.0)) { outputAcc.addLeaf(new LeafNodeType(rhsNode)); } } else if (Operation == CSG_DIFFERENCE) { if (lhsAcc.getValue(ijk) < ValueType(0.0)) { LeafNodeType* outputNode = new LeafNodeType(rhsNode); outputNode->negate(); outputAcc.addLeaf(outputNode); } } else { // Union if (!(lhsAcc.getValue(ijk) < ValueType(0.0))) { outputAcc.addLeaf(new LeafNodeType(rhsNode)); } } } } // end range loop } LeafNodeType const * const * const mRhsNodes; TreeType const * const mLhsTree; TreeType mLocalTree; TreeType * const mOutputTree; }; // struct 
ProcessLeafNodes TreePtrType mSegment; TreeType const * const mLhsTree; TreeType const * const mRhsTree; }; // struct BuildSecondarySegment template<CSGOperation Operation, typename TreeType> inline typename TreeType::Ptr doCSGCopy(const TreeType& lhs, const TreeType& rhs) { BuildPrimarySegment<TreeType, Operation> primary(lhs, rhs); BuildSecondarySegment<TreeType, Operation> secondary(lhs, rhs); // Exploiting nested parallelism tbb::task_group tasks; tasks.run(primary); tasks.run(secondary); tasks.wait(); primary.segment()->merge(*secondary.segment()); // The leafnode (level = 0) sign is set in the segment construction. tools::signedFloodFill(*primary.segment(), /*threaded=*/true, /*grainSize=*/1, /*minLevel=*/1); return primary.segment(); } //////////////////////////////////////// template<typename TreeType> struct GridOrTreeConstructor { using TreeTypePtr = typename TreeType::Ptr; static TreeTypePtr construct(const TreeType&, TreeTypePtr& tree) { return tree; } }; template<typename TreeType> struct GridOrTreeConstructor<Grid<TreeType> > { using GridType = Grid<TreeType>; using GridTypePtr = typename Grid<TreeType>::Ptr; using TreeTypePtr = typename TreeType::Ptr; static GridTypePtr construct(const GridType& grid, TreeTypePtr& tree) { GridTypePtr maskGrid(GridType::create(tree)); maskGrid->setTransform(grid.transform().copy()); maskGrid->insertMeta(grid); return maskGrid; } }; //////////////////////////////////////// /// @cond COMPOSITE_INTERNAL /// List of pairs of leaf node pointers template <typename LeafT> using LeafPairList = std::vector<std::pair<LeafT*, LeafT*>>; /// @endcond /// @cond COMPOSITE_INTERNAL /// Transfers leaf nodes from a source tree into a /// desitnation tree, unless it already exists in the destination tree /// in which case pointers to both leaf nodes are added to a list for /// subsequent compositing operations. 
template <typename TreeT> inline void transferLeafNodes(TreeT &srcTree, TreeT &dstTree, LeafPairList<typename TreeT::LeafNodeType> &overlapping) { using LeafT = typename TreeT::LeafNodeType; tree::ValueAccessor<TreeT> acc(dstTree);//destination std::vector<LeafT*> srcLeafNodes; srcLeafNodes.reserve(srcTree.leafCount()); srcTree.stealNodes(srcLeafNodes); srcTree.clear(); for (LeafT *srcLeaf : srcLeafNodes) { LeafT *dstLeaf = acc.probeLeaf(srcLeaf->origin()); if (dstLeaf) { overlapping.emplace_back(dstLeaf, srcLeaf);//dst, src } else { acc.addLeaf(srcLeaf); } } } /// @endcond /// @cond COMPOSITE_INTERNAL /// Template specailization of compActiveLeafVoxels template <typename TreeT, typename OpT> inline typename std::enable_if< !std::is_same<typename TreeT::ValueType, bool>::value && !std::is_same<typename TreeT::BuildType, ValueMask>::value && std::is_same<typename TreeT::LeafNodeType::Buffer::ValueType, typename TreeT::LeafNodeType::Buffer::StorageType>::value>::type doCompActiveLeafVoxels(TreeT &srcTree, TreeT &dstTree, OpT op) { using LeafT = typename TreeT::LeafNodeType; LeafPairList<LeafT> overlapping;//dst, src transferLeafNodes(srcTree, dstTree, overlapping); using RangeT = tbb::blocked_range<size_t>; tbb::parallel_for(RangeT(0, overlapping.size()), [op, &overlapping](const RangeT& r) { for (auto i = r.begin(); i != r.end(); ++i) { LeafT *dstLeaf = overlapping[i].first, *srcLeaf = overlapping[i].second; dstLeaf->getValueMask() |= srcLeaf->getValueMask(); auto *ptr = dstLeaf->buffer().data(); for (auto v = srcLeaf->cbeginValueOn(); v; ++v) op(ptr[v.pos()], *v); delete srcLeaf; } }); } /// @endcond /// @cond COMPOSITE_INTERNAL /// Template specailization of compActiveLeafVoxels template <typename TreeT, typename OpT> inline typename std::enable_if< std::is_same<typename TreeT::BuildType, ValueMask>::value && std::is_same<typename TreeT::ValueType, bool>::value>::type doCompActiveLeafVoxels(TreeT &srcTree, TreeT &dstTree, OpT) { using LeafT = typename 
TreeT::LeafNodeType; LeafPairList<LeafT> overlapping;//dst, src transferLeafNodes(srcTree, dstTree, overlapping); using RangeT = tbb::blocked_range<size_t>; tbb::parallel_for(RangeT(0, overlapping.size()), [&overlapping](const RangeT& r) { for (auto i = r.begin(); i != r.end(); ++i) { overlapping[i].first->getValueMask() |= overlapping[i].second->getValueMask(); delete overlapping[i].second; } }); } /// @cond COMPOSITE_INTERNAL /// Template specailization of compActiveLeafVoxels template <typename TreeT, typename OpT> inline typename std::enable_if< std::is_same<typename TreeT::ValueType, bool>::value && !std::is_same<typename TreeT::BuildType, ValueMask>::value>::type doCompActiveLeafVoxels(TreeT &srcTree, TreeT &dstTree, OpT op) { using LeafT = typename TreeT::LeafNodeType; LeafPairList<LeafT> overlapping;//dst, src transferLeafNodes(srcTree, dstTree, overlapping); using RangeT = tbb::blocked_range<size_t>; using WordT = typename LeafT::Buffer::WordType; tbb::parallel_for(RangeT(0, overlapping.size()), [op, &overlapping](const RangeT& r) { for (auto i = r.begin(); i != r.end(); ++i) { LeafT *dstLeaf = overlapping[i].first, *srcLeaf = overlapping[i].second; WordT *w1 = dstLeaf->buffer().data(); const WordT *w2 = srcLeaf->buffer().data(); const WordT *w3 = &(srcLeaf->getValueMask().template getWord<WordT>(0)); for (Index32 n = LeafT::Buffer::WORD_COUNT; n--; ++w1) { WordT tmp = *w1, state = *w3++; op (tmp, *w2++); *w1 = (state & tmp) | (~state & *w1);//inactive values are unchanged } dstLeaf->getValueMask() |= srcLeaf->getValueMask(); delete srcLeaf; } }); } /// @endcond /// @cond COMPOSITE_INTERNAL /// Default functor for compActiveLeafVoxels template <typename TreeT> struct CopyOp { using ValueT = typename TreeT::ValueType; CopyOp() = default; void operator()(ValueT& dst, const ValueT& src) const { dst = src; } }; /// @endcond template <typename TreeT> inline void validateLevelSet(const TreeT& tree, const std::string& gridName = std::string("")) { using ValueT = 
typename TreeT::ValueType; const ValueT zero = zeroVal<ValueT>(); if (!(tree.background() > zero)) { std::stringstream ss; ss << "expected grid "; if (!gridName.empty()) ss << gridName << " "; ss << "outside value > 0, got " << tree.background(); OPENVDB_THROW(ValueError, ss.str()); } if (!(-tree.background() < zero)) { std::stringstream ss; ss << "expected grid "; if (!gridName.empty()) ss << gridName << " "; ss << "inside value < 0, got " << -tree.background(); OPENVDB_THROW(ValueError, ss.str()); } } } // namespace composite template<typename GridOrTreeT> inline void compMax(GridOrTreeT& aTree, GridOrTreeT& bTree) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; using ValueT = typename TreeT::ValueType; struct Local { static inline void op(CombineArgs<ValueT>& args) { args.setResult(composite::max(args.a(), args.b())); } }; Adapter::tree(aTree).combineExtended(Adapter::tree(bTree), Local::op, /*prune=*/false); } template<typename GridOrTreeT> inline void compMin(GridOrTreeT& aTree, GridOrTreeT& bTree) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; using ValueT = typename TreeT::ValueType; struct Local { static inline void op(CombineArgs<ValueT>& args) { args.setResult(composite::min(args.a(), args.b())); } }; Adapter::tree(aTree).combineExtended(Adapter::tree(bTree), Local::op, /*prune=*/false); } template<typename GridOrTreeT> inline void compSum(GridOrTreeT& aTree, GridOrTreeT& bTree) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; struct Local { static inline void op(CombineArgs<typename TreeT::ValueType>& args) { args.setResult(args.a() + args.b()); } }; Adapter::tree(aTree).combineExtended(Adapter::tree(bTree), Local::op, /*prune=*/false); } template<typename GridOrTreeT> inline void compMul(GridOrTreeT& aTree, GridOrTreeT& bTree) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; struct Local { static inline 
void op(CombineArgs<typename TreeT::ValueType>& args) { args.setResult(args.a() * args.b()); } }; Adapter::tree(aTree).combineExtended(Adapter::tree(bTree), Local::op, /*prune=*/false); } template<typename GridOrTreeT> inline void compDiv(GridOrTreeT& aTree, GridOrTreeT& bTree) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; struct Local { static inline void op(CombineArgs<typename TreeT::ValueType>& args) { args.setResult(composite::divide(args.a(), args.b())); } }; Adapter::tree(aTree).combineExtended(Adapter::tree(bTree), Local::op, /*prune=*/false); } //////////////////////////////////////// template<typename TreeT> struct CompReplaceOp { TreeT* const aTree; CompReplaceOp(TreeT& _aTree): aTree(&_aTree) {} /// @note fill operation is not thread safe void operator()(const typename TreeT::ValueOnCIter& iter) const { CoordBBox bbox; iter.getBoundingBox(bbox); aTree->fill(bbox, *iter); } void operator()(const typename TreeT::LeafCIter& leafIter) const { tree::ValueAccessor<TreeT> acc(*aTree); for (typename TreeT::LeafCIter::LeafNodeT::ValueOnCIter iter = leafIter->cbeginValueOn(); iter; ++iter) { acc.setValue(iter.getCoord(), *iter); } } }; template<typename GridOrTreeT> inline void compReplace(GridOrTreeT& aTree, const GridOrTreeT& bTree) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; using ValueOnCIterT = typename TreeT::ValueOnCIter; // Copy active states (but not values) from B to A. Adapter::tree(aTree).topologyUnion(Adapter::tree(bTree)); CompReplaceOp<TreeT> op(Adapter::tree(aTree)); // Copy all active tile values from B to A. ValueOnCIterT iter = bTree.cbeginValueOn(); iter.setMaxDepth(iter.getLeafDepth() - 1); // don't descend into leaf nodes foreach(iter, op, /*threaded=*/false); // Copy all active voxel values from B to A. 
foreach(Adapter::tree(bTree).cbeginLeaf(), op); } //////////////////////////////////////// template<typename GridOrTreeT> inline void csgUnion(GridOrTreeT& a, GridOrTreeT& b, bool prune) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; TreeT &aTree = Adapter::tree(a), &bTree = Adapter::tree(b); composite::validateLevelSet(aTree, "A"); composite::validateLevelSet(bTree, "B"); CsgUnionOp<TreeT> op(bTree, Steal()); tree::DynamicNodeManager<TreeT> nodeManager(aTree); nodeManager.foreachTopDown(op); if (prune) tools::pruneLevelSet(aTree); } template<typename GridOrTreeT> inline void csgIntersection(GridOrTreeT& a, GridOrTreeT& b, bool prune) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; TreeT &aTree = Adapter::tree(a), &bTree = Adapter::tree(b); composite::validateLevelSet(aTree, "A"); composite::validateLevelSet(bTree, "B"); CsgIntersectionOp<TreeT> op(bTree, Steal()); tree::DynamicNodeManager<TreeT> nodeManager(aTree); nodeManager.foreachTopDown(op); if (prune) tools::pruneLevelSet(aTree); } template<typename GridOrTreeT> inline void csgDifference(GridOrTreeT& a, GridOrTreeT& b, bool prune) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; TreeT &aTree = Adapter::tree(a), &bTree = Adapter::tree(b); composite::validateLevelSet(aTree, "A"); composite::validateLevelSet(bTree, "B"); CsgDifferenceOp<TreeT> op(bTree, Steal()); tree::DynamicNodeManager<TreeT> nodeManager(aTree); nodeManager.foreachTopDown(op); if (prune) tools::pruneLevelSet(aTree); } template<typename GridOrTreeT> inline typename GridOrTreeT::Ptr csgUnionCopy(const GridOrTreeT& a, const GridOrTreeT& b) { using Adapter = TreeAdapter<GridOrTreeT>; using TreePtrT = typename Adapter::TreeType::Ptr; TreePtrT output = composite::doCSGCopy<composite::CSG_UNION>( Adapter::tree(a), Adapter::tree(b)); return composite::GridOrTreeConstructor<GridOrTreeT>::construct(a, output); } template<typename 
GridOrTreeT> inline typename GridOrTreeT::Ptr csgIntersectionCopy(const GridOrTreeT& a, const GridOrTreeT& b) { using Adapter = TreeAdapter<GridOrTreeT>; using TreePtrT = typename Adapter::TreeType::Ptr; TreePtrT output = composite::doCSGCopy<composite::CSG_INTERSECTION>( Adapter::tree(a), Adapter::tree(b)); return composite::GridOrTreeConstructor<GridOrTreeT>::construct(a, output); } template<typename GridOrTreeT> inline typename GridOrTreeT::Ptr csgDifferenceCopy(const GridOrTreeT& a, const GridOrTreeT& b) { using Adapter = TreeAdapter<GridOrTreeT>; using TreePtrT = typename Adapter::TreeType::Ptr; TreePtrT output = composite::doCSGCopy<composite::CSG_DIFFERENCE>( Adapter::tree(a), Adapter::tree(b)); return composite::GridOrTreeConstructor<GridOrTreeT>::construct(a, output); } //////////////////////////////////////////////////////// /// @brief Composite the active values in leaf nodes, i.e. active /// voxels, of a source tree into a destination tree. /// /// @param srcTree source tree from which active voxels are composited. /// /// @param dstTree destination tree into which active voxels are composited. /// /// @param op a functor of the form <tt>void op(T& dst, const T& src)</tt>, /// where @c T is the @c ValueType of the tree, that composites /// a source value into a destination value. By default /// it copies the value from src to dst. /// /// @details All active voxels in the source tree will /// be active in the destination tree, and their value is /// determined by a use-defined functor (OpT op) that operates on the /// source and destination values. The only exception is when /// the tree type is MaskTree, in which case no functor is /// needed since by defintion a MaskTree has no values (only topology). /// /// @warning This function only operated on leaf node values, /// i.e. tile values are ignored. 
template<typename TreeT, typename OpT = composite::CopyOp<TreeT> > inline void compActiveLeafVoxels(TreeT &srcTree, TreeT &dstTree, OpT op = composite::CopyOp<TreeT>()) { composite::doCompActiveLeafVoxels<TreeT, OpT>(srcTree, dstTree, op); } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_COMPOSITE_HAS_BEEN_INCLUDED
37,675
C
36.451292
99
0.607273
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/FastSweeping.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file FastSweeping.h /// /// @author Ken Museth /// /// @brief Defined the six functions {fog,sdf}To{Sdf,Ext,SdfAndExt} in /// addition to the two functions maskSdf and dilateSdf. Sdf denotes /// a signed-distance field (i.e. negative values are inside), fog /// is a scalar fog volume (i.e. higher values are inside), and Ext is /// a field (of arbitrary type) that is extended off the iso-surface. /// All these functions are implemented with the methods in the class /// named FastSweeping. /// /// @note Solves the (simplified) Eikonal Eq: @f$|\nabla \phi|^2 = 1@f$ and /// performs velocity extension, @f$\nabla f\nabla \phi = 0@f$, both /// by means of the fast sweeping algorithm detailed in: /// "A Fast Sweeping Method For Eikonal Equations" /// by H. Zhao, Mathematics of Computation, Vol 74(230), pp 603-627, 2004 /// /// @details The algorithm used below for parallel fast sweeping was first publised in: /// "New Algorithm for Sparse and Parallel Fast Sweeping: Efficient /// Computation of Sparse Distance Fields" by K. 
Museth, ACM SIGGRAPH Talk, /// 2017, http://www.museth.org/Ken/Publications_files/Museth_SIG17.pdf #ifndef OPENVDB_TOOLS_FASTSWEEPING_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_FASTSWEEPING_HAS_BEEN_INCLUDED //#define BENCHMARK_FAST_SWEEPING #include <type_traits>// for static_assert #include <cmath> #include <limits> #include <deque> #include <unordered_map> #include <utility>// for std::make_pair #include <tbb/parallel_for.h> #include <tbb/enumerable_thread_specific.h> #include <tbb/task_group.h> #include <openvdb/math/Math.h> // for Abs() and isExactlyEqual() #include <openvdb/math/Stencils.h> // for GradStencil #include <openvdb/tree/LeafManager.h> #include "LevelSetUtil.h" #include "Morphology.h" #include "Statistics.h" #ifdef BENCHMARK_FAST_SWEEPING #include <openvdb/util/CpuTimer.h> #endif namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Converts a scalar fog volume into a signed distance function. Active input voxels /// with scalar values above the given isoValue will have NEGATIVE distance /// values on output, i.e. they are assumed to be INSIDE the iso-surface. /// /// @return A shared pointer to a signed-distance field defined on the active values /// of the input fog volume. /// /// @param fogGrid Scalar (floating-point) volume from which an /// iso-surface can be defined. /// /// @param isoValue A value which defines a smooth iso-surface that /// intersects active voxels in @a fogGrid. /// /// @param nIter Number of iterations of the fast sweeping algorithm. /// Each iteration performs 2^3 = 8 individual sweeps. /// /// @note Strictly speaking a fog volume is normalized to the range [0,1] but this /// method accepts a scalar volume with an arbitary range, as long as the it /// includes the @a isoValue. /// /// @details Topology of output grid is identical to that of the input grid, except /// active tiles in the input grid will be converted to active voxels /// in the output grid! 
/// /// @warning If @a isoValue does not intersect any active values in /// @a fogGrid then the returned grid has all its active values set to /// plus or minus infinity, depending on if the input values are larger or /// smaller than @a isoValue. template<typename GridT> typename GridT::Ptr fogToSdf(const GridT &fogGrid, typename GridT::ValueType isoValue, int nIter = 1); /// @brief Given an existing approximate SDF it solves the Eikonal equation for all its /// active voxels. Active input voxels with a signed distance value above the /// given isoValue will have POSITIVE distance values on output, i.e. they are /// assumed to be OUTSIDE the iso-surface. /// /// @return A shared pointer to a signed-distance field defined on the active values /// of the input sdf volume. /// /// @param sdfGrid An approximate signed distance field to the specified iso-surface. /// /// @param isoValue A value which defines a smooth iso-surface that /// intersects active voxels in @a sdfGrid. /// /// @param nIter Number of iterations of the fast sweeping algorithm. /// Each iteration performs 2^3 = 8 individual sweeps. /// /// @note The only difference between this method and fogToSdf, defined above, is the /// convention of the sign of the output distance field. /// /// @details Topology of output grid is identical to that of the input grid, except /// active tiles in the input grid will be converted to active voxels /// in the output grid! /// /// @warning If @a isoValue does not intersect any active values in /// @a sdfGrid then the returned grid has all its active values set to /// plus or minus infinity, depending on if the input values are larger or /// smaller than @a isoValue. template<typename GridT> typename GridT::Ptr sdfToSdf(const GridT &sdfGrid, typename GridT::ValueType isoValue = 0, int nIter = 1); /// @brief Computes the extension of a field, defined by the specified functor, /// off an iso-surface from an input FOG volume. 
/// /// @return A shared pointer to the extension field defined from the active values in /// the input fog volume. /// /// @param fogGrid Scalar (floating-point) volume from which an /// iso-surface can be defined. /// /// @param op Functor with signature [](const Vec3R &xyz)->ExtValueT that /// defines the Dirichlet boundary condition, on the iso-surface, /// of the field to be extended. /// /// @param background Background value of return grid with the extension field. /// /// @param isoValue A value which defines a smooth iso-surface that /// intersects active voxels in @a fogGrid. /// /// @param nIter Number of iterations of the fast sweeping algorithm. /// Each iteration performs 2^3 = 8 individual sweeps. /// /// @note Strictly speaking a fog volume is normalized to the range [0,1] but this /// method accepts a scalar volume with an arbitary range, as long as the it /// includes the @a isoValue. /// /// @details Topology of output grid is identical to that of the input grid, except /// active tiles in the input grid will be converted to active voxels /// in the output grid! /// /// @warning If @a isoValue does not intersect any active values in /// @a fogGrid then the returned grid has all its active values set to /// @a background. template<typename FogGridT, typename ExtOpT, typename ExtValueT> typename FogGridT::template ValueConverter<ExtValueT>::Type::Ptr fogToExt(const FogGridT &fogGrid, const ExtOpT &op, const ExtValueT& background, typename FogGridT::ValueType isoValue, int nIter = 1); /// @brief Computes the extension of a scalar field, defined by the specified functor, /// off an iso-surface from an input SDF volume. /// /// @return A shared pointer to the extension field defined on the active values in the /// input signed distance field. /// /// @param sdfGrid An approximate signed distance field to the specified iso-surface. 
/// /// @param op Functor with signature [](const Vec3R &xyz)->float that /// defines the Dirichlet boundary condition, on the iso-surface, /// of the field to be extended. /// /// @param background Background value of return grid with the extension field. /// /// @param isoValue A value which defines a smooth iso-surface that /// intersects active voxels in @a sdfGrid. /// /// @param nIter Number of iterations of the fast sweeping algorithm. /// Each iteration performs 2^3 = 8 individual sweeps. /// /// @note The only difference between this method and fogToEXT, defined above, is the /// convention of the sign of the signed distance field. /// /// @details Topology of output grid is identical to that of the input grid, except /// active tiles in the input grid will be converted to active voxels /// in the output grid! /// /// @warning If @a isoValue does not intersect any active values in /// @a sdfGrid then the returned grid has all its active values set to /// @a background. template<typename SdfGridT, typename ExtOpT, typename ExtValueT> typename SdfGridT::template ValueConverter<ExtValueT>::Type::Ptr sdfToExt(const SdfGridT &sdfGrid, const ExtOpT &op, const ExtValueT &background, typename SdfGridT::ValueType isoValue = 0, int nIter = 1); /// @brief Computes the signed distance field and the extension of a scalar field, /// defined by the specified functor, off an iso-surface from an input FOG volume. /// /// @return An pair of two shared pointers to respectively the SDF and extension field /// /// @param fogGrid Scalar (floating-point) volume from which an /// iso-surface can be defined. /// /// @param op Functor with signature [](const Vec3R &xyz)->float that /// defines the Dirichlet boundary condition, on the iso-surface, /// of the field to be extended. /// /// @param background Background value of return grid with the extension field. /// /// @param isoValue A value which defines a smooth iso-surface that /// intersects active voxels in @a fogGrid. 
/// /// @param nIter Number of iterations of the fast sweeping algorithm. /// Each iteration performs 2^3 = 8 individual sweeps. /// /// @note Strictly speaking a fog volume is normalized to the range [0,1] but this /// method accepts a scalar volume with an arbitary range, as long as the it /// includes the @a isoValue. /// /// @details Topology of output grids are identical to that of the input grid, except /// active tiles in the input grid will be converted to active voxels /// in the output grids! /// /// @warning If @a isoValue does not intersect any active values in /// @a fogGrid then a pair of the following grids is returned: The first /// is a signed distance grid with its active values set to plus or minus /// infinity depending of whether its input values are above or below @a isoValue. /// The second grid, which represents the extension field, has all its active /// values set to @a background. template<typename FogGridT, typename ExtOpT, typename ExtValueT> std::pair<typename FogGridT::Ptr, typename FogGridT::template ValueConverter<ExtValueT>::Type::Ptr> fogToSdfAndExt(const FogGridT &fogGrid, const ExtOpT &op, const ExtValueT &background, typename FogGridT::ValueType isoValue, int nIter = 1); /// @brief Computes the signed distance field and the extension of a scalar field, /// defined by the specified functor, off an iso-surface from an input SDF volume. /// /// @return A pair of two shared pointers to respectively the SDF and extension field /// /// @param sdfGrid Scalar (floating-point) volume from which an /// iso-surface can be defined. /// /// @param op Functor with signature [](const Vec3R &xyz)->float that /// defines the Dirichlet boundary condition, on the iso-surface, /// of the field to be extended. /// /// @param background Background value of return grid with the extension field. /// /// @param isoValue A value which defines a smooth iso-surface that /// intersects active voxels in @a sdfGrid. 
///
/// @param nIter Number of iterations of the fast sweeping algorithm.
///        Each iteration performs 2^3 = 8 individual sweeps.
///
/// @note Strictly speaking a fog volume is normalized to the range [0,1] but this
///       method accepts a scalar volume with an arbitrary range, as long as it
///       includes the @a isoValue.
///
/// @details Topology of output grids are identical to that of the input grid, except
///          active tiles in the input grid will be converted to active voxels
///          in the output grids!
///
/// @warning If @a isoValue does not intersect any active values in
///          @a sdfGrid then a pair of the following grids is returned: The first
///          is a signed distance grid with its active values set to plus or minus
///          infinity depending on whether its input values are above or below @a isoValue.
///          The second grid, which represents the extension field, has all its active
///          values set to @a background.
template<typename SdfGridT, typename ExtOpT, typename ExtValueT>
std::pair<typename SdfGridT::Ptr, typename SdfGridT::template ValueConverter<ExtValueT>::Type::Ptr>
sdfToSdfAndExt(const SdfGridT &sdfGrid,
               const ExtOpT &op,
               const ExtValueT &background,
               typename SdfGridT::ValueType isoValue = 0,
               int nIter = 1);

/// @brief Dilates an existing signed distance field by a specified number of voxels
///
/// @return A shared pointer to the dilated signed distance field.
///
/// @param sdfGrid Input signed distance field to be dilated.
///
/// @param dilation Number of voxels that the input SDF will be dilated.
///
/// @param nn Stencil-pattern used for dilation
///
/// @param nIter Number of iterations of the fast sweeping algorithm.
///        Each iteration performs 2^3 = 8 individual sweeps.
///
/// @details Topology will change as a result of this dilation. E.g. if
///          sdfGrid has a width of 3 and @a dilation = 6 then the grid
///          returned by this method is a narrow band signed distance field
///          with a total width of 9 units.
template<typename GridT>
typename GridT::Ptr
dilateSdf(const GridT &sdfGrid,
          int dilation,
          NearestNeighbors nn = NN_FACE,
          int nIter = 1);

/// @brief Fills mask by extending an existing signed distance field into
///        the active values of the input tree of arbitrary value type.
///
/// @return A shared pointer to the masked signed distance field.
///
/// @param sdfGrid Input signed distance field to be extended into the mask.
///
/// @param mask Mask used to identify the topology of the output SDF.
///        Note this mask is assumed to overlap with the sdfGrid.
///
/// @param ignoreActiveTiles If false, active tiles in the mask are treated
///        as active voxels. Else they are ignored.
///
/// @param nIter Number of iterations of the fast sweeping algorithm.
///        Each iteration performs 2^3 = 8 individual sweeps.
///
/// @details Topology of the output SDF is determined by the union of the active
///          voxels (or optionally values) in @a sdfGrid and @a mask.
template<typename GridT, typename MaskTreeT>
typename GridT::Ptr
maskSdf(const GridT &sdfGrid,
        const Grid<MaskTreeT> &mask,
        bool ignoreActiveTiles = false,
        int nIter = 1);

////////////////////////////////////////////////////////////////////////////////

/// @brief Computes signed distance values from an initial iso-surface and
///        optionally performs velocity extension at the same time. This is
///        done by means of a novel sparse and parallel fast sweeping
///        algorithm based on a first order Godunov's scheme.
///
///        Solves: @f$|\nabla \phi|^2 = 1 @f$
///
/// @warning Note, it is important to call one of the initialization methods before
///          calling the sweep function. Failure to do so will throw a RuntimeError.
///          Consider instead calling one of the many higher-level free-standing functions
///          defined above!
template<typename SdfGridT, typename ExtValueT = typename SdfGridT::ValueType> class FastSweeping { static_assert(std::is_floating_point<typename SdfGridT::ValueType>::value, "FastSweeping requires SdfGridT to have floating-point values"); // Defined types related to the signed disntance (or fog) grid using SdfValueT = typename SdfGridT::ValueType; using SdfTreeT = typename SdfGridT::TreeType; using SdfAccT = tree::ValueAccessor<SdfTreeT, false>;//don't register accessors // define types related to the extension field using ExtGridT = typename SdfGridT::template ValueConverter<ExtValueT>::Type; using ExtTreeT = typename ExtGridT::TreeType; using ExtAccT = tree::ValueAccessor<ExtTreeT, false>; // define types related to the tree that masks out the active voxels to be solved for using SweepMaskTreeT = typename SdfTreeT::template ValueConverter<ValueMask>::Type; using SweepMaskAccT = tree::ValueAccessor<SweepMaskTreeT, false>;//don't register accessors public: /// @brief Constructor FastSweeping(); /// @brief Destructor. ~FastSweeping() { this->clear(); } /// @brief Disallow copy construction. FastSweeping(const FastSweeping&) = delete; /// @brief Disallow copy assignment. FastSweeping& operator=(const FastSweeping&) = delete; /// @brief Returns a shared pointer to the signed distance field computed /// by this class. /// /// @warning This shared pointer might point to NULL if the grid has not been /// initialize (by one of the init methods) or computed (by the sweep /// method). typename SdfGridT::Ptr sdfGrid() { return mSdfGrid; } /// @brief Returns a shared pointer to the extension field computed /// by this class. /// /// @warning This shared pointer might point to NULL if the grid has not been /// initialize (by one of the init methods) or computed (by the sweep /// method). typename ExtGridT::Ptr extGrid() { return mExtGrid; } /// @brief Initializer for input grids that are either a signed distance /// field or a scalar fog volume. 
    ///
    /// @return True if the initialization succeeded.
    ///
    /// @param sdfGrid Input scalar grid that represents an existing signed distance
    ///        field or a fog volume (signified by @a isInputSdf).
    ///
    /// @param isoValue Iso-value to be used to define the Dirichlet boundary condition
    ///        of the fast sweeping algorithm (typically 0 for sdfs and a
    ///        positive value for fog volumes).
    ///
    /// @param isInputSdf Used to determine if @a sdfGrid is a signed distance field (true)
    ///        or a scalar fog volume (false).
    ///
    /// @details This, or any of the other initialization methods, should be called
    ///          before any call to sweep(). Failure to do so will throw a RuntimeError.
    ///
    /// @warning Note, if this method fails, i.e. returns false, a subsequent call
    ///          to sweep will throw a RuntimeError. Instead call clear and try again.
    bool initSdf(const SdfGridT &sdfGrid, SdfValueT isoValue, bool isInputSdf);

    /// @brief Initializer used whenever velocity extension is performed in addition
    ///        to the computation of signed distance fields.
    ///
    /// @return True if the initialization succeeded.
    ///
    ///
    /// @param sdfGrid Input scalar grid that represents an existing signed distance
    ///        field or a fog volume (signified by @a isInputSdf).
    ///
    /// @param op Functor with signature [](const Vec3R &xyz)->ExtValueT that
    ///        defines the Dirichlet boundary condition, on the iso-surface,
    ///        of the field to be extended. Strictly the return type of this functor
    ///        is only required to be convertible to ExtValueT!
    ///
    /// @param background Background value of return grid with the extension field.
    ///
    /// @param isoValue Iso-value to be used for the boundary condition of the fast
    ///        sweeping algorithm (typically 0 for sdfs and a positive value
    ///        for fog volumes).
    ///
    /// @param isInputSdf Used to determine if @a sdfGrid is a signed distance field (true)
    ///        or a scalar fog volume (false).
/// /// @details This, or any of ther other initilization methods, should be called /// before any call to sweep(). Failure to do so will throw a RuntimeError. /// /// @warning Note, if this method fails, i.e. returns false, a subsequent call /// to sweep will trow a RuntimeError. Instead call clear and try again. template <typename ExtOpT> bool initExt(const SdfGridT &sdfGrid, const ExtOpT &op, const ExtValueT &background, SdfValueT isoValue, bool isInputSdf); /// @brief Initializer used when dilating an exsiting signed distance field. /// /// @return True if the initialization succeeded. /// /// @param sdfGrid Input signed distance field to to be dilated. /// /// @param dilation Numer of voxels that the input SDF will be dilated. /// /// @param nn Stencil-pattern used for dilation /// /// @details This, or any of ther other initilization methods, should be called /// before any call to sweep(). Failure to do so will throw a RuntimeError. /// /// @warning Note, if this method fails, i.e. returns false, a subsequent call /// to sweep will trow a RuntimeError. Instead call clear and try again. bool initDilate(const SdfGridT &sdfGrid, int dilation, NearestNeighbors nn = NN_FACE); /// @brief Initializer used for the extamnsion of an exsiting signed distance field /// into the active values of an input mask of arbitrary value type. /// /// @return True if the initialization succeeded. /// /// @param sdfGrid Input signed distance field to be extended into the mask. /// /// @param mask Mask used to idetify the topology of the output SDF. /// Note this mask is assume to overlap with the sdfGrid. /// /// @param ignoreActiveTiles If false, active tiles in the mask are treated /// as active voxels. Else they are ignored. /// /// @details This, or any of ther other initilization methods, should be called /// before any call to sweep(). Failure to do so will throw a RuntimeError. /// /// @warning Note, if this method fails, i.e. 
returns false, a subsequent call /// to sweep will trow a RuntimeError. Instead call clear and try again. template<typename MaskTreeT> bool initMask(const SdfGridT &sdfGrid, const Grid<MaskTreeT> &mask, bool ignoreActiveTiles = false); /// @brief Perform @a nIter iterations of the fast sweeping algorithm. /// /// @param nIter Number of iterations of the fast sweeping algorithm. /// Each iteration performs 2^3 = 8 individual sweeps. /// /// @param finalize If true the (possibly asymmetric) inside and outside values of the /// resulting signed distance field are properly set. Unless you're /// an expert this should remain true! /// /// @throw RuntimeError if sweepingVoxelCount() or boundaryVoxelCount() return zero. /// This might happen if none of the initialization methods above were called /// or if that initialization failed. void sweep(int nIter = 1, bool finalize = true); /// @brief Clears all the grids and counters so initializtion can be called again. void clear(); /// @brief Return the number of voxels that will be solved for. size_t sweepingVoxelCount() const { return mSweepingVoxelCount; } /// @brief Return the number of voxels that defined the boundary condition. size_t boundaryVoxelCount() const { return mBoundaryVoxelCount; } /// @brief Return true if there are voxels and boundaries to solve for bool isValid() const { return mSweepingVoxelCount > 0 && mBoundaryVoxelCount > 0; } private: /// @brief Private method to prune the sweep mask and cache leaf origins. void computeSweepMaskLeafOrigins(); // Private utility classes template<typename> struct MaskKernel;// initialization to extend a SDF into a mask template<typename> struct InitExt; struct InitSdf; struct DilateKernel;// initialization to dilate a SDF struct MinMaxKernel; struct SweepingKernel;// performs the actual concurrent sparse fast sweeping // Define the topology (i.e. 
stencil) of the neighboring grid points static const Coord mOffset[6];// = {{-1,0,0},{1,0,0},{0,-1,0},{0,1,0},{0,0,-1},{0,0,1}}; // Private member data of FastSweeping typename SdfGridT::Ptr mSdfGrid; typename ExtGridT::Ptr mExtGrid; SweepMaskTreeT mSweepMask; // mask tree containing all non-boundary active voxels std::vector<Coord> mSweepMaskLeafOrigins; // cache of leaf node origins for mask tree size_t mSweepingVoxelCount, mBoundaryVoxelCount; };// FastSweeping //////////////////////////////////////////////////////////////////////////////// // Static member data initialization template <typename SdfGridT, typename ExtValueT> const Coord FastSweeping<SdfGridT, ExtValueT>::mOffset[6] = {{-1,0,0},{1,0,0}, {0,-1,0},{0,1,0}, {0,0,-1},{0,0,1}}; template <typename SdfGridT, typename ExtValueT> FastSweeping<SdfGridT, ExtValueT>::FastSweeping() : mSdfGrid(nullptr), mExtGrid(nullptr), mSweepingVoxelCount(0), mBoundaryVoxelCount(0) { } template <typename SdfGridT, typename ExtValueT> void FastSweeping<SdfGridT, ExtValueT>::clear() { mSdfGrid.reset(); mExtGrid.reset(); mSweepMask.clear(); mSweepingVoxelCount = mBoundaryVoxelCount = 0; } template <typename SdfGridT, typename ExtValueT> void FastSweeping<SdfGridT, ExtValueT>::computeSweepMaskLeafOrigins() { // replace any inactive leaf nodes with tiles and voxelize any active tiles pruneInactive(mSweepMask); mSweepMask.voxelizeActiveTiles(); using LeafManagerT = tree::LeafManager<SweepMaskTreeT>; using LeafT = typename SweepMaskTreeT::LeafNodeType; LeafManagerT leafManager(mSweepMask); mSweepMaskLeafOrigins.resize(leafManager.leafCount()); tbb::atomic<size_t> sweepingVoxelCount = 0; auto kernel = [&](const LeafT& leaf, size_t leafIdx) { mSweepMaskLeafOrigins[leafIdx] = leaf.origin(); sweepingVoxelCount += leaf.onVoxelCount(); }; leafManager.foreach(kernel, /*threaded=*/true, /*grainsize=*/1024); mBoundaryVoxelCount = 0; mSweepingVoxelCount = sweepingVoxelCount; if (mSdfGrid) { const size_t totalCount = 
mSdfGrid->constTree().activeVoxelCount(); assert( totalCount >= mSweepingVoxelCount ); mBoundaryVoxelCount = totalCount - mSweepingVoxelCount; } }// FastSweeping::computeSweepMaskLeafOrigins template <typename SdfGridT, typename ExtValueT> bool FastSweeping<SdfGridT, ExtValueT>::initSdf(const SdfGridT &fogGrid, SdfValueT isoValue, bool isInputSdf) { this->clear(); mSdfGrid = fogGrid.deepCopy();//very fast InitSdf kernel(*this); kernel.run(isoValue, isInputSdf); return this->isValid(); } template <typename SdfGridT, typename ExtValueT> template <typename OpT> bool FastSweeping<SdfGridT, ExtValueT>::initExt(const SdfGridT &fogGrid, const OpT &op, const ExtValueT &background, SdfValueT isoValue, bool isInputSdf) { this->clear(); mSdfGrid = fogGrid.deepCopy();//very fast mExtGrid = createGrid<ExtGridT>( background ); mExtGrid->topologyUnion( *mSdfGrid );//very fast InitExt<OpT> kernel(*this); kernel.run(isoValue, op, isInputSdf); return this->isValid(); } template <typename SdfGridT, typename ExtValueT> bool FastSweeping<SdfGridT, ExtValueT>::initDilate(const SdfGridT &sdfGrid, int dilate, NearestNeighbors nn) { this->clear(); mSdfGrid = sdfGrid.deepCopy();//very fast DilateKernel kernel(*this); kernel.run(dilate, nn); return this->isValid(); } template <typename SdfGridT, typename ExtValueT> template<typename MaskTreeT> bool FastSweeping<SdfGridT, ExtValueT>::initMask(const SdfGridT &sdfGrid, const Grid<MaskTreeT> &mask, bool ignoreActiveTiles) { this->clear(); mSdfGrid = sdfGrid.deepCopy();//very fast if (mSdfGrid->transform() != mask.transform()) { OPENVDB_THROW(RuntimeError, "FastSweeping: Mask not aligned with the grid!"); } if (mask.getGridClass() == GRID_LEVEL_SET) { using T = typename MaskTreeT::template ValueConverter<bool>::Type; typename Grid<T>::Ptr tmp = sdfInteriorMask(mask);//might have active tiles tmp->tree().voxelizeActiveTiles();//multi-threaded MaskKernel<T> kernel(*this); kernel.run(tmp->tree());//multi-threaded } else { if (ignoreActiveTiles || 
!mask.tree().hasActiveTiles()) { MaskKernel<MaskTreeT> kernel(*this); kernel.run(mask.tree());//multi-threaded } else { using T = typename MaskTreeT::template ValueConverter<ValueMask>::Type; T tmp(mask.tree(), false, TopologyCopy());//multi-threaded tmp.voxelizeActiveTiles(true);//multi-threaded MaskKernel<T> kernel(*this); kernel.run(tmp);//multi-threaded } } return this->isValid(); }// FastSweeping::initMask template <typename SdfGridT, typename ExtValueT> void FastSweeping<SdfGridT, ExtValueT>::sweep(int nIter, bool finalize) { if (!mSdfGrid) { OPENVDB_THROW(RuntimeError, "FastSweeping::sweep called before initialization"); } if (this->boundaryVoxelCount() == 0) { OPENVDB_THROW(RuntimeError, "FastSweeping: No boundary voxels found!"); } else if (this->sweepingVoxelCount() == 0) { OPENVDB_THROW(RuntimeError, "FastSweeping: No computing voxels found!"); } // note: SweepingKernel is non copy-constructible, so use a deque instead of a vector std::deque<SweepingKernel> kernels; for (int i = 0; i < 4; i++) kernels.emplace_back(*this); { // compute voxel slices #ifdef BENCHMARK_FAST_SWEEPING util::CpuTimer timer("Computing voxel slices"); #endif // Exploiting nested parallelism - all voxel slice data is precomputed tbb::task_group tasks; tasks.run([&] { kernels[0].computeVoxelSlices([](const Coord &a){ return a[0]+a[1]+a[2]; });/*+++ & ---*/ }); tasks.run([&] { kernels[1].computeVoxelSlices([](const Coord &a){ return a[0]+a[1]-a[2]; });/*++- & --+*/ }); tasks.run([&] { kernels[2].computeVoxelSlices([](const Coord &a){ return a[0]-a[1]+a[2]; });/*+-+ & -+-*/ }); tasks.run([&] { kernels[3].computeVoxelSlices([](const Coord &a){ return a[0]-a[1]-a[2]; });/*+-- & -++*/ }); tasks.wait(); #ifdef BENCHMARK_FAST_SWEEPING timer.stop(); #endif } // perform nIter iterations of bi-directional sweeping in all directions for (int i = 0; i < nIter; ++i) { for (SweepingKernel& kernel : kernels) kernel.sweep(); } if (finalize) { #ifdef BENCHMARK_FAST_SWEEPING util::CpuTimer 
timer("Computing extrema values"); #endif MinMaxKernel kernel; auto e = kernel.run(*mSdfGrid);//multi-threaded //auto e = extrema(mGrid->beginValueOn());// 100x slower!!!! #ifdef BENCHMARK_FAST_SWEEPING std::cerr << "Min = " << e.min() << " Max = " << e.max() << std::endl; timer.restart("Changing asymmetric background value"); #endif changeAsymmetricLevelSetBackground(mSdfGrid->tree(), e.max(), e.min());//multi-threaded #ifdef BENCHMARK_FAST_SWEEPING timer.stop(); #endif } }// FastSweeping::sweep /// Private class of FastSweeping to quickly compute the extrema /// values of the active voxels in the leaf nodes. Several orders /// of magnitude faster than tools::extrema! template <typename SdfGridT, typename ExtValueT> struct FastSweeping<SdfGridT, ExtValueT>::MinMaxKernel { using LeafMgr = tree::LeafManager<const SdfTreeT>; using LeafRange = typename LeafMgr::LeafRange; MinMaxKernel() : mMin(std::numeric_limits<SdfValueT>::max()), mMax(-mMin) {} MinMaxKernel(MinMaxKernel& other, tbb::split) : mMin(other.mMin), mMax(other.mMax) {} math::MinMax<SdfValueT> run(const SdfGridT &grid) { LeafMgr mgr(grid.tree());// super fast tbb::parallel_reduce(mgr.leafRange(), *this); return math::MinMax<SdfValueT>(mMin, mMax); } void operator()(const LeafRange& r) { for (auto leafIter = r.begin(); leafIter; ++leafIter) { for (auto voxelIter = leafIter->beginValueOn(); voxelIter; ++voxelIter) { const SdfValueT v = *voxelIter; if (v < mMin) mMin = v; if (v > mMax) mMax = v; } } } void join(const MinMaxKernel& other) { if (other.mMin < mMin) mMin = other.mMin; if (other.mMax > mMax) mMax = other.mMax; } SdfValueT mMin, mMax; };// FastSweeping::MinMaxKernel //////////////////////////////////////////////////////////////////////////////// /// Private class of FastSweeping to perform multi-threaded initialization template <typename SdfGridT, typename ExtValueT> struct FastSweeping<SdfGridT, ExtValueT>::DilateKernel { using LeafRange = typename tree::LeafManager<SdfTreeT>::LeafRange; 
DilateKernel(FastSweeping &parent) : mParent(&parent), mBackground(parent.mSdfGrid->background()) { } DilateKernel(const DilateKernel &parent) = default;// for tbb::parallel_for DilateKernel& operator=(const DilateKernel&) = delete; void run(int dilation, NearestNeighbors nn) { #ifdef BENCHMARK_FAST_SWEEPING util::CpuTimer timer("Construct LeafManager"); #endif tree::LeafManager<SdfTreeT> mgr(mParent->mSdfGrid->tree());// super fast #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Changing background value"); #endif static const SdfValueT Unknown = std::numeric_limits<SdfValueT>::max(); changeLevelSetBackground(mgr, Unknown);//multi-threaded #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Dilating and updating mgr (parallel)"); //timer.restart("Dilating and updating mgr (serial)"); #endif const int delta = 5; for (int i=0, d = dilation/delta; i<d; ++i) dilateActiveValues(mgr, delta, nn, IGNORE_TILES); dilateActiveValues(mgr, dilation % delta, nn, IGNORE_TILES); //for (int i=0, n=5, d=dilation/n; i<d; ++i) dilateActiveValues(mgr, n, nn, IGNORE_TILES); //dilateVoxels(mgr, dilation, nn); #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Initializing grid and sweep mask"); #endif mParent->mSweepMask.clear(); mParent->mSweepMask.topologyUnion(mParent->mSdfGrid->constTree()); using LeafManagerT = tree::LeafManager<typename SdfGridT::TreeType>; using LeafT = typename SdfGridT::TreeType::LeafNodeType; LeafManagerT leafManager(mParent->mSdfGrid->tree()); auto kernel = [&](LeafT& leaf, size_t /*leafIdx*/) { static const SdfValueT Unknown = std::numeric_limits<SdfValueT>::max(); const SdfValueT background = mBackground;//local copy auto* maskLeaf = mParent->mSweepMask.probeLeaf(leaf.origin()); assert(maskLeaf); for (auto voxelIter = leaf.beginValueOn(); voxelIter; ++voxelIter) { const SdfValueT value = *voxelIter; if (math::Abs(value) < background) {// disable boundary voxels from the mask tree maskLeaf->setValueOff(voxelIter.pos()); } else { voxelIter.setValue(value > 0 ? 
Unknown : -Unknown); } } }; leafManager.foreach( kernel ); // cache the leaf node origins for fast lookup in the sweeping kernels mParent->computeSweepMaskLeafOrigins(); #ifdef BENCHMARK_FAST_SWEEPING timer.stop(); #endif }// FastSweeping::DilateKernel::run // Private member data of DilateKernel FastSweeping *mParent; const SdfValueT mBackground; };// FastSweeping::DilateKernel //////////////////////////////////////////////////////////////////////////////// template <typename SdfGridT, typename ExtValueT> struct FastSweeping<SdfGridT, ExtValueT>::InitSdf { using LeafRange = typename tree::LeafManager<SdfTreeT>::LeafRange; InitSdf(FastSweeping &parent): mParent(&parent), mSdfGrid(parent.mSdfGrid.get()), mIsoValue(0), mAboveSign(0) {} InitSdf(const InitSdf&) = default;// for tbb::parallel_for InitSdf& operator=(const InitSdf&) = delete; void run(SdfValueT isoValue, bool isInputSdf) { mIsoValue = isoValue; mAboveSign = isInputSdf ? SdfValueT(1) : SdfValueT(-1); SdfTreeT &tree = mSdfGrid->tree();//sdf const bool hasActiveTiles = tree.hasActiveTiles(); if (isInputSdf && hasActiveTiles) { OPENVDB_THROW(RuntimeError, "FastSweeping: A SDF should not have active tiles!"); } #ifdef BENCHMARK_FAST_SWEEPING util::CpuTimer timer("Initialize voxels"); #endif mParent->mSweepMask.clear(); mParent->mSweepMask.topologyUnion(mParent->mSdfGrid->constTree()); {// Process all voxels tree::LeafManager<SdfTreeT> mgr(tree, 1);// we need one auxiliary buffer tbb::parallel_for(mgr.leafRange(32), *this);//multi-threaded mgr.swapLeafBuffer(1);//swap voxel values } #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Initialize tiles - new"); #endif // Process all tiles tree::NodeManager<SdfTreeT, SdfTreeT::RootNodeType::LEVEL-1> mgr(tree); mgr.foreachBottomUp(*this);//multi-threaded tree.root().setBackground(std::numeric_limits<SdfValueT>::max(), false); if (hasActiveTiles) tree.voxelizeActiveTiles();//multi-threaded // cache the leaf node origins for fast lookup in the sweeping kernels 
mParent->computeSweepMaskLeafOrigins(); }// FastSweeping::InitSdf::run void operator()(const LeafRange& r) const { SweepMaskAccT sweepMaskAcc(mParent->mSweepMask); math::GradStencil<SdfGridT, false> stencil(*mSdfGrid); const SdfValueT isoValue = mIsoValue, above = mAboveSign*std::numeric_limits<SdfValueT>::max();//local copy const SdfValueT h = mAboveSign*static_cast<SdfValueT>(mSdfGrid->voxelSize()[0]);//Voxel size for (auto leafIter = r.begin(); leafIter; ++leafIter) { SdfValueT* sdf = leafIter.buffer(1).data(); for (auto voxelIter = leafIter->beginValueAll(); voxelIter; ++voxelIter) { const SdfValueT value = *voxelIter; const bool isAbove = value > isoValue; if (!voxelIter.isValueOn()) {// inactive voxels sdf[voxelIter.pos()] = isAbove ? above : -above; } else {// active voxels const Coord ijk = voxelIter.getCoord(); stencil.moveTo(ijk, value); const auto mask = stencil.intersectionMask( isoValue ); if (mask.none()) {// most common case sdf[voxelIter.pos()] = isAbove ? above : -above; } else {// compute distance to iso-surface // disable boundary voxels from the mask tree sweepMaskAcc.setValueOff(ijk); const SdfValueT delta = value - isoValue;//offset relative to iso-value if (math::isApproxZero(delta)) {//voxel is on the iso-surface sdf[voxelIter.pos()] = 0; } else {//voxel is neighboring the iso-surface SdfValueT sum = 0; for (int i=0; i<6;) { SdfValueT d = std::numeric_limits<SdfValueT>::max(), d2; if (mask.test(i++)) d = math::Abs(delta/(value-stencil.getValue(i))); if (mask.test(i++)) { d2 = math::Abs(delta/(value-stencil.getValue(i))); if (d2 < d) d = d2; } if (d < std::numeric_limits<SdfValueT>::max()) sum += 1/(d*d); } sdf[voxelIter.pos()] = isAbove ? 
h / math::Sqrt(sum) : -h / math::Sqrt(sum); }// voxel is neighboring the iso-surface }// intersecting voxels }// active voxels }// loop over voxels }// loop over leaf nodes }// FastSweeping::InitSdf::operator(const LeafRange&) template<typename RootOrInternalNodeT> void operator()(const RootOrInternalNodeT& node) const { const SdfValueT isoValue = mIsoValue, above = mAboveSign*std::numeric_limits<SdfValueT>::max(); for (auto it = node.cbeginValueAll(); it; ++it) { SdfValueT& v = const_cast<SdfValueT&>(*it); v = v > isoValue ? above : -above; }//loop over all tiles }// FastSweeping::InitSdf::operator()(const RootOrInternalNodeT&) // Public member data FastSweeping *mParent; SdfGridT *mSdfGrid;//raw pointer, i.e. lock free SdfValueT mIsoValue; SdfValueT mAboveSign;//sign of distance values above the iso-value };// FastSweeping::InitSdf /// Private class of FastSweeping to perform multi-threaded initialization template <typename SdfGridT, typename ExtValueT> template <typename OpT> struct FastSweeping<SdfGridT, ExtValueT>::InitExt { using LeafRange = typename tree::LeafManager<SdfTreeT>::LeafRange; using OpPoolT = tbb::enumerable_thread_specific<OpT>; InitExt(FastSweeping &parent) : mParent(&parent), mOpPool(nullptr), mSdfGrid(parent.mSdfGrid.get()), mExtGrid(parent.mExtGrid.get()), mIsoValue(0), mAboveSign(0) {} InitExt(const InitExt&) = default;// for tbb::parallel_for InitExt& operator=(const InitExt&) = delete; void run(SdfValueT isoValue, const OpT &opPrototype, bool isInputSdf) { static_assert(std::is_convertible<decltype(opPrototype(Vec3d(0))),ExtValueT>::value, "Invalid return type of functor"); if (!mExtGrid) { OPENVDB_THROW(RuntimeError, "FastSweeping::InitExt expected an extension grid!"); } mAboveSign = isInputSdf ? 
SdfValueT(1) : SdfValueT(-1); mIsoValue = isoValue; auto &tree1 = mSdfGrid->tree(); auto &tree2 = mExtGrid->tree(); const bool hasActiveTiles = tree1.hasActiveTiles();//very fast if (isInputSdf && hasActiveTiles) { OPENVDB_THROW(RuntimeError, "FastSweeping: A SDF should not have active tiles!"); } #ifdef BENCHMARK_FAST_SWEEPING util::CpuTimer timer("Initialize voxels"); #endif mParent->mSweepMask.clear(); mParent->mSweepMask.topologyUnion(mParent->mSdfGrid->constTree()); {// Process all voxels // Define thread-local operators OpPoolT opPool(opPrototype); mOpPool = &opPool; tree::LeafManager<SdfTreeT> mgr(tree1, 1);// we need one auxiliary buffer tbb::parallel_for(mgr.leafRange(32), *this);//multi-threaded mgr.swapLeafBuffer(1);//swap out auxiliary buffer } #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Initialize tiles"); #endif {// Process all tiles tree::NodeManager<SdfTreeT, SdfTreeT::RootNodeType::LEVEL-1> mgr(tree1); mgr.foreachBottomUp(*this);//multi-threaded tree1.root().setBackground(std::numeric_limits<SdfValueT>::max(), false); if (hasActiveTiles) { #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Voxelizing active tiles"); #endif tree1.voxelizeActiveTiles();//multi-threaded tree2.voxelizeActiveTiles();//multi-threaded } } // cache the leaf node origins for fast lookup in the sweeping kernels mParent->computeSweepMaskLeafOrigins(); #ifdef BENCHMARK_FAST_SWEEPING timer.stop(); #endif }// FastSweeping::InitExt::run void operator()(const LeafRange& r) const { ExtAccT acc(mExtGrid->tree()); SweepMaskAccT sweepMaskAcc(mParent->mSweepMask); math::GradStencil<SdfGridT, false> stencil(*mSdfGrid); const math::Transform& xform = mExtGrid->transform(); typename OpPoolT::reference op = mOpPool->local(); const SdfValueT isoValue = mIsoValue, above = mAboveSign*std::numeric_limits<SdfValueT>::max();//local copy const SdfValueT h = mAboveSign*static_cast<SdfValueT>(mSdfGrid->voxelSize()[0]);//Voxel size for (auto leafIter = r.begin(); leafIter; ++leafIter) { SdfValueT *sdf 
= leafIter.buffer(1).data(); ExtValueT *ext = acc.probeLeaf(leafIter->origin())->buffer().data();//should be safe! for (auto voxelIter = leafIter->beginValueAll(); voxelIter; ++voxelIter) { const SdfValueT value = *voxelIter; const bool isAbove = value > isoValue; if (!voxelIter.isValueOn()) {// inactive voxels sdf[voxelIter.pos()] = isAbove ? above : -above; } else {// active voxels const Coord ijk = voxelIter.getCoord(); stencil.moveTo(ijk, value); const auto mask = stencil.intersectionMask( isoValue ); if (mask.none()) {// no zero-crossing neighbors, most common case sdf[voxelIter.pos()] = isAbove ? above : -above; // the ext grid already has its active values set to the bakground value } else {// compute distance to iso-surface // disable boundary voxels from the mask tree sweepMaskAcc.setValueOff(ijk); const SdfValueT delta = value - isoValue;//offset relative to iso-value if (math::isApproxZero(delta)) {//voxel is on the iso-surface sdf[voxelIter.pos()] = 0; ext[voxelIter.pos()] = ExtValueT(op(xform.indexToWorld(ijk))); } else {//voxel is neighboring the iso-surface SdfValueT sum1 = 0; ExtValueT sum2 = zeroVal<ExtValueT>(); for (int n=0, i=0; i<6;) { SdfValueT d = std::numeric_limits<SdfValueT>::max(), d2; if (mask.test(i++)) { d = math::Abs(delta/(value-stencil.getValue(i))); n = i - 1; } if (mask.test(i++)) { d2 = math::Abs(delta/(value-stencil.getValue(i))); if (d2 < d) { d = d2; n = i - 1; } } if (d < std::numeric_limits<SdfValueT>::max()) { d2 = 1/(d*d); sum1 += d2; const Vec3R xyz(static_cast<SdfValueT>(ijk[0])+d*static_cast<SdfValueT>(FastSweeping::mOffset[n][0]), static_cast<SdfValueT>(ijk[1])+d*static_cast<SdfValueT>(FastSweeping::mOffset[n][1]), static_cast<SdfValueT>(ijk[2])+d*static_cast<SdfValueT>(FastSweeping::mOffset[n][2])); sum2 += d2*ExtValueT(op(xform.indexToWorld(xyz))); } }//look over six cases ext[voxelIter.pos()] = (SdfValueT(1) / sum1) * sum2; sdf[voxelIter.pos()] = isAbove ? 
h / math::Sqrt(sum1) : -h / math::Sqrt(sum1); }// voxel is neighboring the iso-surface }// intersecting voxels }// active voxels }// loop over voxels }// loop over leaf nodes }// FastSweeping::InitExt::operator(const LeafRange& r) template<typename RootOrInternalNodeT> void operator()(const RootOrInternalNodeT& node) const { const SdfValueT isoValue = mIsoValue, above = mAboveSign*std::numeric_limits<SdfValueT>::max(); for (auto it = node.cbeginValueAll(); it; ++it) { SdfValueT& v = const_cast<SdfValueT&>(*it); v = v > isoValue ? above : -above; }//loop over all tiles } // Public member data FastSweeping *mParent; OpPoolT *mOpPool; SdfGridT *mSdfGrid; ExtGridT *mExtGrid; SdfValueT mIsoValue; SdfValueT mAboveSign;//sign of distance values above the iso-value };// FastSweeping::InitExt /// Private class of FastSweeping to perform multi-threaded initialization template <typename SdfGridT, typename ExtValueT> template <typename MaskTreeT> struct FastSweeping<SdfGridT, ExtValueT>::MaskKernel { using LeafRange = typename tree::LeafManager<const MaskTreeT>::LeafRange; MaskKernel(FastSweeping &parent) : mParent(&parent), mSdfGrid(parent.mSdfGrid.get()) {} MaskKernel(const MaskKernel &parent) = default;// for tbb::parallel_for MaskKernel& operator=(const MaskKernel&) = delete; void run(const MaskTreeT &mask) { #ifdef BENCHMARK_FAST_SWEEPING util::CpuTimer timer; #endif auto &lsTree = mSdfGrid->tree(); static const SdfValueT Unknown = std::numeric_limits<SdfValueT>::max(); #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Changing background value"); #endif changeLevelSetBackground(lsTree, Unknown);//multi-threaded #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Union with mask");//multi-threaded #endif lsTree.topologyUnion(mask);//multi-threaded // ignore active tiles since the input grid is assumed to be a level set tree::LeafManager<const MaskTreeT> mgr(mask);// super fast #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Initializing grid and sweep mask"); #endif 
mParent->mSweepMask.clear(); mParent->mSweepMask.topologyUnion(mParent->mSdfGrid->constTree()); using LeafManagerT = tree::LeafManager<SweepMaskTreeT>; using LeafT = typename SweepMaskTreeT::LeafNodeType; LeafManagerT leafManager(mParent->mSweepMask); auto kernel = [&](LeafT& leaf, size_t /*leafIdx*/) { static const SdfValueT Unknown = std::numeric_limits<SdfValueT>::max(); SdfAccT acc(mSdfGrid->tree()); // The following hack is safe due to the topoloyUnion in // init and the fact that SdfValueT is known to be a floating point! SdfValueT *data = acc.probeLeaf(leaf.origin())->buffer().data(); for (auto voxelIter = leaf.beginValueOn(); voxelIter; ++voxelIter) {// mask voxels if (math::Abs( data[voxelIter.pos()] ) < Unknown ) { // disable boundary voxels from the mask tree voxelIter.setValue(false); } } }; leafManager.foreach( kernel ); // cache the leaf node origins for fast lookup in the sweeping kernels mParent->computeSweepMaskLeafOrigins(); #ifdef BENCHMARK_FAST_SWEEPING timer.stop(); #endif }// FastSweeping::MaskKernel::run // Private member data of MaskKernel FastSweeping *mParent; SdfGridT *mSdfGrid;//raw pointer, i.e. lock free };// FastSweeping::MaskKernel /// @brief Private class of FastSweeping to perform concurrent fast sweeping in two directions template <typename SdfGridT, typename ExtValueT> struct FastSweeping<SdfGridT, ExtValueT>::SweepingKernel { SweepingKernel(FastSweeping &parent) : mParent(&parent) {} SweepingKernel(const SweepingKernel&) = delete; SweepingKernel& operator=(const SweepingKernel&) = delete; /// Main method that performs concurrent bi-directional sweeps template<typename HashOp> void computeVoxelSlices(HashOp hash) { #ifdef BENCHMARK_FAST_SWEEPING util::CpuTimer timer; #endif // mask of the active voxels to be solved for, i.e. 
excluding boundary voxels const SweepMaskTreeT& maskTree = mParent->mSweepMask; using LeafManagerT = typename tree::LeafManager<const SweepMaskTreeT>; using LeafT = typename SweepMaskTreeT::LeafNodeType; LeafManagerT leafManager(maskTree); // compute the leaf node slices that have active voxels in them // the sliding window of the has keys is -14 to 21 (based on an 8x8x8 leaf node // and the extrema hash values i-j-k and i+j+k), but we use a larger mask window here to // easily accomodate any leaf dimension. The mask offset is used to be able to // store this in a fixed-size byte array constexpr int maskOffset = LeafT::DIM * 3; constexpr int maskRange = maskOffset * 2; // mark each possible slice in each leaf node that has one or more active voxels in it std::vector<int8_t> leafSliceMasks(leafManager.leafCount()*maskRange); auto kernel1 = [&](const LeafT& leaf, size_t leafIdx) { const size_t leafOffset = leafIdx * maskRange; for (auto voxelIter = leaf.cbeginValueOn(); voxelIter; ++voxelIter) { const Coord ijk = LeafT::offsetToLocalCoord(voxelIter.pos()); leafSliceMasks[leafOffset + hash(ijk) + maskOffset] = uint8_t(1); } }; leafManager.foreach( kernel1 ); // compute the voxel slice map using a thread-local-storage hash map // the key of the hash map is the slice index of the voxel coord (ijk.x() + ijk.y() + ijk.z()) // the values are an array of indices for every leaf that has active voxels with this slice index using ThreadLocalMap = std::unordered_map</*voxelSliceKey=*/int64_t, /*leafIdx=*/std::deque<size_t>>; tbb::enumerable_thread_specific<ThreadLocalMap> pool; auto kernel2 = [&](const LeafT& leaf, size_t leafIdx) { ThreadLocalMap& map = pool.local(); const Coord& origin = leaf.origin(); const int64_t leafKey = hash(origin); const size_t leafOffset = leafIdx * maskRange; for (int sliceIdx = 0; sliceIdx < maskRange; sliceIdx++) { if (leafSliceMasks[leafOffset + sliceIdx] == uint8_t(1)) { const int64_t voxelSliceKey = leafKey+sliceIdx-maskOffset; 
map[voxelSliceKey].emplace_back(leafIdx); } } }; leafManager.foreach( kernel2 ); // combine into a single ordered map keyed by the voxel slice key // note that this is now stored in a map ordered by voxel slice key, // so sweep slices can be processed in order for (auto poolIt = pool.begin(); poolIt != pool.end(); ++poolIt) { const ThreadLocalMap& map = *poolIt; for (const auto& it : map) { for (const size_t leafIdx : it.second) { mVoxelSliceMap[it.first].emplace_back(leafIdx, NodeMaskPtrT()); } } } // extract the voxel slice keys for random access into the map mVoxelSliceKeys.reserve(mVoxelSliceMap.size()); for (const auto& it : mVoxelSliceMap) { mVoxelSliceKeys.push_back(it.first); } // allocate the node masks in parallel, as the map is populated in serial auto kernel3 = [&](tbb::blocked_range<size_t>& range) { for (size_t i = range.begin(); i < range.end(); i++) { const int64_t key = mVoxelSliceKeys[i]; for (auto& it : mVoxelSliceMap[key]) { it.second = std::make_unique<NodeMaskT>(); } } }; tbb::parallel_for(tbb::blocked_range<size_t>(0, mVoxelSliceKeys.size()), kernel3); // each voxel slice contains a leafIdx-nodeMask pair, // this routine populates these node masks to select only the active voxels // from the mask tree that have the same voxel slice key // TODO: a small optimization here would be to union this leaf node mask with // a pre-computed one for this particular slice pattern auto kernel4 = [&](tbb::blocked_range<size_t>& range) { for (size_t i = range.begin(); i < range.end(); i++) { const int64_t voxelSliceKey = mVoxelSliceKeys[i]; LeafSliceArray& leafSliceArray = mVoxelSliceMap[voxelSliceKey]; for (LeafSlice& leafSlice : leafSliceArray) { const size_t leafIdx = leafSlice.first; NodeMaskPtrT& nodeMask = leafSlice.second; const LeafT& leaf = leafManager.leaf(leafIdx); const Coord& origin = leaf.origin(); const int64_t leafKey = hash(origin); for (auto voxelIter = leaf.cbeginValueOn(); voxelIter; ++voxelIter) { const Index voxelIdx = voxelIter.pos(); 
const Coord ijk = LeafT::offsetToLocalCoord(voxelIdx); const int64_t key = leafKey + hash(ijk); if (key == voxelSliceKey) { nodeMask->setOn(voxelIdx); } } } } }; tbb::parallel_for(tbb::blocked_range<size_t>(0, mVoxelSliceKeys.size()), kernel4); }// FastSweeping::SweepingKernel::computeVoxelSlices // Private struct for nearest neighbor grid points (very memory light!) struct NN { SdfValueT v; int n; inline static Coord ijk(const Coord &p, int i) { return p + FastSweeping::mOffset[i]; } NN() : v(), n() {} NN(const SdfAccT &a, const Coord &p, int i) : v(math::Abs(a.getValue(ijk(p,i)))), n(i) {} inline Coord operator()(const Coord &p) const { return ijk(p, n); } inline bool operator<(const NN &rhs) const { return v < rhs.v; } inline operator bool() const { return v < SdfValueT(1000); } };// NN void sweep() { typename ExtGridT::TreeType *tree2 = mParent->mExtGrid ? &mParent->mExtGrid->tree() : nullptr; const SdfValueT h = static_cast<SdfValueT>(mParent->mSdfGrid->voxelSize()[0]); const SdfValueT sqrt2h = math::Sqrt(SdfValueT(2))*h; const std::vector<Coord>& leafNodeOrigins = mParent->mSweepMaskLeafOrigins; int64_t voxelSliceIndex(0); auto kernel = [&](const tbb::blocked_range<size_t>& range) { using LeafT = typename SdfGridT::TreeType::LeafNodeType; SdfAccT acc1(mParent->mSdfGrid->tree()); auto acc2 = std::unique_ptr<ExtAccT>(tree2 ? 
new ExtAccT(*tree2) : nullptr); SdfValueT absV, sign, update, D; NN d1, d2, d3;//distance values and coordinates of closest neighbor points const LeafSliceArray& leafSliceArray = mVoxelSliceMap[voxelSliceIndex]; // Solves Goudonov's scheme: [x-d1]^2 + [x-d2]^2 + [x-d3]^2 = h^2 // where [X] = (X>0?X:0) and ai=min(di+1,di-1) for (size_t i = range.begin(); i < range.end(); ++i) { // iterate over all leafs in the slice and extract the leaf // and node mask for each slice pattern const LeafSlice& leafSlice = leafSliceArray[i]; const size_t leafIdx = leafSlice.first; const NodeMaskPtrT& nodeMask = leafSlice.second; const Coord& origin = leafNodeOrigins[leafIdx]; Coord ijk; for (auto indexIter = nodeMask->beginOn(); indexIter; ++indexIter) { // Get coordinate of center point of the FD stencil ijk = origin + LeafT::offsetToLocalCoord(indexIter.pos()); // Find the closes neighbors in the three axial directions d1 = std::min(NN(acc1, ijk, 0), NN(acc1, ijk, 1)); d2 = std::min(NN(acc1, ijk, 2), NN(acc1, ijk, 3)); d3 = std::min(NN(acc1, ijk, 4), NN(acc1, ijk, 5)); if (!(d1 || d2 || d3)) continue;//no valid neighbors // Get the center point of the FD stencil (assumed to be an active voxel) // Note this const_cast is normally unsafe but by design we know the tree // to be static, of floating-point type and containing active voxels only! SdfValueT &value = const_cast<SdfValueT&>(acc1.getValue(ijk)); // Extract the sign sign = value >= SdfValueT(0) ? SdfValueT(1) : SdfValueT(-1); // Absolute value absV = math::Abs(value); // sort values so d1 <= d2 <= d3 if (d2 < d1) std::swap(d1, d2); if (d3 < d2) std::swap(d2, d3); if (d2 < d1) std::swap(d1, d2); // Test if there is a solution depending on ONE of the neighboring voxels // if d2 - d1 >= h => d2 >= d1 + h then: // (x-d1)^2=h^2 => x = d1 + h update = d1.v + h; if (update <= d2.v) { if (update < absV) { value = sign * update; if (acc2) acc2->setValue(ijk, acc2->getValue(d1(ijk)));//update ext? }//update sdf? 
continue; }// one neighbor case // Test if there is a solution depending on TWO of the neighboring voxels // (x-d1)^2 + (x-d2)^2 = h^2 //D = SdfValueT(2) * h * h - math::Pow2(d1.v - d2.v);// = 2h^2-(d1-d2)^2 //if (D >= SdfValueT(0)) {// non-negative discriminant if (d2.v <= sqrt2h + d1.v) { D = SdfValueT(2) * h * h - math::Pow2(d1.v - d2.v);// = 2h^2-(d1-d2)^2 update = SdfValueT(0.5) * (d1.v + d2.v + std::sqrt(D)); if (update > d2.v && update <= d3.v) { if (update < absV) { value = sign * update; if (acc2) { d1.v -= update; d2.v -= update; // affine combination of two neighboring extension values const SdfValueT w = SdfValueT(1)/(d1.v+d2.v); acc2->setValue(ijk, w*(d1.v*acc2->getValue(d1(ijk)) + d2.v*acc2->getValue(d2(ijk)))); }//update ext? }//update sdf? continue; }//test for two neighbor case }//test for non-negative determinant // Test if there is a solution depending on THREE of the neighboring voxels // (x-d1)^2 + (x-d2)^2 + (x-d3)^2 = h^2 // 3x^2 - 2(d1 + d2 + d3)x + d1^2 + d2^2 + d3^2 = h^2 // ax^2 + bx + c=0, a=3, b=-2(d1+d2+d3), c=d1^2 + d2^2 + d3^2 - h^2 const SdfValueT d123 = d1.v + d2.v + d3.v; D = d123*d123 - SdfValueT(3)*(d1.v*d1.v + d2.v*d2.v + d3.v*d3.v - h * h); if (D >= SdfValueT(0)) {// non-negative discriminant update = SdfValueT(1.0/3.0) * (d123 + std::sqrt(D));//always passes test //if (update > d3.v) {//disabled due to round-off errors if (update < absV) { value = sign * update; if (acc2) { d1.v -= update; d2.v -= update; d3.v -= update; // affine combination of three neighboring extension values const SdfValueT w = SdfValueT(1)/(d1.v+d2.v+d3.v); acc2->setValue(ijk, w*(d1.v*acc2->getValue(d1(ijk)) + d2.v*acc2->getValue(d2(ijk)) + d3.v*acc2->getValue(d3(ijk)))); }//update ext? }//update sdf? 
}//test for non-negative determinant }//loop over coordinates } }; #ifdef BENCHMARK_FAST_SWEEPING util::CpuTimer timer("Forward sweep"); #endif for (size_t i = 0; i < mVoxelSliceKeys.size(); i++) { voxelSliceIndex = mVoxelSliceKeys[i]; tbb::parallel_for(tbb::blocked_range<size_t>(0, mVoxelSliceMap[voxelSliceIndex].size()), kernel); } #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Backward sweeps"); #endif for (size_t i = mVoxelSliceKeys.size(); i > 0; i--) { voxelSliceIndex = mVoxelSliceKeys[i-1]; tbb::parallel_for(tbb::blocked_range<size_t>(0, mVoxelSliceMap[voxelSliceIndex].size()), kernel); } #ifdef BENCHMARK_FAST_SWEEPING timer.stop(); #endif }// FastSweeping::SweepingKernel::sweep private: using NodeMaskT = typename SweepMaskTreeT::LeafNodeType::NodeMaskType; using NodeMaskPtrT = std::unique_ptr<NodeMaskT>; // using a unique ptr for the NodeMask allows for parallel allocation, // but makes this class not copy-constructible using LeafSlice = std::pair</*leafIdx=*/size_t, /*leafMask=*/NodeMaskPtrT>; using LeafSliceArray = std::deque<LeafSlice>; using VoxelSliceMap = std::map</*voxelSliceKey=*/int64_t, LeafSliceArray>; // Private member data of SweepingKernel FastSweeping *mParent; VoxelSliceMap mVoxelSliceMap; std::vector<int64_t> mVoxelSliceKeys; };// FastSweeping::SweepingKernel //////////////////////////////////////////////////////////////////////////////// template<typename GridT> typename GridT::Ptr fogToSdf(const GridT &fogGrid, typename GridT::ValueType isoValue, int nIter) { FastSweeping<GridT> fs; if (fs.initSdf(fogGrid, isoValue, /*isInputSdf*/false)) fs.sweep(nIter); return fs.sdfGrid(); } template<typename GridT> typename GridT::Ptr sdfToSdf(const GridT &sdfGrid, typename GridT::ValueType isoValue, int nIter) { FastSweeping<GridT> fs; if (fs.initSdf(sdfGrid, isoValue, /*isInputSdf*/true)) fs.sweep(nIter); return fs.sdfGrid(); } template<typename FogGridT, typename ExtOpT, typename ExtValueT> typename FogGridT::template 
ValueConverter<ExtValueT>::Type::Ptr fogToExt(const FogGridT &fogGrid, const ExtOpT &op, const ExtValueT& background, typename FogGridT::ValueType isoValue, int nIter) { FastSweeping<FogGridT, ExtValueT> fs; if (fs.initExt(fogGrid, op, background, isoValue, /*isInputSdf*/false)) fs.sweep(nIter); return fs.extGrid(); } template<typename SdfGridT, typename OpT, typename ExtValueT> typename SdfGridT::template ValueConverter<ExtValueT>::Type::Ptr sdfToExt(const SdfGridT &sdfGrid, const OpT &op, const ExtValueT &background, typename SdfGridT::ValueType isoValue, int nIter) { FastSweeping<SdfGridT> fs; if (fs.initExt(sdfGrid, op, background, isoValue, /*isInputSdf*/true)) fs.sweep(nIter); return fs.extGrid(); } template<typename FogGridT, typename ExtOpT, typename ExtValueT> std::pair<typename FogGridT::Ptr, typename FogGridT::template ValueConverter<ExtValueT>::Type::Ptr> fogToSdfAndExt(const FogGridT &fogGrid, const ExtOpT &op, const ExtValueT &background, typename FogGridT::ValueType isoValue, int nIter) { FastSweeping<FogGridT, ExtValueT> fs; if (fs.initExt(fogGrid, op, background, isoValue, /*isInputSdf*/false)) fs.sweep(nIter); return std::make_pair(fs.sdfGrid(), fs.extGrid()); } template<typename SdfGridT, typename ExtOpT, typename ExtValueT> std::pair<typename SdfGridT::Ptr, typename SdfGridT::template ValueConverter<ExtValueT>::Type::Ptr> sdfToSdfAndExt(const SdfGridT &sdfGrid, const ExtOpT &op, const ExtValueT &background, typename SdfGridT::ValueType isoValue, int nIter) { FastSweeping<SdfGridT, ExtValueT> fs; if (fs.initExt(sdfGrid, op, background, isoValue, /*isInputSdf*/true)) fs.sweep(nIter); return std::make_pair(fs.sdfGrid(), fs.extGrid()); } template<typename GridT> typename GridT::Ptr dilateSdf(const GridT &sdfGrid, int dilation, NearestNeighbors nn, int nIter) { FastSweeping<GridT> fs; if (fs.initDilate(sdfGrid, dilation, nn)) fs.sweep(nIter); return fs.sdfGrid(); } template<typename GridT, typename MaskTreeT> typename GridT::Ptr maskSdf(const GridT 
&sdfGrid, const Grid<MaskTreeT> &mask, bool ignoreActiveTiles, int nIter) { FastSweeping<GridT> fs; if (fs.initMask(sdfGrid, mask, ignoreActiveTiles)) fs.sweep(nIter); return fs.sdfGrid(); } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_FASTSWEEPING_HAS_BEEN_INCLUDED
70,315
C
43.90166
153
0.613511
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/DenseSparseTools.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_TOOLS_DENSESPARSETOOLS_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_DENSESPARSETOOLS_HAS_BEEN_INCLUDED #include <tbb/parallel_reduce.h> #include <tbb/blocked_range3d.h> #include <tbb/blocked_range2d.h> #include <tbb/blocked_range.h> #include <openvdb/Types.h> #include <openvdb/tree/LeafManager.h> #include "Dense.h" #include <algorithm> // for std::min() #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Selectively extract and transform data from a dense grid, producing a /// sparse tree with leaf nodes only (e.g. create a tree from the square /// of values greater than a cutoff.) /// @param dense A dense grid that acts as a data source /// @param functor A functor that selects and transforms data for output /// @param background The background value of the resulting sparse grid /// @param threaded Option to use threaded or serial code path /// @return @c Ptr to tree with the valuetype and configuration defined /// by typedefs in the @c functor. /// @note To achieve optimal sparsity consider calling the prune() /// method on the result. /// @note To simply copy the all the data from a Dense grid to a /// OpenVDB Grid, use tools::copyFromDense() for better performance. /// /// The type of the sparse tree is determined by the specified OtpType /// functor by means of the typedef OptType::ResultTreeType /// /// The OptType function is responsible for the the transformation of /// dense grid data to sparse grid data on a per-voxel basis. /// /// Only leaf nodes with active values will be added to the sparse grid. 
/// /// The OpType must struct that defines a the minimal form /// @code /// struct ExampleOp /// { /// using ResultTreeType = DesiredTreeType; /// /// template<typename IndexOrCoord> /// void OpType::operator() (const DenseValueType a, const IndexOrCoord& ijk, /// ResultTreeType::LeafNodeType* leaf); /// }; /// @endcode /// /// For example, to generate a <ValueType, 5, 4, 3> tree with valuesOn /// at locations greater than a given maskvalue /// @code /// template<typename ValueType> /// class Rule /// { /// public: /// // Standard tree type (e.g. MaskTree or FloatTree in openvdb.h) /// using ResultTreeType = typename openvdb::tree::Tree4<ValueType, 5, 4, 3>::Type; /// /// using ResultLeafNodeType = typename ResultTreeType::LeafNodeType; /// using ResultValueType = typename ResultTreeType::ValueType; /// /// using DenseValueType = float; /// /// using Index = vdbmath::Coord::ValueType; /// /// Rule(const DenseValueType& value): mMaskValue(value){}; /// /// template<typename IndexOrCoord> /// void operator()(const DenseValueType& a, const IndexOrCoord& offset, /// ResultLeafNodeType* leaf) const /// { /// if (a > mMaskValue) { /// leaf->setValueOn(offset, a); /// } /// } /// /// private: /// const DenseValueType mMaskValue; /// }; /// @endcode template<typename OpType, typename DenseType> typename OpType::ResultTreeType::Ptr extractSparseTree(const DenseType& dense, const OpType& functor, const typename OpType::ResultValueType& background, bool threaded = true); /// This struct that aids template resolution of a new tree type /// has the same configuration at TreeType, but the ValueType from /// DenseType. template<typename DenseType, typename TreeType> struct DSConverter { using ValueType = typename DenseType::ValueType; using Type = typename TreeType::template ValueConverter<ValueType>::Type; }; /// @brief Copy data from the intersection of a sparse tree and a dense input grid. 
/// The resulting tree has the same configuration as the sparse tree, but holds /// the data type specified by the dense input. /// @param dense A dense grid that acts as a data source /// @param mask The active voxels and tiles intersected with dense define iteration mask /// @param background The background value of the resulting sparse grid /// @param threaded Option to use threaded or serial code path /// @return @c Ptr to tree with the same configuration as @c mask but of value type /// defined by @c dense. template<typename DenseType, typename MaskTreeType> typename DSConverter<DenseType, MaskTreeType>::Type::Ptr extractSparseTreeWithMask(const DenseType& dense, const MaskTreeType& mask, const typename DenseType::ValueType& background, bool threaded = true); /// Apply a point-wise functor to the intersection of a dense grid and a given bounding box /// @param dense A dense grid to be transformed /// @param bbox Index space bounding box, define region where the transformation is applied /// @param op A functor that acts on the dense grid value type /// @param parallel Used to select multithreaded or single threaded /// Minimally, the @c op class has to support a @c operator() method, /// @code /// // Square values in a grid /// struct Op /// { /// ValueT operator()(const ValueT& in) const /// { /// // do work /// ValueT result = in * in; /// /// return result; /// } /// }; /// @endcode /// NB: only Dense grids with memory layout zxy are supported template<typename ValueT, typename OpType> void transformDense(Dense<ValueT, openvdb::tools::LayoutZYX>& dense, const openvdb::CoordBBox& bbox, const OpType& op, bool parallel=true); /// We currrently support the following operations when compositing sparse /// data into a dense grid. enum DSCompositeOp { DS_OVER, DS_ADD, DS_SUB, DS_MIN, DS_MAX, DS_MULT, DS_SET }; /// @brief Composite data from a sparse tree into a dense array of the same value type. 
/// @param dense Dense grid to be altered by the operation
/// @param source Sparse data to composite into @c dense
/// @param alpha Sparse Alpha mask used in compositing operations.
/// @param beta Constant multiplier on src
/// @param strength Constant multiplier on alpha
/// @param threaded Enable threading for this operation.
template<DSCompositeOp, typename TreeT>
void compositeToDense(Dense<typename TreeT::ValueType, LayoutZYX>& dense,
    const TreeT& source, const TreeT& alpha,
    const typename TreeT::ValueType beta,
    const typename TreeT::ValueType strength,
    bool threaded = true);

/// @brief Functor-based class used to extract data that satisfies some
/// criteria defined by the embedded @c OpType functor. The @c extractSparseTree
/// function wraps this class.
///
/// Designed for use with tbb::parallel_reduce: worker copies are made via the
/// tbb::split constructor and their partial result trees are combined in join().
template<typename OpType, typename DenseType>
class SparseExtractor
{
public:
    using Index = openvdb::math::Coord::ValueType;
    using DenseValueType = typename DenseType::ValueType;
    using ResultTreeType = typename OpType::ResultTreeType;
    using ResultValueType = typename ResultTreeType::ValueType;
    using ResultLeafNodeType = typename ResultTreeType::LeafNodeType;
    using MaskTree = typename ResultTreeType::template ValueConverter<ValueMask>::Type;

    using Range3d = tbb::blocked_range3d<Index, Index, Index>;

private:
    const DenseType& mDense;        // data source (not owned)
    const OpType& mFunctor;         // selection/copy criterion (not owned)
    const ResultValueType mBackground;
    const openvdb::math::CoordBBox mBBox;   // iteration window in index space
    const Index mWidth;             // leaf-node dimension of the result tree
    typename ResultTreeType::Ptr mMask;     // per-worker partial result
    openvdb::math::Coord mMin;      // leaf-aligned origin of the iteration window

public:
    // Extract over the full index-space bounding box of the dense grid.
    SparseExtractor(const DenseType& dense, const OpType& functor,
        const ResultValueType background) :
        mDense(dense), mFunctor(functor),
        mBackground(background),
        mBBox(dense.bbox()),
        mWidth(ResultLeafNodeType::DIM),
        mMask( new ResultTreeType(mBackground))
    {}

    // Extract over a caller-supplied sub-window of the dense grid.
    SparseExtractor(const DenseType& dense, const openvdb::math::CoordBBox& bbox,
        const OpType& functor, const ResultValueType background) :
        mDense(dense), mFunctor(functor),
        mBackground(background),
        mBBox(bbox),
        mWidth(ResultLeafNodeType::DIM),
        mMask( new ResultTreeType(mBackground))
    {
        // mBBox must be inside the coordinate range of the dense grid
        if (!dense.bbox().isInside(mBBox)) {
            OPENVDB_THROW(ValueError, "Data extraction window out of bound");
        }
    }

    // tbb::split constructor: each worker gets its own empty result tree.
    SparseExtractor(SparseExtractor& other, tbb::split):
        mDense(other.mDense), mFunctor(other.mFunctor),
        mBackground(other.mBackground), mBBox(other.mBBox),
        mWidth(other.mWidth),
        mMask(new ResultTreeType(mBackground)),
        mMin(other.mMin)
    {}

    // Run the extraction and return the assembled result tree.
    typename ResultTreeType::Ptr extract(bool threaded = true)
    {
        // Construct 3D range of leaf nodes that
        // intersect mBBox.

        // Snap the bbox to nearest leaf nodes min and max

        openvdb::math::Coord padded_min = mBBox.min();
        openvdb::math::Coord padded_max = mBBox.max();

        // Round down to leaf-node boundaries (mWidth is a power of two).
        padded_min &= ~(mWidth - 1);
        padded_max &= ~(mWidth - 1);

        padded_max[0] += mWidth - 1;
        padded_max[1] += mWidth - 1;
        padded_max[2] += mWidth - 1;

        // number of leaf nodes in each direction
        // division by leaf width, e.g. 8 in most cases

        const Index xleafCount = ( padded_max.x() - padded_min.x() + 1 ) / mWidth;
        const Index yleafCount = ( padded_max.y() - padded_min.y() + 1 ) / mWidth;
        const Index zleafCount = ( padded_max.z() - padded_min.z() + 1 ) / mWidth;

        mMin = padded_min;

        Range3d leafRange(0, xleafCount, 1,
                          0, yleafCount, 1,
                          0, zleafCount, 1);

        // Iterate over the leafnodes applying *this as a functor.
        if (threaded) {
            tbb::parallel_reduce(leafRange, *this);
        } else {
            (*this)(leafRange);
        }

        return mMask;
    }

    // Process one sub-range of candidate leaf nodes (tbb body).
    void operator()(const Range3d& range)
    {
        // Scratch leaf, reused across iterations and only handed to the tree
        // when it ends up non-empty.
        ResultLeafNodeType* leaf = nullptr;

        // Unpack the range3d item.
        const Index imin = range.pages().begin();
        const Index imax = range.pages().end();

        const Index jmin = range.rows().begin();
        const Index jmax = range.rows().end();

        const Index kmin = range.cols().begin();
        const Index kmax = range.cols().end();

        // loop over all the candidate leafs. Adding only those with 'true' values
        // to the tree
        for (Index i = imin; i < imax; ++i) {
            for (Index j = jmin; j < jmax; ++j) {
                for (Index k = kmin; k < kmax; ++k) {

                    // Calculate the origin of candidate leaf
                    const openvdb::math::Coord origin =
                        mMin + openvdb::math::Coord(mWidth * i,
                                                    mWidth * j,
                                                    mWidth * k );

                    if (leaf == nullptr) {
                        leaf = new ResultLeafNodeType(origin, mBackground);
                    } else {
                        // Recycle the previous (unused) scratch leaf.
                        leaf->setOrigin(origin);
                        leaf->fill(mBackground);
                        leaf->setValuesOff();
                    }

                    // The bounding box for this leaf

                    openvdb::math::CoordBBox localBBox = leaf->getNodeBoundingBox();

                    // Shrink to the intersection with mBBox (i.e. the dense
                    // volume)

                    localBBox.intersect(mBBox);

                    // Early out for non-intersecting leafs

                    if (localBBox.empty()) continue;


                    const openvdb::math::Coord start = localBBox.getStart();
                    const openvdb::math::Coord end   = localBBox.getEnd();

                    // Order the looping to respect the memory layout in
                    // the Dense source

                    if (mDense.memoryLayout() == openvdb::tools::LayoutZYX) {

                        // z is contiguous: walk the dense pointer and the leaf
                        // offset in lock-step along z.
                        openvdb::math::Coord ijk;
                        Index offset;
                        const DenseValueType* dp;
                        for (ijk[0] = start.x(); ijk[0] < end.x(); ++ijk[0] ) {
                            for (ijk[1] = start.y(); ijk[1] < end.y(); ++ijk[1] ) {
                                for (ijk[2] = start.z(),
                                         offset = ResultLeafNodeType::coordToOffset(ijk),
                                         dp = &mDense.getValue(ijk);
                                     ijk[2] < end.z(); ++ijk[2], ++offset, ++dp) {

                                    mFunctor(*dp, offset, leaf);
                                }
                            }
                        }

                    } else {

                        // x is contiguous: pass the coordinate (not the leaf
                        // offset) to the functor.
                        openvdb::math::Coord ijk;
                        const DenseValueType* dp;
                        for (ijk[2] = start.z(); ijk[2] < end.z(); ++ijk[2]) {
                            for (ijk[1] = start.y(); ijk[1] < end.y(); ++ijk[1]) {
                                for (ijk[0] = start.x(),
                                         dp = &mDense.getValue(ijk);
                                     ijk[0] < end.x(); ++ijk[0], ++dp) {

                                    mFunctor(*dp, ijk, leaf);
                                }
                            }
                        }
                    }

                    // Only add non-empty leafs (empty is defined as all inactive)

                    if (!leaf->isEmpty()) {
                        mMask->addLeaf(leaf);   // tree takes ownership
                        leaf = nullptr;
                    }
                }
            }
        }

        // Clean up an unused leaf.
        if (leaf != nullptr) delete leaf;
    }

    // tbb reduction: merge a worker's partial tree into this one.
    void join(SparseExtractor& rhs) {
        mMask->merge(*rhs.mMask);
    }
}; // class SparseExtractor


template<typename OpType, typename DenseType>
typename OpType::ResultTreeType::Ptr
extractSparseTree(const DenseType& dense, const OpType& functor,
    const typename OpType::ResultValueType& background, bool threaded)
{
    // Construct the mask using a parallel reduce pattern.
    // Each thread computes disjoint mask-trees.  The join merges
    // into a single tree.

    SparseExtractor<OpType, DenseType> extractor(dense, functor, background);

    return extractor.extract(threaded);
}


/// @brief Functor-based class used to extract data from a dense grid, at
/// the index-space intersection with a supplied mask in the form of a sparse tree.
/// The @c extractSparseTreeWithMask function wraps this class.
///
/// Designed for tbb::parallel_reduce over a precomputed vector of mask leaf nodes.
template<typename DenseType, typename MaskTreeType>
class SparseMaskedExtractor
{
public:
    using _ResultTreeType = typename DSConverter<DenseType, MaskTreeType>::Type;
    using ResultTreeType = _ResultTreeType;
    using ResultLeafNodeType = typename ResultTreeType::LeafNodeType;
    using ResultValueType = typename ResultTreeType::ValueType;
    using DenseValueType = ResultValueType;

    using MaskTree = typename ResultTreeType::template ValueConverter<ValueMask>::Type;
    using MaskLeafCIter = typename MaskTree::LeafCIter;
    using MaskLeafVec = std::vector<const typename MaskTree::LeafNodeType*>;


    SparseMaskedExtractor(const DenseType& dense,
        const ResultValueType& background,
        const MaskLeafVec& leafVec
    ):
        mDense(dense), mBackground(background), mBBox(dense.bbox()),
        mLeafVec(leafVec),
        mResult(new ResultTreeType(mBackground))
    {}

    // tbb::split constructor: each worker gets its own empty result tree.
    SparseMaskedExtractor(const SparseMaskedExtractor& other, tbb::split):
        mDense(other.mDense), mBackground(other.mBackground), mBBox(other.mBBox),
        mLeafVec(other.mLeafVec), mResult( new ResultTreeType(mBackground))
    {}

    // Run the masked extraction over all mask leafs and return the result tree.
    typename ResultTreeType::Ptr extract(bool threaded = true)
    {
        tbb::blocked_range<size_t> range(0, mLeafVec.size());

        if (threaded) {
            tbb::parallel_reduce(range, *this);
        } else {
            (*this)(range);
        }

        return mResult;
    }

    // Used in looping over leaf nodes in the masked grid
    // and using the active mask to select data to copy into the result tree.
    void operator()(const tbb::blocked_range<size_t>& range)
    {
        // Scratch leaf, reused across iterations and only handed to the tree
        // when it ends up non-empty.
        ResultLeafNodeType* leaf = nullptr;

        // loop over all the candidate leafs. Adding only those with 'true' values
        // to the tree

        for (size_t idx = range.begin(); idx < range.end(); ++ idx) {

            const typename MaskTree::LeafNodeType* maskLeaf = mLeafVec[idx];

            // The bounding box for this leaf

            openvdb::math::CoordBBox localBBox = maskLeaf->getNodeBoundingBox();

            // Shrink to the intersection with the dense volume

            localBBox.intersect(mBBox);

            // Early out if there was no intersection

            if (localBBox.empty()) continue;

            // Reset or allocate the target leaf

            if (leaf == nullptr) {
                leaf = new ResultLeafNodeType(maskLeaf->origin(), mBackground);
            } else {
                leaf->setOrigin(maskLeaf->origin());
                leaf->fill(mBackground);
                leaf->setValuesOff();
            }

            // Iterate over the intersecting bounding box
            // copying active values to the result tree

            const openvdb::math::Coord start = localBBox.getStart();
            const openvdb::math::Coord end   = localBBox.getEnd();

            openvdb::math::Coord ijk;

            if (mDense.memoryLayout() == openvdb::tools::LayoutZYX
                && maskLeaf->isDense()) {

                // Fast path: fully-active mask leaf and z-contiguous dense data,
                // so the source pointer and leaf offset advance in lock-step.
                Index offset;
                const DenseValueType* src;
                for (ijk[0] = start.x(); ijk[0] < end.x(); ++ijk[0] ) {
                    for (ijk[1] = start.y(); ijk[1] < end.y(); ++ijk[1] ) {
                        for (ijk[2] = start.z(),
                                 offset = ResultLeafNodeType::coordToOffset(ijk),
                                 src = &mDense.getValue(ijk);
                             ijk[2] < end.z(); ++ijk[2], ++offset, ++src) {

                            // copy into leaf
                            leaf->setValueOn(offset, *src);
                        }
                    }
                }

            } else {

                // General path: test each mask voxel before copying.
                Index offset;
                for (ijk[0] = start.x(); ijk[0] < end.x(); ++ijk[0] ) {
                    for (ijk[1] = start.y(); ijk[1] < end.y(); ++ijk[1] ) {
                        for (ijk[2] = start.z(),
                                 offset = ResultLeafNodeType::coordToOffset(ijk);
                             ijk[2] < end.z(); ++ijk[2], ++offset) {

                            if (maskLeaf->isValueOn(offset)) {
                                const ResultValueType denseValue = mDense.getValue(ijk);
                                leaf->setValueOn(offset, denseValue);
                            }
                        }
                    }
                }
            }

            // Only add non-empty leafs (empty is defined as all inactive)

            if (!leaf->isEmpty()) {
                mResult->addLeaf(leaf);     // tree takes ownership
                leaf = nullptr;
            }
        }

        // Clean up an unused leaf.

        if (leaf != nullptr) delete leaf;
    }

    // tbb reduction: merge a worker's partial tree into this one.
    void join(SparseMaskedExtractor& rhs) {
        mResult->merge(*rhs.mResult);
    }

private:
    const DenseType& mDense;
    const ResultValueType mBackground;
    const openvdb::math::CoordBBox& mBBox;
    const MaskLeafVec& mLeafVec;
    typename ResultTreeType::Ptr mResult;

}; // class SparseMaskedExtractor


/// @brief a simple utility class used by @c extractSparseTreeWithMask
/// Copies every visited dense value into the result leaf unconditionally.
template<typename _ResultTreeType, typename DenseValueType>
struct ExtractAll
{
    using ResultTreeType = _ResultTreeType;
    using ResultLeafNodeType = typename ResultTreeType::LeafNodeType;

    template<typename CoordOrIndex>
    inline void operator()(const DenseValueType& a, const CoordOrIndex& offset,
        ResultLeafNodeType* leaf) const
    {
        leaf->setValueOn(offset, a);
    }
};


template<typename DenseType, typename MaskTreeType>
typename DSConverter<DenseType, MaskTreeType>::Type::Ptr
extractSparseTreeWithMask(const DenseType& dense,
    const MaskTreeType& maskProxy,
    const typename DenseType::ValueType& background,
    bool threaded)
{
    using LeafExtractor = SparseMaskedExtractor<DenseType, MaskTreeType>;
    using DenseValueType = typename LeafExtractor::DenseValueType;
    using ResultTreeType = typename LeafExtractor::ResultTreeType;
    using MaskLeafVec = typename LeafExtractor::MaskLeafVec;
    using MaskTree = typename LeafExtractor::MaskTree;
    using MaskLeafCIter = typename LeafExtractor::MaskLeafCIter;
    using ExtractionRule = ExtractAll<ResultTreeType, DenseValueType>;

    // Use Mask tree to hold the topology

    MaskTree maskTree(maskProxy, false, TopologyCopy());

    // Construct an array of pointers to the mask leafs.
    const size_t leafCount = maskTree.leafCount();
    MaskLeafVec leafarray(leafCount);
    MaskLeafCIter leafiter = maskTree.cbeginLeaf();
    for (size_t n = 0; n != leafCount; ++n, ++leafiter) {
        leafarray[n] = leafiter.getLeaf();
    }


    // Extract the data that is masked by leaf nodes in the mask.

    LeafExtractor leafextractor(dense, background, leafarray);
    typename ResultTreeType::Ptr resultTree = leafextractor.extract(threaded);


    // Extract data that is masked by tiles in the mask.


    // Loop over the mask tiles, extracting the data into new trees.
    // These trees will be leaf-orthogonal to the leafTree (i.e. no leaf
    // nodes will overlap).  Merge these trees into the result.

    typename MaskTreeType::ValueOnCIter tileIter(maskProxy);
    tileIter.setMaxDepth(MaskTreeType::ValueOnCIter::LEAF_DEPTH - 1);

    // Return the leaf tree if the mask had no tiles

    if (!tileIter) return resultTree;

    ExtractionRule allrule;

    // Loop over the tiles in series, but the actual data extraction
    // is in parallel.

    CoordBBox bbox;
    for ( ; tileIter; ++tileIter) {

        // Find the intersection of the tile with the dense grid.

        tileIter.getBoundingBox(bbox);
        bbox.intersect(dense.bbox());

        if (bbox.empty()) continue;

        SparseExtractor<ExtractionRule, DenseType> copyData(dense, bbox, allrule, background);
        typename ResultTreeType::Ptr fromTileTree = copyData.extract(threaded);
        resultTree->merge(*fromTileTree);
    }

    return resultTree;
}


/// @brief Class that applies a functor to the index space intersection
/// of a prescribed bounding box and the dense grid.
/// NB: This class only supports DenseGrids with ZYX memory layout.
template<typename _ValueT, typename OpType> class DenseTransformer { public: using ValueT = _ValueT; using DenseT = Dense<ValueT, openvdb::tools::LayoutZYX>; using IntType = openvdb::math::Coord::ValueType; using RangeType = tbb::blocked_range2d<IntType, IntType>; private: DenseT& mDense; const OpType& mOp; openvdb::math::CoordBBox mBBox; public: DenseTransformer(DenseT& dense, const openvdb::math::CoordBBox& bbox, const OpType& functor): mDense(dense), mOp(functor), mBBox(dense.bbox()) { // The iteration space is the intersection of the // input bbox and the index-space covered by the dense grid mBBox.intersect(bbox); } DenseTransformer(const DenseTransformer& other) : mDense(other.mDense), mOp(other.mOp), mBBox(other.mBBox) {} void apply(bool threaded = true) { // Early out if the iteration space is empty if (mBBox.empty()) return; const openvdb::math::Coord start = mBBox.getStart(); const openvdb::math::Coord end = mBBox.getEnd(); // The iteration range only the slower two directions. const RangeType range(start.x(), end.x(), 1, start.y(), end.y(), 1); if (threaded) { tbb::parallel_for(range, *this); } else { (*this)(range); } } void operator()(const RangeType& range) const { // The stride in the z-direction. // Note: the bbox is [inclusive, inclusive] const size_t zlength = size_t(mBBox.max().z() - mBBox.min().z() + 1); const IntType imin = range.rows().begin(); const IntType imax = range.rows().end(); const IntType jmin = range.cols().begin(); const IntType jmax = range.cols().end(); openvdb::math::Coord xyz(imin, jmin, mBBox.min().z()); for (xyz[0] = imin; xyz[0] != imax; ++xyz[0]) { for (xyz[1] = jmin; xyz[1] != jmax; ++xyz[1]) { mOp.transform(mDense, xyz, zlength); } } } }; // class DenseTransformer /// @brief a wrapper struct used to avoid unnecessary computation of /// memory access from @c Coord when all offsets are guaranteed to be /// within the dense grid. 
template<typename ValueT, typename PointWiseOp>
struct ContiguousOp
{
    ContiguousOp(const PointWiseOp& op) : mOp(op){}

    using DenseT = Dense<ValueT, openvdb::tools::LayoutZYX>;
    // Apply mOp in place to @a size consecutive z-values starting at @a ijk.
    inline void transform(DenseT& dense, openvdb::math::Coord& ijk, size_t size) const
    {
        // const_cast is safe: the caller owns a mutable Dense grid.
        ValueT* dp = const_cast<ValueT*>(&dense.getValue(ijk));
        for (size_t offset = 0; offset < size; ++offset) {
            dp[offset] = mOp(dp[offset]);
        }
    }

    const PointWiseOp mOp;
};


/// Apply a point-wise functor to the intersection of a dense grid and a given bounding box
template<typename ValueT, typename PointwiseOpT>
void transformDense(Dense<ValueT, openvdb::tools::LayoutZYX>& dense,
    const openvdb::CoordBBox& bbox,
    const PointwiseOpT& functor, bool parallel)
{
    using OpT = ContiguousOp<ValueT, PointwiseOpT>;

    // Convert the Op so it operates on a contiguous line in memory

    OpT op(functor);

    // Apply to the index space intersection in the dense grid

    DenseTransformer<ValueT, OpT> transformer(dense, bbox, op);
    transformer.apply(parallel);
}


/// @brief Composites sparse @c source/@c alpha trees into a dense grid using
/// the templated @c CompositeMethod.  Two strategies are provided:
/// sparseComposite() iterates the sparse topology; denseComposite() iterates
/// the dense bounding box.  Wrapped by @c compositeToDense.
template<typename CompositeMethod, typename _TreeT>
class SparseToDenseCompositor
{
public:
    using TreeT = _TreeT;
    using ValueT = typename TreeT::ValueType;
    using LeafT = typename TreeT::LeafNodeType;
    using MaskTreeT = typename TreeT::template ValueConverter<ValueMask>::Type;
    using MaskLeafT = typename MaskTreeT::LeafNodeType;
    using DenseT = Dense<ValueT, openvdb::tools::LayoutZYX>;
    using Index = openvdb::math::Coord::ValueType;
    using Range3d = tbb::blocked_range3d<Index, Index, Index>;

    SparseToDenseCompositor(DenseT& dense, const TreeT& source, const TreeT& alpha,
        const ValueT beta, const ValueT strength) :
        mDense(dense), mSource(source), mAlpha(alpha), mBeta(beta), mStrength(strength)
    {}

    SparseToDenseCompositor(const SparseToDenseCompositor& other):
        mDense(other.mDense), mSource(other.mSource), mAlpha(other.mAlpha),
        mBeta(other.mBeta), mStrength(other.mStrength) {}


    // Composite using the union of the source and alpha topology as the
    // iteration space (leaf nodes first, then coarser tiles).
    void sparseComposite(bool threaded)
    {
        const ValueT beta = mBeta;
        const ValueT strength = mStrength;

        // construct a tree that defines the iteration space

        MaskTreeT maskTree(mSource, false /*background*/, openvdb::TopologyCopy());
        maskTree.topologyUnion(mAlpha);

        // Composite regions that are represented by leafnodes in either mAlpha or mSource
        // Parallelize over bool-leafs

        openvdb::tree::LeafManager<const MaskTreeT> maskLeafs(maskTree);
        maskLeafs.foreach(*this, threaded);

        // Composite regions that are represented by tiles
        // Parallelize within each tile.

        typename MaskTreeT::ValueOnCIter citer = maskTree.cbeginValueOn();
        citer.setMaxDepth(MaskTreeT::ValueOnCIter::LEAF_DEPTH - 1);

        if (!citer) return;

        typename tree::ValueAccessor<const TreeT> alphaAccessor(mAlpha);
        typename tree::ValueAccessor<const TreeT> sourceAccessor(mSource);

        for (; citer; ++citer) {

            const openvdb::math::Coord org = citer.getCoord();

            // Early out if both alpha and source are zero in this tile.

            const ValueT alphaValue = alphaAccessor.getValue(org);
            const ValueT sourceValue = sourceAccessor.getValue(org);

            if (openvdb::math::isZero(alphaValue) &&
                openvdb::math::isZero(sourceValue)) continue;

            // Compute overlap of tile with the dense grid

            openvdb::math::CoordBBox localBBox = citer.getBoundingBox();
            localBBox.intersect(mDense.bbox());

            // Early out if there is no intersection

            if (localBBox.empty()) continue;

            // Composite the tile-uniform values into the dense grid.
            compositeFromTile(mDense, localBBox, sourceValue,
                alphaValue, beta, strength, threaded);
        }
    }

    // Composites leaf values where the alpha values are active.
    // Used in sparseComposite
    void inline operator()(const MaskLeafT& maskLeaf, size_t /*i*/) const
    {
        using ULeaf = UniformLeaf;
        openvdb::math::CoordBBox localBBox = maskLeaf.getNodeBoundingBox();
        localBBox.intersect(mDense.bbox());

        // Early out for non-overlapping leafs

        if (localBBox.empty()) return;

        const openvdb::math::Coord org = maskLeaf.origin();
        const LeafT* alphaLeaf = mAlpha.probeLeaf(org);
        const LeafT* sourceLeaf = mSource.probeLeaf(org);

        // A missing leaf means the region is a tile: substitute a UniformLeaf
        // proxy carrying the tile value so compositeFromLeaf has one code path.
        if (!sourceLeaf) {

            // Create a source leaf proxy with the correct value
            ULeaf uniformSource(mSource.getValue(org));

            if (!alphaLeaf) {

                // Create an alpha leaf proxy with the correct value
                ULeaf uniformAlpha(mAlpha.getValue(org));

                compositeFromLeaf(mDense, localBBox, uniformSource, uniformAlpha,
                    mBeta, mStrength);
            } else {
                compositeFromLeaf(mDense, localBBox, uniformSource, *alphaLeaf,
                    mBeta, mStrength);
            }
        } else {
            if (!alphaLeaf) {

                // Create an alpha leaf proxy with the correct value
                ULeaf uniformAlpha(mAlpha.getValue(org));

                compositeFromLeaf(mDense, localBBox, *sourceLeaf, uniformAlpha,
                    mBeta, mStrength);
            } else {
                compositeFromLeaf(mDense, localBBox, *sourceLeaf, *alphaLeaf,
                    mBeta, mStrength);
            }
        }
    }
    // i.e. it assumes that all valueOff Alpha voxels have value 0.
    template<typename LeafT1, typename LeafT2>
    inline static void compositeFromLeaf(DenseT& dense, const openvdb::math::CoordBBox& bbox,
        const LeafT1& source, const LeafT2& alpha,
        const ValueT beta, const ValueT strength)
    {
        using IntType = openvdb::math::Coord::ValueType;

        const ValueT sbeta = strength * beta;

        openvdb::math::Coord ijk = bbox.min();


        if (alpha.isDense() /*all active values*/) {

            // Optimal path for dense alphaLeaf

            const IntType size = bbox.max().z() + 1 - bbox.min().z();

            for (ijk[0] = bbox.min().x(); ijk[0] < bbox.max().x() + 1; ++ijk[0]) {
                for (ijk[1] = bbox.min().y(); ijk[1] < bbox.max().y() + 1; ++ijk[1]) {

                    // Contiguous z-scanlines in both the dense grid and the leafs.
                    ValueT* d = const_cast<ValueT*>(&dense.getValue(ijk));
                    const ValueT* a = &alpha.getValue(ijk);
                    const ValueT* s = &source.getValue(ijk);

                    for (IntType idx = 0; idx < size; ++idx) {
                        d[idx] = CompositeMethod::apply(d[idx], a[idx], s[idx],
                            strength, beta, sbeta);
                    }
                }
            }
        } else {

            // AlphaLeaf has non-active cells.

            for (ijk[0] = bbox.min().x(); ijk[0] < bbox.max().x() + 1; ++ijk[0]) {
                for (ijk[1] = bbox.min().y(); ijk[1] < bbox.max().y() + 1; ++ijk[1]) {
                    for (ijk[2] = bbox.min().z(); ijk[2] < bbox.max().z() + 1; ++ijk[2]) {

                        if (alpha.isValueOn(ijk)) {
                            dense.setValue(ijk, CompositeMethod::apply(dense.getValue(ijk),
                                alpha.getValue(ijk), source.getValue(ijk),
                                strength, beta, sbeta));
                        }
                    }
                }
            }
        }
    }

    // Composite constant (tile) source/alpha values over the given window.
    inline static void compositeFromTile(DenseT& dense, openvdb::math::CoordBBox& bbox,
        const ValueT& sourceValue, const ValueT& alphaValue,
        const ValueT& beta, const ValueT& strength,
        bool threaded)
    {
        using TileTransformer = UniformTransformer;
        TileTransformer functor(sourceValue, alphaValue, beta, strength);

        // Transform the data inside the bbox according to the TileTransformer.
        transformDense(dense, bbox, functor, threaded);
    }

    // Composite using the dense grid's bounding box as the iteration space.
    void denseComposite(bool threaded)
    {
        /// Construct a range that corresponds to the
        /// bounding box of the dense volume

        const openvdb::math::CoordBBox& bbox = mDense.bbox();

        Range3d range(bbox.min().x(), bbox.max().x(), LeafT::DIM,
                      bbox.min().y(), bbox.max().y(), LeafT::DIM,
                      bbox.min().z(), bbox.max().z(), LeafT::DIM);

        // Iterate over the range, compositing into
        // the dense grid using value accessors for
        // the sparse grids.

        if (threaded) {
            tbb::parallel_for(range, *this);
        } else {
            (*this)(range);
        }
    }

    // Composites a dense region using value accessors
    // into a dense grid
    void operator()(const Range3d& range) const
    {
        // Use value accessors to alpha and source

        typename tree::ValueAccessor<const TreeT> alphaAccessor(mAlpha);
        typename tree::ValueAccessor<const TreeT> sourceAccessor(mSource);

        const ValueT strength = mStrength;
        const ValueT beta = mBeta;
        const ValueT sbeta = strength * beta;

        // Unpack the range3d item.

        const Index imin = range.pages().begin();
        const Index imax = range.pages().end();

        const Index jmin = range.rows().begin();
        const Index jmax = range.rows().end();

        const Index kmin = range.cols().begin();
        const Index kmax = range.cols().end();

        openvdb::Coord ijk;

        for (ijk[0] = imin; ijk[0] < imax; ++ijk[0]) {
            for (ijk[1] = jmin; ijk[1] < jmax; ++ijk[1]) {
                for (ijk[2] = kmin; ijk[2] < kmax; ++ijk[2]) {
                    const ValueT d_old = mDense.getValue(ijk);
                    const ValueT& alpha = alphaAccessor.getValue(ijk);
                    const ValueT& src = sourceAccessor.getValue(ijk);

                    mDense.setValue(ijk,
                        CompositeMethod::apply(d_old, alpha, src, strength, beta, sbeta));
                }
            }
        }
    }

private:
    // Internal class that wraps the templated composite method
    // for use when both alpha and source are uniform over
    // a prescribed bbox (e.g. a tile).
    class UniformTransformer
    {
    public:
        UniformTransformer(const ValueT& source, const ValueT& alpha, const ValueT& _beta,
            const ValueT& _strength) :
            mSource(source), mAlpha(alpha), mBeta(_beta), mStrength(_strength),
            mSBeta(_strength * _beta)
        {}

        ValueT operator()(const ValueT& input) const
        {
            return CompositeMethod::apply(input, mAlpha, mSource, mStrength, mBeta, mSBeta);
        }

    private:
        const ValueT mSource;
        const ValueT mAlpha;
        const ValueT mBeta;
        const ValueT mStrength;
        const ValueT mSBeta;
    };


    // Simple Class structure that mimics a leaf
    // with uniform values. Holds LeafT::DIM copies
    // of a value in an array.
    struct Line { ValueT mValues[LeafT::DIM]; };

    class UniformLeaf : private Line
    {
    public:
        using ValueT = typename LeafT::ValueType;

        using BaseT = Line;
        UniformLeaf(const ValueT& value) : BaseT(init(value)) {}

        static const BaseT init(const ValueT& value) {
            BaseT tmp;
            for (openvdb::Index i = 0; i < LeafT::DIM; ++i) {
                tmp.mValues[i] = value;
            }
            return tmp;
        }

        // Mimic the LeafNode interface needed by compositeFromLeaf:
        // a uniform proxy is always "dense" and always active.
        bool isDense() const { return true; }
        bool isValueOn(openvdb::math::Coord&) const { return true; }

        const ValueT& getValue(const openvdb::math::Coord&) const
        {
            return BaseT::mValues[0];
        }
    };

private:
    DenseT& mDense;
    const TreeT& mSource;
    const TreeT& mAlpha;
    ValueT mBeta;
    ValueT mStrength;

}; // class SparseToDenseCompositor


namespace ds
{
//@{
/// @brief Point wise methods used to apply various compositing operations.
template<typename ValueT> struct OpOver { static inline ValueT apply(const ValueT u, const ValueT alpha, const ValueT v, const ValueT strength, const ValueT beta, const ValueT /*sbeta*/) { return (u + strength * alpha * (beta * v - u)); } }; template<typename ValueT> struct OpAdd { static inline ValueT apply(const ValueT u, const ValueT alpha, const ValueT v, const ValueT /*strength*/, const ValueT /*beta*/, const ValueT sbeta) { return (u + sbeta * alpha * v); } }; template<typename ValueT> struct OpSub { static inline ValueT apply(const ValueT u, const ValueT alpha, const ValueT v, const ValueT /*strength*/, const ValueT /*beta*/, const ValueT sbeta) { return (u - sbeta * alpha * v); } }; template<typename ValueT> struct OpMin { static inline ValueT apply(const ValueT u, const ValueT alpha, const ValueT v, const ValueT s /*trength*/, const ValueT beta, const ValueT /*sbeta*/) { return ( ( 1 - s * alpha) * u + s * alpha * std::min(u, beta * v) ); } }; template<typename ValueT> struct OpMax { static inline ValueT apply(const ValueT u, const ValueT alpha, const ValueT v, const ValueT s/*trength*/, const ValueT beta, const ValueT /*sbeta*/) { return ( ( 1 - s * alpha ) * u + s * alpha * std::min(u, beta * v) ); } }; template<typename ValueT> struct OpMult { static inline ValueT apply(const ValueT u, const ValueT alpha, const ValueT v, const ValueT s/*trength*/, const ValueT /*beta*/, const ValueT sbeta) { return ( ( 1 + alpha * (sbeta * v - s)) * u ); } }; //@} //@{ /// Translator that converts an enum to compositing functor types template<DSCompositeOp OP, typename ValueT> struct CompositeFunctorTranslator{}; template<typename ValueT> struct CompositeFunctorTranslator<DS_OVER, ValueT>{ using OpT = OpOver<ValueT>; }; template<typename ValueT> struct CompositeFunctorTranslator<DS_ADD, ValueT>{ using OpT = OpAdd<ValueT>; }; template<typename ValueT> struct CompositeFunctorTranslator<DS_SUB, ValueT>{ using OpT = OpSub<ValueT>; }; template<typename ValueT> struct 
CompositeFunctorTranslator<DS_MIN, ValueT>{ using OpT = OpMin<ValueT>; }; template<typename ValueT> struct CompositeFunctorTranslator<DS_MAX, ValueT>{ using OpT = OpMax<ValueT>; }; template<typename ValueT> struct CompositeFunctorTranslator<DS_MULT, ValueT>{ using OpT = OpMult<ValueT>; }; //@} } // namespace ds template<DSCompositeOp OpT, typename TreeT> inline void compositeToDense( Dense<typename TreeT::ValueType, LayoutZYX>& dense, const TreeT& source, const TreeT& alpha, const typename TreeT::ValueType beta, const typename TreeT::ValueType strength, bool threaded) { using ValueT = typename TreeT::ValueType; using Translator = ds::CompositeFunctorTranslator<OpT, ValueT>; using Method = typename Translator::OpT; if (openvdb::math::isZero(strength)) return; SparseToDenseCompositor<Method, TreeT> tool(dense, source, alpha, beta, strength); if (openvdb::math::isZero(alpha.background()) && openvdb::math::isZero(source.background())) { // Use the sparsity of (alpha U source) as the iteration space. tool.sparseComposite(threaded); } else { // Use the bounding box of dense as the iteration space. tool.denseComposite(threaded); } } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif //OPENVDB_TOOLS_DENSESPARSETOOLS_HAS_BEEN_INCLUDED
42,215
C
34.386421
99
0.580813
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetSphere.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// /// @file LevelSetSphere.h /// /// @brief Generate a narrow-band level set of sphere. /// /// @note By definition a level set has a fixed narrow band width /// (the half width is defined by LEVEL_SET_HALF_WIDTH in Types.h), /// whereas an SDF can have a variable narrow band width. #ifndef OPENVDB_TOOLS_LEVELSETSPHERE_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_LEVELSETSPHERE_HAS_BEEN_INCLUDED #include <openvdb/Grid.h> #include <openvdb/Types.h> #include <openvdb/math/Math.h> #include <openvdb/util/NullInterrupter.h> #include "SignedFloodFill.h" #include <type_traits> #include <tbb/enumerable_thread_specific.h> #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <tbb/blocked_range.h> #include <thread> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a sphere. /// /// @param radius radius of the sphere in world units /// @param center center of the sphere in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// @param interrupt a pointer adhering to the util::NullInterrupter interface /// @param threaded if true multi-threading is enabled (true by default) /// /// @note @c GridType::ValueType must be a floating-point scalar. /// @note The leapfrog algorithm employed in this method is best suited /// for a single large sphere. 
For multiple small spheres consider /// using the faster algorithm in ParticlesToLevelSet.h template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetSphere(float radius, const openvdb::Vec3f& center, float voxelSize, float halfWidth = float(LEVEL_SET_HALF_WIDTH), InterruptT* interrupt = nullptr, bool threaded = true); /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a sphere. /// /// @param radius radius of the sphere in world units /// @param center center of the sphere in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// @param threaded if true multi-threading is enabled (true by default) /// /// @note @c GridType::ValueType must be a floating-point scalar. /// @note The leapfrog algorithm employed in this method is best suited /// for a single large sphere. For multiple small spheres consider /// using the faster algorithm in ParticlesToLevelSet.h template<typename GridType> typename GridType::Ptr createLevelSetSphere(float radius, const openvdb::Vec3f& center, float voxelSize, float halfWidth = float(LEVEL_SET_HALF_WIDTH), bool threaded = true) { return createLevelSetSphere<GridType, util::NullInterrupter>(radius,center,voxelSize,halfWidth,nullptr,threaded); } //////////////////////////////////////// /// @brief Generates a signed distance field (or narrow band level /// set) to a single sphere. /// /// @note The leapfrog algorithm employed in this class is best /// suited for a single large sphere. 
For multiple small spheres consider /// using the faster algorithm in tools/ParticlesToLevelSet.h template<typename GridT, typename InterruptT = util::NullInterrupter> class LevelSetSphere { public: using TreeT = typename GridT::TreeType; using ValueT = typename GridT::ValueType; using Vec3T = typename math::Vec3<ValueT>; static_assert(std::is_floating_point<ValueT>::value, "level set grids must have scalar, floating-point value types"); /// @brief Constructor /// /// @param radius radius of the sphere in world units /// @param center center of the sphere in world units /// @param interrupt pointer to optional interrupter. Use template /// argument util::NullInterrupter if no interruption is desired. /// /// @note If the radius of the sphere is smaller than /// 1.5*voxelSize, i.e. the sphere is smaller than the Nyquist /// frequency of the grid, it is ignored! LevelSetSphere(ValueT radius, const Vec3T &center, InterruptT* interrupt = nullptr) : mRadius(radius), mCenter(center), mInterrupt(interrupt) { if (mRadius<=0) OPENVDB_THROW(ValueError, "radius must be positive"); } /// @return a narrow-band level set of the sphere /// /// @param voxelSize Size of voxels in world units /// @param halfWidth Half-width of narrow-band in voxel units /// @param threaded If true multi-threading is enabled (true by default) typename GridT::Ptr getLevelSet(ValueT voxelSize, ValueT halfWidth, bool threaded = true) { mGrid = createLevelSet<GridT>(voxelSize, halfWidth); this->rasterSphere(voxelSize, halfWidth, threaded); mGrid->setGridClass(GRID_LEVEL_SET); return mGrid; } private: void rasterSphere(ValueT dx, ValueT w, bool threaded) { if (!(dx>0.0f)) OPENVDB_THROW(ValueError, "voxel size must be positive"); if (!(w>1)) OPENVDB_THROW(ValueError, "half-width must be larger than one"); // Define radius of sphere and narrow-band in voxel units const ValueT r0 = mRadius/dx, rmax = r0 + w; // Radius below the Nyquist frequency if (r0 < 1.5f) return; // Define center of sphere in voxel 
units const Vec3T c(mCenter[0]/dx, mCenter[1]/dx, mCenter[2]/dx); // Define bounds of the voxel coordinates const int imin=math::Floor(c[0]-rmax), imax=math::Ceil(c[0]+rmax); const int jmin=math::Floor(c[1]-rmax), jmax=math::Ceil(c[1]+rmax); const int kmin=math::Floor(c[2]-rmax), kmax=math::Ceil(c[2]+rmax); // Allocate a ValueAccessor for accelerated random access typename GridT::Accessor accessor = mGrid->getAccessor(); if (mInterrupt) mInterrupt->start("Generating level set of sphere"); tbb::enumerable_thread_specific<TreeT> pool(mGrid->tree()); auto kernel = [&](const tbb::blocked_range<int>& r) { openvdb::Coord ijk; int &i = ijk[0], &j = ijk[1], &k = ijk[2], m=1; TreeT &tree = pool.local(); typename GridT::Accessor acc(tree); // Compute signed distances to sphere using leapfrogging in k for (i = r.begin(); i <= r.end(); ++i) { if (util::wasInterrupted(mInterrupt)) return; const auto x2 = math::Pow2(ValueT(i) - c[0]); for (j = jmin; j <= jmax; ++j) { const auto x2y2 = math::Pow2(ValueT(j) - c[1]) + x2; for (k = kmin; k <= kmax; k += m) { m = 1; // Distance in voxel units to sphere const auto v = math::Sqrt(x2y2 + math::Pow2(ValueT(k)-c[2]))-r0; const auto d = math::Abs(v); if (d < w) { // inside narrow band acc.setValue(ijk, dx*v);// distance in world units } else { // outside narrow band m += math::Floor(d-w);// leapfrog } }//end leapfrog over k }//end loop over j }//end loop over i };// kernel if (threaded) { // The code blow is making use of a TLS container to minimize the number of concurrent trees // initially populated by tbb::parallel_for and subsequently merged by tbb::parallel_reduce. // Experiments have demonstrated this approach to outperform others, including serial reduction // and a custom concurrent reduction implementation. 
tbb::parallel_for(tbb::blocked_range<int>(imin, imax, 128), kernel); using RangeT = tbb::blocked_range<typename tbb::enumerable_thread_specific<TreeT>::iterator>; struct Op { const bool mDelete; TreeT *mTree; Op(TreeT &tree) : mDelete(false), mTree(&tree) {} Op(const Op& other, tbb::split) : mDelete(true), mTree(new TreeT(other.mTree->background())) {} ~Op() { if (mDelete) delete mTree; } void operator()(RangeT &r) { for (auto i=r.begin(); i!=r.end(); ++i) this->merge(*i);} void join(Op &other) { this->merge(*(other.mTree)); } void merge(TreeT &tree) { mTree->merge(tree, openvdb::MERGE_ACTIVE_STATES); } } op( mGrid->tree() ); tbb::parallel_reduce(RangeT(pool.begin(), pool.end(), 4), op); } else { kernel(tbb::blocked_range<int>(imin, imax));//serial mGrid->tree().merge(*pool.begin(), openvdb::MERGE_ACTIVE_STATES); } // Define consistent signed distances outside the narrow-band tools::signedFloodFill(mGrid->tree(), threaded); if (mInterrupt) mInterrupt->end(); } const ValueT mRadius; const Vec3T mCenter; InterruptT* mInterrupt; typename GridT::Ptr mGrid; };// LevelSetSphere //////////////////////////////////////// template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetSphere(float radius, const openvdb::Vec3f& center, float voxelSize, float halfWidth, InterruptT* interrupt, bool threaded) { // GridType::ValueType is required to be a floating-point scalar. static_assert(std::is_floating_point<typename GridType::ValueType>::value, "level set grids must have scalar, floating-point value types"); using ValueT = typename GridType::ValueType; LevelSetSphere<GridType, InterruptT> factory(ValueT(radius), center, interrupt); return factory.getLevelSet(ValueT(voxelSize), ValueT(halfWidth), threaded); } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_LEVELSETSPHERE_HAS_BEEN_INCLUDED
10,109
C
42.205128
117
0.640716
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/PointPartitioner.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file PointPartitioner.h /// /// @brief Spatially partitions points using a parallel radix-based /// sorting algorithm. /// /// @details Performs a stable deterministic sort; partitioning the same /// point sequence will produce the same result each time. /// @details The algorithm is unbounded meaning that points may be /// distributed anywhere in index space. /// @details The actual points are never stored in the tool, only /// offsets into an external array. /// /// @author Mihai Alden #ifndef OPENVDB_TOOLS_POINT_PARTITIONER_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_POINT_PARTITIONER_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/math/Transform.h> #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <tbb/task_scheduler_init.h> #include <algorithm> #include <cmath> // for std::isfinite() #include <deque> #include <map> #include <set> #include <utility> // std::pair #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { //////////////////////////////////////// /// @brief Partitions points into @c BucketLog2Dim aligned buckets /// using a parallel radix-based sorting algorithm. /// /// @interface PointArray /// Expected interface for the PointArray container: /// @code /// template<typename VectorType> /// struct PointArray /// { /// // The type used to represent world-space point positions /// using PosType = VectorType; /// /// // Return the number of points in the array /// size_t size() const; /// /// // Return the world-space position of the nth point in the array. /// void getPos(size_t n, PosType& xyz) const; /// }; /// @endcode /// /// @details Performs a stable deterministic sort; partitioning the same /// point sequence will produce the same result each time. /// @details The algorithm is unbounded meaning that points may be /// distributed anywhere in index space. 
/// @details The actual points are never stored in the tool, only /// offsets into an external array. /// @details @c BucketLog2Dim defines the bucket coordinate dimensions, /// i.e. BucketLog2Dim = 3 corresponds to a bucket that spans /// a (2^3)^3 = 8^3 voxel region. template<typename PointIndexType = uint32_t, Index BucketLog2Dim = 3> class PointPartitioner { public: enum { LOG2DIM = BucketLog2Dim }; using Ptr = SharedPtr<PointPartitioner>; using ConstPtr = SharedPtr<const PointPartitioner>; using IndexType = PointIndexType; static constexpr Index bits = 1 + (3 * BucketLog2Dim); // signed, so if bits is exactly 16, int32 is required using VoxelOffsetType = typename std::conditional<(bits < 16), int16_t, typename std::conditional<(bits < 32), int32_t, int64_t>::type>::type; using VoxelOffsetArray = std::unique_ptr<VoxelOffsetType[]>; class IndexIterator; ////////// PointPartitioner(); /// @brief Partitions point indices into @c BucketLog2Dim aligned buckets. /// /// @param points list of world space points. /// @param xform world to index space transform. /// @param voxelOrder sort point indices by local voxel offsets. /// @param recordVoxelOffsets construct local voxel offsets /// @param cellCenteredTransform toggle the cell-centered interpretation that imagines world /// space as divided into discrete cells (e.g., cubes) centered /// on the image of the index-space lattice points. template<typename PointArray> void construct(const PointArray& points, const math::Transform& xform, bool voxelOrder = false, bool recordVoxelOffsets = false, bool cellCenteredTransform = true); /// @brief Partitions point indices into @c BucketLog2Dim aligned buckets. /// /// @param points list of world space points. /// @param xform world to index space transform. /// @param voxelOrder sort point indices by local voxel offsets. 
/// @param recordVoxelOffsets construct local voxel offsets /// @param cellCenteredTransform toggle the cell-centered interpretation that imagines world /// space as divided into discrete cells (e.g., cubes) centered /// on the image of the index-space lattice points. template<typename PointArray> static Ptr create(const PointArray& points, const math::Transform& xform, bool voxelOrder = false, bool recordVoxelOffsets = false, bool cellCenteredTransform = true); /// @brief Returns the number of buckets. size_t size() const { return mPageCount; } /// @brief true if the container size is 0, false otherwise. bool empty() const { return mPageCount == 0; } /// @brief Removes all data and frees up memory. void clear(); /// @brief Exchanges the content of the container by another. void swap(PointPartitioner&); /// @brief Returns the point indices for bucket @a n IndexIterator indices(size_t n) const; /// @brief Returns the coordinate-aligned bounding box for bucket @a n CoordBBox getBBox(size_t n) const { return CoordBBox::createCube(mPageCoordinates[n], (1u << BucketLog2Dim)); } /// @brief Returns the origin coordinate for bucket @a n const Coord& origin(size_t n) const { return mPageCoordinates[n]; } /// @brief Returns a list of @c LeafNode voxel offsets for the points. /// @note The list is optionally constructed. const VoxelOffsetArray& voxelOffsets() const { return mVoxelOffsets; } /// @brief Returns @c true if this point partitioning was constructed /// using a cell-centered transform. /// @note Cell-centered interpretation is the default behavior. 
bool usingCellCenteredTransform() const { return mUsingCellCenteredTransform; } private: // Disallow copying PointPartitioner(const PointPartitioner&); PointPartitioner& operator=(const PointPartitioner&); std::unique_ptr<IndexType[]> mPointIndices; VoxelOffsetArray mVoxelOffsets; std::unique_ptr<IndexType[]> mPageOffsets; std::unique_ptr<Coord[]> mPageCoordinates; IndexType mPageCount; bool mUsingCellCenteredTransform; }; // class PointPartitioner using UInt32PointPartitioner = PointPartitioner<uint32_t, 3>; template<typename PointIndexType, Index BucketLog2Dim> class PointPartitioner<PointIndexType, BucketLog2Dim>::IndexIterator { public: using IndexType = PointIndexType; IndexIterator(IndexType* begin = nullptr, IndexType* end = nullptr) : mBegin(begin), mEnd(end), mItem(begin) {} /// @brief Rewind to first item. void reset() { mItem = mBegin; } /// @brief Number of point indices in the iterator range. size_t size() const { return mEnd - mBegin; } /// @brief Returns the item to which this iterator is currently pointing. IndexType& operator*() { assert(mItem != nullptr); return *mItem; } const IndexType& operator*() const { assert(mItem != nullptr); return *mItem; } /// @brief Return @c true if this iterator is not yet exhausted. operator bool() const { return mItem < mEnd; } bool test() const { return mItem < mEnd; } /// @brief Advance to the next item. IndexIterator& operator++() { assert(this->test()); ++mItem; return *this; } /// @brief Advance to the next item. 
bool next() { this->operator++(); return this->test(); } bool increment() { this->next(); return this->test(); } /// @brief Equality operators bool operator==(const IndexIterator& other) const { return mItem == other.mItem; } bool operator!=(const IndexIterator& other) const { return !this->operator==(other); } private: IndexType * const mBegin, * const mEnd; IndexType * mItem; }; // class PointPartitioner::IndexIterator //////////////////////////////////////// //////////////////////////////////////// // Implementation details namespace point_partitioner_internal { template<typename PointIndexType> struct ComputePointOrderOp { ComputePointOrderOp(PointIndexType* pointOrder, const PointIndexType* bucketCounters, const PointIndexType* bucketOffsets) : mPointOrder(pointOrder) , mBucketCounters(bucketCounters) , mBucketOffsets(bucketOffsets) { } void operator()(const tbb::blocked_range<size_t>& range) const { for (size_t n = range.begin(), N = range.end(); n != N; ++n) { mPointOrder[n] += mBucketCounters[mBucketOffsets[n]]; } } PointIndexType * const mPointOrder; PointIndexType const * const mBucketCounters; PointIndexType const * const mBucketOffsets; }; // struct ComputePointOrderOp template<typename PointIndexType> struct CreateOrderedPointIndexArrayOp { CreateOrderedPointIndexArrayOp(PointIndexType* orderedIndexArray, const PointIndexType* pointOrder, const PointIndexType* indices) : mOrderedIndexArray(orderedIndexArray) , mPointOrder(pointOrder) , mIndices(indices) { } void operator()(const tbb::blocked_range<size_t>& range) const { for (size_t n = range.begin(), N = range.end(); n != N; ++n) { mOrderedIndexArray[mPointOrder[n]] = mIndices[n]; } } PointIndexType * const mOrderedIndexArray; PointIndexType const * const mPointOrder; PointIndexType const * const mIndices; }; // struct CreateOrderedPointIndexArrayOp template<typename PointIndexType, Index BucketLog2Dim> struct VoxelOrderOp { static constexpr Index bits = 1 + (3 * BucketLog2Dim); // signed, so if bits 
is exactly 16, int32 is required using VoxelOffsetType = typename std::conditional<(bits < 16), int16_t, typename std::conditional<(bits < 32), int32_t, int64_t>::type>::type; using VoxelOffsetArray = std::unique_ptr<VoxelOffsetType[]>; using IndexArray = std::unique_ptr<PointIndexType[]>; VoxelOrderOp(IndexArray& indices, const IndexArray& pages,const VoxelOffsetArray& offsets) : mIndices(indices.get()) , mPages(pages.get()) , mVoxelOffsets(offsets.get()) { } void operator()(const tbb::blocked_range<size_t>& range) const { PointIndexType pointCount = 0; for (size_t n(range.begin()), N(range.end()); n != N; ++n) { pointCount = std::max(pointCount, (mPages[n + 1] - mPages[n])); } const PointIndexType voxelCount = 1 << (3 * BucketLog2Dim); // allocate histogram buffers std::unique_ptr<VoxelOffsetType[]> offsets(new VoxelOffsetType[pointCount]); std::unique_ptr<PointIndexType[]> sortedIndices(new PointIndexType[pointCount]); std::unique_ptr<PointIndexType[]> histogram(new PointIndexType[voxelCount]); for (size_t n(range.begin()), N(range.end()); n != N; ++n) { PointIndexType * const indices = mIndices + mPages[n]; pointCount = mPages[n + 1] - mPages[n]; // local copy of voxel offsets. 
for (PointIndexType i = 0; i < pointCount; ++i) { offsets[i] = mVoxelOffsets[ indices[i] ]; } // reset histogram memset(&histogram[0], 0, voxelCount * sizeof(PointIndexType)); // compute histogram for (PointIndexType i = 0; i < pointCount; ++i) { ++histogram[ offsets[i] ]; } PointIndexType count = 0, startOffset; for (int i = 0; i < int(voxelCount); ++i) { if (histogram[i] > 0) { startOffset = count; count += histogram[i]; histogram[i] = startOffset; } } // sort indices based on voxel offset for (PointIndexType i = 0; i < pointCount; ++i) { sortedIndices[ histogram[ offsets[i] ]++ ] = indices[i]; } memcpy(&indices[0], &sortedIndices[0], sizeof(PointIndexType) * pointCount); } } PointIndexType * const mIndices; PointIndexType const * const mPages; VoxelOffsetType const * const mVoxelOffsets; }; // struct VoxelOrderOp //////////////////////////////////////// template<typename T> struct Array { using Ptr = std::unique_ptr<Array>; Array(size_t size) : mSize(size), mData(new T[size]) { } size_t size() const { return mSize; } T* data() { return mData.get(); } const T* data() const { return mData.get(); } void clear() { mSize = 0; mData.reset(); } private: size_t mSize; std::unique_ptr<T[]> mData; }; // struct Array template<typename PointIndexType> struct MoveSegmentDataOp { using SegmentPtr = typename Array<PointIndexType>::Ptr; MoveSegmentDataOp(std::vector<PointIndexType*>& indexLists, SegmentPtr* segments) : mIndexLists(&indexLists[0]), mSegments(segments) { } void operator()(const tbb::blocked_range<size_t>& range) const { for (size_t n(range.begin()), N(range.end()); n != N; ++n) { PointIndexType* indices = mIndexLists[n]; SegmentPtr& segment = mSegments[n]; tbb::parallel_for(tbb::blocked_range<size_t>(0, segment->size()), CopyData(indices, segment->data())); segment.reset(); // clear data } } private: struct CopyData { CopyData(PointIndexType* lhs, const PointIndexType* rhs) : mLhs(lhs), mRhs(rhs) { } void operator()(const tbb::blocked_range<size_t>& range) const 
{ for (size_t n = range.begin(), N = range.end(); n != N; ++n) { mLhs[n] = mRhs[n]; } } PointIndexType * const mLhs; PointIndexType const * const mRhs; }; PointIndexType * const * const mIndexLists; SegmentPtr * const mSegments; }; // struct MoveSegmentDataOp template<typename PointIndexType> struct MergeBinsOp { using Segment = Array<PointIndexType>; using SegmentPtr = typename Segment::Ptr; using IndexPair = std::pair<PointIndexType, PointIndexType>; using IndexPairList = std::deque<IndexPair>; using IndexPairListPtr = std::shared_ptr<IndexPairList>; using IndexPairListMap = std::map<Coord, IndexPairListPtr>; using IndexPairListMapPtr = std::shared_ptr<IndexPairListMap>; MergeBinsOp(IndexPairListMapPtr* bins, SegmentPtr* indexSegments, SegmentPtr* offsetSegments, Coord* coords, size_t numSegments) : mBins(bins) , mIndexSegments(indexSegments) , mOffsetSegments(offsetSegments) , mCoords(coords) , mNumSegments(numSegments) { } void operator()(const tbb::blocked_range<size_t>& range) const { std::vector<IndexPairListPtr*> data; std::vector<PointIndexType> arrayOffsets; for (size_t n = range.begin(), N = range.end(); n != N; ++n) { const Coord& ijk = mCoords[n]; size_t numIndices = 0; data.clear(); for (size_t i = 0, I = mNumSegments; i < I; ++i) { IndexPairListMap& idxMap = *mBins[i]; typename IndexPairListMap::iterator iter = idxMap.find(ijk); if (iter != idxMap.end() && iter->second) { IndexPairListPtr& idxListPtr = iter->second; data.push_back(&idxListPtr); numIndices += idxListPtr->size(); } } if (data.empty() || numIndices == 0) continue; SegmentPtr& indexSegment = mIndexSegments[n]; SegmentPtr& offsetSegment = mOffsetSegments[n]; indexSegment.reset(new Segment(numIndices)); offsetSegment.reset(new Segment(numIndices)); arrayOffsets.clear(); arrayOffsets.reserve(data.size()); for (size_t i = 0, count = 0, I = data.size(); i < I; ++i) { arrayOffsets.push_back(PointIndexType(count)); count += (*data[i])->size(); } tbb::parallel_for(tbb::blocked_range<size_t>(0, 
data.size()), CopyData(&data[0], &arrayOffsets[0], indexSegment->data(), offsetSegment->data())); } } private: struct CopyData { CopyData(IndexPairListPtr** indexLists, const PointIndexType* arrayOffsets, PointIndexType* indices, PointIndexType* offsets) : mIndexLists(indexLists) , mArrayOffsets(arrayOffsets) , mIndices(indices) , mOffsets(offsets) { } void operator()(const tbb::blocked_range<size_t>& range) const { using CIter = typename IndexPairList::const_iterator; for (size_t n = range.begin(), N = range.end(); n != N; ++n) { const PointIndexType arrayOffset = mArrayOffsets[n]; PointIndexType* indexPtr = &mIndices[arrayOffset]; PointIndexType* offsetPtr = &mOffsets[arrayOffset]; IndexPairListPtr& list = *mIndexLists[n]; for (CIter it = list->begin(), end = list->end(); it != end; ++it) { const IndexPair& data = *it; *indexPtr++ = data.first; *offsetPtr++ = data.second; } list.reset(); // clear data } } IndexPairListPtr * const * const mIndexLists; PointIndexType const * const mArrayOffsets; PointIndexType * const mIndices; PointIndexType * const mOffsets; }; // struct CopyData IndexPairListMapPtr * const mBins; SegmentPtr * const mIndexSegments; SegmentPtr * const mOffsetSegments; Coord const * const mCoords; size_t const mNumSegments; }; // struct MergeBinsOp template<typename PointArray, typename PointIndexType, typename VoxelOffsetType> struct BinPointIndicesOp { using PosType = typename PointArray::PosType; using IndexPair = std::pair<PointIndexType, PointIndexType>; using IndexPairList = std::deque<IndexPair>; using IndexPairListPtr = std::shared_ptr<IndexPairList>; using IndexPairListMap = std::map<Coord, IndexPairListPtr>; using IndexPairListMapPtr = std::shared_ptr<IndexPairListMap>; BinPointIndicesOp(IndexPairListMapPtr* data, const PointArray& points, VoxelOffsetType* voxelOffsets, const math::Transform& m, Index binLog2Dim, Index bucketLog2Dim, size_t numSegments, bool cellCenteredTransform) : mData(data) , mPoints(&points) , 
mVoxelOffsets(voxelOffsets) , mXForm(m) , mBinLog2Dim(binLog2Dim) , mBucketLog2Dim(bucketLog2Dim) , mNumSegments(numSegments) , mCellCenteredTransform(cellCenteredTransform) { } void operator()(const tbb::blocked_range<size_t>& range) const { const Index log2dim = mBucketLog2Dim; const Index log2dim2 = 2 * log2dim; const Index bucketMask = (1u << log2dim) - 1u; const Index binLog2dim = mBinLog2Dim; const Index binLog2dim2 = 2 * binLog2dim; const Index binMask = (1u << (log2dim + binLog2dim)) - 1u; const Index invBinMask = ~binMask; IndexPairList * idxList = nullptr; Coord ijk(0, 0, 0), loc(0, 0, 0), binCoord(0, 0, 0), lastBinCoord(1, 2, 3); PosType pos; PointIndexType bucketOffset = 0; VoxelOffsetType voxelOffset = 0; const bool cellCentered = mCellCenteredTransform; const size_t numPoints = mPoints->size(); const size_t segmentSize = numPoints / mNumSegments; for (size_t n = range.begin(), N = range.end(); n != N; ++n) { IndexPairListMapPtr& dataPtr = mData[n]; if (!dataPtr) dataPtr.reset(new IndexPairListMap()); IndexPairListMap& idxMap = *dataPtr; const bool isLastSegment = (n + 1) >= mNumSegments; const size_t start = n * segmentSize; const size_t end = isLastSegment ? numPoints : (start + segmentSize); for (size_t i = start; i != end; ++i) { mPoints->getPos(i, pos); if (std::isfinite(pos[0]) && std::isfinite(pos[1]) && std::isfinite(pos[2])) { ijk = cellCentered ? 
mXForm.worldToIndexCellCentered(pos) : mXForm.worldToIndexNodeCentered(pos); if (mVoxelOffsets) { loc[0] = ijk[0] & bucketMask; loc[1] = ijk[1] & bucketMask; loc[2] = ijk[2] & bucketMask; voxelOffset = VoxelOffsetType( (loc[0] << log2dim2) + (loc[1] << log2dim) + loc[2]); } binCoord[0] = ijk[0] & invBinMask; binCoord[1] = ijk[1] & invBinMask; binCoord[2] = ijk[2] & invBinMask; ijk[0] &= binMask; ijk[1] &= binMask; ijk[2] &= binMask; ijk[0] >>= log2dim; ijk[1] >>= log2dim; ijk[2] >>= log2dim; bucketOffset = PointIndexType( (ijk[0] << binLog2dim2) + (ijk[1] << binLog2dim) + ijk[2]); if (lastBinCoord != binCoord) { lastBinCoord = binCoord; IndexPairListPtr& idxListPtr = idxMap[lastBinCoord]; if (!idxListPtr) idxListPtr.reset(new IndexPairList()); idxList = idxListPtr.get(); } idxList->push_back(IndexPair(PointIndexType(i), bucketOffset)); if (mVoxelOffsets) mVoxelOffsets[i] = voxelOffset; } } } } IndexPairListMapPtr * const mData; PointArray const * const mPoints; VoxelOffsetType * const mVoxelOffsets; math::Transform const mXForm; Index const mBinLog2Dim; Index const mBucketLog2Dim; size_t const mNumSegments; bool const mCellCenteredTransform; }; // struct BinPointIndicesOp template<typename PointIndexType> struct OrderSegmentsOp { using IndexArray = std::unique_ptr<PointIndexType[]>; using SegmentPtr = typename Array<PointIndexType>::Ptr; OrderSegmentsOp(SegmentPtr* indexSegments, SegmentPtr* offsetSegments, IndexArray* pageOffsetArrays, IndexArray* pageIndexArrays, Index binVolume) : mIndexSegments(indexSegments) , mOffsetSegments(offsetSegments) , mPageOffsetArrays(pageOffsetArrays) , mPageIndexArrays(pageIndexArrays) , mBinVolume(binVolume) { } void operator()(const tbb::blocked_range<size_t>& range) const { const size_t bucketCountersSize = size_t(mBinVolume); IndexArray bucketCounters(new PointIndexType[bucketCountersSize]); size_t maxSegmentSize = 0; for (size_t n = range.begin(), N = range.end(); n != N; ++n) { maxSegmentSize = std::max(maxSegmentSize, 
mIndexSegments[n]->size()); } IndexArray bucketIndices(new PointIndexType[maxSegmentSize]); for (size_t n = range.begin(), N = range.end(); n != N; ++n) { memset(bucketCounters.get(), 0, sizeof(PointIndexType) * bucketCountersSize); const size_t segmentSize = mOffsetSegments[n]->size(); PointIndexType* offsets = mOffsetSegments[n]->data(); // Count the number of points per bucket and assign a local bucket index // to each point. for (size_t i = 0; i < segmentSize; ++i) { bucketIndices[i] = bucketCounters[offsets[i]]++; } PointIndexType nonemptyBucketCount = 0; for (size_t i = 0; i < bucketCountersSize; ++i) { nonemptyBucketCount += static_cast<PointIndexType>(bucketCounters[i] != 0); } IndexArray& pageOffsets = mPageOffsetArrays[n]; pageOffsets.reset(new PointIndexType[nonemptyBucketCount + 1]); pageOffsets[0] = nonemptyBucketCount + 1; // stores array size in first element IndexArray& pageIndices = mPageIndexArrays[n]; pageIndices.reset(new PointIndexType[nonemptyBucketCount]); // Compute bucket counter prefix sum PointIndexType count = 0, idx = 0; for (size_t i = 0; i < bucketCountersSize; ++i) { if (bucketCounters[i] != 0) { pageIndices[idx] = static_cast<PointIndexType>(i); pageOffsets[idx+1] = bucketCounters[i]; bucketCounters[i] = count; count += pageOffsets[idx+1]; ++idx; } } PointIndexType* indices = mIndexSegments[n]->data(); const tbb::blocked_range<size_t> segmentRange(0, segmentSize); // Compute final point order by incrementing the local bucket point index // with the prefix sum offset. 
tbb::parallel_for(segmentRange, ComputePointOrderOp<PointIndexType>( bucketIndices.get(), bucketCounters.get(), offsets)); tbb::parallel_for(segmentRange, CreateOrderedPointIndexArrayOp<PointIndexType>( offsets, bucketIndices.get(), indices)); mIndexSegments[n]->clear(); // clear data } } SegmentPtr * const mIndexSegments; SegmentPtr * const mOffsetSegments; IndexArray * const mPageOffsetArrays; IndexArray * const mPageIndexArrays; Index const mBinVolume; }; // struct OrderSegmentsOp //////////////////////////////////////// /// @brief Segment points using one level of least significant digit radix bins. template<typename PointIndexType, typename VoxelOffsetType, typename PointArray> inline void binAndSegment( const PointArray& points, const math::Transform& xform, std::unique_ptr<typename Array<PointIndexType>::Ptr[]>& indexSegments, std::unique_ptr<typename Array<PointIndexType>::Ptr[]>& offsetSegments, std::vector<Coord>& coords, const Index binLog2Dim, const Index bucketLog2Dim, VoxelOffsetType* voxelOffsets = nullptr, bool cellCenteredTransform = true) { using IndexPair = std::pair<PointIndexType, PointIndexType>; using IndexPairList = std::deque<IndexPair>; using IndexPairListPtr = std::shared_ptr<IndexPairList>; using IndexPairListMap = std::map<Coord, IndexPairListPtr>; using IndexPairListMapPtr = std::shared_ptr<IndexPairListMap>; size_t numTasks = 1, numThreads = size_t(tbb::task_scheduler_init::default_num_threads()); if (points.size() > (numThreads * 2)) numTasks = numThreads * 2; else if (points.size() > numThreads) numTasks = numThreads; std::unique_ptr<IndexPairListMapPtr[]> bins(new IndexPairListMapPtr[numTasks]); using BinOp = BinPointIndicesOp<PointArray, PointIndexType, VoxelOffsetType>; tbb::parallel_for(tbb::blocked_range<size_t>(0, numTasks), BinOp(bins.get(), points, voxelOffsets, xform, binLog2Dim, bucketLog2Dim, numTasks, cellCenteredTransform)); std::set<Coord> uniqueCoords; for (size_t i = 0; i < numTasks; ++i) { IndexPairListMap& idxMap = 
*bins[i]; for (typename IndexPairListMap::iterator it = idxMap.begin(); it != idxMap.end(); ++it) { uniqueCoords.insert(it->first); } } coords.assign(uniqueCoords.begin(), uniqueCoords.end()); uniqueCoords.clear(); size_t segmentCount = coords.size(); using SegmentPtr = typename Array<PointIndexType>::Ptr; indexSegments.reset(new SegmentPtr[segmentCount]); offsetSegments.reset(new SegmentPtr[segmentCount]); using MergeOp = MergeBinsOp<PointIndexType>; tbb::parallel_for(tbb::blocked_range<size_t>(0, segmentCount), MergeOp(bins.get(), indexSegments.get(), offsetSegments.get(), &coords[0], numTasks)); } template<typename PointIndexType, typename VoxelOffsetType, typename PointArray> inline void partition( const PointArray& points, const math::Transform& xform, const Index bucketLog2Dim, std::unique_ptr<PointIndexType[]>& pointIndices, std::unique_ptr<PointIndexType[]>& pageOffsets, std::unique_ptr<Coord[]>& pageCoordinates, PointIndexType& pageCount, std::unique_ptr<VoxelOffsetType[]>& voxelOffsets, bool recordVoxelOffsets, bool cellCenteredTransform) { using SegmentPtr = typename Array<PointIndexType>::Ptr; if (recordVoxelOffsets) voxelOffsets.reset(new VoxelOffsetType[points.size()]); else voxelOffsets.reset(); const Index binLog2Dim = 5u; // note: Bins span a (2^(binLog2Dim + bucketLog2Dim))^3 voxel region, // i.e. bucketLog2Dim = 3 and binLog2Dim = 5 corresponds to a // (2^8)^3 = 256^3 voxel region. 
std::vector<Coord> segmentCoords; std::unique_ptr<SegmentPtr[]> indexSegments; std::unique_ptr<SegmentPtr[]> offsetSegments; binAndSegment<PointIndexType, VoxelOffsetType, PointArray>(points, xform, indexSegments, offsetSegments, segmentCoords, binLog2Dim, bucketLog2Dim, voxelOffsets.get(), cellCenteredTransform); size_t numSegments = segmentCoords.size(); const tbb::blocked_range<size_t> segmentRange(0, numSegments); using IndexArray = std::unique_ptr<PointIndexType[]>; std::unique_ptr<IndexArray[]> pageOffsetArrays(new IndexArray[numSegments]); std::unique_ptr<IndexArray[]> pageIndexArrays(new IndexArray[numSegments]); const Index binVolume = 1u << (3u * binLog2Dim); tbb::parallel_for(segmentRange, OrderSegmentsOp<PointIndexType> (indexSegments.get(), offsetSegments.get(), pageOffsetArrays.get(), pageIndexArrays.get(), binVolume)); indexSegments.reset(); std::vector<Index> segmentOffsets; segmentOffsets.reserve(numSegments); pageCount = 0; for (size_t n = 0; n < numSegments; ++n) { segmentOffsets.push_back(pageCount); pageCount += pageOffsetArrays[n][0] - 1; } pageOffsets.reset(new PointIndexType[pageCount + 1]); PointIndexType count = 0; for (size_t n = 0, idx = 0; n < numSegments; ++n) { PointIndexType* offsets = pageOffsetArrays[n].get(); size_t size = size_t(offsets[0]); for (size_t i = 1; i < size; ++i) { pageOffsets[idx++] = count; count += offsets[i]; } } pageOffsets[pageCount] = count; pointIndices.reset(new PointIndexType[points.size()]); std::vector<PointIndexType*> indexArray; indexArray.reserve(numSegments); PointIndexType* index = pointIndices.get(); for (size_t n = 0; n < numSegments; ++n) { indexArray.push_back(index); index += offsetSegments[n]->size(); } // compute leaf node origin for each page pageCoordinates.reset(new Coord[pageCount]); tbb::parallel_for(segmentRange, [&](tbb::blocked_range<size_t>& range) { for (size_t n = range.begin(); n < range.end(); n++) { Index segmentOffset = segmentOffsets[n]; PointIndexType* indices = 
pageIndexArrays[n].get(); const Coord& segmentCoord = segmentCoords[n]; // segment size stored in the first value of the offset array const size_t segmentSize = pageOffsetArrays[n][0] - 1; tbb::blocked_range<size_t> copyRange(0, segmentSize); tbb::parallel_for(copyRange, [&](tbb::blocked_range<size_t>& r) { for (size_t i = r.begin(); i < r.end(); i++) { Index pageIndex = indices[i]; Coord& ijk = pageCoordinates[segmentOffset+i]; ijk[0] = pageIndex >> (2 * binLog2Dim); Index pageIndexModulo = pageIndex - (ijk[0] << (2 * binLog2Dim)); ijk[1] = pageIndexModulo >> binLog2Dim; ijk[2] = pageIndexModulo - (ijk[1] << binLog2Dim); ijk = (ijk << bucketLog2Dim) + segmentCoord; } } ); } } ); // move segment data tbb::parallel_for(segmentRange, MoveSegmentDataOp<PointIndexType>(indexArray, offsetSegments.get())); } } // namespace point_partitioner_internal //////////////////////////////////////// template<typename PointIndexType, Index BucketLog2Dim> inline PointPartitioner<PointIndexType, BucketLog2Dim>::PointPartitioner() : mPointIndices(nullptr) , mVoxelOffsets(nullptr) , mPageOffsets(nullptr) , mPageCoordinates(nullptr) , mPageCount(0) , mUsingCellCenteredTransform(true) { } template<typename PointIndexType, Index BucketLog2Dim> inline void PointPartitioner<PointIndexType, BucketLog2Dim>::clear() { mPageCount = 0; mUsingCellCenteredTransform = true; mPointIndices.reset(); mVoxelOffsets.reset(); mPageOffsets.reset(); mPageCoordinates.reset(); } template<typename PointIndexType, Index BucketLog2Dim> inline void PointPartitioner<PointIndexType, BucketLog2Dim>::swap(PointPartitioner& rhs) { const IndexType tmpLhsPageCount = mPageCount; mPageCount = rhs.mPageCount; rhs.mPageCount = tmpLhsPageCount; mPointIndices.swap(rhs.mPointIndices); mVoxelOffsets.swap(rhs.mVoxelOffsets); mPageOffsets.swap(rhs.mPageOffsets); mPageCoordinates.swap(rhs.mPageCoordinates); bool lhsCellCenteredTransform = mUsingCellCenteredTransform; mUsingCellCenteredTransform = rhs.mUsingCellCenteredTransform; 
rhs.mUsingCellCenteredTransform = lhsCellCenteredTransform; } template<typename PointIndexType, Index BucketLog2Dim> inline typename PointPartitioner<PointIndexType, BucketLog2Dim>::IndexIterator PointPartitioner<PointIndexType, BucketLog2Dim>::indices(size_t n) const { assert(bool(mPointIndices) && bool(mPageCount)); return IndexIterator( mPointIndices.get() + mPageOffsets[n], mPointIndices.get() + mPageOffsets[n + 1]); } template<typename PointIndexType, Index BucketLog2Dim> template<typename PointArray> inline void PointPartitioner<PointIndexType, BucketLog2Dim>::construct( const PointArray& points, const math::Transform& xform, bool voxelOrder, bool recordVoxelOffsets, bool cellCenteredTransform) { mUsingCellCenteredTransform = cellCenteredTransform; point_partitioner_internal::partition(points, xform, BucketLog2Dim, mPointIndices, mPageOffsets, mPageCoordinates, mPageCount, mVoxelOffsets, (voxelOrder || recordVoxelOffsets), cellCenteredTransform); const tbb::blocked_range<size_t> pageRange(0, mPageCount); if (mVoxelOffsets && voxelOrder) { tbb::parallel_for(pageRange, point_partitioner_internal::VoxelOrderOp< IndexType, BucketLog2Dim>(mPointIndices, mPageOffsets, mVoxelOffsets)); } if (mVoxelOffsets && !recordVoxelOffsets) { mVoxelOffsets.reset(); } } template<typename PointIndexType, Index BucketLog2Dim> template<typename PointArray> inline typename PointPartitioner<PointIndexType, BucketLog2Dim>::Ptr PointPartitioner<PointIndexType, BucketLog2Dim>::create( const PointArray& points, const math::Transform& xform, bool voxelOrder, bool recordVoxelOffsets, bool cellCenteredTransform) { Ptr ret(new PointPartitioner()); ret->construct(points, xform, voxelOrder, recordVoxelOffsets, cellCenteredTransform); return ret; } //////////////////////////////////////// } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_POINT_PARTITIONER_HAS_BEEN_INCLUDED
36,569
C
33.828571
99
0.619213
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetPlatonic.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @author Ken Museth /// /// @file LevelSetPlatonic.h /// /// @brief Generate a narrow-band level sets of the five platonic solids. /// /// @note By definition a level set has a fixed narrow band width /// (the half width is defined by LEVEL_SET_HALF_WIDTH in Types.h), /// whereas an SDF can have a variable narrow band width. #ifndef OPENVDB_TOOLS_LEVELSETPLATONIC_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_LEVELSETPLATONIC_HAS_BEEN_INCLUDED #include <openvdb/Grid.h> #include <openvdb/Types.h> #include <openvdb/math/Math.h> #include <openvdb/math/Transform.h> #include <openvdb/tools/MeshToVolume.h> #include <openvdb/util/NullInterrupter.h> #include <type_traits> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a platonic solid. /// /// @param faceCount number of faces of the platonic solid, i.e. 4, 6, 8, 12 or 20 /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// @param interrupt a pointer adhering to the util::NullInterrupter interface /// /// @details Faces: TETRAHEDRON=4, CUBE=6, OCTAHEDRON=8, DODECAHEDRON=12, ICOSAHEDRON=20 /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetPlatonic( int faceCount, // 4, 6, 8, 12 or 20 float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH), InterruptT* interrupt = nullptr); /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a platonic solid. 
/// /// @param faceCount number of faces of the platonic solid, i.e. 4, 6, 8, 12 or 20 /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// /// @details Faces: TETRAHEDRON=4, CUBE=6, OCTAHEDRON=8, DODECAHEDRON=12, ICOSAHEDRON=20 /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType> typename GridType::Ptr createLevelSetPlatonic( int faceCount,// 4, 6, 8, 12 or 20 float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH)) { util::NullInterrupter tmp; return createLevelSetPlatonic<GridType>(faceCount, scale, center, voxelSize, halfWidth, &tmp); } //////////////////////////////////////////////////////////////////////////////// /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a tetrahedron. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// @param interrupt a pointer adhering to the util::NullInterrupter interface /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetTetrahedron( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH), InterruptT* interrupt = nullptr) { return createLevelSetPlatonic<GridType, InterruptT>( 4, scale, center, voxelSize, halfWidth, interrupt); } /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a tetrahedron. 
/// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType> typename GridType::Ptr createLevelSetTetrahedron( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH)) { util::NullInterrupter tmp; return createLevelSetPlatonic<GridType>(4, scale, center, voxelSize, halfWidth, &tmp); } //////////////////////////////////////////////////////////////////////////////// /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a cube. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// @param interrupt a pointer adhering to the util::NullInterrupter interface /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetCube( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH), InterruptT* interrupt = nullptr) { return createLevelSetPlatonic<GridType>(6, scale, center, voxelSize, halfWidth, interrupt); } /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a cube. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// /// @note @c GridType::ValueType must be a floating-point scalar. 
template<typename GridType> typename GridType::Ptr createLevelSetCube( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH)) { util::NullInterrupter tmp; return createLevelSetPlatonic<GridType>(6, scale, center, voxelSize, halfWidth, &tmp); } //////////////////////////////////////////////////////////////////////////////// /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of an octahedron. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// @param interrupt a pointer adhering to the util::NullInterrupter interface /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetOctahedron( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH), InterruptT* interrupt = nullptr) { return createLevelSetPlatonic<GridType>(8, scale, center, voxelSize, halfWidth, interrupt); } /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of an octahedron. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// /// @note @c GridType::ValueType must be a floating-point scalar. 
template<typename GridType> typename GridType::Ptr createLevelSetOctahedron( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH)) { util::NullInterrupter tmp; return createLevelSetPlatonic<GridType>(8, scale, center, voxelSize, halfWidth, &tmp); } //////////////////////////////////////////////////////////////////////////////// /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a dodecahedron. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// @param interrupt a pointer adhering to the util::NullInterrupter interface /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetDodecahedron( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH), InterruptT* interrupt = nullptr) { return createLevelSetPlatonic<GridType>(12, scale, center, voxelSize, halfWidth, interrupt); } /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a dodecahedron. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// /// @note @c GridType::ValueType must be a floating-point scalar. 
template<typename GridType> typename GridType::Ptr createLevelSetDodecahedron( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH)) { util::NullInterrupter tmp; return createLevelSetPlatonic<GridType>(12, scale, center, voxelSize, halfWidth, &tmp); } //////////////////////////////////////////////////////////////////////////////// /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of an icosahedron. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// @param interrupt a pointer adhering to the util::NullInterrupter interface /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetIcosahedron( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH), InterruptT* interrupt = nullptr) { return createLevelSetPlatonic<GridType>(20, scale, center, voxelSize, halfWidth, interrupt); } /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of an icosahedron. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// /// @note @c GridType::ValueType must be a floating-point scalar. 
template<typename GridType> typename GridType::Ptr createLevelSetIcosahedron( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH)) { util::NullInterrupter tmp; return createLevelSetPlatonic<GridType>(20, scale, center, voxelSize, halfWidth, &tmp); } //////////////////////////////////////////////////////////////////////////////// template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetPlatonic(int faceCount,float scale, const Vec3f& center, float voxelSize, float halfWidth, InterruptT *interrupt) { // GridType::ValueType is required to be a floating-point scalar. static_assert(std::is_floating_point<typename GridType::ValueType>::value, "level set grids must have scalar, floating-point value types"); const math::Transform::Ptr xform = math::Transform::createLinearTransform( voxelSize ); std::vector<Vec3f> vtx; std::vector<Vec3I> tri; std::vector<Vec4I> qua; if (faceCount == 4) {// Tetrahedron vtx.push_back( Vec3f( 0.0f, 1.0f, 0.0f) ); vtx.push_back( Vec3f(-0.942810297f, -0.333329707f, 0.0f) ); vtx.push_back( Vec3f( 0.471405149f, -0.333329707f, 0.816497624f) ); vtx.push_back( Vec3f( 0.471405149f, -0.333329707f, -0.816497624f) ); tri.push_back( Vec3I(0, 2, 3) ); tri.push_back( Vec3I(0, 3, 1) ); tri.push_back( Vec3I(0, 1, 2) ); tri.push_back( Vec3I(1, 3, 2) ); } else if (faceCount == 6) {// Cube vtx.push_back( Vec3f(-0.5f, -0.5f, -0.5f) ); vtx.push_back( Vec3f( 0.5f, -0.5f, -0.5f) ); vtx.push_back( Vec3f( 0.5f, -0.5f, 0.5f) ); vtx.push_back( Vec3f(-0.5f, -0.5f, 0.5f) ); vtx.push_back( Vec3f(-0.5f, 0.5f, -0.5f) ); vtx.push_back( Vec3f( 0.5f, 0.5f, -0.5f) ); vtx.push_back( Vec3f( 0.5f, 0.5f, 0.5f) ); vtx.push_back( Vec3f(-0.5f, 0.5f, 0.5f) ); qua.push_back( Vec4I(1, 0, 4, 5) ); qua.push_back( Vec4I(2, 1, 5, 6) ); qua.push_back( Vec4I(3, 2, 6, 7) ); qua.push_back( Vec4I(0, 3, 7, 4) ); qua.push_back( Vec4I(2, 3, 0, 1) ); qua.push_back( Vec4I(5, 4, 7, 6) ); } else if 
(faceCount == 8) {// Octahedron vtx.push_back( Vec3f( 0.0f, 0.0f, -1.0f) ); vtx.push_back( Vec3f( 1.0f, 0.0f, 0.0f) ); vtx.push_back( Vec3f( 0.0f, 0.0f, 1.0f) ); vtx.push_back( Vec3f(-1.0f, 0.0f, 0.0f) ); vtx.push_back( Vec3f( 0.0f,-1.0f, 0.0f) ); vtx.push_back( Vec3f( 0.0f, 1.0f, 0.0f) ); tri.push_back( Vec3I(0, 4, 3) ); tri.push_back( Vec3I(0, 1, 4) ); tri.push_back( Vec3I(1, 2, 4) ); tri.push_back( Vec3I(2, 3, 4) ); tri.push_back( Vec3I(0, 3, 5) ); tri.push_back( Vec3I(0, 5, 1) ); tri.push_back( Vec3I(1, 5, 2) ); tri.push_back( Vec3I(2, 5, 3) ); } else if (faceCount == 12) {// Dodecahedron vtx.push_back( Vec3f( 0.354437858f, 0.487842113f, -0.789344311f) ); vtx.push_back( Vec3f( 0.573492587f, -0.186338872f, -0.78934437f) ); vtx.push_back( Vec3f( 0.0f, -0.603005826f, -0.78934443f) ); vtx.push_back( Vec3f(-0.573492587f, -0.186338872f, -0.78934437f) ); vtx.push_back( Vec3f(-0.354437858f, 0.487842113f, -0.789344311f) ); vtx.push_back( Vec3f(-0.573492587f, 0.789345026f, -0.186338797f) ); vtx.push_back( Vec3f(-0.927930415f, -0.301502913f, -0.186338872f) ); vtx.push_back( Vec3f( 0.0f, -0.975683928f, -0.186338902f) ); vtx.push_back( Vec3f( 0.927930415f, -0.301502913f, -0.186338872f) ); vtx.push_back( Vec3f( 0.573492587f, 0.789345026f, -0.186338797f) ); vtx.push_back( Vec3f( 0.0f, 0.975683868f, 0.186338902f) ); vtx.push_back( Vec3f(-0.927930415f, 0.301502913f, 0.186338872f) ); vtx.push_back( Vec3f(-0.573492587f, -0.789345026f, 0.186338797f) ); vtx.push_back( Vec3f( 0.573492587f, -0.789345026f, 0.186338797f) ); vtx.push_back( Vec3f( 0.927930415f, 0.301502913f, 0.186338872f) ); vtx.push_back( Vec3f( 0.0f, 0.603005826f, 0.78934443f) ); vtx.push_back( Vec3f( 0.573492587f, 0.186338872f, 0.78934437f) ); vtx.push_back( Vec3f( 0.354437858f, -0.487842113f, 0.789344311f) ); vtx.push_back( Vec3f(-0.354437858f, -0.487842113f, 0.789344311f) ); vtx.push_back( Vec3f(-0.573492587f, 0.186338872f, 0.78934437f) ); qua.push_back( Vec4I(0, 1, 2, 3) ); tri.push_back( Vec3I(0, 3, 4) ); 
qua.push_back( Vec4I(0, 4, 5, 10) ); tri.push_back( Vec3I(0, 10, 9) ); qua.push_back( Vec4I(0, 9, 14, 8) ); tri.push_back( Vec3I(0, 8, 1) ); qua.push_back( Vec4I(1, 8, 13, 7) ); tri.push_back( Vec3I(1, 7, 2) ); qua.push_back( Vec4I(2, 7, 12, 6) ); tri.push_back( Vec3I(2, 6, 3) ); qua.push_back( Vec4I(3, 6, 11, 5) ); tri.push_back( Vec3I(3, 5, 4) ); qua.push_back( Vec4I(5, 11, 19, 15) ); tri.push_back( Vec3I(5, 15, 10) ); qua.push_back( Vec4I(6, 12, 18, 19) ); tri.push_back( Vec3I(6, 19, 11) ); qua.push_back( Vec4I(7, 13, 17, 18) ); tri.push_back( Vec3I(7, 18, 12) ); qua.push_back( Vec4I(8, 14, 16, 17) ); tri.push_back( Vec3I(8, 17, 13) ); qua.push_back( Vec4I(9, 10, 15, 16) ); tri.push_back( Vec3I(9, 16, 14) ); qua.push_back( Vec4I(15, 19, 18, 17) ); tri.push_back( Vec3I(15, 17, 16) ); } else if (faceCount == 20) {// Icosahedron vtx.push_back( Vec3f(0.0f, 0.0f, -1.0f) ); vtx.push_back( Vec3f(0.0f, 0.894427359f, -0.447213143f) ); vtx.push_back( Vec3f(0.850650847f, 0.276393682f, -0.447213203f) ); vtx.push_back( Vec3f(0.525731206f, -0.723606944f, -0.447213262f) ); vtx.push_back( Vec3f(-0.525731206f, -0.723606944f, -0.447213262f) ); vtx.push_back( Vec3f(-0.850650847f, 0.276393682f, -0.447213203f) ); vtx.push_back( Vec3f(-0.525731206f, 0.723606944f, 0.447213262f) ); vtx.push_back( Vec3f(-0.850650847f, -0.276393682f, 0.447213203f) ); vtx.push_back( Vec3f(0.0f, -0.894427359f, 0.447213143f) ); vtx.push_back( Vec3f(0.850650847f, -0.276393682f, 0.447213203f) ); vtx.push_back( Vec3f(0.525731206f, 0.723606944f, 0.447213262f) ); vtx.push_back( Vec3f(0.0f, 0.0f, 1.0f) ); tri.push_back( Vec3I( 2, 0, 1) ); tri.push_back( Vec3I( 3, 0, 2) ); tri.push_back( Vec3I( 4, 0, 3) ); tri.push_back( Vec3I( 5, 0, 4) ); tri.push_back( Vec3I( 1, 0, 5) ); tri.push_back( Vec3I( 6, 1, 5) ); tri.push_back( Vec3I( 7, 5, 4) ); tri.push_back( Vec3I( 8, 4, 3) ); tri.push_back( Vec3I( 9, 3, 2) ); tri.push_back( Vec3I(10, 2, 1) ); tri.push_back( Vec3I(10, 1, 6) ); tri.push_back( Vec3I( 6, 5, 7) ); 
tri.push_back( Vec3I( 7, 4, 8) ); tri.push_back( Vec3I( 8, 3, 9) ); tri.push_back( Vec3I( 9, 2, 10) ); tri.push_back( Vec3I( 6, 11, 10) ); tri.push_back( Vec3I(10, 11, 9) ); tri.push_back( Vec3I( 9, 11, 8) ); tri.push_back( Vec3I( 8, 11, 7) ); tri.push_back( Vec3I( 7, 11, 6) ); } else { OPENVDB_THROW(RuntimeError, "Invalid face count"); } // Apply scale and translation to all the vertices for ( size_t i = 0; i<vtx.size(); ++i ) vtx[i] = scale * vtx[i] + center; typename GridType::Ptr grid; if (interrupt == nullptr) { util::NullInterrupter tmp; grid = meshToLevelSet<GridType>(tmp, *xform, vtx, tri, qua, halfWidth); } else { grid = meshToLevelSet<GridType>(*interrupt, *xform, vtx, tri, qua, halfWidth); } return grid; } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_LEVELSETPLATONIC_HAS_BEEN_INCLUDED
19,566
C
39.849687
98
0.634417
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/PointsToMask.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @author Ken Museth /// /// @file tools/PointsToMask.h /// /// @brief This tool produces a grid where every voxel that contains a /// point is active. It employes thread-local storage for best performance. /// /// The @c PointListT template argument below refers to any class /// with the following interface (see unittest/TestPointsToMask.cc /// and SOP_OpenVDB_From_Particles.cc for practical examples): /// @code /// /// class PointList { /// ... /// public: /// /// // Return the total number of particles in list. /// size_t size() const; /// /// // Get the world space position of the nth particle. /// void getPos(size_t n, Vec3R& xyz) const; /// }; /// @endcode /// /// @note See unittest/TestPointsToMask.cc for an example. /// /// The @c InterruptT template argument below refers to any class /// with the following interface: /// @code /// class Interrupter { /// ... /// public: /// void start(const char* name = nullptr) // called when computations begin /// void end() // called when computations end /// bool wasInterrupted(int percent = -1) // return true to break computation /// }; /// @endcode /// /// @note If no template argument is provided for this InterruptT /// the util::NullInterrupter is used which implies that all /// interrupter calls are no-ops (i.e. incurs no computational overhead). 
#ifndef OPENVDB_TOOLS_POINTSTOMASK_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_POINTSTOMASK_HAS_BEEN_INCLUDED #include <tbb/enumerable_thread_specific.h> #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <tbb/blocked_range.h> #include <openvdb/openvdb.h> // for MaskGrid #include <openvdb/Grid.h> #include <openvdb/Types.h> #include <openvdb/util/NullInterrupter.h> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { // Forward declaration of main class template<typename GridT = MaskGrid, typename InterrupterT = util::NullInterrupter> class PointsToMask; /// @brief Makes every voxel of the @c grid active if it contains a point. /// /// @param points points that active the voxels of @c grid /// @param grid on out its voxels with points are active template<typename PointListT, typename GridT> inline void maskPoints(const PointListT& points, GridT& grid) { PointsToMask<GridT, util::NullInterrupter> tmp(grid, nullptr); tmp.addPoints(points); } /// @brief Return a MaskGrid where each binary voxel value /// is on if the voxel contains one (or more) points (i.e. /// the 3D position of a point is closer to this voxel than /// any other voxels). /// /// @param points points that active the voxels in the returned grid. /// @param xform transform from world space to voxels in grid space. template<typename PointListT> inline MaskGrid::Ptr createPointMask(const PointListT& points, const math::Transform& xform) { MaskGrid::Ptr grid = createGrid<MaskGrid>( false ); grid->setTransform( xform.copy() ); maskPoints( points, *grid ); return grid; } //////////////////////////////////////// /// @brief Makes every voxel of a grid active if it contains a point. 
template<typename GridT, typename InterrupterT> class PointsToMask { public: using ValueT = typename GridT::ValueType; /// @brief Constructor from a grid and optional interrupter /// /// @param grid Grid whoes voxels will have their state activated by points. /// @param interrupter Optional interrupter to prematurely terminate execution. explicit PointsToMask(GridT& grid, InterrupterT* interrupter = nullptr) : mGrid(&grid) , mInterrupter(interrupter) { } /// @brief Activates the state of any voxel in the input grid that contains a point. /// /// @param points List of points that active the voxels in the input grid. /// @param grainSize Set the grain-size used for multi-threading. A value of 0 /// disables multi-threading! template<typename PointListT> void addPoints(const PointListT& points, size_t grainSize = 1024) { if (mInterrupter) mInterrupter->start("PointsToMask: adding points"); if (grainSize > 0) { typename GridT::Ptr examplar = mGrid->copyWithNewTree(); PoolType pool( *examplar );//thread local storage pool of grids AddPoints<PointListT> tmp(points, pool, grainSize, *this ); if ( this->interrupt() ) return; ReducePool reducePool(pool, mGrid, size_t(0)); } else { const math::Transform& xform = mGrid->transform(); typename GridT::Accessor acc = mGrid->getAccessor(); Vec3R wPos; for (size_t i = 0, n = points.size(); i < n; ++i) { if ( this->interrupt() ) break; points.getPos(i, wPos); acc.setValueOn( xform.worldToIndexCellCentered( wPos ) ); } } if (mInterrupter) mInterrupter->end(); } private: // Disallow copy construction and copy by assignment! 
PointsToMask(const PointsToMask&);// not implemented PointsToMask& operator=(const PointsToMask&);// not implemented bool interrupt() const { if (mInterrupter && util::wasInterrupted(mInterrupter)) { tbb::task::self().cancel_group_execution(); return true; } return false; } // Private struct that implements concurrent thread-local // insersion of points into a grid using PoolType = tbb::enumerable_thread_specific<GridT>; template<typename PointListT> struct AddPoints; // Private class that implements concurrent reduction of a thread-local pool struct ReducePool; GridT* mGrid; InterrupterT* mInterrupter; };// PointsToMask // Private member class that implements concurrent thread-local // insersion of points into a grid template<typename GridT, typename InterrupterT> template<typename PointListT> struct PointsToMask<GridT, InterrupterT>::AddPoints { AddPoints(const PointListT& points, PoolType& pool, size_t grainSize, const PointsToMask& parent) : mPoints(&points) , mParent(&parent) , mPool(&pool) { tbb::parallel_for(tbb::blocked_range<size_t>(0, mPoints->size(), grainSize), *this); } void operator()(const tbb::blocked_range<size_t>& range) const { if (mParent->interrupt()) return; GridT& grid = mPool->local(); const math::Transform& xform = grid.transform(); typename GridT::Accessor acc = grid.getAccessor(); Vec3R wPos; for (size_t i=range.begin(), n=range.end(); i!=n; ++i) { mPoints->getPos(i, wPos); acc.setValueOn( xform.worldToIndexCellCentered( wPos ) ); } } const PointListT* mPoints; const PointsToMask* mParent; PoolType* mPool; };// end of private member class AddPoints // Private member class that implements concurrent reduction of a thread-local pool template<typename GridT, typename InterrupterT> struct PointsToMask<GridT, InterrupterT>::ReducePool { using VecT = std::vector<GridT*>; using IterT = typename VecT::iterator; using RangeT = tbb::blocked_range<IterT>; ReducePool(PoolType& pool, GridT* grid, size_t grainSize = 1) : mOwnsGrid(false) , mGrid(grid) 
{ if (grainSize == 0) { for (typename PoolType::const_iterator i = pool.begin(); i != pool.end(); ++i) { mGrid->topologyUnion(*i); } } else { VecT grids( pool.size() ); typename PoolType::iterator i = pool.begin(); for (size_t j=0; j != pool.size(); ++i, ++j) grids[j] = &(*i); tbb::parallel_reduce( RangeT( grids.begin(), grids.end(), grainSize ), *this ); } } ReducePool(const ReducePool&, tbb::split) : mOwnsGrid(true) , mGrid(new GridT()) { } ~ReducePool() { if (mOwnsGrid) delete mGrid; } void operator()(const RangeT& r) { for (IterT i=r.begin(); i!=r.end(); ++i) mGrid->topologyUnion( *(*i) ); } void join(ReducePool& other) { mGrid->topologyUnion(*other.mGrid); } const bool mOwnsGrid; GridT* mGrid; };// end of private member class ReducePool } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_POINTSTOMASK_HAS_BEEN_INCLUDED
8,594
C
32.972332
92
0.646847
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/VelocityFields.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
///////////////////////////////////////////////////////////////////////////
//
/// @author Ken Museth
///
/// @file VelocityFields.h
///
/// @brief Defines two simple wrapper classes for advection velocity
///        fields as well as VelocitySampler and VelocityIntegrator
///
///
/// @details DiscreteField wraps a velocity grid and EnrightField is mostly
///          intended for debugging (it's an analytical divergence free and
///          periodic field). They both share the same API required by the
///          LevelSetAdvection class defined in LevelSetAdvect.h. Thus, any
///          class with this API should work with LevelSetAdvection.
///
/// @warning Note the Field wrapper classes below always assume the velocity
///          is represented in the world-frame of reference. For DiscreteField
///          this implies the input grid must contain velocities in world
///          coordinates.

#ifndef OPENVDB_TOOLS_VELOCITY_FIELDS_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_VELOCITY_FIELDS_HAS_BEEN_INCLUDED

#include <tbb/parallel_reduce.h>
#include <openvdb/Platform.h>
#include <openvdb/openvdb.h>
#include "Interpolation.h" // for Sampler, etc.
#include <openvdb/math/FiniteDifference.h>
#include <type_traits> // for std::is_floating_point (was previously a transitive include)

namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {

/// @brief Thin wrapper class for a velocity grid
/// @note Consider replacing BoxSampler with StaggeredBoxSampler
template <typename VelGridT, typename Interpolator = BoxSampler>
class DiscreteField
{
public:
    typedef typename VelGridT::ValueType VectorType;
    typedef typename VectorType::ValueType ValueType;
    static_assert(std::is_floating_point<ValueType>::value,
        "DiscreteField requires a floating point grid.");

    DiscreteField(const VelGridT &vel)
        : mAccessor(vel.tree())
        , mTransform(&vel.transform())
    {
    }

    /// @brief Copy constructor
    DiscreteField(const DiscreteField& other)
        : mAccessor(other.mAccessor.tree())
        , mTransform(other.mTransform)
    {
    }

    /// @return const reference to the transform between world and index space
    /// @note Use this method to determine if a client grid is
    /// aligned with the coordinate space of the velocity grid.
    const math::Transform& transform() const { return *mTransform; }

    /// @return the interpolated velocity at the world space position xyz
    ///
    /// @warning Not threadsafe since it uses a ValueAccessor! So use
    /// one instance per thread (which is fine since its lightweight).
    inline VectorType operator() (const Vec3d& xyz, ValueType/*dummy time*/) const
    {
        return Interpolator::sample(mAccessor, mTransform->worldToIndex(xyz));
    }

    /// @return the velocity at the coordinate space position ijk
    ///
    /// @warning Not threadsafe since it uses a ValueAccessor! So use
    /// one instance per thread (which is fine since its lightweight).
    inline VectorType operator() (const Coord& ijk, ValueType/*dummy time*/) const
    {
        return mAccessor.getValue(ijk);
    }

private:
    const typename VelGridT::ConstAccessor mAccessor;//Not thread-safe
    const math::Transform* mTransform;

}; // end of DiscreteField

///////////////////////////////////////////////////////////////////////

/// @brief Analytical, divergence-free and periodic velocity field
/// @note Primarily intended for debugging!
/// @warning This analytical velocity only produce meaningful values
/// in the unit box in world space. In other words make sure any level
/// set surface is fully enclosed in the axis aligned bounding box
/// spanning 0->1 in world units.
template <typename ScalarT = float>
class EnrightField
{
public:
    typedef ScalarT             ValueType;
    typedef math::Vec3<ScalarT> VectorType;
    static_assert(std::is_floating_point<ScalarT>::value,
        "EnrightField requires a floating point grid.");

    EnrightField() {}

    /// @return const reference to the identity transform between world and index space
    /// @note Use this method to determine if a client grid is
    /// aligned with the coordinate space of this velocity field
    math::Transform transform() const { return math::Transform(); }

    /// @return the velocity in world units, evaluated at the world
    /// position xyz and at the specified time
    inline VectorType operator() (const Vec3d& xyz, ValueType time) const;

    /// @return the velocity at the coordinate space position ijk
    inline VectorType operator() (const Coord& ijk, ValueType time) const
    {
        return (*this)(ijk.asVec3d(), time);
    }
}; // end of EnrightField

template <typename ScalarT>
inline math::Vec3<ScalarT>
EnrightField<ScalarT>::operator() (const Vec3d& xyz, ValueType time) const
{
    const ScalarT pi = math::pi<ScalarT>();
    const ScalarT phase = pi / ScalarT(3);
    const ScalarT Px =  pi * ScalarT(xyz[0]), Py = pi * ScalarT(xyz[1]), Pz = pi * ScalarT(xyz[2]);
    const ScalarT tr =  math::Cos(ScalarT(time) * phase);
    const ScalarT a  =  math::Sin(ScalarT(2)*Py);
    const ScalarT b  = -math::Sin(ScalarT(2)*Px);
    const ScalarT c  =  math::Sin(ScalarT(2)*Pz);

    return math::Vec3<ScalarT>(
        tr * ( ScalarT(2) * math::Pow2(math::Sin(Px)) * a * c ),
        tr * ( b * math::Pow2(math::Sin(Py)) * c ),
        tr * ( b * a * math::Pow2(math::Sin(Pz)) ));
}

///////////////////////////////////////////////////////////////////////

/// Class to hold a Vec3 field interpreted as a velocity field.
/// Primarily exists to provide a method(s) that integrate a passive
/// point forward in the velocity field for a single time-step (dt)
template<typename GridT = Vec3fGrid,
         bool Staggered = false,
         size_t Order = 1>
class VelocitySampler
{
public:
    typedef typename GridT::ConstAccessor AccessorType;
    typedef typename GridT::ValueType     ValueType;

    /// @brief Constructor from a grid
    VelocitySampler(const GridT& grid):
        mGrid(&grid),
        mAcc(grid.getAccessor())
    {
    }
    /// @brief Copy-constructor
    VelocitySampler(const VelocitySampler& other):
        mGrid(other.mGrid),
        mAcc(mGrid->getAccessor())
    {
    }
    /// @brief Samples the velocity at world position onto result. Supports both
    /// staggered (i.e. MAC) and collocated velocity grids.
    ///
    /// @return @c true if any one of the sampled values is active.
    ///
    /// @warning Not threadsafe since it uses a ValueAccessor! So use
    /// one instance per thread (which is fine since its lightweight).
    template <typename LocationType>
    inline bool sample(const LocationType& world, ValueType& result) const
    {
        const Vec3R xyz = mGrid->worldToIndex(Vec3R(world[0], world[1], world[2]));
        bool active = Sampler<Order, Staggered>::sample(mAcc, xyz, result);
        return active;
    }

    /// @brief Samples the velocity at world position onto result. Supports both
    /// staggered (i.e. MAC) and co-located velocity grids.
    ///
    /// @warning Not threadsafe since it uses a ValueAccessor! So use
    /// one instance per thread (which is fine since its lightweight).
    template <typename LocationType>
    inline ValueType sample(const LocationType& world) const
    {
        const Vec3R xyz = mGrid->worldToIndex(Vec3R(world[0], world[1], world[2]));
        return Sampler<Order, Staggered>::sample(mAcc, xyz);
    }

private:
    // holding the Grids for the transforms
    const GridT* mGrid; // Velocity vector field
    AccessorType mAcc;
};// end of VelocitySampler class

///////////////////////////////////////////////////////////////////////

/// @brief Performs Runge-Kutta time integration of variable order in
/// a static velocity field.
///
/// @note Note that the order of the velocity sampling is controlled
/// with the SampleOrder template parameter, which defaults
/// to one, i.e. a tri-linear interpolation kernel.
template<typename GridT = Vec3fGrid,
         bool Staggered = false,
         size_t SampleOrder = 1>
class VelocityIntegrator
{
public:
    typedef typename GridT::ValueType  VecType;
    typedef typename VecType::ValueType ElementType;

    VelocityIntegrator(const GridT& velGrid):
        mVelSampler(velGrid)
    {
    }
    /// @brief Variable order Runge-Kutta time integration for a single time step
    ///
    /// @param dt    Time sub-step for the Runge-Kutta integrator of order OrderRK
    /// @param world Location in world space coordinates (both input and output)
    template<size_t OrderRK, typename LocationType>
    inline void rungeKutta(const ElementType dt, LocationType& world) const
    {
        // Replaces BOOST_STATIC_ASSERT, which was used without any Boost header
        // being included; the file already relies on C++11 static_assert elsewhere.
        static_assert(OrderRK <= 4, "rungeKutta only supports orders 0 through 4");
        VecType P(static_cast<ElementType>(world[0]),
                  static_cast<ElementType>(world[1]),
                  static_cast<ElementType>(world[2]));
        // Note the if-branching below is optimized away at compile time
        if (OrderRK == 0) {
            return;// do nothing
        } else if (OrderRK == 1) {
            VecType V0;
            mVelSampler.sample(P, V0);
            P =  dt * V0;
        } else if (OrderRK == 2) {
            VecType V0, V1;
            mVelSampler.sample(P, V0);
            mVelSampler.sample(P + ElementType(0.5) * dt * V0, V1);
            P = dt * V1;
        } else if (OrderRK == 3) {
            VecType V0, V1, V2;
            mVelSampler.sample(P, V0);
            mVelSampler.sample(P + ElementType(0.5) * dt * V0, V1);
            mVelSampler.sample(P + dt * (ElementType(2.0) * V1 - V0), V2);
            P = dt * (V0 + ElementType(4.0) * V1 + V2) * ElementType(1.0 / 6.0);
        } else if (OrderRK == 4) {
            VecType V0, V1, V2, V3;
            mVelSampler.sample(P, V0);
            mVelSampler.sample(P + ElementType(0.5) * dt * V0, V1);
            mVelSampler.sample(P + ElementType(0.5) * dt * V1, V2);
            mVelSampler.sample(P + dt * V2, V3);
            P = dt * (V0 + ElementType(2.0) * (V1 + V2) + V3) * ElementType(1.0 / 6.0);
        }
        typedef typename LocationType::ValueType OutType;
        world += LocationType(static_cast<OutType>(P[0]),
                              static_cast<OutType>(P[1]),
                              static_cast<OutType>(P[2]));
    }
private:
    VelocitySampler<GridT, Staggered, SampleOrder> mVelSampler;
};// end of VelocityIntegrator class

} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TOOLS_VELOCITY_FIELDS_HAS_BEEN_INCLUDED
10,691
C
37.599278
99
0.634272
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Clip.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

/// @file Clip.h
///
/// @brief Functions to clip a grid against a bounding box, a camera frustum,
/// or another grid's active voxel topology

#ifndef OPENVDB_TOOLS_CLIP_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_CLIP_HAS_BEEN_INCLUDED

#include <openvdb/Grid.h>
#include <openvdb/math/Math.h> // for math::isNegative()
#include <openvdb/math/Maps.h> // for math::NonlinearFrustumMap
#include <openvdb/tree/LeafManager.h>
#include "GridTransformer.h" // for tools::resampleToMatch()
#include "Prune.h"
#include <tbb/blocked_range.h>
#include <tbb/parallel_reduce.h>
#include <type_traits> // for std::enable_if, std::is_same
#include <vector>

namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {

/// @brief Clip the given grid against a world-space bounding box
/// and return a new grid containing the result.
/// @param grid          the grid to be clipped
/// @param bbox          a world-space bounding box
/// @param keepInterior  if true, discard voxels that lie outside the bounding box;
///     if false, discard voxels that lie inside the bounding box
/// @warning Clipping a level set will likely produce a grid that is
/// no longer a valid level set.
template<typename GridType>
inline typename GridType::Ptr
clip(const GridType& grid, const BBoxd& bbox, bool keepInterior = true);

/// @brief Clip the given grid against a frustum and return a new grid containing the result.
/// @param grid          the grid to be clipped
/// @param frustum       a frustum map
/// @param keepInterior  if true, discard voxels that lie outside the frustum;
///     if false, discard voxels that lie inside the frustum
/// @warning Clipping a level set will likely produce a grid that is
/// no longer a valid level set.
template<typename GridType>
inline typename GridType::Ptr
clip(const GridType& grid, const math::NonlinearFrustumMap& frustum, bool keepInterior = true);

/// @brief Clip a grid against the active voxels of another grid
/// and return a new grid containing the result.
/// @param grid          the grid to be clipped
/// @param mask          a grid whose active voxels form a boolean clipping mask
/// @param keepInterior  if true, discard voxels that do not intersect the mask;
///     if false, discard voxels that intersect the mask
/// @details The mask grid need not have the same transform as the source grid.
/// Also, if the mask grid is a level set, consider using tools::sdfInteriorMask
/// to construct a new mask comprising the interior (rather than the narrow band)
/// of the level set.
/// @warning Clipping a level set will likely produce a grid that is
/// no longer a valid level set.
template<typename GridType, typename MaskTreeType>
inline typename GridType::Ptr
clip(const GridType& grid, const Grid<MaskTreeType>& mask, bool keepInterior = true);


////////////////////////////////////////


namespace clip_internal {

// Use either MaskGrids or BoolGrids internally.
// (MaskGrids have a somewhat lower memory footprint.)
using MaskValueType = ValueMask;
//using MaskValueType = bool;


// Leaf functor: activates the inactive voxels of a mask leaf wherever the
// corresponding level set voxel is negative (i.e. interior).
template<typename TreeT>
class MaskInteriorVoxels
{
public:
    using ValueT = typename TreeT::ValueType;
    using LeafNodeT = typename TreeT::LeafNodeType;

    MaskInteriorVoxels(const TreeT& tree): mAcc(tree) {}

    template<typename LeafNodeType>
    void operator()(LeafNodeType& leaf, size_t /*leafIndex*/) const
    {
        const auto* refLeaf = mAcc.probeConstLeaf(leaf.origin());
        if (refLeaf) {
            for (auto iter = leaf.beginValueOff(); iter; ++iter) {
                const auto pos = iter.pos();
                leaf.setActiveState(pos, math::isNegative(refLeaf->getValue(pos)));
            }
        }
    }

private:
     tree::ValueAccessor<const TreeT> mAcc;
};


////////////////////////////////////////


// Parallel reduction that builds a new tree whose leaf voxels are copied from
// a source tree wherever a mask tree's leaf voxels are active.
template<typename TreeT>
class CopyLeafNodes
{
public:
    using MaskTreeT = typename TreeT::template ValueConverter<MaskValueType>::Type;
    using MaskLeafManagerT = tree::LeafManager<const MaskTreeT>;

    CopyLeafNodes(const TreeT&, const MaskLeafManagerT&);

    void run(bool threaded = true);

    typename TreeT::Ptr tree() const { return mNewTree; }

    CopyLeafNodes(CopyLeafNodes&, tbb::split);
    void operator()(const tbb::blocked_range<size_t>&);
    void join(const CopyLeafNodes& rhs) { mNewTree->merge(*rhs.mNewTree); }

private:
    const MaskTreeT* mClipMask;
    const TreeT* mTree;
    const MaskLeafManagerT* mLeafNodes;
    typename TreeT::Ptr mNewTree;
};


template<typename TreeT>
CopyLeafNodes<TreeT>::CopyLeafNodes(const TreeT& tree, const MaskLeafManagerT& leafNodes)
    : mTree(&tree)
    , mLeafNodes(&leafNodes)
    , mNewTree(new TreeT(mTree->background()))
{
}


template<typename TreeT>
CopyLeafNodes<TreeT>::CopyLeafNodes(CopyLeafNodes& rhs, tbb::split)
    : mTree(rhs.mTree)
    , mLeafNodes(rhs.mLeafNodes)
    , mNewTree(new TreeT(mTree->background()))
{
}


template<typename TreeT>
void
CopyLeafNodes<TreeT>::run(bool threaded)
{
    if (threaded) tbb::parallel_reduce(mLeafNodes->getRange(), *this);
    else (*this)(mLeafNodes->getRange());
}


template<typename TreeT>
void
CopyLeafNodes<TreeT>::operator()(const tbb::blocked_range<size_t>& range)
{
    tree::ValueAccessor<TreeT> acc(*mNewTree);
    tree::ValueAccessor<const TreeT> refAcc(*mTree);

    for (auto n = range.begin(); n != range.end(); ++n) {
        const auto& maskLeaf = mLeafNodes->leaf(n);
        const auto& ijk = maskLeaf.origin();
        const auto* refLeaf = refAcc.probeConstLeaf(ijk);

        auto* newLeaf = acc.touchLeaf(ijk);

        if (refLeaf) {
            // Source has a leaf here: copy per-voxel values and states.
            for (auto it = maskLeaf.cbeginValueOn(); it; ++it) {
                const auto pos = it.pos();
                newLeaf->setValueOnly(pos, refLeaf->getValue(pos));
                newLeaf->setActiveState(pos, refLeaf->isValueOn(pos));
            }
        } else {
            // Source has a tile here: every masked voxel gets the tile's value/state.
            typename TreeT::ValueType value;
            bool isActive = refAcc.probeValue(ijk, value);

            for (auto it = maskLeaf.cbeginValueOn(); it; ++it) {
                const auto pos = it.pos();
                newLeaf->setValueOnly(pos, value);
                newLeaf->setActiveState(pos, isActive);
            }
        }
    }
}


////////////////////////////////////////


// Sampler policy for resampleToMatch() that does nearest-neighbor ("bin")
// lookups, suitable for boolean/mask values.
struct BoolSampler
{
    static const char* name() { return "bin"; }
    static int radius() { return 2; }
    static bool mipmap() { return false; }
    static bool consistent() { return true; }

    template<class TreeT>
    static bool sample(const TreeT& inTree,
        const Vec3R& inCoord, typename TreeT::ValueType& result)
    {
        return inTree.probeValue(Coord::floor(inCoord), result);
    }
};


////////////////////////////////////////


// Convert a grid of one type to a grid of another type
template<typename FromGridT, typename ToGridT>
struct ConvertGrid
{
    using FromGridCPtrT = typename FromGridT::ConstPtr;
    using ToGridPtrT = typename ToGridT::Ptr;
    ToGridPtrT operator()(const FromGridCPtrT& grid) { return ToGridPtrT(new ToGridT(*grid)); }
};

// Partial specialization that avoids copying when
// the input and output grid types are the same
template<typename GridT>
struct ConvertGrid<GridT, GridT>
{
    using GridCPtrT = typename GridT::ConstPtr;
    GridCPtrT operator()(const GridCPtrT& grid) { return grid; }
};


////////////////////////////////////////


// Convert a grid of arbitrary type to a mask grid with the same tree configuration
// and return a pointer to the new grid.
/// @private
template<typename GridT>
inline typename std::enable_if<!std::is_same<MaskValueType, typename GridT::BuildType>::value,
    typename GridT::template ValueConverter<MaskValueType>::Type::Ptr>::type
convertToMaskGrid(const GridT& grid)
{
    using MaskGridT = typename GridT::template ValueConverter<MaskValueType>::Type;
    auto mask = MaskGridT::create(/*background=*/false);
    mask->topologyUnion(grid);
    mask->setTransform(grid.constTransform().copy());
    return mask;
}

// Overload that avoids any processing if the input grid is already a mask grid
/// @private
template<typename GridT>
inline typename std::enable_if<std::is_same<MaskValueType, typename GridT::BuildType>::value,
    typename GridT::ConstPtr>::type
convertToMaskGrid(const GridT& grid)
{
    return grid.copy(); // shallow copy
}


////////////////////////////////////////


/// @private
template<typename GridType>
inline typename GridType::Ptr
doClip(
    const GridType& grid,
    const typename GridType::template ValueConverter<MaskValueType>::Type& clipMask,
    bool keepInterior)
{
    using TreeT = typename GridType::TreeType;
    using MaskTreeT = typename GridType::TreeType::template ValueConverter<MaskValueType>::Type;

    const auto gridClass = grid.getGridClass();
    const auto& tree = grid.tree();

    MaskTreeT gridMask(false);
    gridMask.topologyUnion(tree);

    // For level sets, also activate the interior (negative) region so that
    // the clip keeps the inside of the surface, not just the narrow band.
    if (gridClass == GRID_LEVEL_SET) {
        tree::LeafManager<MaskTreeT> leafNodes(gridMask);
        leafNodes.foreach(MaskInteriorVoxels<TreeT>(tree));

        tree::ValueAccessor<const TreeT> acc(tree);

        typename MaskTreeT::ValueAllIter iter(gridMask);
        iter.setMaxDepth(MaskTreeT::ValueAllIter::LEAF_DEPTH - 1);

        for ( ; iter; ++iter) {
            iter.setActiveState(math::isNegative(acc.getValue(iter.getCoord())));
        }
    }

    if (keepInterior) {
        gridMask.topologyIntersection(clipMask.constTree());
    } else {
        gridMask.topologyDifference(clipMask.constTree());
    }

    auto outGrid = grid.copyWithNewTree();
    {
        // Copy voxel values and states.
        tree::LeafManager<const MaskTreeT> leafNodes(gridMask);
        CopyLeafNodes<TreeT> maskOp(tree, leafNodes);
        maskOp.run();
        outGrid->setTree(maskOp.tree());
    }
    {
        // Copy tile values and states.
        tree::ValueAccessor<const TreeT> refAcc(tree);
        tree::ValueAccessor<const MaskTreeT> maskAcc(gridMask);

        typename TreeT::ValueAllIter it(outGrid->tree());
        it.setMaxDepth(TreeT::ValueAllIter::LEAF_DEPTH - 1);
        for ( ; it; ++it) {
            Coord ijk = it.getCoord();

            if (maskAcc.isValueOn(ijk)) {
                typename TreeT::ValueType value;
                bool isActive = refAcc.probeValue(ijk, value);
                it.setValue(value);
                if (!isActive) it.setValueOff();
            }
        }
    }

    outGrid->setTransform(grid.transform().copy());
    if (gridClass != GRID_LEVEL_SET) outGrid->setGridClass(gridClass);

    return outGrid;
}

} // namespace clip_internal


////////////////////////////////////////


/// @private
template<typename GridType>
inline typename GridType::Ptr
clip(const GridType& grid, const BBoxd& bbox, bool keepInterior)
{
    using MaskValueT = clip_internal::MaskValueType;
    using MaskGridT = typename GridType::template ValueConverter<MaskValueT>::Type;

    // Transform the world-space bounding box into the source grid's index space.
    Vec3d idxMin, idxMax;
    math::calculateBounds(grid.constTransform(), bbox.min(), bbox.max(), idxMin, idxMax);
    CoordBBox region(Coord::floor(idxMin), Coord::floor(idxMax));
    // Construct a boolean mask grid that is true inside the index-space bounding box
    // and false everywhere else.
    MaskGridT clipMask(/*background=*/false);
    clipMask.fill(region, /*value=*/true, /*active=*/true);

    return clip_internal::doClip(grid, clipMask, keepInterior);
}


/// @private
template<typename SrcGridType, typename ClipTreeType>
inline typename SrcGridType::Ptr
clip(const SrcGridType& srcGrid, const Grid<ClipTreeType>& clipGrid, bool keepInterior)
{
    using MaskValueT = clip_internal::MaskValueType;
    using ClipGridType = Grid<ClipTreeType>;
    using SrcMaskGridType = typename SrcGridType::template ValueConverter<MaskValueT>::Type;
    using ClipMaskGridType = typename ClipGridType::template ValueConverter<MaskValueT>::Type;

    // Convert the clipping grid to a boolean-valued mask grid with the same tree configuration.
    auto maskGrid = clip_internal::convertToMaskGrid(clipGrid);

    // Resample the mask grid into the source grid's index space.
    if (srcGrid.constTransform() != maskGrid->constTransform()) {
        auto resampledMask = ClipMaskGridType::create(/*background=*/false);
        resampledMask->setTransform(srcGrid.constTransform().copy());
        tools::resampleToMatch<clip_internal::BoolSampler>(*maskGrid, *resampledMask);
        tools::prune(resampledMask->tree());
        maskGrid = resampledMask;
    }

    // Convert the mask grid to a mask grid with the same tree configuration as the source grid.
    auto clipMask = clip_internal::ConvertGrid<
        /*from=*/ClipMaskGridType, /*to=*/SrcMaskGridType>()(maskGrid);

    // Clip the source grid against the mask grid.
    return clip_internal::doClip(srcGrid, *clipMask, keepInterior);
}


/// @private
template<typename GridType>
inline typename GridType::Ptr
clip(const GridType& inGrid, const math::NonlinearFrustumMap& frustumMap, bool keepInterior)
{
    using ValueT = typename GridType::ValueType;
    using TreeT = typename GridType::TreeType;
    using LeafT = typename TreeT::LeafNodeType;

    const auto& gridXform = inGrid.transform();
    const auto frustumIndexBBox = frustumMap.getBBox();

    // Return true if index-space point (i,j,k) lies inside the frustum.
    auto frustumContainsCoord = [&](const Coord& ijk) -> bool {
        auto xyz = gridXform.indexToWorld(ijk);
        xyz = frustumMap.applyInverseMap(xyz);
        return frustumIndexBBox.isInside(xyz);
    };

    // Return the frustum index-space bounding box of the corners of
    // the given grid index-space bounding box.
    auto toFrustumIndexSpace = [&](const CoordBBox& inBBox) -> BBoxd {
        const Coord bounds[2] = { inBBox.min(), inBBox.max() };
        Coord ijk;
        BBoxd outBBox;
        for (int i = 0; i < 8; ++i) {
            ijk[0] = bounds[(i & 1) >> 0][0];
            ijk[1] = bounds[(i & 2) >> 1][1];
            ijk[2] = bounds[(i & 4) >> 2][2];
            auto xyz = gridXform.indexToWorld(ijk);
            xyz = frustumMap.applyInverseMap(xyz);
            outBBox.expand(xyz);
        }
        return outBBox;
    };

    // Construct an output grid with the same transform and metadata as the input grid.
    auto outGrid = inGrid.copyWithNewTree();
    if (outGrid->getGridClass() == GRID_LEVEL_SET) {
        // After clipping, a level set grid might no longer be a valid SDF.
        outGrid->setGridClass(GRID_UNKNOWN);
    }

    const auto& bg = outGrid->background();

    auto outAcc = outGrid->getAccessor();

    // Copy active and inactive tiles that intersect the clipping region
    // from the input grid to the output grid.
    // ("Clipping region" refers to either the interior or the exterior
    // of the frustum, depending on the value of keepInterior.)
    auto tileIter = inGrid.beginValueAll();
    tileIter.setMaxDepth(GridType::ValueAllIter::LEAF_DEPTH - 1);
    CoordBBox tileBBox;
    for ( ; tileIter; ++tileIter) {
        const bool tileActive = tileIter.isValueOn();
        const auto& tileValue = tileIter.getValue();

        // Skip background tiles.
        if (!tileActive && math::isApproxEqual(tileValue, bg)) continue;

        // Transform the tile's bounding box into frustum index space.
        tileIter.getBoundingBox(tileBBox);
        const auto tileFrustumBBox = toFrustumIndexSpace(tileBBox);

        // Determine whether any or all of the tile intersects the clipping region.
        enum class CopyTile { kNone, kPartial, kFull };
        auto copyTile = CopyTile::kNone;
        if (keepInterior) {
            if (frustumIndexBBox.isInside(tileFrustumBBox)) {
                copyTile = CopyTile::kFull;
            } else if (frustumIndexBBox.hasOverlap(tileFrustumBBox)) {
                copyTile = CopyTile::kPartial;
            }
        } else {
            if (!frustumIndexBBox.hasOverlap(tileFrustumBBox)) {
                copyTile = CopyTile::kFull;
            } else if (!frustumIndexBBox.isInside(tileFrustumBBox)) {
                copyTile = CopyTile::kPartial;
            }
        }

        switch (copyTile) {
            case CopyTile::kNone:
                break;
            case CopyTile::kFull:
                // Copy the entire tile.
                outAcc.addTile(tileIter.getLevel(), tileBBox.min(), tileValue, tileActive);
                break;
            case CopyTile::kPartial:
                // Copy only voxels inside the clipping region.
                for (std::vector<CoordBBox> bboxVec = { tileBBox }; !bboxVec.empty(); ) {
                    // For efficiency, subdivide sufficiently large tiles and discard
                    // subregions based on additional bounding box intersection tests.
                    // The minimum subregion size is chosen so that cost of the
                    // bounding box test is comparable to testing every voxel.
                    if (bboxVec.back().volume() > 64 && bboxVec.back().is_divisible()) {
                        // Subdivide this region in-place and append the other half to the list.
                        bboxVec.emplace_back(bboxVec.back(), tbb::split{});
                        continue;
                    }
                    auto subBBox = bboxVec.back();
                    bboxVec.pop_back();

                    // Discard the subregion if it lies completely outside the clipping region.
                    if (keepInterior) {
                        if (!frustumIndexBBox.hasOverlap(toFrustumIndexSpace(subBBox))) continue;
                    } else {
                        if (frustumIndexBBox.isInside(toFrustumIndexSpace(subBBox))) continue;
                    }

                    // Test every voxel within the subregion.
                    for (const auto& ijk: subBBox) {
                        if (frustumContainsCoord(ijk) == keepInterior) {
                            if (tileActive) {
                                outAcc.setValueOn(ijk, tileValue);
                            } else {
                                outAcc.setValueOff(ijk, tileValue);
                            }
                        }
                    }
                }
                break;
        }
    }
    tools::prune(outGrid->tree());

    // Ensure that the output grid has the same leaf node topology as the input grid,
    // with the exception of leaf nodes that lie completely outside the clipping region.
    // (This operation is serial.)
    for (auto leafIter = inGrid.constTree().beginLeaf(); leafIter; ++leafIter) {
        const auto leafBBox = leafIter->getNodeBoundingBox();
        const auto leafFrustumBBox = toFrustumIndexSpace(leafBBox);
        if (keepInterior) {
            if (frustumIndexBBox.hasOverlap(leafFrustumBBox)) {
                outAcc.touchLeaf(leafBBox.min());
            }
        } else {
            // NOTE(review): the first test looks redundant — !hasOverlap implies
            // !isInside — so !isInside alone would appear sufficient; confirm
            // against BBoxd semantics before simplifying.
            if (!frustumIndexBBox.hasOverlap(leafFrustumBBox)
                || !frustumIndexBBox.isInside(leafFrustumBBox))
            {
                outAcc.touchLeaf(leafBBox.min());
            }
        }
    }

    // In parallel across output leaf nodes, copy leaf voxels
    // from the input grid to the output grid.
    tree::LeafManager<TreeT> outLeafNodes{outGrid->tree()};
    outLeafNodes.foreach(
        [&](LeafT& leaf, size_t /*idx*/) {
            auto inAcc = inGrid.getConstAccessor();
            ValueT val;
            for (auto voxelIter = leaf.beginValueAll(); voxelIter; ++voxelIter) {
                const auto ijk = voxelIter.getCoord();
                if (frustumContainsCoord(ijk) == keepInterior) {
                    const bool active = inAcc.probeValue(ijk, val);
                    voxelIter.setValue(val);
                    voxelIter.setValueOn(active);
                }
            }
        }
    );

    return outGrid;
}

} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TOOLS_CLIP_HAS_BEEN_INCLUDED
20,145
C
34.46831
97
0.636833
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/TopologyToLevelSet.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file TopologyToLevelSet.h
///
/// @brief This tool generates a narrow-band signed distance field / level set
///        from the interface between active and inactive voxels in a vdb grid.
///
/// @par Example:
/// Combine with @c tools::PointsToVolume for fast point cloud to level set conversion.

#ifndef OPENVDB_TOOLS_TOPOLOGY_TO_LEVELSET_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_TOPOLOGY_TO_LEVELSET_HAS_BEEN_INCLUDED

#include "LevelSetFilter.h"
#include "Morphology.h" // for erodeVoxels and dilateActiveValues
#include "SignedFloodFill.h"

#include <openvdb/Grid.h>
#include <openvdb/Types.h>
#include <openvdb/math/FiniteDifference.h> // for math::BiasedGradientScheme
#include <openvdb/util/NullInterrupter.h>

#include <tbb/task_group.h>

#include <algorithm> // for std::min(), std::max()
#include <vector>

namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {

/// @brief Compute the narrow-band signed distance to the interface between
/// active and inactive voxels in the input grid.
///
/// @return A shared pointer to a new sdf / level set grid of type @c float
///
/// @param grid           Input grid of arbitrary type whose active voxels are used
///                       in constructing the level set.
/// @param halfWidth      Half the width of the narrow band in voxel units.
/// @param closingSteps   Number of morphological closing steps used to fill gaps
///                       in the active voxel region.
/// @param dilation       Number of voxels to expand the active voxel region.
/// @param smoothingSteps Number of smoothing iterations.
template<typename GridT>
inline typename GridT::template ValueConverter<float>::Type::Ptr
topologyToLevelSet(const GridT& grid, int halfWidth = 3, int closingSteps = 1, int dilation = 0,
    int smoothingSteps = 0);

/// @brief Compute the narrow-band signed distance to the interface between
/// active and inactive voxels in the input grid.
///
/// @return A shared pointer to a new sdf / level set grid of type @c float
///
/// @param grid           Input grid of arbitrary type whose active voxels are used
///                       in constructing the level set.
/// @param halfWidth      Half the width of the narrow band in voxel units.
/// @param closingSteps   Number of morphological closing steps used to fill gaps
///                       in the active voxel region.
/// @param dilation       Number of voxels to expand the active voxel region.
/// @param smoothingSteps Number of smoothing iterations.
/// @param interrupt      Optional object adhering to the util::NullInterrupter interface.
template<typename GridT, typename InterrupterT>
inline typename GridT::template ValueConverter<float>::Type::Ptr
topologyToLevelSet(const GridT& grid, int halfWidth = 3, int closingSteps = 1, int dilation = 0,
    int smoothingSteps = 0, InterrupterT* interrupt = nullptr);

////////////////////////////////////////

namespace ttls_internal {

/// @brief Functor that dilates a tree's active values; used as a tbb::task_group task.
template<typename TreeT>
struct DilateOp
{
    DilateOp(TreeT& t, int n) : tree(&t), size(n) {}
    void operator()() const {
        dilateActiveValues( *tree, size, tools::NN_FACE, tools::IGNORE_TILES);
    }
    TreeT* tree;
    const int size;
};

/// @brief Functor that erodes a tree's active voxels; used as a tbb::task_group task.
template<typename TreeT>
struct ErodeOp
{
    ErodeOp(TreeT& t, int n) : tree(&t), size(n) {}
    void operator()() const { erodeVoxels( *tree, size); }
    TreeT* tree;
    const int size;
};

/// @brief Per-leaf functor: for each active lhs voxel, replace its value with
/// min(lhs, rhs + offset), where rhs is read from a second tree at the same coordinate.
template<typename TreeType>
struct OffsetAndMinComp
{
    using LeafNodeType = typename TreeType::LeafNodeType;
    using ValueType = typename TreeType::ValueType;

    OffsetAndMinComp(std::vector<LeafNodeType*>& lhsNodes, const TreeType& rhsTree, ValueType offset)
        : mLhsNodes(lhsNodes.empty() ? nullptr : &lhsNodes[0]), mRhsTree(&rhsTree), mOffset(offset)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) const
    {
        using Iterator = typename LeafNodeType::ValueOnIter;

        tree::ValueAccessor<const TreeType> rhsAcc(*mRhsTree);
        const ValueType offset = mOffset;

        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {

            LeafNodeType& lhsNode = *mLhsNodes[n];
            const LeafNodeType * rhsNodePt = rhsAcc.probeConstLeaf(lhsNode.origin());
            // Skip leaves with no counterpart in the rhs tree.
            if (!rhsNodePt) continue;

            for (Iterator it = lhsNode.beginValueOn(); it; ++it) {
                // ValueOnIter exposes a const reference; cast away constness to
                // write the min in place without a buffered setValue.
                ValueType& val = const_cast<ValueType&>(it.getValue());
                val = std::min(val, offset + rhsNodePt->getValue(it.pos()));
            }
        }
    }

private:
    LeafNodeType * * const mLhsNodes;
    TreeType const * const mRhsTree;
    ValueType const mOffset;
}; // struct OffsetAndMinComp

/// @brief Renormalize a level set grid to a valid signed distance field
/// using a PDE-based scheme, then prune its tree.
template<typename GridType, typename InterrupterType>
inline void
normalizeLevelSet(GridType& grid, const int halfWidthInVoxels, InterrupterType* interrupt = nullptr)
{
    LevelSetFilter<GridType, GridType, InterrupterType> filter(grid, interrupt);
    filter.setSpatialScheme(math::FIRST_BIAS);
    filter.setNormCount(halfWidthInVoxels);
    filter.normalize();
    filter.prune();
}

/// @brief Apply mean-filter smoothing iterations on a copy of @a grid, then
/// combine the smoothed result back in with an offset-min and renormalize.
template<typename GridType, typename InterrupterType>
inline void
smoothLevelSet(GridType& grid, int iterations, int halfBandWidthInVoxels,
    InterrupterType* interrupt = nullptr)
{
    using ValueType = typename GridType::ValueType;
    using TreeType = typename GridType::TreeType;
    using LeafNodeType = typename TreeType::LeafNodeType;

    GridType filterGrid(grid);

    LevelSetFilter<GridType, GridType, InterrupterType> filter(filterGrid, interrupt);
    filter.setSpatialScheme(math::FIRST_BIAS);

    for (int n = 0; n < iterations; ++n) {
        if (interrupt && interrupt->wasInterrupted()) break;
        filter.mean(1);
    }

    std::vector<LeafNodeType*> nodes;
    grid.tree().getNodes(nodes);

    const ValueType offset = ValueType(double(0.5) * grid.transform().voxelSize()[0]);

    tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()),
        OffsetAndMinComp<TreeType>(nodes, filterGrid.tree(), -offset));

    // Clean up any damage that was done by the min operation
    normalizeLevelSet(grid, halfBandWidthInVoxels, interrupt);
}

} // namespace ttls_internal

template<typename GridT, typename InterrupterT>
inline typename GridT::template ValueConverter<float>::Type::Ptr
topologyToLevelSet(const GridT& grid, int halfWidth, int closingSteps, int dilation,
    int smoothingSteps, InterrupterT* interrupt)
{
    using MaskTreeT = typename GridT::TreeType::template ValueConverter<ValueMask>::Type;
    using FloatTreeT = typename GridT::TreeType::template ValueConverter<float>::Type;
    using FloatGridT = Grid<FloatTreeT>;

    // Check inputs
    halfWidth = std::max(halfWidth, 1);
    closingSteps = std::max(closingSteps, 0);
    dilation = std::max(dilation, 0);

    if (!grid.hasUniformVoxels()) {
        OPENVDB_THROW(ValueError, "Non-uniform voxels are not supported!");
    }

    // Copy the topology into a MaskGrid.
    MaskTreeT maskTree( grid.tree(), false/*background*/, openvdb::TopologyCopy() );

    // Morphological closing operation.
    dilateActiveValues( maskTree, closingSteps + dilation, tools::NN_FACE, tools::IGNORE_TILES );
    erodeVoxels( maskTree, closingSteps );

    // Generate a volume with an implicit zero crossing at the boundary
    // between active and inactive values in the input grid.
    const float background = float(grid.voxelSize()[0]) * float(halfWidth);
    typename FloatTreeT::Ptr lsTree( new FloatTreeT( maskTree,
        /*out=*/background, /*in=*/-background, openvdb::TopologyCopy() ) );

    // Erode the mask and dilate the SDF band concurrently; the eroded mask is
    // subtracted below to carve the interior out of the narrow band.
    tbb::task_group pool;
    pool.run( ttls_internal::ErodeOp< MaskTreeT >( maskTree, halfWidth ) );
    pool.run( ttls_internal::DilateOp<FloatTreeT>( *lsTree , halfWidth ) );
    pool.wait();// wait for both tasks to complete

    lsTree->topologyDifference( maskTree );
    tools::pruneLevelSet( *lsTree,  /*threading=*/true);

    // Create a level set grid from the tree
    typename FloatGridT::Ptr lsGrid = FloatGridT::create( lsTree );
    lsGrid->setTransform( grid.transform().copy() );
    lsGrid->setGridClass( openvdb::GRID_LEVEL_SET );

    // Use a PDE based scheme to propagate distance values from the
    // implicit zero crossing.
    ttls_internal::normalizeLevelSet(*lsGrid, 3*halfWidth, interrupt);

    // Additional filtering
    if (smoothingSteps > 0) {
        ttls_internal::smoothLevelSet(*lsGrid, smoothingSteps, halfWidth, interrupt);
    }

    return lsGrid;
}

template<typename GridT>
inline typename GridT::template ValueConverter<float>::Type::Ptr
topologyToLevelSet(const GridT& grid, int halfWidth, int closingSteps, int dilation,
    int smoothingSteps)
{
    util::NullInterrupter interrupt;
    return topologyToLevelSet(grid, halfWidth, closingSteps, dilation, smoothingSteps, &interrupt);
}

} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TOOLS_TOPOLOGY_TO_LEVELSET_HAS_BEEN_INCLUDED
9,138
C
34.285714
104
0.69118
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/RenderModules.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_VIEWER_RENDERMODULES_HAS_BEEN_INCLUDED #define OPENVDB_VIEWER_RENDERMODULES_HAS_BEEN_INCLUDED #include <openvdb/openvdb.h> #include <openvdb/tools/VolumeToMesh.h> #include <openvdb/tools/MeshToVolume.h> #include <openvdb/tools/PointScatter.h> #include <openvdb/tree/LeafManager.h> #include <openvdb/math/Operators.h> #include <string> #include <vector> #if defined(__APPLE__) || defined(MACOSX) #include <OpenGL/gl.h> #include <OpenGL/glu.h> #elif defined(_WIN32) #include <GL/glew.h> #else #include <GL/gl.h> #include <GL/glu.h> #endif namespace openvdb_viewer { // OpenGL helper objects class BufferObject { public: BufferObject(); ~BufferObject(); void render() const; /// @note accepted @c primType: GL_POINTS, GL_LINE_STRIP, GL_LINE_LOOP, /// GL_LINES, GL_TRIANGLE_STRIP, GL_TRIANGLE_FAN, GL_TRIANGLES, /// GL_QUAD_STRIP, GL_QUADS and GL_POLYGON void genIndexBuffer(const std::vector<GLuint>&, GLenum primType); void genVertexBuffer(const std::vector<GLfloat>&); void genNormalBuffer(const std::vector<GLfloat>&); void genColorBuffer(const std::vector<GLfloat>&); void clear(); private: GLuint mVertexBuffer, mNormalBuffer, mIndexBuffer, mColorBuffer; GLenum mPrimType; GLsizei mPrimNum; }; class ShaderProgram { public: ShaderProgram(); ~ShaderProgram(); void setVertShader(const std::string&); void setFragShader(const std::string&); void build(); void build(const std::vector<GLchar*>& attributes); void startShading() const; void stopShading() const; void clear(); private: GLuint mProgram, mVertShader, mFragShader; }; //////////////////////////////////////// /// @brief interface class class RenderModule { public: virtual ~RenderModule() {} virtual void render() = 0; bool visible() { return mIsVisible; } void setVisible(bool b) { mIsVisible = b; } protected: RenderModule(): mIsVisible(true) {} bool mIsVisible; }; //////////////////////////////////////// /// @brief Basic render 
module, axis gnomon and ground plane. class ViewportModule: public RenderModule { public: ViewportModule(); ~ViewportModule() override = default; void render() override; private: float mAxisGnomonScale, mGroundPlaneScale; }; //////////////////////////////////////// /// @brief Tree topology render module class TreeTopologyModule: public RenderModule { public: TreeTopologyModule(const openvdb::GridBase::ConstPtr&); ~TreeTopologyModule() override = default; void render() override; private: void init(); const openvdb::GridBase::ConstPtr& mGrid; BufferObject mBufferObject; bool mIsInitialized; ShaderProgram mShader; }; //////////////////////////////////////// /// @brief Module to render active voxels as points class VoxelModule: public RenderModule { public: VoxelModule(const openvdb::GridBase::ConstPtr&); ~VoxelModule() override = default; void render() override; private: void init(); const openvdb::GridBase::ConstPtr& mGrid; BufferObject mInteriorBuffer, mSurfaceBuffer, mVectorBuffer; bool mIsInitialized; ShaderProgram mFlatShader, mSurfaceShader; }; //////////////////////////////////////// /// @brief Surfacing render module class MeshModule: public RenderModule { public: MeshModule(const openvdb::GridBase::ConstPtr&); ~MeshModule() override = default; void render() override; private: void init(); const openvdb::GridBase::ConstPtr& mGrid; BufferObject mBufferObject; bool mIsInitialized; ShaderProgram mShader; }; } // namespace openvdb_viewer #endif // OPENVDB_VIEWER_RENDERMODULES_HAS_BEEN_INCLUDED
3,803
C
19.562162
75
0.670523
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/Viewer.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "Viewer.h" #include "Camera.h" #include "ClipBox.h" #include "Font.h" #include "RenderModules.h" #include <openvdb/util/Formats.h> // for formattedInt() #include <openvdb/util/logging.h> #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointCount.h> #include <openvdb/version.h> // for OPENVDB_LIBRARY_MAJOR_VERSION, etc. #include <tbb/atomic.h> #include <tbb/mutex.h> #include <cmath> // for fabs() #include <iomanip> // for std::setprecision() #include <iostream> #include <memory> #include <sstream> #include <vector> #include <limits> #include <thread> #include <chrono> #if defined(_WIN32) #include <GL/glew.h> #endif #include <GLFW/glfw3.h> namespace openvdb_viewer { class ViewerImpl { public: using CameraPtr = std::shared_ptr<Camera>; using ClipBoxPtr = std::shared_ptr<ClipBox>; using RenderModulePtr = std::shared_ptr<RenderModule>; ViewerImpl(); void init(const std::string& progName); std::string getVersionString() const; bool isOpen() const; bool open(int width = 900, int height = 800); void view(const openvdb::GridCPtrVec&); void handleEvents(); void close(); void resize(int width, int height); void showPrevGrid(); void showNextGrid(); bool needsDisplay(); void setNeedsDisplay(); void toggleRenderModule(size_t n); void toggleInfoText(); // Internal void render(); void interrupt(); void setWindowTitle(double fps = 0.0); void showNthGrid(size_t n); void updateCutPlanes(int wheelPos); void swapBuffers(); void keyCallback(int key, int action); void mouseButtonCallback(int button, int action); void mousePosCallback(int x, int y); void mouseWheelCallback(int pos); void windowSizeCallback(int width, int height); void windowRefreshCallback(); static openvdb::BBoxd worldSpaceBBox(const openvdb::math::Transform&, const openvdb::CoordBBox&); static void sleep(double seconds); private: bool mDidInit; CameraPtr mCamera; ClipBoxPtr mClipBox; RenderModulePtr 
mViewportModule; std::vector<RenderModulePtr> mRenderModules; openvdb::GridCPtrVec mGrids; size_t mGridIdx, mUpdates; std::string mGridName, mProgName, mGridInfo, mTransformInfo, mTreeInfo; int mWheelPos; bool mShiftIsDown, mCtrlIsDown, mShowInfo; bool mInterrupt; GLFWwindow* mWindow; }; // class ViewerImpl class ThreadManager { public: ThreadManager(); void view(const openvdb::GridCPtrVec& gridList); void close(); void resize(int width, int height); private: void doView(); static void* doViewTask(void* arg); tbb::atomic<bool> mRedisplay; bool mClose, mHasThread; std::thread mThread; openvdb::GridCPtrVec mGrids; }; //////////////////////////////////////// namespace { ViewerImpl* sViewer = nullptr; ThreadManager* sThreadMgr = nullptr; tbb::mutex sLock; void keyCB(GLFWwindow*, int key, int /*scancode*/, int action, int /*modifiers*/) { if (sViewer) sViewer->keyCallback(key, action); } void mouseButtonCB(GLFWwindow*, int button, int action, int /*modifiers*/) { if (sViewer) sViewer->mouseButtonCallback(button, action); } void mousePosCB(GLFWwindow*, double x, double y) { if (sViewer) sViewer->mousePosCallback(int(x), int(y)); } void mouseWheelCB(GLFWwindow*, double /*xoffset*/, double yoffset) { if (sViewer) sViewer->mouseWheelCallback(int(yoffset)); } void windowSizeCB(GLFWwindow*, int width, int height) { if (sViewer) sViewer->windowSizeCallback(width, height); } void windowRefreshCB(GLFWwindow*) { if (sViewer) sViewer->windowRefreshCallback(); } } // unnamed namespace //////////////////////////////////////// Viewer init(const std::string& progName, bool background) { if (sViewer == nullptr) { tbb::mutex::scoped_lock lock(sLock); if (sViewer == nullptr) { OPENVDB_START_THREADSAFE_STATIC_WRITE sViewer = new ViewerImpl; OPENVDB_FINISH_THREADSAFE_STATIC_WRITE } } sViewer->init(progName); if (background) { if (sThreadMgr == nullptr) { tbb::mutex::scoped_lock lock(sLock); if (sThreadMgr == nullptr) { OPENVDB_START_THREADSAFE_STATIC_WRITE sThreadMgr = new ThreadManager; 
OPENVDB_FINISH_THREADSAFE_STATIC_WRITE } } } else { if (sThreadMgr != nullptr) { tbb::mutex::scoped_lock lock(sLock); delete sThreadMgr; OPENVDB_START_THREADSAFE_STATIC_WRITE sThreadMgr = nullptr; OPENVDB_FINISH_THREADSAFE_STATIC_WRITE } } return Viewer(); } void exit() { glfwTerminate(); } //////////////////////////////////////// Viewer::Viewer() { OPENVDB_LOG_DEBUG_RUNTIME("constructed Viewer from thread " << std::this_thread::get_id()); } void Viewer::open(int width, int height) { if (sViewer) sViewer->open(width, height); } void Viewer::view(const openvdb::GridCPtrVec& grids) { if (sThreadMgr) { sThreadMgr->view(grids); } else if (sViewer) { sViewer->view(grids); } } void Viewer::handleEvents() { if (sViewer) sViewer->handleEvents(); } void Viewer::close() { if (sThreadMgr) sThreadMgr->close(); else if (sViewer) sViewer->close(); } void Viewer::resize(int width, int height) { if (sViewer) sViewer->resize(width, height); } std::string Viewer::getVersionString() const { std::string version; if (sViewer) version = sViewer->getVersionString(); return version; } //////////////////////////////////////// ThreadManager::ThreadManager() : mClose(false) , mHasThread(false) { mRedisplay = false; } void ThreadManager::view(const openvdb::GridCPtrVec& gridList) { if (!sViewer) return; mGrids = gridList; mClose = false; mRedisplay = true; if (!mHasThread) { mThread = std::thread(doViewTask, this); mHasThread = true; } } void ThreadManager::close() { if (!sViewer) return; // Tell the viewer thread to exit. mRedisplay = false; mClose = true; // Tell the viewer to terminate its event loop. sViewer->interrupt(); if (mHasThread) { mThread.join(); mHasThread = false; } // Tell the viewer to close its window. sViewer->close(); } void ThreadManager::doView() { // This function runs in its own thread. // The mClose and mRedisplay flags are set from the main thread. 
while (!mClose) { if (mRedisplay.compare_and_swap(/*set to*/false, /*if*/true)) { if (sViewer) sViewer->view(mGrids); } sViewer->sleep(0.5/*sec*/); } } //static void* ThreadManager::doViewTask(void* arg) { if (ThreadManager* self = static_cast<ThreadManager*>(arg)) { self->doView(); } return nullptr; } //////////////////////////////////////// ViewerImpl::ViewerImpl() : mDidInit(false) , mCamera(new Camera) , mClipBox(new ClipBox) , mGridIdx(0) , mUpdates(0) , mWheelPos(0) , mShiftIsDown(false) , mCtrlIsDown(false) , mShowInfo(true) , mInterrupt(false) , mWindow(nullptr) { } void ViewerImpl::init(const std::string& progName) { mProgName = progName; if (!mDidInit) { struct Local { static void errorCB(int error, const char* descr) { OPENVDB_LOG_ERROR("GLFW Error " << error << ": " << descr); } }; glfwSetErrorCallback(Local::errorCB); if (glfwInit() == GL_TRUE) { OPENVDB_LOG_DEBUG_RUNTIME("initialized GLFW from thread " << std::this_thread::get_id()); mDidInit = true; } else { OPENVDB_LOG_ERROR("GLFW initialization failed"); } } mViewportModule.reset(new ViewportModule); } std::string ViewerImpl::getVersionString() const { std::ostringstream ostr; ostr << "OpenVDB: " << openvdb::OPENVDB_LIBRARY_MAJOR_VERSION << "." << openvdb::OPENVDB_LIBRARY_MINOR_VERSION << "." << openvdb::OPENVDB_LIBRARY_PATCH_VERSION; int major, minor, rev; glfwGetVersion(&major, &minor, &rev); ostr << ", " << "GLFW: " << major << "." << minor << "." << rev; if (mDidInit) { ostr << ", " << "OpenGL: "; std::shared_ptr<GLFWwindow> wPtr; GLFWwindow* w = mWindow; if (!w) { wPtr.reset(glfwCreateWindow(100, 100, "", nullptr, nullptr), &glfwDestroyWindow); w = wPtr.get(); } if (w) { ostr << glfwGetWindowAttrib(w, GLFW_CONTEXT_VERSION_MAJOR) << "." << glfwGetWindowAttrib(w, GLFW_CONTEXT_VERSION_MINOR) << "." 
<< glfwGetWindowAttrib(w, GLFW_CONTEXT_REVISION); } } return ostr.str(); } bool ViewerImpl::open(int width, int height) { if (mWindow == nullptr) { glfwWindowHint(GLFW_RED_BITS, 8); glfwWindowHint(GLFW_GREEN_BITS, 8); glfwWindowHint(GLFW_BLUE_BITS, 8); glfwWindowHint(GLFW_ALPHA_BITS, 8); glfwWindowHint(GLFW_DEPTH_BITS, 32); glfwWindowHint(GLFW_STENCIL_BITS, 0); mWindow = glfwCreateWindow( width, height, mProgName.c_str(), /*monitor=*/nullptr, /*share=*/nullptr); OPENVDB_LOG_DEBUG_RUNTIME("created window " << std::hex << mWindow << std::dec << " from thread " << std::this_thread::get_id()); if (mWindow != nullptr) { // Temporarily make the new window the current context, then create a font. std::shared_ptr<GLFWwindow> curWindow( glfwGetCurrentContext(), glfwMakeContextCurrent); glfwMakeContextCurrent(mWindow); BitmapFont13::initialize(); } } mCamera->setWindow(mWindow); if (mWindow != nullptr) { glfwSetKeyCallback(mWindow, keyCB); glfwSetMouseButtonCallback(mWindow, mouseButtonCB); glfwSetCursorPosCallback(mWindow, mousePosCB); glfwSetScrollCallback(mWindow, mouseWheelCB); glfwSetWindowSizeCallback(mWindow, windowSizeCB); glfwSetWindowRefreshCallback(mWindow, windowRefreshCB); } return (mWindow != nullptr); } bool ViewerImpl::isOpen() const { return (mWindow != nullptr); } // Set a flag so as to break out of the event loop on the next iteration. // (Useful only if the event loop is running in a separate thread.) 
void ViewerImpl::interrupt() { mInterrupt = true; if (mWindow) glfwSetWindowShouldClose(mWindow, true); } void ViewerImpl::handleEvents() { glfwPollEvents(); } void ViewerImpl::close() { OPENVDB_LOG_DEBUG_RUNTIME("about to close window " << std::hex << mWindow << std::dec << " from thread " << std::this_thread::get_id()); mViewportModule.reset(); mRenderModules.clear(); mCamera->setWindow(nullptr); GLFWwindow* win = mWindow; mWindow = nullptr; glfwDestroyWindow(win); OPENVDB_LOG_DEBUG_RUNTIME("destroyed window " << std::hex << win << std::dec << " from thread " << std::this_thread::get_id()); } //////////////////////////////////////// void ViewerImpl::view(const openvdb::GridCPtrVec& gridList) { if (!isOpen()) return; mGrids = gridList; mGridIdx = size_t(-1); mGridName.clear(); // Compute the combined bounding box of all the grids. openvdb::BBoxd bbox(openvdb::Vec3d(0.0), openvdb::Vec3d(0.0)); if (!gridList.empty()) { bbox = worldSpaceBBox( gridList[0]->transform(), gridList[0]->evalActiveVoxelBoundingBox()); openvdb::Vec3d voxelSize = gridList[0]->voxelSize(); for (size_t n = 1; n < gridList.size(); ++n) { bbox.expand(worldSpaceBBox(gridList[n]->transform(), gridList[n]->evalActiveVoxelBoundingBox())); voxelSize = minComponent(voxelSize, gridList[n]->voxelSize()); } mClipBox->setStepSize(voxelSize); } mClipBox->setBBox(bbox); // Prepare window for rendering. 
glfwMakeContextCurrent(mWindow); #if defined(_WIN32) // This must come after glfwMakeContextCurrent if (GLEW_OK != glewInit()) { OPENVDB_LOG_ERROR("GLEW initialization failed"); } #endif { // set up camera openvdb::Vec3d extents = bbox.extents(); double maxExtent = std::max(extents[0], std::max(extents[1], extents[2])); mCamera->setTarget(bbox.getCenter(), maxExtent); mCamera->lookAtTarget(); mCamera->setSpeed(); } swapBuffers(); setNeedsDisplay(); ////////// // Screen color glClearColor(0.85f, 0.85f, 0.85f, 0.0f); glDepthFunc(GL_LESS); glEnable(GL_DEPTH_TEST); glShadeModel(GL_SMOOTH); glPointSize(4); glLineWidth(2); ////////// // construct render modules showNthGrid(/*n=*/0); // main loop size_t frame = 0; double time = glfwGetTime(); glfwSwapInterval(1); OPENVDB_LOG_DEBUG_RUNTIME("starting to render in window " << std::hex << mWindow << std::dec << " from thread " << std::this_thread::get_id()); mInterrupt = false; for (bool stop = false; !stop; ) { if (needsDisplay()) render(); // eval fps ++frame; double elapsed = glfwGetTime() - time; if (elapsed > 1.0) { time = glfwGetTime(); setWindowTitle(/*fps=*/double(frame) / elapsed); frame = 0; } // Swap front and back buffers swapBuffers(); sleep(0.01/*sec*/); // Exit if the Esc key is pressed or the window is closed. handleEvents(); stop = (mInterrupt || glfwWindowShouldClose(mWindow)); } if (glfwGetCurrentContext() == mWindow) { ///< @todo not thread-safe // Detach this viewer's GL context. 
glfwMakeContextCurrent(nullptr); OPENVDB_LOG_DEBUG_RUNTIME("detached window " << std::hex << mWindow << std::dec << " from thread " << std::this_thread::get_id()); } OPENVDB_LOG_DEBUG_RUNTIME("finished rendering in window " << std::hex << mWindow << std::dec << " from thread " << std::this_thread::get_id()); } //////////////////////////////////////// void ViewerImpl::resize(int width, int height) { if (mWindow) glfwSetWindowSize(mWindow, width, height); } //////////////////////////////////////// void ViewerImpl::render() { if (mWindow == nullptr) return; // Prepare window for rendering. glfwMakeContextCurrent(mWindow); mCamera->aim(); // draw scene mViewportModule->render(); // ground plane. mClipBox->render(); mClipBox->enableClipping(); for (size_t n = 0, N = mRenderModules.size(); n < N; ++n) { mRenderModules[n]->render(); } mClipBox->disableClipping(); // Render text if (mShowInfo) { BitmapFont13::enableFontRendering(); glColor3d(0.2, 0.2, 0.2); int width, height; glfwGetFramebufferSize(mWindow, &width, &height); BitmapFont13::print(10, height - 13 - 10, mGridInfo); BitmapFont13::print(10, height - 13 - 30, mTransformInfo); BitmapFont13::print(10, height - 13 - 50, mTreeInfo); // Indicate via their hotkeys which render modules are enabled. 
std::string keys = "123"; for (auto n: {0, 1, 2}) { if (!mRenderModules[n]->visible()) keys[n] = ' '; } BitmapFont13::print(width - 10 - 30, 10, keys); glColor3d(0.75, 0.75, 0.75); BitmapFont13::print(width - 10 - 30, 10, "123"); BitmapFont13::disableFontRendering(); } } //////////////////////////////////////// //static void ViewerImpl::sleep(double secs) { secs = fabs(secs); int isecs = int(secs); std::this_thread::sleep_for(std::chrono::seconds(isecs)); } //////////////////////////////////////// //static openvdb::BBoxd ViewerImpl::worldSpaceBBox(const openvdb::math::Transform& xform, const openvdb::CoordBBox& bbox) { openvdb::Vec3d pMin = openvdb::Vec3d(std::numeric_limits<double>::max()); openvdb::Vec3d pMax = -pMin; const openvdb::Coord& min = bbox.min(); const openvdb::Coord& max = bbox.max(); openvdb::Coord ijk; // corner 1 openvdb::Vec3d ptn = xform.indexToWorld(min); for (int i = 0; i < 3; ++i) { if (ptn[i] < pMin[i]) pMin[i] = ptn[i]; if (ptn[i] > pMax[i]) pMax[i] = ptn[i]; } // corner 2 ijk[0] = min.x(); ijk[1] = min.y(); ijk[2] = max.z(); ptn = xform.indexToWorld(ijk); for (int i = 0; i < 3; ++i) { if (ptn[i] < pMin[i]) pMin[i] = ptn[i]; if (ptn[i] > pMax[i]) pMax[i] = ptn[i]; } // corner 3 ijk[0] = max.x(); ijk[1] = min.y(); ijk[2] = max.z(); ptn = xform.indexToWorld(ijk); for (int i = 0; i < 3; ++i) { if (ptn[i] < pMin[i]) pMin[i] = ptn[i]; if (ptn[i] > pMax[i]) pMax[i] = ptn[i]; } // corner 4 ijk[0] = max.x(); ijk[1] = min.y(); ijk[2] = min.z(); ptn = xform.indexToWorld(ijk); for (int i = 0; i < 3; ++i) { if (ptn[i] < pMin[i]) pMin[i] = ptn[i]; if (ptn[i] > pMax[i]) pMax[i] = ptn[i]; } // corner 5 ijk[0] = min.x(); ijk[1] = max.y(); ijk[2] = min.z(); ptn = xform.indexToWorld(ijk); for (int i = 0; i < 3; ++i) { if (ptn[i] < pMin[i]) pMin[i] = ptn[i]; if (ptn[i] > pMax[i]) pMax[i] = ptn[i]; } // corner 6 ijk[0] = min.x(); ijk[1] = max.y(); ijk[2] = max.z(); ptn = xform.indexToWorld(ijk); for (int i = 0; i < 3; ++i) { if (ptn[i] < pMin[i]) pMin[i] = 
ptn[i]; if (ptn[i] > pMax[i]) pMax[i] = ptn[i]; } // corner 7 ptn = xform.indexToWorld(max); for (int i = 0; i < 3; ++i) { if (ptn[i] < pMin[i]) pMin[i] = ptn[i]; if (ptn[i] > pMax[i]) pMax[i] = ptn[i]; } // corner 8 ijk[0] = max.x(); ijk[1] = max.y(); ijk[2] = min.z(); ptn = xform.indexToWorld(ijk); for (int i = 0; i < 3; ++i) { if (ptn[i] < pMin[i]) pMin[i] = ptn[i]; if (ptn[i] > pMax[i]) pMax[i] = ptn[i]; } return openvdb::BBoxd(pMin, pMax); } //////////////////////////////////////// void ViewerImpl::updateCutPlanes(int wheelPos) { double speed = std::abs(mWheelPos - wheelPos); if (mWheelPos < wheelPos) mClipBox->update(speed); else mClipBox->update(-speed); setNeedsDisplay(); } //////////////////////////////////////// void ViewerImpl::swapBuffers() { glfwSwapBuffers(mWindow); } //////////////////////////////////////// void ViewerImpl::setWindowTitle(double fps) { std::ostringstream ss; ss << mProgName << ": " << (mGridName.empty() ? std::string("OpenVDB") : mGridName) << " (" << (mGridIdx + 1) << " of " << mGrids.size() << ") @ " << std::setprecision(1) << std::fixed << fps << " fps"; if (mWindow) glfwSetWindowTitle(mWindow, ss.str().c_str()); } //////////////////////////////////////// void ViewerImpl::showPrevGrid() { if (const size_t numGrids = mGrids.size()) { size_t idx = ((numGrids + mGridIdx) - 1) % numGrids; showNthGrid(idx); } } void ViewerImpl::showNextGrid() { if (const size_t numGrids = mGrids.size()) { size_t idx = (mGridIdx + 1) % numGrids; showNthGrid(idx); } } void ViewerImpl::showNthGrid(size_t n) { if (mGrids.empty()) return; n = n % mGrids.size(); if (n == mGridIdx) return; mGridName = mGrids[n]->getName(); mGridIdx = n; // save render settings std::vector<bool> active(mRenderModules.size()); for (size_t i = 0, I = active.size(); i < I; ++i) { active[i] = mRenderModules[i]->visible(); } mRenderModules.clear(); mRenderModules.push_back(RenderModulePtr(new TreeTopologyModule(mGrids[n]))); mRenderModules.push_back(RenderModulePtr(new 
MeshModule(mGrids[n]))); mRenderModules.push_back(RenderModulePtr(new VoxelModule(mGrids[n]))); if (active.empty()) { for (size_t i = 1, I = mRenderModules.size(); i < I; ++i) { mRenderModules[i]->setVisible(false); } } else { for (size_t i = 0, I = active.size(); i < I; ++i) { mRenderModules[i]->setVisible(active[i]); } } // Collect info { std::ostringstream ostrm; std::string s = mGrids[n]->getName(); const openvdb::GridClass cls = mGrids[n]->getGridClass(); if (!s.empty()) ostrm << s << " / "; ostrm << mGrids[n]->valueType() << " / "; if (cls == openvdb::GRID_UNKNOWN) ostrm << " class unknown"; else ostrm << " " << openvdb::GridBase::gridClassToString(cls); mGridInfo = ostrm.str(); } { openvdb::Coord dim = mGrids[n]->evalActiveVoxelDim(); std::ostringstream ostrm; ostrm << dim[0] << " x " << dim[1] << " x " << dim[2] << " / voxel size " << std::setprecision(4) << mGrids[n]->voxelSize()[0] << " (" << mGrids[n]->transform().mapType() << ")"; mTransformInfo = ostrm.str(); } { std::ostringstream ostrm; const openvdb::Index64 count = mGrids[n]->activeVoxelCount(); ostrm << openvdb::util::formattedInt(count) << " active voxel" << (count == 1 ? "" : "s"); mTreeInfo = ostrm.str(); } { if (mGrids[n]->isType<openvdb::points::PointDataGrid>()) { const openvdb::points::PointDataGrid::ConstPtr points = openvdb::gridConstPtrCast<openvdb::points::PointDataGrid>(mGrids[n]); const openvdb::Index64 count = openvdb::points::pointCount(points->tree()); std::ostringstream ostrm; ostrm << " / " << openvdb::util::formattedInt(count) << " point" << (count == 1 ? "" : "s"); mTreeInfo.append(ostrm.str()); } } setWindowTitle(); } //////////////////////////////////////// void ViewerImpl::keyCallback(int key, int action) { mCamera->keyCallback(key, action); if (mWindow == nullptr) return; const bool keyPress = (glfwGetKey(mWindow, key) == GLFW_PRESS); /// @todo Should use "modifiers" argument to keyCB(). 
mShiftIsDown = glfwGetKey(mWindow, GLFW_KEY_LEFT_SHIFT); mCtrlIsDown = glfwGetKey(mWindow, GLFW_KEY_LEFT_CONTROL); if (keyPress) { switch (key) { case '1': case GLFW_KEY_KP_1: toggleRenderModule(0); break; case '2': case GLFW_KEY_KP_2: toggleRenderModule(1); break; case '3': case GLFW_KEY_KP_3: toggleRenderModule(2); break; case 'c': case 'C': mClipBox->reset(); break; case 'h': case 'H': // center home mCamera->lookAt(openvdb::Vec3d(0.0), 10.0); break; case 'g': case 'G': // center geometry mCamera->lookAtTarget(); break; case 'i': case 'I': toggleInfoText(); break; case GLFW_KEY_LEFT: showPrevGrid(); break; case GLFW_KEY_RIGHT: showNextGrid(); break; case GLFW_KEY_ESCAPE: glfwSetWindowShouldClose(mWindow, true); break; } } switch (key) { case 'x': case 'X': mClipBox->activateXPlanes() = keyPress; break; case 'y': case 'Y': mClipBox->activateYPlanes() = keyPress; break; case 'z': case 'Z': mClipBox->activateZPlanes() = keyPress; break; } mClipBox->shiftIsDown() = mShiftIsDown; mClipBox->ctrlIsDown() = mCtrlIsDown; setNeedsDisplay(); } void ViewerImpl::mouseButtonCallback(int button, int action) { mCamera->mouseButtonCallback(button, action); mClipBox->mouseButtonCallback(button, action); if (mCamera->needsDisplay()) setNeedsDisplay(); } void ViewerImpl::mousePosCallback(int x, int y) { bool handled = mClipBox->mousePosCallback(x, y); if (!handled) mCamera->mousePosCallback(x, y); if (mCamera->needsDisplay()) setNeedsDisplay(); } void ViewerImpl::mouseWheelCallback(int pos) { pos += mWheelPos; if (mClipBox->isActive()) { updateCutPlanes(pos); } else { mCamera->mouseWheelCallback(pos, mWheelPos); if (mCamera->needsDisplay()) setNeedsDisplay(); } mWheelPos = pos; } void ViewerImpl::windowSizeCallback(int, int) { setNeedsDisplay(); } void ViewerImpl::windowRefreshCallback() { setNeedsDisplay(); } //////////////////////////////////////// bool ViewerImpl::needsDisplay() { if (mUpdates < 2) { mUpdates += 1; return true; } return false; } void 
ViewerImpl::setNeedsDisplay() { mUpdates = 0; } void ViewerImpl::toggleRenderModule(size_t n) { mRenderModules[n]->setVisible(!mRenderModules[n]->visible()); } void ViewerImpl::toggleInfoText() { mShowInfo = !mShowInfo; } } // namespace openvdb_viewer
25,156
C++
22.511215
97
0.574018
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/Font.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_VIEWER_FONT_HAS_BEEN_INCLUDED #define OPENVDB_VIEWER_FONT_HAS_BEEN_INCLUDED #include <string> #if defined(__APPLE__) || defined(MACOSX) #include <OpenGL/gl.h> #include <OpenGL/glu.h> #elif defined(_WIN32) #include <GL/glew.h> #else #include <GL/gl.h> #include <GL/glu.h> #endif namespace openvdb_viewer { class BitmapFont13 { public: BitmapFont13() {} static void initialize(); static void enableFontRendering(); static void disableFontRendering(); static void print(GLint px, GLint py, const std::string&); private: static GLuint sOffset; static GLubyte sCharacters[95][13]; }; } // namespace openvdb_viewer #endif // OPENVDB_VIEWER_FONT_HAS_BEEN_INCLUDED
798
C
18.023809
62
0.715539
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/ClipBox.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include "ClipBox.h"

namespace openvdb_viewer {

// Plane equations are in glClipPlane() form {a,b,c,d}; only d (index [3]) is
// ever moved after construction. For the negated-normal planes (front, left,
// top) the stored d is the negated coordinate of the plane's position.
ClipBox::ClipBox()
    : mStepSize(1.0)
    , mBBox()
    , mXIsActive(false)
    , mYIsActive(false)
    , mZIsActive(false)
    , mShiftIsDown(false)
    , mCtrlIsDown(false)
{
    GLdouble front [] = { 0.0, 0.0, 1.0, 0.0};
    std::copy(front, front + 4, mFrontPlane);
    GLdouble back [] = { 0.0, 0.0,-1.0, 0.0};
    std::copy(back, back + 4, mBackPlane);
    GLdouble left [] = { 1.0, 0.0, 0.0, 0.0};
    std::copy(left, left + 4, mLeftPlane);
    GLdouble right [] = {-1.0, 0.0, 0.0, 0.0};
    std::copy(right, right + 4, mRightPlane);
    GLdouble top [] = { 0.0, 1.0, 0.0, 0.0};
    std::copy(top, top + 4, mTopPlane);
    GLdouble bottom [] = { 0.0,-1.0, 0.0, 0.0};
    std::copy(bottom, bottom + 4, mBottomPlane);
}


// Set the bounds the clip planes operate within, then snap the planes to them.
void
ClipBox::setBBox(const openvdb::BBoxd& bbox)
{
    mBBox = bbox;
    reset();
}


// Move the active planes by `steps` increments. Per axis: shift moves the
// min-side plane, plain moves the max-side plane, ctrl moves both. Each plane
// is clamped to the bbox and kept at least one step away from its partner.
// NOTE: statement order matters here (move, clamp-to-partner, clamp-to-bbox).
void
ClipBox::update(double steps)
{
    if (mXIsActive) {
        GLdouble s = steps * mStepSize.x() * 4.0;

        if (mShiftIsDown || mCtrlIsDown) {
            mLeftPlane[3] -= s;
            mLeftPlane[3] = -std::min(-mLeftPlane[3], (mRightPlane[3] - mStepSize.x()));
            mLeftPlane[3] = -std::max(-mLeftPlane[3], mBBox.min().x());
        }

        if (!mShiftIsDown || mCtrlIsDown) {
            mRightPlane[3] += s;
            mRightPlane[3] = std::min(mRightPlane[3], mBBox.max().x());
            mRightPlane[3] = std::max(mRightPlane[3], (-mLeftPlane[3] + mStepSize.x()));
        }
    }

    if (mYIsActive) {
        GLdouble s = steps * mStepSize.y() * 4.0;

        if (mShiftIsDown || mCtrlIsDown) {
            mTopPlane[3] -= s;
            mTopPlane[3] = -std::min(-mTopPlane[3], (mBottomPlane[3] - mStepSize.y()));
            mTopPlane[3] = -std::max(-mTopPlane[3], mBBox.min().y());
        }

        if (!mShiftIsDown || mCtrlIsDown) {
            mBottomPlane[3] += s;
            mBottomPlane[3] = std::min(mBottomPlane[3], mBBox.max().y());
            mBottomPlane[3] = std::max(mBottomPlane[3], (-mTopPlane[3] + mStepSize.y()));
        }
    }

    if (mZIsActive) {
        GLdouble s = steps * mStepSize.z() * 4.0;

        if (mShiftIsDown || mCtrlIsDown) {
            mFrontPlane[3] -= s;
            mFrontPlane[3] = -std::min(-mFrontPlane[3], (mBackPlane[3] - mStepSize.z()));
            mFrontPlane[3] = -std::max(-mFrontPlane[3], mBBox.min().z());
        }

        if (!mShiftIsDown || mCtrlIsDown) {
            mBackPlane[3] += s;
            mBackPlane[3] = std::min(mBackPlane[3], mBBox.max().z());
            mBackPlane[3] = std::max(mBackPlane[3], (-mFrontPlane[3] + mStepSize.z()));
        }
    }
}


// Snap every plane back to the bbox faces.
// NOTE(review): std::abs() on the min coordinates only yields the intended
// negated offset when the bbox min is <= 0; for a bbox lying entirely in the
// positive octant this differs from -min() — confirm against upstream intent.
void
ClipBox::reset()
{
    mFrontPlane[3] = std::abs(mBBox.min().z());
    mBackPlane[3] = mBBox.max().z();
    mLeftPlane[3] = std::abs(mBBox.min().x());
    mRightPlane[3] = mBBox.max().x();
    mTopPlane[3] = std::abs(mBBox.min().y());
    mBottomPlane[3] = mBBox.max().y();
}


// Upload the six plane equations to GL_CLIP_PLANE0..5.
void
ClipBox::update() const
{
    glClipPlane(GL_CLIP_PLANE0, mFrontPlane);
    glClipPlane(GL_CLIP_PLANE1, mBackPlane);
    glClipPlane(GL_CLIP_PLANE2, mLeftPlane);
    glClipPlane(GL_CLIP_PLANE3, mRightPlane);
    glClipPlane(GL_CLIP_PLANE4, mTopPlane);
    glClipPlane(GL_CLIP_PLANE5, mBottomPlane);
}


// Enable only the planes that have moved inside the bbox.
void
ClipBox::enableClipping() const
{
    update();
    if (-mFrontPlane[3] > mBBox.min().z()) glEnable(GL_CLIP_PLANE0);
    if (mBackPlane[3] < mBBox.max().z()) glEnable(GL_CLIP_PLANE1);
    if (-mLeftPlane[3] > mBBox.min().x()) glEnable(GL_CLIP_PLANE2);
    if (mRightPlane[3] < mBBox.max().x()) glEnable(GL_CLIP_PLANE3);
    if (-mTopPlane[3] > mBBox.min().y()) glEnable(GL_CLIP_PLANE4);
    if (mBottomPlane[3] < mBBox.max().y()) glEnable(GL_CLIP_PLANE5);
}


void
ClipBox::disableClipping() const
{
    glDisable(GL_CLIP_PLANE0);
    glDisable(GL_CLIP_PLANE1);
    glDisable(GL_CLIP_PLANE2);
    glDisable(GL_CLIP_PLANE3);
    glDisable(GL_CLIP_PLANE4);
    glDisable(GL_CLIP_PLANE5);
}


// Draw a colored outline for each plane that has moved inside the bbox
// (blue = z, red = x, green = y), plus a gray bbox wireframe if any did.
void
ClipBox::render()
{
    bool drawBbox = false;

    const GLenum geoMode = GL_LINE_LOOP;

    glColor3d(0.1, 0.1, 0.9);
    if (-mFrontPlane[3] > mBBox.min().z()) {
        glBegin(geoMode);
        glVertex3d(mBBox.min().x(), mBBox.min().y(), -mFrontPlane[3]);
        glVertex3d(mBBox.min().x(), mBBox.max().y(), -mFrontPlane[3]);
        glVertex3d(mBBox.max().x(), mBBox.max().y(), -mFrontPlane[3]);
        glVertex3d(mBBox.max().x(), mBBox.min().y(), -mFrontPlane[3]);
        glEnd();
        drawBbox = true;
    }

    if (mBackPlane[3] < mBBox.max().z()) {
        glBegin(geoMode);
        glVertex3d(mBBox.min().x(), mBBox.min().y(), mBackPlane[3]);
        glVertex3d(mBBox.min().x(), mBBox.max().y(), mBackPlane[3]);
        glVertex3d(mBBox.max().x(), mBBox.max().y(), mBackPlane[3]);
        glVertex3d(mBBox.max().x(), mBBox.min().y(), mBackPlane[3]);
        glEnd();
        drawBbox = true;
    }

    glColor3d(0.9, 0.1, 0.1);
    if (-mLeftPlane[3] > mBBox.min().x()) {
        glBegin(geoMode);
        glVertex3d(-mLeftPlane[3], mBBox.min().y(), mBBox.min().z());
        glVertex3d(-mLeftPlane[3], mBBox.max().y(), mBBox.min().z());
        glVertex3d(-mLeftPlane[3], mBBox.max().y(), mBBox.max().z());
        glVertex3d(-mLeftPlane[3], mBBox.min().y(), mBBox.max().z());
        glEnd();
        drawBbox = true;
    }

    if (mRightPlane[3] < mBBox.max().x()) {
        glBegin(geoMode);
        glVertex3d(mRightPlane[3], mBBox.min().y(), mBBox.min().z());
        glVertex3d(mRightPlane[3], mBBox.max().y(), mBBox.min().z());
        glVertex3d(mRightPlane[3], mBBox.max().y(), mBBox.max().z());
        glVertex3d(mRightPlane[3], mBBox.min().y(), mBBox.max().z());
        glEnd();
        drawBbox = true;
    }

    glColor3d(0.1, 0.9, 0.1);
    if (-mTopPlane[3] > mBBox.min().y()) {
        glBegin(geoMode);
        glVertex3d(mBBox.min().x(), -mTopPlane[3], mBBox.min().z());
        glVertex3d(mBBox.min().x(), -mTopPlane[3], mBBox.max().z());
        glVertex3d(mBBox.max().x(), -mTopPlane[3], mBBox.max().z());
        glVertex3d(mBBox.max().x(), -mTopPlane[3], mBBox.min().z());
        glEnd();
        drawBbox = true;
    }

    if (mBottomPlane[3] < mBBox.max().y()) {
        glBegin(geoMode);
        glVertex3d(mBBox.min().x(), mBottomPlane[3], mBBox.min().z());
        glVertex3d(mBBox.min().x(), mBottomPlane[3], mBBox.max().z());
        glVertex3d(mBBox.max().x(), mBottomPlane[3], mBBox.max().z());
        glVertex3d(mBBox.max().x(), mBottomPlane[3], mBBox.min().z());
        glEnd();
        drawBbox = true;
    }

    if (drawBbox) {
        glColor3d(0.5, 0.5, 0.5);
        glBegin(GL_LINE_LOOP);
        glVertex3d(mBBox.min().x(), mBBox.min().y(), mBBox.min().z());
        glVertex3d(mBBox.min().x(), mBBox.min().y(), mBBox.max().z());
        glVertex3d(mBBox.max().x(), mBBox.min().y(), mBBox.max().z());
        glVertex3d(mBBox.max().x(), mBBox.min().y(), mBBox.min().z());
        glEnd();

        glBegin(GL_LINE_LOOP);
        glVertex3d(mBBox.min().x(), mBBox.max().y(), mBBox.min().z());
        glVertex3d(mBBox.min().x(), mBBox.max().y(), mBBox.max().z());
        glVertex3d(mBBox.max().x(), mBBox.max().y(), mBBox.max().z());
        glVertex3d(mBBox.max().x(), mBBox.max().y(), mBBox.min().z());
        glEnd();

        glBegin(GL_LINES);
        glVertex3d(mBBox.min().x(), mBBox.min().y(), mBBox.min().z());
        glVertex3d(mBBox.min().x(), mBBox.max().y(), mBBox.min().z());
        glVertex3d(mBBox.min().x(), mBBox.min().y(), mBBox.max().z());
        glVertex3d(mBBox.min().x(), mBBox.max().y(), mBBox.max().z());
        glVertex3d(mBBox.max().x(), mBBox.min().y(), mBBox.max().z());
        glVertex3d(mBBox.max().x(), mBBox.max().y(), mBBox.max().z());
        glVertex3d(mBBox.max().x(), mBBox.min().y(), mBBox.min().z());
        glVertex3d(mBBox.max().x(), mBBox.max().y(), mBBox.min().z());
        glEnd();
    }
}


////////////////////////////////////////


bool
ClipBox::mouseButtonCallback(int /*button*/, int /*action*/)
{
    return false; // unhandled
}


bool
ClipBox::mousePosCallback(int /*x*/, int /*y*/)
{
    return false; // unhandled
}

} // namespace openvdb_viewer
8,209
C++
29.749064
89
0.545621
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/Viewer.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_VIEWER_VIEWER_HAS_BEEN_INCLUDED #define OPENVDB_VIEWER_VIEWER_HAS_BEEN_INCLUDED #include <openvdb/openvdb.h> #include <string> namespace openvdb_viewer { class Viewer; enum { DEFAULT_WIDTH = 900, DEFAULT_HEIGHT = 800 }; /// @brief Initialize and return a viewer. /// @param progName the name of the calling program (for use in info displays) /// @param background if true, run the viewer in a separate thread /// @note Currently, the viewer window is a singleton (but that might change /// in the future), so although this function returns a new Viewer instance /// on each call, all instances are associated with the same window. Viewer init(const std::string& progName, bool background); /// @brief Destroy all viewer windows and release resources. /// @details This should be called from the main thread before your program exits. void exit(); /// Manager for a window that displays OpenVDB grids class Viewer { public: /// Set the size of and open the window associated with this viewer. void open(int width = DEFAULT_WIDTH, int height = DEFAULT_HEIGHT); /// Display the given grids. void view(const openvdb::GridCPtrVec&); /// @brief Process any pending user input (keyboard, mouse, etc.) /// in the window associated with this viewer. void handleEvents(); /// @brief Close the window associated with this viewer. /// @warning The window associated with this viewer might be shared with other viewers. void close(); /// Resize the window associated with this viewer. void resize(int width, int height); /// Return a string with version number information. std::string getVersionString() const; private: friend Viewer init(const std::string&, bool); Viewer(); }; } // namespace openvdb_viewer #endif // OPENVDB_VIEWER_VIEWER_HAS_BEEN_INCLUDED
1,939
C
29.79365
91
0.720474
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/ClipBox.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_VIEWER_CLIPBOX_HAS_BEEN_INCLUDED #define OPENVDB_VIEWER_CLIPBOX_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #if defined(__APPLE__) || defined(MACOSX) #include <OpenGL/gl.h> #include <OpenGL/glu.h> #elif defined(_WIN32) #include <GL/glew.h> #else #include <GL/gl.h> #include <GL/glu.h> #endif namespace openvdb_viewer { class ClipBox { public: ClipBox(); void enableClipping() const; void disableClipping() const; void setBBox(const openvdb::BBoxd&); void setStepSize(const openvdb::Vec3d& s) { mStepSize = s; } void render(); void update(double steps); void reset(); bool isActive() const { return (mXIsActive || mYIsActive ||mZIsActive); } bool& activateXPlanes() { return mXIsActive; } bool& activateYPlanes() { return mYIsActive; } bool& activateZPlanes() { return mZIsActive; } bool& shiftIsDown() { return mShiftIsDown; } bool& ctrlIsDown() { return mCtrlIsDown; } bool mouseButtonCallback(int button, int action); bool mousePosCallback(int x, int y); private: void update() const; openvdb::Vec3d mStepSize; openvdb::BBoxd mBBox; bool mXIsActive, mYIsActive, mZIsActive, mShiftIsDown, mCtrlIsDown; GLdouble mFrontPlane[4], mBackPlane[4], mLeftPlane[4], mRightPlane[4], mTopPlane[4], mBottomPlane[4]; }; // class ClipBox } // namespace openvdb_viewer #endif // OPENVDB_VIEWER_CLIPBOX_HAS_BEEN_INCLUDED
1,521
C
23.15873
77
0.69428
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/Camera.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file Camera.h /// @brief Basic GL camera class #ifndef OPENVDB_VIEWER_CAMERA_HAS_BEEN_INCLUDED #define OPENVDB_VIEWER_CAMERA_HAS_BEEN_INCLUDED #include <openvdb/Types.h> struct GLFWwindow; // forward declaration namespace openvdb_viewer { class Camera { public: Camera(); void setWindow(GLFWwindow* w) { mWindow = w; } void aim(); void lookAt(const openvdb::Vec3d& p, double dist = 1.0); void lookAtTarget(); void setTarget(const openvdb::Vec3d& p, double dist = 1.0); void setNearFarPlanes(double n, double f) { mNearPlane = n; mFarPlane = f; } void setFieldOfView(double degrees) { mFov = degrees; } void setSpeed(double zoomSpeed = 0.1, double strafeSpeed = 0.002, double tumblingSpeed = 0.02); void keyCallback(int key, int action); void mouseButtonCallback(int button, int action); void mousePosCallback(int x, int y); void mouseWheelCallback(int pos, int prevPos); bool needsDisplay() const { return mNeedsDisplay; } private: // Camera parameters double mFov, mNearPlane, mFarPlane; openvdb::Vec3d mTarget, mLookAt, mUp, mForward, mRight, mEye; double mTumblingSpeed, mZoomSpeed, mStrafeSpeed; double mHead, mPitch, mTargetDistance, mDistance; // Input states bool mMouseDown, mStartTumbling, mZoomMode, mChanged, mNeedsDisplay; double mMouseXPos, mMouseYPos; GLFWwindow* mWindow; static const double sDeg2rad; }; // class Camera } // namespace openvdb_viewer #endif // OPENVDB_VIEWER_CAMERA_HAS_BEEN_INCLUDED
1,624
C
25.639344
99
0.710591
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/Font.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include "Font.h"

#include <openvdb/Types.h> // for OPENVDB_START_THREADSAFE_STATIC_WRITE


namespace openvdb_viewer {

GLuint BitmapFont13::sOffset = 0;

// 8x13-pixel glyph bitmaps for the 95 printable ASCII characters (32..126),
// one 13-byte row-major bitmap per glyph, consumed by glBitmap() below.
// (Table data reproduced verbatim; rows are kept in their original packed form.)
GLubyte BitmapFont13::sCharacters[95][13] = {
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36 }, { 0x00, 0x00, 0x00, 0x66, 0x66, 0xFF, 0x66, 0x66, 0xFF, 0x66, 0x66, 0x00, 0x00 }, { 0x00, 0x00, 0x18, 0x7E, 0xFF, 0x1B, 0x1F, 0x7E, 0xF8, 0xD8, 0xFF, 0x7E, 0x18 }, { 0x00, 0x00, 0x0E, 0x1B, 0xDB, 0x6E, 0x30, 0x18, 0x0C, 0x76, 0xDB, 0xD8, 0x70 }, { 0x00, 0x00, 0x7F, 0xC6, 0xCF, 0xD8, 0x70, 0x70, 0xD8, 0xCC, 0xCC, 0x6C, 0x38 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x1C, 0x0C, 0x0E }, { 0x00, 0x00, 0x0C, 0x18, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x18, 0x0C }, { 0x00, 0x00, 0x30, 0x18, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x18, 0x30 }, { 0x00, 0x00, 0x00, 0x00, 0x99, 0x5A, 0x3C, 0xFF, 0x3C, 0x5A, 0x99, 0x00, 0x00 }, { 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0xFF, 0xFF, 0x18, 0x18, 0x18, 0x00, 0x00 }, { 0x00, 0x00, 0x30, 0x18, 0x1C, 0x1C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x00, 0x38, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x60, 0x60, 0x30, 0x30, 0x18, 0x18, 0x0C, 0x0C, 0x06, 0x06, 0x03, 0x03 }, { 0x00, 0x00, 0x3C, 0x66, 0xC3, 0xE3, 0xF3, 0xDB, 0xCF, 0xC7, 0xC3, 0x66, 0x3C }, { 0x00, 0x00, 0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x78, 0x38, 0x18 }, { 0x00, 0x00, 0xFF, 0xC0, 0xC0, 0x60, 0x30, 0x18, 0x0C, 0x06, 0x03, 0xE7, 0x7E }, { 0x00, 0x00, 0x7E, 0xE7, 0x03, 0x03, 0x07, 0x7E, 0x07, 0x03, 0x03, 0xE7, 0x7E }, { 0x00, 0x00, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0xFF, 0xCC, 0x6C, 0x3C, 0x1C,
    0x0C }, { 0x00, 0x00, 0x7E, 0xE7, 0x03, 0x03, 0x07, 0xFE, 0xC0, 0xC0, 0xC0, 0xC0, 0xFF }, { 0x00, 0x00, 0x7E, 0xE7, 0xC3, 0xC3, 0xC7, 0xFE, 0xC0, 0xC0, 0xC0, 0xE7, 0x7E }, { 0x00, 0x00, 0x30, 0x30, 0x30, 0x30, 0x18, 0x0C, 0x06, 0x03, 0x03, 0x03, 0xFF }, { 0x00, 0x00, 0x7E, 0xE7, 0xC3, 0xC3, 0xE7, 0x7E, 0xE7, 0xC3, 0xC3, 0xE7, 0x7E }, { 0x00, 0x00, 0x7E, 0xE7, 0x03, 0x03, 0x03, 0x7F, 0xE7, 0xC3, 0xC3, 0xE7, 0x7E }, { 0x00, 0x00, 0x00, 0x38, 0x38, 0x00, 0x00, 0x38, 0x38, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x30, 0x18, 0x1C, 0x1C, 0x00, 0x00, 0x1C, 0x1C, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0x60, 0x30, 0x18, 0x0C, 0x06 }, { 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x60, 0x30, 0x18, 0x0C, 0x06, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60 }, { 0x00, 0x00, 0x18, 0x00, 0x00, 0x18, 0x18, 0x0C, 0x06, 0x03, 0xC3, 0xC3, 0x7E }, { 0x00, 0x00, 0x3F, 0x60, 0xCF, 0xDB, 0xD3, 0xDD, 0xC3, 0x7E, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xC3, 0xC3, 0xC3, 0xC3, 0xFF, 0xC3, 0xC3, 0xC3, 0x66, 0x3C, 0x18 }, { 0x00, 0x00, 0xFE, 0xC7, 0xC3, 0xC3, 0xC7, 0xFE, 0xC7, 0xC3, 0xC3, 0xC7, 0xFE }, { 0x00, 0x00, 0x7E, 0xE7, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xE7, 0x7E }, { 0x00, 0x00, 0xFC, 0xCE, 0xC7, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC7, 0xCE, 0xFC }, { 0x00, 0x00, 0xFF, 0xC0, 0xC0, 0xC0, 0xC0, 0xFC, 0xC0, 0xC0, 0xC0, 0xC0, 0xFF }, { 0x00, 0x00, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xFC, 0xC0, 0xC0, 0xC0, 0xFF }, { 0x00, 0x00, 0x7E, 0xE7, 0xC3, 0xC3, 0xCF, 0xC0, 0xC0, 0xC0, 0xC0, 0xE7, 0x7E }, { 0x00, 0x00, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xFF, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3 }, { 0x00, 0x00, 0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x7E }, { 0x00, 0x00, 0x7C, 0xEE, 0xC6, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 }, { 0x00, 0x00, 0xC3, 0xC6, 0xCC, 0xD8, 0xF0, 0xE0, 0xF0, 0xD8, 0xCC, 0xC6, 0xC3 }, { 0x00, 0x00, 0xFF, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0 }, { 0x00, 0x00, 0xC3,
    0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xDB, 0xFF, 0xFF, 0xE7, 0xC3 }, { 0x00, 0x00, 0xC7, 0xC7, 0xCF, 0xCF, 0xDF, 0xDB, 0xFB, 0xF3, 0xF3, 0xE3, 0xE3 }, { 0x00, 0x00, 0x7E, 0xE7, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xE7, 0x7E }, { 0x00, 0x00, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xFE, 0xC7, 0xC3, 0xC3, 0xC7, 0xFE }, { 0x00, 0x00, 0x3F, 0x6E, 0xDF, 0xDB, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0x66, 0x3C }, { 0x00, 0x00, 0xC3, 0xC6, 0xCC, 0xD8, 0xF0, 0xFE, 0xC7, 0xC3, 0xC3, 0xC7, 0xFE }, { 0x00, 0x00, 0x7E, 0xE7, 0x03, 0x03, 0x07, 0x7E, 0xE0, 0xC0, 0xC0, 0xE7, 0x7E }, { 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xFF }, { 0x00, 0x00, 0x7E, 0xE7, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3 }, { 0x00, 0x00, 0x18, 0x3C, 0x3C, 0x66, 0x66, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3 }, { 0x00, 0x00, 0xC3, 0xE7, 0xFF, 0xFF, 0xDB, 0xDB, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3 }, { 0x00, 0x00, 0xC3, 0x66, 0x66, 0x3C, 0x3C, 0x18, 0x3C, 0x3C, 0x66, 0x66, 0xC3 }, { 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3C, 0x3C, 0x66, 0x66, 0xC3 }, { 0x00, 0x00, 0xFF, 0xC0, 0xC0, 0x60, 0x30, 0x7E, 0x0C, 0x06, 0x03, 0x03, 0xFF }, { 0x00, 0x00, 0x3C, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x3C }, { 0x00, 0x03, 0x03, 0x06, 0x06, 0x0C, 0x0C, 0x18, 0x18, 0x30, 0x30, 0x60, 0x60 }, { 0x00, 0x00, 0x3C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x3C }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC3, 0x66, 0x3C, 0x18 }, { 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x30, 0x70 }, { 0x00, 0x00, 0x7F, 0xC3, 0xC3, 0x7F, 0x03, 0xC3, 0x7E, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xFE, 0xC3, 0xC3, 0xC3, 0xC3, 0xFE, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0 }, { 0x00, 0x00, 0x7E, 0xC3, 0xC0, 0xC0, 0xC0, 0xC3, 0x7E, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x7F, 0xC3, 0xC3, 0xC3, 0xC3, 0x7F, 0x03, 0x03, 0x03, 0x03, 0x03 }, { 0x00, 0x00, 0x7F, 0xC0, 0xC0, 0xFE, 0xC3, 0xC3,
    0x7E, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x30, 0x30, 0x30, 0x30, 0x30, 0xFC, 0x30, 0x30, 0x30, 0x33, 0x1E }, { 0x7E, 0xC3, 0x03, 0x03, 0x7F, 0xC3, 0xC3, 0xC3, 0x7E, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xFE, 0xC0, 0xC0, 0xC0, 0xC0 }, { 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x18, 0x00 }, { 0x38, 0x6C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x00, 0x00, 0x0C, 0x00 }, { 0x00, 0x00, 0xC6, 0xCC, 0xF8, 0xF0, 0xD8, 0xCC, 0xC6, 0xC0, 0xC0, 0xC0, 0xC0 }, { 0x00, 0x00, 0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x78 }, { 0x00, 0x00, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xFE, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xFC, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x7C, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0x7C, 0x00, 0x00, 0x00, 0x00 }, { 0xC0, 0xC0, 0xC0, 0xFE, 0xC3, 0xC3, 0xC3, 0xC3, 0xFE, 0x00, 0x00, 0x00, 0x00 }, { 0x03, 0x03, 0x03, 0x7F, 0xC3, 0xC3, 0xC3, 0xC3, 0x7F, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xE0, 0xFE, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xFE, 0x03, 0x03, 0x7E, 0xC0, 0xC0, 0x7F, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x1C, 0x36, 0x30, 0x30, 0x30, 0x30, 0xFC, 0x30, 0x30, 0x30, 0x00 }, { 0x00, 0x00, 0x7E, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x18, 0x3C, 0x3C, 0x66, 0x66, 0xC3, 0xC3, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xC3, 0xE7, 0xFF, 0xDB, 0xC3, 0xC3, 0xC3, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xC3, 0x66, 0x3C, 0x18, 0x3C, 0x66, 0xC3, 0x00, 0x00, 0x00, 0x00 }, { 0xC0, 0x60, 0x60, 0x30, 0x18, 0x3C, 0x66, 0x66, 0xC3, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xFF, 0x60, 0x30, 0x18, 0x0C, 0x06, 0xFF, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x0F, 0x18, 0x18, 0x18, 0x38, 0xF0, 0x38, 0x18, 0x18, 0x18, 0x0F }, { 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18 }, { 0x00, 0x00, 0xF0, 0x18, 0x18, 0x18, 0x1C, 0x0F, 0x1C, 0x18, 0x18, 0x18, 0xF0 },
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x8F, 0xF1, 0x60, 0x00, 0x00, 0x00 }
}; // sCharacters


// Build one GL display list per glyph (ids sOffset+32 .. sOffset+126), each
// drawing an 8x13 bitmap with a 10-pixel horizontal advance.
void
BitmapFont13::initialize()
{
    OPENVDB_START_THREADSAFE_STATIC_WRITE
    glShadeModel(GL_FLAT);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    BitmapFont13::sOffset = glGenLists(128);
    for (GLuint c = 32; c < 127; ++c) {
        glNewList(c + BitmapFont13::sOffset, GL_COMPILE);
        glBitmap(8, 13, 0.0, 2.0, 10.0, 0.0, BitmapFont13::sCharacters[c-32]);
        glEndList();
    }
    OPENVDB_FINISH_THREADSAFE_STATIC_WRITE
}


// Switch to a pixel-aligned orthographic projection sized to the current
// viewport so print() coordinates are in window pixels.
void
BitmapFont13::enableFontRendering()
{
    glPushMatrix();

    GLint vp[4] = { 0, 0, 0, 0 };
    glGetIntegerv(GL_VIEWPORT, vp);
    const int width = vp[2], height = std::max(1, vp[3]);

    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, width, 0, height, -1.0, 1.0);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

    //glShadeModel(GL_FLAT);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
}


void
BitmapFont13::disableFontRendering()
{
    glFlush();
    glPopMatrix();
}


// Draw str at pixel position (px, py) by calling the per-glyph display lists;
// the character bytes index lists relative to glListBase(sOffset).
void
BitmapFont13::print(GLint px, GLint py, const std::string& str)
{
    glRasterPos2i(px, py);
    glPushAttrib(GL_LIST_BIT);
    glListBase(BitmapFont13::sOffset);
    glCallLists(GLsizei(str.length()), GL_UNSIGNED_BYTE,
        reinterpret_cast<const GLubyte*>(str.c_str()));
    glPopAttrib();
}

} // namespace openvdb_viewer
9,760
C++
56.417647
85
0.614344
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/RenderModules.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "RenderModules.h" #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointCount.h> #include <openvdb/points/PointConversion.h> #include <openvdb/tools/Morphology.h> #include <openvdb/tools/Prune.h> #include <openvdb/tree/LeafManager.h> #include <openvdb/util/logging.h> #include <algorithm> // for std::min() #include <cmath> // for std::abs(), std::fabs(), std::floor() #include <limits> #include <type_traits> // for std::is_const namespace openvdb_viewer { namespace util { /// Helper class used internally by processTypedGrid() template<typename GridType, typename OpType, bool IsConst/*=false*/> struct GridProcessor { static inline void call(OpType& op, openvdb::GridBase::Ptr grid) { op.template operator()<GridType>(openvdb::gridPtrCast<GridType>(grid)); } }; /// Helper class used internally by processTypedGrid() template<typename GridType, typename OpType> struct GridProcessor<GridType, OpType, /*IsConst=*/true> { static inline void call(OpType& op, openvdb::GridBase::ConstPtr grid) { op.template operator()<GridType>(openvdb::gridConstPtrCast<GridType>(grid)); } }; /// Helper function used internally by processTypedGrid() template<typename GridType, typename OpType, typename GridPtrType> inline void doProcessTypedGrid(GridPtrType grid, OpType& op) { GridProcessor<GridType, OpType, std::is_const<typename GridPtrType::element_type>::value>::call(op, grid); } //////////////////////////////////////// /// @brief Utility function that, given a generic grid pointer, /// calls a functor on the fully-resolved grid /// /// Usage: /// @code /// struct PruneOp { /// template<typename GridT> /// void operator()(typename GridT::Ptr grid) const { grid->tree()->prune(); } /// }; /// /// processTypedGrid(myGridPtr, PruneOp()); /// @endcode /// /// @return @c false if the grid type is unknown or unhandled. 
template<typename GridPtrType, typename OpType> bool processTypedGrid(GridPtrType grid, OpType& op) { using namespace openvdb; if (grid->template isType<BoolGrid>()) doProcessTypedGrid<BoolGrid>(grid, op); else if (grid->template isType<FloatGrid>()) doProcessTypedGrid<FloatGrid>(grid, op); else if (grid->template isType<DoubleGrid>()) doProcessTypedGrid<DoubleGrid>(grid, op); else if (grid->template isType<Int32Grid>()) doProcessTypedGrid<Int32Grid>(grid, op); else if (grid->template isType<Int64Grid>()) doProcessTypedGrid<Int64Grid>(grid, op); else if (grid->template isType<Vec3IGrid>()) doProcessTypedGrid<Vec3IGrid>(grid, op); else if (grid->template isType<Vec3SGrid>()) doProcessTypedGrid<Vec3SGrid>(grid, op); else if (grid->template isType<Vec3DGrid>()) doProcessTypedGrid<Vec3DGrid>(grid, op); else if (grid->template isType<points::PointDataGrid>()) { doProcessTypedGrid<points::PointDataGrid>(grid, op); } else return false; return true; } /// @brief Utility function that, given a generic grid pointer, calls /// a functor on the fully-resolved grid, provided that the grid's /// voxel values are scalars template<typename GridPtrType, typename OpType> bool processTypedScalarGrid(GridPtrType grid, OpType& op) { using namespace openvdb; if (grid->template isType<FloatGrid>()) doProcessTypedGrid<FloatGrid>(grid, op); else if (grid->template isType<DoubleGrid>()) doProcessTypedGrid<DoubleGrid>(grid, op); else if (grid->template isType<Int32Grid>()) doProcessTypedGrid<Int32Grid>(grid, op); else if (grid->template isType<Int64Grid>()) doProcessTypedGrid<Int64Grid>(grid, op); else return false; return true; } /// @brief Utility function that, given a generic grid pointer, calls /// a functor on the fully-resolved grid, provided that the grid's /// voxel values are scalars or PointIndex objects template<typename GridPtrType, typename OpType> bool processTypedScalarOrPointDataGrid(GridPtrType grid, OpType& op) { using namespace openvdb; if (processTypedScalarGrid(grid, 
op)) return true; if (grid->template isType<points::PointDataGrid>()) { doProcessTypedGrid<points::PointDataGrid>(grid, op); return true; } return false; } /// @brief Utility function that, given a generic grid pointer, calls /// a functor on the fully-resolved grid, provided that the grid's /// voxel values are vectors template<typename GridPtrType, typename OpType> bool processTypedVectorGrid(GridPtrType grid, OpType& op) { using namespace openvdb; if (grid->template isType<Vec3IGrid>()) doProcessTypedGrid<Vec3IGrid>(grid, op); else if (grid->template isType<Vec3SGrid>()) doProcessTypedGrid<Vec3SGrid>(grid, op); else if (grid->template isType<Vec3DGrid>()) doProcessTypedGrid<Vec3DGrid>(grid, op); else return false; return true; } template<class TreeType> class MinMaxVoxel { public: using LeafArray = openvdb::tree::LeafManager<TreeType>; using ValueType = typename TreeType::ValueType; // LeafArray = openvdb::tree::LeafManager<TreeType> leafs(myTree) MinMaxVoxel(LeafArray&); void runParallel(); void runSerial(); const ValueType& minVoxel() const { return mMin; } const ValueType& maxVoxel() const { return mMax; } inline MinMaxVoxel(const MinMaxVoxel<TreeType>&, tbb::split); inline void operator()(const tbb::blocked_range<size_t>&); inline void join(const MinMaxVoxel<TreeType>&); private: LeafArray& mLeafArray; ValueType mMin, mMax; }; template <class TreeType> MinMaxVoxel<TreeType>::MinMaxVoxel(LeafArray& leafs) : mLeafArray(leafs) , mMin(std::numeric_limits<ValueType>::max()) , mMax(std::numeric_limits<ValueType>::lowest()) { } template <class TreeType> inline MinMaxVoxel<TreeType>::MinMaxVoxel(const MinMaxVoxel<TreeType>& rhs, tbb::split) : mLeafArray(rhs.mLeafArray) , mMin(std::numeric_limits<ValueType>::max()) , mMax(std::numeric_limits<ValueType>::lowest()) { } template <class TreeType> void MinMaxVoxel<TreeType>::runParallel() { tbb::parallel_reduce(mLeafArray.getRange(), *this); } template <class TreeType> void MinMaxVoxel<TreeType>::runSerial() { 
(*this)(mLeafArray.getRange()); } template <class TreeType> inline void MinMaxVoxel<TreeType>::operator()(const tbb::blocked_range<size_t>& range) { typename TreeType::LeafNodeType::ValueOnCIter iter; for (size_t n = range.begin(); n < range.end(); ++n) { iter = mLeafArray.leaf(n).cbeginValueOn(); for (; iter; ++iter) { const ValueType value = iter.getValue(); mMin = std::min(mMin, value); mMax = std::max(mMax, value); } } } template <class TreeType> inline void MinMaxVoxel<TreeType>::join(const MinMaxVoxel<TreeType>& rhs) { mMin = std::min(mMin, rhs.mMin); mMax = std::max(mMax, rhs.mMax); } } // namespace util //////////////////////////////////////// // BufferObject BufferObject::BufferObject(): mVertexBuffer(0), mNormalBuffer(0), mIndexBuffer(0), mColorBuffer(0), mPrimType(GL_POINTS), mPrimNum(0) { } BufferObject::~BufferObject() { clear(); } void BufferObject::render() const { if (mPrimNum == 0 || !glIsBuffer(mIndexBuffer) || !glIsBuffer(mVertexBuffer)) { OPENVDB_LOG_DEBUG_RUNTIME("request to render empty or uninitialized buffer"); return; } const bool usesColorBuffer = glIsBuffer(mColorBuffer); const bool usesNormalBuffer = glIsBuffer(mNormalBuffer); glBindBuffer(GL_ARRAY_BUFFER, mVertexBuffer); glEnableClientState(GL_VERTEX_ARRAY); glVertexPointer(3, GL_FLOAT, 0, nullptr); if (usesColorBuffer) { glBindBuffer(GL_ARRAY_BUFFER, mColorBuffer); glEnableClientState(GL_COLOR_ARRAY); glColorPointer(3, GL_FLOAT, 0, nullptr); } if (usesNormalBuffer) { glEnableClientState(GL_NORMAL_ARRAY); glBindBuffer(GL_ARRAY_BUFFER, mNormalBuffer); glNormalPointer(GL_FLOAT, 0, nullptr); } glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndexBuffer); glDrawElements(mPrimType, mPrimNum, GL_UNSIGNED_INT, nullptr); // disable client-side capabilities if (usesColorBuffer) glDisableClientState(GL_COLOR_ARRAY); if (usesNormalBuffer) glDisableClientState(GL_NORMAL_ARRAY); // release vbo's glBindBuffer(GL_ARRAY_BUFFER, 0); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); } void 
BufferObject::genIndexBuffer(const std::vector<GLuint>& v, GLenum primType) { // clear old buffer if (glIsBuffer(mIndexBuffer) == GL_TRUE) glDeleteBuffers(1, &mIndexBuffer); // gen new buffer glGenBuffers(1, &mIndexBuffer); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndexBuffer); if (glIsBuffer(mIndexBuffer) == GL_FALSE) throw "Error: Unable to create index buffer"; // upload data glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLuint) * v.size(), &v[0], GL_STATIC_DRAW); // upload data if (GL_NO_ERROR != glGetError()) throw "Error: Unable to upload index buffer data"; // release buffer glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); mPrimNum = GLsizei(v.size()); mPrimType = primType; } void BufferObject::genVertexBuffer(const std::vector<GLfloat>& v) { if (glIsBuffer(mVertexBuffer) == GL_TRUE) glDeleteBuffers(1, &mVertexBuffer); glGenBuffers(1, &mVertexBuffer); glBindBuffer(GL_ARRAY_BUFFER, mVertexBuffer); if (glIsBuffer(mVertexBuffer) == GL_FALSE) throw "Error: Unable to create vertex buffer"; glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * v.size(), &v[0], GL_STATIC_DRAW); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to upload vertex buffer data"; glBindBuffer(GL_ARRAY_BUFFER, 0); } void BufferObject::genNormalBuffer(const std::vector<GLfloat>& v) { if (glIsBuffer(mNormalBuffer) == GL_TRUE) glDeleteBuffers(1, &mNormalBuffer); glGenBuffers(1, &mNormalBuffer); glBindBuffer(GL_ARRAY_BUFFER, mNormalBuffer); if (glIsBuffer(mNormalBuffer) == GL_FALSE) throw "Error: Unable to create normal buffer"; glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * v.size(), &v[0], GL_STATIC_DRAW); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to upload normal buffer data"; glBindBuffer(GL_ARRAY_BUFFER, 0); } void BufferObject::genColorBuffer(const std::vector<GLfloat>& v) { if (glIsBuffer(mColorBuffer) == GL_TRUE) glDeleteBuffers(1, &mColorBuffer); glGenBuffers(1, &mColorBuffer); glBindBuffer(GL_ARRAY_BUFFER, mColorBuffer); if (glIsBuffer(mColorBuffer) == GL_FALSE) throw "Error: 
Unable to create color buffer"; glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * v.size(), &v[0], GL_STATIC_DRAW); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to upload color buffer data"; glBindBuffer(GL_ARRAY_BUFFER, 0); } void BufferObject::clear() { if (glIsBuffer(mIndexBuffer) == GL_TRUE) glDeleteBuffers(1, &mIndexBuffer); if (glIsBuffer(mVertexBuffer) == GL_TRUE) glDeleteBuffers(1, &mVertexBuffer); if (glIsBuffer(mColorBuffer) == GL_TRUE) glDeleteBuffers(1, &mColorBuffer); if (glIsBuffer(mNormalBuffer) == GL_TRUE) glDeleteBuffers(1, &mNormalBuffer); mPrimType = GL_POINTS; mPrimNum = 0; } //////////////////////////////////////// ShaderProgram::ShaderProgram(): mProgram(0), mVertShader(0), mFragShader(0) { } ShaderProgram::~ShaderProgram() { clear(); } void ShaderProgram::setVertShader(const std::string& s) { mVertShader = glCreateShader(GL_VERTEX_SHADER); if (glIsShader(mVertShader) == GL_FALSE) throw "Error: Unable to create shader program."; GLint length = GLint(s.length()); const char *str = s.c_str(); glShaderSource(mVertShader, 1, &str, &length); glCompileShader(mVertShader); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to compile vertex shader."; } void ShaderProgram::setFragShader(const std::string& s) { mFragShader = glCreateShader(GL_FRAGMENT_SHADER); if (glIsShader(mFragShader) == GL_FALSE) throw "Error: Unable to create shader program."; GLint length = GLint(s.length()); const char *str = s.c_str(); glShaderSource(mFragShader, 1, &str, &length); glCompileShader(mFragShader); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to compile fragment shader."; } void ShaderProgram::build() { mProgram = glCreateProgram(); if (glIsProgram(mProgram) == GL_FALSE) throw "Error: Unable to create shader program."; if (glIsShader(mVertShader) == GL_TRUE) glAttachShader(mProgram, mVertShader); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to attach vertex shader."; if (glIsShader(mFragShader) == GL_TRUE) glAttachShader(mProgram, 
mFragShader); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to attach fragment shader."; glLinkProgram(mProgram); GLint linked = 0; glGetProgramiv(mProgram, GL_LINK_STATUS, &linked); if (!linked) throw "Error: Unable to link shader program."; } void ShaderProgram::build(const std::vector<GLchar*>& attributes) { mProgram = glCreateProgram(); if (glIsProgram(mProgram) == GL_FALSE) throw "Error: Unable to create shader program."; for (GLuint n = 0, N = GLuint(attributes.size()); n < N; ++n) { glBindAttribLocation(mProgram, n, attributes[n]); } if (glIsShader(mVertShader) == GL_TRUE) glAttachShader(mProgram, mVertShader); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to attach vertex shader."; if (glIsShader(mFragShader) == GL_TRUE) glAttachShader(mProgram, mFragShader); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to attach fragment shader."; glLinkProgram(mProgram); GLint linked; glGetProgramiv(mProgram, GL_LINK_STATUS, &linked); if (!linked) throw "Error: Unable to link shader program."; } void ShaderProgram::startShading() const { if (glIsProgram(mProgram) == GL_FALSE) { throw "Error: called startShading() on uncompiled shader program."; } glUseProgram(mProgram); } void ShaderProgram::stopShading() const { glUseProgram(0); } void ShaderProgram::clear() { GLsizei numShaders = 0; GLuint shaders[2] = { 0, 0 }; glGetAttachedShaders(mProgram, 2, &numShaders, shaders); // detach and remove shaders for (GLsizei n = 0; n < numShaders; ++n) { glDetachShader(mProgram, shaders[n]); if (glIsShader(shaders[n]) == GL_TRUE) glDeleteShader(shaders[n]); } // remove program if (glIsProgram(mProgram)) glDeleteProgram(mProgram); } //////////////////////////////////////// // ViewportModule ViewportModule::ViewportModule(): mAxisGnomonScale(1.5), mGroundPlaneScale(8.0) { } void ViewportModule::render() { if (!mIsVisible) return; /// @todo use VBO's // Ground plane glPushMatrix(); glScalef(mGroundPlaneScale, mGroundPlaneScale, mGroundPlaneScale); glColor3d(0.6, 
0.6, 0.6); OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN float step = 0.125; for (float x = -1; x < 1.125; x+=step) { if (std::fabs(x) == 0.5 || std::fabs(x) == 0.0) { glLineWidth(1.5); } else { glLineWidth(1.0); } glBegin(GL_LINES); glVertex3f(x, 0, 1); glVertex3f(x, 0, -1); glVertex3f(1, 0, x); glVertex3f(-1, 0, x); glEnd(); } OPENVDB_NO_FP_EQUALITY_WARNING_END glPopMatrix(); // Axis gnomon GLfloat modelview[16]; glGetFloatv(GL_MODELVIEW_MATRIX, &modelview[0]); // Stash current viewport settigs. GLint viewport[4]; glGetIntegerv(GL_VIEWPORT, &viewport[0]); GLint width = viewport[2] / 20; GLint height = viewport[3] / 20; glViewport(0, 0, width, height); glPushMatrix(); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); GLfloat campos[3] = { modelview[2], modelview[6], modelview[10] }; GLfloat up[3] = { modelview[1], modelview[5], modelview[9] }; gluLookAt(campos[0], campos[1], campos[2], 0.0, 0.0, 0.0, up[0], up[1], up[2]); glScalef(mAxisGnomonScale, mAxisGnomonScale, mAxisGnomonScale); glLineWidth(1.0); glBegin(GL_LINES); glColor3f(1.0f, 0.0f, 0.0f); glVertex3f(0, 0, 0); glVertex3f(1, 0, 0); glColor3f(0.0f, 1.0f, 0.0f ); glVertex3f(0, 0, 0); glVertex3f(0, 1, 0); glColor3f(0.0f, 0.0f, 1.0f); glVertex3f(0, 0, 0); glVertex3f(0, 0, 1); glEnd(); glLineWidth(1.0); // reset viewport glPopMatrix(); glViewport(viewport[0], viewport[1], viewport[2], viewport[3]); } //////////////////////////////////////// class TreeTopologyOp { public: TreeTopologyOp(BufferObject& buffer) : mBuffer(&buffer) {} template<typename GridType> void operator()(typename GridType::ConstPtr grid) { using openvdb::Index64; Index64 nodeCount = grid->tree().leafCount() + grid->tree().nonLeafCount(); const Index64 N = nodeCount * 8 * 3; std::vector<GLfloat> points(N); std::vector<GLfloat> colors(N); std::vector<GLuint> indices(N); openvdb::Vec3d ptn; openvdb::Vec3s color; openvdb::CoordBBox bbox; Index64 pOffset = 0, iOffset = 0, cOffset = 0, idx = 0; for (typename GridType::TreeType::NodeCIter iter = 
grid->tree().cbeginNode(); iter; ++iter) { iter.getBoundingBox(bbox); // Nodes are rendered as cell-centered const openvdb::Vec3d min(bbox.min().x()-0.5, bbox.min().y()-0.5, bbox.min().z()-0.5); const openvdb::Vec3d max(bbox.max().x()+0.5, bbox.max().y()+0.5, bbox.max().z()+0.5); // corner 1 ptn = grid->indexToWorld(min); points[pOffset++] = static_cast<GLfloat>(ptn[0]); points[pOffset++] = static_cast<GLfloat>(ptn[1]); points[pOffset++] = static_cast<GLfloat>(ptn[2]); // corner 2 ptn = openvdb::Vec3d(min.x(), min.y(), max.z()); ptn = grid->indexToWorld(ptn); points[pOffset++] = static_cast<GLfloat>(ptn[0]); points[pOffset++] = static_cast<GLfloat>(ptn[1]); points[pOffset++] = static_cast<GLfloat>(ptn[2]); // corner 3 ptn = openvdb::Vec3d(max.x(), min.y(), max.z()); ptn = grid->indexToWorld(ptn); points[pOffset++] = static_cast<GLfloat>(ptn[0]); points[pOffset++] = static_cast<GLfloat>(ptn[1]); points[pOffset++] = static_cast<GLfloat>(ptn[2]); // corner 4 ptn = openvdb::Vec3d(max.x(), min.y(), min.z()); ptn = grid->indexToWorld(ptn); points[pOffset++] = static_cast<GLfloat>(ptn[0]); points[pOffset++] = static_cast<GLfloat>(ptn[1]); points[pOffset++] = static_cast<GLfloat>(ptn[2]); // corner 5 ptn = openvdb::Vec3d(min.x(), max.y(), min.z()); ptn = grid->indexToWorld(ptn); points[pOffset++] = static_cast<GLfloat>(ptn[0]); points[pOffset++] = static_cast<GLfloat>(ptn[1]); points[pOffset++] = static_cast<GLfloat>(ptn[2]); // corner 6 ptn = openvdb::Vec3d(min.x(), max.y(), max.z()); ptn = grid->indexToWorld(ptn); points[pOffset++] = static_cast<GLfloat>(ptn[0]); points[pOffset++] = static_cast<GLfloat>(ptn[1]); points[pOffset++] = static_cast<GLfloat>(ptn[2]); // corner 7 ptn = grid->indexToWorld(max); points[pOffset++] = static_cast<GLfloat>(ptn[0]); points[pOffset++] = static_cast<GLfloat>(ptn[1]); points[pOffset++] = static_cast<GLfloat>(ptn[2]); // corner 8 ptn = openvdb::Vec3d(max.x(), max.y(), min.z()); ptn = grid->indexToWorld(ptn); points[pOffset++] = 
static_cast<GLfloat>(ptn[0]); points[pOffset++] = static_cast<GLfloat>(ptn[1]); points[pOffset++] = static_cast<GLfloat>(ptn[2]); // edge 1 indices[iOffset++] = GLuint(idx); indices[iOffset++] = GLuint(idx + 1); // edge 2 indices[iOffset++] = GLuint(idx + 1); indices[iOffset++] = GLuint(idx + 2); // edge 3 indices[iOffset++] = GLuint(idx + 2); indices[iOffset++] = GLuint(idx + 3); // edge 4 indices[iOffset++] = GLuint(idx + 3); indices[iOffset++] = GLuint(idx); // edge 5 indices[iOffset++] = GLuint(idx + 4); indices[iOffset++] = GLuint(idx + 5); // edge 6 indices[iOffset++] = GLuint(idx + 5); indices[iOffset++] = GLuint(idx + 6); // edge 7 indices[iOffset++] = GLuint(idx + 6); indices[iOffset++] = GLuint(idx + 7); // edge 8 indices[iOffset++] = GLuint(idx + 7); indices[iOffset++] = GLuint(idx + 4); // edge 9 indices[iOffset++] = GLuint(idx); indices[iOffset++] = GLuint(idx + 4); // edge 10 indices[iOffset++] = GLuint(idx + 1); indices[iOffset++] = GLuint(idx + 5); // edge 11 indices[iOffset++] = GLuint(idx + 2); indices[iOffset++] = GLuint(idx + 6); // edge 12 indices[iOffset++] = GLuint(idx + 3); indices[iOffset++] = GLuint(idx + 7); // node vertex color const int level = iter.getLevel(); color = sNodeColors[(level == 0) ? 3 : (level == 1) ? 
2 : 1]; for (Index64 n = 0; n < 8; ++n) { colors[cOffset++] = color[0]; colors[cOffset++] = color[1]; colors[cOffset++] = color[2]; } idx += 8; } // end node iteration // gen buffers and upload data to GPU mBuffer->genVertexBuffer(points); mBuffer->genColorBuffer(colors); mBuffer->genIndexBuffer(indices, GL_LINES); } private: BufferObject *mBuffer; static openvdb::Vec3s sNodeColors[]; }; // TreeTopologyOp openvdb::Vec3s TreeTopologyOp::sNodeColors[] = { openvdb::Vec3s(0.045f, 0.045f, 0.045f), // root openvdb::Vec3s(0.0432f, 0.33f, 0.0411023f), // first internal node level openvdb::Vec3s(0.871f, 0.394f, 0.01916f), // intermediate internal node levels openvdb::Vec3s(0.00608299f, 0.279541f, 0.625f) // leaf nodes }; //////////////////////////////////////// // Tree topology render module TreeTopologyModule::TreeTopologyModule(const openvdb::GridBase::ConstPtr& grid): mGrid(grid), mIsInitialized(false) { mShader.setVertShader( "#version 120\n" "void main() {\n" "gl_FrontColor = gl_Color;\n" "gl_Position = ftransform();\n" "gl_ClipVertex = gl_ModelViewMatrix * gl_Vertex;\n" "}\n"); mShader.setFragShader( "#version 120\n" "void main() {\n" "gl_FragColor = gl_Color;}\n"); mShader.build(); } void TreeTopologyModule::init() { mIsInitialized = true; // extract grid topology TreeTopologyOp drawTopology(mBufferObject); if (!util::processTypedGrid(mGrid, drawTopology)) { OPENVDB_LOG_INFO("Ignoring unrecognized grid type" " during tree topology module initialization."); } } void TreeTopologyModule::render() { if (!mIsVisible) return; if (!mIsInitialized) init(); mShader.startShading(); mBufferObject.render(); mShader.stopShading(); } //////////////////////////////////////// template<typename TreeType> class PointGenerator { public: using LeafManagerType = openvdb::tree::LeafManager<TreeType>; PointGenerator( std::vector<GLfloat>& points, std::vector<GLuint>& indices, LeafManagerType& leafs, std::vector<size_t>& indexMap, const openvdb::math::Transform& transform, openvdb::Index64 
voxelsPerLeaf = TreeType::LeafNodeType::NUM_VOXELS) : mPoints(points) , mIndices(indices) , mLeafs(leafs) , mIndexMap(indexMap) , mTransform(transform) , mVoxelsPerLeaf(voxelsPerLeaf) { } void runParallel() { tbb::parallel_for(mLeafs.getRange(), *this); } inline void operator()(const typename LeafManagerType::RangeType& range) const { using openvdb::Index64; using ValueOnCIter = typename TreeType::LeafNodeType::ValueOnCIter; openvdb::Vec3d pos; size_t index = 0; Index64 activeVoxels = 0; for (size_t n = range.begin(); n < range.end(); ++n) { index = mIndexMap[n]; ValueOnCIter it = mLeafs.leaf(n).cbeginValueOn(); activeVoxels = mLeafs.leaf(n).onVoxelCount(); if (activeVoxels <= mVoxelsPerLeaf) { for ( ; it; ++it) { pos = mTransform.indexToWorld(it.getCoord()); insertPoint(pos, index); ++index; } } else if (1 == mVoxelsPerLeaf) { pos = mTransform.indexToWorld(it.getCoord()); insertPoint(pos, index); } else { std::vector<openvdb::Coord> coords; coords.reserve(static_cast<size_t>(activeVoxels)); for ( ; it; ++it) { coords.push_back(it.getCoord()); } pos = mTransform.indexToWorld(coords[0]); insertPoint(pos, index); ++index; pos = mTransform.indexToWorld(coords[static_cast<size_t>(activeVoxels-1)]); insertPoint(pos, index); ++index; Index64 r = Index64(std::floor(double(mVoxelsPerLeaf) / double(activeVoxels))); for (Index64 i = 1, I = mVoxelsPerLeaf - 2; i < I; ++i) { pos = mTransform.indexToWorld(coords[static_cast<size_t>(i * r)]); insertPoint(pos, index); ++index; } } } } private: void insertPoint(const openvdb::Vec3d& pos, size_t index) const { mIndices[index] = GLuint(index); const size_t element = index * 3; mPoints[element ] = static_cast<GLfloat>(pos[0]); mPoints[element + 1] = static_cast<GLfloat>(pos[1]); mPoints[element + 2] = static_cast<GLfloat>(pos[2]); } std::vector<GLfloat>& mPoints; std::vector<GLuint>& mIndices; LeafManagerType& mLeafs; std::vector<size_t>& mIndexMap; const openvdb::math::Transform& mTransform; const openvdb::Index64 mVoxelsPerLeaf; }; 
// PointGenerator template<typename GridType> class NormalGenerator { public: using AccessorType = typename GridType::ConstAccessor; using Grad = openvdb::math::ISGradient<openvdb::math::CD_2ND>; NormalGenerator(const AccessorType& acc): mAccessor(acc) {} NormalGenerator(const NormalGenerator&) = delete; NormalGenerator& operator=(const NormalGenerator&) = delete; void operator()(const openvdb::Coord& ijk, openvdb::Vec3d& normal) { openvdb::Vec3d v{Grad::result(mAccessor, ijk)}; const double length = v.length(); if (length > 1.0e-7) { v *= 1.0 / length; normal = v; } } private: const AccessorType& mAccessor; }; // class NormalGenerator // Specialization for PointDataGrids, for which normals are not generated template<> class NormalGenerator<openvdb::points::PointDataGrid> { public: NormalGenerator(const openvdb::points::PointDataGrid::ConstAccessor&) {} NormalGenerator(const NormalGenerator&) = delete; NormalGenerator& operator=(const NormalGenerator&) = delete; void operator()(const openvdb::Coord&, openvdb::Vec3d&) {} }; template<typename GridType> class PointAttributeGenerator { public: using ValueType = typename GridType::ValueType; PointAttributeGenerator( std::vector<GLfloat>& points, std::vector<GLfloat>& colors, const GridType& grid, ValueType minValue, ValueType maxValue, openvdb::Vec3s (&colorMap)[4], bool isLevelSet = false) : mPoints(points) , mColors(colors) , mNormals(nullptr) , mGrid(grid) , mAccessor(grid.tree()) , mMinValue(minValue) , mMaxValue(maxValue) , mColorMap(colorMap) , mIsLevelSet(isLevelSet) , mZeroValue(openvdb::zeroVal<ValueType>()) { init(); } PointAttributeGenerator( std::vector<GLfloat>& points, std::vector<GLfloat>& colors, std::vector<GLfloat>& normals, const GridType& grid, ValueType minValue, ValueType maxValue, openvdb::Vec3s (&colorMap)[4], bool isLevelSet = false) : mPoints(points) , mColors(colors) , mNormals(&normals) , mGrid(grid) , mAccessor(grid.tree()) , mMinValue(minValue) , mMaxValue(maxValue) , mColorMap(colorMap) , 
mIsLevelSet(isLevelSet) , mZeroValue(openvdb::zeroVal<ValueType>()) { init(); } void runParallel() { tbb::parallel_for(tbb::blocked_range<size_t>(0, (mPoints.size() / 3)), *this); } inline void operator()(const tbb::blocked_range<size_t>& range) const { openvdb::Coord ijk; openvdb::Vec3d pos, normal(0.0, -1.0, 0.0); openvdb::Vec3s color(0.9f, 0.3f, 0.3f); float w = 0.0; NormalGenerator<GridType> computeNormal{mAccessor}; size_t e1, e2, e3, voxelNum = 0; for (size_t n = range.begin(); n < range.end(); ++n) { e1 = 3 * n; e2 = e1 + 1; e3 = e2 + 1; pos[0] = mPoints[e1]; pos[1] = mPoints[e2]; pos[2] = mPoints[e3]; pos = mGrid.worldToIndex(pos); ijk[0] = int(pos[0]); ijk[1] = int(pos[1]); ijk[2] = int(pos[2]); const ValueType& value = mAccessor.getValue(ijk); if (value < mZeroValue) { // is negative if (mIsLevelSet) { color = mColorMap[1]; } else { w = (float(value) - mOffset[1]) * mScale[1]; color = openvdb::Vec3s(w * mColorMap[0] + (1.0 - w) * mColorMap[1]); } } else { if (mIsLevelSet) { color = mColorMap[2]; } else { w = (float(value) - mOffset[0]) * mScale[0]; color = openvdb::Vec3s(w * mColorMap[2] + (1.0 - w) * mColorMap[3]); } } mColors[e1] = color[0]; mColors[e2] = color[1]; mColors[e3] = color[2]; if (mNormals) { if ((voxelNum % 2) == 0) { computeNormal(ijk, normal); } ++voxelNum; (*mNormals)[e1] = static_cast<GLfloat>(normal[0]); (*mNormals)[e2] = static_cast<GLfloat>(normal[1]); (*mNormals)[e3] = static_cast<GLfloat>(normal[2]); } } } private: void init() { mOffset[0] = static_cast<float>(std::min(mZeroValue, mMinValue)); mScale[0] = static_cast<float>( 1.0 / (std::abs(float(std::max(mZeroValue, mMaxValue)) - mOffset[0]))); mOffset[1] = static_cast<float>(std::min(mZeroValue, mMinValue)); mScale[1] = static_cast<float>( 1.0 / (std::abs(float(std::max(mZeroValue, mMaxValue)) - mOffset[1]))); } std::vector<GLfloat>& mPoints; std::vector<GLfloat>& mColors; std::vector<GLfloat>* mNormals; const GridType& mGrid; openvdb::tree::ValueAccessor<const typename 
GridType::TreeType> mAccessor; ValueType mMinValue, mMaxValue; openvdb::Vec3s (&mColorMap)[4]; const bool mIsLevelSet; ValueType mZeroValue; float mOffset[2], mScale[2]; }; // PointAttributeGenerator //////////////////////////////////////// class ActiveScalarValuesOp { public: ActiveScalarValuesOp( BufferObject& interiorBuffer, BufferObject& surfaceBuffer) : mInteriorBuffer(&interiorBuffer) , mSurfaceBuffer(&surfaceBuffer) { } template<typename GridType> void operator()(typename GridType::ConstPtr grid) { using openvdb::Index64; const Index64 maxVoxelPoints = 26000000; openvdb::Vec3s colorMap[4]; colorMap[0] = openvdb::Vec3s(0.3f, 0.9f, 0.3f); // green colorMap[1] = openvdb::Vec3s(0.9f, 0.3f, 0.3f); // red colorMap[2] = openvdb::Vec3s(0.9f, 0.9f, 0.3f); // yellow colorMap[3] = openvdb::Vec3s(0.3f, 0.3f, 0.9f); // blue ////////// using ValueType = typename GridType::ValueType; using TreeType = typename GridType::TreeType; using BoolTreeT = typename TreeType::template ValueConverter<bool>::Type; const TreeType& tree = grid->tree(); const bool isLevelSetGrid = grid->getGridClass() == openvdb::GRID_LEVEL_SET; ValueType minValue, maxValue; openvdb::tree::LeafManager<const TreeType> leafs(tree); { util::MinMaxVoxel<const TreeType> minmax(leafs); minmax.runParallel(); minValue = minmax.minVoxel(); maxValue = minmax.maxVoxel(); } openvdb::Index64 voxelsPerLeaf = TreeType::LeafNodeType::NUM_VOXELS; if (!isLevelSetGrid) { typename BoolTreeT::Ptr interiorMask(new BoolTreeT(false)); { // Generate Interior Points interiorMask->topologyUnion(tree); interiorMask->voxelizeActiveTiles(); if (interiorMask->activeLeafVoxelCount() > maxVoxelPoints) { voxelsPerLeaf = std::max<Index64>(1, (maxVoxelPoints / interiorMask->leafCount())); } openvdb::tools::erodeVoxels(*interiorMask, 2); openvdb::tree::LeafManager<BoolTreeT> maskleafs(*interiorMask); std::vector<size_t> indexMap(maskleafs.leafCount()); size_t voxelCount = 0; for (Index64 l = 0, L = maskleafs.leafCount(); l < L; ++l) { 
indexMap[l] = voxelCount; voxelCount += std::min(maskleafs.leaf(l).onVoxelCount(), voxelsPerLeaf); } std::vector<GLfloat> points(voxelCount * 3), colors(voxelCount * 3); std::vector<GLuint> indices(voxelCount); PointGenerator<BoolTreeT> pointGen( points, indices, maskleafs, indexMap, grid->transform(), voxelsPerLeaf); pointGen.runParallel(); PointAttributeGenerator<GridType> attributeGen( points, colors, *grid, minValue, maxValue, colorMap); attributeGen.runParallel(); // gen buffers and upload data to GPU mInteriorBuffer->genVertexBuffer(points); mInteriorBuffer->genColorBuffer(colors); mInteriorBuffer->genIndexBuffer(indices, GL_POINTS); } { // Generate Surface Points typename BoolTreeT::Ptr surfaceMask(new BoolTreeT(false)); surfaceMask->topologyUnion(tree); surfaceMask->voxelizeActiveTiles(); openvdb::tree::ValueAccessor<BoolTreeT> interiorAcc(*interiorMask); for (typename BoolTreeT::LeafIter leafIt = surfaceMask->beginLeaf(); leafIt; ++leafIt) { const typename BoolTreeT::LeafNodeType* leaf = interiorAcc.probeConstLeaf(leafIt->origin()); if (leaf) leafIt->topologyDifference(*leaf, false); } openvdb::tools::pruneInactive(*surfaceMask); openvdb::tree::LeafManager<BoolTreeT> maskleafs(*surfaceMask); std::vector<size_t> indexMap(maskleafs.leafCount()); size_t voxelCount = 0; for (Index64 l = 0, L = maskleafs.leafCount(); l < L; ++l) { indexMap[l] = voxelCount; voxelCount += std::min(maskleafs.leaf(l).onVoxelCount(), voxelsPerLeaf); } std::vector<GLfloat> points(voxelCount * 3), colors(voxelCount * 3), normals(voxelCount * 3); std::vector<GLuint> indices(voxelCount); PointGenerator<BoolTreeT> pointGen( points, indices, maskleafs, indexMap, grid->transform(), voxelsPerLeaf); pointGen.runParallel(); PointAttributeGenerator<GridType> attributeGen( points, colors, normals, *grid, minValue, maxValue, colorMap); attributeGen.runParallel(); mSurfaceBuffer->genVertexBuffer(points); mSurfaceBuffer->genColorBuffer(colors); mSurfaceBuffer->genNormalBuffer(normals); 
mSurfaceBuffer->genIndexBuffer(indices, GL_POINTS); } return; } // Level set rendering if (tree.activeLeafVoxelCount() > maxVoxelPoints) { voxelsPerLeaf = std::max<Index64>(1, (maxVoxelPoints / tree.leafCount())); } std::vector<size_t> indexMap(leafs.leafCount()); size_t voxelCount = 0; for (Index64 l = 0, L = leafs.leafCount(); l < L; ++l) { indexMap[l] = voxelCount; voxelCount += std::min(leafs.leaf(l).onVoxelCount(), voxelsPerLeaf); } std::vector<GLfloat> points(voxelCount * 3), colors(voxelCount * 3), normals(voxelCount * 3); std::vector<GLuint> indices(voxelCount); PointGenerator<const TreeType> pointGen( points, indices, leafs, indexMap, grid->transform(), voxelsPerLeaf); pointGen.runParallel(); PointAttributeGenerator<GridType> attributeGen( points, colors, normals, *grid, minValue, maxValue, colorMap, isLevelSetGrid); attributeGen.runParallel(); mSurfaceBuffer->genVertexBuffer(points); mSurfaceBuffer->genColorBuffer(colors); mSurfaceBuffer->genNormalBuffer(normals); mSurfaceBuffer->genIndexBuffer(indices, GL_POINTS); } private: BufferObject *mInteriorBuffer; BufferObject *mSurfaceBuffer; }; // ActiveScalarValuesOp class ActiveVectorValuesOp { public: ActiveVectorValuesOp(BufferObject& vectorBuffer) : mVectorBuffer(&vectorBuffer) { } template<typename GridType> void operator()(typename GridType::ConstPtr grid) { using openvdb::Index64; using ValueType = typename GridType::ValueType; using TreeType = typename GridType::TreeType; using BoolTreeT = typename TreeType::template ValueConverter<bool>::Type; const TreeType& tree = grid->tree(); double length = 0.0; { ValueType minVal, maxVal; tree.evalMinMax(minVal, maxVal); length = maxVal.length(); } typename BoolTreeT::Ptr mask(new BoolTreeT(false)); mask->topologyUnion(tree); mask->voxelizeActiveTiles(); ///@todo thread and restructure. 
const Index64 voxelCount = mask->activeLeafVoxelCount(); const Index64 pointCount = voxelCount * 2; std::vector<GLfloat> points(pointCount*3), colors(pointCount*3); std::vector<GLuint> indices(pointCount); openvdb::Coord ijk; openvdb::Vec3d pos, color; openvdb::tree::LeafManager<BoolTreeT> leafs(*mask); openvdb::tree::ValueAccessor<const TreeType> acc(tree); Index64 idx = 0, pt = 0, cc = 0; for (Index64 l = 0, L = leafs.leafCount(); l < L; ++l) { typename BoolTreeT::LeafNodeType::ValueOnIter iter = leafs.leaf(l).beginValueOn(); for (; iter; ++iter) { ijk = iter.getCoord(); ValueType vec = acc.getValue(ijk); pos = grid->indexToWorld(ijk); points[idx++] = static_cast<GLfloat>(pos[0]); points[idx++] = static_cast<GLfloat>(pos[1]); points[idx++] = static_cast<GLfloat>(pos[2]); indices[pt] = GLuint(pt); ++pt; indices[pt] = GLuint(pt); ++pt; double w = vec.length() / length; vec.normalize(); pos += grid->voxelSize()[0] * 0.9 * vec; points[idx++] = static_cast<GLfloat>(pos[0]); points[idx++] = static_cast<GLfloat>(pos[1]); points[idx++] = static_cast<GLfloat>(pos[2]); color = w * openvdb::Vec3d(0.9, 0.3, 0.3) + (1.0 - w) * openvdb::Vec3d(0.3, 0.3, 0.9); colors[cc++] = static_cast<GLfloat>(color[0] * 0.3); colors[cc++] = static_cast<GLfloat>(color[1] * 0.3); colors[cc++] = static_cast<GLfloat>(color[2] * 0.3); colors[cc++] = static_cast<GLfloat>(color[0]); colors[cc++] = static_cast<GLfloat>(color[1]); colors[cc++] = static_cast<GLfloat>(color[2]); } } mVectorBuffer->genVertexBuffer(points); mVectorBuffer->genColorBuffer(colors); mVectorBuffer->genIndexBuffer(indices, GL_LINES); } private: BufferObject *mVectorBuffer; }; // ActiveVectorValuesOp class PointDataOp { public: using GLfloatVec = std::vector<GLfloat>; using GLuintVec = std::vector<GLuint>; private: struct VectorAttributeWrapper { using ValueType = openvdb::Vec3f; struct Handle { explicit Handle(VectorAttributeWrapper& attribute): mValues(attribute.mValues), mIndices(attribute.mIndices) {} void set(openvdb::Index 
offset, openvdb::Index/*unused*/, const ValueType& value) { if (mIndices) (*mIndices)[offset] = static_cast<GLuint>(offset); offset *= 3; for (int i = 0; i < 3; ++i, ++offset) { mValues[offset] = value[i]; } } private: GLfloatVec& mValues; GLuintVec* mIndices; }; // struct Handle explicit VectorAttributeWrapper(GLfloatVec& values, GLuintVec* indices = nullptr): mValues(values), mIndices(indices) {} void expand() {} void compact() {} private: GLfloatVec& mValues; GLuintVec* mIndices; }; // struct VectorAttributeWrapper public: explicit PointDataOp(BufferObject& buffer) : mBuffer(&buffer) {} template<typename GridType> void operator()(typename GridType::ConstPtr grid) { const typename GridType::TreeType& tree = grid->tree(); // obtain cumulative point offsets and total points std::vector<openvdb::Index64> pointOffsets; const openvdb::Index64 total = openvdb::points::pointOffsets(pointOffsets, tree); // @todo use glDrawArrays with GL_POINTS to avoid generating indices GLfloatVec values(total * 3); GLuintVec indices(total); VectorAttributeWrapper positionWrapper{values, &indices}; openvdb::points::convertPointDataGridPosition(positionWrapper, *grid, pointOffsets, 0); // gen buffers and upload data to GPU mBuffer->genVertexBuffer(values); mBuffer->genIndexBuffer(indices, GL_POINTS); const auto leafIter = tree.cbeginLeaf(); if (!leafIter) return; const size_t colorIdx = leafIter->attributeSet().find("Cd"); if (colorIdx == openvdb::points::AttributeSet::INVALID_POS) return; const auto& colorArray = leafIter->constAttributeArray(colorIdx); if (colorArray.template hasValueType<openvdb::Vec3f>()) { VectorAttributeWrapper colorWrapper{values}; openvdb::points::convertPointDataGridAttribute(colorWrapper, tree, pointOffsets, /*startOffset=*/0, static_cast<unsigned>(colorIdx)); // gen color buffer mBuffer->genColorBuffer(values); } } private: BufferObject* mBuffer; }; // PointDataOp //////////////////////////////////////// // Active value render module 
VoxelModule::VoxelModule(const openvdb::GridBase::ConstPtr& grid): mGrid(grid), mIsInitialized(false) { mFlatShader.setVertShader( "#version 120\n" "void main() {\n" "gl_FrontColor = gl_Color;\n" "gl_Position = ftransform();\n" "gl_ClipVertex = gl_ModelViewMatrix * gl_Vertex;\n" "}\n"); mFlatShader.setFragShader( "#version 120\n" "void main() {\n" "gl_FragColor = gl_Color;}\n"); mFlatShader.build(); mSurfaceShader.setVertShader( "#version 120\n" "varying vec3 normal;\n" "void main() {\n" "gl_FrontColor = gl_Color;\n" "normal = normalize(gl_NormalMatrix * gl_Normal);\n" "gl_Position = ftransform();\n" "gl_ClipVertex = gl_ModelViewMatrix * gl_Vertex;\n" "}\n"); mSurfaceShader.setFragShader( "#version 120\n" "varying vec3 normal;\n" "void main() {\n" "vec3 normalized_normal = normalize(normal);\n" "float w = 0.5 * (1.0 + dot(normalized_normal, vec3(0.0, 1.0, 0.0)));\n" "vec4 diffuseColor = w * gl_Color + (1.0 - w) * (gl_Color * 0.3);\n" "gl_FragColor = diffuseColor;\n" "}\n"); mSurfaceShader.build(); } void VoxelModule::init() { mIsInitialized = true; if (mGrid->isType<openvdb::points::PointDataGrid>()) { mSurfaceBuffer.clear(); PointDataOp drawPoints(mInteriorBuffer); util::doProcessTypedGrid<openvdb::points::PointDataGrid>(mGrid, drawPoints); } else { ActiveScalarValuesOp drawScalars(mInteriorBuffer, mSurfaceBuffer); if (!util::processTypedScalarOrPointDataGrid(mGrid, drawScalars)) { ActiveVectorValuesOp drawVectors(mVectorBuffer); if (!util::processTypedVectorGrid(mGrid, drawVectors)) { OPENVDB_LOG_INFO("Ignoring unrecognized grid type " << mGrid->type() << " during active value module initialization."); } } } } void VoxelModule::render() { if (!mIsVisible) return; if (!mIsInitialized) init(); mFlatShader.startShading(); mInteriorBuffer.render(); mVectorBuffer.render(); mFlatShader.stopShading(); mSurfaceShader.startShading(); mSurfaceBuffer.render(); mSurfaceShader.stopShading(); } //////////////////////////////////////// class MeshOp { public: 
MeshOp(BufferObject& buffer) : mBuffer(&buffer) {} template<typename GridType> void operator()(typename GridType::ConstPtr grid) { using openvdb::Index64; openvdb::tools::VolumeToMesh mesher( grid->getGridClass() == openvdb::GRID_LEVEL_SET ? 0.0 : 0.01); mesher(*grid); // Copy points and generate point normals. std::vector<GLfloat> points(mesher.pointListSize() * 3); std::vector<GLfloat> normals(mesher.pointListSize() * 3); openvdb::tree::ValueAccessor<const typename GridType::TreeType> acc(grid->tree()); openvdb::math::GenericMap map(grid->transform()); openvdb::Coord ijk; for (Index64 n = 0, i = 0, N = mesher.pointListSize(); n < N; ++n) { const openvdb::Vec3s& p = mesher.pointList()[n]; points[i++] = p[0]; points[i++] = p[1]; points[i++] = p[2]; } // Copy primitives openvdb::tools::PolygonPoolList& polygonPoolList = mesher.polygonPoolList(); Index64 numQuads = 0; for (Index64 n = 0, N = mesher.polygonPoolListSize(); n < N; ++n) { numQuads += polygonPoolList[n].numQuads(); } std::vector<GLuint> indices; indices.reserve(numQuads * 4); openvdb::Vec3d normal, e1, e2; for (Index64 n = 0, N = mesher.polygonPoolListSize(); n < N; ++n) { const openvdb::tools::PolygonPool& polygons = polygonPoolList[n]; for (Index64 i = 0, I = polygons.numQuads(); i < I; ++i) { const openvdb::Vec4I& quad = polygons.quad(i); indices.push_back(quad[0]); indices.push_back(quad[1]); indices.push_back(quad[2]); indices.push_back(quad[3]); e1 = mesher.pointList()[quad[1]]; e1 -= mesher.pointList()[quad[0]]; e2 = mesher.pointList()[quad[2]]; e2 -= mesher.pointList()[quad[1]]; normal = e1.cross(e2); const double length = normal.length(); if (length > 1.0e-7) normal *= (1.0 / length); for (int v = 0; v < 4; ++v) { normals[quad[v]*3] = static_cast<GLfloat>(-normal[0]); normals[quad[v]*3+1] = static_cast<GLfloat>(-normal[1]); normals[quad[v]*3+2] = static_cast<GLfloat>(-normal[2]); } } } // Construct and transfer GPU buffers. 
mBuffer->genVertexBuffer(points); mBuffer->genNormalBuffer(normals); mBuffer->genIndexBuffer(indices, GL_QUADS); } private: BufferObject *mBuffer; static openvdb::Vec3s sNodeColors[]; }; // MeshOp //////////////////////////////////////// // Meshing module MeshModule::MeshModule(const openvdb::GridBase::ConstPtr& grid): mGrid(grid), mIsInitialized(false) { mShader.setVertShader( "#version 120\n" "varying vec3 normal;\n" "void main() {\n" "normal = normalize(gl_NormalMatrix * gl_Normal);\n" "gl_Position = ftransform();\n" "gl_ClipVertex = gl_ModelViewMatrix * gl_Vertex;\n" "}\n"); mShader.setFragShader( "#version 120\n" "varying vec3 normal;\n" "const vec4 skyColor = vec4(0.9, 0.9, 1.0, 1.0);\n" "const vec4 groundColor = vec4(0.3, 0.3, 0.2, 1.0);\n" "void main() {\n" "vec3 normalized_normal = normalize(normal);\n" "float w = 0.5 * (1.0 + dot(normalized_normal, vec3(0.0, 1.0, 0.0)));\n" "vec4 diffuseColor = w * skyColor + (1.0 - w) * groundColor;\n" "gl_FragColor = diffuseColor;\n" "}\n"); mShader.build(); } void MeshModule::init() { mIsInitialized = true; MeshOp drawMesh(mBufferObject); if (!util::processTypedScalarGrid(mGrid, drawMesh)) { OPENVDB_LOG_INFO( "Ignoring non-scalar grid type during mesh module initialization."); } } void MeshModule::render() { if (!mIsVisible) return; if (!mIsInitialized) init(); mShader.startShading(); mBufferObject.render(); mShader.stopShading(); } } // namespace openvdb_viewer
51,351
C++
29.972256
99
0.59251
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/Camera.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "Camera.h" #include <cmath> #define GLFW_INCLUDE_GLU #include <GLFW/glfw3.h> namespace openvdb_viewer { const double Camera::sDeg2rad = M_PI / 180.0; Camera::Camera() : mFov(65.0) , mNearPlane(0.1) , mFarPlane(10000.0) , mTarget(openvdb::Vec3d(0.0)) , mLookAt(mTarget) , mUp(openvdb::Vec3d(0.0, 1.0, 0.0)) , mForward(openvdb::Vec3d(0.0, 0.0, 1.0)) , mRight(openvdb::Vec3d(1.0, 0.0, 0.0)) , mEye(openvdb::Vec3d(0.0, 0.0, -1.0)) , mTumblingSpeed(0.5) , mZoomSpeed(0.2) , mStrafeSpeed(0.05) , mHead(30.0) , mPitch(45.0) , mTargetDistance(25.0) , mDistance(mTargetDistance) , mMouseDown(false) , mStartTumbling(false) , mZoomMode(false) , mChanged(true) , mNeedsDisplay(true) , mMouseXPos(0.0) , mMouseYPos(0.0) , mWindow(nullptr) { } void Camera::lookAt(const openvdb::Vec3d& p, double dist) { mLookAt = p; mDistance = dist; mNeedsDisplay = true; } void Camera::lookAtTarget() { mLookAt = mTarget; mDistance = mTargetDistance; mNeedsDisplay = true; } void Camera::setSpeed(double zoomSpeed, double strafeSpeed, double tumblingSpeed) { mZoomSpeed = std::max(0.0001, mDistance * zoomSpeed); mStrafeSpeed = std::max(0.0001, mDistance * strafeSpeed); mTumblingSpeed = std::max(0.2, mDistance * tumblingSpeed); mTumblingSpeed = std::min(1.0, mDistance * tumblingSpeed); } void Camera::setTarget(const openvdb::Vec3d& p, double dist) { mTarget = p; mTargetDistance = dist; } void Camera::aim() { if (mWindow == nullptr) return; // Get the window size int width, height; glfwGetFramebufferSize(mWindow, &width, &height); // Make sure that height is non-zero to avoid division by zero height = std::max(1, height); glViewport(0, 0, width, height); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Set up the projection matrix glMatrixMode(GL_PROJECTION); glLoadIdentity(); // Window aspect (assumes square pixels) double aspectRatio = double(width) / double(height); // Set perspective view (fov is in degrees 
in the y direction.) gluPerspective(mFov, aspectRatio, mNearPlane, mFarPlane); if (mChanged) { mChanged = false; mEye[0] = mLookAt[0] + mDistance * std::cos(mHead * sDeg2rad) * std::cos(mPitch * sDeg2rad); mEye[1] = mLookAt[1] + mDistance * std::sin(mHead * sDeg2rad); mEye[2] = mLookAt[2] + mDistance * std::cos(mHead * sDeg2rad) * std::sin(mPitch * sDeg2rad); mForward = mLookAt - mEye; mForward.normalize(); mUp[1] = std::cos(mHead * sDeg2rad) > 0 ? 1.0 : -1.0; mRight = mForward.cross(mUp); } // Set up modelview matrix glMatrixMode(GL_MODELVIEW); glLoadIdentity(); gluLookAt(mEye[0], mEye[1], mEye[2], mLookAt[0], mLookAt[1], mLookAt[2], mUp[0], mUp[1], mUp[2]); mNeedsDisplay = false; } void Camera::keyCallback(int key, int) { if (mWindow == nullptr) return; int state = glfwGetKey(mWindow, key); switch (state) { case GLFW_PRESS: switch(key) { case GLFW_KEY_SPACE: mZoomMode = true; break; } break; case GLFW_RELEASE: switch(key) { case GLFW_KEY_SPACE: mZoomMode = false; break; } break; } mChanged = true; } void Camera::mouseButtonCallback(int button, int action) { if (button == GLFW_MOUSE_BUTTON_LEFT) { if (action == GLFW_PRESS) mMouseDown = true; else if (action == GLFW_RELEASE) mMouseDown = false; } else if (button == GLFW_MOUSE_BUTTON_RIGHT) { if (action == GLFW_PRESS) { mMouseDown = true; mZoomMode = true; } else if (action == GLFW_RELEASE) { mMouseDown = false; mZoomMode = false; } } if (action == GLFW_RELEASE) mMouseDown = false; mStartTumbling = true; mChanged = true; } void Camera::mousePosCallback(int x, int y) { if (mStartTumbling) { mMouseXPos = x; mMouseYPos = y; mStartTumbling = false; } double dx, dy; dx = x - mMouseXPos; dy = y - mMouseYPos; if (mMouseDown && !mZoomMode) { mNeedsDisplay = true; mHead += dy * mTumblingSpeed; mPitch += dx * mTumblingSpeed; } else if (mMouseDown && mZoomMode) { mNeedsDisplay = true; mLookAt += (dy * mUp - dx * mRight) * mStrafeSpeed; } mMouseXPos = x; mMouseYPos = y; mChanged = true; } void Camera::mouseWheelCallback(int 
pos, int prevPos) { double speed = std::abs(prevPos - pos); if (prevPos < pos) { mDistance += speed * mZoomSpeed; } else { double temp = mDistance - speed * mZoomSpeed; mDistance = std::max(0.0, temp); } setSpeed(); mChanged = true; mNeedsDisplay = true; } } // namespace openvdb_viewer
5,144
C++
21.765487
100
0.586897
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/Tree.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file tree/Tree.h #ifndef OPENVDB_TREE_TREE_HAS_BEEN_INCLUDED #define OPENVDB_TREE_TREE_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/Metadata.h> #include <openvdb/math/Math.h> #include <openvdb/math/BBox.h> #include <openvdb/util/Formats.h> #include <openvdb/util/logging.h> #include <openvdb/Platform.h> #include "RootNode.h" #include "InternalNode.h" #include "LeafNode.h" #include "TreeIterator.h" #include "ValueAccessor.h" #include <tbb/concurrent_hash_map.h> #include <cstdint> #include <iostream> #include <mutex> #include <sstream> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { /// @brief Base class for typed trees class OPENVDB_API TreeBase { public: using Ptr = SharedPtr<TreeBase>; using ConstPtr = SharedPtr<const TreeBase>; TreeBase() = default; TreeBase(const TreeBase&) = default; TreeBase& operator=(const TreeBase&) = delete; // disallow assignment virtual ~TreeBase() = default; /// Return the name of this tree's type. virtual const Name& type() const = 0; /// Return the name of the type of a voxel's value (e.g., "float" or "vec3d"). virtual Name valueType() const = 0; /// Return a pointer to a deep copy of this tree virtual TreeBase::Ptr copy() const = 0; // // Tree methods // /// @brief Return this tree's background value wrapped as metadata. /// @note Query the metadata object for the value's type. virtual Metadata::Ptr getBackgroundValue() const { return Metadata::Ptr(); } /// @brief Return in @a bbox the axis-aligned bounding box of all /// active tiles and leaf nodes with active values. /// @details This is faster than calling evalActiveVoxelBoundingBox, /// which visits the individual active voxels, and hence /// evalLeafBoundingBox produces a less tight, i.e. approximate, bbox. 
/// @return @c false if the bounding box is empty (in which case /// the bbox is set to its default value). virtual bool evalLeafBoundingBox(CoordBBox& bbox) const = 0; /// @brief Return in @a dim the dimensions of the axis-aligned bounding box /// of all leaf nodes. /// @return @c false if the bounding box is empty. virtual bool evalLeafDim(Coord& dim) const = 0; /// @brief Return in @a bbox the axis-aligned bounding box of all /// active voxels and tiles. /// @details This method produces a more accurate, i.e. tighter, /// bounding box than evalLeafBoundingBox which is approximate but /// faster. /// @return @c false if the bounding box is empty (in which case /// the bbox is set to its default value). virtual bool evalActiveVoxelBoundingBox(CoordBBox& bbox) const = 0; /// @brief Return in @a dim the dimensions of the axis-aligned bounding box of all /// active voxels. This is a tighter bounding box than the leaf node bounding box. /// @return @c false if the bounding box is empty. virtual bool evalActiveVoxelDim(Coord& dim) const = 0; virtual void getIndexRange(CoordBBox& bbox) const = 0; /// @brief Replace with background tiles any nodes whose voxel buffers /// have not yet been allocated. /// @details Typically, unallocated nodes are leaf nodes whose voxel buffers /// are not yet resident in memory because delayed loading is in effect. /// @sa readNonresidentBuffers, io::File::open virtual void clipUnallocatedNodes() = 0; /// Return the total number of unallocated leaf nodes residing in this tree. virtual Index32 unallocatedLeafCount() const = 0; // // Statistics // /// @brief Return the depth of this tree. /// /// A tree with only a root node and leaf nodes has depth 2, for example. virtual Index treeDepth() const = 0; /// Return the number of leaf nodes. virtual Index32 leafCount() const = 0; #if OPENVDB_ABI_VERSION_NUMBER >= 7 /// Return a vector with node counts. 
The number of nodes of type NodeType /// is given as element NodeType::LEVEL in the return vector. Thus, the size /// of this vector corresponds to the height (or depth) of this tree. virtual std::vector<Index32> nodeCount() const = 0; #endif /// Return the number of non-leaf nodes. virtual Index32 nonLeafCount() const = 0; /// Return the number of active voxels stored in leaf nodes. virtual Index64 activeLeafVoxelCount() const = 0; /// Return the number of inactive voxels stored in leaf nodes. virtual Index64 inactiveLeafVoxelCount() const = 0; /// Return the total number of active voxels. virtual Index64 activeVoxelCount() const = 0; /// Return the number of inactive voxels within the bounding box of all active voxels. virtual Index64 inactiveVoxelCount() const = 0; /// Return the total number of active tiles. virtual Index64 activeTileCount() const = 0; /// Return the total amount of memory in bytes occupied by this tree. virtual Index64 memUsage() const { return 0; } // // I/O methods // /// @brief Read the tree topology from a stream. /// /// This will read the tree structure and tile values, but not voxel data. virtual void readTopology(std::istream&, bool saveFloatAsHalf = false); /// @brief Write the tree topology to a stream. /// /// This will write the tree structure and tile values, but not voxel data. virtual void writeTopology(std::ostream&, bool saveFloatAsHalf = false) const; /// Read all data buffers for this tree. virtual void readBuffers(std::istream&, bool saveFloatAsHalf = false) = 0; /// Read all of this tree's data buffers that intersect the given bounding box. virtual void readBuffers(std::istream&, const CoordBBox&, bool saveFloatAsHalf = false) = 0; /// @brief Read all of this tree's data buffers that are not yet resident in memory /// (because delayed loading is in effect). /// @details If this tree was read from a memory-mapped file, this operation /// disconnects the tree from the file. 
/// @sa clipUnallocatedNodes, io::File::open, io::MappedFile virtual void readNonresidentBuffers() const = 0; /// Write out all the data buffers for this tree. virtual void writeBuffers(std::ostream&, bool saveFloatAsHalf = false) const = 0; /// @brief Print statistics, memory usage and other information about this tree. /// @param os a stream to which to write textual information /// @param verboseLevel 1: print tree configuration only; /// 2: include node and voxel statistics; /// 3: include memory usage; /// 4: include minimum and maximum voxel values /// @warning @a verboseLevel 4 forces loading of any unallocated nodes. virtual void print(std::ostream& os = std::cout, int verboseLevel = 1) const; }; //////////////////////////////////////// template<typename _RootNodeType> class Tree: public TreeBase { public: using Ptr = SharedPtr<Tree>; using ConstPtr = SharedPtr<const Tree>; using RootNodeType = _RootNodeType; using ValueType = typename RootNodeType::ValueType; using BuildType = typename RootNodeType::BuildType; using LeafNodeType = typename RootNodeType::LeafNodeType; static const Index DEPTH = RootNodeType::LEVEL + 1; /// @brief ValueConverter<T>::Type is the type of a tree having the same /// hierarchy as this tree but a different value type, T. /// /// For example, FloatTree::ValueConverter<double>::Type is equivalent to DoubleTree. /// @note If the source tree type is a template argument, it might be necessary /// to write "typename SourceTree::template ValueConverter<T>::Type". 
template<typename OtherValueType> struct ValueConverter { using Type = Tree<typename RootNodeType::template ValueConverter<OtherValueType>::Type>; }; Tree() {} Tree& operator=(const Tree&) = delete; // disallow assignment /// Deep copy constructor Tree(const Tree& other): TreeBase(other), mRoot(other.mRoot) { } /// @brief Value conversion deep copy constructor /// /// Deep copy a tree of the same configuration as this tree type but a different /// ValueType, casting the other tree's values to this tree's ValueType. /// @throw TypeError if the other tree's configuration doesn't match this tree's /// or if this tree's ValueType is not constructible from the other tree's ValueType. template<typename OtherRootType> explicit Tree(const Tree<OtherRootType>& other): TreeBase(other), mRoot(other.root()) { } /// @brief Topology copy constructor from a tree of a different type /// /// Copy the structure, i.e., the active states of tiles and voxels, of another /// tree of a possibly different type, but don't copy any tile or voxel values. /// Instead, initialize tiles and voxels with the given active and inactive values. /// @param other a tree having (possibly) a different ValueType /// @param inactiveValue background value for this tree, and the value to which /// all inactive tiles and voxels are initialized /// @param activeValue value to which active tiles and voxels are initialized /// @throw TypeError if the other tree's configuration doesn't match this tree's. template<typename OtherTreeType> Tree(const OtherTreeType& other, const ValueType& inactiveValue, const ValueType& activeValue, TopologyCopy): TreeBase(other), mRoot(other.root(), inactiveValue, activeValue, TopologyCopy()) { } /// @brief Topology copy constructor from a tree of a different type /// /// @note This topology copy constructor is generally faster than /// the one that takes both a foreground and a background value. 
/// /// Copy the structure, i.e., the active states of tiles and voxels, of another /// tree of a possibly different type, but don't copy any tile or voxel values. /// Instead, initialize tiles and voxels with the given background value. /// @param other a tree having (possibly) a different ValueType /// @param background the value to which tiles and voxels are initialized /// @throw TypeError if the other tree's configuration doesn't match this tree's. template<typename OtherTreeType> Tree(const OtherTreeType& other, const ValueType& background, TopologyCopy): TreeBase(other), mRoot(other.root(), background, TopologyCopy()) { } /// Empty tree constructor Tree(const ValueType& background): mRoot(background) {} ~Tree() override { this->clear(); releaseAllAccessors(); } /// Return a pointer to a deep copy of this tree TreeBase::Ptr copy() const override { return TreeBase::Ptr(new Tree(*this)); } /// Return the name of the type of a voxel's value (e.g., "float" or "vec3d") Name valueType() const override { return typeNameAsString<ValueType>(); } /// Return the name of this type of tree. static const Name& treeType(); /// Return the name of this type of tree. const Name& type() const override { return this->treeType(); } bool operator==(const Tree&) const { OPENVDB_THROW(NotImplementedError, ""); } bool operator!=(const Tree&) const { OPENVDB_THROW(NotImplementedError, ""); } //@{ /// Return this tree's root node. RootNodeType& root() { return mRoot; } const RootNodeType& root() const { return mRoot; } //@} // // Tree methods // /// @brief Return @c true if the given tree has the same node and active value /// topology as this tree, whether or not it has the same @c ValueType. 
template<typename OtherRootNodeType> bool hasSameTopology(const Tree<OtherRootNodeType>& other) const; bool evalLeafBoundingBox(CoordBBox& bbox) const override; bool evalActiveVoxelBoundingBox(CoordBBox& bbox) const override; bool evalActiveVoxelDim(Coord& dim) const override; bool evalLeafDim(Coord& dim) const override; /// @brief Traverse the type hierarchy of nodes, and return, in @a dims, a list /// of the Log2Dims of nodes in order from RootNode to LeafNode. /// @note Because RootNodes are resizable, the RootNode Log2Dim is 0 for all trees. static void getNodeLog2Dims(std::vector<Index>& dims); // // I/O methods // /// @brief Read the tree topology from a stream. /// /// This will read the tree structure and tile values, but not voxel data. void readTopology(std::istream&, bool saveFloatAsHalf = false) override; /// @brief Write the tree topology to a stream. /// /// This will write the tree structure and tile values, but not voxel data. void writeTopology(std::ostream&, bool saveFloatAsHalf = false) const override; /// Read all data buffers for this tree. void readBuffers(std::istream&, bool saveFloatAsHalf = false) override; /// Read all of this tree's data buffers that intersect the given bounding box. void readBuffers(std::istream&, const CoordBBox&, bool saveFloatAsHalf = false) override; /// @brief Read all of this tree's data buffers that are not yet resident in memory /// (because delayed loading is in effect). /// @details If this tree was read from a memory-mapped file, this operation /// disconnects the tree from the file. /// @sa clipUnallocatedNodes, io::File::open, io::MappedFile void readNonresidentBuffers() const override; /// Write out all data buffers for this tree. void writeBuffers(std::ostream&, bool saveFloatAsHalf = false) const override; void print(std::ostream& os = std::cout, int verboseLevel = 1) const override; // // Statistics // /// @brief Return the depth of this tree. 
/// /// A tree with only a root node and leaf nodes has depth 2, for example. Index treeDepth() const override { return DEPTH; } /// Return the number of leaf nodes. Index32 leafCount() const override { return mRoot.leafCount(); } #if OPENVDB_ABI_VERSION_NUMBER >= 7 /// Return a vector with node counts. The number of nodes of type NodeType /// is given as element NodeType::LEVEL in the return vector. Thus, the size /// of this vector corresponds to the height (or depth) of this tree. std::vector<Index32> nodeCount() const override { std::vector<Index32> vec(DEPTH, 0); mRoot.nodeCount( vec ); return vec;// Named Return Value Optimization } #endif /// Return the number of non-leaf nodes. Index32 nonLeafCount() const override { return mRoot.nonLeafCount(); } /// Return the number of active voxels stored in leaf nodes. Index64 activeLeafVoxelCount() const override { return mRoot.onLeafVoxelCount(); } /// Return the number of inactive voxels stored in leaf nodes. Index64 inactiveLeafVoxelCount() const override { return mRoot.offLeafVoxelCount(); } /// Return the total number of active voxels. Index64 activeVoxelCount() const override { return mRoot.onVoxelCount(); } /// Return the number of inactive voxels within the bounding box of all active voxels. Index64 inactiveVoxelCount() const override; /// Return the total number of active tiles. Index64 activeTileCount() const override { return mRoot.onTileCount(); } /// Return the minimum and maximum active values in this tree. void evalMinMax(ValueType &min, ValueType &max) const; Index64 memUsage() const override { return sizeof(*this) + mRoot.memUsage(); } // // Voxel access methods (using signed indexing) // /// Return the value of the voxel at the given coordinates. const ValueType& getValue(const Coord& xyz) const; /// @brief Return the value of the voxel at the given coordinates /// and update the given accessor's node cache. 
template<typename AccessT> const ValueType& getValue(const Coord& xyz, AccessT&) const; /// @brief Return the tree depth (0 = root) at which the value of voxel (x, y, z) resides. /// @details If (x, y, z) isn't explicitly represented in the tree (i.e., it is /// implicitly a background voxel), return -1. int getValueDepth(const Coord& xyz) const; /// Set the active state of the voxel at the given coordinates but don't change its value. void setActiveState(const Coord& xyz, bool on); /// Set the value of the voxel at the given coordinates but don't change its active state. void setValueOnly(const Coord& xyz, const ValueType& value); /// Mark the voxel at the given coordinates as active but don't change its value. void setValueOn(const Coord& xyz); /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValueOn(const Coord& xyz, const ValueType& value); /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValue(const Coord& xyz, const ValueType& value); /// @brief Set the value of the voxel at the given coordinates, mark the voxel as active, /// and update the given accessor's node cache. template<typename AccessT> void setValue(const Coord& xyz, const ValueType& value, AccessT&); /// Mark the voxel at the given coordinates as inactive but don't change its value. void setValueOff(const Coord& xyz); /// Set the value of the voxel at the given coordinates and mark the voxel as inactive. void setValueOff(const Coord& xyz, const ValueType& value); /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. /// @details Provided that the functor can be inlined, this is typically /// significantly faster than calling getValue() followed by setValueOn(). 
/// @param xyz the coordinates of a voxel whose value is to be modified /// @param op a functor of the form <tt>void op(ValueType&) const</tt> that modifies /// its argument in place /// @par Example: /// @code /// Coord xyz(1, 0, -2); /// // Multiply the value of a voxel by a constant and mark the voxel as active. /// floatTree.modifyValue(xyz, [](float& f) { f *= 0.25; }); // C++11 /// // Set the value of a voxel to the maximum of its current value and 0.25, /// // and mark the voxel as active. /// floatTree.modifyValue(xyz, [](float& f) { f = std::max(f, 0.25f); }); // C++11 /// @endcode /// @note The functor is not guaranteed to be called only once. /// @see tools::foreach() template<typename ModifyOp> void modifyValue(const Coord& xyz, const ModifyOp& op); /// @brief Apply a functor to the voxel at the given coordinates. /// @details Provided that the functor can be inlined, this is typically /// significantly faster than calling getValue() followed by setValue(). /// @param xyz the coordinates of a voxel to be modified /// @param op a functor of the form <tt>void op(ValueType&, bool&) const</tt> that /// modifies its arguments, a voxel's value and active state, in place /// @par Example: /// @code /// Coord xyz(1, 0, -2); /// // Multiply the value of a voxel by a constant and mark the voxel as inactive. /// floatTree.modifyValueAndActiveState(xyz, /// [](float& f, bool& b) { f *= 0.25; b = false; }); // C++11 /// // Set the value of a voxel to the maximum of its current value and 0.25, /// // but don't change the voxel's active state. /// floatTree.modifyValueAndActiveState(xyz, /// [](float& f, bool&) { f = std::max(f, 0.25f); }); // C++11 /// @endcode /// @note The functor is not guaranteed to be called only once. /// @see tools::foreach() template<typename ModifyOp> void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op); /// @brief Get the value of the voxel at the given coordinates. /// @return @c true if the value is active. 
bool probeValue(const Coord& xyz, ValueType& value) const; /// Return @c true if the value at the given coordinates is active. bool isValueOn(const Coord& xyz) const { return mRoot.isValueOn(xyz); } /// Return @c true if the value at the given coordinates is inactive. bool isValueOff(const Coord& xyz) const { return !this->isValueOn(xyz); } /// Return @c true if this tree has any active tiles. bool hasActiveTiles() const { return mRoot.hasActiveTiles(); } /// Set all voxels that lie outside the given axis-aligned box to the background. void clip(const CoordBBox&); /// @brief Replace with background tiles any nodes whose voxel buffers /// have not yet been allocated. /// @details Typically, unallocated nodes are leaf nodes whose voxel buffers /// are not yet resident in memory because delayed loading is in effect. /// @sa readNonresidentBuffers, io::File::open void clipUnallocatedNodes() override; /// Return the total number of unallocated leaf nodes residing in this tree. Index32 unallocatedLeafCount() const override; //@{ /// @brief Set all voxels within a given axis-aligned box to a constant value. /// @param bbox inclusive coordinates of opposite corners of an axis-aligned box /// @param value the value to which to set voxels within the box /// @param active if true, mark voxels within the box as active, /// otherwise mark them as inactive /// @note This operation generates a sparse, but not always optimally sparse, /// representation of the filled box. Follow fill operations with a prune() /// operation for optimal sparseness. void sparseFill(const CoordBBox& bbox, const ValueType& value, bool active = true); void fill(const CoordBBox& bbox, const ValueType& value, bool active = true) { this->sparseFill(bbox, value, active); } //@} /// @brief Set all voxels within a given axis-aligned box to a constant value /// and ensure that those voxels are all represented at the leaf level. /// @param bbox inclusive coordinates of opposite corners of an axis-aligned box. 
/// @param value the value to which to set voxels within the box. /// @param active if true, mark voxels within the box as active, /// otherwise mark them as inactive. /// @sa voxelizeActiveTiles() void denseFill(const CoordBBox& bbox, const ValueType& value, bool active = true); /// @brief Densify active tiles, i.e., replace them with leaf-level active voxels. /// /// @param threaded if true, this operation is multi-threaded (over the internal nodes). /// /// @warning This method can explode the tree's memory footprint, especially if it /// contains active tiles at the upper levels (in particular the root level)! /// /// @sa denseFill() void voxelizeActiveTiles(bool threaded = true); /// @brief Reduce the memory footprint of this tree by replacing with tiles /// any nodes whose values are all the same (optionally to within a tolerance) /// and have the same active state. /// @warning Will soon be deprecated! void prune(const ValueType& tolerance = zeroVal<ValueType>()) { this->clearAllAccessors(); mRoot.prune(tolerance); } /// @brief Add the given leaf node to this tree, creating a new branch if necessary. /// If a leaf node with the same origin already exists, replace it. /// /// @warning Ownership of the leaf is transferred to the tree so /// the client code should not attempt to delete the leaf pointer! void addLeaf(LeafNodeType* leaf) { assert(leaf); mRoot.addLeaf(leaf); } /// @brief Add a tile containing voxel (x, y, z) at the specified tree level, /// creating a new branch if necessary. Delete any existing lower-level nodes /// that contain (x, y, z). /// @note @a level must be less than this tree's depth. void addTile(Index level, const Coord& xyz, const ValueType& value, bool active); /// @brief Return a pointer to the node of type @c NodeT that contains voxel (x, y, z) /// and replace it with a tile of the specified value and state. /// If no such node exists, leave the tree unchanged and return @c nullptr. 
/// @note The caller takes ownership of the node and is responsible for deleting it. template<typename NodeT> NodeT* stealNode(const Coord& xyz, const ValueType& value, bool active); /// @brief Return a pointer to the leaf node that contains voxel (x, y, z). /// If no such node exists, create one that preserves the values and /// active states of all voxels. /// @details Use this method to preallocate a static tree topology over which to /// safely perform multithreaded processing. LeafNodeType* touchLeaf(const Coord& xyz); //@{ /// @brief Return a pointer to the node of type @c NodeType that contains /// voxel (x, y, z). If no such node exists, return @c nullptr. template<typename NodeType> NodeType* probeNode(const Coord& xyz); template<typename NodeType> const NodeType* probeConstNode(const Coord& xyz) const; template<typename NodeType> const NodeType* probeNode(const Coord& xyz) const; //@} //@{ /// @brief Return a pointer to the leaf node that contains voxel (x, y, z). /// If no such node exists, return @c nullptr. 
LeafNodeType* probeLeaf(const Coord& xyz); const LeafNodeType* probeConstLeaf(const Coord& xyz) const; const LeafNodeType* probeLeaf(const Coord& xyz) const { return this->probeConstLeaf(xyz); } //@} //@{ /// @brief Adds all nodes of a certain type to a container with the following API: /// @code /// struct ArrayT { /// using value_type = ...; // the type of node to be added to the array /// void push_back(value_type nodePtr); // add a node to the array /// }; /// @endcode /// @details An example of a wrapper around a c-style array is: /// @code /// struct MyArray { /// using value_type = LeafType*; /// value_type* ptr; /// MyArray(value_type* array) : ptr(array) {} /// void push_back(value_type leaf) { *ptr++ = leaf; } ///}; /// @endcode /// @details An example that constructs a list of pointer to all leaf nodes is: /// @code /// std::vector<const LeafNodeType*> array;//most std contains have the required API /// array.reserve(tree.leafCount());//this is a fast preallocation. /// tree.getNodes(array); /// @endcode template<typename ArrayT> void getNodes(ArrayT& array) { mRoot.getNodes(array); } template<typename ArrayT> void getNodes(ArrayT& array) const { mRoot.getNodes(array); } //@} /// @brief Steals all nodes of a certain type from the tree and /// adds them to a container with the following API: /// @code /// struct ArrayT { /// using value_type = ...; // the type of node to be added to the array /// void push_back(value_type nodePtr); // add a node to the array /// }; /// @endcode /// @details An example of a wrapper around a c-style array is: /// @code /// struct MyArray { /// using value_type = LeafType*; /// value_type* ptr; /// MyArray(value_type* array) : ptr(array) {} /// void push_back(value_type leaf) { *ptr++ = leaf; } ///}; /// @endcode /// @details An example that constructs a list of pointer to all leaf nodes is: /// @code /// std::vector<const LeafNodeType*> array;//most std contains have the required API /// 
array.reserve(tree.leafCount());//this is a fast preallocation. /// tree.stealNodes(array); /// @endcode template<typename ArrayT> void stealNodes(ArrayT& array) { this->clearAllAccessors(); mRoot.stealNodes(array); } template<typename ArrayT> void stealNodes(ArrayT& array, const ValueType& value, bool state) { this->clearAllAccessors(); mRoot.stealNodes(array, value, state); } // // Aux methods // /// @brief Return @c true if this tree contains no nodes other than /// the root node and no tiles other than background tiles. bool empty() const { return mRoot.empty(); } /// Remove all tiles from this tree and all nodes other than the root node. void clear(); /// Clear all registered accessors. void clearAllAccessors(); //@{ /// @brief Register an accessor for this tree. Registered accessors are /// automatically cleared whenever one of this tree's nodes is deleted. void attachAccessor(ValueAccessorBase<Tree, true>&) const; void attachAccessor(ValueAccessorBase<const Tree, true>&) const; //@} //@{ /// Dummy implementations void attachAccessor(ValueAccessorBase<Tree, false>&) const {} void attachAccessor(ValueAccessorBase<const Tree, false>&) const {} //@} //@{ /// Deregister an accessor so that it is no longer automatically cleared. void releaseAccessor(ValueAccessorBase<Tree, true>&) const; void releaseAccessor(ValueAccessorBase<const Tree, true>&) const; //@} //@{ /// Dummy implementations void releaseAccessor(ValueAccessorBase<Tree, false>&) const {} void releaseAccessor(ValueAccessorBase<const Tree, false>&) const {} //@} /// @brief Return this tree's background value wrapped as metadata. /// @note Query the metadata object for the value's type. Metadata::Ptr getBackgroundValue() const override; /// @brief Return this tree's background value. /// /// @note Use tools::changeBackground to efficiently modify the /// background values. Else use tree.root().setBackground, which /// is serial and hence slower. 
const ValueType& background() const { return mRoot.background(); } /// Min and max are both inclusive. void getIndexRange(CoordBBox& bbox) const override { mRoot.getIndexRange(bbox); } /// @brief Efficiently merge another tree into this tree using one of several schemes. /// @details This operation is primarily intended to combine trees that are mostly /// non-overlapping (for example, intermediate trees from computations that are /// parallelized across disjoint regions of space). /// @note This operation is not guaranteed to produce an optimally sparse tree. /// Follow merge() with prune() for optimal sparseness. /// @warning This operation always empties the other tree. void merge(Tree& other, MergePolicy = MERGE_ACTIVE_STATES); /// @brief Union this tree's set of active values with the active values /// of the other tree, whose @c ValueType may be different. /// @details The resulting state of a value is active if the corresponding value /// was already active OR if it is active in the other tree. Also, a resulting /// value maps to a voxel if the corresponding value already mapped to a voxel /// OR if it is a voxel in the other tree. Thus, a resulting value can only /// map to a tile if the corresponding value already mapped to a tile /// AND if it is a tile value in other tree. /// /// @note This operation modifies only active states, not values. /// Specifically, active tiles and voxels in this tree are not changed, and /// tiles or voxels that were inactive in this tree but active in the other tree /// are marked as active in this tree but left with their original values. template<typename OtherRootNodeType> void topologyUnion(const Tree<OtherRootNodeType>& other); /// @brief Intersects this tree's set of active values with the active values /// of the other tree, whose @c ValueType may be different. /// @details The resulting state of a value is active only if the corresponding /// value was already active AND if it is active in the other tree. 
Also, a /// resulting value maps to a voxel if the corresponding value /// already mapped to an active voxel in either of the two grids /// and it maps to an active tile or voxel in the other grid. /// /// @note This operation can delete branches in this grid if they /// overlap with inactive tiles in the other grid. Likewise active /// voxels can be turned into inactive voxels resulting in leaf /// nodes with no active values. Thus, it is recommended to /// subsequently call tools::pruneInactive. template<typename OtherRootNodeType> void topologyIntersection(const Tree<OtherRootNodeType>& other); /// @brief Difference this tree's set of active values with the active values /// of the other tree, whose @c ValueType may be different. So a /// resulting voxel will be active only if the original voxel is /// active in this tree and inactive in the other tree. /// /// @note This operation can delete branches in this grid if they /// overlap with active tiles in the other grid. Likewise active /// voxels can be turned into inactive voxels resulting in leaf /// nodes with no active values. Thus, it is recommended to /// subsequently call tools::pruneInactive. template<typename OtherRootNodeType> void topologyDifference(const Tree<OtherRootNodeType>& other); /// For a given function @c f, use sparse traversal to compute <tt>f(this, other)</tt> /// over all corresponding pairs of values (tile or voxel) of this tree and the other tree /// and store the result in this tree. /// This method is typically more space-efficient than the two-tree combine2(), /// since it moves rather than copies nodes from the other tree into this tree. /// @note This operation always empties the other tree.
/// @param other a tree of the same type as this tree /// @param op a functor of the form <tt>void op(const T& a, const T& b, T& result)</tt>, /// where @c T is this tree's @c ValueType, that computes /// <tt>result = f(a, b)</tt> /// @param prune if true, prune the resulting tree one branch at a time (this is usually /// more space-efficient than pruning the entire tree in one pass) /// /// @par Example: /// Compute the per-voxel difference between two floating-point trees, /// @c aTree and @c bTree, and store the result in @c aTree (leaving @c bTree empty). /// @code /// { /// struct Local { /// static inline void diff(const float& a, const float& b, float& result) { /// result = a - b; /// } /// }; /// aTree.combine(bTree, Local::diff); /// } /// @endcode /// /// @par Example: /// Compute <tt>f * a + (1 - f) * b</tt> over all voxels of two floating-point trees, /// @c aTree and @c bTree, and store the result in @c aTree (leaving @c bTree empty). /// @code /// namespace { /// struct Blend { /// Blend(float f): frac(f) {} /// inline void operator()(const float& a, const float& b, float& result) const { /// result = frac * a + (1.0 - frac) * b; /// } /// float frac; /// }; /// } /// { /// aTree.combine(bTree, Blend(0.25)); // 0.25 * a + 0.75 * b /// } /// @endcode template<typename CombineOp> void combine(Tree& other, CombineOp& op, bool prune = false); #ifndef _MSC_VER template<typename CombineOp> void combine(Tree& other, const CombineOp& op, bool prune = false); #endif /// Like combine(), but with /// @param other a tree of the same type as this tree /// @param op a functor of the form <tt>void op(CombineArgs<ValueType>& args)</tt> that /// computes <tt>args.setResult(f(args.a(), args.b()))</tt> and, optionally, /// <tt>args.setResultIsActive(g(args.aIsActive(), args.bIsActive()))</tt> /// for some functions @c f and @c g /// @param prune if true, prune the resulting tree one branch at a time (this is usually /// more space-efficient than pruning the entire tree 
in one pass) /// /// This variant passes not only the @em a and @em b values but also the active states /// of the @em a and @em b values to the functor, which may then return, by calling /// @c args.setResultIsActive(), a computed active state for the result value. /// By default, the result is active if either the @em a or the @em b value is active. /// /// @see openvdb/Types.h for the definition of the CombineArgs struct. /// /// @par Example: /// Replace voxel values in floating-point @c aTree with corresponding values /// from floating-point @c bTree (leaving @c bTree empty) wherever the @c bTree /// values are larger. Also, preserve the active states of any transferred values. /// @code /// { /// struct Local { /// static inline void max(CombineArgs<float>& args) { /// if (args.b() > args.a()) { /// // Transfer the B value and its active state. /// args.setResult(args.b()); /// args.setResultIsActive(args.bIsActive()); /// } else { /// // Preserve the A value and its active state. /// args.setResult(args.a()); /// args.setResultIsActive(args.aIsActive()); /// } /// } /// }; /// aTree.combineExtended(bTree, Local::max); /// } /// @endcode template<typename ExtendedCombineOp> void combineExtended(Tree& other, ExtendedCombineOp& op, bool prune = false); #ifndef _MSC_VER template<typename ExtendedCombineOp> void combineExtended(Tree& other, const ExtendedCombineOp& op, bool prune = false); #endif /// For a given function @c f, use sparse traversal to compute <tt>f(a, b)</tt> over all /// corresponding pairs of values (tile or voxel) of trees A and B and store the result /// in this tree. 
/// @param a,b two trees with the same configuration (levels and node dimensions) /// as this tree but with the B tree possibly having a different value type /// @param op a functor of the form <tt>void op(const T1& a, const T2& b, T1& result)</tt>, /// where @c T1 is this tree's and the A tree's @c ValueType and @c T2 is the /// B tree's @c ValueType, that computes <tt>result = f(a, b)</tt> /// @param prune if true, prune the resulting tree one branch at a time (this is usually /// more space-efficient than pruning the entire tree in one pass) /// /// @throw TypeError if the B tree's configuration doesn't match this tree's /// or if this tree's ValueType is not constructible from the B tree's ValueType. /// /// @par Example: /// Compute the per-voxel difference between two floating-point trees, /// @c aTree and @c bTree, and store the result in a third tree. /// @code /// { /// struct Local { /// static inline void diff(const float& a, const float& b, float& result) { /// result = a - b; /// } /// }; /// FloatTree resultTree; /// resultTree.combine2(aTree, bTree, Local::diff); /// } /// @endcode template<typename CombineOp, typename OtherTreeType /*= Tree*/> void combine2(const Tree& a, const OtherTreeType& b, CombineOp& op, bool prune = false); #ifndef _MSC_VER template<typename CombineOp, typename OtherTreeType /*= Tree*/> void combine2(const Tree& a, const OtherTreeType& b, const CombineOp& op, bool prune = false); #endif /// Like combine2(), but with /// @param a,b two trees with the same configuration (levels and node dimensions) /// as this tree but with the B tree possibly having a different value type /// @param op a functor of the form <tt>void op(CombineArgs<T1, T2>& args)</tt>, where /// @c T1 is this tree's and the A tree's @c ValueType and @c T2 is the B tree's /// @c ValueType, that computes <tt>args.setResult(f(args.a(), args.b()))</tt> /// and, optionally, /// <tt>args.setResultIsActive(g(args.aIsActive(), args.bIsActive()))</tt> /// for some 
functions @c f and @c g /// @param prune if true, prune the resulting tree one branch at a time (this is usually /// more space-efficient than pruning the entire tree in one pass) /// This variant passes not only the @em a and @em b values but also the active states /// of the @em a and @em b values to the functor, which may then return, by calling /// <tt>args.setResultIsActive()</tt>, a computed active state for the result value. /// By default, the result is active if either the @em a or the @em b value is active. /// /// @throw TypeError if the B tree's configuration doesn't match this tree's /// or if this tree's ValueType is not constructible from the B tree's ValueType. /// /// @see openvdb/Types.h for the definition of the CombineArgs struct. /// /// @par Example: /// Compute the per-voxel maximum values of two single-precision floating-point trees, /// @c aTree and @c bTree, and store the result in a third tree. Set the active state /// of each output value to that of the larger of the two input values. /// @code /// { /// struct Local { /// static inline void max(CombineArgs<float>& args) { /// if (args.b() > args.a()) { /// // Transfer the B value and its active state. /// args.setResult(args.b()); /// args.setResultIsActive(args.bIsActive()); /// } else { /// // Preserve the A value and its active state. /// args.setResult(args.a()); /// args.setResultIsActive(args.aIsActive()); /// } /// } /// }; /// FloatTree aTree = ...; /// FloatTree bTree = ...; /// FloatTree resultTree; /// resultTree.combine2Extended(aTree, bTree, Local::max); /// } /// @endcode /// /// @par Example: /// Compute the per-voxel maximum values of a double-precision and a single-precision /// floating-point tree, @c aTree and @c bTree, and store the result in a third, /// double-precision tree. Set the active state of each output value to that of /// the larger of the two input values. 
/// @code /// { /// struct Local { /// static inline void max(CombineArgs<double, float>& args) { /// if (args.b() > args.a()) { /// // Transfer the B value and its active state. /// args.setResult(args.b()); /// args.setResultIsActive(args.bIsActive()); /// } else { /// // Preserve the A value and its active state. /// args.setResult(args.a()); /// args.setResultIsActive(args.aIsActive()); /// } /// } /// }; /// DoubleTree aTree = ...; /// FloatTree bTree = ...; /// DoubleTree resultTree; /// resultTree.combine2Extended(aTree, bTree, Local::max); /// } /// @endcode template<typename ExtendedCombineOp, typename OtherTreeType /*= Tree*/> void combine2Extended(const Tree& a, const OtherTreeType& b, ExtendedCombineOp& op, bool prune = false); #ifndef _MSC_VER template<typename ExtendedCombineOp, typename OtherTreeType /*= Tree*/> void combine2Extended(const Tree& a, const OtherTreeType& b, const ExtendedCombineOp&, bool prune = false); #endif template<typename BBoxOp> [[deprecated("Use DynamicNodeManager instead")]] void visitActiveBBox(BBoxOp& op) const { mRoot.visitActiveBBox(op); } template<typename VisitorOp> [[deprecated("Use DynamicNodeManager instead")]] void visit(VisitorOp& op); template<typename VisitorOp> [[deprecated("Use DynamicNodeManager instead")]] void visit(const VisitorOp& op); template<typename VisitorOp> [[deprecated("Use DynamicNodeManager instead")]] void visit(VisitorOp& op) const; template<typename VisitorOp> [[deprecated("Use DynamicNodeManager instead")]] void visit(const VisitorOp& op) const; template<typename OtherTreeType, typename VisitorOp> [[deprecated("Use DynamicNodeManager instead")]] void visit2(OtherTreeType& other, VisitorOp& op); template<typename OtherTreeType, typename VisitorOp> [[deprecated("Use DynamicNodeManager instead")]] void visit2(OtherTreeType& other, const VisitorOp& op); template<typename OtherTreeType, typename VisitorOp> [[deprecated("Use DynamicNodeManager instead")]] void visit2(OtherTreeType& other, VisitorOp& 
op) const; template<typename OtherTreeType, typename VisitorOp> [[deprecated("Use DynamicNodeManager instead")]] void visit2(OtherTreeType& other, const VisitorOp& op) const; // // Iteration // //@{ /// Return an iterator over children of the root node. typename RootNodeType::ChildOnCIter beginRootChildren() const { return mRoot.cbeginChildOn(); } typename RootNodeType::ChildOnCIter cbeginRootChildren() const { return mRoot.cbeginChildOn(); } typename RootNodeType::ChildOnIter beginRootChildren() { return mRoot.beginChildOn(); } //@} //@{ /// Return an iterator over non-child entries of the root node's table. typename RootNodeType::ChildOffCIter beginRootTiles() const { return mRoot.cbeginChildOff(); } typename RootNodeType::ChildOffCIter cbeginRootTiles() const { return mRoot.cbeginChildOff(); } typename RootNodeType::ChildOffIter beginRootTiles() { return mRoot.beginChildOff(); } //@} //@{ /// Return an iterator over all entries of the root node's table. typename RootNodeType::ChildAllCIter beginRootDense() const { return mRoot.cbeginChildAll(); } typename RootNodeType::ChildAllCIter cbeginRootDense() const { return mRoot.cbeginChildAll(); } typename RootNodeType::ChildAllIter beginRootDense() { return mRoot.beginChildAll(); } //@} //@{ /// Iterator over all nodes in this tree using NodeIter = NodeIteratorBase<Tree, typename RootNodeType::ChildOnIter>; using NodeCIter = NodeIteratorBase<const Tree, typename RootNodeType::ChildOnCIter>; //@} //@{ /// Iterator over all leaf nodes in this tree using LeafIter = LeafIteratorBase<Tree, typename RootNodeType::ChildOnIter>; using LeafCIter = LeafIteratorBase<const Tree, typename RootNodeType::ChildOnCIter>; //@} //@{ /// Return an iterator over all nodes in this tree. NodeIter beginNode() { return NodeIter(*this); } NodeCIter beginNode() const { return NodeCIter(*this); } NodeCIter cbeginNode() const { return NodeCIter(*this); } //@} //@{ /// Return an iterator over all leaf nodes in this tree. 
LeafIter beginLeaf() { return LeafIter(*this); } LeafCIter beginLeaf() const { return LeafCIter(*this); } LeafCIter cbeginLeaf() const { return LeafCIter(*this); } //@} using ValueAllIter = TreeValueIteratorBase<Tree, typename RootNodeType::ValueAllIter>; using ValueAllCIter = TreeValueIteratorBase<const Tree, typename RootNodeType::ValueAllCIter>; using ValueOnIter = TreeValueIteratorBase<Tree, typename RootNodeType::ValueOnIter>; using ValueOnCIter = TreeValueIteratorBase<const Tree, typename RootNodeType::ValueOnCIter>; using ValueOffIter = TreeValueIteratorBase<Tree, typename RootNodeType::ValueOffIter>; using ValueOffCIter = TreeValueIteratorBase<const Tree, typename RootNodeType::ValueOffCIter>; //@{ /// Return an iterator over all values (tile and voxel) across all nodes. ValueAllIter beginValueAll() { return ValueAllIter(*this); } ValueAllCIter beginValueAll() const { return ValueAllCIter(*this); } ValueAllCIter cbeginValueAll() const { return ValueAllCIter(*this); } //@} //@{ /// Return an iterator over active values (tile and voxel) across all nodes. ValueOnIter beginValueOn() { return ValueOnIter(*this); } ValueOnCIter beginValueOn() const { return ValueOnCIter(*this); } ValueOnCIter cbeginValueOn() const { return ValueOnCIter(*this); } //@} //@{ /// Return an iterator over inactive values (tile and voxel) across all nodes. ValueOffIter beginValueOff() { return ValueOffIter(*this); } ValueOffCIter beginValueOff() const { return ValueOffCIter(*this); } ValueOffCIter cbeginValueOff() const { return ValueOffCIter(*this); } //@} /// @brief Return an iterator of type @c IterT (for example, begin<ValueOnIter>() is /// equivalent to beginValueOn()). template<typename IterT> IterT begin(); /// @brief Return a const iterator of type CIterT (for example, cbegin<ValueOnCIter>() /// is equivalent to cbeginValueOn()). 
template<typename CIterT> CIterT cbegin() const;

protected:
    // Registries of the value accessors attached to this tree (see attachAccessor()):
    // the "true" template argument selects the registering accessor variants,
    // which must be cleared whenever the tree's nodes are deleted.
    using AccessorRegistry = tbb::concurrent_hash_map<ValueAccessorBase<Tree, true>*, bool>;
    using ConstAccessorRegistry = tbb::concurrent_hash_map<ValueAccessorBase<const Tree, true>*, bool>;

    /// @brief Notify all registered accessors, by calling ValueAccessor::release(),
    /// that this tree is about to be deleted.
    void releaseAllAccessors();

    // TBB body object used to deallocate nodes in parallel.
    template<typename NodeType>
    struct DeallocateNodes {
        DeallocateNodes(std::vector<NodeType*>& nodes)
            // Guard against taking &front() of an empty vector.
            : mNodes(nodes.empty() ? nullptr : &nodes.front()) { }
        void operator()(const tbb::blocked_range<size_t>& range) const {
            for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
                delete mNodes[n]; mNodes[n] = nullptr;
            }
        }
        NodeType ** const mNodes; // non-owning view of the caller's node pointer array
    };

    //
    // Data members
    //
    RootNodeType mRoot; // root node of the tree
    mutable AccessorRegistry mAccessorRegistry;
    mutable ConstAccessorRegistry mConstAccessorRegistry;

    // Lazily-populated cache of this tree type's name (shared by all instances).
    static std::unique_ptr<const Name> sTreeTypeName;
}; // end of Tree class


// Out-of-class definition of the static type-name cache declared above.
template<typename _RootNodeType>
std::unique_ptr<const Name> Tree<_RootNodeType>::sTreeTypeName;


/// @brief Tree3<T, N1, N2>::Type is the type of a three-level tree
/// (Root, Internal, Leaf) with value type T and
/// internal and leaf node log dimensions N1 and N2, respectively.
/// @note This is NOT the standard tree configuration (Tree4 is).
template<typename T, Index N1=4, Index N2=3>
struct Tree3 {
    using Type = Tree<RootNode<InternalNode<LeafNode<T, N2>, N1>>>;
};


/// @brief Tree4<T, N1, N2, N3>::Type is the type of a four-level tree
/// (Root, Internal, Internal, Leaf) with value type T and
/// internal and leaf node log dimensions N1, N2 and N3, respectively.
/// @note This is the standard tree configuration.
template<typename T, Index N1=5, Index N2=4, Index N3=3> struct Tree4 { using Type = Tree<RootNode<InternalNode<InternalNode<LeafNode<T, N3>, N2>, N1>>>; }; /// @brief Tree5<T, N1, N2, N3, N4>::Type is the type of a five-level tree /// (Root, Internal, Internal, Internal, Leaf) with value type T and /// internal and leaf node log dimensions N1, N2, N3 and N4, respectively. /// @note This is NOT the standard tree configuration (Tree4 is). template<typename T, Index N1=6, Index N2=5, Index N3=4, Index N4=3> struct Tree5 { using Type = Tree<RootNode<InternalNode<InternalNode<InternalNode<LeafNode<T, N4>, N3>, N2>, N1>>>; }; //////////////////////////////////////// inline void TreeBase::readTopology(std::istream& is, bool /*saveFloatAsHalf*/) { int32_t bufferCount; is.read(reinterpret_cast<char*>(&bufferCount), sizeof(int32_t)); if (bufferCount != 1) OPENVDB_LOG_WARN("multi-buffer trees are no longer supported"); } inline void TreeBase::writeTopology(std::ostream& os, bool /*saveFloatAsHalf*/) const { int32_t bufferCount = 1; os.write(reinterpret_cast<char*>(&bufferCount), sizeof(int32_t)); } inline void TreeBase::print(std::ostream& os, int /*verboseLevel*/) const { os << " Tree Type: " << type() << " Active Voxel Count: " << activeVoxelCount() << std::endl << " Active tile Count: " << activeTileCount() << std::endl << " Inactive Voxel Count: " << inactiveVoxelCount() << std::endl << " Leaf Node Count: " << leafCount() << std::endl << " Non-leaf Node Count: " << nonLeafCount() << std::endl; } //////////////////////////////////////// // // Type traits for tree iterators // /// @brief TreeIterTraits provides, for all tree iterators, a begin(tree) function /// that returns an iterator over a tree of arbitrary type. 
template<typename TreeT, typename IterT> struct TreeIterTraits; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::RootNodeType::ChildOnIter> { static typename TreeT::RootNodeType::ChildOnIter begin(TreeT& tree) { return tree.beginRootChildren(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::RootNodeType::ChildOnCIter> { static typename TreeT::RootNodeType::ChildOnCIter begin(const TreeT& tree) { return tree.cbeginRootChildren(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::RootNodeType::ChildOffIter> { static typename TreeT::RootNodeType::ChildOffIter begin(TreeT& tree) { return tree.beginRootTiles(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::RootNodeType::ChildOffCIter> { static typename TreeT::RootNodeType::ChildOffCIter begin(const TreeT& tree) { return tree.cbeginRootTiles(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::RootNodeType::ChildAllIter> { static typename TreeT::RootNodeType::ChildAllIter begin(TreeT& tree) { return tree.beginRootDense(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::RootNodeType::ChildAllCIter> { static typename TreeT::RootNodeType::ChildAllCIter begin(const TreeT& tree) { return tree.cbeginRootDense(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::NodeIter> { static typename TreeT::NodeIter begin(TreeT& tree) { return tree.beginNode(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::NodeCIter> { static typename TreeT::NodeCIter begin(const TreeT& tree) { return tree.cbeginNode(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::LeafIter> { static typename TreeT::LeafIter begin(TreeT& tree) { return tree.beginLeaf(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::LeafCIter> { static typename TreeT::LeafCIter begin(const TreeT& tree) { return 
tree.cbeginLeaf(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::ValueOnIter> { static typename TreeT::ValueOnIter begin(TreeT& tree) { return tree.beginValueOn(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::ValueOnCIter> { static typename TreeT::ValueOnCIter begin(const TreeT& tree) { return tree.cbeginValueOn(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::ValueOffIter> { static typename TreeT::ValueOffIter begin(TreeT& tree) { return tree.beginValueOff(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::ValueOffCIter> { static typename TreeT::ValueOffCIter begin(const TreeT& tree) { return tree.cbeginValueOff(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::ValueAllIter> { static typename TreeT::ValueAllIter begin(TreeT& tree) { return tree.beginValueAll(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::ValueAllCIter> { static typename TreeT::ValueAllCIter begin(const TreeT& tree) { return tree.cbeginValueAll(); } }; template<typename RootNodeType> template<typename IterT> inline IterT Tree<RootNodeType>::begin() { return TreeIterTraits<Tree, IterT>::begin(*this); } template<typename RootNodeType> template<typename IterT> inline IterT Tree<RootNodeType>::cbegin() const { return TreeIterTraits<Tree, IterT>::begin(*this); } //////////////////////////////////////// template<typename RootNodeType> void Tree<RootNodeType>::readTopology(std::istream& is, bool saveFloatAsHalf) { this->clearAllAccessors(); TreeBase::readTopology(is, saveFloatAsHalf); mRoot.readTopology(is, saveFloatAsHalf); } template<typename RootNodeType> void Tree<RootNodeType>::writeTopology(std::ostream& os, bool saveFloatAsHalf) const { TreeBase::writeTopology(os, saveFloatAsHalf); mRoot.writeTopology(os, saveFloatAsHalf); } template<typename RootNodeType> inline void Tree<RootNodeType>::readBuffers(std::istream &is, bool 
saveFloatAsHalf) { this->clearAllAccessors(); mRoot.readBuffers(is, saveFloatAsHalf); } template<typename RootNodeType> inline void Tree<RootNodeType>::readBuffers(std::istream &is, const CoordBBox& bbox, bool saveFloatAsHalf) { this->clearAllAccessors(); mRoot.readBuffers(is, bbox, saveFloatAsHalf); } template<typename RootNodeType> inline void Tree<RootNodeType>::readNonresidentBuffers() const { for (LeafCIter it = this->cbeginLeaf(); it; ++it) { // Retrieving the value of a leaf voxel forces loading of the leaf node's voxel buffer. it->getValue(Index(0)); } } template<typename RootNodeType> inline void Tree<RootNodeType>::writeBuffers(std::ostream &os, bool saveFloatAsHalf) const { mRoot.writeBuffers(os, saveFloatAsHalf); } template<typename RootNodeType> inline void Tree<RootNodeType>::clear() { std::vector<LeafNodeType*> leafnodes; this->stealNodes(leafnodes); tbb::parallel_for(tbb::blocked_range<size_t>(0, leafnodes.size()), DeallocateNodes<LeafNodeType>(leafnodes)); std::vector<typename RootNodeType::ChildNodeType*> internalNodes; this->stealNodes(internalNodes); tbb::parallel_for(tbb::blocked_range<size_t>(0, internalNodes.size()), DeallocateNodes<typename RootNodeType::ChildNodeType>(internalNodes)); mRoot.clear(); this->clearAllAccessors(); } //////////////////////////////////////// template<typename RootNodeType> inline void Tree<RootNodeType>::attachAccessor(ValueAccessorBase<Tree, true>& accessor) const { typename AccessorRegistry::accessor a; mAccessorRegistry.insert(a, &accessor); } template<typename RootNodeType> inline void Tree<RootNodeType>::attachAccessor(ValueAccessorBase<const Tree, true>& accessor) const { typename ConstAccessorRegistry::accessor a; mConstAccessorRegistry.insert(a, &accessor); } template<typename RootNodeType> inline void Tree<RootNodeType>::releaseAccessor(ValueAccessorBase<Tree, true>& accessor) const { mAccessorRegistry.erase(&accessor); } template<typename RootNodeType> inline void 
Tree<RootNodeType>::releaseAccessor(ValueAccessorBase<const Tree, true>& accessor) const { mConstAccessorRegistry.erase(&accessor); } template<typename RootNodeType> inline void Tree<RootNodeType>::clearAllAccessors() { for (typename AccessorRegistry::iterator it = mAccessorRegistry.begin(); it != mAccessorRegistry.end(); ++it) { if (it->first) it->first->clear(); } for (typename ConstAccessorRegistry::iterator it = mConstAccessorRegistry.begin(); it != mConstAccessorRegistry.end(); ++it) { if (it->first) it->first->clear(); } } template<typename RootNodeType> inline void Tree<RootNodeType>::releaseAllAccessors() { mAccessorRegistry.erase(nullptr); for (typename AccessorRegistry::iterator it = mAccessorRegistry.begin(); it != mAccessorRegistry.end(); ++it) { it->first->release(); } mAccessorRegistry.clear(); mAccessorRegistry.erase(nullptr); for (typename ConstAccessorRegistry::iterator it = mConstAccessorRegistry.begin(); it != mConstAccessorRegistry.end(); ++it) { it->first->release(); } mConstAccessorRegistry.clear(); } //////////////////////////////////////// template<typename RootNodeType> inline const typename RootNodeType::ValueType& Tree<RootNodeType>::getValue(const Coord& xyz) const { return mRoot.getValue(xyz); } template<typename RootNodeType> template<typename AccessT> inline const typename RootNodeType::ValueType& Tree<RootNodeType>::getValue(const Coord& xyz, AccessT& accessor) const { return accessor.getValue(xyz); } template<typename RootNodeType> inline int Tree<RootNodeType>::getValueDepth(const Coord& xyz) const { return mRoot.getValueDepth(xyz); } template<typename RootNodeType> inline void Tree<RootNodeType>::setValueOff(const Coord& xyz) { mRoot.setValueOff(xyz); } template<typename RootNodeType> inline void Tree<RootNodeType>::setValueOff(const Coord& xyz, const ValueType& value) { mRoot.setValueOff(xyz, value); } template<typename RootNodeType> inline void Tree<RootNodeType>::setActiveState(const Coord& xyz, bool on) { 
mRoot.setActiveState(xyz, on); } template<typename RootNodeType> inline void Tree<RootNodeType>::setValue(const Coord& xyz, const ValueType& value) { mRoot.setValueOn(xyz, value); } template<typename RootNodeType> inline void Tree<RootNodeType>::setValueOnly(const Coord& xyz, const ValueType& value) { mRoot.setValueOnly(xyz, value); } template<typename RootNodeType> template<typename AccessT> inline void Tree<RootNodeType>::setValue(const Coord& xyz, const ValueType& value, AccessT& accessor) { accessor.setValue(xyz, value); } template<typename RootNodeType> inline void Tree<RootNodeType>::setValueOn(const Coord& xyz) { mRoot.setActiveState(xyz, true); } template<typename RootNodeType> inline void Tree<RootNodeType>::setValueOn(const Coord& xyz, const ValueType& value) { mRoot.setValueOn(xyz, value); } template<typename RootNodeType> template<typename ModifyOp> inline void Tree<RootNodeType>::modifyValue(const Coord& xyz, const ModifyOp& op) { mRoot.modifyValue(xyz, op); } template<typename RootNodeType> template<typename ModifyOp> inline void Tree<RootNodeType>::modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op) { mRoot.modifyValueAndActiveState(xyz, op); } template<typename RootNodeType> inline bool Tree<RootNodeType>::probeValue(const Coord& xyz, ValueType& value) const { return mRoot.probeValue(xyz, value); } //////////////////////////////////////// template<typename RootNodeType> inline void Tree<RootNodeType>::addTile(Index level, const Coord& xyz, const ValueType& value, bool active) { mRoot.addTile(level, xyz, value, active); } template<typename RootNodeType> template<typename NodeT> inline NodeT* Tree<RootNodeType>::stealNode(const Coord& xyz, const ValueType& value, bool active) { this->clearAllAccessors(); return mRoot.template stealNode<NodeT>(xyz, value, active); } template<typename RootNodeType> inline typename RootNodeType::LeafNodeType* Tree<RootNodeType>::touchLeaf(const Coord& xyz) { return mRoot.touchLeaf(xyz); } template<typename 
RootNodeType> inline typename RootNodeType::LeafNodeType* Tree<RootNodeType>::probeLeaf(const Coord& xyz) { return mRoot.probeLeaf(xyz); } template<typename RootNodeType> inline const typename RootNodeType::LeafNodeType* Tree<RootNodeType>::probeConstLeaf(const Coord& xyz) const { return mRoot.probeConstLeaf(xyz); } template<typename RootNodeType> template<typename NodeType> inline NodeType* Tree<RootNodeType>::probeNode(const Coord& xyz) { return mRoot.template probeNode<NodeType>(xyz); } template<typename RootNodeType> template<typename NodeType> inline const NodeType* Tree<RootNodeType>::probeNode(const Coord& xyz) const { return this->template probeConstNode<NodeType>(xyz); } template<typename RootNodeType> template<typename NodeType> inline const NodeType* Tree<RootNodeType>::probeConstNode(const Coord& xyz) const { return mRoot.template probeConstNode<NodeType>(xyz); } //////////////////////////////////////// template<typename RootNodeType> inline void Tree<RootNodeType>::clip(const CoordBBox& bbox) { this->clearAllAccessors(); return mRoot.clip(bbox); } template<typename RootNodeType> inline void Tree<RootNodeType>::clipUnallocatedNodes() { this->clearAllAccessors(); for (LeafIter it = this->beginLeaf(); it; ) { const LeafNodeType* leaf = it.getLeaf(); ++it; // advance the iterator before deleting the leaf node if (!leaf->isAllocated()) { this->addTile(/*level=*/0, leaf->origin(), this->background(), /*active=*/false); } } } template<typename RootNodeType> inline Index32 Tree<RootNodeType>::unallocatedLeafCount() const { Index32 sum = 0; for (auto it = this->cbeginLeaf(); it; ++it) if (!it->isAllocated()) ++sum; return sum; } template<typename RootNodeType> inline void Tree<RootNodeType>::sparseFill(const CoordBBox& bbox, const ValueType& value, bool active) { this->clearAllAccessors(); return mRoot.sparseFill(bbox, value, active); } template<typename RootNodeType> inline void Tree<RootNodeType>::denseFill(const CoordBBox& bbox, const ValueType& value, bool 
active) { this->clearAllAccessors(); return mRoot.denseFill(bbox, value, active); } template<typename RootNodeType> inline void Tree<RootNodeType>::voxelizeActiveTiles(bool threaded) { this->clearAllAccessors(); mRoot.voxelizeActiveTiles(threaded); } template<typename RootNodeType> Metadata::Ptr Tree<RootNodeType>::getBackgroundValue() const { Metadata::Ptr result; if (Metadata::isRegisteredType(valueType())) { using MetadataT = TypedMetadata<ValueType>; result = Metadata::createMetadata(valueType()); if (result->typeName() == MetadataT::staticTypeName()) { MetadataT* m = static_cast<MetadataT*>(result.get()); m->value() = mRoot.background(); } } return result; } //////////////////////////////////////// template<typename RootNodeType> inline void Tree<RootNodeType>::merge(Tree& other, MergePolicy policy) { this->clearAllAccessors(); other.clearAllAccessors(); switch (policy) { case MERGE_ACTIVE_STATES: mRoot.template merge<MERGE_ACTIVE_STATES>(other.mRoot); break; case MERGE_NODES: mRoot.template merge<MERGE_NODES>(other.mRoot); break; case MERGE_ACTIVE_STATES_AND_NODES: mRoot.template merge<MERGE_ACTIVE_STATES_AND_NODES>(other.mRoot); break; } } template<typename RootNodeType> template<typename OtherRootNodeType> inline void Tree<RootNodeType>::topologyUnion(const Tree<OtherRootNodeType>& other) { this->clearAllAccessors(); mRoot.topologyUnion(other.root()); } template<typename RootNodeType> template<typename OtherRootNodeType> inline void Tree<RootNodeType>::topologyIntersection(const Tree<OtherRootNodeType>& other) { this->clearAllAccessors(); mRoot.topologyIntersection(other.root()); } template<typename RootNodeType> template<typename OtherRootNodeType> inline void Tree<RootNodeType>::topologyDifference(const Tree<OtherRootNodeType>& other) { this->clearAllAccessors(); mRoot.topologyDifference(other.root()); } //////////////////////////////////////// /// @brief Helper class to adapt a three-argument (a, b, result) CombineOp functor /// into a single-argument 
functor that accepts a CombineArgs struct template<typename AValueT, typename CombineOp, typename BValueT = AValueT> struct CombineOpAdapter { CombineOpAdapter(CombineOp& _op): op(_op) {} void operator()(CombineArgs<AValueT, BValueT>& args) const { op(args.a(), args.b(), args.result()); } CombineOp& op; }; template<typename RootNodeType> template<typename CombineOp> inline void Tree<RootNodeType>::combine(Tree& other, CombineOp& op, bool prune) { CombineOpAdapter<ValueType, CombineOp> extendedOp(op); this->combineExtended(other, extendedOp, prune); } /// @internal This overload is needed (for ICC and GCC, but not for VC) to disambiguate /// code like this: <tt>aTree.combine(bTree, MyCombineOp(...))</tt>. #ifndef _MSC_VER template<typename RootNodeType> template<typename CombineOp> inline void Tree<RootNodeType>::combine(Tree& other, const CombineOp& op, bool prune) { CombineOpAdapter<ValueType, const CombineOp> extendedOp(op); this->combineExtended(other, extendedOp, prune); } #endif template<typename RootNodeType> template<typename ExtendedCombineOp> inline void Tree<RootNodeType>::combineExtended(Tree& other, ExtendedCombineOp& op, bool prune) { this->clearAllAccessors(); mRoot.combine(other.root(), op, prune); } /// @internal This overload is needed (for ICC and GCC, but not for VC) to disambiguate /// code like this: <tt>aTree.combineExtended(bTree, MyCombineOp(...))</tt>. 
#ifndef _MSC_VER template<typename RootNodeType> template<typename ExtendedCombineOp> inline void Tree<RootNodeType>::combineExtended(Tree& other, const ExtendedCombineOp& op, bool prune) { this->clearAllAccessors(); mRoot.template combine<const ExtendedCombineOp>(other.mRoot, op, prune); } #endif template<typename RootNodeType> template<typename CombineOp, typename OtherTreeType> inline void Tree<RootNodeType>::combine2(const Tree& a, const OtherTreeType& b, CombineOp& op, bool prune) { CombineOpAdapter<ValueType, CombineOp, typename OtherTreeType::ValueType> extendedOp(op); this->combine2Extended(a, b, extendedOp, prune); } /// @internal This overload is needed (for ICC and GCC, but not for VC) to disambiguate /// code like this: <tt>tree.combine2(aTree, bTree, MyCombineOp(...))</tt>. #ifndef _MSC_VER template<typename RootNodeType> template<typename CombineOp, typename OtherTreeType> inline void Tree<RootNodeType>::combine2(const Tree& a, const OtherTreeType& b, const CombineOp& op, bool prune) { CombineOpAdapter<ValueType, const CombineOp, typename OtherTreeType::ValueType> extendedOp(op); this->combine2Extended(a, b, extendedOp, prune); } #endif template<typename RootNodeType> template<typename ExtendedCombineOp, typename OtherTreeType> inline void Tree<RootNodeType>::combine2Extended(const Tree& a, const OtherTreeType& b, ExtendedCombineOp& op, bool prune) { this->clearAllAccessors(); mRoot.combine2(a.root(), b.root(), op, prune); } /// @internal This overload is needed (for ICC and GCC, but not for VC) to disambiguate /// code like the following, where the functor argument is a temporary: /// <tt>tree.combine2Extended(aTree, bTree, MyCombineOp(...))</tt>. 
#ifndef _MSC_VER template<typename RootNodeType> template<typename ExtendedCombineOp, typename OtherTreeType> inline void Tree<RootNodeType>::combine2Extended(const Tree& a, const OtherTreeType& b, const ExtendedCombineOp& op, bool prune) { this->clearAllAccessors(); mRoot.template combine2<const ExtendedCombineOp>(a.root(), b.root(), op, prune); } #endif //////////////////////////////////////// template<typename RootNodeType> template<typename VisitorOp> inline void Tree<RootNodeType>::visit(VisitorOp& op) { this->clearAllAccessors(); mRoot.template visit<VisitorOp>(op); } template<typename RootNodeType> template<typename VisitorOp> inline void Tree<RootNodeType>::visit(VisitorOp& op) const { mRoot.template visit<VisitorOp>(op); } /// @internal This overload is needed (for ICC and GCC, but not for VC) to disambiguate /// code like this: <tt>tree.visit(MyVisitorOp(...))</tt>. template<typename RootNodeType> template<typename VisitorOp> inline void Tree<RootNodeType>::visit(const VisitorOp& op) { this->clearAllAccessors(); mRoot.template visit<const VisitorOp>(op); } /// @internal This overload is needed (for ICC and GCC, but not for VC) to disambiguate /// code like this: <tt>tree.visit(MyVisitorOp(...))</tt>. 
template<typename RootNodeType>
template<typename VisitorOp>
inline void
Tree<RootNodeType>::visit(const VisitorOp& op) const
{
    mRoot.template visit<const VisitorOp>(op);
}


////////////////////////////////////////


/// Visit this tree and another tree in lockstep (non-const overload).
template<typename RootNodeType>
template<typename OtherTreeType, typename VisitorOp>
inline void
Tree<RootNodeType>::visit2(OtherTreeType& other, VisitorOp& op)
{
    this->clearAllAccessors(); // the visitor may mutate this tree
    using OtherRootNodeType = typename OtherTreeType::RootNodeType;
    mRoot.template visit2<OtherRootNodeType, VisitorOp>(other.root(), op);
}


/// Visit this tree and another tree in lockstep (const overload).
template<typename RootNodeType>
template<typename OtherTreeType, typename VisitorOp>
inline void
Tree<RootNodeType>::visit2(OtherTreeType& other, VisitorOp& op) const
{
    using OtherRootNodeType = typename OtherTreeType::RootNodeType;
    mRoot.template visit2<OtherRootNodeType, VisitorOp>(other.root(), op);
}


/// @internal This overload is needed (for ICC and GCC, but not for VC) to disambiguate
/// code like this: <tt>aTree.visit2(bTree, MyVisitorOp(...))</tt>.
template<typename RootNodeType>
template<typename OtherTreeType, typename VisitorOp>
inline void
Tree<RootNodeType>::visit2(OtherTreeType& other, const VisitorOp& op)
{
    this->clearAllAccessors();
    using OtherRootNodeType = typename OtherTreeType::RootNodeType;
    mRoot.template visit2<OtherRootNodeType, const VisitorOp>(other.root(), op);
}


/// @internal This overload is needed (for ICC and GCC, but not for VC) to disambiguate
/// code like this: <tt>aTree.visit2(bTree, MyVisitorOp(...))</tt>.
template<typename RootNodeType> template<typename OtherTreeType, typename VisitorOp> inline void Tree<RootNodeType>::visit2(OtherTreeType& other, const VisitorOp& op) const { using OtherRootNodeType = typename OtherTreeType::RootNodeType; mRoot.template visit2<OtherRootNodeType, const VisitorOp>(other.root(), op); } //////////////////////////////////////// template<typename RootNodeType> inline const Name& Tree<RootNodeType>::treeType() { static std::once_flag once; std::call_once(once, []() { std::vector<Index> dims; Tree::getNodeLog2Dims(dims); std::ostringstream ostr; ostr << "Tree_" << typeNameAsString<BuildType>(); for (size_t i = 1, N = dims.size(); i < N; ++i) { // start from 1 to skip the RootNode ostr << "_" << dims[i]; } sTreeTypeName.reset(new Name(ostr.str())); }); return *sTreeTypeName; } template<typename RootNodeType> template<typename OtherRootNodeType> inline bool Tree<RootNodeType>::hasSameTopology(const Tree<OtherRootNodeType>& other) const { return mRoot.hasSameTopology(other.root()); } template<typename RootNodeType> Index64 Tree<RootNodeType>::inactiveVoxelCount() const { Coord dim(0, 0, 0); this->evalActiveVoxelDim(dim); const Index64 totalVoxels = dim.x() * dim.y() * dim.z(), activeVoxels = this->activeVoxelCount(); assert(totalVoxels >= activeVoxels); return totalVoxels - activeVoxels; } template<typename RootNodeType> inline bool Tree<RootNodeType>::evalLeafBoundingBox(CoordBBox& bbox) const { bbox.reset(); // default invalid bbox if (this->empty()) return false; // empty mRoot.evalActiveBoundingBox(bbox, false); return !bbox.empty(); } template<typename RootNodeType> inline bool Tree<RootNodeType>::evalActiveVoxelBoundingBox(CoordBBox& bbox) const { bbox.reset(); // default invalid bbox if (this->empty()) return false; // empty mRoot.evalActiveBoundingBox(bbox, true); return !bbox.empty(); } template<typename RootNodeType> inline bool Tree<RootNodeType>::evalActiveVoxelDim(Coord& dim) const { CoordBBox bbox; bool notEmpty = 
this->evalActiveVoxelBoundingBox(bbox);
    dim = bbox.extents();
    return notEmpty;
}


template<typename RootNodeType>
inline bool
Tree<RootNodeType>::evalLeafDim(Coord& dim) const
{
    CoordBBox bbox;
    bool notEmpty = this->evalLeafBoundingBox(bbox);
    dim = bbox.extents();
    return notEmpty;
}


// Compute the component-wise min and max over all active values.
// Both outputs are zero when the tree has no active values.
template<typename RootNodeType>
inline void
Tree<RootNodeType>::evalMinMax(ValueType& minVal, ValueType& maxVal) const
{
    /// @todo optimize
    minVal = maxVal = zeroVal<ValueType>();
    if (ValueOnCIter iter = this->cbeginValueOn()) {
        minVal = maxVal = *iter;
        for (++iter; iter; ++iter) {
            const ValueType& val = *iter;
            if (math::cwiseLessThan(val, minVal)) minVal = val;
            if (math::cwiseGreaterThan(val, maxVal)) maxVal = val;
        }
    }
}


// Collect the log2 dimensions of every node level, root first, leaf last.
template<typename RootNodeType>
inline void
Tree<RootNodeType>::getNodeLog2Dims(std::vector<Index>& dims)
{
    dims.clear();
    RootNodeType::getNodeLog2Dims(dims);
}


// Print a human-readable description of this tree.  Higher verbose levels
// add progressively more expensive statistics:
//   <= 0: nothing;  1: configuration only;  2: adds node/voxel counts;
//   > 2: adds unallocated-node count;  > 3: adds min/max values
//        (which forces loading of all non-resident nodes).
template<typename RootNodeType>
inline void
Tree<RootNodeType>::print(std::ostream& os, int verboseLevel) const
{
    if (verboseLevel <= 0) return;

    /// @todo Consider using boost::io::ios_precision_saver instead.
    // RAII guard: restore the caller's stream precision on scope exit.
    struct OnExit {
        std::ostream& os;
        std::streamsize savedPrecision;
        OnExit(std::ostream& _os): os(_os), savedPrecision(os.precision()) {}
        ~OnExit() { os.precision(savedPrecision); }
    };
    OnExit restorePrecision(os);

    std::vector<Index> dims;
    Tree::getNodeLog2Dims(dims);// leaf is the last element

    os << "Information about Tree:\n"
        << "  Type: " << this->type() << "\n";

    os << "  Configuration:\n";

    if (verboseLevel <= 1) {
        // Print node types and sizes.
        os << "    Root(" << mRoot.getTableSize() << ")";
        if (dims.size() > 1) {
            for (size_t i = 1, N = dims.size() - 1; i < N; ++i) {
                os << ", Internal(" << (1 << dims[i]) << "^3)";
            }
            os << ", Leaf(" << (1 << dims.back()) << "^3)\n";
        }
        os << "  Background value: " << mRoot.background() << "\n";
        return;
    }

    // The following is tree information that is expensive to extract.

    ValueType minVal = zeroVal<ValueType>(), maxVal = zeroVal<ValueType>();
    if (verboseLevel > 3) {
        // This forces loading of all non-resident nodes.
        this->evalMinMax(minVal, maxVal);
    }

#if OPENVDB_ABI_VERSION_NUMBER >= 7
    const auto nodeCount = this->nodeCount();//fast
    const Index32 leafCount = nodeCount.front();// leaf is the first element
#else
    std::vector<Index64> nodeCount(dims.size());
    for (NodeCIter it = cbeginNode(); it; ++it) ++(nodeCount[it.getDepth()]);//slow
    const Index64 leafCount = *nodeCount.rbegin();// leaf is the last element
#endif
    assert(dims.size() == nodeCount.size());

    Index64 totalNodeCount = 0;
    for (size_t i = 0; i < nodeCount.size(); ++i) totalNodeCount += nodeCount[i];

    // Print node types, counts and sizes.
    os << "    Root(1 x " << mRoot.getTableSize() << ")";
    if (dims.size() >= 2) {
        for (size_t i = 1, N = dims.size() - 1; i < N; ++i) {
            // NOTE(review): the ABI>=7 nodeCount() vector is ordered leaf-first,
            // hence the reversed [N - i] index relative to the legacy path.
#if OPENVDB_ABI_VERSION_NUMBER >= 7
            os << ", Internal(" << util::formattedInt(nodeCount[N - i]);
#else
            os << ", Internal(" << util::formattedInt(nodeCount[i]);
#endif
            os << " x " << (1 << dims[i]) << "^3)";
        }
        os << ", Leaf(" << util::formattedInt(leafCount);
        os << " x " << (1 << dims.back()) << "^3)\n";
    }
    os << "  Background value: " << mRoot.background() << "\n";

    // Statistics of topology and values

    if (verboseLevel > 3) {
        os << "  Min value: " << minVal << "\n";
        os << "  Max value: " << maxVal << "\n";
    }

    const Index64
        numActiveVoxels = this->activeVoxelCount(),
        numActiveLeafVoxels = this->activeLeafVoxelCount(),
        numActiveTiles = this->activeTileCount();

    os << "  Number of active voxels:       " << util::formattedInt(numActiveVoxels) << "\n";
    os << "  Number of active tiles:        " << util::formattedInt(numActiveTiles) << "\n";

    Coord dim(0, 0, 0);
    Index64 totalVoxels = 0;
    if (numActiveVoxels) { // nonempty
        CoordBBox bbox;
        this->evalActiveVoxelBoundingBox(bbox);
        dim = bbox.extents();
        // 64-bit product: the extents alone can overflow 32-bit arithmetic.
        totalVoxels = dim.x() * uint64_t(dim.y()) * dim.z();

        os << "  Bounding box of active voxels: " << bbox << "\n";
        os << "  Dimensions of active voxels:   "
            << dim[0] << " x " << dim[1] << " x " << dim[2] << "\n";

        const double activeRatio = (100.0 * double(numActiveVoxels)) / double(totalVoxels);
        os << "  Percentage of active voxels:   " << std::setprecision(3) << activeRatio << "%\n";

        if (leafCount > 0) {
            const double fillRatio = (100.0 * double(numActiveLeafVoxels))
                / (double(leafCount) * double(LeafNodeType::NUM_VOXELS));
            os << "  Average leaf node fill ratio:  " << fillRatio << "%\n";
        }

        if (verboseLevel > 2) {
            Index64 sum = 0;// count the number of unallocated leaf nodes
            for (auto it = this->cbeginLeaf(); it; ++it) if (!it->isAllocated()) ++sum;
            os << "  Number of unallocated nodes:   "
                << util::formattedInt(sum) << " ("
                << (100.0 * double(sum) / double(totalNodeCount)) << "%)\n";
        }
    } else {
        os << "  Tree is empty!\n";
    }
    os << std::flush;

    if (verboseLevel == 2) return;

    // Memory footprint in bytes
    const Index64
        actualMem = this->memUsage(),
        denseMem = sizeof(ValueType) * totalVoxels,
        voxelsMem = sizeof(ValueType) * numActiveLeafVoxels;
            ///< @todo not accurate for BoolTree (and probably should count tile values)

    os << "Memory footprint:\n";
    util::printBytes(os, actualMem, "  Actual:             ");
    util::printBytes(os, voxelsMem, "  Active leaf voxels: ");

    if (numActiveVoxels) {
        util::printBytes(os, denseMem, "  Dense equivalent:   ");
        os << "  Actual footprint is " << (100.0 * double(actualMem) / double(denseMem))
            << "% of an equivalent dense volume\n";
        os << "  Leaf voxel footprint is " << (100.0 * double(voxelsMem) / double(actualMem))
            << "% of actual footprint\n";
    }
}

} // namespace tree
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TREE_TREE_HAS_BEEN_INCLUDED
82,743
C
36.474638
103
0.668757
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/ValueAccessor.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file tree/ValueAccessor.h /// /// When traversing a grid in a spatially coherent pattern (e.g., iterating /// over neighboring voxels), request a @c ValueAccessor from the grid /// (with Grid::getAccessor()) and use the accessor's @c getValue() and /// @c setValue() methods. These will typically be significantly faster /// than accessing voxels directly in the grid's tree. /// /// @par Example: /// /// @code /// FloatGrid grid; /// FloatGrid::Accessor acc = grid.getAccessor(); /// // First access is slow: /// acc.setValue(Coord(0, 0, 0), 100); /// // Subsequent nearby accesses are fast, since the accessor now holds pointers /// // to nodes that contain (0, 0, 0) along the path from the root of the grid's /// // tree to the leaf: /// acc.setValue(Coord(0, 0, 1), 100); /// acc.getValue(Coord(0, 2, 0), 100); /// // Slow, because the accessor must be repopulated: /// acc.getValue(Coord(-1, -1, -1)); /// // Fast: /// acc.getValue(Coord(-1, -1, -2)); /// acc.setValue(Coord(-1, -2, 0), -100); /// @endcode #ifndef OPENVDB_TREE_VALUEACCESSOR_HAS_BEEN_INCLUDED #define OPENVDB_TREE_VALUEACCESSOR_HAS_BEEN_INCLUDED #include <tbb/null_mutex.h> #include <tbb/spin_mutex.h> #include <openvdb/version.h> #include <openvdb/Types.h> #include <cassert> #include <limits> #include <type_traits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { // Forward declarations of local classes that are not intended for general use // The IsSafe template parameter is explained in the warning below. 
template<typename TreeType, bool IsSafe = true> class ValueAccessor0; template<typename TreeType, bool IsSafe = true, Index L0 = 0> class ValueAccessor1; template<typename TreeType, bool IsSafe = true, Index L0 = 0, Index L1 = 1> class ValueAccessor2; template<typename TreeType, bool IsSafe = true, Index L0 = 0, Index L1 = 1, Index L2 = 2> class ValueAccessor3; template<typename TreeCacheT, typename NodeVecT, bool AtRoot> class CacheItem; /// @brief This base class for ValueAccessors manages registration of an accessor /// with a tree so that the tree can automatically clear the accessor whenever /// one of its nodes is deleted. /// /// @internal A base class is needed because ValueAccessor is templated on both /// a Tree type and a mutex type. The various instantiations of the template /// are distinct, unrelated types, so they can't easily be stored in a container /// such as the Tree's CacheRegistry. This base class, in contrast, is templated /// only on the Tree type, so for any given Tree, only two distinct instantiations /// are possible, ValueAccessorBase<Tree> and ValueAccessorBase<const Tree>. /// /// @warning If IsSafe = false then the ValueAccessor will not register itself /// with the tree from which it is constructed. While in some rare cases this can /// lead to better performance (since it avoids the small overhead of insertion /// on creation and deletion on destruction) it is also unsafe if the tree is /// modified. So unless you're an expert it is highly recommended to set /// IsSafe = true, which is the default in all derived ValueAccessors defined /// below. However if you know that the tree is no being modifed for the lifespan /// of the ValueAccessor AND the work performed per ValueAccessor is small relative /// to overhead of registering it you should consider setting IsSafe = false. If /// this turns out to improve performance you should really rewrite your code so as /// to better amortize the construction of the ValueAccessor, i.e. 
// reuse it as much as possible!  (continuation of the class doc comment above)
template<typename TreeType, bool IsSafe>
class ValueAccessorBase
{
public:
    /// @c true if this accessor was instantiated over a const tree type.
    static const bool IsConstTree = std::is_const<TreeType>::value;

    /// @brief Return true if this accessor is safe, i.e. registered
    /// by the tree from which it is constructed. Un-registered
    /// accessors can in rare cases be faster because it avoids the
    /// (small) overhead of registration, but they are unsafe if the
    /// tree is modified. So unless you're an expert it is highly
    /// recommended to set IsSafe = true (which is the default).
    static bool isSafe() { return IsSafe; }

    ValueAccessorBase(TreeType& tree): mTree(&tree)
    {
        // Register with the tree so it can clear this accessor's cache
        // when any of its nodes is deleted.
        if (IsSafe) tree.attachAccessor(*this);
    }

    virtual ~ValueAccessorBase() { if (IsSafe && mTree) mTree->releaseAccessor(*this); }

    /// @brief Return a pointer to the tree associated with this accessor.
    /// @details The pointer will be null only if the tree from which this accessor
    /// was constructed was subsequently deleted (which generally leaves the
    /// accessor in an unsafe state).
    TreeType* getTree() const { return mTree; }
    /// Return a reference to the tree associated with this accessor.
    TreeType& tree() const { assert(mTree); return *mTree; }

    ValueAccessorBase(const ValueAccessorBase& other): mTree(other.mTree)
    {
        if (IsSafe && mTree) mTree->attachAccessor(*this);
    }

    ValueAccessorBase& operator=(const ValueAccessorBase& other)
    {
        if (&other != this) {
            // Transfer registration from the old tree to the new one.
            if (IsSafe && mTree) mTree->releaseAccessor(*this);
            mTree = other.mTree;
            if (IsSafe && mTree) mTree->attachAccessor(*this);
        }
        return *this;
    }

    /// Remove all cached state (implemented by derived accessors).
    virtual void clear() = 0;

protected:
    // Allow trees to deregister themselves.
    template<typename> friend class Tree;

    // Called by the tree when it is destroyed, so this accessor
    // doesn't later dereference a dangling tree pointer.
    virtual void release() { mTree = nullptr; }

    TreeType* mTree;
}; // class ValueAccessorBase


////////////////////////////////////////


/// When traversing a grid in a spatially coherent pattern (e.g., iterating
/// over neighboring voxels), request a @c ValueAccessor from the grid
/// (with Grid::getAccessor()) and use the accessor's @c getValue() and
/// @c setValue() methods. These will typically be significantly faster
/// than accessing voxels directly in the grid's tree.
///
/// A ValueAccessor caches pointers to tree nodes along the path to a voxel (x, y, z).
/// A subsequent access to voxel (x', y', z') starts from the cached leaf node and
/// moves up until a cached node that encloses (x', y', z') is found, then traverses
/// down the tree from that node to a leaf, updating the cache with the new path.
/// This leads to significant acceleration of spatially-coherent accesses.
///
/// @param _TreeType    the type of the tree to be accessed [required]
/// @param IsSafe       if IsSafe = false then the ValueAccessor will
///                     not register itself with the tree from which
///                     it is constructed (see warning).
/// @param CacheLevels  the number of nodes to be cached, starting from the leaf level
///                     and not including the root (i.e., CacheLevels < DEPTH),
///                     and defaulting to all non-root nodes
/// @param MutexType    the type of mutex to use (see note)
///
/// @warning If IsSafe = false then the ValueAccessor will not register itself
/// with the tree from which it is constructed. While in some rare cases this can
/// lead to better performance (since it avoids the small overhead of insertion
/// on creation and deletion on destruction) it is also unsafe if the tree is
/// modified. So unless you're an expert it is highly recommended to set
/// IsSafe = true, which is the default.
/// However if you know that the tree is not
/// being modified for the lifespan of the ValueAccessor AND the work performed
/// per ValueAccessor is small relative to overhead of registering it you should
/// consider setting IsSafe = false. If this improves performance you should
/// really rewrite your code so as to better amortize the construction of the
/// ValueAccessor, i.e. reuse it as much as possible!
///
/// @note If @c MutexType is a TBB-compatible mutex, then multiple threads may
/// safely access a single, shared accessor. However, it is highly recommended
/// that, instead, each thread be assigned its own, non-mutex-protected accessor.
template<typename _TreeType,
    bool IsSafe = true,
    Index CacheLevels = _TreeType::DEPTH-1,
    typename MutexType = tbb::null_mutex>
class ValueAccessor: public ValueAccessorBase<_TreeType, IsSafe>
{
public:
    static_assert(CacheLevels < _TreeType::DEPTH, "cache size exceeds tree depth");

    using TreeType = _TreeType;
    using RootNodeT = typename TreeType::RootNodeType;
    using LeafNodeT = typename TreeType::LeafNodeType;
    using ValueType = typename RootNodeT::ValueType;
    using BaseT = ValueAccessorBase<TreeType, IsSafe>;
    // With the default tbb::null_mutex this lock compiles away entirely.
    using LockT = typename MutexType::scoped_lock;
    using BaseT::IsConstTree;

    ValueAccessor(TreeType& tree): BaseT(tree), mCache(*this)
    {
        // Prime the cache with the root node so every lookup terminates.
        mCache.insert(Coord(), &tree.root());
    }

    ValueAccessor(const ValueAccessor& other): BaseT(other), mCache(*this, other.mCache) {}

    ValueAccessor& operator=(const ValueAccessor& other)
    {
        if (&other != this) {
            this->BaseT::operator=(other);
            mCache.copy(*this, other.mCache);
        }
        return *this;
    }
    ~ValueAccessor() override = default;

    /// Return the number of cache levels employed by this accessor.
    static Index numCacheLevels() { return CacheLevels; }

    /// Return @c true if nodes along the path to the given voxel have been cached.
    bool isCached(const Coord& xyz) const { LockT lock(mMutex); return mCache.isCached(xyz); }

    /// Return the value of the voxel at the given coordinates.
    const ValueType& getValue(const Coord& xyz) const
    {
        LockT lock(mMutex);
        return mCache.getValue(xyz);
    }

    /// Return the active state of the voxel at the given coordinates.
    bool isValueOn(const Coord& xyz) const { LockT lock(mMutex); return mCache.isValueOn(xyz); }

    /// Return the active state of the voxel as well as its value
    bool probeValue(const Coord& xyz, ValueType& value) const
    {
        LockT lock(mMutex);
        return mCache.probeValue(xyz,value);
    }

    /// Return the tree depth (0 = root) at which the value of voxel (x, y, z) resides,
    /// or -1 if (x, y, z) isn't explicitly represented in the tree (i.e., if it is
    /// implicitly a background voxel).
    int getValueDepth(const Coord& xyz) const
    {
        LockT lock(mMutex);
        return mCache.getValueDepth(xyz);
    }

    /// Return @c true if the value of voxel (x, y, z) resides at the leaf level
    /// of the tree, i.e., if it is not a tile value.
    bool isVoxel(const Coord& xyz) const { LockT lock(mMutex); return mCache.isVoxel(xyz); }

    //@{
    /// Set the value of the voxel at the given coordinates and mark the voxel as active.
    void setValue(const Coord& xyz, const ValueType& value)
    {
        LockT lock(mMutex);
        mCache.setValue(xyz, value);
    }
    void setValueOn(const Coord& xyz, const ValueType& value) { this->setValue(xyz, value); }
    //@}

    /// Set the value of the voxel at the given coordinate but don't change its active state.
    void setValueOnly(const Coord& xyz, const ValueType& value)
    {
        LockT lock(mMutex);
        mCache.setValueOnly(xyz, value);
    }

    /// Set the value of the voxel at the given coordinates and mark the voxel as inactive.
    void setValueOff(const Coord& xyz, const ValueType& value)
    {
        LockT lock(mMutex);
        mCache.setValueOff(xyz, value);
    }

    /// @brief Apply a functor to the value of the voxel at the given coordinates
    /// and mark the voxel as active.
    /// @details See Tree::modifyValue() for details.
    template<typename ModifyOp>
    void modifyValue(const Coord& xyz, const ModifyOp& op)
    {
        LockT lock(mMutex);
        mCache.modifyValue(xyz, op);
    }

    /// @brief Apply a functor to the voxel at the given coordinates.
    /// @details See Tree::modifyValueAndActiveState() for details.
    template<typename ModifyOp>
    void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op)
    {
        LockT lock(mMutex);
        mCache.modifyValueAndActiveState(xyz, op);
    }

    /// Set the active state of the voxel at the given coordinates but don't change its value.
    void setActiveState(const Coord& xyz, bool on = true)
    {
        LockT lock(mMutex);
        mCache.setActiveState(xyz, on);
    }
    /// Mark the voxel at the given coordinates as active but don't change its value.
    void setValueOn(const Coord& xyz) { this->setActiveState(xyz, true); }
    /// Mark the voxel at the given coordinates as inactive but don't change its value.
    void setValueOff(const Coord& xyz) { this->setActiveState(xyz, false); }

    /// Return the cached node of type @a NodeType. [Mainly for internal use]
    template<typename NodeType>
    NodeType* getNode()
    {
        LockT lock(mMutex);
        NodeType* node = nullptr;
        mCache.getNode(node);
        return node;
    }

    /// Cache the given node, which should lie along the path from the root node to
    /// the node containing voxel (x, y, z). [Mainly for internal use]
    template<typename NodeType>
    void insertNode(const Coord& xyz, NodeType& node)
    {
        LockT lock(mMutex);
        mCache.insert(xyz, &node);
    }

    /// If a node of the given type exists in the cache, remove it, so that
    /// isCached(xyz) returns @c false for any voxel (x, y, z) contained in
    /// that node. [Mainly for internal use]
    template<typename NodeType>
    void eraseNode()
    {
        LockT lock(mMutex);
        NodeType* node = nullptr;
        mCache.erase(node);
    }

    /// @brief Add the specified leaf to this tree, possibly creating a child branch
    /// in the process. If the leaf node already exists, replace it.
    void addLeaf(LeafNodeT* leaf)
    {
        LockT lock(mMutex);
        mCache.addLeaf(leaf);
    }

    /// @brief Add a tile at the specified tree level that contains voxel (x, y, z),
    /// possibly deleting existing nodes or creating new nodes in the process.
    void addTile(Index level, const Coord& xyz, const ValueType& value, bool state)
    {
        LockT lock(mMutex);
        mCache.addTile(level, xyz, value, state);
    }

    /// @brief Return a pointer to the leaf node that contains voxel (x, y, z).
    /// If no such node exists, create one, but preserve the values and
    /// active states of all voxels.
    /// @details Use this method to preallocate a static tree topology
    /// over which to safely perform multithreaded processing.
    LeafNodeT* touchLeaf(const Coord& xyz)
    {
        LockT lock(mMutex);
        return mCache.touchLeaf(xyz);
    }

    //@{
    /// @brief Return a pointer to the node of the specified type that contains
    /// voxel (x, y, z), or @c nullptr if no such node exists.
    template<typename NodeT>
    NodeT* probeNode(const Coord& xyz)
    {
        LockT lock(mMutex);
        return mCache.template probeNode<NodeT>(xyz);
    }
    template<typename NodeT>
    const NodeT* probeConstNode(const Coord& xyz) const
    {
        LockT lock(mMutex);
        return mCache.template probeConstNode<NodeT>(xyz);
    }
    template<typename NodeT>
    const NodeT* probeNode(const Coord& xyz) const
    {
        return this->template probeConstNode<NodeT>(xyz);
    }
    //@}

    //@{
    /// @brief Return a pointer to the leaf node that contains voxel (x, y, z),
    /// or @c nullptr if no such node exists.
    LeafNodeT* probeLeaf(const Coord& xyz)
    {
        LockT lock(mMutex);
        return mCache.probeLeaf(xyz);
    }
    const LeafNodeT* probeConstLeaf(const Coord& xyz) const
    {
        LockT lock(mMutex);
        return mCache.probeConstLeaf(xyz);
    }
    const LeafNodeT* probeLeaf(const Coord& xyz) const { return this->probeConstLeaf(xyz); }
    //@}

    /// Remove all nodes from this cache, then reinsert the root node.
    void clear() override
    {
        LockT lock(mMutex);
        mCache.clear();
        if (this->mTree) mCache.insert(Coord(), &(this->mTree->root()));
    }

private:
    // Allow nodes to insert themselves into the cache.
    template<typename> friend class RootNode;
    template<typename, Index> friend class InternalNode;
    template<typename, Index> friend class LeafNode;
    // Allow trees to deregister themselves.
    template<typename> friend class Tree;

    /// Prevent this accessor from calling Tree::releaseCache() on a tree that
    /// no longer exists. (Called by mTree when it is destroyed.)
    void release() override
    {
        LockT lock(mMutex);
        this->BaseT::release();
        mCache.clear();
    }

    /// Cache the given node, which should lie along the path from the root node to
    /// the node containing voxel (x, y, z).
    /// @note This operation is not mutex-protected and is intended to be called
    /// only by nodes and only in the context of a getValue() or setValue() call.
    template<typename NodeType>
    void insert(const Coord& xyz, NodeType* node) { mCache.insert(xyz, node); }

    // Define a list of all tree node types from LeafNode to RootNode
    using InvTreeT = typename RootNodeT::NodeChainType;
    // Remove all tree node types that are excluded from the cache
    static constexpr int64_t First = CacheLevels;
    static constexpr int64_t Last = InvTreeT::template Index<RootNodeT>;
    using SubtreeT = typename InvTreeT::template RemoveByIndex<First, Last-1>;
    using CacheItemT = CacheItem<ValueAccessor, SubtreeT, SubtreeT::Size==1>;

    // Private member data
    mutable CacheItemT mCache;
    mutable MutexType  mMutex;

}; // class ValueAccessor


/// @brief Template specialization of the ValueAccessor with no mutex and no cache levels
/// @details This specialization is provided mainly for benchmarking.
/// Accessors with caching will almost always be faster.
template<typename TreeType, bool IsSafe>
class ValueAccessor<TreeType, IsSafe, 0, tbb::null_mutex>
    : public ValueAccessor0<TreeType, IsSafe>
{
public:
    ValueAccessor(TreeType& tree): ValueAccessor0<TreeType, IsSafe>(tree) {}
    ValueAccessor(const ValueAccessor& other): ValueAccessor0<TreeType, IsSafe>(other) {}
    ~ValueAccessor() override = default;
};


/// Template specialization of the ValueAccessor with no mutex and one cache level
template<typename TreeType, bool IsSafe>
class ValueAccessor<TreeType, IsSafe, 1, tbb::null_mutex>
    : public ValueAccessor1<TreeType, IsSafe>
{
public:
    ValueAccessor(TreeType& tree): ValueAccessor1<TreeType, IsSafe>(tree) {}
    ValueAccessor(const ValueAccessor& other): ValueAccessor1<TreeType, IsSafe>(other) {}
    ~ValueAccessor() override = default;
};


/// Template specialization of the ValueAccessor with no mutex and two cache levels
template<typename TreeType, bool IsSafe>
class ValueAccessor<TreeType, IsSafe, 2, tbb::null_mutex>
    : public ValueAccessor2<TreeType, IsSafe>
{
public:
    ValueAccessor(TreeType& tree): ValueAccessor2<TreeType, IsSafe>(tree) {}
    ValueAccessor(const ValueAccessor& other): ValueAccessor2<TreeType, IsSafe>(other) {}
    ~ValueAccessor() override = default;
};


/// Template specialization of the ValueAccessor with no mutex and three cache levels
template<typename TreeType, bool IsSafe>
class ValueAccessor<TreeType, IsSafe, 3, tbb::null_mutex>: public ValueAccessor3<TreeType, IsSafe>
{
public:
    ValueAccessor(TreeType& tree): ValueAccessor3<TreeType, IsSafe>(tree) {}
    ValueAccessor(const ValueAccessor&) = default;
    ValueAccessor& operator=(const ValueAccessor&) = default;
    ~ValueAccessor() override = default;
};


////////////////////////////////////////


/// @brief This accessor is thread-safe (at the cost of speed) for both reading and
/// writing to a tree. That is, multiple threads may safely access a single,
/// shared ValueAccessorRW.
///
/// @warning Since the mutex-locking employed by the ValueAccessorRW
/// can seriously impair performance of multithreaded applications, it
/// is recommended that, instead, each thread be assigned its own
/// (non-mutex protected) accessor.
template<typename TreeType, bool IsSafe = true>
class ValueAccessorRW: public ValueAccessor<TreeType, IsSafe, TreeType::DEPTH-1, tbb::spin_mutex>
{
public:
    ValueAccessorRW(TreeType& tree)
        : ValueAccessor<TreeType, IsSafe, TreeType::DEPTH-1, tbb::spin_mutex>(tree)
    {
    }
};


////////////////////////////////////////
//
// The classes below are for internal use and should rarely be used directly.
//

// An element of a compile-time linked list of node pointers, ordered from LeafNode to RootNode
template<typename TreeCacheT, typename NodeVecT, bool AtRoot>
class CacheItem
{
public:
    using NodeType = typename NodeVecT::Front;
    using ValueType = typename NodeType::ValueType;
    using LeafNodeType = typename NodeType::LeafNodeType;
    using CoordLimits = std::numeric_limits<Int32>;

    CacheItem(TreeCacheT& parent):
        mParent(&parent),
        mHash(CoordLimits::max()),
        mNode(nullptr),
        mNext(parent)
    {
    }

    //@{
    /// Copy another CacheItem's node pointers and hash keys, but not its parent pointer.
    CacheItem(TreeCacheT& parent, const CacheItem& other):
        mParent(&parent),
        mHash(other.mHash),
        mNode(other.mNode),
        mNext(parent, other.mNext)
    {
    }

    CacheItem& copy(TreeCacheT& parent, const CacheItem& other)
    {
        mParent = &parent;
        mHash = other.mHash;
        mNode = other.mNode;
        mNext.copy(parent, other.mNext);
        return *this;
    }
    //@}

    bool isCached(const Coord& xyz) const
    {
        return (this->isHashed(xyz) || mNext.isCached(xyz));
    }

    /// Cache the given node at this level.
    void insert(const Coord& xyz, const NodeType* node)
    {
        // The hash key is xyz masked down to the origin of the node
        // of this level's dimension that contains it.
        mHash = (node != nullptr) ? xyz & ~(NodeType::DIM-1) : Coord::max();
        mNode = node;
    }
    /// Forward the given node to another level of the cache.
    template<typename OtherNodeType>
    void insert(const Coord& xyz, const OtherNodeType* node) { mNext.insert(xyz, node); }

    /// Erase the node at this level.
    void erase(const NodeType*) { mHash = Coord::max(); mNode = nullptr; }
    /// Erase the node at another level of the cache.
    template<typename OtherNodeType>
    void erase(const OtherNodeType* node) { mNext.erase(node); }

    /// Erase the nodes at this and lower levels of the cache.
    void clear() { mHash = Coord::max(); mNode = nullptr; mNext.clear(); }

    /// Return the cached node (if any) at this level.
    void getNode(const NodeType*& node) const { node = mNode; }
    void getNode(const NodeType*& node) { node = mNode; }
    void getNode(NodeType*& node)
    {
        // This combination of a static assertion and a const_cast might not be elegant,
        // but it is a lot simpler than specializing TreeCache for const Trees.
        static_assert(!TreeCacheT::IsConstTree, "can't get a non-const node from a const tree");
        node = const_cast<NodeType*>(mNode);
    }
    /// Forward the request to another level of the cache.
    template<typename OtherNodeType>
    void getNode(OtherNodeType*& node) { mNext.getNode(node); }

    /// Return the value of the voxel at the given coordinates.
    const ValueType& getValue(const Coord& xyz)
    {
        if (this->isHashed(xyz)) {
            assert(mNode);
            return mNode->getValueAndCache(xyz, *mParent);
        }
        return mNext.getValue(xyz);
    }

    void addLeaf(LeafNodeType* leaf)
    {
        static_assert(!TreeCacheT::IsConstTree, "can't add a node to a const tree");
        // Leaves are added through a node above leaf level, never at it.
        if (NodeType::LEVEL == 0) return;
        if (this->isHashed(leaf->origin())) {
            assert(mNode);
            return const_cast<NodeType*>(mNode)->addLeafAndCache(leaf, *mParent);
        }
        mNext.addLeaf(leaf);
    }

    void addTile(Index level, const Coord& xyz, const ValueType& value, bool state)
    {
        static_assert(!TreeCacheT::IsConstTree, "can't add a tile to a const tree");
        // A tile at the given level can only be added via a node above that level.
        if (NodeType::LEVEL < level) return;
        if (this->isHashed(xyz)) {
            assert(mNode);
            return const_cast<NodeType*>(mNode)->addTileAndCache(
                level, xyz, value, state, *mParent);
        }
        mNext.addTile(level, xyz, value, state);
    }

    LeafNodeType* touchLeaf(const Coord& xyz)
    {
        static_assert(!TreeCacheT::IsConstTree, "can't get a non-const node from a const tree");
        if (this->isHashed(xyz)) {
            assert(mNode);
            return const_cast<NodeType*>(mNode)->touchLeafAndCache(xyz, *mParent);
        }
        return mNext.touchLeaf(xyz);
    }

    LeafNodeType* probeLeaf(const Coord& xyz)
    {
        static_assert(!TreeCacheT::IsConstTree, "can't get a non-const node from a const tree");
        if (this->isHashed(xyz)) {
            assert(mNode);
            return const_cast<NodeType*>(mNode)->probeLeafAndCache(xyz, *mParent);
        }
        return mNext.probeLeaf(xyz);
    }

    const LeafNodeType* probeConstLeaf(const Coord& xyz)
    {
        if (this->isHashed(xyz)) {
            assert(mNode);
            return mNode->probeConstLeafAndCache(xyz, *mParent);
        }
        return mNext.probeConstLeaf(xyz);
    }

    template<typename NodeT>
    NodeT* probeNode(const Coord& xyz)
    {
        static_assert(!TreeCacheT::IsConstTree, "can't get a non-const node from a const tree");
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        if (this->isHashed(xyz)) {
            // If the requested type is exactly this level's type, the cached
            // node itself is the answer; otherwise descend through it.
            if ((std::is_same<NodeT, NodeType>::value)) {
                assert(mNode);
                return reinterpret_cast<NodeT*>(const_cast<NodeType*>(mNode));
            }
            return const_cast<NodeType*>(mNode)->template probeNodeAndCache<NodeT>(xyz, *mParent);
        }
        return mNext.template probeNode<NodeT>(xyz);
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }

    template<typename NodeT>
    const NodeT* probeConstNode(const Coord& xyz)
    {
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        if (this->isHashed(xyz)) {
            if ((std::is_same<NodeT, NodeType>::value)) {
                assert(mNode);
                return reinterpret_cast<const NodeT*>(mNode);
            }
            return mNode->template probeConstNodeAndCache<NodeT>(xyz, *mParent);
        }
        return mNext.template probeConstNode<NodeT>(xyz);
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }

    /// Return the active state of the voxel at the given coordinates.
    bool isValueOn(const Coord& xyz)
    {
        if (this->isHashed(xyz)) {
            assert(mNode);
            return mNode->isValueOnAndCache(xyz, *mParent);
        }
        return mNext.isValueOn(xyz);
    }

    /// Return the active state and value of the voxel at the given coordinates.
    bool probeValue(const Coord& xyz, ValueType& value)
    {
        if (this->isHashed(xyz)) {
            assert(mNode);
            return mNode->probeValueAndCache(xyz, value, *mParent);
        }
        return mNext.probeValue(xyz, value);
    }

    int getValueDepth(const Coord& xyz)
    {
        if (this->isHashed(xyz)) {
            assert(mNode);
            // Convert the node-relative level into a root-relative depth.
            return static_cast<int>(TreeCacheT::RootNodeT::LEVEL) -
                static_cast<int>(mNode->getValueLevelAndCache(xyz, *mParent));
        } else {
            return mNext.getValueDepth(xyz);
        }
    }

    bool isVoxel(const Coord& xyz)
    {
        if (this->isHashed(xyz)) {
            assert(mNode);
            return mNode->getValueLevelAndCache(xyz, *mParent)==0;
        } else {
            return mNext.isVoxel(xyz);
        }
    }

    /// Set the value of the voxel at the given coordinates and mark the voxel as active.
    void setValue(const Coord& xyz, const ValueType& value)
    {
        if (this->isHashed(xyz)) {
            assert(mNode);
            static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values");
            const_cast<NodeType*>(mNode)->setValueAndCache(xyz, value, *mParent);
        } else {
            mNext.setValue(xyz, value);
        }
    }

    void setValueOnly(const Coord& xyz, const ValueType& value)
    {
        if (this->isHashed(xyz)) {
            assert(mNode);
            static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values");
            const_cast<NodeType*>(mNode)->setValueOnlyAndCache(xyz, value, *mParent);
        } else {
            mNext.setValueOnly(xyz, value);
        }
    }

    void setValueOn(const Coord& xyz, const ValueType& value) { this->setValue(xyz, value); }

    /// @brief Apply a functor to the value of the voxel at the given coordinates
    /// and mark the voxel as active.
    /// @details See Tree::modifyValue() for details.
    template<typename ModifyOp>
    void modifyValue(const Coord& xyz, const ModifyOp& op)
    {
        if (this->isHashed(xyz)) {
            assert(mNode);
            static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values");
            const_cast<NodeType*>(mNode)->modifyValueAndCache(xyz, op, *mParent);
        } else {
            mNext.modifyValue(xyz, op);
        }
    }

    /// @brief Apply a functor to the voxel at the given coordinates.
    /// @details See Tree::modifyValueAndActiveState() for details.
    template<typename ModifyOp>
    void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op)
    {
        if (this->isHashed(xyz)) {
            assert(mNode);
            static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values");
            const_cast<NodeType*>(mNode)->modifyValueAndActiveStateAndCache(xyz, op, *mParent);
        } else {
            mNext.modifyValueAndActiveState(xyz, op);
        }
    }

    /// Set the value of the voxel at the given coordinates and mark the voxel as inactive.
// --- CacheItem (continued) ---
    void setValueOff(const Coord& xyz, const ValueType& value)
    {
        if (this->isHashed(xyz)) {
            assert(mNode);
            static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values");
            const_cast<NodeType*>(mNode)->setValueOffAndCache(xyz, value, *mParent);
        } else {
            mNext.setValueOff(xyz, value);
        }
    }

    /// Set the active state of the voxel at the given coordinates.
    void setActiveState(const Coord& xyz, bool on)
    {
        if (this->isHashed(xyz)) {
            assert(mNode);
            static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values");
            const_cast<NodeType*>(mNode)->setActiveStateAndCache(xyz, on, *mParent);
        } else {
            mNext.setActiveState(xyz, on);
        }
    }

private:
    CacheItem(const CacheItem&);
    CacheItem& operator=(const CacheItem&);

    // A voxel is hashed at this level if rounding it down to this level's
    // node dimension yields the cached node's origin.
    bool isHashed(const Coord& xyz) const
    {
        return (xyz[0] & ~Coord::ValueType(NodeType::DIM-1)) == mHash[0]
            && (xyz[1] & ~Coord::ValueType(NodeType::DIM-1)) == mHash[1]
            && (xyz[2] & ~Coord::ValueType(NodeType::DIM-1)) == mHash[2];
    }

    TreeCacheT* mParent;
    Coord mHash;
    const NodeType* mNode;
    using RestT = typename NodeVecT::PopFront;
    // Next (higher) level of the cache, terminating at the root specialization.
    CacheItem<TreeCacheT, RestT, /*AtRoot=*/RestT::Size == 1> mNext;
};// end of CacheItem


/// The tail of a compile-time list of cached node pointers, ordered from LeafNode to RootNode
template<typename TreeCacheT, typename NodeVecT>
class CacheItem<TreeCacheT, NodeVecT, /*AtRoot=*/true>
{
public:
    using RootNodeType = typename NodeVecT::Front;
    using ValueType = typename RootNodeType::ValueType;
    using LeafNodeType = typename RootNodeType::LeafNodeType;

    CacheItem(TreeCacheT& parent): mParent(&parent), mRoot(nullptr) {}
    CacheItem(TreeCacheT& parent, const CacheItem& other): mParent(&parent), mRoot(other.mRoot) {}

    CacheItem& copy(TreeCacheT& parent, const CacheItem& other)
    {
        mParent = &parent;
        mRoot = other.mRoot;
        return *this;
    }

    bool isCached(const Coord& xyz) const { return this->isHashed(xyz); }

    void insert(const Coord&, const RootNodeType* root) { mRoot = root; }

    // Needed for node types that are not cached
    template<typename OtherNodeType>
    void insert(const Coord&, const OtherNodeType*) {}

    void erase(const RootNodeType*) { mRoot = nullptr; }

    void clear() { mRoot = nullptr; }

    void getNode(RootNodeType*& node)
    {
        static_assert(!TreeCacheT::IsConstTree, "can't get a non-const node from a const tree");
        node = const_cast<RootNodeType*>(mRoot);
    }
    void getNode(const RootNodeType*& node) const { node = mRoot; }

    void addLeaf(LeafNodeType* leaf)
    {
        assert(mRoot);
        static_assert(!TreeCacheT::IsConstTree, "can't add a node to a const tree");
        const_cast<RootNodeType*>(mRoot)->addLeafAndCache(leaf, *mParent);
    }

    void addTile(Index level, const Coord& xyz, const ValueType& value, bool state)
    {
        assert(mRoot);
        static_assert(!TreeCacheT::IsConstTree, "can't add a tile to a const tree");
        const_cast<RootNodeType*>(mRoot)->addTileAndCache(level, xyz, value, state, *mParent);
    }

    LeafNodeType* touchLeaf(const Coord& xyz)
    {
        assert(mRoot);
        static_assert(!TreeCacheT::IsConstTree, "can't get a non-const node from a const tree");
        return const_cast<RootNodeType*>(mRoot)->touchLeafAndCache(xyz, *mParent);
    }

    LeafNodeType* probeLeaf(const Coord& xyz)
    {
        assert(mRoot);
        static_assert(!TreeCacheT::IsConstTree, "can't get a non-const node from a const tree");
        return const_cast<RootNodeType*>(mRoot)->probeLeafAndCache(xyz, *mParent);
    }

    const LeafNodeType* probeConstLeaf(const Coord& xyz)
    {
        assert(mRoot);
        return mRoot->probeConstLeafAndCache(xyz, *mParent);
    }

    template<typename NodeType>
    NodeType* probeNode(const Coord& xyz)
    {
        assert(mRoot);
        static_assert(!TreeCacheT::IsConstTree, "can't get a non-const node from a const tree");
        return const_cast<RootNodeType*>(mRoot)->
            template probeNodeAndCache<NodeType>(xyz, *mParent);
    }

    template<typename NodeType>
    const NodeType* probeConstNode(const Coord& xyz)
    {
        assert(mRoot);
        return mRoot->template probeConstNodeAndCache<NodeType>(xyz, *mParent);
    }

    int getValueDepth(const Coord& xyz)
    {
        assert(mRoot);
        return mRoot->getValueDepthAndCache(xyz, *mParent);
    }
    bool isValueOn(const Coord& xyz)
    {
        assert(mRoot);
        return mRoot->isValueOnAndCache(xyz, *mParent);
    }

    bool probeValue(const Coord& xyz, ValueType& value)
    {
        assert(mRoot);
        return mRoot->probeValueAndCache(xyz, value, *mParent);
    }
    bool isVoxel(const Coord& xyz)
    {
        assert(mRoot);
        return mRoot->getValueDepthAndCache(xyz, *mParent) ==
            static_cast<int>(RootNodeType::LEVEL);
    }

    const ValueType& getValue(const Coord& xyz)
    {
        assert(mRoot);
        return mRoot->getValueAndCache(xyz, *mParent);
    }

    void setValue(const Coord& xyz, const ValueType& value)
    {
        assert(mRoot);
        static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values");
        const_cast<RootNodeType*>(mRoot)->setValueAndCache(xyz, value, *mParent);
    }
    void setValueOnly(const Coord& xyz, const ValueType& value)
    {
        assert(mRoot);
        static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values");
        const_cast<RootNodeType*>(mRoot)->setValueOnlyAndCache(xyz, value, *mParent);
    }
    void setValueOn(const Coord& xyz, const ValueType& value) { this->setValue(xyz, value); }

    template<typename ModifyOp>
    void modifyValue(const Coord& xyz, const ModifyOp& op)
    {
        assert(mRoot);
        static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values");
        const_cast<RootNodeType*>(mRoot)->modifyValueAndCache(xyz, op, *mParent);
    }

    template<typename ModifyOp>
    void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op)
    {
        assert(mRoot);
        static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values");
        const_cast<RootNodeType*>(mRoot)->modifyValueAndActiveStateAndCache(xyz, op, *mParent);
    }

    void setValueOff(const Coord& xyz, const ValueType& value)
    {
        assert(mRoot);
        static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values");
        const_cast<RootNodeType*>(mRoot)->setValueOffAndCache(xyz, value, *mParent);
    }

    void setActiveState(const Coord& xyz, bool on)
    {
        assert(mRoot);
        static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values");
        const_cast<RootNodeType*>(mRoot)->setActiveStateAndCache(xyz, on, *mParent);
    }

private:
    CacheItem(const CacheItem&);
    CacheItem& operator=(const CacheItem&);

    // The root is the cache terminator: nothing is ever hashed here, so a miss
    // at every lower level always falls through to the root node.
    bool isHashed(const Coord&) const { return false; }
    TreeCacheT* mParent;
    const RootNodeType* mRoot;
};// end of CacheItem specialized for RootNode


////////////////////////////////////////


/// @brief ValueAccessor with no mutex and no node caching.
/// @details This specialization is provided mainly for benchmarking.
/// Accessors with caching will almost always be faster.
template<typename _TreeType, bool IsSafe>
class ValueAccessor0: public ValueAccessorBase<_TreeType, IsSafe>
{
public:
    using TreeType = _TreeType;
    using ValueType = typename TreeType::ValueType;
    using RootNodeT = typename TreeType::RootNodeType;
    using LeafNodeT = typename TreeType::LeafNodeType;
    using BaseT = ValueAccessorBase<TreeType, IsSafe>;

    ValueAccessor0(TreeType& tree): BaseT(tree) {}

    ValueAccessor0(const ValueAccessor0& other): BaseT(other) {}

    /// Return the number of cache levels employed by this accessor.
    static Index numCacheLevels() { return 0; }

    ValueAccessor0& operator=(const ValueAccessor0& other)
    {
        if (&other != this) this->BaseT::operator=(other);
        return *this;
    }

    ~ValueAccessor0() override = default;

    /// Return @c true if nodes along the path to the given voxel have been cached.
    /// @note This accessor never caches, so this is always @c false.
    bool isCached(const Coord&) const { return false; }

    /// Return the value of the voxel at the given coordinates.
    const ValueType& getValue(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        return BaseT::mTree->getValue(xyz);
    }

    /// Return the active state of the voxel at the given coordinates.
    bool isValueOn(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        return BaseT::mTree->isValueOn(xyz);
    }

    /// Return the active state and, in @a value, the value of the voxel at the given coordinates.
// --- ValueAccessor0 (continued) ---
    bool probeValue(const Coord& xyz, ValueType& value) const
    {
        assert(BaseT::mTree);
        return BaseT::mTree->probeValue(xyz, value);
    }

    /// Return the tree depth (0 = root) at which the value of voxel (x, y, z) resides,
    /// or -1 if (x, y, z) isn't explicitly represented in the tree (i.e., if it is
    /// implicitly a background voxel).
    int getValueDepth(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        return BaseT::mTree->getValueDepth(xyz);
    }

    /// Return @c true if the value of voxel (x, y, z) resides at the leaf level
    /// of the tree, i.e., if it is not a tile value.
    bool isVoxel(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        return BaseT::mTree->getValueDepth(xyz) == static_cast<int>(RootNodeT::LEVEL);
    }

    //@{
    /// Set the value of the voxel at the given coordinates and mark the voxel as active.
    void setValue(const Coord& xyz, const ValueType& value)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        BaseT::mTree->setValue(xyz, value);
    }
    void setValueOn(const Coord& xyz, const ValueType& value) { this->setValue(xyz, value); }
    //@}

    /// Set the value of the voxel at the given coordinate but don't change its active state.
    void setValueOnly(const Coord& xyz, const ValueType& value)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        BaseT::mTree->setValueOnly(xyz, value);
    }

    /// Set the value of the voxel at the given coordinates and mark the voxel as inactive.
    void setValueOff(const Coord& xyz, const ValueType& value)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        BaseT::mTree->root().setValueOff(xyz, value);
    }

    /// @brief Apply a functor to the value of the voxel at the given coordinates
    /// and mark the voxel as active.
    /// @details See Tree::modifyValue() for details.
    template<typename ModifyOp>
    void modifyValue(const Coord& xyz, const ModifyOp& op)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        BaseT::mTree->modifyValue(xyz, op);
    }

    /// @brief Apply a functor to the voxel at the given coordinates.
    /// @details See Tree::modifyValueAndActiveState() for details.
    template<typename ModifyOp>
    void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        BaseT::mTree->modifyValueAndActiveState(xyz, op);
    }

    /// Set the active state of the voxel at the given coordinates but don't change its value.
    void setActiveState(const Coord& xyz, bool on = true)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        BaseT::mTree->setActiveState(xyz, on);
    }
    /// Mark the voxel at the given coordinates as active but don't change its value.
    void setValueOn(const Coord& xyz) { this->setActiveState(xyz, true); }
    /// Mark the voxel at the given coordinates as inactive but don't change its value.
    void setValueOff(const Coord& xyz) { this->setActiveState(xyz, false); }

    /// Return the cached node of type @a NodeType. [Mainly for internal use]
    /// @note This accessor does no caching, so the result is always null.
    template<typename NodeT> NodeT* getNode() { return nullptr; }

    /// Cache the given node, which should lie along the path from the root node to
    /// the node containing voxel (x, y, z). [Mainly for internal use]
    /// @note This accessor does no caching, so this is a no-op.
    template<typename NodeT> void insertNode(const Coord&, NodeT&) {}

    /// @brief Add the specified leaf to this tree, possibly creating a child branch
    /// in the process. If the leaf node already exists, replace it.
    void addLeaf(LeafNodeT* leaf)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't add a node to a const tree");
        BaseT::mTree->root().addLeaf(leaf);
    }

    /// @brief Add a tile at the specified tree level that contains voxel (x, y, z),
    /// possibly deleting existing nodes or creating new nodes in the process.
    void addTile(Index level, const Coord& xyz, const ValueType& value, bool state)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't add a tile to a const tree");
        BaseT::mTree->root().addTile(level, xyz, value, state);
    }

    /// If a node of the given type exists in the cache, remove it, so that
    /// isCached(xyz) returns @c false for any voxel (x, y, z) contained in
    /// that node. [Mainly for internal use]
    template<typename NodeT> void eraseNode() {}

    LeafNodeT* touchLeaf(const Coord& xyz)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't get a non-const node from a const tree");
        return BaseT::mTree->touchLeaf(xyz);
    }

    template<typename NodeT>
    NodeT* probeNode(const Coord& xyz)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't get a non-const node from a const tree");
        return BaseT::mTree->template probeNode<NodeT>(xyz);
    }

    template<typename NodeT>
    const NodeT* probeConstNode(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        return BaseT::mTree->template probeConstNode<NodeT>(xyz);
    }

    LeafNodeT* probeLeaf(const Coord& xyz)
    {
        return this->template probeNode<LeafNodeT>(xyz);
    }

    const LeafNodeT* probeConstLeaf(const Coord& xyz) const
    {
        return this->template probeConstNode<LeafNodeT>(xyz);
    }

    const LeafNodeT* probeLeaf(const Coord& xyz) const
    {
        return this->probeConstLeaf(xyz);
    }

    /// Remove all nodes from this cache, then reinsert the root node.
    /// @note This accessor has no cache, so this is a no-op.
    void clear() override {}

private:
    // Allow trees to deregister themselves.
    template<typename> friend class Tree;

    /// Prevent this accessor from calling Tree::releaseCache() on a tree that
    /// no longer exists. (Called by mTree when it is destroyed.)
    void release() override { this->BaseT::release(); }

}; // ValueAccessor0


/// @brief Value accessor with one level of node caching.
/// @details The node cache level is specified by L0 with the default value 0
/// (defined in the forward declaration) corresponding to a LeafNode.
///
/// @note This class is for experts only and should rarely be used
/// directly. Instead use ValueAccessor with its default template arguments.
template<typename _TreeType, bool IsSafe, Index L0>
class ValueAccessor1 : public ValueAccessorBase<_TreeType, IsSafe>
{
public:
    static_assert(_TreeType::DEPTH >= 2, "cache size exceeds tree depth");
    static_assert(L0 < _TreeType::RootNodeType::LEVEL, "invalid cache level");
    using TreeType = _TreeType;
    using ValueType = typename TreeType::ValueType;
    using RootNodeT = typename TreeType::RootNodeType;
    using LeafNodeT = typename TreeType::LeafNodeType;
    using BaseT = ValueAccessorBase<TreeType, IsSafe>;
    using InvTreeT = typename RootNodeT::NodeChainType;
    using NodeT0 = typename InvTreeT::template Get<L0>;

    /// Constructor from a tree
    ValueAccessor1(TreeType& tree)
        : BaseT(tree)
        , mKey0(Coord::max())
        , mNode0(nullptr)
    {
    }

    /// Copy constructor
    ValueAccessor1(const ValueAccessor1& other) : BaseT(other) { this->copy(other); }

    /// Return the number of cache levels employed by this ValueAccessor
    static Index numCacheLevels() { return 1; }

    /// Assignment operator
    ValueAccessor1& operator=(const ValueAccessor1& other)
    {
        if (&other != this) {
            this->BaseT::operator=(other);
            this->copy(other);
        }
        return *this;
    }

    /// Virtual destructor
    ~ValueAccessor1() override = default;

    /// Return @c true if any of the nodes along the path to the given
    /// voxel have been cached.
    bool isCached(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        return this->isHashed(xyz);
    }

    /// Return the value of the voxel at the given coordinates.
const ValueType& getValue(const Coord& xyz) const { assert(BaseT::mTree); if (this->isHashed(xyz)) { assert(mNode0); return mNode0->getValueAndCache(xyz, this->self()); } return BaseT::mTree->root().getValueAndCache(xyz, this->self()); } /// Return the active state of the voxel at the given coordinates. bool isValueOn(const Coord& xyz) const { assert(BaseT::mTree); if (this->isHashed(xyz)) { assert(mNode0); return mNode0->isValueOnAndCache(xyz, this->self()); } return BaseT::mTree->root().isValueOnAndCache(xyz, this->self()); } /// Return the active state of the voxel as well as its value bool probeValue(const Coord& xyz, ValueType& value) const { assert(BaseT::mTree); if (this->isHashed(xyz)) { assert(mNode0); return mNode0->probeValueAndCache(xyz, value, this->self()); } return BaseT::mTree->root().probeValueAndCache(xyz, value, this->self()); } /// Return the tree depth (0 = root) at which the value of voxel (x, y, z) resides, /// or -1 if (x, y, z) isn't explicitly represented in the tree (i.e., if it is /// implicitly a background voxel). int getValueDepth(const Coord& xyz) const { assert(BaseT::mTree); if (this->isHashed(xyz)) { assert(mNode0); return RootNodeT::LEVEL - mNode0->getValueLevelAndCache(xyz, this->self()); } return BaseT::mTree->root().getValueDepthAndCache(xyz, this->self()); } /// Return @c true if the value of voxel (x, y, z) resides at the leaf level /// of the tree, i.e., if it is not a tile value. bool isVoxel(const Coord& xyz) const { assert(BaseT::mTree); if (this->isHashed(xyz)) { assert(mNode0); return mNode0->getValueLevelAndCache(xyz, this->self()) == 0; } return BaseT::mTree->root().getValueDepthAndCache(xyz, this->self()) == static_cast<int>(RootNodeT::LEVEL); } //@{ /// Set the value of the voxel at the given coordinates and mark the voxel as active. 
void setValue(const Coord& xyz, const ValueType& value) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->setValueAndCache(xyz, value, *this); } else { BaseT::mTree->root().setValueAndCache(xyz, value, *this); } } void setValueOn(const Coord& xyz, const ValueType& value) { this->setValue(xyz, value); } //@} /// Set the value of the voxel at the given coordinate but preserves its active state. void setValueOnly(const Coord& xyz, const ValueType& value) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->setValueOnlyAndCache(xyz, value, *this); } else { BaseT::mTree->root().setValueOnlyAndCache(xyz, value, *this); } } /// Set the value of the voxel at the given coordinates and mark the voxel as inactive. void setValueOff(const Coord& xyz, const ValueType& value) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->setValueOffAndCache(xyz, value, *this); } else { BaseT::mTree->root().setValueOffAndCache(xyz, value, *this); } } /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. /// @details See Tree::modifyValue() for details. template<typename ModifyOp> void modifyValue(const Coord& xyz, const ModifyOp& op) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->modifyValueAndCache(xyz, op, *this); } else { BaseT::mTree->root().modifyValueAndCache(xyz, op, *this); } } /// @brief Apply a functor to the voxel at the given coordinates. /// @details See Tree::modifyValueAndActiveState() for details. 
template<typename ModifyOp> void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->modifyValueAndActiveStateAndCache(xyz, op, *this); } else { BaseT::mTree->root().modifyValueAndActiveStateAndCache(xyz, op, *this); } } /// Set the active state of the voxel at the given coordinates but don't change its value. void setActiveState(const Coord& xyz, bool on = true) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->setActiveStateAndCache(xyz, on, *this); } else { BaseT::mTree->root().setActiveStateAndCache(xyz, on, *this); } } /// Mark the voxel at the given coordinates as active but don't change its value. void setValueOn(const Coord& xyz) { this->setActiveState(xyz, true); } /// Mark the voxel at the given coordinates as inactive but don't change its value. void setValueOff(const Coord& xyz) { this->setActiveState(xyz, false); } /// Return the cached node of type @a NodeType. [Mainly for internal use] template<typename NodeT> NodeT* getNode() { const NodeT* node = nullptr; this->getNode(node); return const_cast<NodeT*>(node); } /// Cache the given node, which should lie along the path from the root node to /// the node containing voxel (x, y, z). [Mainly for internal use] template<typename NodeT> void insertNode(const Coord& xyz, NodeT& node) { this->insert(xyz, &node); } /// If a node of the given type exists in the cache, remove it, so that /// isCached(xyz) returns @c false for any voxel (x, y, z) contained in /// that node. [Mainly for internal use] template<typename NodeT> void eraseNode() { const NodeT* node = nullptr; this->eraseNode(node); } /// @brief Add the specified leaf to this tree, possibly creating a child branch /// in the process. 
If the leaf node already exists, replace it. void addLeaf(LeafNodeT* leaf) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't add a node to a const tree"); BaseT::mTree->root().addLeaf(leaf); } /// @brief Add a tile at the specified tree level that contains voxel (x, y, z), /// possibly deleting existing nodes or creating new nodes in the process. void addTile(Index level, const Coord& xyz, const ValueType& value, bool state) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't add a tile to a const tree"); BaseT::mTree->root().addTile(level, xyz, value, state); } /// @brief @return the leaf node that contains voxel (x, y, z) and /// if it doesn't exist, create it, but preserve the values and /// active states of all voxels. /// /// Use this method to preallocate a static tree topology over which to /// safely perform multithreaded processing. LeafNodeT* touchLeaf(const Coord& xyz) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't get a non-const node from a const tree"); if (this->isHashed(xyz)) { assert(mNode0); return const_cast<NodeT0*>(mNode0)->touchLeafAndCache(xyz, *this); } return BaseT::mTree->root().touchLeafAndCache(xyz, *this); } /// @brief @return a pointer to the node of the specified type that contains /// voxel (x, y, z) and if it doesn't exist, return @c nullptr. 
template<typename NodeT> NodeT* probeNode(const Coord& xyz) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't get a non-const node from a const tree"); OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if ((std::is_same<NodeT, NodeT0>::value)) { if (this->isHashed(xyz)) { assert(mNode0); return reinterpret_cast<NodeT*>(const_cast<NodeT0*>(mNode0)); } return BaseT::mTree->root().template probeNodeAndCache<NodeT>(xyz, *this); } return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } LeafNodeT* probeLeaf(const Coord& xyz) { return this->template probeNode<LeafNodeT>(xyz); } /// @brief @return a const pointer to the nodeof the specified type that contains /// voxel (x, y, z) and if it doesn't exist, return @c nullptr. template<typename NodeT> const NodeT* probeConstNode(const Coord& xyz) const { assert(BaseT::mTree); OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if ((std::is_same<NodeT, NodeT0>::value)) { if (this->isHashed(xyz)) { assert(mNode0); return reinterpret_cast<const NodeT*>(mNode0); } return BaseT::mTree->root().template probeConstNodeAndCache<NodeT>(xyz, this->self()); } return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } const LeafNodeT* probeConstLeaf(const Coord& xyz) const { return this->template probeConstNode<LeafNodeT>(xyz); } const LeafNodeT* probeLeaf(const Coord& xyz) const { return this->probeConstLeaf(xyz); } /// Remove all the cached nodes and invalidate the corresponding hash-keys. void clear() override { mKey0 = Coord::max(); mNode0 = nullptr; } private: // Allow nodes to insert themselves into the cache. template<typename> friend class RootNode; template<typename, Index> friend class InternalNode; template<typename, Index> friend class LeafNode; // Allow trees to deregister themselves. template<typename> friend class Tree; // This private method is merely for convenience. 
inline ValueAccessor1& self() const { return const_cast<ValueAccessor1&>(*this); } void getNode(const NodeT0*& node) { node = mNode0; } void getNode(const RootNodeT*& node) { node = (BaseT::mTree ? &BaseT::mTree->root() : nullptr); } template<typename OtherNodeType> void getNode(const OtherNodeType*& node) { node = nullptr; } void eraseNode(const NodeT0*) { mKey0 = Coord::max(); mNode0 = nullptr; } template<typename OtherNodeType> void eraseNode(const OtherNodeType*) {} /// Private copy method inline void copy(const ValueAccessor1& other) { mKey0 = other.mKey0; mNode0 = other.mNode0; } /// Prevent this accessor from calling Tree::releaseCache() on a tree that /// no longer exists. (Called by mTree when it is destroyed.) void release() override { this->BaseT::release(); this->clear(); } /// Cache the given node, which should lie along the path from the root node to /// the node containing voxel (x, y, z). /// @note This operation is not mutex-protected and is intended to be called /// only by nodes and only in the context of a getValue() or setValue() call. inline void insert(const Coord& xyz, const NodeT0* node) { assert(node); mKey0 = xyz & ~(NodeT0::DIM-1); mNode0 = node; } /// No-op in case a tree traversal attemps to insert a node that /// is not cached by the ValueAccessor template<typename OtherNodeType> inline void insert(const Coord&, const OtherNodeType*) {} inline bool isHashed(const Coord& xyz) const { return (xyz[0] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[0] && (xyz[1] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[1] && (xyz[2] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[2]; } mutable Coord mKey0; mutable const NodeT0* mNode0; }; // ValueAccessor1 /// @brief Value accessor with two levels of node caching. /// @details The node cache levels are specified by L0 and L1 /// with the default values 0 and 1 (defined in the forward declaration) /// corresponding to a LeafNode and its parent InternalNode. 
///
/// @note This class is for experts only and should rarely be used directly.
/// Instead use ValueAccessor with its default template arguments.
template<typename _TreeType, bool IsSafe, Index L0, Index L1>
class ValueAccessor2 : public ValueAccessorBase<_TreeType, IsSafe>
{
public:
    static_assert(_TreeType::DEPTH >= 3, "cache size exceeds tree depth");
    static_assert(L0 < L1, "invalid cache level");
    static_assert(L1 < _TreeType::RootNodeType::LEVEL, "invalid cache level");
    using TreeType = _TreeType;
    using ValueType = typename TreeType::ValueType;
    using RootNodeT = typename TreeType::RootNodeType;
    using LeafNodeT = typename TreeType::LeafNodeType;
    using BaseT = ValueAccessorBase<TreeType, IsSafe>;
    using InvTreeT = typename RootNodeT::NodeChainType;
    using NodeT0 = typename InvTreeT::template Get<L0>;
    using NodeT1 = typename InvTreeT::template Get<L1>;

    /// Constructor from a tree
    ValueAccessor2(TreeType& tree) : BaseT(tree),
        mKey0(Coord::max()), mNode0(nullptr),
        mKey1(Coord::max()), mNode1(nullptr) {}

    /// Copy constructor
    ValueAccessor2(const ValueAccessor2& other) : BaseT(other) { this->copy(other); }

    /// Return the number of cache levels employed by this ValueAccessor
    static Index numCacheLevels() { return 2; }

    /// Assignment operator
    ValueAccessor2& operator=(const ValueAccessor2& other)
    {
        if (&other != this) {
            this->BaseT::operator=(other);
            this->copy(other);
        }
        return *this;
    }

    /// Virtual destructor
    ~ValueAccessor2() override = default;

    /// Return @c true if any of the nodes along the path to the given
    /// voxel have been cached.
    bool isCached(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        return this->isHashed1(xyz) || this->isHashed0(xyz);
    }

    /// Return the value of the voxel at the given coordinates.
    /// @note Lookups fall through the cache levels from deepest (level 0) to the root.
    const ValueType& getValue(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            return mNode0->getValueAndCache(xyz, this->self());
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            return mNode1->getValueAndCache(xyz, this->self());
        }
        return BaseT::mTree->root().getValueAndCache(xyz, this->self());
    }

    /// Return the active state of the voxel at the given coordinates.
    bool isValueOn(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            return mNode0->isValueOnAndCache(xyz, this->self());
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            return mNode1->isValueOnAndCache(xyz, this->self());
        }
        return BaseT::mTree->root().isValueOnAndCache(xyz, this->self());
    }

    /// Return the active state of the voxel as well as its value
    bool probeValue(const Coord& xyz, ValueType& value) const
    {
        assert(BaseT::mTree);
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            return mNode0->probeValueAndCache(xyz, value, this->self());
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            return mNode1->probeValueAndCache(xyz, value, this->self());
        }
        return BaseT::mTree->root().probeValueAndCache(xyz, value, this->self());
    }

    /// Return the tree depth (0 = root) at which the value of voxel (x, y, z) resides,
    /// or -1 if (x, y, z) isn't explicitly represented in the tree (i.e., if it is
    /// implicitly a background voxel).
    int getValueDepth(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            return RootNodeT::LEVEL - mNode0->getValueLevelAndCache(xyz, this->self());
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            return RootNodeT::LEVEL - mNode1->getValueLevelAndCache(xyz, this->self());
        }
        return BaseT::mTree->root().getValueDepthAndCache(xyz, this->self());
    }

    /// Return @c true if the value of voxel (x, y, z) resides at the leaf level
    /// of the tree, i.e., if it is not a tile value.
    bool isVoxel(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            return mNode0->getValueLevelAndCache(xyz, this->self())==0;
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            return mNode1->getValueLevelAndCache(xyz, this->self())==0;
        }
        return BaseT::mTree->root().getValueDepthAndCache(xyz, this->self()) ==
               static_cast<int>(RootNodeT::LEVEL);
    }

    //@{
    /// Set the value of the voxel at the given coordinates and mark the voxel as active.
    void setValue(const Coord& xyz, const ValueType& value)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            const_cast<NodeT0*>(mNode0)->setValueAndCache(xyz, value, *this);
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            const_cast<NodeT1*>(mNode1)->setValueAndCache(xyz, value, *this);
        } else {
            BaseT::mTree->root().setValueAndCache(xyz, value, *this);
        }
    }

    void setValueOn(const Coord& xyz, const ValueType& value) { this->setValue(xyz, value); }
    //@}

    /// Set the value of the voxel at the given coordinate but preserve its active state.
    void setValueOnly(const Coord& xyz, const ValueType& value)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            const_cast<NodeT0*>(mNode0)->setValueOnlyAndCache(xyz, value, *this);
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            const_cast<NodeT1*>(mNode1)->setValueOnlyAndCache(xyz, value, *this);
        } else {
            BaseT::mTree->root().setValueOnlyAndCache(xyz, value, *this);
        }
    }

    /// Set the value of the voxel at the given coordinates and mark the voxel as inactive.
    void setValueOff(const Coord& xyz, const ValueType& value)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            const_cast<NodeT0*>(mNode0)->setValueOffAndCache(xyz, value, *this);
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            const_cast<NodeT1*>(mNode1)->setValueOffAndCache(xyz, value, *this);
        } else {
            BaseT::mTree->root().setValueOffAndCache(xyz, value, *this);
        }
    }

    /// @brief Apply a functor to the value of the voxel at the given coordinates
    /// and mark the voxel as active.
    /// @details See Tree::modifyValue() for details.
    template<typename ModifyOp>
    void modifyValue(const Coord& xyz, const ModifyOp& op)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            const_cast<NodeT0*>(mNode0)->modifyValueAndCache(xyz, op, *this);
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            const_cast<NodeT1*>(mNode1)->modifyValueAndCache(xyz, op, *this);
        } else {
            BaseT::mTree->root().modifyValueAndCache(xyz, op, *this);
        }
    }

    /// @brief Apply a functor to the voxel at the given coordinates.
    /// @details See Tree::modifyValueAndActiveState() for details.
    template<typename ModifyOp>
    void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            const_cast<NodeT0*>(mNode0)->modifyValueAndActiveStateAndCache(xyz, op, *this);
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            const_cast<NodeT1*>(mNode1)->modifyValueAndActiveStateAndCache(xyz, op, *this);
        } else {
            BaseT::mTree->root().modifyValueAndActiveStateAndCache(xyz, op, *this);
        }
    }

    /// Set the active state of the voxel at the given coordinates without changing its value.
    void setActiveState(const Coord& xyz, bool on = true)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            const_cast<NodeT0*>(mNode0)->setActiveStateAndCache(xyz, on, *this);
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            const_cast<NodeT1*>(mNode1)->setActiveStateAndCache(xyz, on, *this);
        } else {
            BaseT::mTree->root().setActiveStateAndCache(xyz, on, *this);
        }
    }

    /// Mark the voxel at the given coordinates as active without changing its value.
    void setValueOn(const Coord& xyz) { this->setActiveState(xyz, true); }

    /// Mark the voxel at the given coordinates as inactive without changing its value.
    void setValueOff(const Coord& xyz) { this->setActiveState(xyz, false); }

    /// Return the cached node of type @a NodeType. [Mainly for internal use]
    template<typename NodeT>
    NodeT* getNode()
    {
        const NodeT* node = nullptr;
        this->getNode(node); // dispatches to the private overloads below
        return const_cast<NodeT*>(node);
    }

    /// Cache the given node, which should lie along the path from the root node to
    /// the node containing voxel (x, y, z). [Mainly for internal use]
    template<typename NodeT>
    void insertNode(const Coord& xyz, NodeT& node) { this->insert(xyz, &node); }

    /// If a node of the given type exists in the cache, remove it, so that
    /// isCached(xyz) returns @c false for any voxel (x, y, z) contained in
    /// that node. [Mainly for internal use]
    template<typename NodeT>
    void eraseNode()
    {
        const NodeT* node = nullptr;
        this->eraseNode(node); // dispatches to the private overloads below
    }

    /// @brief Add the specified leaf to this tree, possibly creating a child branch
    /// in the process. If the leaf node already exists, replace it.
    void addLeaf(LeafNodeT* leaf)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't add a node to a const tree");
        if (this->isHashed1(leaf->origin())) {
            assert(mNode1);
            return const_cast<NodeT1*>(mNode1)->addLeafAndCache(leaf, *this);
        }
        BaseT::mTree->root().addLeafAndCache(leaf, *this);
    }

    /// @brief Add a tile at the specified tree level that contains voxel (x, y, z),
    /// possibly deleting existing nodes or creating new nodes in the process.
    void addTile(Index level, const Coord& xyz, const ValueType& value, bool state)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't add a tile to a const tree");
        if (this->isHashed1(xyz)) {
            assert(mNode1);
            return const_cast<NodeT1*>(mNode1)->addTileAndCache(level, xyz, value, state, *this);
        }
        BaseT::mTree->root().addTileAndCache(level, xyz, value, state, *this);
    }

    /// @brief @return the leaf node that contains voxel (x, y, z) and
    /// if it doesn't exist, create it, but preserve the values and
    /// active states of all voxels.
    ///
    /// Use this method to preallocate a static tree topology over which to
    /// safely perform multithreaded processing.
    LeafNodeT* touchLeaf(const Coord& xyz)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't get a non-const node from a const tree");
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            return const_cast<NodeT0*>(mNode0)->touchLeafAndCache(xyz, *this);
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            return const_cast<NodeT1*>(mNode1)->touchLeafAndCache(xyz, *this);
        }
        return BaseT::mTree->root().touchLeafAndCache(xyz, *this);
    }

    /// @brief @return a pointer to the node of the specified type that contains
    /// voxel (x, y, z) and if it doesn't exist, return @c nullptr.
    template<typename NodeT>
    NodeT* probeNode(const Coord& xyz)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't get a non-const node from a const tree");
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        // Only the two cached node types can be probed through this cache.
        if ((std::is_same<NodeT, NodeT0>::value)) {
            if (this->isHashed0(xyz)) {
                assert(mNode0);
                return reinterpret_cast<NodeT*>(const_cast<NodeT0*>(mNode0));
            } else if (this->isHashed1(xyz)) {
                assert(mNode1);
                return const_cast<NodeT1*>(mNode1)->template probeNodeAndCache<NodeT>(xyz, *this);
            }
            return BaseT::mTree->root().template probeNodeAndCache<NodeT>(xyz, *this);
        } else if ((std::is_same<NodeT, NodeT1>::value)) {
            if (this->isHashed1(xyz)) {
                assert(mNode1);
                return reinterpret_cast<NodeT*>(const_cast<NodeT1*>(mNode1));
            }
            return BaseT::mTree->root().template probeNodeAndCache<NodeT>(xyz, *this);
        }
        return nullptr;
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }

    /// @brief @return a pointer to the leaf node that contains
    /// voxel (x, y, z) and if it doesn't exist, return @c nullptr.
    LeafNodeT* probeLeaf(const Coord& xyz) { return this->template probeNode<LeafNodeT>(xyz); }

    /// @brief @return a const pointer to the node of the specified type that contains
    /// voxel (x, y, z) and if it doesn't exist, return @c nullptr.
    /// @note NOTE(review): despite its name, this templated overload probes for an
    /// arbitrary node type and duplicates probeConstNode() below (and, unlike it,
    /// performs no assert on the tree pointer) — presumably it was intended to be
    /// named probeConstNode; confirm against upstream before renaming.
    template<typename NodeT>
    const NodeT* probeConstLeaf(const Coord& xyz) const
    {
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        if ((std::is_same<NodeT, NodeT0>::value)) {
            if (this->isHashed0(xyz)) {
                assert(mNode0);
                return reinterpret_cast<const NodeT*>(mNode0);
            } else if (this->isHashed1(xyz)) {
                assert(mNode1);
                return mNode1->template probeConstNodeAndCache<NodeT>(xyz, this->self());
            }
            return BaseT::mTree->root().template probeConstNodeAndCache<NodeT>(xyz, this->self());
        } else if ((std::is_same<NodeT, NodeT1>::value)) {
            if (this->isHashed1(xyz)) {
                assert(mNode1);
                return reinterpret_cast<const NodeT*>(mNode1);
            }
            return BaseT::mTree->root().template probeConstNodeAndCache<NodeT>(xyz, this->self());
        }
        return nullptr;
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }

    /// @brief @return a const pointer to the leaf node that contains
    /// voxel (x, y, z) and if it doesn't exist, return @c nullptr.
    const LeafNodeT* probeConstLeaf(const Coord& xyz) const
    {
        return this->template probeConstNode<LeafNodeT>(xyz);
    }

    const LeafNodeT* probeLeaf(const Coord& xyz) const { return this->probeConstLeaf(xyz); }

    /// @brief @return a const pointer to the node of the specified type that contains
    /// voxel (x, y, z) and if it doesn't exist, return @c nullptr.
    template<typename NodeT>
    const NodeT* probeConstNode(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        if ((std::is_same<NodeT, NodeT0>::value)) {
            if (this->isHashed0(xyz)) {
                assert(mNode0);
                return reinterpret_cast<const NodeT*>(mNode0);
            } else if (this->isHashed1(xyz)) {
                assert(mNode1);
                return mNode1->template probeConstNodeAndCache<NodeT>(xyz, this->self());
            }
            return BaseT::mTree->root().template probeConstNodeAndCache<NodeT>(xyz, this->self());
        } else if ((std::is_same<NodeT, NodeT1>::value)) {
            if (this->isHashed1(xyz)) {
                assert(mNode1);
                return reinterpret_cast<const NodeT*>(mNode1);
            }
            return BaseT::mTree->root().template probeConstNodeAndCache<NodeT>(xyz, this->self());
        }
        return nullptr;
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }

    /// Remove all the cached nodes and invalidate the corresponding hash-keys.
    void clear() override
    {
        mKey0 = Coord::max();
        mNode0 = nullptr;
        mKey1 = Coord::max();
        mNode1 = nullptr;
    }

private:
    // Allow nodes to insert themselves into the cache.
    template<typename> friend class RootNode;
    template<typename, Index> friend class InternalNode;
    template<typename, Index> friend class LeafNode;
    // Allow trees to deregister themselves.
    template<typename> friend class Tree;

    // This private method is merely for convenience.
    inline ValueAccessor2& self() const { return const_cast<ValueAccessor2&>(*this); }

    void getNode(const NodeT0*& node) { node = mNode0; }
    void getNode(const NodeT1*& node) { node = mNode1; }
    void getNode(const RootNodeT*& node)
    {
        node = (BaseT::mTree ? &BaseT::mTree->root() : nullptr);
    }
    template<typename OtherNodeType> void getNode(const OtherNodeType*& node) { node = nullptr; }

    void eraseNode(const NodeT0*) { mKey0 = Coord::max(); mNode0 = nullptr; }
    void eraseNode(const NodeT1*) { mKey1 = Coord::max(); mNode1 = nullptr; }
    template<typename OtherNodeType> void eraseNode(const OtherNodeType*) {}

    /// Private copy method
    inline void copy(const ValueAccessor2& other)
    {
        mKey0 = other.mKey0;
        mNode0 = other.mNode0;
        mKey1 = other.mKey1;
        mNode1 = other.mNode1;
    }

    /// Prevent this accessor from calling Tree::releaseCache() on a tree that
    /// no longer exists. (Called by mTree when it is destroyed.)
    void release() override
    {
        this->BaseT::release();
        this->clear();
    }

    /// Cache the given node, which should lie along the path from the root node to
    /// the node containing voxel (x, y, z).
    /// @note This operation is not mutex-protected and is intended to be called
    /// only by nodes and only in the context of a getValue() or setValue() call.
    inline void insert(const Coord& xyz, const NodeT0* node)
    {
        assert(node);
        mKey0 = xyz & ~(NodeT0::DIM-1);
        mNode0 = node;
    }
    inline void insert(const Coord& xyz, const NodeT1* node)
    {
        assert(node);
        mKey1 = xyz & ~(NodeT1::DIM-1);
        mNode1 = node;
    }
    /// No-op in case a tree traversal attempts to insert a node that
    /// is not cached by the ValueAccessor
    template<typename NodeT>
    inline void insert(const Coord&, const NodeT*) {}

    inline bool isHashed0(const Coord& xyz) const
    {
        return (xyz[0] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[0]
            && (xyz[1] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[1]
            && (xyz[2] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[2];
    }
    inline bool isHashed1(const Coord& xyz) const
    {
        return (xyz[0] & ~Coord::ValueType(NodeT1::DIM-1)) == mKey1[0]
            && (xyz[1] & ~Coord::ValueType(NodeT1::DIM-1)) == mKey1[1]
            && (xyz[2] & ~Coord::ValueType(NodeT1::DIM-1)) == mKey1[2];
    }

    mutable Coord mKey0;
    mutable const NodeT0* mNode0;
    mutable Coord mKey1;
    mutable const NodeT1* mNode1;
}; //
/// @brief Value accessor with three levels of node caching.
/// @details The node cache levels are specified by L0, L1, and L2
/// with the default values 0, 1 and 2 (defined in the forward declaration)
/// corresponding to a LeafNode, its parent InternalNode, and its parent InternalNode.
/// Since the default configuration of all typed trees and grids, e.g.,
/// FloatTree or FloatGrid, has a depth of four, this value accessor is the one
/// used by default.
///
/// @note This class is for experts only and should rarely be used
/// directly. Instead use ValueAccessor with its default template arguments.
template<typename _TreeType, bool IsSafe, Index L0, Index L1, Index L2>
class ValueAccessor3 : public ValueAccessorBase<_TreeType, IsSafe>
{
public:
    // A three-level cache only makes sense if the tree is deep enough and the
    // cache levels are strictly increasing and all below the root.
    static_assert(_TreeType::DEPTH >= 4, "cache size exceeds tree depth");
    static_assert(L0 < L1, "invalid cache level");
    static_assert(L1 < L2, "invalid cache level");
    static_assert(L2 < _TreeType::RootNodeType::LEVEL, "invalid cache level");

    using TreeType = _TreeType;
    using ValueType = typename TreeType::ValueType;
    using RootNodeT = typename TreeType::RootNodeType;
    using LeafNodeT = typename TreeType::LeafNodeType;
    using BaseT = ValueAccessorBase<TreeType, IsSafe>;
    using InvTreeT = typename RootNodeT::NodeChainType;
    // Node types cached at each of the three levels (NodeT0 is the deepest).
    using NodeT0 = typename InvTreeT::template Get<L0>;
    using NodeT1 = typename InvTreeT::template Get<L1>;
    using NodeT2 = typename InvTreeT::template Get<L2>;

    /// Constructor from a tree
    ValueAccessor3(TreeType& tree)
        : BaseT(tree)
        , mKey0(Coord::max()), mNode0(nullptr)
        , mKey1(Coord::max()), mNode1(nullptr)
        , mKey2(Coord::max()), mNode2(nullptr)
    {
    }

    /// Copy constructor
    ValueAccessor3(const ValueAccessor3& other) : BaseT(other) { this->copy(other); }

    /// Assignment operator
    ValueAccessor3& operator=(const ValueAccessor3& other)
    {
        if (&other != this) {
            this->BaseT::operator=(other);
            this->copy(other);
        }
        return *this;
    }

    /// Return the number of cache levels employed by this ValueAccessor.
    static Index numCacheLevels() { return 3; }

    /// Virtual destructor
    ~ValueAccessor3() override = default;

    /// Return @c true if any of the nodes along the path to the given
    /// voxel have been cached.
    bool isCached(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        return this->isHashed2(xyz) || this->isHashed1(xyz) || this->isHashed0(xyz);
    }

    /// Return the value of the voxel at the given coordinates.
    /// @note Lookups probe the deepest (smallest) cached node first and fall
    /// back to the root only when no cached node covers @a xyz.
    const ValueType& getValue(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            return mNode0->getValueAndCache(xyz, this->self());
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            return mNode1->getValueAndCache(xyz, this->self());
        } else if (this->isHashed2(xyz)) {
            assert(mNode2);
            return mNode2->getValueAndCache(xyz, this->self());
        }
        return BaseT::mTree->root().getValueAndCache(xyz, this->self());
    }

    /// Return the active state of the voxel at the given coordinates.
    bool isValueOn(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            return mNode0->isValueOnAndCache(xyz, this->self());
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            return mNode1->isValueOnAndCache(xyz, this->self());
        } else if (this->isHashed2(xyz)) {
            assert(mNode2);
            return mNode2->isValueOnAndCache(xyz, this->self());
        }
        return BaseT::mTree->root().isValueOnAndCache(xyz, this->self());
    }

    /// Return the active state of the voxel as well as its value.
    bool probeValue(const Coord& xyz, ValueType& value) const
    {
        assert(BaseT::mTree);
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            return mNode0->probeValueAndCache(xyz, value, this->self());
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            return mNode1->probeValueAndCache(xyz, value, this->self());
        } else if (this->isHashed2(xyz)) {
            assert(mNode2);
            return mNode2->probeValueAndCache(xyz, value, this->self());
        }
        return BaseT::mTree->root().probeValueAndCache(xyz, value, this->self());
    }

    /// Return the tree depth (0 = root) at which the value of voxel (x, y, z) resides,
    /// or -1 if (x, y, z) isn't explicitly represented in the tree (i.e., if it is
    /// implicitly a background voxel).
    int getValueDepth(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        // Depth is derived by subtracting the node-reported level from the
        // root's level, converting "level above leaf" into "depth below root".
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            return RootNodeT::LEVEL - mNode0->getValueLevelAndCache(xyz, this->self());
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            return RootNodeT::LEVEL - mNode1->getValueLevelAndCache(xyz, this->self());
        } else if (this->isHashed2(xyz)) {
            assert(mNode2);
            return RootNodeT::LEVEL - mNode2->getValueLevelAndCache(xyz, this->self());
        }
        return BaseT::mTree->root().getValueDepthAndCache(xyz, this->self());
    }

    /// Return @c true if the value of voxel (x, y, z) resides at the leaf level
    /// of the tree, i.e., if it is not a tile value.
    bool isVoxel(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            return mNode0->getValueLevelAndCache(xyz, this->self())==0;
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            return mNode1->getValueLevelAndCache(xyz, this->self())==0;
        } else if (this->isHashed2(xyz)) {
            assert(mNode2);
            return mNode2->getValueLevelAndCache(xyz, this->self())==0;
        }
        return BaseT::mTree->root().getValueDepthAndCache(xyz, this->self()) ==
               static_cast<int>(RootNodeT::LEVEL);
    }

    //@{
    /// Set the value of the voxel at the given coordinates and mark the voxel as active.
    void setValue(const Coord& xyz, const ValueType& value)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        // Cached node pointers are stored as const; casting away constness is
        // safe here because the tree itself is statically known to be non-const.
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            const_cast<NodeT0*>(mNode0)->setValueAndCache(xyz, value, *this);
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            const_cast<NodeT1*>(mNode1)->setValueAndCache(xyz, value, *this);
        } else if (this->isHashed2(xyz)) {
            assert(mNode2);
            const_cast<NodeT2*>(mNode2)->setValueAndCache(xyz, value, *this);
        } else {
            BaseT::mTree->root().setValueAndCache(xyz, value, *this);
        }
    }
    void setValueOn(const Coord& xyz, const ValueType& value) { this->setValue(xyz, value); }
    //@}

    /// Set the value of the voxel at the given coordinate but preserves its active state.
    void setValueOnly(const Coord& xyz, const ValueType& value)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            const_cast<NodeT0*>(mNode0)->setValueOnlyAndCache(xyz, value, *this);
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            const_cast<NodeT1*>(mNode1)->setValueOnlyAndCache(xyz, value, *this);
        } else if (this->isHashed2(xyz)) {
            assert(mNode2);
            const_cast<NodeT2*>(mNode2)->setValueOnlyAndCache(xyz, value, *this);
        } else {
            BaseT::mTree->root().setValueOnlyAndCache(xyz, value, *this);
        }
    }

    /// Set the value of the voxel at the given coordinates and mark the voxel as inactive.
    void setValueOff(const Coord& xyz, const ValueType& value)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            const_cast<NodeT0*>(mNode0)->setValueOffAndCache(xyz, value, *this);
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            const_cast<NodeT1*>(mNode1)->setValueOffAndCache(xyz, value, *this);
        } else if (this->isHashed2(xyz)) {
            assert(mNode2);
            const_cast<NodeT2*>(mNode2)->setValueOffAndCache(xyz, value, *this);
        } else {
            BaseT::mTree->root().setValueOffAndCache(xyz, value, *this);
        }
    }

    /// @brief Apply a functor to the value of the voxel at the given coordinates
    /// and mark the voxel as active.
    /// @details See Tree::modifyValue() for details.
    template<typename ModifyOp>
    void modifyValue(const Coord& xyz, const ModifyOp& op)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            const_cast<NodeT0*>(mNode0)->modifyValueAndCache(xyz, op, *this);
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            const_cast<NodeT1*>(mNode1)->modifyValueAndCache(xyz, op, *this);
        } else if (this->isHashed2(xyz)) {
            assert(mNode2);
            const_cast<NodeT2*>(mNode2)->modifyValueAndCache(xyz, op, *this);
        } else {
            BaseT::mTree->root().modifyValueAndCache(xyz, op, *this);
        }
    }

    /// @brief Apply a functor to the voxel at the given coordinates.
    /// @details See Tree::modifyValueAndActiveState() for details.
    template<typename ModifyOp>
    void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            const_cast<NodeT0*>(mNode0)->modifyValueAndActiveStateAndCache(xyz, op, *this);
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            const_cast<NodeT1*>(mNode1)->modifyValueAndActiveStateAndCache(xyz, op, *this);
        } else if (this->isHashed2(xyz)) {
            assert(mNode2);
            const_cast<NodeT2*>(mNode2)->modifyValueAndActiveStateAndCache(xyz, op, *this);
        } else {
            BaseT::mTree->root().modifyValueAndActiveStateAndCache(xyz, op, *this);
        }
    }

    /// Set the active state of the voxel at the given coordinates without changing its value.
    void setActiveState(const Coord& xyz, bool on = true)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't modify a const tree's values");
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            const_cast<NodeT0*>(mNode0)->setActiveStateAndCache(xyz, on, *this);
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            const_cast<NodeT1*>(mNode1)->setActiveStateAndCache(xyz, on, *this);
        } else if (this->isHashed2(xyz)) {
            assert(mNode2);
            const_cast<NodeT2*>(mNode2)->setActiveStateAndCache(xyz, on, *this);
        } else {
            BaseT::mTree->root().setActiveStateAndCache(xyz, on, *this);
        }
    }
    /// Mark the voxel at the given coordinates as active without changing its value.
    void setValueOn(const Coord& xyz) { this->setActiveState(xyz, true); }
    /// Mark the voxel at the given coordinates as inactive without changing its value.
    void setValueOff(const Coord& xyz) { this->setActiveState(xyz, false); }

    /// Return the cached node of type @a NodeType. [Mainly for internal use]
    template<typename NodeT>
    NodeT* getNode()
    {
        const NodeT* node = nullptr;
        this->getNode(node); // overload resolution selects the matching cache slot
        return const_cast<NodeT*>(node);
    }

    /// Cache the given node, which should lie along the path from the root node to
    /// the node containing voxel (x, y, z). [Mainly for internal use]
    template<typename NodeT>
    void insertNode(const Coord& xyz, NodeT& node) { this->insert(xyz, &node); }

    /// If a node of the given type exists in the cache, remove it, so that
    /// isCached(xyz) returns @c false for any voxel (x, y, z) contained in
    /// that node. [Mainly for internal use]
    template<typename NodeT>
    void eraseNode()
    {
        const NodeT* node = nullptr;
        this->eraseNode(node);
    }

    /// @brief Add the specified leaf to this tree, possibly creating a child branch
    /// in the process. If the leaf node already exists, replace it.
    /// @note The level-0 cache is not consulted here: a leaf is inserted via its
    /// parent (or the root), never via another leaf.
    void addLeaf(LeafNodeT* leaf)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't add a node to a const tree");
        if (this->isHashed1(leaf->origin())) {
            assert(mNode1);
            return const_cast<NodeT1*>(mNode1)->addLeafAndCache(leaf, *this);
        } else if (this->isHashed2(leaf->origin())) {
            assert(mNode2);
            return const_cast<NodeT2*>(mNode2)->addLeafAndCache(leaf, *this);
        }
        BaseT::mTree->root().addLeafAndCache(leaf, *this);
    }

    /// @brief Add a tile at the specified tree level that contains voxel (x, y, z),
    /// possibly deleting existing nodes or creating new nodes in the process.
    void addTile(Index level, const Coord& xyz, const ValueType& value, bool state)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't add a tile to a const tree");
        if (this->isHashed1(xyz)) {
            assert(mNode1);
            return const_cast<NodeT1*>(mNode1)->addTileAndCache(level, xyz, value, state, *this);
        }
        if (this->isHashed2(xyz)) {
            assert(mNode2);
            return const_cast<NodeT2*>(mNode2)->addTileAndCache(level, xyz, value, state, *this);
        }
        BaseT::mTree->root().addTileAndCache(level, xyz, value, state, *this);
    }

    /// @brief @return the leaf node that contains voxel (x, y, z) and
    /// if it doesn't exist, create it, but preserve the values and
    /// active states of all voxels.
    ///
    /// Use this method to preallocate a static tree topology over which to
    /// safely perform multithreaded processing.
    LeafNodeT* touchLeaf(const Coord& xyz)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't get a non-const node from a const tree");
        if (this->isHashed0(xyz)) {
            assert(mNode0);
            return const_cast<NodeT0*>(mNode0);
        } else if (this->isHashed1(xyz)) {
            assert(mNode1);
            return const_cast<NodeT1*>(mNode1)->touchLeafAndCache(xyz, *this);
        } else if (this->isHashed2(xyz)) {
            assert(mNode2);
            return const_cast<NodeT2*>(mNode2)->touchLeafAndCache(xyz, *this);
        }
        return BaseT::mTree->root().touchLeafAndCache(xyz, *this);
    }

    /// @brief @return a pointer to the node of the specified type that contains
    /// voxel (x, y, z) and if it doesn't exist, return @c nullptr.
    template<typename NodeT>
    NodeT* probeNode(const Coord& xyz)
    {
        assert(BaseT::mTree);
        static_assert(!BaseT::IsConstTree, "can't get a non-const node from a const tree");
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        // Each reinterpret_cast below is only reached when std::is_same proves
        // NodeT is the cached node type, so the cast is an identity conversion.
        if ((std::is_same<NodeT, NodeT0>::value)) {
            if (this->isHashed0(xyz)) {
                assert(mNode0);
                return reinterpret_cast<NodeT*>(const_cast<NodeT0*>(mNode0));
            } else if (this->isHashed1(xyz)) {
                assert(mNode1);
                return const_cast<NodeT1*>(mNode1)->template probeNodeAndCache<NodeT>(xyz, *this);
            } else if (this->isHashed2(xyz)) {
                assert(mNode2);
                return const_cast<NodeT2*>(mNode2)->template probeNodeAndCache<NodeT>(xyz, *this);
            }
            return BaseT::mTree->root().template probeNodeAndCache<NodeT>(xyz, *this);
        } else if ((std::is_same<NodeT, NodeT1>::value)) {
            if (this->isHashed1(xyz)) {
                assert(mNode1);
                return reinterpret_cast<NodeT*>(const_cast<NodeT1*>(mNode1));
            } else if (this->isHashed2(xyz)) {
                assert(mNode2);
                return const_cast<NodeT2*>(mNode2)->template probeNodeAndCache<NodeT>(xyz, *this);
            }
            return BaseT::mTree->root().template probeNodeAndCache<NodeT>(xyz, *this);
        } else if ((std::is_same<NodeT, NodeT2>::value)) {
            if (this->isHashed2(xyz)) {
                assert(mNode2);
                return reinterpret_cast<NodeT*>(const_cast<NodeT2*>(mNode2));
            }
            return BaseT::mTree->root().template probeNodeAndCache<NodeT>(xyz, *this);
        }
        return nullptr;
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }
    /// @brief @return a pointer to the leaf node that contains
    /// voxel (x, y, z) and if it doesn't exist, return @c nullptr.
    LeafNodeT* probeLeaf(const Coord& xyz) { return this->template probeNode<LeafNodeT>(xyz); }

    /// @brief @return a const pointer to the node of the specified type that contains
    /// voxel (x, y, z) and if it doesn't exist, return @c nullptr.
    template<typename NodeT>
    const NodeT* probeConstNode(const Coord& xyz) const
    {
        assert(BaseT::mTree);
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        if ((std::is_same<NodeT, NodeT0>::value)) {
            if (this->isHashed0(xyz)) {
                assert(mNode0);
                return reinterpret_cast<const NodeT*>(mNode0);
            } else if (this->isHashed1(xyz)) {
                assert(mNode1);
                return mNode1->template probeConstNodeAndCache<NodeT>(xyz, this->self());
            } else if (this->isHashed2(xyz)) {
                assert(mNode2);
                return mNode2->template probeConstNodeAndCache<NodeT>(xyz, this->self());
            }
            return BaseT::mTree->root().template probeConstNodeAndCache<NodeT>(xyz, this->self());
        } else if ((std::is_same<NodeT, NodeT1>::value)) {
            if (this->isHashed1(xyz)) {
                assert(mNode1);
                return reinterpret_cast<const NodeT*>(mNode1);
            } else if (this->isHashed2(xyz)) {
                assert(mNode2);
                return mNode2->template probeConstNodeAndCache<NodeT>(xyz, this->self());
            }
            return BaseT::mTree->root().template probeConstNodeAndCache<NodeT>(xyz, this->self());
        } else if ((std::is_same<NodeT, NodeT2>::value)) {
            if (this->isHashed2(xyz)) {
                assert(mNode2);
                return reinterpret_cast<const NodeT*>(mNode2);
            }
            return BaseT::mTree->root().template probeConstNodeAndCache<NodeT>(xyz, this->self());
        }
        return nullptr;
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }
    /// @brief @return a const pointer to the leaf node that contains
    /// voxel (x, y, z) and if it doesn't exist, return @c nullptr.
    const LeafNodeT* probeConstLeaf(const Coord& xyz) const
    {
        return this->template probeConstNode<LeafNodeT>(xyz);
    }
    const LeafNodeT* probeLeaf(const Coord& xyz) const { return this->probeConstLeaf(xyz); }

    /// Remove all the cached nodes and invalidate the corresponding hash-keys.
    void clear() override
    {
        mKey0 = Coord::max();
        mNode0 = nullptr;
        mKey1 = Coord::max();
        mNode1 = nullptr;
        mKey2 = Coord::max();
        mNode2 = nullptr;
    }

private:
    // Allow nodes to insert themselves into the cache.
    template<typename> friend class RootNode;
    template<typename, Index> friend class InternalNode;
    template<typename, Index> friend class LeafNode;
    // Allow trees to deregister themselves.
    template<typename> friend class Tree;

    // This private method is merely for convenience: the cache members are
    // mutable, so casting away constness on the accessor itself is safe.
    inline ValueAccessor3& self() const { return const_cast<ValueAccessor3&>(*this); }

    /// Private copy method
    inline void copy(const ValueAccessor3& other)
    {
        mKey0 = other.mKey0;
        mNode0 = other.mNode0;
        mKey1 = other.mKey1;
        mNode1 = other.mNode1;
        mKey2 = other.mKey2;
        mNode2 = other.mNode2;
    }

    /// Prevent this accessor from calling Tree::releaseCache() on a tree that
    /// no longer exists. (Called by mTree when it is destroyed.)
    void release() override
    {
        this->BaseT::release();
        this->clear();
    }

    // Overload set used by the public getNode<NodeT>() dispatcher.
    void getNode(const NodeT0*& node) { node = mNode0; }
    void getNode(const NodeT1*& node) { node = mNode1; }
    void getNode(const NodeT2*& node) { node = mNode2; }
    void getNode(const RootNodeT*& node)
    {
        node = (BaseT::mTree ? &BaseT::mTree->root() : nullptr);
    }
    template<typename OtherNodeType>
    void getNode(const OtherNodeType*& node) { node = nullptr; }

    // Overload set used by the public eraseNode<NodeT>() dispatcher.
    void eraseNode(const NodeT0*) { mKey0 = Coord::max(); mNode0 = nullptr; }
    void eraseNode(const NodeT1*) { mKey1 = Coord::max(); mNode1 = nullptr; }
    void eraseNode(const NodeT2*) { mKey2 = Coord::max(); mNode2 = nullptr; }
    template<typename OtherNodeType>
    void eraseNode(const OtherNodeType*) {}

    /// Cache the given node, which should lie along the path from the root node to
    /// the node containing voxel (x, y, z).
    /// @note This operation is not mutex-protected and is intended to be called
    /// only by nodes and only in the context of a getValue() or setValue() call.
    inline void insert(const Coord& xyz, const NodeT0* node)
    {
        assert(node);
        // The key is the node's origin: xyz with the low DIM-1 bits masked off.
        mKey0 = xyz & ~(NodeT0::DIM-1);
        mNode0 = node;
    }
    inline void insert(const Coord& xyz, const NodeT1* node)
    {
        assert(node);
        mKey1 = xyz & ~(NodeT1::DIM-1);
        mNode1 = node;
    }
    inline void insert(const Coord& xyz, const NodeT2* node)
    {
        assert(node);
        mKey2 = xyz & ~(NodeT2::DIM-1);
        mNode2 = node;
    }
    /// No-op in case a tree traversal attempts to insert a node that
    /// is not cached by the ValueAccessor
    template<typename OtherNodeType>
    inline void insert(const Coord&, const OtherNodeType*) {}

    // A coordinate "hashes" to a cache level when masking it down to that
    // node's origin reproduces the stored key on all three axes.
    inline bool isHashed0(const Coord& xyz) const
    {
        return (xyz[0] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[0]
            && (xyz[1] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[1]
            && (xyz[2] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[2];
    }
    inline bool isHashed1(const Coord& xyz) const
    {
        return (xyz[0] & ~Coord::ValueType(NodeT1::DIM-1)) == mKey1[0]
            && (xyz[1] & ~Coord::ValueType(NodeT1::DIM-1)) == mKey1[1]
            && (xyz[2] & ~Coord::ValueType(NodeT1::DIM-1)) == mKey1[2];
    }
    inline bool isHashed2(const Coord& xyz) const
    {
        return (xyz[0] & ~Coord::ValueType(NodeT2::DIM-1)) == mKey2[0]
            && (xyz[1] & ~Coord::ValueType(NodeT2::DIM-1)) == mKey2[1]
            && (xyz[2] & ~Coord::ValueType(NodeT2::DIM-1)) == mKey2[2];
    }

    // Cached origin keys and node pointers for the three cache levels;
    // mutable so that const lookups can still update the cache.
    mutable Coord mKey0;
    mutable const NodeT0* mNode0;
    mutable Coord mKey1;
    mutable const NodeT1* mNode1;
    mutable Coord mKey2;
    mutable const NodeT2* mNode2;
}; // ValueAccessor3

} // namespace tree
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TREE_VALUEACCESSOR_HAS_BEEN_INCLUDED
103,262
C
38.368281
98
0.62997
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/LeafManager.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file LeafManager.h /// /// @brief A LeafManager manages a linear array of pointers to a given tree's /// leaf nodes, as well as optional auxiliary buffers (one or more per leaf) /// that can be swapped with the leaf nodes' voxel data buffers. /// @details The leaf array is useful for multithreaded computations over /// leaf voxels in a tree with static topology but varying voxel values. /// The auxiliary buffers are convenient for temporal integration. /// Efficient methods are provided for multithreaded swapping and synching /// (i.e., copying the contents) of these buffers. #ifndef OPENVDB_TREE_LEAFMANAGER_HAS_BEEN_INCLUDED #define OPENVDB_TREE_LEAFMANAGER_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/tree/RootNode.h> // for NodeChain #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <deque> #include <functional> #include <type_traits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { namespace leafmgr { //@{ /// Useful traits for Tree types template<typename TreeT> struct TreeTraits { static const bool IsConstTree = false; using LeafIterType = typename TreeT::LeafIter; }; template<typename TreeT> struct TreeTraits<const TreeT> { static const bool IsConstTree = true; using LeafIterType = typename TreeT::LeafCIter; }; //@} } // namespace leafmgr /// This helper class implements LeafManager methods that need to be /// specialized for const vs. non-const trees. 
template<typename ManagerT>
struct LeafManagerImpl
{
    using RangeT = typename ManagerT::RangeType;
    using LeafT = typename ManagerT::LeafType;
    using BufT = typename ManagerT::BufferType;

    /// @brief Swap each leaf node's voxel buffer in @a r with the auxiliary
    /// buffer selected by @a auxBufferIdx.
    /// @details The auxiliary buffers are laid out as a flat array with
    /// @a bufsPerLeaf consecutive entries per leaf, so leaf @c i owns the
    /// slots <tt>[i*bufsPerLeaf, (i+1)*bufsPerLeaf)</tt>.
    static inline void doSwapLeafBuffer(const RangeT& r, size_t auxBufferIdx,
                                        LeafT** leafs, BufT* bufs, size_t bufsPerLeaf)
    {
        const size_t stride = bufsPerLeaf;
        for (size_t idx = r.begin(), last = r.end(); idx != last; ++idx) {
            // Swap leaf idx's internal buffer with its auxBufferIdx-th aux buffer.
            leafs[idx]->swap(bufs[idx * stride + auxBufferIdx]);
        }
    }
};


////////////////////////////////////////


/// @brief This class manages a linear array of pointers to a given tree's
/// leaf nodes, as well as optional auxiliary buffers (one or more per leaf)
/// that can be swapped with the leaf nodes' voxel data buffers.
/// @details The leaf array is useful for multithreaded computations over
/// leaf voxels in a tree with static topology but varying voxel values.
/// The auxiliary buffers are convenient for temporal integration.
/// Efficient methods are provided for multithreaded swapping and sync'ing
/// (i.e., copying the contents) of these buffers.
///
/// @note Buffer index 0 denotes a leaf node's internal voxel data buffer.
/// Any auxiliary buffers are indexed starting from one.
template<typename TreeT> class LeafManager { public: using TreeType = TreeT; using ValueType = typename TreeT::ValueType; using RootNodeType = typename TreeT::RootNodeType; using NonConstLeafType = typename TreeType::LeafNodeType; using LeafType = typename CopyConstness<TreeType, NonConstLeafType>::Type; using LeafNodeType = LeafType; using LeafIterType = typename leafmgr::TreeTraits<TreeT>::LeafIterType; using NonConstBufferType = typename LeafType::Buffer; using BufferType = typename CopyConstness<TreeType, NonConstBufferType>::Type; using RangeType = tbb::blocked_range<size_t>; // leaf index range static const Index DEPTH = 2; // root + leaf nodes static const bool IsConstTree = leafmgr::TreeTraits<TreeT>::IsConstTree; class LeafRange { public: class Iterator { public: Iterator(const LeafRange& range, size_t pos): mRange(range), mPos(pos) { assert(this->isValid()); } Iterator(const Iterator&) = default; Iterator& operator=(const Iterator&) = default; /// Advance to the next leaf node. Iterator& operator++() { ++mPos; return *this; } /// Return a reference to the leaf node to which this iterator is pointing. LeafType& operator*() const { return mRange.mLeafManager.leaf(mPos); } /// Return a pointer to the leaf node to which this iterator is pointing. LeafType* operator->() const { return &(this->operator*()); } /// @brief Return the nth buffer for the leaf node to which this iterator is pointing, /// where n = @a bufferIdx and n = 0 corresponds to the leaf node's own buffer. BufferType& buffer(size_t bufferIdx) { return mRange.mLeafManager.getBuffer(mPos, bufferIdx); } /// Return the index into the leaf array of the current leaf node. size_t pos() const { return mPos; } /// Return @c true if the position of this iterator is in a valid range. bool isValid() const { return mPos>=mRange.mBegin && mPos<=mRange.mEnd; } /// Return @c true if this iterator is not yet exhausted. 
bool test() const { return mPos < mRange.mEnd; } /// Return @c true if this iterator is not yet exhausted. operator bool() const { return this->test(); } /// Return @c true if this iterator is exhausted. bool empty() const { return !this->test(); } bool operator!=(const Iterator& other) const { return (mPos != other.mPos) || (&mRange != &other.mRange); } bool operator==(const Iterator& other) const { return !(*this != other); } const LeafRange& leafRange() const { return mRange; } private: const LeafRange& mRange; size_t mPos; };// end Iterator LeafRange(size_t begin, size_t end, const LeafManager& leafManager, size_t grainSize=1) : mEnd(end) , mBegin(begin) , mGrainSize(grainSize) , mLeafManager(leafManager) { } Iterator begin() const {return Iterator(*this, mBegin);} Iterator end() const {return Iterator(*this, mEnd);} size_t size() const { return mEnd - mBegin; } size_t grainsize() const { return mGrainSize; } const LeafManager& leafManager() const { return mLeafManager; } bool empty() const {return !(mBegin < mEnd);} bool is_divisible() const {return mGrainSize < this->size();} LeafRange(LeafRange& r, tbb::split) : mEnd(r.mEnd) , mBegin(doSplit(r)) , mGrainSize(r.mGrainSize) , mLeafManager(r.mLeafManager) { } private: size_t mEnd, mBegin, mGrainSize; const LeafManager& mLeafManager; static size_t doSplit(LeafRange& r) { assert(r.is_divisible()); size_t middle = r.mBegin + (r.mEnd - r.mBegin) / 2u; r.mEnd = middle; return middle; } };// end of LeafRange /// @brief Constructor from a tree reference and an auxiliary buffer count /// @note The default is no auxiliary buffers LeafManager(TreeType& tree, size_t auxBuffersPerLeaf=0, bool serial=false) : mTree(&tree) , mLeafCount(0) , mAuxBufferCount(0) , mAuxBuffersPerLeaf(auxBuffersPerLeaf) { this->rebuild(serial); } /// @brief Construct directly from an existing array of leafnodes. /// @warning The leafnodes are implicitly assumed to exist in the /// input @a tree. 
LeafManager(TreeType& tree, LeafType** begin, LeafType** end, size_t auxBuffersPerLeaf=0, bool serial=false) : mTree(&tree) , mLeafCount(end-begin) , mAuxBufferCount(0) , mAuxBuffersPerLeaf(auxBuffersPerLeaf) , mLeafPtrs(new LeafType*[mLeafCount]) , mLeafs(mLeafPtrs.get()) { size_t n = mLeafCount; LeafType **target = mLeafs, **source = begin; while (n--) *target++ = *source++; if (auxBuffersPerLeaf) this->initAuxBuffers(serial); } /// Shallow copy constructor called by tbb::parallel_for() threads /// /// @note This should never get called directly LeafManager(const LeafManager& other) : mTree(other.mTree) , mLeafCount(other.mLeafCount) , mAuxBufferCount(other.mAuxBufferCount) , mAuxBuffersPerLeaf(other.mAuxBuffersPerLeaf) , mLeafs(other.mLeafs) , mAuxBuffers(other.mAuxBuffers) , mTask(other.mTask) { } /// @brief (Re)initialize by resizing (if necessary) and repopulating the leaf array /// and by deleting existing auxiliary buffers and allocating new ones. /// @details Call this method if the tree's topology, and therefore the number /// of leaf nodes, changes. New auxiliary buffers are initialized with copies /// of corresponding leaf node buffers. void rebuild(bool serial=false) { this->initLeafArray(serial); this->initAuxBuffers(serial); } //@{ /// Repopulate the leaf array and delete and reallocate auxiliary buffers. void rebuild(size_t auxBuffersPerLeaf, bool serial=false) { mAuxBuffersPerLeaf = auxBuffersPerLeaf; this->rebuild(serial); } void rebuild(TreeType& tree, bool serial=false) { mTree = &tree; this->rebuild(serial); } void rebuild(TreeType& tree, size_t auxBuffersPerLeaf, bool serial=false) { mTree = &tree; mAuxBuffersPerLeaf = auxBuffersPerLeaf; this->rebuild(serial); } //@} /// @brief Change the number of auxiliary buffers. /// @details If auxBuffersPerLeaf is 0, all existing auxiliary buffers are deleted. /// New auxiliary buffers are initialized with copies of corresponding leaf node buffers. /// This method does not rebuild the leaf array. 
void rebuildAuxBuffers(size_t auxBuffersPerLeaf, bool serial=false) { mAuxBuffersPerLeaf = auxBuffersPerLeaf; this->initAuxBuffers(serial); } /// @brief Remove the auxiliary buffers, but don't rebuild the leaf array. void removeAuxBuffers() { this->rebuildAuxBuffers(0); } /// @brief Remove the auxiliary buffers and rebuild the leaf array. void rebuildLeafArray(bool serial = false) { this->removeAuxBuffers(); this->initLeafArray(serial); } /// @brief Return the total number of allocated auxiliary buffers. size_t auxBufferCount() const { return mAuxBufferCount; } /// @brief Return the number of auxiliary buffers per leaf node. size_t auxBuffersPerLeaf() const { return mAuxBuffersPerLeaf; } /// @brief Return the number of leaf nodes. size_t leafCount() const { return mLeafCount; } /// @brief Return the number of active voxels in the leaf nodes. /// @note Multi-threaded for better performance than Tree::activeLeafVoxelCount Index64 activeLeafVoxelCount() const { return tbb::parallel_reduce(this->leafRange(), Index64(0), [] (const LeafRange& range, Index64 sum) -> Index64 { for (const auto& leaf: range) { sum += leaf.onVoxelCount(); } return sum; }, [] (Index64 n, Index64 m) -> Index64 { return n + m; }); } /// Return a const reference to tree associated with this manager. const TreeType& tree() const { return *mTree; } /// Return a reference to the tree associated with this manager. TreeType& tree() { return *mTree; } /// Return a const reference to root node associated with this manager. const RootNodeType& root() const { return mTree->root(); } /// Return a reference to the root node associated with this manager. RootNodeType& root() { return mTree->root(); } /// Return @c true if the tree associated with this manager is immutable. bool isConstTree() const { return this->IsConstTree; } /// @brief Return a pointer to the leaf node at index @a leafIdx in the array. /// @note For performance reasons no range check is performed (other than an assertion)! 
LeafType& leaf(size_t leafIdx) const { assert(leafIdx<mLeafCount); return *mLeafs[leafIdx]; } /// @brief Return the leaf or auxiliary buffer for the leaf node at index @a leafIdx. /// If @a bufferIdx is zero, return the leaf buffer, otherwise return the nth /// auxiliary buffer, where n = @a bufferIdx - 1. /// /// @note For performance reasons no range checks are performed on the inputs /// (other than assertions)! Since auxiliary buffers, unlike leaf buffers, /// might not exist, be especially careful when specifying the @a bufferIdx. /// @note For const trees, this method always returns a reference to a const buffer. /// It is safe to @c const_cast and modify any auxiliary buffer (@a bufferIdx > 0), /// but it is not safe to modify the leaf buffer (@a bufferIdx = 0). BufferType& getBuffer(size_t leafIdx, size_t bufferIdx) const { assert(leafIdx < mLeafCount); assert(bufferIdx == 0 || bufferIdx - 1 < mAuxBuffersPerLeaf); return bufferIdx == 0 ? mLeafs[leafIdx]->buffer() : mAuxBuffers[leafIdx * mAuxBuffersPerLeaf + bufferIdx - 1]; } /// @brief Return a @c tbb::blocked_range of leaf array indices. /// /// @note Consider using leafRange() instead, which provides access methods /// to leaf nodes and buffers. RangeType getRange(size_t grainsize = 1) const { return RangeType(0, mLeafCount, grainsize); } /// Return a TBB-compatible LeafRange. LeafRange leafRange(size_t grainsize = 1) const { return LeafRange(0, mLeafCount, *this, grainsize); } /// @brief Swap each leaf node's buffer with the nth corresponding auxiliary buffer, /// where n = @a bufferIdx. /// @return @c true if the swap was successful /// @param bufferIdx index of the buffer that will be swapped with /// the corresponding leaf node buffer /// @param serial if false, swap buffers in parallel using multiple threads. /// @note Recall that the indexing of auxiliary buffers is 1-based, since /// buffer index 0 denotes the leaf node buffer. So buffer index 1 denotes /// the first auxiliary buffer. 
bool swapLeafBuffer(size_t bufferIdx, bool serial = false) { namespace ph = std::placeholders; if (bufferIdx == 0 || bufferIdx > mAuxBuffersPerLeaf || this->isConstTree()) return false; mTask = std::bind(&LeafManager::doSwapLeafBuffer, ph::_1, ph::_2, bufferIdx - 1); this->cook(serial ? 0 : 512); return true;//success } /// @brief Swap any two buffers for each leaf node. /// @note Recall that the indexing of auxiliary buffers is 1-based, since /// buffer index 0 denotes the leaf node buffer. So buffer index 1 denotes /// the first auxiliary buffer. bool swapBuffer(size_t bufferIdx1, size_t bufferIdx2, bool serial = false) { namespace ph = std::placeholders; const size_t b1 = std::min(bufferIdx1, bufferIdx2); const size_t b2 = std::max(bufferIdx1, bufferIdx2); if (b1 == b2 || b2 > mAuxBuffersPerLeaf) return false; if (b1 == 0) { if (this->isConstTree()) return false; mTask = std::bind(&LeafManager::doSwapLeafBuffer, ph::_1, ph::_2, b2-1); } else { mTask = std::bind(&LeafManager::doSwapAuxBuffer, ph::_1, ph::_2, b1-1, b2-1); } this->cook(serial ? 0 : 512); return true;//success } /// @brief Sync up the specified auxiliary buffer with the corresponding leaf node buffer. /// @return @c true if the sync was successful /// @param bufferIdx index of the buffer that will contain a /// copy of the corresponding leaf node buffer /// @param serial if false, sync buffers in parallel using multiple threads. /// @note Recall that the indexing of auxiliary buffers is 1-based, since /// buffer index 0 denotes the leaf node buffer. So buffer index 1 denotes /// the first auxiliary buffer. bool syncAuxBuffer(size_t bufferIdx, bool serial = false) { namespace ph = std::placeholders; if (bufferIdx == 0 || bufferIdx > mAuxBuffersPerLeaf) return false; mTask = std::bind(&LeafManager::doSyncAuxBuffer, ph::_1, ph::_2, bufferIdx - 1); this->cook(serial ? 0 : 64); return true;//success } /// @brief Sync up all auxiliary buffers with their corresponding leaf node buffers. 
/// @return true if the sync was successful /// @param serial if false, sync buffers in parallel using multiple threads. bool syncAllBuffers(bool serial = false) { namespace ph = std::placeholders; switch (mAuxBuffersPerLeaf) { case 0: return false;//nothing to do case 1: mTask = std::bind(&LeafManager::doSyncAllBuffers1, ph::_1, ph::_2); break; case 2: mTask = std::bind(&LeafManager::doSyncAllBuffers2, ph::_1, ph::_2); break; default: mTask = std::bind(&LeafManager::doSyncAllBuffersN, ph::_1, ph::_2); break; } this->cook(serial ? 0 : 64); return true;//success } /// @brief Threaded method that applies a user-supplied functor /// to each leaf node in the LeafManager. /// /// @details The user-supplied functor needs to define the methods /// required for tbb::parallel_for. /// /// @param op user-supplied functor, see examples for interface details. /// @param threaded optional toggle to disable threading, on by default. /// @param grainSize optional parameter to specify the grainsize /// for threading, one by default. /// /// @warning The functor object is deep-copied to create TBB tasks. /// This allows the function to use non-thread-safe members /// like a ValueAccessor. /// /// @par Example: /// @code /// // Functor to offset a tree's voxel values with values from another tree. 
/// template<typename TreeType> /// struct OffsetOp /// { /// using Accessor = tree::ValueAccessor<const TreeType>; /// /// OffsetOp(const TreeType& tree): mRhsTreeAcc(tree) {} /// /// template <typename LeafNodeType> /// void operator()(LeafNodeType &lhsLeaf, size_t) const /// { /// const LeafNodeType *rhsLeaf = mRhsTreeAcc.probeConstLeaf(lhsLeaf.origin()); /// if (rhsLeaf) { /// typename LeafNodeType::ValueOnIter iter = lhsLeaf.beginValueOn(); /// for (; iter; ++iter) { /// iter.setValue(iter.getValue() + rhsLeaf->getValue(iter.pos())); /// } /// } /// } /// Accessor mRhsTreeAcc; /// }; /// /// // usage: /// tree::LeafManager<FloatTree> leafNodes(lhsTree); /// leafNodes.foreach(OffsetOp<FloatTree>(rhsTree)); /// /// // A functor that performs a min operation between different auxiliary buffers. /// template<typename LeafManagerType> /// struct MinOp /// { /// using BufferType = typename LeafManagerType::BufferType; /// /// MinOp(LeafManagerType& leafNodes): mLeafs(leafNodes) {} /// /// template <typename LeafNodeType> /// void operator()(LeafNodeType &leaf, size_t leafIndex) const /// { /// // get the first buffer /// BufferType& buffer = mLeafs.getBuffer(leafIndex, 1); /// /// // min ... /// } /// LeafManagerType& mLeafs; /// }; /// @endcode template<typename LeafOp> void foreach(const LeafOp& op, bool threaded = true, size_t grainSize=1) { LeafTransformer<LeafOp> transform(op); transform.run(this->leafRange(grainSize), threaded); } /// @brief Threaded method that applies a user-supplied functor /// to each leaf node in the LeafManager. Unlike foreach /// (defined above) this method performs a reduction on /// all the leaf nodes. /// /// @details The user-supplied functor needs to define the methods /// required for tbb::parallel_reduce. /// /// @param op user-supplied functor, see examples for interface details. /// @param threaded optional toggle to disable threading, on by default. 
/// @param grainSize optional parameter to specify the grainsize
/// for threading, one by default.
///
/// @warning The functor object is deep-copied to create TBB tasks.
/// This allows the function to use non-thread-safe members
/// like a ValueAccessor.
///
/// @par Example:
/// @code
/// // Functor to count the number of negative (active) leaf values
/// struct CountOp
/// {
///     CountOp() : mCounter(0) {}
///     CountOp(const CountOp &other) : mCounter(other.mCounter) {}
///     CountOp(const CountOp &other, tbb::split) : mCounter(0) {}
///     template <typename LeafNodeType>
///     void operator()(LeafNodeType &leaf, size_t)
///     {
///         typename LeafNodeType::ValueOnIter iter = leaf.beginValueOn();
///         for (; iter; ++iter) if (*iter < 0.0f) ++mCounter;
///     }
///     void join(const CountOp &other) {mCounter += other.mCounter;}
///     size_t mCounter;
/// };
///
/// // usage:
/// tree::LeafManager<FloatTree> leafNodes(tree);
/// CountOp count;
/// leafNodes.reduce(count);
/// std::cerr << "Number of negative active voxels = " << count.mCounter << std::endl;
///
/// @endcode
template<typename LeafOp>
void reduce(LeafOp& op, bool threaded = true, size_t grainSize=1)
{
    // The reducer splits op via its tbb::split constructor and merges
    // partial results through op.join().
    LeafReducer<LeafOp> transform(op);
    transform.run(this->leafRange(grainSize), threaded);
}

/// @brief Copy leaf node pointers into @a array when the array's element type
/// matches this manager's leaf type; otherwise delegate to the tree.
template<typename ArrayT>
[[deprecated("Use Tree::getNodes()")]]
void getNodes(ArrayT& array)
{
    using T = typename ArrayT::value_type;
    static_assert(std::is_pointer<T>::value, "argument to getNodes() must be a pointer array");
    // Preserve constness of the requested pointer type.
    using LeafT = typename std::conditional<std::is_const<
        typename std::remove_pointer<T>::type>::value, const LeafType, LeafType>::type;
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    if (std::is_same<T, LeafT*>::value) {
        // Fast path: the cached leaf array already holds exactly these nodes.
        array.resize(mLeafCount);
        for (size_t i=0; i<mLeafCount; ++i) array[i] = reinterpret_cast<T>(mLeafs[i]);
    } else {
        // Other node levels must be collected by the tree itself.
        mTree->getNodes(array);
    }
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}

/// @brief Const overload: only arrays of const node pointers are accepted.
template<typename ArrayT>
[[deprecated("Use Tree::getNodes()")]]
void getNodes(ArrayT& array) const
{
    using T = typename ArrayT::value_type;
    static_assert(std::is_pointer<T>::value, "argument to getNodes() must be a pointer array");
    static_assert(std::is_const<typename std::remove_pointer<T>::type>::value,
        "argument to getNodes() must be an array of const node pointers");
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    if (std::is_same<T, const LeafType*>::value) {
        // Fast path: reuse the cached leaf pointer array.
        array.resize(mLeafCount);
        for (size_t i=0; i<mLeafCount; ++i) array[i] = reinterpret_cast<T>(mLeafs[i]);
    } else {
        mTree->getNodes(array);
    }
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}

/// @brief Generate a linear array of prefix sums of offsets into the
/// active voxels in the leafs. So @a offsets[n]+m is the offset to the
/// mth active voxel in the nth leaf node (useful for
/// user-managed value buffers, e.g. in tools/LevelSetAdvect.h).
/// @return The total number of active values in the leaf nodes
/// @param offsets array of prefix sums of offsets to active voxels
/// @param size on input, the size of @a offsets; on output, its new size
/// @param grainSize optional grain size for threading
/// @details If @a offsets is @c nullptr or @a size is smaller than the
/// number of leaf nodes managed by this LeafManager, then @a offsets
/// is reallocated (one entry per leaf node) and @a size is set to the
/// leaf node count.
size_t getPrefixSum(size_t*& offsets, size_t& size, size_t grainSize=1) const
{
    // One prefix-sum entry per leaf node.
    if (offsets == nullptr || size < mLeafCount) {
        delete [] offsets;
        offsets = new size_t[mLeafCount];
        size = mLeafCount;
    }
    size_t prefix = 0;
    if ( grainSize > 0 ) {
        // PrefixSum's constructor performs the parallel count and the
        // serial prefix pass; the named temporary exists for its side effects.
        PrefixSum tmp(this->leafRange( grainSize ), offsets, prefix);
    } else {// serial
        for (size_t i=0; i<mLeafCount; ++i) {
            offsets[i] = prefix;
            prefix += mLeafs[i]->onVoxelCount();
        }
    }
    return prefix;
}

////////////////////////////////////////////////////////////////////////////////////

// All methods below are for internal use only and should never be called directly

/// Used internally by tbb::parallel_for() - never call it directly!
void operator()(const RangeType& r) const
{
    // mTask is set by the public swap/sync entry points before cook() runs this.
    if (mTask) mTask(const_cast<LeafManager*>(this), r);
    else OPENVDB_THROW(ValueError, "task is undefined");
}

private:

// (Re)build the flat array of leaf node pointers from the tree, optionally in parallel.
void initLeafArray(bool serial = false)
{
    // Build an array of all nodes that have leaf nodes as their immediate children
    using NodeChainT = typename NodeChain<RootNodeType, RootNodeType::LEVEL>::Type;
    using NonConstLeafParentT = typename NodeChainT::template Get</*Level=*/1>;
    using LeafParentT = typename CopyConstness<TreeType, NonConstLeafParentT>::Type;

    std::deque<LeafParentT*> leafParents;
    mTree->getNodes(leafParents);

    // Compute the leaf counts for each node
    std::vector<Index32> leafCounts;
    if (serial) {
        leafCounts.reserve(leafParents.size());
        for (LeafParentT* leafParent : leafParents) {
            leafCounts.push_back(leafParent->childCount());
        }
    } else {
        leafCounts.resize(leafParents.size());
        tbb::parallel_for(
            // with typical node sizes and SSE enabled, there are only a handful
            // of instructions executed per-operation with a default grainsize
            // of 1, so increase to 64 to reduce parallel scheduling overhead
            tbb::blocked_range<size_t>(0, leafParents.size(), /*grainsize=*/64),
            [&](tbb::blocked_range<size_t>& range) {
                for (size_t i = range.begin(); i < range.end(); i++) {
                    leafCounts[i] = leafParents[i]->childCount();
                }
            }
        );
    }

    // Turn leaf counts into a cumulative histogram and obtain total leaf count
    for (size_t i = 1; i < leafCounts.size(); i++) {
        leafCounts[i] += leafCounts[i-1];
    }

    const size_t leafCount = leafCounts.empty() ? 0 : leafCounts.back();

    // Allocate (or deallocate) the leaf pointer array
    if (leafCount != mLeafCount) {
        if (leafCount > 0) {
            mLeafPtrs.reset(new LeafType*[leafCount]);
            mLeafs = mLeafPtrs.get();
        } else {
            mLeafPtrs.reset();
            mLeafs = nullptr;
        }
        mLeafCount = leafCount;
    }

    if (mLeafCount == 0) return;

    // Populate the leaf node pointers
    if (serial) {
        LeafType** leafPtr = mLeafs;
        for (LeafParentT* leafParent : leafParents) {
            for (auto iter = leafParent->beginChildOn(); iter; ++iter) {
                *leafPtr++ = &iter.getValue();
            }
        }
    } else {
        tbb::parallel_for(
            tbb::blocked_range<size_t>(0, leafParents.size()),
            [&](tbb::blocked_range<size_t>& range) {
                size_t i = range.begin();
                LeafType** leafPtr = mLeafs;
                // The cumulative histogram gives each sub-range its start
                // offset, so threads write disjoint slices of mLeafs.
                if (i > 0) leafPtr += leafCounts[i-1];
                for ( ; i < range.end(); i++) {
                    for (auto iter = leafParents[i]->beginChildOn(); iter; ++iter) {
                        *leafPtr++ = &iter.getValue();
                    }
                }
            }
        );
    }
}

// (Re)allocate the flat auxiliary buffer array and fill it from the leaf buffers.
void initAuxBuffers(bool serial)
{
    const size_t auxBufferCount = mLeafCount * mAuxBuffersPerLeaf;
    if (auxBufferCount != mAuxBufferCount) {
        if (auxBufferCount > 0) {
            mAuxBufferPtrs.reset(new NonConstBufferType[auxBufferCount]);
            mAuxBuffers = mAuxBufferPtrs.get();
        } else {
            mAuxBufferPtrs.reset();
            mAuxBuffers = nullptr;
        }
        mAuxBufferCount = auxBufferCount;
    }
    this->syncAllBuffers(serial);
}

// Execute the currently scheduled mTask over all leafs;
// grainsize == 0 selects serial execution.
void cook(size_t grainsize)
{
    if (grainsize>0) {
        tbb::parallel_for(this->getRange(grainsize), *this);
    } else {
        (*this)(this->getRange());
    }
}

// Delegated to LeafManagerImpl so the const-tree specialization can make it a no-op.
void doSwapLeafBuffer(const RangeType& r, size_t auxBufferIdx)
{
    LeafManagerImpl<LeafManager>::doSwapLeafBuffer(r, auxBufferIdx,
        mLeafs, mAuxBuffers, mAuxBuffersPerLeaf);
}

// Swap two auxiliary buffers (0-indexed) for every leaf in the range.
void doSwapAuxBuffer(const RangeType& r, size_t auxBufferIdx1, size_t auxBufferIdx2)
{
    for (size_t N = mAuxBuffersPerLeaf, n = N*r.begin(), m = N*r.end(); n != m; n+=N) {
        mAuxBuffers[n + auxBufferIdx1].swap(mAuxBuffers[n + auxBufferIdx2]);
    }
}

// Copy each leaf buffer into the given auxiliary buffer (0-indexed) for the range.
void doSyncAuxBuffer(const RangeType& r, size_t auxBufferIdx)
{
    for (size_t n = r.begin(), m = r.end(), N = mAuxBuffersPerLeaf; n != m; ++n) {
        mAuxBuffers[n*N + auxBufferIdx] =
            mLeafs[n]->buffer();
    }
}

// Specialization of doSyncAllBuffersN for exactly one auxiliary buffer per leaf.
void doSyncAllBuffers1(const RangeType& r)
{
    for (size_t n = r.begin(), m = r.end(); n != m; ++n) {
        mAuxBuffers[n] = mLeafs[n]->buffer();
    }
}

// Specialization of doSyncAllBuffersN for exactly two auxiliary buffers per leaf.
void doSyncAllBuffers2(const RangeType& r)
{
    for (size_t n = r.begin(), m = r.end(); n != m; ++n) {
        const BufferType& leafBuffer = mLeafs[n]->buffer();
        mAuxBuffers[2*n ] = leafBuffer;
        mAuxBuffers[2*n+1] = leafBuffer;
    }
}

// General case: copy each leaf buffer into all N of its auxiliary buffers.
void doSyncAllBuffersN(const RangeType& r)
{
    for (size_t n = r.begin(), m = r.end(), N = mAuxBuffersPerLeaf; n != m; ++n) {
        const BufferType& leafBuffer = mLeafs[n]->buffer();
        for (size_t i=n*N, j=i+N; i!=j; ++i) mAuxBuffers[i] = leafBuffer;
    }
}

/// @brief Private member class that applies a user-defined
/// functor to perform parallel_for on all the leaf nodes.
template<typename LeafOp>
struct LeafTransformer
{
    // The functor is copied so each TBB task owns its own instance.
    LeafTransformer(const LeafOp &leafOp) : mLeafOp(leafOp)
    {
    }
    void run(const LeafRange &range, bool threaded) const
    {
        threaded ? tbb::parallel_for(range, *this) : (*this)(range);
    }
    void operator()(const LeafRange &range) const
    {
        for (typename LeafRange::Iterator it = range.begin(); it; ++it) mLeafOp(*it, it.pos());
    }
    const LeafOp mLeafOp;
};// LeafTransformer

/// @brief Private member class that applies a user-defined
/// functor to perform parallel_reduce on all the leaf nodes.
template<typename LeafOp>
struct LeafReducer
{
    // The root reducer borrows the caller's functor so the final result
    // lands back in the object the caller passed to reduce().
    LeafReducer(LeafOp &leafOp) : mLeafOp(&leafOp)
    {
    }
    // Split constructor: task-local copies are heap-allocated and owned here.
    LeafReducer(const LeafReducer &other, tbb::split)
        : mLeafOpPtr(std::make_unique<LeafOp>(*(other.mLeafOp), tbb::split()))
        , mLeafOp(mLeafOpPtr.get())
    {
    }
    void run(const LeafRange& range, bool threaded)
    {
        threaded ? tbb::parallel_reduce(range, *this) : (*this)(range);
    }
    void operator()(const LeafRange& range)
    {
        LeafOp &op = *mLeafOp;//local registry
        for (typename LeafRange::Iterator it = range.begin(); it; ++it) op(*it, it.pos());
    }
    void join(const LeafReducer& other) { mLeafOp->join(*(other.mLeafOp)); }
    std::unique_ptr<LeafOp> mLeafOpPtr;
    LeafOp *mLeafOp = nullptr;
};// LeafReducer

// Helper class to compute a prefix sum of offsets to active voxels
struct PrefixSum
{
    // The constructor does all the work: a parallel pass writes each leaf's
    // active-voxel count into offsets, then a serial pass converts the
    // counts into an exclusive prefix sum, accumulating the total in prefix.
    PrefixSum(const LeafRange& r, size_t* offsets, size_t& prefix)
        : mOffsets(offsets)
    {
        tbb::parallel_for( r, *this);
        for (size_t i=0, leafCount = r.size(); i<leafCount; ++i) {
            size_t tmp = offsets[i];
            offsets[i] = prefix;
            prefix += tmp;
        }
    }
    inline void operator()(const LeafRange& r) const
    {
        for (typename LeafRange::Iterator i = r.begin(); i; ++i) {
            mOffsets[i.pos()] = i->onVoxelCount();
        }
    }
    size_t* mOffsets;
};// PrefixSum

using FuncType = typename std::function<void (LeafManager*, const RangeType&)>;

TreeType* mTree;
size_t mLeafCount, mAuxBufferCount, mAuxBuffersPerLeaf;
std::unique_ptr<LeafType*[]> mLeafPtrs;
LeafType** mLeafs = nullptr;//array of LeafNode pointers
std::unique_ptr<NonConstBufferType[]> mAuxBufferPtrs;
NonConstBufferType* mAuxBuffers = nullptr;//array of auxiliary buffers
FuncType mTask = nullptr;
};//end of LeafManager class

// Partial specializations of LeafManager methods for const trees
template<typename TreeT>
struct LeafManagerImpl<LeafManager<const TreeT> >
{
    using ManagerT = LeafManager<const TreeT>;
    using RangeT = typename ManagerT::RangeType;
    using LeafT = typename ManagerT::LeafType;
    using BufT = typename ManagerT::BufferType;

    static inline void doSwapLeafBuffer(const RangeT&, size_t /*auxBufferIdx*/,
        LeafT**, BufT*, size_t /*bufsPerLeaf*/)
    {
        // Buffers can't be swapped into const trees.
    }
};

} // namespace tree
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TREE_LEAFMANAGER_HAS_BEEN_INCLUDED
34,615
C
38.788506
99
0.600722
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/LeafNode.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_TREE_LEAFNODE_HAS_BEEN_INCLUDED #define OPENVDB_TREE_LEAFNODE_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/util/NodeMasks.h> #include <openvdb/io/Compression.h> // for io::readData(), etc. #include "Iterator.h" #include "LeafBuffer.h" #include <algorithm> // for std::nth_element() #include <iostream> #include <memory> #include <sstream> #include <string> #include <type_traits> #include <vector> class TestLeaf; template<typename> class TestLeafIO; namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { template<Index, typename> struct SameLeafConfig; // forward declaration /// @brief Templated block class to hold specific data types and a fixed /// number of values determined by Log2Dim. The actual coordinate /// dimension of the block is 2^Log2Dim, i.e. Log2Dim=3 corresponds to /// a LeafNode that spans a 8^3 block. template<typename T, Index Log2Dim> class LeafNode { public: using BuildType = T; using ValueType = T; using Buffer = LeafBuffer<ValueType, Log2Dim>; using LeafNodeType = LeafNode<ValueType, Log2Dim>; using NodeMaskType = util::NodeMask<Log2Dim>; using Ptr = SharedPtr<LeafNode>; static const Index LOG2DIM = Log2Dim, // needed by parent nodes TOTAL = Log2Dim, // needed by parent nodes DIM = 1 << TOTAL, // dimension along one coordinate direction NUM_VALUES = 1 << 3 * Log2Dim, NUM_VOXELS = NUM_VALUES, // total number of voxels represented by this node SIZE = NUM_VALUES, LEVEL = 0; // level 0 = leaf /// @brief ValueConverter<T>::Type is the type of a LeafNode having the same /// dimensions as this node but a different value type, T. template<typename OtherValueType> struct ValueConverter { using Type = LeafNode<OtherValueType, Log2Dim>; }; /// @brief SameConfiguration<OtherNodeType>::value is @c true if and only if /// OtherNodeType is the type of a LeafNode with the same dimensions as this node. 
template<typename OtherNodeType>
struct SameConfiguration {
    static const bool value = SameLeafConfig<LOG2DIM, OtherNodeType>::value;
};

/// Default constructor
LeafNode();

/// @brief Constructor
/// @param coords the grid index coordinates of a voxel
/// @param value a value with which to fill the buffer
/// @param active the active state to which to initialize all voxels
explicit LeafNode(const Coord& coords,
                  const ValueType& value = zeroVal<ValueType>(),
                  bool active = false);

/// @brief "Partial creation" constructor used during file input
/// @param coords the grid index coordinates of a voxel
/// @param value a value with which to fill the buffer
/// @param active the active state to which to initialize all voxels
/// @details This constructor does not allocate memory for voxel values.
LeafNode(PartialCreate,
         const Coord& coords,
         const ValueType& value = zeroVal<ValueType>(),
         bool active = false);

/// Deep copy constructor
LeafNode(const LeafNode&);

/// Deep assignment operator
LeafNode& operator=(const LeafNode&) = default;

/// Value conversion copy constructor
template<typename OtherValueType>
explicit LeafNode(const LeafNode<OtherValueType, Log2Dim>& other);

/// @brief Topology copy constructor: copies the other node's active-state
/// mask and assigns @a onValue/@a offValue according to each voxel's state.
template<typename OtherValueType>
LeafNode(const LeafNode<OtherValueType, Log2Dim>& other,
         const ValueType& offValue, const ValueType& onValue, TopologyCopy);

/// @brief Topology copy constructor: copies the other node's active-state
/// mask and fills the buffer with @a background.
template<typename OtherValueType>
LeafNode(const LeafNode<OtherValueType, Log2Dim>& other,
         const ValueType& background, TopologyCopy);

/// Destructor.
~LeafNode();

//
// Statistics
//
/// Return log2 of the dimension of this LeafNode, e.g. 3 if dimensions are 8^3
static Index log2dim() { return Log2Dim; }
/// Return the number of voxels in each coordinate dimension.
static Index dim() { return DIM; }
/// Return the total number of voxels represented by this LeafNode
static Index size() { return SIZE; }
/// Return the total number of voxels represented by this LeafNode
static Index numValues() { return SIZE; }
/// Return the level of this node, which by definition is zero for LeafNodes
static Index getLevel() { return LEVEL; }
/// Append the Log2Dim of this LeafNode to the specified vector
static void getNodeLog2Dims(std::vector<Index>& dims) { dims.push_back(Log2Dim); }
/// Return the dimension of child nodes of this LeafNode, which is one for voxels.
static Index getChildDim() { return 1; }
/// Return the leaf count for this node, which is one.
static Index32 leafCount() { return 1; }
/// no-op
void nodeCount(std::vector<Index32> &) const {}
/// Return the non-leaf count for this node, which is zero.
static Index32 nonLeafCount() { return 0; }
/// Return the child count for this node, which is zero.
static Index32 childCount() { return 0; }

/// Return the number of voxels marked On.
Index64 onVoxelCount() const { return mValueMask.countOn(); }
/// Return the number of voxels marked Off.
Index64 offVoxelCount() const { return mValueMask.countOff(); }
// For a leaf node the leaf-voxel counts are simply its own voxel counts.
Index64 onLeafVoxelCount() const { return onVoxelCount(); }
Index64 offLeafVoxelCount() const { return offVoxelCount(); }
// Leaf nodes hold no tiles.
static Index64 onTileCount() { return 0; }
static Index64 offTileCount() { return 0; }
/// Return @c true if this node has no active voxels.
bool isEmpty() const { return mValueMask.isOff(); }
/// Return @c true if this node contains only active voxels.
bool isDense() const { return mValueMask.isOn(); }
/// @brief Return @c true if memory for this node's buffer has been allocated.
/// @note A buffer that is out-of-core or empty does not count as allocated.
bool isAllocated() const { return !mBuffer.isOutOfCore() && !mBuffer.empty(); }
/// Allocate memory for this node's buffer if it has not already been allocated.
bool allocate() { return mBuffer.allocate(); }

/// Return the memory in bytes occupied by this node.
Index64 memUsage() const;

/// Expand the given bounding box so that it includes this leaf node's active voxels.
/// If visitVoxels is false this LeafNode will be approximated as dense, i.e. with all
/// voxels active. Else the individual active voxels are visited to produce a tight bbox.
void evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels = true) const;

/// @brief Return the bounding box of this node, i.e., the full index space
/// spanned by this leaf node.
CoordBBox getNodeBoundingBox() const { return CoordBBox::createCube(mOrigin, DIM); }

/// Set the grid index coordinates of this node's local origin.
void setOrigin(const Coord& origin) { mOrigin = origin; }
//@{
/// Return the grid index coordinates of this node's local origin.
const Coord& origin() const { return mOrigin; }
void getOrigin(Coord& origin) const { origin = mOrigin; }
void getOrigin(Int32& x, Int32& y, Int32& z) const { mOrigin.asXYZ(x, y, z); }
//@}

/// Return the linear table offset of the given global or local coordinates.
static Index coordToOffset(const Coord& xyz);
/// @brief Return the local coordinates for a linear table offset,
/// where offset 0 has coordinates (0, 0, 0).
static Coord offsetToLocalCoord(Index n);
/// Return the global coordinates for a linear table offset.
Coord offsetToGlobalCoord(Index n) const;

/// Return a string representation of this node.
std::string str() const;

/// @brief Return @c true if the given node (which may have a different @c ValueType
/// than this node) has the same active value topology as this node.
template<typename OtherType, Index OtherLog2Dim>
bool hasSameTopology(const LeafNode<OtherType, OtherLog2Dim>* other) const;

/// Check for buffer, state and origin equivalence.
bool operator==(const LeafNode& other) const;
bool operator!=(const LeafNode& other) const { return !(other == *this); }

protected:
    using MaskOnIterator = typename NodeMaskType::OnIterator;
    using MaskOffIterator = typename NodeMaskType::OffIterator;
    using MaskDenseIterator = typename NodeMaskType::DenseIterator;

    // Type tags to disambiguate template instantiations
    struct ValueOn {}; struct ValueOff {}; struct ValueAll {};
    struct ChildOn {}; struct ChildOff {}; struct ChildAll {};

    // Iterator over voxel values, driven by a mask iterator that selects
    // which offsets (on, off, or all) are visited.
    template<typename MaskIterT, typename NodeT, typename ValueT, typename TagT>
    struct ValueIter:
        // Derives from SparseIteratorBase, but can also be used as a dense iterator,
        // if MaskIterT is a dense mask iterator type.
        public SparseIteratorBase<
            MaskIterT, ValueIter<MaskIterT, NodeT, ValueT, TagT>, NodeT, ValueT>
    {
        using BaseT = SparseIteratorBase<MaskIterT, ValueIter, NodeT, ValueT>;

        ValueIter() {}
        ValueIter(const MaskIterT& iter, NodeT* parent): BaseT(iter, parent) {}

        ValueT& getItem(Index pos) const { return this->parent().getValue(pos); }
        ValueT& getValue() const { return this->parent().getValue(this->pos()); }

        // Note: setItem() can't be called on const iterators.
        void setItem(Index pos, const ValueT& value) const
        {
            this->parent().setValueOnly(pos, value);
        }
        // Note: setValue() can't be called on const iterators.
        void setValue(const ValueT& value) const
        {
            this->parent().setValueOnly(this->pos(), value);
        }

        // Note: modifyItem() can't be called on const iterators.
        template<typename ModifyOp>
        void modifyItem(Index n, const ModifyOp& op) const { this->parent().modifyValue(n, op); }
        // Note: modifyValue() can't be called on const iterators.
        template<typename ModifyOp>
        void modifyValue(const ModifyOp& op) const { this->parent().modifyValue(this->pos(), op); }
    };

    /// Leaf nodes have no children, so their child iterators have no get/set accessors.
    template<typename MaskIterT, typename NodeT, typename TagT>
    struct ChildIter:
        public SparseIteratorBase<MaskIterT, ChildIter<MaskIterT, NodeT, TagT>, NodeT, ValueType>
    {
        ChildIter() {}
        ChildIter(const MaskIterT& iter, NodeT* parent): SparseIteratorBase<
            MaskIterT, ChildIter<MaskIterT, NodeT, TagT>, NodeT, ValueType>(iter, parent) {}
    };

    // Dense iterator over all voxels; reports no children (child is always null).
    template<typename NodeT, typename ValueT, typename TagT>
    struct DenseIter: public DenseIteratorBase<
        MaskDenseIterator, DenseIter<NodeT, ValueT, TagT>, NodeT, /*ChildT=*/void, ValueT>
    {
        using BaseT = DenseIteratorBase<MaskDenseIterator, DenseIter, NodeT, void, ValueT>;
        using NonConstValueT = typename BaseT::NonConstValueType;

        DenseIter() {}
        DenseIter(const MaskDenseIterator& iter, NodeT* parent): BaseT(iter, parent) {}

        bool getItem(Index pos, void*& child, NonConstValueT& value) const
        {
            value = this->parent().getValue(pos);
            child = nullptr;
            return false; // no child
        }

        // Note: setItem() can't be called on const iterators.
        //void setItem(Index pos, void* child) const {}

        // Note: unsetItem() can't be called on const iterators.
        void unsetItem(Index pos, const ValueT& value) const
        {
            this->parent().setValueOnly(pos, value);
        }
    };

public:
    // Mutable and const iterator aliases over active (On), inactive (Off)
    // and all (All) voxel values, plus the corresponding child iterators.
    using ValueOnIter = ValueIter<MaskOnIterator, LeafNode, const ValueType, ValueOn>;
    using ValueOnCIter = ValueIter<MaskOnIterator, const LeafNode, const ValueType, ValueOn>;
    using ValueOffIter = ValueIter<MaskOffIterator, LeafNode, const ValueType, ValueOff>;
    using ValueOffCIter = ValueIter<MaskOffIterator,const LeafNode,const ValueType,ValueOff>;
    using ValueAllIter = ValueIter<MaskDenseIterator, LeafNode, const ValueType, ValueAll>;
    using ValueAllCIter = ValueIter<MaskDenseIterator,const LeafNode,const ValueType,ValueAll>;
    using ChildOnIter = ChildIter<MaskOnIterator, LeafNode, ChildOn>;
    using ChildOnCIter = ChildIter<MaskOnIterator, const LeafNode, ChildOn>;
    using ChildOffIter = ChildIter<MaskOffIterator, LeafNode, ChildOff>;
    using ChildOffCIter = ChildIter<MaskOffIterator, const LeafNode, ChildOff>;
    using ChildAllIter = DenseIter<LeafNode, ValueType, ChildAll>;
    using ChildAllCIter = DenseIter<const LeafNode, const ValueType, ChildAll>;

    ValueOnCIter cbeginValueOn() const { return ValueOnCIter(mValueMask.beginOn(), this); }
    ValueOnCIter beginValueOn() const { return ValueOnCIter(mValueMask.beginOn(), this); }
    ValueOnIter beginValueOn() { return ValueOnIter(mValueMask.beginOn(), this); }
    ValueOffCIter cbeginValueOff() const { return ValueOffCIter(mValueMask.beginOff(), this); }
    ValueOffCIter beginValueOff() const { return ValueOffCIter(mValueMask.beginOff(), this); }
    ValueOffIter beginValueOff() { return ValueOffIter(mValueMask.beginOff(), this); }
    ValueAllCIter cbeginValueAll() const { return ValueAllCIter(mValueMask.beginDense(), this); }
    ValueAllCIter beginValueAll() const { return ValueAllCIter(mValueMask.beginDense(), this); }
    ValueAllIter beginValueAll() { return ValueAllIter(mValueMask.beginDense(), this); }

    ValueOnCIter cendValueOn() const { return ValueOnCIter(mValueMask.endOn(), this); }
    ValueOnCIter endValueOn() const { return ValueOnCIter(mValueMask.endOn(), this); }
    ValueOnIter endValueOn() { return ValueOnIter(mValueMask.endOn(), this); }
    ValueOffCIter cendValueOff() const { return ValueOffCIter(mValueMask.endOff(), this); }
    ValueOffCIter endValueOff() const { return ValueOffCIter(mValueMask.endOff(), this); }
    ValueOffIter endValueOff() { return ValueOffIter(mValueMask.endOff(), this); }
    ValueAllCIter cendValueAll() const { return ValueAllCIter(mValueMask.endDense(), this); }
    ValueAllCIter endValueAll() const { return ValueAllCIter(mValueMask.endDense(), this); }
    ValueAllIter endValueAll() { return ValueAllIter(mValueMask.endDense(), this); }

    // Note that [c]beginChildOn() and [c]beginChildOff() actually return end iterators,
    // because leaf nodes have no children.
    ChildOnCIter cbeginChildOn() const { return ChildOnCIter(mValueMask.endOn(), this); }
    ChildOnCIter beginChildOn() const { return ChildOnCIter(mValueMask.endOn(), this); }
    ChildOnIter beginChildOn() { return ChildOnIter(mValueMask.endOn(), this); }
    ChildOffCIter cbeginChildOff() const { return ChildOffCIter(mValueMask.endOff(), this); }
    ChildOffCIter beginChildOff() const { return ChildOffCIter(mValueMask.endOff(), this); }
    ChildOffIter beginChildOff() { return ChildOffIter(mValueMask.endOff(), this); }
    ChildAllCIter cbeginChildAll() const { return ChildAllCIter(mValueMask.beginDense(), this); }
    ChildAllCIter beginChildAll() const { return ChildAllCIter(mValueMask.beginDense(), this); }
    ChildAllIter beginChildAll() { return ChildAllIter(mValueMask.beginDense(), this); }

    ChildOnCIter cendChildOn() const { return ChildOnCIter(mValueMask.endOn(), this); }
    ChildOnCIter endChildOn() const { return ChildOnCIter(mValueMask.endOn(), this); }
    ChildOnIter endChildOn() { return ChildOnIter(mValueMask.endOn(), this); }
    ChildOffCIter cendChildOff() const { return ChildOffCIter(mValueMask.endOff(), this); }
    ChildOffCIter endChildOff() const { return ChildOffCIter(mValueMask.endOff(), this); }
    ChildOffIter endChildOff() { return ChildOffIter(mValueMask.endOff(), this); }
    ChildAllCIter cendChildAll() const { return ChildAllCIter(mValueMask.endDense(), this); }
    ChildAllCIter endChildAll() const { return ChildAllCIter(mValueMask.endDense(), this); }
    ChildAllIter endChildAll() { return ChildAllIter(mValueMask.endDense(), this); }

    //
    // Buffer management
    //
    /// @brief Exchange this node's data buffer with the given data buffer
    /// without changing the active states of the values.
    void swap(Buffer& other) { mBuffer.swap(other); }
    const Buffer& buffer() const { return mBuffer; }
    Buffer& buffer() { return mBuffer; }

    //
    // I/O methods
    //
    /// @brief Read in just the topology.
    /// @param is the stream from which to read
    /// @param fromHalf if true, floating-point input values are assumed to be 16-bit
    void readTopology(std::istream& is, bool fromHalf = false);
    /// @brief Write out just the topology.
    /// @param os the stream to which to write
    /// @param toHalf if true, output floating-point values as 16-bit half floats
    void writeTopology(std::ostream& os, bool toHalf = false) const;

    /// @brief Read buffers from a stream.
    /// @param is the stream from which to read
    /// @param fromHalf if true, floating-point input values are assumed to be 16-bit
    void readBuffers(std::istream& is, bool fromHalf = false);
    /// @brief Read buffers that intersect the given bounding box.
    /// @param is the stream from which to read
    /// @param bbox an index-space bounding box
    /// @param fromHalf if true, floating-point input values are assumed to be 16-bit
    void readBuffers(std::istream& is, const CoordBBox& bbox, bool fromHalf = false);
    /// @brief Write buffers to a stream.
    /// @param os the stream to which to write
    /// @param toHalf if true, output floating-point values as 16-bit half floats
    void writeBuffers(std::ostream& os, bool toHalf = false) const;

    size_t streamingSize(bool toHalf = false) const;

    //
    // Accessor methods
    //
    /// Return the value of the voxel at the given coordinates.
    const ValueType& getValue(const Coord& xyz) const;
    /// Return the value of the voxel at the given linear offset.
    const ValueType& getValue(Index offset) const;

    /// @brief Return @c true if the voxel at the given coordinates is active.
    /// @param xyz the coordinates of the voxel to be probed
    /// @param[out] val the value of the voxel at the given coordinates
    bool probeValue(const Coord& xyz, ValueType& val) const;
    /// @brief Return @c true if the voxel at the given offset is active.
    /// @param offset the linear offset of the voxel to be probed
    /// @param[out] val the value of the voxel at the given coordinates
    bool probeValue(Index offset, ValueType& val) const;

    /// Return the level (i.e., 0) at which leaf node values reside.
    static Index getValueLevel(const Coord&) { return LEVEL; }

    /// Set the active state of the voxel at the given coordinates but don't change its value.
    void setActiveState(const Coord& xyz, bool on);
    /// Set the active state of the voxel at the given offset but don't change its value.
    void setActiveState(Index offset, bool on) { assert(offset<SIZE); mValueMask.set(offset, on); }

    /// Set the value of the voxel at the given coordinates but don't change its active state.
    void setValueOnly(const Coord& xyz, const ValueType& val);
    /// Set the value of the voxel at the given offset but don't change its active state.
    void setValueOnly(Index offset, const ValueType& val);

    /// Mark the voxel at the given coordinates as inactive but don't change its value.
    void setValueOff(const Coord& xyz) { mValueMask.setOff(LeafNode::coordToOffset(xyz)); }
    /// Mark the voxel at the given offset as inactive but don't change its value.
    void setValueOff(Index offset) { assert(offset < SIZE); mValueMask.setOff(offset); }

    /// Set the value of the voxel at the given coordinates and mark the voxel as inactive.
    void setValueOff(const Coord& xyz, const ValueType& val);
    /// Set the value of the voxel at the given offset and mark the voxel as inactive.
    void setValueOff(Index offset, const ValueType& val);

    /// Mark the voxel at the given coordinates as active but don't change its value.
    void setValueOn(const Coord& xyz) { mValueMask.setOn(LeafNode::coordToOffset(xyz)); }
    /// Mark the voxel at the given offset as active but don't change its value.
    void setValueOn(Index offset) { assert(offset < SIZE); mValueMask.setOn(offset); }
    /// Set the value of the voxel at the given coordinates and mark the voxel as active.
    void setValueOn(const Coord& xyz, const ValueType& val) {
        this->setValueOn(LeafNode::coordToOffset(xyz), val);
    }
    /// Set the value of the voxel at the given coordinates and mark the voxel as active.
    void setValue(const Coord& xyz, const ValueType& val) { this->setValueOn(xyz, val); }
    /// Set the value of the voxel at the given offset and mark the voxel as active.
    void setValueOn(Index offset, const ValueType& val) {
        mBuffer.setValue(offset, val);
        mValueMask.setOn(offset);
    }

    /// @brief Apply a functor to the value of the voxel at the given offset
    /// and mark the voxel as active.
    template<typename ModifyOp>
    void modifyValue(Index offset, const ModifyOp& op)
    {
        // Force a delay-loaded buffer into memory before touching its values.
        mBuffer.loadValues();
        if (!mBuffer.empty()) {
            // in-place modify value
            ValueType& val = const_cast<ValueType&>(mBuffer[offset]);
            op(val);
            mValueMask.setOn(offset);
        }
    }

    /// @brief Apply a functor to the value of the voxel at the given coordinates
    /// and mark the voxel as active.
    template<typename ModifyOp>
    void modifyValue(const Coord& xyz, const ModifyOp& op)
    {
        this->modifyValue(this->coordToOffset(xyz), op);
    }

    /// Apply a functor to the voxel at the given coordinates.
    template<typename ModifyOp>
    void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op)
    {
        // Force a delay-loaded buffer into memory before touching its values.
        mBuffer.loadValues();
        if (!mBuffer.empty()) {
            const Index offset = this->coordToOffset(xyz);
            // The functor receives the current state and may change it.
            bool state = mValueMask.isOn(offset);
            // in-place modify value
            ValueType& val = const_cast<ValueType&>(mBuffer[offset]);
            op(val, state);
            mValueMask.set(offset, state);
        }
    }

    /// Mark all voxels as active but don't change their values.
    void setValuesOn() { mValueMask.setOn(); }
    /// Mark all voxels as inactive but don't change their values.
    void setValuesOff() { mValueMask.setOff(); }

    /// Return @c true if the voxel at the given coordinates is active.
    bool isValueOn(const Coord& xyz) const {return this->isValueOn(LeafNode::coordToOffset(xyz));}
    /// Return @c true if the voxel at the given offset is active.
    bool isValueOn(Index offset) const { return mValueMask.isOn(offset); }

    /// Return @c false since leaf nodes never contain tiles.
    static bool hasActiveTiles() { return false; }

    /// Set all voxels that lie outside the given axis-aligned box to the background.
    void clip(const CoordBBox&, const ValueType& background);

    /// Set all voxels within an axis-aligned box to the specified value and active state.
    void fill(const CoordBBox& bbox, const ValueType&, bool active = true);
    /// Set all voxels within an axis-aligned box to the specified value and active state.
    void denseFill(const CoordBBox& bbox, const ValueType& value, bool active = true)
    {
        this->fill(bbox, value, active);
    }

    /// Set all voxels to the specified value but don't change their active states.
    void fill(const ValueType& value);
    /// Set all voxels to the specified value and active state.
    void fill(const ValueType& value, bool active);

    /// @brief Copy into a dense grid the values of the voxels that lie within
    /// a given bounding box.
/// /// @param bbox inclusive bounding box of the voxels to be copied into the dense grid /// @param dense dense grid with a stride in @e z of one (see tools::Dense /// in tools/Dense.h for the required API) /// /// @note @a bbox is assumed to be identical to or contained in the coordinate domains /// of both the dense grid and this node, i.e., no bounds checking is performed. /// @note Consider using tools::CopyToDense in tools/Dense.h /// instead of calling this method directly. template<typename DenseT> void copyToDense(const CoordBBox& bbox, DenseT& dense) const; /// @brief Copy from a dense grid into this node the values of the voxels /// that lie within a given bounding box. /// @details Only values that are different (by more than the given tolerance) /// from the background value will be active. Other values are inactive /// and truncated to the background value. /// /// @param bbox inclusive bounding box of the voxels to be copied into this node /// @param dense dense grid with a stride in @e z of one (see tools::Dense /// in tools/Dense.h for the required API) /// @param background background value of the tree that this node belongs to /// @param tolerance tolerance within which a value equals the background value /// /// @note @a bbox is assumed to be identical to or contained in the coordinate domains /// of both the dense grid and this node, i.e., no bounds checking is performed. /// @note Consider using tools::CopyFromDense in tools/Dense.h /// instead of calling this method directly. template<typename DenseT> void copyFromDense(const CoordBBox& bbox, const DenseT& dense, const ValueType& background, const ValueType& tolerance); /// @brief Return the value of the voxel at the given coordinates. /// @note Used internally by ValueAccessor. template<typename AccessorT> const ValueType& getValueAndCache(const Coord& xyz, AccessorT&) const { return this->getValue(xyz); } /// @brief Return @c true if the voxel at the given coordinates is active. 
/// @note Used internally by ValueAccessor. template<typename AccessorT> bool isValueOnAndCache(const Coord& xyz, AccessorT&) const { return this->isValueOn(xyz); } /// @brief Change the value of the voxel at the given coordinates and mark it as active. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setValueAndCache(const Coord& xyz, const ValueType& val, AccessorT&) { this->setValueOn(xyz, val); } /// @brief Change the value of the voxel at the given coordinates /// but preserve its state. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setValueOnlyAndCache(const Coord& xyz, const ValueType& val, AccessorT&) { this->setValueOnly(xyz, val); } /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. /// @note Used internally by ValueAccessor. template<typename ModifyOp, typename AccessorT> void modifyValueAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&) { this->modifyValue(xyz, op); } /// Apply a functor to the voxel at the given coordinates. /// @note Used internally by ValueAccessor. template<typename ModifyOp, typename AccessorT> void modifyValueAndActiveStateAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&) { this->modifyValueAndActiveState(xyz, op); } /// @brief Change the value of the voxel at the given coordinates and mark it as inactive. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setValueOffAndCache(const Coord& xyz, const ValueType& value, AccessorT&) { this->setValueOff(xyz, value); } /// @brief Set the active state of the voxel at the given coordinates /// without changing its value. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setActiveStateAndCache(const Coord& xyz, bool on, AccessorT&) { this->setActiveState(xyz, on); } /// @brief Return @c true if the voxel at the given coordinates is active /// and return the voxel value in @a val. 
/// @note Used internally by ValueAccessor. template<typename AccessorT> bool probeValueAndCache(const Coord& xyz, ValueType& val, AccessorT&) const { return this->probeValue(xyz, val); } /// @brief Return the value of the voxel at the given coordinates and return /// its active state and level (i.e., 0) in @a state and @a level. /// @note Used internally by ValueAccessor. template<typename AccessorT> const ValueType& getValue(const Coord& xyz, bool& state, int& level, AccessorT&) const { const Index offset = this->coordToOffset(xyz); state = mValueMask.isOn(offset); level = LEVEL; return mBuffer[offset]; } /// @brief Return the LEVEL (=0) at which leaf node values reside. /// @note Used internally by ValueAccessor (note last argument is a dummy). template<typename AccessorT> static Index getValueLevelAndCache(const Coord&, AccessorT&) { return LEVEL; } /// @brief Return a const reference to the first value in the buffer. /// @note Though it is potentially risky you can convert this /// to a non-const pointer by means of const_case<ValueType*>&. const ValueType& getFirstValue() const { return mBuffer[0]; } /// Return a const reference to the last value in the buffer. const ValueType& getLastValue() const { return mBuffer[SIZE - 1]; } /// @brief Replace inactive occurrences of @a oldBackground with @a newBackground, /// and inactive occurrences of @a -oldBackground with @a -newBackground. void resetBackground(const ValueType& oldBackground, const ValueType& newBackground); void negate(); /// @brief No-op /// @details This function exists only to enable template instantiation. 
    void voxelizeActiveTiles(bool = true) {}

    template<MergePolicy Policy> void merge(const LeafNode&);
    template<MergePolicy Policy> void merge(const ValueType& tileValue, bool tileActive);
    template<MergePolicy Policy>
    void merge(const LeafNode& other, const ValueType& /*bg*/, const ValueType& /*otherBG*/);

    /// @brief Union this node's set of active values with the active values
    /// of the other node, whose @c ValueType may be different. So a
    /// resulting voxel will be active if either of the original voxels
    /// were active.
    ///
    /// @note This operation modifies only active states, not values.
    template<typename OtherType>
    void topologyUnion(const LeafNode<OtherType, Log2Dim>& other);

    /// @brief Intersect this node's set of active values with the active values
    /// of the other node, whose @c ValueType may be different. So a
    /// resulting voxel will be active only if both of the original voxels
    /// were active.
    ///
    /// @details The last dummy argument is required to match the signature
    /// for InternalNode::topologyIntersection.
    ///
    /// @note This operation modifies only active states, not
    /// values. Also note that this operation can result in all voxels
    /// being inactive so consider subsequently calling prune.
    template<typename OtherType>
    void topologyIntersection(const LeafNode<OtherType, Log2Dim>& other, const ValueType&);

    /// @brief Difference this node's set of active values with the active values
    /// of the other node, whose @c ValueType may be different. So a
    /// resulting voxel will be active only if the original voxel is
    /// active in this LeafNode and inactive in the other LeafNode.
    ///
    /// @details The last dummy argument is required to match the signature
    /// for InternalNode::topologyDifference.
    ///
    /// @note This operation modifies only active states, not values.
    /// Also, because it can deactivate all of this node's voxels,
    /// consider subsequently calling prune.
    template<typename OtherType>
    void topologyDifference(const LeafNode<OtherType, Log2Dim>& other, const ValueType&);

    template<typename CombineOp>
    void combine(const LeafNode& other, CombineOp& op);
    template<typename CombineOp>
    void combine(const ValueType& value, bool valueIsActive, CombineOp& op);

    template<typename CombineOp, typename OtherType /*= ValueType*/>
    void combine2(const LeafNode& other, const OtherType&, bool valueIsActive, CombineOp&);
    template<typename CombineOp, typename OtherNodeT /*= LeafNode*/>
    void combine2(const ValueType&, const OtherNodeT& other, bool valueIsActive, CombineOp&);
    template<typename CombineOp, typename OtherNodeT /*= LeafNode*/>
    void combine2(const LeafNode& b0, const OtherNodeT& b1, CombineOp&);

    /// @brief Calls the templated functor BBoxOp with bounding box
    /// information. An additional level argument is provided to the
    /// callback.
    ///
    /// @note The bounding boxes are guaranteed to be non-overlapping.
    template<typename BBoxOp> void visitActiveBBox(BBoxOp&) const;

    template<typename VisitorOp> void visit(VisitorOp&);
    template<typename VisitorOp> void visit(VisitorOp&) const;

    template<typename OtherLeafNodeType, typename VisitorOp>
    void visit2Node(OtherLeafNodeType& other, VisitorOp&);
    template<typename OtherLeafNodeType, typename VisitorOp>
    void visit2Node(OtherLeafNodeType& other, VisitorOp&) const;
    template<typename IterT, typename VisitorOp>
    void visit2(IterT& otherIter, VisitorOp&, bool otherIsLHS = false);
    template<typename IterT, typename VisitorOp>
    void visit2(IterT& otherIter, VisitorOp&, bool otherIsLHS = false) const;

    //@{
    /// This function exists only to enable template instantiation.
    void prune(const ValueType& /*tolerance*/ = zeroVal<ValueType>()) {}
    void addLeaf(LeafNode*) {}
    template<typename AccessorT>
    void addLeafAndCache(LeafNode*, AccessorT&) {}
    template<typename NodeT>
    NodeT* stealNode(const Coord&, const ValueType&, bool) { return nullptr; }
    template<typename NodeT>
    NodeT* probeNode(const Coord&) { return nullptr; }
    template<typename NodeT>
    const NodeT* probeConstNode(const Coord&) const { return nullptr; }
    template<typename ArrayT> void getNodes(ArrayT&) const {}
    template<typename ArrayT> void stealNodes(ArrayT&, const ValueType&, bool) {}
    //@}

    void addTile(Index level, const Coord&, const ValueType&, bool);
    void addTile(Index offset, const ValueType&, bool);
    template<typename AccessorT>
    void addTileAndCache(Index, const Coord&, const ValueType&, bool, AccessorT&);

    //@{
    /// @brief Return a pointer to this node.
    LeafNode* touchLeaf(const Coord&) { return this; }
    template<typename AccessorT>
    LeafNode* touchLeafAndCache(const Coord&, AccessorT&) { return this; }
    template<typename NodeT, typename AccessorT>
    NodeT* probeNodeAndCache(const Coord&, AccessorT&)
    {
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        // A leaf can only be probed as a LeafNode; any other NodeT yields null.
        if (!(std::is_same<NodeT, LeafNode>::value)) return nullptr;
        return reinterpret_cast<NodeT*>(this);
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }
    LeafNode* probeLeaf(const Coord&) { return this; }
    template<typename AccessorT>
    LeafNode* probeLeafAndCache(const Coord&, AccessorT&) { return this; }
    //@}

    //@{
    /// @brief Return a @const pointer to this node.
    const LeafNode* probeConstLeaf(const Coord&) const { return this; }
    template<typename AccessorT>
    const LeafNode* probeConstLeafAndCache(const Coord&, AccessorT&) const { return this; }
    template<typename AccessorT>
    const LeafNode* probeLeafAndCache(const Coord&, AccessorT&) const { return this; }
    const LeafNode* probeLeaf(const Coord&) const { return this; }
    template<typename NodeT, typename AccessorT>
    const NodeT* probeConstNodeAndCache(const Coord&, AccessorT&) const
    {
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        // A leaf can only be probed as a LeafNode; any other NodeT yields null.
        if (!(std::is_same<NodeT, LeafNode>::value)) return nullptr;
        return reinterpret_cast<const NodeT*>(this);
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }
    //@}

    /// Return @c true if all of this node's values have the same active state
    /// and are in the range this->getFirstValue() +/- @a tolerance.
    ///
    ///
    /// @param firstValue Is updated with the first value of this leaf node.
    /// @param state      Is updated with the state of all values IF method
    ///                   returns @c true. Else the value is undefined!
    /// @param tolerance  The tolerance used to determine if values are
    ///                   approximately equal to the first value.
    bool isConstant(ValueType& firstValue, bool& state,
                    const ValueType& tolerance = zeroVal<ValueType>()) const;

    /// Return @c true if all of this node's values have the same active state
    /// and the range (@a maxValue - @a minValue) < @a tolerance.
    ///
    /// @param minValue  Is updated with the minimum of all values IF method
    ///                  returns @c true. Else the value is undefined!
    /// @param maxValue  Is updated with the maximum of all values IF method
    ///                  returns @c true. Else the value is undefined!
    /// @param state     Is updated with the state of all values IF method
    ///                  returns @c true. Else the value is undefined!
    /// @param tolerance The tolerance used to determine if values are
    ///                  approximately constant.
    bool isConstant(ValueType& minValue, ValueType& maxValue, bool& state,
                    const ValueType& tolerance = zeroVal<ValueType>()) const;

    /// @brief Computes the median value of all the active AND inactive voxels in this node.
    /// @return The median value of all values in this node.
    ///
    /// @param tmp Optional temporary storage that can hold at least NUM_VALUES values
    ///            Use of this temporary storage can improve performance
    ///            when this method is called multiple times.
    ///
    /// @note If tmp = this->buffer().data() then the median
    ///       value is computed very efficiently (in place) but
    ///       the voxel values in this node are re-shuffled!
    ///
    /// @warning If tmp != nullptr then it is the responsibility of
    ///          the client code that it points to enough memory to
    ///          hold NUM_VALUES elements of type ValueType.
    ValueType medianAll(ValueType *tmp = nullptr) const;

    /// @brief Computes the median value of all the active voxels in this node.
    /// @return The number of active voxels.
    ///
    /// @param value If the return value is non zero @a value is updated
    ///              with the median value.
    ///
    /// @param tmp Optional temporary storage that can hold at least
    ///            as many values as there are active voxels in this node.
    ///            Use of this temporary storage can improve performance
    ///            when this method is called multiple times.
    ///
    /// @warning If tmp != nullptr then it is the responsibility of
    ///          the client code that it points to enough memory to
    ///          hold the number of active voxels of type ValueType.
    Index medianOn(ValueType &value, ValueType *tmp = nullptr) const;

    /// @brief Computes the median value of all the inactive voxels in this node.
    /// @return The number of inactive voxels.
    ///
    /// @param value If the return value is non zero @a value is updated
    ///              with the median value.
    ///
    /// @param tmp Optional temporary storage that can hold at least
    ///            as many values as there are inactive voxels in this node.
    ///            Use of this temporary storage can improve performance
    ///            when this method is called multiple times.
    ///
    /// @warning If tmp != nullptr then it is the responsibility of
    ///          the client code that it points to enough memory to
    ///          hold the number of inactive voxels of type ValueType.
    Index medianOff(ValueType &value, ValueType *tmp = nullptr) const;

    /// Return @c true if all of this node's values are inactive.
    bool isInactive() const { return mValueMask.isOff(); }

protected:
    friend class ::TestLeaf;
    template<typename> friend class ::TestLeafIO;

    // During topology-only construction, access is needed
    // to protected/private members of other template instances.
    template<typename, Index> friend class LeafNode;

    friend struct ValueIter<MaskOnIterator, LeafNode, ValueType, ValueOn>;
    friend struct ValueIter<MaskOffIterator, LeafNode, ValueType, ValueOff>;
    friend struct ValueIter<MaskDenseIterator, LeafNode, ValueType, ValueAll>;
    friend struct ValueIter<MaskOnIterator, const LeafNode, ValueType, ValueOn>;
    friend struct ValueIter<MaskOffIterator, const LeafNode, ValueType, ValueOff>;
    friend struct ValueIter<MaskDenseIterator, const LeafNode, ValueType, ValueAll>;

    // Allow iterators to call mask accessor methods (see below).
    /// @todo Make mask accessors public?
    friend class IteratorBase<MaskOnIterator, LeafNode>;
    friend class IteratorBase<MaskOffIterator, LeafNode>;
    friend class IteratorBase<MaskDenseIterator, LeafNode>;

    // Mask accessors
public:
    bool isValueMaskOn(Index n) const { return mValueMask.isOn(n); }
    bool isValueMaskOn() const { return mValueMask.isOn(); }
    bool isValueMaskOff(Index n) const { return mValueMask.isOff(n); }
    bool isValueMaskOff() const { return mValueMask.isOff(); }
    const NodeMaskType& getValueMask() const { return mValueMask; }
    NodeMaskType& getValueMask() { return mValueMask; }
    const NodeMaskType& valueMask() const { return mValueMask; }
    void setValueMask(const NodeMaskType& mask) { mValueMask = mask; }
    bool isChildMaskOn(Index) const { return false; } // leaf nodes have no children
    bool isChildMaskOff(Index) const { return true; }
    bool isChildMaskOff() const { return true; }
protected:
    void setValueMask(Index n, bool on) { mValueMask.set(n, on); }
    void setValueMaskOn(Index n) { mValueMask.setOn(n); }
    void setValueMaskOff(Index n) { mValueMask.setOff(n); }

    inline void skipCompressedValues(bool seekable, std::istream&, bool fromHalf);

    /// Compute the origin of the leaf node that contains the voxel with the given coordinates.
    // Mask off the lower DIM-1 bits of each component, snapping xyz to the
    // origin of its enclosing leaf node (DIM is a power of two).
    static void evalNodeOrigin(Coord& xyz) { xyz &= ~(DIM - 1); }

    template<typename NodeT, typename VisitorOp, typename ChildAllIterT>
    static inline void doVisit(NodeT&, VisitorOp&);

    template<typename NodeT, typename OtherNodeT, typename VisitorOp,
             typename ChildAllIterT, typename OtherChildAllIterT>
    static inline void doVisit2Node(NodeT& self, OtherNodeT& other, VisitorOp&);

    template<typename NodeT, typename VisitorOp,
             typename ChildAllIterT, typename OtherChildAllIterT>
    static inline void doVisit2(NodeT& self, OtherChildAllIterT&, VisitorOp&, bool otherIsLHS);

private:
    /// Buffer containing the actual data values
    Buffer mBuffer;
    /// Bitmask that determines which voxels are active
    NodeMaskType mValueMask;
    /// Global grid index coordinates (x,y,z) of the local origin of this node
    Coord mOrigin;
}; // end of LeafNode class


////////////////////////////////////////


//@{
/// Helper metafunction used to implement LeafNode::SameConfiguration
/// (which, as an inner class, can't be independently specialized)
template<Index Dim1, typename NodeT2>
struct SameLeafConfig { static const bool value = false; };

template<Index Dim1, typename T2>
struct SameLeafConfig<Dim1, LeafNode<T2, Dim1> > { static const bool value = true; };
//@}


////////////////////////////////////////


template<typename T, Index Log2Dim>
inline
LeafNode<T, Log2Dim>::LeafNode():
    mValueMask(),//default is off!
    mOrigin(0, 0, 0)
{
}


template<typename T, Index Log2Dim>
inline
LeafNode<T, Log2Dim>::LeafNode(const Coord& xyz, const ValueType& val, bool active):
    mBuffer(val),
    mValueMask(active),
    // Snap the given coordinates to the leaf origin (DIM is a power of two).
    mOrigin(xyz & (~(DIM - 1)))
{
}


template<typename T, Index Log2Dim>
inline
LeafNode<T, Log2Dim>::LeafNode(PartialCreate, const Coord& xyz, const ValueType& val, bool active):
    // PartialCreate defers allocation of the value buffer.
    mBuffer(PartialCreate(), val),
    mValueMask(active),
    mOrigin(xyz & (~(DIM - 1)))
{
}


template<typename T, Index Log2Dim>
inline
LeafNode<T, Log2Dim>::LeafNode(const LeafNode& other):
    mBuffer(other.mBuffer),
    mValueMask(other.valueMask()),
    mOrigin(other.mOrigin)
{
}


// Copy-construct from a leaf node with the same configuration but a different ValueType.
template<typename T, Index Log2Dim>
template<typename OtherValueType>
inline LeafNode<T, Log2Dim>::LeafNode(const LeafNode<OtherValueType, Log2Dim>& other):
    mValueMask(other.valueMask()),
    mOrigin(other.mOrigin)
{
    struct Local {
        /// @todo Consider using a value conversion functor passed as an argument instead.
        static inline ValueType convertValue(const OtherValueType& val) { return ValueType(val); }
    };
    // Explicitly convert each source value to this node's ValueType.
    for (Index i = 0; i < SIZE; ++i) {
        mBuffer[i] = Local::convertValue(other.mBuffer[i]);
    }
}


// Topology copy: values are discarded; all voxels get the background value.
template<typename T, Index Log2Dim>
template<typename OtherValueType>
inline LeafNode<T, Log2Dim>::LeafNode(const LeafNode<OtherValueType, Log2Dim>& other,
    const ValueType& background, TopologyCopy):
    mBuffer(background),
    mValueMask(other.valueMask()),
    mOrigin(other.mOrigin)
{
}


// Topology copy: active voxels get onValue, inactive voxels get offValue.
template<typename T, Index Log2Dim>
template<typename OtherValueType>
inline LeafNode<T, Log2Dim>::LeafNode(const LeafNode<OtherValueType, Log2Dim>& other,
    const ValueType& offValue, const ValueType& onValue, TopologyCopy):
    mValueMask(other.valueMask()),
    mOrigin(other.mOrigin)
{
    for (Index i = 0; i < SIZE; ++i) {
        mBuffer[i] = (mValueMask.isOn(i) ? onValue : offValue);
    }
}


template<typename T, Index Log2Dim>
inline
LeafNode<T, Log2Dim>::~LeafNode()
{
}


template<typename T, Index Log2Dim>
inline std::string
LeafNode<T, Log2Dim>::str() const
{
    std::ostringstream ostr;
    ostr << "LeafNode @" << mOrigin << ": " << mBuffer;
    return ostr.str();
}


////////////////////////////////////////


// Map (x,y,z) to a linear offset: x varies slowest, z fastest.
template<typename T, Index Log2Dim>
inline Index
LeafNode<T, Log2Dim>::coordToOffset(const Coord& xyz)
{
    assert ((xyz[0] & (DIM-1u)) < DIM && (xyz[1] & (DIM-1u)) < DIM && (xyz[2] & (DIM-1u)) < DIM);
    return ((xyz[0] & (DIM-1u)) << 2*Log2Dim)
        + ((xyz[1] & (DIM-1u)) << Log2Dim)
        + (xyz[2] & (DIM-1u));
}

// Inverse of coordToOffset(), yielding node-local coordinates.
template<typename T, Index Log2Dim>
inline Coord
LeafNode<T, Log2Dim>::offsetToLocalCoord(Index n)
{
    assert(n<(1<< 3*Log2Dim));
    Coord xyz;
    xyz.setX(n >> 2*Log2Dim);
    n &= ((1<<2*Log2Dim)-1);
    xyz.setY(n >> Log2Dim);
    xyz.setZ(n & ((1<<Log2Dim)-1));
    return xyz;
}


template<typename T, Index Log2Dim>
inline Coord
LeafNode<T, Log2Dim>::offsetToGlobalCoord(Index n) const
{
    return (this->offsetToLocalCoord(n) + this->origin());
}


////////////////////////////////////////


template<typename ValueT, Index Log2Dim>
inline const ValueT&
LeafNode<ValueT, Log2Dim>::getValue(const Coord& xyz) const
{
    return this->getValue(LeafNode::coordToOffset(xyz));
}

template<typename ValueT, Index Log2Dim>
inline const ValueT&
LeafNode<ValueT, Log2Dim>::getValue(Index offset) const
{
    assert(offset < SIZE);
    return mBuffer[offset];
}


template<typename T, Index Log2Dim>
inline bool
LeafNode<T, Log2Dim>::probeValue(const Coord& xyz, ValueType& val) const
{
    return this->probeValue(LeafNode::coordToOffset(xyz), val);
}

template<typename T, Index Log2Dim>
inline bool
LeafNode<T, Log2Dim>::probeValue(Index offset, ValueType& val) const
{
    assert(offset < SIZE);
    val = mBuffer[offset];
    return mValueMask.isOn(offset);
}


template<typename T, Index Log2Dim>
inline void
LeafNode<T, Log2Dim>::setValueOff(const Coord& xyz, const ValueType& val)
{
    this->setValueOff(LeafNode::coordToOffset(xyz), val);
}

template<typename T, Index Log2Dim>
inline void
LeafNode<T, Log2Dim>::setValueOff(Index offset, const ValueType& val)
{
    assert(offset < SIZE);
    mBuffer.setValue(offset, val);
    mValueMask.setOff(offset);
}


template<typename T, Index Log2Dim>
inline void
LeafNode<T, Log2Dim>::setActiveState(const Coord& xyz, bool on)
{
    mValueMask.set(this->coordToOffset(xyz), on);
}


template<typename T, Index Log2Dim>
inline void
LeafNode<T, Log2Dim>::setValueOnly(const Coord& xyz, const ValueType& val)
{
    this->setValueOnly(LeafNode::coordToOffset(xyz), val);
}

template<typename T, Index Log2Dim>
inline void
LeafNode<T, Log2Dim>::setValueOnly(Index offset, const ValueType& val)
{
    assert(offset<SIZE);
    mBuffer.setValue(offset, val);
}


////////////////////////////////////////


template<typename T, Index Log2Dim>
inline void
LeafNode<T, Log2Dim>::clip(const CoordBBox& clipBBox, const T& background)
{
    CoordBBox nodeBBox = this->getNodeBoundingBox();
    if (!clipBBox.hasOverlap(nodeBBox)) {
        // This node lies completely outside the clipping region.  Fill it with the background.
        this->fill(background, /*active=*/false);
    } else if (clipBBox.isInside(nodeBBox)) {
        // This node lies completely inside the clipping region.  Leave it intact.
        return;
    }

    // This node isn't completely contained inside the clipping region.
    // Set any voxels that lie outside the region to the background value.

    // Construct a boolean mask that is on inside the clipping region and off outside it.
    NodeMaskType mask;
    nodeBBox.intersect(clipBBox);
    Coord xyz;
    // Reuse one Coord and mutate its components directly in the triple loop.
    int &x = xyz.x(), &y = xyz.y(), &z = xyz.z();
    for (x = nodeBBox.min().x(); x <= nodeBBox.max().x(); ++x) {
        for (y = nodeBBox.min().y(); y <= nodeBBox.max().y(); ++y) {
            for (z = nodeBBox.min().z(); z <= nodeBBox.max().z(); ++z) {
                mask.setOn(static_cast<Index32>(this->coordToOffset(xyz)));
            }
        }
    }

    // Set voxels that lie in the inactive region of the mask (i.e., outside
    // the clipping region) to the background value.
    for (MaskOffIterator maskIter = mask.beginOff(); maskIter; ++maskIter) {
        this->setValueOff(maskIter.pos(), background);
    }
}


////////////////////////////////////////


template<typename T, Index Log2Dim>
inline void
LeafNode<T, Log2Dim>::fill(const CoordBBox& bbox, const ValueType& value, bool active)
{
    // Ensure the value buffer exists (it may be deferred); bail out on failure.
    if (!this->allocate()) return;

    // Restrict the fill region to the part of bbox that lies inside this node.
    auto clippedBBox = this->getNodeBoundingBox();
    clippedBBox.intersect(bbox);
    if (!clippedBBox) return;

    for (Int32 x = clippedBBox.min().x(); x <= clippedBBox.max().x(); ++x) {
        const Index offsetX = (x & (DIM-1u)) << 2*Log2Dim;
        for (Int32 y = clippedBBox.min().y(); y <= clippedBBox.max().y(); ++y) {
            const Index offsetXY = offsetX + ((y & (DIM-1u)) << Log2Dim);
            for (Int32 z = clippedBBox.min().z(); z <= clippedBBox.max().z(); ++z) {
                const Index offset = offsetXY + (z & (DIM-1u));
                mBuffer[offset] = value;
                mValueMask.set(offset, active);
            }
        }
    }
}

template<typename T, Index Log2Dim>
inline void
LeafNode<T, Log2Dim>::fill(const ValueType& value)
{
    mBuffer.fill(value);
}

template<typename T, Index Log2Dim>
inline void
LeafNode<T, Log2Dim>::fill(const ValueType& value, bool active)
{
    mBuffer.fill(value);
    mValueMask.set(active);
}


////////////////////////////////////////


template<typename T, Index Log2Dim>
template<typename DenseT>
inline void
LeafNode<T, Log2Dim>::copyToDense(const CoordBBox& bbox, DenseT& dense) const
{
    // Force a delay-loaded buffer into memory before reading from it.
    mBuffer.loadValues();

    using DenseValueType = typename DenseT::ValueType;

    const size_t xStride = dense.xStride(), yStride = dense.yStride(), zStride = dense.zStride();
    const Coord& min = dense.bbox().min();
    DenseValueType* t0 = dense.data() + zStride * (bbox.min()[2] - min[2]); // target array
    const T* s0 = &mBuffer[bbox.min()[2] & (DIM-1u)]; // source array
    for (Int32 x = bbox.min()[0], ex = bbox.max()[0] + 1; x < ex; ++x) {
        DenseValueType* t1 = t0 + xStride * (x - min[0]);
        const T* s1 = s0 + ((x & (DIM-1u)) << 2*Log2Dim);
        for (Int32 y = bbox.min()[1], ey = bbox.max()[1] + 1; y < ey; ++y) {
            DenseValueType* t2 = t1 + yStride * (y - min[1]);
            const T* s2 = s1 + ((y & (DIM-1u)) << Log2Dim);
            for (Int32 z = bbox.min()[2], ez = bbox.max()[2] + 1; z < ez; ++z, t2 += zStride) {
                *t2 = DenseValueType(*s2++);
            }
        }
    }
}


template<typename T, Index Log2Dim>
template<typename DenseT>
inline void
LeafNode<T, Log2Dim>::copyFromDense(const CoordBBox& bbox, const DenseT& dense,
    const ValueType& background, const ValueType& tolerance)
{
    // Ensure the value buffer exists (it may be deferred); bail out on failure.
    if (!this->allocate()) return;

    using DenseValueType = typename DenseT::ValueType;

    const size_t xStride = dense.xStride(), yStride = dense.yStride(), zStride = dense.zStride();
    const Coord& min = dense.bbox().min();

    const DenseValueType* s0 = dense.data() + zStride * (bbox.min()[2] - min[2]); // source
    const Int32 n0 = bbox.min()[2] & (DIM-1u);
    for (Int32 x = bbox.min()[0], ex = bbox.max()[0]+1; x < ex; ++x) {
        const DenseValueType* s1 = s0 + xStride * (x - min[0]);
        const Int32 n1 = n0 + ((x & (DIM-1u)) << 2*LOG2DIM);
        for (Int32 y = bbox.min()[1], ey = bbox.max()[1]+1; y < ey; ++y) {
            const DenseValueType* s2 = s1 + yStride * (y - min[1]);
            Int32 n2 = n1 + ((y & (DIM-1u)) << LOG2DIM);
            for (Int32 z = bbox.min()[2], ez = bbox.max()[2]+1; z < ez; ++z, ++n2, s2 += zStride) {
                // Values within tolerance of the background become inactive
                // background voxels; all others become active copies.
                if (math::isApproxEqual(background, ValueType(*s2), tolerance)) {
                    mValueMask.setOff(n2);
                    mBuffer[n2] = background;
                } else {
                    mValueMask.setOn(n2);
                    mBuffer[n2] = ValueType(*s2);
                }
            }
        }
    }
}


////////////////////////////////////////


template<typename T, Index Log2Dim>
inline void
LeafNode<T, Log2Dim>::readTopology(std::istream& is, bool /*fromHalf*/)
{
    mValueMask.load(is);
}


template<typename T, Index Log2Dim>
inline void
LeafNode<T, Log2Dim>::writeTopology(std::ostream& os, bool /*toHalf*/) const
{
    mValueMask.save(os);
}


////////////////////////////////////////


template<typename T, Index Log2Dim>
inline void
LeafNode<T,Log2Dim>::skipCompressedValues(bool seekable, std::istream& is, bool fromHalf)
{
    if (seekable) {
        // Seek over voxel values.
        io::readCompressedValues<ValueType, NodeMaskType>(
            is, nullptr, SIZE, mValueMask, fromHalf);
    } else {
        // Read and discard voxel values.
        Buffer temp;
        io::readCompressedValues(is, temp.mData, SIZE, mValueMask, fromHalf);
    }
}


template<typename T, Index Log2Dim>
inline void
LeafNode<T,Log2Dim>::readBuffers(std::istream& is, bool fromHalf)
{
    // Delegate to the clipped overload with an infinite (no-op) clip box.
    this->readBuffers(is, CoordBBox::inf(), fromHalf);
}


template<typename T, Index Log2Dim>
inline void
LeafNode<T,Log2Dim>::readBuffers(std::istream& is, const CoordBBox& clipBBox, bool fromHalf)
{
    SharedPtr<io::StreamMetadata> meta = io::getStreamMetadataPtr(is);
    const bool seekable = meta && meta->seekable();

    // Remember where the value mask begins, in case delayed loading needs
    // to revisit it later (see mFileInfo->maskpos below).
    std::streamoff maskpos = is.tellg();

    if (seekable) {
        // Seek over the value mask.
        mValueMask.seek(is);
    } else {
        // Read in the value mask.
        mValueMask.load(is);
    }

    // Older file formats could store more than one buffer per leaf.
    int8_t numBuffers = 1;
    if (io::getFormatVersion(is) < OPENVDB_FILE_VERSION_NODE_MASK_COMPRESSION) {
        // Read in the origin.
        is.read(reinterpret_cast<char*>(&mOrigin), sizeof(Coord::ValueType) * 3);

        // Read in the number of buffers, which should now always be one.
        is.read(reinterpret_cast<char*>(&numBuffers), sizeof(int8_t));
    }

    CoordBBox nodeBBox = this->getNodeBoundingBox();
    if (!clipBBox.hasOverlap(nodeBBox)) {
        // This node lies completely outside the clipping region.
        skipCompressedValues(seekable, is, fromHalf);
        mValueMask.setOff();
        mBuffer.setOutOfCore(false);
    } else {
        // If this node lies completely inside the clipping region and it is being read
        // from a memory-mapped file, delay loading of its buffer until the buffer
        // is actually accessed.  (If this node requires clipping, its buffer
        // must be accessed and therefore must be loaded.)
        io::MappedFile::Ptr mappedFile = io::getMappedFilePtr(is);
        const bool delayLoad = ((mappedFile.get() != nullptr) && clipBBox.isInside(nodeBBox));

        if (delayLoad) {
            // Record everything needed to load the buffer on demand later.
            mBuffer.setOutOfCore(true);
            mBuffer.mFileInfo = new typename Buffer::FileInfo;
            mBuffer.mFileInfo->meta = meta;
            mBuffer.mFileInfo->bufpos = is.tellg();
            mBuffer.mFileInfo->mapping = mappedFile;
            // Save the offset to the value mask, because the in-memory copy
            // might change before the value buffer gets read.
            mBuffer.mFileInfo->maskpos = maskpos;
            // Skip over voxel values.
            skipCompressedValues(seekable, is, fromHalf);
        } else {
            mBuffer.allocate();
            io::readCompressedValues(is, mBuffer.mData, SIZE, mValueMask, fromHalf);
            mBuffer.setOutOfCore(false);

            // Get this tree's background value.
            T background = zeroVal<T>();
            if (const void* bgPtr = io::getGridBackgroundValuePtr(is)) {
                background = *static_cast<const T*>(bgPtr);
            }
            this->clip(clipBBox, background);
        }
    }

    if (numBuffers > 1) {
        // Read in and discard auxiliary buffers that were created with earlier
        // versions of the library.  (Auxiliary buffers are not mask compressed.)
        const bool zipped = io::getDataCompression(is) & io::COMPRESS_ZIP;
        Buffer temp;
        for (int i = 1; i < numBuffers; ++i) {
            if (fromHalf) {
                io::HalfReader<io::RealToHalf<T>::isReal, T>::read(is, temp.mData, SIZE, zipped);
            } else {
                io::readData<T>(is, temp.mData, SIZE, zipped);
            }
        }
    }

    // increment the leaf number
    if (meta) meta->setLeaf(meta->leaf() + 1);
}


template<typename T, Index Log2Dim>
inline void
LeafNode<T, Log2Dim>::writeBuffers(std::ostream& os, bool toHalf) const
{
    // Write out the value mask.
mValueMask.save(os); mBuffer.loadValues(); io::writeCompressedValues(os, mBuffer.mData, SIZE, mValueMask, /*childMask=*/NodeMaskType(), toHalf); } //////////////////////////////////////// template<typename T, Index Log2Dim> inline bool LeafNode<T, Log2Dim>::operator==(const LeafNode& other) const { return mOrigin == other.mOrigin && mValueMask == other.valueMask() && mBuffer == other.mBuffer; } template<typename T, Index Log2Dim> inline Index64 LeafNode<T, Log2Dim>::memUsage() const { // Use sizeof(*this) to capture alignment-related padding // (but note that sizeof(*this) includes sizeof(mBuffer)). return sizeof(*this) + mBuffer.memUsage() - sizeof(mBuffer); } template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels) const { CoordBBox this_bbox = this->getNodeBoundingBox(); if (bbox.isInside(this_bbox)) return;//this LeafNode is already enclosed in the bbox if (ValueOnCIter iter = this->cbeginValueOn()) {//any active values? if (visitVoxels) {//use voxel granularity? 
this_bbox.reset(); for(; iter; ++iter) this_bbox.expand(this->offsetToLocalCoord(iter.pos())); this_bbox.translate(this->origin()); } bbox.expand(this_bbox); } } template<typename T, Index Log2Dim> template<typename OtherType, Index OtherLog2Dim> inline bool LeafNode<T, Log2Dim>::hasSameTopology(const LeafNode<OtherType, OtherLog2Dim>* other) const { assert(other); return (Log2Dim == OtherLog2Dim && mValueMask == other->getValueMask()); } template<typename T, Index Log2Dim> inline bool LeafNode<T, Log2Dim>::isConstant(ValueType& firstValue, bool& state, const ValueType& tolerance) const { if (!mValueMask.isConstant(state)) return false;// early termination firstValue = mBuffer[0]; for (Index i = 1; i < SIZE; ++i) { if ( !math::isApproxEqual(mBuffer[i], firstValue, tolerance) ) return false;// early termination } return true; } template<typename T, Index Log2Dim> inline bool LeafNode<T, Log2Dim>::isConstant(ValueType& minValue, ValueType& maxValue, bool& state, const ValueType& tolerance) const { if (!mValueMask.isConstant(state)) return false;// early termination minValue = maxValue = mBuffer[0]; for (Index i = 1; i < SIZE; ++i) { const T& v = mBuffer[i]; if (v < minValue) { if ((maxValue - v) > tolerance) return false;// early termination minValue = v; } else if (v > maxValue) { if ((v - minValue) > tolerance) return false;// early termination maxValue = v; } } return true; } template<typename T, Index Log2Dim> inline T LeafNode<T, Log2Dim>::medianAll(T *tmp) const { std::unique_ptr<T[]> data(nullptr); if (tmp == nullptr) {//allocate temporary storage data.reset(new T[NUM_VALUES]); tmp = data.get(); } if (tmp != mBuffer.data()) { const T* src = mBuffer.data(); for (T* dst = tmp; dst-tmp < NUM_VALUES;) *dst++ = *src++; } static const size_t midpoint = (NUM_VALUES - 1) >> 1; std::nth_element(tmp, tmp + midpoint, tmp + NUM_VALUES); return tmp[midpoint]; } template<typename T, Index Log2Dim> inline Index LeafNode<T, Log2Dim>::medianOn(T &value, T *tmp) const { const 
Index count = mValueMask.countOn(); if (count == NUM_VALUES) {//special case: all voxels are active value = this->medianAll(tmp); return NUM_VALUES; } else if (count == 0) { return 0; } std::unique_ptr<T[]> data(nullptr); if (tmp == nullptr) {//allocate temporary storage data.reset(new T[count]);// 0 < count < NUM_VALUES tmp = data.get(); } for (auto iter=this->cbeginValueOn(); iter; ++iter) *tmp++ = *iter; T *begin = tmp - count; const size_t midpoint = (count - 1) >> 1; std::nth_element(begin, begin + midpoint, tmp); value = begin[midpoint]; return count; } template<typename T, Index Log2Dim> inline Index LeafNode<T, Log2Dim>::medianOff(T &value, T *tmp) const { const Index count = mValueMask.countOff(); if (count == NUM_VALUES) {//special case: all voxels are inactive value = this->medianAll(tmp); return NUM_VALUES; } else if (count == 0) { return 0; } std::unique_ptr<T[]> data(nullptr); if (tmp == nullptr) {//allocate temporary storage data.reset(new T[count]);// 0 < count < NUM_VALUES tmp = data.get(); } for (auto iter=this->cbeginValueOff(); iter; ++iter) *tmp++ = *iter; T *begin = tmp - count; const size_t midpoint = (count - 1) >> 1; std::nth_element(begin, begin + midpoint, tmp); value = begin[midpoint]; return count; } //////////////////////////////////////// template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::addTile(Index /*level*/, const Coord& xyz, const ValueType& val, bool active) { this->addTile(this->coordToOffset(xyz), val, active); } template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::addTile(Index offset, const ValueType& val, bool active) { assert(offset < SIZE); setValueOnly(offset, val); setActiveState(offset, active); } template<typename T, Index Log2Dim> template<typename AccessorT> inline void LeafNode<T, Log2Dim>::addTileAndCache(Index level, const Coord& xyz, const ValueType& val, bool active, AccessorT&) { this->addTile(level, xyz, val, active); } //////////////////////////////////////// 
template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::resetBackground(const ValueType& oldBackground, const ValueType& newBackground) { if (!this->allocate()) return; typename NodeMaskType::OffIterator iter; // For all inactive values... for (iter = this->mValueMask.beginOff(); iter; ++iter) { ValueType &inactiveValue = mBuffer[iter.pos()]; if (math::isApproxEqual(inactiveValue, oldBackground)) { inactiveValue = newBackground; } else if (math::isApproxEqual(inactiveValue, math::negative(oldBackground))) { inactiveValue = math::negative(newBackground); } } } template<typename T, Index Log2Dim> template<MergePolicy Policy> inline void LeafNode<T, Log2Dim>::merge(const LeafNode& other) { if (!this->allocate()) return; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (Policy == MERGE_NODES) return; typename NodeMaskType::OnIterator iter = other.valueMask().beginOn(); for (; iter; ++iter) { const Index n = iter.pos(); if (mValueMask.isOff(n)) { mBuffer[n] = other.mBuffer[n]; mValueMask.setOn(n); } } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } template<typename T, Index Log2Dim> template<MergePolicy Policy> inline void LeafNode<T, Log2Dim>::merge(const LeafNode& other, const ValueType& /*bg*/, const ValueType& /*otherBG*/) { this->template merge<Policy>(other); } template<typename T, Index Log2Dim> template<MergePolicy Policy> inline void LeafNode<T, Log2Dim>::merge(const ValueType& tileValue, bool tileActive) { if (!this->allocate()) return; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (Policy != MERGE_ACTIVE_STATES_AND_NODES) return; if (!tileActive) return; // Replace all inactive values with the active tile value. 
for (typename NodeMaskType::OffIterator iter = mValueMask.beginOff(); iter; ++iter) { const Index n = iter.pos(); mBuffer[n] = tileValue; mValueMask.setOn(n); } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } template<typename T, Index Log2Dim> template<typename OtherType> inline void LeafNode<T, Log2Dim>::topologyUnion(const LeafNode<OtherType, Log2Dim>& other) { mValueMask |= other.valueMask(); } template<typename T, Index Log2Dim> template<typename OtherType> inline void LeafNode<T, Log2Dim>::topologyIntersection(const LeafNode<OtherType, Log2Dim>& other, const ValueType&) { mValueMask &= other.valueMask(); } template<typename T, Index Log2Dim> template<typename OtherType> inline void LeafNode<T, Log2Dim>::topologyDifference(const LeafNode<OtherType, Log2Dim>& other, const ValueType&) { mValueMask &= !other.valueMask(); } template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::negate() { if (!this->allocate()) return; for (Index i = 0; i < SIZE; ++i) { mBuffer[i] = -mBuffer[i]; } } //////////////////////////////////////// template<typename T, Index Log2Dim> template<typename CombineOp> inline void LeafNode<T, Log2Dim>::combine(const LeafNode& other, CombineOp& op) { if (!this->allocate()) return; CombineArgs<T> args; for (Index i = 0; i < SIZE; ++i) { op(args.setARef(mBuffer[i]) .setAIsActive(mValueMask.isOn(i)) .setBRef(other.mBuffer[i]) .setBIsActive(other.valueMask().isOn(i)) .setResultRef(mBuffer[i])); mValueMask.set(i, args.resultIsActive()); } } template<typename T, Index Log2Dim> template<typename CombineOp> inline void LeafNode<T, Log2Dim>::combine(const ValueType& value, bool valueIsActive, CombineOp& op) { if (!this->allocate()) return; CombineArgs<T> args; args.setBRef(value).setBIsActive(valueIsActive); for (Index i = 0; i < SIZE; ++i) { op(args.setARef(mBuffer[i]) .setAIsActive(mValueMask.isOn(i)) .setResultRef(mBuffer[i])); mValueMask.set(i, args.resultIsActive()); } } //////////////////////////////////////// template<typename T, Index 
Log2Dim> template<typename CombineOp, typename OtherType> inline void LeafNode<T, Log2Dim>::combine2(const LeafNode& other, const OtherType& value, bool valueIsActive, CombineOp& op) { if (!this->allocate()) return; CombineArgs<T, OtherType> args; args.setBRef(value).setBIsActive(valueIsActive); for (Index i = 0; i < SIZE; ++i) { op(args.setARef(other.mBuffer[i]) .setAIsActive(other.valueMask().isOn(i)) .setResultRef(mBuffer[i])); mValueMask.set(i, args.resultIsActive()); } } template<typename T, Index Log2Dim> template<typename CombineOp, typename OtherNodeT> inline void LeafNode<T, Log2Dim>::combine2(const ValueType& value, const OtherNodeT& other, bool valueIsActive, CombineOp& op) { if (!this->allocate()) return; CombineArgs<T, typename OtherNodeT::ValueType> args; args.setARef(value).setAIsActive(valueIsActive); for (Index i = 0; i < SIZE; ++i) { op(args.setBRef(other.mBuffer[i]) .setBIsActive(other.valueMask().isOn(i)) .setResultRef(mBuffer[i])); mValueMask.set(i, args.resultIsActive()); } } template<typename T, Index Log2Dim> template<typename CombineOp, typename OtherNodeT> inline void LeafNode<T, Log2Dim>::combine2(const LeafNode& b0, const OtherNodeT& b1, CombineOp& op) { if (!this->allocate()) return; CombineArgs<T, typename OtherNodeT::ValueType> args; for (Index i = 0; i < SIZE; ++i) { mValueMask.set(i, b0.valueMask().isOn(i) || b1.valueMask().isOn(i)); op(args.setARef(b0.mBuffer[i]) .setAIsActive(b0.valueMask().isOn(i)) .setBRef(b1.mBuffer[i]) .setBIsActive(b1.valueMask().isOn(i)) .setResultRef(mBuffer[i])); mValueMask.set(i, args.resultIsActive()); } } //////////////////////////////////////// template<typename T, Index Log2Dim> template<typename BBoxOp> inline void LeafNode<T, Log2Dim>::visitActiveBBox(BBoxOp& op) const { if (op.template descent<LEVEL>()) { for (ValueOnCIter i=this->cbeginValueOn(); i; ++i) { op.template operator()<LEVEL>(CoordBBox::createCube(i.getCoord(), 1)); } } else { op.template operator()<LEVEL>(this->getNodeBoundingBox()); } 
} template<typename T, Index Log2Dim> template<typename VisitorOp> inline void LeafNode<T, Log2Dim>::visit(VisitorOp& op) { doVisit<LeafNode, VisitorOp, ChildAllIter>(*this, op); } template<typename T, Index Log2Dim> template<typename VisitorOp> inline void LeafNode<T, Log2Dim>::visit(VisitorOp& op) const { doVisit<const LeafNode, VisitorOp, ChildAllCIter>(*this, op); } template<typename T, Index Log2Dim> template<typename NodeT, typename VisitorOp, typename ChildAllIterT> inline void LeafNode<T, Log2Dim>::doVisit(NodeT& self, VisitorOp& op) { for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { op(iter); } } //////////////////////////////////////// template<typename T, Index Log2Dim> template<typename OtherLeafNodeType, typename VisitorOp> inline void LeafNode<T, Log2Dim>::visit2Node(OtherLeafNodeType& other, VisitorOp& op) { doVisit2Node<LeafNode, OtherLeafNodeType, VisitorOp, ChildAllIter, typename OtherLeafNodeType::ChildAllIter>(*this, other, op); } template<typename T, Index Log2Dim> template<typename OtherLeafNodeType, typename VisitorOp> inline void LeafNode<T, Log2Dim>::visit2Node(OtherLeafNodeType& other, VisitorOp& op) const { doVisit2Node<const LeafNode, OtherLeafNodeType, VisitorOp, ChildAllCIter, typename OtherLeafNodeType::ChildAllCIter>(*this, other, op); } template<typename T, Index Log2Dim> template< typename NodeT, typename OtherNodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> inline void LeafNode<T, Log2Dim>::doVisit2Node(NodeT& self, OtherNodeT& other, VisitorOp& op) { // Allow the two nodes to have different ValueTypes, but not different dimensions. 
static_assert(OtherNodeT::SIZE == NodeT::SIZE, "can't visit nodes of different sizes simultaneously"); static_assert(OtherNodeT::LEVEL == NodeT::LEVEL, "can't visit nodes at different tree levels simultaneously"); ChildAllIterT iter = self.beginChildAll(); OtherChildAllIterT otherIter = other.beginChildAll(); for ( ; iter && otherIter; ++iter, ++otherIter) { op(iter, otherIter); } } //////////////////////////////////////// template<typename T, Index Log2Dim> template<typename IterT, typename VisitorOp> inline void LeafNode<T, Log2Dim>::visit2(IterT& otherIter, VisitorOp& op, bool otherIsLHS) { doVisit2<LeafNode, VisitorOp, ChildAllIter, IterT>( *this, otherIter, op, otherIsLHS); } template<typename T, Index Log2Dim> template<typename IterT, typename VisitorOp> inline void LeafNode<T, Log2Dim>::visit2(IterT& otherIter, VisitorOp& op, bool otherIsLHS) const { doVisit2<const LeafNode, VisitorOp, ChildAllCIter, IterT>( *this, otherIter, op, otherIsLHS); } template<typename T, Index Log2Dim> template< typename NodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> inline void LeafNode<T, Log2Dim>::doVisit2(NodeT& self, OtherChildAllIterT& otherIter, VisitorOp& op, bool otherIsLHS) { if (!otherIter) return; if (otherIsLHS) { for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { op(otherIter, iter); } } else { for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { op(iter, otherIter); } } } //////////////////////////////////////// template<typename T, Index Log2Dim> inline std::ostream& operator<<(std::ostream& os, const typename LeafNode<T, Log2Dim>::Buffer& buf) { for (Index32 i = 0, N = buf.size(); i < N; ++i) os << buf.mData[i] << ", "; return os; } } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb //////////////////////////////////////// // Specialization for LeafNodes of type bool #include "LeafNodeBool.h" // Specialization for LeafNodes with mask information only #include "LeafNodeMask.h" 
#endif // OPENVDB_TREE_LEAFNODE_HAS_BEEN_INCLUDED
75,094
C
36.831234
104
0.661504
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/Iterator.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file tree/Iterator.h /// /// @author Peter Cucka and Ken Museth #ifndef OPENVDB_TREE_ITERATOR_HAS_BEEN_INCLUDED #define OPENVDB_TREE_ITERATOR_HAS_BEEN_INCLUDED #include <sstream> #include <type_traits> #include <openvdb/util/NodeMasks.h> #include <openvdb/Exceptions.h> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { /// @brief Base class for iterators over internal and leaf nodes /// /// This class is typically not instantiated directly, since it doesn't provide methods /// to dereference the iterator. Those methods (@vdblink::tree::SparseIteratorBase::operator*() /// operator*()@endlink, @vdblink::tree::SparseIteratorBase::setValue() setValue()@endlink, etc.) /// are implemented in the @vdblink::tree::SparseIteratorBase sparse@endlink and /// @vdblink::tree::DenseIteratorBase dense@endlink iterator subclasses. template<typename MaskIterT, typename NodeT> class IteratorBase { public: IteratorBase(): mParentNode(nullptr) {} IteratorBase(const MaskIterT& iter, NodeT* parent): mParentNode(parent), mMaskIter(iter) {} IteratorBase(const IteratorBase&) = default; IteratorBase& operator=(const IteratorBase&) = default; bool operator==(const IteratorBase& other) const { return (mParentNode == other.mParentNode) && (mMaskIter == other.mMaskIter); } bool operator!=(const IteratorBase& other) const { return !(*this == other); } /// Return a pointer to the node (if any) over which this iterator is iterating. NodeT* getParentNode() const { return mParentNode; } /// @brief Return a reference to the node over which this iterator is iterating. /// @throw ValueError if there is no parent node. NodeT& parent() const { if (!mParentNode) OPENVDB_THROW(ValueError, "iterator references a null node"); return *mParentNode; } /// Return this iterator's position as an index into the parent node's table. 
Index offset() const { return mMaskIter.offset(); } /// Identical to offset Index pos() const { return mMaskIter.offset(); } /// Return @c true if this iterator is not yet exhausted. bool test() const { return mMaskIter.test(); } /// Return @c true if this iterator is not yet exhausted. operator bool() const { return this->test(); } /// Advance to the next item in the parent node's table. bool next() { return mMaskIter.next(); } /// Advance to the next item in the parent node's table. void increment() { mMaskIter.increment(); } /// Advance to the next item in the parent node's table. IteratorBase& operator++() { this->increment(); return *this; } /// Advance @a n items in the parent node's table. void increment(Index n) { mMaskIter.increment(n); } /// @brief Return @c true if this iterator is pointing to an active value. /// Return @c false if it is pointing to either an inactive value or a child node. bool isValueOn() const { return parent().isValueMaskOn(this->pos()); } /// @brief If this iterator is pointing to a value, set the value's active state. /// Otherwise, do nothing. void setValueOn(bool on = true) const { parent().setValueMask(this->pos(), on); } /// @brief If this iterator is pointing to a value, mark the value as inactive. /// @details If this iterator is pointing to a child node, then the current item /// in the parent node's table is required to be inactive. In that case, /// this method has no effect. void setValueOff() const { parent().mValueMask.setOff(this->pos()); } /// Return the coordinates of the item to which this iterator is pointing. Coord getCoord() const { return parent().offsetToGlobalCoord(this->pos()); } /// Return in @a xyz the coordinates of the item to which this iterator is pointing. void getCoord(Coord& xyz) const { xyz = this->getCoord(); } private: /// @note This parent node pointer is mutable, because setValueOn() and /// setValueOff(), though const, need to call non-const methods on the parent. 
/// There is a distinction between a const iterator (e.g., const ValueOnIter), /// which is an iterator that can't be incremented, and an iterator over /// a const node (e.g., ValueOnCIter), which might be const or non-const itself /// but can't call non-const methods like setValue() on the node. mutable NodeT* mParentNode; MaskIterT mMaskIter; }; // class IteratorBase //////////////////////////////////////// /// @brief Base class for sparse iterators over internal and leaf nodes template< typename MaskIterT, // mask iterator type (OnIterator, OffIterator, etc.) typename IterT, // SparseIteratorBase subclass (the "Curiously Recurring Template Pattern") typename NodeT, // type of node over which to iterate typename ItemT> // type of value to which this iterator points struct SparseIteratorBase: public IteratorBase<MaskIterT, NodeT> { using NodeType = NodeT; using ValueType = ItemT; using NonConstNodeType = typename std::remove_const<NodeT>::type; using NonConstValueType = typename std::remove_const<ItemT>::type; static const bool IsSparseIterator = true, IsDenseIterator = false; SparseIteratorBase() {} SparseIteratorBase(const MaskIterT& iter, NodeT* parent): IteratorBase<MaskIterT, NodeT>(iter, parent) {} /// @brief Return the item at the given index in the parent node's table. /// @note All subclasses must implement this accessor. ItemT& getItem(Index) const; /// @brief Set the value of the item at the given index in the parent node's table. /// @note All non-const iterator subclasses must implement this accessor. void setItem(Index, const ItemT&) const; /// Return a reference to the item to which this iterator is pointing. ItemT& operator*() const { return this->getValue(); } /// Return a pointer to the item to which this iterator is pointing. ItemT* operator->() const { return &(this->operator*()); } /// Return the item to which this iterator is pointing. 
ItemT& getValue() const { return static_cast<const IterT*>(this)->getItem(this->pos()); // static polymorphism } /// @brief Set the value of the item to which this iterator is pointing. /// (Not valid for const iterators.) void setValue(const ItemT& value) const { static_assert(!std::is_const<NodeT>::value, "setValue() not allowed for const iterators"); static_cast<const IterT*>(this)->setItem(this->pos(), value); // static polymorphism } /// @brief Apply a functor to the item to which this iterator is pointing. /// (Not valid for const iterators.) /// @param op a functor of the form <tt>void op(ValueType&) const</tt> that modifies /// its argument in place /// @see Tree::modifyValue() template<typename ModifyOp> void modifyValue(const ModifyOp& op) const { static_assert(!std::is_const<NodeT>::value, "modifyValue() not allowed for const iterators"); static_cast<const IterT*>(this)->modifyItem(this->pos(), op); // static polymorphism } }; // class SparseIteratorBase //////////////////////////////////////// /// @brief Base class for dense iterators over internal and leaf nodes /// @note Dense iterators have no @c %operator*() or @c %operator->(), /// because their return type would have to vary depending on whether /// the iterator is pointing to a value or a child node. 
template< typename MaskIterT, // mask iterator type (typically a DenseIterator) typename IterT, // DenseIteratorBase subclass (the "Curiously Recurring Template Pattern") typename NodeT, // type of node over which to iterate typename SetItemT, // type of set value (ChildNodeType, for non-leaf nodes) typename UnsetItemT> // type of unset value (ValueType, usually) struct DenseIteratorBase: public IteratorBase<MaskIterT, NodeT> { using NodeType = NodeT; using ValueType = UnsetItemT; using ChildNodeType = SetItemT; using NonConstNodeType = typename std::remove_const<NodeT>::type; using NonConstValueType = typename std::remove_const<UnsetItemT>::type; using NonConstChildNodeType = typename std::remove_const<SetItemT>::type; static const bool IsSparseIterator = false, IsDenseIterator = true; DenseIteratorBase() {} DenseIteratorBase(const MaskIterT& iter, NodeT* parent): IteratorBase<MaskIterT, NodeT>(iter, parent) {} /// @brief Return @c true if the item at the given index in the parent node's table /// is a set value and return either the set value in @a child or the unset value /// in @a value. /// @note All subclasses must implement this accessor. bool getItem(Index, SetItemT*& child, NonConstValueType& value) const; /// @brief Set the value of the item at the given index in the parent node's table. /// @note All non-const iterator subclasses must implement this accessor. void setItem(Index, SetItemT*) const; /// @brief "Unset" the value of the item at the given index in the parent node's table. /// @note All non-const iterator subclasses must implement this accessor. void unsetItem(Index, const UnsetItemT&) const; /// Return @c true if this iterator is pointing to a child node. bool isChildNode() const { return this->parent().isChildMaskOn(this->pos()); } /// @brief If this iterator is pointing to a child node, return a pointer to the node. /// Otherwise, return nullptr and, in @a value, the value to which this iterator is pointing. 
SetItemT* probeChild(NonConstValueType& value) const { SetItemT* child = nullptr; static_cast<const IterT*>(this)->getItem(this->pos(), child, value); // static polymorphism return child; } /// @brief If this iterator is pointing to a child node, return @c true and return /// a pointer to the child node in @a child. Otherwise, return @c false and return /// the value to which this iterator is pointing in @a value. bool probeChild(SetItemT*& child, NonConstValueType& value) const { child = probeChild(value); return (child != nullptr); } /// @brief Return @c true if this iterator is pointing to a value and return /// the value in @a value. Otherwise, return @c false. bool probeValue(NonConstValueType& value) const { SetItemT* child = nullptr; const bool isChild = static_cast<const IterT*>(this)-> // static polymorphism getItem(this->pos(), child, value); return !isChild; } /// @brief Replace with the given child node the item in the parent node's table /// to which this iterator is pointing. void setChild(SetItemT* child) const { static_cast<const IterT*>(this)->setItem(this->pos(), child); // static polymorphism } /// @brief Replace with the given value the item in the parent node's table /// to which this iterator is pointing. void setValue(const UnsetItemT& value) const { static_cast<const IterT*>(this)->unsetItem(this->pos(), value); // static polymorphism } }; // struct DenseIteratorBase } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_ITERATOR_HAS_BEEN_INCLUDED
11,473
C
44.173228
99
0.683692
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/NodeManager.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file tree/NodeManager.h /// /// @authors Ken Museth, Dan Bailey /// /// @brief NodeManager produces linear arrays of all tree nodes /// allowing for efficient threading and bottom-up processing. /// /// @note A NodeManager can be constructed from a Tree or LeafManager. #ifndef OPENVDB_TREE_NODEMANAGER_HAS_BEEN_INCLUDED #define OPENVDB_TREE_NODEMANAGER_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <deque> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { // Produce linear arrays of all tree nodes, to facilitate efficient threading // and bottom-up processing. template<typename TreeOrLeafManagerT, Index LEVELS = TreeOrLeafManagerT::RootNodeType::LEVEL> class NodeManager; // Produce linear arrays of all tree nodes lazily, to facilitate efficient threading // of topology-changing top-down workflows. template<typename TreeOrLeafManagerT, Index _LEVELS = TreeOrLeafManagerT::RootNodeType::LEVEL> class DynamicNodeManager; //////////////////////////////////////// // This is a dummy node filtering class used by the NodeManager class to match // the internal filtering interface used by the DynamicNodeManager. struct NodeFilter { static bool valid(size_t) { return true; } }; // struct NodeFilter /// @brief This class caches tree nodes of a specific type in a linear array. /// /// @note It is for internal use and should rarely be used directly. 
template<typename NodeT> class NodeList { public: NodeList() = default; NodeT& operator()(size_t n) const { assert(n<mNodeCount); return *(mNodes[n]); } NodeT*& operator[](size_t n) { assert(n<mNodeCount); return mNodes[n]; } Index64 nodeCount() const { return mNodeCount; } void clear() { mNodePtrs.reset(); mNodes = nullptr; mNodeCount = 0; } // initialize this node list from the provided root node template <typename RootT> bool initRootChildren(RootT& root) { // Allocate (or deallocate) the node pointer array size_t nodeCount = root.childCount(); if (nodeCount != mNodeCount) { if (nodeCount > 0) { mNodePtrs.reset(new NodeT*[nodeCount]); mNodes = mNodePtrs.get(); } else { mNodePtrs.reset(); mNodes = nullptr; } mNodeCount = nodeCount; } if (mNodeCount == 0) return false; // Populate the node pointers NodeT** nodePtr = mNodes; for (auto iter = root.beginChildOn(); iter; ++iter) { *nodePtr++ = &iter.getValue(); } return true; } // initialize this node list from another node list containing the parent nodes template <typename ParentsT, typename NodeFilterT> bool initNodeChildren(ParentsT& parents, const NodeFilterT& nodeFilter = NodeFilterT(), bool serial = false) { // Compute the node counts for each node std::vector<Index32> nodeCounts; if (serial) { nodeCounts.reserve(parents.nodeCount()); for (size_t i = 0; i < parents.nodeCount(); i++) { if (!nodeFilter.valid(i)) nodeCounts.push_back(0); else nodeCounts.push_back(parents(i).childCount()); } } else { nodeCounts.resize(parents.nodeCount()); tbb::parallel_for( // with typical node sizes and SSE enabled, there are only a handful // of instructions executed per-operation with a default grainsize // of 1, so increase to 64 to reduce parallel scheduling overhead tbb::blocked_range<Index64>(0, parents.nodeCount(), /*grainsize=*/64), [&](tbb::blocked_range<Index64>& range) { for (Index64 i = range.begin(); i < range.end(); i++) { if (!nodeFilter.valid(i)) nodeCounts[i] = 0; else nodeCounts[i] = parents(i).childCount(); } } 
); } // Turn node counts into a cumulative histogram and obtain total node count for (size_t i = 1; i < nodeCounts.size(); i++) { nodeCounts[i] += nodeCounts[i-1]; } const size_t nodeCount = nodeCounts.empty() ? 0 : nodeCounts.back(); // Allocate (or deallocate) the node pointer array if (nodeCount != mNodeCount) { if (nodeCount > 0) { mNodePtrs.reset(new NodeT*[nodeCount]); mNodes = mNodePtrs.get(); } else { mNodePtrs.reset(); mNodes = nullptr; } mNodeCount = nodeCount; } if (mNodeCount == 0) return false; // Populate the node pointers if (serial) { NodeT** nodePtr = mNodes; for (size_t i = 0; i < parents.nodeCount(); i++) { if (!nodeFilter.valid(i)) continue; for (auto iter = parents(i).beginChildOn(); iter; ++iter) { *nodePtr++ = &iter.getValue(); } } } else { tbb::parallel_for( tbb::blocked_range<Index64>(0, parents.nodeCount()), [&](tbb::blocked_range<Index64>& range) { Index64 i = range.begin(); NodeT** nodePtr = mNodes; if (i > 0) nodePtr += nodeCounts[i-1]; for ( ; i < range.end(); i++) { if (!nodeFilter.valid(i)) continue; for (auto iter = parents(i).beginChildOn(); iter; ++iter) { *nodePtr++ = &iter.getValue(); } } } ); } return true; } class NodeRange { public: NodeRange(size_t begin, size_t end, const NodeList& nodeList, size_t grainSize=1): mEnd(end), mBegin(begin), mGrainSize(grainSize), mNodeList(nodeList) {} NodeRange(NodeRange& r, tbb::split): mEnd(r.mEnd), mBegin(doSplit(r)), mGrainSize(r.mGrainSize), mNodeList(r.mNodeList) {} size_t size() const { return mEnd - mBegin; } size_t grainsize() const { return mGrainSize; } const NodeList& nodeList() const { return mNodeList; } bool empty() const {return !(mBegin < mEnd);} bool is_divisible() const {return mGrainSize < this->size();} class Iterator { public: Iterator(const NodeRange& range, size_t pos): mRange(range), mPos(pos) { assert(this->isValid()); } Iterator(const Iterator&) = default; Iterator& operator=(const Iterator&) = default; /// Advance to the next node. 
Iterator& operator++() { ++mPos; return *this; } /// Return a reference to the node to which this iterator is pointing. NodeT& operator*() const { return mRange.mNodeList(mPos); } /// Return a pointer to the node to which this iterator is pointing. NodeT* operator->() const { return &(this->operator*()); } /// Return the index into the list of the current node. size_t pos() const { return mPos; } bool isValid() const { return mPos>=mRange.mBegin && mPos<=mRange.mEnd; } /// Return @c true if this iterator is not yet exhausted. bool test() const { return mPos < mRange.mEnd; } /// Return @c true if this iterator is not yet exhausted. operator bool() const { return this->test(); } /// Return @c true if this iterator is exhausted. bool empty() const { return !this->test(); } bool operator!=(const Iterator& other) const { return (mPos != other.mPos) || (&mRange != &other.mRange); } bool operator==(const Iterator& other) const { return !(*this != other); } const NodeRange& nodeRange() const { return mRange; } private: const NodeRange& mRange; size_t mPos; };// NodeList::NodeRange::Iterator Iterator begin() const {return Iterator(*this, mBegin);} Iterator end() const {return Iterator(*this, mEnd);} private: size_t mEnd, mBegin, mGrainSize; const NodeList& mNodeList; static size_t doSplit(NodeRange& r) { assert(r.is_divisible()); size_t middle = r.mBegin + (r.mEnd - r.mBegin) / 2u; r.mEnd = middle; return middle; } };// NodeList::NodeRange /// Return a TBB-compatible NodeRange. 
NodeRange nodeRange(size_t grainsize = 1) const { return NodeRange(0, this->nodeCount(), *this, grainsize); } template<typename NodeOp> void foreach(const NodeOp& op, bool threaded = true, size_t grainSize=1) { NodeTransformer<NodeOp> transform(op); transform.run(this->nodeRange(grainSize), threaded); } template<typename NodeOp> void reduce(NodeOp& op, bool threaded = true, size_t grainSize=1) { NodeReducer<NodeOp> transform(op); transform.run(this->nodeRange(grainSize), threaded); } // identical to foreach except the operator() method has a node index template<typename NodeOp> void foreachWithIndex(const NodeOp& op, bool threaded = true, size_t grainSize=1) { NodeTransformer<NodeOp, OpWithIndex> transform(op); transform.run(this->nodeRange(grainSize), threaded); } // identical to reduce except the operator() method has a node index template<typename NodeOp> void reduceWithIndex(NodeOp& op, bool threaded = true, size_t grainSize=1) { NodeReducer<NodeOp, OpWithIndex> transform(op); transform.run(this->nodeRange(grainSize), threaded); } private: // default execution in the NodeManager ignores the node index // given by the iterator position struct OpWithoutIndex { template <typename T> static void eval(T& node, typename NodeRange::Iterator& iter) { node(*iter); } }; // execution in the DynamicNodeManager matches that of the LeafManager in // passing through the node index given by the iterator position struct OpWithIndex { template <typename T> static void eval(T& node, typename NodeRange::Iterator& iter) { node(*iter, iter.pos()); } }; // Private struct of NodeList that performs parallel_for template<typename NodeOp, typename OpT = OpWithoutIndex> struct NodeTransformer { NodeTransformer(const NodeOp& nodeOp) : mNodeOp(nodeOp) { } void run(const NodeRange& range, bool threaded = true) { threaded ? 
tbb::parallel_for(range, *this) : (*this)(range); } void operator()(const NodeRange& range) const { for (typename NodeRange::Iterator it = range.begin(); it; ++it) { OpT::template eval(mNodeOp, it); } } const NodeOp mNodeOp; };// NodeList::NodeTransformer // Private struct of NodeList that performs parallel_reduce template<typename NodeOp, typename OpT = OpWithoutIndex> struct NodeReducer { NodeReducer(NodeOp& nodeOp) : mNodeOp(&nodeOp) { } NodeReducer(const NodeReducer& other, tbb::split) : mNodeOpPtr(std::make_unique<NodeOp>(*(other.mNodeOp), tbb::split())) , mNodeOp(mNodeOpPtr.get()) { } void run(const NodeRange& range, bool threaded = true) { threaded ? tbb::parallel_reduce(range, *this) : (*this)(range); } void operator()(const NodeRange& range) { for (typename NodeRange::Iterator it = range.begin(); it; ++it) { OpT::template eval(*mNodeOp, it); } } void join(const NodeReducer& other) { mNodeOp->join(*(other.mNodeOp)); } std::unique_ptr<NodeOp> mNodeOpPtr; NodeOp *mNodeOp = nullptr; };// NodeList::NodeReducer protected: size_t mNodeCount = 0; std::unique_ptr<NodeT*[]> mNodePtrs; NodeT** mNodes = nullptr; };// NodeList ///////////////////////////////////////////// /// @brief This class is a link in a chain that each caches tree nodes /// of a specific type in a linear array. /// /// @note It is for internal use and should rarely be used directly. 
template<typename NodeT, Index LEVEL> class NodeManagerLink { public: using NonConstChildNodeType = typename NodeT::ChildNodeType; using ChildNodeType = typename CopyConstness<NodeT, NonConstChildNodeType>::Type; NodeManagerLink() = default; void clear() { mList.clear(); mNext.clear(); } template <typename RootT> void initRootChildren(RootT& root, bool serial = false) { mList.initRootChildren(root); mNext.initNodeChildren(mList, serial); } template<typename ParentsT> void initNodeChildren(ParentsT& parents, bool serial = false) { mList.initNodeChildren(parents, NodeFilter(), serial); mNext.initNodeChildren(mList, serial); } Index64 nodeCount() const { return mList.nodeCount() + mNext.nodeCount(); } Index64 nodeCount(Index i) const { return i==NodeT::LEVEL ? mList.nodeCount() : mNext.nodeCount(i); } template<typename NodeOp> void foreachBottomUp(const NodeOp& op, bool threaded, size_t grainSize) { mNext.foreachBottomUp(op, threaded, grainSize); mList.foreach(op, threaded, grainSize); } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded, size_t grainSize) { mList.foreach(op, threaded, grainSize); mNext.foreachTopDown(op, threaded, grainSize); } template<typename NodeOp> void reduceBottomUp(NodeOp& op, bool threaded, size_t grainSize) { mNext.reduceBottomUp(op, threaded, grainSize); mList.reduce(op, threaded, grainSize); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded, size_t grainSize) { mList.reduce(op, threaded, grainSize); mNext.reduceTopDown(op, threaded, grainSize); } protected: NodeList<NodeT> mList; NodeManagerLink<ChildNodeType, LEVEL-1> mNext; };// NodeManagerLink class //////////////////////////////////////// /// @private /// @brief Specialization that terminates the chain of cached tree nodes /// @note It is for internal use and should rarely be used directly. 
template<typename NodeT> class NodeManagerLink<NodeT, 0> { public: NodeManagerLink() = default; /// @brief Clear all the cached tree nodes void clear() { mList.clear(); } template <typename RootT> void initRootChildren(RootT& root, bool /*serial*/ = false) { mList.initRootChildren(root); } template<typename ParentsT> void initNodeChildren(ParentsT& parents, bool serial = false) { mList.initNodeChildren(parents, NodeFilter(), serial); } Index64 nodeCount() const { return mList.nodeCount(); } Index64 nodeCount(Index) const { return mList.nodeCount(); } template<typename NodeOp> void foreachBottomUp(const NodeOp& op, bool threaded, size_t grainSize) { mList.foreach(op, threaded, grainSize); } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded, size_t grainSize) { mList.foreach(op, threaded, grainSize); } template<typename NodeOp> void reduceBottomUp(NodeOp& op, bool threaded, size_t grainSize) { mList.reduce(op, threaded, grainSize); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded, size_t grainSize) { mList.reduce(op, threaded, grainSize); } protected: NodeList<NodeT> mList; };// NodeManagerLink class //////////////////////////////////////// /// @brief To facilitate threading over the nodes of a tree, cache /// node pointers in linear arrays, one for each level of the tree. /// /// @details This implementation works with trees of any depth, but /// optimized specializations are provided for the most typical tree depths. 
template<typename TreeOrLeafManagerT, Index _LEVELS> class NodeManager { public: static const Index LEVELS = _LEVELS; static_assert(LEVELS > 0, "expected instantiation of template specialization"); // see specialization below using NonConstRootNodeType = typename TreeOrLeafManagerT::RootNodeType; using RootNodeType = typename CopyConstness<TreeOrLeafManagerT, NonConstRootNodeType>::Type; using NonConstChildNodeType = typename RootNodeType::ChildNodeType; using ChildNodeType = typename CopyConstness<TreeOrLeafManagerT, NonConstChildNodeType>::Type; static_assert(RootNodeType::LEVEL >= LEVELS, "number of levels exceeds root node height"); NodeManager(TreeOrLeafManagerT& tree, bool serial = false) : mRoot(tree.root()) { this->rebuild(serial); } NodeManager(const NodeManager&) = delete; /// @brief Clear all the cached tree nodes void clear() { mChain.clear(); } /// @brief Clear and recache all the tree nodes from the /// tree. This is required if tree nodes have been added or removed. void rebuild(bool serial = false) { mChain.initRootChildren(mRoot, serial); } /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } /// @brief Return the total number of cached nodes (excluding the root node) Index64 nodeCount() const { return mChain.nodeCount(); } /// @brief Return the number of cached nodes at level @a i, where /// 0 corresponds to the lowest level. Index64 nodeCount(Index i) const { return mChain.nodeCount(i); } //@{ /// @brief Threaded method that applies a user-supplied functor /// to all the nodes in the tree. /// /// @param op user-supplied functor, see examples for interface details. /// @param threaded optional toggle to disable threading, on by default. /// @param grainSize optional parameter to specify the grainsize /// for threading, one by default. /// /// @warning The functor object is deep-copied to create TBB tasks. /// /// @par Example: /// @code /// // Functor to offset all the inactive values of a tree. 
Note /// // this implementation also illustrates how different /// // computation can be applied to the different node types. /// template<typename TreeType> /// struct OffsetOp /// { /// using ValueT = typename TreeT::ValueType; /// using RootT = typename TreeT::RootNodeType; /// using LeafT = typename TreeT::LeafNodeType; /// OffsetOp(const ValueT& v) : mOffset(v) {} /// /// // Processes the root node. Required by the NodeManager /// void operator()(RootT& root) const /// { /// for (typename RootT::ValueOffIter i = root.beginValueOff(); i; ++i) *i += mOffset; /// } /// // Processes the leaf nodes. Required by the NodeManager /// void operator()(LeafT& leaf) const /// { /// for (typename LeafT::ValueOffIter i = leaf.beginValueOff(); i; ++i) *i += mOffset; /// } /// // Processes the internal nodes. Required by the NodeManager /// template<typename NodeT> /// void operator()(NodeT& node) const /// { /// for (typename NodeT::ValueOffIter i = node.beginValueOff(); i; ++i) *i += mOffset; /// } /// private: /// const ValueT mOffset; /// }; /// /// // usage: /// OffsetOp<FloatTree> op(3.0f); /// tree::NodeManager<FloatTree> nodes(tree); /// nodes.foreachBottomUp(op); /// /// // or if a LeafManager already exists /// using T = tree::LeafManager<FloatTree>; /// OffsetOp<T> op(3.0f); /// tree::NodeManager<T> nodes(leafManager); /// nodes.foreachBottomUp(op); /// /// @endcode template<typename NodeOp> void foreachBottomUp(const NodeOp& op, bool threaded = true, size_t grainSize=1) { mChain.foreachBottomUp(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mChain.foreachTopDown(op, threaded, grainSize); } //@} //@{ /// @brief Threaded method that processes nodes with a user supplied functor /// /// @param op user-supplied functor, see examples for interface details. /// @param threaded optional toggle to disable threading, on by default. 
/// @param grainSize optional parameter to specify the grainsize /// for threading, one by default. /// /// @warning The functor object is deep-copied to create TBB tasks. /// /// @par Example: /// @code /// // Functor to count nodes in a tree /// template<typename TreeType> /// struct NodeCountOp /// { /// NodeCountOp() : nodeCount(TreeType::DEPTH, 0), totalCount(0) /// { /// } /// NodeCountOp(const NodeCountOp& other, tbb::split) : /// nodeCount(TreeType::DEPTH, 0), totalCount(0) /// { /// } /// void join(const NodeCountOp& other) /// { /// for (size_t i = 0; i < nodeCount.size(); ++i) { /// nodeCount[i] += other.nodeCount[i]; /// } /// totalCount += other.totalCount; /// } /// // do nothing for the root node /// void operator()(const typename TreeT::RootNodeType& node) /// { /// } /// // count the internal and leaf nodes /// template<typename NodeT> /// void operator()(const NodeT& node) /// { /// ++(nodeCount[NodeT::LEVEL]); /// ++totalCount; /// } /// std::vector<openvdb::Index64> nodeCount; /// openvdb::Index64 totalCount; /// }; /// /// // usage: /// NodeCountOp<FloatTree> op; /// tree::NodeManager<FloatTree> nodes(tree); /// nodes.reduceBottomUp(op); /// /// // or if a LeafManager already exists /// NodeCountOp<FloatTree> op; /// using T = tree::LeafManager<FloatTree>; /// T leafManager(tree); /// tree::NodeManager<T> nodes(leafManager); /// nodes.reduceBottomUp(op); /// /// @endcode template<typename NodeOp> void reduceBottomUp(NodeOp& op, bool threaded = true, size_t grainSize=1) { mChain.reduceBottomUp(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mChain.reduceTopDown(op, threaded, grainSize); } //@} protected: RootNodeType& mRoot; NodeManagerLink<ChildNodeType, LEVELS-1> mChain; };// NodeManager class //////////////////////////////////////////// // Wraps a user-supplied DynamicNodeManager operator and stores the return // value of the operator() 
method to the index of the node in a bool array template <typename OpT> struct ForeachFilterOp { explicit ForeachFilterOp(const OpT& op, openvdb::Index64 size) : mOp(op) , mValidPtr(std::make_unique<bool[]>(size)) , mValid(mValidPtr.get()) { } ForeachFilterOp(const ForeachFilterOp& other) : mOp(other.mOp) , mValid(other.mValid) { } template<typename NodeT> void operator()(NodeT& node, size_t idx) const { mValid[idx] = mOp(node, idx); } bool valid(size_t idx) const { return mValid[idx]; } const OpT& op() const { return mOp; } private: const OpT& mOp; std::unique_ptr<bool[]> mValidPtr; bool* mValid = nullptr; }; // struct ForeachFilterOp // Wraps a user-supplied DynamicNodeManager operator and stores the return // value of the operator() method to the index of the node in a bool array template <typename OpT> struct ReduceFilterOp { ReduceFilterOp(OpT& op, openvdb::Index64 size) : mOp(&op) , mValidPtr(std::make_unique<bool[]>(size)) , mValid(mValidPtr.get()) { } ReduceFilterOp(const ReduceFilterOp& other) : mOp(other.mOp) , mValid(other.mValid) { } ReduceFilterOp(const ReduceFilterOp& other, tbb::split) : mOpPtr(std::make_unique<OpT>(*(other.mOp), tbb::split())) , mOp(mOpPtr.get()) , mValid(other.mValid) { } template<typename NodeT> void operator()(NodeT& node, size_t idx) const { mValid[idx] = (*mOp)(node, idx); } void join(const ReduceFilterOp& other) { mOp->join(*(other.mOp)); } bool valid(size_t idx) const { return mValid[idx]; } OpT& op() { return *mOp; } private: std::unique_ptr<OpT> mOpPtr; OpT* mOp = nullptr; std::unique_ptr<bool[]> mValidPtr; bool* mValid = nullptr; }; // struct ReduceFilterOp /// @brief This class is a link in a chain that each caches tree nodes /// of a specific type in a linear array. /// /// @note It is for internal use and should rarely be used directly. 
template<typename NodeT, Index LEVEL> class DynamicNodeManagerLink { public: DynamicNodeManagerLink() = default; template<typename NodeOpT, typename RootT> void foreachTopDown(const NodeOpT& op, RootT& root, bool threaded, size_t grainSize) { if (!op(root, /*index=*/0)) return; if (!mList.initRootChildren(root)) return; ForeachFilterOp<NodeOpT> filterOp(op, mList.nodeCount()); mList.foreachWithIndex(filterOp, threaded, grainSize); mNext.foreachTopDownRecurse(filterOp, mList, threaded, grainSize); } template<typename FilterOpT, typename ParentT> void foreachTopDownRecurse(const FilterOpT& filterOp, ParentT& parent, bool threaded, size_t grainSize) { if (!mList.initNodeChildren(parent, filterOp, !threaded)) return; FilterOpT childFilterOp(filterOp.op(), mList.nodeCount()); mList.foreachWithIndex(childFilterOp, threaded, grainSize); } template<typename NodeOpT, typename RootT> void reduceTopDown(NodeOpT& op, RootT& root, bool threaded, size_t grainSize) { if (!op(root, /*index=*/0)) return; if (!mList.initRootChildren(root)) return; ReduceFilterOp<NodeOpT> filterOp(op, mList.nodeCount()); mList.reduceWithIndex(filterOp, threaded, grainSize); mNext.reduceTopDownRecurse(filterOp, mList, threaded, grainSize); } template<typename FilterOpT, typename ParentT> void reduceTopDownRecurse(FilterOpT& filterOp, ParentT& parent, bool threaded, size_t grainSize) { if (!mList.initNodeChildren(parent, filterOp, !threaded)) return; FilterOpT childFilterOp(filterOp.op(), mList.nodeCount()); mList.reduceWithIndex(childFilterOp, threaded, grainSize); } protected: NodeList<NodeT> mList; DynamicNodeManagerLink<typename NodeT::ChildNodeType, LEVEL-1> mNext; };// DynamicNodeManagerLink class /// @private /// @brief Specialization that terminates the chain of cached tree nodes /// @note It is for internal use and should rarely be used directly. 
template<typename NodeT> class DynamicNodeManagerLink<NodeT, 0> { public: DynamicNodeManagerLink() = default; template<typename NodeFilterOp, typename ParentT> void foreachTopDownRecurse(const NodeFilterOp& nodeFilterOp, ParentT& parent, bool threaded, size_t grainSize) { if (!mList.initNodeChildren(parent, nodeFilterOp, !threaded)) return; mList.foreachWithIndex(nodeFilterOp.op(), threaded, grainSize); } template<typename NodeFilterOp, typename ParentT> void reduceTopDownRecurse(NodeFilterOp& nodeFilterOp, ParentT& parent, bool threaded, size_t grainSize) { if (!mList.initNodeChildren(parent, nodeFilterOp, !threaded)) return; mList.reduceWithIndex(nodeFilterOp.op(), threaded, grainSize); } protected: NodeList<NodeT> mList; };// DynamicNodeManagerLink class template<typename TreeOrLeafManagerT, Index _LEVELS> class DynamicNodeManager { public: static const Index LEVELS = _LEVELS; static_assert(LEVELS > 0, "expected instantiation of template specialization"); // see specialization below using RootNodeType = typename TreeOrLeafManagerT::RootNodeType; static_assert(RootNodeType::LEVEL >= LEVELS, "number of levels exceeds root node height"); explicit DynamicNodeManager(TreeOrLeafManagerT& tree) : mRoot(tree.root()) { } DynamicNodeManager(const DynamicNodeManager&) = delete; /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } /// @brief Threaded method that applies a user-supplied functor /// to all the nodes in the tree. /// /// @param op user-supplied functor, see examples for interface details. /// @param threaded optional toggle to disable threading, on by default. /// @param grainSize optional parameter to specify the grainsize /// for threading, one by default. 
/// /// @note There are two key differences to the interface of the /// user-supplied functor to the NodeManager class - (1) the operator() /// method aligns with the LeafManager class in expecting the index of the /// node in a linear array of identical node types, (2) the operator() /// method returns a boolean termination value with true indicating that /// children of this node should be processed, false indicating the /// early-exit termination should occur. /// /// @par Example: /// @code /// // Functor to densify the first child node in a linear array. Note /// // this implementation also illustrates how different /// // computation can be applied to the different node types. /// /// template<typename TreeT> /// struct DensifyOp /// { /// using RootT = typename TreeT::RootNodeType; /// using LeafT = typename TreeT::LeafNodeType; /// /// DensifyOp() = default; /// /// // Processes the root node. Required by the DynamicNodeManager /// bool operator()(RootT&, size_t) const { return true; } /// /// // Processes the internal nodes. Required by the DynamicNodeManager /// template<typename NodeT> /// bool operator()(NodeT& node, size_t idx) const /// { /// // densify child /// for (auto iter = node.cbeginValueAll(); iter; ++iter) { /// const openvdb::Coord ijk = iter.getCoord(); /// node.addChild(new typename NodeT::ChildNodeType(iter.getCoord(), NodeT::LEVEL, true)); /// } /// // early-exit termination for all non-zero index children /// return idx == 0; /// } /// // Processes the leaf nodes. 
Required by the DynamicNodeManager /// bool operator()(LeafT&, size_t) const /// { /// return true; /// } /// };// DensifyOp /// /// // usage: /// DensifyOp<FloatTree> op; /// tree::DynamicNodeManager<FloatTree> nodes(tree); /// nodes.foreachTopDown(op); /// /// @endcode template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { mChain.foreachTopDown(op, mRoot, threaded, grainSize); } /// @brief Threaded method that processes nodes with a user supplied functor /// /// @param op user-supplied functor, see examples for interface details. /// @param threaded optional toggle to disable threading, on by default. /// @param grainSize optional parameter to specify the grainsize /// for threading, one by default. /// /// @note There are two key differences to the interface of the /// user-supplied functor to the NodeManager class - (1) the operator() /// method aligns with the LeafManager class in expecting the index of the /// node in a linear array of identical node types, (2) the operator() /// method returns a boolean termination value with true indicating that /// children of this node should be processed, false indicating the /// early-exit termination should occur. 
/// /// @par Example: /// @code /// // Functor to count nodes in a tree /// template<typename TreeType> /// struct NodeCountOp /// { /// NodeCountOp() : nodeCount(TreeType::DEPTH, 0), totalCount(0) /// { /// } /// NodeCountOp(const NodeCountOp& other, tbb::split) : /// nodeCount(TreeType::DEPTH, 0), totalCount(0) /// { /// } /// void join(const NodeCountOp& other) /// { /// for (size_t i = 0; i < nodeCount.size(); ++i) { /// nodeCount[i] += other.nodeCount[i]; /// } /// totalCount += other.totalCount; /// } /// // do nothing for the root node /// bool operator()(const typename TreeT::RootNodeType& node, size_t) /// { /// return true; /// } /// // count the internal and leaf nodes /// template<typename NodeT> /// bool operator()(const NodeT& node, size_t) /// { /// ++(nodeCount[NodeT::LEVEL]); /// ++totalCount; /// return true; /// } /// std::vector<openvdb::Index64> nodeCount; /// openvdb::Index64 totalCount; /// }; /// /// // usage: /// NodeCountOp<FloatTree> op; /// tree::DynamicNodeManager<FloatTree> nodes(tree); /// nodes.reduceTopDown(op); /// /// @endcode template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { mChain.reduceTopDown(op, mRoot, threaded, grainSize); } protected: RootNodeType& mRoot; DynamicNodeManagerLink<typename RootNodeType::ChildNodeType, LEVELS-1> mChain; };// DynamicNodeManager class //////////////////////////////////////////// /// @private /// Template specialization of the NodeManager with no caching of nodes template<typename TreeOrLeafManagerT> class NodeManager<TreeOrLeafManagerT, 0> { public: using NonConstRootNodeType = typename TreeOrLeafManagerT::RootNodeType; using RootNodeType = typename CopyConstness<TreeOrLeafManagerT, NonConstRootNodeType>::Type; static const Index LEVELS = 0; NodeManager(TreeOrLeafManagerT& tree, bool /*serial*/ = false) : mRoot(tree.root()) { } NodeManager(const NodeManager&) = delete; /// @brief Clear all the cached tree nodes void clear() {} /// @brief Clear 
and recache all the tree nodes from the /// tree. This is required if tree nodes have been added or removed. void rebuild(bool /*serial*/ = false) { } /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } /// @brief Return the total number of cached nodes (excluding the root node) Index64 nodeCount() const { return 0; } Index64 nodeCount(Index) const { return 0; } template<typename NodeOp> void foreachBottomUp(const NodeOp& op, bool, size_t) { op(mRoot); } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool, size_t) { op(mRoot); } template<typename NodeOp> void reduceBottomUp(NodeOp& op, bool, size_t) { op(mRoot); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool, size_t) { op(mRoot); } protected: RootNodeType& mRoot; }; // NodeManager<0> //////////////////////////////////////////// /// @private /// Template specialization of the NodeManager with one level of nodes template<typename TreeOrLeafManagerT> class NodeManager<TreeOrLeafManagerT, 1> { public: using NonConstRootNodeType = typename TreeOrLeafManagerT::RootNodeType; using RootNodeType = typename CopyConstness<TreeOrLeafManagerT, NonConstRootNodeType>::Type; static_assert(RootNodeType::LEVEL > 0, "expected instantiation of template specialization"); static const Index LEVELS = 1; NodeManager(TreeOrLeafManagerT& tree, bool serial = false) : mRoot(tree.root()) { this->rebuild(serial); } NodeManager(const NodeManager&) = delete; /// @brief Clear all the cached tree nodes void clear() { mList0.clear(); } /// @brief Clear and recache all the tree nodes from the /// tree. This is required if tree nodes have been added or removed. void rebuild(bool /*serial*/ = false) { mList0.initRootChildren(mRoot); } /// @brief Return a reference to the root node. 
const RootNodeType& root() const { return mRoot; } /// @brief Return the total number of cached nodes (excluding the root node) Index64 nodeCount() const { return mList0.nodeCount(); } /// @brief Return the number of cached nodes at level @a i, where /// 0 corresponds to the lowest level. Index64 nodeCount(Index i) const { return i==0 ? mList0.nodeCount() : 0; } template<typename NodeOp> void foreachBottomUp(const NodeOp& op, bool threaded = true, size_t grainSize=1) { mList0.foreach(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mList0.foreach(op, threaded, grainSize); } template<typename NodeOp> void reduceBottomUp(NodeOp& op, bool threaded = true, size_t grainSize=1) { mList0.reduce(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mList0.reduce(op, threaded, grainSize); } protected: using NodeT1 = RootNodeType; using NonConstNodeT0 = typename NodeT1::ChildNodeType; using NodeT0 = typename CopyConstness<RootNodeType, NonConstNodeT0>::Type; using ListT0 = NodeList<NodeT0>; NodeT1& mRoot; ListT0 mList0; }; // NodeManager<1> //////////////////////////////////////////// /// @private /// Template specialization of the NodeManager with two levels of nodes template<typename TreeOrLeafManagerT> class NodeManager<TreeOrLeafManagerT, 2> { public: using NonConstRootNodeType = typename TreeOrLeafManagerT::RootNodeType; using RootNodeType = typename CopyConstness<TreeOrLeafManagerT, NonConstRootNodeType>::Type; static_assert(RootNodeType::LEVEL > 1, "expected instantiation of template specialization"); static const Index LEVELS = 2; NodeManager(TreeOrLeafManagerT& tree, bool serial = false) : mRoot(tree.root()) { this->rebuild(serial); } NodeManager(const NodeManager&) = delete; /// @brief Clear all the cached tree nodes void clear() { mList0.clear(); mList1.clear(); 
} /// @brief Clear and recache all the tree nodes from the /// tree. This is required if tree nodes have been added or removed. void rebuild(bool serial = false) { mList1.initRootChildren(mRoot); mList0.initNodeChildren(mList1, NodeFilter(), serial); } /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } /// @brief Return the total number of cached nodes (excluding the root node) Index64 nodeCount() const { return mList0.nodeCount() + mList1.nodeCount(); } /// @brief Return the number of cached nodes at level @a i, where /// 0 corresponds to the lowest level. Index64 nodeCount(Index i) const { return i==0 ? mList0.nodeCount() : i==1 ? mList1.nodeCount() : 0; } template<typename NodeOp> void foreachBottomUp(const NodeOp& op, bool threaded = true, size_t grainSize=1) { mList0.foreach(op, threaded, grainSize); mList1.foreach(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mList1.foreach(op, threaded, grainSize); mList0.foreach(op, threaded, grainSize); } template<typename NodeOp> void reduceBottomUp(NodeOp& op, bool threaded = true, size_t grainSize=1) { mList0.reduce(op, threaded, grainSize); mList1.reduce(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mList1.reduce(op, threaded, grainSize); mList0.reduce(op, threaded, grainSize); } protected: using NodeT2 = RootNodeType; using NonConstNodeT1 = typename NodeT2::ChildNodeType; using NodeT1 = typename CopyConstness<RootNodeType, NonConstNodeT1>::Type; // upper level using NonConstNodeT0 = typename NodeT1::ChildNodeType; using NodeT0 = typename CopyConstness<RootNodeType, NonConstNodeT0>::Type; // lower level using ListT1 = NodeList<NodeT1>; // upper level using ListT0 = NodeList<NodeT0>; // lower level NodeT2& mRoot; ListT1 mList1; ListT0 mList0; }; // 
NodeManager<2> //////////////////////////////////////////// /// @private /// Template specialization of the NodeManager with three levels of nodes template<typename TreeOrLeafManagerT> class NodeManager<TreeOrLeafManagerT, 3> { public: using NonConstRootNodeType = typename TreeOrLeafManagerT::RootNodeType; using RootNodeType = typename CopyConstness<TreeOrLeafManagerT, NonConstRootNodeType>::Type; static_assert(RootNodeType::LEVEL > 2, "expected instantiation of template specialization"); static const Index LEVELS = 3; NodeManager(TreeOrLeafManagerT& tree, bool serial = false) : mRoot(tree.root()) { this->rebuild(serial); } NodeManager(const NodeManager&) = delete; /// @brief Clear all the cached tree nodes void clear() { mList0.clear(); mList1.clear(); mList2.clear(); } /// @brief Clear and recache all the tree nodes from the /// tree. This is required if tree nodes have been added or removed. void rebuild(bool serial = false) { mList2.initRootChildren(mRoot); mList1.initNodeChildren(mList2, NodeFilter(), serial); mList0.initNodeChildren(mList1, NodeFilter(), serial); } /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } /// @brief Return the total number of cached nodes (excluding the root node) Index64 nodeCount() const { return mList0.nodeCount()+mList1.nodeCount()+mList2.nodeCount(); } /// @brief Return the number of cached nodes at level @a i, where /// 0 corresponds to the lowest level. Index64 nodeCount(Index i) const { return i==0 ? mList0.nodeCount() : i==1 ? mList1.nodeCount() : i==2 ? 
mList2.nodeCount() : 0; } template<typename NodeOp> void foreachBottomUp(const NodeOp& op, bool threaded = true, size_t grainSize=1) { mList0.foreach(op, threaded, grainSize); mList1.foreach(op, threaded, grainSize); mList2.foreach(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mList2.foreach(op, threaded, grainSize); mList1.foreach(op, threaded, grainSize); mList0.foreach(op, threaded, grainSize); } template<typename NodeOp> void reduceBottomUp(NodeOp& op, bool threaded = true, size_t grainSize=1) { mList0.reduce(op, threaded, grainSize); mList1.reduce(op, threaded, grainSize); mList2.reduce(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mList2.reduce(op, threaded, grainSize); mList1.reduce(op, threaded, grainSize); mList0.reduce(op, threaded, grainSize); } protected: using NodeT3 = RootNodeType; using NonConstNodeT2 = typename NodeT3::ChildNodeType; using NodeT2 = typename CopyConstness<RootNodeType, NonConstNodeT2>::Type; // upper level using NonConstNodeT1 = typename NodeT2::ChildNodeType; using NodeT1 = typename CopyConstness<RootNodeType, NonConstNodeT1>::Type; // mid level using NonConstNodeT0 = typename NodeT1::ChildNodeType; using NodeT0 = typename CopyConstness<RootNodeType, NonConstNodeT0>::Type; // lower level using ListT2 = NodeList<NodeT2>; // upper level of internal nodes using ListT1 = NodeList<NodeT1>; // lower level of internal nodes using ListT0 = NodeList<NodeT0>; // lower level of internal nodes or leafs NodeT3& mRoot; ListT2 mList2; ListT1 mList1; ListT0 mList0; }; // NodeManager<3> //////////////////////////////////////////// /// @private /// Template specialization of the NodeManager with four levels of nodes template<typename TreeOrLeafManagerT> class NodeManager<TreeOrLeafManagerT, 4> { public: using NonConstRootNodeType = 
typename TreeOrLeafManagerT::RootNodeType; using RootNodeType = typename CopyConstness<TreeOrLeafManagerT, NonConstRootNodeType>::Type; static_assert(RootNodeType::LEVEL > 3, "expected instantiation of template specialization"); static const Index LEVELS = 4; NodeManager(TreeOrLeafManagerT& tree, bool serial = false) : mRoot(tree.root()) { this->rebuild(serial); } NodeManager(const NodeManager&) = delete; // disallow copy-construction /// @brief Clear all the cached tree nodes void clear() { mList0.clear(); mList1.clear(); mList2.clear(); mList3.clear(); } /// @brief Clear and recache all the tree nodes from the /// tree. This is required if tree nodes have been added or removed. void rebuild(bool serial = false) { mList3.initRootChildren(mRoot); mList2.initNodeChildren(mList3, NodeFilter(), serial); mList1.initNodeChildren(mList2, NodeFilter(), serial); mList0.initNodeChildren(mList1, NodeFilter(), serial); } /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } /// @brief Return the total number of cached nodes (excluding the root node) Index64 nodeCount() const { return mList0.nodeCount() + mList1.nodeCount() + mList2.nodeCount() + mList3.nodeCount(); } /// @brief Return the number of cached nodes at level @a i, where /// 0 corresponds to the lowest level. Index64 nodeCount(Index i) const { return i==0 ? mList0.nodeCount() : i==1 ? mList1.nodeCount() : i==2 ? mList2.nodeCount() : i==3 ? 
mList3.nodeCount() : 0; } template<typename NodeOp> void foreachBottomUp(const NodeOp& op, bool threaded = true, size_t grainSize=1) { mList0.foreach(op, threaded, grainSize); mList1.foreach(op, threaded, grainSize); mList2.foreach(op, threaded, grainSize); mList3.foreach(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mList3.foreach(op, threaded, grainSize); mList2.foreach(op, threaded, grainSize); mList1.foreach(op, threaded, grainSize); mList0.foreach(op, threaded, grainSize); } template<typename NodeOp> void reduceBottomUp(NodeOp& op, bool threaded = true, size_t grainSize=1) { mList0.reduce(op, threaded, grainSize); mList1.reduce(op, threaded, grainSize); mList2.reduce(op, threaded, grainSize); mList3.reduce(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mList3.reduce(op, threaded, grainSize); mList2.reduce(op, threaded, grainSize); mList1.reduce(op, threaded, grainSize); mList0.reduce(op, threaded, grainSize); } protected: using NodeT4 = RootNodeType; using NonConstNodeT3 = typename NodeT4::ChildNodeType; using NodeT3 = typename CopyConstness<RootNodeType, NonConstNodeT3>::Type; // upper level using NonConstNodeT2 = typename NodeT3::ChildNodeType; using NodeT2 = typename CopyConstness<RootNodeType, NonConstNodeT2>::Type; // upper mid level using NonConstNodeT1 = typename NodeT2::ChildNodeType; using NodeT1 = typename CopyConstness<RootNodeType, NonConstNodeT1>::Type; // lower mid level using NonConstNodeT0 = typename NodeT1::ChildNodeType; using NodeT0 = typename CopyConstness<RootNodeType, NonConstNodeT0>::Type; // lower level using ListT3 = NodeList<NodeT3>; // upper level of internal nodes using ListT2 = NodeList<NodeT2>; // upper mid level of internal nodes using ListT1 = NodeList<NodeT1>; // lower mid level of internal nodes using ListT0 = 
NodeList<NodeT0>; // lower level of internal nodes or leafs NodeT4& mRoot; ListT3 mList3; ListT2 mList2; ListT1 mList1; ListT0 mList0; }; // NodeManager<4> //////////////////////////////////////////// /// @private /// Template specialization of the DynamicNodeManager with one level of nodes template<typename TreeOrLeafManagerT> class DynamicNodeManager<TreeOrLeafManagerT, 1> { public: using RootNodeType = typename TreeOrLeafManagerT::RootNodeType; static_assert(RootNodeType::LEVEL > 0, "expected instantiation of template specialization"); static const Index LEVELS = 1; explicit DynamicNodeManager(TreeOrLeafManagerT& tree) : mRoot(tree.root()) { } DynamicNodeManager(const DynamicNodeManager&) = delete; /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { // root if (!op(mRoot, /*index=*/0)) return; // list0 if (!mList0.initRootChildren(mRoot)) return; ForeachFilterOp<NodeOp> nodeOp(op, mList0.nodeCount()); mList0.foreachWithIndex(nodeOp, threaded, grainSize); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { // root if (!op(mRoot, /*index=*/0)) return; // list0 if (!mList0.initRootChildren(mRoot)) return; ReduceFilterOp<NodeOp> nodeOp(op, mList0.nodeCount()); mList0.reduceWithIndex(nodeOp, threaded, grainSize); } protected: using NodeT1 = RootNodeType; using NodeT0 = typename NodeT1::ChildNodeType; using ListT0 = NodeList<NodeT0>; NodeT1& mRoot; ListT0 mList0; };// DynamicNodeManager<1> class //////////////////////////////////////////// /// @private /// Template specialization of the DynamicNodeManager with two levels of nodes template<typename TreeOrLeafManagerT> class DynamicNodeManager<TreeOrLeafManagerT, 2> { public: using RootNodeType = typename TreeOrLeafManagerT::RootNodeType; static_assert(RootNodeType::LEVEL > 1, "expected instantiation of template 
specialization"); static const Index LEVELS = 2; explicit DynamicNodeManager(TreeOrLeafManagerT& tree) : mRoot(tree.root()) { } DynamicNodeManager(const DynamicNodeManager&) = delete; /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { // root if (!op(mRoot, /*index=*/0)) return; // list1 if (!mList1.initRootChildren(mRoot)) return; ForeachFilterOp<NodeOp> nodeOp(op, mList1.nodeCount()); mList1.foreachWithIndex(nodeOp, threaded, grainSize); // list0 if (!mList0.initNodeChildren(mList1, nodeOp, !threaded)) return; mList0.foreachWithIndex(op, threaded, grainSize); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { // root if (!op(mRoot, /*index=*/0)) return; // list1 if (!mList1.initRootChildren(mRoot)) return; ReduceFilterOp<NodeOp> nodeOp(op, mList1.nodeCount()); mList1.reduceWithIndex(nodeOp, threaded, grainSize); // list0 if (!mList0.initNodeChildren(mList1, nodeOp, !threaded)) return; mList0.reduceWithIndex(op, threaded, grainSize); } protected: using NodeT2 = RootNodeType; using NodeT1 = typename NodeT2::ChildNodeType; // upper level using NodeT0 = typename NodeT1::ChildNodeType; // lower level using ListT1 = NodeList<NodeT1>; // upper level of internal nodes using ListT0 = NodeList<NodeT0>; // lower level of internal nodes or leafs NodeT2& mRoot; ListT1 mList1; ListT0 mList0; };// DynamicNodeManager<2> class //////////////////////////////////////////// /// @private /// Template specialization of the DynamicNodeManager with three levels of nodes template<typename TreeOrLeafManagerT> class DynamicNodeManager<TreeOrLeafManagerT, 3> { public: using RootNodeType = typename TreeOrLeafManagerT::RootNodeType; static_assert(RootNodeType::LEVEL > 2, "expected instantiation of template specialization"); static const Index LEVELS = 3; explicit 
DynamicNodeManager(TreeOrLeafManagerT& tree) : mRoot(tree.root()) { } DynamicNodeManager(const DynamicNodeManager&) = delete; /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { // root if (!op(mRoot, /*index=*/0)) return; // list2 if (!mList2.initRootChildren(mRoot)) return; ForeachFilterOp<NodeOp> nodeOp2(op, mList2.nodeCount()); mList2.foreachWithIndex(nodeOp2, threaded, grainSize); // list1 if (!mList1.initNodeChildren(mList2, nodeOp2, !threaded)) return; ForeachFilterOp<NodeOp> nodeOp1(op, mList1.nodeCount()); mList1.foreachWithIndex(nodeOp1, threaded, grainSize); // list0 if (!mList0.initNodeChildren(mList1, nodeOp1, !threaded)) return; mList0.foreachWithIndex(op, threaded, grainSize); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { // root if (!op(mRoot, /*index=*/0)) return; // list2 if (!mList2.initRootChildren(mRoot)) return; ReduceFilterOp<NodeOp> nodeOp2(op, mList2.nodeCount()); mList2.reduceWithIndex(nodeOp2, threaded, grainSize); // list1 if (!mList1.initNodeChildren(mList2, nodeOp2, !threaded)) return; ReduceFilterOp<NodeOp> nodeOp1(op, mList1.nodeCount()); mList1.reduceWithIndex(nodeOp1, threaded, grainSize); // list0 if (!mList0.initNodeChildren(mList1, nodeOp1, !threaded)) return; mList0.reduceWithIndex(op, threaded, grainSize); } protected: using NodeT3 = RootNodeType; using NodeT2 = typename NodeT3::ChildNodeType; // upper level using NodeT1 = typename NodeT2::ChildNodeType; // mid level using NodeT0 = typename NodeT1::ChildNodeType; // lower level using ListT2 = NodeList<NodeT2>; // upper level of internal nodes using ListT1 = NodeList<NodeT1>; // lower level of internal nodes using ListT0 = NodeList<NodeT0>; // lower level of internal nodes or leafs NodeT3& mRoot; ListT2 mList2; ListT1 mList1; ListT0 mList0; };// DynamicNodeManager<3> 
class //////////////////////////////////////////// /// @private /// Template specialization of the DynamicNodeManager with four levels of nodes template<typename TreeOrLeafManagerT> class DynamicNodeManager<TreeOrLeafManagerT, 4> { public: using RootNodeType = typename TreeOrLeafManagerT::RootNodeType; static_assert(RootNodeType::LEVEL > 3, "expected instantiation of template specialization"); static const Index LEVELS = 4; explicit DynamicNodeManager(TreeOrLeafManagerT& tree) : mRoot(tree.root()) { } DynamicNodeManager(const DynamicNodeManager&) = delete; /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { // root if (!op(mRoot, /*index=*/0)) return; // list3 if (!mList3.initRootChildren(mRoot)) return; ForeachFilterOp<NodeOp> nodeOp3(op, mList3.nodeCount()); mList3.foreachWithIndex(nodeOp3, threaded, grainSize); // list2 if (!mList2.initNodeChildren(mList3, nodeOp3, !threaded)) return; ForeachFilterOp<NodeOp> nodeOp2(op, mList2.nodeCount()); mList2.foreachWithIndex(nodeOp2, threaded, grainSize); // list1 if (!mList1.initNodeChildren(mList2, nodeOp2, !threaded)) return; ForeachFilterOp<NodeOp> nodeOp1(op, mList1.nodeCount()); mList1.foreachWithIndex(nodeOp1, threaded, grainSize); // list0 if (!mList0.initNodeChildren(mList1, nodeOp1, !threaded)) return; mList0.foreachWithIndex(op, threaded, grainSize); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { // root if (!op(mRoot, /*index=*/0)) return; // list3 if (!mList3.initRootChildren(mRoot)) return; ReduceFilterOp<NodeOp> nodeOp3(op, mList3.nodeCount()); mList3.reduceWithIndex(nodeOp3, threaded, grainSize); // list2 if (!mList2.initNodeChildren(mList3, nodeOp3, !threaded)) return; ReduceFilterOp<NodeOp> nodeOp2(op, mList2.nodeCount()); mList2.reduceWithIndex(nodeOp2, threaded, grainSize); // list1 if 
(!mList1.initNodeChildren(mList2, nodeOp2, !threaded)) return; ReduceFilterOp<NodeOp> nodeOp1(op, mList1.nodeCount()); mList1.reduceWithIndex(nodeOp1, threaded, grainSize); // list0 if (!mList0.initNodeChildren(mList1, nodeOp1, !threaded)) return; mList0.reduceWithIndex(op, threaded, grainSize); } protected: using NodeT4 = RootNodeType; using NodeT3 = typename NodeT4::ChildNodeType; // upper level using NodeT2 = typename NodeT3::ChildNodeType; // upper mid level using NodeT1 = typename NodeT2::ChildNodeType; // lower mid level using NodeT0 = typename NodeT1::ChildNodeType; // lower level using ListT3 = NodeList<NodeT3>; // upper level of internal nodes using ListT2 = NodeList<NodeT2>; // upper mid level of internal nodes using ListT1 = NodeList<NodeT1>; // lower mid level of internal nodes using ListT0 = NodeList<NodeT0>; // lower level of internal nodes or leafs NodeT4& mRoot; ListT3 mList3; ListT2 mList2; ListT1 mList1; ListT0 mList0; };// DynamicNodeManager<4> class } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_NODEMANAGER_HAS_BEEN_INCLUDED
60,466
C
34.073666
124
0.628155
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/RootNode.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// /// @file RootNode.h /// /// @brief The root node of an OpenVDB tree #ifndef OPENVDB_TREE_ROOTNODE_HAS_BEEN_INCLUDED #define OPENVDB_TREE_ROOTNODE_HAS_BEEN_INCLUDED #include <openvdb/Exceptions.h> #include <openvdb/Types.h> #include <openvdb/io/Compression.h> // for truncateRealToHalf() #include <openvdb/math/Math.h> // for isZero(), isExactlyEqual(), etc. #include <openvdb/math/BBox.h> #include <openvdb/util/NodeMasks.h> // for backward compatibility only (see readTopology()) #include <openvdb/version.h> #include <tbb/parallel_for.h> #include <map> #include <set> #include <sstream> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { // Forward declarations template<typename HeadType, int HeadLevel> struct NodeChain; template<typename, typename> struct SameRootConfig; template<typename, typename, bool> struct RootNodeCopyHelper; template<typename, typename, typename, bool> struct RootNodeCombineHelper; template<typename ChildType> class RootNode { public: using ChildNodeType = ChildType; using LeafNodeType = typename ChildType::LeafNodeType; using ValueType = typename ChildType::ValueType; using BuildType = typename ChildType::BuildType; static const Index LEVEL = 1 + ChildType::LEVEL; // level 0 = leaf /// NodeChainType is a list of this tree's node types, from LeafNodeType to RootNode. using NodeChainType = typename NodeChain<RootNode, LEVEL>::Type; static_assert(NodeChainType::Size == LEVEL + 1, "wrong number of entries in RootNode node chain"); /// @brief ValueConverter<T>::Type is the type of a RootNode having the same /// child hierarchy as this node but a different value type, T. 
template<typename OtherValueType> struct ValueConverter { using Type = RootNode<typename ChildType::template ValueConverter<OtherValueType>::Type>; }; /// @brief SameConfiguration<OtherNodeType>::value is @c true if and only if /// OtherNodeType is the type of a RootNode whose ChildNodeType has the same /// configuration as this node's ChildNodeType. template<typename OtherNodeType> struct SameConfiguration { static const bool value = SameRootConfig<ChildNodeType, OtherNodeType>::value; }; /// Construct a new tree with a background value of 0. RootNode(); /// Construct a new tree with the given background value. explicit RootNode(const ValueType& background); RootNode(const RootNode& other) { *this = other; } /// @brief Construct a new tree that reproduces the topology and active states /// of a tree of a different ValueType but the same configuration (levels, /// node dimensions and branching factors). Cast the other tree's values to /// this tree's ValueType. /// @throw TypeError if the other tree's configuration doesn't match this tree's /// or if this tree's ValueType is not constructible from the other tree's ValueType. template<typename OtherChildType> explicit RootNode(const RootNode<OtherChildType>& other) { *this = other; } /// @brief Construct a new tree that reproduces the topology and active states of /// another tree (which may have a different ValueType), but not the other tree's values. /// @details All tiles and voxels that are active in the other tree are set to /// @a foreground in the new tree, and all inactive tiles and voxels are set to @a background. /// @param other the root node of a tree having (possibly) a different ValueType /// @param background the value to which inactive tiles and voxels are initialized /// @param foreground the value to which active tiles and voxels are initialized /// @throw TypeError if the other tree's configuration doesn't match this tree's. 
template<typename OtherChildType> RootNode(const RootNode<OtherChildType>& other, const ValueType& background, const ValueType& foreground, TopologyCopy); /// @brief Construct a new tree that reproduces the topology and active states of /// another tree (which may have a different ValueType), but not the other tree's values. /// All tiles and voxels in the new tree are set to @a background regardless of /// their active states in the other tree. /// @param other the root node of a tree having (possibly) a different ValueType /// @param background the value to which inactive tiles and voxels are initialized /// @note This copy constructor is generally faster than the one that takes both /// a foreground and a background value. Its main application is in multithreaded /// operations where the topology of the output tree exactly matches the input tree. /// @throw TypeError if the other tree's configuration doesn't match this tree's. template<typename OtherChildType> RootNode(const RootNode<OtherChildType>& other, const ValueType& background, TopologyCopy); /// @brief Copy a root node of the same type as this node. RootNode& operator=(const RootNode& other); /// @brief Copy a root node of the same tree configuration as this node /// but a different ValueType. /// @throw TypeError if the other tree's configuration doesn't match this tree's. /// @note This node's ValueType must be constructible from the other node's ValueType. /// For example, a root node with values of type float can be assigned to a root node /// with values of type Vec3s, because a Vec3s can be constructed from a float. /// But a Vec3s root node cannot be assigned to a float root node. 
template<typename OtherChildType> RootNode& operator=(const RootNode<OtherChildType>& other); ~RootNode() { this->clear(); } private: struct Tile { Tile(): value(zeroVal<ValueType>()), active(false) {} Tile(const ValueType& v, bool b): value(v), active(b) {} ValueType value; bool active; }; // This lightweight struct pairs child pointers and tiles. struct NodeStruct { ChildType* child; Tile tile; NodeStruct(): child(nullptr) {} NodeStruct(ChildType& c): child(&c) {} NodeStruct(const Tile& t): child(nullptr), tile(t) {} NodeStruct(const NodeStruct&) = default; NodeStruct& operator=(const NodeStruct&) = default; ~NodeStruct() {} ///< @note doesn't delete child bool isChild() const { return child != nullptr; } bool isTile() const { return child == nullptr; } bool isTileOff() const { return isTile() && !tile.active; } bool isTileOn() const { return isTile() && tile.active; } void set(ChildType& c) { delete child; child = &c; } void set(const Tile& t) { delete child; child = nullptr; tile = t; } ChildType& steal(const Tile& t) { ChildType* c=child; child=nullptr; tile=t; return *c; } }; using MapType = std::map<Coord, NodeStruct>; using MapIter = typename MapType::iterator; using MapCIter = typename MapType::const_iterator; using CoordSet = std::set<Coord>; using CoordSetIter = typename CoordSet::iterator; using CoordSetCIter = typename CoordSet::const_iterator; static void setTile(const MapIter& i, const Tile& t) { i->second.set(t); } static void setChild(const MapIter& i, ChildType& c) { i->second.set(c); } static Tile& getTile(const MapIter& i) { return i->second.tile; } static const Tile& getTile(const MapCIter& i) { return i->second.tile; } static ChildType& getChild(const MapIter& i) { return *(i->second.child); } static const ChildType& getChild(const MapCIter& i) { return *(i->second.child); } static ChildType& stealChild(const MapIter& i, const Tile& t) {return i->second.steal(t);} static const ChildType& stealChild(const MapCIter& i,const Tile& t) {return 
i->second.steal(t);}

// Static convenience predicates on table (map) iterators: classify the entry
// a given iterator points to as a child node or a tile (active/inactive).
static bool isChild(const MapCIter& i) { return i->second.isChild(); }
static bool isChild(const MapIter& i) { return i->second.isChild(); }
static bool isTile(const MapCIter& i) { return i->second.isTile(); }
static bool isTile(const MapIter& i) { return i->second.isTile(); }
static bool isTileOff(const MapCIter& i) { return i->second.isTileOff(); }
static bool isTileOff(const MapIter& i) { return i->second.isTileOff(); }
static bool isTileOn(const MapCIter& i) { return i->second.isTileOn(); }
static bool isTileOn(const MapIter& i) { return i->second.isTileOn(); }

/// Iterator filter predicate that accepts every table entry.
struct NullPred {
    static inline bool test(const MapIter&) { return true; }
    static inline bool test(const MapCIter&) { return true; }
};
/// Iterator filter predicate that accepts only active tile entries.
struct ValueOnPred {
    static inline bool test(const MapIter& i) { return isTileOn(i); }
    static inline bool test(const MapCIter& i) { return isTileOn(i); }
};
/// Iterator filter predicate that accepts only inactive tile entries.
struct ValueOffPred {
    static inline bool test(const MapIter& i) { return isTileOff(i); }
    static inline bool test(const MapCIter& i) { return isTileOff(i); }
};
/// Iterator filter predicate that accepts all tile entries, active or inactive.
struct ValueAllPred {
    static inline bool test(const MapIter& i) { return isTile(i); }
    static inline bool test(const MapCIter& i) { return isTile(i); }
};
/// Iterator filter predicate that accepts only child node entries.
struct ChildOnPred {
    static inline bool test(const MapIter& i) { return isChild(i); }
    static inline bool test(const MapCIter& i) { return isChild(i); }
};
/// Iterator filter predicate that accepts only entries with no child, i.e., tiles.
struct ChildOffPred {
    static inline bool test(const MapIter& i) { return isTile(i); }
    static inline bool test(const MapCIter& i) { return isTile(i); }
};

/// @brief Base class for iterators over this root node's table.
/// @details Visitation is restricted by @a FilterPredT, which determines
/// which table entries (child nodes and/or tiles) the iterator stops on.
template<typename _RootNodeT, typename _MapIterT, typename FilterPredT>
class BaseIter
{
public:
    using RootNodeT = _RootNodeT;
    using MapIterT = _MapIterT; // either MapIter or MapCIter

    bool operator==(const BaseIter& other) const
    {
        return (mParentNode == other.mParentNode) && (mIter == other.mIter);
    }
    bool operator!=(const BaseIter& other) const { return !(*this == other); }

    /// Return a pointer to the node over which this iterator iterates (may be null).
    RootNodeT* getParentNode() const { return mParentNode; }
    /// Return a reference to the node over which this iterator iterates.
    /// @throw ValueError if there is no parent node.
    RootNodeT& parent() const
    {
        if (!mParentNode) OPENVDB_THROW(ValueError, "iterator references a null parent node");
        return *mParentNode;
    }

    /// Return @c true if this iterator has not yet reached the end of the table.
    bool test() const { assert(mParentNode); return mIter != mParentNode->mTable.end(); }
    operator bool() const { return this->test(); }

    /// Advance past the current entry to the next entry accepted by the filter predicate.
    void increment() { if (this->test()) { ++mIter; } this->skip(); }
    /// Advance the iterator and return @c true if it is still valid.
    bool next() { this->increment(); return this->test(); }
    /// Advance the iterator up to @a n times, stopping early at the end of the table.
    void increment(Index n) { for (Index i = 0; i < n && this->next(); ++i) {} }

    /// @brief Return this iterator's position as an offset from
    /// the beginning of the parent node's map.
    Index pos() const
    {
        return !mParentNode ? 0U : Index(std::distance(mParentNode->mTable.begin(), mIter));
    }

    bool isValueOn() const { return RootNodeT::isTileOn(mIter); }
    bool isValueOff() const { return RootNodeT::isTileOff(mIter); }
    void setValueOn(bool on = true) const { mIter->second.tile.active = on; }
    void setValueOff() const { mIter->second.tile.active = false; }

    /// Return the coordinates of the item to which this iterator is pointing.
    Coord getCoord() const { return mIter->first; }
    /// Return in @a xyz the coordinates of the item to which this iterator is pointing.
    void getCoord(Coord& xyz) const { xyz = this->getCoord(); }

protected:
    BaseIter(): mParentNode(nullptr) {}
    BaseIter(RootNodeT& parent, const MapIterT& iter): mParentNode(&parent), mIter(iter) {}

    // Advance mIter past any entries that the filter predicate rejects.
    void skip() { while (this->test() && !FilterPredT::test(mIter)) ++mIter; }

    RootNodeT* mParentNode;
    MapIterT mIter;
}; // BaseIter

/// Iterator over the child nodes of a root node (entries accepted by @a FilterPredT).
template<typename RootNodeT, typename MapIterT, typename FilterPredT, typename ChildNodeT>
class ChildIter: public BaseIter<RootNodeT, MapIterT, FilterPredT>
{
public:
    using BaseT = BaseIter<RootNodeT, MapIterT, FilterPredT>;
    using NodeType = RootNodeT;
    using ValueType = NodeType;
    using ChildNodeType = ChildNodeT;
    using NonConstNodeType = typename std::remove_const<NodeType>::type;
    using NonConstValueType = typename std::remove_const<ValueType>::type;
    using NonConstChildNodeType = typename std::remove_const<ChildNodeType>::type;
    using BaseT::mIter;

    ChildIter() {}
    ChildIter(RootNodeT& parent, const MapIterT& iter): BaseT(parent, iter) { BaseT::skip(); }

    ChildIter& operator++() { BaseT::increment(); return *this; }

    ChildNodeT& getValue() const { return getChild(mIter); }
    ChildNodeT& operator*() const { return this->getValue(); }
    ChildNodeT* operator->() const { return &this->getValue(); }
}; // ChildIter

/// Iterator over the tile values of a root node (entries accepted by @a FilterPredT).
template<typename RootNodeT, typename MapIterT, typename FilterPredT, typename ValueT>
class ValueIter: public BaseIter<RootNodeT, MapIterT, FilterPredT>
{
public:
    using BaseT = BaseIter<RootNodeT, MapIterT, FilterPredT>;
    using NodeType = RootNodeT;
    using ValueType = ValueT;
    using NonConstNodeType = typename std::remove_const<NodeType>::type;
    using NonConstValueType = typename std::remove_const<ValueT>::type;
    using BaseT::mIter;

    ValueIter() {}
    ValueIter(RootNodeT& parent, const MapIterT& iter): BaseT(parent, iter) { BaseT::skip(); }

    ValueIter& operator++() { BaseT::increment(); return *this; }

    ValueT& getValue() const { return getTile(mIter).value; }
    ValueT& operator*() const { return this->getValue(); }
    ValueT* operator->() const { return &(this->getValue()); }

    /// Overwrite the value of the tile to which this iterator points.
    void setValue(const ValueT& v) const { assert(isTile(mIter)); getTile(mIter).value = v; }

    /// Apply the functor @a op in place to the value of the tile to which this iterator points.
    template<typename ModifyOp>
    void modifyValue(const ModifyOp& op) const { assert(isTile(mIter)); op(getTile(mIter).value); }
}; // ValueIter

/// Iterator over all of a root node's table entries, both child nodes and tiles.
template<typename RootNodeT, typename MapIterT, typename ChildNodeT, typename ValueT>
class DenseIter: public BaseIter<RootNodeT, MapIterT, NullPred>
{
public:
    using BaseT = BaseIter<RootNodeT, MapIterT, NullPred>;
    using NodeType = RootNodeT;
    using ValueType = ValueT;
    using ChildNodeType = ChildNodeT;
    using NonConstNodeType = typename std::remove_const<NodeType>::type;
    using NonConstValueType = typename std::remove_const<ValueT>::type;
    using NonConstChildNodeType = typename std::remove_const<ChildNodeT>::type;
    using BaseT::mIter;

    DenseIter() {}
    DenseIter(RootNodeT& parent, const MapIterT& iter): BaseT(parent, iter) {}

    DenseIter& operator++() { BaseT::increment(); return *this; }

    /// Return @c true if this iterator points to a child node (as opposed to a tile).
    bool isChildNode() const { return isChild(mIter); }

    /// @brief Return a pointer to the child node to which this iterator points,
    /// or, if it points to a tile, return @c nullptr and set @a value to the tile's value.
    ChildNodeT* probeChild(NonConstValueType& value) const
    {
        if (isChild(mIter)) return &getChild(mIter);
        value = getTile(mIter).value;
        return nullptr;
    }
    /// @brief As above, but return the child pointer through @a child.
    /// @return @c true if the iterator points to a child node.
    bool probeChild(ChildNodeT*& child, NonConstValueType& value) const
    {
        child = this->probeChild(value);
        return child != nullptr;
    }
    /// Return @c true if this iterator points to a tile, and if so set @a value to its value.
    bool probeValue(NonConstValueType& value) const { return !this->probeChild(value); }

    void setChild(ChildNodeT& c) const { RootNodeT::setChild(mIter, c); }
    void setChild(ChildNodeT* c) const { assert(c != nullptr); RootNodeT::setChild(mIter, *c); }
    void setValue(const ValueT& v) const
    {
        if (isTile(mIter)) getTile(mIter).value = v;
        /// @internal For consistency with iterators for other node types
        /// (see, e.g., InternalNode::DenseIter::unsetItem()), we don't call
        /// setTile() here, because that would also delete the child.
        else stealChild(mIter, Tile(v, /*active=*/true));
    }
}; // DenseIter

public:
// Public iterator typedefs over this node's children and tile values.
using ChildOnIter = ChildIter<RootNode, MapIter, ChildOnPred, ChildType>;
using ChildOnCIter = ChildIter<const RootNode, MapCIter, ChildOnPred, const ChildType>;
using ChildOffIter = ValueIter<RootNode, MapIter, ChildOffPred, const ValueType>;
using ChildOffCIter = ValueIter<const RootNode, MapCIter, ChildOffPred, ValueType>;
using ChildAllIter = DenseIter<RootNode, MapIter, ChildType, ValueType>;
using ChildAllCIter = DenseIter<const RootNode, MapCIter, const ChildType, const ValueType>;

using ValueOnIter = ValueIter<RootNode, MapIter, ValueOnPred, ValueType>;
using ValueOnCIter = ValueIter<const RootNode, MapCIter, ValueOnPred, const ValueType>;
using ValueOffIter = ValueIter<RootNode, MapIter, ValueOffPred, ValueType>;
using ValueOffCIter = ValueIter<const RootNode, MapCIter, ValueOffPred, const ValueType>;
using ValueAllIter = ValueIter<RootNode, MapIter, ValueAllPred, ValueType>;
using ValueAllCIter = ValueIter<const RootNode, MapCIter, ValueAllPred, const ValueType>;

ChildOnCIter  cbeginChildOn() const { return ChildOnCIter(*this, mTable.begin()); }
ChildOffCIter cbeginChildOff() const { return ChildOffCIter(*this, mTable.begin()); }
ChildAllCIter cbeginChildAll() const { return ChildAllCIter(*this, mTable.begin()); }
ChildOnCIter   beginChildOn() const { return cbeginChildOn(); }
ChildOffCIter  beginChildOff() const { return cbeginChildOff(); }
ChildAllCIter  beginChildAll() const { return cbeginChildAll(); }
ChildOnIter    beginChildOn() { return ChildOnIter(*this, mTable.begin()); }
ChildOffIter   beginChildOff() { return ChildOffIter(*this, mTable.begin()); }
ChildAllIter   beginChildAll() { return ChildAllIter(*this, mTable.begin()); }

ValueOnCIter  cbeginValueOn() const { return ValueOnCIter(*this, mTable.begin()); }
ValueOffCIter cbeginValueOff() const { return ValueOffCIter(*this, mTable.begin()); }
ValueAllCIter cbeginValueAll() const { return ValueAllCIter(*this, mTable.begin()); }
ValueOnCIter   beginValueOn() const { return cbeginValueOn(); }
ValueOffCIter  beginValueOff() const { return cbeginValueOff(); }
ValueAllCIter  beginValueAll() const { return cbeginValueAll(); }
ValueOnIter    beginValueOn() { return ValueOnIter(*this, mTable.begin()); }
ValueOffIter   beginValueOff() { return ValueOffIter(*this, mTable.begin()); }
ValueAllIter   beginValueAll() { return ValueAllIter(*this, mTable.begin()); }

/// Return the total amount of memory in bytes occupied by this node and its children.
Index64 memUsage() const;

/// @brief Expand the specified bbox so it includes the active tiles of
/// this root node as well as all the active values in its child
/// nodes. If visitVoxels is false LeafNodes will be approximated
/// as dense, i.e. with all voxels active. Else the individual
/// active voxels are visited to produce a tight bbox.
void evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels = true) const;

/// Return the bounding box of this RootNode, i.e., an infinite bounding box.
static CoordBBox getNodeBoundingBox() { return CoordBBox::inf(); }

/// @brief Change inactive tiles or voxels with a value equal to +/- the
/// old background to the specified value (with the same sign). Active values
/// are unchanged.
///
/// @param value The new background value
/// @param updateChildNodes If true the background values of the
/// child nodes is also updated. Else only the background value
/// stored in the RootNode itself is changed.
///
/// @note Instead of setting @a updateChildNodes to true, consider
/// using tools::changeBackground or
/// tools::changeLevelSetBackground which are multi-threaded!
void setBackground(const ValueType& value, bool updateChildNodes);

/// Return this node's background value.
const ValueType& background() const { return mBackground; }

/// Return @c true if the given tile is inactive and has the background value.
bool isBackgroundTile(const Tile&) const;
//@{
/// Return @c true if the given iterator points to an inactive tile with the background value.
bool isBackgroundTile(const MapIter&) const;
bool isBackgroundTile(const MapCIter&) const;
//@}

/// Return the number of background tiles.
size_t numBackgroundTiles() const;
/// @brief Remove all background tiles.
/// @return the number of tiles removed.
size_t eraseBackgroundTiles();
/// Remove every entry from this node's table.
inline void clear();

/// Return @c true if this node's table is either empty or contains only background tiles.
bool empty() const { return mTable.size() == numBackgroundTiles(); }

/// @brief Expand this node's table so that (x, y, z) is included in the index range.
/// @return @c true if an expansion was performed (i.e., if (x, y, z) was not already
/// included in the index range).
bool expand(const Coord& xyz);

static Index getLevel() { return LEVEL; }
static void getNodeLog2Dims(std::vector<Index>& dims);
static Index getChildDim() { return ChildType::DIM; }
/// Return the number of entries in this node's table.
Index getTableSize() const { return static_cast<Index>(mTable.size()); }

Index getWidth() const { return this->getMaxIndex()[0] - this->getMinIndex()[0]; }
Index getHeight() const { return this->getMaxIndex()[1] - this->getMinIndex()[1]; }
Index getDepth() const { return this->getMaxIndex()[2] - this->getMinIndex()[2]; }

/// Return the smallest index of the current tree.
Coord getMinIndex() const;
/// Return the largest index of the current tree.
Coord getMaxIndex() const;
/// Return the current index range. Both min and max are inclusive.
void getIndexRange(CoordBBox& bbox) const;

/// @brief Return @c true if the given tree has the same node and active value
/// topology as this tree (but possibly a different @c ValueType).
template<typename OtherChildType>
bool hasSameTopology(const RootNode<OtherChildType>& other) const;

/// Return @c false if the other node's dimensions don't match this node's.
template<typename OtherChildType>
static bool hasSameConfiguration(const RootNode<OtherChildType>& other);

/// Return @c true if values of the other node's ValueType can be converted
/// to values of this node's ValueType.
template<typename OtherChildType>
static bool hasCompatibleValueType(const RootNode<OtherChildType>& other);

Index32 leafCount() const;
Index32 nonLeafCount() const;
Index32 childCount() const;
Index64 onVoxelCount() const;
Index64 offVoxelCount() const;
Index64 onLeafVoxelCount() const;
Index64 offLeafVoxelCount() const;
Index64 onTileCount() const;
void nodeCount(std::vector<Index32> &vec) const;

/// Return @c true if the voxel at the given coordinates is active.
bool isValueOn(const Coord& xyz) const;

/// Return @c true if this root node, or any of its child nodes, have active tiles.
bool hasActiveTiles() const;

/// Return the value of the voxel at the given coordinates.
const ValueType& getValue(const Coord& xyz) const;
/// @brief Return, in @a value, the value of the voxel at the given coordinates.
/// @return @c true if the voxel at the given coordinates is active.
bool probeValue(const Coord& xyz, ValueType& value) const;

/// @brief Return the tree depth (0 = root) at which the value of voxel (x, y, z) resides.
/// @details If (x, y, z) isn't explicitly represented in the tree (i.e.,
/// it is implicitly a background voxel), return -1.
int getValueDepth(const Coord& xyz) const;

/// Set the active state of the voxel at the given coordinates but don't change its value.
void setActiveState(const Coord& xyz, bool on);
/// Set the value of the voxel at the given coordinates but don't change its active state.
void setValueOnly(const Coord& xyz, const ValueType& value);
/// Set the value of the voxel at the given coordinates and mark the voxel as active.
void setValueOn(const Coord& xyz, const ValueType& value);
/// Mark the voxel at the given coordinates as inactive but don't change its value.
void setValueOff(const Coord& xyz);
/// Set the value of the voxel at the given coordinates and mark the voxel as inactive.
void setValueOff(const Coord& xyz, const ValueType& value);

/// @brief Apply a functor to the value of the voxel at the given coordinates
/// and mark the voxel as active.
template<typename ModifyOp>
void modifyValue(const Coord& xyz, const ModifyOp& op);
/// Apply a functor to the voxel at the given coordinates.
template<typename ModifyOp>
void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op);

//@{
/// @brief Set all voxels within a given axis-aligned box to a constant value.
/// @param bbox    inclusive coordinates of opposite corners of an axis-aligned box
/// @param value   the value to which to set voxels within the box
/// @param active  if true, mark voxels within the box as active,
///                otherwise mark them as inactive
/// @note This operation generates a sparse, but not always optimally sparse,
/// representation of the filled box. Follow fill operations with a prune()
/// operation for optimal sparseness.
void fill(const CoordBBox& bbox, const ValueType& value, bool active = true);
void sparseFill(const CoordBBox& bbox, const ValueType& value, bool active = true)
{
    this->fill(bbox, value, active);
}
//@}

/// @brief Set all voxels within a given axis-aligned box to a constant value
/// and ensure that those voxels are all represented at the leaf level.
/// @param bbox    inclusive coordinates of opposite corners of an axis-aligned box.
/// @param value   the value to which to set voxels within the box.
/// @param active  if true, mark voxels within the box as active,
///                otherwise mark them as inactive.
/// @sa voxelizeActiveTiles()
void denseFill(const CoordBBox& bbox, const ValueType& value, bool active = true);

/// @brief Densify active tiles, i.e., replace them with leaf-level active voxels.
///
/// @param threaded if true, this operation is multi-threaded (over the internal nodes).
///
/// @warning This method can explode the tree's memory footprint, especially if it
/// contains active tiles at the upper levels (in particular the root level)!
///
/// @sa denseFill()
void voxelizeActiveTiles(bool threaded = true);

/// @brief Copy into a dense grid the values of all voxels, both active and inactive,
/// that intersect a given bounding box.
/// @param bbox   inclusive bounding box of the voxels to be copied into the dense grid
/// @param dense  dense grid with a stride in @e z of one (see tools::Dense
///               in tools/Dense.h for the required API)
template<typename DenseT>
void copyToDense(const CoordBBox& bbox, DenseT& dense) const;


//
// I/O
//
bool writeTopology(std::ostream&, bool toHalf = false) const;
bool readTopology(std::istream&, bool fromHalf = false);

void writeBuffers(std::ostream&, bool toHalf = false) const;
void readBuffers(std::istream&, bool fromHalf = false);
void readBuffers(std::istream&, const CoordBBox&, bool fromHalf = false);


//
// Voxel access
//
/// Return the value of the voxel at the given coordinates and, if necessary, update
/// the accessor with pointers to the nodes along the path from the root node to
/// the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename AccessorT>
const ValueType& getValueAndCache(const Coord& xyz, AccessorT&) const;
/// Return @c true if the voxel at the given coordinates is active and, if necessary,
/// update the accessor with pointers to the nodes along the path from the root node
/// to the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename AccessorT>
bool isValueOnAndCache(const Coord& xyz, AccessorT&) const;

/// Change the value of the voxel at the given coordinates and mark it as active.
/// If necessary, update the accessor with pointers to the nodes along the path
/// from the root node to the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename AccessorT>
void setValueAndCache(const Coord& xyz, const ValueType& value, AccessorT&);

/// Set the value of the voxel at the given coordinates without changing its active state.
/// If necessary, update the accessor with pointers to the nodes along the path
/// from the root node to the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename AccessorT>
void setValueOnlyAndCache(const Coord& xyz, const ValueType& value, AccessorT&);

/// Apply a functor to the value of the voxel at the given coordinates
/// and mark the voxel as active.
/// If necessary, update the accessor with pointers to the nodes along the path
/// from the root node to the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename ModifyOp, typename AccessorT>
void modifyValueAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&);

/// Apply a functor to the voxel at the given coordinates.
/// If necessary, update the accessor with pointers to the nodes along the path
/// from the root node to the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename ModifyOp, typename AccessorT>
void modifyValueAndActiveStateAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&);

/// Change the value of the voxel at the given coordinates and mark it as inactive.
/// If necessary, update the accessor with pointers to the nodes along the path
/// from the root node to the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename AccessorT>
void setValueOffAndCache(const Coord& xyz, const ValueType& value, AccessorT&);

/// Set the active state of the voxel at the given coordinates without changing its value.
/// If necessary, update the accessor with pointers to the nodes along the path
/// from the root node to the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename AccessorT>
void setActiveStateAndCache(const Coord& xyz, bool on, AccessorT&);

/// Return, in @a value, the value of the voxel at the given coordinates and,
/// if necessary, update the accessor with pointers to the nodes along
/// the path from the root node to the node containing the voxel.
/// @return @c true if the voxel at the given coordinates is active
/// @note Used internally by ValueAccessor.
template<typename AccessorT>
bool probeValueAndCache(const Coord& xyz, ValueType& value, AccessorT&) const;

/// Return the tree depth (0 = root) at which the value of voxel (x, y, z) resides.
/// If (x, y, z) isn't explicitly represented in the tree (i.e., it is implicitly
/// a background voxel), return -1. If necessary, update the accessor with pointers
/// to the nodes along the path from the root node to the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename AccessorT>
int getValueDepthAndCache(const Coord& xyz, AccessorT&) const;

/// Set all voxels that lie outside the given axis-aligned box to the background.
void clip(const CoordBBox&);

/// @brief Reduce the memory footprint of this tree by replacing with tiles
/// any nodes whose values are all the same (optionally to within a tolerance)
/// and have the same active state.
///
/// @note Consider instead using tools::prune which is multi-threaded!
void prune(const ValueType& tolerance = zeroVal<ValueType>());

/// @brief Add the given leaf node to this tree, creating a new branch if necessary.
/// If a leaf node with the same origin already exists, replace it.
void addLeaf(LeafNodeType* leaf);

/// @brief Same as addLeaf() but, if necessary, update the given accessor with pointers
/// to the nodes along the path from the root node to the node containing the coordinate.
template<typename AccessorT>
void addLeafAndCache(LeafNodeType* leaf, AccessorT&);

/// @brief Return a pointer to the node of type @c NodeT that contains voxel (x, y, z)
/// and replace it with a tile of the specified value and state.
/// If no such node exists, leave the tree unchanged and return @c nullptr.
///
/// @note The caller takes ownership of the node and is responsible for deleting it.
///
/// @warning Since this method potentially removes nodes and branches of the tree,
/// it is important to clear the caches of all ValueAccessors associated with this tree.
template<typename NodeT>
NodeT* stealNode(const Coord& xyz, const ValueType& value, bool state);

/// @brief Add the given child node at the root level.
/// If a child node with the same origin already exists, delete the old node and add
/// the new node in its place (i.e. ownership of the new child node is transferred
/// to this RootNode).
/// @return @c true (for consistency with InternalNode::addChild)
bool addChild(ChildType* child);

/// @brief Add a tile containing voxel (x, y, z) at the root level,
/// deleting the existing branch if necessary.
void addTile(const Coord& xyz, const ValueType& value, bool state);

/// @brief Add a tile containing voxel (x, y, z) at the specified tree level,
/// creating a new branch if necessary. Delete any existing lower-level nodes
/// that contain (x, y, z).
void addTile(Index level, const Coord& xyz, const ValueType& value, bool state);

/// @brief Same as addTile() but, if necessary, update the given accessor with pointers
/// to the nodes along the path from the root node to the node containing the coordinate.
template<typename AccessorT>
void addTileAndCache(Index level, const Coord& xyz, const ValueType&, bool state, AccessorT&);

/// @brief Return a pointer to the leaf node that contains voxel (x, y, z).
/// If no such node exists, create one that preserves the values and
/// active states of all voxels.
/// @details Use this method to preallocate a static tree topology
/// over which to safely perform multithreaded processing.
LeafNodeType* touchLeaf(const Coord& xyz);

/// @brief Same as touchLeaf() but, if necessary, update the given accessor with pointers
/// to the nodes along the path from the root node to the node containing the coordinate.
template<typename AccessorT>
LeafNodeType* touchLeafAndCache(const Coord& xyz, AccessorT& acc);

//@{
/// @brief Return a pointer to the node that contains voxel (x, y, z).
/// If no such node exists, return @c nullptr.
template <typename NodeT>
NodeT* probeNode(const Coord& xyz);
template <typename NodeT>
const NodeT* probeConstNode(const Coord& xyz) const;
//@}

//@{
/// @brief Same as probeNode() but, if necessary, update the given accessor with pointers
/// to the nodes along the path from the root node to the node containing the coordinate.
template<typename NodeT, typename AccessorT>
NodeT* probeNodeAndCache(const Coord& xyz, AccessorT& acc);
template<typename NodeT, typename AccessorT>
const NodeT* probeConstNodeAndCache(const Coord& xyz, AccessorT& acc) const;
//@}

//@{
/// @brief Return a pointer to the leaf node that contains voxel (x, y, z).
/// If no such node exists, return @c nullptr.
LeafNodeType* probeLeaf(const Coord& xyz);
const LeafNodeType* probeConstLeaf(const Coord& xyz) const;
const LeafNodeType* probeLeaf(const Coord& xyz) const;
//@}

//@{
/// @brief Same as probeLeaf() but, if necessary, update the given accessor with pointers
/// to the nodes along the path from the root node to the node containing the coordinate.
template<typename AccessorT>
LeafNodeType* probeLeafAndCache(const Coord& xyz, AccessorT& acc);
template<typename AccessorT>
const LeafNodeType* probeConstLeafAndCache(const Coord& xyz, AccessorT& acc) const;
template<typename AccessorT>
const LeafNodeType* probeLeafAndCache(const Coord& xyz, AccessorT& acc) const;
//@}


//
// Aux methods
//

//@{
/// @brief Adds all nodes of a certain type to a container with the following API:
/// @code
/// struct ArrayT {
///     using value_type = ...; // defines the type of nodes to be added to the array
///     void push_back(value_type nodePtr); // method that adds nodes to the array
/// };
/// @endcode
/// @details An example of a wrapper around a c-style array is:
/// @code
/// struct MyArray {
///     using value_type = LeafType*;
///     value_type* ptr;
///     MyArray(value_type* array) : ptr(array) {}
///     void push_back(value_type leaf) { *ptr++ = leaf; }
/// };
/// @endcode
/// @details An example that constructs a list of pointers to all leaf nodes is:
/// @code
/// std::vector<const LeafNodeType*> array; // most std containers have the required API
/// array.reserve(tree.leafCount()); // this is a fast preallocation.
/// tree.getNodes(array);
/// @endcode
template<typename ArrayT> void getNodes(ArrayT& array);
template<typename ArrayT> void getNodes(ArrayT& array) const;
//@}

//@{
/// @brief Steals all nodes of a certain type from the tree and
/// adds them to a container with the following API:
/// @code
/// struct ArrayT {
///     using value_type = ...; // defines the type of nodes to be added to the array
///     void push_back(value_type nodePtr); // method that adds nodes to the array
/// };
/// @endcode
/// @details An example of a wrapper around a c-style array is:
/// @code
/// struct MyArray {
///     using value_type = LeafType*;
///     value_type* ptr;
///     MyArray(value_type* array) : ptr(array) {}
///     void push_back(value_type leaf) { *ptr++ = leaf; }
/// };
/// @endcode
/// @details An example that constructs a list of pointers to all leaf nodes is:
/// @code
/// std::vector<const LeafNodeType*> array; // most std containers have the required API
/// array.reserve(tree.leafCount()); // this is a fast preallocation.
/// tree.stealNodes(array);
/// @endcode
template<typename ArrayT>
void stealNodes(ArrayT& array, const ValueType& value, bool state);
template<typename ArrayT>
void stealNodes(ArrayT& array) { this->stealNodes(array, mBackground, false); }
//@}

/// @brief Efficiently merge another tree into this tree using one of several schemes.
/// @details This operation is primarily intended to combine trees that are mostly
/// non-overlapping (for example, intermediate trees from computations that are
/// parallelized across disjoint regions of space).
/// @note This operation is not guaranteed to produce an optimally sparse tree.
/// Follow merge() with prune() for optimal sparseness.
/// @warning This operation always empties the other tree.
template<MergePolicy Policy> void merge(RootNode& other);

/// @brief Union this tree's set of active values with the active values
/// of the other tree, whose @c ValueType may be different.
/// @details The resulting state of a value is active if the corresponding value
/// was already active OR if it is active in the other tree. Also, a resulting
/// value maps to a voxel if the corresponding value already mapped to a voxel
/// OR if it is a voxel in the other tree. Thus, a resulting value can only
/// map to a tile if the corresponding value already mapped to a tile
/// AND if it is a tile value in other tree.
///
/// @note This operation modifies only active states, not values.
/// Specifically, active tiles and voxels in this tree are not changed, and
/// tiles or voxels that were inactive in this tree but active in the other tree
/// are marked as active in this tree but left with their original values.
template<typename OtherChildType>
void topologyUnion(const RootNode<OtherChildType>& other);

/// @brief Intersects this tree's set of active values with the active values
/// of the other tree, whose @c ValueType may be different.
/// @details The resulting state of a value is active only if the corresponding
/// value was already active AND if it is active in the other tree. Also, a
/// resulting value maps to a voxel if the corresponding value
/// already mapped to an active voxel in either of the two grids
/// and it maps to an active tile or voxel in the other grid.
///
/// @note This operation can delete branches in this grid if they
/// overlap with inactive tiles in the other grid. Likewise active
/// voxels can be turned into inactive voxels resulting in leaf
/// nodes with no active values. Thus, it is recommended to
/// subsequently call prune.
template<typename OtherChildType>
void topologyIntersection(const RootNode<OtherChildType>& other);

/// @brief Difference this tree's set of active values with the active values
/// of the other tree, whose @c ValueType may be different. So a
/// resulting voxel will be active only if the original voxel is
/// active in this tree and inactive in the other tree.
///
/// @note This operation can delete branches in this grid if they
/// overlap with active tiles in the other grid. Likewise active
/// voxels can be turned into inactive voxels resulting in leaf
/// nodes with no active values. Thus, it is recommended to
/// subsequently call prune.
template<typename OtherChildType>
void topologyDifference(const RootNode<OtherChildType>& other);

/// Combine this tree's data with that of @a other using the given functor.
/// @note(review) Semantics are documented at the tree level (Tree::combine); confirm there.
template<typename CombineOp>
void combine(RootNode& other, CombineOp&, bool prune = false);

/// Combine the data of @a other0 and @a other1 into this tree using the given functor.
/// @note(review) Semantics are documented at the tree level (Tree::combine2); confirm there.
template<typename CombineOp, typename OtherRootNode /*= RootNode*/>
void combine2(const RootNode& other0, const OtherRootNode& other1,
    CombineOp& op, bool prune = false);

/// @brief Call the templated functor BBoxOp with bounding box
/// information for all active tiles and leaf nodes in the tree.
/// An additional level argument is provided for each callback.
///
/// @note The bounding boxes are guaranteed to be non-overlapping.
template<typename BBoxOp> void visitActiveBBox(BBoxOp&) const;

/// Traverse this tree (and, for visit2, a second tree in tandem) with a visitor functor.
template<typename VisitorOp> void visit(VisitorOp&);
template<typename VisitorOp> void visit(VisitorOp&) const;

template<typename OtherRootNodeType, typename VisitorOp>
void visit2(OtherRootNodeType& other, VisitorOp&);
template<typename OtherRootNodeType, typename VisitorOp>
void visit2(OtherRootNodeType& other, VisitorOp&) const;

private:
/// During topology-only construction, access is needed
/// to protected/private members of other template instances.
template<typename> friend class RootNode;

template<typename, typename, bool> friend struct RootNodeCopyHelper;
template<typename, typename, typename, bool> friend struct RootNodeCombineHelper;

/// Currently no-op, but can be used to define empty and delete keys for mTable
void initTable() {}
//@{
/// @internal Used by doVisit2().
void resetTable(MapType& table) { mTable.swap(table); table.clear(); }
void resetTable(const MapType&) const {}
//@}

#if OPENVDB_ABI_VERSION_NUMBER < 8
Index getChildCount() const;
#endif
Index getTileCount() const;
Index getActiveTileCount() const;
Index getInactiveTileCount() const;

/// Return a MapType key for the given coordinates.
static Coord coordToKey(const Coord& xyz) { return xyz & ~(ChildType::DIM - 1); }

/// Insert this node's mTable keys into the given set.
void insertKeys(CoordSet&) const;

/// Return @c true if this node's mTable contains the given key.
bool hasKey(const Coord& key) const { return mTable.find(key) != mTable.end(); }
//@{
/// @brief Look up the given key in this node's mTable.
/// @return an iterator pointing to the matching mTable entry or to mTable.end().
MapIter findKey(const Coord& key) { return mTable.find(key); }
MapCIter findKey(const Coord& key) const { return mTable.find(key); }
//@}
//@{
/// @brief Convert the given coordinates to a key and look the key up in this node's mTable.
/// @return an iterator pointing to the matching mTable entry or to mTable.end().
MapIter findCoord(const Coord& xyz) { return mTable.find(coordToKey(xyz)); }
MapCIter findCoord(const Coord& xyz) const { return mTable.find(coordToKey(xyz)); }
//@}
/// @brief Convert the given coordinates to a key and look the key up in this node's mTable.
/// @details If the key is not found, insert a background tile with that key.
/// @return an iterator pointing to the matching mTable entry.
MapIter findOrAddCoord(const Coord& xyz);

/// @brief Verify that the tree rooted at @a other has the same configuration
/// (levels, branching factors and node dimensions) as this tree, but allow
/// their ValueTypes to differ.
/// @throw TypeError if the other tree's configuration doesn't match this tree's.
template<typename OtherChildType>
static void enforceSameConfiguration(const RootNode<OtherChildType>& other);

/// @brief Verify that @a other has values of a type that can be converted
/// to this node's ValueType.
/// @details For example, values of type float are compatible with values of type Vec3s,
/// because a Vec3s can be constructed from a float. But the reverse is not true.
/// @throw TypeError if the other node's ValueType is not convertible into this node's.
template<typename OtherChildType>
static void enforceCompatibleValueTypes(const RootNode<OtherChildType>& other);

template<typename CombineOp, typename OtherRootNode /*= RootNode*/>
void doCombine2(const RootNode&, const OtherRootNode&, CombineOp&, bool prune);

template<typename RootNodeT, typename VisitorOp, typename ChildAllIterT>
static inline void doVisit(RootNodeT&, VisitorOp&);

template<typename RootNodeT, typename OtherRootNodeT, typename VisitorOp,
    typename ChildAllIterT, typename OtherChildAllIterT>
static inline void doVisit2(RootNodeT&, OtherRootNodeT&, VisitorOp&);


// The sparse table of child nodes and tiles, keyed by child-node origin.
MapType mTable;
// The value returned for any voxel not explicitly represented in the table.
ValueType mBackground;
}; // end of RootNode class


////////////////////////////////////////


/// @brief NodeChain<RootNodeType, RootNodeType::LEVEL>::Type is an openvdb::TypeList
/// that lists the types of the nodes of the tree rooted at RootNodeType in reverse order,
/// from LeafNode to RootNode.
/// @details For example, if RootNodeType is
/// @code
/// RootNode<InternalNode<InternalNode<LeafNode> > >
/// @endcode
/// then NodeChain::Type is
/// @code
/// openvdb::TypeList<
///     LeafNode,
///     InternalNode<LeafNode>,
///     InternalNode<InternalNode<LeafNode> >,
///     RootNode<InternalNode<InternalNode<LeafNode> > > >
/// @endcode
///
/// @note Use the following to get the Nth node type, where N=0 is the LeafNodeType:
/// @code
/// NodeChainType::Get<N>;
/// @endcode
template<typename HeadT, int HeadLevel>
struct NodeChain
{
    // Recursively build the list for the subtree below HeadT, then append HeadT itself.
    using SubtreeT = typename NodeChain<typename HeadT::ChildNodeType, HeadLevel-1>::Type;
    using Type = typename SubtreeT::template Append<HeadT>;
};

/// Specialization to terminate NodeChain
template<typename HeadT>
struct NodeChain<HeadT, /*HeadLevel=*/1>
{
    using Type = TypeList<typename HeadT::ChildNodeType, HeadT>;
};


////////////////////////////////////////


//@{
/// Helper metafunction used to implement RootNode::SameConfiguration
/// (which, as an inner class, can't be independently specialized)
template<typename ChildT1, typename NodeT2>
struct SameRootConfig {
    static const bool value = false;
};

// Two root configurations match iff their child types' configurations match.
template<typename ChildT1, typename ChildT2>
struct SameRootConfig<ChildT1, RootNode<ChildT2> > {
    static const bool value = ChildT1::template SameConfiguration<ChildT2>::value;
};
//@}


////////////////////////////////////////


// Default constructor: zero background, empty table.
template<typename ChildT>
inline
RootNode<ChildT>::RootNode(): mBackground(zeroVal<ValueType>())
{
    this->initTable();
}


// Construct with an explicit background value and an empty table.
template<typename ChildT>
inline
RootNode<ChildT>::RootNode(const ValueType& background): mBackground(background)
{
    this->initTable();
}


// Topology-copy constructor: replicate the other tree's topology, mapping
// active states to the given foreground/background values.
template<typename ChildT>
template<typename OtherChildType>
inline
RootNode<ChildT>::RootNode(const RootNode<OtherChildType>& other,
    const ValueType& backgd, const ValueType& foregd, TopologyCopy):
    mBackground(backgd)
{
    using OtherRootT = RootNode<OtherChildType>;

    enforceSameConfiguration(other);

    const Tile bgTile(backgd, /*active=*/false), fgTile(foregd, true);
    this->initTable();

    // Tiles become fg/bg tiles according to their active state;
    // children are topology-copied (new ChildT takes ownership via NodeStruct).
    for (typename OtherRootT::MapCIter i=other.mTable.begin(), e=other.mTable.end(); i != e; ++i) {
        mTable[i->first] = OtherRootT::isTile(i)
            ? NodeStruct(OtherRootT::isTileOn(i) ? fgTile : bgTile)
            : NodeStruct(*(new ChildT(OtherRootT::getChild(i), backgd, foregd, TopologyCopy())));
    }
}


// Topology-copy constructor variant: both active and inactive tiles take the
// background value (they differ only in their active state).
template<typename ChildT>
template<typename OtherChildType>
inline
RootNode<ChildT>::RootNode(const RootNode<OtherChildType>& other,
    const ValueType& backgd, TopologyCopy):
    mBackground(backgd)
{
    using OtherRootT = RootNode<OtherChildType>;

    enforceSameConfiguration(other);

    const Tile bgTile(backgd, /*active=*/false), fgTile(backgd, true);
    this->initTable();
    for (typename OtherRootT::MapCIter i=other.mTable.begin(), e=other.mTable.end(); i != e; ++i) {
        mTable[i->first] = OtherRootT::isTile(i)
            ? NodeStruct(OtherRootT::isTileOn(i) ? fgTile : bgTile)
            : NodeStruct(*(new ChildT(OtherRootT::getChild(i), backgd, TopologyCopy())));
    }
}


////////////////////////////////////////


// This helper class is a friend of RootNode and is needed so that assignment
// with value conversion can be specialized for compatible and incompatible
// pairs of RootNode types.
template<typename RootT, typename OtherRootT, bool Compatible = false>
struct RootNodeCopyHelper
{
    // Primary template: the two RootNode types are incompatible, so always throw.
    static inline void copyWithValueConversion(RootT& self, const OtherRootT& other)
    {
        // If the two root nodes have different configurations or incompatible ValueTypes,
        // throw an exception.
        self.enforceSameConfiguration(other);
        self.enforceCompatibleValueTypes(other);
        // One of the above two tests should throw, so we should never get here:
        std::ostringstream ostr;
        ostr << "cannot convert a " << typeid(OtherRootT).name()
            << " to a " << typeid(RootT).name();
        OPENVDB_THROW(TypeError, ostr.str());
    }
};

// Specialization for root nodes of compatible types
template<typename RootT, typename OtherRootT>
struct RootNodeCopyHelper<RootT, OtherRootT, /*Compatible=*/true>
{
    static inline void copyWithValueConversion(RootT& self, const OtherRootT& other)
    {
        using ValueT = typename RootT::ValueType;
        using ChildT = typename RootT::ChildNodeType;
        using NodeStruct = typename RootT::NodeStruct;
        using Tile = typename RootT::Tile;
        using OtherValueT = typename OtherRootT::ValueType;
        using OtherMapCIter = typename OtherRootT::MapCIter;
        using OtherTile = typename OtherRootT::Tile;

        struct Local {
            /// @todo Consider using a value conversion functor passed as an argument instead.
            static inline ValueT convertValue(const OtherValueT& val) { return ValueT(val); }
        };

        self.mBackground = Local::convertValue(other.mBackground);
        self.clear();
        self.initTable();

        for (OtherMapCIter i = other.mTable.begin(), e = other.mTable.end(); i != e; ++i) {
            if (other.isTile(i)) {
                // Copy the other node's tile, but convert its value to this node's ValueType.
                const OtherTile& otherTile = other.getTile(i);
                self.mTable[i->first] = NodeStruct(
                    Tile(Local::convertValue(otherTile.value), otherTile.active));
            } else {
                // Copy the other node's child, but convert its values to this node's ValueType.
                // (ChildT's converting constructor performs the per-value conversion.)
                self.mTable[i->first] = NodeStruct(*(new ChildT(other.getChild(i))));
            }
        }
    }
};


// Overload for root nodes of the same type as this node
template<typename ChildT>
inline RootNode<ChildT>&
RootNode<ChildT>::operator=(const RootNode& other)
{
    if (&other != this) {
        mBackground = other.mBackground;

        // Deep copy: clear() deletes this node's current children before copying.
        this->clear();
        this->initTable();

        for (MapCIter i = other.mTable.begin(), e = other.mTable.end(); i != e; ++i) {
            mTable[i->first] =
                isTile(i) ? NodeStruct(getTile(i)) : NodeStruct(*(new ChildT(getChild(i))));
        }
    }
    return *this;
}

// Overload for root nodes of different types
template<typename ChildT>
template<typename OtherChildType>
inline RootNode<ChildT>&
RootNode<ChildT>::operator=(const RootNode<OtherChildType>& other)
{
    using OtherRootT = RootNode<OtherChildType>;
    using OtherValueT = typename OtherRootT::ValueType;

    // Dispatch at compile time to either the converting copy or the
    // always-throwing incompatible-type helper.
    static const bool compatible = (SameConfiguration<OtherRootT>::value
        && CanConvertType</*from=*/OtherValueT, /*to=*/ValueType>::value);
    RootNodeCopyHelper<RootNode, OtherRootT, compatible>::copyWithValueConversion(*this, other);
    return *this;
}


////////////////////////////////////////

// Replace the background value. If updateChildNodes is true, also rewrite
// existing occurrences of the old background (and its negation) in the tree.
template<typename ChildT>
inline void
RootNode<ChildT>::setBackground(const ValueType& background, bool updateChildNodes)
{
    if (math::isExactlyEqual(background, mBackground)) return;

    if (updateChildNodes) {
        // Traverse the tree, replacing occurrences of mBackground with background
        // and -mBackground with -background.
        for (MapIter iter=mTable.begin(); iter!=mTable.end(); ++iter) {
            ChildT *child = iter->second.child;
            if (child) {
                child->resetBackground(/*old=*/mBackground, /*new=*/background);
            } else {
                Tile& tile = getTile(iter);
                if (tile.active) continue;//only change inactive tiles
                if (math::isApproxEqual(tile.value, mBackground)) {
                    tile.value = background;
                } else if (math::isApproxEqual(tile.value, math::negative(mBackground))) {
                    // Negated background (e.g. the interior of a level set) is
                    // remapped to the negated new background.
                    tile.value = math::negative(background);
                }
            }
        }
    }
    mBackground = background;
}

// A "background tile" is an inactive tile whose value approximately equals mBackground.
template<typename ChildT>
inline bool
RootNode<ChildT>::isBackgroundTile(const Tile& tile) const
{
    return !tile.active && math::isApproxEqual(tile.value, mBackground);
}

template<typename ChildT>
inline bool
RootNode<ChildT>::isBackgroundTile(const MapIter& iter) const
{
    return isTileOff(iter) && math::isApproxEqual(getTile(iter).value, mBackground);
}

template<typename ChildT>
inline bool
RootNode<ChildT>::isBackgroundTile(const MapCIter& iter) const
{
    return isTileOff(iter) && math::isApproxEqual(getTile(iter).value, mBackground);
}


// Count table entries that are background tiles.
template<typename ChildT>
inline size_t
RootNode<ChildT>::numBackgroundTiles() const
{
    size_t count = 0;
    for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) {
        if (this->isBackgroundTile(i)) ++count;
    }
    return count;
}


// Remove all background tiles from the table; return how many were removed.
// (Keys are collected first so that erasure doesn't invalidate the iteration.)
template<typename ChildT>
inline size_t
RootNode<ChildT>::eraseBackgroundTiles()
{
    std::set<Coord> keysToErase;
    for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) {
        if (this->isBackgroundTile(i)) keysToErase.insert(i->first);
    }
    for (std::set<Coord>::iterator i = keysToErase.begin(), e = keysToErase.end(); i != e; ++i) {
        mTable.erase(*i);
    }
    return keysToErase.size();
}


////////////////////////////////////////

// Insert every table key into the given set.
template<typename ChildT>
inline void
RootNode<ChildT>::insertKeys(CoordSet& keys) const
{
    for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) {
        keys.insert(i->first);
    }
}


// Find the table entry containing xyz; if none exists, insert an inactive
// background tile there and return an iterator to it.
template<typename ChildT>
inline typename RootNode<ChildT>::MapIter
RootNode<ChildT>::findOrAddCoord(const Coord& xyz)
{
    const Coord key = coordToKey(xyz);
    std::pair<MapIter, bool> result = mTable.insert(
        typename MapType::value_type(key, NodeStruct(Tile(mBackground, /*active=*/false))));
    return result.first;
}


// Ensure there is a table entry covering xyz (inserting an inactive background
// tile if necessary).
template<typename ChildT>
inline bool
RootNode<ChildT>::expand(const Coord& xyz)
{
    const Coord key = coordToKey(xyz);
    std::pair<MapIter, bool> result = mTable.insert(
        typename MapType::value_type(key, NodeStruct(Tile(mBackground, /*active=*/false))));
    return result.second; // return true if the key did not already exist
}


// Append the log2 dimensions of each tree level, from the root down.
template<typename ChildT>
inline void
RootNode<ChildT>::getNodeLog2Dims(std::vector<Index>& dims)
{
    dims.push_back(0); // magic number; RootNode has no Log2Dim
    ChildT::getNodeLog2Dims(dims);
}


// Smallest key in the table (NOTE: relies on MapType iterating keys in
// sorted order, std::map-style); Coord(0) if the table is empty.
template<typename ChildT>
inline Coord
RootNode<ChildT>::getMinIndex() const
{
    return mTable.empty() ? Coord(0) : mTable.begin()->first;
}

// Largest voxel coordinate covered by the table: the largest key plus the
// child node's extent minus one; Coord(0) if the table is empty.
template<typename ChildT>
inline Coord
RootNode<ChildT>::getMaxIndex() const
{
    return mTable.empty() ? Coord(0) : mTable.rbegin()->first + Coord(ChildT::DIM - 1);
}


template<typename ChildT>
inline void
RootNode<ChildT>::getIndexRange(CoordBBox& bbox) const
{
    bbox.min() = this->getMinIndex();
    bbox.max() = this->getMaxIndex();
}


////////////////////////////////////////

// Return true if the two trees have identical active topology (ignoring any
// number of background tiles on either side).
template<typename ChildT>
template<typename OtherChildType>
inline bool
RootNode<ChildT>::hasSameTopology(const RootNode<OtherChildType>& other) const
{
    using OtherRootT = RootNode<OtherChildType>;
    using OtherMapT = typename OtherRootT::MapType;
    using OtherIterT = typename OtherRootT::MapIter;
    using OtherCIterT = typename OtherRootT::MapCIter;

    if (!hasSameConfiguration(other)) return false;

    // Create a local copy of the other node's table.
    OtherMapT copyOfOtherTable = other.mTable;

    // For each entry in this node's table...
    for (MapCIter thisIter = mTable.begin(); thisIter != mTable.end(); ++thisIter) {
        if (this->isBackgroundTile(thisIter)) continue; // ignore background tiles

        // Fail if there is no corresponding entry in the other node's table.
        OtherCIterT otherIter = other.findKey(thisIter->first);
        if (otherIter == other.mTable.end()) return false;

        // Fail if this entry is a tile and the other is a child or vice-versa.
        if (isChild(thisIter)) {//thisIter points to a child
            if (OtherRootT::isTile(otherIter)) return false;
            // Fail if both entries are children, but the children have different topology.
            if (!getChild(thisIter).hasSameTopology(&OtherRootT::getChild(otherIter))) return false;
        } else {//thisIter points to a tile
            if (OtherRootT::isChild(otherIter)) return false;
            if (getTile(thisIter).active != OtherRootT::getTile(otherIter).active) return false;
        }

        // Remove tiles and child nodes with matching topology from
        // the copy of the other node's table. This is required since
        // the two root tables can include an arbitrary number of
        // background tiles and still have the same topology!
        copyOfOtherTable.erase(otherIter->first);
    }
    // Fail if the remaining entries in copyOfOtherTable are not all background tiles.
    for (OtherIterT i = copyOfOtherTable.begin(), e = copyOfOtherTable.end(); i != e; ++i) {
        if (!other.isBackgroundTile(i)) return false;
    }
    return true;
}


// Configuration check: compare per-level log2 dimensions of the two tree types.
template<typename ChildT>
template<typename OtherChildType>
inline bool
RootNode<ChildT>::hasSameConfiguration(const RootNode<OtherChildType>&)
{
    std::vector<Index> thisDims, otherDims;
    RootNode::getNodeLog2Dims(thisDims);
    RootNode<OtherChildType>::getNodeLog2Dims(otherDims);
    return (thisDims == otherDims);
}


// Like hasSameConfiguration(), but throws TypeError with a descriptive
// message on mismatch instead of returning false.
template<typename ChildT>
template<typename OtherChildType>
inline void
RootNode<ChildT>::enforceSameConfiguration(const RootNode<OtherChildType>&)
{
    std::vector<Index> thisDims, otherDims;
    RootNode::getNodeLog2Dims(thisDims);
    RootNode<OtherChildType>::getNodeLog2Dims(otherDims);
    if (thisDims != otherDims) {
        std::ostringstream ostr;
        ostr << "grids have incompatible configurations (" << thisDims[0];
        for (size_t i = 1, N = thisDims.size(); i < N; ++i) ostr << " x " << thisDims[i];
        ostr << " vs. " << otherDims[0];
        for (size_t i = 1, N = otherDims.size(); i < N; ++i) ostr << " x " << otherDims[i];
        ostr << ")";
        OPENVDB_THROW(TypeError, ostr.str());
    }
}


// Compile-time test: can the other tree's ValueType be converted to ours?
template<typename ChildT>
template<typename OtherChildType>
inline bool
RootNode<ChildT>::hasCompatibleValueType(const RootNode<OtherChildType>&)
{
    using OtherValueType = typename OtherChildType::ValueType;
    return CanConvertType</*from=*/OtherValueType, /*to=*/ValueType>::value;
}


// Like hasCompatibleValueType(), but throws TypeError on incompatibility.
template<typename ChildT>
template<typename OtherChildType>
inline void
RootNode<ChildT>::enforceCompatibleValueTypes(const RootNode<OtherChildType>&)
{
    using OtherValueType = typename OtherChildType::ValueType;
    if (!CanConvertType</*from=*/OtherValueType, /*to=*/ValueType>::value) {
        std::ostringstream ostr;
        ostr << "values of type " << typeNameAsString<OtherValueType>()
            << " cannot be converted to type " << typeNameAsString<ValueType>();
        OPENVDB_THROW(TypeError, ostr.str());
    }
}


////////////////////////////////////////

// Approximate memory footprint: this node plus all of its child subtrees.
// (Tiles are stored inline in the table, so only children add to the sum.)
template<typename ChildT>
inline Index64
RootNode<ChildT>::memUsage() const
{
    Index64 sum = sizeof(*this);
    for (MapCIter iter=mTable.begin(); iter!=mTable.end(); ++iter) {
        if (const ChildT *child = iter->second.child) {
            sum += child->memUsage();
        }
    }
    return sum;
}


// Delete all child nodes (the table owns them) and empty the table.
template<typename ChildT>
inline void
RootNode<ChildT>::clear()
{
    for (MapIter i = mTable.begin(), e = mTable.end(); i != e; ++i) {
        delete i->second.child;
    }
    mTable.clear();
}


// Grow bbox to enclose all active values: recurse into children, and expand
// by a whole child-sized region for each active tile.
template<typename ChildT>
inline void
RootNode<ChildT>::evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels) const
{
    for (MapCIter iter=mTable.begin(); iter!=mTable.end(); ++iter) {
        if (const ChildT *child = iter->second.child) {
            child->evalActiveBoundingBox(bbox, visitVoxels);
        } else if (isTileOn(iter)) {
            bbox.expand(iter->first, ChildT::DIM);
        }
    }
}


#if OPENVDB_ABI_VERSION_NUMBER < 8
// Deprecated ABI<8 alias for childCount().
template<typename ChildT>
inline Index
RootNode<ChildT>::getChildCount() const
{
    return this->childCount();
}
#endif


// Number of table entries that are tiles (active or not).
template<typename ChildT>
inline Index
RootNode<ChildT>::getTileCount() const
{
    Index sum = 0;
    for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) {
        if (isTile(i)) ++sum;
    }
    return sum;
}


template<typename ChildT>
inline Index
RootNode<ChildT>::getActiveTileCount() const
{
    Index sum = 0;
    for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) {
        if (isTileOn(i)) ++sum;
    }
    return sum;
}


template<typename ChildT>
inline Index
RootNode<ChildT>::getInactiveTileCount() const
{
    Index sum = 0;
    for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) {
        if (isTileOff(i)) ++sum;
    }
    return sum;
}


// Total number of leaf nodes in all child subtrees.
template<typename ChildT>
inline Index32
RootNode<ChildT>::leafCount() const
{
    Index32 sum = 0;
    for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) {
        if (isChild(i)) sum += getChild(i).leafCount();
    }
    return sum;
}


// Total number of non-leaf nodes: this root node plus, unless the children
// are themselves leaves, all internal nodes below.
template<typename ChildT>
inline Index32
RootNode<ChildT>::nonLeafCount() const
{
    Index32 sum = 1;
    if (ChildT::LEVEL != 0) {
        for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) {
            if (isChild(i)) sum += getChild(i).nonLeafCount();
        }
    }
    return sum;
}
template<typename ChildT> inline Index32 RootNode<ChildT>::childCount() const { Index sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) ++sum; } return sum; } template<typename ChildT> inline Index64 RootNode<ChildT>::onVoxelCount() const { Index64 sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) { sum += getChild(i).onVoxelCount(); } else if (isTileOn(i)) { sum += ChildT::NUM_VOXELS; } } return sum; } template<typename ChildT> inline Index64 RootNode<ChildT>::offVoxelCount() const { Index64 sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) { sum += getChild(i).offVoxelCount(); } else if (isTileOff(i) && !this->isBackgroundTile(i)) { sum += ChildT::NUM_VOXELS; } } return sum; } template<typename ChildT> inline Index64 RootNode<ChildT>::onLeafVoxelCount() const { Index64 sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) sum += getChild(i).onLeafVoxelCount(); } return sum; } template<typename ChildT> inline Index64 RootNode<ChildT>::offLeafVoxelCount() const { Index64 sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) sum += getChild(i).offLeafVoxelCount(); } return sum; } template<typename ChildT> inline Index64 RootNode<ChildT>::onTileCount() const { Index64 sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) { sum += getChild(i).onTileCount(); } else if (isTileOn(i)) { sum += 1; } } return sum; } template<typename ChildT> inline void RootNode<ChildT>::nodeCount(std::vector<Index32> &vec) const { assert(vec.size() > LEVEL); Index32 sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) { ++sum; getChild(i).nodeCount(vec); } } vec[LEVEL] = 1;// one root node vec[ChildNodeType::LEVEL] = sum; } //////////////////////////////////////// template<typename ChildT> inline bool 
RootNode<ChildT>::isValueOn(const Coord& xyz) const { MapCIter iter = this->findCoord(xyz); if (iter == mTable.end() || isTileOff(iter)) return false; return isTileOn(iter) ? true : getChild(iter).isValueOn(xyz); } template<typename ChildT> inline bool RootNode<ChildT>::hasActiveTiles() const { for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i) ? getChild(i).hasActiveTiles() : getTile(i).active) return true; } return false; } template<typename ChildT> template<typename AccessorT> inline bool RootNode<ChildT>::isValueOnAndCache(const Coord& xyz, AccessorT& acc) const { MapCIter iter = this->findCoord(xyz); if (iter == mTable.end() || isTileOff(iter)) return false; if (isTileOn(iter)) return true; acc.insert(xyz, &getChild(iter)); return getChild(iter).isValueOnAndCache(xyz, acc); } template<typename ChildT> inline const typename ChildT::ValueType& RootNode<ChildT>::getValue(const Coord& xyz) const { MapCIter iter = this->findCoord(xyz); return iter == mTable.end() ? mBackground : (isTile(iter) ? getTile(iter).value : getChild(iter).getValue(xyz)); } template<typename ChildT> template<typename AccessorT> inline const typename ChildT::ValueType& RootNode<ChildT>::getValueAndCache(const Coord& xyz, AccessorT& acc) const { MapCIter iter = this->findCoord(xyz); if (iter == mTable.end()) return mBackground; if (isChild(iter)) { acc.insert(xyz, &getChild(iter)); return getChild(iter).getValueAndCache(xyz, acc); } return getTile(iter).value; } template<typename ChildT> inline int RootNode<ChildT>::getValueDepth(const Coord& xyz) const { MapCIter iter = this->findCoord(xyz); return iter == mTable.end() ? -1 : (isTile(iter) ? 
0 : int(LEVEL) - int(getChild(iter).getValueLevel(xyz))); } template<typename ChildT> template<typename AccessorT> inline int RootNode<ChildT>::getValueDepthAndCache(const Coord& xyz, AccessorT& acc) const { MapCIter iter = this->findCoord(xyz); if (iter == mTable.end()) return -1; if (isTile(iter)) return 0; acc.insert(xyz, &getChild(iter)); return int(LEVEL) - int(getChild(iter).getValueLevelAndCache(xyz, acc)); } template<typename ChildT> inline void RootNode<ChildT>::setValueOff(const Coord& xyz) { MapIter iter = this->findCoord(xyz); if (iter != mTable.end() && !isTileOff(iter)) { if (isTileOn(iter)) { setChild(iter, *new ChildT(xyz, getTile(iter).value, /*active=*/true)); } getChild(iter).setValueOff(xyz); } } template<typename ChildT> inline void RootNode<ChildT>::setActiveState(const Coord& xyz, bool on) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { if (on) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else { // Nothing to do; (x, y, z) is background and therefore already inactive. } } else if (isChild(iter)) { child = &getChild(iter); } else if (on != getTile(iter).active) { child = new ChildT(xyz, getTile(iter).value, !on); setChild(iter, *child); } if (child) child->setActiveState(xyz, on); } template<typename ChildT> template<typename AccessorT> inline void RootNode<ChildT>::setActiveStateAndCache(const Coord& xyz, bool on, AccessorT& acc) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { if (on) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else { // Nothing to do; (x, y, z) is background and therefore already inactive. 
} } else if (isChild(iter)) { child = &getChild(iter); } else if (on != getTile(iter).active) { child = new ChildT(xyz, getTile(iter).value, !on); setChild(iter, *child); } if (child) { acc.insert(xyz, child); child->setActiveStateAndCache(xyz, on, acc); } } template<typename ChildT> inline void RootNode<ChildT>::setValueOff(const Coord& xyz, const ValueType& value) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { if (!math::isExactlyEqual(mBackground, value)) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } } else if (isChild(iter)) { child = &getChild(iter); } else if (isTileOn(iter) || !math::isExactlyEqual(getTile(iter).value, value)) { child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); setChild(iter, *child); } if (child) child->setValueOff(xyz, value); } template<typename ChildT> template<typename AccessorT> inline void RootNode<ChildT>::setValueOffAndCache(const Coord& xyz, const ValueType& value, AccessorT& acc) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { if (!math::isExactlyEqual(mBackground, value)) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } } else if (isChild(iter)) { child = &getChild(iter); } else if (isTileOn(iter) || !math::isExactlyEqual(getTile(iter).value, value)) { child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); setChild(iter, *child); } if (child) { acc.insert(xyz, child); child->setValueOffAndCache(xyz, value, acc); } } template<typename ChildT> inline void RootNode<ChildT>::setValueOn(const Coord& xyz, const ValueType& value) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else if (isChild(iter)) { child = &getChild(iter); } else if (isTileOff(iter) || !math::isExactlyEqual(getTile(iter).value, value)) 
{ child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); setChild(iter, *child); } if (child) child->setValueOn(xyz, value); } template<typename ChildT> template<typename AccessorT> inline void RootNode<ChildT>::setValueAndCache(const Coord& xyz, const ValueType& value, AccessorT& acc) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else if (isChild(iter)) { child = &getChild(iter); } else if (isTileOff(iter) || !math::isExactlyEqual(getTile(iter).value, value)) { child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); setChild(iter, *child); } if (child) { acc.insert(xyz, child); child->setValueAndCache(xyz, value, acc); } } template<typename ChildT> inline void RootNode<ChildT>::setValueOnly(const Coord& xyz, const ValueType& value) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else if (isChild(iter)) { child = &getChild(iter); } else if (!math::isExactlyEqual(getTile(iter).value, value)) { child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); setChild(iter, *child); } if (child) child->setValueOnly(xyz, value); } template<typename ChildT> template<typename AccessorT> inline void RootNode<ChildT>::setValueOnlyAndCache(const Coord& xyz, const ValueType& value, AccessorT& acc) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else if (isChild(iter)) { child = &getChild(iter); } else if (!math::isExactlyEqual(getTile(iter).value, value)) { child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); setChild(iter, *child); } if (child) { acc.insert(xyz, child); child->setValueOnlyAndCache(xyz, value, acc); } } template<typename ChildT> 
/// @brief Apply the functor @a op to the value of the voxel at @a xyz
/// and mark the voxel active.
template<typename ModifyOp>
inline void
RootNode<ChildT>::modifyValue(const Coord& xyz, const ModifyOp& op)
{
    ChildT* child = nullptr;
    MapIter iter = this->findCoord(xyz);
    if (iter == mTable.end()) {
        // No table entry: create a child initialized with the background value.
        child = new ChildT(xyz, mBackground);
        mTable[this->coordToKey(xyz)] = NodeStruct(*child);
    } else if (isChild(iter)) {
        child = &getChild(iter);
    } else {
        // Need to create a child if the tile is inactive,
        // in order to activate voxel (x, y, z).
        bool createChild = isTileOff(iter);
        if (!createChild) {
            // Need to create a child if applying the functor
            // to the tile value produces a different value.
            const ValueType& tileVal = getTile(iter).value;
            ValueType modifiedVal = tileVal;
            op(modifiedVal);
            createChild = !math::isExactlyEqual(tileVal, modifiedVal);
        }
        if (createChild) {
            child = new ChildT(xyz, getTile(iter).value, isTileOn(iter));
            setChild(iter, *child);
        }
    }
    if (child) child->modifyValue(xyz, op);
}


/// @brief Apply @a op to the voxel at @a xyz, mark it active, and record
/// the traversed child node in the accessor cache @a acc.
template<typename ChildT>
template<typename ModifyOp, typename AccessorT>
inline void
RootNode<ChildT>::modifyValueAndCache(const Coord& xyz, const ModifyOp& op, AccessorT& acc)
{
    ChildT* child = nullptr;
    MapIter iter = this->findCoord(xyz);
    if (iter == mTable.end()) {
        child = new ChildT(xyz, mBackground);
        mTable[this->coordToKey(xyz)] = NodeStruct(*child);
    } else if (isChild(iter)) {
        child = &getChild(iter);
    } else {
        // Need to create a child if the tile is inactive,
        // in order to activate voxel (x, y, z).
        bool createChild = isTileOff(iter);
        if (!createChild) {
            // Need to create a child if applying the functor
            // to the tile value produces a different value.
            const ValueType& tileVal = getTile(iter).value;
            ValueType modifiedVal = tileVal;
            op(modifiedVal);
            createChild = !math::isExactlyEqual(tileVal, modifiedVal);
        }
        if (createChild) {
            child = new ChildT(xyz, getTile(iter).value, isTileOn(iter));
            setChild(iter, *child);
        }
    }
    if (child) {
        acc.insert(xyz, child);
        child->modifyValueAndCache(xyz, op, acc);
    }
}


/// @brief Apply @a op to the value AND active state of the voxel at @a xyz.
template<typename ChildT>
template<typename ModifyOp>
inline void
RootNode<ChildT>::modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op)
{
    ChildT* child = nullptr;
    MapIter iter = this->findCoord(xyz);
    if (iter == mTable.end()) {
        child = new ChildT(xyz, mBackground);
        mTable[this->coordToKey(xyz)] = NodeStruct(*child);
    } else if (isChild(iter)) {
        child = &getChild(iter);
    } else {
        const Tile& tile = getTile(iter);
        bool modifiedState = tile.active;
        ValueType modifiedVal = tile.value;
        op(modifiedVal, modifiedState);
        // Need to create a child if applying the functor to the tile
        // produces a different value or active state.
        if (modifiedState != tile.active || !math::isExactlyEqual(modifiedVal, tile.value)) {
            child = new ChildT(xyz, tile.value, tile.active);
            setChild(iter, *child);
        }
    }
    if (child) child->modifyValueAndActiveState(xyz, op);
}


/// @brief Apply @a op to the value and active state of the voxel at @a xyz,
/// recording the traversed child node in the accessor cache @a acc.
template<typename ChildT>
template<typename ModifyOp, typename AccessorT>
inline void
RootNode<ChildT>::modifyValueAndActiveStateAndCache(
    const Coord& xyz, const ModifyOp& op, AccessorT& acc)
{
    ChildT* child = nullptr;
    MapIter iter = this->findCoord(xyz);
    if (iter == mTable.end()) {
        child = new ChildT(xyz, mBackground);
        mTable[this->coordToKey(xyz)] = NodeStruct(*child);
    } else if (isChild(iter)) {
        child = &getChild(iter);
    } else {
        const Tile& tile = getTile(iter);
        bool modifiedState = tile.active;
        ValueType modifiedVal = tile.value;
        op(modifiedVal, modifiedState);
        // Need to create a child if applying the functor to the tile
        // produces a different value or active state.
        if (modifiedState != tile.active || !math::isExactlyEqual(modifiedVal, tile.value)) {
            child = new ChildT(xyz, tile.value, tile.active);
            setChild(iter, *child);
        }
    }
    if (child) {
        acc.insert(xyz, child);
        child->modifyValueAndActiveStateAndCache(xyz, op, acc);
    }
}


/// @brief Return in @a value the value of the voxel at @a xyz and return
/// @c true if the voxel is active (background lookups return @c false).
template<typename ChildT>
inline bool
RootNode<ChildT>::probeValue(const Coord& xyz, ValueType& value) const
{
    MapCIter iter = this->findCoord(xyz);
    if (iter == mTable.end()) {
        value = mBackground;
        return false;
    } else if (isChild(iter)) {
        return getChild(iter).probeValue(xyz, value);
    }
    value = getTile(iter).value;
    return isTileOn(iter);
}

/// @brief Same as probeValue() but also records the traversed child node
/// in the accessor cache @a acc.
template<typename ChildT>
template<typename AccessorT>
inline bool
RootNode<ChildT>::probeValueAndCache(const Coord& xyz, ValueType& value, AccessorT& acc) const
{
    MapCIter iter = this->findCoord(xyz);
    if (iter == mTable.end()) {
        value = mBackground;
        return false;
    } else if (isChild(iter)) {
        acc.insert(xyz, &getChild(iter));
        return getChild(iter).probeValueAndCache(xyz, value, acc);
    }
    value = getTile(iter).value;
    return isTileOn(iter);
}


////////////////////////////////////////


/// @brief Sparsely fill @a bbox with the given value and active state,
/// setting whole tiles where possible and descending into children otherwise.
template<typename ChildT>
inline void
RootNode<ChildT>::fill(const CoordBBox& bbox, const ValueType& value, bool active)
{
    if (bbox.empty()) return;

    // Iterate over the fill region in axis-aligned, tile-sized chunks.
    // (The first and last chunks along each axis might be smaller than a tile.)
    Coord xyz, tileMax;
    for (int x = bbox.min().x(); x <= bbox.max().x(); x = tileMax.x() + 1) {
        xyz.setX(x);
        for (int y = bbox.min().y(); y <= bbox.max().y(); y = tileMax.y() + 1) {
            xyz.setY(y);
            for (int z = bbox.min().z(); z <= bbox.max().z(); z = tileMax.z() + 1) {
                xyz.setZ(z);

                // Get the bounds of the tile that contains voxel (x, y, z).
                Coord tileMin = coordToKey(xyz);
                tileMax = tileMin.offsetBy(ChildT::DIM - 1);

                if (xyz != tileMin || Coord::lessThan(bbox.max(), tileMax)) {
                    // If the box defined by (xyz, bbox.max()) doesn't completely enclose
                    // the tile to which xyz belongs, create a child node (or retrieve
                    // the existing one).
                    ChildT* child = nullptr;
                    MapIter iter = this->findKey(tileMin);
                    if (iter == mTable.end()) {
                        // No child or tile exists.  Create a child and initialize it
                        // with the background value.
                        child = new ChildT(xyz, mBackground);
                        mTable[tileMin] = NodeStruct(*child);
                    } else if (isTile(iter)) {
                        // Replace the tile with a newly-created child that is filled
                        // with the tile's value and active state.
                        const Tile& tile = getTile(iter);
                        child = new ChildT(xyz, tile.value, tile.active);
                        mTable[tileMin] = NodeStruct(*child);
                    } else if (isChild(iter)) {
                        child = &getChild(iter);
                    }
                    // Forward the fill request to the child.
                    if (child) {
                        const Coord tmp = Coord::minComponent(bbox.max(), tileMax);
                        child->fill(CoordBBox(xyz, tmp), value, active);
                    }
                } else {
                    // If the box given by (xyz, bbox.max()) completely encloses
                    // the tile to which xyz belongs, create the tile (if it
                    // doesn't already exist) and give it the fill value.
                    MapIter iter = this->findOrAddCoord(tileMin);
                    setTile(iter, Tile(value, active));
                }
            }
        }
    }
}


/// @brief Densely fill @a bbox: every voxel in the region is voxelized
/// (no tiles remain inside the region) with the given value and state.
template<typename ChildT>
inline void
RootNode<ChildT>::denseFill(const CoordBBox& bbox, const ValueType& value, bool active)
{
    if (bbox.empty()) return;

    if (active && mTable.empty()) {
        // If this tree is empty, then a sparse fill followed by (threaded)
        // densification of active tiles is the more efficient approach.
        sparseFill(bbox, value, active);
        voxelizeActiveTiles(/*threaded=*/true);
        return;
    }

    // Iterate over the fill region in axis-aligned, tile-sized chunks.
    // (The first and last chunks along each axis might be smaller than a tile.)
    Coord xyz, tileMin, tileMax;
    for (int x = bbox.min().x(); x <= bbox.max().x(); x = tileMax.x() + 1) {
        xyz.setX(x);
        for (int y = bbox.min().y(); y <= bbox.max().y(); y = tileMax.y() + 1) {
            xyz.setY(y);
            for (int z = bbox.min().z(); z <= bbox.max().z(); z = tileMax.z() + 1) {
                xyz.setZ(z);

                // Get the bounds of the tile that contains voxel (x, y, z).
                tileMin = coordToKey(xyz);
                tileMax = tileMin.offsetBy(ChildT::DIM - 1);

                // Retrieve the table entry for the tile that contains xyz,
                // or, if there is no table entry, add a background tile.
                const auto iter = findOrAddCoord(tileMin);

                if (isTile(iter)) {
                    // If the table entry is a tile, replace it with a child node
                    // that is filled with the tile's value and active state.
                    const auto& tile = getTile(iter);
                    auto* child = new ChildT{tileMin, tile.value, tile.active};
                    setChild(iter, *child);
                }
                // Forward the fill request to the child.
                getChild(iter).denseFill(bbox, value, active);
            }
        }
    }
}


////////////////////////////////////////


/// @brief Replace each active root-level tile with a child branch of dense
/// active voxels carrying the tile's value.
template<typename ChildT>
inline void
RootNode<ChildT>::voxelizeActiveTiles(bool threaded)
{
    // There is little point in threading over the root table since each tile
    // spans a huge index space (by default 4096^3) and hence we expect few
    // active tiles if any at all. In fact, you're very likely to run out of
    // memory if this method is called on a tree with root-level active tiles!
    for (MapIter i = mTable.begin(), e = mTable.end(); i != e; ++i) {
        if (this->isTileOff(i)) continue;
        ChildT* child = i->second.child;
        if (child == nullptr) {
            // If this table entry is an active tile (i.e., not off and not a child node),
            // replace it with a child node filled with active tiles of the same value.
            child = new ChildT{i->first, this->getTile(i).value, true};
            i->second.child = child;
        }
        child->voxelizeActiveTiles(threaded);
    }
}


////////////////////////////////////////


/// @brief Copy the values in @a bbox from this tree into the dense grid
/// @a dense, visiting child branches where they exist and otherwise writing
/// the tile or background value across the overlapped sub-box.
template<typename ChildT>
template<typename DenseT>
inline void
RootNode<ChildT>::copyToDense(const CoordBBox& bbox, DenseT& dense) const
{
    using DenseValueType = typename DenseT::ValueType;

    const size_t xStride = dense.xStride(), yStride = dense.yStride(), zStride = dense.zStride();
    const Coord& min = dense.bbox().min();
    CoordBBox nodeBBox;
    for (Coord xyz = bbox.min(); xyz[0] <= bbox.max()[0]; xyz[0] = nodeBBox.max()[0] + 1) {
        for (xyz[1] = bbox.min()[1]; xyz[1] <= bbox.max()[1]; xyz[1] = nodeBBox.max()[1] + 1) {
            for (xyz[2] = bbox.min()[2]; xyz[2] <= bbox.max()[2]; xyz[2] = nodeBBox.max()[2] + 1) {

                // Get the coordinate bbox of the child node that contains voxel xyz.
                nodeBBox = CoordBBox::createCube(coordToKey(xyz), ChildT::DIM);

                // Get the coordinate bbox of the interection of inBBox and nodeBBox
                CoordBBox sub(xyz, Coord::minComponent(bbox.max(), nodeBBox.max()));

                MapCIter iter = this->findKey(nodeBBox.min());
                if (iter != mTable.end() && isChild(iter)) {//is a child
                    getChild(iter).copyToDense(sub, dense);
                } else {//is background or a tile value
                    const ValueType value = iter==mTable.end() ?
                        mBackground : getTile(iter).value;
                    sub.translate(-min);
                    // Walk the dense buffer with precomputed strides and write
                    // the constant value over the whole sub-box.
                    DenseValueType* a0 = dense.data() + zStride*sub.min()[2];
                    for (Int32 x=sub.min()[0], ex=sub.max()[0]+1; x<ex; ++x) {
                        DenseValueType* a1 = a0 + x*xStride;
                        for (Int32 y=sub.min()[1], ey=sub.max()[1]+1; y<ey; ++y) {
                            DenseValueType* a2 = a1 + y*yStride;
                            for (Int32 z=sub.min()[2], ez=sub.max()[2]+1; z<ez; ++z, a2 += zStride) {
                                *a2 = DenseValueType(value);
                            }
                        }
                    }
                }
            }
        }
    }
}


////////////////////////////////////////


/// @brief Serialize this node's background value, tiles and child topology
/// to @a os.  @return @c false if the node is empty.
template<typename ChildT>
inline bool
RootNode<ChildT>::writeTopology(std::ostream& os, bool toHalf) const
{
    if (!toHalf) {
        os.write(reinterpret_cast<const char*>(&mBackground), sizeof(ValueType));
    } else {
        // Truncate the background value to half precision for compact storage.
        ValueType truncatedVal = io::truncateRealToHalf(mBackground);
        os.write(reinterpret_cast<const char*>(&truncatedVal), sizeof(ValueType));
    }
    io::setGridBackgroundValuePtr(os, &mBackground);

    const Index numTiles = this->getTileCount(), numChildren = this->childCount();
    os.write(reinterpret_cast<const char*>(&numTiles), sizeof(Index));
    os.write(reinterpret_cast<const char*>(&numChildren), sizeof(Index));

    if (numTiles == 0 && numChildren == 0) return false;

    // Write tiles.
    for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) {
        if (isChild(i)) continue;
        os.write(reinterpret_cast<const char*>(i->first.asPointer()), 3 * sizeof(Int32));
        os.write(reinterpret_cast<const char*>(&getTile(i).value), sizeof(ValueType));
        os.write(reinterpret_cast<const char*>(&getTile(i).active), sizeof(bool));
    }
    // Write child nodes.
    for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) {
        if (isTile(i)) continue;
        os.write(reinterpret_cast<const char*>(i->first.asPointer()), 3 * sizeof(Int32));
        getChild(i).writeTopology(os, toHalf);
    }

    return true; // not empty
}


/// @brief Unserialize this node's topology from @a is, handling both the
/// current map-based format and the legacy fixed-table format.
template<typename ChildT>
inline bool
RootNode<ChildT>::readTopology(std::istream& is, bool fromHalf)
{
    // Delete the existing tree.
this->clear(); if (io::getFormatVersion(is) < OPENVDB_FILE_VERSION_ROOTNODE_MAP) { // Read and convert an older-format RootNode. // For backward compatibility with older file formats, read both // outside and inside background values. is.read(reinterpret_cast<char*>(&mBackground), sizeof(ValueType)); ValueType inside; is.read(reinterpret_cast<char*>(&inside), sizeof(ValueType)); io::setGridBackgroundValuePtr(is, &mBackground); // Read the index range. Coord rangeMin, rangeMax; is.read(reinterpret_cast<char*>(rangeMin.asPointer()), 3 * sizeof(Int32)); is.read(reinterpret_cast<char*>(rangeMax.asPointer()), 3 * sizeof(Int32)); this->initTable(); Index tableSize = 0, log2Dim[4] = { 0, 0, 0, 0 }; Int32 offset[3]; for (int i = 0; i < 3; ++i) { offset[i] = rangeMin[i] >> ChildT::TOTAL; rangeMin[i] = offset[i] << ChildT::TOTAL; log2Dim[i] = 1 + util::FindHighestOn((rangeMax[i] >> ChildT::TOTAL) - offset[i]); tableSize += log2Dim[i]; rangeMax[i] = (((1 << log2Dim[i]) + offset[i]) << ChildT::TOTAL) - 1; } log2Dim[3] = log2Dim[1] + log2Dim[2]; tableSize = 1U << tableSize; // Read masks. util::RootNodeMask childMask(tableSize), valueMask(tableSize); childMask.load(is); valueMask.load(is); // Read child nodes/values. for (Index i = 0; i < tableSize; ++i) { // Compute origin = offset2coord(i). Index n = i; Coord origin; origin[0] = (n >> log2Dim[3]) + offset[0]; n &= (1U << log2Dim[3]) - 1; origin[1] = (n >> log2Dim[2]) + offset[1]; origin[2] = (n & ((1U << log2Dim[2]) - 1)) + offset[1]; origin <<= ChildT::TOTAL; if (childMask.isOn(i)) { // Read in and insert a child node. ChildT* child = new ChildT(PartialCreate(), origin, mBackground); child->readTopology(is); mTable[origin] = NodeStruct(*child); } else { // Read in a tile value and insert a tile, but only if the value // is either active or non-background. 
                ValueType value;
                is.read(reinterpret_cast<char*>(&value), sizeof(ValueType));
                if (valueMask.isOn(i) || (!math::isApproxEqual(value, mBackground))) {
                    mTable[origin] = NodeStruct(Tile(value, valueMask.isOn(i)));
                }
            }
        }
        return true;
    }

    // Read a RootNode that was stored in the current format.

    is.read(reinterpret_cast<char*>(&mBackground), sizeof(ValueType));
    io::setGridBackgroundValuePtr(is, &mBackground);

    Index numTiles = 0, numChildren = 0;
    is.read(reinterpret_cast<char*>(&numTiles), sizeof(Index));
    is.read(reinterpret_cast<char*>(&numChildren), sizeof(Index));
    if (numTiles == 0 && numChildren == 0) return false;

    Int32 vec[3];
    ValueType value;
    bool active;

    // Read tiles.
    for (Index n = 0; n < numTiles; ++n) {
        is.read(reinterpret_cast<char*>(vec), 3 * sizeof(Int32));
        is.read(reinterpret_cast<char*>(&value), sizeof(ValueType));
        is.read(reinterpret_cast<char*>(&active), sizeof(bool));
        mTable[Coord(vec)] = NodeStruct(Tile(value, active));
    }

    // Read child nodes.
    for (Index n = 0; n < numChildren; ++n) {
        is.read(reinterpret_cast<char*>(vec), 3 * sizeof(Int32));
        Coord origin(vec);
        ChildT* child = new ChildT(PartialCreate(), origin, mBackground);
        child->readTopology(is, fromHalf);
        mTable[Coord(vec)] = NodeStruct(*child);
    }

    return true; // not empty
}


/// @brief Serialize the voxel buffers of all child branches to @a os.
template<typename ChildT>
inline void
RootNode<ChildT>::writeBuffers(std::ostream& os, bool toHalf) const
{
    for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) {
        if (isChild(i)) getChild(i).writeBuffers(os, toHalf);
    }
}


/// @brief Unserialize the voxel buffers of all child branches from @a is.
template<typename ChildT>
inline void
RootNode<ChildT>::readBuffers(std::istream& is, bool fromHalf)
{
    for (MapIter i = mTable.begin(), e = mTable.end(); i != e; ++i) {
        if (isChild(i)) getChild(i).readBuffers(is, fromHalf);
    }
}


/// @brief Unserialize voxel buffers from @a is, then clip the tree to @a clipBBox.
template<typename ChildT>
inline void
RootNode<ChildT>::readBuffers(std::istream& is, const CoordBBox& clipBBox, bool fromHalf)
{
    const Tile bgTile(mBackground, /*active=*/false);

    for (MapIter i = mTable.begin(), e = mTable.end(); i != e; ++i) {
        if (isChild(i)) {
            // Stream in and clip the branch rooted at this child.
            // (We can't skip over children that lie outside the clipping region,
            // because buffers are serialized in depth-first order and need to be
            // unserialized in the same order.)
            ChildT& child = getChild(i);
            child.readBuffers(is, clipBBox, fromHalf);
        }
    }
    // Clip root-level tiles and prune children that were clipped.
    this->clip(clipBBox);
}


////////////////////////////////////////


/// @brief Discard all voxels, tiles and branches outside @a clipBBox,
/// preserving partially-overlapping entries by re-filling the overlap.
template<typename ChildT>
inline void
RootNode<ChildT>::clip(const CoordBBox& clipBBox)
{
    const Tile bgTile(mBackground, /*active=*/false);

    // Iterate over a copy of this node's table so that we can modify the original.
    // (Copying the table copies child node pointers, not the nodes themselves.)
    MapType copyOfTable(mTable);
    for (MapIter i = copyOfTable.begin(), e = copyOfTable.end(); i != e; ++i) {
        const Coord& xyz = i->first; // tile or child origin
        CoordBBox tileBBox(xyz, xyz.offsetBy(ChildT::DIM - 1)); // tile or child bounds
        if (!clipBBox.hasOverlap(tileBBox)) {
            // This table entry lies completely outside the clipping region.  Delete it.
            setTile(this->findCoord(xyz), bgTile); // delete any existing child node first
            mTable.erase(xyz);
        } else if (!clipBBox.isInside(tileBBox)) {
            // This table entry does not lie completely inside the clipping region
            // and must be clipped.
            if (isChild(i)) {
                getChild(i).clip(clipBBox, mBackground);
            } else {
                // Replace this tile with a background tile, then fill the clip region
                // with the tile's original value.  (This might create a child branch.)
                tileBBox.intersect(clipBBox);
                const Tile& origTile = getTile(i);
                setTile(this->findCoord(xyz), bgTile);
                this->sparseFill(tileBBox, origTile.value, origTile.active);
            }
        } else {
            // This table entry lies completely inside the clipping region.  Leave it intact.
        }
    }
    this->prune(); // also erases root-level background tiles
}


////////////////////////////////////////


/// @brief Recursively collapse constant child branches into tiles whose
/// values are within @a tolerance of each other.
template<typename ChildT>
inline void
RootNode<ChildT>::prune(const ValueType& tolerance)
{
    bool state = false;
    ValueType value = zeroVal<ValueType>();
    for (MapIter i = mTable.begin(), e = mTable.end(); i != e; ++i) {
        if (this->isTile(i)) continue;
        this->getChild(i).prune(tolerance);
        if (this->getChild(i).isConstant(value, state, tolerance)) {
            // The entire branch is constant: replace it with a single tile.
            this->setTile(i, Tile(value, state));
        }
    }
    this->eraseBackgroundTiles();
}


////////////////////////////////////////


/// @brief Remove the node of type @c NodeT that contains @a xyz from the tree
/// (replacing it with a tile of the given value and state) and return it,
/// or return @c nullptr if no such node exists.
template<typename ChildT>
template<typename NodeT>
inline NodeT*
RootNode<ChildT>::stealNode(const Coord& xyz, const ValueType& value, bool state)
{
    // NodeT must be a type that actually occurs in this tree configuration.
    if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) ||
        NodeT::LEVEL > ChildT::LEVEL) return nullptr;
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    MapIter iter = this->findCoord(xyz);
    if (iter == mTable.end() || isTile(iter)) return nullptr;
    return (std::is_same<NodeT, ChildT>::value)
        ? reinterpret_cast<NodeT*>(&stealChild(iter, Tile(value, state)))
        : getChild(iter).template stealNode<NodeT>(xyz, value, state);
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}


////////////////////////////////////////


/// @brief Insert @a leaf into the tree, creating intermediate branches as
/// needed; ownership of the leaf passes to the tree.
template<typename ChildT>
inline void
RootNode<ChildT>::addLeaf(LeafNodeType* leaf)
{
    if (leaf == nullptr) return;
    ChildT* child = nullptr;
    const Coord& xyz = leaf->origin();
    MapIter iter = this->findCoord(xyz);
    if (iter == mTable.end()) {
        if (ChildT::LEVEL>0) {
            child = new ChildT(xyz, mBackground, false);
        } else {
            // The child type IS the leaf type: insert the leaf directly.
            child = reinterpret_cast<ChildT*>(leaf);
        }
        mTable[this->coordToKey(xyz)] = NodeStruct(*child);
    } else if (isChild(iter)) {
        if (ChildT::LEVEL>0) {
            child = &getChild(iter);
        } else {
            child = reinterpret_cast<ChildT*>(leaf);
            setChild(iter, *child);//this also deletes the existing child node
        }
    } else {//tile
        if (ChildT::LEVEL>0) {
            child = new ChildT(xyz, getTile(iter).value, isTileOn(iter));
        } else {
            child = reinterpret_cast<ChildT*>(leaf);
        }
        setChild(iter, *child);
    }
    child->addLeaf(leaf);
}


/// @brief Same as addLeaf() but also records the traversed child node
/// in the accessor cache @a acc.
template<typename ChildT>
template<typename AccessorT>
inline void
RootNode<ChildT>::addLeafAndCache(LeafNodeType* leaf, AccessorT& acc)
{
    if (leaf == nullptr) return;
    ChildT* child = nullptr;
    const Coord& xyz = leaf->origin();
    MapIter iter = this->findCoord(xyz);
    if (iter == mTable.end()) {
        if (ChildT::LEVEL>0) {
            child = new ChildT(xyz, mBackground, false);
        } else {
            child = reinterpret_cast<ChildT*>(leaf);
        }
        mTable[this->coordToKey(xyz)] = NodeStruct(*child);
    } else if (isChild(iter)) {
        if (ChildT::LEVEL>0) {
            child = &getChild(iter);
        } else {
            child = reinterpret_cast<ChildT*>(leaf);
            setChild(iter, *child);//this also deletes the existing child node
        }
    } else {//tile
        if (ChildT::LEVEL>0) {
            child = new ChildT(xyz, getTile(iter).value, isTileOn(iter));
        } else {
            child = reinterpret_cast<ChildT*>(leaf);
        }
        setChild(iter, *child);
    }
    acc.insert(xyz, child);
    child->addLeafAndCache(leaf, acc);
}


/// @brief Insert @a child into the root table, replacing any existing entry
/// with the same key; ownership of the child passes to the tree.
template<typename ChildT>
inline bool
RootNode<ChildT>::addChild(ChildT* child)
{
    if (!child) return false;
    const Coord& xyz = child->origin();
    MapIter iter = this->findCoord(xyz);
    if (iter == mTable.end()) {//background
        mTable[this->coordToKey(xyz)] = NodeStruct(*child);
    } else {//child or tile
        setChild(iter, *child);//this also deletes the existing child node
    }
    return true;
}


/// @brief Add a root-level tile containing @a xyz with the given value and
/// active state, replacing any existing entry.
template<typename ChildT>
inline void
RootNode<ChildT>::addTile(const Coord& xyz, const ValueType& value, bool state)
{
    MapIter iter = this->findCoord(xyz);
    if (iter == mTable.end()) {//background
        mTable[this->coordToKey(xyz)] = NodeStruct(Tile(value, state));
    } else {//child or tile
        setTile(iter, Tile(value, state));//this also deletes the existing child node
    }
}


/// @brief Add a tile at the given tree @a level containing @a xyz,
/// creating intermediate child nodes as needed.
template<typename ChildT>
inline void
RootNode<ChildT>::addTile(Index level, const Coord& xyz,
                          const ValueType& value, bool state)
{
    if (LEVEL >= level) {
        MapIter iter = this->findCoord(xyz);
        if (iter == mTable.end()) {//background
            if (LEVEL > level) {
                // The tile belongs below this node: create a child and recurse.
                ChildT* child = new ChildT(xyz, mBackground, false);
                mTable[this->coordToKey(xyz)] = NodeStruct(*child);
                child->addTile(level, xyz, value, state);
            } else {
                mTable[this->coordToKey(xyz)] = NodeStruct(Tile(value, state));
            }
        } else if (isChild(iter)) {//child
            if (LEVEL > level) {
                getChild(iter).addTile(level, xyz, value, state);
            } else {
                setTile(iter, Tile(value, state));//this also deletes the existing child node
            }
        } else {//tile
            if (LEVEL > level) {
                // Densify the existing tile so the new tile can be placed deeper.
                ChildT* child = new ChildT(xyz, getTile(iter).value, isTileOn(iter));
                setChild(iter, *child);
                child->addTile(level, xyz, value, state);
            } else {
                setTile(iter, Tile(value, state));
            }
        }
    }
}


/// @brief Same as addTile() but also records traversed child nodes
/// in the accessor cache @a acc.
template<typename ChildT>
template<typename AccessorT>
inline void
RootNode<ChildT>::addTileAndCache(Index level, const Coord& xyz, const ValueType& value,
    bool state, AccessorT& acc)
{
    if (LEVEL >= level) {
        MapIter iter = this->findCoord(xyz);
        if (iter == mTable.end()) {//background
            if (LEVEL > level) {
                ChildT* child = new ChildT(xyz, mBackground, false);
                acc.insert(xyz, child);
                mTable[this->coordToKey(xyz)] = NodeStruct(*child);
                child->addTileAndCache(level, xyz, value, state, acc);
            } else {
                mTable[this->coordToKey(xyz)] = NodeStruct(Tile(value, state));
            }
        } else if (isChild(iter)) {//child
            if (LEVEL > level) {
                ChildT* child = &getChild(iter);
                acc.insert(xyz, child);
                child->addTileAndCache(level, xyz, value, state, acc);
            } else {
                setTile(iter, Tile(value, state));//this also deletes the existing child node
            }
        } else {//tile
            if (LEVEL > level) {
                ChildT* child = new ChildT(xyz, getTile(iter).value, isTileOn(iter));
                acc.insert(xyz, child);
                setChild(iter, *child);
                child->addTileAndCache(level, xyz, value, state, acc);
            } else {
                setTile(iter, Tile(value, state));
            }
        }
    }
}


////////////////////////////////////////


/// @brief Return the leaf node that contains @a xyz, creating it (and any
/// intermediate branches) if it does not already exist.
template<typename ChildT>
inline typename ChildT::LeafNodeType*
RootNode<ChildT>::touchLeaf(const Coord& xyz)
{
    ChildT* child = nullptr;
    MapIter iter = this->findCoord(xyz);
    if (iter == mTable.end()) {
        child = new ChildT(xyz, mBackground, false);
        mTable[this->coordToKey(xyz)] = NodeStruct(*child);
    } else if (isChild(iter)) {
        child = &getChild(iter);
    } else {
        // Densify the tile so the leaf can be created beneath it.
        child = new ChildT(xyz, getTile(iter).value, isTileOn(iter));
        setChild(iter, *child);
    }
    return child->touchLeaf(xyz);
}


/// @brief Same as touchLeaf() but also records the traversed child node
/// in the accessor cache @a acc.
template<typename ChildT>
template<typename AccessorT>
inline typename ChildT::LeafNodeType*
RootNode<ChildT>::touchLeafAndCache(const Coord& xyz, AccessorT& acc)
{
    ChildT* child = nullptr;
    MapIter iter = this->findCoord(xyz);
    if (iter == mTable.end()) {
        child = new ChildT(xyz, mBackground, false);
        mTable[this->coordToKey(xyz)] = NodeStruct(*child);
    } else if (isChild(iter)) {
        child = &getChild(iter);
    } else {
        child = new ChildT(xyz, getTile(iter).value, isTileOn(iter));
        setChild(iter, *child);
    }
    acc.insert(xyz, child);
    return child->touchLeafAndCache(xyz, acc);
}


////////////////////////////////////////


/// @brief Return the node of type @c NodeT that contains @a xyz,
/// or @c nullptr if no such node exists.
template<typename ChildT>
template<typename NodeT>
inline NodeT*
RootNode<ChildT>::probeNode(const Coord& xyz)
{
    // NodeT must be a type that actually occurs in this tree configuration.
    if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) ||
        NodeT::LEVEL >
        ChildT::LEVEL) return nullptr;
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    MapIter iter = this->findCoord(xyz);
    if (iter == mTable.end() || isTile(iter)) return nullptr;
    ChildT* child = &getChild(iter);
    return (std::is_same<NodeT, ChildT>::value)
        ? reinterpret_cast<NodeT*>(child)
        : child->template probeNode<NodeT>(xyz);
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}


/// @brief Const version of probeNode().
template<typename ChildT>
template<typename NodeT>
inline const NodeT*
RootNode<ChildT>::probeConstNode(const Coord& xyz) const
{
    if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) ||
        NodeT::LEVEL > ChildT::LEVEL) return nullptr;
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    MapCIter iter = this->findCoord(xyz);
    if (iter == mTable.end() || isTile(iter)) return nullptr;
    const ChildT* child = &getChild(iter);
    return (std::is_same<NodeT, ChildT>::value)
        ? reinterpret_cast<const NodeT*>(child)
        : child->template probeConstNode<NodeT>(xyz);
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}


/// @brief Return the leaf node that contains @a xyz, or @c nullptr.
template<typename ChildT>
inline typename ChildT::LeafNodeType*
RootNode<ChildT>::probeLeaf(const Coord& xyz)
{
    return this->template probeNode<LeafNodeType>(xyz);
}


/// @brief Const version of probeLeaf().
template<typename ChildT>
inline const typename ChildT::LeafNodeType*
RootNode<ChildT>::probeConstLeaf(const Coord& xyz) const
{
    return this->template probeConstNode<LeafNodeType>(xyz);
}


/// @brief Caching version of probeLeaf().
template<typename ChildT>
template<typename AccessorT>
inline typename ChildT::LeafNodeType*
RootNode<ChildT>::probeLeafAndCache(const Coord& xyz, AccessorT& acc)
{
    return this->template probeNodeAndCache<LeafNodeType>(xyz, acc);
}


/// @brief Caching const version of probeLeaf().
template<typename ChildT>
template<typename AccessorT>
inline const typename ChildT::LeafNodeType*
RootNode<ChildT>::probeConstLeafAndCache(const Coord& xyz, AccessorT& acc) const
{
    return this->template probeConstNodeAndCache<LeafNodeType>(xyz, acc);
}


/// @brief Const overload that simply forwards to probeConstLeafAndCache().
template<typename ChildT>
template<typename AccessorT>
inline const typename ChildT::LeafNodeType*
RootNode<ChildT>::probeLeafAndCache(const Coord& xyz, AccessorT& acc) const
{
    return this->probeConstLeafAndCache(xyz, acc);
}


/// @brief Caching version of probeNode(): records the traversed child
/// in the accessor cache @a acc.
template<typename ChildT>
template<typename NodeT, typename AccessorT>
inline NodeT*
RootNode<ChildT>::probeNodeAndCache(const Coord& xyz, AccessorT& acc)
{
    if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) ||
        NodeT::LEVEL > ChildT::LEVEL) return nullptr;
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    MapIter iter = this->findCoord(xyz);
    if (iter == mTable.end() || isTile(iter)) return nullptr;
    ChildT* child = &getChild(iter);
    acc.insert(xyz, child);
    return (std::is_same<NodeT, ChildT>::value)
        ? reinterpret_cast<NodeT*>(child)
        : child->template probeNodeAndCache<NodeT>(xyz, acc);
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}


/// @brief Caching const version of probeNode().
template<typename ChildT>
template<typename NodeT,typename AccessorT>
inline const NodeT*
RootNode<ChildT>::probeConstNodeAndCache(const Coord& xyz, AccessorT& acc) const
{
    if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) ||
        NodeT::LEVEL > ChildT::LEVEL) return nullptr;
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    MapCIter iter = this->findCoord(xyz);
    if (iter == mTable.end() || isTile(iter)) return nullptr;
    const ChildT* child = &getChild(iter);
    acc.insert(xyz, child);
    return (std::is_same<NodeT, ChildT>::value)
        ? reinterpret_cast<const NodeT*>(child)
        : child->template probeConstNodeAndCache<NodeT>(xyz, acc);
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}


////////////////////////////////////////


/// @brief Append pointers to all nodes of the array's element type to @a array,
/// descending into child branches as needed.
template<typename ChildT>
template<typename ArrayT>
inline void
RootNode<ChildT>::getNodes(ArrayT& array)
{
    using NodePtr = typename ArrayT::value_type;
    static_assert(std::is_pointer<NodePtr>::value,
        "argument to getNodes() must be a pointer array");
    using NodeType = typename std::remove_pointer<NodePtr>::type;
    using NonConstNodeType = typename std::remove_const<NodeType>::type;
    static_assert(NodeChainType::template Contains<NonConstNodeType>,
        "can't extract non-const nodes from a const tree");
    using ArrayChildT = typename std::conditional<
        std::is_const<NodeType>::value, const ChildT, ChildT>::type;

    for (MapIter iter=mTable.begin(); iter!=mTable.end(); ++iter) {
        if (ChildT* child = iter->second.child) {
            OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
            if (std::is_same<NodePtr, ArrayChildT*>::value) {
                array.push_back(reinterpret_cast<NodePtr>(iter->second.child));
            } else {
                child->getNodes(array);//descent
            }
            OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
        }
    }
}


/// @brief Const version of getNodes(); the array must hold const node pointers.
template<typename ChildT>
template<typename ArrayT>
inline void
RootNode<ChildT>::getNodes(ArrayT& array) const
{
    using NodePtr = typename ArrayT::value_type;
    static_assert(std::is_pointer<NodePtr>::value,
        "argument to getNodes() must be a pointer array");
    using NodeType = typename std::remove_pointer<NodePtr>::type;
    static_assert(std::is_const<NodeType>::value,
        "argument to getNodes() must be an array of const node pointers");
    using NonConstNodeType = typename std::remove_const<NodeType>::type;
    static_assert(NodeChainType::template Contains<NonConstNodeType>,
        "can't extract non-const nodes from a const tree");

    for (MapCIter iter=mTable.begin(); iter!=mTable.end(); ++iter) {
        if (const ChildNodeType *child = iter->second.child) {
            OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
            if (std::is_same<NodePtr, const ChildT*>::value) {
                array.push_back(reinterpret_cast<NodePtr>(iter->second.child));
            } else {
                child->getNodes(array);//descent
            }
            OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
        }
    }
}


////////////////////////////////////////


/// @brief Steal all nodes of the array's element type into @a array,
/// replacing each stolen root-level child with a tile of the given value/state.
template<typename ChildT>
template<typename ArrayT>
inline void
RootNode<ChildT>::stealNodes(ArrayT& array, const ValueType& value, bool state)
{
    using NodePtr = typename ArrayT::value_type;
    static_assert(std::is_pointer<NodePtr>::value,
        "argument to stealNodes() must be a pointer array");
    using NodeType = typename std::remove_pointer<NodePtr>::type;
    using NonConstNodeType = typename std::remove_const<NodeType>::type;
    static_assert(NodeChainType::template Contains<NonConstNodeType>,
        "can't extract non-const nodes from a const tree");
    using ArrayChildT = typename std::conditional<
        std::is_const<NodeType>::value, const ChildT, ChildT>::type;

    for (MapIter iter=mTable.begin(); iter!=mTable.end(); ++iter) {
        if (ChildT* child = iter->second.child) {
            OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
            if (std::is_same<NodePtr, ArrayChildT*>::value) {
                array.push_back(reinterpret_cast<NodePtr>(&stealChild(iter, Tile(value, state))));
            } else {
                child->stealNodes(array, value, state);//descent
            }
            OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
        }
    }
}


////////////////////////////////////////


/// @brief Merge @a other into this tree according to the merge @c Policy,
/// stealing (not copying) the other tree's nodes where possible.
/// @note @a other is emptied by this operation.
template<typename ChildT>
template<MergePolicy Policy>
inline void
RootNode<ChildT>::merge(RootNode& other)
{
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    switch (Policy) {

    default:
    case MERGE_ACTIVE_STATES:
        for (MapIter i = other.mTable.begin(), e = other.mTable.end(); i != e; ++i) {
            MapIter j = mTable.find(i->first);
            if (other.isChild(i)) {
                if (j == mTable.end()) { // insert other node's child
                    ChildNodeType& child = stealChild(i, Tile(other.mBackground, /*on=*/false));
                    child.resetBackground(other.mBackground, mBackground);
                    mTable[i->first] = NodeStruct(child);
                } else if (isTile(j)) {
                    if (isTileOff(j)) { // replace inactive tile with other node's child
                        ChildNodeType& child = stealChild(i, Tile(other.mBackground, /*on=*/false));
                        child.resetBackground(other.mBackground, mBackground);
                        setChild(j, child);
                    }
                } else { // merge both child nodes
                    getChild(j).template merge<MERGE_ACTIVE_STATES>(getChild(i),
                        other.mBackground, mBackground);
                }
            } else if (other.isTileOn(i)) {
                if (j == mTable.end()) { // insert other node's active tile
                    mTable[i->first] = i->second;
                } else if (!isTileOn(j)) {
                    // Replace anything except an active tile with the other node's active tile.
                    setTile(j, Tile(other.getTile(i).value, true));
                }
            }
        }
        break;

    case MERGE_NODES:
        for (MapIter i = other.mTable.begin(), e = other.mTable.end(); i != e; ++i) {
            MapIter j = mTable.find(i->first);
            if (other.isChild(i)) {
                if (j == mTable.end()) { // insert other node's child
                    ChildNodeType& child = stealChild(i, Tile(other.mBackground, /*on=*/false));
                    child.resetBackground(other.mBackground, mBackground);
                    mTable[i->first] = NodeStruct(child);
                } else if (isTile(j)) { // replace tile with other node's child
                    ChildNodeType& child = stealChild(i, Tile(other.mBackground, /*on=*/false));
                    child.resetBackground(other.mBackground, mBackground);
                    setChild(j, child);
                } else { // merge both child nodes
                    getChild(j).template merge<MERGE_NODES>(
                        getChild(i), other.mBackground, mBackground);
                }
            }
        }
        break;

    case MERGE_ACTIVE_STATES_AND_NODES:
        for (MapIter i = other.mTable.begin(), e = other.mTable.end(); i != e; ++i) {
            MapIter j = mTable.find(i->first);
            if (other.isChild(i)) {
                if (j == mTable.end()) {
                    // Steal and insert the other node's child.
                    ChildNodeType& child = stealChild(i, Tile(other.mBackground, /*on=*/false));
                    child.resetBackground(other.mBackground, mBackground);
                    mTable[i->first] = NodeStruct(child);
                } else if (isTile(j)) {
                    // Replace this node's tile with the other node's child.
                    ChildNodeType& child = stealChild(i, Tile(other.mBackground, /*on=*/false));
                    child.resetBackground(other.mBackground, mBackground);
                    // Copy the tile BEFORE setChild() destroys the table entry it lives in.
                    const Tile tile = getTile(j);
                    setChild(j, child);
                    if (tile.active) {
                        // Merge the other node's child with this node's active tile.
                        child.template merge<MERGE_ACTIVE_STATES_AND_NODES>(
                            tile.value, tile.active);
                    }
                } else /*if (isChild(j))*/ {
                    // Merge the other node's child into this node's child.
                    getChild(j).template merge<MERGE_ACTIVE_STATES_AND_NODES>(getChild(i),
                        other.mBackground, mBackground);
                }
            } else if (other.isTileOn(i)) {
                if (j == mTable.end()) {
                    // Insert a copy of the other node's active tile.
                    mTable[i->first] = i->second;
                } else if (isTileOff(j)) {
                    // Replace this node's inactive tile with a copy of the other's active tile.
                    setTile(j, Tile(other.getTile(i).value, true));
                } else if (isChild(j)) {
                    // Merge the other node's active tile into this node's child.
                    const Tile& tile = getTile(i);
                    getChild(j).template merge<MERGE_ACTIVE_STATES_AND_NODES>(
                        tile.value, tile.active);
                }
            } // else if (other.isTileOff(i)) {} // ignore the other node's inactive tiles
        }
        break;
    }

    // Empty the other tree so as not to leave it in a partially cannibalized state.
    other.clear();
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}


////////////////////////////////////////


/// @brief Union this tree's set of active values with that of @a other
/// (values are preserved; only topology/active states are merged).
/// NOTE(review): fragment — this function continues past the end of this excerpt.
template<typename ChildT>
template<typename OtherChildType>
inline void
RootNode<ChildT>::topologyUnion(const RootNode<OtherChildType>& other)
{
    using OtherRootT = RootNode<OtherChildType>;
    using OtherCIterT = typename OtherRootT::MapCIter;

    enforceSameConfiguration(other);

    for (OtherCIterT i = other.mTable.begin(), e = other.mTable.end(); i != e; ++i) {
        MapIter j = mTable.find(i->first);
        if (other.isChild(i)) {
            if (j == mTable.end()) { // create child branch with identical topology
                mTable[i->first] = NodeStruct(
                    *(new ChildT(other.getChild(i), mBackground, TopologyCopy())));
            } else if (this->isChild(j)) { // union with child branch
                this->getChild(j).topologyUnion(other.getChild(i));
            } else {// this is a tile so replace it with a child branch with identical topology
                ChildT* child = new ChildT(
                    other.getChild(i), this->getTile(j).value, TopologyCopy());
                if (this->isTileOn(j)) child->setValuesOn();//this is an active tile
                this->setChild(j, *child);
            }
        }
else if (other.isTileOn(i)) { // other is an active tile if (j == mTable.end()) { // insert an active tile mTable[i->first] = NodeStruct(Tile(mBackground, true)); } else if (this->isChild(j)) { this->getChild(j).setValuesOn(); } else if (this->isTileOff(j)) { this->setTile(j, Tile(this->getTile(j).value, true)); } } } } template<typename ChildT> template<typename OtherChildType> inline void RootNode<ChildT>::topologyIntersection(const RootNode<OtherChildType>& other) { using OtherRootT = RootNode<OtherChildType>; using OtherCIterT = typename OtherRootT::MapCIter; enforceSameConfiguration(other); std::set<Coord> tmp;//keys to erase for (MapIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { OtherCIterT j = other.mTable.find(i->first); if (this->isChild(i)) { if (j == other.mTable.end() || other.isTileOff(j)) { tmp.insert(i->first);//delete child branch } else if (other.isChild(j)) { // intersect with child branch this->getChild(i).topologyIntersection(other.getChild(j), mBackground); } } else if (this->isTileOn(i)) { if (j == other.mTable.end() || other.isTileOff(j)) { this->setTile(i, Tile(this->getTile(i).value, false));//turn inactive } else if (other.isChild(j)) { //replace with a child branch with identical topology ChildT* child = new ChildT(other.getChild(j), this->getTile(i).value, TopologyCopy()); this->setChild(i, *child); } } } for (std::set<Coord>::iterator i = tmp.begin(), e = tmp.end(); i != e; ++i) { MapIter it = this->findCoord(*i); setTile(it, Tile()); // delete any existing child node first mTable.erase(it); } } template<typename ChildT> template<typename OtherChildType> inline void RootNode<ChildT>::topologyDifference(const RootNode<OtherChildType>& other) { using OtherRootT = RootNode<OtherChildType>; using OtherCIterT = typename OtherRootT::MapCIter; enforceSameConfiguration(other); for (OtherCIterT i = other.mTable.begin(), e = other.mTable.end(); i != e; ++i) { MapIter j = mTable.find(i->first); if (other.isChild(i)) { if (j == 
mTable.end() || this->isTileOff(j)) { //do nothing } else if (this->isChild(j)) { // difference with child branch this->getChild(j).topologyDifference(other.getChild(i), mBackground); } else if (this->isTileOn(j)) { // this is an active tile so create a child node and descent ChildT* child = new ChildT(j->first, this->getTile(j).value, true); child->topologyDifference(other.getChild(i), mBackground); this->setChild(j, *child); } } else if (other.isTileOn(i)) { // other is an active tile if (j == mTable.end() || this->isTileOff(j)) { // do nothing } else if (this->isChild(j)) { setTile(j, Tile()); // delete any existing child node first mTable.erase(j); } else if (this->isTileOn(j)) { this->setTile(j, Tile(this->getTile(j).value, false)); } } } } //////////////////////////////////////// template<typename ChildT> template<typename CombineOp> inline void RootNode<ChildT>::combine(RootNode& other, CombineOp& op, bool prune) { CombineArgs<ValueType> args; CoordSet keys; this->insertKeys(keys); other.insertKeys(keys); for (CoordSetCIter i = keys.begin(), e = keys.end(); i != e; ++i) { MapIter iter = findOrAddCoord(*i), otherIter = other.findOrAddCoord(*i); if (isTile(iter) && isTile(otherIter)) { // Both this node and the other node have constant values (tiles). // Combine the two values and store the result as this node's new tile value. op(args.setARef(getTile(iter).value) .setAIsActive(isTileOn(iter)) .setBRef(getTile(otherIter).value) .setBIsActive(isTileOn(otherIter))); setTile(iter, Tile(args.result(), args.resultIsActive())); } else if (isChild(iter) && isTile(otherIter)) { // Combine this node's child with the other node's constant value. 
ChildT& child = getChild(iter); child.combine(getTile(otherIter).value, isTileOn(otherIter), op); } else if (isTile(iter) && isChild(otherIter)) { // Combine this node's constant value with the other node's child, // but use a new functor in which the A and B values are swapped, // since the constant value is the A value, not the B value. SwappedCombineOp<ValueType, CombineOp> swappedOp(op); ChildT& child = getChild(otherIter); child.combine(getTile(iter).value, isTileOn(iter), swappedOp); // Steal the other node's child. setChild(iter, stealChild(otherIter, Tile())); } else /*if (isChild(iter) && isChild(otherIter))*/ { // Combine this node's child with the other node's child. ChildT &child = getChild(iter), &otherChild = getChild(otherIter); child.combine(otherChild, op); } if (prune && isChild(iter)) getChild(iter).prune(); } // Combine background values. op(args.setARef(mBackground).setBRef(other.mBackground)); mBackground = args.result(); // Empty the other tree so as not to leave it in a partially cannibalized state. other.clear(); } //////////////////////////////////////// // This helper class is a friend of RootNode and is needed so that combine2 // can be specialized for compatible and incompatible pairs of RootNode types. template<typename CombineOp, typename RootT, typename OtherRootT, bool Compatible = false> struct RootNodeCombineHelper { static inline void combine2(RootT& self, const RootT&, const OtherRootT& other1, CombineOp&, bool) { // If the two root nodes have different configurations or incompatible ValueTypes, // throw an exception. 
self.enforceSameConfiguration(other1); self.enforceCompatibleValueTypes(other1); // One of the above two tests should throw, so we should never get here: std::ostringstream ostr; ostr << "cannot combine a " << typeid(OtherRootT).name() << " into a " << typeid(RootT).name(); OPENVDB_THROW(TypeError, ostr.str()); } }; // Specialization for root nodes of compatible types template<typename CombineOp, typename RootT, typename OtherRootT> struct RootNodeCombineHelper<CombineOp, RootT, OtherRootT, /*Compatible=*/true> { static inline void combine2(RootT& self, const RootT& other0, const OtherRootT& other1, CombineOp& op, bool prune) { self.doCombine2(other0, other1, op, prune); } }; template<typename ChildT> template<typename CombineOp, typename OtherRootNode> inline void RootNode<ChildT>::combine2(const RootNode& other0, const OtherRootNode& other1, CombineOp& op, bool prune) { using OtherValueType = typename OtherRootNode::ValueType; static const bool compatible = (SameConfiguration<OtherRootNode>::value && CanConvertType</*from=*/OtherValueType, /*to=*/ValueType>::value); RootNodeCombineHelper<CombineOp, RootNode, OtherRootNode, compatible>::combine2( *this, other0, other1, op, prune); } template<typename ChildT> template<typename CombineOp, typename OtherRootNode> inline void RootNode<ChildT>::doCombine2(const RootNode& other0, const OtherRootNode& other1, CombineOp& op, bool prune) { enforceSameConfiguration(other1); using OtherValueT = typename OtherRootNode::ValueType; using OtherTileT = typename OtherRootNode::Tile; using OtherNodeStructT = typename OtherRootNode::NodeStruct; using OtherMapCIterT = typename OtherRootNode::MapCIter; CombineArgs<ValueType, OtherValueT> args; CoordSet keys; other0.insertKeys(keys); other1.insertKeys(keys); const NodeStruct bg0(Tile(other0.mBackground, /*active=*/false)); const OtherNodeStructT bg1(OtherTileT(other1.mBackground, /*active=*/false)); for (CoordSetCIter i = keys.begin(), e = keys.end(); i != e; ++i) { MapIter thisIter = 
this->findOrAddCoord(*i); MapCIter iter0 = other0.findKey(*i); OtherMapCIterT iter1 = other1.findKey(*i); const NodeStruct& ns0 = (iter0 != other0.mTable.end()) ? iter0->second : bg0; const OtherNodeStructT& ns1 = (iter1 != other1.mTable.end()) ? iter1->second : bg1; if (ns0.isTile() && ns1.isTile()) { // Both input nodes have constant values (tiles). // Combine the two values and add a new tile to this node with the result. op(args.setARef(ns0.tile.value) .setAIsActive(ns0.isTileOn()) .setBRef(ns1.tile.value) .setBIsActive(ns1.isTileOn())); setTile(thisIter, Tile(args.result(), args.resultIsActive())); } else { if (!isChild(thisIter)) { // Add a new child with the same coordinates, etc. as the other node's child. const Coord& childOrigin = ns0.isChild() ? ns0.child->origin() : ns1.child->origin(); setChild(thisIter, *(new ChildT(childOrigin, getTile(thisIter).value))); } ChildT& child = getChild(thisIter); if (ns0.isTile()) { // Combine node1's child with node0's constant value // and write the result into this node's child. child.combine2(ns0.tile.value, *ns1.child, ns0.isTileOn(), op); } else if (ns1.isTile()) { // Combine node0's child with node1's constant value // and write the result into this node's child. child.combine2(*ns0.child, ns1.tile.value, ns1.isTileOn(), op); } else { // Combine node0's child with node1's child // and write the result into this node's child. child.combine2(*ns0.child, *ns1.child, op); } } if (prune && isChild(thisIter)) getChild(thisIter).prune(); } // Combine background values. 
op(args.setARef(other0.mBackground).setBRef(other1.mBackground)); mBackground = args.result(); } //////////////////////////////////////// template<typename ChildT> template<typename BBoxOp> inline void RootNode<ChildT>::visitActiveBBox(BBoxOp& op) const { const bool descent = op.template descent<LEVEL>(); for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (this->isTileOff(i)) continue; if (this->isChild(i) && descent) { this->getChild(i).visitActiveBBox(op); } else { op.template operator()<LEVEL>(CoordBBox::createCube(i->first, ChildT::DIM)); } } } template<typename ChildT> template<typename VisitorOp> inline void RootNode<ChildT>::visit(VisitorOp& op) { doVisit<RootNode, VisitorOp, ChildAllIter>(*this, op); } template<typename ChildT> template<typename VisitorOp> inline void RootNode<ChildT>::visit(VisitorOp& op) const { doVisit<const RootNode, VisitorOp, ChildAllCIter>(*this, op); } template<typename ChildT> template<typename RootNodeT, typename VisitorOp, typename ChildAllIterT> inline void RootNode<ChildT>::doVisit(RootNodeT& self, VisitorOp& op) { typename RootNodeT::ValueType val; for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { if (op(iter)) continue; if (typename ChildAllIterT::ChildNodeType* child = iter.probeChild(val)) { child->visit(op); } } } //////////////////////////////////////// template<typename ChildT> template<typename OtherRootNodeType, typename VisitorOp> inline void RootNode<ChildT>::visit2(OtherRootNodeType& other, VisitorOp& op) { doVisit2<RootNode, OtherRootNodeType, VisitorOp, ChildAllIter, typename OtherRootNodeType::ChildAllIter>(*this, other, op); } template<typename ChildT> template<typename OtherRootNodeType, typename VisitorOp> inline void RootNode<ChildT>::visit2(OtherRootNodeType& other, VisitorOp& op) const { doVisit2<const RootNode, OtherRootNodeType, VisitorOp, ChildAllCIter, typename OtherRootNodeType::ChildAllCIter>(*this, other, op); } template<typename ChildT> template< typename RootNodeT, 
typename OtherRootNodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> inline void RootNode<ChildT>::doVisit2(RootNodeT& self, OtherRootNodeT& other, VisitorOp& op) { enforceSameConfiguration(other); typename RootNodeT::ValueType val; typename OtherRootNodeT::ValueType otherVal; // The two nodes are required to have corresponding table entries, // but since that might require background tiles to be added to one or both, // and the nodes might be const, we operate on shallow copies of the nodes instead. RootNodeT copyOfSelf(self.mBackground); copyOfSelf.mTable = self.mTable; OtherRootNodeT copyOfOther(other.mBackground); copyOfOther.mTable = other.mTable; // Add background tiles to both nodes as needed. CoordSet keys; self.insertKeys(keys); other.insertKeys(keys); for (CoordSetCIter i = keys.begin(), e = keys.end(); i != e; ++i) { copyOfSelf.findOrAddCoord(*i); copyOfOther.findOrAddCoord(*i); } ChildAllIterT iter = copyOfSelf.beginChildAll(); OtherChildAllIterT otherIter = copyOfOther.beginChildAll(); for ( ; iter && otherIter; ++iter, ++otherIter) { const size_t skipBranch = static_cast<size_t>(op(iter, otherIter)); typename ChildAllIterT::ChildNodeType* child = (skipBranch & 1U) ? nullptr : iter.probeChild(val); typename OtherChildAllIterT::ChildNodeType* otherChild = (skipBranch & 2U) ? nullptr : otherIter.probeChild(otherVal); if (child != nullptr && otherChild != nullptr) { child->visit2Node(*otherChild, op); } else if (child != nullptr) { child->visit2(otherIter, op); } else if (otherChild != nullptr) { otherChild->visit2(iter, op, /*otherIsLHS=*/true); } } // Remove any background tiles that were added above, // as well as any that were created by the visitors. copyOfSelf.eraseBackgroundTiles(); copyOfOther.eraseBackgroundTiles(); // If either input node is non-const, replace its table with // the (possibly modified) copy. 
self.resetTable(copyOfSelf.mTable); other.resetTable(copyOfOther.mTable); } } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_ROOTNODE_HAS_BEEN_INCLUDED
133,960
C
37.395242
101
0.633577
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/LeafNodeMask.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_TREE_LEAF_NODE_MASK_HAS_BEEN_INCLUDED #define OPENVDB_TREE_LEAF_NODE_MASK_HAS_BEEN_INCLUDED #include <openvdb/version.h> #include <openvdb/Types.h> #include <openvdb/io/Compression.h> // for io::readData(), etc. #include <openvdb/math/Math.h> // for math::isZero() #include <openvdb/util/NodeMasks.h> #include "LeafNode.h" #include "Iterator.h" #include <iostream> #include <sstream> #include <string> #include <type_traits> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { /// @brief LeafNode specialization for values of type ValueMask that encodes both /// the active states and the boolean values of (2^Log2Dim)^3 voxels /// in a single bit mask, i.e. voxel values and states are indistinguishable! template<Index Log2Dim> class LeafNode<ValueMask, Log2Dim> { public: using LeafNodeType = LeafNode<ValueMask, Log2Dim>; using BuildType = ValueMask;// this is a rare case where using ValueType = bool;// value type != build type using Buffer = LeafBuffer<ValueType, Log2Dim>;// buffer uses the bool specialization using NodeMaskType = util::NodeMask<Log2Dim>; using Ptr = SharedPtr<LeafNodeType>; // These static declarations must be on separate lines to avoid VC9 compiler errors. static const Index LOG2DIM = Log2Dim; // needed by parent nodes static const Index TOTAL = Log2Dim; // needed by parent nodes static const Index DIM = 1 << TOTAL; // dimension along one coordinate direction static const Index NUM_VALUES = 1 << 3 * Log2Dim; static const Index NUM_VOXELS = NUM_VALUES; // total number of voxels represented by this node static const Index SIZE = NUM_VALUES; static const Index LEVEL = 0; // level 0 = leaf /// @brief ValueConverter<T>::Type is the type of a LeafNode having the same /// dimensions as this node but a different value type, T. 
template<typename OtherValueType> struct ValueConverter { using Type = LeafNode<OtherValueType, Log2Dim>; }; /// @brief SameConfiguration<OtherNodeType>::value is @c true if and only if /// OtherNodeType is the type of a LeafNode with the same dimensions as this node. template<typename OtherNodeType> struct SameConfiguration { static const bool value = SameLeafConfig<LOG2DIM, OtherNodeType>::value; }; /// Default constructor LeafNode(); /// Constructor /// @param xyz the coordinates of a voxel that lies within the node /// @param value the initial value = state for all of this node's voxels /// @param dummy dummy value explicit LeafNode(const Coord& xyz, bool value = false, bool dummy = false); /// "Partial creation" constructor used during file input LeafNode(PartialCreate, const Coord& xyz, bool value = false, bool dummy = false); /// Deep copy constructor LeafNode(const LeafNode&); /// Value conversion copy constructor template<typename OtherValueType> explicit LeafNode(const LeafNode<OtherValueType, Log2Dim>& other); /// Topology copy constructor template<typename ValueType> LeafNode(const LeafNode<ValueType, Log2Dim>& other, TopologyCopy); //@{ /// @brief Topology copy constructor /// @note This variant exists mainly to enable template instantiation. template<typename ValueType> LeafNode(const LeafNode<ValueType, Log2Dim>& other, bool offValue, bool onValue, TopologyCopy); template<typename ValueType> LeafNode(const LeafNode<ValueType, Log2Dim>& other, bool background, TopologyCopy); //@} /// Destructor ~LeafNode(); // // Statistics // /// Return log2 of the size of the buffer storage. static Index log2dim() { return Log2Dim; } /// Return the number of voxels in each dimension. 
static Index dim() { return DIM; } /// Return the total number of voxels represented by this LeafNode static Index size() { return SIZE; } /// Return the total number of voxels represented by this LeafNode static Index numValues() { return SIZE; } /// Return the level of this node, which by definition is zero for LeafNodes static Index getLevel() { return LEVEL; } /// Append the Log2Dim of this LeafNode to the specified vector static void getNodeLog2Dims(std::vector<Index>& dims) { dims.push_back(Log2Dim); } /// Return the dimension of child nodes of this LeafNode, which is one for voxels. static Index getChildDim() { return 1; } /// Return the leaf count for this node, which is one. static Index32 leafCount() { return 1; } /// no-op void nodeCount(std::vector<Index32> &) const {} /// Return the non-leaf count for this node, which is zero. static Index32 nonLeafCount() { return 0; } /// Return the number of active voxels. Index64 onVoxelCount() const { return mBuffer.mData.countOn(); } /// Return the number of inactive voxels. Index64 offVoxelCount() const { return mBuffer.mData.countOff(); } Index64 onLeafVoxelCount() const { return this->onVoxelCount(); } Index64 offLeafVoxelCount() const { return this->offVoxelCount(); } static Index64 onTileCount() { return 0; } static Index64 offTileCount() { return 0; } /// Return @c true if this node has no active voxels. bool isEmpty() const { return mBuffer.mData.isOff(); } /// Return @c true if this node only contains active voxels. bool isDense() const { return mBuffer.mData.isOn(); } /// @brief Return @c true if memory for this node's buffer has been allocated. /// @details Currently, boolean leaf nodes don't support partial creation, /// so this always returns @c true. bool isAllocated() const { return true; } /// @brief Allocate memory for this node's buffer if it has not already been allocated. /// @details Currently, boolean leaf nodes don't support partial creation, /// so this has no effect. 
bool allocate() { return true; } /// Return the memory in bytes occupied by this node. Index64 memUsage() const; /// Expand the given bounding box so that it includes this leaf node's active voxels. /// If visitVoxels is false this LeafNode will be approximated as dense, i.e. with all /// voxels active. Else the individual active voxels are visited to produce a tight bbox. void evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels = true) const; /// @brief Return the bounding box of this node, i.e., the full index space /// spanned by this leaf node. CoordBBox getNodeBoundingBox() const { return CoordBBox::createCube(mOrigin, DIM); } /// Set the grid index coordinates of this node's local origin. void setOrigin(const Coord& origin) { mOrigin = origin; } //@{ /// Return the grid index coordinates of this node's local origin. const Coord& origin() const { return mOrigin; } void getOrigin(Coord& origin) const { origin = mOrigin; } void getOrigin(Int32& x, Int32& y, Int32& z) const { mOrigin.asXYZ(x, y, z); } //@} /// Return the linear table offset of the given global or local coordinates. static Index coordToOffset(const Coord& xyz); /// @brief Return the local coordinates for a linear table offset, /// where offset 0 has coordinates (0, 0, 0). static Coord offsetToLocalCoord(Index n); /// Return the global coordinates for a linear table offset. Coord offsetToGlobalCoord(Index n) const; /// Return a string representation of this node. std::string str() const; /// @brief Return @c true if the given node (which may have a different @c ValueType /// than this node) has the same active value topology as this node. template<typename OtherType, Index OtherLog2Dim> bool hasSameTopology(const LeafNode<OtherType, OtherLog2Dim>* other) const; /// Check for buffer equivalence by value. 
bool operator==(const LeafNode&) const; bool operator!=(const LeafNode&) const; // // Buffer management // /// @brief Exchange this node's data buffer with the given data buffer /// without changing the active states of the values. void swap(Buffer& other) { mBuffer.swap(other); } const Buffer& buffer() const { return mBuffer; } Buffer& buffer() { return mBuffer; } // // I/O methods // /// Read in just the topology. void readTopology(std::istream&, bool fromHalf = false); /// Write out just the topology. void writeTopology(std::ostream&, bool toHalf = false) const; /// Read in the topology and the origin. void readBuffers(std::istream&, bool fromHalf = false); void readBuffers(std::istream& is, const CoordBBox&, bool fromHalf = false); /// Write out the topology and the origin. void writeBuffers(std::ostream&, bool toHalf = false) const; // // Accessor methods // /// Return the value of the voxel at the given coordinates. const bool& getValue(const Coord& xyz) const; /// Return the value of the voxel at the given offset. const bool& getValue(Index offset) const; /// @brief Return @c true if the voxel at the given coordinates is active. /// @param xyz the coordinates of the voxel to be probed /// @param[out] val the value of the voxel at the given coordinates bool probeValue(const Coord& xyz, bool& val) const; /// Return the level (0) at which leaf node values reside. static Index getValueLevel(const Coord&) { return LEVEL; } /// Set the active state of the voxel at the given coordinates but don't change its value. void setActiveState(const Coord& xyz, bool on); /// Set the active state of the voxel at the given offset but don't change its value. void setActiveState(Index offset, bool on) { assert(offset<SIZE); mBuffer.mData.set(offset, on); } /// Set the value of the voxel at the given coordinates but don't change its active state. void setValueOnly(const Coord& xyz, bool val); /// Set the value of the voxel at the given offset but don't change its active state. 
void setValueOnly(Index offset, bool val) { assert(offset<SIZE); mBuffer.setValue(offset,val); } /// Mark the voxel at the given coordinates as inactive but don't change its value. void setValueOff(const Coord& xyz) { mBuffer.mData.setOff(this->coordToOffset(xyz)); } /// Mark the voxel at the given offset as inactive but don't change its value. void setValueOff(Index offset) { assert(offset < SIZE); mBuffer.mData.setOff(offset); } /// Set the value of the voxel at the given coordinates and mark the voxel as inactive. void setValueOff(const Coord& xyz, bool val); /// Set the value of the voxel at the given offset and mark the voxel as inactive. void setValueOff(Index offset, bool val); /// Mark the voxel at the given coordinates as active but don't change its value. void setValueOn(const Coord& xyz) { mBuffer.mData.setOn(this->coordToOffset(xyz)); } /// Mark the voxel at the given offset as active but don't change its value. void setValueOn(Index offset) { assert(offset < SIZE); mBuffer.mData.setOn(offset); } /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValueOn(const Coord& xyz, bool val); /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValue(const Coord& xyz, bool val) { this->setValueOn(xyz, val); } /// Set the value of the voxel at the given offset and mark the voxel as active. void setValueOn(Index offset, bool val); /// @brief Apply a functor to the value of the voxel at the given offset /// and mark the voxel as active. template<typename ModifyOp> void modifyValue(Index offset, const ModifyOp& op); /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. template<typename ModifyOp> void modifyValue(const Coord& xyz, const ModifyOp& op); /// Apply a functor to the voxel at the given coordinates. 
template<typename ModifyOp> void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op); /// Mark all voxels as active but don't change their values. void setValuesOn() { mBuffer.mData.setOn(); } /// Mark all voxels as inactive but don't change their values. void setValuesOff() { mBuffer.mData.setOff(); } /// Return @c true if the voxel at the given coordinates is active. bool isValueOn(const Coord& xyz) const { return mBuffer.mData.isOn(this->coordToOffset(xyz)); } /// Return @c true if the voxel at the given offset is active. bool isValueOn(Index offset) const { assert(offset < SIZE); return mBuffer.mData.isOn(offset); } /// Return @c false since leaf nodes never contain tiles. static bool hasActiveTiles() { return false; } /// Set all voxels that lie outside the given axis-aligned box to the background. void clip(const CoordBBox&, bool background); /// Set all voxels within an axis-aligned box to the specified value. void fill(const CoordBBox& bbox, bool value, bool = false); /// Set all voxels within an axis-aligned box to the specified value. void denseFill(const CoordBBox& bbox, bool value, bool = false) { this->fill(bbox, value); } /// Set the state of all voxels to the specified active state. void fill(const bool& value, bool dummy = false); /// @brief Copy into a dense grid the values of the voxels that lie within /// a given bounding box. /// /// @param bbox inclusive bounding box of the voxels to be copied into the dense grid /// @param dense dense grid with a stride in @e z of one (see tools::Dense /// in tools/Dense.h for the required API) /// /// @note @a bbox is assumed to be identical to or contained in the coordinate domains /// of both the dense grid and this node, i.e., no bounds checking is performed. /// @note Consider using tools::CopyToDense in tools/Dense.h /// instead of calling this method directly. 
    template<typename DenseT> void copyToDense(const CoordBBox& bbox, DenseT& dense) const;

    /// @brief Copy from a dense grid into this node the values of the voxels
    /// that lie within a given bounding box.
    /// @details Only values that are different (by more than the given tolerance)
    /// from the background value will be active. Other values are inactive
    /// and truncated to the background value.
    ///
    /// @param bbox        inclusive bounding box of the voxels to be copied into this node
    /// @param dense       dense grid with a stride in @e z of one (see tools::Dense
    ///                    in tools/Dense.h for the required API)
    /// @param background  background value of the tree that this node belongs to
    /// @param tolerance   tolerance within which a value equals the background value
    ///
    /// @note @a bbox is assumed to be identical to or contained in the coordinate domains
    /// of both the dense grid and this node, i.e., no bounds checking is performed.
    /// @note Consider using tools::CopyFromDense in tools/Dense.h
    /// instead of calling this method directly.
    template<typename DenseT>
    void copyFromDense(const CoordBBox& bbox, const DenseT& dense, bool background, bool tolerance);

    //
    // *AndCache methods: thin forwarding wrappers used internally by ValueAccessor.
    // Leaf nodes are the bottom of the tree, so there is nothing to cache; the
    // AccessorT argument is accepted (and ignored) only to satisfy the accessor API.
    //

    /// @brief Return the value of the voxel at the given coordinates.
    /// @note Used internally by ValueAccessor.
    template<typename AccessorT>
    const bool& getValueAndCache(const Coord& xyz, AccessorT&) const {return this->getValue(xyz);}

    /// @brief Return @c true if the voxel at the given coordinates is active.
    /// @note Used internally by ValueAccessor.
    template<typename AccessorT>
    bool isValueOnAndCache(const Coord& xyz, AccessorT&) const { return this->isValueOn(xyz); }

    /// @brief Change the value of the voxel at the given coordinates and mark it as active.
    /// @note Used internally by ValueAccessor.
    template<typename AccessorT>
    void setValueAndCache(const Coord& xyz, bool val, AccessorT&) { this->setValueOn(xyz, val); }

    /// @brief Change the value of the voxel at the given coordinates
    /// but preserve its state.
    /// @note Used internally by ValueAccessor.
    template<typename AccessorT>
    void setValueOnlyAndCache(const Coord& xyz, bool val, AccessorT&) {this->setValueOnly(xyz,val);}

    /// @brief Change the value of the voxel at the given coordinates and mark it as inactive.
    /// @note Used internally by ValueAccessor.
    template<typename AccessorT>
    void setValueOffAndCache(const Coord& xyz, bool value, AccessorT&)
    {
        this->setValueOff(xyz, value);
    }

    /// @brief Apply a functor to the value of the voxel at the given coordinates
    /// and mark the voxel as active.
    /// @note Used internally by ValueAccessor.
    template<typename ModifyOp, typename AccessorT>
    void modifyValueAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&)
    {
        this->modifyValue(xyz, op);
    }

    /// Apply a functor to the voxel at the given coordinates.
    /// @note Used internally by ValueAccessor.
    template<typename ModifyOp, typename AccessorT>
    void modifyValueAndActiveStateAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&)
    {
        this->modifyValueAndActiveState(xyz, op);
    }

    /// @brief Set the active state of the voxel at the given coordinates
    /// without changing its value.
    /// @note Used internally by ValueAccessor.
    template<typename AccessorT>
    void setActiveStateAndCache(const Coord& xyz, bool on, AccessorT&)
    {
        this->setActiveState(xyz, on);
    }

    /// @brief Return @c true if the voxel at the given coordinates is active
    /// and return the voxel value in @a val.
    /// @note Used internally by ValueAccessor.
    template<typename AccessorT>
    bool probeValueAndCache(const Coord& xyz, bool& val, AccessorT&) const
    {
        return this->probeValue(xyz, val);
    }

    /// @brief Return the LEVEL (=0) at which leaf node values reside.
    /// @note Used internally by ValueAccessor.
    template<typename AccessorT>
    static Index getValueLevelAndCache(const Coord&, AccessorT&) { return LEVEL; }

    /// @brief Return a const reference to the first entry in the buffer.
    /// @note Since it's actually a reference to a static data member
    /// it should not be converted to a non-const pointer!
    const bool& getFirstValue() const
    {
        if (mBuffer.mData.isOn(0)) return Buffer::sOn; else return Buffer::sOff;
    }
    /// @brief Return a const reference to the last entry in the buffer.
    /// @note Since it's actually a reference to a static data member
    /// it should not be converted to a non-const pointer!
    const bool& getLastValue() const
    {
        if (mBuffer.mData.isOn(SIZE-1)) return Buffer::sOn; else return Buffer::sOff;
    }

    /// Return @c true if all of this node's voxels have the same active state
    /// and are equal to within the given tolerance, and return the value in
    /// @a constValue and the active state in @a state.
    bool isConstant(bool& constValue, bool& state, bool tolerance = 0) const;

    /// @brief Computes the median value of all the active and inactive voxels in this node.
    /// @return The median value.
    ///
    /// @details The median for boolean values is defined as the mode
    /// of the values, i.e. the value that occurs most often.
    bool medianAll() const;

    /// @brief Computes the median value of all the active voxels in this node.
    /// @return The number of active voxels.
    ///
    /// @param value Updated with the median value of all the active voxels.
    ///
    /// @note Since the value and state are shared for this
    /// specialization of the LeafNode the @a value will always be true!
    Index medianOn(ValueType &value) const;

    /// @brief Computes the median value of all the inactive voxels in this node.
    /// @return The number of inactive voxels.
    ///
    /// @param value Updated with the median value of all the inactive
    /// voxels.
    ///
    /// @note Since the value and state are shared for this
    /// specialization of the LeafNode the @a value will always be false!
    Index medianOff(ValueType &value) const;

    /// Return @c true if all of this node's values are inactive.
    bool isInactive() const { return mBuffer.mData.isOff(); }

    /// @brief No-op since for this template specialization voxel
    /// values and states are indistinguishable.
    void resetBackground(bool, bool) {}

    /// @brief Invert the bits of the voxels, i.e. states and values
    void negate() { mBuffer.mData.toggle(); }

    template<MergePolicy Policy>
    void merge(const LeafNode& other, bool bg = false, bool otherBG = false);
    template<MergePolicy Policy> void merge(bool tileValue, bool tileActive=false);

    /// @brief No-op
    /// @details This function exists only to enable template instantiation.
    void voxelizeActiveTiles(bool = true) {}

    /// @brief Union this node's set of active values with the active values
    /// of the other node, whose @c ValueType may be different. So a
    /// resulting voxel will be active if either of the original voxels
    /// were active.
    ///
    /// @note This operation modifies only active states, not values.
    template<typename OtherType>
    void topologyUnion(const LeafNode<OtherType, Log2Dim>& other);

    /// @brief Intersect this node's set of active values with the active values
    /// of the other node, whose @c ValueType may be different. So a
    /// resulting voxel will be active only if both of the original voxels
    /// were active.
    ///
    /// @details The last dummy argument is required to match the signature
    /// for InternalNode::topologyIntersection.
    ///
    /// @note This operation modifies only active states, not
    /// values. Also note that this operation can result in all voxels
    /// being inactive so consider subsequently calling prune.
    template<typename OtherType>
    void topologyIntersection(const LeafNode<OtherType, Log2Dim>& other, const bool&);

    /// @brief Difference this node's set of active values with the active values
    /// of the other node, whose @c ValueType may be different. So a
    /// resulting voxel will be active only if the original voxel is
    /// active in this LeafNode and inactive in the other LeafNode.
    ///
    /// @details The last dummy argument is required to match the signature
    /// for InternalNode::topologyDifference.
    ///
    /// @note This operation modifies only active states, not values.
    /// Also, because it can deactivate all of this node's voxels,
    /// consider subsequently calling prune.
    template<typename OtherType>
    void topologyDifference(const LeafNode<OtherType, Log2Dim>& other, const bool&);

    template<typename CombineOp>
    void combine(const LeafNode& other, CombineOp& op);
    template<typename CombineOp>
    void combine(bool, bool valueIsActive, CombineOp& op);

    template<typename CombineOp, typename OtherType /*= bool*/>
    void combine2(const LeafNode& other, const OtherType&, bool valueIsActive, CombineOp&);
    template<typename CombineOp, typename OtherNodeT /*= LeafNode*/>
    void combine2(bool, const OtherNodeT& other, bool valueIsActive, CombineOp&);
    template<typename CombineOp, typename OtherNodeT /*= LeafNode*/>
    void combine2(const LeafNode& b0, const OtherNodeT& b1, CombineOp&);

    /// @brief Calls the templated functor BBoxOp with bounding box information.
    /// An additional level argument is provided to the callback.
    ///
    /// @note The bounding boxes are guaranteed to be non-overlapping.
    template<typename BBoxOp> void visitActiveBBox(BBoxOp&) const;

    template<typename VisitorOp> void visit(VisitorOp&);
    template<typename VisitorOp> void visit(VisitorOp&) const;

    template<typename OtherLeafNodeType, typename VisitorOp>
    void visit2Node(OtherLeafNodeType& other, VisitorOp&);
    template<typename OtherLeafNodeType, typename VisitorOp>
    void visit2Node(OtherLeafNodeType& other, VisitorOp&) const;
    template<typename IterT, typename VisitorOp>
    void visit2(IterT& otherIter, VisitorOp&, bool otherIsLHS = false);
    template<typename IterT, typename VisitorOp>
    void visit2(IterT& otherIter, VisitorOp&, bool otherIsLHS = false) const;

    //@{
    /// This function exists only to enable template instantiation.
    void prune(const ValueType& /*tolerance*/ = zeroVal<ValueType>()) {}
    void addLeaf(LeafNode*) {}
    template<typename AccessorT>
    void addLeafAndCache(LeafNode*, AccessorT&) {}
    template<typename NodeT>
    NodeT* stealNode(const Coord&, const ValueType&, bool) { return nullptr; }
    template<typename NodeT>
    NodeT* probeNode(const Coord&) { return nullptr; }
    template<typename NodeT>
    const NodeT* probeConstNode(const Coord&) const { return nullptr; }
    template<typename ArrayT> void getNodes(ArrayT&) const {}
    template<typename ArrayT> void stealNodes(ArrayT&, const ValueType&, bool) {}
    //@}

    void addTile(Index level, const Coord&, bool val, bool active);
    void addTile(Index offset, bool val, bool active);
    template<typename AccessorT>
    void addTileAndCache(Index level, const Coord&, bool val, bool active, AccessorT&);

    //@{
    /// @brief Return a pointer to this node.
    LeafNode* touchLeaf(const Coord&) { return this; }
    template<typename AccessorT>
    LeafNode* touchLeafAndCache(const Coord&, AccessorT&) { return this; }
    LeafNode* probeLeaf(const Coord&) { return this; }
    template<typename AccessorT>
    LeafNode* probeLeafAndCache(const Coord&, AccessorT&) { return this; }
    template<typename NodeT, typename AccessorT>
    NodeT* probeNodeAndCache(const Coord&, AccessorT&)
    {
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        // Only a LeafNode can be probed from a leaf; any other NodeT yields null.
        if (!(std::is_same<NodeT, LeafNode>::value)) return nullptr;
        return reinterpret_cast<NodeT*>(this);
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }
    //@}

    //@{
    /// @brief Return a @const pointer to this node.
    const LeafNode* probeLeaf(const Coord&) const { return this; }
    template<typename AccessorT>
    const LeafNode* probeLeafAndCache(const Coord&, AccessorT&) const { return this; }
    const LeafNode* probeConstLeaf(const Coord&) const { return this; }
    template<typename AccessorT>
    const LeafNode* probeConstLeafAndCache(const Coord&, AccessorT&) const { return this; }
    template<typename NodeT, typename AccessorT>
    const NodeT* probeConstNodeAndCache(const Coord&, AccessorT&) const
    {
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        // Only a LeafNode can be probed from a leaf; any other NodeT yields null.
        if (!(std::is_same<NodeT, LeafNode>::value)) return nullptr;
        return reinterpret_cast<const NodeT*>(this);
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }
    //@}

    //
    // Iterators
    //
protected:
    using MaskOnIter = typename NodeMaskType::OnIterator;
    using MaskOffIter = typename NodeMaskType::OffIterator;
    using MaskDenseIter = typename NodeMaskType::DenseIterator;

    template<typename MaskIterT, typename NodeT, typename ValueT>
    struct ValueIter:
        // Derives from SparseIteratorBase, but can also be used as a dense iterator,
        // if MaskIterT is a dense mask iterator type.
        public SparseIteratorBase<MaskIterT, ValueIter<MaskIterT, NodeT, ValueT>, NodeT, ValueT>
    {
        using BaseT = SparseIteratorBase<MaskIterT, ValueIter, NodeT, ValueT>;

        ValueIter() {}
        ValueIter(const MaskIterT& iter, NodeT* parent): BaseT(iter, parent) {}

        const bool& getItem(Index pos) const { return this->parent().getValue(pos); }
        const bool& getValue() const { return this->getItem(this->pos()); }

        // Note: setItem() can't be called on const iterators.
        void setItem(Index pos, bool value) const { this->parent().setValueOnly(pos, value); }
        // Note: setValue() can't be called on const iterators.
        void setValue(bool value) const { this->setItem(this->pos(), value); }

        // Note: modifyItem() can't be called on const iterators.
        template<typename ModifyOp>
        void modifyItem(Index n, const ModifyOp& op) const { this->parent().modifyValue(n, op); }
        // Note: modifyValue() can't be called on const iterators.
        template<typename ModifyOp>
        void modifyValue(const ModifyOp& op) const { this->modifyItem(this->pos(), op); }
    };

    /// Leaf nodes have no children, so their child iterators have no get/set accessors.
    template<typename MaskIterT, typename NodeT>
    struct ChildIter:
        public SparseIteratorBase<MaskIterT, ChildIter<MaskIterT, NodeT>, NodeT, bool>
    {
        ChildIter() {}
        ChildIter(const MaskIterT& iter, NodeT* parent): SparseIteratorBase<
            MaskIterT, ChildIter<MaskIterT, NodeT>, NodeT, bool>(iter, parent) {}
    };

    template<typename NodeT, typename ValueT>
    struct DenseIter: public DenseIteratorBase<
        MaskDenseIter, DenseIter<NodeT, ValueT>, NodeT, /*ChildT=*/void, ValueT>
    {
        using BaseT = DenseIteratorBase<MaskDenseIter, DenseIter, NodeT, void, ValueT>;
        using NonConstValueT = typename BaseT::NonConstValueType;

        DenseIter() {}
        DenseIter(const MaskDenseIter& iter, NodeT* parent): BaseT(iter, parent) {}

        bool getItem(Index pos, void*& child, NonConstValueT& value) const
        {
            value = this->parent().getValue(pos);
            child = nullptr;
            return false; // no child
        }

        // Note: setItem() can't be called on const iterators.
        //void setItem(Index pos, void* child) const {}

        // Note: unsetItem() can't be called on const iterators.
        void unsetItem(Index pos, const ValueT& val) const {this->parent().setValueOnly(pos, val);}
    };

public:
    using ValueOnIter = ValueIter<MaskOnIter, LeafNode, const bool>;
    using ValueOnCIter = ValueIter<MaskOnIter, const LeafNode, const bool>;
    using ValueOffIter = ValueIter<MaskOffIter, LeafNode, const bool>;
    using ValueOffCIter = ValueIter<MaskOffIter, const LeafNode, const bool>;
    using ValueAllIter = ValueIter<MaskDenseIter, LeafNode, const bool>;
    using ValueAllCIter = ValueIter<MaskDenseIter, const LeafNode, const bool>;
    using ChildOnIter = ChildIter<MaskOnIter, LeafNode>;
    using ChildOnCIter = ChildIter<MaskOnIter, const LeafNode>;
    using ChildOffIter = ChildIter<MaskOffIter, LeafNode>;
    using ChildOffCIter = ChildIter<MaskOffIter, const LeafNode>;
    using ChildAllIter = DenseIter<LeafNode, bool>;
    using ChildAllCIter = DenseIter<const LeafNode, const bool>;

    ValueOnCIter cbeginValueOn() const { return ValueOnCIter(mBuffer.mData.beginOn(), this); }
    ValueOnCIter beginValueOn() const { return ValueOnCIter(mBuffer.mData.beginOn(), this); }
    ValueOnIter beginValueOn() { return ValueOnIter(mBuffer.mData.beginOn(), this); }
    ValueOffCIter cbeginValueOff() const { return ValueOffCIter(mBuffer.mData.beginOff(), this); }
    ValueOffCIter beginValueOff() const { return ValueOffCIter(mBuffer.mData.beginOff(), this); }
    ValueOffIter beginValueOff() { return ValueOffIter(mBuffer.mData.beginOff(), this); }
    ValueAllCIter cbeginValueAll() const { return ValueAllCIter(mBuffer.mData.beginDense(), this); }
    ValueAllCIter beginValueAll() const { return ValueAllCIter(mBuffer.mData.beginDense(), this); }
    ValueAllIter beginValueAll() { return ValueAllIter(mBuffer.mData.beginDense(), this); }

    ValueOnCIter cendValueOn() const { return ValueOnCIter(mBuffer.mData.endOn(), this); }
    ValueOnCIter endValueOn() const { return ValueOnCIter(mBuffer.mData.endOn(), this); }
    ValueOnIter endValueOn() { return ValueOnIter(mBuffer.mData.endOn(), this); }
    ValueOffCIter cendValueOff() const { return ValueOffCIter(mBuffer.mData.endOff(), this); }
    ValueOffCIter endValueOff() const { return ValueOffCIter(mBuffer.mData.endOff(), this); }
    ValueOffIter endValueOff() { return ValueOffIter(mBuffer.mData.endOff(), this); }
    ValueAllCIter cendValueAll() const { return ValueAllCIter(mBuffer.mData.endDense(), this); }
    ValueAllCIter endValueAll() const { return ValueAllCIter(mBuffer.mData.endDense(), this); }
    ValueAllIter endValueAll() { return ValueAllIter(mBuffer.mData.endDense(), this); }

    // Note that [c]beginChildOn() and [c]beginChildOff() actually return end iterators,
    // because leaf nodes have no children.
    ChildOnCIter cbeginChildOn() const { return ChildOnCIter(mBuffer.mData.endOn(), this); }
    ChildOnCIter beginChildOn() const { return ChildOnCIter(mBuffer.mData.endOn(), this); }
    ChildOnIter beginChildOn() { return ChildOnIter(mBuffer.mData.endOn(), this); }
    ChildOffCIter cbeginChildOff() const { return ChildOffCIter(mBuffer.mData.endOff(), this); }
    ChildOffCIter beginChildOff() const { return ChildOffCIter(mBuffer.mData.endOff(), this); }
    ChildOffIter beginChildOff() { return ChildOffIter(mBuffer.mData.endOff(), this); }
    ChildAllCIter cbeginChildAll() const { return ChildAllCIter(mBuffer.mData.beginDense(), this); }
    ChildAllCIter beginChildAll() const { return ChildAllCIter(mBuffer.mData.beginDense(), this); }
    ChildAllIter beginChildAll() { return ChildAllIter(mBuffer.mData.beginDense(), this); }

    ChildOnCIter cendChildOn() const { return ChildOnCIter(mBuffer.mData.endOn(), this); }
    ChildOnCIter endChildOn() const { return ChildOnCIter(mBuffer.mData.endOn(), this); }
    ChildOnIter endChildOn() { return ChildOnIter(mBuffer.mData.endOn(), this); }
    ChildOffCIter cendChildOff() const { return ChildOffCIter(mBuffer.mData.endOff(), this); }
    ChildOffCIter endChildOff() const { return ChildOffCIter(mBuffer.mData.endOff(), this); }
    ChildOffIter endChildOff() { return ChildOffIter(mBuffer.mData.endOff(), this); }
    ChildAllCIter cendChildAll() const { return ChildAllCIter(mBuffer.mData.endDense(), this); }
    ChildAllCIter endChildAll() const { return ChildAllCIter(mBuffer.mData.endDense(), this); }
    ChildAllIter endChildAll() { return ChildAllIter(mBuffer.mData.endDense(), this); }

    //
    // Mask accessors
    //
    bool isValueMaskOn(Index n) const { return mBuffer.mData.isOn(n); }
    bool isValueMaskOn() const { return mBuffer.mData.isOn(); }
    bool isValueMaskOff(Index n) const { return mBuffer.mData.isOff(n); }
    bool isValueMaskOff() const { return mBuffer.mData.isOff(); }
    const NodeMaskType& getValueMask() const { return mBuffer.mData; }
    const NodeMaskType& valueMask() const { return mBuffer.mData; }
    NodeMaskType& getValueMask() { return mBuffer.mData; }
    void setValueMask(const NodeMaskType& mask) { mBuffer.mData = mask; }
    bool isChildMaskOn(Index) const { return false; } // leaf nodes have no children
    bool isChildMaskOff(Index) const { return true; }
    bool isChildMaskOff() const { return true; }
protected:
    void setValueMask(Index n, bool on) { mBuffer.mData.set(n, on); }
    void setValueMaskOn(Index n) { mBuffer.mData.setOn(n); }
    void setValueMaskOff(Index n) { mBuffer.mData.setOff(n); }

    /// Compute the origin of the leaf node that contains the voxel with the given coordinates.
    static void evalNodeOrigin(Coord& xyz) { xyz &= ~(DIM - 1); }

    template<typename NodeT, typename VisitorOp, typename ChildAllIterT>
    static inline void doVisit(NodeT&, VisitorOp&);

    template<typename NodeT, typename OtherNodeT, typename VisitorOp,
        typename ChildAllIterT, typename OtherChildAllIterT>
    static inline void doVisit2Node(NodeT& self, OtherNodeT& other, VisitorOp&);

    template<typename NodeT, typename VisitorOp,
        typename ChildAllIterT, typename OtherChildAllIterT>
    static inline void doVisit2(NodeT& self, OtherChildAllIterT&, VisitorOp&, bool otherIsLHS);

    /// Bitmask representing the values AND state of voxels
    Buffer mBuffer;
    /// Global grid index coordinates (x,y,z) of the local origin of this node
    Coord mOrigin;

private:
    /// @brief During topology-only construction, access is needed
    /// to protected/private members of other template instances.
    template<typename, Index> friend class LeafNode;

    friend struct ValueIter<MaskOnIter, LeafNode, bool>;
    friend struct ValueIter<MaskOffIter, LeafNode, bool>;
    friend struct ValueIter<MaskDenseIter, LeafNode, bool>;
    friend struct ValueIter<MaskOnIter, const LeafNode, bool>;
    friend struct ValueIter<MaskOffIter, const LeafNode, bool>;
    friend struct ValueIter<MaskDenseIter, const LeafNode, bool>;

    //@{
    /// Allow iterators to call mask accessor methods (see below).
    /// @todo Make mask accessors public?
    friend class IteratorBase<MaskOnIter, LeafNode>;
    friend class IteratorBase<MaskOffIter, LeafNode>;
    friend class IteratorBase<MaskDenseIter, LeafNode>;
    //@}

    template<typename, Index> friend class LeafBuffer;

}; // class LeafNode<ValueMask>


////////////////////////////////////////


// Default construction: empty mask, origin at (0,0,0).
template<Index Log2Dim>
inline LeafNode<ValueMask, Log2Dim>::LeafNode()
    : mOrigin(0, 0, 0)
{
}

// For this specialization value and active state are the same bit, so the
// buffer is filled with (value || active).
template<Index Log2Dim>
inline LeafNode<ValueMask, Log2Dim>::LeafNode(const Coord& xyz, bool value, bool active)
    : mBuffer(value || active)
    , mOrigin(xyz & (~(DIM - 1)))
{
}

// "Partial creation" constructor: for a mask leaf there is nothing to defer,
// so this behaves the same as the constructor above.
template<Index Log2Dim>
inline LeafNode<ValueMask, Log2Dim>::LeafNode(PartialCreate, const Coord& xyz, bool value, bool active)
    : mBuffer(value || active)
    , mOrigin(xyz & (~(DIM - 1)))
{
}

template<Index Log2Dim>
inline LeafNode<ValueMask, Log2Dim>::LeafNode(const LeafNode &other)
    : mBuffer(other.mBuffer)
    , mOrigin(other.mOrigin)
{
}


// Copy-construct from a leaf node with the same configuration but a different ValueType.
template<Index Log2Dim>
template<typename ValueT>
inline LeafNode<ValueMask, Log2Dim>::LeafNode(const LeafNode<ValueT, Log2Dim>& other)
    : mBuffer(other.valueMask())
    , mOrigin(other.origin())
{
}


template<Index Log2Dim>
template<typename ValueT>
inline LeafNode<ValueMask, Log2Dim>::LeafNode(const LeafNode<ValueT, Log2Dim>& other,
                                              bool, TopologyCopy)
    : mBuffer(other.valueMask())// value = active state
    , mOrigin(other.origin())
{
}


template<Index Log2Dim>
template<typename ValueT>
inline LeafNode<ValueMask, Log2Dim>::LeafNode(const LeafNode<ValueT, Log2Dim>& other, TopologyCopy)
    : mBuffer(other.valueMask())// value = active state
    , mOrigin(other.origin())
{
}


// Topology copy with explicit off/on values.  Since value == state here,
// the requested (offValue, onValue) pair is reduced to a mask transform:
// (true, false) -> complement of the mask, (true, true) -> all bits on.
template<Index Log2Dim>
template<typename ValueT>
inline LeafNode<ValueMask, Log2Dim>::LeafNode(const LeafNode<ValueT, Log2Dim>& other,
                                              bool offValue, bool onValue, TopologyCopy)
    : mBuffer(other.valueMask())
    , mOrigin(other.origin())
{
    if (offValue==true) {
        if (onValue==false) {
            mBuffer.mData.toggle();
        } else {
            mBuffer.mData.setOn();
        }
    }
}


template<Index Log2Dim>
inline LeafNode<ValueMask, Log2Dim>::~LeafNode()
{
}


////////////////////////////////////////


template<Index Log2Dim>
inline Index64
LeafNode<ValueMask, Log2Dim>::memUsage() const
{
    // Use sizeof(*this) to capture alignment-related padding
    return sizeof(*this);
}


template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels) const
{
    CoordBBox this_bbox = this->getNodeBoundingBox();
    if (bbox.isInside(this_bbox)) return;//this LeafNode is already enclosed in the bbox
    if (ValueOnCIter iter = this->cbeginValueOn()) {//any active values?
        if (visitVoxels) {//use voxel granularity?
            this_bbox.reset();
            for(; iter; ++iter) this_bbox.expand(this->offsetToLocalCoord(iter.pos()));
            this_bbox.translate(this->origin());
        }
        bbox.expand(this_bbox);
    }
}


template<Index Log2Dim>
template<typename OtherType, Index OtherLog2Dim>
inline bool
LeafNode<ValueMask, Log2Dim>::hasSameTopology(const LeafNode<OtherType, OtherLog2Dim>* other) const
{
    assert(other);
    return (Log2Dim == OtherLog2Dim && mBuffer.mData == other->getValueMask());
}


// Human-readable summary: origin followed by one '#' (on) or '.' (off) per voxel.
template<Index Log2Dim>
inline std::string
LeafNode<ValueMask, Log2Dim>::str() const
{
    std::ostringstream ostr;
    ostr << "LeafNode @" << mOrigin << ": ";
    for (Index32 n = 0; n < SIZE; ++n) ostr << (mBuffer.mData.isOn(n) ? '#' : '.');
    return ostr.str();
}


////////////////////////////////////////


// Pack local (x,y,z) voxel coordinates into a linear buffer offset
// (x-major, z fastest; each axis contributes Log2Dim bits).
template<Index Log2Dim>
inline Index
LeafNode<ValueMask, Log2Dim>::coordToOffset(const Coord& xyz)
{
    assert ((xyz[0] & (DIM-1u)) < DIM && (xyz[1] & (DIM-1u)) < DIM && (xyz[2] & (DIM-1u)) < DIM);
    return ((xyz[0] & (DIM-1u)) << 2*Log2Dim)
         + ((xyz[1] & (DIM-1u)) << Log2Dim)
         +  (xyz[2] & (DIM-1u));
}


// Inverse of coordToOffset(): unpack a linear offset into local coordinates.
template<Index Log2Dim>
inline Coord
LeafNode<ValueMask, Log2Dim>::offsetToLocalCoord(Index n)
{
    assert(n < (1 << 3*Log2Dim));
    Coord xyz;
    xyz.setX(n >> 2*Log2Dim);
    n &= ((1 << 2*Log2Dim) - 1);
    xyz.setY(n >> Log2Dim);
    xyz.setZ(n & ((1 << Log2Dim) - 1));
    return xyz;
}


template<Index Log2Dim>
inline Coord
LeafNode<ValueMask, Log2Dim>::offsetToGlobalCoord(Index n) const
{
    return (this->offsetToLocalCoord(n) + this->origin());
}


////////////////////////////////////////


template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::readTopology(std::istream& is, bool /*fromHalf*/)
{
    mBuffer.mData.load(is);
}


template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::writeTopology(std::ostream& os, bool /*toHalf*/) const
{
    mBuffer.mData.save(os);
}


template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::readBuffers(std::istream& is, const CoordBBox& clipBBox, bool fromHalf)
{
    // Boolean LeafNodes don't currently implement lazy loading.
    // Instead, load the full buffer, then clip it.

    this->readBuffers(is, fromHalf);

    // Get this tree's background value.
    bool background = false;
    if (const void* bgPtr = io::getGridBackgroundValuePtr(is)) {
        background = *static_cast<const bool*>(bgPtr);
    }
    this->clip(clipBBox, background);
}


template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::readBuffers(std::istream& is, bool /*fromHalf*/)
{
    // Read in the value mask = buffer.
    mBuffer.mData.load(is);
    // Read in the origin.
    is.read(reinterpret_cast<char*>(&mOrigin), sizeof(Coord::ValueType) * 3);
}


template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::writeBuffers(std::ostream& os, bool /*toHalf*/) const
{
    // Write out the value mask = buffer.
    mBuffer.mData.save(os);
    // Write out the origin.
    os.write(reinterpret_cast<const char*>(&mOrigin), sizeof(Coord::ValueType) * 3);
}


////////////////////////////////////////


template<Index Log2Dim>
inline bool
LeafNode<ValueMask, Log2Dim>::operator==(const LeafNode& other) const
{
    return mOrigin == other.mOrigin &&
           mBuffer == other.mBuffer;
}


template<Index Log2Dim>
inline bool
LeafNode<ValueMask, Log2Dim>::operator!=(const LeafNode& other) const
{
    return !(this->operator==(other));
}


////////////////////////////////////////


// The tolerance argument is ignored: value and state are the same bit, so the
// node is constant exactly when the mask is uniformly on or uniformly off.
template<Index Log2Dim>
inline bool
LeafNode<ValueMask, Log2Dim>::isConstant(bool& constValue, bool& state, bool) const
{
    if (!mBuffer.mData.isConstant(state)) return false;

    constValue = state;
    return true;
}


////////////////////////////////////////


// Median of booleans = mode: true iff more than half the bits are on.
template<Index Log2Dim>
inline bool
LeafNode<ValueMask, Log2Dim>::medianAll() const
{
    const Index countTrue = mBuffer.mData.countOn();
    return countTrue > (NUM_VALUES >> 1);
}


template<Index Log2Dim>
inline Index
LeafNode<ValueMask, Log2Dim>::medianOn(bool& state) const
{
    const Index countTrueOn = mBuffer.mData.countOn();
    state = true;//since value and state are the same for this specialization of the leaf node
    return countTrueOn;
}


template<Index Log2Dim>
inline Index
LeafNode<ValueMask, Log2Dim>::medianOff(bool& state) const
{
    const Index countFalseOff = mBuffer.mData.countOff();
    state = false;//since value and state are the same for this specialization of the leaf node
    return countFalseOff;
}


////////////////////////////////////////


// A "tile" addressed at leaf level is just a single voxel.
template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::addTile(Index /*level*/, const Coord& xyz, bool val, bool active)
{
    this->addTile(this->coordToOffset(xyz), val, active);
}

template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::addTile(Index offset, bool val, bool active)
{
    assert(offset < SIZE);
    this->setValueOnly(offset, val);
    this->setActiveState(offset, active);
}

template<Index Log2Dim>
template<typename AccessorT>
inline void
LeafNode<ValueMask, Log2Dim>::addTileAndCache(Index level, const Coord& xyz,
    bool val, bool active, AccessorT&)
{
    this->addTile(level, xyz, val, active);
}


////////////////////////////////////////


template<Index Log2Dim>
inline const bool&
LeafNode<ValueMask, Log2Dim>::getValue(const Coord& xyz) const
{
    // This *CANNOT* use operator ? because Visual C++
    // (returning a reference to a static member through ?: is mishandled there)
    if (mBuffer.mData.isOn(this->coordToOffset(xyz))) return Buffer::sOn; else return Buffer::sOff;
}


template<Index Log2Dim>
inline const bool&
LeafNode<ValueMask, Log2Dim>::getValue(Index offset) const
{
    assert(offset < SIZE);
    // This *CANNOT* use operator ? for Windows
    if (mBuffer.mData.isOn(offset)) return Buffer::sOn; else return Buffer::sOff;
}


template<Index Log2Dim>
inline bool
LeafNode<ValueMask, Log2Dim>::probeValue(const Coord& xyz, bool& val) const
{
    const Index offset = this->coordToOffset(xyz);
    val = mBuffer.mData.isOn(offset);
    return val; // value == active state for this specialization
}


template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::setValueOn(const Coord& xyz, bool val)
{
    this->setValueOn(this->coordToOffset(xyz), val);
}


// Note: since value == state, "set on with value val" stores val itself.
template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::setValueOn(Index offset, bool val)
{
    assert(offset < SIZE);
    mBuffer.mData.set(offset, val);
}


template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::setValueOnly(const Coord& xyz, bool val)
{
    this->setValueOnly(this->coordToOffset(xyz), val);
}


template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::setActiveState(const Coord& xyz, bool on)
{
    mBuffer.mData.set(this->coordToOffset(xyz), on);
}


template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::setValueOff(const Coord& xyz, bool val)
{
    this->setValueOff(this->coordToOffset(xyz), val);
}


// Note: since value == state, "set off with value val" stores val itself.
template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::setValueOff(Index offset, bool val)
{
    assert(offset < SIZE);
    mBuffer.mData.set(offset, val);
}


template<Index Log2Dim>
template<typename ModifyOp>
inline void
LeafNode<ValueMask, Log2Dim>::modifyValue(Index offset, const ModifyOp& op)
{
    bool val = mBuffer.mData.isOn(offset);
    op(val);
    mBuffer.mData.set(offset, val);
}


template<Index Log2Dim>
template<typename ModifyOp>
inline void
LeafNode<ValueMask, Log2Dim>::modifyValue(const Coord& xyz, const ModifyOp& op)
{
    this->modifyValue(this->coordToOffset(xyz), op);
}


// The functor may modify both value and state, but only the (shared) value bit
// is stored back for this specialization.
template<Index Log2Dim>
template<typename ModifyOp>
inline void
LeafNode<ValueMask, Log2Dim>::modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op)
{
    const Index offset = this->coordToOffset(xyz);
    bool val = mBuffer.mData.isOn(offset), state = val;
    op(val, state);
    mBuffer.mData.set(offset, val);
}


////////////////////////////////////////


// Merge = bit-wise OR of the two masks (except under MERGE_NODES).
template<Index Log2Dim>
template<MergePolicy Policy>
inline void
LeafNode<ValueMask, Log2Dim>::merge(const LeafNode& other, bool /*bg*/, bool /*otherBG*/)
{
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    if (Policy == MERGE_NODES) return;
    mBuffer.mData |= other.mBuffer.mData;
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}

template<Index Log2Dim>
template<MergePolicy Policy>
inline void
LeafNode<ValueMask, Log2Dim>::merge(bool tileValue, bool)
{
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    if (Policy != MERGE_ACTIVE_STATES_AND_NODES) return;
    if (tileValue) mBuffer.mData.setOn();
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}


////////////////////////////////////////


// Union/intersection/difference of active states map directly onto mask
// bit operations for this specialization.
template<Index Log2Dim>
template<typename OtherType>
inline void
LeafNode<ValueMask, Log2Dim>::topologyUnion(const LeafNode<OtherType, Log2Dim>& other)
{
    mBuffer.mData |= other.valueMask();
}


template<Index Log2Dim>
template<typename OtherType>
inline void
LeafNode<ValueMask, Log2Dim>::topologyIntersection(const LeafNode<OtherType, Log2Dim>& other,
    const bool&)
{
    mBuffer.mData &= other.valueMask();
}


// operator! on a NodeMask yields its complement, so this is "this AND NOT other".
template<Index Log2Dim>
template<typename OtherType>
inline void
LeafNode<ValueMask, Log2Dim>::topologyDifference(const LeafNode<OtherType, Log2Dim>& other,
    const bool&)
{
    mBuffer.mData &= !other.valueMask();
}


////////////////////////////////////////


template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::clip(const CoordBBox& clipBBox, bool background)
{
    CoordBBox nodeBBox = this->getNodeBoundingBox();
    if (!clipBBox.hasOverlap(nodeBBox)) {
        // This node lies completely outside the clipping region.  Fill it with background tiles.
        this->fill(nodeBBox, background, /*active=*/false);
    } else if (clipBBox.isInside(nodeBBox)) {
        // This node lies completely inside the clipping region.  Leave it intact.
        return;
    }

    // This node isn't completely contained inside the clipping region.
    // Set any voxels that lie outside the region to the background value.

    // Construct a boolean mask that is on inside the clipping region and off outside it.
    NodeMaskType mask;
    nodeBBox.intersect(clipBBox);
    Coord xyz;
    int &x = xyz.x(), &y = xyz.y(), &z = xyz.z();
    for (x = nodeBBox.min().x(); x <= nodeBBox.max().x(); ++x) {
        for (y = nodeBBox.min().y(); y <= nodeBBox.max().y(); ++y) {
            for (z = nodeBBox.min().z(); z <= nodeBBox.max().z(); ++z) {
                mask.setOn(static_cast<Index32>(this->coordToOffset(xyz)));
            }
        }
    }

    // Set voxels that lie in the inactive region of the mask (i.e., outside
    // the clipping region) to the background value.
    for (MaskOffIter maskIter = mask.beginOff(); maskIter; ++maskIter) {
        this->setValueOff(maskIter.pos(), background);
    }
}


////////////////////////////////////////


// Set every voxel in the intersection of bbox with this node to value.
// (The active-state argument is ignored: value == state here.)
template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::fill(const CoordBBox& bbox, bool value, bool)
{
    auto clippedBBox = this->getNodeBoundingBox();
    clippedBBox.intersect(bbox);
    if (!clippedBBox) return;

    for (Int32 x = clippedBBox.min().x(); x <= clippedBBox.max().x(); ++x) {
        const Index offsetX = (x & (DIM-1u))<<2*Log2Dim;
        for (Int32 y = clippedBBox.min().y(); y <= clippedBBox.max().y(); ++y) {
            const Index offsetXY = offsetX + ((y & (DIM-1u))<< Log2Dim);
            for (Int32 z = clippedBBox.min().z(); z <= clippedBBox.max().z(); ++z) {
                const Index offset = offsetXY + (z & (DIM-1u));
                mBuffer.mData.set(offset, value);
            }
        }
    }
}

template<Index Log2Dim>
inline void
LeafNode<ValueMask, Log2Dim>::fill(const bool& value, bool)
{
    mBuffer.fill(value);
}


////////////////////////////////////////


template<Index Log2Dim>
template<typename DenseT>
inline void
LeafNode<ValueMask, Log2Dim>::copyToDense(const CoordBBox& bbox, DenseT& dense) const
{
    using DenseValueType = typename DenseT::ValueType;

    const size_t xStride = dense.xStride(), yStride = dense.yStride(), zStride = dense.zStride();
    const Coord& min = dense.bbox().min();
    DenseValueType* t0 = dense.data() + zStride * (bbox.min()[2] - min[2]); // target array
    const Int32 n0 = bbox.min()[2] & (DIM-1u);
    for (Int32 x = bbox.min()[0], ex = bbox.max()[0] + 1; x < ex; ++x) {
        DenseValueType* t1 = t0 + xStride * (x - min[0]);
        const Int32 n1 = n0 + ((x & (DIM-1u)) << 2*LOG2DIM);
        for (Int32 y = bbox.min()[1], ey = bbox.max()[1] + 1; y < ey; ++y) {
            DenseValueType* t2 = t1 + yStride * (y - min[1]);
            Int32 n2 = n1 + ((y & (DIM-1u)) << LOG2DIM);
            for (Int32 z = bbox.min()[2], ez = bbox.max()[2] + 1; z < ez; ++z, t2 += zStride) {
                *t2 = DenseValueType(mBuffer.mData.isOn(n2++));
            }
        }
    }
}


template<Index Log2Dim>
template<typename DenseT>
inline void
LeafNode<ValueMask, Log2Dim>::copyFromDense(const CoordBBox& bbox, const DenseT& dense,
    bool background, bool tolerance)
{
    using DenseValueType = typename DenseT::ValueType;
    struct Local {
        inline static bool toBool(const DenseValueType& v) { return !math::isZero(v); }
    };

    const size_t xStride = dense.xStride(), yStride = dense.yStride(), zStride = dense.zStride();
    const Coord& min = dense.bbox().min();
    const DenseValueType* s0 = dense.data() + zStride * (bbox.min()[2] - min[2]); // source
    const Int32 n0 = bbox.min()[2] & (DIM-1u);
    for (Int32 x = bbox.min()[0], ex = bbox.max()[0] + 1; x < ex; ++x) {
        const DenseValueType* s1 = s0 + xStride * (x - min[0]);
        const Int32 n1 = n0 + ((x & (DIM-1u)) << 2*LOG2DIM);
        for (Int32 y = bbox.min()[1], ey = bbox.max()[1] + 1; y < ey; ++y) {
            const DenseValueType* s2 = s1 + yStride * (y - min[1]);
            Int32 n2 = n1 + ((y & (DIM-1u)) << LOG2DIM);
            for (Int32 z = bbox.min()[2], ez = bbox.max()[2]+1; z < ez; ++z, ++n2, s2 += zStride) {
                // Note: if tolerance is true (i.e., 1), then all boolean values compare equal.
                if (tolerance || (background == Local::toBool(*s2))) {
                    mBuffer.mData.set(n2, background);
                } else {
                    mBuffer.mData.set(n2, Local::toBool(*s2));
                }
            }
        }
    }
}


////////////////////////////////////////


// Voxel-wise combination with another mask leaf, via a user-supplied CombineOp.
template<Index Log2Dim>
template<typename CombineOp>
inline void
LeafNode<ValueMask, Log2Dim>::combine(const LeafNode& other, CombineOp& op)
{
    CombineArgs<bool> args;
    for (Index i = 0; i < SIZE; ++i) {
        bool result = false, aVal = mBuffer.mData.isOn(i), bVal = other.mBuffer.mData.isOn(i);
        op(args.setARef(aVal)
            .setAIsActive(aVal)
            .setBRef(bVal)
            .setBIsActive(bVal)
            .setResultRef(result));
        mBuffer.mData.set(i, result);
    }
}


// Voxel-wise combination with a constant (value, active) pair as the B operand.
template<Index Log2Dim>
template<typename CombineOp>
inline void
LeafNode<ValueMask, Log2Dim>::combine(bool value, bool valueIsActive, CombineOp& op)
{
    CombineArgs<bool> args;
    args.setBRef(value).setBIsActive(valueIsActive);
    for (Index i = 0; i < SIZE; ++i) {
        bool result = false, aVal = mBuffer.mData.isOn(i);
        op(args.setARef(aVal)
            .setAIsActive(aVal)
            .setResultRef(result));
        mBuffer.mData.set(i, result);
    }
}


////////////////////////////////////////


// combine2: combine two operands and store the result in this node.
// Here A is taken from @a other and B is the constant (value, active) pair.
template<Index Log2Dim>
template<typename CombineOp, typename OtherType>
inline void
LeafNode<ValueMask, Log2Dim>::combine2(const LeafNode& other, const OtherType& value,
    bool valueIsActive, CombineOp& op)
{
    CombineArgs<bool, OtherType> args;
    args.setBRef(value).setBIsActive(valueIsActive);
    for (Index i = 0; i < SIZE; ++i) {
        bool result = false, aVal = other.mBuffer.mData.isOn(i);
        op(args.setARef(aVal)
            .setAIsActive(aVal)
            .setResultRef(result));
        mBuffer.mData.set(i, result);
    }
}


// combine2: A is the constant (value, active) pair, B is taken from @a other.
template<Index Log2Dim>
template<typename CombineOp, typename OtherNodeT>
inline void
LeafNode<ValueMask, Log2Dim>::combine2(bool value, const OtherNodeT& other,
    bool valueIsActive, CombineOp& op)
{
    CombineArgs<bool, typename OtherNodeT::ValueType> args;
    args.setARef(value).setAIsActive(valueIsActive);
    for (Index i = 0; i < SIZE; ++i) {
        bool result = false, bVal = other.mBuffer.mData.isOn(i);
        op(args.setBRef(bVal)
            .setBIsActive(bVal)
            .setResultRef(result));
        mBuffer.mData.set(i, result);
    }
}


// combine2: combine the two nodes b0 and b1 and store the result in this node.
template<Index Log2Dim>
template<typename CombineOp, typename OtherNodeT>
inline void
LeafNode<ValueMask, Log2Dim>::combine2(const LeafNode& b0, const OtherNodeT& b1, CombineOp& op)
{
    CombineArgs<bool, typename OtherNodeT::ValueType> args;
    for (Index i = 0; i < SIZE; ++i) {
        bool result = false, b0Val = b0.mBuffer.mData.isOn(i), b1Val = b1.mBuffer.mData.isOn(i);
        op(args.setARef(b0Val)
            .setAIsActive(b0Val)
            .setBRef(b1Val)
            .setBIsActive(b1Val)
            .setResultRef(result));
        mBuffer.mData.set(i, result);
    }
}


////////////////////////////////////////


// If the functor asks to descend at this level, report one unit cube per
// active voxel; otherwise report this node's whole bounding box once.
template<Index Log2Dim>
template<typename BBoxOp>
inline void
LeafNode<ValueMask, Log2Dim>::visitActiveBBox(BBoxOp& op) const
{
    if (op.template descent<LEVEL>()) {
        for (ValueOnCIter i=this->cbeginValueOn(); i; ++i) {
            op.template operator()<LEVEL>(CoordBBox::createCube(i.getCoord(), 1));
        }
    } else {
        op.template operator()<LEVEL>(this->getNodeBoundingBox());
    }
}


template<Index Log2Dim>
template<typename VisitorOp>
inline void
LeafNode<ValueMask, Log2Dim>::visit(VisitorOp& op)
{
    doVisit<LeafNode, VisitorOp, ChildAllIter>(*this, op);
}


template<Index Log2Dim>
template<typename VisitorOp>
inline void
LeafNode<ValueMask, Log2Dim>::visit(VisitorOp& op) const
{
    doVisit<const LeafNode, VisitorOp, ChildAllCIter>(*this, op);
}


template<Index Log2Dim>
template<typename NodeT, typename VisitorOp, typename ChildAllIterT>
inline void
LeafNode<ValueMask, Log2Dim>::doVisit(NodeT& self, VisitorOp& op)
{
    for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) {
        op(iter);
    }
}


////////////////////////////////////////


template<Index Log2Dim>
template<typename OtherLeafNodeType, typename VisitorOp>
inline void
LeafNode<ValueMask, Log2Dim>::visit2Node(OtherLeafNodeType& other, VisitorOp& op)
{
    doVisit2Node<LeafNode, OtherLeafNodeType, VisitorOp, ChildAllIter,
        typename OtherLeafNodeType::ChildAllIter>(*this, other, op);
}


template<Index Log2Dim> template<typename OtherLeafNodeType, typename
VisitorOp> inline void LeafNode<ValueMask, Log2Dim>::visit2Node(OtherLeafNodeType& other, VisitorOp& op) const { doVisit2Node<const LeafNode, OtherLeafNodeType, VisitorOp, ChildAllCIter, typename OtherLeafNodeType::ChildAllCIter>(*this, other, op); } template<Index Log2Dim> template< typename NodeT, typename OtherNodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> inline void LeafNode<ValueMask, Log2Dim>::doVisit2Node(NodeT& self, OtherNodeT& other, VisitorOp& op) { // Allow the two nodes to have different ValueTypes, but not different dimensions. static_assert(OtherNodeT::SIZE == NodeT::SIZE, "can't visit nodes of different sizes simultaneously"); static_assert(OtherNodeT::LEVEL == NodeT::LEVEL, "can't visit nodes at different tree levels simultaneously"); ChildAllIterT iter = self.beginChildAll(); OtherChildAllIterT otherIter = other.beginChildAll(); for ( ; iter && otherIter; ++iter, ++otherIter) { op(iter, otherIter); } } //////////////////////////////////////// template<Index Log2Dim> template<typename IterT, typename VisitorOp> inline void LeafNode<ValueMask, Log2Dim>::visit2(IterT& otherIter, VisitorOp& op, bool otherIsLHS) { doVisit2<LeafNode, VisitorOp, ChildAllIter, IterT>(*this, otherIter, op, otherIsLHS); } template<Index Log2Dim> template<typename IterT, typename VisitorOp> inline void LeafNode<ValueMask, Log2Dim>::visit2(IterT& otherIter, VisitorOp& op, bool otherIsLHS) const { doVisit2<const LeafNode, VisitorOp, ChildAllCIter, IterT>(*this, otherIter, op, otherIsLHS); } template<Index Log2Dim> template< typename NodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> inline void LeafNode<ValueMask, Log2Dim>::doVisit2(NodeT& self, OtherChildAllIterT& otherIter, VisitorOp& op, bool otherIsLHS) { if (!otherIter) return; if (otherIsLHS) { for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { op(otherIter, iter); } } else { for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { 
op(iter, otherIter); } } } } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_LEAF_NODE_MASK_HAS_BEEN_INCLUDED
60,854
C
36.197433
118
0.676997
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/NodeUnion.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file NodeUnion.h /// /// @details NodeUnion is a templated helper class that controls access to either /// the child node pointer or the value for a particular element of a root /// or internal node. For space efficiency, the child pointer and the value /// are unioned when possible, since the two are never in use simultaneously. #ifndef OPENVDB_TREE_NODEUNION_HAS_BEEN_INCLUDED #define OPENVDB_TREE_NODEUNION_HAS_BEEN_INCLUDED #include <openvdb/version.h> #include <openvdb/Types.h> #include <cstring> // for std::memcpy() #include <type_traits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { #if OPENVDB_ABI_VERSION_NUMBER >= 8 /// @brief Default implementation of a NodeUnion that stores the child pointer /// and the value separately (i.e., not in a union). Types which select this /// specialization usually do not conform to the requirements of a union /// member, that is that the type ValueT is not trivially copyable. 
This /// implementation is thus NOT used for POD, math::Vec, math::Mat, math::Quat /// or math::Coord types, but is used (for example) with std::string template<typename ValueT, typename ChildT, typename Enable = void> class NodeUnion { private: ChildT* mChild; ValueT mValue; public: NodeUnion(): mChild(nullptr), mValue() {} ChildT* getChild() const { return mChild; } void setChild(ChildT* child) { mChild = child; } const ValueT& getValue() const { return mValue; } ValueT& getValue() { return mValue; } void setValue(const ValueT& val) { mValue = val; } // Small check to ensure this class isn't // selected for some expected types static_assert(!ValueTraits<ValueT>::IsVec && !ValueTraits<ValueT>::IsMat && !ValueTraits<ValueT>::IsQuat && !std::is_same<ValueT, math::Coord>::value && !std::is_arithmetic<ValueT>::value, "Unexpected instantiation of NodeUnion"); }; /// @brief Template specialization of a NodeUnion that stores the child pointer /// and the value together (int, float, pointer, etc.) template<typename ValueT, typename ChildT> class NodeUnion<ValueT, ChildT, typename std::enable_if<std::is_trivially_copyable<ValueT>::value>::type> { private: union { ChildT* mChild; ValueT mValue; }; public: NodeUnion(): mChild(nullptr) {} ChildT* getChild() const { return mChild; } void setChild(ChildT* child) { mChild = child; } const ValueT& getValue() const { return mValue; } ValueT& getValue() { return mValue; } void setValue(const ValueT& val) { mValue = val; } }; #else // Forward declaration of traits class template<typename T> struct CopyTraits; // Default implementation that stores the child pointer and the value separately // (i.e., not in a union) // This implementation is not used for POD, math::Vec or math::Coord value types. 
template<typename ValueT, typename ChildT, typename Enable = void> class NodeUnion { private: ChildT* mChild; ValueT mValue; public: NodeUnion(): mChild(nullptr), mValue() {} ChildT* getChild() const { return mChild; } void setChild(ChildT* child) { mChild = child; } const ValueT& getValue() const { return mValue; } ValueT& getValue() { return mValue; } void setValue(const ValueT& val) { mValue = val; } }; // Template specialization for values of POD types (int, float, pointer, etc.) template<typename ValueT, typename ChildT> class NodeUnion<ValueT, ChildT, typename std::enable_if<std::is_pod<ValueT>::value>::type> { private: union { ChildT* mChild; ValueT mValue; }; public: NodeUnion(): mChild(nullptr) {} ChildT* getChild() const { return mChild; } void setChild(ChildT* child) { mChild = child; } const ValueT& getValue() const { return mValue; } ValueT& getValue() { return mValue; } void setValue(const ValueT& val) { mValue = val; } }; // Template specialization for values of types such as math::Vec3f and math::Coord // for which CopyTraits<T>::IsCopyable is true template<typename ValueT, typename ChildT> class NodeUnion<ValueT, ChildT, typename std::enable_if<CopyTraits<ValueT>::IsCopyable>::type> { private: union { ChildT* mChild; ValueT mValue; }; public: NodeUnion(): mChild(nullptr) {} NodeUnion(const NodeUnion& other): mChild(nullptr) { std::memcpy(static_cast<void*>(this), &other, sizeof(*this)); } NodeUnion& operator=(const NodeUnion& rhs) { std::memcpy(static_cast<void*>(this), &rhs, sizeof(*this)); return *this; } ChildT* getChild() const { return mChild; } void setChild(ChildT* child) { mChild = child; } const ValueT& getValue() const { return mValue; } ValueT& getValue() { return mValue; } void setValue(const ValueT& val) { mValue = val; } }; /// @details A type T is copyable if /// # T stores member values by value (vs. by pointer or reference) /// and T's true byte size is given by sizeof(T). 
/// # T has a trivial destructor /// # T has a default constructor /// # T has an assignment operator template<typename T> struct CopyTraits { static const bool IsCopyable = false; }; template<typename T> struct CopyTraits<math::Vec2<T>> { static const bool IsCopyable = true; }; template<typename T> struct CopyTraits<math::Vec3<T>> { static const bool IsCopyable = true; }; template<typename T> struct CopyTraits<math::Vec4<T>> { static const bool IsCopyable = true; }; template<> struct CopyTraits<math::Coord> { static const bool IsCopyable = true; }; #endif //////////////////////////////////////// } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_NODEUNION_HAS_BEEN_INCLUDED
5,784
C
32.830409
95
0.6926
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/LeafBuffer.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_TREE_LEAFBUFFER_HAS_BEEN_INCLUDED #define OPENVDB_TREE_LEAFBUFFER_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/io/Compression.h> // for io::readCompressedValues(), etc #include <openvdb/util/NodeMasks.h> #include <tbb/atomic.h> #include <tbb/spin_mutex.h> #include <algorithm> // for std::swap #include <cstddef> // for offsetof() #include <iostream> #include <memory> #include <type_traits> class TestLeaf; namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { namespace internal { /// @internal For delayed loading to be threadsafe, LeafBuffer::mOutOfCore must be /// memory-fenced when it is set in LeafBuffer::doLoad(), otherwise that operation /// could be reordered ahead of others in doLoad(), with the possible result that /// other threads could see the buffer as in-core before it has been fully loaded. /// Making mOutOfCore a TBB atomic solves the problem, since TBB atomics are release-fenced /// by default (unlike STL atomics, which are not even guaranteed to be lock-free). /// However, TBB atomics have stricter alignment requirements than their underlying value_types, /// so a LeafBuffer with an atomic mOutOfCore is potentially ABI-incompatible with /// its non-atomic counterpart. /// This helper class conditionally declares mOutOfCore as an atomic only if doing so /// doesn't break ABI compatibility. 
template<typename T> struct LeafBufferFlags { /// The type of LeafBuffer::mOutOfCore using type = tbb::atomic<Index32>; static constexpr bool IsAtomic = true; }; } // namespace internal /// @brief Array of fixed size 2<SUP>3<I>Log2Dim</I></SUP> that stores /// the voxel values of a LeafNode template<typename T, Index Log2Dim> class LeafBuffer { public: using ValueType = T; using StorageType = ValueType; using NodeMaskType = util::NodeMask<Log2Dim>; static const Index SIZE = 1 << 3 * Log2Dim; struct FileInfo { FileInfo(): bufpos(0) , maskpos(0) {} std::streamoff bufpos; std::streamoff maskpos; io::MappedFile::Ptr mapping; SharedPtr<io::StreamMetadata> meta; }; /// Default constructor inline LeafBuffer(): mData(new ValueType[SIZE]) { mOutOfCore = 0; } /// Construct a buffer populated with the specified value. explicit inline LeafBuffer(const ValueType&); /// Copy constructor inline LeafBuffer(const LeafBuffer&); /// Construct a buffer but don't allocate memory for the full array of values. LeafBuffer(PartialCreate, const ValueType&): mData(nullptr) { mOutOfCore = 0; } /// Destructor inline ~LeafBuffer(); /// Return @c true if this buffer's values have not yet been read from disk. bool isOutOfCore() const { return bool(mOutOfCore); } /// Return @c true if memory for this buffer has not yet been allocated. bool empty() const { return !mData || this->isOutOfCore(); } /// Allocate memory for this buffer if it has not already been allocated. bool allocate() { if (mData == nullptr) mData = new ValueType[SIZE]; return true; } /// Populate this buffer with a constant value. inline void fill(const ValueType&); /// Return a const reference to the i'th element of this buffer. const ValueType& getValue(Index i) const { return this->at(i); } /// Return a const reference to the i'th element of this buffer. const ValueType& operator[](Index i) const { return this->at(i); } /// Set the i'th value of this buffer to the specified value. 
inline void setValue(Index i, const ValueType&); /// Copy the other buffer's values into this buffer. inline LeafBuffer& operator=(const LeafBuffer&); /// @brief Return @c true if the contents of the other buffer /// exactly equal the contents of this buffer. inline bool operator==(const LeafBuffer&) const; /// @brief Return @c true if the contents of the other buffer /// are not exactly equal to the contents of this buffer. inline bool operator!=(const LeafBuffer& other) const { return !(other == *this); } /// Exchange this buffer's values with the other buffer's values. inline void swap(LeafBuffer&); /// Return the memory footprint of this buffer in bytes. inline Index memUsage() const; /// Return the number of values contained in this buffer. static Index size() { return SIZE; } /// @brief Return a const pointer to the array of voxel values. /// @details This method guarantees that the buffer is allocated and loaded. /// @warning This method should only be used by experts seeking low-level optimizations. const ValueType* data() const; /// @brief Return a pointer to the array of voxel values. /// @details This method guarantees that the buffer is allocated and loaded. /// @warning This method should only be used by experts seeking low-level optimizations. ValueType* data(); private: /// If this buffer is empty, return zero, otherwise return the value at index @ i. inline const ValueType& at(Index i) const; /// @brief Return a non-const reference to the value at index @a i. /// @details This method is private since it makes assumptions about the /// buffer's memory layout. LeafBuffers associated with custom leaf node types /// (e.g., a bool buffer implemented as a bitmask) might not be able to /// return non-const references to their values. 
ValueType& operator[](Index i) { return const_cast<ValueType&>(this->at(i)); } bool deallocate(); inline void setOutOfCore(bool b) { mOutOfCore = b; } // To facilitate inlining in the common case in which the buffer is in-core, // the loading logic is split into a separate function, doLoad(). inline void loadValues() const { if (this->isOutOfCore()) this->doLoad(); } inline void doLoad() const; inline bool detachFromFile(); using FlagsType = typename internal::LeafBufferFlags<ValueType>::type; union { ValueType* mData; FileInfo* mFileInfo; }; FlagsType mOutOfCore; // interpreted as bool; extra bits reserved for future use tbb::spin_mutex mMutex; // 1 byte //int8_t mReserved[3]; // padding for alignment static const ValueType sZero; friend class ::TestLeaf; // Allow the parent LeafNode to access this buffer's data pointer. template<typename, Index> friend class LeafNode; }; // class LeafBuffer //////////////////////////////////////// template<typename T, Index Log2Dim> const T LeafBuffer<T, Log2Dim>::sZero = zeroVal<T>(); template<typename T, Index Log2Dim> inline LeafBuffer<T, Log2Dim>::LeafBuffer(const ValueType& val) : mData(new ValueType[SIZE]) { mOutOfCore = 0; this->fill(val); } template<typename T, Index Log2Dim> inline LeafBuffer<T, Log2Dim>::~LeafBuffer() { if (this->isOutOfCore()) { this->detachFromFile(); } else { this->deallocate(); } } template<typename T, Index Log2Dim> inline LeafBuffer<T, Log2Dim>::LeafBuffer(const LeafBuffer& other) : mData(nullptr) , mOutOfCore(other.mOutOfCore) { if (other.isOutOfCore()) { mFileInfo = new FileInfo(*other.mFileInfo); } else if (other.mData != nullptr) { this->allocate(); ValueType* target = mData; const ValueType* source = other.mData; Index n = SIZE; while (n--) *target++ = *source++; } } template<typename T, Index Log2Dim> inline void LeafBuffer<T, Log2Dim>::setValue(Index i, const ValueType& val) { assert(i < SIZE); this->loadValues(); if (mData) mData[i] = val; } template<typename T, Index Log2Dim> inline 
LeafBuffer<T, Log2Dim>& LeafBuffer<T, Log2Dim>::operator=(const LeafBuffer& other) { if (&other != this) { if (this->isOutOfCore()) { this->detachFromFile(); } else { if (other.isOutOfCore()) this->deallocate(); } if (other.isOutOfCore()) { mOutOfCore = other.mOutOfCore; mFileInfo = new FileInfo(*other.mFileInfo); } else if (other.mData != nullptr) { this->allocate(); ValueType* target = mData; const ValueType* source = other.mData; Index n = SIZE; while (n--) *target++ = *source++; } } return *this; } template<typename T, Index Log2Dim> inline void LeafBuffer<T, Log2Dim>::fill(const ValueType& val) { this->detachFromFile(); if (mData != nullptr) { ValueType* target = mData; Index n = SIZE; while (n--) *target++ = val; } } template<typename T, Index Log2Dim> inline bool LeafBuffer<T, Log2Dim>::operator==(const LeafBuffer& other) const { this->loadValues(); other.loadValues(); const ValueType *target = mData, *source = other.mData; if (!target && !source) return true; if (!target || !source) return false; Index n = SIZE; while (n && math::isExactlyEqual(*target++, *source++)) --n; return n == 0; } template<typename T, Index Log2Dim> inline void LeafBuffer<T, Log2Dim>::swap(LeafBuffer& other) { std::swap(mData, other.mData); std::swap(mOutOfCore, other.mOutOfCore); } template<typename T, Index Log2Dim> inline Index LeafBuffer<T, Log2Dim>::memUsage() const { size_t n = sizeof(*this); if (this->isOutOfCore()) n += sizeof(FileInfo); else if (mData) n += SIZE * sizeof(ValueType); return static_cast<Index>(n); } template<typename T, Index Log2Dim> inline const typename LeafBuffer<T, Log2Dim>::ValueType* LeafBuffer<T, Log2Dim>::data() const { this->loadValues(); if (mData == nullptr) { LeafBuffer* self = const_cast<LeafBuffer*>(this); // This lock will be contended at most once. 
tbb::spin_mutex::scoped_lock lock(self->mMutex); if (mData == nullptr) self->mData = new ValueType[SIZE]; } return mData; } template<typename T, Index Log2Dim> inline typename LeafBuffer<T, Log2Dim>::ValueType* LeafBuffer<T, Log2Dim>::data() { this->loadValues(); if (mData == nullptr) { // This lock will be contended at most once. tbb::spin_mutex::scoped_lock lock(mMutex); if (mData == nullptr) mData = new ValueType[SIZE]; } return mData; } template<typename T, Index Log2Dim> inline const typename LeafBuffer<T, Log2Dim>::ValueType& LeafBuffer<T, Log2Dim>::at(Index i) const { assert(i < SIZE); this->loadValues(); // We can't use the ternary operator here, otherwise Visual C++ returns // a reference to a temporary. if (mData) return mData[i]; else return sZero; } template<typename T, Index Log2Dim> inline bool LeafBuffer<T, Log2Dim>::deallocate() { if (mData != nullptr && !this->isOutOfCore()) { delete[] mData; mData = nullptr; return true; } return false; } template<typename T, Index Log2Dim> inline void LeafBuffer<T, Log2Dim>::doLoad() const { if (!this->isOutOfCore()) return; LeafBuffer<T, Log2Dim>* self = const_cast<LeafBuffer<T, Log2Dim>*>(this); // This lock will be contended at most once, after which this buffer // will no longer be out-of-core. tbb::spin_mutex::scoped_lock lock(self->mMutex); if (!this->isOutOfCore()) return; std::unique_ptr<FileInfo> info(self->mFileInfo); assert(info.get() != nullptr); assert(info->mapping.get() != nullptr); assert(info->meta.get() != nullptr); /// @todo For now, we have to clear the mData pointer in order for allocate() to take effect. 
self->mData = nullptr; self->allocate(); SharedPtr<std::streambuf> buf = info->mapping->createBuffer(); std::istream is(buf.get()); io::setStreamMetadataPtr(is, info->meta, /*transfer=*/true); NodeMaskType mask; is.seekg(info->maskpos); mask.load(is); is.seekg(info->bufpos); io::readCompressedValues(is, self->mData, SIZE, mask, io::getHalfFloat(is)); self->setOutOfCore(false); } template<typename T, Index Log2Dim> inline bool LeafBuffer<T, Log2Dim>::detachFromFile() { if (this->isOutOfCore()) { delete mFileInfo; mFileInfo = nullptr; this->setOutOfCore(false); return true; } return false; } //////////////////////////////////////// // Partial specialization for bool ValueType template<Index Log2Dim> class LeafBuffer<bool, Log2Dim> { public: using NodeMaskType = util::NodeMask<Log2Dim>; using WordType = typename NodeMaskType::Word; using ValueType = bool; using StorageType = WordType; static const Index WORD_COUNT = NodeMaskType::WORD_COUNT; static const Index SIZE = 1 << 3 * Log2Dim; // These static declarations must be on separate lines to avoid VC9 compiler errors. static const bool sOn; static const bool sOff; LeafBuffer() {} LeafBuffer(bool on): mData(on) {} LeafBuffer(const NodeMaskType& other): mData(other) {} LeafBuffer(const LeafBuffer& other): mData(other.mData) {} ~LeafBuffer() {} void fill(bool val) { mData.set(val); } LeafBuffer& operator=(const LeafBuffer& b) { if (&b != this) { mData=b.mData; } return *this; } const bool& getValue(Index i) const { assert(i < SIZE); // We can't use the ternary operator here, otherwise Visual C++ returns // a reference to a temporary. 
if (mData.isOn(i)) return sOn; else return sOff; } const bool& operator[](Index i) const { return this->getValue(i); } bool operator==(const LeafBuffer& other) const { return mData == other.mData; } bool operator!=(const LeafBuffer& other) const { return mData != other.mData; } void setValue(Index i, bool val) { assert(i < SIZE); mData.set(i, val); } void swap(LeafBuffer& other) { if (&other != this) std::swap(mData, other.mData); } Index memUsage() const { return sizeof(*this); } static Index size() { return SIZE; } /// @brief Return a pointer to the C-style array of words encoding the bits. /// @warning This method should only be used by experts seeking low-level optimizations. WordType* data() { return &(mData.template getWord<WordType>(0)); } /// @brief Return a const pointer to the C-style array of words encoding the bits. /// @warning This method should only be used by experts seeking low-level optimizations. const WordType* data() const { return const_cast<LeafBuffer*>(this)->data(); } private: // Allow the parent LeafNode to access this buffer's data. template<typename, Index> friend class LeafNode; NodeMaskType mData; }; // class LeafBuffer /// @internal For consistency with other nodes and with iterators, methods like /// LeafNode::getValue() return a reference to a value. Since it's not possible /// to return a reference to a bit in a node mask, we return a reference to one /// of the following static values instead. template<Index Log2Dim> const bool LeafBuffer<bool, Log2Dim>::sOn = true; template<Index Log2Dim> const bool LeafBuffer<bool, Log2Dim>::sOff = false; } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_LEAFBUFFER_HAS_BEEN_INCLUDED
15,253
C
31.317797
99
0.668459
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/InternalNode.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file InternalNode.h /// /// @brief Internal table nodes for OpenVDB trees #ifndef OPENVDB_TREE_INTERNALNODE_HAS_BEEN_INCLUDED #define OPENVDB_TREE_INTERNALNODE_HAS_BEEN_INCLUDED #include <openvdb/Platform.h> #include <openvdb/util/NodeMasks.h> #include <openvdb/io/Compression.h> // for io::readCompressedValues(), etc. #include <openvdb/math/Math.h> // for math::isExactlyEqual(), etc. #include <openvdb/version.h> #include <openvdb/Types.h> #include "Iterator.h" #include "NodeUnion.h" #include <tbb/parallel_for.h> #include <memory> #include <type_traits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { template<typename, Index, typename> struct SameInternalConfig; // forward declaration template<typename _ChildNodeType, Index Log2Dim> class InternalNode { public: using ChildNodeType = _ChildNodeType; using LeafNodeType = typename ChildNodeType::LeafNodeType; using ValueType = typename ChildNodeType::ValueType; using BuildType = typename ChildNodeType::BuildType; using UnionType = NodeUnion<ValueType, ChildNodeType>; using NodeMaskType = util::NodeMask<Log2Dim>; static const Index LOG2DIM = Log2Dim, // log2 of tile count in one dimension TOTAL = Log2Dim + ChildNodeType::TOTAL, // log2 of voxel count in one dimension DIM = 1 << TOTAL, // total voxel count in one dimension NUM_VALUES = 1 << (3 * Log2Dim), // total voxel count represented by this node LEVEL = 1 + ChildNodeType::LEVEL; // level 0 = leaf static const Index64 NUM_VOXELS = uint64_t(1) << (3 * TOTAL); // total voxel count represented by this node /// @brief ValueConverter<T>::Type is the type of an InternalNode having the same /// child hierarchy and dimensions as this node but a different value type, T. 
template<typename OtherValueType> struct ValueConverter { using Type = InternalNode<typename ChildNodeType::template ValueConverter< OtherValueType>::Type, Log2Dim>; }; /// @brief SameConfiguration<OtherNodeType>::value is @c true if and only if OtherNodeType /// is the type of an InternalNode with the same dimensions as this node and whose /// ChildNodeType has the same configuration as this node's ChildNodeType. template<typename OtherNodeType> struct SameConfiguration { static const bool value = SameInternalConfig<ChildNodeType, Log2Dim, OtherNodeType>::value; }; /// @brief Default constructor /// @warning The resulting InternalNode is uninitialized InternalNode() {} /// @brief Constructor of an InternalNode with dense inactive tiles of the specified value. /// @param offValue Background value used for inactive values explicit InternalNode(const ValueType& offValue); /// @brief Constructs an InternalNode with dense tiles /// @param origin The location in index space of the fist tile value /// @param fillValue Value assigned to all the tiles /// @param active State assigned to all the tiles InternalNode(const Coord& origin, const ValueType& fillValue, bool active = false); InternalNode(PartialCreate, const Coord&, const ValueType& fillValue, bool active = false); /// @brief Deep copy constructor /// /// @note This method is multi-threaded! InternalNode(const InternalNode&); /// @brief Value conversion copy constructor /// /// @note This method is multi-threaded! template<typename OtherChildNodeType> explicit InternalNode(const InternalNode<OtherChildNodeType, Log2Dim>& other); /// @brief Topology copy constructor /// /// @note This method is multi-threaded! template<typename OtherChildNodeType> InternalNode(const InternalNode<OtherChildNodeType, Log2Dim>& other, const ValueType& background, TopologyCopy); /// @brief Topology copy constructor /// /// @note This method is multi-threaded! 
/// Topology copy constructor that distinguishes active from inactive values:
/// inactive entries of @a other become @a offValue and active entries become @a onValue.
template<typename OtherChildNodeType>
InternalNode(const InternalNode<OtherChildNodeType, Log2Dim>& other,
    const ValueType& offValue, const ValueType& onValue, TopologyCopy);

~InternalNode();

protected:
using MaskOnIterator = typename NodeMaskType::OnIterator;
using MaskOffIterator = typename NodeMaskType::OffIterator;
using MaskDenseIterator = typename NodeMaskType::DenseIterator;

// Type tags to disambiguate template instantiations
struct ValueOn {}; struct ValueOff {}; struct ValueAll {};
struct ChildOn {}; struct ChildOff {}; struct ChildAll {};

// The following class templates implement the iterator interfaces specified in Iterator.h
// by providing getItem(), setItem() and/or modifyItem() methods.

// Sparse iterator that visits child nodes of an InternalNode
template<typename NodeT, typename ChildT, typename MaskIterT, typename TagT>
struct ChildIter: public SparseIteratorBase<
    MaskIterT, ChildIter<NodeT, ChildT, MaskIterT, TagT>, NodeT, ChildT>
{
    ChildIter() {}
    ChildIter(const MaskIterT& iter, NodeT* parent): SparseIteratorBase<
        MaskIterT, ChildIter<NodeT, ChildT, MaskIterT, TagT>, NodeT, ChildT>(iter, parent) {}

    ChildT& getItem(Index pos) const
    {
        assert(this->parent().isChildMaskOn(pos));
        return *(this->parent().getChildNode(pos));
    }

    // Note: setItem() can't be called on const iterators.
    void setItem(Index pos, const ChildT& c) const { this->parent().resetChildNode(pos, &c); }

    // Note: modifyItem() isn't implemented, since it's not useful for child node pointers.
};// ChildIter

// Sparse iterator that visits tile values of an InternalNode
template<typename NodeT, typename ValueT, typename MaskIterT, typename TagT>
struct ValueIter: public SparseIteratorBase<
    MaskIterT, ValueIter<NodeT, ValueT, MaskIterT, TagT>, NodeT, ValueT>
{
    ValueIter() {}
    ValueIter(const MaskIterT& iter, NodeT* parent): SparseIteratorBase<
        MaskIterT, ValueIter<NodeT, ValueT, MaskIterT, TagT>, NodeT, ValueT>(iter, parent) {}

    const ValueT& getItem(Index pos) const { return this->parent().mNodes[pos].getValue(); }

    // Note: setItem() can't be called on const iterators.
    void setItem(Index pos, const ValueT& v) const { this->parent().mNodes[pos].setValue(v); }

    // Note: modifyItem() can't be called on const iterators.
    template<typename ModifyOp>
    void modifyItem(Index pos, const ModifyOp& op) const
    {
        op(this->parent().mNodes[pos].getValue());
    }
};// ValueIter

// Dense iterator that visits both tiles and child nodes of an InternalNode
template<typename NodeT, typename ChildT, typename ValueT, typename TagT>
struct DenseIter: public DenseIteratorBase<
    MaskDenseIterator, DenseIter<NodeT, ChildT, ValueT, TagT>, NodeT, ChildT, ValueT>
{
    using BaseT = DenseIteratorBase<MaskDenseIterator, DenseIter, NodeT, ChildT, ValueT>;
    using NonConstValueT = typename BaseT::NonConstValueType;

    DenseIter() {}
    DenseIter(const MaskDenseIterator& iter, NodeT* parent):
        DenseIteratorBase<MaskDenseIterator, DenseIter, NodeT, ChildT, ValueT>(iter, parent) {}

    // Returns true and sets child when the entry is a child node;
    // returns false and sets value when the entry is a tile.
    bool getItem(Index pos, ChildT*& child, NonConstValueT& value) const
    {
        if (this->parent().isChildMaskOn(pos)) {
            child = this->parent().getChildNode(pos);
            return true;
        }
        child = nullptr;
        value = this->parent().mNodes[pos].getValue();
        return false;
    }

    // Note: setItem() can't be called on const iterators.
    void setItem(Index pos, ChildT* child) const { this->parent().resetChildNode(pos, child); }

    // Note: unsetItem() can't be called on const iterators.
    void unsetItem(Index pos, const ValueT& value) const
    {
        this->parent().unsetChildNode(pos, value);
    }
};// DenseIter

public:
// Iterators (see Iterator.h for usage)
using ChildOnIter = ChildIter<InternalNode, ChildNodeType, MaskOnIterator, ChildOn>;
using ChildOnCIter = ChildIter<const InternalNode,const ChildNodeType,MaskOnIterator,ChildOn>;
using ChildOffIter = ValueIter<InternalNode, const ValueType, MaskOffIterator, ChildOff>;
using ChildOffCIter = ValueIter<const InternalNode,const ValueType,MaskOffIterator,ChildOff>;
using ChildAllIter = DenseIter<InternalNode, ChildNodeType, ValueType, ChildAll>;
using ChildAllCIter = DenseIter<const InternalNode,const ChildNodeType, ValueType, ChildAll>;

using ValueOnIter = ValueIter<InternalNode, const ValueType, MaskOnIterator, ValueOn>;
using ValueOnCIter = ValueIter<const InternalNode, const ValueType, MaskOnIterator, ValueOn>;
using ValueOffIter = ValueIter<InternalNode, const ValueType, MaskOffIterator, ValueOff>;
using ValueOffCIter = ValueIter<const InternalNode,const ValueType,MaskOffIterator,ValueOff>;
using ValueAllIter = ValueIter<InternalNode, const ValueType, MaskOffIterator, ValueAll>;
using ValueAllCIter = ValueIter<const InternalNode,const ValueType,MaskOffIterator,ValueAll>;

ChildOnCIter cbeginChildOn() const { return ChildOnCIter(mChildMask.beginOn(), this); }
ChildOffCIter cbeginChildOff() const { return ChildOffCIter(mChildMask.beginOff(), this); }
ChildAllCIter cbeginChildAll() const { return ChildAllCIter(mChildMask.beginDense(), this); }
ChildOnCIter beginChildOn() const { return cbeginChildOn(); }
ChildOffCIter beginChildOff() const { return cbeginChildOff(); }
ChildAllCIter beginChildAll() const { return cbeginChildAll(); }
ChildOnIter beginChildOn() { return ChildOnIter(mChildMask.beginOn(), this); }
ChildOffIter beginChildOff() { return ChildOffIter(mChildMask.beginOff(), this); }
ChildAllIter beginChildAll() { return ChildAllIter(mChildMask.beginDense(), this); }

ValueOnCIter cbeginValueOn() const { return ValueOnCIter(mValueMask.beginOn(), this); }
/// @warning This iterator will also visit child nodes so use isChildMaskOn to skip them!
ValueOffCIter cbeginValueOff() const { return ValueOffCIter(mValueMask.beginOff(), this); }
ValueAllCIter cbeginValueAll() const { return ValueAllCIter(mChildMask.beginOff(), this); }
ValueOnCIter beginValueOn() const { return cbeginValueOn(); }
/// @warning This iterator will also visit child nodes so use isChildMaskOn to skip them!
ValueOffCIter beginValueOff() const { return cbeginValueOff(); }
ValueAllCIter beginValueAll() const { return cbeginValueAll(); }
ValueOnIter beginValueOn() { return ValueOnIter(mValueMask.beginOn(), this); }
/// @warning This iterator will also visit child nodes so use isChildMaskOn to skip them!
ValueOffIter beginValueOff() { return ValueOffIter(mValueMask.beginOff(), this); }
ValueAllIter beginValueAll() { return ValueAllIter(mChildMask.beginOff(), this); }

/// @return The dimension of this InternalNode
/// @details The number of voxels in one coordinate direction covered by this node
static Index dim() { return DIM; }
/// @return The level of this node
/// @details Level 0 is by definition the level of the leaf nodes
static Index getLevel() { return LEVEL; }
/// @brief Populates a std::vector with the dimension of all the
/// nodes in the branch starting with this node.
static void getNodeLog2Dims(std::vector<Index>& dims);
/// @return The dimension of the child nodes of this node.
/// @details The number of voxels in one coordinate direction
/// covered by a child node of this node.
static Index getChildDim() { return ChildNodeType::DIM; }

/// Return the linear table offset of the given global or local coordinates.
static Index coordToOffset(const Coord& xyz);
/// @brief Return the local coordinates for a linear table offset,
/// where offset 0 has coordinates (0, 0, 0).
static void offsetToLocalCoord(Index n, Coord& xyz);
/// Return the global coordinates for a linear table offset.
Coord offsetToGlobalCoord(Index n) const;

/// Return the grid index coordinates of this node's local origin.
const Coord& origin() const { return mOrigin; }
/// Set the grid index coordinates of this node's local origin.
void setOrigin(const Coord& origin) { mOrigin = origin; }

Index32 leafCount() const;
void nodeCount(std::vector<Index32> &vec) const;
Index32 nonLeafCount() const;
Index32 childCount() const;
Index64 onVoxelCount() const;
Index64 offVoxelCount() const;
Index64 onLeafVoxelCount() const;
Index64 offLeafVoxelCount() const;
Index64 onTileCount() const;

/// Return the total amount of memory in bytes occupied by this node and its children.
Index64 memUsage() const;

/// @brief Expand the specified bounding box so that it includes the active tiles
/// of this internal node as well as all the active values in its child nodes.
/// If visitVoxels is false LeafNodes will be approximated as dense, i.e. with all
/// voxels active. Else the individual active voxels are visited to produce a tight bbox.
void evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels = true) const;

/// @brief Return the bounding box of this node, i.e., the full index space
/// spanned by the node regardless of its content.
CoordBBox getNodeBoundingBox() const { return CoordBBox::createCube(mOrigin, DIM); }

/// @return True if this node contains no child nodes.
bool isEmpty() const { return mChildMask.isOff(); }

/// Return @c true if all of this node's table entries have the same active state
/// and the same constant value to within the given tolerance,
/// and return that value in @a firstValue and the active state in @a state.
///
/// @note This method also returns @c false if this node contains any child nodes.
bool isConstant(ValueType& firstValue, bool& state,
    const ValueType& tolerance = zeroVal<ValueType>()) const;

/// Return @c true if all of this node's tables entries have
/// the same active @a state and the range of its values satisfy
/// (@a maxValue - @a minValue) <= @a tolerance.
///
/// @param minValue Is updated with the minimum of all values IF method
///                 returns @c true. Else the value is undefined!
/// @param maxValue Is updated with the maximum of all values IF method
///                 returns @c true. Else the value is undefined!
/// @param state Is updated with the state of all values IF method
///              returns @c true. Else the value is undefined!
/// @param tolerance The tolerance used to determine if values are
///                  approximately constant.
///
/// @note This method also returns @c false if this node contains any child nodes.
bool isConstant(ValueType& minValue, ValueType& maxValue,
    bool& state, const ValueType& tolerance = zeroVal<ValueType>()) const;

/// Return @c true if this node has no children and only contains inactive values.
bool isInactive() const { return this->isChildMaskOff() && this->isValueMaskOff(); }

/// Return @c true if the voxel at the given coordinates is active.
bool isValueOn(const Coord& xyz) const;
/// Return @c true if the voxel at the given offset is active.
bool isValueOn(Index offset) const { return mValueMask.isOn(offset); }

/// Return @c true if this node or any of its child nodes have any active tiles.
bool hasActiveTiles() const;

const ValueType& getValue(const Coord& xyz) const;
bool probeValue(const Coord& xyz, ValueType& value) const;

/// @brief Return the level of the tree (0 = leaf) at which the value
/// at the given coordinates resides.
Index getValueLevel(const Coord& xyz) const;

/// @brief If the first entry in this node's table is a tile, return the tile's value.
/// Otherwise, return the result of calling getFirstValue() on the child.
const ValueType& getFirstValue() const;
/// @brief If the last entry in this node's table is a tile, return the tile's value.
/// Otherwise, return the result of calling getLastValue() on the child.
const ValueType& getLastValue() const;

/// Set the active state of the voxel at the given coordinates but don't change its value.
void setActiveState(const Coord& xyz, bool on);
/// Set the value of the voxel at the given coordinates but don't change its active state.
void setValueOnly(const Coord& xyz, const ValueType& value);
/// Mark the voxel at the given coordinates as active but don't change its value.
void setValueOn(const Coord& xyz);
/// Set the value of the voxel at the given coordinates and mark the voxel as active.
void setValueOn(const Coord& xyz, const ValueType& value);
/// Mark the voxel at the given coordinates as inactive but don't change its value.
void setValueOff(const Coord& xyz);
/// Set the value of the voxel at the given coordinates and mark the voxel as inactive.
void setValueOff(const Coord& xyz, const ValueType& value);

/// @brief Apply a functor to the value of the voxel at the given coordinates
/// and mark the voxel as active.
template<typename ModifyOp>
void modifyValue(const Coord& xyz, const ModifyOp& op);

/// Apply a functor to the voxel at the given coordinates.
template<typename ModifyOp>
void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op);

/// Return the value of the voxel at the given coordinates and, if necessary, update
/// the accessor with pointers to the nodes along the path from the root node to
/// the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename AccessorT>
const ValueType& getValueAndCache(const Coord& xyz, AccessorT&) const;

/// Return @c true if the voxel at the given coordinates is active and, if necessary,
/// update the accessor with pointers to the nodes along the path from the root node
/// to the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename AccessorT>
bool isValueOnAndCache(const Coord& xyz, AccessorT&) const;

/// Change the value of the voxel at the given coordinates and mark it as active.
/// If necessary, update the accessor with pointers to the nodes along the path
/// from the root node to the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename AccessorT>
void setValueAndCache(const Coord& xyz, const ValueType& value, AccessorT&);

/// Set the value of the voxel at the given coordinate but preserves its active state.
/// If necessary, update the accessor with pointers to the nodes along the path
/// from the root node to the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename AccessorT>
void setValueOnlyAndCache(const Coord& xyz, const ValueType& value, AccessorT&);

/// @brief Apply a functor to the value of the voxel at the given coordinates
/// and mark the voxel as active.
/// If necessary, update the accessor with pointers to the nodes along the path
/// from the root node to the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename ModifyOp, typename AccessorT>
void modifyValueAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&);

/// Apply a functor to the voxel at the given coordinates.
/// If necessary, update the accessor with pointers to the nodes along the path
/// from the root node to the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename ModifyOp, typename AccessorT>
void modifyValueAndActiveStateAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&);

/// Change the value of the voxel at the given coordinates and mark it as inactive.
/// If necessary, update the accessor with pointers to the nodes along the path
/// from the root node to the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename AccessorT>
void setValueOffAndCache(const Coord& xyz, const ValueType& value, AccessorT&);

/// Set the active state of the voxel at the given coordinates without changing its value.
/// If necessary, update the accessor with pointers to the nodes along the path
/// from the root node to the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename AccessorT>
void setActiveStateAndCache(const Coord& xyz, bool on, AccessorT&);

/// Return, in @a value, the value of the voxel at the given coordinates and,
/// if necessary, update the accessor with pointers to the nodes along
/// the path from the root node to the node containing the voxel.
/// @return @c true if the voxel at the given coordinates is active
/// @note Used internally by ValueAccessor.
template<typename AccessorT>
bool probeValueAndCache(const Coord& xyz, ValueType& value, AccessorT&) const;

/// @brief Return the level of the tree (0 = leaf) at which the value
/// at the given coordinates resides.
///
/// If necessary, update the accessor with pointers to the nodes along the path
/// from the root node to the node containing the voxel.
/// @note Used internally by ValueAccessor.
template<typename AccessorT>
Index getValueLevelAndCache(const Coord& xyz, AccessorT&) const;

/// Mark all values (both tiles and voxels) as active.
void setValuesOn();

//
// I/O
//
void writeTopology(std::ostream&, bool toHalf = false) const;
void readTopology(std::istream&, bool fromHalf = false);
void writeBuffers(std::ostream&, bool toHalf = false) const;
void readBuffers(std::istream&, bool fromHalf = false);
void readBuffers(std::istream&, const CoordBBox&, bool fromHalf = false);

//
// Aux methods
//

/// Change the sign of all the values represented in this node and its child nodes.
void negate();

/// @brief Set all voxels within a given axis-aligned box to a constant value.
/// @param bbox inclusive coordinates of opposite corners of an axis-aligned box
/// @param value the value to which to set voxels within the box
/// @param active if true, mark voxels within the box as active,
///               otherwise mark them as inactive
/// @note This operation generates a sparse, but not always optimally sparse,
/// representation of the filled box. Follow fill operations with a prune()
/// operation for optimal sparseness.
void fill(const CoordBBox& bbox, const ValueType& value, bool active = true);

/// @brief Set all voxels within a given axis-aligned box to a constant value
/// and ensure that those voxels are all represented at the leaf level.
/// @param bbox inclusive coordinates of opposite corners of an axis-aligned box.
/// @param value the value to which to set voxels within the box.
/// @param active if true, mark voxels within the box as active,
///               otherwise mark them as inactive.
/// @sa voxelizeActiveTiles()
void denseFill(const CoordBBox& bbox, const ValueType& value, bool active = true);

/// @brief Densify active tiles, i.e., replace them with leaf-level active voxels.
/// @param threaded if true, this operation is multi-threaded (over the internal nodes).
/// @sa denseFill()
void voxelizeActiveTiles(bool threaded = true);

/// @brief Copy into a dense grid the values of the voxels that lie within
/// a given bounding box.
/// @param bbox inclusive bounding box of the voxels to be copied into the dense grid
/// @param dense dense grid with a stride in @e z of one (see tools::Dense
/// in tools/Dense.h for the required API)
/// @note @a bbox is assumed to be identical to or contained in the coordinate domains
/// of both the dense grid and this node, i.e., no bounds checking is performed.
template<typename DenseT>
void copyToDense(const CoordBBox& bbox, DenseT& dense) const;

/// @brief Efficiently merge another tree into this tree using one of several schemes.
/// @warning This operation cannibalizes the other tree.
template<MergePolicy Policy>
void merge(InternalNode& other, const ValueType& background, const ValueType& otherBackground);

/// @brief Merge, using one of several schemes, this node (and its descendants)
/// with a tile of the same dimensions and the given value and active state.
template<MergePolicy Policy> void merge(const ValueType& tileValue, bool tileActive);

/// @brief Union this branch's set of active values with the other branch's
/// active values. The value type of the other branch can be different.
/// @details The resulting state of a value is active if the corresponding value
/// was already active OR if it is active in the other tree. Also, a resulting
/// value maps to a voxel if the corresponding value already mapped to a voxel
/// OR if it is a voxel in the other tree. Thus, a resulting value can only
/// map to a tile if the corresponding value already mapped to a tile
/// AND if it is a tile value in other tree.
///
/// Specifically, active tiles and voxels in this branch are not changed, and
/// tiles or voxels that were inactive in this branch but active in the other branch
/// are marked as active in this branch but left with their original values.
template<typename OtherChildNodeType>
void topologyUnion(const InternalNode<OtherChildNodeType, Log2Dim>& other);

/// @brief Intersects this tree's set of active values with the active values
/// of the other tree, whose @c ValueType may be different.
/// @details The resulting state of a value is active only if the corresponding
/// value was already active AND if it is active in the other tree. Also, a
/// resulting value maps to a voxel if the corresponding value
/// already mapped to an active voxel in either of the two grids
/// and it maps to an active tile or voxel in the other grid.
///
/// @note This operation can delete branches in this grid if they
/// overlap with inactive tiles in the other grid. Likewise active
/// voxels can be turned into inactive voxels resulting in leaf
/// nodes with no active values. Thus, it is recommended to
/// subsequently call prune.
template<typename OtherChildNodeType>
void topologyIntersection(const InternalNode<OtherChildNodeType, Log2Dim>& other,
    const ValueType& background);

/// @brief Difference this node's set of active values with the active values
/// of the other node, whose @c ValueType may be different. So a
/// resulting voxel will be active only if the original voxel is
/// active in this node and inactive in the other node.
///
/// @details The last dummy argument is required to match the signature
/// for InternalNode::topologyDifference.
///
/// @note This operation modifies only active states, not
/// values. Also note that this operation can result in all voxels
/// being inactive so consider subsequently calling prune.
template<typename OtherChildNodeType>
void topologyDifference(const InternalNode<OtherChildNodeType, Log2Dim>& other,
    const ValueType& background);

template<typename CombineOp>
void combine(InternalNode& other, CombineOp&);
template<typename CombineOp>
void combine(const ValueType& value, bool valueIsActive, CombineOp&);

template<typename CombineOp, typename OtherNodeType /*= InternalNode*/>
void combine2(const InternalNode& other0, const OtherNodeType& other1, CombineOp&);
template<typename CombineOp, typename OtherNodeType /*= InternalNode*/>
void combine2(const ValueType& value, const OtherNodeType& other, bool valIsActive, CombineOp&);
template<typename CombineOp, typename OtherValueType>
void combine2(const InternalNode& other, const OtherValueType&, bool valIsActive, CombineOp&);

/// @brief Calls the templated functor BBoxOp with bounding box
/// information for all active tiles and leaf nodes in this node.
/// An additional level argument is provided for each callback.
///
/// @note The bounding boxes are guaranteed to be non-overlapping.
template<typename BBoxOp> void visitActiveBBox(BBoxOp&) const;

template<typename VisitorOp> void visit(VisitorOp&);
template<typename VisitorOp> void visit(VisitorOp&) const;

template<typename OtherNodeType, typename VisitorOp>
void visit2Node(OtherNodeType& other, VisitorOp&);
template<typename OtherNodeType, typename VisitorOp>
void visit2Node(OtherNodeType& other, VisitorOp&) const;
template<typename IterT, typename VisitorOp>
void visit2(IterT& otherIter, VisitorOp&, bool otherIsLHS = false);
template<typename IterT, typename VisitorOp>
void visit2(IterT& otherIter, VisitorOp&, bool otherIsLHS = false) const;

/// Set all voxels that lie outside the given axis-aligned box to the background.
void clip(const CoordBBox&, const ValueType& background);

/// @brief Reduce the memory footprint of this tree by replacing with tiles
/// any nodes whose values are all the same (optionally to within a tolerance)
/// and have the same active state.
void prune(const ValueType& tolerance = zeroVal<ValueType>());

/// @brief Add the specified leaf to this node, possibly creating a child branch
/// in the process. If the leaf node already exists, replace it.
void addLeaf(LeafNodeType* leaf);

/// @brief Same as addLeaf() except, if necessary, update the accessor with pointers
/// to the nodes along the path from the root node to the node containing the coordinate.
template<typename AccessorT>
void addLeafAndCache(LeafNodeType* leaf, AccessorT&);

/// @brief Return a pointer to the node of type @c NodeT that contains voxel (x, y, z)
/// and replace it with a tile of the specified value and state.
/// If no such node exists, leave the tree unchanged and return @c nullptr.
///
/// @note The caller takes ownership of the node and is responsible for deleting it.
///
/// @warning Since this method potentially removes nodes and branches of the tree,
/// it is important to clear the caches of all ValueAccessors associated with this tree.
template<typename NodeT>
NodeT* stealNode(const Coord& xyz, const ValueType& value, bool state);

/// @brief Add the given child node at this level deducing the offset from its origin.
/// If a child node with this offset already exists, delete the old node and add the
/// new node in its place (i.e. ownership of the new child node is transferred to
/// this InternalNode)
/// @return @c true if inserting the child has been successful, otherwise the caller
/// retains ownership of the node and is responsible for deleting it.
bool addChild(ChildNodeType* child);

/// @brief Add a tile at the specified tree level that contains voxel (x, y, z),
/// possibly creating a parent branch or deleting a child branch in the process.
void addTile(Index level, const Coord& xyz, const ValueType& value, bool state);

/// @brief Delete any existing child branch at the specified offset and add a tile.
void addTile(Index offset, const ValueType& value, bool state);

/// @brief Same as addTile() except, if necessary, update the accessor with pointers
/// to the nodes along the path from the root node to the node containing (x, y, z).
template<typename AccessorT>
void addTileAndCache(Index level, const Coord& xyz, const ValueType&, bool state, AccessorT&);

//@{
/// @brief Return a pointer to the node that contains voxel (x, y, z).
/// If no such node exists, return nullptr.
template<typename NodeType> NodeType* probeNode(const Coord& xyz);
template<typename NodeType> const NodeType* probeConstNode(const Coord& xyz) const;
//@}

//@{
/// @brief Same as probeNode() except, if necessary, update the accessor with pointers
/// to the nodes along the path from the root node to the node containing (x, y, z).
template<typename NodeType, typename AccessorT>
NodeType* probeNodeAndCache(const Coord& xyz, AccessorT&);
template<typename NodeType, typename AccessorT>
const NodeType* probeConstNodeAndCache(const Coord& xyz, AccessorT&) const;
//@}

//@{
/// @brief Return a pointer to the leaf node that contains voxel (x, y, z).
/// If no such node exists, return @c nullptr.
LeafNodeType* probeLeaf(const Coord& xyz);
const LeafNodeType* probeConstLeaf(const Coord& xyz) const;
const LeafNodeType* probeLeaf(const Coord& xyz) const;
//@}

//@{
/// @brief Same as probeLeaf() except, if necessary, update the accessor with pointers
/// to the nodes along the path from the root node to the node containing (x, y, z).
template<typename AccessorT>
LeafNodeType* probeLeafAndCache(const Coord& xyz, AccessorT& acc);
template<typename AccessorT>
const LeafNodeType* probeConstLeafAndCache(const Coord& xyz, AccessorT& acc) const;
template<typename AccessorT>
const LeafNodeType* probeLeafAndCache(const Coord& xyz, AccessorT& acc) const;
//@}

/// @brief Return the leaf node that contains voxel (x, y, z).
/// If no such node exists, create one, but preserve the values and
/// active states of all voxels.
///
/// @details Use this method to preallocate a static tree topology
/// over which to safely perform multithreaded processing.
LeafNodeType* touchLeaf(const Coord& xyz);

/// @brief Same as touchLeaf() except, if necessary, update the accessor with pointers
/// to the nodes along the path from the root node to the node containing the coordinate.
template<typename AccessorT>
LeafNodeType* touchLeafAndCache(const Coord& xyz, AccessorT&);

//@{
/// @brief Adds all nodes of a certain type to a container with the following API:
/// @code
/// struct ArrayT {
///     using value_type = ...;// defines the type of nodes to be added to the array
///     void push_back(value_type nodePtr);// method that add nodes to the array
/// };
/// @endcode
/// @details An example of a wrapper around a c-style array is:
/// @code
/// struct MyArray {
///     using value_type = LeafType*;
///     value_type* ptr;
///     MyArray(value_type* array) : ptr(array) {}
///     void push_back(value_type leaf) { *ptr++ = leaf; }
///};
/// @endcode
/// @details An example that constructs a list of pointer to all leaf nodes is:
/// @code
/// std::vector<const LeafNodeType*> array;//most std containers have the required API
/// array.reserve(tree.leafCount());//this is a fast preallocation.
/// tree.getNodes(array);
/// @endcode
template<typename ArrayT> void getNodes(ArrayT& array);
template<typename ArrayT> void getNodes(ArrayT& array) const;
//@}

/// @brief Steals all nodes of a certain type from the tree and
/// adds them to a container with the following API:
/// @code
/// struct ArrayT {
///     using value_type = ...;// defines the type of nodes to be added to the array
///     void push_back(value_type nodePtr);// method that add nodes to the array
/// };
/// @endcode
/// @details An example of a wrapper around a c-style array is:
/// @code
/// struct MyArray {
///     using value_type = LeafType*;
///     value_type* ptr;
///     MyArray(value_type* array) : ptr(array) {}
///     void push_back(value_type leaf) { *ptr++ = leaf; }
///};
/// @endcode
/// @details An example that constructs a list of pointer to all leaf nodes is:
/// @code
/// std::vector<const LeafNodeType*> array;//most std containers have the required API
/// array.reserve(tree.leafCount());//this is a fast preallocation.
/// tree.stealNodes(array);
/// @endcode
template<typename ArrayT>
void stealNodes(ArrayT& array, const ValueType& value, bool state);

/// @brief Change inactive tiles or voxels with value oldBackground to newBackground
/// or -oldBackground to -newBackground. Active values are unchanged.
void resetBackground(const ValueType& oldBackground, const ValueType& newBackground);

/// @brief Return @c true if the given tree branch has the same node and active value
/// topology as this tree branch (but possibly a different @c ValueType).
template<typename OtherChildNodeType, Index OtherLog2Dim>
bool hasSameTopology(const InternalNode<OtherChildNodeType, OtherLog2Dim>* other) const;

protected:
//@{
/// Allow iterators to call mask accessor methods (setValueMask(), setChildMask(), etc.).
/// @todo Make mask accessors public?
friend class IteratorBase<MaskOnIterator, InternalNode>;
friend class IteratorBase<MaskOffIterator, InternalNode>;
friend class IteratorBase<MaskDenseIterator, InternalNode>;
//@}

/// @brief During topology-only construction, access is needed
/// to protected/private members of other template instances.
template<typename, Index> friend class InternalNode;

// Mask accessors
public:
bool isValueMaskOn(Index n) const { return mValueMask.isOn(n); }
bool isValueMaskOn() const { return mValueMask.isOn(); }
bool isValueMaskOff(Index n) const { return mValueMask.isOff(n); }
bool isValueMaskOff() const { return mValueMask.isOff(); }
bool isChildMaskOn(Index n) const { return mChildMask.isOn(n); }
bool isChildMaskOff(Index n) const { return mChildMask.isOff(n); }
bool isChildMaskOff() const { return mChildMask.isOff(); }
const NodeMaskType& getValueMask() const { return mValueMask; }
const NodeMaskType& getChildMask() const { return mChildMask; }
// Mask of entries that are neither active values nor child nodes,
// i.e. the complement of (value mask | child mask).
NodeMaskType getValueOffMask() const
{
    NodeMaskType mask = mValueMask;
    mask |= mChildMask;
    mask.toggle();
    return mask;
}
const UnionType* getTable() const { return mNodes; }
protected:
//@{
/// Use a mask accessor to ensure consistency between the child and value masks;
/// i.e., the value mask should always be off wherever the child mask is on.
void setValueMask(Index n, bool on) { mValueMask.set(n, mChildMask.isOn(n) ? false : on); }
//@}

void makeChildNodeEmpty(Index n, const ValueType& value);
void setChildNode(  Index i, ChildNodeType* child);//assumes a tile
void resetChildNode(Index i, ChildNodeType* child);//checks for an existing child
ChildNodeType* unsetChildNode(Index i, const ValueType& value);

template<typename NodeT, typename VisitorOp, typename ChildAllIterT>
static inline void doVisit(NodeT&, VisitorOp&);

template<typename NodeT, typename OtherNodeT, typename VisitorOp,
    typename ChildAllIterT, typename OtherChildAllIterT>
static inline void doVisit2Node(NodeT&, OtherNodeT&, VisitorOp&);

template<typename NodeT, typename VisitorOp,
    typename ChildAllIterT, typename OtherChildAllIterT>
static inline void doVisit2(NodeT&, OtherChildAllIterT&, VisitorOp&, bool otherIsLHS);

///@{
/// @brief Returns a pointer to the child node at the linear offset n.
/// @warning This protected method assumes that a child node exists at
/// the specified linear offset!
ChildNodeType* getChildNode(Index n);
const ChildNodeType* getChildNode(Index n) const;
///@}

///@{
/// @brief Protected member classes for recursive multi-threading
struct VoxelizeActiveTiles;
template<typename OtherInternalNode> struct DeepCopy;
template<typename OtherInternalNode> struct TopologyCopy1;
template<typename OtherInternalNode> struct TopologyCopy2;
template<typename OtherInternalNode> struct TopologyUnion;
template<typename OtherInternalNode> struct TopologyDifference;
template<typename OtherInternalNode> struct TopologyIntersection;
///@}

// Table of child pointers or tile values, discriminated by the two masks below.
UnionType mNodes[NUM_VALUES];
NodeMaskType mChildMask, mValueMask;
/// Global grid index coordinates (x,y,z) of the local origin of this node
Coord mOrigin;
}; // class InternalNode


////////////////////////////////////////


//@{
/// Helper metafunction used to implement InternalNode::SameConfiguration
/// (which, as an inner class, can't be independently specialized)
template<typename ChildT1, Index Dim1, typename NodeT2>
struct SameInternalConfig {
    static const bool value = false;
};

template<typename ChildT1, Index Dim1, typename ChildT2>
struct SameInternalConfig<ChildT1, Dim1, InternalNode<ChildT2, Dim1> > {
    static const bool value = ChildT1::template SameConfiguration<ChildT2>::value;
};
//@}


////////////////////////////////////////


template<typename ChildT, Index Log2Dim>
inline
InternalNode<ChildT, Log2Dim>::InternalNode(const ValueType& background)
{
    // Fill the table with inactive background tiles (no children).
    for (Index i = 0; i < NUM_VALUES; ++i) mNodes[i].setValue(background);
}


template<typename ChildT, Index Log2Dim>
inline
InternalNode<ChildT, Log2Dim>::InternalNode(const Coord& origin, const ValueType& val, bool active):
    mOrigin(origin[0] & ~(DIM - 1), // zero out the low-order bits
            origin[1] & ~(DIM - 1),
            origin[2] & ~(DIM - 1))
{
    if (active) mValueMask.setOn();
    for (Index i = 0; i < NUM_VALUES; ++i) mNodes[i].setValue(val);
}


// For InternalNodes, the PartialCreate constructor is identical to its
// non-PartialCreate counterpart.
template<typename ChildT, Index Log2Dim>
inline
InternalNode<ChildT, Log2Dim>::InternalNode(PartialCreate,
    const Coord& origin, const ValueType& val, bool active)
    : mOrigin(origin[0] & ~(DIM-1), origin[1] & ~(DIM-1), origin[2] & ~(DIM-1))
{
    if (active) mValueMask.setOn();
    for (Index i = 0; i < NUM_VALUES; ++i) mNodes[i].setValue(val);
}

template<typename ChildT, Index Log2Dim>
template<typename OtherInternalNode>
struct InternalNode<ChildT, Log2Dim>::DeepCopy
{
    DeepCopy(const OtherInternalNode* source, InternalNode* target) : s(source), t(target) {
        // Deep-copy all table entries in parallel.
        tbb::parallel_for(tbb::blocked_range<Index>(0, NUM_VALUES), *this);
        //(*this)(tbb::blocked_range<Index>(0, NUM_VALUES));//serial
    }
    void operator()(const tbb::blocked_range<Index> &r) const {
        for (Index i = r.begin(), end=r.end(); i!=end; ++i) {
            if (s->mChildMask.isOff(i)) {
                // Tile entry: copy (and possibly convert) the value.
                t->mNodes[i].setValue(ValueType(s->mNodes[i].getValue()));
            } else {
                // Child entry: recursively deep-copy the subtree.
                t->mNodes[i].setChild(new ChildNodeType(*(s->mNodes[i].getChild())));
            }
        }
    }
    const OtherInternalNode* s;
    InternalNode* t;
};// DeepCopy

template<typename ChildT, Index Log2Dim>
inline
InternalNode<ChildT, Log2Dim>::InternalNode(const InternalNode& other):
    mChildMask(other.mChildMask),
    mValueMask(other.mValueMask),
    mOrigin(other.mOrigin)
{
    DeepCopy<InternalNode<ChildT, Log2Dim> > tmp(&other, this);
}


// Copy-construct from a node with the same configuration but a different ValueType.
// Converting copy constructor (same tree configuration, different ValueType);
// delegates the table copy to the parallel DeepCopy functor.
template<typename ChildT, Index Log2Dim>
template<typename OtherChildNodeType>
inline
InternalNode<ChildT, Log2Dim>::InternalNode(const InternalNode<OtherChildNodeType, Log2Dim>& other)
    : mChildMask(other.mChildMask)
    , mValueMask(other.mValueMask)
    , mOrigin(other.mOrigin)
{
    DeepCopy<InternalNode<OtherChildNodeType, Log2Dim> > tmp(&other, this);
}


// Functor for the topology-copy constructor that takes a single background value:
// children are topology-copied; all tiles become the background value.
template<typename ChildT, Index Log2Dim>
template<typename OtherInternalNode>
struct InternalNode<ChildT, Log2Dim>::TopologyCopy1
{
    TopologyCopy1(const OtherInternalNode* source, InternalNode* target,
                  const ValueType& background) : s(source), t(target), b(background) {
        // Eagerly performs the copy, parallelized over table entries.
        tbb::parallel_for(tbb::blocked_range<Index>(0, NUM_VALUES), *this);
        //(*this)(tbb::blocked_range<Index>(0, NUM_VALUES));//serial
    }
    void operator()(const tbb::blocked_range<Index> &r) const {
        for (Index i = r.begin(), end=r.end(); i!=end; ++i) {
            if (s->isChildMaskOn(i)) {
                t->mNodes[i].setChild(new ChildNodeType(*(s->mNodes[i].getChild()),
                                                        b, TopologyCopy()));
            } else {
                t->mNodes[i].setValue(b);
            }
        }
    }
    const OtherInternalNode* s; // source node
    InternalNode* t;            // target node
    const ValueType &b;         // background value assigned to every tile
};// TopologyCopy1


// Topology-copy constructor: copies masks and structure from @a other but
// replaces every value with @a background.
template<typename ChildT, Index Log2Dim>
template<typename OtherChildNodeType>
inline
InternalNode<ChildT, Log2Dim>::InternalNode(const InternalNode<OtherChildNodeType, Log2Dim>& other,
                                            const ValueType& background, TopologyCopy):
    mChildMask(other.mChildMask),
    mValueMask(other.mValueMask),
    mOrigin(other.mOrigin)
{
    TopologyCopy1<InternalNode<OtherChildNodeType, Log2Dim> > tmp(&other, this, background);
}


// Functor for the topology-copy constructor that distinguishes active and
// inactive values: tiles receive onValue or offValue according to the
// source's value mask.
template<typename ChildT, Index Log2Dim>
template<typename OtherInternalNode>
struct InternalNode<ChildT, Log2Dim>::TopologyCopy2
{
    TopologyCopy2(const OtherInternalNode* source, InternalNode* target,
                  const ValueType& offValue, const ValueType& onValue)
        : s(source), t(target), offV(offValue), onV(onValue) {
        tbb::parallel_for(tbb::blocked_range<Index>(0, NUM_VALUES), *this);
    }
    void operator()(const tbb::blocked_range<Index> &r) const {
        for (Index i = r.begin(), end=r.end(); i!=end; ++i) {
            if (s->isChildMaskOn(i)) {
                t->mNodes[i].setChild(new ChildNodeType(*(s->mNodes[i].getChild()),
                                                        offV, onV, TopologyCopy()));
            } else {
                t->mNodes[i].setValue(s->isValueMaskOn(i) ? onV : offV);
            }
        }
    }
    const OtherInternalNode* s; // source node
    InternalNode* t;            // target node
    const ValueType &offV, &onV; // values assigned to inactive/active tiles
};// TopologyCopy2


// Topology-copy constructor with distinct active/inactive fill values.
template<typename ChildT, Index Log2Dim>
template<typename OtherChildNodeType>
inline
InternalNode<ChildT, Log2Dim>::InternalNode(const InternalNode<OtherChildNodeType, Log2Dim>& other,
                                            const ValueType& offValue,
                                            const ValueType& onValue, TopologyCopy):
    mChildMask(other.mChildMask),
    mValueMask(other.mValueMask),
    mOrigin(other.mOrigin)
{
    TopologyCopy2<InternalNode<OtherChildNodeType, Log2Dim> > tmp(&other, this, offValue, onValue);
}


// Destructor: delete only the table slots that actually hold child pointers.
// (Tile slots own nothing.)
template<typename ChildT, Index Log2Dim>
inline
InternalNode<ChildT, Log2Dim>::~InternalNode()
{
    for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) {
        delete mNodes[iter.pos()].getChild();
    }
}


////////////////////////////////////////


// Count leaf nodes in this subtree. If the children are themselves leaves,
// the child mask's population count is the answer; otherwise recurse.
template<typename ChildT, Index Log2Dim>
inline Index32
InternalNode<ChildT, Log2Dim>::leafCount() const
{
    if (ChildNodeType::getLevel() == 0) return mChildMask.countOn();
    Index32 sum = 0;
    for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) {
        sum += iter->leafCount();
    }
    return sum;
}


// Accumulate per-level node counts into @a vec, indexed by tree level.
// @pre vec must be large enough to index ChildNodeType::LEVEL.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::nodeCount(std::vector<Index32> &vec) const
{
    assert(vec.size() > ChildNodeType::LEVEL);
    const auto count = mChildMask.countOn();
    // Only recurse if the children are internal nodes and any exist.
    if (ChildNodeType::LEVEL > 0 && count > 0) {
        for (auto iter = this->cbeginChildOn(); iter; ++iter) iter->nodeCount(vec);
    }
    vec[ChildNodeType::LEVEL] += count;
}


// Count non-leaf nodes in this subtree, including this node itself.
template<typename ChildT, Index Log2Dim>
inline Index32
InternalNode<ChildT, Log2Dim>::nonLeafCount() const
{
    Index32 sum = 1; // this node
    if (ChildNodeType::getLevel() == 0) return sum; // children are leaves
    for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) {
        sum += iter->nonLeafCount();
    }
    return sum;
}


// Number of direct child nodes of this node.
template<typename ChildT, Index Log2Dim>
inline Index32
InternalNode<ChildT, Log2Dim>::childCount() const
{
    return this->getChildMask().countOn();
}


// Count active voxels: each active tile contributes a full child-volume of
// voxels; children are queried recursively.
template<typename ChildT, Index Log2Dim>
inline Index64
InternalNode<ChildT, Log2Dim>::onVoxelCount() const
{
    Index64 sum = ChildT::NUM_VOXELS * mValueMask.countOn();
    for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) {
        sum += iter->onVoxelCount();
    }
    return sum;
}


// Count inactive voxels: table entries that are neither active tiles nor
// children are inactive tiles, each covering ChildT::NUM_VOXELS voxels.
template<typename ChildT, Index Log2Dim>
inline Index64
InternalNode<ChildT, Log2Dim>::offVoxelCount() const
{
    Index64 sum = ChildT::NUM_VOXELS * (NUM_VALUES-mValueMask.countOn()-mChildMask.countOn());
    for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) {
        sum += iter->offVoxelCount();
    }
    return sum;
}


// Count active voxels stored in leaf nodes only (tiles are excluded).
template<typename ChildT, Index Log2Dim>
inline Index64
InternalNode<ChildT, Log2Dim>::onLeafVoxelCount() const
{
    Index64 sum = 0;
    for (ChildOnCIter iter = this->beginChildOn(); iter; ++iter) {
        sum += mNodes[iter.pos()].getChild()->onLeafVoxelCount();
    }
    return sum;
}


// Count inactive voxels stored in leaf nodes only.
template<typename ChildT, Index Log2Dim>
inline Index64
InternalNode<ChildT, Log2Dim>::offLeafVoxelCount() const
{
    Index64 sum = 0;
    for (ChildOnCIter iter = this->beginChildOn(); iter; ++iter) {
        sum += mNodes[iter.pos()].getChild()->offLeafVoxelCount();
    }
    return sum;
}


// Count active tiles in this subtree. The LEVEL>1 test in the loop condition
// skips recursion when the children are leaf nodes (leaves have no tiles).
template<typename ChildT, Index Log2Dim>
inline Index64
InternalNode<ChildT, Log2Dim>::onTileCount() const
{
    Index64 sum = mValueMask.countOn();
    for (ChildOnCIter iter = this->cbeginChildOn(); LEVEL>1 && iter; ++iter) {
        sum += iter->onTileCount();
    }
    return sum;
}


// Memory footprint in bytes: this node's table, masks and origin, plus all
// child subtrees.
template<typename ChildT, Index Log2Dim>
inline Index64
InternalNode<ChildT, Log2Dim>::memUsage() const
{
    Index64 sum = NUM_VALUES * sizeof(UnionType)
        + mChildMask.memUsage() + mValueMask.memUsage() + sizeof(mOrigin);
    for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) {
        sum += iter->memUsage();
    }
    return sum;
}


// Expand @a bbox to enclose this node's active values.
// If bbox already contains this whole node there is nothing to add.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels) const
{
    if (bbox.isInside(this->getNodeBoundingBox())) return;

    // Active tiles expand the box by a full child-sized region.
    for (ValueOnCIter i = this->cbeginValueOn(); i; ++i) {
        bbox.expand(i.getCoord(), ChildT::DIM);
    }
    // Children are queried recursively.
    for (ChildOnCIter i = this->cbeginChildOn(); i; ++i) {
        i->evalActiveBoundingBox(bbox, visitVoxels);
    }
}


////////////////////////////////////////


// Replace any child subtree that has become constant (within @a tolerance)
// with a single tile of that value and active state, recursively.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::prune(const ValueType& tolerance)
{
    bool state = false;
    ValueType value = zeroVal<ValueType>();
    for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) {
        const Index i = iter.pos();
        ChildT* child = mNodes[i].getChild();
        child->prune(tolerance); // prune bottom-up so constancy can propagate
        if (child->isConstant(value, state, tolerance)) {
            delete child;
            mChildMask.setOff(i);
            mValueMask.set(i, state);
            mNodes[i].setValue(value);
        }
    }
}


////////////////////////////////////////


// Detach and return the descendant node of type NodeT containing (x, y, z),
// replacing it in the tree with a tile of the given value and state.
// Returns nullptr if no such node exists. NodeT must lie on this node's
// branch (same level implies same type); the level checks below resolve at
// compile time, hence the unreachable-code-warning guards.
template<typename ChildT, Index Log2Dim>
template<typename NodeT>
inline NodeT*
InternalNode<ChildT, Log2Dim>::stealNode(const Coord& xyz, const ValueType& value, bool state)
{
    if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) ||
        NodeT::LEVEL > ChildT::LEVEL) return nullptr;
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    const Index n = this->coordToOffset(xyz);
    if (mChildMask.isOff(n)) return nullptr;
    ChildT* child = mNodes[n].getChild();
    if (std::is_same<NodeT, ChildT>::value) {
        // The child itself is the requested node: replace it with a tile.
        mChildMask.setOff(n);
        mValueMask.set(n, state);
        mNodes[n].setValue(value);
    }
    // reinterpret_cast is a no-op here: the branch is taken only when
    // NodeT and ChildT are the same type (checked above at compile time).
    return (std::is_same<NodeT, ChildT>::value)
        ? reinterpret_cast<NodeT*>(child)
        : child->template stealNode<NodeT>(xyz, value, state);
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}


////////////////////////////////////////


// Return the descendant node of type NodeT containing (x, y, z), or nullptr.
// Same compile-time level dispatch as stealNode, but non-destructive.
template<typename ChildT, Index Log2Dim>
template<typename NodeT>
inline NodeT*
InternalNode<ChildT, Log2Dim>::probeNode(const Coord& xyz)
{
    if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) ||
        NodeT::LEVEL > ChildT::LEVEL) return nullptr;
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    const Index n = this->coordToOffset(xyz);
    if (mChildMask.isOff(n)) return nullptr;
    ChildT* child = mNodes[n].getChild();
    return (std::is_same<NodeT, ChildT>::value)
        ? reinterpret_cast<NodeT*>(child)
        : child->template probeNode<NodeT>(xyz);
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}


// As probeNode, but also records the visited child in the accessor cache.
template<typename ChildT, Index Log2Dim>
template<typename NodeT, typename AccessorT>
inline NodeT*
InternalNode<ChildT, Log2Dim>::probeNodeAndCache(const Coord& xyz, AccessorT& acc)
{
    if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) ||
        NodeT::LEVEL > ChildT::LEVEL) return nullptr;
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    const Index n = this->coordToOffset(xyz);
    if (mChildMask.isOff(n)) return nullptr;
    ChildT* child = mNodes[n].getChild();
    acc.insert(xyz, child);
    return (std::is_same<NodeT, ChildT>::value)
        ? reinterpret_cast<NodeT*>(child)
        : child->template probeNodeAndCache<NodeT>(xyz, acc);
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}


// Const version of probeNode.
template<typename ChildT, Index Log2Dim>
template<typename NodeT>
inline const NodeT*
InternalNode<ChildT, Log2Dim>::probeConstNode(const Coord& xyz) const
{
    if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) ||
        NodeT::LEVEL > ChildT::LEVEL) return nullptr;
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    const Index n = this->coordToOffset(xyz);
    if (mChildMask.isOff(n)) return nullptr;
    const ChildT* child = mNodes[n].getChild();
    return (std::is_same<NodeT, ChildT>::value)
        ? reinterpret_cast<const NodeT*>(child)
        : child->template probeConstNode<NodeT>(xyz);
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}


// Const version of probeNodeAndCache.
template<typename ChildT, Index Log2Dim>
template<typename NodeT, typename AccessorT>
inline const NodeT*
InternalNode<ChildT, Log2Dim>::probeConstNodeAndCache(const Coord& xyz, AccessorT& acc) const
{
    if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) ||
        NodeT::LEVEL > ChildT::LEVEL) return nullptr;
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    const Index n = this->coordToOffset(xyz);
    if (mChildMask.isOff(n)) return nullptr;
    const ChildT* child = mNodes[n].getChild();
    acc.insert(xyz, child);
    return (std::is_same<NodeT, ChildT>::value)
        ? reinterpret_cast<const NodeT*>(child)
        : child->template probeConstNodeAndCache<NodeT>(xyz, acc);
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}


////////////////////////////////////////


// Leaf-probing convenience wrappers: all forward to the generic probeNode
// family with NodeT = LeafNodeType.

template<typename ChildT, Index Log2Dim>
inline typename ChildT::LeafNodeType*
InternalNode<ChildT, Log2Dim>::probeLeaf(const Coord& xyz)
{
    return this->template probeNode<LeafNodeType>(xyz);
}


template<typename ChildT, Index Log2Dim>
template<typename AccessorT>
inline typename ChildT::LeafNodeType*
InternalNode<ChildT, Log2Dim>::probeLeafAndCache(const Coord& xyz, AccessorT& acc)
{
    return this->template probeNodeAndCache<LeafNodeType>(xyz, acc);
}


template<typename ChildT, Index Log2Dim>
template<typename AccessorT>
inline const typename ChildT::LeafNodeType*
InternalNode<ChildT, Log2Dim>::probeLeafAndCache(const Coord& xyz, AccessorT& acc) const
{
    return this->probeConstLeafAndCache(xyz, acc);
}


template<typename ChildT, Index Log2Dim>
inline const typename ChildT::LeafNodeType*
InternalNode<ChildT, Log2Dim>::probeConstLeaf(const Coord& xyz) const
{
    return this->template probeConstNode<LeafNodeType>(xyz);
}


template<typename ChildT, Index Log2Dim>
template<typename AccessorT>
inline const typename ChildT::LeafNodeType*
InternalNode<ChildT, Log2Dim>::probeConstLeafAndCache(const Coord& xyz, AccessorT& acc) const
{
    return this->template probeConstNodeAndCache<LeafNodeType>(xyz, acc);
}


////////////////////////////////////////


// Insert @a leaf at its correct position in this subtree, creating
// intermediate internal nodes as needed. If the children of this node are
// leaves, an existing leaf at that position is replaced (and deleted).
// Takes ownership of @a leaf.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::addLeaf(LeafNodeType* leaf)
{
    assert(leaf != nullptr);
    const Coord& xyz = leaf->origin();
    const Index n = this->coordToOffset(xyz);
    ChildT* child = nullptr;
    if (mChildMask.isOff(n)) {
        if (ChildT::LEVEL>0) {
            // Expand the tile into a new internal child that inherits its value/state.
            child = new ChildT(xyz, mNodes[n].getValue(), mValueMask.isOn(n));
        } else {
            // Children are leaves: the leaf itself becomes the child.
            child = reinterpret_cast<ChildT*>(leaf);
        }
        this->setChildNode(n, child);
    } else {
        if (ChildT::LEVEL>0) {
            child = mNodes[n].getChild();
        } else {
            // Replace the existing leaf with the new one.
            delete mNodes[n].getChild();
            child = reinterpret_cast<ChildT*>(leaf);
            mNodes[n].setChild(child);
        }
    }
    child->addLeaf(leaf);
}


// As addLeaf, but records visited internal nodes in the accessor cache.
template<typename ChildT, Index Log2Dim>
template<typename AccessorT>
inline void
InternalNode<ChildT, Log2Dim>::addLeafAndCache(LeafNodeType* leaf, AccessorT& acc)
{
    assert(leaf != nullptr);
    const Coord& xyz = leaf->origin();
    const Index n = this->coordToOffset(xyz);
    ChildT* child = nullptr;
    if (mChildMask.isOff(n)) {
        if (ChildT::LEVEL>0) {
            child = new ChildT(xyz, mNodes[n].getValue(), mValueMask.isOn(n));
            acc.insert(xyz, child);//we only cache internal nodes
        } else {
            child = reinterpret_cast<ChildT*>(leaf);
        }
        this->setChildNode(n, child);
    } else {
        if (ChildT::LEVEL>0) {
            child = mNodes[n].getChild();
            acc.insert(xyz, child);//we only cache internal nodes
        } else {
            delete mNodes[n].getChild();
            child = reinterpret_cast<ChildT*>(leaf);
            mNodes[n].setChild(child);
        }
    }
    child->addLeafAndCache(leaf, acc);
}


////////////////////////////////////////


// Insert @a child as a direct child of this node, replacing (and deleting)
// whatever occupied that table slot. Returns false if @a child's origin does
// not lie within this node. Takes ownership on success.
template<typename ChildT, Index Log2Dim>
inline bool
InternalNode<ChildT, Log2Dim>::addChild(ChildT* child)
{
    assert(child);
    const Coord& xyz = child->origin();
    // verify that the child belongs in this internal node
    if (Coord((xyz & ~(DIM-1))) != this->origin()) return false;
    // compute the offset and insert the child node
    const Index n = this->coordToOffset(xyz);
    // this also deletes an existing child node
    this->resetChildNode(n, child);
    return true;
}


// Replace table entry @a n (tile or child) with a tile of the given value/state.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::addTile(Index n, const ValueType& value, bool state)
{
    assert(n < NUM_VALUES);
    this->makeChildNodeEmpty(n, value);
    mValueMask.set(n, state);
}


// Add a tile at the given tree level containing (x, y, z), descending and
// creating children as needed until the requested level is reached.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::addTile(Index level, const Coord& xyz,
                                       const ValueType& value, bool state)
{
    if (LEVEL >= level) {
        const Index n = this->coordToOffset(xyz);
        if (mChildMask.isOff(n)) {// tile case
            if (LEVEL > level) {
                // Expand this tile into a child and keep descending.
                ChildT* child = new ChildT(xyz, mNodes[n].getValue(), mValueMask.isOn(n));
                this->setChildNode(n, child);
                child->addTile(level, xyz, value, state);
            } else {
                // This is the target level: overwrite the tile in place.
                mValueMask.set(n, state);
                mNodes[n].setValue(value);
            }
        } else {// child branch case
            ChildT* child = mNodes[n].getChild();
            if (LEVEL > level) {
                child->addTile(level, xyz, value, state);
            } else {
                // Target level reached: collapse the child branch into a tile.
                delete child;
                mChildMask.setOff(n);
                mValueMask.set(n, state);
                mNodes[n].setValue(value);
            }
        }
    }
}


// As addTile(level, ...), but records visited children in the accessor cache.
template<typename ChildT, Index Log2Dim>
template<typename AccessorT>
inline void
InternalNode<ChildT, Log2Dim>::addTileAndCache(Index level, const Coord& xyz,
    const ValueType& value, bool state, AccessorT& acc)
{
    if (LEVEL >= level) {
        const Index n = this->coordToOffset(xyz);
        if (mChildMask.isOff(n)) {// tile case
            if (LEVEL > level) {
                ChildT* child = new ChildT(xyz, mNodes[n].getValue(), mValueMask.isOn(n));
                this->setChildNode(n, child);
                acc.insert(xyz, child);
                child->addTileAndCache(level, xyz, value, state, acc);
            } else {
                mValueMask.set(n, state);
                mNodes[n].setValue(value);
            }
        } else {// child branch case
            ChildT* child = mNodes[n].getChild();
            if (LEVEL > level) {
                acc.insert(xyz, child);
                child->addTileAndCache(level, xyz, value, state, acc);
            } else {
                delete child;
                mChildMask.setOff(n);
                mValueMask.set(n, state);
                mNodes[n].setValue(value);
            }
        }
    }
}


////////////////////////////////////////


// Return the leaf node containing (x, y, z), creating the branch down to it
// (expanding tiles into children) if it does not yet exist.
template<typename ChildT, Index Log2Dim>
inline typename ChildT::LeafNodeType*
InternalNode<ChildT, Log2Dim>::touchLeaf(const Coord& xyz)
{
    const Index n = this->coordToOffset(xyz);
    ChildT* child = nullptr;
    if (mChildMask.isOff(n)) {
        child = new ChildT(xyz, mNodes[n].getValue(), mValueMask.isOn(n));
        this->setChildNode(n, child);
    } else {
        child = mNodes[n].getChild();
    }
    return child->touchLeaf(xyz);
}


// As touchLeaf, but records the visited child in the accessor cache.
template<typename ChildT, Index Log2Dim>
template<typename AccessorT>
inline typename ChildT::LeafNodeType*
InternalNode<ChildT, Log2Dim>::touchLeafAndCache(const Coord& xyz, AccessorT& acc)
{
    const Index n = this->coordToOffset(xyz);
    if (mChildMask.isOff(n)) {
        this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), mValueMask.isOn(n)));
    }
    acc.insert(xyz, mNodes[n].getChild());
    return mNodes[n].getChild()->touchLeafAndCache(xyz, acc);
}


////////////////////////////////////////


// Return true if this node holds no children, all tiles share one active
// state, and all tile values agree within @a tolerance. On success,
// @a firstValue and @a state report the common value and state.
template<typename ChildT, Index Log2Dim>
inline bool
InternalNode<ChildT, Log2Dim>::isConstant(ValueType& firstValue, bool& state,
                                          const ValueType& tolerance) const
{
    if (!mChildMask.isOff() || !mValueMask.isConstant(state)) return false;// early termination

    firstValue = mNodes[0].getValue();
    for (Index i = 1; i < NUM_VALUES; ++i) {
        if (!math::isApproxEqual(mNodes[i].getValue(), firstValue, tolerance)) {
            return false; // early termination
        }
    }
    return true;
}


////////////////////////////////////////


// Range-based variant of isConstant: succeeds when (max - min) of the tile
// values stays within @a tolerance, reporting the extrema.
template<typename ChildT, Index Log2Dim>
inline bool
InternalNode<ChildT, Log2Dim>::isConstant(ValueType& minValue, ValueType& maxValue,
                                          bool& state, const ValueType& tolerance) const
{
    if (!mChildMask.isOff() || !mValueMask.isConstant(state)) return false;// early termination

    minValue = maxValue = mNodes[0].getValue();
    for (Index i = 1; i < NUM_VALUES; ++i) {
        const ValueType& v = mNodes[i].getValue();
        if (v < minValue) {
            if ((maxValue - v) > tolerance) return false;// early termination
            minValue = v;
        } else if (v > maxValue) {
            if ((v - minValue) > tolerance) return false;// early termination
            maxValue = v;
        }
    }
    return true;
}
////////////////////////////////////////


// Return true if this subtree contains any active tile. At LEVEL 1 the
// children are leaves (which have no tiles), so only this node's value mask
// matters; the macro guards suppress warnings for the compile-time dead branch.
template<typename ChildT, Index Log2Dim>
inline bool
InternalNode<ChildT, Log2Dim>::hasActiveTiles() const
{
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    const bool anyActiveTiles = !mValueMask.isOff();
    if (LEVEL==1 || anyActiveTiles) return anyActiveTiles;
    for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) {
        if (iter->hasActiveTiles()) return true;
    }
    return false;
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}


// Return the active state of the voxel at (x, y, z): a tile answers from the
// value mask, a child answers recursively.
template<typename ChildT, Index Log2Dim>
inline bool
InternalNode<ChildT, Log2Dim>::isValueOn(const Coord& xyz) const
{
    const Index n = this->coordToOffset(xyz);
    if (this->isChildMaskOff(n)) return this->isValueMaskOn(n);
    return mNodes[n].getChild()->isValueOn(xyz);
}


// As isValueOn, caching the visited child in the accessor.
template<typename ChildT, Index Log2Dim>
template<typename AccessorT>
inline bool
InternalNode<ChildT, Log2Dim>::isValueOnAndCache(const Coord& xyz, AccessorT& acc) const
{
    const Index n = this->coordToOffset(xyz);
    if (this->isChildMaskOff(n)) return this->isValueMaskOn(n);
    acc.insert(xyz, mNodes[n].getChild());
    return mNodes[n].getChild()->isValueOnAndCache(xyz, acc);
}


// Return the value of the voxel at (x, y, z): the tile value, or the child's
// answer for that coordinate.
template<typename ChildT, Index Log2Dim>
inline const typename ChildT::ValueType&
InternalNode<ChildT, Log2Dim>::getValue(const Coord& xyz) const
{
    const Index n = this->coordToOffset(xyz);
    return this->isChildMaskOff(n) ? mNodes[n].getValue()
        : mNodes[n].getChild()->getValue(xyz);
}


// As getValue, caching the visited child in the accessor.
template<typename ChildT, Index Log2Dim>
template<typename AccessorT>
inline const typename ChildT::ValueType&
InternalNode<ChildT, Log2Dim>::getValueAndCache(const Coord& xyz, AccessorT& acc) const
{
    const Index n = this->coordToOffset(xyz);
    if (this->isChildMaskOn(n)) {
        acc.insert(xyz, mNodes[n].getChild());
        return mNodes[n].getChild()->getValueAndCache(xyz, acc);
    }
    return mNodes[n].getValue();
}


// Return the tree level at which the value for (x, y, z) is stored:
// this node's LEVEL for a tile, else the child's answer.
template<typename ChildT, Index Log2Dim>
inline Index
InternalNode<ChildT, Log2Dim>::getValueLevel(const Coord& xyz) const
{
    const Index n = this->coordToOffset(xyz);
    return this->isChildMaskOff(n) ? LEVEL : mNodes[n].getChild()->getValueLevel(xyz);
}


// As getValueLevel, caching the visited child in the accessor.
template<typename ChildT, Index Log2Dim>
template<typename AccessorT>
inline Index
InternalNode<ChildT, Log2Dim>::getValueLevelAndCache(const Coord& xyz, AccessorT& acc) const
{
    const Index n = this->coordToOffset(xyz);
    if (this->isChildMaskOn(n)) {
        acc.insert(xyz, mNodes[n].getChild());
        return mNodes[n].getChild()->getValueLevelAndCache(xyz, acc);
    }
    return LEVEL;
}


// Copy the value at (x, y, z) into @a value and return its active state.
template<typename ChildT, Index Log2Dim>
inline bool
InternalNode<ChildT, Log2Dim>::probeValue(const Coord& xyz, ValueType& value) const
{
    const Index n = this->coordToOffset(xyz);
    if (this->isChildMaskOff(n)) {
        value = mNodes[n].getValue();
        return this->isValueMaskOn(n);
    }
    return mNodes[n].getChild()->probeValue(xyz, value);
}


// As probeValue, caching the visited child in the accessor.
template<typename ChildT, Index Log2Dim>
template<typename AccessorT>
inline bool
InternalNode<ChildT, Log2Dim>::probeValueAndCache(const Coord& xyz,
    ValueType& value, AccessorT& acc) const
{
    const Index n = this->coordToOffset(xyz);
    if (this->isChildMaskOn(n)) {
        acc.insert(xyz, mNodes[n].getChild());
        return mNodes[n].getChild()->probeValueAndCache(xyz, value, acc);
    }
    value = mNodes[n].getValue();
    return this->isValueMaskOn(n);
}


// Mark the voxel at (x, y, z) inactive. An active constant tile must first
// be expanded into a child so that a single voxel's state can change.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::setValueOff(const Coord& xyz)
{
    const Index n = this->coordToOffset(xyz);
    bool hasChild = this->isChildMaskOn(n);
    if (!hasChild && this->isValueMaskOn(n)) {
        // If the voxel belongs to a constant tile that is active,
        // a child subtree must be constructed.
        hasChild = true;
        this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), /*active=*/true));
    }
    if (hasChild) mNodes[n].getChild()->setValueOff(xyz);
}


// Mark the voxel at (x, y, z) active, expanding an inactive tile if needed.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::setValueOn(const Coord& xyz)
{
    const Index n = this->coordToOffset(xyz);
    bool hasChild = this->isChildMaskOn(n);
    if (!hasChild && !this->isValueMaskOn(n)) {
        // If the voxel belongs to a constant tile that is inactive,
        // a child subtree must be constructed.
        hasChild = true;
        this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), /*active=*/false));
    }
    if (hasChild) mNodes[n].getChild()->setValueOn(xyz);
}


// Set the voxel at (x, y, z) to @a value and mark it inactive.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::setValueOff(const Coord& xyz, const ValueType& value)
{
    const Index n = InternalNode::coordToOffset(xyz);
    bool hasChild = this->isChildMaskOn(n);
    if (!hasChild) {
        const bool active = this->isValueMaskOn(n);
        if (active || !math::isExactlyEqual(mNodes[n].getValue(), value)) {
            // If the voxel belongs to a tile that is either active or that
            // has a constant value that is different from the one provided,
            // a child subtree must be constructed.
            hasChild = true;
            this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), active));
        }
    }
    if (hasChild) mNodes[n].getChild()->setValueOff(xyz, value);
}


// As setValueOff(xyz, value), caching the visited child in the accessor.
template<typename ChildT, Index Log2Dim>
template<typename AccessorT>
inline void
InternalNode<ChildT, Log2Dim>::setValueOffAndCache(const Coord& xyz,
    const ValueType& value, AccessorT& acc)
{
    const Index n = InternalNode::coordToOffset(xyz);
    bool hasChild = this->isChildMaskOn(n);
    if (!hasChild) {
        const bool active = this->isValueMaskOn(n);
        if (active || !math::isExactlyEqual(mNodes[n].getValue(), value)) {
            // If the voxel belongs to a tile that is either active or that
            // has a constant value that is different from the one provided,
            // a child subtree must be constructed.
            hasChild = true;
            this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), active));
        }
    }
    if (hasChild) {
        ChildT* child = mNodes[n].getChild();
        acc.insert(xyz, child);
        child->setValueOffAndCache(xyz, value, acc);
    }
}


// Set the voxel at (x, y, z) to @a value and mark it active.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::setValueOn(const Coord& xyz, const ValueType& value)
{
    const Index n = this->coordToOffset(xyz);
    bool hasChild = this->isChildMaskOn(n);
    if (!hasChild) {
        const bool active = this->isValueMaskOn(n); // tile's active state
        if (!active || !math::isExactlyEqual(mNodes[n].getValue(), value)) {
            // If the voxel belongs to a tile that is either inactive or that
            // has a constant value that is different from the one provided,
            // a child subtree must be constructed.
            hasChild = true;
            this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), active));
        }
    }
    if (hasChild) mNodes[n].getChild()->setValueOn(xyz, value);
}


// As setValueOn(xyz, value), caching the visited child in the accessor.
template<typename ChildT, Index Log2Dim>
template<typename AccessorT>
inline void
InternalNode<ChildT, Log2Dim>::setValueAndCache(const Coord& xyz,
    const ValueType& value, AccessorT& acc)
{
    const Index n = this->coordToOffset(xyz);
    bool hasChild = this->isChildMaskOn(n);
    if (!hasChild) {
        const bool active = this->isValueMaskOn(n);
        if (!active || !math::isExactlyEqual(mNodes[n].getValue(), value)) {
            // If the voxel belongs to a tile that is either inactive or that
            // has a constant value that is different from the one provided,
            // a child subtree must be constructed.
            hasChild = true;
            this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), active));
        }
    }
    if (hasChild) {
        acc.insert(xyz, mNodes[n].getChild());
        mNodes[n].getChild()->setValueAndCache(xyz, value, acc);
    }
}


// Set the voxel's value without changing its active state.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::setValueOnly(const Coord& xyz, const ValueType& value)
{
    const Index n = this->coordToOffset(xyz);
    bool hasChild = this->isChildMaskOn(n);
    if (!hasChild && !math::isExactlyEqual(mNodes[n].getValue(), value)) {
        // If the voxel has a tile value that is different from the one provided,
        // a child subtree must be constructed.
        const bool active = this->isValueMaskOn(n);
        hasChild = true;
        this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), active));
    }
    if (hasChild) mNodes[n].getChild()->setValueOnly(xyz, value);
}


// As setValueOnly, caching the visited child in the accessor.
template<typename ChildT, Index Log2Dim>
template<typename AccessorT>
inline void
InternalNode<ChildT, Log2Dim>::setValueOnlyAndCache(const Coord& xyz,
    const ValueType& value, AccessorT& acc)
{
    const Index n = this->coordToOffset(xyz);
    bool hasChild = this->isChildMaskOn(n);
    if (!hasChild && !math::isExactlyEqual(mNodes[n].getValue(), value)) {
        // If the voxel has a tile value that is different from the one provided,
        // a child subtree must be constructed.
        const bool active = this->isValueMaskOn(n);
        hasChild = true;
        this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), active));
    }
    if (hasChild) {
        acc.insert(xyz, mNodes[n].getChild());
        mNodes[n].getChild()->setValueOnlyAndCache(xyz, value, acc);
    }
}


// Set only the active state of the voxel at (x, y, z).
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::setActiveState(const Coord& xyz, bool on)
{
    const Index n = this->coordToOffset(xyz);
    bool hasChild = this->isChildMaskOn(n);
    if (!hasChild) {
        if (on != this->isValueMaskOn(n)) {
            // If the voxel belongs to a tile with the wrong active state,
            // then a child subtree must be constructed.
            // 'on' is the voxel's new state, therefore '!on' is the tile's current state
            hasChild = true;
            this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), !on));
        }
    }
    if (hasChild) mNodes[n].getChild()->setActiveState(xyz, on);
}


// As setActiveState, caching the visited child in the accessor.
template<typename ChildT, Index Log2Dim>
template<typename AccessorT>
inline void
InternalNode<ChildT, Log2Dim>::setActiveStateAndCache(const Coord& xyz, bool on, AccessorT& acc)
{
    const Index n = this->coordToOffset(xyz);
    bool hasChild = this->isChildMaskOn(n);
    if (!hasChild) {
        if (on != this->isValueMaskOn(n)) {
            // If the voxel belongs to a tile with the wrong active state,
            // then a child subtree must be constructed.
            // 'on' is the voxel's new state, therefore '!on' is the tile's current state
            hasChild = true;
            this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), !on));
        }
    }
    if (hasChild) {
        ChildT* child = mNodes[n].getChild();
        acc.insert(xyz, child);
        child->setActiveStateAndCache(xyz, on, acc);
    }
}


// Activate every value in this subtree: all non-child slots become active
// tiles, and children are activated recursively.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::setValuesOn()
{
    mValueMask = !mChildMask;
    for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) {
        mNodes[iter.pos()].getChild()->setValuesOn();
    }
}


// Apply @a op to the voxel's value and mark the voxel active, expanding a
// tile into a child only when the result would actually differ.
template<typename ChildT, Index Log2Dim>
template<typename ModifyOp>
inline void
InternalNode<ChildT, Log2Dim>::modifyValue(const Coord& xyz, const ModifyOp& op)
{
    const Index n = InternalNode::coordToOffset(xyz);
    bool hasChild = this->isChildMaskOn(n);
    if (!hasChild) {
        // Need to create a child if the tile is inactive,
        // in order to activate voxel (x, y, z).
        const bool active = this->isValueMaskOn(n);
        bool createChild = !active;
        if (!createChild) {
            // Need to create a child if applying the functor
            // to the tile value produces a different value.
            const ValueType& tileVal = mNodes[n].getValue();
            ValueType modifiedVal = tileVal;
            op(modifiedVal);
            createChild = !math::isExactlyEqual(tileVal, modifiedVal);
        }
        if (createChild) {
            hasChild = true;
            this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), active));
        }
    }
    if (hasChild) mNodes[n].getChild()->modifyValue(xyz, op);
}


// As modifyValue, caching the visited child in the accessor.
template<typename ChildT, Index Log2Dim>
template<typename ModifyOp, typename AccessorT>
inline void
InternalNode<ChildT, Log2Dim>::modifyValueAndCache(const Coord& xyz, const ModifyOp& op,
    AccessorT& acc)
{
    const Index n = InternalNode::coordToOffset(xyz);
    bool hasChild = this->isChildMaskOn(n);
    if (!hasChild) {
        // Need to create a child if the tile is inactive,
        // in order to activate voxel (x, y, z).
        const bool active = this->isValueMaskOn(n);
        bool createChild = !active;
        if (!createChild) {
            // Need to create a child if applying the functor
            // to the tile value produces a different value.
            const ValueType& tileVal = mNodes[n].getValue();
            ValueType modifiedVal = tileVal;
            op(modifiedVal);
            createChild = !math::isExactlyEqual(tileVal, modifiedVal);
        }
        if (createChild) {
            hasChild = true;
            this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), active));
        }
    }
    if (hasChild) {
        ChildNodeType* child = mNodes[n].getChild();
        acc.insert(xyz, child);
        child->modifyValueAndCache(xyz, op, acc);
    }
}


// Apply @a op to the voxel's value AND active state. Note that modifiedState
// starts as !tileState, so an op that never writes the state flag always
// forces child creation here.
template<typename ChildT, Index Log2Dim>
template<typename ModifyOp>
inline void
InternalNode<ChildT, Log2Dim>::modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op)
{
    const Index n = InternalNode::coordToOffset(xyz);
    bool hasChild = this->isChildMaskOn(n);
    if (!hasChild) {
        const bool tileState = this->isValueMaskOn(n);
        const ValueType& tileVal = mNodes[n].getValue();
        bool modifiedState = !tileState;
        ValueType modifiedVal = tileVal;
        op(modifiedVal, modifiedState);
        // Need to create a child if applying the functor to the tile
        // produces a different value or active state.
        if (modifiedState != tileState || !math::isExactlyEqual(modifiedVal, tileVal)) {
            hasChild = true;
            this->setChildNode(n, new ChildNodeType(xyz, tileVal, tileState));
        }
    }
    if (hasChild) mNodes[n].getChild()->modifyValueAndActiveState(xyz, op);
}


// As modifyValueAndActiveState, caching the visited child in the accessor.
template<typename ChildT, Index Log2Dim>
template<typename ModifyOp, typename AccessorT>
inline void
InternalNode<ChildT, Log2Dim>::modifyValueAndActiveStateAndCache(
    const Coord& xyz, const ModifyOp& op, AccessorT& acc)
{
    const Index n = InternalNode::coordToOffset(xyz);
    bool hasChild = this->isChildMaskOn(n);
    if (!hasChild) {
        const bool tileState = this->isValueMaskOn(n);
        const ValueType& tileVal = mNodes[n].getValue();
        bool modifiedState = !tileState;
        ValueType modifiedVal = tileVal;
        op(modifiedVal, modifiedState);
        // Need to create a child if applying the functor to the tile
        // produces a different value or active state.
        if (modifiedState != tileState || !math::isExactlyEqual(modifiedVal, tileVal)) {
            hasChild = true;
            this->setChildNode(n, new ChildNodeType(xyz, tileVal, tileState));
        }
    }
    if (hasChild) {
        ChildNodeType* child = mNodes[n].getChild();
        acc.insert(xyz, child);
        child->modifyValueAndActiveStateAndCache(xyz, op, acc);
    }
}


////////////////////////////////////////


// Discard everything outside @a clipBBox, replacing it with inactive
// background tiles; data strictly inside the box is left untouched.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::clip(const CoordBBox& clipBBox, const ValueType& background)
{
    CoordBBox nodeBBox = this->getNodeBoundingBox();
    if (!clipBBox.hasOverlap(nodeBBox)) {
        // This node lies completely outside the clipping region.  Fill it with background tiles.
        this->fill(nodeBBox, background, /*active=*/false);
    } else if (clipBBox.isInside(nodeBBox)) {
        // This node lies completely inside the clipping region.  Leave it intact.
        return;
    }

    // This node isn't completely contained inside the clipping region.
    // Clip tiles and children, and replace any that lie outside the region
    // with background tiles.
    for (Index pos = 0; pos < NUM_VALUES; ++pos) {
        const Coord xyz = this->offsetToGlobalCoord(pos); // tile or child origin
        CoordBBox tileBBox(xyz, xyz.offsetBy(ChildT::DIM - 1)); // tile or child bounds
        if (!clipBBox.hasOverlap(tileBBox)) {
            // This table entry lies completely outside the clipping region.
            // Replace it with a background tile.
            this->makeChildNodeEmpty(pos, background);
            mValueMask.setOff(pos);
        } else if (!clipBBox.isInside(tileBBox)) {
            // This table entry does not lie completely inside the clipping region
            // and must be clipped.
            if (this->isChildMaskOn(pos)) {
                mNodes[pos].getChild()->clip(clipBBox, background);
            } else {
                // Replace this tile with a background tile, then fill the clip region
                // with the tile's original value.  (This might create a child branch.)
                tileBBox.intersect(clipBBox);
                const ValueType val = mNodes[pos].getValue();
                const bool on = this->isValueMaskOn(pos);
                mNodes[pos].setValue(background);
                mValueMask.setOff(pos);
                this->fill(tileBBox, val, on);
            }
        } else {
            // This table entry lies completely inside the clipping region.  Leave it intact.
        }
    }
}


////////////////////////////////////////


// Sparse fill: set every voxel in @a bbox (intersected with this node) to
// @a value with the given active state, creating tiles where whole table
// entries are covered and descending into children only at the box edges.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::fill(const CoordBBox& bbox, const ValueType& value, bool active)
{
    auto clippedBBox = this->getNodeBoundingBox();
    clippedBBox.intersect(bbox);
    if (!clippedBBox) return;

    // Iterate over the fill region in axis-aligned, tile-sized chunks.
    // (The first and last chunks along each axis might be smaller than a tile.)
    Coord xyz, tileMin, tileMax;
    for (int x = clippedBBox.min().x(); x <= clippedBBox.max().x(); x = tileMax.x() + 1) {
        xyz.setX(x);
        for (int y = clippedBBox.min().y(); y <= clippedBBox.max().y(); y = tileMax.y() + 1) {
            xyz.setY(y);
            for (int z = clippedBBox.min().z(); z <= clippedBBox.max().z(); z = tileMax.z() + 1) {
                xyz.setZ(z);

                // Get the bounds of the tile that contains voxel (x, y, z).
                const Index n = this->coordToOffset(xyz);
                tileMin = this->offsetToGlobalCoord(n);
                tileMax = tileMin.offsetBy(ChildT::DIM - 1);

                if (xyz != tileMin || Coord::lessThan(clippedBBox.max(), tileMax)) {
                    // If the box defined by (xyz, clippedBBox.max()) doesn't completely enclose
                    // the tile to which xyz belongs, create a child node (or retrieve
                    // the existing one).
                    ChildT* child = nullptr;
                    if (this->isChildMaskOff(n)) {
                        // Replace the tile with a newly-created child that is initialized
                        // with the tile's value and active state.
                        child = new ChildT{xyz, mNodes[n].getValue(), this->isValueMaskOn(n)};
                        this->setChildNode(n, child);
                    } else {
                        child = mNodes[n].getChild();
                    }

                    // Forward the fill request to the child.
                    if (child) {
                        const Coord tmp = Coord::minComponent(clippedBBox.max(), tileMax);
                        child->fill(CoordBBox(xyz, tmp), value, active);
                    }

                } else {
                    // If the box given by (xyz, clippedBBox.max()) completely encloses
                    // the tile to which xyz belongs, create the tile (if it
                    // doesn't already exist) and give it the fill value.
                    this->makeChildNodeEmpty(n, value);
                    mValueMask.set(n, active);
                }
            }
        }
    }
}


// Dense fill: like fill, but always expands tiles into children so that the
// filled region is represented down to voxel level.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::denseFill(const CoordBBox& bbox, const ValueType& value, bool active)
{
    auto clippedBBox = this->getNodeBoundingBox();
    clippedBBox.intersect(bbox);
    if (!clippedBBox) return;

    // Iterate over the fill region in axis-aligned, tile-sized chunks.
    // (The first and last chunks along each axis might be smaller than a tile.)
    Coord xyz, tileMin, tileMax;
    for (int x = clippedBBox.min().x(); x <= clippedBBox.max().x(); x = tileMax.x() + 1) {
        xyz.setX(x);
        for (int y = clippedBBox.min().y(); y <= clippedBBox.max().y(); y = tileMax.y() + 1) {
            xyz.setY(y);
            for (int z = clippedBBox.min().z(); z <= clippedBBox.max().z(); z = tileMax.z() + 1) {
                xyz.setZ(z);

                // Get the table index of the tile that contains voxel (x, y, z).
                const auto n = this->coordToOffset(xyz);

                // Retrieve the child node at index n, or replace the tile at index n with a child.
                ChildT* child = nullptr;
                if (this->isChildMaskOn(n)) {
                    child = mNodes[n].getChild();
                } else {
                    // Replace the tile with a newly-created child that is filled
                    // with the tile's value and active state.
                    child = new ChildT{xyz, mNodes[n].getValue(), this->isValueMaskOn(n)};
                    this->setChildNode(n, child);
                }

                // Get the bounds of the tile that contains voxel (x, y, z).
                tileMin = this->offsetToGlobalCoord(n);
                tileMax = tileMin.offsetBy(ChildT::DIM - 1);

                // Forward the fill request to the child.
                child->denseFill(CoordBBox{xyz, clippedBBox.max()}, value, active);
            }
        }
    }
}


////////////////////////////////////////


template<typename ChildT, Index Log2Dim>
template<typename DenseT>
inline void
InternalNode<ChildT, Log2Dim>::copyToDense(const CoordBBox& bbox, DenseT& dense) const
{
    using DenseValueType = typename DenseT::ValueType;

    const size_t xStride = dense.xStride(), yStride = dense.yStride(), zStride = dense.zStride();
    const Coord& min = dense.bbox().min();
    for (Coord xyz = bbox.min(), max; xyz[0] <= bbox.max()[0]; xyz[0] = max[0] + 1) {
        for (xyz[1] = bbox.min()[1]; xyz[1] <= bbox.max()[1]; xyz[1] = max[1] + 1) {
            for (xyz[2] = bbox.min()[2]; xyz[2] <= bbox.max()[2]; xyz[2] = max[2] + 1) {
                const Index n = this->coordToOffset(xyz);
                // Get max coordinates of the child node that contains voxel xyz.
max = this->offsetToGlobalCoord(n).offsetBy(ChildT::DIM-1); // Get the bbox of the interection of bbox and the child node CoordBBox sub(xyz, Coord::minComponent(bbox.max(), max)); if (this->isChildMaskOn(n)) {//is a child mNodes[n].getChild()->copyToDense(sub, dense); } else {//a tile value const ValueType value = mNodes[n].getValue(); sub.translate(-min); DenseValueType* a0 = dense.data() + zStride*sub.min()[2]; for (Int32 x=sub.min()[0], ex=sub.max()[0]+1; x<ex; ++x) { DenseValueType* a1 = a0 + x*xStride; for (Int32 y=sub.min()[1], ey=sub.max()[1]+1; y<ey; ++y) { DenseValueType* a2 = a1 + y*yStride; for (Int32 z = sub.min()[2], ez = sub.max()[2]+1; z < ez; ++z, a2 += zStride) { *a2 = DenseValueType(value); } } } } } } } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::writeTopology(std::ostream& os, bool toHalf) const { mChildMask.save(os); mValueMask.save(os); { // Copy all of this node's values into an array. std::unique_ptr<ValueType[]> valuePtr(new ValueType[NUM_VALUES]); ValueType* values = valuePtr.get(); const ValueType zero = zeroVal<ValueType>(); for (Index i = 0; i < NUM_VALUES; ++i) { values[i] = (mChildMask.isOff(i) ? mNodes[i].getValue() : zero); } // Compress (optionally) and write out the contents of the array. io::writeCompressedValues(os, values, NUM_VALUES, mValueMask, mChildMask, toHalf); } // Write out the child nodes in order. for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) { iter->writeTopology(os, toHalf); } } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::readTopology(std::istream& is, bool fromHalf) { const ValueType background = (!io::getGridBackgroundValuePtr(is) ? 
zeroVal<ValueType>() : *static_cast<const ValueType*>(io::getGridBackgroundValuePtr(is))); mChildMask.load(is); mValueMask.load(is); if (io::getFormatVersion(is) < OPENVDB_FILE_VERSION_INTERNALNODE_COMPRESSION) { for (Index i = 0; i < NUM_VALUES; ++i) { if (this->isChildMaskOn(i)) { ChildNodeType* child = new ChildNodeType(PartialCreate(), offsetToGlobalCoord(i), background); mNodes[i].setChild(child); child->readTopology(is); } else { ValueType value; is.read(reinterpret_cast<char*>(&value), sizeof(ValueType)); mNodes[i].setValue(value); } } } else { const bool oldVersion = (io::getFormatVersion(is) < OPENVDB_FILE_VERSION_NODE_MASK_COMPRESSION); const Index numValues = (oldVersion ? mChildMask.countOff() : NUM_VALUES); { // Read in (and uncompress, if necessary) all of this node's values // into a contiguous array. std::unique_ptr<ValueType[]> valuePtr(new ValueType[numValues]); ValueType* values = valuePtr.get(); io::readCompressedValues(is, values, numValues, mValueMask, fromHalf); // Copy values from the array into this node's table. if (oldVersion) { Index n = 0; for (ValueAllIter iter = this->beginValueAll(); iter; ++iter) { mNodes[iter.pos()].setValue(values[n++]); } assert(n == numValues); } else { for (ValueAllIter iter = this->beginValueAll(); iter; ++iter) { mNodes[iter.pos()].setValue(values[iter.pos()]); } } } // Read in all child nodes and insert them into the table at their proper locations. for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) { ChildNodeType* child = new ChildNodeType(PartialCreate(), iter.getCoord(), background); mNodes[iter.pos()].setChild(child); child->readTopology(is, fromHalf); } } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline const typename ChildT::ValueType& InternalNode<ChildT, Log2Dim>::getFirstValue() const { return (this->isChildMaskOn(0) ? 
mNodes[0].getChild()->getFirstValue() : mNodes[0].getValue()); } template<typename ChildT, Index Log2Dim> inline const typename ChildT::ValueType& InternalNode<ChildT, Log2Dim>::getLastValue() const { const Index n = NUM_VALUES - 1; return (this->isChildMaskOn(n) ? mNodes[n].getChild()->getLastValue() : mNodes[n].getValue()); } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::negate() { for (Index i = 0; i < NUM_VALUES; ++i) { if (this->isChildMaskOn(i)) { mNodes[i].getChild()->negate(); } else { mNodes[i].setValue(math::negative(mNodes[i].getValue())); } } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> struct InternalNode<ChildT, Log2Dim>::VoxelizeActiveTiles { VoxelizeActiveTiles(InternalNode &node) : mNode(&node) { //(*this)(tbb::blocked_range<Index>(0, NUM_VALUES));//single thread for debugging tbb::parallel_for(tbb::blocked_range<Index>(0, NUM_VALUES), *this); node.mChildMask |= node.mValueMask; node.mValueMask.setOff(); } void operator()(const tbb::blocked_range<Index> &r) const { for (Index i = r.begin(), end=r.end(); i!=end; ++i) { if (mNode->mChildMask.isOn(i)) {// Loop over node's child nodes mNode->mNodes[i].getChild()->voxelizeActiveTiles(true); } else if (mNode->mValueMask.isOn(i)) {// Loop over node's active tiles const Coord &ijk = mNode->offsetToGlobalCoord(i); ChildNodeType *child = new ChildNodeType(ijk, mNode->mNodes[i].getValue(), true); child->voxelizeActiveTiles(true); mNode->mNodes[i].setChild(child); } } } InternalNode* mNode; };// VoxelizeActiveTiles template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::voxelizeActiveTiles(bool threaded) { if (threaded) { VoxelizeActiveTiles tmp(*this); } else { for (ValueOnIter iter = this->beginValueOn(); iter; ++iter) { this->setChildNode(iter.pos(), new ChildNodeType(iter.getCoord(), iter.getValue(), true)); } for (ChildOnIter iter = this->beginChildOn(); iter; 
++iter) iter->voxelizeActiveTiles(false); } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<MergePolicy Policy> inline void InternalNode<ChildT, Log2Dim>::merge(InternalNode& other, const ValueType& background, const ValueType& otherBackground) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN switch (Policy) { case MERGE_ACTIVE_STATES: default: { for (ChildOnIter iter = other.beginChildOn(); iter; ++iter) { const Index n = iter.pos(); if (mChildMask.isOn(n)) { // Merge this node's child with the other node's child. mNodes[n].getChild()->template merge<MERGE_ACTIVE_STATES>(*iter, background, otherBackground); } else if (mValueMask.isOff(n)) { // Replace this node's inactive tile with the other node's child // and replace the other node's child with a tile of undefined value // (which is okay since the other tree is assumed to be cannibalized // in the process of merging). ChildNodeType* child = other.mNodes[n].getChild(); other.mChildMask.setOff(n); child->resetBackground(otherBackground, background); this->setChildNode(n, child); } } // Copy active tile values. for (ValueOnCIter iter = other.cbeginValueOn(); iter; ++iter) { const Index n = iter.pos(); if (mValueMask.isOff(n)) { // Replace this node's child or inactive tile with the other node's active tile. this->makeChildNodeEmpty(n, iter.getValue()); mValueMask.setOn(n); } } break; } case MERGE_NODES: { for (ChildOnIter iter = other.beginChildOn(); iter; ++iter) { const Index n = iter.pos(); if (mChildMask.isOn(n)) { // Merge this node's child with the other node's child. mNodes[n].getChild()->template merge<Policy>(*iter, background, otherBackground); } else { // Replace this node's tile (regardless of its active state) with // the other node's child and replace the other node's child with // a tile of undefined value (which is okay since the other tree // is assumed to be cannibalized in the process of merging). 
ChildNodeType* child = other.mNodes[n].getChild(); other.mChildMask.setOff(n); child->resetBackground(otherBackground, background); this->setChildNode(n, child); } } break; } case MERGE_ACTIVE_STATES_AND_NODES: { // Transfer children from the other tree to this tree. for (ChildOnIter iter = other.beginChildOn(); iter; ++iter) { const Index n = iter.pos(); if (mChildMask.isOn(n)) { // Merge this node's child with the other node's child. mNodes[n].getChild()->template merge<Policy>(*iter, background, otherBackground); } else { // Replace this node's tile with the other node's child, leaving the other // node with an inactive tile of undefined value (which is okay since // the other tree is assumed to be cannibalized in the process of merging). ChildNodeType* child = other.mNodes[n].getChild(); other.mChildMask.setOff(n); child->resetBackground(otherBackground, background); if (mValueMask.isOn(n)) { // Merge the child with this node's active tile. child->template merge<Policy>(mNodes[n].getValue(), /*on=*/true); mValueMask.setOff(n); } mChildMask.setOn(n); mNodes[n].setChild(child); } } // Merge active tiles into this tree. for (ValueOnCIter iter = other.cbeginValueOn(); iter; ++iter) { const Index n = iter.pos(); if (mChildMask.isOn(n)) { // Merge the other node's active tile into this node's child. mNodes[n].getChild()->template merge<Policy>(iter.getValue(), /*on=*/true); } else if (mValueMask.isOff(n)) { // Replace this node's inactive tile with the other node's active tile. mNodes[n].setValue(iter.getValue()); mValueMask.setOn(n); } } break; } } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } template<typename ChildT, Index Log2Dim> template<MergePolicy Policy> inline void InternalNode<ChildT, Log2Dim>::merge(const ValueType& tileValue, bool tileActive) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (Policy != MERGE_ACTIVE_STATES_AND_NODES) return; // For MERGE_ACTIVE_STATES_AND_NODES, inactive tiles in the other tree are ignored. 
if (!tileActive) return; // Iterate over this node's children and inactive tiles. for (ValueOffIter iter = this->beginValueOff(); iter; ++iter) { const Index n = iter.pos(); if (mChildMask.isOn(n)) { // Merge the other node's active tile into this node's child. mNodes[n].getChild()->template merge<Policy>(tileValue, /*on=*/true); } else { // Replace this node's inactive tile with the other node's active tile. iter.setValue(tileValue); mValueMask.setOn(n); } } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<typename OtherInternalNode> struct InternalNode<ChildT, Log2Dim>::TopologyUnion { using W = typename NodeMaskType::Word; struct A { inline void operator()(W &tV, const W& sV, const W& tC) const { tV = (tV | sV) & ~tC; } }; TopologyUnion(const OtherInternalNode* source, InternalNode* target) : s(source), t(target) { //(*this)(tbb::blocked_range<Index>(0, NUM_VALUES));//single thread for debugging tbb::parallel_for(tbb::blocked_range<Index>(0, NUM_VALUES), *this); // Bit processing is done in a single thread! 
t->mChildMask |= s->mChildMask;//serial but very fast bitwise post-process A op; t->mValueMask.foreach(s->mValueMask, t->mChildMask, op); assert((t->mValueMask & t->mChildMask).isOff());//no overlapping active tiles or child nodes } void operator()(const tbb::blocked_range<Index> &r) const { for (Index i = r.begin(), end=r.end(); i!=end; ++i) { if (s->mChildMask.isOn(i)) {// Loop over other node's child nodes const typename OtherInternalNode::ChildNodeType& other = *(s->mNodes[i].getChild()); if (t->mChildMask.isOn(i)) {//this has a child node t->mNodes[i].getChild()->topologyUnion(other); } else {// this is a tile so replace it with a child branch with identical topology ChildT* child = new ChildT(other, t->mNodes[i].getValue(), TopologyCopy()); if (t->mValueMask.isOn(i)) child->setValuesOn();//activate all values t->mNodes[i].setChild(child); } } else if (s->mValueMask.isOn(i) && t->mChildMask.isOn(i)) { t->mNodes[i].getChild()->setValuesOn(); } } } const OtherInternalNode* s; InternalNode* t; };// TopologyUnion template<typename ChildT, Index Log2Dim> template<typename OtherChildT> inline void InternalNode<ChildT, Log2Dim>::topologyUnion(const InternalNode<OtherChildT, Log2Dim>& other) { TopologyUnion<InternalNode<OtherChildT, Log2Dim> > tmp(&other, this); } template<typename ChildT, Index Log2Dim> template<typename OtherInternalNode> struct InternalNode<ChildT, Log2Dim>::TopologyIntersection { using W = typename NodeMaskType::Word; struct A { inline void operator()(W &tC, const W& sC, const W& sV, const W& tV) const { tC = (tC & (sC | sV)) | (tV & sC); } }; TopologyIntersection(const OtherInternalNode* source, InternalNode* target, const ValueType& background) : s(source), t(target), b(background) { //(*this)(tbb::blocked_range<Index>(0, NUM_VALUES));//single thread for debugging tbb::parallel_for(tbb::blocked_range<Index>(0, NUM_VALUES), *this); // Bit processing is done in a single thread! 
A op; t->mChildMask.foreach(s->mChildMask, s->mValueMask, t->mValueMask, op); t->mValueMask &= s->mValueMask; assert((t->mValueMask & t->mChildMask).isOff());//no overlapping active tiles or child nodes } void operator()(const tbb::blocked_range<Index> &r) const { for (Index i = r.begin(), end=r.end(); i!=end; ++i) { if (t->mChildMask.isOn(i)) {// Loop over this node's child nodes ChildT* child = t->mNodes[i].getChild(); if (s->mChildMask.isOn(i)) {//other also has a child node child->topologyIntersection(*(s->mNodes[i].getChild()), b); } else if (s->mValueMask.isOff(i)) {//other is an inactive tile delete child;//convert child to an inactive tile t->mNodes[i].setValue(b); } } else if (t->mValueMask.isOn(i) && s->mChildMask.isOn(i)) {//active tile -> a branch t->mNodes[i].setChild(new ChildT(*(s->mNodes[i].getChild()), t->mNodes[i].getValue(), TopologyCopy())); } } } const OtherInternalNode* s; InternalNode* t; const ValueType& b; };// TopologyIntersection template<typename ChildT, Index Log2Dim> template<typename OtherChildT> inline void InternalNode<ChildT, Log2Dim>::topologyIntersection( const InternalNode<OtherChildT, Log2Dim>& other, const ValueType& background) { TopologyIntersection<InternalNode<OtherChildT, Log2Dim> > tmp(&other, this, background); } template<typename ChildT, Index Log2Dim> template<typename OtherInternalNode> struct InternalNode<ChildT, Log2Dim>::TopologyDifference { using W = typename NodeMaskType::Word; struct A {inline void operator()(W &tC, const W& sC, const W& sV, const W& tV) const { tC = (tC & (sC | ~sV)) | (tV & sC); } }; struct B {inline void operator()(W &tV, const W& sC, const W& sV, const W& tC) const { tV &= ~((tC & sV) | (sC | sV)); } }; TopologyDifference(const OtherInternalNode* source, InternalNode* target, const ValueType& background) : s(source), t(target), b(background) { //(*this)(tbb::blocked_range<Index>(0, NUM_VALUES));//single thread for debugging tbb::parallel_for(tbb::blocked_range<Index>(0, NUM_VALUES), *this); 
// Bit processing is done in a single thread! const NodeMaskType oldChildMask(t->mChildMask);//important to avoid cross pollution A op1; t->mChildMask.foreach(s->mChildMask, s->mValueMask, t->mValueMask, op1); B op2; t->mValueMask.foreach(t->mChildMask, s->mValueMask, oldChildMask, op2); assert((t->mValueMask & t->mChildMask).isOff());//no overlapping active tiles or child nodes } void operator()(const tbb::blocked_range<Index> &r) const { for (Index i = r.begin(), end=r.end(); i!=end; ++i) { if (t->mChildMask.isOn(i)) {// Loop over this node's child nodes ChildT* child = t->mNodes[i].getChild(); if (s->mChildMask.isOn(i)) { child->topologyDifference(*(s->mNodes[i].getChild()), b); } else if (s->mValueMask.isOn(i)) { delete child;//convert child to an inactive tile t->mNodes[i].setValue(b); } } else if (t->mValueMask.isOn(i)) {//this is an active tile if (s->mChildMask.isOn(i)) { const typename OtherInternalNode::ChildNodeType& other = *(s->mNodes[i].getChild()); ChildT* child = new ChildT(other.origin(), t->mNodes[i].getValue(), true); child->topologyDifference(other, b); t->mNodes[i].setChild(child);//replace the active tile with a child branch } } } } const OtherInternalNode* s; InternalNode* t; const ValueType& b; };// TopologyDifference template<typename ChildT, Index Log2Dim> template<typename OtherChildT> inline void InternalNode<ChildT, Log2Dim>::topologyDifference(const InternalNode<OtherChildT, Log2Dim>& other, const ValueType& background) { TopologyDifference<InternalNode<OtherChildT, Log2Dim> > tmp(&other, this, background); } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<typename CombineOp> inline void InternalNode<ChildT, Log2Dim>::combine(InternalNode& other, CombineOp& op) { const ValueType zero = zeroVal<ValueType>(); CombineArgs<ValueType> args; for (Index i = 0; i < NUM_VALUES; ++i) { if (this->isChildMaskOff(i) && other.isChildMaskOff(i)) { // Both this node and the other node have constant values 
(tiles). // Combine the two values and store the result as this node's new tile value. op(args.setARef(mNodes[i].getValue()) .setAIsActive(isValueMaskOn(i)) .setBRef(other.mNodes[i].getValue()) .setBIsActive(other.isValueMaskOn(i))); mNodes[i].setValue(args.result()); mValueMask.set(i, args.resultIsActive()); } else if (this->isChildMaskOn(i) && other.isChildMaskOff(i)) { // Combine this node's child with the other node's constant value. ChildNodeType* child = mNodes[i].getChild(); assert(child); if (child) { child->combine(other.mNodes[i].getValue(), other.isValueMaskOn(i), op); } } else if (this->isChildMaskOff(i) && other.isChildMaskOn(i)) { // Combine this node's constant value with the other node's child. ChildNodeType* child = other.mNodes[i].getChild(); assert(child); if (child) { // Combine this node's constant value with the other node's child, // but use a new functor in which the A and B values are swapped, // since the constant value is the A value, not the B value. SwappedCombineOp<ValueType, CombineOp> swappedOp(op); child->combine(mNodes[i].getValue(), isValueMaskOn(i), swappedOp); // Steal the other node's child. other.mChildMask.setOff(i); other.mNodes[i].setValue(zero); this->setChildNode(i, child); } } else /*if (isChildMaskOn(i) && other.isChildMaskOn(i))*/ { // Combine this node's child with the other node's child. ChildNodeType *child = mNodes[i].getChild(), *otherChild = other.mNodes[i].getChild(); assert(child); assert(otherChild); if (child && otherChild) { child->combine(*otherChild, op); } } } } template<typename ChildT, Index Log2Dim> template<typename CombineOp> inline void InternalNode<ChildT, Log2Dim>::combine(const ValueType& value, bool valueIsActive, CombineOp& op) { CombineArgs<ValueType> args; for (Index i = 0; i < NUM_VALUES; ++i) { if (this->isChildMaskOff(i)) { // Combine this node's constant value with the given constant value. 
op(args.setARef(mNodes[i].getValue()) .setAIsActive(isValueMaskOn(i)) .setBRef(value) .setBIsActive(valueIsActive)); mNodes[i].setValue(args.result()); mValueMask.set(i, args.resultIsActive()); } else /*if (isChildMaskOn(i))*/ { // Combine this node's child with the given constant value. ChildNodeType* child = mNodes[i].getChild(); assert(child); if (child) child->combine(value, valueIsActive, op); } } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<typename CombineOp, typename OtherNodeType> inline void InternalNode<ChildT, Log2Dim>::combine2(const InternalNode& other0, const OtherNodeType& other1, CombineOp& op) { CombineArgs<ValueType, typename OtherNodeType::ValueType> args; for (Index i = 0; i < NUM_VALUES; ++i) { if (other0.isChildMaskOff(i) && other1.isChildMaskOff(i)) { op(args.setARef(other0.mNodes[i].getValue()) .setAIsActive(other0.isValueMaskOn(i)) .setBRef(other1.mNodes[i].getValue()) .setBIsActive(other1.isValueMaskOn(i))); // Replace child i with a constant value. this->makeChildNodeEmpty(i, args.result()); mValueMask.set(i, args.resultIsActive()); } else { if (this->isChildMaskOff(i)) { // Add a new child with the same coordinates, etc. as the other node's child. const Coord& childOrigin = other0.isChildMaskOn(i) ? other0.mNodes[i].getChild()->origin() : other1.mNodes[i].getChild()->origin(); this->setChildNode(i, new ChildNodeType(childOrigin, mNodes[i].getValue())); } if (other0.isChildMaskOff(i)) { // Combine node1's child with node0's constant value // and write the result into child i. mNodes[i].getChild()->combine2(other0.mNodes[i].getValue(), *other1.mNodes[i].getChild(), other0.isValueMaskOn(i), op); } else if (other1.isChildMaskOff(i)) { // Combine node0's child with node1's constant value // and write the result into child i. 
mNodes[i].getChild()->combine2(*other0.mNodes[i].getChild(), other1.mNodes[i].getValue(), other1.isValueMaskOn(i), op); } else { // Combine node0's child with node1's child // and write the result into child i. mNodes[i].getChild()->combine2(*other0.mNodes[i].getChild(), *other1.mNodes[i].getChild(), op); } } } } template<typename ChildT, Index Log2Dim> template<typename CombineOp, typename OtherNodeType> inline void InternalNode<ChildT, Log2Dim>::combine2(const ValueType& value, const OtherNodeType& other, bool valueIsActive, CombineOp& op) { CombineArgs<ValueType, typename OtherNodeType::ValueType> args; for (Index i = 0; i < NUM_VALUES; ++i) { if (other.isChildMaskOff(i)) { op(args.setARef(value) .setAIsActive(valueIsActive) .setBRef(other.mNodes[i].getValue()) .setBIsActive(other.isValueMaskOn(i))); // Replace child i with a constant value. this->makeChildNodeEmpty(i, args.result()); mValueMask.set(i, args.resultIsActive()); } else { typename OtherNodeType::ChildNodeType* otherChild = other.mNodes[i].getChild(); assert(otherChild); if (this->isChildMaskOff(i)) { // Add a new child with the same coordinates, etc. // as the other node's child. this->setChildNode(i, new ChildNodeType(*otherChild)); } // Combine the other node's child with a constant value // and write the result into child i. mNodes[i].getChild()->combine2(value, *otherChild, valueIsActive, op); } } } template<typename ChildT, Index Log2Dim> template<typename CombineOp, typename OtherValueType> inline void InternalNode<ChildT, Log2Dim>::combine2(const InternalNode& other, const OtherValueType& value, bool valueIsActive, CombineOp& op) { CombineArgs<ValueType, OtherValueType> args; for (Index i = 0; i < NUM_VALUES; ++i) { if (other.isChildMaskOff(i)) { op(args.setARef(other.mNodes[i].getValue()) .setAIsActive(other.isValueMaskOn(i)) .setBRef(value) .setBIsActive(valueIsActive)); // Replace child i with a constant value. 
this->makeChildNodeEmpty(i, args.result()); mValueMask.set(i, args.resultIsActive()); } else { ChildNodeType* otherChild = other.mNodes[i].getChild(); assert(otherChild); if (this->isChildMaskOff(i)) { // Add a new child with the same coordinates, etc. as the other node's child. this->setChildNode(i, new ChildNodeType(otherChild->origin(), mNodes[i].getValue())); } // Combine the other node's child with a constant value // and write the result into child i. mNodes[i].getChild()->combine2(*otherChild, value, valueIsActive, op); } } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<typename BBoxOp> inline void InternalNode<ChildT, Log2Dim>::visitActiveBBox(BBoxOp& op) const { for (ValueOnCIter i = this->cbeginValueOn(); i; ++i) { op.template operator()<LEVEL>(CoordBBox::createCube(i.getCoord(), ChildNodeType::DIM)); } if (op.template descent<LEVEL>()) { for (ChildOnCIter i = this->cbeginChildOn(); i; ++i) i->visitActiveBBox(op); } else { for (ChildOnCIter i = this->cbeginChildOn(); i; ++i) { op.template operator()<LEVEL>(i->getNodeBoundingBox()); } } } template<typename ChildT, Index Log2Dim> template<typename VisitorOp> inline void InternalNode<ChildT, Log2Dim>::visit(VisitorOp& op) { doVisit<InternalNode, VisitorOp, ChildAllIter>(*this, op); } template<typename ChildT, Index Log2Dim> template<typename VisitorOp> inline void InternalNode<ChildT, Log2Dim>::visit(VisitorOp& op) const { doVisit<const InternalNode, VisitorOp, ChildAllCIter>(*this, op); } template<typename ChildT, Index Log2Dim> template<typename NodeT, typename VisitorOp, typename ChildAllIterT> inline void InternalNode<ChildT, Log2Dim>::doVisit(NodeT& self, VisitorOp& op) { typename NodeT::ValueType val; for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { if (op(iter)) continue; if (typename ChildAllIterT::ChildNodeType* child = iter.probeChild(val)) { child->visit(op); } } } //////////////////////////////////////// template<typename ChildT, Index 
Log2Dim> template<typename OtherNodeType, typename VisitorOp> inline void InternalNode<ChildT, Log2Dim>::visit2Node(OtherNodeType& other, VisitorOp& op) { doVisit2Node<InternalNode, OtherNodeType, VisitorOp, ChildAllIter, typename OtherNodeType::ChildAllIter>(*this, other, op); } template<typename ChildT, Index Log2Dim> template<typename OtherNodeType, typename VisitorOp> inline void InternalNode<ChildT, Log2Dim>::visit2Node(OtherNodeType& other, VisitorOp& op) const { doVisit2Node<const InternalNode, OtherNodeType, VisitorOp, ChildAllCIter, typename OtherNodeType::ChildAllCIter>(*this, other, op); } template<typename ChildT, Index Log2Dim> template< typename NodeT, typename OtherNodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> inline void InternalNode<ChildT, Log2Dim>::doVisit2Node(NodeT& self, OtherNodeT& other, VisitorOp& op) { // Allow the two nodes to have different ValueTypes, but not different dimensions. static_assert(OtherNodeT::NUM_VALUES == NodeT::NUM_VALUES, "visit2() requires nodes to have the same dimensions"); static_assert(OtherNodeT::LEVEL == NodeT::LEVEL, "visit2() requires nodes to be at the same tree level"); typename NodeT::ValueType val; typename OtherNodeT::ValueType otherVal; ChildAllIterT iter = self.beginChildAll(); OtherChildAllIterT otherIter = other.beginChildAll(); for ( ; iter && otherIter; ++iter, ++otherIter) { const size_t skipBranch = static_cast<size_t>(op(iter, otherIter)); typename ChildAllIterT::ChildNodeType* child = (skipBranch & 1U) ? nullptr : iter.probeChild(val); typename OtherChildAllIterT::ChildNodeType* otherChild = (skipBranch & 2U) ? 
nullptr : otherIter.probeChild(otherVal); if (child != nullptr && otherChild != nullptr) { child->visit2Node(*otherChild, op); } else if (child != nullptr) { child->visit2(otherIter, op); } else if (otherChild != nullptr) { otherChild->visit2(iter, op, /*otherIsLHS=*/true); } } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<typename OtherChildAllIterType, typename VisitorOp> inline void InternalNode<ChildT, Log2Dim>::visit2(OtherChildAllIterType& otherIter, VisitorOp& op, bool otherIsLHS) { doVisit2<InternalNode, VisitorOp, ChildAllIter, OtherChildAllIterType>( *this, otherIter, op, otherIsLHS); } template<typename ChildT, Index Log2Dim> template<typename OtherChildAllIterType, typename VisitorOp> inline void InternalNode<ChildT, Log2Dim>::visit2(OtherChildAllIterType& otherIter, VisitorOp& op, bool otherIsLHS) const { doVisit2<const InternalNode, VisitorOp, ChildAllCIter, OtherChildAllIterType>( *this, otherIter, op, otherIsLHS); } template<typename ChildT, Index Log2Dim> template<typename NodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> inline void InternalNode<ChildT, Log2Dim>::doVisit2(NodeT& self, OtherChildAllIterT& otherIter, VisitorOp& op, bool otherIsLHS) { if (!otherIter) return; const size_t skipBitMask = (otherIsLHS ? 2U : 1U); typename NodeT::ValueType val; for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { const size_t skipBranch = static_cast<size_t>( otherIsLHS ? op(otherIter, iter) : op(iter, otherIter)); typename ChildAllIterT::ChildNodeType* child = (skipBranch & skipBitMask) ? 
// NOTE(review): the statements below are the tail end of
// InternalNode::visit2(), whose definition begins before this chunk;
// they are reproduced unchanged.
nullptr : iter.probeChild(val);
    if (child != nullptr) child->visit2(otherIter, op, otherIsLHS);
}
}


////////////////////////////////////////


/// @brief Write out the voxel buffers of the branch rooted at this node.
/// @param os output stream
/// @param toHalf if true, write floating-point voxel values as 16-bit half floats
/// @details Internal nodes store no buffers of their own, so this simply
/// delegates to each child, producing a depth-first serialization.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::writeBuffers(std::ostream& os, bool toHalf) const
{
    for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) {
        iter->writeBuffers(os, toHalf);
    }
}


/// @brief Read in the voxel buffers of the branch rooted at this node.
/// @param is input stream
/// @param fromHalf if true, expand 16-bit half floats to full precision on input
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::readBuffers(std::istream& is, bool fromHalf)
{
    for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) {
        iter->readBuffers(is, fromHalf);
    }
}


/// @brief Read in the voxel buffers of the branch rooted at this node and
/// clip the result to the given bounding box.
/// @param is input stream
/// @param clipBBox bounding box (in index space) to which the branch is clipped
/// @param fromHalf if true, expand 16-bit half floats to full precision on input
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::readBuffers(std::istream& is,
    const CoordBBox& clipBBox, bool fromHalf)
{
    for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) {
        // Stream in the branch rooted at this child.
        // (We can't skip over children that lie outside the clipping region,
        // because buffers are serialized in depth-first order and need to be
        // unserialized in the same order.)
        iter->readBuffers(is, clipBBox, fromHalf);
    }
    // Get this tree's background value.
    ValueType background = zeroVal<ValueType>();
    if (const void* bgPtr = io::getGridBackgroundValuePtr(is)) {
        background = *static_cast<const ValueType*>(bgPtr);
    }
    // Discard everything outside the clip region, replacing it with background.
    this->clip(clipBBox, background);
}


////////////////////////////////////////


/// @brief Append this level's Log2Dim to @a dims, then recurse so that
/// @a dims ends up holding the log2 branching factors of every tree level
/// from this node down to the leaves.
template<typename ChildT, Index Log2Dim>
void
InternalNode<ChildT, Log2Dim>::getNodeLog2Dims(std::vector<Index>& dims)
{
    dims.push_back(Log2Dim);
    ChildNodeType::getNodeLog2Dims(dims);
}


/// @brief Decompose the linear table offset @a n into local (x, y, z)
/// coordinates relative to this node's origin.
/// @details The offset packs x into the top Log2Dim bits, then y, then z
/// (the inverse of coordToOffset()).
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::offsetToLocalCoord(Index n, Coord &xyz)
{
    // n must index one of the (2^Log2Dim)^3 table entries.
    assert(n<(1<<3*Log2Dim));
    xyz.setX(n >> 2*Log2Dim);
    n &= ((1<<2*Log2Dim)-1);
    xyz.setY(n >> Log2Dim);
    xyz.setZ(n & ((1<<Log2Dim)-1));
}


/// @brief Map global (or local) coordinates @a xyz to a linear table offset.
/// @details Masking each component with DIM-1 makes the result independent of
/// this node's origin; shifting by ChildNodeType::TOTAL discards the bits
/// that address voxels inside a child.
template<typename ChildT, Index Log2Dim>
inline Index
InternalNode<ChildT, Log2Dim>::coordToOffset(const Coord& xyz)
{
    return (((xyz[0] & (DIM-1u)) >> ChildNodeType::TOTAL) << 2*Log2Dim)
        +  (((xyz[1] & (DIM-1u)) >> ChildNodeType::TOTAL) <<   Log2Dim)
        +   ((xyz[2] & (DIM-1u)) >> ChildNodeType::TOTAL);
}


/// @brief Return the global coordinates of the table entry at offset @a n,
/// i.e., its local coordinates scaled up to child resolution and translated
/// by this node's origin.
template<typename ChildT, Index Log2Dim>
inline Coord
InternalNode<ChildT, Log2Dim>::offsetToGlobalCoord(Index n) const
{
    Coord local;
    this->offsetToLocalCoord(n, local);
    local <<= ChildT::TOTAL;
    return local + this->origin();
}


////////////////////////////////////////


/// @brief Collect pointers to nodes of the type stored in @a array.
/// @details If the array's element type matches the (possibly const) child
/// type, pointers to this node's immediate children are appended; otherwise
/// the call recurses into each child branch.
template<typename ChildT, Index Log2Dim>
template<typename ArrayT>
inline void
InternalNode<ChildT, Log2Dim>::getNodes(ArrayT& array)
{
    using T = typename ArrayT::value_type;
    static_assert(std::is_pointer<T>::value, "argument to getNodes() must be a pointer array");
    // Propagate the constness of the array's element type onto the child type,
    // so that a const-pointer array matches const children.
    using ArrayChildT = typename std::conditional<
        std::is_const<typename std::remove_pointer<T>::type>::value, const ChildT, ChildT>::type;
    for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) {
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        if (std::is_same<T, ArrayChildT*>::value) {
            array.push_back(reinterpret_cast<T>(mNodes[iter.pos()].getChild()));
        } else {
            iter->getNodes(array);//descent
        }
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }
}


/// @brief Collect const pointers to nodes of the type stored in @a array,
/// which must be an array of pointers to const nodes.
template<typename ChildT, Index Log2Dim>
template<typename ArrayT>
inline void
InternalNode<ChildT, Log2Dim>::getNodes(ArrayT& array) const
{
    using T = typename ArrayT::value_type;
    static_assert(std::is_pointer<T>::value, "argument to getNodes() must be a pointer array");
    static_assert(std::is_const<typename std::remove_pointer<T>::type>::value,
        "argument to getNodes() must be an array of const node pointers");
    for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) {
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        if (std::is_same<T, const ChildT*>::value) {
            array.push_back(reinterpret_cast<T>(mNodes[iter.pos()].getChild()));
        } else {
            iter->getNodes(array);//descent
        }
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }
}


////////////////////////////////////////


/// @brief Transfer ownership of nodes of the type stored in @a array to the caller.
/// @param array output array that receives the stolen node pointers
/// @param value value assigned to the tiles that replace stolen children
/// @param state active state assigned to those replacement tiles
/// @details If the array's element type matches the child type, the children
/// themselves are stolen; otherwise the call recurses into each child branch.
template<typename ChildT, Index Log2Dim>
template<typename ArrayT>
inline void
InternalNode<ChildT, Log2Dim>::stealNodes(ArrayT& array, const ValueType& value, bool state)
{
    using T = typename ArrayT::value_type;
    static_assert(std::is_pointer<T>::value, "argument to stealNodes() must be a pointer array");
    using ArrayChildT = typename std::conditional<
        std::is_const<typename std::remove_pointer<T>::type>::value, const ChildT, ChildT>::type;
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) {
        const Index n = iter.pos();
        if (std::is_same<T, ArrayChildT*>::value) {
            // Hand the child pointer to the caller and overwrite the table
            // entry with a tile of the requested value and active state.
            array.push_back(reinterpret_cast<T>(mNodes[n].getChild()));
            mValueMask.set(n, state);
            mNodes[n].setValue(value);
        } else {
            iter->stealNodes(array, value, state);//descent
        }
    }
    // If the immediate children were the ones stolen, no child entries remain.
    if (std::is_same<T, ArrayChildT*>::value) mChildMask.setOff();
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}


////////////////////////////////////////


/// @brief Replace inactive tiles holding the old background value (or its
/// negation) with the new background value, recursing into child nodes.
/// @note Values are compared with isApproxEqual(); active tiles are untouched.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::resetBackground(const ValueType& oldBackground,
    const ValueType& newBackground)
{
    // Nothing to do if the background value is unchanged.
    if (math::isExactlyEqual(oldBackground, newBackground)) return;
    for (Index i = 0; i < NUM_VALUES; ++i) {
        if (this->isChildMaskOn(i)) {
            mNodes[i].getChild()->resetBackground(oldBackground, newBackground);
        } else if (this->isValueMaskOff(i)) {
            // Only inactive tiles are treated as background.
            if (math::isApproxEqual(mNodes[i].getValue(), oldBackground)) {
                mNodes[i].setValue(newBackground);
            } else if (math::isApproxEqual(mNodes[i].getValue(), math::negative(oldBackground))) {
                // Also remap the negated background (inside/outside sign convention).
                mNodes[i].setValue(math::negative(newBackground));
            }
        }
    }
}


/// @brief Return true if this node and @a other (which may have a different
/// value type) have identical child and value masks and all corresponding
/// children have the same topology.
template<typename ChildT, Index Log2Dim>
template<typename OtherChildNodeType, Index OtherLog2Dim>
inline bool
InternalNode<ChildT, Log2Dim>::hasSameTopology(
    const InternalNode<OtherChildNodeType, OtherLog2Dim>* other) const
{
    // Cheap mask comparison first; then recurse into the children.
    if (Log2Dim != OtherLog2Dim || mChildMask != other->mChildMask ||
        mValueMask != other->mValueMask) return false;
    for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) {
        if (!iter->hasSameTopology(other->mNodes[iter.pos()].getChild())) return false;
    }
    return true;
}


/// @brief Install @a child at table index @a i, taking ownership of it.
/// @details Any existing child at that index is deleted; if the slot held a
/// tile, it is converted into a child slot.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::resetChildNode(Index i, ChildNodeType* child)
{
    assert(child);
    if (this->isChildMaskOn(i)) {
        // Replace the existing child.
        delete mNodes[i].getChild();
    } else {
        // Convert the tile slot into a child slot.
        mChildMask.setOn(i);
        mValueMask.setOff(i);
    }
    mNodes[i].setChild(child);
}


/// @brief Install @a child at table index @a i, taking ownership of it.
/// @note Unlike resetChildNode(), the slot must currently hold a tile, not a child.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::setChildNode(Index i, ChildNodeType* child)
{
    assert(child);
    assert(mChildMask.isOff(i));
    mChildMask.setOn(i);
    mValueMask.setOff(i);
    mNodes[i].setChild(child);
}


/// @brief Remove and return the child at table index @a i, replacing it with
/// a tile of the given @a value; return nullptr if there is no child there.
/// @note Ownership of the returned child passes to the caller.
template<typename ChildT, Index Log2Dim>
inline ChildT*
InternalNode<ChildT, Log2Dim>::unsetChildNode(Index i, const ValueType& value)
{
    if (this->isChildMaskOff(i)) {
        // No child to remove; just update the tile value.
        mNodes[i].setValue(value);
        return nullptr;
    }
    ChildNodeType* child = mNodes[i].getChild();
    mChildMask.setOff(i);
    mNodes[i].setValue(value);
    return child;
}


/// @brief Delete the child at table index @a n (if any) and replace it with
/// a tile of the given @a value.
template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::makeChildNodeEmpty(Index n, const ValueType& value)
{
    // unsetChildNode() returns nullptr when there is no child; delete accepts that.
    delete this->unsetChildNode(n, value);
}


/// @brief Return the child node at table index @a n, which must exist.
template<typename ChildT, Index Log2Dim>
inline ChildT*
InternalNode<ChildT, Log2Dim>::getChildNode(Index n)
{
    assert(this->isChildMaskOn(n));
    return mNodes[n].getChild();
}


/// @brief Return the child node at table index @a n, which must exist.
template<typename ChildT, Index Log2Dim>
inline const ChildT*
InternalNode<ChildT, Log2Dim>::getChildNode(Index n) const
{
    assert(this->isChildMaskOn(n));
    return mNodes[n].getChild();
}

} // namespace tree
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TREE_INTERNALNODE_HAS_BEEN_INCLUDED
129,215
C
38.239599
100
0.640529
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/LeafNodeBool.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_TREE_LEAF_NODE_BOOL_HAS_BEEN_INCLUDED #define OPENVDB_TREE_LEAF_NODE_BOOL_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/io/Compression.h> // for io::readData(), etc. #include <openvdb/math/Math.h> // for math::isZero() #include <openvdb/util/NodeMasks.h> #include "LeafNode.h" #include "Iterator.h" #include <iostream> #include <sstream> #include <string> #include <type_traits> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { /// @brief LeafNode specialization for values of type bool that stores both /// the active states and the values of (2^Log2Dim)^3 voxels as bit masks template<Index Log2Dim> class LeafNode<bool, Log2Dim> { public: using LeafNodeType = LeafNode<bool, Log2Dim>; using BuildType = bool; using ValueType = bool; using Buffer = LeafBuffer<ValueType, Log2Dim>; using NodeMaskType = util::NodeMask<Log2Dim>; using Ptr = SharedPtr<LeafNodeType>; // These static declarations must be on separate lines to avoid VC9 compiler errors. static const Index LOG2DIM = Log2Dim; // needed by parent nodes static const Index TOTAL = Log2Dim; // needed by parent nodes static const Index DIM = 1 << TOTAL; // dimension along one coordinate direction static const Index NUM_VALUES = 1 << 3 * Log2Dim; static const Index NUM_VOXELS = NUM_VALUES; // total number of voxels represented by this node static const Index SIZE = NUM_VALUES; static const Index LEVEL = 0; // level 0 = leaf /// @brief ValueConverter<T>::Type is the type of a LeafNode having the same /// dimensions as this node but a different value type, T. template<typename ValueType> struct ValueConverter { using Type = LeafNode<ValueType, Log2Dim>; }; /// @brief SameConfiguration<OtherNodeType>::value is @c true if and only if /// OtherNodeType is the type of a LeafNode with the same dimensions as this node. 
template<typename OtherNodeType> struct SameConfiguration { static const bool value = SameLeafConfig<LOG2DIM, OtherNodeType>::value; }; /// Default constructor LeafNode(); /// Constructor /// @param xyz the coordinates of a voxel that lies within the node /// @param value the initial value for all of this node's voxels /// @param active the active state to which to initialize all voxels explicit LeafNode(const Coord& xyz, bool value = false, bool active = false); /// "Partial creation" constructor used during file input LeafNode(PartialCreate, const Coord& xyz, bool value = false, bool active = false); /// Deep copy constructor LeafNode(const LeafNode&); /// Deep assignment operator LeafNode& operator=(const LeafNode&) = default; /// Value conversion copy constructor template<typename OtherValueType> explicit LeafNode(const LeafNode<OtherValueType, Log2Dim>& other); /// Topology copy constructor template<typename ValueType> LeafNode(const LeafNode<ValueType, Log2Dim>& other, TopologyCopy); //@{ /// @brief Topology copy constructor /// @note This variant exists mainly to enable template instantiation. template<typename ValueType> LeafNode(const LeafNode<ValueType, Log2Dim>& other, bool offValue, bool onValue, TopologyCopy); template<typename ValueType> LeafNode(const LeafNode<ValueType, Log2Dim>& other, bool background, TopologyCopy); //@} /// Destructor ~LeafNode(); // // Statistics // /// Return log2 of the size of the buffer storage. static Index log2dim() { return Log2Dim; } /// Return the number of voxels in each dimension. 
static Index dim() { return DIM; } static Index size() { return SIZE; } static Index numValues() { return SIZE; } static Index getLevel() { return LEVEL; } static void getNodeLog2Dims(std::vector<Index>& dims) { dims.push_back(Log2Dim); } static Index getChildDim() { return 1; } static Index32 leafCount() { return 1; } /// no-op void nodeCount(std::vector<Index32> &) const {} static Index32 nonLeafCount() { return 0; } /// Return the number of active voxels. Index64 onVoxelCount() const { return mValueMask.countOn(); } /// Return the number of inactive voxels. Index64 offVoxelCount() const { return mValueMask.countOff(); } Index64 onLeafVoxelCount() const { return onVoxelCount(); } Index64 offLeafVoxelCount() const { return offVoxelCount(); } static Index64 onTileCount() { return 0; } static Index64 offTileCount() { return 0; } /// Return @c true if this node has no active voxels. bool isEmpty() const { return mValueMask.isOff(); } /// Return @c true if this node only contains active voxels. bool isDense() const { return mValueMask.isOn(); } /// @brief Return @c true if memory for this node's buffer has been allocated. /// @details Currently, boolean leaf nodes don't support partial creation, /// so this always returns @c true. bool isAllocated() const { return true; } /// @brief Allocate memory for this node's buffer if it has not already been allocated. /// @details Currently, boolean leaf nodes don't support partial creation, /// so this has no effect. bool allocate() { return true; } /// Return the memory in bytes occupied by this node. Index64 memUsage() const; /// Expand the given bounding box so that it includes this leaf node's active voxels. /// If visitVoxels is false this LeafNode will be approximated as dense, i.e. with all /// voxels active. Else the individual active voxels are visited to produce a tight bbox. 
void evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels = true) const; /// @brief Return the bounding box of this node, i.e., the full index space /// spanned by this leaf node. CoordBBox getNodeBoundingBox() const { return CoordBBox::createCube(mOrigin, DIM); } /// Set the grid index coordinates of this node's local origin. void setOrigin(const Coord& origin) { mOrigin = origin; } //@{ /// Return the grid index coordinates of this node's local origin. const Coord& origin() const { return mOrigin; } void getOrigin(Coord& origin) const { origin = mOrigin; } void getOrigin(Int32& x, Int32& y, Int32& z) const { mOrigin.asXYZ(x, y, z); } //@} /// Return the linear table offset of the given global or local coordinates. static Index coordToOffset(const Coord& xyz); /// @brief Return the local coordinates for a linear table offset, /// where offset 0 has coordinates (0, 0, 0). static Coord offsetToLocalCoord(Index n); /// Return the global coordinates for a linear table offset. Coord offsetToGlobalCoord(Index n) const; /// Return a string representation of this node. std::string str() const; /// @brief Return @c true if the given node (which may have a different @c ValueType /// than this node) has the same active value topology as this node. template<typename OtherType, Index OtherLog2Dim> bool hasSameTopology(const LeafNode<OtherType, OtherLog2Dim>* other) const; /// Check for buffer equivalence by value. bool operator==(const LeafNode&) const; bool operator!=(const LeafNode&) const; // // Buffer management // /// @brief Exchange this node's data buffer with the given data buffer /// without changing the active states of the values. void swap(Buffer& other) { mBuffer.swap(other); } const Buffer& buffer() const { return mBuffer; } Buffer& buffer() { return mBuffer; } // // I/O methods // /// Read in just the topology. void readTopology(std::istream&, bool fromHalf = false); /// Write out just the topology. 
void writeTopology(std::ostream&, bool toHalf = false) const; /// Read in the topology and the origin. void readBuffers(std::istream&, bool fromHalf = false); void readBuffers(std::istream& is, const CoordBBox&, bool fromHalf = false); /// Write out the topology and the origin. void writeBuffers(std::ostream&, bool toHalf = false) const; // // Accessor methods // /// Return the value of the voxel at the given coordinates. const bool& getValue(const Coord& xyz) const; /// Return the value of the voxel at the given offset. const bool& getValue(Index offset) const; /// @brief Return @c true if the voxel at the given coordinates is active. /// @param xyz the coordinates of the voxel to be probed /// @param[out] val the value of the voxel at the given coordinates bool probeValue(const Coord& xyz, bool& val) const; /// Return the level (0) at which leaf node values reside. static Index getValueLevel(const Coord&) { return LEVEL; } /// Set the active state of the voxel at the given coordinates but don't change its value. void setActiveState(const Coord& xyz, bool on); /// Set the active state of the voxel at the given offset but don't change its value. void setActiveState(Index offset, bool on) { assert(offset<SIZE); mValueMask.set(offset, on); } /// Set the value of the voxel at the given coordinates but don't change its active state. void setValueOnly(const Coord& xyz, bool val); /// Set the value of the voxel at the given offset but don't change its active state. void setValueOnly(Index offset, bool val) { assert(offset<SIZE); mBuffer.setValue(offset,val); } /// Mark the voxel at the given coordinates as inactive but don't change its value. void setValueOff(const Coord& xyz) { mValueMask.setOff(this->coordToOffset(xyz)); } /// Mark the voxel at the given offset as inactive but don't change its value. 
void setValueOff(Index offset) { assert(offset < SIZE); mValueMask.setOff(offset); } /// Set the value of the voxel at the given coordinates and mark the voxel as inactive. void setValueOff(const Coord& xyz, bool val); /// Set the value of the voxel at the given offset and mark the voxel as inactive. void setValueOff(Index offset, bool val); /// Mark the voxel at the given coordinates as active but don't change its value. void setValueOn(const Coord& xyz) { mValueMask.setOn(this->coordToOffset(xyz)); } /// Mark the voxel at the given offset as active but don't change its value. void setValueOn(Index offset) { assert(offset < SIZE); mValueMask.setOn(offset); } /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValueOn(const Coord& xyz, bool val); /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValue(const Coord& xyz, bool val) { this->setValueOn(xyz, val); } /// Set the value of the voxel at the given offset and mark the voxel as active. void setValueOn(Index offset, bool val); /// @brief Apply a functor to the value of the voxel at the given offset /// and mark the voxel as active. template<typename ModifyOp> void modifyValue(Index offset, const ModifyOp& op); /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. template<typename ModifyOp> void modifyValue(const Coord& xyz, const ModifyOp& op); /// Apply a functor to the voxel at the given coordinates. template<typename ModifyOp> void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op); /// Mark all voxels as active but don't change their values. void setValuesOn() { mValueMask.setOn(); } /// Mark all voxels as inactive but don't change their values. void setValuesOff() { mValueMask.setOff(); } /// Return @c true if the voxel at the given coordinates is active. 
bool isValueOn(const Coord& xyz) const { return mValueMask.isOn(this->coordToOffset(xyz)); } /// Return @c true if the voxel at the given offset is active. bool isValueOn(Index offset) const { assert(offset < SIZE); return mValueMask.isOn(offset); } /// Return @c false since leaf nodes never contain tiles. static bool hasActiveTiles() { return false; } /// Set all voxels that lie outside the given axis-aligned box to the background. void clip(const CoordBBox&, bool background); /// Set all voxels within an axis-aligned box to the specified value and active state. void fill(const CoordBBox& bbox, bool value, bool active = true); /// Set all voxels within an axis-aligned box to the specified value and active state. void denseFill(const CoordBBox& bbox, bool val, bool on = true) { this->fill(bbox, val, on); } /// Set all voxels to the specified value but don't change their active states. void fill(const bool& value); /// Set all voxels to the specified value and active state. void fill(const bool& value, bool active); /// @brief Copy into a dense grid the values of the voxels that lie within /// a given bounding box. /// /// @param bbox inclusive bounding box of the voxels to be copied into the dense grid /// @param dense dense grid with a stride in @e z of one (see tools::Dense /// in tools/Dense.h for the required API) /// /// @note @a bbox is assumed to be identical to or contained in the coordinate domains /// of both the dense grid and this node, i.e., no bounds checking is performed. /// @note Consider using tools::CopyToDense in tools/Dense.h /// instead of calling this method directly. template<typename DenseT> void copyToDense(const CoordBBox& bbox, DenseT& dense) const; /// @brief Copy from a dense grid into this node the values of the voxels /// that lie within a given bounding box. /// @details Only values that are different (by more than the given tolerance) /// from the background value will be active. 
Other values are inactive /// and truncated to the background value. /// /// @param bbox inclusive bounding box of the voxels to be copied into this node /// @param dense dense grid with a stride in @e z of one (see tools::Dense /// in tools/Dense.h for the required API) /// @param background background value of the tree that this node belongs to /// @param tolerance tolerance within which a value equals the background value /// /// @note @a bbox is assumed to be identical to or contained in the coordinate domains /// of both the dense grid and this node, i.e., no bounds checking is performed. /// @note Consider using tools::CopyFromDense in tools/Dense.h /// instead of calling this method directly. template<typename DenseT> void copyFromDense(const CoordBBox& bbox, const DenseT& dense, bool background, bool tolerance); /// @brief Return the value of the voxel at the given coordinates. /// @note Used internally by ValueAccessor. template<typename AccessorT> const bool& getValueAndCache(const Coord& xyz, AccessorT&) const {return this->getValue(xyz);} /// @brief Return @c true if the voxel at the given coordinates is active. /// @note Used internally by ValueAccessor. template<typename AccessorT> bool isValueOnAndCache(const Coord& xyz, AccessorT&) const { return this->isValueOn(xyz); } /// @brief Change the value of the voxel at the given coordinates and mark it as active. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setValueAndCache(const Coord& xyz, bool val, AccessorT&) { this->setValueOn(xyz, val); } /// @brief Change the value of the voxel at the given coordinates /// but preserve its state. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setValueOnlyAndCache(const Coord& xyz, bool val, AccessorT&) {this->setValueOnly(xyz,val);} /// @brief Change the value of the voxel at the given coordinates and mark it as inactive. /// @note Used internally by ValueAccessor. 
template<typename AccessorT> void setValueOffAndCache(const Coord& xyz, bool value, AccessorT&) { this->setValueOff(xyz, value); } /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. /// @note Used internally by ValueAccessor. template<typename ModifyOp, typename AccessorT> void modifyValueAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&) { this->modifyValue(xyz, op); } /// Apply a functor to the voxel at the given coordinates. /// @note Used internally by ValueAccessor. template<typename ModifyOp, typename AccessorT> void modifyValueAndActiveStateAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&) { this->modifyValueAndActiveState(xyz, op); } /// @brief Set the active state of the voxel at the given coordinates /// without changing its value. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setActiveStateAndCache(const Coord& xyz, bool on, AccessorT&) { this->setActiveState(xyz, on); } /// @brief Return @c true if the voxel at the given coordinates is active /// and return the voxel value in @a val. /// @note Used internally by ValueAccessor. template<typename AccessorT> bool probeValueAndCache(const Coord& xyz, bool& val, AccessorT&) const { return this->probeValue(xyz, val); } /// @brief Return the LEVEL (=0) at which leaf node values reside. /// @note Used internally by ValueAccessor. template<typename AccessorT> static Index getValueLevelAndCache(const Coord&, AccessorT&) { return LEVEL; } /// @brief Return a const reference to the first entry in the buffer. /// @note Since it's actually a reference to a static data member /// it should not be converted to a non-const pointer! const bool& getFirstValue() const { if (mValueMask.isOn(0)) return Buffer::sOn; else return Buffer::sOff; } /// @brief Return a const reference to the last entry in the buffer. 
/// @note Since it's actually a reference to a static data member /// it should not be converted to a non-const pointer! const bool& getLastValue() const { if (mValueMask.isOn(SIZE-1)) return Buffer::sOn; else return Buffer::sOff; } /// Return @c true if all of this node's voxels have the same active state /// and are equal to within the given tolerance, and return the value in /// @a constValue and the active state in @a state. bool isConstant(bool& constValue, bool& state, bool tolerance = 0) const; /// @brief Computes the median value of all the active and inactive voxels in this node. /// @return The median value. /// /// @details The median for boolean values is defined as the mode /// of the values, i.e. the value that occurs most often. bool medianAll() const; /// @brief Computes the median value of all the active voxels in this node. /// @return The number of active voxels. /// @param value Updated with the median value of all the active voxels. /// /// @details The median for boolean values is defined as the mode /// of the values, i.e. the value that occurs most often. Index medianOn(ValueType &value) const; /// @brief Computes the median value of all the inactive voxels in this node. /// @return The number of inactive voxels. /// @param value Updated with the median value of all the inactive voxels. /// /// @details The median for boolean values is defined as the mode /// of the values, i.e. the value that occurs most often. Index medianOff(ValueType &value) const; /// Return @c true if all of this node's values are inactive. bool isInactive() const { return mValueMask.isOff(); } void resetBackground(bool oldBackground, bool newBackground); void negate() { mBuffer.mData.toggle(); } template<MergePolicy Policy> void merge(const LeafNode& other, bool bg = false, bool otherBG = false); template<MergePolicy Policy> void merge(bool tileValue, bool tileActive); /// @brief No-op /// @details This function exists only to enable template instantiation. 
void voxelizeActiveTiles(bool = true) {} /// @brief Union this node's set of active values with the active values /// of the other node, whose @c ValueType may be different. So a /// resulting voxel will be active if either of the original voxels /// were active. /// /// @note This operation modifies only active states, not values. template<typename OtherType> void topologyUnion(const LeafNode<OtherType, Log2Dim>& other); /// @brief Intersect this node's set of active values with the active values /// of the other node, whose @c ValueType may be different. So a /// resulting voxel will be active only if both of the original voxels /// were active. /// /// @details The last dummy argument is required to match the signature /// for InternalNode::topologyIntersection. /// /// @note This operation modifies only active states, not /// values. Also note that this operation can result in all voxels /// being inactive so consider subsequnetly calling prune. template<typename OtherType> void topologyIntersection(const LeafNode<OtherType, Log2Dim>& other, const bool&); /// @brief Difference this node's set of active values with the active values /// of the other node, whose @c ValueType may be different. So a /// resulting voxel will be active only if the original voxel is /// active in this LeafNode and inactive in the other LeafNode. /// /// @details The last dummy argument is required to match the signature /// for InternalNode::topologyDifference. /// /// @note This operation modifies only active states, not values. /// Also, because it can deactivate all of this node's voxels, /// consider subsequently calling prune. 
template<typename OtherType> void topologyDifference(const LeafNode<OtherType, Log2Dim>& other, const bool&); template<typename CombineOp> void combine(const LeafNode& other, CombineOp& op); template<typename CombineOp> void combine(bool, bool valueIsActive, CombineOp& op); template<typename CombineOp, typename OtherType /*= bool*/> void combine2(const LeafNode& other, const OtherType&, bool valueIsActive, CombineOp&); template<typename CombineOp, typename OtherNodeT /*= LeafNode*/> void combine2(bool, const OtherNodeT& other, bool valueIsActive, CombineOp&); template<typename CombineOp, typename OtherNodeT /*= LeafNode*/> void combine2(const LeafNode& b0, const OtherNodeT& b1, CombineOp&); /// @brief Calls the templated functor BBoxOp with bounding box information. /// An additional level argument is provided to the callback. /// /// @note The bounding boxes are guarenteed to be non-overlapping. template<typename BBoxOp> void visitActiveBBox(BBoxOp&) const; template<typename VisitorOp> void visit(VisitorOp&); template<typename VisitorOp> void visit(VisitorOp&) const; template<typename OtherLeafNodeType, typename VisitorOp> void visit2Node(OtherLeafNodeType& other, VisitorOp&); template<typename OtherLeafNodeType, typename VisitorOp> void visit2Node(OtherLeafNodeType& other, VisitorOp&) const; template<typename IterT, typename VisitorOp> void visit2(IterT& otherIter, VisitorOp&, bool otherIsLHS = false); template<typename IterT, typename VisitorOp> void visit2(IterT& otherIter, VisitorOp&, bool otherIsLHS = false) const; //@{ /// This function exists only to enable template instantiation. 
void prune(const ValueType& /*tolerance*/ = zeroVal<ValueType>()) {} void addLeaf(LeafNode*) {} template<typename AccessorT> void addLeafAndCache(LeafNode*, AccessorT&) {} template<typename NodeT> NodeT* stealNode(const Coord&, const ValueType&, bool) { return nullptr; } template<typename NodeT> NodeT* probeNode(const Coord&) { return nullptr; } template<typename NodeT> const NodeT* probeConstNode(const Coord&) const { return nullptr; } template<typename ArrayT> void getNodes(ArrayT&) const {} template<typename ArrayT> void stealNodes(ArrayT&, const ValueType&, bool) {} //@} void addTile(Index level, const Coord&, bool val, bool active); void addTile(Index offset, bool val, bool active); template<typename AccessorT> void addTileAndCache(Index level, const Coord&, bool val, bool active, AccessorT&); //@{ /// @brief Return a pointer to this node. LeafNode* touchLeaf(const Coord&) { return this; } template<typename AccessorT> LeafNode* touchLeafAndCache(const Coord&, AccessorT&) { return this; } LeafNode* probeLeaf(const Coord&) { return this; } template<typename AccessorT> LeafNode* probeLeafAndCache(const Coord&, AccessorT&) { return this; } template<typename NodeT, typename AccessorT> NodeT* probeNodeAndCache(const Coord&, AccessorT&) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (!(std::is_same<NodeT, LeafNode>::value)) return nullptr; return reinterpret_cast<NodeT*>(this); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } //@} //@{ /// @brief Return a @const pointer to this node. 
// Leaf probing: a leaf node is its own leaf, so these lookups trivially return this.
const LeafNode* probeLeaf(const Coord&) const { return this; }
template<typename AccessorT>
const LeafNode* probeLeafAndCache(const Coord&, AccessorT&) const { return this; }
const LeafNode* probeConstLeaf(const Coord&) const { return this; }
template<typename AccessorT>
const LeafNode* probeConstLeafAndCache(const Coord&, AccessorT&) const { return this; }
/// Return this node cast to NodeT if NodeT is LeafNode, otherwise return null.
template<typename NodeT, typename AccessorT>
const NodeT* probeConstNodeAndCache(const Coord&, AccessorT&) const
{
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    if (!(std::is_same<NodeT, LeafNode>::value)) return nullptr;
    return reinterpret_cast<const NodeT*>(this);
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}
//@}

//
// Iterators
//
protected:
using MaskOnIter = typename NodeMaskType::OnIterator;
using MaskOffIter = typename NodeMaskType::OffIterator;
using MaskDenseIter = typename NodeMaskType::DenseIterator;

/// @brief Iterator over the voxel values selected by a mask iterator
/// (on, off or dense); reads and writes go through the parent node.
template<typename MaskIterT, typename NodeT, typename ValueT>
struct ValueIter:
    // Derives from SparseIteratorBase, but can also be used as a dense iterator,
    // if MaskIterT is a dense mask iterator type.
    public SparseIteratorBase<MaskIterT, ValueIter<MaskIterT, NodeT, ValueT>, NodeT, ValueT>
{
    using BaseT = SparseIteratorBase<MaskIterT, ValueIter, NodeT, ValueT>;

    ValueIter() {}
    ValueIter(const MaskIterT& iter, NodeT* parent): BaseT(iter, parent) {}

    const bool& getItem(Index pos) const { return this->parent().getValue(pos); }
    const bool& getValue() const { return this->getItem(this->pos()); }

    // Note: setItem() can't be called on const iterators.
    void setItem(Index pos, bool value) const { this->parent().setValueOnly(pos, value); }
    // Note: setValue() can't be called on const iterators.
    void setValue(bool value) const { this->setItem(this->pos(), value); }

    // Note: modifyItem() can't be called on const iterators.
    template<typename ModifyOp>
    void modifyItem(Index n, const ModifyOp& op) const { this->parent().modifyValue(n, op); }
    // Note: modifyValue() can't be called on const iterators.
    template<typename ModifyOp>
    void modifyValue(const ModifyOp& op) const { this->modifyItem(this->pos(), op); }
};

/// Leaf nodes have no children, so their child iterators have no get/set accessors.
template<typename MaskIterT, typename NodeT>
struct ChildIter:
    public SparseIteratorBase<MaskIterT, ChildIter<MaskIterT, NodeT>, NodeT, bool>
{
    ChildIter() {}
    ChildIter(const MaskIterT& iter, NodeT* parent): SparseIteratorBase<
        MaskIterT, ChildIter<MaskIterT, NodeT>, NodeT, bool>(iter, parent) {}
};

/// @brief Dense iterator over every voxel of the node; at the leaf level
/// getItem() never yields a child (always returns false with a null child).
template<typename NodeT, typename ValueT>
struct DenseIter: public DenseIteratorBase<
    MaskDenseIter, DenseIter<NodeT, ValueT>, NodeT, /*ChildT=*/void, ValueT>
{
    using BaseT = DenseIteratorBase<MaskDenseIter, DenseIter, NodeT, void, ValueT>;
    using NonConstValueT = typename BaseT::NonConstValueType;

    DenseIter() {}
    DenseIter(const MaskDenseIter& iter, NodeT* parent): BaseT(iter, parent) {}

    bool getItem(Index pos, void*& child, NonConstValueT& value) const
    {
        value = this->parent().getValue(pos);
        child = nullptr;
        return false; // no child
    }

    // Note: setItem() can't be called on const iterators.
    //void setItem(Index pos, void* child) const {}

    // Note: unsetItem() can't be called on const iterators.
    void unsetItem(Index pos, const ValueT& val) const {this->parent().setValueOnly(pos, val);}
};

public:
using ValueOnIter = ValueIter<MaskOnIter, LeafNode, const bool>;
using ValueOnCIter = ValueIter<MaskOnIter, const LeafNode, const bool>;
using ValueOffIter = ValueIter<MaskOffIter, LeafNode, const bool>;
using ValueOffCIter = ValueIter<MaskOffIter, const LeafNode, const bool>;
using ValueAllIter = ValueIter<MaskDenseIter, LeafNode, const bool>;
using ValueAllCIter = ValueIter<MaskDenseIter, const LeafNode, const bool>;
using ChildOnIter = ChildIter<MaskOnIter, LeafNode>;
using ChildOnCIter = ChildIter<MaskOnIter, const LeafNode>;
using ChildOffIter = ChildIter<MaskOffIter, LeafNode>;
using ChildOffCIter = ChildIter<MaskOffIter, const LeafNode>;
using ChildAllIter = DenseIter<LeafNode, bool>;
using ChildAllCIter = DenseIter<const LeafNode, const bool>;

// Begin iterators over active ("on"), inactive ("off") and all voxel values.
ValueOnCIter cbeginValueOn() const { return ValueOnCIter(mValueMask.beginOn(), this); }
ValueOnCIter beginValueOn() const { return ValueOnCIter(mValueMask.beginOn(), this); }
ValueOnIter beginValueOn() { return ValueOnIter(mValueMask.beginOn(), this); }
ValueOffCIter cbeginValueOff() const { return ValueOffCIter(mValueMask.beginOff(), this); }
ValueOffCIter beginValueOff() const { return ValueOffCIter(mValueMask.beginOff(), this); }
ValueOffIter beginValueOff() { return ValueOffIter(mValueMask.beginOff(), this); }
ValueAllCIter cbeginValueAll() const { return ValueAllCIter(mValueMask.beginDense(), this); }
ValueAllCIter beginValueAll() const { return ValueAllCIter(mValueMask.beginDense(), this); }
ValueAllIter beginValueAll() { return ValueAllIter(mValueMask.beginDense(), this); }

// End iterators over voxel values.
ValueOnCIter cendValueOn() const { return ValueOnCIter(mValueMask.endOn(), this); }
ValueOnCIter endValueOn() const { return ValueOnCIter(mValueMask.endOn(), this); }
ValueOnIter endValueOn() { return ValueOnIter(mValueMask.endOn(), this); }
ValueOffCIter cendValueOff() const { return ValueOffCIter(mValueMask.endOff(), this); }
ValueOffCIter endValueOff() const { return ValueOffCIter(mValueMask.endOff(), this); }
ValueOffIter endValueOff() { return ValueOffIter(mValueMask.endOff(), this); }
ValueAllCIter cendValueAll() const { return ValueAllCIter(mValueMask.endDense(), this); }
ValueAllCIter endValueAll() const { return ValueAllCIter(mValueMask.endDense(), this); }
ValueAllIter endValueAll() { return ValueAllIter(mValueMask.endDense(), this); }

// Note that [c]beginChildOn() and [c]beginChildOff() actually return end iterators,
// because leaf nodes have no children.
ChildOnCIter cbeginChildOn() const { return ChildOnCIter(mValueMask.endOn(), this); }
ChildOnCIter beginChildOn() const { return ChildOnCIter(mValueMask.endOn(), this); }
ChildOnIter beginChildOn() { return ChildOnIter(mValueMask.endOn(), this); }
ChildOffCIter cbeginChildOff() const { return ChildOffCIter(mValueMask.endOff(), this); }
ChildOffCIter beginChildOff() const { return ChildOffCIter(mValueMask.endOff(), this); }
ChildOffIter beginChildOff() { return ChildOffIter(mValueMask.endOff(), this); }
ChildAllCIter cbeginChildAll() const { return ChildAllCIter(mValueMask.beginDense(), this); }
ChildAllCIter beginChildAll() const { return ChildAllCIter(mValueMask.beginDense(), this); }
ChildAllIter beginChildAll() { return ChildAllIter(mValueMask.beginDense(), this); }

// End iterators over (nonexistent) child nodes.
ChildOnCIter cendChildOn() const { return ChildOnCIter(mValueMask.endOn(), this); }
ChildOnCIter endChildOn() const { return ChildOnCIter(mValueMask.endOn(), this); }
ChildOnIter endChildOn() { return ChildOnIter(mValueMask.endOn(), this); }
ChildOffCIter cendChildOff() const { return ChildOffCIter(mValueMask.endOff(), this); }
ChildOffCIter endChildOff() const { return ChildOffCIter(mValueMask.endOff(), this); }
ChildOffIter endChildOff() { return ChildOffIter(mValueMask.endOff(), this); }
ChildAllCIter cendChildAll() const { return ChildAllCIter(mValueMask.endDense(), this); }
ChildAllCIter endChildAll() const { return ChildAllCIter(mValueMask.endDense(), this); }
ChildAllIter endChildAll() { return ChildAllIter(mValueMask.endDense(), this); }

//
// Mask accessors
//
bool isValueMaskOn(Index n) const { return mValueMask.isOn(n); }
bool isValueMaskOn() const { return mValueMask.isOn(); }
bool isValueMaskOff(Index n) const { return mValueMask.isOff(n); }
bool isValueMaskOff() const { return mValueMask.isOff(); }
const NodeMaskType& getValueMask() const { return mValueMask; }
const NodeMaskType& valueMask() const { return mValueMask; }
NodeMaskType& getValueMask() { return mValueMask; }
void setValueMask(const NodeMaskType& mask) { mValueMask = mask; }
bool isChildMaskOn(Index) const { return false; } // leaf nodes have no children
bool isChildMaskOff(Index) const { return true; }
bool isChildMaskOff() const { return true; }

protected:
// Per-voxel active-state manipulation, used by the iterators above.
void setValueMask(Index n, bool on) { mValueMask.set(n, on); }
void setValueMaskOn(Index n) { mValueMask.setOn(n); }
void setValueMaskOff(Index n) { mValueMask.setOff(n); }

/// Compute the origin of the leaf node that contains the voxel with the given coordinates.
static void evalNodeOrigin(Coord& xyz) { xyz &= ~(DIM - 1); }

// Static helpers that implement the visit()/visit2() methods (defined out of line below).
template<typename NodeT, typename VisitorOp, typename ChildAllIterT>
static inline void doVisit(NodeT&, VisitorOp&);

template<typename NodeT, typename OtherNodeT, typename VisitorOp,
    typename ChildAllIterT, typename OtherChildAllIterT>
static inline void doVisit2Node(NodeT& self, OtherNodeT& other, VisitorOp&);

template<typename NodeT, typename VisitorOp,
    typename ChildAllIterT, typename OtherChildAllIterT>
static inline void doVisit2(NodeT& self, OtherChildAllIterT&, VisitorOp&, bool otherIsLHS);

/// Bitmask that determines which voxels are active
NodeMaskType mValueMask;
/// Bitmask representing the values of voxels
Buffer mBuffer;
/// Global grid index coordinates (x,y,z) of the local origin of this node
Coord mOrigin;

private:
/// @brief During topology-only construction, access is needed
/// to protected/private members of other template instances.
template<typename, Index> friend class LeafNode;

friend struct ValueIter<MaskOnIter, LeafNode, bool>;
friend struct ValueIter<MaskOffIter, LeafNode, bool>;
friend struct ValueIter<MaskDenseIter, LeafNode, bool>;
friend struct ValueIter<MaskOnIter, const LeafNode, bool>;
friend struct ValueIter<MaskOffIter, const LeafNode, bool>;
friend struct ValueIter<MaskDenseIter, const LeafNode, bool>;

//@{
/// Allow iterators to call mask accessor methods (see below).
/// @todo Make mask accessors public?
friend class IteratorBase<MaskOnIter, LeafNode>;
friend class IteratorBase<MaskOffIter, LeafNode>;
friend class IteratorBase<MaskDenseIter, LeafNode>;
//@}

}; // class LeafNode<bool>


////////////////////////////////////////


// Default constructor: origin at (0,0,0); mask and buffer are default-constructed.
template<Index Log2Dim>
inline
LeafNode<bool, Log2Dim>::LeafNode()
    : mOrigin(0, 0, 0)
{
}

// Construct with a uniform value and active state; the origin is snapped
// to the leaf grid by masking off the low DIM-1 bits of each coordinate.
template<Index Log2Dim>
inline
LeafNode<bool, Log2Dim>::LeafNode(const Coord& xyz, bool value, bool active)
    : mValueMask(active)
    , mBuffer(value)
    , mOrigin(xyz & (~(DIM - 1)))
{
}

template<Index Log2Dim>
inline
LeafNode<bool, Log2Dim>::LeafNode(PartialCreate, const Coord& xyz, bool value, bool active)
    : mValueMask(active)
    , mBuffer(value)
    , mOrigin(xyz & (~(DIM - 1)))
{
    /// @todo For now, this is identical to the non-PartialCreate constructor.
    /// Consider modifying the Buffer class to allow it to be constructed
    /// without allocating a bitmask.
}

template<Index Log2Dim>
inline
LeafNode<bool, Log2Dim>::LeafNode(const LeafNode &other)
    : mValueMask(other.valueMask())
    , mBuffer(other.mBuffer)
    , mOrigin(other.mOrigin)
{
}


// Copy-construct from a leaf node with the same configuration but a different ValueType.
// Each source value is converted to bool via bool(val).
template<Index Log2Dim>
template<typename ValueT>
inline
LeafNode<bool, Log2Dim>::LeafNode(const LeafNode<ValueT, Log2Dim>& other)
    : mValueMask(other.valueMask())
    , mOrigin(other.origin())
{
    struct Local {
        /// @todo Consider using a value conversion functor passed as an argument instead.
        static inline bool convertValue(const ValueT& val) { return bool(val); }
    };

    for (Index i = 0; i < SIZE; ++i) {
        mBuffer.setValue(i, Local::convertValue(other.mBuffer[i]));
    }
}

// Topology copy: copy the active-state mask and fill every voxel value
// with the given background.
template<Index Log2Dim>
template<typename ValueT>
inline
LeafNode<bool, Log2Dim>::LeafNode(const LeafNode<ValueT, Log2Dim>& other,
    bool background, TopologyCopy)
    : mValueMask(other.valueMask())
    , mBuffer(background)
    , mOrigin(other.origin())
{
}

// Topology copy: the buffer is initialized from the source mask, so each
// voxel's value equals its active state.
template<Index Log2Dim>
template<typename ValueT>
inline
LeafNode<bool, Log2Dim>::LeafNode(const LeafNode<ValueT, Log2Dim>& other, TopologyCopy)
    : mValueMask(other.valueMask())
    , mBuffer(other.valueMask())// value = active state
    , mOrigin(other.origin())
{
}

// Topology copy with distinct values for inactive (offValue) and active (onValue) voxels.
// The buffer starts as a copy of the mask (active => 1), then is adjusted:
//   offValue=1, onValue=0  ->  toggle all bits (active => 0, inactive => 1)
//   offValue=1, onValue=1  ->  set all bits on
//   offValue=0             ->  buffer already correct (inactive => 0, active => onValue via mask copy)
template<Index Log2Dim>
template<typename ValueT>
inline
LeafNode<bool, Log2Dim>::LeafNode(const LeafNode<ValueT, Log2Dim>& other,
    bool offValue, bool onValue, TopologyCopy)
    : mValueMask(other.valueMask())
    , mBuffer(other.valueMask())
    , mOrigin(other.origin())
{
    if (offValue) { if (!onValue) mBuffer.mData.toggle(); else mBuffer.mData.setOn(); }
}


template<Index Log2Dim>
inline
LeafNode<bool, Log2Dim>::~LeafNode()
{
}


////////////////////////////////////////


template<Index Log2Dim>
inline Index64
LeafNode<bool, Log2Dim>::memUsage() const
{
    // Use sizeof(*this) to capture alignment-related padding
    return sizeof(*this);
}


// Expand bbox to include this node's active voxels (or, if visitVoxels is
// false, the node's entire bounding box when it has any active voxel).
template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels) const
{
    CoordBBox this_bbox = this->getNodeBoundingBox();
    if (bbox.isInside(this_bbox)) return;//this LeafNode is already enclosed in the bbox
    if (ValueOnCIter iter = this->cbeginValueOn()) {//any active values?
        if (visitVoxels) {//use voxel granularity?
            this_bbox.reset();
            for(; iter; ++iter) this_bbox.expand(this->offsetToLocalCoord(iter.pos()));
            this_bbox.translate(this->origin());
        }
        bbox.expand(this_bbox);
    }
}


// Two leaf nodes have the same topology if they have the same dimensions
// and identical active-state masks.
template<Index Log2Dim>
template<typename OtherType, Index OtherLog2Dim>
inline bool
LeafNode<bool, Log2Dim>::hasSameTopology(const LeafNode<OtherType, OtherLog2Dim>* other) const
{
    assert(other);
    return (Log2Dim == OtherLog2Dim && mValueMask == other->getValueMask());
}


// Human-readable summary: origin followed by one '#' (active) or '.' (inactive) per voxel.
template<Index Log2Dim>
inline std::string
LeafNode<bool, Log2Dim>::str() const
{
    std::ostringstream ostr;
    ostr << "LeafNode @" << mOrigin << ": ";
    for (Index32 n = 0; n < SIZE; ++n) ostr << (mValueMask.isOn(n) ? '#' : '.');
    return ostr.str();
}


////////////////////////////////////////


// Map global coordinates to a linear offset: x in the high bits, then y, then z.
template<Index Log2Dim>
inline Index
LeafNode<bool, Log2Dim>::coordToOffset(const Coord& xyz)
{
    assert ((xyz[0] & (DIM-1u)) < DIM && (xyz[1] & (DIM-1u)) < DIM && (xyz[2] & (DIM-1u)) < DIM);
    return ((xyz[0] & (DIM-1u)) << 2*Log2Dim)
        + ((xyz[1] & (DIM-1u)) << Log2Dim)
        + (xyz[2] & (DIM-1u));
}


// Inverse of coordToOffset(): recover the node-local (x,y,z) from a linear offset.
template<Index Log2Dim>
inline Coord
LeafNode<bool, Log2Dim>::offsetToLocalCoord(Index n)
{
    assert(n < (1 << 3*Log2Dim));
    Coord xyz;
    xyz.setX(n >> 2*Log2Dim);
    n &= ((1 << 2*Log2Dim) - 1);
    xyz.setY(n >> Log2Dim);
    xyz.setZ(n & ((1 << Log2Dim) - 1));
    return xyz;
}


template<Index Log2Dim>
inline Coord
LeafNode<bool, Log2Dim>::offsetToGlobalCoord(Index n) const
{
    return (this->offsetToLocalCoord(n) + this->origin());
}


////////////////////////////////////////


template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::readTopology(std::istream& is, bool /*fromHalf*/)
{
    mValueMask.load(is);
}


template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::writeTopology(std::ostream& os, bool /*toHalf*/) const
{
    mValueMask.save(os);
}


template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::readBuffers(std::istream& is, const CoordBBox& clipBBox, bool fromHalf)
{
    // Boolean LeafNodes don't currently implement lazy loading.
    // Instead, load the full buffer, then clip it.

    this->readBuffers(is, fromHalf);

    // Get this tree's background value.
    bool background = false;
    if (const void* bgPtr = io::getGridBackgroundValuePtr(is)) {
        background = *static_cast<const bool*>(bgPtr);
    }
    this->clip(clipBBox, background);
}


// Stream layout (as written by writeBuffers() below): value mask, origin,
// then either a bitmask (new format) or one or more bool arrays (legacy format).
template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::readBuffers(std::istream& is, bool /*fromHalf*/)
{
    // Read in the value mask.
    mValueMask.load(is);
    // Read in the origin.
    is.read(reinterpret_cast<char*>(&mOrigin), sizeof(Coord::ValueType) * 3);

    if (io::getFormatVersion(is) >= OPENVDB_FILE_VERSION_BOOL_LEAF_OPTIMIZATION) {
        // Read in the mask for the voxel values.
        mBuffer.mData.load(is);
    } else {
        // Older files stored one or more bool arrays.

        // Read in the number of buffers, which should now always be one.
        int8_t numBuffers = 0;
        is.read(reinterpret_cast<char*>(&numBuffers), sizeof(int8_t));

        // Read in the buffer.
        // (Note: prior to the bool leaf optimization, buffers were always compressed.)
        std::unique_ptr<bool[]> buf{new bool[SIZE]};
        io::readData<bool>(is, buf.get(), SIZE, /*isCompressed=*/true);

        // Transfer values to mBuffer.
        mBuffer.mData.setOff();
        for (Index i = 0; i < SIZE; ++i) {
            if (buf[i]) mBuffer.mData.setOn(i);
        }

        if (numBuffers > 1) {
            // Read in and discard auxiliary buffers that were created with
            // earlier versions of the library.
            for (int i = 1; i < numBuffers; ++i) {
                io::readData<bool>(is, buf.get(), SIZE, /*isCompressed=*/true);
            }
        }
    }
}


template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::writeBuffers(std::ostream& os, bool /*toHalf*/) const
{
    // Write out the value mask.
    mValueMask.save(os);
    // Write out the origin.
    os.write(reinterpret_cast<const char*>(&mOrigin), sizeof(Coord::ValueType) * 3);
    // Write out the voxel values.
    mBuffer.mData.save(os);
}


////////////////////////////////////////


template<Index Log2Dim>
inline bool
LeafNode<bool, Log2Dim>::operator==(const LeafNode& other) const
{
    return mOrigin == other.mOrigin &&
           mValueMask == other.valueMask() &&
           mBuffer == other.mBuffer;
}


template<Index Log2Dim>
inline bool
LeafNode<bool, Log2Dim>::operator!=(const LeafNode& other) const
{
    return !(this->operator==(other));
}


////////////////////////////////////////


// Return true if all voxels have the same active state and (unless tolerance
// is nonzero) the same value; report that state and value via the out-params.
template<Index Log2Dim>
inline bool
LeafNode<bool, Log2Dim>::isConstant(bool& constValue, bool& state, bool tolerance) const
{
    if (!mValueMask.isConstant(state)) return false;

    // Note: if tolerance is true (i.e., 1), then all boolean values compare equal.
    if (!tolerance && !(mBuffer.mData.isOn() || mBuffer.mData.isOff())) return false;

    constValue = mBuffer.mData.isOn();
    return true;
}


////////////////////////////////////////


// Median over all voxels: true iff more than half of the values are true.
template<Index Log2Dim>
inline bool
LeafNode<bool, Log2Dim>::medianAll() const
{
    const Index countTrue = mBuffer.mData.countOn();
    return countTrue > (NUM_VALUES >> 1);
}


// Median restricted to active voxels; returns the active-voxel count.
template<Index Log2Dim>
inline Index
LeafNode<bool, Log2Dim>::medianOn(bool& state) const
{
    const NodeMaskType tmp = mBuffer.mData & mValueMask;//both true and active
    const Index countTrueOn = tmp.countOn(), countOn = mValueMask.countOn();
    state = countTrueOn > (NUM_VALUES >> 1);
    return countOn;
}


// Median restricted to inactive voxels; returns the inactive-voxel count.
template<Index Log2Dim>
inline Index
LeafNode<bool, Log2Dim>::medianOff(bool& state) const
{
    const NodeMaskType tmp = mBuffer.mData & (!mValueMask);//both true and inactive
    const Index countTrueOff = tmp.countOn(), countOff = mValueMask.countOff();
    state = countTrueOff > (NUM_VALUES >> 1);
    return countOff;
}


////////////////////////////////////////


// At the leaf level a "tile" is a single voxel, so addTile() just sets
// the voxel's value and active state (the level argument is ignored).
template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::addTile(Index /*level*/, const Coord& xyz, bool val, bool active)
{
    this->addTile(this->coordToOffset(xyz), val, active);
}

template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::addTile(Index offset, bool val, bool active)
{
    assert(offset < SIZE);
    this->setValueOnly(offset, val);
    this->setActiveState(offset, active);
}

template<Index Log2Dim>
template<typename AccessorT>
inline void
LeafNode<bool, Log2Dim>::addTileAndCache(Index level, const Coord& xyz,
    bool val, bool active, AccessorT&)
{
    this->addTile(level, xyz, val, active);
}


////////////////////////////////////////


// Return a reference to one of the two shared static bool values in Buffer
// (sOn/sOff), since the packed bitmask cannot yield a reference to a bit.
template<Index Log2Dim>
inline const bool&
LeafNode<bool, Log2Dim>::getValue(const Coord& xyz) const
{
    // This *CANNOT* use operator ? because Visual C++
    if (mBuffer.mData.isOn(this->coordToOffset(xyz))) return Buffer::sOn; else return Buffer::sOff;
}


template<Index Log2Dim>
inline const bool&
LeafNode<bool, Log2Dim>::getValue(Index offset) const
{
    assert(offset < SIZE);
    // This *CANNOT* use operator ? for Windows
    if (mBuffer.mData.isOn(offset)) return Buffer::sOn; else return Buffer::sOff;
}


// Report the voxel's value via val; the return value is its active state.
template<Index Log2Dim>
inline bool
LeafNode<bool, Log2Dim>::probeValue(const Coord& xyz, bool& val) const
{
    const Index offset = this->coordToOffset(xyz);
    val = mBuffer.mData.isOn(offset);
    return mValueMask.isOn(offset);
}


template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::setValueOn(const Coord& xyz, bool val)
{
    this->setValueOn(this->coordToOffset(xyz), val);
}


// Set the voxel's value and mark it active.
template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::setValueOn(Index offset, bool val)
{
    assert(offset < SIZE);
    mValueMask.setOn(offset);
    mBuffer.mData.set(offset, val);
}


template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::setValueOnly(const Coord& xyz, bool val)
{
    this->setValueOnly(this->coordToOffset(xyz), val);
}


template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::setActiveState(const Coord& xyz, bool on)
{
    mValueMask.set(this->coordToOffset(xyz), on);
}


template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::setValueOff(const Coord& xyz, bool val)
{
    this->setValueOff(this->coordToOffset(xyz), val);
}


// Set the voxel's value and mark it inactive.
template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::setValueOff(Index offset, bool val)
{
    assert(offset < SIZE);
    mValueMask.setOff(offset);
    mBuffer.mData.set(offset, val);
}


// Apply op to the voxel's value in place and mark the voxel active.
template<Index Log2Dim>
template<typename ModifyOp>
inline void
LeafNode<bool, Log2Dim>::modifyValue(Index offset, const ModifyOp& op)
{
    bool val = mBuffer.mData.isOn(offset);
    op(val);
    mBuffer.mData.set(offset, val);
    mValueMask.setOn(offset);
}


template<Index Log2Dim>
template<typename ModifyOp>
inline void
LeafNode<bool, Log2Dim>::modifyValue(const Coord& xyz, const ModifyOp& op)
{
    this->modifyValue(this->coordToOffset(xyz), op);
}


// Apply op to both the voxel's value and its active state.
template<Index Log2Dim>
template<typename ModifyOp>
inline void
LeafNode<bool, Log2Dim>::modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op)
{
    const Index offset = this->coordToOffset(xyz);
    bool val = mBuffer.mData.isOn(offset), state = mValueMask.isOn(offset);
    op(val, state);
    mBuffer.mData.set(offset, val);
    mValueMask.set(offset, state);
}


////////////////////////////////////////


// Replace the old background value of all inactive voxels with the new one;
// active ("foreground") voxels are left untouched.
template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::resetBackground(bool oldBackground, bool newBackground)
{
    if (newBackground != oldBackground) {
        // Flip mBuffer's background bits and zero its foreground bits.
        NodeMaskType bgMask = !(mBuffer.mData | mValueMask);
        // Overwrite mBuffer's background bits, leaving its foreground bits intact.
        mBuffer.mData = (mBuffer.mData & mValueMask) | bgMask;
    }
}


////////////////////////////////////////


// Merge another leaf: copy the other node's active voxels into voxels
// that are inactive here.  A no-op under the MERGE_NODES policy.
template<Index Log2Dim>
template<MergePolicy Policy>
inline void
LeafNode<bool, Log2Dim>::merge(const LeafNode& other, bool /*bg*/, bool /*otherBG*/)
{
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    if (Policy == MERGE_NODES) return;
    for (typename NodeMaskType::OnIterator iter = other.valueMask().beginOn(); iter; ++iter) {
        const Index n = iter.pos();
        if (mValueMask.isOff(n)) {
            mBuffer.mData.set(n, other.mBuffer.mData.isOn(n));
            mValueMask.setOn(n);
        }
    }
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}

// Merge an active tile value: only under MERGE_ACTIVE_STATES_AND_NODES,
// all inactive voxels take the tile value and every voxel becomes active.
template<Index Log2Dim>
template<MergePolicy Policy>
inline void
LeafNode<bool, Log2Dim>::merge(bool tileValue, bool tileActive)
{
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
    if (Policy != MERGE_ACTIVE_STATES_AND_NODES) return;
    if (!tileActive) return;
    // Replace all inactive values with the active tile value.
    if (tileValue) mBuffer.mData |= !mValueMask; // -0=>1, +0=>0, -1=>1, +1=>1 (-,+ = off,on)
    else mBuffer.mData &= mValueMask; // -0=>0, +0=>0, -1=>0, +1=>1
    mValueMask.setOn();
    OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
}


////////////////////////////////////////


// Topology operations combine only the active-state masks; values are untouched.
template<Index Log2Dim>
template<typename OtherType>
inline void
LeafNode<bool, Log2Dim>::topologyUnion(const LeafNode<OtherType, Log2Dim>& other)
{
    mValueMask |= other.valueMask();
}

template<Index Log2Dim>
template<typename OtherType>
inline void
LeafNode<bool, Log2Dim>::topologyIntersection(const LeafNode<OtherType, Log2Dim>& other,
    const bool&)
{
    mValueMask &= other.valueMask();
}

template<Index Log2Dim>
template<typename OtherType>
inline void
LeafNode<bool, Log2Dim>::topologyDifference(const LeafNode<OtherType, Log2Dim>& other,
    const bool&)
{
    mValueMask &= !other.valueMask();
}


////////////////////////////////////////


// Deactivate and set to background every voxel that lies outside clipBBox.
template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::clip(const CoordBBox& clipBBox, bool background)
{
    CoordBBox nodeBBox = this->getNodeBoundingBox();
    if (!clipBBox.hasOverlap(nodeBBox)) {
        // This node lies completely outside the clipping region.  Fill it with background tiles.
        this->fill(nodeBBox, background, /*active=*/false);
    } else if (clipBBox.isInside(nodeBBox)) {
        // This node lies completely inside the clipping region.  Leave it intact.
        return;
    }

    // This node isn't completely contained inside the clipping region.
    // Set any voxels that lie outside the region to the background value.

    // Construct a boolean mask that is on inside the clipping region and off outside it.
    NodeMaskType mask;
    nodeBBox.intersect(clipBBox);
    Coord xyz;
    int &x = xyz.x(), &y = xyz.y(), &z = xyz.z();
    for (x = nodeBBox.min().x(); x <= nodeBBox.max().x(); ++x) {
        for (y = nodeBBox.min().y(); y <= nodeBBox.max().y(); ++y) {
            for (z = nodeBBox.min().z(); z <= nodeBBox.max().z(); ++z) {
                mask.setOn(static_cast<Index32>(this->coordToOffset(xyz)));
            }
        }
    }

    // Set voxels that lie in the inactive region of the mask (i.e., outside
    // the clipping region) to the background value.
    for (MaskOffIter maskIter = mask.beginOff(); maskIter; ++maskIter) {
        this->setValueOff(maskIter.pos(), background);
    }
}


////////////////////////////////////////


// Set value and active state for every voxel inside the intersection of
// bbox with this node's bounding box.
template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::fill(const CoordBBox& bbox, bool value, bool active)
{
    auto clippedBBox = this->getNodeBoundingBox();
    clippedBBox.intersect(bbox);
    if (!clippedBBox) return;

    for (Int32 x = clippedBBox.min().x(); x <= clippedBBox.max().x(); ++x) {
        const Index offsetX = (x & (DIM-1u))<<2*Log2Dim;
        for (Int32 y = clippedBBox.min().y(); y <= clippedBBox.max().y(); ++y) {
            const Index offsetXY = offsetX + ((y & (DIM-1u))<< Log2Dim);
            for (Int32 z = clippedBBox.min().z(); z <= clippedBBox.max().z(); ++z) {
                const Index offset = offsetXY + (z & (DIM-1u));
                mValueMask.set(offset, active);
                mBuffer.mData.set(offset, value);
            }
        }
    }
}

// Fill all voxel values; active states are unchanged.
template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::fill(const bool& value)
{
    mBuffer.fill(value);
}

// Fill all voxel values and set every voxel's active state.
template<Index Log2Dim>
inline void
LeafNode<bool, Log2Dim>::fill(const bool& value, bool active)
{
    mBuffer.fill(value);
    mValueMask.set(active);
}


////////////////////////////////////////


// Copy this node's values for the voxels inside bbox into a dense array,
// walking the dense grid with its x/y/z strides.
template<Index Log2Dim>
template<typename DenseT>
inline void
LeafNode<bool, Log2Dim>::copyToDense(const CoordBBox& bbox, DenseT& dense) const
{
    using DenseValueType = typename DenseT::ValueType;

    const size_t xStride = dense.xStride(), yStride = dense.yStride(), zStride = dense.zStride();
    const Coord& min = dense.bbox().min();
    DenseValueType* t0 = dense.data() + zStride * (bbox.min()[2] - min[2]); // target array
    const Int32 n0 = bbox.min()[2] & (DIM-1u);
    for (Int32 x = bbox.min()[0], ex = bbox.max()[0] + 1; x < ex; ++x) {
        DenseValueType* t1 = t0 + xStride * (x - min[0]);
        const Int32 n1 = n0 + ((x & (DIM-1u)) << 2*LOG2DIM);
        for (Int32 y = bbox.min()[1], ey = bbox.max()[1] + 1; y < ey; ++y) {
            DenseValueType* t2 = t1 + yStride * (y - min[1]);
            Int32 n2 = n1 + ((y & (DIM-1u)) << LOG2DIM);
            for (Int32 z = bbox.min()[2], ez = bbox.max()[2] + 1; z < ez; ++z, t2 += zStride) {
                *t2 = DenseValueType(mBuffer.mData.isOn(n2++));
            }
        }
    }
}


// Copy values for the voxels inside bbox from a dense array into this node.
// Voxels whose (boolified) source value equals the background become
// inactive background voxels; all others become active with the source value.
template<Index Log2Dim>
template<typename DenseT>
inline void
LeafNode<bool, Log2Dim>::copyFromDense(const CoordBBox& bbox, const DenseT& dense,
    bool background, bool tolerance)
{
    using DenseValueType = typename DenseT::ValueType;
    struct Local {
        inline static bool toBool(const DenseValueType& v) { return !math::isZero(v); }
    };

    const size_t xStride = dense.xStride(), yStride = dense.yStride(), zStride = dense.zStride();
    const Coord& min = dense.bbox().min();
    const DenseValueType* s0 = dense.data() + zStride * (bbox.min()[2] - min[2]); // source
    const Int32 n0 = bbox.min()[2] & (DIM-1u);
    for (Int32 x = bbox.min()[0], ex = bbox.max()[0] + 1; x < ex; ++x) {
        const DenseValueType* s1 = s0 + xStride * (x - min[0]);
        const Int32 n1 = n0 + ((x & (DIM-1u)) << 2*LOG2DIM);
        for (Int32 y = bbox.min()[1], ey = bbox.max()[1] + 1; y < ey; ++y) {
            const DenseValueType* s2 = s1 + yStride * (y - min[1]);
            Int32 n2 = n1 + ((y & (DIM-1u)) << LOG2DIM);
            for (Int32 z = bbox.min()[2], ez = bbox.max()[2]+1; z < ez; ++z, ++n2, s2 += zStride) {
                // Note: if tolerance is true (i.e., 1), then all boolean values compare equal.
                if (tolerance || (background == Local::toBool(*s2))) {
                    mValueMask.setOff(n2);
                    mBuffer.mData.set(n2, background);
                } else {
                    mValueMask.setOn(n2);
                    mBuffer.mData.set(n2, Local::toBool(*s2));
                }
            }
        }
    }
}


////////////////////////////////////////


// Combine this node (A) with another leaf node (B) voxel by voxel; op writes
// each result value, and CombineArgs reports the resulting active state.
template<Index Log2Dim>
template<typename CombineOp>
inline void
LeafNode<bool, Log2Dim>::combine(const LeafNode& other, CombineOp& op)
{
    CombineArgs<bool> args;
    for (Index i = 0; i < SIZE; ++i) {
        bool result = false, aVal = mBuffer.mData.isOn(i), bVal = other.mBuffer.mData.isOn(i);
        op(args.setARef(aVal)
            .setAIsActive(mValueMask.isOn(i))
            .setBRef(bVal)
            .setBIsActive(other.valueMask().isOn(i))
            .setResultRef(result));
        mValueMask.set(i, args.resultIsActive());
        mBuffer.mData.set(i, result);
    }
}


// Combine this node (A) with a constant value (B).
template<Index Log2Dim>
template<typename CombineOp>
inline void
LeafNode<bool, Log2Dim>::combine(bool value, bool valueIsActive, CombineOp& op)
{
    CombineArgs<bool> args;
    args.setBRef(value).setBIsActive(valueIsActive);
    for (Index i = 0; i < SIZE; ++i) {
        bool result = false, aVal = mBuffer.mData.isOn(i);
        op(args.setARef(aVal)
            .setAIsActive(mValueMask.isOn(i))
            .setResultRef(result));
        mValueMask.set(i, args.resultIsActive());
        mBuffer.mData.set(i, result);
    }
}


////////////////////////////////////////


// Store in this node the result of combining another node (A) with a constant (B).
template<Index Log2Dim>
template<typename CombineOp, typename OtherType>
inline void
LeafNode<bool, Log2Dim>::combine2(const LeafNode& other, const OtherType& value,
    bool valueIsActive, CombineOp& op)
{
    CombineArgs<bool, OtherType> args;
    args.setBRef(value).setBIsActive(valueIsActive);
    for (Index i = 0; i < SIZE; ++i) {
        bool result = false, aVal = other.mBuffer.mData.isOn(i);
        op(args.setARef(aVal)
            .setAIsActive(other.valueMask().isOn(i))
            .setResultRef(result));
        mValueMask.set(i, args.resultIsActive());
        mBuffer.mData.set(i, result);
    }
}


// Store in this node the result of combining a constant (A) with another node (B).
template<Index Log2Dim>
template<typename CombineOp, typename OtherNodeT>
inline void
LeafNode<bool, Log2Dim>::combine2(bool value, const OtherNodeT& other,
    bool valueIsActive, CombineOp& op)
{
    CombineArgs<bool, typename OtherNodeT::ValueType> args;
    args.setARef(value).setAIsActive(valueIsActive);
    for (Index i = 0; i < SIZE; ++i) {
        bool result = false, bVal = other.mBuffer.mData.isOn(i);
        op(args.setBRef(bVal)
            .setBIsActive(other.valueMask().isOn(i))
            .setResultRef(result));
        mValueMask.set(i, args.resultIsActive());
        mBuffer.mData.set(i, result);
    }
}


// Store in this node the result of combining two other nodes (A = b0, B = b1).
template<Index Log2Dim>
template<typename CombineOp, typename OtherNodeT>
inline void
LeafNode<bool, Log2Dim>::combine2(const LeafNode& b0, const OtherNodeT& b1, CombineOp& op)
{
    CombineArgs<bool, typename OtherNodeT::ValueType> args;
    for (Index i = 0; i < SIZE; ++i) {
        // Default behavior: output voxel is active if either input voxel is active.
        mValueMask.set(i, b0.valueMask().isOn(i) || b1.valueMask().isOn(i));

        bool result = false, b0Val = b0.mBuffer.mData.isOn(i), b1Val = b1.mBuffer.mData.isOn(i);
        op(args.setARef(b0Val)
            .setAIsActive(b0.valueMask().isOn(i))
            .setBRef(b1Val)
            .setBIsActive(b1.valueMask().isOn(i))
            .setResultRef(result));
        mValueMask.set(i, args.resultIsActive());
        mBuffer.mData.set(i, result);
    }
}


////////////////////////////////////////


// Apply op either per active voxel (as unit cubes) or once for the whole
// node's bounding box, depending on op's descent policy at this level.
template<Index Log2Dim>
template<typename BBoxOp>
inline void
LeafNode<bool, Log2Dim>::visitActiveBBox(BBoxOp& op) const
{
    if (op.template descent<LEVEL>()) {
        for (ValueOnCIter i=this->cbeginValueOn(); i; ++i) {
            op.template operator()<LEVEL>(CoordBBox::createCube(i.getCoord(), 1));
        }
    } else {
        op.template operator()<LEVEL>(this->getNodeBoundingBox());
    }
}


template<Index Log2Dim>
template<typename VisitorOp>
inline void
LeafNode<bool, Log2Dim>::visit(VisitorOp& op)
{
    doVisit<LeafNode, VisitorOp, ChildAllIter>(*this, op);
}


template<Index Log2Dim>
template<typename VisitorOp>
inline void
LeafNode<bool, Log2Dim>::visit(VisitorOp& op) const
{
    doVisit<const LeafNode, VisitorOp, ChildAllCIter>(*this, op);
}


// Shared implementation of the const and non-const visit() methods.
template<Index Log2Dim>
template<typename NodeT, typename VisitorOp, typename ChildAllIterT>
inline void
LeafNode<bool, Log2Dim>::doVisit(NodeT& self, VisitorOp& op)
{
    for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) {
        op(iter);
    }
}


////////////////////////////////////////


template<Index Log2Dim>
template<typename OtherLeafNodeType, typename VisitorOp>
inline void
LeafNode<bool, Log2Dim>::visit2Node(OtherLeafNodeType& other, VisitorOp& op)
{
    doVisit2Node<LeafNode, OtherLeafNodeType, VisitorOp,
        ChildAllIter, typename OtherLeafNodeType::ChildAllIter>(*this, other, op);
}


template<Index Log2Dim>
template<typename OtherLeafNodeType, typename VisitorOp>
inline void
LeafNode<bool, Log2Dim>::visit2Node(OtherLeafNodeType& other, VisitorOp& op) const
{
    doVisit2Node<const LeafNode, OtherLeafNodeType, VisitorOp,
        ChildAllCIter, typename OtherLeafNodeType::ChildAllCIter>(*this, other, op);
}


// Visit corresponding voxels of two nodes in lockstep.
template<Index Log2Dim>
template<
    typename NodeT,
    typename OtherNodeT,
    typename VisitorOp,
    typename ChildAllIterT,
    typename OtherChildAllIterT>
inline void
LeafNode<bool, Log2Dim>::doVisit2Node(NodeT& self, OtherNodeT& other, VisitorOp& op)
{
    // Allow the two nodes to have different ValueTypes, but not different dimensions.
    static_assert(OtherNodeT::SIZE == NodeT::SIZE,
        "can't visit nodes of different sizes simultaneously");
    static_assert(OtherNodeT::LEVEL == NodeT::LEVEL,
        "can't visit nodes at different tree levels simultaneously");

    ChildAllIterT iter = self.beginChildAll();
    OtherChildAllIterT otherIter = other.beginChildAll();

    for ( ; iter && otherIter; ++iter, ++otherIter) {
        op(iter, otherIter);
    }
}


////////////////////////////////////////


template<Index Log2Dim>
template<typename IterT, typename VisitorOp>
inline void
LeafNode<bool, Log2Dim>::visit2(IterT& otherIter, VisitorOp& op, bool otherIsLHS)
{
    doVisit2<LeafNode, VisitorOp, ChildAllIter, IterT>(*this, otherIter, op, otherIsLHS);
}


template<Index Log2Dim>
template<typename IterT, typename VisitorOp>
inline void
LeafNode<bool, Log2Dim>::visit2(IterT& otherIter, VisitorOp& op, bool otherIsLHS) const
{
    doVisit2<const LeafNode, VisitorOp, ChildAllCIter, IterT>(*this, otherIter, op, otherIsLHS);
}


// Visit this node's voxels against a fixed external iterator; otherIsLHS
// selects the argument order in which op is invoked.
template<Index Log2Dim>
template<
    typename NodeT,
    typename VisitorOp,
    typename ChildAllIterT,
    typename OtherChildAllIterT>
inline void
LeafNode<bool, Log2Dim>::doVisit2(NodeT& self, OtherChildAllIterT& otherIter,
    VisitorOp& op, bool otherIsLHS)
{
    if (!otherIter) return;

    if (otherIsLHS) {
        for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) {
            op(otherIter, iter);
        }
    } else {
        for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) {
            op(iter, otherIter);
        }
    }
}

} // namespace tree
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TREE_LEAF_NODE_BOOL_HAS_BEEN_INCLUDED
63,733
C
35.92584
115
0.671552
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/TreeIterator.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file tree/TreeIterator.h #ifndef OPENVDB_TREE_TREEITERATOR_HAS_BEEN_INCLUDED #define OPENVDB_TREE_TREEITERATOR_HAS_BEEN_INCLUDED #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <openvdb/version.h> #include <openvdb/Types.h> #include <algorithm> #include <sstream> #include <string> #include <type_traits> // Prior to 0.96.1, depth-bounded value iterators always descended to the leaf level // and iterated past leaf nodes. Now, they never descend past the maximum depth. // Comment out the following line to restore the older, less-efficient behavior: #define ENABLE_TREE_VALUE_DEPTH_BOUND_OPTIMIZATION namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { namespace iter { template<typename HeadT, int HeadLevel> struct InvertedTree { using SubtreeT = typename InvertedTree<typename HeadT::ChildNodeType, HeadLevel-1>::Type; using Type = typename SubtreeT::template Append<HeadT>; }; template<typename HeadT> struct InvertedTree<HeadT, /*HeadLevel=*/1> { using Type = TypeList<typename HeadT::ChildNodeType, HeadT>; }; } // namespace iter //////////////////////////////////////// /// IterTraits provides the following for iterators of the standard types, /// i.e., for {Child,Value}{On,Off,All}{Iter,CIter}: /// - a NodeConverter template to convert an iterator for one type of node /// to an iterator of the same type for another type of node; for example, /// IterTraits<RootNode, RootNode::ValueOnIter>::NodeConverter<LeafNode>::Type /// is synonymous with LeafNode::ValueOnIter. 
/// - a begin(node) function that returns a begin iterator for a node of arbitrary type;
///   for example, IterTraits<LeafNode, LeafNode::ValueOnIter>::begin(leaf) returns
///   leaf.beginValueOn()
/// - a getChild() function that returns a pointer to the child node to which the iterator
///   is currently pointing (always null if the iterator is a Value iterator)

/// @brief Primary template: used for Value iterators, which never point to
/// child nodes, so getChild() always returns null.  Each Child-iterator
/// specialization below overrides getChild() with a real lookup.
template<typename NodeT, typename IterT>
struct IterTraits
{
    template<typename ChildT> static ChildT* getChild(const IterT&) { return nullptr; }
};

/// Traits for non-const iteration over active (on) child nodes
template<typename NodeT>
struct IterTraits<NodeT, typename NodeT::ChildOnIter>
{
    using IterT = typename NodeT::ChildOnIter;
    static IterT begin(NodeT& node) { return node.beginChildOn(); }
    // An "on" child iterator dereferences directly to the child node.
    template<typename ChildT> static ChildT* getChild(const IterT& iter) {
        return &iter.getValue();
    }
    template<typename OtherNodeT> struct NodeConverter {
        using Type = typename OtherNodeT::ChildOnIter;
    };
};

/// Traits for const iteration over active (on) child nodes
template<typename NodeT>
struct IterTraits<NodeT, typename NodeT::ChildOnCIter>
{
    using IterT = typename NodeT::ChildOnCIter;
    static IterT begin(const NodeT& node) { return node.cbeginChildOn(); }
    template<typename ChildT> static const ChildT* getChild(const IterT& iter) {
        return &iter.getValue();
    }
    template<typename OtherNodeT> struct NodeConverter {
        using Type = typename OtherNodeT::ChildOnCIter;
    };
};

/// @brief Traits for non-const iteration over inactive (off) child entries.
/// No getChild() override: "off" entries are tiles, never child nodes.
template<typename NodeT>
struct IterTraits<NodeT, typename NodeT::ChildOffIter>
{
    using IterT = typename NodeT::ChildOffIter;
    static IterT begin(NodeT& node) { return node.beginChildOff(); }
    template<typename OtherNodeT> struct NodeConverter {
        using Type = typename OtherNodeT::ChildOffIter;
    };
};

/// Traits for const iteration over inactive (off) child entries
template<typename NodeT>
struct IterTraits<NodeT, typename NodeT::ChildOffCIter>
{
    using IterT = typename NodeT::ChildOffCIter;
    static IterT begin(const NodeT& node) { return node.cbeginChildOff(); }
    template<typename OtherNodeT> struct NodeConverter {
        using Type = typename OtherNodeT::ChildOffCIter;
    };
};

/// Traits for non-const iteration over all table entries (children and tiles)
template<typename NodeT>
struct IterTraits<NodeT, typename NodeT::ChildAllIter>
{
    using IterT = typename NodeT::ChildAllIter;
    static IterT begin(NodeT& node) { return node.beginChildAll(); }
    // An "all" iterator may point at either a tile or a child, so probe:
    // probeChild() returns null for tiles and writes the tile value to val.
    template<typename ChildT> static ChildT* getChild(const IterT& iter) {
        typename IterT::NonConstValueType val;
        return iter.probeChild(val);
    }
    template<typename OtherNodeT> struct NodeConverter {
        using Type = typename OtherNodeT::ChildAllIter;
    };
};

/// Traits for const iteration over all table entries (children and tiles)
template<typename NodeT>
struct IterTraits<NodeT, typename NodeT::ChildAllCIter>
{
    using IterT = typename NodeT::ChildAllCIter;
    static IterT begin(const NodeT& node) { return node.cbeginChildAll(); }
    template<typename ChildT> static ChildT* getChild(const IterT& iter) {
        typename IterT::NonConstValueType val;
        return iter.probeChild(val);
    }
    template<typename OtherNodeT> struct NodeConverter {
        using Type = typename OtherNodeT::ChildAllCIter;
    };
};

/// Traits for non-const iteration over active (on) values
template<typename NodeT>
struct IterTraits<NodeT, typename NodeT::ValueOnIter>
{
    using IterT = typename NodeT::ValueOnIter;
    static IterT begin(NodeT& node) { return node.beginValueOn(); }
    template<typename OtherNodeT> struct NodeConverter {
        using Type = typename OtherNodeT::ValueOnIter;
    };
};

/// Traits for const iteration over active (on) values
template<typename NodeT>
struct IterTraits<NodeT, typename NodeT::ValueOnCIter>
{
    using IterT = typename NodeT::ValueOnCIter;
    static IterT begin(const NodeT& node) { return node.cbeginValueOn(); }
    template<typename OtherNodeT> struct NodeConverter {
        using Type = typename OtherNodeT::ValueOnCIter;
    };
};

/// Traits for non-const iteration over inactive (off) values
template<typename NodeT>
struct IterTraits<NodeT, typename NodeT::ValueOffIter>
{
    using IterT = typename NodeT::ValueOffIter;
    static IterT begin(NodeT& node) { return node.beginValueOff(); }
    template<typename OtherNodeT> struct NodeConverter {
        using Type = typename OtherNodeT::ValueOffIter;
    };
};

/// Traits for const iteration over inactive (off) values
template<typename NodeT>
struct IterTraits<NodeT, typename NodeT::ValueOffCIter>
{
    using IterT = typename NodeT::ValueOffCIter;
    static IterT begin(const NodeT& node) { return node.cbeginValueOff(); }
    template<typename OtherNodeT> struct NodeConverter {
        using Type = typename OtherNodeT::ValueOffCIter;
    };
};

/// Traits for non-const iteration over all values (active and inactive)
template<typename NodeT>
struct IterTraits<NodeT, typename NodeT::ValueAllIter>
{
    using IterT = typename NodeT::ValueAllIter;
    static IterT begin(NodeT& node) { return node.beginValueAll(); }
    template<typename OtherNodeT> struct NodeConverter {
        using Type = typename OtherNodeT::ValueAllIter;
    };
};

/// Traits for const iteration over all values (active and inactive)
template<typename NodeT>
struct IterTraits<NodeT, typename NodeT::ValueAllCIter>
{
    using IterT = typename NodeT::ValueAllCIter;
    static IterT begin(const NodeT& node) { return node.cbeginValueAll(); }
    template<typename OtherNodeT> struct NodeConverter {
        using Type = typename OtherNodeT::ValueAllCIter;
    };
};


////////////////////////////////////////


/// @brief An IterListItem is an element of a compile-time linked list of iterators
/// to nodes of different types.
///
/// The list is constructed by traversing the template hierarchy of a Tree in reverse order,
/// so typically the elements will be a LeafNode iterator of some type (e.g., ValueOnCIter),
/// followed by one or more InternalNode iterators of the same type, followed by a RootNode
/// iterator of the same type.
///
/// The length of the list is fixed at compile time, and because it is implemented using
/// nested, templated classes, much of the list traversal logic can be optimized away.
template<typename PrevItemT, typename NodeVecT, size_t VecSize, Index _Level> class IterListItem { public: /// The type of iterator stored in the previous list item using PrevIterT = typename PrevItemT::IterT; /// The type of node (non-const) whose iterator is stored in this list item using _NodeT = typename NodeVecT::Front; /// The type of iterator stored in this list item (e.g., InternalNode::ValueOnCIter) using IterT = typename IterTraits<typename PrevIterT::NonConstNodeType, PrevIterT>::template NodeConverter<_NodeT>::Type; /// The type of node (const or non-const) over which IterT iterates (e.g., const RootNode<...>) using NodeT = typename IterT::NodeType; /// The type of the node with const qualifiers removed ("Non-Const") using NCNodeT = typename IterT::NonConstNodeType; /// The type of value (with const qualifiers removed) to which the iterator points using NCValueT = typename IterT::NonConstValueType; /// NodeT's child node type, with the same constness (e.g., const InternalNode<...>) using ChildT = typename CopyConstness<NodeT, typename NodeT::ChildNodeType>::Type; /// NodeT's child node type with const qualifiers removed using NCChildT = typename CopyConstness<NCNodeT, typename NCNodeT::ChildNodeType>::Type; using ITraits = IterTraits<NCNodeT, IterT>; /// NodeT's level in its tree (0 = LeafNode) static const Index Level = _Level; IterListItem(PrevItemT* prev): mNext(this), mPrev(prev) {} IterListItem(const IterListItem& other): mIter(other.mIter), mNext(other.mNext), mPrev(nullptr) {} IterListItem& operator=(const IterListItem& other) { if (&other != this) { mIter = other.mIter; mNext = other.mNext; mPrev = nullptr; ///< @note external call to updateBackPointers() required } return *this; } void updateBackPointers(PrevItemT* prev) { mPrev = prev; mNext.updateBackPointers(this); } void setIter(const IterT& iter) { mIter = iter; } template<typename OtherIterT> void setIter(const OtherIterT& iter) { mNext.setIter(iter); } /// Return the node over which this 
list element's iterator iterates. void getNode(Index lvl, NodeT*& node) const { node = (lvl <= Level) ? mIter.getParentNode() : nullptr; } /// Return the node over which one of the following list elements' iterator iterates. template<typename OtherNodeT> void getNode(Index lvl, OtherNodeT*& node) const { mNext.getNode(lvl, node); } /// @brief Initialize the iterator for level @a lvl of the tree with the node /// over which the corresponding iterator of @a otherListItem is iterating. /// /// For example, if @a otherListItem contains a LeafNode::ValueOnIter, /// initialize this list's leaf iterator with the same LeafNode. template<typename OtherIterListItemT> void initLevel(Index lvl, OtherIterListItemT& otherListItem) { if (lvl == Level) { const NodeT* node = nullptr; otherListItem.getNode(lvl, node); mIter = (node == nullptr) ? IterT() : ITraits::begin(*const_cast<NodeT*>(node)); } else { // Forward to one of the following list elements. mNext.initLevel(lvl, otherListItem); } } /// Return The table offset of the iterator at level @a lvl of the tree. Index pos(Index lvl) const { return (lvl == Level) ? mIter.pos() : mNext.pos(lvl); } /// Return @c true if the iterator at level @a lvl of the tree has not yet reached its end. bool test(Index lvl) const { return (lvl == Level) ? mIter.test() : mNext.test(lvl); } /// Increment the iterator at level @a lvl of the tree. bool next(Index lvl) { return (lvl == Level) ? mIter.next() : mNext.next(lvl); } /// @brief If the iterator at level @a lvl of the tree points to a child node, /// initialize the next iterator in this list with that child node. bool down(Index lvl) { if (lvl == Level && mPrev != nullptr && mIter) { if (ChildT* child = ITraits::template getChild<ChildT>(mIter)) { mPrev->setIter(PrevItemT::ITraits::begin(*child)); return true; } } return (lvl > Level) ? 
mNext.down(lvl) : false; } /// @brief Return the global coordinates of the voxel or tile to which the iterator /// at level @a lvl of the tree is currently pointing. Coord getCoord(Index lvl) const { return (lvl == Level) ? mIter.getCoord() : mNext.getCoord(lvl); } Index getChildDim(Index lvl) const { return (lvl == Level) ? NodeT::getChildDim() : mNext.getChildDim(lvl); } /// Return the number of (virtual) voxels spanned by a tile value or child node Index64 getVoxelCount(Index lvl) const { return (lvl == Level) ? ChildT::NUM_VOXELS : mNext.getVoxelCount(lvl); } /// Return @c true if the iterator at level @a lvl of the tree points to an active value. bool isValueOn(Index lvl) const { return (lvl == Level) ? mIter.isValueOn() : mNext.isValueOn(lvl); } /// Return the value to which the iterator at level @a lvl of the tree points. const NCValueT& getValue(Index lvl) const { if (lvl == Level) return mIter.getValue(); return mNext.getValue(lvl); } /// @brief Set the value (to @a val) to which the iterator at level @a lvl /// of the tree points and mark the value as active. /// @note Not valid when @c IterT is a const iterator type void setValue(Index lvl, const NCValueT& val) const { if (lvl == Level) mIter.setValue(val); else mNext.setValue(lvl, val); } /// @brief Set the value (to @a val) to which the iterator at level @a lvl of the tree /// points and mark the value as active if @a on is @c true, or inactive otherwise. /// @note Not valid when @c IterT is a const iterator type void setValueOn(Index lvl, bool on = true) const { if (lvl == Level) mIter.setValueOn(on); else mNext.setValueOn(lvl, on); } /// @brief Mark the value to which the iterator at level @a lvl of the tree points /// as inactive. /// @note Not valid when @c IterT is a const iterator type void setValueOff(Index lvl) const { if (lvl == Level) mIter.setValueOff(); else mNext.setValueOff(lvl); } /// @brief Apply a functor to the item to which this iterator is pointing. 
/// @note Not valid when @c IterT is a const iterator type template<typename ModifyOp> void modifyValue(Index lvl, const ModifyOp& op) const { if (lvl == Level) mIter.modifyValue(op); else mNext.modifyValue(lvl, op); } private: using RestT = typename NodeVecT::PopFront; // NodeVecT minus its first item using NextItem = IterListItem<IterListItem, RestT, VecSize - 1, Level + 1>; IterT mIter; NextItem mNext; PrevItemT* mPrev; }; /// The initial element of a compile-time linked list of iterators to nodes of different types template<typename PrevItemT, typename NodeVecT, size_t VecSize> class IterListItem<PrevItemT, NodeVecT, VecSize, /*Level=*/0U> { public: /// The type of iterator stored in the previous list item using PrevIterT = typename PrevItemT::IterT; /// The type of node (non-const) whose iterator is stored in this list item using _NodeT = typename NodeVecT::Front; /// The type of iterator stored in this list item (e.g., InternalNode::ValueOnCIter) using IterT = typename IterTraits<typename PrevIterT::NonConstNodeType, PrevIterT>::template NodeConverter<_NodeT>::Type; /// The type of node (const or non-const) over which IterT iterates (e.g., const RootNode<...>) using NodeT = typename IterT::NodeType; /// The type of the node with const qualifiers removed ("Non-Const") using NCNodeT = typename IterT::NonConstNodeType; /// The type of value (with const qualifiers removed) to which the iterator points using NCValueT = typename IterT::NonConstValueType; using ITraits = IterTraits<NCNodeT, IterT>; /// NodeT's level in its tree (0 = LeafNode) static const Index Level = 0; IterListItem(PrevItemT*): mNext(this), mPrev(nullptr) {} IterListItem(const IterListItem& other): mIter(other.mIter), mNext(other.mNext), mPrev(nullptr) {} IterListItem& operator=(const IterListItem& other) { if (&other != this) { mIter = other.mIter; mNext = other.mNext; mPrev = nullptr; } return *this; } void updateBackPointers(PrevItemT* = nullptr) { mPrev = nullptr; 
mNext.updateBackPointers(this); } void setIter(const IterT& iter) { mIter = iter; } template<typename OtherIterT> void setIter(const OtherIterT& iter) { mNext.setIter(iter); } void getNode(Index lvl, NodeT*& node) const { node = (lvl == 0) ? mIter.getParentNode() : nullptr; } template<typename OtherNodeT> void getNode(Index lvl, OtherNodeT*& node) const { mNext.getNode(lvl, node); } template<typename OtherIterListItemT> void initLevel(Index lvl, OtherIterListItemT& otherListItem) { if (lvl == 0) { const NodeT* node = nullptr; otherListItem.getNode(lvl, node); mIter = (node == nullptr) ? IterT() : ITraits::begin(*const_cast<NodeT*>(node)); } else { mNext.initLevel(lvl, otherListItem); } } Index pos(Index lvl) const { return (lvl == 0) ? mIter.pos() : mNext.pos(lvl); } bool test(Index lvl) const { return (lvl == 0) ? mIter.test() : mNext.test(lvl); } bool next(Index lvl) { return (lvl == 0) ? mIter.next() : mNext.next(lvl); } bool down(Index lvl) { return (lvl == 0) ? false : mNext.down(lvl); } Coord getCoord(Index lvl) const { return (lvl == 0) ? mIter.getCoord() : mNext.getCoord(lvl); } Index getChildDim(Index lvl) const { return (lvl == 0) ? NodeT::getChildDim() : mNext.getChildDim(lvl); } Index64 getVoxelCount(Index lvl) const { return (lvl == 0) ? 1 : mNext.getVoxelCount(lvl); } bool isValueOn(Index lvl) const { return (lvl == 0) ? 
mIter.isValueOn() : mNext.isValueOn(lvl); } const NCValueT& getValue(Index lvl) const { if (lvl == 0) return mIter.getValue(); return mNext.getValue(lvl); } void setValue(Index lvl, const NCValueT& val) const { if (lvl == 0) mIter.setValue(val); else mNext.setValue(lvl, val); } void setValueOn(Index lvl, bool on = true) const { if (lvl == 0) mIter.setValueOn(on); else mNext.setValueOn(lvl, on); } void setValueOff(Index lvl) const { if (lvl == 0) mIter.setValueOff(); else mNext.setValueOff(lvl); } template<typename ModifyOp> void modifyValue(Index lvl, const ModifyOp& op) const { if (lvl == 0) mIter.modifyValue(op); else mNext.modifyValue(lvl, op); } private: using RestT = typename NodeVecT::PopFront; // NodeVecT minus its first item using NextItem = IterListItem<IterListItem, RestT, VecSize - 1, /*Level=*/1>; IterT mIter; NextItem mNext; PrevItemT* mPrev; }; /// The final element of a compile-time linked list of iterators to nodes of different types template<typename PrevItemT, typename NodeVecT, Index _Level> class IterListItem<PrevItemT, NodeVecT, /*VecSize=*/1, _Level> { public: using _NodeT = typename NodeVecT::Front; /// The type of iterator stored in the previous list item using PrevIterT = typename PrevItemT::IterT; /// The type of iterator stored in this list item (e.g., RootNode::ValueOnCIter) using IterT = typename IterTraits<typename PrevIterT::NonConstNodeType, PrevIterT>::template NodeConverter<_NodeT>::Type; /// The type of node over which IterT iterates (e.g., const RootNode<...>) using NodeT = typename IterT::NodeType; /// The type of the node with const qualifiers removed ("Non-Const") using NCNodeT = typename IterT::NonConstNodeType; /// The type of value (with const qualifiers removed) to which the iterator points using NCValueT = typename IterT::NonConstValueType; /// NodeT's child node type, with the same constness (e.g., const InternalNode<...>) using ChildT = typename CopyConstness<NodeT, typename NodeT::ChildNodeType>::Type; /// NodeT's 
child node type with const qualifiers removed using NCChildT = typename CopyConstness<NCNodeT, typename NCNodeT::ChildNodeType>::Type; using ITraits = IterTraits<NCNodeT, IterT>; /// NodeT's level in its tree (0 = LeafNode) static const Index Level = _Level; IterListItem(PrevItemT* prev): mPrev(prev) {} IterListItem(const IterListItem& other): mIter(other.mIter), mPrev(nullptr) {} IterListItem& operator=(const IterListItem& other) { if (&other != this) { mIter = other.mIter; mPrev = nullptr; ///< @note external call to updateBackPointers() required } return *this; } void updateBackPointers(PrevItemT* prev) { mPrev = prev; } // The following method specializations differ from the default template // implementations mainly in that they don't forward. void setIter(const IterT& iter) { mIter = iter; } void getNode(Index lvl, NodeT*& node) const { node = (lvl <= Level) ? mIter.getParentNode() : nullptr; } template<typename OtherIterListItemT> void initLevel(Index lvl, OtherIterListItemT& otherListItem) { if (lvl == Level) { const NodeT* node = nullptr; otherListItem.getNode(lvl, node); mIter = (node == nullptr) ? IterT() : ITraits::begin(*const_cast<NodeT*>(node)); } } Index pos(Index lvl) const { return (lvl == Level) ? mIter.pos() : Index(-1); } bool test(Index lvl) const { return (lvl == Level) ? mIter.test() : false; } bool next(Index lvl) { return (lvl == Level) ? mIter.next() : false; } bool down(Index lvl) { if (lvl == Level && mPrev != nullptr && mIter) { if (ChildT* child = ITraits::template getChild<ChildT>(mIter)) { mPrev->setIter(PrevItemT::ITraits::begin(*child)); return true; } } return false; } Coord getCoord(Index lvl) const { return (lvl == Level) ? mIter.getCoord() : Coord(); } Index getChildDim(Index lvl) const { return (lvl == Level) ? NodeT::getChildDim() : 0; } Index64 getVoxelCount(Index lvl) const { return (lvl == Level) ? ChildT::NUM_VOXELS : 0; } bool isValueOn(Index lvl) const { return (lvl == Level) ? 
mIter.isValueOn() : false; } const NCValueT& getValue(Index lvl) const { assert(lvl == Level); (void)lvl; // avoid unused variable warning in optimized builds return mIter.getValue(); } void setValue(Index lvl, const NCValueT& val) const { if (lvl == Level) mIter.setValue(val); } void setValueOn(Index lvl, bool on = true) const { if (lvl == Level) mIter.setValueOn(on); } void setValueOff(Index lvl) const { if (lvl == Level) mIter.setValueOff(); } template<typename ModifyOp> void modifyValue(Index lvl, const ModifyOp& op) const { if (lvl == Level) mIter.modifyValue(op); } private: IterT mIter; PrevItemT* mPrev; }; //////////////////////////////////////// //#define DEBUG_TREE_VALUE_ITERATOR /// @brief Base class for tree-traversal iterators over tile and voxel values template<typename _TreeT, typename _ValueIterT> class TreeValueIteratorBase { public: using TreeT = _TreeT; using ValueIterT = _ValueIterT; using NodeT = typename ValueIterT::NodeType; using ValueT = typename ValueIterT::NonConstValueType; using ChildOnIterT = typename NodeT::ChildOnCIter; static const Index ROOT_LEVEL = NodeT::LEVEL; static_assert(ValueIterT::NodeType::LEVEL == ROOT_LEVEL, "invalid value iterator node type"); static const Index LEAF_LEVEL = 0, ROOT_DEPTH = 0, LEAF_DEPTH = ROOT_LEVEL; TreeValueIteratorBase(TreeT&); TreeValueIteratorBase(const TreeValueIteratorBase& other); TreeValueIteratorBase& operator=(const TreeValueIteratorBase& other); /// Specify the depth of the highest level of the tree to which to ascend (depth 0 = root). void setMinDepth(Index minDepth); /// Return the depth of the highest level of the tree to which this iterator ascends. Index getMinDepth() const { return ROOT_LEVEL - Index(mMaxLevel); } /// Specify the depth of the lowest level of the tree to which to descend (depth 0 = root). void setMaxDepth(Index maxDepth); /// Return the depth of the lowest level of the tree to which this iterator ascends. 
Index getMaxDepth() const { return ROOT_LEVEL - Index(mMinLevel); } //@{ /// Return @c true if this iterator is not yet exhausted. bool test() const { return mValueIterList.test(mLevel); } operator bool() const { return this->test(); } //@} /// @brief Advance to the next tile or voxel value. /// Return @c true if this iterator is not yet exhausted. bool next(); /// Advance to the next tile or voxel value. TreeValueIteratorBase& operator++() { this->next(); return *this; } /// @brief Return the level in the tree (0 = leaf) of the node to which /// this iterator is currently pointing. Index getLevel() const { return mLevel; } /// @brief Return the depth in the tree (0 = root) of the node to which /// this iterator is currently pointing. Index getDepth() const { return ROOT_LEVEL - mLevel; } static Index getLeafDepth() { return LEAF_DEPTH; } /// @brief Return in @a node a pointer to the node over which this iterator is /// currently iterating or one of that node's parents, as determined by @a NodeType. /// @return a null pointer if @a NodeType specifies a node at a lower level /// of the tree than that given by getLevel(). template<typename NodeType> void getNode(NodeType*& node) const { mValueIterList.getNode(mLevel, node); } /// @brief Return the global coordinates of the voxel or tile to which /// this iterator is currently pointing. Coord getCoord() const { return mValueIterList.getCoord(mLevel); } /// @brief Return in @a bbox the axis-aligned bounding box of /// the voxel or tile to which this iterator is currently pointing. /// @return false if the bounding box is empty. bool getBoundingBox(CoordBBox&) const; /// @brief Return the axis-aligned bounding box of the voxel or tile to which /// this iterator is currently pointing. 
CoordBBox getBoundingBox() const { CoordBBox b; this->getBoundingBox(b); return b; } /// Return the number of (virtual) voxels corresponding to the value Index64 getVoxelCount() const { return mValueIterList.getVoxelCount(mLevel);} /// Return @c true if this iterator is currently pointing to a (non-leaf) tile value. bool isTileValue() const { return mLevel != 0 && this->test(); } /// Return @c true if this iterator is currently pointing to a (leaf) voxel value. bool isVoxelValue() const { return mLevel == 0 && this->test(); } /// Return @c true if the value to which this iterator is currently pointing is active. bool isValueOn() const { return mValueIterList.isValueOn(mLevel); } //@{ /// Return the tile or voxel value to which this iterator is currently pointing. const ValueT& getValue() const { return mValueIterList.getValue(mLevel); } const ValueT& operator*() const { return this->getValue(); } const ValueT* operator->() const { return &(this->operator*()); } //@} /// @brief Change the tile or voxel value to which this iterator is currently pointing /// and mark it as active. void setValue(const ValueT& val) const { mValueIterList.setValue(mLevel, val); } /// @brief Change the active/inactive state of the tile or voxel value to which /// this iterator is currently pointing. void setActiveState(bool on) const { mValueIterList.setValueOn(mLevel, on); } /// Mark the tile or voxel value to which this iterator is currently pointing as inactive. void setValueOff() const { mValueIterList.setValueOff(mLevel); } /// @brief Apply a functor to the item to which this iterator is pointing. /// (Not valid for const iterators.) /// @param op a functor of the form <tt>void op(ValueType&) const</tt> that modifies /// its argument in place /// @see Tree::modifyValue() template<typename ModifyOp> void modifyValue(const ModifyOp& op) const { mValueIterList.modifyValue(mLevel, op); } /// Return a pointer to the tree over which this iterator is iterating. 
TreeT* getTree() const { return mTree; } /// Return a string (for debugging, mainly) describing this iterator's current state. std::string summary() const; private: bool advance(bool dontIncrement = false); using InvTreeT = typename iter::InvertedTree<NodeT, NodeT::LEVEL>::Type; struct PrevChildItem { using IterT = ChildOnIterT; }; struct PrevValueItem { using IterT = ValueIterT; }; IterListItem<PrevChildItem, InvTreeT, /*VecSize=*/ROOT_LEVEL+1, /*Level=*/0> mChildIterList; IterListItem<PrevValueItem, InvTreeT, /*VecSize=*/ROOT_LEVEL+1, /*Level=*/0> mValueIterList; Index mLevel; int mMinLevel, mMaxLevel; TreeT* mTree; }; // class TreeValueIteratorBase template<typename TreeT, typename ValueIterT> inline TreeValueIteratorBase<TreeT, ValueIterT>::TreeValueIteratorBase(TreeT& tree): mChildIterList(nullptr), mValueIterList(nullptr), mLevel(ROOT_LEVEL), mMinLevel(int(LEAF_LEVEL)), mMaxLevel(int(ROOT_LEVEL)), mTree(&tree) { mChildIterList.setIter(IterTraits<NodeT, ChildOnIterT>::begin(tree.root())); mValueIterList.setIter(IterTraits<NodeT, ValueIterT>::begin(tree.root())); this->advance(/*dontIncrement=*/true); } template<typename TreeT, typename ValueIterT> inline TreeValueIteratorBase<TreeT, ValueIterT>::TreeValueIteratorBase(const TreeValueIteratorBase& other): mChildIterList(other.mChildIterList), mValueIterList(other.mValueIterList), mLevel(other.mLevel), mMinLevel(other.mMinLevel), mMaxLevel(other.mMaxLevel), mTree(other.mTree) { mChildIterList.updateBackPointers(); mValueIterList.updateBackPointers(); } template<typename TreeT, typename ValueIterT> inline TreeValueIteratorBase<TreeT, ValueIterT>& TreeValueIteratorBase<TreeT, ValueIterT>::operator=(const TreeValueIteratorBase& other) { if (&other != this) { mChildIterList = other.mChildIterList; mValueIterList = other.mValueIterList; mLevel = other.mLevel; mMinLevel = other.mMinLevel; mMaxLevel = other.mMaxLevel; mTree = other.mTree; mChildIterList.updateBackPointers(); mValueIterList.updateBackPointers(); } return 
*this; } template<typename TreeT, typename ValueIterT> inline void TreeValueIteratorBase<TreeT, ValueIterT>::setMinDepth(Index minDepth) { mMaxLevel = int(ROOT_LEVEL - minDepth); // level = ROOT_LEVEL - depth if (int(mLevel) > mMaxLevel) this->next(); } template<typename TreeT, typename ValueIterT> inline void TreeValueIteratorBase<TreeT, ValueIterT>::setMaxDepth(Index maxDepth) { // level = ROOT_LEVEL - depth mMinLevel = int(ROOT_LEVEL - std::min(maxDepth, this->getLeafDepth())); if (int(mLevel) < mMinLevel) this->next(); } template<typename TreeT, typename ValueIterT> inline bool TreeValueIteratorBase<TreeT, ValueIterT>::next() { do { if (!this->advance()) return false; } while (int(mLevel) < mMinLevel || int(mLevel) > mMaxLevel); return true; } template<typename TreeT, typename ValueIterT> inline bool TreeValueIteratorBase<TreeT, ValueIterT>::advance(bool dontIncrement) { bool recurse = false; do { recurse = false; Index vPos = mValueIterList.pos(mLevel), cPos = mChildIterList.pos(mLevel); if (vPos == cPos && mChildIterList.test(mLevel)) { /// @todo Once ValueOff iterators properly skip child pointers, remove this block. mValueIterList.next(mLevel); vPos = mValueIterList.pos(mLevel); } if (vPos < cPos) { if (dontIncrement) return true; if (mValueIterList.next(mLevel)) { if (mValueIterList.pos(mLevel) == cPos && mChildIterList.test(mLevel)) { /// @todo Once ValueOff iterators properly skip child pointers, /// remove this block. mValueIterList.next(mLevel); } // If there is a next value and it precedes the next child, return. if (mValueIterList.pos(mLevel) < cPos) return true; } } else { // Advance to the next child, which may or may not precede the next value. if (!dontIncrement) mChildIterList.next(mLevel); } #ifdef DEBUG_TREE_VALUE_ITERATOR std::cout << "\n" << this->summary() << std::flush; #endif // Descend to the lowest level at which the next value precedes the next child. 
while (mChildIterList.pos(mLevel) < mValueIterList.pos(mLevel)) { #ifdef ENABLE_TREE_VALUE_DEPTH_BOUND_OPTIMIZATION if (int(mLevel) == mMinLevel) { // If the current node lies at the lowest allowed level, none of its // children can be visited, so just advance its child iterator. mChildIterList.next(mLevel); if (mValueIterList.pos(mLevel) == mChildIterList.pos(mLevel) && mChildIterList.test(mLevel)) { /// @todo Once ValueOff iterators properly skip child pointers, /// remove this block. mValueIterList.next(mLevel); } } else #endif if (mChildIterList.down(mLevel)) { --mLevel; // descend one level mValueIterList.initLevel(mLevel, mChildIterList); if (mValueIterList.pos(mLevel) == mChildIterList.pos(mLevel) && mChildIterList.test(mLevel)) { /// @todo Once ValueOff iterators properly skip child pointers, /// remove this block. mValueIterList.next(mLevel); } } else break; #ifdef DEBUG_TREE_VALUE_ITERATOR std::cout << "\n" << this->summary() << std::flush; #endif } // Ascend to the nearest level at which one of the iterators is not yet exhausted. 
while (!mChildIterList.test(mLevel) && !mValueIterList.test(mLevel)) { if (mLevel == ROOT_LEVEL) return false; ++mLevel; mChildIterList.next(mLevel); dontIncrement = true; recurse = true; } } while (recurse); return true; } template<typename TreeT, typename ValueIterT> inline bool TreeValueIteratorBase<TreeT, ValueIterT>::getBoundingBox(CoordBBox& bbox) const { if (!this->test()) { bbox = CoordBBox(); return false; } bbox.min() = mValueIterList.getCoord(mLevel); bbox.max() = bbox.min().offsetBy(mValueIterList.getChildDim(mLevel) - 1); return true; } template<typename TreeT, typename ValueIterT> inline std::string TreeValueIteratorBase<TreeT, ValueIterT>::summary() const { std::ostringstream ostr; for (int lvl = int(ROOT_LEVEL); lvl >= 0 && lvl >= int(mLevel); --lvl) { if (lvl == 0) ostr << "leaf"; else if (lvl == int(ROOT_LEVEL)) ostr << "root"; else ostr << "int" << (ROOT_LEVEL - lvl); ostr << " v" << mValueIterList.pos(lvl) << " c" << mChildIterList.pos(lvl); if (lvl > int(mLevel)) ostr << " / "; } if (this->test() && mValueIterList.pos(mLevel) < mChildIterList.pos(mLevel)) { if (mLevel == 0) { ostr << " " << this->getCoord(); } else { ostr << " " << this->getBoundingBox(); } } return ostr.str(); } //////////////////////////////////////// /// @brief Base class for tree-traversal iterators over all nodes template<typename _TreeT, typename RootChildOnIterT> class NodeIteratorBase { public: using TreeT = _TreeT; using RootIterT = RootChildOnIterT; using RootNodeT = typename RootIterT::NodeType; using NCRootNodeT = typename RootIterT::NonConstNodeType; static const Index ROOT_LEVEL = RootNodeT::LEVEL; using InvTreeT = typename iter::InvertedTree<NCRootNodeT, ROOT_LEVEL>::Type; static const Index LEAF_LEVEL = 0, ROOT_DEPTH = 0, LEAF_DEPTH = ROOT_LEVEL; using RootIterTraits = IterTraits<NCRootNodeT, RootIterT>; NodeIteratorBase(); NodeIteratorBase(TreeT&); NodeIteratorBase(const NodeIteratorBase& other); NodeIteratorBase& operator=(const NodeIteratorBase& other); /// 
Specify the depth of the highest level of the tree to which to ascend (depth 0 = root). void setMinDepth(Index minDepth); /// Return the depth of the highest level of the tree to which this iterator ascends. Index getMinDepth() const { return ROOT_LEVEL - Index(mMaxLevel); } /// Specify the depth of the lowest level of the tree to which to descend (depth 0 = root). void setMaxDepth(Index maxDepth); /// Return the depth of the lowest level of the tree to which this iterator ascends. Index getMaxDepth() const { return ROOT_LEVEL - Index(mMinLevel); } //@{ /// Return @c true if this iterator is not yet exhausted. bool test() const { return !mDone; } operator bool() const { return this->test(); } //@} /// @brief Advance to the next tile or voxel value. /// @return @c true if this iterator is not yet exhausted. bool next(); /// Advance the iterator to the next leaf node. void increment() { this->next(); } NodeIteratorBase& operator++() { this->increment(); return *this; } /// Increment the iterator n times. void increment(Index n) { for (Index i = 0; i < n && this->next(); ++i) {} } /// @brief Return the level in the tree (0 = leaf) of the node to which /// this iterator is currently pointing. Index getLevel() const { return mLevel; } /// @brief Return the depth in the tree (0 = root) of the node to which /// this iterator is currently pointing. Index getDepth() const { return ROOT_LEVEL - mLevel; } static Index getLeafDepth() { return LEAF_DEPTH; } /// @brief Return the global coordinates of the voxel or tile to which /// this iterator is currently pointing. Coord getCoord() const; /// @brief Return in @a bbox the axis-aligned bounding box of /// the voxel or tile to which this iterator is currently pointing. /// @return false if the bounding box is empty. bool getBoundingBox(CoordBBox& bbox) const; /// @brief Return the axis-aligned bounding box of the voxel or tile to which /// this iterator is currently pointing. 
CoordBBox getBoundingBox() const { CoordBBox b; this->getBoundingBox(b); return b; } //@{ /// @brief Return the node to which the iterator is pointing. /// @note This iterator doesn't have the usual dereference operators (* and ->), /// because they would have to be overloaded by the returned node type. template<typename NodeT> void getNode(NodeT*& node) const { node = nullptr; mIterList.getNode(mLevel, node); } template<typename NodeT> void getNode(const NodeT*& node) const { node = nullptr; mIterList.getNode(mLevel, node); } //@} TreeT* getTree() const { return mTree; } std::string summary() const; private: struct PrevItem { using IterT = RootIterT; }; IterListItem<PrevItem, InvTreeT, /*VecSize=*/ROOT_LEVEL+1, LEAF_LEVEL> mIterList; Index mLevel; int mMinLevel, mMaxLevel; bool mDone; TreeT* mTree; }; // class NodeIteratorBase template<typename TreeT, typename RootChildOnIterT> inline NodeIteratorBase<TreeT, RootChildOnIterT>::NodeIteratorBase(): mIterList(nullptr), mLevel(ROOT_LEVEL), mMinLevel(int(LEAF_LEVEL)), mMaxLevel(int(ROOT_LEVEL)), mDone(true), mTree(nullptr) { } template<typename TreeT, typename RootChildOnIterT> inline NodeIteratorBase<TreeT, RootChildOnIterT>::NodeIteratorBase(TreeT& tree): mIterList(nullptr), mLevel(ROOT_LEVEL), mMinLevel(int(LEAF_LEVEL)), mMaxLevel(int(ROOT_LEVEL)), mDone(false), mTree(&tree) { mIterList.setIter(RootIterTraits::begin(tree.root())); } template<typename TreeT, typename RootChildOnIterT> inline NodeIteratorBase<TreeT, RootChildOnIterT>::NodeIteratorBase(const NodeIteratorBase& other): mIterList(other.mIterList), mLevel(other.mLevel), mMinLevel(other.mMinLevel), mMaxLevel(other.mMaxLevel), mDone(other.mDone), mTree(other.mTree) { mIterList.updateBackPointers(); } template<typename TreeT, typename RootChildOnIterT> inline NodeIteratorBase<TreeT, RootChildOnIterT>& NodeIteratorBase<TreeT, RootChildOnIterT>::operator=(const NodeIteratorBase& other) { if (&other != this) { mLevel = other.mLevel; mMinLevel = other.mMinLevel; 
mMaxLevel = other.mMaxLevel; mDone = other.mDone; mTree = other.mTree; mIterList = other.mIterList; mIterList.updateBackPointers(); } return *this; } template<typename TreeT, typename RootChildOnIterT> inline void NodeIteratorBase<TreeT, RootChildOnIterT>::setMinDepth(Index minDepth) { mMaxLevel = int(ROOT_LEVEL - minDepth); // level = ROOT_LEVEL - depth if (int(mLevel) > mMaxLevel) this->next(); } template<typename TreeT, typename RootChildOnIterT> inline void NodeIteratorBase<TreeT, RootChildOnIterT>::setMaxDepth(Index maxDepth) { // level = ROOT_LEVEL - depth mMinLevel = int(ROOT_LEVEL - std::min(maxDepth, this->getLeafDepth())); if (int(mLevel) < mMinLevel) this->next(); } template<typename TreeT, typename RootChildOnIterT> inline bool NodeIteratorBase<TreeT, RootChildOnIterT>::next() { do { if (mDone) return false; // If the iterator over the current node points to a child, // descend to the child (depth-first traversal). if (int(mLevel) > mMinLevel && mIterList.test(mLevel)) { if (!mIterList.down(mLevel)) return false; --mLevel; } else { // Ascend to the nearest ancestor that has other children. while (!mIterList.test(mLevel)) { if (mLevel == ROOT_LEVEL) { // Can't ascend higher than the root. mDone = true; return false; } ++mLevel; // ascend one level mIterList.next(mLevel); // advance to the next child, if there is one } // Descend to the child. if (!mIterList.down(mLevel)) return false; --mLevel; } } while (int(mLevel) < mMinLevel || int(mLevel) > mMaxLevel); return true; } template<typename TreeT, typename RootChildOnIterT> inline Coord NodeIteratorBase<TreeT, RootChildOnIterT>::getCoord() const { if (mLevel != ROOT_LEVEL) return mIterList.getCoord(mLevel + 1); RootNodeT* root = nullptr; this->getNode(root); return root ? 
root->getMinIndex() : Coord::min(); } template<typename TreeT, typename RootChildOnIterT> inline bool NodeIteratorBase<TreeT, RootChildOnIterT>::getBoundingBox(CoordBBox& bbox) const { if (mLevel == ROOT_LEVEL) { RootNodeT* root = nullptr; this->getNode(root); if (root == nullptr) { bbox = CoordBBox(); return false; } root->getIndexRange(bbox); return true; } bbox.min() = mIterList.getCoord(mLevel + 1); bbox.max() = bbox.min().offsetBy(mIterList.getChildDim(mLevel + 1) - 1); return true; } template<typename TreeT, typename RootChildOnIterT> inline std::string NodeIteratorBase<TreeT, RootChildOnIterT>::summary() const { std::ostringstream ostr; for (int lvl = int(ROOT_LEVEL); lvl >= 0 && lvl >= int(mLevel); --lvl) { if (lvl == 0) ostr << "leaf"; else if (lvl == int(ROOT_LEVEL)) ostr << "root"; else ostr << "int" << (ROOT_LEVEL - lvl); ostr << " c" << mIterList.pos(lvl); if (lvl > int(mLevel)) ostr << " / "; } CoordBBox bbox; this->getBoundingBox(bbox); ostr << " " << bbox; return ostr.str(); } //////////////////////////////////////// /// @brief Base class for tree-traversal iterators over all leaf nodes (but not leaf voxels) template<typename TreeT, typename RootChildOnIterT> class LeafIteratorBase { public: using RootIterT = RootChildOnIterT; using RootNodeT = typename RootIterT::NodeType; using NCRootNodeT = typename RootIterT::NonConstNodeType; static const Index ROOT_LEVEL = RootNodeT::LEVEL; using InvTreeT = typename iter::InvertedTree<NCRootNodeT, ROOT_LEVEL>::Type; using NCLeafNodeT = typename InvTreeT::Front; using LeafNodeT = typename CopyConstness<RootNodeT, NCLeafNodeT>::Type; static const Index LEAF_LEVEL = 0, LEAF_PARENT_LEVEL = LEAF_LEVEL + 1; using RootIterTraits = IterTraits<NCRootNodeT, RootIterT>; LeafIteratorBase(): mIterList(nullptr), mTree(nullptr) {} LeafIteratorBase(TreeT& tree): mIterList(nullptr), mTree(&tree) { // Initialize the iterator list with a root node iterator. 
mIterList.setIter(RootIterTraits::begin(tree.root())); // Descend along the first branch, initializing the node iterator at each level. Index lvl = ROOT_LEVEL; for ( ; lvl > 0 && mIterList.down(lvl); --lvl) {} // If the first branch terminated above the leaf level, backtrack to the next branch. if (lvl > 0) this->next(); } LeafIteratorBase(const LeafIteratorBase& other): mIterList(other.mIterList), mTree(other.mTree) { mIterList.updateBackPointers(); } LeafIteratorBase& operator=(const LeafIteratorBase& other) { if (&other != this) { mTree = other.mTree; mIterList = other.mIterList; mIterList.updateBackPointers(); } return *this; } //@{ /// Return the leaf node to which the iterator is pointing. LeafNodeT* getLeaf() const { LeafNodeT* n = nullptr; mIterList.getNode(LEAF_LEVEL, n); return n; } LeafNodeT& operator*() const { return *this->getLeaf(); } LeafNodeT* operator->() const { return this->getLeaf(); } //@} bool test() const { return mIterList.test(LEAF_PARENT_LEVEL); } operator bool() const { return this->test(); } //@{ /// Advance the iterator to the next leaf node. bool next(); void increment() { this->next(); } LeafIteratorBase& operator++() { this->increment(); return *this; } //@} /// Increment the iterator n times. void increment(Index n) { for (Index i = 0; i < n && this->next(); ++i) {} } TreeT* getTree() const { return mTree; } private: struct PrevItem { using IterT = RootIterT; }; /// @note Even though a LeafIterator doesn't iterate over leaf voxels, /// the first item of this linked list of node iterators is a leaf node iterator, /// whose purpose is only to provide access to its parent leaf node. 
IterListItem<PrevItem, InvTreeT, /*VecSize=*/ROOT_LEVEL+1, LEAF_LEVEL> mIterList; TreeT* mTree; }; // class LeafIteratorBase template<typename TreeT, typename RootChildOnIterT> inline bool LeafIteratorBase<TreeT, RootChildOnIterT>::next() { // If the iterator is valid for the current node one level above the leaf level, // advance the iterator to the node's next child. if (mIterList.test(LEAF_PARENT_LEVEL) && mIterList.next(LEAF_PARENT_LEVEL)) { mIterList.down(LEAF_PARENT_LEVEL); // initialize the leaf iterator return true; } Index lvl = LEAF_PARENT_LEVEL; while (!mIterList.test(LEAF_PARENT_LEVEL)) { if (mIterList.test(lvl)) { mIterList.next(lvl); } else { do { // Ascend to the nearest level at which // one of the iterators is not yet exhausted. if (lvl == ROOT_LEVEL) return false; ++lvl; if (mIterList.test(lvl)) mIterList.next(lvl); } while (!mIterList.test(lvl)); } // Descend to the lowest child, but not as far as the leaf iterator. while (lvl > LEAF_PARENT_LEVEL && mIterList.down(lvl)) --lvl; } mIterList.down(LEAF_PARENT_LEVEL); // initialize the leaf iterator return true; } //////////////////////////////////////// /// An IteratorRange wraps a tree or node iterator, giving the iterator TBB /// splittable range semantics. template<typename IterT> class IteratorRange { public: IteratorRange(const IterT& iter, size_t grainSize = 8): mIter(iter), mGrainSize(grainSize), mSize(0) { mSize = this->size(); } IteratorRange(IteratorRange& other, tbb::split): mIter(other.mIter), mGrainSize(other.mGrainSize), mSize(other.mSize >> 1) { other.increment(mSize); } /// @brief Return a reference to this range's iterator. /// @note The reference is const, because the iterator should not be /// incremented directly. Use this range object's increment() instead. 
const IterT& iterator() const { return mIter; } bool empty() const { return mSize == 0 || !mIter.test(); } bool test() const { return !this->empty(); } operator bool() const { return !this->empty(); } /// @brief Return @c true if this range is splittable (i.e., if the iterator /// can be advanced more than mGrainSize times). bool is_divisible() const { return mSize > mGrainSize; } /// Advance the iterator @a n times. void increment(Index n = 1) { for ( ; n > 0 && mSize > 0; --n, --mSize, ++mIter) {} } /// Advance the iterator to the next item. IteratorRange& operator++() { this->increment(); return *this; } /// @brief Advance the iterator to the next item. /// @return @c true if the iterator is not yet exhausted. bool next() { this->increment(); return this->test(); } private: Index size() const { Index n = 0; for (IterT it(mIter); it.test(); ++n, ++it) {} return n; } IterT mIter; size_t mGrainSize; /// @note mSize is only an estimate of the number of times mIter can be incremented /// before it is exhausted (because the topology of the underlying tree could change /// during iteration). For the purpose of range splitting, though, that should be /// sufficient, since the two halves need not be of exactly equal size. Index mSize; }; //////////////////////////////////////// /// @brief Base class for tree-traversal iterators over real and virtual voxel values /// @todo class TreeVoxelIteratorBase; } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_TREEITERATOR_HAS_BEEN_INCLUDED
50,775
C
36.198535
100
0.657115
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyMetadata.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <boost/python.hpp> #include "openvdb/openvdb.h" namespace py = boost::python; using namespace openvdb::OPENVDB_VERSION_NAME; namespace { class MetadataWrap: public Metadata, public py::wrapper<Metadata> { public: Name typeName() const { return static_cast<const Name&>(this->get_override("typeName")()); } Metadata::Ptr copy() const { return static_cast<const Metadata::Ptr&>(this->get_override("copy")()); } void copy(const Metadata& other) { this->get_override("copy")(other); } std::string str() const {return static_cast<const std::string&>(this->get_override("str")());} bool asBool() const { return static_cast<const bool&>(this->get_override("asBool")()); } Index32 size() const { return static_cast<const Index32&>(this->get_override("size")()); } protected: void readValue(std::istream& is, Index32 numBytes) { this->get_override("readValue")(is, numBytes); } void writeValue(std::ostream& os) const { this->get_override("writeValue")(os); } }; // aliases disambiguate the different versions of copy Metadata::Ptr (MetadataWrap::*copy0)() const = &MetadataWrap::copy; void (MetadataWrap::*copy1)(const Metadata&) = &MetadataWrap::copy; } // end anonymous namespace void exportMetadata(); void exportMetadata() { py::class_<MetadataWrap, boost::noncopyable> clss( /*classname=*/"Metadata", /*docstring=*/ "Class that holds the value of a single item of metadata of a type\n" "for which no Python equivalent exists (typically a custom type)", /*ctor=*/py::no_init // can only be instantiated from C++, not from Python ); clss.def("copy", py::pure_virtual(copy0), "copy() -> Metadata\n\nReturn a copy of this value.") .def("copy", py::pure_virtual(copy1), "copy() -> Metadata\n\nReturn a copy of this value.") .def("type", py::pure_virtual(&Metadata::typeName), "type() -> str\n\nReturn the name of this value's type.") .def("size", py::pure_virtual(&Metadata::size), "size() -> int\n\nReturn the 
size of this value in bytes.") .def("__nonzero__", py::pure_virtual(&Metadata::asBool)) .def("__str__", py::pure_virtual(&Metadata::str)) ; py::register_ptr_to_python<Metadata::Ptr>(); }
2,422
C++
36.276923
98
0.639141
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyIntGrid.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file pyIntGrid.cc /// @brief Boost.Python wrappers for scalar, integer-valued openvdb::Grid types #include "pyGrid.h" void exportIntGrid(); void exportIntGrid() { pyGrid::exportGrid<BoolGrid>(); #ifdef PY_OPENVDB_WRAP_ALL_GRID_TYPES pyGrid::exportGrid<Int32Grid>(); pyGrid::exportGrid<Int64Grid>(); #endif }
418
C++
18.045454
79
0.717703
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyopenvdb.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file pyopenvdb.h /// /// @brief Glue functions for access to pyOpenVDB objects from C++ code /// @details Use these functions in your own Python function implementations /// to extract an OpenVDB grid from or wrap a grid in a @c PyObject. /// For example (using Boost.Python), /// @code /// #include <openvdb.h> /// #include <pyopenvdb.h> /// #include <boost/python.hpp> /// /// // Implementation of a Python function that processes pyOpenVDB grids /// boost::python::object /// processGrid(boost::python::object inObj) /// { /// boost::python::object outObj; /// try { /// // Extract an OpenVDB grid from the input argument. /// if (openvdb::GridBase::Ptr grid = /// pyopenvdb::getGridFromPyObject(inObj)) /// { /// grid = grid->deepCopyGrid(); /// /// // Process the grid... /// /// // Wrap the processed grid in a PyObject. /// outObj = pyopenvdb::getPyObjectFromGrid(grid); /// } /// } catch (openvdb::TypeError& e) { /// PyErr_Format(PyExc_TypeError, e.what()); /// boost::python::throw_error_already_set(); /// } /// return outObj; /// } /// /// BOOST_PYTHON_MODULE(mymodule) /// { /// openvdb::initialize(); /// /// // Definition of a Python function that processes pyOpenVDB grids /// boost::python::def(/*name=*/"processGrid", &processGrid, /*argname=*/"grid"); /// } /// @endcode /// Then, from Python, /// @code /// import openvdb /// import mymodule /// /// grid = openvdb.read('myGrid.vdb', 'MyGrid') /// grid = mymodule.processGrid(grid) /// openvdb.write('myProcessedGrid.vdb', [grid]) /// @endcode #ifndef PYOPENVDB_HAS_BEEN_INCLUDED #define PYOPENVDB_HAS_BEEN_INCLUDED #include <boost/python.hpp> #include "openvdb/Grid.h" namespace pyopenvdb { //@{ /// @brief Return a pointer to the OpenVDB grid held by the given Python object. /// @throw openvdb::TypeError if the Python object is not one of the pyOpenVDB grid types. 
/// (See the Python module's GridTypes global variable for the list of supported grid types.) openvdb::GridBase::Ptr getGridFromPyObject(PyObject*); openvdb::GridBase::Ptr getGridFromPyObject(const boost::python::object&); //@} /// @brief Return a new Python object that holds the given OpenVDB grid. /// @return @c None if the given grid pointer is null. /// @throw openvdb::TypeError if the grid is not of a supported type. /// (See the Python module's GridTypes global variable for the list of supported grid types.) boost::python::object getPyObjectFromGrid(const openvdb::GridBase::Ptr&); } // namespace pyopenvdb #endif // PYOPENVDB_HAS_BEEN_INCLUDED
2,745
C
32.084337
97
0.655738
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyFloatGrid.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file pyFloatGrid.cc /// @author Peter Cucka /// @brief Boost.Python wrappers for scalar, floating-point openvdb::Grid types #include "pyGrid.h" void exportFloatGrid(); /// Create a Python wrapper for each supported Grid type. void exportFloatGrid() { // Add a module-level list that gives the types of all supported Grid classes. py::scope().attr("GridTypes") = py::list(); #if defined(PY_OPENVDB_USE_NUMPY) && !defined(PY_OPENVDB_USE_BOOST_PYTHON_NUMPY) // Specify that py::numeric::array should refer to the Python type numpy.ndarray // (rather than the older Numeric.array). py::numeric::array::set_module_and_type("numpy", "ndarray"); #endif pyGrid::exportGrid<FloatGrid>(); #ifdef PY_OPENVDB_WRAP_ALL_GRID_TYPES pyGrid::exportGrid<DoubleGrid>(); #endif py::def("createLevelSetSphere", &pyGrid::createLevelSetSphere<FloatGrid>, (py::arg("radius"), py::arg("center")=openvdb::Coord(), py::arg("voxelSize")=1.0, py::arg("halfWidth")=openvdb::LEVEL_SET_HALF_WIDTH), "createLevelSetSphere(radius, center, voxelSize, halfWidth) -> FloatGrid\n\n" "Return a grid containing a narrow-band level set representation\n" "of a sphere."); }
1,318
C++
31.974999
89
0.685888
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyAccessor.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_PYACCESSOR_HAS_BEEN_INCLUDED #define OPENVDB_PYACCESSOR_HAS_BEEN_INCLUDED #include <boost/python.hpp> #include "openvdb/openvdb.h" #include "pyutil.h" namespace pyAccessor { namespace py = boost::python; using namespace openvdb::OPENVDB_VERSION_NAME; //@{ /// Type traits for grid accessors template<typename _GridT> struct AccessorTraits { using GridT = _GridT; using NonConstGridT = GridT; using GridPtrT = typename NonConstGridT::Ptr; using AccessorT = typename NonConstGridT::Accessor; using ValueT = typename AccessorT::ValueType; static const bool IsConst = false; static const char* typeName() { return "Accessor"; } static void setActiveState(AccessorT& acc, const Coord& ijk, bool on) { acc.setActiveState(ijk, on); } static void setValueOnly(AccessorT& acc, const Coord& ijk, const ValueT& val) { acc.setValueOnly(ijk, val); } static void setValueOn(AccessorT& acc, const Coord& ijk) { acc.setValueOn(ijk); } static void setValueOn(AccessorT& acc, const Coord& ijk, const ValueT& val) { acc.setValueOn(ijk, val); } static void setValueOff(AccessorT& acc, const Coord& ijk) { acc.setValueOff(ijk); } static void setValueOff(AccessorT& acc, const Coord& ijk, const ValueT& val) { acc.setValueOff(ijk, val); } }; // Partial specialization for const accessors template<typename _GridT> struct AccessorTraits<const _GridT> { using GridT = const _GridT; using NonConstGridT = _GridT; using GridPtrT = typename NonConstGridT::ConstPtr; using AccessorT = typename NonConstGridT::ConstAccessor; using ValueT = typename AccessorT::ValueType; static const bool IsConst = true; static const char* typeName() { return "ConstAccessor"; } static void setActiveState(AccessorT&, const Coord&, bool) { notWritable(); } static void setValueOnly(AccessorT&, const Coord&, const ValueT&) { notWritable(); } static void setValueOn(AccessorT&, const Coord&) { notWritable(); } static void 
setValueOn(AccessorT&, const Coord&, const ValueT&) { notWritable(); } static void setValueOff(AccessorT&, const Coord&) { notWritable(); } static void setValueOff(AccessorT&, const Coord&, const ValueT&) { notWritable(); } static void notWritable() { PyErr_SetString(PyExc_TypeError, "accessor is read-only"); py::throw_error_already_set(); } }; //@} //////////////////////////////////////// /// Variant of pyutil::extractArg() that extracts a Coord from a py::object /// argument to a given ValueAccessor method template<typename GridT> inline Coord extractCoordArg(py::object obj, const char* functionName, int argIdx = 0) { return pyutil::extractArg<Coord>(obj, functionName, AccessorTraits<GridT>::typeName(), argIdx, "tuple(int, int, int)"); } /// Variant of pyutil::extractArg() that extracts a value of type /// ValueAccessor::ValueType from an argument to a ValueAccessor method template<typename GridT> inline typename GridT::ValueType extractValueArg( py::object obj, const char* functionName, int argIdx = 0, // args are numbered starting from 1 const char* expectedType = nullptr) { return pyutil::extractArg<typename GridT::ValueType>( obj, functionName, AccessorTraits<GridT>::typeName(), argIdx, expectedType); } //////////////////////////////////////// /// @brief ValueAccessor wrapper class that also stores a grid pointer, /// so that the grid doesn't get deleted as long as the accessor is live /// /// @internal This class could have just been made to inherit from ValueAccessor, /// but the method wrappers allow for more Pythonic error messages. For example, /// if we constructed the Python getValue() method directly from the corresponding /// ValueAccessor method, as follows, /// /// .def("getValue", &Accessor::getValue, ...) /// /// then the conversion from a Python type to a Coord& would be done /// automatically. 
But if the Python method were called with an object of /// a type that is not convertible to a Coord, then the TypeError message /// would say something like "TypeError: No registered converter was able to /// produce a C++ rvalue of type openvdb::math::Coord...". /// Handling the type conversion manually is more work, but it allows us to /// instead generate messages like "TypeError: expected tuple(int, int, int), /// found str as argument to FloatGridAccessor.getValue()". template<typename _GridType> class AccessorWrap { public: using Traits = AccessorTraits<_GridType>; using Accessor = typename Traits::AccessorT; using ValueType = typename Traits::ValueT; using GridType = typename Traits::NonConstGridT; using GridPtrType = typename Traits::GridPtrT; AccessorWrap(GridPtrType grid): mGrid(grid), mAccessor(grid->getAccessor()) {} AccessorWrap copy() const { return *this; } void clear() { mAccessor.clear(); } GridPtrType parent() const { return mGrid; } ValueType getValue(py::object coordObj) { const Coord ijk = extractCoordArg<GridType>(coordObj, "getValue"); return mAccessor.getValue(ijk); } int getValueDepth(py::object coordObj) { const Coord ijk = extractCoordArg<GridType>(coordObj, "getValueDepth"); return mAccessor.getValueDepth(ijk); } int isVoxel(py::object coordObj) { const Coord ijk = extractCoordArg<GridType>(coordObj, "isVoxel"); return mAccessor.isVoxel(ijk); } py::tuple probeValue(py::object coordObj) { const Coord ijk = extractCoordArg<GridType>(coordObj, "probeValue"); ValueType value; bool on = mAccessor.probeValue(ijk, value); return py::make_tuple(value, on); } bool isValueOn(py::object coordObj) { const Coord ijk = extractCoordArg<GridType>(coordObj, "isValueOn"); return mAccessor.isValueOn(ijk); } void setActiveState(py::object coordObj, bool on) { const Coord ijk = extractCoordArg<GridType>(coordObj, "setActiveState", /*argIdx=*/1); Traits::setActiveState(mAccessor, ijk, on); } void setValueOnly(py::object coordObj, py::object valObj) { Coord 
ijk = extractCoordArg<GridType>(coordObj, "setValueOnly", 1); ValueType val = extractValueArg<GridType>(valObj, "setValueOnly", 2); Traits::setValueOnly(mAccessor, ijk, val); } void setValueOn(py::object coordObj, py::object valObj) { Coord ijk = extractCoordArg<GridType>(coordObj, "setValueOn", 1); if (valObj.is_none()) { Traits::setValueOn(mAccessor, ijk); } else { ValueType val = extractValueArg<GridType>(valObj, "setValueOn", 2); Traits::setValueOn(mAccessor, ijk, val); } } void setValueOff(py::object coordObj, py::object valObj) { Coord ijk = extractCoordArg<GridType>(coordObj, "setValueOff", 1); if (valObj.is_none()) { Traits::setValueOff(mAccessor, ijk); } else { ValueType val = extractValueArg<GridType>(valObj, "setValueOff", 2); Traits::setValueOff(mAccessor, ijk, val); } } int isCached(py::object coordObj) { const Coord ijk = extractCoordArg<GridType>(coordObj, "isCached"); return mAccessor.isCached(ijk); } /// @brief Define a Python wrapper class for this C++ class. static void wrap() { const std::string pyGridTypeName = pyutil::GridTraits<GridType>::name(), pyValueTypeName = openvdb::typeNameAsString<typename GridType::ValueType>(), pyAccessorTypeName = Traits::typeName(); py::class_<AccessorWrap> clss( pyAccessorTypeName.c_str(), (std::string(Traits::IsConst ? 
"Read-only" : "Read/write") + " access by (i, j, k) index coordinates to the voxels\nof a " + pyGridTypeName).c_str(), py::no_init); clss.def("copy", &AccessorWrap::copy, ("copy() -> " + pyAccessorTypeName + "\n\n" "Return a copy of this accessor.").c_str()) .def("clear", &AccessorWrap::clear, "clear()\n\n" "Clear this accessor of all cached data.") .add_property("parent", &AccessorWrap::parent, ("this accessor's parent " + pyGridTypeName).c_str()) // // Voxel access // .def("getValue", &AccessorWrap::getValue, py::arg("ijk"), ("getValue(ijk) -> " + pyValueTypeName + "\n\n" "Return the value of the voxel at coordinates (i, j, k).").c_str()) .def("getValueDepth", &AccessorWrap::getValueDepth, py::arg("ijk"), "getValueDepth(ijk) -> int\n\n" "Return the tree depth (0 = root) at which the value of voxel\n" "(i, j, k) resides. If (i, j, k) isn't explicitly represented in\n" "the tree (i.e., it is implicitly a background voxel), return -1.") .def("isVoxel", &AccessorWrap::isVoxel, py::arg("ijk"), "isVoxel(ijk) -> bool\n\n" "Return True if voxel (i, j, k) resides at the leaf level of the tree.") .def("probeValue", &AccessorWrap::probeValue, py::arg("ijk"), "probeValue(ijk) -> value, bool\n\n" "Return the value of the voxel at coordinates (i, j, k)\n" "together with the voxel's active state.") .def("isValueOn", &AccessorWrap::isValueOn, py::arg("ijk"), "isValueOn(ijk) -> bool\n\n" "Return the active state of the voxel at coordinates (i, j, k).") .def("setActiveState", &AccessorWrap::setActiveState, (py::arg("ijk"), py::arg("on")), "setActiveState(ijk, on)\n\n" "Mark voxel (i, j, k) as either active or inactive (True or False),\n" "but don't change its value.") .def("setValueOnly", &AccessorWrap::setValueOnly, (py::arg("ijk"), py::arg("value")), "setValueOnly(ijk, value)\n\n" "Set the value of voxel (i, j, k), but don't change its active state.") .def("setValueOn", &AccessorWrap::setValueOn, (py::arg("ijk"), py::arg("value") = py::object()), "setValueOn(ijk, 
value=None)\n\n" "Mark voxel (i, j, k) as active and, if the given value\n" "is not None, set the voxel's value.\n") .def("setValueOff", &AccessorWrap::setValueOff, (py::arg("ijk"), py::arg("value") = py::object()), "setValueOff(ijk, value=None)\n\n" "Mark voxel (i, j, k) as inactive and, if the given value\n" "is not None, set the voxel's value.") .def("isCached", &AccessorWrap::isCached, py::arg("ijk"), "isCached(ijk) -> bool\n\n" "Return True if this accessor has cached the path to voxel (i, j, k).") ; // py::class_<ValueAccessor> } private: const GridPtrType mGrid; Accessor mAccessor; }; // class AccessorWrap } // namespace pyAccessor #endif // OPENVDB_PYACCESSOR_HAS_BEEN_INCLUDED
11,521
C
35.811501
94
0.618002
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyOpenVDBModule.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <iostream> // must be included before python on macos #include <cstring> // for strncmp(), strrchr(), etc. #include <limits> #include <string> #include <utility> // for std::make_pair() #include <boost/python.hpp> #include <boost/python/stl_iterator.hpp> #include <boost/python/exception_translator.hpp> #include "openvdb/openvdb.h" #include "pyopenvdb.h" #include "pyGrid.h" #include "pyutil.h" namespace py = boost::python; // Forward declarations void exportTransform(); void exportMetadata(); void exportFloatGrid(); void exportIntGrid(); void exportVec3Grid(); void exportPointGrid(); namespace _openvdbmodule { using namespace openvdb; /// Helper class to convert between a Python numeric sequence /// (tuple, list, etc.) and an openvdb::Coord struct CoordConverter { /// @return a Python tuple object equivalent to the given Coord. static PyObject* convert(const openvdb::Coord& xyz) { py::object obj = py::make_tuple(xyz[0], xyz[1], xyz[2]); Py_INCREF(obj.ptr()); ///< @todo is this the right way to ensure that the object ///< doesn't get freed on exit? return obj.ptr(); } /// @return nullptr if the given Python object is not convertible to a Coord. static void* convertible(PyObject* obj) { if (!PySequence_Check(obj)) return nullptr; // not a Python sequence Py_ssize_t len = PySequence_Length(obj); if (len != 3 && len != 1) return nullptr; // not the right length return obj; } /// Convert from a Python object to a Coord. static void construct(PyObject* obj, py::converter::rvalue_from_python_stage1_data* data) { // Construct a Coord in the provided memory location. using StorageT = py::converter::rvalue_from_python_storage<openvdb::Coord>; void* storage = reinterpret_cast<StorageT*>(data)->storage.bytes; new (storage) openvdb::Coord; // placement new data->convertible = storage; openvdb::Coord* xyz = static_cast<openvdb::Coord*>(storage); // Populate the Coord. 
// Tail of CoordConverter::construct (the function header is above this chunk):
// dispatch on the Python sequence's length — a 1-tuple broadcasts one value to
// all three coordinates, a 3-tuple sets x, y, z individually.
switch (PySequence_Length(obj)) {
case 1:
    xyz->reset(pyutil::getSequenceItem<openvdb::Int32>(obj, 0));
    break;
case 3:
    xyz->reset(
        pyutil::getSequenceItem<openvdb::Int32>(obj, 0),
        pyutil::getSequenceItem<openvdb::Int32>(obj, 1),
        pyutil::getSequenceItem<openvdb::Int32>(obj, 2));
    break;
default:
    PyErr_Format(PyExc_ValueError, "expected a sequence of three integers");
    py::throw_error_already_set();
    break;
}
}

/// Register both the Coord-to-tuple and the sequence-to-Coord converters.
static void registerConverter()
{
    py::to_python_converter<openvdb::Coord, CoordConverter>();
    py::converter::registry::push_back(
        &CoordConverter::convertible,
        &CoordConverter::construct,
        py::type_id<openvdb::Coord>());
}
}; // struct CoordConverter

/// @todo CoordBBoxConverter?


////////////////////////////////////////


/// Helper class to convert between a Python numeric sequence
/// (tuple, list, etc.) and an openvdb::Vec
template<typename VecT>
struct VecConverter
{
    /// Convert the given Vec to a new Python object (tuple for the common
    /// 2/3/4-element sizes, list otherwise).  Returns a new reference, as
    /// required by boost::python's to-python converter protocol.
    static PyObject* convert(const VecT& v)
    {
        py::object obj;
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        switch (VecT::size) { // compile-time constant
        case 2: obj = py::make_tuple(v[0], v[1]); break;
        case 3: obj = py::make_tuple(v[0], v[1], v[2]); break;
        case 4: obj = py::make_tuple(v[0], v[1], v[2], v[3]); break;
        default:
            {
                // Fall back to a list for other (unusual) vector sizes.
                py::list lst;
                for (int n = 0; n < VecT::size; ++n) lst.append(v[n]);
                obj = lst;
            }
        }
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
        Py_INCREF(obj.ptr());
        return obj.ptr();
    }

    /// @return @a obj if it is a sequence of VecT::size elements, each of
    /// which is convertible to the Vec's value type; otherwise nullptr.
    static void* convertible(PyObject* obj)
    {
        if (!PySequence_Check(obj)) return nullptr; // not a Python sequence

        Py_ssize_t len = PySequence_Length(obj);
        if (len != VecT::size) return nullptr;

        // Check that all elements of the Python sequence are convertible
        // to the Vec's value type.
        py::object seq = pyutil::pyBorrow(obj);
        for (int i = 0; i < VecT::size; ++i) {
            if (!py::extract<typename VecT::value_type>(seq[i]).check()) {
                return nullptr;
            }
        }
        return obj;
    }

    /// Construct a VecT in the converter-supplied storage, populated from
    /// the elements of the given Python sequence.
    static void construct(PyObject* obj,
        py::converter::rvalue_from_python_stage1_data* data)
    {
        // Construct a Vec in the provided memory location.
        using StorageT = py::converter::rvalue_from_python_storage<VecT>;
        void* storage = reinterpret_cast<StorageT*>(data)->storage.bytes;
        new (storage) VecT; // placement new
        data->convertible = storage;

        VecT* v = static_cast<VecT*>(storage);

        // Populate the vector.
        for (int n = 0; n < VecT::size; ++n) {
            (*v)[n] = pyutil::getSequenceItem<typename VecT::value_type>(obj, n);
        }
    }

    /// Register both the Vec-to-sequence and the sequence-to-Vec converters.
    static void registerConverter()
    {
        py::to_python_converter<VecT, VecConverter<VecT> >();
        py::converter::registry::push_back(
            &VecConverter<VecT>::convertible,
            &VecConverter<VecT>::construct,
            py::type_id<VecT>());
    }
}; // struct VecConverter


////////////////////////////////////////


/// Helper class to convert between a 2D Python numeric sequence
/// (tuple, list, etc.) and an openvdb::Mat
template<typename MatT>
struct MatConverter
{
    /// Return the given matrix as a Python list of lists.
    static py::object toList(const MatT& m)
    {
        py::list obj;
        for (int i = 0; i < MatT::size; ++i) {
            py::list rowObj;
            for (int j = 0; j < MatT::size; ++j) { rowObj.append(m(i, j)); }
            obj.append(rowObj);
        }
        return std::move(obj);
    }

    /// Extract a matrix from a Python sequence of numeric sequences.
    /// NOTE(review): a row of the wrong length yields the zero matrix rather
    /// than an error — convertible() below guards against this for the
    /// registered converter path, but direct callers get no diagnostic.
    static MatT fromSeq(py::object obj)
    {
        MatT m = MatT::zero();
        if (py::len(obj) == MatT::size) {
            for (int i = 0; i < MatT::size; ++i) {
                py::object rowObj = obj[i];
                if (py::len(rowObj) != MatT::size) return MatT::zero();
                for (int j = 0; j < MatT::size; ++j) {
                    m(i, j) = py::extract<typename MatT::value_type>(rowObj[j]);
                }
            }
        }
        return m;
    }

    /// Convert the given matrix to a new Python list-of-lists.
    /// Returns a new reference, per the to-python converter protocol.
    static PyObject* convert(const MatT& m)
    {
        py::object obj = toList(m);
        Py_INCREF(obj.ptr());
        return obj.ptr();
    }

    /// @return @a obj if it is a MatT::size x MatT::size sequence of values
    /// convertible to the Mat's value type; otherwise nullptr.
    static void* convertible(PyObject* obj)
    {
        if (!PySequence_Check(obj)) return nullptr; // not a Python sequence

        Py_ssize_t len = PySequence_Length(obj);
        if (len != MatT::size) return nullptr;

        py::object seq = pyutil::pyBorrow(obj);
        for (int i = 0; i < MatT::size; ++i) {
            py::object rowObj = seq[i];
            if (py::len(rowObj) != MatT::size) return nullptr;
            // Check that all elements of the Python sequence are convertible
            // to the Mat's value type.
            for (int j = 0; j < MatT::size; ++j) {
                if (!py::extract<typename MatT::value_type>(rowObj[j]).check()) {
                    return nullptr;
                }
            }
        }
        return obj;
    }

    /// Construct a MatT in the converter-supplied storage, populated from
    /// the given Python sequence of sequences.
    static void construct(PyObject* obj,
        py::converter::rvalue_from_python_stage1_data* data)
    {
        // Construct a Mat in the provided memory location.
        using StorageT = py::converter::rvalue_from_python_storage<MatT>;
        void* storage = reinterpret_cast<StorageT*>(data)->storage.bytes;
        new (storage) MatT; // placement new
        data->convertible = storage;
        *(static_cast<MatT*>(storage)) = fromSeq(pyutil::pyBorrow(obj));
    }

    /// Register both the Mat-to-list and the sequence-to-Mat converters.
    static void registerConverter()
    {
        py::to_python_converter<MatT, MatConverter<MatT> >();
        py::converter::registry::push_back(
            &MatConverter<MatT>::convertible,
            &MatConverter<MatT>::construct,
            py::type_id<MatT>());
    }
}; // struct MatConverter


////////////////////////////////////////


/// Helper class to convert between a Python integer and a openvdb::PointIndex
template <typename PointIndexT>
struct PointIndexConverter
{
    using IntType = typename PointIndexT::IntType;

    /// @return a Python integer object equivalent to the given PointIndex.
    static PyObject* convert(const PointIndexT& index)
    {
        py::object obj(static_cast<IntType>(index));
        Py_INCREF(obj.ptr());
        return obj.ptr();
    }

    /// @return nullptr if the given Python object is not convertible to the PointIndex.
    static void* convertible(PyObject* obj)
    {
#if PY_MAJOR_VERSION >= 3
        if (!PyLong_Check(obj)) return nullptr; // not a Python integer
#else
        if (!PyInt_Check(obj)) return nullptr; // not a Python integer
#endif
        return obj;
    }

    /// Convert from a Python object to a PointIndex.
    static void construct(PyObject* obj,
        py::converter::rvalue_from_python_stage1_data* data)
    {
        // Construct a PointIndex in the provided memory location.
        using StorageT = py::converter::rvalue_from_python_storage<PointIndexT>;
        void* storage = reinterpret_cast<StorageT*>(data)->storage.bytes;
        new (storage) PointIndexT; // placement new
        data->convertible = storage;

        // Extract the PointIndex from the python integer
        PointIndexT* index = static_cast<PointIndexT*>(storage);
#if PY_MAJOR_VERSION >= 3
        *index = static_cast<IntType>(PyLong_AsLong(obj));
#else
        *index = static_cast<IntType>(PyInt_AsLong(obj));
#endif
    }

    /// Register both the PointIndex-to-integer and the integer-to-PointIndex converters.
    static void registerConverter()
    {
        py::to_python_converter<PointIndexT, PointIndexConverter>();
        py::converter::registry::push_back(
            &PointIndexConverter::convertible,
            &PointIndexConverter::construct,
            py::type_id<PointIndexT>());
    }
}; // struct PointIndexConverter


////////////////////////////////////////


/// Helper class to convert between a Python dict and an openvdb::MetaMap
/// @todo Consider implementing a separate, templated converter for
/// the various Metadata types.
struct MetaMapConverter
{
    /// Convert the given MetaMap to a new Python dict, mapping each metadata
    /// name to a natural Python value (str, float, int, bool, tuple, or
    /// list-of-lists for matrices).  Metadata of an unrecognized type is
    /// stored as a wrapped Metadata::Ptr object.  Returns a new reference.
    static PyObject* convert(const MetaMap& metaMap)
    {
        py::dict ret;
        for (MetaMap::ConstMetaIterator it = metaMap.beginMeta();
            it != metaMap.endMeta(); ++it)
        {
            if (Metadata::Ptr meta = it->second) {
                // Default: expose the raw Metadata object; the branches below
                // replace it with a native Python value for known types.
                py::object obj(meta);
                const std::string typeName = meta->typeName();
                if (typeName == StringMetadata::staticTypeName()) {
                    obj = py::str(static_cast<StringMetadata&>(*meta).value());
                } else if (typeName == DoubleMetadata::staticTypeName()) {
                    obj = py::object(static_cast<DoubleMetadata&>(*meta).value());
                } else if (typeName == FloatMetadata::staticTypeName()) {
                    obj = py::object(static_cast<FloatMetadata&>(*meta).value());
                } else if (typeName == Int32Metadata::staticTypeName()) {
                    obj = py::object(static_cast<Int32Metadata&>(*meta).value());
                } else if (typeName == Int64Metadata::staticTypeName()) {
                    obj = py::object(static_cast<Int64Metadata&>(*meta).value());
                } else if (typeName == BoolMetadata::staticTypeName()) {
                    obj = py::object(static_cast<BoolMetadata&>(*meta).value());
                } else if (typeName == Vec2DMetadata::staticTypeName()) {
                    const Vec2d v = static_cast<Vec2DMetadata&>(*meta).value();
                    obj = py::make_tuple(v[0], v[1]);
                } else if (typeName == Vec2IMetadata::staticTypeName()) {
                    const Vec2i v = static_cast<Vec2IMetadata&>(*meta).value();
                    obj = py::make_tuple(v[0], v[1]);
                } else if (typeName == Vec2SMetadata::staticTypeName()) {
                    const Vec2s v = static_cast<Vec2SMetadata&>(*meta).value();
                    obj = py::make_tuple(v[0], v[1]);
                } else if (typeName == Vec3DMetadata::staticTypeName()) {
                    const Vec3d v = static_cast<Vec3DMetadata&>(*meta).value();
                    obj = py::make_tuple(v[0], v[1], v[2]);
                } else if (typeName == Vec3IMetadata::staticTypeName()) {
                    const Vec3i v = static_cast<Vec3IMetadata&>(*meta).value();
                    obj = py::make_tuple(v[0], v[1], v[2]);
                } else if (typeName == Vec3SMetadata::staticTypeName()) {
                    const Vec3s v = static_cast<Vec3SMetadata&>(*meta).value();
                    obj = py::make_tuple(v[0], v[1], v[2]);
                } else if (typeName == Vec4DMetadata::staticTypeName()) {
                    const Vec4d v = static_cast<Vec4DMetadata&>(*meta).value();
                    obj = py::make_tuple(v[0], v[1], v[2], v[3]);
                } else if (typeName == Vec4IMetadata::staticTypeName()) {
                    const Vec4i v = static_cast<Vec4IMetadata&>(*meta).value();
                    obj = py::make_tuple(v[0], v[1], v[2], v[3]);
                } else if (typeName == Vec4SMetadata::staticTypeName()) {
                    const Vec4s v = static_cast<Vec4SMetadata&>(*meta).value();
                    obj = py::make_tuple(v[0], v[1], v[2], v[3]);
                } else if (typeName == Mat4SMetadata::staticTypeName()) {
                    const Mat4s m = static_cast<Mat4SMetadata&>(*meta).value();
                    obj = MatConverter<Mat4s>::toList(m);
                } else if (typeName == Mat4DMetadata::staticTypeName()) {
                    const Mat4d m = static_cast<Mat4DMetadata&>(*meta).value();
                    obj = MatConverter<Mat4d>::toList(m);
                }
                ret[it->first] = obj;
            }
        }
        Py_INCREF(ret.ptr());
        return ret.ptr();
    }

    /// @return @a obj if it supports the Python mapping protocol, else nullptr.
    static void* convertible(PyObject* obj)
    {
        return (PyMapping_Check(obj) ? obj : nullptr);
    }

    /// Construct a MetaMap in the converter-supplied storage, populated from
    /// the given Python mapping.  Raises TypeError (via boost::python) for
    /// non-string keys or values of unsupported types.
    static void construct(PyObject* obj,
        py::converter::rvalue_from_python_stage1_data* data)
    {
        // Construct a MetaMap in the provided memory location.
        using StorageT = py::converter::rvalue_from_python_storage<MetaMap>;
        void* storage = reinterpret_cast<StorageT*>(data)->storage.bytes;
        new (storage) MetaMap; // placement new
        data->convertible = storage;
        MetaMap* metaMap = static_cast<MetaMap*>(storage);

        // Populate the map.
        py::dict pyDict(pyutil::pyBorrow(obj));
        py::list keys = pyDict.keys();
        for (size_t i = 0, N = py::len(keys); i < N; ++i) {
            // Metadata names must be strings; anything else is a TypeError.
            std::string name;
            py::object key = keys[i];
            if (py::extract<std::string>(key).check()) {
                name = py::extract<std::string>(key);
            } else {
                const std::string
                    keyAsStr = py::extract<std::string>(key.attr("__str__")()),
                    keyType = pyutil::className(key);
                PyErr_Format(PyExc_TypeError,
                    "expected string as metadata name, found object"
                    " \"%s\" of type %s", keyAsStr.c_str(), keyType.c_str());
                py::throw_error_already_set();
            }

            // Note: the order of the following tests is significant, as it
            // avoids unnecessary type promotion (e.g., of ints to floats).
            py::object val = pyDict[keys[i]];
            Metadata::Ptr value;
            if (py::extract<std::string>(val).check()) {
                value.reset(new StringMetadata(py::extract<std::string>(val)));
            } else if (bool(PyBool_Check(val.ptr()))) {
                // Checked before the integer branch: a Python bool would
                // otherwise extract successfully as Int64.
                value.reset(new BoolMetadata(py::extract<bool>(val)));
            } else if (py::extract<Int64>(val).check()) {
                // Prefer the narrower Int32 metadata when the value fits.
                const Int64 n = py::extract<Int64>(val);
                if (n <= std::numeric_limits<Int32>::max()
                    && n >= std::numeric_limits<Int32>::min())
                {
                    value.reset(new Int32Metadata(static_cast<Int32>(n)));
                } else {
                    value.reset(new Int64Metadata(n));
                }
            //} else if (py::extract<float>(val).check()) {
            //    value.reset(new FloatMetadata(py::extract<float>(val)));
            } else if (py::extract<double>(val).check()) {
                value.reset(new DoubleMetadata(py::extract<double>(val)));
            } else if (py::extract<Vec2i>(val).check()) {
                value.reset(new Vec2IMetadata(py::extract<Vec2i>(val)));
            } else if (py::extract<Vec2d>(val).check()) {
                value.reset(new Vec2DMetadata(py::extract<Vec2d>(val)));
            } else if (py::extract<Vec2s>(val).check()) {
                value.reset(new Vec2SMetadata(py::extract<Vec2s>(val)));
            } else if (py::extract<Vec3i>(val).check()) {
                value.reset(new Vec3IMetadata(py::extract<Vec3i>(val)));
            } else if (py::extract<Vec3d>(val).check()) {
                value.reset(new Vec3DMetadata(py::extract<Vec3d>(val)));
            } else if (py::extract<Vec3s>(val).check()) {
                value.reset(new Vec3SMetadata(py::extract<Vec3s>(val)));
            } else if (py::extract<Vec4i>(val).check()) {
                value.reset(new Vec4IMetadata(py::extract<Vec4i>(val)));
            } else if (py::extract<Vec4d>(val).check()) {
                value.reset(new Vec4DMetadata(py::extract<Vec4d>(val)));
            } else if (py::extract<Vec4s>(val).check()) {
                value.reset(new Vec4SMetadata(py::extract<Vec4s>(val)));
            } else if (py::extract<Mat4d>(val).check()) {
                value.reset(new Mat4DMetadata(py::extract<Mat4d>(val)));
            } else if (py::extract<Mat4s>(val).check()) {
                value.reset(new Mat4SMetadata(py::extract<Mat4s>(val)));
            } else if (py::extract<Metadata::Ptr>(val).check()) {
                value = py::extract<Metadata::Ptr>(val);
            } else {
                const std::string
                    valAsStr = py::extract<std::string>(val.attr("__str__")()),
                    valType = pyutil::className(val);
                PyErr_Format(PyExc_TypeError,
                    "metadata value \"%s\" of type %s is not allowed",
                    valAsStr.c_str(), valType.c_str());
                py::throw_error_already_set();
            }
            if (value) metaMap->insertMeta(name, *value);
        }
    }

    /// Register both the MetaMap-to-dict and the mapping-to-MetaMap converters.
    static void registerConverter()
    {
        py::to_python_converter<MetaMap, MetaMapConverter>();
        py::converter::registry::push_back(
            &MetaMapConverter::convertible,
            &MetaMapConverter::construct,
            py::type_id<MetaMap>());
    }
}; // struct MetaMapConverter


////////////////////////////////////////


// Primary template: a no-op translator, specialized below for each
// OpenVDB exception type via the PYOPENVDB_CATCH macro.
template<typename T> void translateException(const T&) {}

/// @brief Define a function that translates an OpenVDB exception into
/// the equivalent Python exception.
/// @details openvdb::Exception::what() typically returns a string of the form
/// "<exception>: <description>". To avoid duplication of the exception name in Python
/// stack traces, the function strips off the "<exception>: " prefix. To do that,
/// it needs the class name in the form of a string, hence the preprocessor macro.
#define PYOPENVDB_CATCH(_openvdbname, _pyname) \
    template<> \
    void translateException<_openvdbname>(const _openvdbname& e) \
    { \
        const char* name = #_openvdbname; \
        if (const char* c = std::strrchr(name, ':')) name = c + 1; \
        const int namelen = int(std::strlen(name)); \
        const char* msg = e.what(); \
        if (0 == std::strncmp(msg, name, namelen)) msg += namelen; \
        if (0 == std::strncmp(msg, ": ", 2)) msg += 2; \
        PyErr_SetString(_pyname, msg); \
    }

/// Define an overloaded function that translate all OpenVDB exceptions into
/// their Python equivalents.
/// @todo LookupError is redundant and should someday be removed.
// Instantiate one translateException<> specialization per OpenVDB exception.
PYOPENVDB_CATCH(openvdb::ArithmeticError, PyExc_ArithmeticError)
PYOPENVDB_CATCH(openvdb::IndexError, PyExc_IndexError)
PYOPENVDB_CATCH(openvdb::IoError, PyExc_IOError)
PYOPENVDB_CATCH(openvdb::KeyError, PyExc_KeyError)
PYOPENVDB_CATCH(openvdb::LookupError, PyExc_LookupError)
PYOPENVDB_CATCH(openvdb::NotImplementedError, PyExc_NotImplementedError)
PYOPENVDB_CATCH(openvdb::ReferenceError, PyExc_ReferenceError)
PYOPENVDB_CATCH(openvdb::RuntimeError, PyExc_RuntimeError)
PYOPENVDB_CATCH(openvdb::TypeError, PyExc_TypeError)
PYOPENVDB_CATCH(openvdb::ValueError, PyExc_ValueError)

#undef PYOPENVDB_CATCH


////////////////////////////////////////


// Forward declarations of the module-level .vdb file I/O functions.
py::object readFromFile(const std::string&, const std::string&);
py::tuple readAllFromFile(const std::string&);
py::dict readFileMetadata(const std::string&);
py::object readGridMetadataFromFile(const std::string&, const std::string&);
py::list readAllGridMetadataFromFile(const std::string&);
void writeToFile(const std::string&, py::object, py::object);


/// Read the named grid from the given .vdb file and return it as a
/// Python grid object.  Raises KeyError if the grid is not present.
py::object
readFromFile(const std::string& filename, const std::string& gridName)
{
    io::File vdbFile(filename);
    vdbFile.open();

    if (!vdbFile.hasGrid(gridName)) {
        PyErr_Format(PyExc_KeyError,
            "file %s has no grid named \"%s\"",
            filename.c_str(), gridName.c_str());
        py::throw_error_already_set();
    }

    return pyGrid::getGridFromGridBase(vdbFile.readGrid(gridName));
}


/// Read all grids and the file-level metadata from the given .vdb file;
/// return them as a (list of grids, metadata dict) pair.
py::tuple
readAllFromFile(const std::string& filename)
{
    io::File vdbFile(filename);
    vdbFile.open();
    GridPtrVecPtr grids = vdbFile.getGrids();
    MetaMap::Ptr metadata = vdbFile.getMetadata();
    vdbFile.close();

    py::list gridList;
    for (GridPtrVec::const_iterator it = grids->begin();
        it != grids->end(); ++it)
    {
        gridList.append(pyGrid::getGridFromGridBase(*it));
    }

    return py::make_tuple(gridList, py::dict(*metadata));
}


/// Return the file-level metadata of the given .vdb file as a Python dict.
py::dict
readFileMetadata(const std::string& filename)
{
    io::File vdbFile(filename);
    vdbFile.open();
    MetaMap::Ptr metadata = vdbFile.getMetadata();
    vdbFile.close();

    return py::dict(*metadata);
}


/// Read only the metadata and transform (not the tree) of the named grid
/// from the given .vdb file.  Raises KeyError if the grid is not present.
py::object
readGridMetadataFromFile(const std::string& filename, const std::string& gridName)
{
    io::File vdbFile(filename);
    vdbFile.open();

    if (!vdbFile.hasGrid(gridName)) {
        PyErr_Format(PyExc_KeyError,
            "file %s has no grid named \"%s\"",
            filename.c_str(), gridName.c_str());
        py::throw_error_already_set();
    }

    return pyGrid::getGridFromGridBase(vdbFile.readGridMetadata(gridName));
}


/// Read the metadata and transforms (not the trees) of all grids in the
/// given .vdb file and return them as a Python list of grids.
py::list
readAllGridMetadataFromFile(const std::string& filename)
{
    io::File vdbFile(filename);
    vdbFile.open();
    GridPtrVecPtr grids = vdbFile.readAllGridMetadata();
    vdbFile.close();

    py::list gridList;
    for (GridPtrVec::const_iterator it = grids->begin();
        it != grids->end(); ++it)
    {
        gridList.append(pyGrid::getGridFromGridBase(*it));
    }
    return gridList;
}


/// Write a single grid or a sequence of grids — plus optional file-level
/// metadata (a dict or None) — to the given .vdb file.
void
writeToFile(const std::string& filename, py::object gridOrSeqObj, py::object dictObj)
{
    GridPtrVec gridVec;
    try {
        // First assume a single grid was passed...
        GridBase::Ptr base = pyopenvdb::getGridFromPyObject(gridOrSeqObj);
        gridVec.push_back(base);
    } catch (openvdb::TypeError&) {
        // ...otherwise treat the argument as an iterable of grids.
        for (py::stl_input_iterator<py::object> it(gridOrSeqObj), end;
            it != end; ++it)
        {
            if (GridBase::Ptr base = pyGrid::getGridBaseFromGrid(*it)) {
                gridVec.push_back(base);
            }
        }
    }

    io::File vdbFile(filename);
    if (dictObj.is_none()) {
        vdbFile.write(gridVec);
    } else {
        MetaMap metadata = py::extract<MetaMap>(dictObj);
        vdbFile.write(gridVec, metadata);
    }
    vdbFile.close();
}


////////////////////////////////////////


std::string getLoggingLevel();
void setLoggingLevel(py::object);
void setProgramName(py::object, bool);


/// Return the current logging severity threshold as a lowercase string.
std::string
getLoggingLevel()
{
    switch (logging::getLevel()) {
        case logging::Level::Debug: return "debug";
        case logging::Level::Info: return "info";
        case logging::Level::Warn: return "warn";
        case logging::Level::Error: return "error";
        case logging::Level::Fatal: break;
    }
    return "fatal";
}


/// Set the logging severity threshold from a Python string such as "info"
/// (case-insensitive; leading '-' characters are stripped, so "-info" also
/// works).  Raises ValueError for unrecognized levels or non-string input.
void
setLoggingLevel(py::object pyLevelObj)
{
    std::string levelStr;
    if (!py::extract<py::str>(pyLevelObj).check()) {
        // Not a string: stringify only for the error message below.
        levelStr = py::extract<std::string>(pyLevelObj.attr("__str__")());
    } else {
        const py::str pyLevelStr =
            py::extract<py::str>(pyLevelObj.attr("lower")().attr("lstrip")("-"));
        levelStr = py::extract<std::string>(pyLevelStr);
        if (levelStr == "debug") { logging::setLevel(logging::Level::Debug); return; }
        else if (levelStr == "info") { logging::setLevel(logging::Level::Info); return; }
        else if (levelStr == "warn") { logging::setLevel(logging::Level::Warn); return; }
        else if (levelStr == "error") { logging::setLevel(logging::Level::Error); return; }
        else if (levelStr == "fatal") { logging::setLevel(logging::Level::Fatal); return; }
    }

    PyErr_Format(PyExc_ValueError,
        "expected logging level \"debug\", \"info\", \"warn\", \"error\", or \"fatal\","
        " got \"%s\"", levelStr.c_str());
    py::throw_error_already_set();
}


/// Set the program name shown in log messages, optionally with colored
/// output.  Raises TypeError if the name is not a string.
void
setProgramName(py::object nameObj, bool color)
{
    if (py::extract<std::string>(nameObj).check()) {
        logging::setProgramName(py::extract<std::string>(nameObj), color);
    } else {
        const std::string
            str = py::extract<std::string>(nameObj.attr("__str__")()),
            typ = pyutil::className(nameObj).c_str();
        PyErr_Format(PyExc_TypeError,
            "expected string as program name, got \"%s\" of type %s",
            str.c_str(), typ.c_str());
        py::throw_error_already_set();
    }
}


////////////////////////////////////////


// Descriptor for the openvdb::GridClass enum (for use with pyutil::StringEnum)
struct GridClassDescr
{
    static const char* name() { return "GridClass"; }
    static const char* doc()
    {
        return "Classes of volumetric data (level set, fog volume, etc.)";
    }
    static pyutil::CStringPair item(int i)
    {
        static const int sCount = 4;
        // strdup'd once into static storage; intentionally never freed.
        static const char* const sStrings[sCount][2] = {
            { "UNKNOWN", strdup(GridBase::gridClassToString(GRID_UNKNOWN).c_str()) },
            { "LEVEL_SET", strdup(GridBase::gridClassToString(GRID_LEVEL_SET).c_str()) },
            { "FOG_VOLUME", strdup(GridBase::gridClassToString(GRID_FOG_VOLUME).c_str()) },
            { "STAGGERED", strdup(GridBase::gridClassToString(GRID_STAGGERED).c_str()) }
        };
        if (i >= 0 && i < sCount) return pyutil::CStringPair(&sStrings[i][0], &sStrings[i][1]);
        return pyutil::CStringPair(static_cast<char**>(nullptr), static_cast<char**>(nullptr));
    }
};


// Descriptor for the openvdb::VecType enum (for use with pyutil::StringEnum)
struct VecTypeDescr
{
    static const char* name() { return "VectorType"; }
    static const char* doc()
    {
        return
            "The type of a vector determines how transforms are applied to it.\n"
            " - INVARIANT:\n"
            " does not transform (e.g., tuple, uvw, color)\n"
            " - COVARIANT:\n"
            " apply inverse-transpose transformation with w = 0\n"
            " and ignore translation (e.g., gradient/normal)\n"
            " - COVARIANT_NORMALIZE:\n"
            " apply inverse-transpose transformation with w = 0\n"
            " and ignore translation, vectors are renormalized\n"
            " (e.g., unit normal)\n"
            " - CONTRAVARIANT_RELATIVE:\n"
            " apply \"regular\" transformation with w = 0 and ignore\n"
            " translation (e.g., displacement, velocity, acceleration)\n"
            " - CONTRAVARIANT_ABSOLUTE:\n"
            " apply \"regular\" transformation with w = 1 so that\n"
            " vector translates (e.g., position)\n";
    }
    static pyutil::CStringPair item(int i)
    {
        static const int sCount = 5;
        // strdup'd once into static storage; intentionally never freed.
        static const char* const sStrings[sCount][2] = {
            { "INVARIANT", strdup(GridBase::vecTypeToString(openvdb::VEC_INVARIANT).c_str()) },
            { "COVARIANT", strdup(GridBase::vecTypeToString(openvdb::VEC_COVARIANT).c_str()) },
            { "COVARIANT_NORMALIZE",
                strdup(GridBase::vecTypeToString(openvdb::VEC_COVARIANT_NORMALIZE).c_str()) },
            { "CONTRAVARIANT_RELATIVE",
                strdup(GridBase::vecTypeToString(openvdb::VEC_CONTRAVARIANT_RELATIVE).c_str()) },
            { "CONTRAVARIANT_ABSOLUTE",
                strdup(GridBase::vecTypeToString(openvdb::VEC_CONTRAVARIANT_ABSOLUTE).c_str()) }
        };
        if (i >= 0 && i < sCount) return std::make_pair(&sStrings[i][0], &sStrings[i][1]);
        return pyutil::CStringPair(static_cast<char**>(nullptr), static_cast<char**>(nullptr));
    }
};

} // namespace _openvdbmodule


////////////////////////////////////////


#ifdef DWA_OPENVDB
#define PY_OPENVDB_MODULE_NAME _openvdb
extern "C" { void init_openvdb(); }
#else
#define PY_OPENVDB_MODULE_NAME pyopenvdb
extern "C" { void initpyopenvdb(); }
#endif

// Module entry point: registers converters and exception translators,
// exports all grid/transform/metadata bindings, and defines the
// module-level I/O and logging functions and constants.
BOOST_PYTHON_MODULE(PY_OPENVDB_MODULE_NAME)
{
    // Don't auto-generate ugly, C++-style function signatures.
    py::docstring_options docOptions;
    docOptions.disable_signatures();
    docOptions.enable_user_defined();

#ifdef PY_OPENVDB_USE_NUMPY
    // Initialize NumPy.
#ifdef PY_OPENVDB_USE_BOOST_PYTHON_NUMPY
    boost::python::numpy::initialize();
#else
#if PY_MAJOR_VERSION >= 3
    if (_import_array()) {}
#else
    import_array();
#endif
#endif
#endif

    using namespace openvdb::OPENVDB_VERSION_NAME;

    // Initialize OpenVDB.
    initialize();

    _openvdbmodule::CoordConverter::registerConverter();

    _openvdbmodule::VecConverter<Vec2i>::registerConverter();
    _openvdbmodule::VecConverter<Vec2I>::registerConverter();
    _openvdbmodule::VecConverter<Vec2s>::registerConverter();
    _openvdbmodule::VecConverter<Vec2d>::registerConverter();

    _openvdbmodule::VecConverter<Vec3i>::registerConverter();
    _openvdbmodule::VecConverter<Vec3I>::registerConverter();
    _openvdbmodule::VecConverter<Vec3s>::registerConverter();
    _openvdbmodule::VecConverter<Vec3d>::registerConverter();

    _openvdbmodule::VecConverter<Vec4i>::registerConverter();
    _openvdbmodule::VecConverter<Vec4I>::registerConverter();
    _openvdbmodule::VecConverter<Vec4s>::registerConverter();
    _openvdbmodule::VecConverter<Vec4d>::registerConverter();

    _openvdbmodule::MatConverter<Mat4s>::registerConverter();
    _openvdbmodule::MatConverter<Mat4d>::registerConverter();

    _openvdbmodule::PointIndexConverter<PointDataIndex32>::registerConverter();

    _openvdbmodule::MetaMapConverter::registerConverter();

#define PYOPENVDB_TRANSLATE_EXCEPTION(_classname) \
    py::register_exception_translator<_classname>(&_openvdbmodule::translateException<_classname>)

    PYOPENVDB_TRANSLATE_EXCEPTION(ArithmeticError);
    PYOPENVDB_TRANSLATE_EXCEPTION(IndexError);
    PYOPENVDB_TRANSLATE_EXCEPTION(IoError);
    PYOPENVDB_TRANSLATE_EXCEPTION(KeyError);
    PYOPENVDB_TRANSLATE_EXCEPTION(LookupError);
    PYOPENVDB_TRANSLATE_EXCEPTION(NotImplementedError);
    PYOPENVDB_TRANSLATE_EXCEPTION(ReferenceError);
    PYOPENVDB_TRANSLATE_EXCEPTION(RuntimeError);
    PYOPENVDB_TRANSLATE_EXCEPTION(TypeError);
    PYOPENVDB_TRANSLATE_EXCEPTION(ValueError);

#undef PYOPENVDB_TRANSLATE_EXCEPTION

    // Export the python bindings.
    exportTransform();
    exportMetadata();
    exportFloatGrid();
    exportIntGrid();
    exportVec3Grid();
    exportPointGrid();

    py::def("read",
        &_openvdbmodule::readFromFile,
        (py::arg("filename"), py::arg("gridname")),
        "read(filename, gridname) -> Grid\n\n"
        "Read a single grid from a .vdb file.");

    py::def("readAll",
        &_openvdbmodule::readAllFromFile,
        py::arg("filename"),
        "readAll(filename) -> list, dict\n\n"
        "Read a .vdb file and return a list of grids and\n"
        "a dict of file-level metadata.");

    py::def("readMetadata",
        &_openvdbmodule::readFileMetadata,
        py::arg("filename"),
        "readMetadata(filename) -> dict\n\n"
        "Read file-level metadata from a .vdb file.");

    py::def("readGridMetadata",
        &_openvdbmodule::readGridMetadataFromFile,
        (py::arg("filename"), py::arg("gridname")),
        "readGridMetadata(filename, gridname) -> Grid\n\n"
        "Read a single grid's metadata and transform (but not its tree)\n"
        "from a .vdb file.");

    py::def("readAllGridMetadata",
        &_openvdbmodule::readAllGridMetadataFromFile,
        py::arg("filename"),
        "readAllGridMetadata(filename) -> list\n\n"
        "Read a .vdb file and return a list of grids populated with\n"
        "their metadata and transforms, but not their trees.");

    py::def("write",
        &_openvdbmodule::writeToFile,
        (py::arg("filename"), py::arg("grids"), py::arg("metadata") = py::object()),
        "write(filename, grids, metadata=None)\n\n"
        "Write a grid or a sequence of grids and, optionally, a dict\n"
        "of (name, value) metadata pairs to a .vdb file.");

    py::def("getLoggingLevel", &_openvdbmodule::getLoggingLevel,
        "getLoggingLevel() -> str\n\n"
        "Return the severity threshold (\"debug\", \"info\", \"warn\", \"error\",\n"
        "or \"fatal\") for error messages.");
    py::def("setLoggingLevel", &_openvdbmodule::setLoggingLevel,
        (py::arg("level")),
        "setLoggingLevel(level)\n\n"
        "Specify the severity threshold (\"debug\", \"info\", \"warn\", \"error\",\n"
        "or \"fatal\") for error messages. Messages of lower severity\n"
        "will be suppressed.");
    py::def("setProgramName", &_openvdbmodule::setProgramName,
        (py::arg("name"), py::arg("color") = true),
        "setProgramName(name, color=True)\n\n"
        "Specify the program name to be displayed in error messages,\n"
        "and optionally specify whether to print error messages in color.");

    // Add some useful module-level constants.
    py::scope().attr("LIBRARY_VERSION") = py::make_tuple(
        openvdb::OPENVDB_LIBRARY_MAJOR_VERSION,
        openvdb::OPENVDB_LIBRARY_MINOR_VERSION,
        openvdb::OPENVDB_LIBRARY_PATCH_VERSION);
    py::scope().attr("FILE_FORMAT_VERSION") = openvdb::OPENVDB_FILE_VERSION;
    py::scope().attr("COORD_MIN") = openvdb::Coord::min();
    py::scope().attr("COORD_MAX") = openvdb::Coord::max();
    py::scope().attr("LEVEL_SET_HALF_WIDTH") = openvdb::LEVEL_SET_HALF_WIDTH;

    pyutil::StringEnum<_openvdbmodule::GridClassDescr>::wrap();
    pyutil::StringEnum<_openvdbmodule::VecTypeDescr>::wrap();

} // BOOST_PYTHON_MODULE
35,818
C++
36.903704
98
0.58465
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyTransform.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include <boost/python.hpp>
#include "openvdb/openvdb.h"
#include "pyutil.h"

namespace py = boost::python;
using namespace openvdb::OPENVDB_VERSION_NAME;

namespace pyTransform {

// Free-function wrappers for math::Transform members.  NOTE(review): these
// presumably exist to disambiguate overloads for boost::python binding —
// confirm against exportTransform() below.
inline void scale1(math::Transform& t, double s) { t.preScale(s); }
inline void scale3(math::Transform& t, const Vec3d& xyz) { t.preScale(xyz); }

inline Vec3d voxelDim0(math::Transform& t) { return t.voxelSize(); }
inline Vec3d voxelDim1(math::Transform& t, const Vec3d& p) { return t.voxelSize(p); }

inline double voxelVolume0(math::Transform& t) { return t.voxelVolume(); }
inline double voxelVolume1(math::Transform& t, const Vec3d& p) { return t.voxelVolume(p); }

inline Vec3d indexToWorld(math::Transform& t, const Vec3d& p) { return t.indexToWorld(p); }
inline Vec3d worldToIndex(math::Transform& t, const Vec3d& p) { return t.worldToIndex(p); }

inline Coord worldToIndexCellCentered(math::Transform& t, const Vec3d& p) {
    return t.worldToIndexCellCentered(p);
}
inline Coord worldToIndexNodeCentered(math::Transform& t, const Vec3d& p) {
    return t.worldToIndexNodeCentered(p);
}

/// Return a human-readable description of the given transform.
inline std::string
info(math::Transform& t)
{
    std::ostringstream ostr;
    t.print(ostr);
    return ostr.str();
}

/// Create a linear transform with uniform voxel size @a dim.
inline math::Transform::Ptr
createLinearFromDim(double dim)
{
    return math::Transform::createLinearTransform(dim);
}

/// Create a linear transform from a Python 4 x 4 sequence of numbers.
/// Raises ValueError if @a obj is not such a sequence.
inline math::Transform::Ptr
createLinearFromMat(py::object obj)
{
    Mat4R m;

    // Verify that obj is a four-element sequence.
    bool is4x4Seq = (PySequence_Check(obj.ptr()) && PySequence_Length(obj.ptr()) == 4);

    if (is4x4Seq) {
        for (int row = 0; is4x4Seq && row < 4; ++row) {
            // Verify that each element of obj is itself a four-element sequence.
            py::object rowObj = obj[row];
            if (PySequence_Check(rowObj.ptr()) && PySequence_Length(rowObj.ptr()) == 4) {
                // Extract four numeric values from this row of the sequence.
                for (int col = 0; is4x4Seq && col < 4; ++col) {
                    if (py::extract<double>(rowObj[col]).check()) {
                        m[row][col] = py::extract<double>(rowObj[col]);
                    } else {
                        is4x4Seq = false;
                    }
                }
            } else {
                is4x4Seq = false;
            }
        }
    }
    if (!is4x4Seq) {
        PyErr_Format(PyExc_ValueError, "expected a 4 x 4 sequence of numeric values");
        py::throw_error_already_set();
    }

    return math::Transform::createLinearTransform(m);
}

/// Create a frustum transform from an index-space bounding box,
/// taper and depth, and an optional voxel size.
inline math::Transform::Ptr
createFrustum(const Coord& xyzMin, const Coord& xyzMax,
    double taper, double depth, double voxelDim = 1.0)
{
    return math::Transform::createFrustumTransform(
        BBoxd(xyzMin.asVec3d(), xyzMax.asVec3d()), taper, depth, voxelDim);
}


////////////////////////////////////////


// Pickle support for math::Transform: the state tuple holds the Python
// __dict__, the library/file format version numbers, and the Transform
// serialized to a byte string.
struct PickleSuite: public py::pickle_suite
{
    // Indices into the state tuple produced by getstate().
    enum { STATE_DICT = 0, STATE_MAJOR, STATE_MINOR, STATE_FORMAT, STATE_XFORM };

    /// Return @c true, indicating that this pickler preserves a Transform's __dict__.
    static bool getstate_manages_dict() { return true; }

    /// Return a tuple representing the state of the given Transform.
    static py::tuple getstate(py::object xformObj)
    {
        py::tuple state;

        py::extract<math::Transform> x(xformObj);
        if (x.check()) {
            // Extract a Transform from the Python object.
            math::Transform xform = x();
            std::ostringstream ostr(std::ios_base::binary);
            // Serialize the Transform to a string.
            xform.write(ostr);

            // Construct a state tuple comprising the Python object's __dict__,
            // the version numbers of the serialization format,
            // and the serialized Transform.
#if PY_MAJOR_VERSION >= 3
            // Convert the byte string to a "bytes" sequence.
            const std::string s = ostr.str();
            py::object bytesObj = pyutil::pyBorrow(PyBytes_FromStringAndSize(s.data(), s.size()));
#else
            py::str bytesObj(ostr.str());
#endif
            state = py::make_tuple(
                xformObj.attr("__dict__"),
                uint32_t(OPENVDB_LIBRARY_MAJOR_VERSION),
                uint32_t(OPENVDB_LIBRARY_MINOR_VERSION),
                uint32_t(OPENVDB_FILE_VERSION),
                bytesObj);
        }
        return state;
    }

    /// Restore the given Transform to a saved state.
    /// Raises ValueError if the state tuple is malformed.
    static void setstate(py::object xformObj, py::object stateObj)
    {
        math::Transform* xform = nullptr;
        {
            py::extract<math::Transform*> x(xformObj);
            if (x.check()) xform = x();
            else return;
        }

        py::tuple state;
        {
            py::extract<py::tuple> x(stateObj);
            if (x.check()) state = x();
        }
        bool badState = (py::len(state) != 5);

        if (!badState) {
            // Restore the object's __dict__.
            py::extract<py::dict> x(state[int(STATE_DICT)]);
            if (x.check()) {
                py::dict d = py::extract<py::dict>(xformObj.attr("__dict__"))();
                d.update(x());
            } else {
                badState = true;
            }
        }

        openvdb::VersionId libVersion;
        uint32_t formatVersion = 0;
        if (!badState) {
            // Extract the serialization format version numbers.
            const int idx[3] = { STATE_MAJOR, STATE_MINOR, STATE_FORMAT };
            uint32_t version[3] = { 0, 0, 0 };
            for (int i = 0; i < 3 && !badState; ++i) {
                py::extract<uint32_t> x(state[idx[i]]);
                if (x.check()) version[i] = x();
                else badState = true;
            }
            libVersion.first = version[0];
            libVersion.second = version[1];
            formatVersion = version[2];
        }

        std::string serialized;
        if (!badState) {
            // Extract the sequence containing the serialized Transform.
            py::object bytesObj = state[int(STATE_XFORM)];
#if PY_MAJOR_VERSION >= 3
            badState = true;
            if (PyBytes_Check(bytesObj.ptr())) {
                // Convert the "bytes" sequence to a byte string.
                char* buf = NULL;
                Py_ssize_t length = 0;
                if (-1 != PyBytes_AsStringAndSize(bytesObj.ptr(), &buf, &length)) {
                    if (buf != NULL && length > 0) {
                        serialized.assign(buf, buf + length);
                        badState = false;
                    }
                }
            }
#else
            py::extract<std::string> x(bytesObj);
            if (x.check()) serialized = x();
            else badState = true;
#endif
        }

        if (badState) {
            PyErr_SetObject(PyExc_ValueError,
#if PY_MAJOR_VERSION >= 3
                ("expected (dict, int, int, int, bytes) tuple in call to __setstate__; found %s"
#else
                ("expected (dict, int, int, int, str) tuple in call to __setstate__; found %s"
#endif
                % stateObj.attr("__repr__")()).ptr());
            py::throw_error_already_set();
        }

        // Restore the internal state of the C++ object.
        std::istringstream istr(serialized, std::ios_base::binary);
        io::setVersion(istr, libVersion, formatVersion);
        xform->read(istr);
    }
}; // struct PickleSuite

} // namespace pyTransform


void exportTransform();

// Export the Axis enum and the Transform class to Python.
// NOTE: this definition continues beyond this chunk; the trailing string
// fragment below is completed on the following line of the file.
void
exportTransform()
{
    py::enum_<math::Axis>("Axis")
        .value("X", math::X_AXIS)
        .value("Y", math::Y_AXIS)
        .value("Z", math::Z_AXIS);

    py::class_<math::Transform>("Transform", py::init<>())
        .def("deepCopy", &math::Transform::copy,
            "deepCopy() -> Transform\n\n"
            "Return a copy of this transform.")

        /// @todo Should this also be __str__()?
        .def("info", &pyTransform::info,
            "info() -> str\n\n"
            "Return a string containing a description of this transform.\n")

        .def_pickle(pyTransform::PickleSuite())

        .add_property("typeName", &math::Transform::mapType,
            "name of this transform's type")
        .add_property("isLinear", &math::Transform::isLinear,
            "True if this transform is linear")

        .def("rotate", &math::Transform::preRotate,
            (py::arg("radians"), py::arg("axis") = math::X_AXIS),
            "rotate(radians, axis)\n\n"
            "Accumulate a rotation about either Axis.X, Axis.Y or Axis.Z.")
        .def("translate", &math::Transform::postTranslate, py::arg("xyz"),
            "translate((x, y, z))\n\n"
            "Accumulate a translation.")
        .def("scale", &pyTransform::scale1, py::arg("s"),
            "scale(s)\n\n"
            "Accumulate a uniform scale.")
        .def("scale", &pyTransform::scale3, py::arg("sxyz"),
            "scale((sx, sy, sz))\n\n"
            "Accumulate a nonuniform scale.")
        .def("shear", &math::Transform::preShear,
            (py::arg("s"), py::arg("axis0"), py::arg("axis1")),
            "shear(s, axis0, axis1)\n\n"
            "Accumulate a shear (axis0 and axis1 are either\n"
            "Axis.X, Axis.Y or Axis.Z).")

        .def("voxelSize", &pyTransform::voxelDim0,
            "voxelSize() -> (dx, dy, dz)\n\n"
            "Return the size of voxels of the linear component of this transform.")
        .def("voxelSize", &pyTransform::voxelDim1, py::arg("xyz"),
            "voxelSize((x, y, z)) -> (dx, dy, dz)\n\n"
            "Return the size of the voxel at position (x, y, z).")

        .def("voxelVolume", &pyTransform::voxelVolume0,
            "voxelVolume() -> float\n\n" "Return the 
voxel volume of the linear component of this transform.") .def("voxelVolume", &pyTransform::voxelVolume1, py::arg("xyz"), "voxelVolume((x, y, z)) -> float\n\n" "Return the voxel volume at position (x, y, z).") .def("indexToWorld", &pyTransform::indexToWorld, py::arg("xyz"), "indexToWorld((x, y, z)) -> (x', y', z')\n\n" "Apply this transformation to the given coordinates.") .def("worldToIndex", &pyTransform::worldToIndex, py::arg("xyz"), "worldToIndex((x, y, z)) -> (x', y', z')\n\n" "Apply the inverse of this transformation to the given coordinates.") .def("worldToIndexCellCentered", &pyTransform::worldToIndexCellCentered, py::arg("xyz"), "worldToIndexCellCentered((x, y, z)) -> (i, j, k)\n\n" "Apply the inverse of this transformation to the given coordinates\n" "and round the result to the nearest integer coordinates.") .def("worldToIndexNodeCentered", &pyTransform::worldToIndexNodeCentered, py::arg("xyz"), "worldToIndexNodeCentered((x, y, z)) -> (i, j, k)\n\n" "Apply the inverse of this transformation to the given coordinates\n" "and round the result down to the nearest integer coordinates.") // Allow Transforms to be compared for equality and inequality. 
.def(py::self == py::other<math::Transform>()) .def(py::self != py::other<math::Transform>()) ; py::def("createLinearTransform", &pyTransform::createLinearFromMat, py::arg("matrix"), "createLinearTransform(matrix) -> Transform\n\n" "Create a new linear transform from a 4 x 4 matrix given as a sequence\n" "of the form [[a, b, c, d], [e, f, g, h], [i, j, k, l], [m, n, o, p]],\n" "where [m, n, o, p] is the translation component."); py::def("createLinearTransform", &pyTransform::createLinearFromDim, (py::arg("voxelSize") = 1.0), "createLinearTransform(voxelSize) -> Transform\n\n" "Create a new linear transform with the given uniform voxel size."); py::def("createFrustumTransform", &pyTransform::createFrustum, (py::arg("xyzMin"), py::arg("xyzMax"), py::arg("taper"), py::arg("depth"), py::arg("voxelSize") = 1.0), "createFrustumTransform(xyzMin, xyzMax, taper, depth, voxelSize) -> Transform\n\n" "Create a new frustum transform with unit bounding box (xyzMin, xyzMax)\n" "and the given taper, depth and uniform voxel size."); // allows Transform::Ptr Grid::getTransform() to work py::register_ptr_to_python<math::Transform::Ptr>(); }
12,443
C++
37.055046
98
0.566021
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyPointGrid.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file pyPointGrid.cc /// @brief Boost.Python wrappers for point openvdb::Grid types #include <boost/python.hpp> #include "pyGrid.h" namespace py = boost::python; void exportPointGrid(); void exportPointGrid() { #ifdef PY_OPENVDB_WRAP_ALL_GRID_TYPES pyGrid::exportGrid<points::PointDataGrid>(); #endif }
406
C++
15.958333
62
0.729064
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyutil.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_PYUTIL_HAS_BEEN_INCLUDED #define OPENVDB_PYUTIL_HAS_BEEN_INCLUDED #include "openvdb/openvdb.h" #include "openvdb/points/PointDataGrid.h" #include <boost/python.hpp> #include <tbb/mutex.h> #include <map> // for std::pair #include <string> #include <sstream> namespace pyutil { /// Return a new @c boost::python::object that borrows (i.e., doesn't /// take over ownership of) the given @c PyObject's reference. inline boost::python::object pyBorrow(PyObject* obj) { return boost::python::object(boost::python::handle<>(boost::python::borrowed(obj))); } /// @brief Given a @c PyObject that implements the sequence protocol /// (e.g., a @c PyListObject), return the value of type @c ValueT /// at index @a idx in the sequence. /// @details Raise a Python @c TypeError exception if the value /// at index @a idx is not convertible to type @c ValueT. template<typename ValueT> inline ValueT getSequenceItem(PyObject* obj, int idx) { return boost::python::extract<ValueT>(pyBorrow(obj)[idx]); } //////////////////////////////////////// template<class GridType> struct GridTraitsBase { /// @brief Return the name of the Python class that wraps this grid type /// (e.g., "FloatGrid" for openvdb::FloatGrid). /// /// @note This name is not the same as GridType::type(). /// The latter returns a name like "Tree_float_5_4_3". static const char* name(); /// Return the name of this grid type's value type ("bool", "float", "vec3s", etc.). static const char* valueTypeName() { return openvdb::typeNameAsString<typename GridType::ValueType>(); } /// @brief Return a description of this grid type. /// /// @note This name is generated at runtime for each call to descr(). 
static const std::string descr() { return std::string("OpenVDB grid with voxels of type ") + valueTypeName(); } }; // struct GridTraitsBase template<class GridType> struct GridTraits: public GridTraitsBase<GridType> { }; /// Map a grid type to a traits class that derives from GridTraitsBase /// and that defines a name() method. #define GRID_TRAITS(_typ, _name) \ template<> struct GridTraits<_typ>: public GridTraitsBase<_typ> { \ static const char* name() { return _name; } \ } GRID_TRAITS(openvdb::FloatGrid, "FloatGrid"); GRID_TRAITS(openvdb::Vec3SGrid, "Vec3SGrid"); GRID_TRAITS(openvdb::BoolGrid, "BoolGrid"); #ifdef PY_OPENVDB_WRAP_ALL_GRID_TYPES GRID_TRAITS(openvdb::DoubleGrid, "DoubleGrid"); GRID_TRAITS(openvdb::Int32Grid, "Int32Grid"); GRID_TRAITS(openvdb::Int64Grid, "Int64Grid"); GRID_TRAITS(openvdb::Vec3IGrid, "Vec3IGrid"); GRID_TRAITS(openvdb::Vec3DGrid, "Vec3DGrid"); GRID_TRAITS(openvdb::points::PointDataGrid, "PointDataGrid"); #endif #undef GRID_TRAITS //////////////////////////////////////// // Note that the elements are pointers to C strings (char**), because // boost::python::class_::def_readonly() requires a pointer to a static member. typedef std::pair<const char* const*, const char* const*> CStringPair; /// @brief Enum-like mapping from string keys to string values, with characteristics /// of both (Python) classes and class instances (as well as NamedTuples) /// @details /// - (@e key, @e value) pairs can be accessed as class attributes (\"<tt>MyClass.MY_KEY</tt>\") /// - (@e key, @e value) pairs can be accessed via dict lookup on instances /// (\"<tt>MyClass()['MY_KEY']</tt>\") /// - (@e key, @e value) pairs can't be modified or reassigned /// - instances are iterable (\"<tt>for key in MyClass(): ...</tt>\") /// /// A @c Descr class must implement the following interface: /// @code /// struct MyDescr /// { /// // Return the Python name for the enum class. /// static const char* name(); /// // Return the docstring for the enum class. 
/// static const char* doc(); /// // Return the ith (key, value) pair, in the form of /// // a pair of *pointers* to C strings /// static CStringPair item(int i); /// }; /// @endcode template<typename Descr> struct StringEnum { /// Return the (key, value) map as a Python dict. static boost::python::dict items() { static tbb::mutex sMutex; static boost::python::dict itemDict; if (!itemDict) { // The first time this function is called, populate // the static dict with (key, value) pairs. tbb::mutex::scoped_lock lock(sMutex); if (!itemDict) { for (int i = 0; ; ++i) { const CStringPair item = Descr::item(i); OPENVDB_START_THREADSAFE_STATIC_WRITE if (item.first) { itemDict[boost::python::str(*item.first)] = boost::python::str(*item.second); } OPENVDB_FINISH_THREADSAFE_STATIC_WRITE else break; } } } return itemDict; } /// Return the keys as a Python list of strings. static boost::python::object keys() { return items().attr("keys")(); } /// Return the number of keys as a Python int. boost::python::object numItems() const { return boost::python::object(boost::python::len(items())); } /// Return the value (as a Python string) for the given key. boost::python::object getItem(boost::python::object keyObj) const { return items()[keyObj]; } /// Return a Python iterator over the keys. boost::python::object iter() const { return items().attr("__iter__")(); } /// Register this enum. static void wrap() { boost::python::class_<StringEnum> cls( /*classname=*/Descr::name(), /*docstring=*/Descr::doc()); cls.def("keys", &StringEnum::keys, "keys() -> list") .staticmethod("keys") .def("__len__", &StringEnum::numItems, "__len__() -> int") .def("__iter__", &StringEnum::iter, "__iter__() -> iterator") .def("__getitem__", &StringEnum::getItem, "__getitem__(str) -> str") /*end*/; // Add a read-only, class-level attribute for each (key, value) pair. 
for (int i = 0; ; ++i) { const CStringPair item = Descr::item(i); if (item.first) cls.def_readonly(*item.first, item.second); else break; } } }; //////////////////////////////////////// /// @brief From the given Python object, extract a value of type @c T. /// /// If the object cannot be converted to type @c T, raise a @c TypeError with a more /// Pythonic error message (incorporating the provided class and function names, etc.) /// than the one that would be generated by boost::python::extract(), e.g., /// "TypeError: expected float, found str as argument 2 to FloatGrid.prune()" instead of /// "TypeError: No registered converter was able to produce a C++ rvalue of type /// boost::shared_ptr<openvdb::Grid<openvdb::tree::Tree<openvdb::tree::RootNode<...". template<typename T> inline T extractArg( boost::python::object obj, const char* functionName, const char* className = nullptr, int argIdx = 0, // args are numbered starting from 1 const char* expectedType = nullptr) { boost::python::extract<T> val(obj); if (!val.check()) { // Generate an error string of the form // "expected <expectedType>, found <actualType> as argument <argIdx> // to <className>.<functionName>()", where <argIdx> and <className> // are optional. std::ostringstream os; os << "expected "; if (expectedType) os << expectedType; else os << openvdb::typeNameAsString<T>(); const std::string actualType = boost::python::extract<std::string>(obj.attr("__class__").attr("__name__")); os << ", found " << actualType << " as argument"; if (argIdx > 0) os << " " << argIdx; os << " to "; if (className) os << className << "."; os << functionName << "()"; PyErr_SetString(PyExc_TypeError, os.str().c_str()); boost::python::throw_error_already_set(); } return val(); } //////////////////////////////////////// /// Return str(val) for the given value. 
template<typename T> inline std::string str(const T& val) { return boost::python::extract<std::string>(boost::python::str(val)); } /// Return the name of the given Python object's class. inline std::string className(boost::python::object obj) { std::string s = boost::python::extract<std::string>( obj.attr("__class__").attr("__name__")); return s; } } // namespace pyutil #endif // OPENVDB_PYUTIL_HAS_BEEN_INCLUDED
8,741
C
33.148437
97
0.611715
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyVec3Grid.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file pyVec3Grid.cc /// @brief Boost.Python wrappers for vector-valued openvdb::Grid types #include "pyGrid.h" void exportVec3Grid(); void exportVec3Grid() { pyGrid::exportGrid<Vec3SGrid>(); #ifdef PY_OPENVDB_WRAP_ALL_GRID_TYPES pyGrid::exportGrid<Vec3IGrid>(); pyGrid::exportGrid<Vec3DGrid>(); #endif }
413
C++
17.818181
70
0.719128
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyGrid.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file pyGrid.h /// @author Peter Cucka /// @brief Boost.Python wrapper for openvdb::Grid #ifndef OPENVDB_PYGRID_HAS_BEEN_INCLUDED #define OPENVDB_PYGRID_HAS_BEEN_INCLUDED #include <boost/python.hpp> #ifdef PY_OPENVDB_USE_NUMPY // boost::python::numeric was replaced with boost::python::numpy in Boost 1.65. // (boost::python::numpy requires NumPy 1.7 or later.) #include <boost/python/numpy.hpp> //#include <arrayobject.h> // for PyArray_Descr (see pyGrid::arrayTypeId()) #define PY_OPENVDB_USE_BOOST_PYTHON_NUMPY #include "openvdb/tools/MeshToVolume.h" #include "openvdb/tools/VolumeToMesh.h" // for tools::volumeToMesh() #endif #include "openvdb/openvdb.h" #include "openvdb/io/Stream.h" #include "openvdb/math/Math.h" // for math::isExactlyEqual() #include "openvdb/points/PointDataGrid.h" #include "openvdb/tools/LevelSetSphere.h" #include "openvdb/tools/Dense.h" #include "openvdb/tools/ChangeBackground.h" #include "openvdb/tools/Prune.h" #include "openvdb/tools/SignedFloodFill.h" #include "pyutil.h" #include "pyAccessor.h" // for pyAccessor::AccessorWrap #include "pyopenvdb.h" #include <algorithm> // for std::max() #include <cstring> // for memcpy() #include <iostream> #include <memory> #include <sstream> #include <string> #include <vector> namespace py = boost::python; #ifdef __clang__ // This is a private header, so it's OK to include a "using namespace" directive. 
#pragma clang diagnostic push #pragma clang diagnostic ignored "-Wheader-hygiene" #endif using namespace openvdb::OPENVDB_VERSION_NAME; #ifdef __clang__ #pragma clang diagnostic pop #endif namespace pyopenvdb { inline py::object getPyObjectFromGrid(const GridBase::Ptr& grid) { if (!grid) return py::object(); #define CONVERT_BASE_TO_GRID(GridType, grid) \ if (grid->isType<GridType>()) { \ return py::object(gridPtrCast<GridType>(grid)); \ } CONVERT_BASE_TO_GRID(FloatGrid, grid); CONVERT_BASE_TO_GRID(Vec3SGrid, grid); CONVERT_BASE_TO_GRID(BoolGrid, grid); #ifdef PY_OPENVDB_WRAP_ALL_GRID_TYPES CONVERT_BASE_TO_GRID(DoubleGrid, grid); CONVERT_BASE_TO_GRID(Int32Grid, grid); CONVERT_BASE_TO_GRID(Int64Grid, grid); CONVERT_BASE_TO_GRID(Vec3IGrid, grid); CONVERT_BASE_TO_GRID(Vec3DGrid, grid); CONVERT_BASE_TO_GRID(points::PointDataGrid, grid); #endif #undef CONVERT_BASE_TO_GRID OPENVDB_THROW(TypeError, grid->type() + " is not a supported OpenVDB grid type"); } inline openvdb::GridBase::Ptr getGridFromPyObject(const boost::python::object& gridObj) { if (!gridObj) return GridBase::Ptr(); #define CONVERT_GRID_TO_BASE(GridPtrType) \ { \ py::extract<GridPtrType> x(gridObj); \ if (x.check()) return x(); \ } // Extract a grid pointer of one of the supported types // from the input object, then cast it to a base pointer. 
CONVERT_GRID_TO_BASE(FloatGrid::Ptr); CONVERT_GRID_TO_BASE(Vec3SGrid::Ptr); CONVERT_GRID_TO_BASE(BoolGrid::Ptr); #ifdef PY_OPENVDB_WRAP_ALL_GRID_TYPES CONVERT_GRID_TO_BASE(DoubleGrid::Ptr); CONVERT_GRID_TO_BASE(Int32Grid::Ptr); CONVERT_GRID_TO_BASE(Int64Grid::Ptr); CONVERT_GRID_TO_BASE(Vec3IGrid::Ptr); CONVERT_GRID_TO_BASE(Vec3DGrid::Ptr); CONVERT_GRID_TO_BASE(points::PointDataGrid::Ptr); #endif #undef CONVERT_GRID_TO_BASE OPENVDB_THROW(TypeError, pyutil::className(gridObj) + " is not a supported OpenVDB grid type"); } inline openvdb::GridBase::Ptr getGridFromPyObject(PyObject* gridObj) { return getGridFromPyObject(pyutil::pyBorrow(gridObj)); } } // namespace pyopenvdb //////////////////////////////////////// namespace pyGrid { inline py::object getGridFromGridBase(GridBase::Ptr grid) { py::object obj; try { obj = pyopenvdb::getPyObjectFromGrid(grid); } catch (openvdb::TypeError& e) { PyErr_SetString(PyExc_TypeError, e.what()); py::throw_error_already_set(); return py::object(); } return obj; } /// GridBase is not exposed in Python because it isn't really needed /// (and because exposing it would be complicated, requiring wrapping /// pure virtual functions like GridBase::baseTree()), but there are /// a few cases where, internally, we need to extract a GridBase::Ptr /// from a py::object. Hence this converter. 
inline GridBase::Ptr getGridBaseFromGrid(py::object gridObj) { GridBase::Ptr grid; try { grid = pyopenvdb::getGridFromPyObject(gridObj); } catch (openvdb::TypeError& e) { PyErr_SetString(PyExc_TypeError, e.what()); py::throw_error_already_set(); return GridBase::Ptr(); } return grid; } //////////////////////////////////////// /// Variant of pyutil::extractArg() that uses the class name of a given grid type template<typename GridType, typename T> inline T extractValueArg( py::object obj, const char* functionName, int argIdx = 0, // args are numbered starting from 1 const char* expectedType = nullptr) { return pyutil::extractArg<T>(obj, functionName, pyutil::GridTraits<GridType>::name(), argIdx, expectedType); } /// @brief Variant of pyutil::extractArg() that uses the class name /// and @c ValueType of a given grid type template<typename GridType> inline typename GridType::ValueType extractValueArg( py::object obj, const char* functionName, int argIdx = 0, // args are numbered starting from 1 const char* expectedType = nullptr) { return extractValueArg<GridType, typename GridType::ValueType>( obj, functionName, argIdx, expectedType); } //////////////////////////////////////// template<typename GridType> inline typename GridType::Ptr copyGrid(GridType& grid) { return grid.copy(); } template<typename GridType> inline bool sharesWith(const GridType& grid, py::object other) { py::extract<typename GridType::Ptr> x(other); if (x.check()) { typename GridType::ConstPtr otherGrid = x(); return (&otherGrid->tree() == &grid.tree()); } return false; } //////////////////////////////////////// template<typename GridType> inline std::string getValueType() { return pyutil::GridTraits<GridType>::valueTypeName(); } template<typename GridType> inline typename GridType::ValueType getZeroValue() { return openvdb::zeroVal<typename GridType::ValueType>(); } template<typename GridType> inline typename GridType::ValueType getOneValue() { using ValueT = typename GridType::ValueType; return 
ValueT(openvdb::zeroVal<ValueT>() + 1); } template<typename GridType> inline bool notEmpty(const GridType& grid) { return !grid.empty(); } template<typename GridType> inline typename GridType::ValueType getGridBackground(const GridType& grid) { return grid.background(); } template<typename GridType> inline void setGridBackground(GridType& grid, py::object obj) { tools::changeBackground(grid.tree(), extractValueArg<GridType>(obj, "setBackground")); } inline void setGridName(GridBase::Ptr grid, py::object strObj) { if (grid) { if (!strObj) { // if name is None grid->removeMeta(GridBase::META_GRID_NAME); } else { const std::string name = pyutil::extractArg<std::string>( strObj, "setName", /*className=*/nullptr, /*argIdx=*/1, "str"); grid->setName(name); } } } inline void setGridCreator(GridBase::Ptr grid, py::object strObj) { if (grid) { if (!strObj) { // if name is None grid->removeMeta(GridBase::META_GRID_CREATOR); } else { const std::string name = pyutil::extractArg<std::string>( strObj, "setCreator", /*className=*/nullptr, /*argIdx=*/1, "str"); grid->setCreator(name); } } } inline std::string getGridClass(GridBase::ConstPtr grid) { return GridBase::gridClassToString(grid->getGridClass()); } inline void setGridClass(GridBase::Ptr grid, py::object strObj) { if (!strObj) { grid->clearGridClass(); } else { const std::string name = pyutil::extractArg<std::string>( strObj, "setGridClass", /*className=*/nullptr, /*argIdx=*/1, "str"); grid->setGridClass(GridBase::stringToGridClass(name)); } } inline std::string getVecType(GridBase::ConstPtr grid) { return GridBase::vecTypeToString(grid->getVectorType()); } inline void setVecType(GridBase::Ptr grid, py::object strObj) { if (!strObj) { grid->clearVectorType(); } else { const std::string name = pyutil::extractArg<std::string>( strObj, "setVectorType", /*className=*/nullptr, /*argIdx=*/1, "str"); grid->setVectorType(GridBase::stringToVecType(name)); } } inline std::string gridInfo(GridBase::ConstPtr grid, int verbosity) { 
std::ostringstream ostr; grid->print(ostr, std::max<int>(1, verbosity)); return ostr.str(); } //////////////////////////////////////// inline void setGridTransform(GridBase::Ptr grid, py::object xformObj) { if (grid) { if (math::Transform::Ptr xform = pyutil::extractArg<math::Transform::Ptr>( xformObj, "setTransform", /*className=*/nullptr, /*argIdx=*/1, "Transform")) { grid->setTransform(xform); } else { PyErr_SetString(PyExc_ValueError, "null transform"); py::throw_error_already_set(); } } } //////////////////////////////////////// // Helper class to construct a pyAccessor::AccessorWrap for a given grid, // permitting partial specialization for const vs. non-const grids template<typename GridType> struct AccessorHelper { using Wrapper = typename pyAccessor::AccessorWrap<GridType>; static Wrapper wrap(typename GridType::Ptr grid) { if (!grid) { PyErr_SetString(PyExc_ValueError, "null grid"); py::throw_error_already_set(); } return Wrapper(grid); } }; // Specialization for const grids template<typename GridType> struct AccessorHelper<const GridType> { using Wrapper = typename pyAccessor::AccessorWrap<const GridType>; static Wrapper wrap(typename GridType::ConstPtr grid) { if (!grid) { PyErr_SetString(PyExc_ValueError, "null grid"); py::throw_error_already_set(); } return Wrapper(grid); } }; /// Return a non-const accessor (wrapped in a pyAccessor::AccessorWrap) for the given grid. template<typename GridType> inline typename AccessorHelper<GridType>::Wrapper getAccessor(typename GridType::Ptr grid) { return AccessorHelper<GridType>::wrap(grid); } /// @brief Return a const accessor (wrapped in a pyAccessor::AccessorWrap) for the given grid. /// @internal Note that the grid pointer is non-const, even though the grid is /// treated as const. This is because we don't expose a const grid type in Python. 
template<typename GridType> inline typename AccessorHelper<const GridType>::Wrapper getConstAccessor(typename GridType::Ptr grid) { return AccessorHelper<const GridType>::wrap(grid); } //////////////////////////////////////// template<typename GridType> inline py::tuple evalLeafBoundingBox(const GridType& grid) { CoordBBox bbox; grid.tree().evalLeafBoundingBox(bbox); return py::make_tuple(bbox.min(), bbox.max()); } template<typename GridType> inline Coord evalLeafDim(const GridType& grid) { Coord dim; grid.tree().evalLeafDim(dim); return dim; } template<typename GridType> inline py::tuple evalActiveVoxelBoundingBox(const GridType& grid) { CoordBBox bbox = grid.evalActiveVoxelBoundingBox(); return py::make_tuple(bbox.min(), bbox.max()); } template<typename GridType> inline py::tuple getNodeLog2Dims(const GridType& grid) { std::vector<Index> dims; grid.tree().getNodeLog2Dims(dims); py::list lst; for (size_t i = 0, N = dims.size(); i < N; ++i) { lst.append(dims[i]); } return py::tuple(lst); } template<typename GridType> inline Index treeDepth(const GridType& grid) { return grid.tree().treeDepth(); } template<typename GridType> inline Index32 leafCount(const GridType& grid) { return grid.tree().leafCount(); } template<typename GridType> inline Index32 nonLeafCount(const GridType& grid) { return grid.tree().nonLeafCount(); } template<typename GridType> inline Index64 activeLeafVoxelCount(const GridType& grid) { return grid.tree().activeLeafVoxelCount(); } template<typename GridType> inline py::tuple evalMinMax(const GridType& grid) { typename GridType::ValueType vmin, vmax; grid.tree().evalMinMax(vmin, vmax); return py::make_tuple(vmin, vmax); } template<typename GridType> inline py::tuple getIndexRange(const GridType& grid) { CoordBBox bbox; grid.tree().getIndexRange(bbox); return py::make_tuple(bbox.min(), bbox.max()); } //template<typename GridType> //inline void //expandIndexRange(GridType& grid, py::object coordObj) //{ // Coord xyz = extractValueArg<GridType, 
Coord>( // coordObj, "expand", 0, "tuple(int, int, int)"); // grid.tree().expand(xyz); //} //////////////////////////////////////// inline py::dict getAllMetadata(GridBase::ConstPtr grid) { if (grid) return py::dict(static_cast<const MetaMap&>(*grid)); return py::dict(); } inline void replaceAllMetadata(GridBase::Ptr grid, const MetaMap& metadata) { if (grid) { grid->clearMetadata(); for (MetaMap::ConstMetaIterator it = metadata.beginMeta(); it != metadata.endMeta(); ++it) { if (it->second) grid->insertMeta(it->first, *it->second); } } } inline void updateMetadata(GridBase::Ptr grid, const MetaMap& metadata) { if (grid) { for (MetaMap::ConstMetaIterator it = metadata.beginMeta(); it != metadata.endMeta(); ++it) { if (it->second) { grid->removeMeta(it->first); grid->insertMeta(it->first, *it->second); } } } } inline py::dict getStatsMetadata(GridBase::ConstPtr grid) { MetaMap::ConstPtr metadata; if (grid) metadata = grid->getStatsMetadata(); if (metadata) return py::dict(*metadata); return py::dict(); } inline py::object getMetadataKeys(GridBase::ConstPtr grid) { if (grid) { #if PY_MAJOR_VERSION >= 3 // Return an iterator over the "keys" view of a dict. return py::import("builtins").attr("iter")( py::dict(static_cast<const MetaMap&>(*grid)).keys()); #else return py::dict(static_cast<const MetaMap&>(*grid)).iterkeys(); #endif } return py::object(); } inline py::object getMetadata(GridBase::ConstPtr grid, py::object nameObj) { if (!grid) return py::object(); const std::string name = pyutil::extractArg<std::string>( nameObj, "__getitem__", nullptr, /*argIdx=*/1, "str"); Metadata::ConstPtr metadata = (*grid)[name]; if (!metadata) { PyErr_SetString(PyExc_KeyError, name.c_str()); py::throw_error_already_set(); } // Use the MetaMap-to-dict converter (see pyOpenVDBModule.cc) to convert // the Metadata value to a Python object of the appropriate type. /// @todo Would be more efficient to convert the Metadata object /// directly to a Python object. 
MetaMap metamap; metamap.insertMeta(name, *metadata); return py::dict(metamap)[name]; } inline void setMetadata(GridBase::Ptr grid, py::object nameObj, py::object valueObj) { if (!grid) return; const std::string name = pyutil::extractArg<std::string>( nameObj, "__setitem__", nullptr, /*argIdx=*/1, "str"); // Insert the Python object into a Python dict, then use the dict-to-MetaMap // converter (see pyOpenVDBModule.cc) to convert the dict to a MetaMap // containing a Metadata object of the appropriate type. /// @todo Would be more efficient to convert the Python object /// directly to a Metadata object. py::dict dictObj; dictObj[name] = valueObj; MetaMap metamap = py::extract<MetaMap>(dictObj); if (Metadata::Ptr metadata = metamap[name]) { grid->removeMeta(name); grid->insertMeta(name, *metadata); } } inline void removeMetadata(GridBase::Ptr grid, const std::string& name) { if (grid) { Metadata::Ptr metadata = (*grid)[name]; if (!metadata) { PyErr_SetString(PyExc_KeyError, name.c_str()); py::throw_error_already_set(); } grid->removeMeta(name); } } inline bool hasMetadata(GridBase::ConstPtr grid, const std::string& name) { if (grid) return ((*grid)[name].get() != nullptr); return false; } //////////////////////////////////////// template<typename GridType> inline void prune(GridType& grid, py::object tolerance) { tools::prune(grid.tree(), extractValueArg<GridType>(tolerance, "prune")); } template<typename GridType> inline void pruneInactive(GridType& grid, py::object valObj) { if (valObj.is_none()) { tools::pruneInactive(grid.tree()); } else { tools::pruneInactiveWithValue( grid.tree(), extractValueArg<GridType>(valObj, "pruneInactive")); } } template<typename GridType> inline void fill(GridType& grid, py::object minObj, py::object maxObj, py::object valObj, bool active) { const Coord bmin = extractValueArg<GridType, Coord>(minObj, "fill", 1, "tuple(int, int, int)"), bmax = extractValueArg<GridType, Coord>(maxObj, "fill", 2, "tuple(int, int, int)"); 
grid.fill(CoordBBox(bmin, bmax), extractValueArg<GridType>(valObj, "fill", 3), active); } template<typename GridType> inline void signedFloodFill(GridType& grid) { tools::signedFloodFill(grid.tree()); } //////////////////////////////////////// #ifndef PY_OPENVDB_USE_NUMPY template<typename GridType> inline void copyFromArray(GridType&, const py::object&, py::object, py::object) { PyErr_SetString(PyExc_NotImplementedError, "this module was built without NumPy support"); boost::python::throw_error_already_set(); } template<typename GridType> inline void copyToArray(GridType&, const py::object&, py::object) { PyErr_SetString(PyExc_NotImplementedError, "this module was built without NumPy support"); boost::python::throw_error_already_set(); } #else // if defined(PY_OPENVDB_USE_NUMPY) using ArrayDimVec = std::vector<size_t>; #ifdef PY_OPENVDB_USE_BOOST_PYTHON_NUMPY // ID numbers for supported value types enum class DtId { NONE, FLOAT, DOUBLE, BOOL, INT16, INT32, INT64, UINT32, UINT64/*, HALF*/ }; using NumPyArrayType = py::numpy::ndarray; #else // if !defined PY_OPENVDB_USE_BOOST_PYTHON_NUMPY // NumPy type numbers for supported value types enum class DtId { NONE = NPY_NOTYPE, FLOAT = NPY_FLOAT, DOUBLE = NPY_DOUBLE, BOOL = NPY_BOOL, INT16 = NPY_INT16, INT32 = NPY_INT32, INT64 = NPY_INT64, UINT32 = NPY_UINT32, UINT64 = NPY_UINT64, //HALF = NPY_HALF }; using NumPyArrayType = py::numeric::array; #endif // PY_OPENVDB_USE_BOOST_PYTHON_NUMPY template<DtId TypeId> struct NumPyToCpp {}; template<> struct NumPyToCpp<DtId::FLOAT> { using type = float; }; template<> struct NumPyToCpp<DtId::DOUBLE> { using type = double; }; template<> struct NumPyToCpp<DtId::BOOL> { using type = bool; }; template<> struct NumPyToCpp<DtId::INT16> { using type = Int16; }; template<> struct NumPyToCpp<DtId::INT32> { using type = Int32; }; template<> struct NumPyToCpp<DtId::INT64> { using type = Int64; }; template<> struct NumPyToCpp<DtId::UINT32> { using type = Index32; }; template<> struct 
NumPyToCpp<DtId::UINT64> { using type = Index64; }; //template<> struct NumPyToCpp<DtId::HALF> { using type = half; }; #if 0 template<typename T> struct CppToNumPy { static const DtId typeId = DtId::NONE; }; template<> struct CppToNumPy<float> { static const DtId typeId = DtId::FLOAT; }; template<> struct CppToNumPy<double> { static const DtId typeId = DtId::DOUBLE; }; template<> struct CppToNumPy<bool> { static const DtId typeId = DtId::BOOL; }; template<> struct CppToNumPy<Int16> { static const DtId typeId = DtId::INT16; }; template<> struct CppToNumPy<Int32> { static const DtId typeId = DtId::INT32; }; template<> struct CppToNumPy<Int64> { static const DtId typeId = DtId::INT64; }; template<> struct CppToNumPy<Index32> { static const DtId typeId = DtId::UINT32; }; template<> struct CppToNumPy<Index64> { static const DtId typeId = DtId::UINT64; }; //template<> struct CppToNumPy<half> { static const DtId typeId = DtId::HALF; }; #endif #ifdef PY_OPENVDB_USE_BOOST_PYTHON_NUMPY // Return the ID number of the given NumPy array's data type. /// @todo Revisit this if and when py::numpy::dtype ever provides a type number accessor. inline DtId arrayTypeId(const py::numpy::ndarray& arrayObj) { namespace np = py::numpy; const auto dtype = arrayObj.get_dtype(); #if 0 // More efficient than np::equivalent(), but requires NumPy headers. 
if (const auto* descr = reinterpret_cast<const PyArray_Descr*>(dtype.ptr())) { const auto typeId = static_cast<DtId>(descr->type_num); switch (typeId) { case DtId::NONE: break; case DtId::FLOAT: case DtId::DOUBLE: case DtId::BOOL: case DtId::INT16: case DtId::INT32: case DtId::INT64: case DtId::UINT32: case DtId::UINT64: return typeId; } throw openvdb::TypeError{}; } #else if (np::equivalent(dtype, np::dtype::get_builtin<float>())) return DtId::FLOAT; if (np::equivalent(dtype, np::dtype::get_builtin<double>())) return DtId::DOUBLE; if (np::equivalent(dtype, np::dtype::get_builtin<bool>())) return DtId::BOOL; if (np::equivalent(dtype, np::dtype::get_builtin<Int16>())) return DtId::INT16; if (np::equivalent(dtype, np::dtype::get_builtin<Int32>())) return DtId::INT32; if (np::equivalent(dtype, np::dtype::get_builtin<Int64>())) return DtId::INT64; if (np::equivalent(dtype, np::dtype::get_builtin<Index32>())) return DtId::UINT32; if (np::equivalent(dtype, np::dtype::get_builtin<Index64>())) return DtId::UINT64; //if (np::equivalent(dtype, np::dtype::get_builtin<half>())) return DtId::HALF; #endif throw openvdb::TypeError{}; } // Return a string description of the given NumPy array's data type. inline std::string arrayTypeName(const py::numpy::ndarray& arrayObj) { return pyutil::str(arrayObj.get_dtype()); } // Return the dimensions of the given NumPy array. inline ArrayDimVec arrayDimensions(const py::numpy::ndarray& arrayObj) { ArrayDimVec dims; for (int i = 0, N = arrayObj.get_nd(); i < N; ++i) { dims.push_back(static_cast<size_t>(arrayObj.shape(i))); } return dims; } #else // !defined PY_OPENVDB_USE_BOOST_PYTHON_NUMPY // Return the ID number of the given NumPy array's data type. 
inline DtId arrayTypeId(const py::numeric::array& arrayObj) { const PyArray_Descr* dtype = nullptr; if (PyArrayObject* arrayObjPtr = reinterpret_cast<PyArrayObject*>(arrayObj.ptr())) { dtype = PyArray_DESCR(arrayObjPtr); } if (dtype) return static_cast<DtId>(dtype->type_num); throw openvdb::TypeError{}; } // Return a string description of the given NumPy array's data type. inline std::string arrayTypeName(const py::numeric::array& arrayObj) { std::string name; if (PyObject_HasAttrString(arrayObj.ptr(), "dtype")) { name = pyutil::str(arrayObj.attr("dtype")); } else { name = "'_'"; PyArrayObject* arrayObjPtr = reinterpret_cast<PyArrayObject*>(arrayObj.ptr()); name[1] = PyArray_DESCR(arrayObjPtr)->kind; } return name; } // Return the dimensions of the given NumPy array. inline ArrayDimVec arrayDimensions(const py::numeric::array& arrayObj) { const py::object shape = arrayObj.attr("shape"); ArrayDimVec dims; for (long i = 0, N = py::len(shape); i < N; ++i) { dims.push_back(py::extract<size_t>(shape[i])); } return dims; } inline py::object copyNumPyArray(PyArrayObject* arrayObj, NPY_ORDER order = NPY_CORDER) { #ifdef __GNUC__ // Silence GCC "casting between pointer-to-function and pointer-to-object" warnings. __extension__ #endif auto obj = pyutil::pyBorrow(PyArray_NewCopy(arrayObj, order)); return obj; } #endif // PY_OPENVDB_USE_BOOST_PYTHON_NUMPY // Abstract base class for helper classes that copy data between // NumPy arrays of various types and grids of various types template<typename GridType> class CopyOpBase { public: using ValueT = typename GridType::ValueType; CopyOpBase(bool toGrid, GridType& grid, py::object arrObj, py::object coordObj, py::object tolObj) : mToGrid(toGrid) , mGrid(&grid) { const char* const opName[2] = { "copyToArray", "copyFromArray" }; // Extract the coordinates (i, j, k) of the voxel at which to start populating data. // Voxel (i, j, k) will correspond to array element (0, 0, 0). 
const Coord origin = extractValueArg<GridType, Coord>( coordObj, opName[toGrid], 1, "tuple(int, int, int)"); // Extract a reference to (not a copy of) the NumPy array, // or throw an exception if arrObj is not a NumPy array object. const auto arrayObj = pyutil::extractArg<NumPyArrayType>( arrObj, opName[toGrid], pyutil::GridTraits<GridType>::name(), /*argIdx=*/1, "numpy.ndarray"); #ifdef PY_OPENVDB_USE_BOOST_PYTHON_NUMPY mArray = arrayObj.get_data(); #else mArray = PyArray_DATA(reinterpret_cast<PyArrayObject*>(arrayObj.ptr())); #endif mArrayTypeName = arrayTypeName(arrayObj); mArrayTypeId = arrayTypeId(arrayObj); mArrayDims = arrayDimensions(arrayObj); mTolerance = extractValueArg<GridType>(tolObj, opName[toGrid], 2); // Compute the bounding box of the region of the grid that is to be copied from or to. Coord bboxMax = origin; for (size_t n = 0, N = std::min<size_t>(mArrayDims.size(), 3); n < N; ++n) { bboxMax[n] += int(mArrayDims[n]) - 1; } mBBox.reset(origin, bboxMax); } virtual ~CopyOpBase() {} void operator()() const { try { if (mToGrid) { copyFromArray(); // copy data from the array to the grid } else { copyToArray(); // copy data from the grid to the array } } catch (openvdb::TypeError&) { PyErr_Format(PyExc_TypeError, "unsupported NumPy data type %s", mArrayTypeName.c_str()); boost::python::throw_error_already_set(); } } protected: virtual void validate() const = 0; virtual void copyFromArray() const = 0; virtual void copyToArray() const = 0; template<typename ArrayValueType> void fromArray() const { validate(); tools::Dense<ArrayValueType> valArray(mBBox, static_cast<ArrayValueType*>(mArray)); tools::copyFromDense(valArray, *mGrid, mTolerance); } template<typename ArrayValueType> void toArray() const { validate(); tools::Dense<ArrayValueType> valArray(mBBox, static_cast<ArrayValueType*>(mArray)); tools::copyToDense(*mGrid, valArray); } bool mToGrid; // if true, copy from the array to the grid, else vice-versa void* mArray; GridType* mGrid; DtId 
mArrayTypeId; ArrayDimVec mArrayDims; std::string mArrayTypeName; CoordBBox mBBox; ValueT mTolerance; }; // class CopyOpBase // Helper subclass that can be specialized for various grid and NumPy array types template<typename GridType, int VecSize> class CopyOp: public CopyOpBase<GridType> {}; // Specialization for scalar grids template<typename GridType> class CopyOp<GridType, /*VecSize=*/1>: public CopyOpBase<GridType> { public: CopyOp(bool toGrid, GridType& grid, py::object arrObj, py::object coordObj, py::object tolObj = py::object(zeroVal<typename GridType::ValueType>())): CopyOpBase<GridType>(toGrid, grid, arrObj, coordObj, tolObj) { } protected: void validate() const override { if (this->mArrayDims.size() != 3) { std::ostringstream os; os << "expected 3-dimensional array, found " << this->mArrayDims.size() << "-dimensional array"; PyErr_SetString(PyExc_ValueError, os.str().c_str()); boost::python::throw_error_already_set(); } } #ifdef __clang__ // Suppress "enum value not explicitly handled" warnings PRAGMA(clang diagnostic push) PRAGMA(clang diagnostic ignored "-Wswitch-enum") #endif void copyFromArray() const override { switch (this->mArrayTypeId) { case DtId::FLOAT: this->template fromArray<typename NumPyToCpp<DtId::FLOAT>::type>(); break; case DtId::DOUBLE:this->template fromArray<typename NumPyToCpp<DtId::DOUBLE>::type>();break; case DtId::BOOL: this->template fromArray<typename NumPyToCpp<DtId::BOOL>::type>(); break; case DtId::INT16: this->template fromArray<typename NumPyToCpp<DtId::INT16>::type>(); break; case DtId::INT32: this->template fromArray<typename NumPyToCpp<DtId::INT32>::type>(); break; case DtId::INT64: this->template fromArray<typename NumPyToCpp<DtId::INT64>::type>(); break; case DtId::UINT32:this->template fromArray<typename NumPyToCpp<DtId::UINT32>::type>();break; case DtId::UINT64:this->template fromArray<typename NumPyToCpp<DtId::UINT64>::type>();break; default: throw openvdb::TypeError(); break; } } void copyToArray() const override 
{ switch (this->mArrayTypeId) { case DtId::FLOAT: this->template toArray<typename NumPyToCpp<DtId::FLOAT>::type>(); break; case DtId::DOUBLE: this->template toArray<typename NumPyToCpp<DtId::DOUBLE>::type>(); break; case DtId::BOOL: this->template toArray<typename NumPyToCpp<DtId::BOOL>::type>(); break; case DtId::INT16: this->template toArray<typename NumPyToCpp<DtId::INT16>::type>(); break; case DtId::INT32: this->template toArray<typename NumPyToCpp<DtId::INT32>::type>(); break; case DtId::INT64: this->template toArray<typename NumPyToCpp<DtId::INT64>::type>(); break; case DtId::UINT32: this->template toArray<typename NumPyToCpp<DtId::UINT32>::type>(); break; case DtId::UINT64: this->template toArray<typename NumPyToCpp<DtId::UINT64>::type>(); break; default: throw openvdb::TypeError(); break; } } #ifdef __clang__ PRAGMA(clang diagnostic pop) #endif }; // class CopyOp // Specialization for Vec3 grids template<typename GridType> class CopyOp<GridType, /*VecSize=*/3>: public CopyOpBase<GridType> { public: CopyOp(bool toGrid, GridType& grid, py::object arrObj, py::object coordObj, py::object tolObj = py::object(zeroVal<typename GridType::ValueType>())): CopyOpBase<GridType>(toGrid, grid, arrObj, coordObj, tolObj) { } protected: void validate() const override { if (this->mArrayDims.size() != 4) { std::ostringstream os; os << "expected 4-dimensional array, found " << this->mArrayDims.size() << "-dimensional array"; PyErr_SetString(PyExc_ValueError, os.str().c_str()); boost::python::throw_error_already_set(); } if (this->mArrayDims[3] != 3) { std::ostringstream os; os << "expected " << this->mArrayDims[0] << "x" << this->mArrayDims[1] << "x" << this->mArrayDims[2] << "x3 array, found " << this->mArrayDims[0] << "x" << this->mArrayDims[1] << "x" << this->mArrayDims[2] << "x" << this->mArrayDims[3] << " array"; PyErr_SetString(PyExc_ValueError, os.str().c_str()); boost::python::throw_error_already_set(); } } #ifdef __clang__ // Suppress "enum value not explicitly 
handled" warnings PRAGMA(clang diagnostic push) PRAGMA(clang diagnostic ignored "-Wswitch-enum") #endif void copyFromArray() const override { switch (this->mArrayTypeId) { case DtId::FLOAT: this->template fromArray<math::Vec3<typename NumPyToCpp<DtId::FLOAT>::type>>(); break; case DtId::DOUBLE: this->template fromArray<math::Vec3<typename NumPyToCpp<DtId::DOUBLE>::type>>(); break; case DtId::BOOL: this->template fromArray<math::Vec3<typename NumPyToCpp<DtId::BOOL>::type>>(); break; case DtId::INT16: this->template fromArray<math::Vec3<typename NumPyToCpp<DtId::INT16>::type>>(); break; case DtId::INT32: this->template fromArray<math::Vec3<typename NumPyToCpp<DtId::INT32>::type>>(); break; case DtId::INT64: this->template fromArray<math::Vec3<typename NumPyToCpp<DtId::INT64>::type>>(); break; case DtId::UINT32: this->template fromArray<math::Vec3<typename NumPyToCpp<DtId::UINT32>::type>>(); break; case DtId::UINT64: this->template fromArray<math::Vec3<typename NumPyToCpp<DtId::UINT64>::type>>(); break; default: throw openvdb::TypeError(); break; } } void copyToArray() const override { switch (this->mArrayTypeId) { case DtId::FLOAT: this->template toArray<math::Vec3<typename NumPyToCpp<DtId::FLOAT>::type>>(); break; case DtId::DOUBLE: this->template toArray<math::Vec3<typename NumPyToCpp<DtId::DOUBLE>::type>>(); break; case DtId::BOOL: this->template toArray<math::Vec3<typename NumPyToCpp<DtId::BOOL>::type>>(); break; case DtId::INT16: this->template toArray<math::Vec3<typename NumPyToCpp<DtId::INT16>::type>>(); break; case DtId::INT32: this->template toArray<math::Vec3<typename NumPyToCpp<DtId::INT32>::type>>(); break; case DtId::INT64: this->template toArray<math::Vec3<typename NumPyToCpp<DtId::INT64>::type>>(); break; case DtId::UINT32: this->template toArray<math::Vec3<typename NumPyToCpp<DtId::UINT32>::type>>(); break; case DtId::UINT64: this->template toArray<math::Vec3<typename NumPyToCpp<DtId::UINT64>::type>>(); break; default: throw openvdb::TypeError(); 
break; } } #ifdef __clang__ PRAGMA(clang diagnostic pop) #endif }; // class CopyOp template<typename GridType> inline void copyFromArray(GridType& grid, py::object arrayObj, py::object coordObj, py::object toleranceObj) { using ValueT = typename GridType::ValueType; CopyOp<GridType, VecTraits<ValueT>::Size> op(/*toGrid=*/true, grid, arrayObj, coordObj, toleranceObj); op(); } template<typename GridType> inline void copyToArray(GridType& grid, py::object arrayObj, py::object coordObj) { using ValueT = typename GridType::ValueType; CopyOp<GridType, VecTraits<ValueT>::Size> op(/*toGrid=*/false, grid, arrayObj, coordObj); op(); } template<> inline void copyFromArray(points::PointDataGrid& /*grid*/, py::object /*arrayObj*/, py::object /*coordObj*/, py::object /*toleranceObj*/) { PyErr_SetString(PyExc_NotImplementedError, "copying NumPy arrays for PointDataGrids is not supported"); boost::python::throw_error_already_set(); } template<> inline void copyToArray(points::PointDataGrid& /*grid*/, py::object /*arrayObj*/, py::object /*coordObj*/) { PyErr_SetString(PyExc_NotImplementedError, "copying NumPy arrays for PointDataGrids is not supported"); boost::python::throw_error_already_set(); } #endif // defined(PY_OPENVDB_USE_NUMPY) //////////////////////////////////////// #ifndef PY_OPENVDB_USE_NUMPY template<typename GridType> inline typename GridType::Ptr meshToLevelSet(py::object, py::object, py::object, py::object, py::object) { PyErr_SetString(PyExc_NotImplementedError, "this module was built without NumPy support"); boost::python::throw_error_already_set(); return typename GridType::Ptr(); } template<typename GridType> inline py::object volumeToQuadMesh(const GridType&, py::object) { PyErr_SetString(PyExc_NotImplementedError, "this module was built without NumPy support"); boost::python::throw_error_already_set(); return py::object(); } template<typename GridType> inline py::object volumeToMesh(const GridType&, py::object, py::object) { 
PyErr_SetString(PyExc_NotImplementedError, "this module was built without NumPy support"); boost::python::throw_error_already_set(); return py::object(); } #else // if defined(PY_OPENVDB_USE_NUMPY) // Helper class for meshToLevelSet() template<typename SrcT, typename DstT> struct CopyVecOp { void operator()(const void* srcPtr, DstT* dst, size_t count) { const SrcT* src = static_cast<const SrcT*>(srcPtr); for (size_t i = count; i > 0; --i, ++src, ++dst) { *dst = static_cast<DstT>(*src); } } }; // Partial specialization for source and destination arrays of the same type template<typename T> struct CopyVecOp<T, T> { void operator()(const void* srcPtr, T* dst, size_t count) { const T* src = static_cast<const T*>(srcPtr); ::memcpy(dst, src, count * sizeof(T)); } }; // Helper function for use with meshToLevelSet() to copy vectors of various types // and sizes from NumPy arrays to STL vectors template<typename VecT> inline void copyVecArray(NumPyArrayType& arrayObj, std::vector<VecT>& vec) { using ValueT = typename VecT::ValueType; // Get the input array dimensions. const auto dims = arrayDimensions(arrayObj); const size_t M = dims.empty() ? 0 : dims[0]; const size_t N = VecT().numElements(); if (M == 0 || N == 0) return; // Preallocate the output vector. vec.resize(M); // Copy values from the input array to the output vector (with type conversion, if necessary). 
#ifdef PY_OPENVDB_USE_BOOST_PYTHON_NUMPY const void* src = arrayObj.get_data(); #else PyArrayObject* arrayObjPtr = reinterpret_cast<PyArrayObject*>(arrayObj.ptr()); const void* src = PyArray_DATA(arrayObjPtr); #endif ValueT* dst = &vec[0][0]; switch (arrayTypeId(arrayObj)) { case DtId::FLOAT: CopyVecOp<NumPyToCpp<DtId::FLOAT>::type, ValueT>()(src, dst, M*N); break; case DtId::DOUBLE: CopyVecOp<NumPyToCpp<DtId::DOUBLE>::type, ValueT>()(src, dst, M*N); break; case DtId::INT16: CopyVecOp<NumPyToCpp<DtId::INT16>::type, ValueT>()(src, dst, M*N); break; case DtId::INT32: CopyVecOp<NumPyToCpp<DtId::INT32>::type, ValueT>()(src, dst, M*N); break; case DtId::INT64: CopyVecOp<NumPyToCpp<DtId::INT64>::type, ValueT>()(src, dst, M*N); break; case DtId::UINT32: CopyVecOp<NumPyToCpp<DtId::UINT32>::type, ValueT>()(src, dst, M*N); break; case DtId::UINT64: CopyVecOp<NumPyToCpp<DtId::UINT64>::type, ValueT>()(src, dst, M*N); break; default: break; } } /// @brief Given NumPy arrays of points, triangle indices and quad indices, /// call tools::meshToLevelSet() to generate a level set grid. template<typename GridType> inline typename GridType::Ptr meshToLevelSet(py::object pointsObj, py::object trianglesObj, py::object quadsObj, py::object xformObj, py::object halfWidthObj) { struct Local { // Return the name of the Python grid method (for use in error messages). static const char* methodName() { return "createLevelSetFromPolygons"; } // Raise a Python exception if the given NumPy array does not have dimensions M x N // or does not have an integer or floating-point data type. static void validate2DNumPyArray(NumPyArrayType arrayObj, const size_t N, const char* desiredType) { const auto dims = arrayDimensions(arrayObj); bool wrongArrayType = false; // Check array dimensions. if (dims.size() != 2 || dims[1] != N) { wrongArrayType = true; } else { // Check array data type. 
switch (arrayTypeId(arrayObj)) { case DtId::FLOAT: case DtId::DOUBLE: //case DtId::HALF: case DtId::INT16: case DtId::INT32: case DtId::INT64: case DtId::UINT32: case DtId::UINT64: break; default: wrongArrayType = true; break; } } if (wrongArrayType) { // Generate an error message and raise a Python TypeError. std::ostringstream os; os << "expected N x 3 numpy.ndarray of " << desiredType << ", found "; switch (dims.size()) { case 0: os << "zero-dimensional"; break; case 1: os << "one-dimensional"; break; default: os << dims[0]; for (size_t i = 1; i < dims.size(); ++i) { os << " x " << dims[i]; } break; } os << " " << arrayTypeName(arrayObj) << " array as argument 1 to " << pyutil::GridTraits<GridType>::name() << "." << methodName() << "()"; PyErr_SetString(PyExc_TypeError, os.str().c_str()); py::throw_error_already_set(); } } }; // Extract the narrow band half width from the arguments to this method. const float halfWidth = extractValueArg<GridType, float>( halfWidthObj, Local::methodName(), /*argIdx=*/5, "float"); // Extract the transform from the arguments to this method. math::Transform::Ptr xform = math::Transform::createLinearTransform(); if (!xformObj.is_none()) { xform = extractValueArg<GridType, math::Transform::Ptr>( xformObj, Local::methodName(), /*argIdx=*/4, "Transform"); } // Extract the list of mesh vertices from the arguments to this method. std::vector<Vec3s> points; if (!pointsObj.is_none()) { // Extract a reference to (not a copy of) a NumPy array argument, // or throw an exception if the argument is not a NumPy array object. auto arrayObj = extractValueArg<GridType, NumPyArrayType>( pointsObj, Local::methodName(), /*argIdx=*/1, "numpy.ndarray"); // Throw an exception if the array has the wrong type or dimensions. Local::validate2DNumPyArray(arrayObj, /*N=*/3, /*desiredType=*/"float"); // Copy values from the array to the vector. copyVecArray(arrayObj, points); } // Extract the list of triangle indices from the arguments to this method. 
std::vector<Vec3I> triangles; if (!trianglesObj.is_none()) { auto arrayObj = extractValueArg<GridType, NumPyArrayType>( trianglesObj, Local::methodName(), /*argIdx=*/2, "numpy.ndarray"); Local::validate2DNumPyArray(arrayObj, /*N=*/3, /*desiredType=*/"int"); copyVecArray(arrayObj, triangles); } // Extract the list of quad indices from the arguments to this method. std::vector<Vec4I> quads; if (!quadsObj.is_none()) { auto arrayObj = extractValueArg<GridType, NumPyArrayType>( quadsObj, Local::methodName(), /*argIdx=*/3, "numpy.ndarray"); Local::validate2DNumPyArray(arrayObj, /*N=*/4, /*desiredType=*/"int"); copyVecArray(arrayObj, quads); } // Generate and return a level set grid. return tools::meshToLevelSet<GridType>(*xform, points, triangles, quads, halfWidth); } template<typename GridType> inline py::object volumeToQuadMesh(const GridType& grid, py::object isovalueObj) { const double isovalue = pyutil::extractArg<double>( isovalueObj, "convertToQuads", /*className=*/nullptr, /*argIdx=*/2, "float"); // Mesh the input grid and populate lists of mesh vertices and face vertex indices. std::vector<Vec3s> points; std::vector<Vec4I> quads; tools::volumeToMesh(grid, points, quads, isovalue); #ifdef PY_OPENVDB_USE_BOOST_PYTHON_NUMPY const py::object own; auto dtype = py::numpy::dtype::get_builtin<Vec3s::value_type>(); auto shape = py::make_tuple(points.size(), 3); auto stride = py::make_tuple(3 * sizeof(Vec3s::value_type), sizeof(Vec3s::value_type)); // Create a deep copy of the array (because the point vector will be destroyed // when this function returns). 
auto pointArrayObj = py::numpy::from_data(points.data(), dtype, shape, stride, own).copy(); dtype = py::numpy::dtype::get_builtin<Vec4I::value_type>(); shape = py::make_tuple(quads.size(), 4); stride = py::make_tuple(4 * sizeof(Vec4I::value_type), sizeof(Vec4I::value_type)); auto quadArrayObj = py::numpy::from_data( quads.data(), dtype, shape, stride, own).copy(); // deep copy #else // !defined PY_OPENVDB_USE_BOOST_PYTHON_NUMPY // Copy vertices into an N x 3 NumPy array. py::object pointArrayObj = py::numeric::array(py::list(), "float32"); if (!points.empty()) { npy_intp dims[2] = { npy_intp(points.size()), 3 }; // Construct a NumPy array that wraps the point vector. if (PyArrayObject* arrayObj = reinterpret_cast<PyArrayObject*>( PyArray_SimpleNewFromData(/*nd=*/2, dims, NPY_FLOAT, &points[0]))) { // Create a deep copy of the array (because the point vector will be // destroyed when this function returns). pointArrayObj = copyNumPyArray(arrayObj, NPY_CORDER); } } // Copy face indices into an N x 4 NumPy array. py::object quadArrayObj = py::numeric::array(py::list(), "uint32"); if (!quads.empty()) { npy_intp dims[2] = { npy_intp(quads.size()), 4 }; if (PyArrayObject* arrayObj = reinterpret_cast<PyArrayObject*>( PyArray_SimpleNewFromData(/*dims=*/2, dims, NPY_UINT32, &quads[0]))) { quadArrayObj = copyNumPyArray(arrayObj, NPY_CORDER); } } #endif // PY_OPENVDB_USE_BOOST_PYTHON_NUMPY return py::make_tuple(pointArrayObj, quadArrayObj); } template<typename GridType> inline py::object volumeToMesh(const GridType& grid, py::object isovalueObj, py::object adaptivityObj) { const double isovalue = pyutil::extractArg<double>( isovalueObj, "convertToPolygons", /*className=*/nullptr, /*argIdx=*/2, "float"); const double adaptivity = pyutil::extractArg<double>( adaptivityObj, "convertToPolygons", /*className=*/nullptr, /*argIdx=*/3, "float"); // Mesh the input grid and populate lists of mesh vertices and face vertex indices. 
std::vector<Vec3s> points; std::vector<Vec3I> triangles; std::vector<Vec4I> quads; tools::volumeToMesh(grid, points, triangles, quads, isovalue, adaptivity); #ifdef PY_OPENVDB_USE_BOOST_PYTHON_NUMPY const py::object own; auto dtype = py::numpy::dtype::get_builtin<Vec3s::value_type>(); auto shape = py::make_tuple(points.size(), 3); auto stride = py::make_tuple(3 * sizeof(Vec3s::value_type), sizeof(Vec3s::value_type)); // Create a deep copy of the array (because the point vector will be destroyed // when this function returns). auto pointArrayObj = py::numpy::from_data(points.data(), dtype, shape, stride, own).copy(); dtype = py::numpy::dtype::get_builtin<Vec3I::value_type>(); shape = py::make_tuple(triangles.size(), 3); stride = py::make_tuple(3 * sizeof(Vec3I::value_type), sizeof(Vec3I::value_type)); auto triangleArrayObj = py::numpy::from_data( triangles.data(), dtype, shape, stride, own).copy(); // deep copy dtype = py::numpy::dtype::get_builtin<Vec4I::value_type>(); shape = py::make_tuple(quads.size(), 4); stride = py::make_tuple(4 * sizeof(Vec4I::value_type), sizeof(Vec4I::value_type)); auto quadArrayObj = py::numpy::from_data( quads.data(), dtype, shape, stride, own).copy(); // deep copy #else // !defined PY_OPENVDB_USE_BOOST_PYTHON_NUMPY // Copy vertices into an N x 3 NumPy array. py::object pointArrayObj = py::numeric::array(py::list(), "float32"); if (!points.empty()) { npy_intp dims[2] = { npy_intp(points.size()), 3 }; // Construct a NumPy array that wraps the point vector. if (PyArrayObject* arrayObj = reinterpret_cast<PyArrayObject*>( PyArray_SimpleNewFromData(/*dims=*/2, dims, NPY_FLOAT, &points[0]))) { // Create a deep copy of the array (because the point vector will be // destroyed when this function returns). pointArrayObj = copyNumPyArray(arrayObj, NPY_CORDER); } } // Copy triangular face indices into an N x 3 NumPy array. 
py::object triangleArrayObj = py::numeric::array(py::list(), "uint32"); if (!triangles.empty()) { npy_intp dims[2] = { npy_intp(triangles.size()), 3 }; if (PyArrayObject* arrayObj = reinterpret_cast<PyArrayObject*>( PyArray_SimpleNewFromData(/*dims=*/2, dims, NPY_UINT32, &triangles[0]))) { triangleArrayObj = copyNumPyArray(arrayObj, NPY_CORDER); } } // Copy quadrilateral face indices into an N x 4 NumPy array. py::object quadArrayObj = py::numeric::array(py::list(), "uint32"); if (!quads.empty()) { npy_intp dims[2] = { npy_intp(quads.size()), 4 }; if (PyArrayObject* arrayObj = reinterpret_cast<PyArrayObject*>( PyArray_SimpleNewFromData(/*dims=*/2, dims, NPY_UINT32, &quads[0]))) { quadArrayObj = copyNumPyArray(arrayObj, NPY_CORDER); } } #endif // PY_OPENVDB_USE_BOOST_PYTHON_NUMPY return py::make_tuple(pointArrayObj, triangleArrayObj, quadArrayObj); } #endif // defined(PY_OPENVDB_USE_NUMPY) //////////////////////////////////////// template<typename GridType, typename IterType> inline void applyMap(const char* methodName, GridType& grid, py::object funcObj) { using ValueT = typename GridType::ValueType; for (IterType it = grid.tree().template begin<IterType>(); it; ++it) { // Evaluate the functor. py::object result = funcObj(*it); // Verify that the result is of type GridType::ValueType. 
py::extract<ValueT> val(result); if (!val.check()) { PyErr_Format(PyExc_TypeError, "expected callable argument to %s.%s() to return %s, found %s", pyutil::GridTraits<GridType>::name(), methodName, openvdb::typeNameAsString<ValueT>(), pyutil::className(result).c_str()); py::throw_error_already_set(); } it.setValue(val()); } } template<typename GridType> inline void mapOn(GridType& grid, py::object funcObj) { applyMap<GridType, typename GridType::ValueOnIter>("mapOn", grid, funcObj); } template<typename GridType> inline void mapOff(GridType& grid, py::object funcObj) { applyMap<GridType, typename GridType::ValueOffIter>("mapOff", grid, funcObj); } template<typename GridType> inline void mapAll(GridType& grid, py::object funcObj) { applyMap<GridType, typename GridType::ValueAllIter>("mapAll", grid, funcObj); } //////////////////////////////////////// template<typename GridType> struct TreeCombineOp { using TreeT = typename GridType::TreeType; using ValueT = typename GridType::ValueType; TreeCombineOp(py::object _op): op(_op) {} void operator()(const ValueT& a, const ValueT& b, ValueT& result) { py::object resultObj = op(a, b); py::extract<ValueT> val(resultObj); if (!val.check()) { PyErr_Format(PyExc_TypeError, "expected callable argument to %s.combine() to return %s, found %s", pyutil::GridTraits<GridType>::name(), openvdb::typeNameAsString<ValueT>(), pyutil::className(resultObj).c_str()); py::throw_error_already_set(); } result = val(); } py::object op; }; template<typename GridType> inline void combine(GridType& grid, py::object otherGridObj, py::object funcObj) { using GridPtr = typename GridType::Ptr; GridPtr otherGrid = extractValueArg<GridType, GridPtr>(otherGridObj, "combine", 1, pyutil::GridTraits<GridType>::name()); TreeCombineOp<GridType> op(funcObj); grid.tree().combine(otherGrid->tree(), op, /*prune=*/true); } //////////////////////////////////////// template<typename GridType> inline typename GridType::Ptr createLevelSetSphere(float radius, const 
openvdb::Vec3f& center, float voxelSize, float halfWidth) { return tools::createLevelSetSphere<GridType>(radius, center, voxelSize, halfWidth); } //////////////////////////////////////// template<typename GridT, typename IterT> class IterWrap; // forward declaration // // Type traits for various iterators // template<typename GridT, typename IterT> struct IterTraits { // IterT the type of the iterator // name() function returning the base name of the iterator type (e.g., "ValueOffIter") // descr() function returning a string describing the iterator // begin() function returning a begin iterator for a given grid }; template<typename GridT> struct IterTraits<GridT, typename GridT::ValueOnCIter> { using IterT = typename GridT::ValueOnCIter; static std::string name() { return "ValueOnCIter"; } static std::string descr() { return std::string("Read-only iterator over the active values (tile and voxel)\nof a ") + pyutil::GridTraits<typename std::remove_const<GridT>::type>::name(); } static IterWrap<const GridT, IterT> begin(typename GridT::Ptr g) { return IterWrap<const GridT, IterT>(g, g->cbeginValueOn()); } }; // IterTraits<ValueOnCIter> template<typename GridT> struct IterTraits<GridT, typename GridT::ValueOffCIter> { using IterT = typename GridT::ValueOffCIter; static std::string name() { return "ValueOffCIter"; } static std::string descr() { return std::string("Read-only iterator over the inactive values (tile and voxel)\nof a ") + pyutil::GridTraits<typename std::remove_const<GridT>::type>::name(); } static IterWrap<const GridT, IterT> begin(typename GridT::Ptr g) { return IterWrap<const GridT, IterT>(g, g->cbeginValueOff()); } }; // IterTraits<ValueOffCIter> template<typename GridT> struct IterTraits<GridT, typename GridT::ValueAllCIter> { using IterT = typename GridT::ValueAllCIter; static std::string name() { return "ValueAllCIter"; } static std::string descr() { return std::string("Read-only iterator over all tile and voxel values of a ") + 
pyutil::GridTraits<typename std::remove_const<GridT>::type>::name(); } static IterWrap<const GridT, IterT> begin(typename GridT::Ptr g) { return IterWrap<const GridT, IterT>(g, g->cbeginValueAll()); } }; // IterTraits<ValueAllCIter> template<typename GridT> struct IterTraits<GridT, typename GridT::ValueOnIter> { using IterT = typename GridT::ValueOnIter; static std::string name() { return "ValueOnIter"; } static std::string descr() { return std::string("Read/write iterator over the active values (tile and voxel)\nof a ") + pyutil::GridTraits<typename std::remove_const<GridT>::type>::name(); } static IterWrap<GridT, IterT> begin(typename GridT::Ptr g) { return IterWrap<GridT, IterT>(g, g->beginValueOn()); } }; // IterTraits<ValueOnIter> template<typename GridT> struct IterTraits<GridT, typename GridT::ValueOffIter> { using IterT = typename GridT::ValueOffIter; static std::string name() { return "ValueOffIter"; } static std::string descr() { return std::string("Read/write iterator over the inactive values (tile and voxel)\nof a ") + pyutil::GridTraits<typename std::remove_const<GridT>::type>::name(); } static IterWrap<GridT, IterT> begin(typename GridT::Ptr g) { return IterWrap<GridT, IterT>(g, g->beginValueOff()); } }; // IterTraits<ValueOffIter> template<typename GridT> struct IterTraits<GridT, typename GridT::ValueAllIter> { using IterT = typename GridT::ValueAllIter; static std::string name() { return "ValueAllIter"; } static std::string descr() { return std::string("Read/write iterator over all tile and voxel values of a ") + pyutil::GridTraits<typename std::remove_const<GridT>::type>::name(); } static IterWrap<GridT, IterT> begin(typename GridT::Ptr g) { return IterWrap<GridT, IterT>(g, g->beginValueAll()); } }; // IterTraits<ValueAllIter> //////////////////////////////////////// // Helper class to modify a grid through a non-const iterator template<typename GridT, typename IterT> struct IterItemSetter { using ValueT = typename GridT::ValueType; static void 
setValue(const IterT& iter, const ValueT& val) { iter.setValue(val); } static void setActive(const IterT& iter, bool on) { iter.setActiveState(on); } }; // Partial specialization for const iterators template<typename GridT, typename IterT> struct IterItemSetter<const GridT, IterT> { using ValueT = typename GridT::ValueType; static void setValue(const IterT&, const ValueT&) { PyErr_SetString(PyExc_AttributeError, "can't set attribute 'value'"); py::throw_error_already_set(); } static void setActive(const IterT&, bool /*on*/) { PyErr_SetString(PyExc_AttributeError, "can't set attribute 'active'"); py::throw_error_already_set(); } }; /// @brief Value returned by the next() method of a grid's value iterator /// @details This class allows both dictionary-style (e.g., items["depth"]) and /// attribute access (e.g., items.depth) to the items returned by an iterator. /// @todo Create a reusable base class for "named dicts" like this? template<typename _GridT, typename _IterT> class IterValueProxy { public: using GridT = _GridT; using IterT = _IterT; using ValueT = typename GridT::ValueType; using SetterT = IterItemSetter<GridT, IterT>; IterValueProxy(typename GridT::ConstPtr grid, const IterT& iter): mGrid(grid), mIter(iter) {} IterValueProxy copy() const { return *this; } typename GridT::ConstPtr parent() const { return mGrid; } ValueT getValue() const { return *mIter; } bool getActive() const { return mIter.isValueOn(); } Index getDepth() const { return mIter.getDepth(); } Coord getBBoxMin() const { return mIter.getBoundingBox().min(); } Coord getBBoxMax() const { return mIter.getBoundingBox().max(); } Index64 getVoxelCount() const { return mIter.getVoxelCount(); } void setValue(const ValueT& val) { SetterT::setValue(mIter, val); } void setActive(bool on) { SetterT::setActive(mIter, on); } /// Return this dictionary's keys as a list of C strings. 
static const char* const * keys() { static const char* const sKeys[] = { "value", "active", "depth", "min", "max", "count", nullptr }; return sKeys; } /// Return @c true if the given string is a valid key. static bool hasKey(const std::string& key) { for (int i = 0; keys()[i] != nullptr; ++i) { if (key == keys()[i]) return true; } return false; } /// Return this dictionary's keys as a Python list of Python strings. static py::list getKeys() { py::list keyList; for (int i = 0; keys()[i] != nullptr; ++i) keyList.append(keys()[i]); return keyList; } /// @brief Return the value for the given key. /// @throw KeyError if the key is invalid py::object getItem(py::object keyObj) const { py::extract<std::string> x(keyObj); if (x.check()) { const std::string key = x(); if (key == "value") return py::object(this->getValue()); else if (key == "active") return py::object(this->getActive()); else if (key == "depth") return py::object(this->getDepth()); else if (key == "min") return py::object(this->getBBoxMin()); else if (key == "max") return py::object(this->getBBoxMax()); else if (key == "count") return py::object(this->getVoxelCount()); } PyErr_SetObject(PyExc_KeyError, ("%s" % keyObj.attr("__repr__")()).ptr()); py::throw_error_already_set(); return py::object(); } /// @brief Set the value for the given key. 
/// @throw KeyError if the key is invalid /// @throw AttributeError if the key refers to a read-only item void setItem(py::object keyObj, py::object valObj) { py::extract<std::string> x(keyObj); if (x.check()) { const std::string key = x(); if (key == "value") { this->setValue(py::extract<ValueT>(valObj)); return; } else if (key == "active") { this->setActive(py::extract<bool>(valObj)); return; } else if (this->hasKey(key)) { PyErr_SetObject(PyExc_AttributeError, ("can't set attribute '%s'" % keyObj.attr("__repr__")()).ptr()); py::throw_error_already_set(); } } PyErr_SetObject(PyExc_KeyError, ("'%s'" % keyObj.attr("__repr__")()).ptr()); py::throw_error_already_set(); } bool operator==(const IterValueProxy& other) const { return (other.getActive() == this->getActive() && other.getDepth() == this->getDepth() && math::isExactlyEqual(other.getValue(), this->getValue()) && other.getBBoxMin() == this->getBBoxMin() && other.getBBoxMax() == this->getBBoxMax() && other.getVoxelCount() == this->getVoxelCount()); } bool operator!=(const IterValueProxy& other) const { return !(*this == other); } /// Print this dictionary to a stream. std::ostream& put(std::ostream& os) const { // valuesAsStrings = ["%s: %s" % key, repr(this[key]) for key in this.keys()] py::list valuesAsStrings; for (int i = 0; this->keys()[i] != nullptr; ++i) { py::str key(this->keys()[i]), val(this->getItem(key).attr("__repr__")()); valuesAsStrings.append("'%s': %s" % py::make_tuple(key, val)); } // print ", ".join(valuesAsStrings) py::object joined = py::str(", ").attr("join")(valuesAsStrings); std::string s = py::extract<std::string>(joined); os << "{" << s << "}"; return os; } /// Return a string describing this dictionary. std::string info() const { std::ostringstream os; os << *this; return os.str(); } private: // To keep the iterator's grid from being deleted (leaving the iterator dangling), // store a shared pointer to the grid. 
const typename GridT::ConstPtr mGrid; const IterT mIter; // the iterator may not be incremented }; // class IterValueProxy template<typename GridT, typename IterT> inline std::ostream& operator<<(std::ostream& os, const IterValueProxy<GridT, IterT>& iv) { return iv.put(os); } //////////////////////////////////////// /// Wrapper for a grid's value iterator classes template<typename _GridT, typename _IterT> class IterWrap { public: using GridT = _GridT; using IterT = _IterT; using ValueT = typename GridT::ValueType; using IterValueProxyT = IterValueProxy<GridT, IterT>; using Traits = IterTraits<GridT, IterT>; IterWrap(typename GridT::ConstPtr grid, const IterT& iter): mGrid(grid), mIter(iter) {} typename GridT::ConstPtr parent() const { return mGrid; } /// Return an IterValueProxy for the current iterator position. IterValueProxyT next() { if (!mIter) { PyErr_SetString(PyExc_StopIteration, "no more values"); py::throw_error_already_set(); } IterValueProxyT result(mGrid, mIter); ++mIter; return result; } static py::object returnSelf(const py::object& obj) { return obj; } /// @brief Define a Python wrapper class for this C++ class and another for /// the IterValueProxy class returned by iterators of this type. 
static void wrap() { const std::string gridClassName = pyutil::GridTraits<typename std::remove_const<GridT>::type>::name(), iterClassName = /*gridClassName +*/ Traits::name(), valueClassName = /*gridClassName +*/ "Value"; py::class_<IterWrap>( iterClassName.c_str(), /*docstring=*/Traits::descr().c_str(), /*ctor=*/py::no_init) // can only be instantiated from C++, not from Python .add_property("parent", &IterWrap::parent, ("the " + gridClassName + " over which to iterate").c_str()) .def("next", &IterWrap::next, ("next() -> " + valueClassName).c_str()) .def("__next__", &IterWrap::next, ("__next__() -> " + valueClassName).c_str()) .def("__iter__", &returnSelf); py::class_<IterValueProxyT>( valueClassName.c_str(), /*docstring=*/("Proxy for a tile or voxel value in a " + gridClassName).c_str(), /*ctor=*/py::no_init) // can only be instantiated from C++, not from Python .def("copy", &IterValueProxyT::copy, ("copy() -> " + valueClassName + "\n\n" "Return a shallow copy of this value, i.e., one that shares\n" "its data with the original.").c_str()) .add_property("parent", &IterValueProxyT::parent, ("the " + gridClassName + " to which this value belongs").c_str()) .def("__str__", &IterValueProxyT::info) .def("__repr__", &IterValueProxyT::info) .def("__eq__", &IterValueProxyT::operator==) .def("__ne__", &IterValueProxyT::operator!=) .add_property("value", &IterValueProxyT::getValue, &IterValueProxyT::setValue, "value of this tile or voxel") .add_property("active", &IterValueProxyT::getActive, &IterValueProxyT::setActive, "active state of this tile or voxel") .add_property("depth", &IterValueProxyT::getDepth, "tree depth at which this value is stored") .add_property("min", &IterValueProxyT::getBBoxMin, "lower bound of the axis-aligned bounding box of this tile or voxel") .add_property("max", &IterValueProxyT::getBBoxMax, "upper bound of the axis-aligned bounding box of this tile or voxel") .add_property("count", &IterValueProxyT::getVoxelCount, "number of voxels spanned by 
this value") .def("keys", &IterValueProxyT::getKeys, "keys() -> list\n\n" "Return a list of keys for this tile or voxel.") .staticmethod("keys") .def("__contains__", &IterValueProxyT::hasKey, "__contains__(key) -> bool\n\n" "Return True if the given key exists.") .staticmethod("__contains__") .def("__getitem__", &IterValueProxyT::getItem, "__getitem__(key) -> value\n\n" "Return the value of the item with the given key.") .def("__setitem__", &IterValueProxyT::getItem, "__setitem__(key, value)\n\n" "Set the value of the item with the given key."); } private: // To keep this iterator's grid from being deleted, leaving the iterator dangling, // store a shared pointer to the grid. const typename GridT::ConstPtr mGrid; IterT mIter; }; // class IterWrap //////////////////////////////////////// template<typename GridT> struct PickleSuite: public py::pickle_suite { using GridPtrT = typename GridT::Ptr; /// Return @c true, indicating that this pickler preserves a Grid's __dict__. static bool getstate_manages_dict() { return true; } /// Return a tuple representing the state of the given Grid. static py::tuple getstate(py::object gridObj) { py::tuple state; // Extract a Grid from the Python object. GridPtrT grid; py::extract<GridPtrT> x(gridObj); if (x.check()) grid = x(); if (grid) { // Serialize the Grid to a string. std::ostringstream ostr(std::ios_base::binary); { openvdb::io::Stream strm(ostr); strm.setGridStatsMetadataEnabled(false); strm.write(openvdb::GridPtrVec(1, grid)); } // Construct a state tuple comprising the Python object's __dict__ // and the serialized Grid. #if PY_MAJOR_VERSION >= 3 // Convert the byte string to a "bytes" sequence. const std::string s = ostr.str(); py::object bytesObj = pyutil::pyBorrow(PyBytes_FromStringAndSize(s.data(), s.size())); #else py::str bytesObj(ostr.str()); #endif state = py::make_tuple(gridObj.attr("__dict__"), bytesObj); } return state; } /// Restore the given Grid to a saved state. 
static void setstate(py::object gridObj, py::object stateObj) { GridPtrT grid; { py::extract<GridPtrT> x(gridObj); if (x.check()) grid = x(); } if (!grid) return; py::tuple state; { py::extract<py::tuple> x(stateObj); if (x.check()) state = x(); } bool badState = (py::len(state) != 2); if (!badState) { // Restore the object's __dict__. py::extract<py::dict> x(state[0]); if (x.check()) { py::dict d = py::extract<py::dict>(gridObj.attr("__dict__"))(); d.update(x()); } else { badState = true; } } std::string serialized; if (!badState) { // Extract the sequence containing the serialized Grid. py::object bytesObj = state[1]; #if PY_MAJOR_VERSION >= 3 badState = true; if (PyBytes_Check(bytesObj.ptr())) { // Convert the "bytes" sequence to a byte string. char* buf = nullptr; Py_ssize_t length = 0; if (-1 != PyBytes_AsStringAndSize(bytesObj.ptr(), &buf, &length)) { if (buf != nullptr && length > 0) { serialized.assign(buf, buf + length); badState = false; } } } #else py::extract<std::string> x(bytesObj); if (x.check()) serialized = x(); else badState = true; #endif } if (badState) { PyErr_SetObject(PyExc_ValueError, #if PY_MAJOR_VERSION >= 3 ("expected (dict, bytes) tuple in call to __setstate__; found %s" #else ("expected (dict, str) tuple in call to __setstate__; found %s" #endif % stateObj.attr("__repr__")()).ptr()); py::throw_error_already_set(); } // Restore the internal state of the C++ object. GridPtrVecPtr grids; { std::istringstream istr(serialized, std::ios_base::binary); io::Stream strm(istr); grids = strm.getGrids(); // (note: file-level metadata is ignored) } if (grids && !grids->empty()) { if (GridPtrT savedGrid = gridPtrCast<GridT>((*grids)[0])) { grid->MetaMap::operator=(*savedGrid); ///< @todo add a Grid::setMetadata() method? grid->setTransform(savedGrid->transformPtr()); grid->setTree(savedGrid->treePtr()); } } } }; // struct PickleSuite //////////////////////////////////////// /// Create a Python wrapper for a particular template instantiation of Grid. 
template<typename GridType> inline void exportGrid() { using ValueT = typename GridType::ValueType; using GridPtr = typename GridType::Ptr; using Traits = pyutil::GridTraits<GridType>; using ValueOnCIterT = typename GridType::ValueOnCIter; using ValueOffCIterT = typename GridType::ValueOffCIter; using ValueAllCIterT = typename GridType::ValueAllCIter; using ValueOnIterT = typename GridType::ValueOnIter; using ValueOffIterT = typename GridType::ValueOffIter; using ValueAllIterT = typename GridType::ValueAllIter; math::Transform::Ptr (GridType::*getTransform)() = &GridType::transformPtr; const std::string pyGridTypeName = Traits::name(); const std::string defaultCtorDescr = "Initialize with a background value of " + pyutil::str(pyGrid::getZeroValue<GridType>()) + "."; // Define the Grid wrapper class and make it the current scope. { py::class_<GridType, /*HeldType=*/GridPtr> clss( /*classname=*/pyGridTypeName.c_str(), /*docstring=*/(Traits::descr()).c_str(), /*ctor=*/py::init<>(defaultCtorDescr.c_str()) ); py::scope gridClassScope = clss; clss.def(py::init<const ValueT&>(py::args("background"), "Initialize with the given background value.")) .def("copy", &pyGrid::copyGrid<GridType>, ("copy() -> " + pyGridTypeName + "\n\n" "Return a shallow copy of this grid, i.e., a grid\n" "that shares its voxel data with this grid.").c_str()) .def("deepCopy", &GridType::deepCopy, ("deepCopy() -> " + pyGridTypeName + "\n\n" "Return a deep copy of this grid.\n").c_str()) .def_pickle(pyGrid::PickleSuite<GridType>()) .def("sharesWith", &pyGrid::sharesWith<GridType>, ("sharesWith(" + pyGridTypeName + ") -> bool\n\n" "Return True if this grid shares its voxel data with the given grid.").c_str()) /// @todo Any way to set a docstring for a class property? 
.add_static_property("valueTypeName", &pyGrid::getValueType<GridType>) /// @todo docstring = "name of this grid's value type" .add_static_property("zeroValue", &pyGrid::getZeroValue<GridType>) /// @todo docstring = "zero, as expressed in this grid's value type" .add_static_property("oneValue", &pyGrid::getOneValue<GridType>) /// @todo docstring = "one, as expressed in this grid's value type" /// @todo Is Grid.typeName ever needed? //.add_static_property("typeName", &GridType::gridType) /// @todo docstring = to "name of this grid's type" .add_property("background", &pyGrid::getGridBackground<GridType>, &pyGrid::setGridBackground<GridType>, "value of this grid's background voxels") .add_property("name", &GridType::getName, &pyGrid::setGridName, "this grid's name") .add_property("creator", &GridType::getCreator, &pyGrid::setGridCreator, "description of this grid's creator") .add_property("transform", getTransform, &pyGrid::setGridTransform, "transform associated with this grid") .add_property("gridClass", &pyGrid::getGridClass, &pyGrid::setGridClass, "the class of volumetric data (level set, fog volume, etc.)\nstored in this grid") .add_property("vectorType", &pyGrid::getVecType, &pyGrid::setVecType, "how transforms are applied to values stored in this grid") .def("getAccessor", &pyGrid::getAccessor<GridType>, ("getAccessor() -> " + pyGridTypeName + "Accessor\n\n" "Return an accessor that provides random read and write access\n" "to this grid's voxels.").c_str()) .def("getConstAccessor", &pyGrid::getConstAccessor<GridType>, ("getConstAccessor() -> " + pyGridTypeName + "Accessor\n\n" "Return an accessor that provides random read-only access\n" "to this grid's voxels.").c_str()) // // Metadata // .add_property("metadata", &pyGrid::getAllMetadata, &pyGrid::replaceAllMetadata, "dict of this grid's metadata\n\n" "Setting this attribute replaces all of this grid's metadata,\n" "but mutating it in place has no effect on the grid, since\n" "the value of this attribute is a 
only a copy of the metadata.\n" "Use either indexing or updateMetadata() to mutate metadata in place.") .def("updateMetadata", &pyGrid::updateMetadata, "updateMetadata(dict)\n\n" "Add metadata to this grid, replacing any existing items\n" "having the same names as the new items.") .def("addStatsMetadata", &GridType::addStatsMetadata, "addStatsMetadata()\n\n" "Add metadata to this grid comprising the current values\n" "of statistics like the active voxel count and bounding box.\n" "(This metadata is not automatically kept up-to-date with\n" "changes to this grid.)") .def("getStatsMetadata", &pyGrid::getStatsMetadata, "getStatsMetadata() -> dict\n\n" "Return a (possibly empty) dict containing just the metadata\n" "that was added to this grid with addStatsMetadata().") .def("__getitem__", &pyGrid::getMetadata, "__getitem__(name) -> value\n\n" "Return the metadata value associated with the given name.") .def("__setitem__", &pyGrid::setMetadata, "__setitem__(name, value)\n\n" "Add metadata to this grid, replacing any existing item having\n" "the same name as the new item.") .def("__delitem__", &pyGrid::removeMetadata, "__delitem__(name)\n\n" "Remove the metadata with the given name.") .def("__contains__", &pyGrid::hasMetadata, "__contains__(name) -> bool\n\n" "Return True if this grid contains metadata with the given name.") .def("__iter__", &pyGrid::getMetadataKeys, "__iter__() -> iterator\n\n" "Return an iterator over this grid's metadata keys.") .def("iterkeys", &pyGrid::getMetadataKeys, "iterkeys() -> iterator\n\n" "Return an iterator over this grid's metadata keys.") .add_property("saveFloatAsHalf", &GridType::saveFloatAsHalf, &GridType::setSaveFloatAsHalf, "if True, write floating-point voxel values as 16-bit half floats") // // Statistics // .def("memUsage", &GridType::memUsage, "memUsage() -> int\n\n" "Return the memory usage of this grid in bytes.") .def("evalLeafBoundingBox", &pyGrid::evalLeafBoundingBox<GridType>, "evalLeafBoundingBox() -> xyzMin, xyzMax\n\n" 
"Return the coordinates of opposite corners of the axis-aligned\n" "bounding box of all leaf nodes.") .def("evalLeafDim", &pyGrid::evalLeafDim<GridType>, "evalLeafDim() -> x, y, z\n\n" "Return the dimensions of the axis-aligned bounding box\n" "of all leaf nodes.") .def("evalActiveVoxelBoundingBox", &pyGrid::evalActiveVoxelBoundingBox<GridType>, "evalActiveVoxelBoundingBox() -> xyzMin, xyzMax\n\n" "Return the coordinates of opposite corners of the axis-aligned\n" "bounding box of all active voxels.") .def("evalActiveVoxelDim", &GridType::evalActiveVoxelDim, "evalActiveVoxelDim() -> x, y, z\n\n" "Return the dimensions of the axis-aligned bounding box of all\n" "active voxels.") .add_property("treeDepth", &pyGrid::treeDepth<GridType>, "depth of this grid's tree from root node to leaf node") .def("nodeLog2Dims", &pyGrid::getNodeLog2Dims<GridType>, "list of Log2Dims of the nodes of this grid's tree\n" "in order from root to leaf") .def("leafCount", &pyGrid::leafCount<GridType>, "leafCount() -> int\n\n" "Return the number of leaf nodes in this grid's tree.") .def("nonLeafCount", &pyGrid::nonLeafCount<GridType>, "nonLeafCount() -> int\n\n" "Return the number of non-leaf nodes in this grid's tree.") .def("activeVoxelCount", &GridType::activeVoxelCount, "activeVoxelCount() -> int\n\n" "Return the number of active voxels in this grid.") .def("activeLeafVoxelCount", &pyGrid::activeLeafVoxelCount<GridType>, "activeLeafVoxelCount() -> int\n\n" "Return the number of active voxels that are stored\n" "in the leaf nodes of this grid's tree.") .def("evalMinMax", &pyGrid::evalMinMax<GridType>, "evalMinMax() -> min, max\n\n" "Return the minimum and maximum active values in this grid.") .def("getIndexRange", &pyGrid::getIndexRange<GridType>, "getIndexRange() -> min, max\n\n" "Return the minimum and maximum coordinates that are represented\n" "in this grid. 
These might include background voxels.") //.def("expand", &pyGrid::expandIndexRange<GridType>, // py::arg("xyz"), // "expand(xyz)\n\n" // "Expand this grid's index range to include the given coordinates.") .def("info", &pyGrid::gridInfo, py::arg("verbosity")=1, "info(verbosity=1) -> str\n\n" "Return a string containing information about this grid\n" "with a specified level of verbosity.\n") // // Tools // .def("fill", &pyGrid::fill<GridType>, (py::arg("min"), py::arg("max"), py::arg("value"), py::arg("active")=true), "fill(min, max, value, active=True)\n\n" "Set all voxels within a given axis-aligned box to\n" "a constant value (either active or inactive).") .def("signedFloodFill", &pyGrid::signedFloodFill<GridType>, "signedFloodFill()\n\n" "Propagate the sign from a narrow-band level set into inactive\n" "voxels and tiles.") .def("copyFromArray", &pyGrid::copyFromArray<GridType>, (py::arg("array"), py::arg("ijk")=Coord(0), py::arg("tolerance")=pyGrid::getZeroValue<GridType>()), ("copyFromArray(array, ijk=(0, 0, 0), tolerance=0)\n\n" "Populate this grid, starting at voxel (i, j, k), with values\nfrom a " + std::string(openvdb::VecTraits<ValueT>::IsVec ? "four" : "three") + "-dimensional array. Mark voxels as inactive\n" "if and only if their values are equal to this grid's\n" "background value within the given tolerance.").c_str()) .def("copyToArray", &pyGrid::copyToArray<GridType>, (py::arg("array"), py::arg("ijk")=Coord(0)), ("copyToArray(array, ijk=(0, 0, 0))\n\nPopulate a " + std::string(openvdb::VecTraits<ValueT>::IsVec ? "four" : "three") + "-dimensional array with values\n" "from this grid, starting at voxel (i, j, k).").c_str()) .def("convertToQuads", &pyGrid::volumeToQuadMesh<GridType>, (py::arg("isovalue")=0), "convertToQuads(isovalue=0) -> points, quads\n\n" "Uniformly mesh a scalar grid that has a continuous isosurface\n" "at the given isovalue. 
Return a NumPy array of world-space\n" "points and a NumPy array of 4-tuples of point indices, which\n" "specify the vertices of the quadrilaterals that form the mesh.") .def("convertToPolygons", &pyGrid::volumeToMesh<GridType>, (py::arg("isovalue")=0, py::arg("adaptivity")=0), "convertToPolygons(isovalue=0, adaptivity=0) -> points, triangles, quads\n\n" "Adaptively mesh a scalar grid that has a continuous isosurface\n" "at the given isovalue. Return a NumPy array of world-space\n" "points and NumPy arrays of 3- and 4-tuples of point indices,\n" "which specify the vertices of the triangles and quadrilaterals\n" "that form the mesh. Adaptivity can vary from 0 to 1, where 0\n" "produces a high-polygon-count mesh that closely approximates\n" "the isosurface, and 1 produces a lower-polygon-count mesh\n" "with some loss of surface detail.") .def("createLevelSetFromPolygons", &pyGrid::meshToLevelSet<GridType>, (py::arg("points"), py::arg("triangles")=py::object(), py::arg("quads")=py::object(), py::arg("transform")=py::object(), py::arg("halfWidth")=openvdb::LEVEL_SET_HALF_WIDTH), ("createLevelSetFromPolygons(points, triangles=None, quads=None,\n" " transform=None, halfWidth=" + std::to_string(openvdb::LEVEL_SET_HALF_WIDTH) + ") -> " + pyGridTypeName + "\n\n" "Convert a triangle and/or quad mesh to a narrow-band level set volume.\n" "The mesh must form a closed surface, but the surface need not be\n" "manifold and may have self intersections and degenerate faces.\n" "The mesh is described by a NumPy array of world-space points\n" "and NumPy arrays of 3- and 4-tuples of point indices that specify\n" "the vertices of the triangles and quadrilaterals that form the mesh.\n" "Either the triangle or the quad array may be empty or None.\n" "The resulting volume will have the given transform (or the identity\n" "transform if no transform is given) and a narrow band width of\n" "2 x halfWidth voxels.").c_str()) .staticmethod("createLevelSetFromPolygons") .def("prune", 
&pyGrid::prune<GridType>, (py::arg("tolerance")=0), "prune(tolerance=0)\n\n" "Remove nodes whose values all have the same active state\n" "and are equal to within a given tolerance.") .def("pruneInactive", &pyGrid::pruneInactive<GridType>, (py::arg("value")=py::object()), "pruneInactive(value=None)\n\n" "Remove nodes whose values are all inactive and replace them\n" "with either background tiles or tiles of the given value\n" "(if the value is not None).") .def("empty", &GridType::empty, "empty() -> bool\n\n" "Return True if this grid contains only background voxels.") .def("__nonzero__", &pyGrid::notEmpty<GridType>) .def("clear", &GridType::clear, "clear()\n\n" "Remove all tiles from this grid and all nodes other than the root node.") .def("merge", &GridType::merge, ("merge(" + pyGridTypeName + ")\n\n" "Move child nodes from the other grid into this grid wherever\n" "those nodes correspond to constant-value tiles in this grid,\n" "and replace leaf-level inactive voxels in this grid with\n" "corresponding voxels in the other grid that are active.\n\n" "Note: this operation always empties the other grid.").c_str()) .def("mapOn", &pyGrid::mapOn<GridType>, py::arg("function"), "mapOn(function)\n\n" "Iterate over all the active (\"on\") values (tile and voxel)\n" "of this grid and replace each value with function(value).\n\n" "Example: grid.mapOn(lambda x: x * 2 if x < 0.5 else x)") .def("mapOff", &pyGrid::mapOff<GridType>, py::arg("function"), "mapOff(function)\n\n" "Iterate over all the inactive (\"off\") values (tile and voxel)\n" "of this grid and replace each value with function(value).\n\n" "Example: grid.mapOff(lambda x: x * 2 if x < 0.5 else x)") .def("mapAll", &pyGrid::mapAll<GridType>, py::arg("function"), "mapAll(function)\n\n" "Iterate over all values (tile and voxel) of this grid\n" "and replace each value with function(value).\n\n" "Example: grid.mapAll(lambda x: x * 2 if x < 0.5 else x)") .def("combine", &pyGrid::combine<GridType>, (py::arg("grid"), 
py::arg("function")), "combine(grid, function)\n\n" "Compute function(self, other) over all corresponding pairs\n" "of values (tile or voxel) of this grid and the other grid\n" "and store the result in this grid.\n\n" "Note: this operation always empties the other grid.\n\n" "Example: grid.combine(otherGrid, lambda a, b: min(a, b))") // // Iterators // .def("citerOnValues", &pyGrid::IterTraits<GridType, ValueOnCIterT>::begin, "citerOnValues() -> iterator\n\n" "Return a read-only iterator over this grid's active\ntile and voxel values.") .def("citerOffValues", &pyGrid::IterTraits<GridType, ValueOffCIterT>::begin, "iterOffValues() -> iterator\n\n" "Return a read-only iterator over this grid's inactive\ntile and voxel values.") .def("citerAllValues", &pyGrid::IterTraits<GridType, ValueAllCIterT>::begin, "iterAllValues() -> iterator\n\n" "Return a read-only iterator over all of this grid's\ntile and voxel values.") .def("iterOnValues", &pyGrid::IterTraits<GridType, ValueOnIterT>::begin, "iterOnValues() -> iterator\n\n" "Return a read/write iterator over this grid's active\ntile and voxel values.") .def("iterOffValues", &pyGrid::IterTraits<GridType, ValueOffIterT>::begin, "iterOffValues() -> iterator\n\n" "Return a read/write iterator over this grid's inactive\ntile and voxel values.") .def("iterAllValues", &pyGrid::IterTraits<GridType, ValueAllIterT>::begin, "iterAllValues() -> iterator\n\n" "Return a read/write iterator over all of this grid's\ntile and voxel values.") ; // py::class_<Grid> // Register the GridPtr-to-Python object converter explicitly // if it is not already implicitly registered. try { py::object testObj{GridPtr()}; } catch (py::error_already_set&) { PyErr_Clear(); py::register_ptr_to_python<GridPtr>(); } py::implicitly_convertible<GridPtr, GridBase::Ptr>(); py::implicitly_convertible<GridPtr, GridBase::ConstPtr>(); /// @todo Is there a way to implicitly convert GridType references to GridBase /// references without wrapping the GridBase class? 
The following doesn't compile, /// because GridBase has pure virtual functions: /// @code /// py::implicitly_convertible<GridType&, GridBase&>(); /// @endcode // Wrap const and non-const value accessors and expose them // as nested classes of the Grid class. pyAccessor::AccessorWrap<const GridType>::wrap(); pyAccessor::AccessorWrap<GridType>::wrap(); // Wrap tree value iterators and expose them as nested classes of the Grid class. IterWrap<const GridType, ValueOnCIterT>::wrap(); IterWrap<const GridType, ValueOffCIterT>::wrap(); IterWrap<const GridType, ValueAllCIterT>::wrap(); IterWrap<GridType, ValueOnIterT>::wrap(); IterWrap<GridType, ValueOffIterT>::wrap(); IterWrap<GridType, ValueAllIterT>::wrap(); } // gridClassScope // Add the Python type object for this grid type to the module-level list. py::extract<py::list>(py::scope().attr("GridTypes"))().append( py::scope().attr(pyGridTypeName.c_str())); } } // namespace pyGrid #endif // OPENVDB_PYGRID_HAS_BEEN_INCLUDED
93,311
C
35.549941
100
0.614247
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/test/TestOpenVDB.py
#!/usr/local/bin/python # Copyright Contributors to the OpenVDB Project # SPDX-License-Identifier: MPL-2.0 """ Unit tests for the OpenVDB Python module These are intended primarily to test the Python-to-C++ and C++-to-Python bindings, not the OpenVDB library itself. """ import os, os.path import sys import unittest try: from studio import openvdb except ImportError: import pyopenvdb as openvdb def valueFactory(zeroValue, elemValue): """ Return elemValue converted to a value of the same type as zeroValue. If zeroValue is a sequence, return a sequence of the same type and length, with each element set to elemValue. """ val = zeroValue typ = type(val) try: # If the type is a sequence type, return a sequence of the appropriate length. size = len(val) val = typ([elemValue]) * size except TypeError: # Return a scalar value of the appropriate type. val = typ(elemValue) return val class TestOpenVDB(unittest.TestCase): def run(self, result=None, *args, **kwargs): super(TestOpenVDB, self).run(result, *args, **kwargs) def setUp(self): # Make output files and directories world-writable. self.umask = os.umask(0) def tearDown(self): os.umask(self.umask) def testModule(self): # At a minimum, BoolGrid, FloatGrid and Vec3SGrid should exist. self.assertTrue(openvdb.BoolGrid in openvdb.GridTypes) self.assertTrue(openvdb.FloatGrid in openvdb.GridTypes) self.assertTrue(openvdb.Vec3SGrid in openvdb.GridTypes) # Verify that it is possible to construct a grid of each supported type. 
for cls in openvdb.GridTypes: grid = cls() acc = grid.getAccessor() acc.setValueOn((-1, -2, 3)) self.assertEqual(grid.activeVoxelCount(), 1) def testTransform(self): xform1 = openvdb.createLinearTransform( [[.5, 0, 0, 0], [0, 1, 0, 0], [0, 0, 2, 0], [1, 2, 3, 1]]) self.assertTrue(xform1.typeName != '') self.assertEqual(xform1.indexToWorld((1, 1, 1)), (1.5, 3, 5)) xform2 = xform1 self.assertEqual(xform2, xform1) xform2 = xform1.deepCopy() self.assertEqual(xform2, xform1) xform2 = openvdb.createFrustumTransform(taper=0.5, depth=100, xyzMin=(0, 0, 0), xyzMax=(100, 100, 100), voxelSize=0.25) self.assertNotEqual(xform2, xform1) worldp = xform2.indexToWorld((10, 10, 10)) worldp = [int(round(x * 1000000)) for x in worldp] self.assertEqual(worldp, [-110000, -110000, 2500000]) grid = openvdb.FloatGrid() self.assertEqual(grid.transform, openvdb.createLinearTransform()) grid.transform = openvdb.createLinearTransform(2.0) self.assertEqual(grid.transform, openvdb.createLinearTransform(2.0)) def testGridCopy(self): grid = openvdb.FloatGrid() self.assertTrue(grid.sharesWith(grid)) self.assertFalse(grid.sharesWith([])) # wrong type; Grid expected copyOfGrid = grid.copy() self.assertTrue(copyOfGrid.sharesWith(grid)) deepCopyOfGrid = grid.deepCopy() self.assertFalse(deepCopyOfGrid.sharesWith(grid)) self.assertFalse(deepCopyOfGrid.sharesWith(copyOfGrid)) def testGridProperties(self): expected = { openvdb.BoolGrid: ('bool', False, True), openvdb.FloatGrid: ('float', 0.0, 1.0), openvdb.Vec3SGrid: ('vec3s', (0, 0, 0), (-1, 0, 1)), } for factory in expected: valType, bg, newbg = expected[factory] grid = factory() self.assertEqual(grid.valueTypeName, valType) def setValueType(obj): obj.valueTypeName = 'double' # Grid.valueTypeName is read-only, so setting it raises an exception. 
self.assertRaises(AttributeError, lambda obj=grid: setValueType(obj)) self.assertEqual(grid.background, bg) grid.background = newbg self.assertEqual(grid.background, newbg) self.assertEqual(grid.name, '') grid.name = 'test' self.assertEqual(grid.name, 'test') self.assertFalse(grid.saveFloatAsHalf) grid.saveFloatAsHalf = True self.assertTrue(grid.saveFloatAsHalf) self.assertTrue(grid.treeDepth > 2) def testGridMetadata(self): grid = openvdb.BoolGrid() self.assertEqual(grid.metadata, {}) meta = { 'name': 'test', 'xyz': (-1, 0, 1), 'xyzw': (1.0, 2.25, 3.5, 4.0), 'intval': 42, 'floatval': 1.25, 'mat4val': [[1]*4]*4, 'saveFloatAsHalf': True, } grid.metadata = meta self.assertEqual(grid.metadata, meta) meta['xyz'] = (-100, 100, 0) grid.updateMetadata(meta) self.assertEqual(grid.metadata, meta) self.assertEqual(set(grid.iterkeys()), set(meta.keys())) for name in meta: self.assertTrue(name in grid) self.assertEqual(grid[name], meta[name]) self.assertEqual(type(grid[name]), type(meta[name])) for name in grid: self.assertTrue(name in grid) self.assertEqual(grid[name], meta[name]) self.assertEqual(type(grid[name]), type(meta[name])) self.assertTrue('xyz' in grid) del grid['xyz'] self.assertFalse('xyz' in grid) grid['xyz'] = meta['xyz'] self.assertTrue('xyz' in grid) grid.addStatsMetadata() meta = grid.getStatsMetadata() self.assertEqual(0, meta["file_voxel_count"]) def testGridFill(self): grid = openvdb.FloatGrid() acc = grid.getAccessor() ijk = (1, 1, 1) self.assertRaises(TypeError, lambda: grid.fill("", (7, 7, 7), 1, False)) self.assertRaises(TypeError, lambda: grid.fill((0, 0, 0), "", 1, False)) self.assertRaises(TypeError, lambda: grid.fill((0, 0, 0), (7, 7, 7), "", False)) self.assertFalse(acc.isValueOn(ijk)) grid.fill((0, 0, 0), (7, 7, 7), 1, active=False) self.assertEqual(acc.getValue(ijk), 1) self.assertFalse(acc.isValueOn(ijk)) grid.fill((0, 0, 0), (7, 7, 7), 2, active=True) self.assertEqual(acc.getValue(ijk), 2) self.assertTrue(acc.isValueOn(ijk)) activeCount = 
grid.activeVoxelCount() acc.setValueOn(ijk, 2.125) self.assertEqual(grid.activeVoxelCount(), activeCount) grid.fill(ijk, ijk, 2.125, active=True) self.assertEqual(acc.getValue(ijk), 2.125) self.assertTrue(acc.isValueOn(ijk)) self.assertEqual(grid.activeVoxelCount(), activeCount) leafCount = grid.leafCount() grid.prune() self.assertAlmostEqual(acc.getValue(ijk), 2.125) self.assertTrue(acc.isValueOn(ijk)) self.assertEqual(grid.leafCount(), leafCount) self.assertEqual(grid.activeVoxelCount(), activeCount) grid.prune(tolerance=0.2) self.assertEqual(grid.activeVoxelCount(), activeCount) self.assertEqual(acc.getValue(ijk), 2.0) # median self.assertTrue(acc.isValueOn(ijk)) self.assertTrue(grid.leafCount() < leafCount) def testGridIterators(self): onCoords = set([(-10, -10, -10), (0, 0, 0), (1, 1, 1)]) for factory in openvdb.GridTypes: grid = factory() acc = grid.getAccessor() for c in onCoords: acc.setValueOn(c) coords = set(value.min for value in grid.iterOnValues()) self.assertEqual(coords, onCoords) n = 0 for _ in grid.iterAllValues(): n += 1 for _ in grid.iterOffValues(): n -= 1 self.assertEqual(n, len(onCoords)) grid = factory() grid.fill((0, 0, 1), (18, 18, 18), grid.oneValue) # make active activeCount = grid.activeVoxelCount() # Iterate over active values (via a const iterator) and verify # that the cumulative active voxel count matches the grid's. count = 0 for value in grid.citerOnValues(): count += value.count self.assertEqual(count, activeCount) # Via a non-const iterator, turn off every other active value. # Then verify that the cumulative active voxel count is half the original count. state = True for value in grid.iterOnValues(): count -= value.count value.active = state state = not state self.assertEqual(grid.activeVoxelCount(), activeCount / 2) # Verify that writing through a const iterator is not allowed. 
value = grid.citerOnValues().next() self.assertRaises(AttributeError, lambda: setattr(value, 'active', 0)) self.assertRaises(AttributeError, lambda: setattr(value, 'depth', 0)) # Verify that some value attributes are immutable, even given a non-const iterator. value = grid.iterOnValues().next() self.assertRaises(AttributeError, lambda: setattr(value, 'min', (0, 0, 0))) self.assertRaises(AttributeError, lambda: setattr(value, 'max', (0, 0, 0))) self.assertRaises(AttributeError, lambda: setattr(value, 'count', 1)) def testMap(self): grid = openvdb.BoolGrid() grid.fill((-4, -4, -4), (5, 5, 5), grid.zeroValue) # make active grid.mapOn(lambda x: not x) # replace active False values with True n = sum(item.value for item in grid.iterOnValues()) self.assertEqual(n, 10 * 10 * 10) grid = openvdb.FloatGrid() grid.fill((-4, -4, -4), (5, 5, 5), grid.oneValue) grid.mapOn(lambda x: x * 2) n = sum(item.value for item in grid.iterOnValues()) self.assertEqual(n, 10 * 10 * 10 * 2) grid = openvdb.Vec3SGrid() grid.fill((-4, -4, -4), (5, 5, 5), grid.zeroValue) grid.mapOn(lambda x: (0, 1, 0)) n = sum(item.value[1] for item in grid.iterOnValues()) self.assertEqual(n, 10 * 10 * 10) def testValueAccessor(self): coords = set([(-10, -10, -10), (0, 0, 0), (1, 1, 1)]) for factory in openvdb.GridTypes: # skip value accessor tests for PointDataGrids (value setting methods are disabled) if factory.valueTypeName.startswith('ptdataidx'): continue grid = factory() zero, one = grid.zeroValue, grid.oneValue acc = grid.getAccessor() cacc = grid.getConstAccessor() leafDepth = grid.treeDepth - 1 self.assertRaises(TypeError, lambda: cacc.setValueOn((5, 5, 5), zero)) self.assertRaises(TypeError, lambda: cacc.setValueOff((5, 5, 5), zero)) self.assertRaises(TypeError, lambda: cacc.setActiveState((5, 5, 5), True)) self.assertRaises(TypeError, lambda: acc.setValueOn("", zero)) self.assertRaises(TypeError, lambda: acc.setValueOff("", zero)) if grid.valueTypeName != "bool": self.assertRaises(TypeError, lambda: 
acc.setValueOn((5, 5, 5), object())) self.assertRaises(TypeError, lambda: acc.setValueOff((5, 5, 5), object())) for c in coords: grid.clear() # All voxels are inactive, background (0), and stored at the root. self.assertEqual(acc.getValue(c), zero) self.assertEqual(cacc.getValue(c), zero) self.assertFalse(acc.isValueOn(c)) self.assertFalse(cacc.isValueOn(c)) self.assertEqual(acc.getValueDepth(c), -1) self.assertEqual(cacc.getValueDepth(c), -1) acc.setValueOn(c) # active / 0 / leaf self.assertEqual(acc.getValue(c), zero) self.assertEqual(cacc.getValue(c), zero) self.assertTrue(acc.isValueOn(c)) self.assertTrue(cacc.isValueOn(c)) self.assertEqual(acc.getValueDepth(c), leafDepth) self.assertEqual(cacc.getValueDepth(c), leafDepth) acc.setValueOff(c, grid.oneValue) # inactive / 1 / leaf self.assertEqual(acc.getValue(c), one) self.assertEqual(cacc.getValue(c), one) self.assertFalse(acc.isValueOn(c)) self.assertFalse(cacc.isValueOn(c)) self.assertEqual(acc.getValueDepth(c), leafDepth) self.assertEqual(cacc.getValueDepth(c), leafDepth) # Verify that an accessor remains valid even after its grid is deleted # (because the C++ wrapper retains a reference to the C++ grid). def scoped(): grid = factory() acc = grid.getAccessor() cacc = grid.getConstAccessor() one = grid.oneValue acc.setValueOn((0, 0, 0), one) del grid self.assertEqual(acc.getValue((0, 0, 0)), one) self.assertEqual(cacc.getValue((0, 0, 0)), one) scoped() def testValueAccessorCopy(self): xyz = (0, 0, 0) grid = openvdb.BoolGrid() acc = grid.getAccessor() self.assertEqual(acc.getValue(xyz), False) self.assertFalse(acc.isValueOn(xyz)) copyOfAcc = acc.copy() self.assertEqual(copyOfAcc.getValue(xyz), False) self.assertFalse(copyOfAcc.isValueOn(xyz)) # Verify that changes made to the grid through one accessor are reflected in the other. 
acc.setValueOn(xyz, True) self.assertEqual(acc.getValue(xyz), True) self.assertTrue(acc.isValueOn(xyz)) self.assertEqual(copyOfAcc.getValue(xyz), True) self.assertTrue(copyOfAcc.isValueOn(xyz)) copyOfAcc.setValueOff(xyz) self.assertEqual(acc.getValue(xyz), True) self.assertFalse(acc.isValueOn(xyz)) self.assertEqual(copyOfAcc.getValue(xyz), True) self.assertFalse(copyOfAcc.isValueOn(xyz)) # Verify that the two accessors are distinct, by checking that they # have cached different sets of nodes. xyz2 = (-1, -1, -1) copyOfAcc.setValueOn(xyz2) self.assertTrue(copyOfAcc.isCached(xyz2)) self.assertFalse(copyOfAcc.isCached(xyz)) self.assertTrue(acc.isCached(xyz)) self.assertFalse(acc.isCached(xyz2)) def testPickle(self): import pickle # Test pickling of transforms of various types. testXforms = [ openvdb.createLinearTransform(voxelSize=0.1), openvdb.createLinearTransform(matrix=[[1,0,0,0],[0,2,0,0],[0,0,3,0],[4,3,2,1]]), openvdb.createFrustumTransform((0,0,0), (10,10,10), taper=0.8, depth=10.0), ] for xform in testXforms: s = pickle.dumps(xform) restoredXform = pickle.loads(s) self.assertEqual(restoredXform, xform) # Test pickling of grids of various types. for factory in openvdb.GridTypes: # Construct a grid. grid = factory() # Add some metadata to the grid. meta = { 'name': 'test', 'saveFloatAsHalf': True, 'xyz': (-1, 0, 1) } grid.metadata = meta # Add some voxel data to the grid. active = True for width in range(63, 0, -10): val = valueFactory(grid.zeroValue, width) grid.fill((0, 0, 0), (width,)*3, val, active) active = not active # Pickle the grid to a string, then unpickle the string. s = pickle.dumps(grid) restoredGrid = pickle.loads(s) # Verify that the original and unpickled grids' metadata are equal. self.assertEqual(restoredGrid.metadata, meta) # Verify that the original and unpickled grids have the same active values. 
for restored, original in zip(restoredGrid.iterOnValues(), grid.iterOnValues()): self.assertEqual(restored, original) # Verify that the original and unpickled grids have the same inactive values. for restored, original in zip(restoredGrid.iterOffValues(), grid.iterOffValues()): self.assertEqual(restored, original) def testGridCombine(self): # Construct two grids and add some voxel data to each. aGrid, bGrid = openvdb.FloatGrid(), openvdb.FloatGrid(background=1.0) for width in range(63, 1, -10): aGrid.fill((0, 0, 0), (width,)*3, width) bGrid.fill((0, 0, 0), (width,)*3, 2 * width) # Save a copy of grid A. copyOfAGrid = aGrid.deepCopy() # Combine corresponding values of the two grids, storing the result in grid A. # (Since the grids have the same topology and B's active values are twice A's, # the function computes 2*min(a, 2*a) + 3*max(a, 2*a) = 2*a + 3*(2*a) = 8*a # for active values, and 2*min(0, 1) + 3*max(0, 1) = 2*0 + 3*1 = 3 # for inactive values.) aGrid.combine(bGrid, lambda a, b: 2 * min(a, b) + 3 * max(a, b)) self.assertTrue(bGrid.empty()) # Verify that the resulting grid's values are as expected. for original, combined in zip(copyOfAGrid.iterOnValues(), aGrid.iterOnValues()): self.assertEqual(combined.min, original.min) self.assertEqual(combined.max, original.max) self.assertEqual(combined.depth, original.depth) self.assertEqual(combined.value, 8 * original.value) for original, combined in zip(copyOfAGrid.iterOffValues(), aGrid.iterOffValues()): self.assertEqual(combined.min, original.min) self.assertEqual(combined.max, original.max) self.assertEqual(combined.depth, original.depth) self.assertEqual(combined.value, 3) def testLevelSetSphere(self): HALF_WIDTH = 4 sphere = openvdb.createLevelSetSphere(halfWidth=HALF_WIDTH, voxelSize=1.0, radius=100.0) lo, hi = sphere.evalMinMax() self.assertTrue(lo >= -HALF_WIDTH) self.assertTrue(hi <= HALF_WIDTH) def testCopyFromArray(self): import random import time # Skip this test if NumPy is not available. 
try: import numpy as np except ImportError: return # Skip this test if the OpenVDB module was built without NumPy support. arr = np.zeros((1, 2, 1)) grid = openvdb.FloatGrid() try: grid.copyFromArray(arr) except NotImplementedError: return # Verify that a non-three-dimensional array can't be copied into a grid. grid = openvdb.FloatGrid() self.assertRaises(TypeError, lambda: grid.copyFromArray('abc')) arr = np.zeros((1, 2)) self.assertRaises(ValueError, lambda: grid.copyFromArray(arr)) # Verify that complex-valued arrays are not supported. arr = np.zeros((1, 2, 1), dtype = complex) grid = openvdb.FloatGrid() self.assertRaises(TypeError, lambda: grid.copyFromArray(arr)) ARRAY_DIM = 201 BG, FG = 0, 1 # Generate some random voxel coordinates. random.seed(0) def randCoord(): return tuple(random.randint(0, ARRAY_DIM-1) for i in range(3)) coords = set(randCoord() for i in range(200)) def createArrays(): # Test both scalar- and vec3-valued (i.e., four-dimensional) arrays. for shape in ( (ARRAY_DIM, ARRAY_DIM, ARRAY_DIM), # scalar array (ARRAY_DIM, ARRAY_DIM, ARRAY_DIM, 3) # vec3 array ): for dtype in (np.float32, np.int32, np.float64, np.int64, np.uint32, np.bool): # Create a NumPy array, fill it with the background value, # then set some elements to the foreground value. arr = np.ndarray(shape, dtype) arr.fill(BG) bg = arr[0, 0, 0] for c in coords: arr[c] = FG yield arr # Test copying from arrays of various types to grids of various types. for cls in openvdb.GridTypes: # skip copying test for PointDataGrids if cls.valueTypeName.startswith('ptdataidx'): continue for arr in createArrays(): isScalarArray = (len(arr.shape) == 3) isScalarGrid = False try: len(cls.zeroValue) # values of vector grids are sequences, which have a length except TypeError: isScalarGrid = True # values of scalar grids have no length gridBG = valueFactory(cls.zeroValue, BG) gridFG = valueFactory(cls.zeroValue, FG) # Create an empty grid. 
grid = cls(gridBG) acc = grid.getAccessor() # Verify that scalar arrays can't be copied into vector grids # and vector arrays can't be copied into scalar grids. if isScalarGrid != isScalarArray: self.assertRaises(ValueError, lambda: grid.copyFromArray(arr)) continue # Copy values from the NumPy array to the grid, marking # background values as inactive and all other values as active. now = time.clock() grid.copyFromArray(arr) elapsed = time.clock() - now #print 'copied %d voxels from %s array to %s in %f sec' % ( # arr.shape[0] * arr.shape[1] * arr.shape[2], # str(arr.dtype) + ('' if isScalarArray else '[]'), # grid.__class__.__name__, elapsed) # Verify that the grid's active voxels match the array's foreground elements. self.assertEqual(grid.activeVoxelCount(), len(coords)) for c in coords: self.assertEqual(acc.getValue(c), gridFG) for value in grid.iterOnValues(): self.assertTrue(value.min in coords) def testCopyToArray(self): import random import time # Skip this test if NumPy is not available. try: import numpy as np except ImportError: return # Skip this test if the OpenVDB module was built without NumPy support. arr = np.zeros((1, 2, 1)) grid = openvdb.FloatGrid() try: grid.copyFromArray(arr) except NotImplementedError: return # Verify that a grid can't be copied into a non-three-dimensional array. grid = openvdb.FloatGrid() self.assertRaises(TypeError, lambda: grid.copyToArray('abc')) arr = np.zeros((1, 2)) self.assertRaises(ValueError, lambda: grid.copyToArray(arr)) # Verify that complex-valued arrays are not supported. arr = np.zeros((1, 2, 1), dtype = complex) grid = openvdb.FloatGrid() self.assertRaises(TypeError, lambda: grid.copyToArray(arr)) ARRAY_DIM = 201 BG, FG = 0, 1 # Generate some random voxel coordinates. random.seed(0) def randCoord(): return tuple(random.randint(0, ARRAY_DIM-1) for i in range(3)) coords = set(randCoord() for i in range(200)) def createArrays(): # Test both scalar- and vec3-valued (i.e., four-dimensional) arrays. 
for shape in ( (ARRAY_DIM, ARRAY_DIM, ARRAY_DIM), # scalar array (ARRAY_DIM, ARRAY_DIM, ARRAY_DIM, 3) # vec3 array ): for dtype in (np.float32, np.int32, np.float64, np.int64, np.uint32, np.bool): # Return a new NumPy array. arr = np.ndarray(shape, dtype) arr.fill(-100) yield arr # Test copying from arrays of various types to grids of various types. for cls in openvdb.GridTypes: # skip copying test for PointDataGrids if cls.valueTypeName.startswith('ptdataidx'): continue for arr in createArrays(): isScalarArray = (len(arr.shape) == 3) isScalarGrid = False try: len(cls.zeroValue) # values of vector grids are sequences, which have a length except TypeError: isScalarGrid = True # values of scalar grids have no length gridBG = valueFactory(cls.zeroValue, BG) gridFG = valueFactory(cls.zeroValue, FG) # Create an empty grid, fill it with the background value, # then set some elements to the foreground value. grid = cls(gridBG) acc = grid.getAccessor() for c in coords: acc.setValueOn(c, gridFG) # Verify that scalar grids can't be copied into vector arrays # and vector grids can't be copied into scalar arrays. if isScalarGrid != isScalarArray: self.assertRaises(ValueError, lambda: grid.copyToArray(arr)) continue # Copy values from the grid to the NumPy array. now = time.clock() grid.copyToArray(arr) elapsed = time.clock() - now #print 'copied %d voxels from %s to %s array in %f sec' % ( # arr.shape[0] * arr.shape[1] * arr.shape[2], grid.__class__.__name__, # str(arr.dtype) + ('' if isScalarArray else '[]'), elapsed) # Verify that the grid's active voxels match the array's foreground elements. for c in coords: self.assertEqual(arr[c] if isScalarArray else tuple(arr[c]), gridFG) arr[c] = gridBG self.assertEqual(np.amin(arr), BG) self.assertEqual(np.amax(arr), BG) def testMeshConversion(self): import time # Skip this test if NumPy is not available. try: import numpy as np except ImportError: return # Test mesh to volume conversion. # Generate the vertices of a cube. 
cubeVertices = [(x, y, z) for x in (0, 100) for y in (0, 100) for z in (0, 100)] cubePoints = np.array(cubeVertices, float) # Generate the faces of a cube. cubeQuads = np.array([ (0, 1, 3, 2), # left (0, 2, 6, 4), # front (4, 6, 7, 5), # right (5, 7, 3, 1), # back (2, 3, 7, 6), # top (0, 4, 5, 1), # bottom ], float) voxelSize = 2.0 halfWidth = 3.0 xform = openvdb.createLinearTransform(voxelSize) # Only scalar, floating-point grids support createLevelSetFromPolygons() # (and the OpenVDB module might have been compiled without DoubleGrid support). grids = [] for gridType in [n for n in openvdb.GridTypes if n.__name__ in ('FloatGrid', 'DoubleGrid')]: # Skip this test if the OpenVDB module was built without NumPy support. try: grid = gridType.createLevelSetFromPolygons( cubePoints, quads=cubeQuads, transform=xform, halfWidth=halfWidth) except NotImplementedError: return #openvdb.write('/tmp/testMeshConversion.vdb', grid) self.assertEqual(grid.transform, xform) self.assertEqual(grid.background, halfWidth * voxelSize) dim = grid.evalActiveVoxelDim() self.assertTrue(50 < dim[0] < 58) self.assertTrue(50 < dim[1] < 58) self.assertTrue(50 < dim[2] < 58) grids.append(grid) # Boolean-valued grids can't be used to store level sets. self.assertRaises(TypeError, lambda: openvdb.BoolGrid.createLevelSetFromPolygons( cubePoints, quads=cubeQuads, transform=xform, halfWidth=halfWidth)) # Vector-valued grids can't be used to store level sets. self.assertRaises(TypeError, lambda: openvdb.Vec3SGrid.createLevelSetFromPolygons( cubePoints, quads=cubeQuads, transform=xform, halfWidth=halfWidth)) # The "points" argument to createLevelSetFromPolygons() must be a NumPy array. self.assertRaises(TypeError, lambda: openvdb.FloatGrid.createLevelSetFromPolygons( cubeVertices, quads=cubeQuads, transform=xform, halfWidth=halfWidth)) # The "points" argument to createLevelSetFromPolygons() must be a NumPy float or int array. 
self.assertRaises(TypeError, lambda: openvdb.FloatGrid.createLevelSetFromPolygons( np.array(cubeVertices, bool), quads=cubeQuads, transform=xform, halfWidth=halfWidth)) # The "triangles" argument to createLevelSetFromPolygons() must be an N x 3 NumPy array. self.assertRaises(TypeError, lambda: openvdb.FloatGrid.createLevelSetFromPolygons( cubePoints, triangles=cubeQuads, transform=xform, halfWidth=halfWidth)) # Test volume to mesh conversion. # Vector-valued grids can't be meshed. self.assertRaises(TypeError, lambda: openvdb.Vec3SGrid().convertToQuads()) for grid in grids: points, quads = grid.convertToQuads() # These checks are intended mainly to test the Python/C++ bindings, # not the OpenVDB volume to mesh converter. self.assertTrue(len(points) > 8) self.assertTrue(len(quads) > 6) pmin, pmax = points.min(0), points.max(0) self.assertTrue(-2 < pmin[0] < 2) self.assertTrue(-2 < pmin[1] < 2) self.assertTrue(-2 < pmin[2] < 2) self.assertTrue(98 < pmax[0] < 102) self.assertTrue(98 < pmax[1] < 102) self.assertTrue(98 < pmax[2] < 102) points, triangles, quads = grid.convertToPolygons(adaptivity=1) self.assertTrue(len(points) > 8) pmin, pmax = points.min(0), points.max(0) self.assertTrue(-2 < pmin[0] < 2) self.assertTrue(-2 < pmin[1] < 2) self.assertTrue(-2 < pmin[2] < 2) self.assertTrue(98 < pmax[0] < 102) self.assertTrue(98 < pmax[1] < 102) self.assertTrue(98 < pmax[2] < 102) if __name__ == '__main__': print('Testing %s' % os.path.dirname(openvdb.__file__)) sys.stdout.flush() args = sys.argv # PyUnit doesn't use the "-t" flag to identify test names, # so for consistency, strip out any "-t" arguments, # so that, e.g., "TestOpenVDB.py -t TestOpenVDB.testTransform" # is equivalent to "TestOpenVDB.py TestOpenVDB.testTransform". args = [a for a in args if a != '-t'] unittest.main(argv=args)
31,342
Python
38.978316
99
0.579925
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/Formats.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @author Ken Museth /// /// @file Formats.h /// /// @brief Utility routines to output nicely-formatted numeric values #ifndef OPENVDB_UTIL_FORMATS_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_FORMATS_HAS_BEEN_INCLUDED #include <iosfwd> #include <sstream> #include <string> #include <openvdb/version.h> #include <openvdb/Platform.h> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { /// Output a byte count with the correct binary suffix (KB, MB, GB or TB). /// @param os the output stream /// @param bytes the byte count to be output /// @param head a string to be output before the numeric text /// @param tail a string to be output after the numeric text /// @param exact if true, also output the unmodified count, e.g., "4.6 KB (4620 Bytes)" /// @param width a fixed width for the numeric text /// @param precision the number of digits after the decimal point /// @return 0, 1, 2, 3 or 4, denoting the order of magnitude of the count. OPENVDB_API int printBytes(std::ostream& os, uint64_t bytes, const std::string& head = "", const std::string& tail = "\n", bool exact = false, int width = 8, int precision = 3); /// Output a number with the correct SI suffix (thousand, million, billion or trillion) /// @param os the output stream /// @param number the number to be output /// @param head a string to be output before the numeric text /// @param tail a string to be output after the numeric text /// @param exact if true, also output the unmodified count, e.g., "4.6 Thousand (4620)" /// @param width a fixed width for the numeric text /// @param precision the number of digits after the decimal point /// @return 0, 1, 2, 3 or 4, denoting the order of magnitude of the number. 
OPENVDB_API int printNumber(std::ostream& os, uint64_t number, const std::string& head = "", const std::string& tail = "\n", bool exact = true, int width = 8, int precision = 3); /// Output a time in milliseconds with the correct suffix (days, hours, minutes, seconds and milliseconds) /// @param os the output stream /// @param milliseconds the time to be output /// @param head a string to be output before the time /// @param tail a string to be output after the time /// @param width a fixed width for the numeric text /// @param precision the number of digits after the decimal point /// @param verbose verbose level, 0 is compact format and 1 is long format /// @return 0, 1, 2, 3, or 4 denoting the order of magnitude of the time. OPENVDB_API int printTime(std::ostream& os, double milliseconds, const std::string& head = "", const std::string& tail = "\n", int width = 4, int precision = 1, int verbose = 0); //////////////////////////////////////// /// @brief I/O manipulator that formats integer values with thousands separators template<typename IntT> class FormattedInt { public: static char sep() { return ','; } FormattedInt(IntT n): mInt(n) {} std::ostream& put(std::ostream& os) const { // Convert the integer to a string. std::ostringstream ostr; ostr << mInt; std::string s = ostr.str(); // Prefix the string with spaces if its length is not a multiple of three. size_t padding = (s.size() % 3) ? 3 - (s.size() % 3) : 0; s = std::string(padding, ' ') + s; // Construct a new string in which groups of three digits are followed // by a separator character. ostr.str(""); for (size_t i = 0, N = s.size(); i < N; ) { ostr << s[i]; ++i; if (i >= padding && i % 3 == 0 && i < s.size()) { ostr << sep(); } } // Remove any padding that was added and output the string. 
s = ostr.str(); os << s.substr(padding, s.size()); return os; } private: IntT mInt; }; template<typename IntT> std::ostream& operator<<(std::ostream& os, const FormattedInt<IntT>& n) { return n.put(os); } /// @return an I/O manipulator that formats the given integer value for output to a stream. template<typename IntT> FormattedInt<IntT> formattedInt(IntT n) { return FormattedInt<IntT>(n); } } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_FORMATS_HAS_BEEN_INCLUDED
4,518
C
35.152
106
0.63745
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/Util.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "Util.h" #include <limits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { const Index32 INVALID_IDX = std::numeric_limits<Index32>::max(); const Coord COORD_OFFSETS[26] = { Coord( 1, 0, 0), /// Voxel-face adjacent neghbours Coord(-1, 0, 0), /// 0 to 5 Coord( 0, 1, 0), Coord( 0, -1, 0), Coord( 0, 0, 1), Coord( 0, 0, -1), Coord( 1, 0, -1), /// Voxel-edge adjacent neghbours Coord(-1, 0, -1), /// 6 to 17 Coord( 1, 0, 1), Coord(-1, 0, 1), Coord( 1, 1, 0), Coord(-1, 1, 0), Coord( 1, -1, 0), Coord(-1, -1, 0), Coord( 0, -1, 1), Coord( 0, -1, -1), Coord( 0, 1, 1), Coord( 0, 1, -1), Coord(-1, -1, -1), /// Voxel-corner adjacent neghbours Coord(-1, -1, 1), /// 18 to 25 Coord( 1, -1, 1), Coord( 1, -1, -1), Coord(-1, 1, -1), Coord(-1, 1, 1), Coord( 1, 1, 1), Coord( 1, 1, -1) }; } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb
1,144
C++
23.361702
64
0.527972
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/logging.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_UTIL_LOGGING_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_LOGGING_HAS_BEEN_INCLUDED #include <openvdb/version.h> #ifdef OPENVDB_USE_LOG4CPLUS #include <log4cplus/appender.h> #include <log4cplus/configurator.h> #include <log4cplus/consoleappender.h> #include <log4cplus/layout.h> #include <log4cplus/logger.h> #include <log4cplus/spi/loggingevent.h> #include <algorithm> // for std::remove() #include <cstring> // for ::strrchr() #include <memory> #include <sstream> #include <string> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace logging { /// @brief Message severity level enum class Level { Debug = log4cplus::DEBUG_LOG_LEVEL, Info = log4cplus::INFO_LOG_LEVEL, Warn = log4cplus::WARN_LOG_LEVEL, Error = log4cplus::ERROR_LOG_LEVEL, Fatal = log4cplus::FATAL_LOG_LEVEL }; namespace internal { /// @brief log4cplus layout that outputs text in different colors /// for different log levels, using ANSI escape codes class ColoredPatternLayout: public log4cplus::PatternLayout { public: explicit ColoredPatternLayout(const std::string& progName_, bool useColor = true) : log4cplus::PatternLayout( progName_.empty() ? 
std::string{"%5p: %m%n"} : (progName_ + " %5p: %m%n")) , mUseColor(useColor) , mProgName(progName_) { } ~ColoredPatternLayout() override {} const std::string& progName() const { return mProgName; } void formatAndAppend(log4cplus::tostream& strm, const log4cplus::spi::InternalLoggingEvent& event) override { if (!mUseColor) { log4cplus::PatternLayout::formatAndAppend(strm, event); return; } log4cplus::tostringstream s; switch (event.getLogLevel()) { case log4cplus::DEBUG_LOG_LEVEL: s << "\033[32m"; break; // green case log4cplus::ERROR_LOG_LEVEL: case log4cplus::FATAL_LOG_LEVEL: s << "\033[31m"; break; // red case log4cplus::INFO_LOG_LEVEL: s << "\033[36m"; break; // cyan case log4cplus::WARN_LOG_LEVEL: s << "\033[35m"; break; // magenta } log4cplus::PatternLayout::formatAndAppend(s, event); strm << s.str() << "\033[0m" << std::flush; } // Disable deprecation warnings for std::auto_ptr. #if defined(__ICC) #pragma warning push #pragma warning disable:1478 #elif defined(__clang__) #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wdeprecated-declarations" #elif defined(__GNUC__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wdeprecated-declarations" #endif #if defined(LOG4CPLUS_VERSION) && defined(LOG4CPLUS_MAKE_VERSION) #if LOG4CPLUS_VERSION >= LOG4CPLUS_MAKE_VERSION(2, 0, 0) // In log4cplus 2.0.0, std::auto_ptr was replaced with std::unique_ptr. 
using Ptr = std::unique_ptr<log4cplus::Layout>; #else using Ptr = std::auto_ptr<log4cplus::Layout>; #endif #else using Ptr = std::auto_ptr<log4cplus::Layout>; #endif static Ptr create(const std::string& progName_, bool useColor = true) { return Ptr{new ColoredPatternLayout{progName_, useColor}}; } #if defined(__ICC) #pragma warning pop #elif defined(__clang__) #pragma clang diagnostic pop #elif defined(__GNUC__) #pragma GCC diagnostic pop #endif private: bool mUseColor = true; std::string mProgName; }; // class ColoredPatternLayout inline log4cplus::Logger getLogger() { return log4cplus::Logger::getInstance(LOG4CPLUS_TEXT("openvdb")); } inline log4cplus::SharedAppenderPtr getAppender() { return getLogger().getAppender(LOG4CPLUS_TEXT("OPENVDB")); } } // namespace internal /// @brief Return the current logging level. inline Level getLevel() { switch (internal::getLogger().getLogLevel()) { case log4cplus::DEBUG_LOG_LEVEL: return Level::Debug; case log4cplus::INFO_LOG_LEVEL: return Level::Info; case log4cplus::WARN_LOG_LEVEL: return Level::Warn; case log4cplus::ERROR_LOG_LEVEL: return Level::Error; case log4cplus::FATAL_LOG_LEVEL: break; } return Level::Fatal; } /// @brief Set the logging level. (Lower-level messages will be suppressed.) inline void setLevel(Level lvl) { internal::getLogger().setLogLevel(static_cast<log4cplus::LogLevel>(lvl)); } /// @brief If "-debug", "-info", "-warn", "-error" or "-fatal" is found /// in the given array of command-line arguments, set the logging level /// appropriately and remove the relevant argument(s) from the array. 
inline void setLevel(int& argc, char* argv[]) { for (int i = 1; i < argc; ++i) { // note: skip argv[0] const std::string arg{argv[i]}; bool remove = true; if (arg == "-debug") { setLevel(Level::Debug); } else if (arg == "-error") { setLevel(Level::Error); } else if (arg == "-fatal") { setLevel(Level::Fatal); } else if (arg == "-info") { setLevel(Level::Info); } else if (arg == "-warn") { setLevel(Level::Warn); } else { remove = false; } if (remove) argv[i] = nullptr; } auto end = std::remove(argv + 1, argv + argc, nullptr); argc = static_cast<int>(end - argv); } /// @brief Specify a program name to be displayed in log messages. inline void setProgramName(const std::string& progName, bool useColor = true) { // Change the layout of the OpenVDB appender to use colored text // and to incorporate the supplied program name. if (auto appender = internal::getAppender()) { appender->setLayout(internal::ColoredPatternLayout::create(progName, useColor)); } } /// @brief Initialize the logging system if it is not already initialized. inline void initialize(bool useColor = true) { log4cplus::initialize(); if (internal::getAppender()) return; // already initialized // Create the OpenVDB logger if it doesn't already exist. auto logger = internal::getLogger(); // Disable "additivity", so that OpenVDB-related messages are directed // to the OpenVDB logger only and are not forwarded up the logger tree. logger.setAdditivity(false); // Attach a console appender to the OpenVDB logger. if (auto appender = log4cplus::SharedAppenderPtr{new log4cplus::ConsoleAppender}) { appender->setName(LOG4CPLUS_TEXT("OPENVDB")); logger.addAppender(appender); } setLevel(Level::Warn); setProgramName("", useColor); } /// @brief Initialize the logging system from command-line arguments. /// @details If "-debug", "-info", "-warn", "-error" or "-fatal" is found /// in the given array of command-line arguments, set the logging level /// appropriately and remove the relevant argument(s) from the array. 
inline void initialize(int& argc, char* argv[], bool useColor = true) { initialize(); setLevel(argc, argv); auto progName = (argc > 0 ? argv[0] : ""); if (const char* ptr = ::strrchr(progName, '/')) progName = ptr + 1; setProgramName(progName, useColor); } } // namespace logging } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #define OPENVDB_LOG(level, message) \ do { \ auto _log = openvdb::logging::internal::getLogger(); \ if (_log.isEnabledFor(log4cplus::level##_LOG_LEVEL)) { \ std::ostringstream _buf; \ _buf << message; \ _log.forcedLog(log4cplus::level##_LOG_LEVEL, _buf.str(), __FILE__, __LINE__); \ } \ } while (0); /// Log an info message of the form '<TT>someVar << "some text" << ...</TT>'. #define OPENVDB_LOG_INFO(message) OPENVDB_LOG(INFO, message) /// Log a warning message of the form '<TT>someVar << "some text" << ...</TT>'. #define OPENVDB_LOG_WARN(message) OPENVDB_LOG(WARN, message) /// Log an error message of the form '<TT>someVar << "some text" << ...</TT>'. #define OPENVDB_LOG_ERROR(message) OPENVDB_LOG(ERROR, message) /// Log a fatal error message of the form '<TT>someVar << "some text" << ...</TT>'. #define OPENVDB_LOG_FATAL(message) OPENVDB_LOG(FATAL, message) #ifdef DEBUG /// In debug builds only, log a debugging message of the form '<TT>someVar << "text" << ...</TT>'. #define OPENVDB_LOG_DEBUG(message) OPENVDB_LOG(DEBUG, message) #else /// In debug builds only, log a debugging message of the form '<TT>someVar << "text" << ...</TT>'. #define OPENVDB_LOG_DEBUG(message) #endif /// @brief Log a debugging message in both debug and optimized builds. /// @warning Don't use this in performance-critical code. 
#define OPENVDB_LOG_DEBUG_RUNTIME(message) OPENVDB_LOG(DEBUG, message) #else // ifdef OPENVDB_USE_LOG4CPLUS #include <iostream> #define OPENVDB_LOG_INFO(mesg) #define OPENVDB_LOG_WARN(mesg) do { std::cerr << "WARNING: " << mesg << std::endl; } while (0); #define OPENVDB_LOG_ERROR(mesg) do { std::cerr << "ERROR: " << mesg << std::endl; } while (0); #define OPENVDB_LOG_FATAL(mesg) do { std::cerr << "FATAL: " << mesg << std::endl; } while (0); #define OPENVDB_LOG_DEBUG(mesg) #define OPENVDB_LOG_DEBUG_RUNTIME(mesg) namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace logging { enum class Level { Debug, Info, Warn, Error, Fatal }; inline Level getLevel() { return Level::Warn; } inline void setLevel(Level) {} inline void setLevel(int&, char*[]) {} inline void setProgramName(const std::string&, bool = true) {} inline void initialize() {} inline void initialize(int&, char*[], bool = true) {} } // namespace logging } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_USE_LOG4CPLUS namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace logging { /// @brief A LevelScope object sets the logging level to a given level /// and restores it to the current level when the object goes out of scope. struct LevelScope { Level level; explicit LevelScope(Level newLevel): level(getLevel()) { setLevel(newLevel); } ~LevelScope() { setLevel(level); } }; } // namespace logging } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_LOGGING_HAS_BEEN_INCLUDED
10,256
C
31.053125
100
0.666537
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/NodeMasks.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @author Ken Museth /// /// @file NodeMasks.h #ifndef OPENVDB_UTIL_NODEMASKS_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_NODEMASKS_HAS_BEEN_INCLUDED #include <algorithm> // for std::min() #include <cassert> #include <cstring> #include <iostream>// for cout #include <openvdb/Platform.h> #include <openvdb/Types.h> //#include <strings.h> // for ffs namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { /// Return the number of on bits in the given 8-bit value. inline Index32 CountOn(Byte v) { #if defined(OPENVDB_USE_SSE42) && defined(_MSC_VER) return __popcnt16(v); #elif defined(OPENVDB_USE_SSE42) && (defined(__GNUC__) || defined(__clang__)) return __builtin_popcount(v); #else // Software Implementation - Simple LUT static const Byte numBits[256] = { #define COUNTONB2(n) n, n+1, n+1, n+2 #define COUNTONB4(n) COUNTONB2(n), COUNTONB2(n+1), COUNTONB2(n+1), COUNTONB2(n+2) #define COUNTONB6(n) COUNTONB4(n), COUNTONB4(n+1), COUNTONB4(n+1), COUNTONB4(n+2) COUNTONB6(0), COUNTONB6(1), COUNTONB6(1), COUNTONB6(2) }; return numBits[v]; #undef COUNTONB6 #undef COUNTONB4 #undef COUNTONB2 #endif } /// Return the number of off bits in the given 8-bit value. inline Index32 CountOff(Byte v) { return CountOn(static_cast<Byte>(~v)); } /// Return the number of on bits in the given 32-bit value. inline Index32 CountOn(Index32 v) { v = v - ((v >> 1) & 0x55555555U); v = (v & 0x33333333U) + ((v >> 2) & 0x33333333U); return (((v + (v >> 4)) & 0xF0F0F0FU) * 0x1010101U) >> 24; } /// Return the number of off bits in the given 32-bit value. inline Index32 CountOff(Index32 v) { return CountOn(~v); } /// Return the number of on bits in the given 64-bit value. 
inline Index32 CountOn(Index64 v) { #if defined(OPENVDB_USE_SSE42) && defined(_MSC_VER) && defined(_M_X64) v = __popcnt64(v); #elif defined(OPENVDB_USE_SSE42) && (defined(__GNUC__) || defined(__clang__)) v = __builtin_popcountll(v); #else // Software Implementation v = v - ((v >> 1) & UINT64_C(0x5555555555555555)); v = (v & UINT64_C(0x3333333333333333)) + ((v >> 2) & UINT64_C(0x3333333333333333)); v = (((v + (v >> 4)) & UINT64_C(0xF0F0F0F0F0F0F0F)) * UINT64_C(0x101010101010101)) >> 56; #endif return static_cast<Index32>(v); } /// Return the number of off bits in the given 64-bit value. inline Index32 CountOff(Index64 v) { return CountOn(~v); } /// Return the least significant on bit of the given 8-bit value. inline Index32 FindLowestOn(Byte v) { assert(v); #if defined(OPENVDB_USE_SSE42) && defined(_MSC_VER) unsigned long index; _BitScanForward(&index, static_cast<Index32>(v)); return static_cast<Index32>(index); #elif defined(OPENVDB_USE_SSE42) && (defined(__GNUC__) || defined(__clang__)) return __builtin_ctz(v); #else // Software Implementation static const Byte DeBruijn[8] = {0, 1, 6, 2, 7, 5, 4, 3}; return DeBruijn[Byte((v & -v) * 0x1DU) >> 5]; #endif } /// Return the least significant on bit of the given 32-bit value. inline Index32 FindLowestOn(Index32 v) { assert(v); //return ffs(v); static const Byte DeBruijn[32] = { 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 }; // disable unary minus on unsigned warning #if defined(_MSC_VER) #pragma warning(push) #pragma warning(disable:4146) #endif return DeBruijn[Index32((v & -v) * 0x077CB531U) >> 27]; #if defined(_MSC_VER) #pragma warning(pop) #endif } /// Return the least significant on bit of the given 64-bit value. 
inline Index32 FindLowestOn(Index64 v) { assert(v); #if defined(OPENVDB_USE_SSE42) && defined(_MSC_VER) unsigned long index; _BitScanForward64(&index, v); return static_cast<Index32>(index); #elif defined(OPENVDB_USE_SSE42) && (defined(__GNUC__) || defined(__clang__)) return static_cast<Index32>(__builtin_ctzll(v)); #else // Software Implementation static const Byte DeBruijn[64] = { 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28, 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11, 63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10, 51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12, }; // disable unary minus on unsigned warning #if defined(_MSC_VER) #pragma warning(push) #pragma warning(disable:4146) #endif return DeBruijn[Index64((v & -v) * UINT64_C(0x022FDD63CC95386D)) >> 58]; #if defined(_MSC_VER) #pragma warning(pop) #endif #endif } /// Return the most significant on bit of the given 32-bit value. inline Index32 FindHighestOn(Index32 v) { static const Byte DeBruijn[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; v |= v >> 1; // first round down to one less than a power of 2 v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; return DeBruijn[Index32(v * 0x07C4ACDDU) >> 27]; } //////////////////////////////////////// /// Base class for the bit mask iterators template<typename NodeMask> class BaseMaskIterator { protected: Index32 mPos; // bit position const NodeMask* mParent; // this iterator can't change the parent_mask! 
public: BaseMaskIterator(): mPos(NodeMask::SIZE), mParent(nullptr) {} BaseMaskIterator(const BaseMaskIterator&) = default; BaseMaskIterator(Index32 pos, const NodeMask* parent): mPos(pos), mParent(parent) { assert((parent == nullptr && pos == 0) || (parent != nullptr && pos <= NodeMask::SIZE)); } bool operator==(const BaseMaskIterator &iter) const {return mPos == iter.mPos;} bool operator!=(const BaseMaskIterator &iter) const {return mPos != iter.mPos;} bool operator< (const BaseMaskIterator &iter) const {return mPos < iter.mPos;} BaseMaskIterator& operator=(const BaseMaskIterator& iter) { mPos = iter.mPos; mParent = iter.mParent; return *this; } Index32 offset() const { return mPos; } Index32 pos() const { return mPos; } bool test() const { assert(mPos <= NodeMask::SIZE); return (mPos != NodeMask::SIZE); } operator bool() const { return this->test(); } }; // class BaseMaskIterator /// @note This happens to be a const-iterator! template <typename NodeMask> class OnMaskIterator: public BaseMaskIterator<NodeMask> { private: using BaseType = BaseMaskIterator<NodeMask>; using BaseType::mPos;//bit position; using BaseType::mParent;//this iterator can't change the parent_mask! public: OnMaskIterator() : BaseType() {} OnMaskIterator(Index32 pos,const NodeMask *parent) : BaseType(pos,parent) {} void increment() { assert(mParent != nullptr); mPos = mParent->findNextOn(mPos+1); assert(mPos <= NodeMask::SIZE); } void increment(Index n) { while(n-- && this->next()) ; } bool next() { this->increment(); return this->test(); } bool operator*() const {return true;} OnMaskIterator& operator++() { this->increment(); return *this; } }; // class OnMaskIterator template <typename NodeMask> class OffMaskIterator: public BaseMaskIterator<NodeMask> { private: using BaseType = BaseMaskIterator<NodeMask>; using BaseType::mPos;//bit position; using BaseType::mParent;//this iterator can't change the parent_mask! 
public: OffMaskIterator() : BaseType() {} OffMaskIterator(Index32 pos,const NodeMask *parent) : BaseType(pos,parent) {} void increment() { assert(mParent != nullptr); mPos=mParent->findNextOff(mPos+1); assert(mPos <= NodeMask::SIZE); } void increment(Index n) { while(n-- && this->next()) ; } bool next() { this->increment(); return this->test(); } bool operator*() const {return false;} OffMaskIterator& operator++() { this->increment(); return *this; } }; // class OffMaskIterator template <typename NodeMask> class DenseMaskIterator: public BaseMaskIterator<NodeMask> { private: using BaseType = BaseMaskIterator<NodeMask>; using BaseType::mPos;//bit position; using BaseType::mParent;//this iterator can't change the parent_mask! public: DenseMaskIterator() : BaseType() {} DenseMaskIterator(Index32 pos,const NodeMask *parent) : BaseType(pos,parent) {} void increment() { assert(mParent != nullptr); mPos += 1;//careful - the increment might go beyond the end assert(mPos<= NodeMask::SIZE); } void increment(Index n) { while(n-- && this->next()) ; } bool next() { this->increment(); return this->test(); } bool operator*() const {return mParent->isOn(mPos);} DenseMaskIterator& operator++() { this->increment(); return *this; } }; // class DenseMaskIterator /// @brief Bit mask for the internal and leaf nodes of VDB. This /// is a 64-bit implementation. /// /// @note A template specialization for Log2Dim=1 and Log2Dim=2 are /// given below. template<Index Log2Dim> class NodeMask { public: static_assert(Log2Dim > 2, "expected NodeMask template specialization, got base template"); static const Index32 LOG2DIM = Log2Dim; static const Index32 DIM = 1<<Log2Dim; static const Index32 SIZE = 1<<3*Log2Dim; static const Index32 WORD_COUNT = SIZE >> 6;// 2^6=64 using Word = Index64; private: // The bits are represented as a linear array of Words, and the // size of a Word is 32 or 64 bits depending on the platform. 
// The BIT_MASK is defined as the number of bits in a Word - 1 //static const Index32 BIT_MASK = sizeof(void*) == 8 ? 63 : 31; //static const Index32 LOG2WORD = BIT_MASK == 63 ? 6 : 5; //static const Index32 WORD_COUNT = SIZE >> LOG2WORD; //using Word = boost::mpl::if_c<BIT_MASK == 63, Index64, Index32>::type; Word mWords[WORD_COUNT];//only member data! public: /// Default constructor sets all bits off NodeMask() { this->setOff(); } /// All bits are set to the specified state NodeMask(bool on) { this->set(on); } /// Copy constructor NodeMask(const NodeMask &other) { *this = other; } /// Destructor ~NodeMask() {} /// Assignment operator NodeMask& operator=(const NodeMask& other) { Index32 n = WORD_COUNT; const Word* w2 = other.mWords; for (Word* w1 = mWords; n--; ++w1, ++w2) *w1 = *w2; return *this; } using OnIterator = OnMaskIterator<NodeMask>; using OffIterator = OffMaskIterator<NodeMask>; using DenseIterator = DenseMaskIterator<NodeMask>; OnIterator beginOn() const { return OnIterator(this->findFirstOn(),this); } OnIterator endOn() const { return OnIterator(SIZE,this); } OffIterator beginOff() const { return OffIterator(this->findFirstOff(),this); } OffIterator endOff() const { return OffIterator(SIZE,this); } DenseIterator beginDense() const { return DenseIterator(0,this); } DenseIterator endDense() const { return DenseIterator(SIZE,this); } bool operator == (const NodeMask &other) const { int n = WORD_COUNT; for (const Word *w1=mWords, *w2=other.mWords; n-- && *w1++ == *w2++;) ; return n == -1; } bool operator != (const NodeMask &other) const { return !(*this == other); } // // Bitwise logical operations // /// @brief Apply a functor to the words of the this and the other mask. 
/// /// @details An example that implements the "operator&=" method: /// @code /// struct Op { inline void operator()(W &w1, const W& w2) const { w1 &= w2; } }; /// @endcode template<typename WordOp> const NodeMask& foreach(const NodeMask& other, const WordOp& op) { Word *w1 = mWords; const Word *w2 = other.mWords; for (Index32 n = WORD_COUNT; n--; ++w1, ++w2) op( *w1, *w2); return *this; } template<typename WordOp> const NodeMask& foreach(const NodeMask& other1, const NodeMask& other2, const WordOp& op) { Word *w1 = mWords; const Word *w2 = other1.mWords, *w3 = other2.mWords; for (Index32 n = WORD_COUNT; n--; ++w1, ++w2, ++w3) op( *w1, *w2, *w3); return *this; } template<typename WordOp> const NodeMask& foreach(const NodeMask& other1, const NodeMask& other2, const NodeMask& other3, const WordOp& op) { Word *w1 = mWords; const Word *w2 = other1.mWords, *w3 = other2.mWords, *w4 = other3.mWords; for (Index32 n = WORD_COUNT; n--; ++w1, ++w2, ++w3, ++w4) op( *w1, *w2, *w3, *w4); return *this; } /// @brief Bitwise intersection const NodeMask& operator&=(const NodeMask& other) { Word *w1 = mWords; const Word *w2 = other.mWords; for (Index32 n = WORD_COUNT; n--; ++w1, ++w2) *w1 &= *w2; return *this; } /// @brief Bitwise union const NodeMask& operator|=(const NodeMask& other) { Word *w1 = mWords; const Word *w2 = other.mWords; for (Index32 n = WORD_COUNT; n--; ++w1, ++w2) *w1 |= *w2; return *this; } /// @brief Bitwise difference const NodeMask& operator-=(const NodeMask& other) { Word *w1 = mWords; const Word *w2 = other.mWords; for (Index32 n = WORD_COUNT; n--; ++w1, ++w2) *w1 &= ~*w2; return *this; } /// @brief Bitwise XOR const NodeMask& operator^=(const NodeMask& other) { Word *w1 = mWords; const Word *w2 = other.mWords; for (Index32 n = WORD_COUNT; n--; ++w1, ++w2) *w1 ^= *w2; return *this; } NodeMask operator!() const { NodeMask m(*this); m.toggle(); return m; } NodeMask operator&(const NodeMask& other) const { NodeMask m(*this); m &= other; return m; } NodeMask 
operator|(const NodeMask& other) const { NodeMask m(*this); m |= other; return m; } NodeMask operator^(const NodeMask& other) const { NodeMask m(*this); m ^= other; return m; } /// Return the byte size of this NodeMask static Index32 memUsage() { return static_cast<Index32>(WORD_COUNT*sizeof(Word)); } /// Return the total number of on bits Index32 countOn() const { Index32 sum = 0, n = WORD_COUNT; for (const Word* w = mWords; n--; ++w) sum += CountOn(*w); return sum; } /// Return the total number of on bits Index32 countOff() const { return SIZE-this->countOn(); } /// Set the <i>n</i>th bit on void setOn(Index32 n) { assert( (n >> 6) < WORD_COUNT ); mWords[n >> 6] |= Word(1) << (n & 63); } /// Set the <i>n</i>th bit off void setOff(Index32 n) { assert( (n >> 6) < WORD_COUNT ); mWords[n >> 6] &= ~(Word(1) << (n & 63)); } /// Set the <i>n</i>th bit to the specified state void set(Index32 n, bool On) { On ? this->setOn(n) : this->setOff(n); } /// Set all bits to the specified state void set(bool on) { const Word state = on ? 
~Word(0) : Word(0); Index32 n = WORD_COUNT; for (Word* w = mWords; n--; ++w) *w = state; } /// Set all bits on void setOn() { Index32 n = WORD_COUNT; for (Word* w = mWords; n--; ++w) *w = ~Word(0); } /// Set all bits off void setOff() { Index32 n = WORD_COUNT; for (Word* w = mWords; n--; ++w) *w = Word(0); } /// Toggle the state of the <i>n</i>th bit void toggle(Index32 n) { assert( (n >> 6) < WORD_COUNT ); mWords[n >> 6] ^= Word(1) << (n & 63); } /// Toggle the state of all bits in the mask void toggle() { Index32 n = WORD_COUNT; for (Word* w = mWords; n--; ++w) *w = ~*w; } /// Set the first bit on void setFirstOn() { this->setOn(0); } /// Set the last bit on void setLastOn() { this->setOn(SIZE-1); } /// Set the first bit off void setFirstOff() { this->setOff(0); } /// Set the last bit off void setLastOff() { this->setOff(SIZE-1); } /// Return @c true if the <i>n</i>th bit is on bool isOn(Index32 n) const { assert( (n >> 6) < WORD_COUNT ); return 0 != (mWords[n >> 6] & (Word(1) << (n & 63))); } /// Return @c true if the <i>n</i>th bit is off bool isOff(Index32 n) const {return !this->isOn(n); } /// Return @c true if all the bits are on bool isOn() const { int n = WORD_COUNT; for (const Word *w = mWords; n-- && *w++ == ~Word(0);) ; return n == -1; } /// Return @c true if all the bits are off bool isOff() const { int n = WORD_COUNT; for (const Word *w = mWords; n-- && *w++ == Word(0);) ; return n == -1; } /// Return @c true if bits are either all off OR all on. /// @param isOn Takes on the values of all bits if the method /// returns true - else it is undefined. bool isConstant(bool &isOn) const { isOn = (mWords[0] == ~Word(0));//first word has all bits on if ( !isOn && mWords[0] != Word(0)) return false;//early out const Word *w = mWords + 1, *n = mWords + WORD_COUNT; while( w<n && *w == mWords[0] ) ++w; return w == n; } Index32 findFirstOn() const { Index32 n = 0; const Word* w = mWords; for (; n<WORD_COUNT && !*w; ++w, ++n) ; return n==WORD_COUNT ? 
SIZE : (n << 6) + FindLowestOn(*w); } Index32 findFirstOff() const { Index32 n = 0; const Word* w = mWords; for (; n<WORD_COUNT && !~*w; ++w, ++n) ; return n==WORD_COUNT ? SIZE : (n << 6) + FindLowestOn(~*w); } //@{ /// Return the <i>n</i>th word of the bit mask, for a word of arbitrary size. template<typename WordT> WordT getWord(Index n) const { assert(n*8*sizeof(WordT) < SIZE); return reinterpret_cast<const WordT*>(mWords)[n]; } template<typename WordT> WordT& getWord(Index n) { assert(n*8*sizeof(WordT) < SIZE); return reinterpret_cast<WordT*>(mWords)[n]; } //@} void save(std::ostream& os) const { os.write(reinterpret_cast<const char*>(mWords), this->memUsage()); } void load(std::istream& is) { is.read(reinterpret_cast<char*>(mWords), this->memUsage()); } void seek(std::istream& is) const { is.seekg(this->memUsage(), std::ios_base::cur); } /// @brief simple print method for debugging void printInfo(std::ostream& os=std::cout) const { os << "NodeMask: Dim=" << DIM << " Log2Dim=" << Log2Dim << " Bit count=" << SIZE << " word count=" << WORD_COUNT << std::endl; } void printBits(std::ostream& os=std::cout, Index32 max_out=80u) const { const Index32 n=(SIZE>max_out ? max_out : SIZE); for (Index32 i=0; i < n; ++i) { if ( !(i & 63) ) os << "||"; else if ( !(i%8) ) os << "|"; os << this->isOn(i); } os << "|" << std::endl; } void printAll(std::ostream& os=std::cout, Index32 max_out=80u) const { this->printInfo(os); this->printBits(os, max_out); } Index32 findNextOn(Index32 start) const { Index32 n = start >> 6;//initiate if (n >= WORD_COUNT) return SIZE; // check for out of bounds Index32 m = start & 63; Word b = mWords[n]; if (b & (Word(1) << m)) return start;//simpel case: start is on b &= ~Word(0) << m;// mask out lower bits while(!b && ++n<WORD_COUNT) b = mWords[n];// find next none-zero word return (!b ? 
SIZE : (n << 6) + FindLowestOn(b));//catch last word=0 } Index32 findNextOff(Index32 start) const { Index32 n = start >> 6;//initiate if (n >= WORD_COUNT) return SIZE; // check for out of bounds Index32 m = start & 63; Word b = ~mWords[n]; if (b & (Word(1) << m)) return start;//simpel case: start is on b &= ~Word(0) << m;// mask out lower bits while(!b && ++n<WORD_COUNT) b = ~mWords[n];// find next none-zero word return (!b ? SIZE : (n << 6) + FindLowestOn(b));//catch last word=0 } };// NodeMask /// @brief Template specialization of NodeMask for Log2Dim=1, i.e. 2^3 nodes template<> class NodeMask<1> { public: static const Index32 LOG2DIM = 1; static const Index32 DIM = 2; static const Index32 SIZE = 8; static const Index32 WORD_COUNT = 1; using Word = Byte; private: Byte mByte;//only member data! public: /// Default constructor sets all bits off NodeMask() : mByte(0x00U) {} /// All bits are set to the specified state NodeMask(bool on) : mByte(on ? 0xFFU : 0x00U) {} /// Copy constructor NodeMask(const NodeMask &other) : mByte(other.mByte) {} /// Destructor ~NodeMask() {} /// Assignment operator void operator = (const NodeMask &other) { mByte = other.mByte; } using OnIterator = OnMaskIterator<NodeMask>; using OffIterator = OffMaskIterator<NodeMask>; using DenseIterator = DenseMaskIterator<NodeMask>; OnIterator beginOn() const { return OnIterator(this->findFirstOn(),this); } OnIterator endOn() const { return OnIterator(SIZE,this); } OffIterator beginOff() const { return OffIterator(this->findFirstOff(),this); } OffIterator endOff() const { return OffIterator(SIZE,this); } DenseIterator beginDense() const { return DenseIterator(0,this); } DenseIterator endDense() const { return DenseIterator(SIZE,this); } bool operator == (const NodeMask &other) const { return mByte == other.mByte; } bool operator != (const NodeMask &other) const {return mByte != other.mByte; } // // Bitwise logical operations // /// @brief Apply a functor to the words of the this and the other mask. 
/// /// @details An example that implements the "operator&=" method: /// @code /// struct Op { inline void operator()(Word &w1, const Word& w2) const { w1 &= w2; } }; /// @endcode template<typename WordOp> const NodeMask& foreach(const NodeMask& other, const WordOp& op) { op(mByte, other.mByte); return *this; } template<typename WordOp> const NodeMask& foreach(const NodeMask& other1, const NodeMask& other2, const WordOp& op) { op(mByte, other1.mByte, other2.mByte); return *this; } template<typename WordOp> const NodeMask& foreach(const NodeMask& other1, const NodeMask& other2, const NodeMask& other3, const WordOp& op) { op(mByte, other1.mByte, other2.mByte, other3.mByte); return *this; } /// @brief Bitwise intersection const NodeMask& operator&=(const NodeMask& other) { mByte &= other.mByte; return *this; } /// @brief Bitwise union const NodeMask& operator|=(const NodeMask& other) { mByte |= other.mByte; return *this; } /// @brief Bitwise difference const NodeMask& operator-=(const NodeMask& other) { mByte &= static_cast<Byte>(~other.mByte); return *this; } /// @brief Bitwise XOR const NodeMask& operator^=(const NodeMask& other) { mByte ^= other.mByte; return *this; } NodeMask operator!() const { NodeMask m(*this); m.toggle(); return m; } NodeMask operator&(const NodeMask& other) const { NodeMask m(*this); m &= other; return m; } NodeMask operator|(const NodeMask& other) const { NodeMask m(*this); m |= other; return m; } NodeMask operator^(const NodeMask& other) const { NodeMask m(*this); m ^= other; return m; } /// Return the byte size of this NodeMask static Index32 memUsage() { return 1; } /// Return the total number of on bits Index32 countOn() const { return CountOn(mByte); } /// Return the total number of on bits Index32 countOff() const { return CountOff(mByte); } /// Set the <i>n</i>th bit on void setOn(Index32 n) { assert( n < 8 ); mByte = static_cast<Byte>(mByte | 0x01U << (n & 7)); } /// Set the <i>n</i>th bit off void setOff(Index32 n) { assert( n < 8 
); mByte = static_cast<Byte>(mByte & ~(0x01U << (n & 7))); } /// Set the <i>n</i>th bit to the specified state void set(Index32 n, bool On) { On ? this->setOn(n) : this->setOff(n); } /// Set all bits to the specified state void set(bool on) { mByte = on ? 0xFFU : 0x00U; } /// Set all bits on void setOn() { mByte = 0xFFU; } /// Set all bits off void setOff() { mByte = 0x00U; } /// Toggle the state of the <i>n</i>th bit void toggle(Index32 n) { assert( n < 8 ); mByte = static_cast<Byte>(mByte ^ 0x01U << (n & 7)); } /// Toggle the state of all bits in the mask void toggle() { mByte = static_cast<Byte>(~mByte); } /// Set the first bit on void setFirstOn() { this->setOn(0); } /// Set the last bit on void setLastOn() { this->setOn(7); } /// Set the first bit off void setFirstOff() { this->setOff(0); } /// Set the last bit off void setLastOff() { this->setOff(7); } /// Return true if the <i>n</i>th bit is on bool isOn(Index32 n) const { assert( n < 8 ); return mByte & (0x01U << (n & 7)); } /// Return true if the <i>n</i>th bit is off bool isOff(Index32 n) const {return !this->isOn(n); } /// Return true if all the bits are on bool isOn() const { return mByte == 0xFFU; } /// Return true if all the bits are off bool isOff() const { return mByte == 0; } /// Return @c true if bits are either all off OR all on. /// @param isOn Takes on the values of all bits if the method /// returns true - else it is undefined. bool isConstant(bool &isOn) const { isOn = this->isOn(); return isOn || this->isOff(); } Index32 findFirstOn() const { return mByte ? FindLowestOn(mByte) : 8; } Index32 findFirstOff() const { const Byte b = static_cast<Byte>(~mByte); return b ? FindLowestOn(b) : 8; } /* //@{ /// Return the <i>n</i>th word of the bit mask, for a word of arbitrary size. /// @note This version assumes WordT=Byte and n=0! 
template<typename WordT> WordT getWord(Index n) const { static_assert(sizeof(WordT) == sizeof(Byte), "expected word size to be one byte"); assert(n == 0); return reinterpret_cast<WordT>(mByte); } template<typename WordT> WordT& getWord(Index n) { static_assert(sizeof(WordT) == sizeof(Byte), "expected word size to be one byte"); assert(n == 0); return reinterpret_cast<WordT&>(mByte); } //@} */ void save(std::ostream& os) const { os.write(reinterpret_cast<const char*>(&mByte), 1); } void load(std::istream& is) { is.read(reinterpret_cast<char*>(&mByte), 1); } void seek(std::istream& is) const { is.seekg(1, std::ios_base::cur); } /// @brief simple print method for debugging void printInfo(std::ostream& os=std::cout) const { os << "NodeMask: Dim=2, Log2Dim=1, Bit count=8, Word count=1"<<std::endl; } void printBits(std::ostream& os=std::cout) const { os << "||"; for (Index32 i=0; i < 8; ++i) os << this->isOn(i); os << "||" << std::endl; } void printAll(std::ostream& os=std::cout) const { this->printInfo(os); this->printBits(os); } Index32 findNextOn(Index32 start) const { if (start>=8) return 8; const Byte b = static_cast<Byte>(mByte & (0xFFU << start)); return b ? FindLowestOn(b) : 8; } Index32 findNextOff(Index32 start) const { if (start>=8) return 8; const Byte b = static_cast<Byte>(~mByte & (0xFFU << start)); return b ? FindLowestOn(b) : 8; } };// NodeMask<1> /// @brief Template specialization of NodeMask for Log2Dim=2, i.e. 4^3 nodes template<> class NodeMask<2> { public: static const Index32 LOG2DIM = 2; static const Index32 DIM = 4; static const Index32 SIZE = 64; static const Index32 WORD_COUNT = 1; using Word = Index64; private: Word mWord;//only member data! public: /// Default constructor sets all bits off NodeMask() : mWord(UINT64_C(0x00)) {} /// All bits are set to the specified state NodeMask(bool on) : mWord(on ? 
UINT64_C(0xFFFFFFFFFFFFFFFF) : UINT64_C(0x00)) {} /// Copy constructor NodeMask(const NodeMask &other) : mWord(other.mWord) {} /// Destructor ~NodeMask() {} /// Assignment operator void operator = (const NodeMask &other) { mWord = other.mWord; } using OnIterator = OnMaskIterator<NodeMask>; using OffIterator = OffMaskIterator<NodeMask>; using DenseIterator = DenseMaskIterator<NodeMask>; OnIterator beginOn() const { return OnIterator(this->findFirstOn(),this); } OnIterator endOn() const { return OnIterator(SIZE,this); } OffIterator beginOff() const { return OffIterator(this->findFirstOff(),this); } OffIterator endOff() const { return OffIterator(SIZE,this); } DenseIterator beginDense() const { return DenseIterator(0,this); } DenseIterator endDense() const { return DenseIterator(SIZE,this); } bool operator == (const NodeMask &other) const { return mWord == other.mWord; } bool operator != (const NodeMask &other) const {return mWord != other.mWord; } // // Bitwise logical operations // /// @brief Apply a functor to the words of the this and the other mask. 
/// /// @details An example that implements the "operator&=" method: /// @code /// struct Op { inline void operator()(Word &w1, const Word& w2) const { w1 &= w2; } }; /// @endcode template<typename WordOp> const NodeMask& foreach(const NodeMask& other, const WordOp& op) { op(mWord, other.mWord); return *this; } template<typename WordOp> const NodeMask& foreach(const NodeMask& other1, const NodeMask& other2, const WordOp& op) { op(mWord, other1.mWord, other2.mWord); return *this; } template<typename WordOp> const NodeMask& foreach(const NodeMask& other1, const NodeMask& other2, const NodeMask& other3, const WordOp& op) { op(mWord, other1.mWord, other2.mWord, other3.mWord); return *this; } /// @brief Bitwise intersection const NodeMask& operator&=(const NodeMask& other) { mWord &= other.mWord; return *this; } /// @brief Bitwise union const NodeMask& operator|=(const NodeMask& other) { mWord |= other.mWord; return *this; } /// @brief Bitwise difference const NodeMask& operator-=(const NodeMask& other) { mWord &= ~other.mWord; return *this; } /// @brief Bitwise XOR const NodeMask& operator^=(const NodeMask& other) { mWord ^= other.mWord; return *this; } NodeMask operator!() const { NodeMask m(*this); m.toggle(); return m; } NodeMask operator&(const NodeMask& other) const { NodeMask m(*this); m &= other; return m; } NodeMask operator|(const NodeMask& other) const { NodeMask m(*this); m |= other; return m; } NodeMask operator^(const NodeMask& other) const { NodeMask m(*this); m ^= other; return m; } /// Return the byte size of this NodeMask static Index32 memUsage() { return 8; } /// Return the total number of on bits Index32 countOn() const { return CountOn(mWord); } /// Return the total number of on bits Index32 countOff() const { return CountOff(mWord); } /// Set the <i>n</i>th bit on void setOn(Index32 n) { assert( n < 64 ); mWord |= UINT64_C(0x01) << (n & 63); } /// Set the <i>n</i>th bit off void setOff(Index32 n) { assert( n < 64 ); mWord &= ~(UINT64_C(0x01) << (n 
& 63)); } /// Set the <i>n</i>th bit to the specified state void set(Index32 n, bool On) { On ? this->setOn(n) : this->setOff(n); } /// Set all bits to the specified state void set(bool on) { mWord = on ? UINT64_C(0xFFFFFFFFFFFFFFFF) : UINT64_C(0x00); } /// Set all bits on void setOn() { mWord = UINT64_C(0xFFFFFFFFFFFFFFFF); } /// Set all bits off void setOff() { mWord = UINT64_C(0x00); } /// Toggle the state of the <i>n</i>th bit void toggle(Index32 n) { assert( n < 64 ); mWord ^= UINT64_C(0x01) << (n & 63); } /// Toggle the state of all bits in the mask void toggle() { mWord = ~mWord; } /// Set the first bit on void setFirstOn() { this->setOn(0); } /// Set the last bit on void setLastOn() { this->setOn(63); } /// Set the first bit off void setFirstOff() { this->setOff(0); } /// Set the last bit off void setLastOff() { this->setOff(63); } /// Return true if the <i>n</i>th bit is on bool isOn(Index32 n) const { assert( n < 64 ); return 0 != (mWord & (UINT64_C(0x01) << (n & 63))); } /// Return true if the <i>n</i>th bit is off bool isOff(Index32 n) const {return !this->isOn(n); } /// Return true if all the bits are on bool isOn() const { return mWord == UINT64_C(0xFFFFFFFFFFFFFFFF); } /// Return true if all the bits are off bool isOff() const { return mWord == 0; } /// Return @c true if bits are either all off OR all on. /// @param isOn Takes on the values of all bits if the method /// returns true - else it is undefined. bool isConstant(bool &isOn) const { isOn = this->isOn(); return isOn || this->isOff(); } Index32 findFirstOn() const { return mWord ? FindLowestOn(mWord) : 64; } Index32 findFirstOff() const { const Word w = ~mWord; return w ? FindLowestOn(w) : 64; } //@{ /// Return the <i>n</i>th word of the bit mask, for a word of arbitrary size. 
template<typename WordT> WordT getWord(Index n) const { assert(n*8*sizeof(WordT) < SIZE); return reinterpret_cast<const WordT*>(&mWord)[n]; } template<typename WordT> WordT& getWord(Index n) { assert(n*8*sizeof(WordT) < SIZE); return reinterpret_cast<WordT*>(mWord)[n]; } //@} void save(std::ostream& os) const { os.write(reinterpret_cast<const char*>(&mWord), 8); } void load(std::istream& is) { is.read(reinterpret_cast<char*>(&mWord), 8); } void seek(std::istream& is) const { is.seekg(8, std::ios_base::cur); } /// @brief simple print method for debugging void printInfo(std::ostream& os=std::cout) const { os << "NodeMask: Dim=4, Log2Dim=2, Bit count=64, Word count=1"<<std::endl; } void printBits(std::ostream& os=std::cout) const { os << "|"; for (Index32 i=0; i < 64; ++i) { if ( !(i%8) ) os << "|"; os << this->isOn(i); } os << "||" << std::endl; } void printAll(std::ostream& os=std::cout) const { this->printInfo(os); this->printBits(os); } Index32 findNextOn(Index32 start) const { if (start>=64) return 64; const Word w = mWord & (UINT64_C(0xFFFFFFFFFFFFFFFF) << start); return w ? FindLowestOn(w) : 64; } Index32 findNextOff(Index32 start) const { if (start>=64) return 64; const Word w = ~mWord & (UINT64_C(0xFFFFFFFFFFFFFFFF) << start); return w ? FindLowestOn(w) : 64; } };// NodeMask<2> // Unlike NodeMask above this RootNodeMask has a run-time defined size. // It is only included for backward compatibility and will likely be // deprecated in the future! // This class is 32-bit specefic, hence the use if Index32 vs Index! 
class RootNodeMask { protected: Index32 mBitSize, mIntSize; Index32 *mBits; public: RootNodeMask(): mBitSize(0), mIntSize(0), mBits(nullptr) {} RootNodeMask(Index32 bit_size): mBitSize(bit_size), mIntSize(((bit_size-1)>>5)+1), mBits(new Index32[mIntSize]) { for (Index32 i=0; i<mIntSize; ++i) mBits[i]=0x00000000; } RootNodeMask(const RootNodeMask& B): mBitSize(B.mBitSize), mIntSize(B.mIntSize), mBits(new Index32[mIntSize]) { for (Index32 i=0; i<mIntSize; ++i) mBits[i]=B.mBits[i]; } ~RootNodeMask() {delete [] mBits;} void init(Index32 bit_size) { mBitSize = bit_size; mIntSize =((bit_size-1)>>5)+1; delete [] mBits; mBits = new Index32[mIntSize]; for (Index32 i=0; i<mIntSize; ++i) mBits[i]=0x00000000; } Index getBitSize() const {return mBitSize;} Index getIntSize() const {return mIntSize;} RootNodeMask& operator=(const RootNodeMask& B) { if (mBitSize!=B.mBitSize) { mBitSize=B.mBitSize; mIntSize=B.mIntSize; delete [] mBits; mBits = new Index32[mIntSize]; } for (Index32 i=0; i<mIntSize; ++i) mBits[i]=B.mBits[i]; return *this; } class BaseIterator { protected: Index32 mPos;//bit position Index32 mBitSize; const RootNodeMask* mParent;//this iterator can't change the parent_mask! 
public: BaseIterator() : mPos(0), mBitSize(0), mParent(nullptr) {} BaseIterator(const BaseIterator&) = default; BaseIterator(Index32 pos, const RootNodeMask* parent): mPos(pos), mBitSize(parent->getBitSize()), mParent(parent) { assert(pos <= mBitSize); } bool operator==(const BaseIterator &iter) const {return mPos == iter.mPos;} bool operator!=(const BaseIterator &iter) const {return mPos != iter.mPos;} bool operator< (const BaseIterator &iter) const {return mPos < iter.mPos;} BaseIterator& operator=(const BaseIterator& iter) { mPos = iter.mPos; mBitSize = iter.mBitSize; mParent = iter.mParent; return *this; } Index32 offset() const {return mPos;} Index32 pos() const {return mPos;} bool test() const { assert(mPos <= mBitSize); return (mPos != mBitSize); } operator bool() const {return this->test();} }; // class BaseIterator /// @note This happens to be a const-iterator! class OnIterator: public BaseIterator { protected: using BaseIterator::mPos;//bit position; using BaseIterator::mBitSize;//bit size; using BaseIterator::mParent;//this iterator can't change the parent_mask! public: OnIterator() : BaseIterator() {} OnIterator(Index32 pos,const RootNodeMask *parent) : BaseIterator(pos,parent) {} void increment() { assert(mParent != nullptr); mPos=mParent->findNextOn(mPos+1); assert(mPos <= mBitSize); } void increment(Index n) { for (Index i=0; i<n && this->next(); ++i) {} } bool next() { this->increment(); return this->test(); } bool operator*() const {return true;} OnIterator& operator++() { this->increment(); return *this; } }; // class OnIterator class OffIterator: public BaseIterator { protected: using BaseIterator::mPos;//bit position; using BaseIterator::mBitSize;//bit size; using BaseIterator::mParent;//this iterator can't change the parent_mask! 
public: OffIterator() : BaseIterator() {} OffIterator(Index32 pos,const RootNodeMask *parent) : BaseIterator(pos,parent) {} void increment() { assert(mParent != nullptr); mPos=mParent->findNextOff(mPos+1); assert(mPos <= mBitSize); } void increment(Index n) { for (Index i=0; i<n && this->next(); ++i) {} } bool next() { this->increment(); return this->test(); } bool operator*() const {return true;} OffIterator& operator++() { this->increment(); return *this; } }; // class OffIterator class DenseIterator: public BaseIterator { protected: using BaseIterator::mPos;//bit position; using BaseIterator::mBitSize;//bit size; using BaseIterator::mParent;//this iterator can't change the parent_mask! public: DenseIterator() : BaseIterator() {} DenseIterator(Index32 pos,const RootNodeMask *parent) : BaseIterator(pos,parent) {} void increment() { assert(mParent != nullptr); mPos += 1;//carefull - the increament might go beyond the end assert(mPos<= mBitSize); } void increment(Index n) { for (Index i=0; i<n && this->next(); ++i) {} } bool next() { this->increment(); return this->test(); } bool operator*() const {return mParent->isOn(mPos);} DenseIterator& operator++() { this->increment(); return *this; } }; // class DenseIterator OnIterator beginOn() const { return OnIterator(this->findFirstOn(),this); } OnIterator endOn() const { return OnIterator(mBitSize,this); } OffIterator beginOff() const { return OffIterator(this->findFirstOff(),this); } OffIterator endOff() const { return OffIterator(mBitSize,this); } DenseIterator beginDense() const { return DenseIterator(0,this); } DenseIterator endDense() const { return DenseIterator(mBitSize,this); } bool operator == (const RootNodeMask &B) const { if (mBitSize != B.mBitSize) return false; for (Index32 i=0; i<mIntSize; ++i) if (mBits[i] != B.mBits[i]) return false; return true; } bool operator != (const RootNodeMask &B) const { if (mBitSize != B.mBitSize) return true; for (Index32 i=0; i<mIntSize; ++i) if (mBits[i] != B.mBits[i]) 
return true; return false; } // // Bitwise logical operations // RootNodeMask operator!() const { RootNodeMask m = *this; m.toggle(); return m; } const RootNodeMask& operator&=(const RootNodeMask& other) { assert(mIntSize == other.mIntSize); for (Index32 i = 0, N = std::min(mIntSize, other.mIntSize); i < N; ++i) { mBits[i] &= other.mBits[i]; } for (Index32 i = other.mIntSize; i < mIntSize; ++i) mBits[i] = 0x00000000; return *this; } const RootNodeMask& operator|=(const RootNodeMask& other) { assert(mIntSize == other.mIntSize); for (Index32 i = 0, N = std::min(mIntSize, other.mIntSize); i < N; ++i) { mBits[i] |= other.mBits[i]; } return *this; } const RootNodeMask& operator^=(const RootNodeMask& other) { assert(mIntSize == other.mIntSize); for (Index32 i = 0, N = std::min(mIntSize, other.mIntSize); i < N; ++i) { mBits[i] ^= other.mBits[i]; } return *this; } RootNodeMask operator&(const RootNodeMask& other) const { RootNodeMask m(*this); m &= other; return m; } RootNodeMask operator|(const RootNodeMask& other) const { RootNodeMask m(*this); m |= other; return m; } RootNodeMask operator^(const RootNodeMask& other) const { RootNodeMask m(*this); m ^= other; return m; } Index32 getMemUsage() const { return static_cast<Index32>(mIntSize*sizeof(Index32) + sizeof(*this)); } Index32 countOn() const { assert(mBits); Index32 n=0; for (Index32 i=0; i< mIntSize; ++i) n += CountOn(mBits[i]); return n; } Index32 countOff() const { return mBitSize-this->countOn(); } void setOn(Index32 i) { assert(mBits); assert( (i>>5) < mIntSize); mBits[i>>5] |= 1<<(i&31); } void setOff(Index32 i) { assert(mBits); assert( (i>>5) < mIntSize); mBits[i>>5] &= ~(1<<(i&31)); } void set(Index32 i, bool On) { On ? 
this->setOn(i) : this->setOff(i); } void setOn() { assert(mBits); for (Index32 i=0; i<mIntSize; ++i) mBits[i]=0xFFFFFFFF; } void setOff() { assert(mBits); for (Index32 i=0; i<mIntSize; ++i) mBits[i]=0x00000000; } void toggle(Index32 i) { assert(mBits); assert( (i>>5) < mIntSize); mBits[i>>5] ^= 1<<(i&31); } void toggle() { assert(mBits); for (Index32 i=0; i<mIntSize; ++i) mBits[i]=~mBits[i]; } void setFirstOn() { this->setOn(0); } void setLastOn() { this->setOn(mBitSize-1); } void setFirstOff() { this->setOff(0); } void setLastOff() { this->setOff(mBitSize-1); } bool isOn(Index32 i) const { assert(mBits); assert( (i>>5) < mIntSize); return ( mBits[i >> 5] & (1<<(i&31)) ); } bool isOff(Index32 i) const { assert(mBits); assert( (i>>5) < mIntSize); return ( ~mBits[i >> 5] & (1<<(i&31)) ); } bool isOn() const { if (!mBits) return false;//undefined is off for (Index32 i=0; i<mIntSize; ++i) if (mBits[i] != 0xFFFFFFFF) return false; return true; } bool isOff() const { if (!mBits) return true;//undefined is off for (Index32 i=0; i<mIntSize; ++i) if (mBits[i] != 0) return false; return true; } Index32 findFirstOn() const { assert(mBits); Index32 i=0; while(!mBits[i]) if (++i == mIntSize) return mBitSize;//reached end return 32*i + FindLowestOn(mBits[i]); } Index32 findFirstOff() const { assert(mBits); Index32 i=0; while(!(~mBits[i])) if (++i == mIntSize) return mBitSize;//reached end return 32*i + FindLowestOn(~mBits[i]); } void save(std::ostream& os) const { assert(mBits); os.write(reinterpret_cast<const char*>(mBits), mIntSize * sizeof(Index32)); } void load(std::istream& is) { assert(mBits); is.read(reinterpret_cast<char*>(mBits), mIntSize * sizeof(Index32)); } void seek(std::istream& is) const { assert(mBits); is.seekg(mIntSize * sizeof(Index32), std::ios_base::cur); } /// @brief simple print method for debugging void printInfo(std::ostream& os=std::cout) const { os << "RootNodeMask: Bit-size="<<mBitSize<<" Int-size="<<mIntSize<<std::endl; } void printBits(std::ostream& 
os=std::cout, Index32 max_out=80u) const { const Index32 n=(mBitSize>max_out?max_out:mBitSize); for (Index32 i=0; i < n; ++i) { if ( !(i&31) ) os << "||"; else if ( !(i%8) ) os << "|"; os << this->isOn(i); } os << "|" << std::endl; } void printAll(std::ostream& os=std::cout, Index32 max_out=80u) const { this->printInfo(os); this->printBits(os,max_out); } Index32 findNextOn(Index32 start) const { assert(mBits); Index32 n = start >> 5, m = start & 31;//initiate if (n>=mIntSize) return mBitSize; // check for out of bounds Index32 b = mBits[n]; if (b & (1<<m)) return start;//simple case b &= 0xFFFFFFFF << m;// mask lower bits while(!b && ++n<mIntSize) b = mBits[n];// find next nonzero int return (!b ? mBitSize : 32*n + FindLowestOn(b));//catch last-int=0 } Index32 findNextOff(Index32 start) const { assert(mBits); Index32 n = start >> 5, m = start & 31;//initiate if (n>=mIntSize) return mBitSize; // check for out of bounds Index32 b = ~mBits[n]; if (b & (1<<m)) return start;//simple case b &= 0xFFFFFFFF<<m;// mask lower bits while(!b && ++n<mIntSize) b = ~mBits[n];// find next nonzero int return (!b ? mBitSize : 32*n + FindLowestOn(b));//catch last-int=0 } Index32 memUsage() const { assert(mBits); return static_cast<Index32>(sizeof(Index32*)+(2+mIntSize)*sizeof(Index32));//in bytes } }; // class RootNodeMask } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_NODEMASKS_HAS_BEEN_INCLUDED
48,829
C
33.027875
99
0.5824
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/MapsUtil.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file MapsUtil.h #ifndef OPENVDB_UTIL_MAPSUTIL_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_MAPSUTIL_HAS_BEEN_INCLUDED #include <openvdb/math/Maps.h> #include <algorithm> // for std::min(), std::max() #include <cmath> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { // Utility methods for calculating bounding boxes /// @brief Calculate an axis-aligned bounding box in the given map's domain /// (e.g., index space) from an axis-aligned bounding box in its range /// (e.g., world space) template<typename MapType> inline void calculateBounds(const MapType& map, const BBoxd& in, BBoxd& out) { const Vec3d& min = in.min(); const Vec3d& max = in.max(); // the pre-image of the 8 corners of the box Vec3d corners[8]; corners[0] = in.min();; corners[1] = Vec3d(min(0), min(1), min(2)); corners[2] = Vec3d(max(0), max(1), min(2)); corners[3] = Vec3d(min(0), max(1), min(2)); corners[4] = Vec3d(min(0), min(1), max(2)); corners[5] = Vec3d(max(0), min(1), max(2)); corners[6] = max; corners[7] = Vec3d(min(0), max(1), max(2)); Vec3d pre_image; Vec3d& out_min = out.min(); Vec3d& out_max = out.max(); out_min = map.applyInverseMap(corners[0]); out_max = min; for (int i = 1; i < 8; ++i) { pre_image = map.applyInverseMap(corners[i]); for (int j = 0; j < 3; ++j) { out_min(j) = std::min( out_min(j), pre_image(j)); out_max(j) = std::max( out_max(j), pre_image(j)); } } } /// @brief Calculate an axis-aligned bounding box in the given map's domain /// from a spherical bounding box in its range. template<typename MapType> inline void calculateBounds(const MapType& map, const Vec3d& center, const Real radius, BBoxd& out) { // On return, out gives a bounding box in continuous index space // that encloses the sphere. // // the image of a sphere under the inverse of the linearMap will be an ellipsoid. 
if (math::is_linear<MapType>::value) { // I want to find extrema for three functions f(x', y', z') = x', or = y', or = z' // with the constraint that g = (x-xo)^2 + (y-yo)^2 + (z-zo)^2 = r^2. // Where the point x,y,z is the image of x',y',z' // Solve: \lambda Grad(g) = Grad(f) and g = r^2. // Note: here (x,y,z) is the image of (x',y',z'), and the gradient // is w.r.t the (') space. // // This can be solved exactly: e_a^T (x' -xo') =\pm r\sqrt(e_a^T J^(-1)J^(-T)e_a) // where e_a is one of the three unit vectors. - djh. /// find the image of the center of the sphere Vec3d center_pre_image = map.applyInverseMap(center); std::vector<Vec3d> coordinate_units; coordinate_units.push_back(Vec3d(1,0,0)); coordinate_units.push_back(Vec3d(0,1,0)); coordinate_units.push_back(Vec3d(0,0,1)); Vec3d& out_min = out.min(); Vec3d& out_max = out.max(); for (int direction = 0; direction < 3; ++direction) { Vec3d temp = map.applyIJT(coordinate_units[direction]); double offset = radius * sqrt(temp.x()*temp.x() + temp.y()*temp.y() + temp.z()*temp.z()); out_min(direction) = center_pre_image(direction) - offset; out_max(direction) = center_pre_image(direction) + offset; } } else { // This is some unknown map type. In this case, we form an axis-aligned // bounding box for the sphere in world space and find the pre-images of // the corners in index space. From these corners we compute an axis-aligned // bounding box in index space. BBoxd bounding_box(center - radius*Vec3d(1,1,1), center + radius*Vec3d(1,1,1)); calculateBounds<MapType>(map, bounding_box, out); } } namespace { // anonymous namespace for this helper function /// @brief Find the intersection of a line passing through the point /// (<I>x</I>=0,&nbsp;<I>z</I>=&minus;1/<I>g</I>) with the circle /// (<I>x</I> &minus; <I>xo</I>)&sup2; + (<I>z</I> &minus; <I>zo</I>)&sup2; = <I>r</I>&sup2; /// at a point tangent to the circle. 
/// @return 0 if the focal point (0, -1/<I>g</I>) is inside the circle, /// 1 if the focal point touches the circle, or 2 when both points are found. inline int findTangentPoints(const double g, const double xo, const double zo, const double r, double& xp, double& zp, double& xm, double& zm) { double x2 = xo * xo; double r2 = r * r; double xd = g * xo; double xd2 = xd*xd; double zd = g * zo + 1.; double zd2 = zd*zd; double rd2 = r2*g*g; double distA = xd2 + zd2; double distB = distA - rd2; if (distB > 0) { double discriminate = sqrt(distB); xp = xo - xo*rd2/distA + r * zd *discriminate / distA; xm = xo - xo*rd2/distA - r * zd *discriminate / distA; zp = (zo*zd2 + zd*g*(x2 - r2) - xo*xo*g - r*xd*discriminate) / distA; zm = (zo*zd2 + zd*g*(x2 - r2) - xo*xo*g + r*xd*discriminate) / distA; return 2; } if (0 >= distB && distB >= -1e-9) { // the circle touches the focal point (x=0, z = -1/g) xp = 0; xm = 0; zp = -1/g; zm = -1/g; return 1; } return 0; } } // end anonymous namespace /// @brief Calculate an axis-aligned bounding box in index space /// from a spherical bounding box in world space. /// @note This specialization is optimized for a frustum map template<> inline void calculateBounds<math::NonlinearFrustumMap>(const math::NonlinearFrustumMap& frustum, const Vec3d& center, const Real radius, BBoxd& out) { // The frustum is a nonlinear map followed by a uniform scale, rotation, translation. // First we invert the translation, rotation and scale to find the spherical pre-image // of the sphere in "local" coordinates where the frustum is aligned with the near plane // on the z=0 plane and the "camera" is located at (x=0, y=0, z=-1/g). // check that the internal map has no shear. const math::AffineMap& secondMap = frustum.secondMap(); // test if the linear part has shear or non-uniform scaling if (!frustum.hasSimpleAffine()) { // In this case, we form an axis-aligned bounding box for sphere in world space // and find the pre_images of the corners in voxel space. 
From these corners we // compute an axis-algined bounding box in voxel-spae BBoxd bounding_box(center - radius*Vec3d(1,1,1), center + radius*Vec3d(1,1,1)); calculateBounds<math::NonlinearFrustumMap>(frustum, bounding_box, out); return; } // for convenience Vec3d& out_min = out.min(); Vec3d& out_max = out.max(); Vec3d centerLS = secondMap.applyInverseMap(center); Vec3d voxelSize = secondMap.voxelSize(); // all the voxels have the same size since we know this is a simple affine map double radiusLS = radius / voxelSize(0); double gamma = frustum.getGamma(); double xp; double zp; double xm; double zm; int soln_number; // the bounding box in index space for the points in the frustum const BBoxd& bbox = frustum.getBBox(); // initialize min and max const double x_min = bbox.min().x(); const double y_min = bbox.min().y(); const double z_min = bbox.min().z(); const double x_max = bbox.max().x(); const double y_max = bbox.max().y(); const double z_max = bbox.max().z(); out_min.x() = x_min; out_max.x() = x_max; out_min.y() = y_min; out_max.y() = y_max; Vec3d extreme; Vec3d extreme2; Vec3d pre_image; // find the x-range soln_number = findTangentPoints(gamma, centerLS.x(), centerLS.z(), radiusLS, xp, zp, xm, zm); if (soln_number == 2) { extreme.x() = xp; extreme.y() = centerLS.y(); extreme.z() = zp; // location in world space of the tangent point extreme2 = secondMap.applyMap(extreme); // convert back to voxel space pre_image = frustum.applyInverseMap(extreme2); out_max.x() = std::max(x_min, std::min(x_max, pre_image.x())); extreme.x() = xm; extreme.y() = centerLS.y(); extreme.z() = zm; // location in world space of the tangent point extreme2 = secondMap.applyMap(extreme); // convert back to voxel space pre_image = frustum.applyInverseMap(extreme2); out_min.x() = std::max(x_min, std::min(x_max, pre_image.x())); } else if (soln_number == 1) { // the circle was tangent at the focal point } else if (soln_number == 0) { // the focal point was inside the circle } // find the y-range 
soln_number = findTangentPoints(gamma, centerLS.y(), centerLS.z(), radiusLS, xp, zp, xm, zm); if (soln_number == 2) { extreme.x() = centerLS.x(); extreme.y() = xp; extreme.z() = zp; // location in world space of the tangent point extreme2 = secondMap.applyMap(extreme); // convert back to voxel space pre_image = frustum.applyInverseMap(extreme2); out_max.y() = std::max(y_min, std::min(y_max, pre_image.y())); extreme.x() = centerLS.x(); extreme.y() = xm; extreme.z() = zm; extreme2 = secondMap.applyMap(extreme); // convert back to voxel space pre_image = frustum.applyInverseMap(extreme2); out_min.y() = std::max(y_min, std::min(y_max, pre_image.y())); } else if (soln_number == 1) { // the circle was tangent at the focal point } else if (soln_number == 0) { // the focal point was inside the circle } // the near and far // the closest point. The front of the frustum is at 0 in index space double near_dist = std::max(centerLS.z() - radiusLS, 0.); // the farthest point. The back of the frustum is at mDepth in index space double far_dist = std::min(centerLS.z() + radiusLS, frustum.getDepth() ); Vec3d near_point(0.f, 0.f, near_dist); Vec3d far_point(0.f, 0.f, far_dist); out_min.z() = std::max(z_min, frustum.applyInverseMap(secondMap.applyMap(near_point)).z()); out_max.z() = std::min(z_max, frustum.applyInverseMap(secondMap.applyMap(far_point)).z()); } } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_MAPSUTIL_HAS_BEEN_INCLUDED
10,511
C
34.633898
97
0.608315
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/Name.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_UTIL_NAME_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_NAME_HAS_BEEN_INCLUDED #include <openvdb/Platform.h> #include <openvdb/version.h> #include <string> #include <iostream> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { typedef std::string Name; inline Name readString(std::istream& is) { uint32_t size; is.read(reinterpret_cast<char*>(&size), sizeof(uint32_t)); std::string buffer(size, ' '); if (size>0) is.read(&buffer[0], size); return buffer; } inline void writeString(std::ostream& os, const Name& name) { uint32_t size = uint32_t(name.size()); os.write(reinterpret_cast<char*>(&size), sizeof(uint32_t)); os.write(&name[0], size); } } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_NAME_HAS_BEEN_INCLUDED
936
C
21.309523
63
0.707265
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/NullInterrupter.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file NullInterrupter.h #ifndef OPENVDB_UTIL_NULL_INTERRUPTER_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_NULL_INTERRUPTER_HAS_BEEN_INCLUDED #include <openvdb/version.h> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { /// @brief Dummy NOOP interrupter class defining interface /// /// This shows the required interface for the @c InterrupterType template argument /// using by several threaded applications (e.g. tools/PointAdvect.h). The host /// application calls start() at the beginning of an interruptible operation, end() /// at the end of the operation, and wasInterrupted() periodically during the operation. /// If any call to wasInterrupted() returns @c true, the operation will be aborted. /// @note This Dummy interrupter will NEVER interrupt since wasInterrupted() always /// returns false! struct NullInterrupter { /// Default constructor NullInterrupter () {} /// Signal the start of an interruptible operation. /// @param name an optional descriptive name for the operation void start(const char* name = nullptr) { (void)name; } /// Signal the end of an interruptible operation. void end() {} /// Check if an interruptible operation should be aborted. /// @param percent an optional (when >= 0) percentage indicating /// the fraction of the operation that has been completed /// @note this method is assumed to be thread-safe. The current /// implementation is clearly a NOOP and should compile out during /// optimization! inline bool wasInterrupted(int percent = -1) { (void)percent; return false; } }; /// This method allows NullInterrupter::wasInterrupted to be compiled /// out when client code only has a pointer (vs reference) to the interrupter. /// /// @note This is a free-standing function since C++ doesn't allow for /// partial template specialization (in client code of the interrupter). 
template <typename T> inline bool wasInterrupted(T* i, int percent = -1) { return i && i->wasInterrupted(percent); } /// Specialization for NullInterrupter template<> inline bool wasInterrupted<util::NullInterrupter>(util::NullInterrupter*, int) { return false; } } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_NULL_INTERRUPTER_HAS_BEEN_INCLUDED
2,428
C
39.483333
96
0.73682
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/CpuTimer.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_UTIL_CPUTIMER_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_CPUTIMER_HAS_BEEN_INCLUDED #include <openvdb/version.h> #include <string> #include <chrono> #include <iostream>// for std::cerr #include <sstream>// for ostringstream #include <iomanip>// for setprecision #include "Formats.h"// for printTime namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { /// @brief Simple timer for basic profiling. /// /// @code /// util::CpuTimer timer; /// // code here will not be timed! /// timer.start("algorithm"); /// // code to be timed goes here /// timer.stop(); /// @endcode /// /// or to time multiple blocks of code /// /// @code /// util::CpuTimer timer("algorithm 1"); /// // code to be timed goes here /// timer.restart("algorithm 2"); /// // code to be timed goes here /// timer.stop(); /// @endcode /// /// or to measure speedup between multiple runs /// /// @code /// util::CpuTimer timer("algorithm 1"); /// // code for the first run goes here /// const double t1 = timer.restart("algorithm 2"); /// // code for the second run goes here /// const double t2 = timer.stop(); /// std::cerr << "Algorithm 1 is " << (t2/t1) /// << " timers faster than algorithm 2\n"; /// @endcode /// /// or to measure multiple blocks of code with deferred output /// /// @code /// util::CpuTimer timer(); /// // code here will not be timed! /// timer.start(); /// // code for the first run goes here /// const double t1 = timer.restart();//time in milliseconds /// // code for the second run goes here /// const double t2 = timer.restart();//time in milliseconds /// // code here will not be timed! 
/// util::printTime(std::cout, t1, "Algorithm 1 completed in "); /// util::printTime(std::cout, t2, "Algorithm 2 completed in "); /// @endcode class CpuTimer { public: /// @brief Initiate timer CpuTimer(std::ostream& os = std::cerr) : mOutStream(os), mT0(this->now()) {} /// @brief Prints message and start timer. /// /// @note Should normally be followed by a call to stop() CpuTimer(const std::string& msg, std::ostream& os = std::cerr) : mOutStream(os) { this->start(msg); } /// @brief Start timer. /// /// @note Should normally be followed by a call to milliseconds() or stop(std::string) inline void start() { mT0 = this->now(); } /// @brief Print message and start timer. /// /// @note Should normally be followed by a call to stop() inline void start(const std::string& msg) { mOutStream << msg << " ..."; this->start(); } /// @brief Return Time difference in microseconds since construction or start was called. /// /// @note Combine this method with start() to get timing without any outputs. inline int64_t microseconds() const { return (this->now() - mT0); } /// @brief Return Time difference in milliseconds since construction or start was called. /// /// @note Combine this method with start() to get timing without any outputs. inline double milliseconds() const { static constexpr double resolution = 1.0 / 1E3; return static_cast<double>(this->microseconds()) * resolution; } /// @brief Return Time difference in seconds since construction or start was called. /// /// @note Combine this method with start() to get timing without any outputs. inline double seconds() const { static constexpr double resolution = 1.0 / 1E6; return static_cast<double>(this->microseconds()) * resolution; } inline std::string time() const { const double msec = this->milliseconds(); std::ostringstream os; printTime(os, msec, "", "", 4, 1, 1); return os.str(); } /// @brief Returns and prints time in milliseconds since construction or start was called. 
/// /// @note Combine this method with start(std::string) to print at start and stop of task being timed. inline double stop() const { const double msec = this->milliseconds(); printTime(mOutStream, msec, " completed in ", "\n", 4, 3, 1); return msec; } /// @brief Returns and prints time in milliseconds since construction or start was called. /// /// @note Combine this method with start() to delay output of task being timed. inline double stop(const std::string& msg) const { const double msec = this->milliseconds(); mOutStream << msg << " ..."; printTime(mOutStream, msec, " completed in ", "\n", 4, 3, 1); return msec; } /// @brief Re-start timer. /// @return time in milliseconds since previous start or restart. /// /// @note Should normally be followed by a call to stop() or restart() inline double restart() { const double msec = this->milliseconds(); this->start(); return msec; } /// @brief Stop previous timer, print message and re-start timer. /// @return time in milliseconds since previous start or restart. /// /// @note Should normally be followed by a call to stop() or restart() inline double restart(const std::string& msg) { const double delta = this->stop(); this->start(msg); return delta; } private: static int64_t now() { // steady_clock is a monotonically increasing clock designed for timing duration // note that high_resolution_clock is aliased to either steady_clock or system_clock // depending on the platform, so it is preferrable to use steady_clock const auto time_since_epoch = std::chrono::steady_clock::now().time_since_epoch(); // cast time since epoch into microseconds (1 / 1000000 seconds) const auto microseconds = std::chrono::duration_cast<std::chrono::microseconds>(time_since_epoch).count(); // cast to a a 64-bit signed integer as this will overflow in 2262! 
return static_cast<int64_t>(microseconds); } std::ostream& mOutStream; int64_t mT0{0}; };// CpuTimer } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_CPUTIMER_HAS_BEEN_INCLUDED
6,397
C
32.150259
105
0.626231
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/Util.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_UTIL_UTIL_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_UTIL_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/tree/Tree.h> #include <openvdb/tools/ValueTransformer.h> #include <openvdb/tools/Prune.h>// for tree::pruneInactive namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { OPENVDB_API extern const Index32 INVALID_IDX; /// @brief coordinate offset table for neighboring voxels OPENVDB_API extern const Coord COORD_OFFSETS[26]; //////////////////////////////////////// /// Return @a voxelCoord rounded to the closest integer coordinates. inline Coord nearestCoord(const Vec3d& voxelCoord) { Coord ijk; ijk[0] = int(std::floor(voxelCoord[0])); ijk[1] = int(std::floor(voxelCoord[1])); ijk[2] = int(std::floor(voxelCoord[2])); return ijk; } //////////////////////////////////////// /// @brief Functor for use with tools::foreach() to compute the boolean intersection /// between the value masks of corresponding leaf nodes in two trees template<class TreeType1, class TreeType2> class LeafTopologyIntOp { public: LeafTopologyIntOp(const TreeType2& tree): mOtherTree(&tree) {} inline void operator()(const typename TreeType1::LeafIter& lIter) const { const Coord xyz = lIter->origin(); const typename TreeType2::LeafNodeType* leaf = mOtherTree->probeConstLeaf(xyz); if (leaf) {//leaf node lIter->topologyIntersection(*leaf, zeroVal<typename TreeType1::ValueType>()); } else if (!mOtherTree->isValueOn(xyz)) {//inactive tile lIter->setValuesOff(); } } private: const TreeType2* mOtherTree; }; /// @brief Functor for use with tools::foreach() to compute the boolean difference /// between the value masks of corresponding leaf nodes in two trees template<class TreeType1, class TreeType2> class LeafTopologyDiffOp { public: LeafTopologyDiffOp(const TreeType2& tree): mOtherTree(&tree) {} inline void operator()(const typename 
TreeType1::LeafIter& lIter) const { const Coord xyz = lIter->origin(); const typename TreeType2::LeafNodeType* leaf = mOtherTree->probeConstLeaf(xyz); if (leaf) {//leaf node lIter->topologyDifference(*leaf, zeroVal<typename TreeType1::ValueType>()); } else if (mOtherTree->isValueOn(xyz)) {//active tile lIter->setValuesOff(); } } private: const TreeType2* mOtherTree; }; //////////////////////////////////////// /// @brief Perform a boolean intersection between two leaf nodes' topology masks. /// @return a pointer to a new, boolean-valued tree containing the overlapping voxels. template<class TreeType1, class TreeType2> inline typename TreeType1::template ValueConverter<bool>::Type::Ptr leafTopologyIntersection(const TreeType1& lhs, const TreeType2& rhs, bool threaded = true) { typedef typename TreeType1::template ValueConverter<bool>::Type BoolTreeType; typename BoolTreeType::Ptr topologyTree(new BoolTreeType( lhs, /*inactiveValue=*/false, /*activeValue=*/true, TopologyCopy())); tools::foreach(topologyTree->beginLeaf(), LeafTopologyIntOp<BoolTreeType, TreeType2>(rhs), threaded); tools::pruneInactive(*topologyTree, threaded); return topologyTree; } /// @brief Perform a boolean difference between two leaf nodes' topology masks. /// @return a pointer to a new, boolean-valued tree containing the non-overlapping /// voxels from the lhs. 
template<class TreeType1, class TreeType2> inline typename TreeType1::template ValueConverter<bool>::Type::Ptr leafTopologyDifference(const TreeType1& lhs, const TreeType2& rhs, bool threaded = true) { typedef typename TreeType1::template ValueConverter<bool>::Type BoolTreeType; typename BoolTreeType::Ptr topologyTree(new BoolTreeType( lhs, /*inactiveValue=*/false, /*activeValue=*/true, TopologyCopy())); tools::foreach(topologyTree->beginLeaf(), LeafTopologyDiffOp<BoolTreeType, TreeType2>(rhs), threaded); tools::pruneInactive(*topologyTree, threaded); return topologyTree; } } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_UTIL_HAS_BEEN_INCLUDED
4,333
C
30.867647
90
0.701823
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/PagedArray.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// /// @file PagedArray.h /// /// @author Ken Museth /// /// @brief Concurrent, page-based, dynamically-sized linear data /// structure with O(1) random access and STL-compliant /// iterators. It is primarily intended for applications /// that involve multi-threading push_back of (a possibly /// unkown number of) elements into a dynamically growing /// linear array, and fast random access to said elements. #ifndef OPENVDB_UTIL_PAGED_ARRAY_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_PAGED_ARRAY_HAS_BEEN_INCLUDED #include <openvdb/version.h> #include <openvdb/Types.h>// SharedPtr #include <deque> #include <cassert> #include <iostream> #include <algorithm>// std::swap #include <tbb/atomic.h> #include <tbb/spin_mutex.h> #include <tbb/parallel_for.h> #include <tbb/parallel_sort.h> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { //////////////////////////////////////// /// @brief Concurrent, page-based, dynamically-sized linear data structure /// with O(1) random access and STL-compliant iterators. It is /// primarily intended for applications that concurrently insert /// (a possibly unkown number of) elements into a dynamically /// growing linear array, and fast random access to said elements. /// /// @note Multiple threads can grow the page-table and push_back /// new elements concurrently. A ValueBuffer provides accelerated /// and threadsafe push_back at the cost of potentially re-ordering /// elements (when multiple instances are used). /// /// @details This data structure employes contiguous pages of elements /// (a std::deque) which avoids moving data when the /// capacity is out-grown and new pages are allocated. The /// size of the pages can be controlled with the Log2PageSize /// template parameter (defaults to 1024 elements of type ValueT). 
/// /// There are three fundamentally different ways to insert elements to /// this container - each with different advanteges and disadvanteges. /// /// The simplest way to insert elements is to use PagedArray::push_back_unsafe /// which is @a not thread-safe: /// @code /// PagedArray<size_t> array; /// for (size_t i=0; i<100000; ++i) array.push_back_unsafe(i); /// @endcode /// /// The fastest way (by far) to insert elements is by means of a PagedArray::ValueBuffer: /// @code /// PagedArray<size_t> array; /// auto buffer = array.getBuffer(); /// for (size_t i=0; i<100000; ++i) buffer.push_back(i); /// buffer.flush(); /// @endcode /// or /// @code /// PagedArray<size_t> array; /// { /// //local scope of a single thread /// auto buffer = array.getBuffer(); /// for (size_t i=0; i<100000; ++i) buffer.push_back(i); /// } /// @endcode /// or with TBB task-based multi-threading: /// @code /// PagedArray<size_t> array; /// tbb::parallel_for( /// tbb::blocked_range<size_t>(0, 10000, array.pageSize()), /// [&array](const tbb::blocked_range<size_t>& range) { /// auto buffer = array.getBuffer(); /// for (size_t i=range.begin(); i!=range.end(); ++i) buffer.push_back(i); /// } /// ); /// @endcode /// or with TBB thread-local storage for even better performance (due /// to fewer concurrent instantiations of partially full ValueBuffers) /// @code /// PagedArray<size_t> array; /// auto exemplar = array.getBuffer();//dummy used for initialization /// tbb::enumerable_thread_specific<PagedArray<size_t>::ValueBuffer> /// pool(exemplar);//thread local storage pool of ValueBuffers /// tbb::parallel_for( /// tbb::blocked_range<size_t>(0, 10000, array.pageSize()), /// [&pool](const tbb::blocked_range<size_t>& range) { /// auto &buffer = pool.local(); /// for (size_t i=range.begin(); i!=range.end(); ++i) buffer.push_back(i); /// } /// ); /// for (auto i=pool.begin(); i!=pool.end(); ++i) i->flush(); /// @endcode /// This technique generally outperforms PagedArray::push_back_unsafe, /// 
std::vector::push_back, std::deque::push_back and even /// tbb::concurrent_vector::push_back. Additionally it /// is thread-safe as long as each thread has it's own instance of a /// PagedArray::ValueBuffer. The only disadvantage is the ordering of /// the elements is undefined if multiple instance of a /// PagedArray::ValueBuffer are employed. This is typically the case /// in the context of multi-threading, where the /// ordering of inserts are undefined anyway. Note that a local scope /// can be used to guarentee that the ValueBuffer has inserted all its /// elements by the time the scope ends. Alternatively the ValueBuffer /// can be explicitly flushed by calling ValueBuffer::flush. /// /// The third way to insert elements is to resize the container and use /// random access, e.g. /// @code /// PagedArray<int> array; /// array.resize(100000); /// for (int i=0; i<100000; ++i) array[i] = i; /// @endcode /// or in terms of the random access iterator /// @code /// PagedArray<int> array; /// array.resize(100000); /// for (auto i=array.begin(); i!=array.end(); ++i) *i = i.pos(); /// @endcode /// While this approach is both fast and thread-safe it suffers from the /// major disadvantage that the problem size, i.e. number of elements, needs to /// be known in advance. If that's the case you might as well consider /// using std::vector or a raw c-style array! In other words the /// PagedArray is most useful in the context of applications that /// involve multi-threading of dynamically growing linear arrays that /// require fast random access. 
template<typename ValueT, size_t Log2PageSize = 10UL> class PagedArray { private: static_assert(Log2PageSize > 1UL, "Expected Log2PageSize > 1"); class Page; // must allow mutiple threads to call operator[] as long as only one thread calls push_back using PageTableT = std::deque<Page*>; public: using ValueType = ValueT; using Ptr = SharedPtr<PagedArray>; /// @brief Default constructor PagedArray() : mCapacity{0} { mSize = 0; } /// @brief Destructor removed all allocated pages ~PagedArray() { this->clear(); } // Disallow copy construction and assignment PagedArray(const PagedArray&) = delete; PagedArray& operator=(const PagedArray&) = delete; /// @brief Return a shared pointer to a new instance of this class static Ptr create() { return Ptr(new PagedArray); } /// @brief Caches values into a local memory Page to improve /// performance of push_back into a PagedArray. /// /// @note The ordering of inserted elements is undefined when /// multiple ValueBuffers are used! /// /// @warning By design this ValueBuffer is not threadsafe so /// make sure to create an instance per thread! class ValueBuffer; /// @return a new instance of a ValueBuffer which supports thread-safe push_back! ValueBuffer getBuffer() { return ValueBuffer(*this); } /// Const std-compliant iterator class ConstIterator; /// Non-const std-compliant iterator class Iterator; /// @brief This method is deprecated and will be removed shortly! [[deprecated]] size_t push_back(const ValueType& value) { return this->push_back_unsafe(value); } /// @param value value to be added to this PagedArray /// /// @note For best performance consider using the ValueBuffer! /// /// @warning Not thread-safe and mostly intended for debugging! 
size_t push_back_unsafe(const ValueType& value) { const size_t index = mSize.fetch_and_increment(); if (index >= mCapacity) { mPageTable.push_back( new Page() ); mCapacity += Page::Size; } (*mPageTable[index >> Log2PageSize])[index] = value; return index; } /// @brief Reduce the page table to fix the current size. /// /// @warning Not thread-safe! void shrink_to_fit(); /// @brief Return a reference to the value at the specified offset /// /// @param i linear offset of the value to be accessed. /// /// @note This random access has constant time complexity. /// /// @warning It is assumed that the i'th element is already allocated! ValueType& operator[](size_t i) { assert(i<mCapacity); return (*mPageTable[i>>Log2PageSize])[i]; } /// @brief Return a const-reference to the value at the specified offset /// /// @param i linear offset of the value to be accessed. /// /// @note This random access has constant time complexity. /// /// @warning It is assumed that the i'th element is already allocated! const ValueType& operator[](size_t i) const { assert(i<mCapacity); return (*mPageTable[i>>Log2PageSize])[i]; } /// @brief Set all elements in the page table to the specified value /// /// @param v value to be filled in all the existing pages of this PagedArray. /// /// @note Multi-threaded void fill(const ValueType& v) { auto op = [&](const tbb::blocked_range<size_t>& r){ for(size_t i=r.begin(); i!=r.end(); ++i) mPageTable[i]->fill(v); }; tbb::parallel_for(tbb::blocked_range<size_t>(0, this->pageCount()), op); } /// @brief Copy the first @a count values in this PageArray into /// a raw c-style array, assuming it to be at least @a count /// elements long. /// /// @param p pointer to an array that will used as the destination of the copy. /// @param count number of elements to be copied. 
/// bool copy(ValueType *p, size_t count) const { size_t last_page = count >> Log2PageSize; if (last_page >= this->pageCount()) return false; auto op = [&](const tbb::blocked_range<size_t>& r){ for (size_t i=r.begin(); i!=r.end(); ++i) { mPageTable[i]->copy(p+i*Page::Size, Page::Size); } }; if (size_t m = count & Page::Mask) {//count is not divisible by page size tbb::parallel_for(tbb::blocked_range<size_t>(0, last_page, 32), op); mPageTable[last_page]->copy(p+last_page*Page::Size, m); } else { tbb::parallel_for(tbb::blocked_range<size_t>(0, last_page+1, 32), op); } return true; } void copy(ValueType *p) const { this->copy(p, mSize); } /// @brief Resize this array to the specified size. /// /// @param size number of elements that this PageArray will contain. /// /// @details Will grow or shrink the page table to contain /// the specified number of elements. It will affect the size(), /// iteration will go over all those elements, push_back will /// insert after them and operator[] can be used directly access /// them. /// /// @note No reserve method is implemented due to efficiency concerns /// (especially for the ValueBuffer) from having to deal with empty pages. /// /// @warning Not thread-safe! void resize(size_t size) { mSize = size; if (size > mCapacity) { this->grow(size-1); } else { this->shrink_to_fit(); } } /// @brief Resize this array to the specified size and initialize /// all values to @a v. /// /// @param size number of elements that this PageArray will contain. /// @param v value of all the @a size values. /// /// @details Will grow or shrink the page table to contain /// the specified number of elements. It will affect the size(), /// iteration will go over all those elements, push_back will /// insert after them and operator[] can be used directly access them. /// /// @note No reserve method is implemented due to efficiency concerns /// (especially for the ValueBuffer) from having to deal with empty pages. /// /// @warning Not thread-safe! 
void resize(size_t size, const ValueType& v) { this->resize(size); this->fill(v); } /// @brief Return the number of elements in this array. size_t size() const { return mSize; } /// @brief Return the maximum number of elements that this array /// can contain without allocating more memory pages. size_t capacity() const { return mCapacity; } /// @brief Return the number of additional elements that can be /// added to this array without allocating more memory pages. size_t freeCount() const { return mCapacity - mSize; } /// @brief Return the number of allocated memory pages. size_t pageCount() const { return mPageTable.size(); } /// @brief Return the number of elements per memory page. static size_t pageSize() { return Page::Size; } /// @brief Return log2 of the number of elements per memory page. static size_t log2PageSize() { return Log2PageSize; } /// @brief Return the memory footprint of this array in bytes. size_t memUsage() const { return sizeof(*this) + mPageTable.size() * Page::memUsage(); } /// @brief Return true if the container contains no elements. bool isEmpty() const { return mSize == 0; } /// @brief Return true if the page table is partially full, i.e. the /// last non-empty page contains less than pageSize() elements. /// /// @details When the page table is partially full calling merge() /// or using a ValueBuffer will rearrange the ordering of /// existing elements. bool isPartiallyFull() const { return (mSize & Page::Mask) > 0; } /// @brief Removes all elements from the array and delete all pages. /// /// @warning Not thread-safe! void clear() { for (size_t i=0, n=mPageTable.size(); i<n; ++i) delete mPageTable[i]; PageTableT().swap(mPageTable); mSize = 0; mCapacity = 0; } /// @brief Return a non-const iterator pointing to the first element Iterator begin() { return Iterator(*this, 0); } /// @brief Return a non-const iterator pointing to the /// past-the-last element. 
/// /// @warning Iterator does not point to a valid element and should not /// be dereferenced! Iterator end() { return Iterator(*this, mSize); } //@{ /// @brief Return a const iterator pointing to the first element ConstIterator cbegin() const { return ConstIterator(*this, 0); } ConstIterator begin() const { return ConstIterator(*this, 0); } //@} //@{ /// @brief Return a const iterator pointing to the /// past-the-last element. /// /// @warning Iterator does not point to a valid element and should not /// be dereferenced! ConstIterator cend() const { return ConstIterator(*this, mSize); } ConstIterator end() const { return ConstIterator(*this, mSize); } //@} /// @brief Parallel sort of all the elements in ascending order. void sort() { tbb::parallel_sort(this->begin(), this->end(), std::less<ValueT>() ); } /// @brief Parallel sort of all the elements in descending order. void invSort() { tbb::parallel_sort(this->begin(), this->end(), std::greater<ValueT>()); } //@{ /// @brief Parallel sort of all the elements based on a custom /// functor with the api: /// @code bool operator()(const ValueT& a, const ValueT& b) @endcode /// which returns true if a comes before b. template <typename Functor> void sort(Functor func) { tbb::parallel_sort(this->begin(), this->end(), func ); } //@} /// @brief Transfer all the elements (and pages) from the other array to this array. /// /// @param other non-const reference to the PagedArray that will be merged into this PagedArray. /// /// @note The other PagedArray is empty on return. /// /// @warning The ordering of elements is undefined if this page table is partially full! 
void merge(PagedArray& other); /// @brief Print information for debugging void print(std::ostream& os = std::cout) const { os << "PagedArray:\n" << "\tSize: " << this->size() << " elements\n" << "\tPage table: " << this->pageCount() << " pages\n" << "\tPage size: " << this->pageSize() << " elements\n" << "\tCapacity: " << this->capacity() << " elements\n" << "\tFootprint: " << this->memUsage() << " bytes\n"; } private: friend class ValueBuffer; void grow(size_t index) { tbb::spin_mutex::scoped_lock lock(mGrowthMutex); while(index >= mCapacity) { mPageTable.push_back( new Page() ); mCapacity += Page::Size; } } void add_full(Page*& page, size_t size); void add_partially_full(Page*& page, size_t size); void add(Page*& page, size_t size) { tbb::spin_mutex::scoped_lock lock(mGrowthMutex); if (size == Page::Size) {//page is full this->add_full(page, size); } else if (size>0) {//page is only partially full this->add_partially_full(page, size); } } PageTableT mPageTable;//holds points to allocated pages tbb::atomic<size_t> mSize;// current number of elements in array size_t mCapacity;//capacity of array given the current page count tbb::spin_mutex mGrowthMutex;//Mutex-lock required to grow pages }; // Public class PagedArray //////////////////////////////////////////////////////////////////////////////// template <typename ValueT, size_t Log2PageSize> void PagedArray<ValueT, Log2PageSize>::shrink_to_fit() { if (mPageTable.size() > (mSize >> Log2PageSize) + 1) { tbb::spin_mutex::scoped_lock lock(mGrowthMutex); const size_t pageCount = (mSize >> Log2PageSize) + 1; if (mPageTable.size() > pageCount) { delete mPageTable.back(); mPageTable.pop_back(); mCapacity -= Page::Size; } } } template <typename ValueT, size_t Log2PageSize> void PagedArray<ValueT, Log2PageSize>::merge(PagedArray& other) { if (&other != this && !other.isEmpty()) { tbb::spin_mutex::scoped_lock lock(mGrowthMutex); // extract last partially full page if it exists Page* page = nullptr; const size_t size = mSize 
& Page::Mask; //number of elements in the last page if ( size > 0 ) { page = mPageTable.back(); mPageTable.pop_back(); mSize -= size; } // transfer all pages from the other page table mPageTable.insert(mPageTable.end(), other.mPageTable.begin(), other.mPageTable.end()); mSize += other.mSize; mCapacity = Page::Size*mPageTable.size(); other.mSize = 0; other.mCapacity = 0; PageTableT().swap(other.mPageTable); // add back last partially full page if (page) this->add_partially_full(page, size); } } template <typename ValueT, size_t Log2PageSize> void PagedArray<ValueT, Log2PageSize>::add_full(Page*& page, size_t size) { assert(size == Page::Size);//page must be full if (mSize & Page::Mask) {//page-table is partially full Page*& tmp = mPageTable.back(); std::swap(tmp, page);//swap last table entry with page } mPageTable.push_back(page); mCapacity += Page::Size; mSize += size; page = nullptr; } template <typename ValueT, size_t Log2PageSize> void PagedArray<ValueT, Log2PageSize>::add_partially_full(Page*& page, size_t size) { assert(size > 0 && size < Page::Size);//page must be partially full if (size_t m = mSize & Page::Mask) {//page table is also partially full ValueT *s = page->data(), *t = mPageTable.back()->data() + m; for (size_t i=std::min(mSize+size, mCapacity)-mSize; i; --i) *t++ = *s++; if (mSize+size > mCapacity) {//grow page table mPageTable.push_back( new Page() ); t = mPageTable.back()->data(); for (size_t i=mSize+size-mCapacity; i; --i) *t++ = *s++; mCapacity += Page::Size; } } else {//page table is full so simply append page mPageTable.push_back( page ); mCapacity += Page::Size; page = nullptr; } mSize += size; } //////////////////////////////////////////////////////////////////////////////// // Public member-class of PagedArray template <typename ValueT, size_t Log2PageSize> class PagedArray<ValueT, Log2PageSize>:: ValueBuffer { public: using PagedArrayType = PagedArray<ValueT, Log2PageSize>; /// @brief Constructor from a PageArray ValueBuffer(PagedArray& 
parent) : mParent(&parent), mPage(new Page()), mSize(0) {} /// @warning This copy-constructor is shallow in the sense that no /// elements are copied, i.e. size = 0. ValueBuffer(const ValueBuffer& other) : mParent(other.mParent), mPage(new Page()), mSize(0) {} /// @brief Destructor that transfers an buffered values to the parent PagedArray. ~ValueBuffer() { mParent->add(mPage, mSize); delete mPage; } ValueBuffer& operator=(const ValueBuffer&) = delete;// disallow copy assignment /// @brief Add a value to the buffer and increment the size. /// /// @details If the internal memory page is full it will /// automaically flush the page to the parent PagedArray. void push_back(const ValueT& v) { (*mPage)[mSize++] = v; if (mSize == Page::Size) this->flush(); } /// @brief Manually transfers the values in this buffer to the parent PagedArray. /// /// @note This method is also called by the destructor and /// push_back so it should only be called if one manually wants to /// sync up the buffer with the array, e.g. during debugging. void flush() { mParent->add(mPage, mSize); if (mPage == nullptr) mPage = new Page(); mSize = 0; } /// @brief Return a reference to the parent PagedArray PagedArrayType& parent() const { return *mParent; } /// @brief Return the current number of elements cached in this buffer. 
size_t size() const { return mSize; } static size_t pageSize() { return 1UL << Log2PageSize; } private: PagedArray* mParent; Page* mPage; size_t mSize; };// Public class PagedArray::ValueBuffer //////////////////////////////////////////////////////////////////////////////// // Const std-compliant iterator // Public member-class of PagedArray template <typename ValueT, size_t Log2PageSize> class PagedArray<ValueT, Log2PageSize>:: ConstIterator : public std::iterator<std::random_access_iterator_tag, ValueT> { public: using BaseT = std::iterator<std::random_access_iterator_tag, ValueT>; using difference_type = typename BaseT::difference_type; // constructors and assignment ConstIterator() : mPos(0), mParent(nullptr) {} ConstIterator(const PagedArray& parent, size_t pos=0) : mPos(pos), mParent(&parent) {} ConstIterator(const ConstIterator& other) : mPos(other.mPos), mParent(other.mParent) {} ConstIterator& operator=(const ConstIterator& other) { mPos=other.mPos; mParent=other.mParent; return *this; } // prefix ConstIterator& operator++() { ++mPos; return *this; } ConstIterator& operator--() { --mPos; return *this; } // postfix ConstIterator operator++(int) { ConstIterator tmp(*this); ++mPos; return tmp; } ConstIterator operator--(int) { ConstIterator tmp(*this); --mPos; return tmp; } // value access const ValueT& operator*() const { return (*mParent)[mPos]; } const ValueT* operator->() const { return &(this->operator*()); } const ValueT& operator[](const difference_type& pos) const { return (*mParent)[mPos+pos]; } // offset ConstIterator& operator+=(const difference_type& pos) { mPos += pos; return *this; } ConstIterator& operator-=(const difference_type& pos) { mPos -= pos; return *this; } ConstIterator operator+(const difference_type &pos) const { return Iterator(*mParent,mPos+pos); } ConstIterator operator-(const difference_type &pos) const { return Iterator(*mParent,mPos-pos); } difference_type operator-(const ConstIterator& other) const { return mPos - 
other.pos(); } // comparisons bool operator==(const ConstIterator& other) const { return mPos == other.mPos; } bool operator!=(const ConstIterator& other) const { return mPos != other.mPos; } bool operator>=(const ConstIterator& other) const { return mPos >= other.mPos; } bool operator<=(const ConstIterator& other) const { return mPos <= other.mPos; } bool operator< (const ConstIterator& other) const { return mPos < other.mPos; } bool operator> (const ConstIterator& other) const { return mPos > other.mPos; } // non-std methods bool isValid() const { return mParent != nullptr && mPos < mParent->size(); } size_t pos() const { return mPos; } private: size_t mPos; const PagedArray* mParent; };// Public class PagedArray::ConstIterator //////////////////////////////////////////////////////////////////////////////// // Non-const std-compliant iterator // Public member-class of PagedArray template <typename ValueT, size_t Log2PageSize> class PagedArray<ValueT, Log2PageSize>:: Iterator : public std::iterator<std::random_access_iterator_tag, ValueT> { public: using BaseT = std::iterator<std::random_access_iterator_tag, ValueT>; using difference_type = typename BaseT::difference_type; // constructors and assignment Iterator() : mPos(0), mParent(nullptr) {} Iterator(PagedArray& parent, size_t pos=0) : mPos(pos), mParent(&parent) {} Iterator(const Iterator& other) : mPos(other.mPos), mParent(other.mParent) {} Iterator& operator=(const Iterator& other) { mPos=other.mPos; mParent=other.mParent; return *this; } // prefix Iterator& operator++() { ++mPos; return *this; } Iterator& operator--() { --mPos; return *this; } // postfix Iterator operator++(int) { Iterator tmp(*this); ++mPos; return tmp; } Iterator operator--(int) { Iterator tmp(*this); --mPos; return tmp; } // value access ValueT& operator*() const { return (*mParent)[mPos]; } ValueT* operator->() const { return &(this->operator*()); } ValueT& operator[](const difference_type& pos) const { return (*mParent)[mPos+pos]; } // 
offset Iterator& operator+=(const difference_type& pos) { mPos += pos; return *this; } Iterator& operator-=(const difference_type& pos) { mPos -= pos; return *this; } Iterator operator+(const difference_type &pos) const { return Iterator(*mParent, mPos+pos); } Iterator operator-(const difference_type &pos) const { return Iterator(*mParent, mPos-pos); } difference_type operator-(const Iterator& other) const { return mPos - other.pos(); } // comparisons bool operator==(const Iterator& other) const { return mPos == other.mPos; } bool operator!=(const Iterator& other) const { return mPos != other.mPos; } bool operator>=(const Iterator& other) const { return mPos >= other.mPos; } bool operator<=(const Iterator& other) const { return mPos <= other.mPos; } bool operator< (const Iterator& other) const { return mPos < other.mPos; } bool operator> (const Iterator& other) const { return mPos > other.mPos; } // non-std methods bool isValid() const { return mParent != nullptr && mPos < mParent->size(); } size_t pos() const { return mPos; } private: size_t mPos; PagedArray* mParent; };// Public class PagedArray::Iterator //////////////////////////////////////////////////////////////////////////////// // Private member-class of PagedArray implementing a memory page template <typename ValueT, size_t Log2PageSize> class PagedArray<ValueT, Log2PageSize>:: Page { public: static const size_t Size = 1UL << Log2PageSize; static const size_t Mask = Size - 1UL; static size_t memUsage() { return sizeof(ValueT)*Size; } // Raw memory allocation without any initialization Page() : mData(reinterpret_cast<ValueT*>(new char[sizeof(ValueT)*Size])) {} ~Page() { delete [] mData; } Page(const Page&) = delete;//copy construction is not implemented Page& operator=(const Page&) = delete;//copy assignment is not implemented ValueT& operator[](const size_t i) { return mData[i & Mask]; } const ValueT& operator[](const size_t i) const { return mData[i & Mask]; } void fill(const ValueT& v) { ValueT* dst = 
mData; for (size_t i=Size; i; --i) *dst++ = v; } ValueT* data() { return mData; } // Copy the first n elements of this Page to dst (which is assumed to large // enough to hold the n elements). void copy(ValueType *dst, size_t n) const { const ValueT* src = mData; for (size_t i=n; i; --i) *dst++ = *src++; } protected: ValueT* mData; };// Private class PagedArray::Page //////////////////////////////////////////////////////////////////////////////// } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_PAGED_ARRAY_HAS_BEEN_INCLUDED
29,348
C
39.20411
101
0.626755
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/Formats.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "Formats.h" #include <openvdb/Platform.h> #include <iostream> #include <iomanip> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { int printBytes(std::ostream& os, uint64_t bytes, const std::string& head, const std::string& tail, bool exact, int width, int precision) { const uint64_t one = 1; int group = 0; // Write to a string stream so that I/O manipulators like // std::setprecision() don't alter the output stream. std::ostringstream ostr; ostr << head; ostr << std::setprecision(precision) << std::setiosflags(std::ios::fixed); if (bytes >> 40) { ostr << std::setw(width) << (double(bytes) / double(one << 40)) << " TB"; group = 4; } else if (bytes >> 30) { ostr << std::setw(width) << (double(bytes) / double(one << 30)) << " GB"; group = 3; } else if (bytes >> 20) { ostr << std::setw(width) << (double(bytes) / double(one << 20)) << " MB"; group = 2; } else if (bytes >> 10) { ostr << std::setw(width) << (double(bytes) / double(one << 10)) << " KB"; group = 1; } else { ostr << std::setw(width) << bytes << " Bytes"; } if (exact && group) ostr << " (" << bytes << " Bytes)"; ostr << tail; os << ostr.str(); return group; } int printNumber(std::ostream& os, uint64_t number, const std::string& head, const std::string& tail, bool exact, int width, int precision) { int group = 0; // Write to a string stream so that I/O manipulators like // std::setprecision() don't alter the output stream. 
std::ostringstream ostr; ostr << head; ostr << std::setprecision(precision) << std::setiosflags(std::ios::fixed); if (number / UINT64_C(1000000000000)) { ostr << std::setw(width) << (double(number) / 1000000000000.0) << " trillion"; group = 4; } else if (number / UINT64_C(1000000000)) { ostr << std::setw(width) << (double(number) / 1000000000.0) << " billion"; group = 3; } else if (number / UINT64_C(1000000)) { ostr << std::setw(width) << (double(number) / 1000000.0) << " million"; group = 2; } else if (number / UINT64_C(1000)) { ostr << std::setw(width) << (double(number) / 1000.0) << " thousand"; group = 1; } else { ostr << std::setw(width) << number; } if (exact && group) ostr << " (" << number << ")"; ostr << tail; os << ostr.str(); return group; } int printTime(std::ostream& os, double milliseconds, const std::string& head, const std::string& tail, int width, int precision, int verbose) { int group = 0; // Write to a string stream so that I/O manipulators like // std::setprecision() don't alter the output stream. std::ostringstream ostr; ostr << head; ostr << std::setprecision(precision) << std::setiosflags(std::ios::fixed); if (milliseconds >= 1000.0) {// one second or longer const uint32_t seconds = static_cast<uint32_t>(milliseconds / 1000.0) % 60 ; const uint32_t minutes = static_cast<uint32_t>(milliseconds / (1000.0*60)) % 60; const uint32_t hours = static_cast<uint32_t>(milliseconds / (1000.0*60*60)) % 24; const uint32_t days = static_cast<uint32_t>(milliseconds / (1000.0*60*60*24)); if (days>0) { ostr << days << (verbose==0 ? "d " : days>1 ? " days, " : " day, "); group = 4; } if (hours>0) { ostr << hours << (verbose==0 ? "h " : hours>1 ? " hours, " : " hour, "); if (!group) group = 3; } if (minutes>0) { ostr << minutes << (verbose==0 ? "m " : minutes>1 ? " minutes, " : " minute, "); if (!group) group = 2; } if (seconds>0) { if (verbose) { ostr << seconds << (seconds>1 ? 
" seconds and " : " second and "); const double msec = milliseconds - (seconds + (minutes + (hours + days * 24) * 60) * 60) * 1000.0; ostr << std::setw(width) << msec << " milliseconds (" << milliseconds << "ms)"; } else { const double sec = milliseconds/1000.0 - (minutes + (hours + days * 24) * 60) * 60; ostr << std::setw(width) << sec << "s"; } } else {// zero seconds const double msec = milliseconds - (minutes + (hours + days * 24) * 60) * 60 * 1000.0; if (verbose) { ostr << std::setw(width) << msec << " milliseconds (" << milliseconds << "ms)"; } else { ostr << std::setw(width) << msec << "ms"; } } if (!group) group = 1; } else {// less than a second ostr << std::setw(width) << milliseconds << (verbose ? " milliseconds" : "ms"); } ostr << tail; os << ostr.str(); return group; } } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb
4,970
C++
32.362416
108
0.551911
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestNodeMask.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/util/NodeMasks.h> #include <openvdb/io/Compression.h> using openvdb::Index; template<typename MaskType> void TestAll(); class TestNodeMask: public ::testing::Test { }; template<typename MaskType> void TestAll() { EXPECT_TRUE(MaskType::memUsage() == MaskType::SIZE/8); const Index SIZE = MaskType::SIZE > 512 ? 512 : MaskType::SIZE; {// default constructor MaskType m;//all bits are off for (Index i=0; i<SIZE; ++i) EXPECT_TRUE(m.isOff(i)); for (Index i=0; i<SIZE; ++i) EXPECT_TRUE(!m.isOn(i)); EXPECT_TRUE(m.isOff()); EXPECT_TRUE(!m.isOn()); EXPECT_TRUE(m.countOn() == 0); EXPECT_TRUE(m.countOff()== MaskType::SIZE); m.toggle();//all bits are on EXPECT_TRUE(m.isOn()); EXPECT_TRUE(!m.isOff()); EXPECT_TRUE(m.countOn() == MaskType::SIZE); EXPECT_TRUE(m.countOff()== 0); for (Index i=0; i<SIZE; ++i) EXPECT_TRUE(!m.isOff(i)); for (Index i=0; i<SIZE; ++i) EXPECT_TRUE(m.isOn(i)); } {// On constructor MaskType m(true);//all bits are on EXPECT_TRUE(m.isOn()); EXPECT_TRUE(!m.isOff()); EXPECT_TRUE(m.countOn() == MaskType::SIZE); EXPECT_TRUE(m.countOff()== 0); for (Index i=0; i<SIZE; ++i) EXPECT_TRUE(!m.isOff(i)); for (Index i=0; i<SIZE; ++i) EXPECT_TRUE(m.isOn(i)); m.toggle(); for (Index i=0; i<SIZE; ++i) EXPECT_TRUE(m.isOff(i)); for (Index i=0; i<SIZE; ++i) EXPECT_TRUE(!m.isOn(i)); EXPECT_TRUE(m.isOff()); EXPECT_TRUE(!m.isOn()); EXPECT_TRUE(m.countOn() == 0); EXPECT_TRUE(m.countOff()== MaskType::SIZE); } {// Off constructor MaskType m(false); EXPECT_TRUE(m.isOff()); EXPECT_TRUE(!m.isOn()); EXPECT_TRUE(m.countOn() == 0); EXPECT_TRUE(m.countOff()== MaskType::SIZE); m.setOn(); EXPECT_TRUE(m.isOn()); EXPECT_TRUE(!m.isOff()); EXPECT_TRUE(m.countOn() == MaskType::SIZE); EXPECT_TRUE(m.countOff()== 0); m = MaskType();//copy asignment EXPECT_TRUE(m.isOff()); EXPECT_TRUE(!m.isOn()); EXPECT_TRUE(m.countOn() == 0); 
EXPECT_TRUE(m.countOff()== MaskType::SIZE); } {// test setOn, setOff, findFirstOn and findFiratOff MaskType m; for (Index i=0; i<SIZE; ++i) { m.setOn(i); EXPECT_TRUE(m.countOn() == 1); EXPECT_TRUE(m.findFirstOn() == i); EXPECT_TRUE(m.findFirstOff() == (i==0 ? 1 : 0)); for (Index j=0; j<SIZE; ++j) { EXPECT_TRUE( i==j ? m.isOn(j) : m.isOff(j) ); } m.setOff(i); EXPECT_TRUE(m.countOn() == 0); EXPECT_TRUE(m.findFirstOn() == MaskType::SIZE); } } {// OnIterator MaskType m; for (Index i=0; i<SIZE; ++i) { m.setOn(i); for (typename MaskType::OnIterator iter=m.beginOn(); iter; ++iter) { EXPECT_TRUE( iter.pos() == i ); } EXPECT_TRUE(m.countOn() == 1); m.setOff(i); EXPECT_TRUE(m.countOn() == 0); } } {// OffIterator MaskType m(true); for (Index i=0; i<SIZE; ++i) { m.setOff(i); EXPECT_TRUE(m.countOff() == 1); for (typename MaskType::OffIterator iter=m.beginOff(); iter; ++iter) { EXPECT_TRUE( iter.pos() == i ); } EXPECT_TRUE(m.countOn() == MaskType::SIZE-1); m.setOn(i); EXPECT_TRUE(m.countOff() == 0); EXPECT_TRUE(m.countOn() == MaskType::SIZE); } } {// isConstant MaskType m(true);//all bits are on bool isOn = false; EXPECT_TRUE(!m.isOff()); EXPECT_TRUE(m.isOn()); EXPECT_TRUE(m.isConstant(isOn)); EXPECT_TRUE(isOn); m.setOff(MaskType::SIZE-1);//sets last bit off EXPECT_TRUE(!m.isOff()); EXPECT_TRUE(!m.isOn()); EXPECT_TRUE(!m.isConstant(isOn)); m.setOff();//sets all bits off EXPECT_TRUE(m.isOff()); EXPECT_TRUE(!m.isOn()); EXPECT_TRUE(m.isConstant(isOn)); EXPECT_TRUE(!isOn); } {// DenseIterator MaskType m(false); for (Index i=0; i<SIZE; ++i) { m.setOn(i); EXPECT_TRUE(m.countOn() == 1); for (typename MaskType::DenseIterator iter=m.beginDense(); iter; ++iter) { EXPECT_TRUE( iter.pos()==i ? 
*iter : !*iter ); } m.setOff(i); EXPECT_TRUE(m.countOn() == 0); } } } TEST_F(TestNodeMask, testCompress) { using namespace openvdb; using ValueT = int; using MaskT = openvdb::util::NodeMask<1>; { // no inactive values MaskT valueMask(true); MaskT childMask; std::vector<int> values = {0,1,2,3,4,5,6,7}; int background = 0; EXPECT_EQ(valueMask.countOn(), Index32(8)); EXPECT_EQ(childMask.countOn(), Index32(0)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(maskCompress.metadata, int8_t(openvdb::io::NO_MASK_OR_INACTIVE_VALS)); EXPECT_EQ(maskCompress.inactiveVal[0], background); EXPECT_EQ(maskCompress.inactiveVal[1], background); } { // all inactive values are +background MaskT valueMask; MaskT childMask; std::vector<int> values = {10,10,10,10,10,10,10,10}; int background = 10; EXPECT_EQ(valueMask.countOn(), Index32(0)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(maskCompress.metadata, int8_t(openvdb::io::NO_MASK_OR_INACTIVE_VALS)); EXPECT_EQ(maskCompress.inactiveVal[0], background); EXPECT_EQ(maskCompress.inactiveVal[1], background); } { // all inactive values are -background MaskT valueMask; MaskT childMask; std::vector<int> values = {-10,-10,-10,-10,-10,-10,-10,-10}; int background = 10; EXPECT_EQ(valueMask.countOn(), Index32(0)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(maskCompress.metadata, int8_t(openvdb::io::NO_MASK_AND_MINUS_BG)); EXPECT_EQ(maskCompress.inactiveVal[0], -background); EXPECT_EQ(maskCompress.inactiveVal[1], background); } { // all inactive vals have the same non-background val MaskT valueMask(true); MaskT childMask; std::vector<int> values = {0,1,500,500,4,500,500,7}; int background = 10; valueMask.setOff(2); valueMask.setOff(3); valueMask.setOff(5); valueMask.setOff(6); EXPECT_EQ(valueMask.countOn(), Index32(4)); 
openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(int(maskCompress.metadata), int(openvdb::io::NO_MASK_AND_ONE_INACTIVE_VAL)); EXPECT_EQ(maskCompress.inactiveVal[0], 500); EXPECT_EQ(maskCompress.inactiveVal[1], background); } { // mask selects between -background and +background MaskT valueMask; MaskT childMask; std::vector<int> values = {0,10,10,-10,4,10,-10,10}; int background = 10; valueMask.setOn(0); valueMask.setOn(4); EXPECT_EQ(valueMask.countOn(), Index32(2)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(int(maskCompress.metadata), int(openvdb::io::MASK_AND_NO_INACTIVE_VALS)); EXPECT_EQ(maskCompress.inactiveVal[0], -background); EXPECT_EQ(maskCompress.inactiveVal[1], background); } { // mask selects between -background and +background MaskT valueMask; MaskT childMask; std::vector<int> values = {0,-10,-10,10,4,-10,10,-10}; int background = 10; valueMask.setOn(0); valueMask.setOn(4); EXPECT_EQ(valueMask.countOn(), Index32(2)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(int(maskCompress.metadata), int(openvdb::io::MASK_AND_NO_INACTIVE_VALS)); EXPECT_EQ(maskCompress.inactiveVal[0], -background); EXPECT_EQ(maskCompress.inactiveVal[1], background); } { // mask selects between backgd and one other inactive val MaskT valueMask; MaskT childMask; std::vector<int> values = {0,500,500,10,4,500,10,500}; int background = 10; valueMask.setOn(0); valueMask.setOn(4); EXPECT_EQ(valueMask.countOn(), Index32(2)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(int(maskCompress.metadata), int(openvdb::io::MASK_AND_ONE_INACTIVE_VAL)); EXPECT_EQ(maskCompress.inactiveVal[0], 500); EXPECT_EQ(maskCompress.inactiveVal[1], background); } { // mask selects between two non-background inactive vals MaskT valueMask; 
MaskT childMask; std::vector<int> values = {0,500,500,2000,4,500,2000,500}; int background = 10; valueMask.setOn(0); valueMask.setOn(4); EXPECT_EQ(valueMask.countOn(), Index32(2)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(int(maskCompress.metadata), int(openvdb::io::MASK_AND_TWO_INACTIVE_VALS)); EXPECT_EQ(maskCompress.inactiveVal[0], 500); // first unique value EXPECT_EQ(maskCompress.inactiveVal[1], 2000); // second unique value } { // mask selects between two non-background inactive vals MaskT valueMask; MaskT childMask; std::vector<int> values = {0,2000,2000,500,4,2000,500,2000}; int background = 10; valueMask.setOn(0); valueMask.setOn(4); EXPECT_EQ(valueMask.countOn(), Index32(2)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(int(maskCompress.metadata), int(openvdb::io::MASK_AND_TWO_INACTIVE_VALS)); EXPECT_EQ(maskCompress.inactiveVal[0], 2000); // first unique value EXPECT_EQ(maskCompress.inactiveVal[1], 500); // second unique value } { // > 2 inactive vals, so no mask compression at all MaskT valueMask; MaskT childMask; std::vector<int> values = {0,1000,2000,3000,4,2000,500,2000}; int background = 10; valueMask.setOn(0); valueMask.setOn(4); EXPECT_EQ(valueMask.countOn(), Index32(2)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(int(maskCompress.metadata), int(openvdb::io::NO_MASK_AND_ALL_VALS)); EXPECT_EQ(maskCompress.inactiveVal[0], 1000); // first unique value EXPECT_EQ(maskCompress.inactiveVal[1], 2000); // second unique value } { // mask selects between two non-background inactive vals (selective child mask) MaskT valueMask; MaskT childMask; std::vector<int> values = {0,1000,2000,3000,4,2000,500,2000}; int background = 0; valueMask.setOn(0); valueMask.setOn(4); EXPECT_EQ(valueMask.countOn(), Index32(2)); childMask.setOn(3); childMask.setOn(6); 
EXPECT_EQ(childMask.countOn(), Index32(2)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(int(maskCompress.metadata), int(openvdb::io::MASK_AND_TWO_INACTIVE_VALS)); EXPECT_EQ(maskCompress.inactiveVal[0], 1000); // first unique value EXPECT_EQ(maskCompress.inactiveVal[1], 2000); // secone unique value } } TEST_F(TestNodeMask, testAll4) { TestAll<openvdb::util::NodeMask<4> >(); } TEST_F(TestNodeMask, testAll3) { TestAll<openvdb::util::NodeMask<3> >(); } TEST_F(TestNodeMask, testAll2) { TestAll<openvdb::util::NodeMask<2> >(); } TEST_F(TestNodeMask, testAll1) { TestAll<openvdb::util::NodeMask<1> >(); }
12,614
C++
34.435393
94
0.585064
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestDelayedLoadMetadata.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/openvdb.h> #include <openvdb/io/DelayedLoadMetadata.h> class TestDelayedLoadMetadata : public ::testing::Test { }; TEST_F(TestDelayedLoadMetadata, test) { using namespace openvdb::io; // registration EXPECT_TRUE(!DelayedLoadMetadata::isRegisteredType()); DelayedLoadMetadata::registerType(); EXPECT_TRUE(DelayedLoadMetadata::isRegisteredType()); DelayedLoadMetadata::unregisterType(); EXPECT_TRUE(!DelayedLoadMetadata::isRegisteredType()); openvdb::initialize(); EXPECT_TRUE(DelayedLoadMetadata::isRegisteredType()); // construction DelayedLoadMetadata metadata; EXPECT_TRUE(metadata.empty()); metadata.resizeMask(size_t(2)); EXPECT_TRUE(!metadata.empty()); metadata.setMask(0, DelayedLoadMetadata::MaskType(5)); metadata.setMask(1, DelayedLoadMetadata::MaskType(-3)); EXPECT_EQ(metadata.getMask(0), DelayedLoadMetadata::MaskType(5)); EXPECT_EQ(metadata.getMask(1), DelayedLoadMetadata::MaskType(-3)); metadata.resizeCompressedSize(size_t(3)); metadata.setCompressedSize(0, DelayedLoadMetadata::CompressedSizeType(6)); metadata.setCompressedSize(1, DelayedLoadMetadata::CompressedSizeType(101)); metadata.setCompressedSize(2, DelayedLoadMetadata::CompressedSizeType(-13522)); EXPECT_EQ(metadata.getCompressedSize(0), DelayedLoadMetadata::CompressedSizeType(6)); EXPECT_EQ(metadata.getCompressedSize(1), DelayedLoadMetadata::CompressedSizeType(101)); EXPECT_EQ(metadata.getCompressedSize(2), DelayedLoadMetadata::CompressedSizeType(-13522)); // copy construction DelayedLoadMetadata metadataCopy1(metadata); EXPECT_TRUE(!metadataCopy1.empty()); EXPECT_EQ(metadataCopy1.getMask(0), DelayedLoadMetadata::MaskType(5)); EXPECT_EQ(metadataCopy1.getCompressedSize(2), DelayedLoadMetadata::CompressedSizeType(-13522)); openvdb::Metadata::Ptr baseMetadataCopy2 = metadata.copy(); DelayedLoadMetadata::Ptr metadataCopy2 = 
openvdb::StaticPtrCast<DelayedLoadMetadata>(baseMetadataCopy2); EXPECT_EQ(metadataCopy2->getMask(0), DelayedLoadMetadata::MaskType(5)); EXPECT_EQ(metadataCopy2->getCompressedSize(2), DelayedLoadMetadata::CompressedSizeType(-13522)); // I/O metadata.clear(); EXPECT_TRUE(metadata.empty()); const size_t headerInitialSize(sizeof(openvdb::Index32)); const size_t headerCountSize(sizeof(openvdb::Index32)); const size_t headerMaskSize(sizeof(openvdb::Index32)); const size_t headerCompressedSize(sizeof(openvdb::Index32)); const size_t headerTotalSize(headerInitialSize + headerCountSize + headerMaskSize + headerCompressedSize); { // empty buffer std::stringstream ss(std::ios_base::out | std::ios_base::in | std::ios_base::binary); metadata.write(ss); EXPECT_EQ(ss.tellp(), std::streampos(headerInitialSize)); DelayedLoadMetadata newMetadata; newMetadata.read(ss); EXPECT_TRUE(newMetadata.empty()); } { // single value, no compressed sizes metadata.clear(); metadata.resizeMask(size_t(1)); metadata.setMask(0, DelayedLoadMetadata::MaskType(5)); std::stringstream ss(std::ios_base::out | std::ios_base::in | std::ios_base::binary); metadata.write(ss); std::streampos expectedPos(headerTotalSize + sizeof(int8_t)); EXPECT_EQ(ss.tellp(), expectedPos); EXPECT_EQ(static_cast<size_t>(expectedPos)-headerInitialSize, size_t(metadata.size())); DelayedLoadMetadata newMetadata; newMetadata.read(ss); EXPECT_TRUE(!newMetadata.empty()); EXPECT_EQ(newMetadata.getMask(0), DelayedLoadMetadata::MaskType(5)); } { // single value, with compressed sizes metadata.clear(); metadata.resizeMask(size_t(1)); metadata.setMask(0, DelayedLoadMetadata::MaskType(5)); metadata.resizeCompressedSize(size_t(1)); metadata.setCompressedSize(0, DelayedLoadMetadata::CompressedSizeType(-10322)); std::stringstream ss(std::ios_base::out | std::ios_base::in | std::ios_base::binary); metadata.write(ss); std::streampos expectedPos(headerTotalSize + sizeof(int8_t) + sizeof(int64_t)); EXPECT_EQ(expectedPos, ss.tellp()); 
EXPECT_EQ(static_cast<size_t>(ss.tellp())-headerInitialSize, size_t(metadata.size())); DelayedLoadMetadata newMetadata; newMetadata.read(ss); EXPECT_TRUE(!newMetadata.empty()); EXPECT_EQ(newMetadata.getMask(0), DelayedLoadMetadata::MaskType(5)); EXPECT_EQ(newMetadata.getCompressedSize(0), DelayedLoadMetadata::CompressedSizeType(-10322)); } { // larger, but compressible buffer metadata.clear(); const size_t size = 1000; const size_t uncompressedBufferSize = (sizeof(int8_t)+sizeof(int64_t))*size; metadata.resizeMask(size); metadata.resizeCompressedSize(size); for (size_t i = 0; i < size; i++) { metadata.setMask(i, DelayedLoadMetadata::MaskType(static_cast<int8_t>((i%32)*2))); metadata.setCompressedSize(i, DelayedLoadMetadata::CompressedSizeType(static_cast<int64_t>((i%64)*200))); } std::stringstream ss(std::ios_base::out | std::ios_base::in | std::ios_base::binary); metadata.write(ss); EXPECT_EQ(static_cast<size_t>(ss.tellp())-headerInitialSize, size_t(metadata.size())); std::streampos uncompressedSize(uncompressedBufferSize + headerTotalSize); #ifdef OPENVDB_USE_BLOSC // expect a compression ratio of more than 10x EXPECT_TRUE(ss.tellp() * 10 < uncompressedSize); #else EXPECT_TRUE(ss.tellp() == uncompressedSize); #endif DelayedLoadMetadata newMetadata; newMetadata.read(ss); EXPECT_EQ(metadata.size(), newMetadata.size()); for (size_t i = 0; i < size; i++) { EXPECT_EQ(metadata.getMask(i), newMetadata.getMask(i)); } } // when read as unknown metadata should be treated as temporary metadata { metadata.clear(); metadata.resizeMask(size_t(1)); metadata.setMask(0, DelayedLoadMetadata::MaskType(5)); std::stringstream ss(std::ios_base::out | std::ios_base::in | std::ios_base::binary); openvdb::MetaMap metamap; metamap.insertMeta("delayload", metadata); EXPECT_EQ(size_t(1), metamap.metaCount()); metamap.writeMeta(ss); { openvdb::MetaMap newMetamap; newMetamap.readMeta(ss); EXPECT_EQ(size_t(1), newMetamap.metaCount()); } { DelayedLoadMetadata::unregisterType(); openvdb::MetaMap 
newMetamap; newMetamap.readMeta(ss); EXPECT_EQ(size_t(0), newMetamap.metaCount()); } } }
6,991
C++
32.941747
110
0.672865
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestDiagnostics.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <limits> #include <openvdb/openvdb.h> #include <openvdb/Exceptions.h> #include <openvdb/math/Math.h> #include <openvdb/math/Stats.h> #include <openvdb/tools/Diagnostics.h> #include <openvdb/tools/Statistics.h> #include <openvdb/tools/LevelSetSphere.h> #include <openvdb/tools/LevelSetUtil.h> class TestDiagnostics: public ::testing::Test { }; //////////////////////////////////////// TEST_F(TestDiagnostics, testCheck) { const float val = 1.0f; const float nan = std::numeric_limits<float>::quiet_NaN(); const float inf1= std::numeric_limits<float>::infinity(); const openvdb::math::Vec3<float> inf2(val, inf1, val); {//test CheckNan openvdb::tools::CheckNan<openvdb::FloatGrid> c; EXPECT_TRUE(!c(val)); EXPECT_TRUE( c(nan)); EXPECT_TRUE( c(nan)); EXPECT_TRUE(!c(inf1)); EXPECT_TRUE(!c(inf2)); } {//test CheckInf openvdb::tools::CheckInf<openvdb::FloatGrid> c; EXPECT_TRUE(!c(val)); EXPECT_TRUE(!c(nan)); EXPECT_TRUE(!c(nan)); EXPECT_TRUE( c(inf1)); EXPECT_TRUE( c(inf2)); } {//test CheckFinite openvdb::tools::CheckFinite<openvdb::FloatGrid> c; EXPECT_TRUE(!c(val)); EXPECT_TRUE( c(nan)); EXPECT_TRUE( c(nan)); EXPECT_TRUE( c(inf1)); EXPECT_TRUE( c(inf2)); } {//test CheckMin openvdb::tools::CheckMin<openvdb::FloatGrid> c(0.0f); EXPECT_TRUE(!c( 0.5f)); EXPECT_TRUE(!c( 0.0f)); EXPECT_TRUE(!c( 1.0f)); EXPECT_TRUE(!c( 1.1f)); EXPECT_TRUE( c(-0.1f)); } {//test CheckMax openvdb::tools::CheckMax<openvdb::FloatGrid> c(0.0f); EXPECT_TRUE( c( 0.5f)); EXPECT_TRUE(!c( 0.0f)); EXPECT_TRUE( c( 1.0f)); EXPECT_TRUE( c( 1.1f)); EXPECT_TRUE(!c(-0.1f)); } {//test CheckRange // first check throw on construction from an invalid range EXPECT_THROW(openvdb::tools::CheckRange<openvdb::FloatGrid> c(1.0f, 0.0f), openvdb::ValueError); openvdb::tools::CheckRange<openvdb::FloatGrid> c(0.0f, 1.0f); EXPECT_TRUE(!c(0.5f)); EXPECT_TRUE(!c(0.0f)); EXPECT_TRUE(!c(1.0f)); EXPECT_TRUE( c(1.1f)); 
EXPECT_TRUE(c(-0.1f)); } }//testCheck TEST_F(TestDiagnostics, testDiagnose) { using namespace openvdb; const float val = 1.0f; const float nan = std::numeric_limits<float>::quiet_NaN(); const float inf = std::numeric_limits<float>::infinity(); {//empty grid FloatGrid grid; tools::Diagnose<FloatGrid> d(grid); tools::CheckNan<FloatGrid> c; std::string str = d.check(c); //std::cerr << "Empty grid:\n" << str; EXPECT_EQ(std::string(), str); EXPECT_EQ(0, int(d.failureCount())); } {//non-empty grid FloatGrid grid; grid.tree().setValue(Coord(-1,3,6), val); tools::Diagnose<FloatGrid> d(grid); tools::CheckNan<FloatGrid> c; std::string str = d.check(c); //std::cerr << "Non-Empty grid:\n" << str; EXPECT_EQ(std::string(), str); EXPECT_EQ(0, int(d.failureCount())); } {//nan grid FloatGrid grid; grid.tree().setValue(Coord(-1,3,6), nan); tools::Diagnose<FloatGrid> d(grid); tools::CheckNan<FloatGrid> c; std::string str = d.check(c); //std::cerr << "NaN grid:\n" << str; EXPECT_TRUE(!str.empty()); EXPECT_EQ(1, int(d.failureCount())); } {//nan and infinite grid FloatGrid grid; grid.tree().setValue(Coord(-1,3,6), nan); grid.tree().setValue(Coord(10,30,60), inf); tools::Diagnose<FloatGrid> d(grid); tools::CheckFinite<FloatGrid> c; std::string str = d.check(c); //std::cerr << "Not Finite grid:\n" << str; EXPECT_TRUE(!str.empty()); EXPECT_EQ(2, int(d.failureCount())); } {//out-of-range grid FloatGrid grid(10.0f); grid.tree().setValue(Coord(-1,3,6), 1.0f); grid.tree().setValue(Coord(10,30,60), 1.5); grid.tree().fill(math::CoordBBox::createCube(math::Coord(0),8), 20.0f, true); tools::Diagnose<FloatGrid> d(grid); tools::CheckRange<FloatGrid> c(0.0f, 1.0f); std::string str = d.check(c); //std::cerr << "out-of-range grid:\n" << str; EXPECT_TRUE(!str.empty()); EXPECT_EQ(3, int(d.failureCount())); } const float radius = 4.3f; const openvdb::Vec3f center(15.8f, 13.2f, 16.7f); const float voxelSize = 0.1f, width = 2.0f, gamma=voxelSize*width; FloatGrid::Ptr gridSphere = 
tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, width); //gridSphere->print(std::cerr, 2); {// Check min/max of active values math::Extrema ex = tools::extrema(gridSphere->cbeginValueOn()); //std::cerr << "Min = " << ex.min() << " max = " << ex.max() << std::endl; EXPECT_TRUE(ex.min() > -voxelSize*width); EXPECT_TRUE(ex.max() < voxelSize*width); } {// Check min/max of all values math::Extrema ex = tools::extrema(gridSphere->cbeginValueAll()); //std::cerr << "Min = " << ex.min() << " max = " << ex.max() << std::endl; EXPECT_TRUE(ex.min() >= -voxelSize*width); EXPECT_TRUE(ex.max() <= voxelSize*width); } {// check range of all values in a sphere w/o mask tools::CheckRange<FloatGrid, true, true, FloatGrid::ValueAllCIter> c(-gamma, gamma); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c); //std::cerr << "Values out of range:\n" << str; EXPECT_TRUE(str.empty()); EXPECT_EQ(0, int(d.valueCount())); EXPECT_EQ(0, int(d.failureCount())); } {// check range of on values in a sphere w/o mask tools::CheckRange<FloatGrid, true, true, FloatGrid::ValueOnCIter> c(-gamma, gamma); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c); //std::cerr << "Values out of range:\n" << str; EXPECT_TRUE(str.empty()); EXPECT_EQ(0, int(d.valueCount())); EXPECT_EQ(0, int(d.failureCount())); } {// check range of off tiles in a sphere w/o mask tools::CheckRange<FloatGrid, true, true, FloatGrid::ValueOffCIter> c(-gamma, gamma); tools::Diagnose<FloatGrid> d(*gridSphere); {// check off tile iterator FloatGrid::ValueOffCIter i(gridSphere->tree()); i.setMaxDepth(FloatGrid::ValueOffCIter::LEAF_DEPTH - 1); for (; i; ++i) EXPECT_TRUE( math::Abs(*i) <= gamma); } std::string str = d.check(c); //std::cerr << "Values out of range:\n" << str; EXPECT_TRUE(str.empty()); EXPECT_EQ(0, int(d.valueCount())); EXPECT_EQ(0, int(d.failureCount())); } {// check range of sphere w/o mask tools::CheckRange<FloatGrid> c(0.0f, gamma); tools::Diagnose<FloatGrid> 
d(*gridSphere); std::string str = d.check(c); //std::cerr << "Values out of range:\n" << str; EXPECT_TRUE(!str.empty()); EXPECT_EQ(0, int(d.valueCount())); EXPECT_TRUE(d.failureCount() < gridSphere->activeVoxelCount()); } {// check range of sphere w mask tools::CheckRange<FloatGrid> c(0.0f, gamma); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c, true); //std::cerr << "Values out of range:\n" << str; EXPECT_TRUE(!str.empty()); EXPECT_EQ(d.valueCount(), d.valueCount()); EXPECT_TRUE(d.failureCount() < gridSphere->activeVoxelCount()); } {// check min of sphere w/o mask tools::CheckMin<FloatGrid> c(-gamma); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c); //std::cerr << "Min values:\n" << str; EXPECT_EQ(std::string(), str); EXPECT_EQ(0, int(d.valueCount())); EXPECT_EQ(0, int(d.failureCount())); } {// check max of sphere w/o mask tools::CheckMax<FloatGrid> c(gamma); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c); //std::cerr << "MAX values:\n" << str; EXPECT_TRUE(str.empty()); EXPECT_EQ(0, int(d.valueCount())); EXPECT_EQ(0, int(d.failureCount())); } {// check norm of gradient of sphere w/o mask tools::CheckEikonal<FloatGrid> c(*gridSphere, 0.97f, 1.03f); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c, false, true, false, false); //std::cerr << "NormGrad:\n" << str; EXPECT_TRUE(str.empty()); EXPECT_EQ(0, int(d.valueCount())); EXPECT_EQ(0, int(d.failureCount())); } {// check norm of gradient of sphere w/o mask tools::CheckNormGrad<FloatGrid> c(*gridSphere, 0.75f, 1.25f); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c, false, true, false, false); //std::cerr << "NormGrad:\n" << str; EXPECT_TRUE(str.empty()); EXPECT_EQ(0, int(d.valueCount())); EXPECT_EQ(0, int(d.failureCount())); } {// check inactive values tools::CheckMagnitude<FloatGrid, FloatGrid::ValueOffCIter> c(gamma); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c); 
//std::cerr << "Magnitude:\n" << str; EXPECT_TRUE(str.empty()); EXPECT_EQ(0, int(d.valueCount())); EXPECT_EQ(0, int(d.failureCount())); } }// testDiagnose TEST_F(TestDiagnostics, testCheckLevelSet) { using namespace openvdb; const float radius = 4.3f; const Vec3f center(15.8f, 13.2f, 16.7f); const float voxelSize = 0.1f, width = LEVEL_SET_HALF_WIDTH; FloatGrid::Ptr grid = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, width); //tools::CheckLevelSet<FloatGrid> c(*grid); //std::string str = c.check(); std::string str = tools::checkLevelSet(*grid); EXPECT_TRUE(str.empty()); //std::cerr << "\n" << str << std::endl; grid->tree().setValue(Coord(0,0,0), voxelSize*(width+0.5f)); //str = c.check(); str = tools::checkLevelSet(*grid); EXPECT_TRUE(!str.empty()); //std::cerr << "\n" << str << std::endl; //str = c.check(6); str = tools::checkLevelSet(*grid, 6); EXPECT_TRUE(str.empty()); }// testCheckLevelSet TEST_F(TestDiagnostics, testCheckFogVolume) { using namespace openvdb; const float radius = 4.3f; const Vec3f center(15.8f, 13.2f, 16.7f); const float voxelSize = 0.1f, width = LEVEL_SET_HALF_WIDTH; FloatGrid::Ptr grid = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, width); tools::sdfToFogVolume(*grid); //tools::CheckFogVolume<FloatGrid> c(*grid); //std::string str = c.check(); std::string str = tools::checkFogVolume(*grid); EXPECT_TRUE(str.empty()); //std::cerr << "\n" << str << std::endl; grid->tree().setValue(Coord(0,0,0), 1.5f); //str = c.check(); str = tools::checkFogVolume(*grid); EXPECT_TRUE(!str.empty()); //std::cerr << "\n" << str << std::endl; str = tools::checkFogVolume(*grid, 5); //str = c.check(5); EXPECT_TRUE(str.empty()); }// testCheckFogVolume TEST_F(TestDiagnostics, testUniqueInactiveValues) { openvdb::FloatGrid grid; grid.tree().setValueOff(openvdb::Coord(0,0,0), -1); grid.tree().setValueOff(openvdb::Coord(0,0,1), -2); grid.tree().setValueOff(openvdb::Coord(0,1,0), -3); grid.tree().setValue(openvdb::Coord(1,0,0), 1); 
std::vector<float> values; EXPECT_TRUE(openvdb::tools::uniqueInactiveValues(grid, values, 4)); EXPECT_EQ(4, int(values.size())); EXPECT_TRUE(openvdb::math::isApproxEqual(values[0], -3.0f)); EXPECT_TRUE(openvdb::math::isApproxEqual(values[1], -2.0f)); EXPECT_TRUE(openvdb::math::isApproxEqual(values[2], -1.0f)); EXPECT_TRUE(openvdb::math::isApproxEqual(values[3], 0.0f)); // test with level set sphere const float radius = 4.3f; const openvdb::Vec3f center(15.8f, 13.2f, 16.7f); const float voxelSize = 0.5f, width = 2.0f; openvdb::FloatGrid::Ptr gridSphere = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(radius, center, voxelSize, width); EXPECT_TRUE(openvdb::tools::uniqueInactiveValues(*gridSphere.get(), values, 2)); EXPECT_EQ(2, int(values.size())); EXPECT_TRUE(openvdb::math::isApproxEqual(values[0], -voxelSize * width)); EXPECT_TRUE(openvdb::math::isApproxEqual(values[1], voxelSize * width)); // test with fog volume openvdb::tools::sdfToFogVolume(*gridSphere); EXPECT_TRUE(openvdb::tools::uniqueInactiveValues(*gridSphere.get(), values, 1)); EXPECT_EQ(1, int(values.size())); EXPECT_TRUE(openvdb::math::isApproxEqual(values[0], 0.0f)); }
13,017
C++
34.763736
99
0.586464
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestInit.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/openvdb.h> class TestInit: public ::testing::Test { }; TEST_F(TestInit, test) { using namespace openvdb; initialize(); // data types EXPECT_TRUE(DoubleMetadata::isRegisteredType()); EXPECT_TRUE(FloatMetadata::isRegisteredType()); EXPECT_TRUE(Int32Metadata::isRegisteredType()); EXPECT_TRUE(Int64Metadata::isRegisteredType()); EXPECT_TRUE(StringMetadata::isRegisteredType()); EXPECT_TRUE(Vec2IMetadata::isRegisteredType()); EXPECT_TRUE(Vec2SMetadata::isRegisteredType()); EXPECT_TRUE(Vec2DMetadata::isRegisteredType()); EXPECT_TRUE(Vec3IMetadata::isRegisteredType()); EXPECT_TRUE(Vec3SMetadata::isRegisteredType()); EXPECT_TRUE(Vec3DMetadata::isRegisteredType()); // map types EXPECT_TRUE(math::AffineMap::isRegistered()); EXPECT_TRUE(math::UnitaryMap::isRegistered()); EXPECT_TRUE(math::ScaleMap::isRegistered()); EXPECT_TRUE(math::TranslationMap::isRegistered()); EXPECT_TRUE(math::ScaleTranslateMap::isRegistered()); EXPECT_TRUE(math::NonlinearFrustumMap::isRegistered()); // grid types EXPECT_TRUE(BoolGrid::isRegistered()); EXPECT_TRUE(FloatGrid::isRegistered()); EXPECT_TRUE(DoubleGrid::isRegistered()); EXPECT_TRUE(Int32Grid::isRegistered()); EXPECT_TRUE(Int64Grid::isRegistered()); EXPECT_TRUE(StringGrid::isRegistered()); EXPECT_TRUE(Vec3IGrid::isRegistered()); EXPECT_TRUE(Vec3SGrid::isRegistered()); EXPECT_TRUE(Vec3DGrid::isRegistered()); uninitialize(); EXPECT_TRUE(!DoubleMetadata::isRegisteredType()); EXPECT_TRUE(!FloatMetadata::isRegisteredType()); EXPECT_TRUE(!Int32Metadata::isRegisteredType()); EXPECT_TRUE(!Int64Metadata::isRegisteredType()); EXPECT_TRUE(!StringMetadata::isRegisteredType()); EXPECT_TRUE(!Vec2IMetadata::isRegisteredType()); EXPECT_TRUE(!Vec2SMetadata::isRegisteredType()); EXPECT_TRUE(!Vec2DMetadata::isRegisteredType()); EXPECT_TRUE(!Vec3IMetadata::isRegisteredType()); 
EXPECT_TRUE(!Vec3SMetadata::isRegisteredType()); EXPECT_TRUE(!Vec3DMetadata::isRegisteredType()); EXPECT_TRUE(!math::AffineMap::isRegistered()); EXPECT_TRUE(!math::UnitaryMap::isRegistered()); EXPECT_TRUE(!math::ScaleMap::isRegistered()); EXPECT_TRUE(!math::TranslationMap::isRegistered()); EXPECT_TRUE(!math::ScaleTranslateMap::isRegistered()); EXPECT_TRUE(!math::NonlinearFrustumMap::isRegistered()); EXPECT_TRUE(!BoolGrid::isRegistered()); EXPECT_TRUE(!FloatGrid::isRegistered()); EXPECT_TRUE(!DoubleGrid::isRegistered()); EXPECT_TRUE(!Int32Grid::isRegistered()); EXPECT_TRUE(!Int64Grid::isRegistered()); EXPECT_TRUE(!StringGrid::isRegistered()); EXPECT_TRUE(!Vec3IGrid::isRegistered()); EXPECT_TRUE(!Vec3SGrid::isRegistered()); EXPECT_TRUE(!Vec3DGrid::isRegistered()); } TEST_F(TestInit, testMatGrids) { // small test to ensure matrix grid types compile using Mat3sGrid = openvdb::BoolGrid::ValueConverter<openvdb::Mat3s>::Type; using Mat3dGrid = openvdb::BoolGrid::ValueConverter<openvdb::Mat3d>::Type; using Mat4sGrid = openvdb::BoolGrid::ValueConverter<openvdb::Mat4s>::Type; using Mat4dGrid = openvdb::BoolGrid::ValueConverter<openvdb::Mat4d>::Type; Mat3sGrid a; (void)(a); Mat3dGrid b; (void)(b); Mat4sGrid c; (void)(c); Mat4dGrid d; (void)(d); }
3,508
C++
35.175257
78
0.711231
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestClip.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/openvdb.h> #include <openvdb/math/Maps.h> // for math::NonlinearFrustumMap #include <openvdb/tools/Clip.h> // See also TestGrid::testClipping() class TestClip: public ::testing::Test { public: static const openvdb::CoordBBox kCubeBBox, kInnerBBox; TestClip(): mCube{ []() { auto cube = openvdb::FloatGrid{0.0f}; cube.fill(kCubeBBox, /*value=*/5.0f, /*active=*/true); return cube; }()} {} void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::initialize(); } protected: void validate(const openvdb::FloatGrid&); const openvdb::FloatGrid mCube; }; const openvdb::CoordBBox // The volume to be clipped is a 21 x 21 x 21 solid cube. TestClip::kCubeBBox{openvdb::Coord{-10}, openvdb::Coord{10}}, // The clipping mask is a 1 x 1 x 13 segment extending along the Z axis inside the cube. TestClip::kInnerBBox{openvdb::Coord{4, 4, -6}, openvdb::Coord{4, 4, 6}}; //////////////////////////////////////// void TestClip::validate(const openvdb::FloatGrid& clipped) { using namespace openvdb; const CoordBBox bbox = clipped.evalActiveVoxelBoundingBox(); EXPECT_EQ(kInnerBBox.min().x(), bbox.min().x()); EXPECT_EQ(kInnerBBox.min().y(), bbox.min().y()); EXPECT_EQ(kInnerBBox.min().z(), bbox.min().z()); EXPECT_EQ(kInnerBBox.max().x(), bbox.max().x()); EXPECT_EQ(kInnerBBox.max().y(), bbox.max().y()); EXPECT_EQ(kInnerBBox.max().z(), bbox.max().z()); EXPECT_EQ(6 + 6 + 1, int(clipped.activeVoxelCount())); EXPECT_EQ(2, int(clipped.constTree().leafCount())); FloatGrid::ConstAccessor acc = clipped.getConstAccessor(); const float bg = clipped.background(); Coord xyz; int &x = xyz[0], &y = xyz[1], &z = xyz[2]; for (x = kCubeBBox.min().x(); x <= kCubeBBox.max().x(); ++x) { for (y = kCubeBBox.min().y(); y <= kCubeBBox.max().y(); ++y) { for (z = kCubeBBox.min().z(); z <= kCubeBBox.max().z(); ++z) { if (x == 4 && y == 4 && z >= -6 && z <= 6) { 
EXPECT_EQ(5.f, acc.getValue(Coord(4, 4, z))); } else { EXPECT_EQ(bg, acc.getValue(Coord(x, y, z))); } } } } } //////////////////////////////////////// // Test clipping against a bounding box. TEST_F(TestClip, testBBox) { using namespace openvdb; BBoxd clipBox(Vec3d(4.0, 4.0, -6.0), Vec3d(4.9, 4.9, 6.0)); FloatGrid::Ptr clipped = tools::clip(mCube, clipBox); validate(*clipped); } // Test clipping against a camera frustum. TEST_F(TestClip, testFrustum) { using namespace openvdb; const auto d = double(kCubeBBox.max().z()); const math::NonlinearFrustumMap frustum{ /*position=*/Vec3d{0.0, 0.0, 5.0 * d}, /*direction=*/Vec3d{0.0, 0.0, -1.0}, /*up=*/Vec3d{0.0, d / 2.0, 0.0}, /*aspect=*/1.0, /*near=*/4.0 * d + 1.0, /*depth=*/kCubeBBox.dim().z() - 2.0, /*x_count=*/100, /*z_count=*/100}; const auto frustumIndexBBox = frustum.getBBox(); { auto clipped = tools::clip(mCube, frustum); const auto bbox = clipped->evalActiveVoxelBoundingBox(); const auto cubeDim = kCubeBBox.dim(); EXPECT_EQ(kCubeBBox.min().z() + 1, bbox.min().z()); EXPECT_EQ(kCubeBBox.max().z() - 1, bbox.max().z()); EXPECT_TRUE(int(bbox.volume()) < int(cubeDim.x() * cubeDim.y() * (cubeDim.z() - 2))); // Note: mCube index space corresponds to world space. 
for (auto it = clipped->beginValueOn(); it; ++it) { const auto xyz = frustum.applyInverseMap(it.getCoord().asVec3d()); EXPECT_TRUE(frustumIndexBBox.isInside(xyz)); } } { auto tile = openvdb::FloatGrid{0.0f}; tile.tree().addTile(/*level=*/2, Coord{0}, /*value=*/5.0f, /*active=*/true); auto clipped = tools::clip(tile, frustum); EXPECT_TRUE(!clipped->empty()); for (auto it = clipped->beginValueOn(); it; ++it) { const auto xyz = frustum.applyInverseMap(it.getCoord().asVec3d()); EXPECT_TRUE(frustumIndexBBox.isInside(xyz)); } clipped = tools::clip(tile, frustum, /*keepInterior=*/false); EXPECT_TRUE(!clipped->empty()); for (auto it = clipped->beginValueOn(); it; ++it) { const auto xyz = frustum.applyInverseMap(it.getCoord().asVec3d()); EXPECT_TRUE(!frustumIndexBBox.isInside(xyz)); } } } // Test clipping against a MaskGrid. TEST_F(TestClip, testMaskGrid) { using namespace openvdb; MaskGrid mask(false); mask.fill(kInnerBBox, true, true); FloatGrid::Ptr clipped = tools::clip(mCube, mask); validate(*clipped); } // Test clipping against a boolean mask grid. TEST_F(TestClip, testBoolMask) { using namespace openvdb; BoolGrid mask(false); mask.fill(kInnerBBox, true, true); FloatGrid::Ptr clipped = tools::clip(mCube, mask); validate(*clipped); } // Test clipping against a boolean mask grid with mask inversion. TEST_F(TestClip, testInvertedBoolMask) { using namespace openvdb; // Construct a mask grid that is the "inverse" of the mask used in the other tests. // (This is not a true inverse, since the mask's active voxel bounds are finite.) BoolGrid mask(false); mask.fill(kCubeBBox, true, true); mask.fill(kInnerBBox, false, false); // Clipping against the "inverted" mask with mask inversion enabled // should give the same results as clipping normally against the normal mask. FloatGrid::Ptr clipped = tools::clip(mCube, mask, /*keepInterior=*/false); clipped->pruneGrid(); validate(*clipped); } // Test clipping against a non-boolean mask grid. 
TEST_F(TestClip, testNonBoolMask) { using namespace openvdb; Int32Grid mask(0); mask.fill(kInnerBBox, -5, true); FloatGrid::Ptr clipped = tools::clip(mCube, mask); validate(*clipped); } // Test clipping against a non-boolean mask grid with mask inversion. TEST_F(TestClip, testInvertedNonBoolMask) { using namespace openvdb; // Construct a mask grid that is the "inverse" of the mask used in the other tests. // (This is not a true inverse, since the mask's active voxel bounds are finite.) Grid<UInt32Tree> mask(0); auto paddedCubeBBox = kCubeBBox; paddedCubeBBox.expand(2); mask.fill(paddedCubeBBox, 99, true); mask.fill(kInnerBBox, 0, false); // Clipping against the "inverted" mask with mask inversion enabled // should give the same results as clipping normally against the normal mask. FloatGrid::Ptr clipped = tools::clip(mCube, mask, /*keepInterior=*/false); clipped->pruneGrid(); validate(*clipped); }
6,893
C++
31.985646
93
0.6112
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestNodeManager.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <openvdb/Exceptions.h> #include <openvdb/Types.h> #include <openvdb/tree/NodeManager.h> #include <openvdb/tree/LeafManager.h> #include "util.h" // for unittest_util::makeSphere() #include "gtest/gtest.h" class TestNodeManager: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; namespace { template<typename TreeT> struct NodeCountOp { NodeCountOp() : nodeCount(TreeT::DEPTH, 0), totalCount(0) { } NodeCountOp(const NodeCountOp&, tbb::split) : nodeCount(TreeT::DEPTH, 0), totalCount(0) { } void join(const NodeCountOp& other) { for (size_t i = 0; i < nodeCount.size(); ++i) { nodeCount[i] += other.nodeCount[i]; } totalCount += other.totalCount; } // do nothing for the root node void operator()(const typename TreeT::RootNodeType&) { } // count the internal and leaf nodes template<typename NodeT> void operator()(const NodeT&) { ++(nodeCount[NodeT::LEVEL]); ++totalCount; } std::vector<openvdb::Index64> nodeCount; openvdb::Index64 totalCount; };// NodeCountOp }//unnamed namespace TEST_F(TestNodeManager, testAll) { using openvdb::CoordBBox; using openvdb::Coord; using openvdb::Vec3f; using openvdb::Index64; using openvdb::FloatGrid; using openvdb::FloatTree; const Vec3f center(0.35f, 0.35f, 0.35f); const float radius = 0.15f; const int dim = 128, half_width = 5; const float voxel_size = 1.0f/dim; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/half_width*voxel_size); FloatTree& tree = grid->tree(); grid->setTransform(openvdb::math::Transform::createLinearTransform(/*voxel size=*/voxel_size)); unittest_util::makeSphere<FloatGrid>(Coord(dim), center, radius, *grid, unittest_util::SPHERE_SPARSE_NARROW_BAND); EXPECT_EQ(4, int(FloatTree::DEPTH)); EXPECT_EQ(3, int(openvdb::tree::NodeManager<FloatTree>::LEVELS)); std::vector<Index64> nodeCount; for (openvdb::Index i=0; i<FloatTree::DEPTH; ++i) 
nodeCount.push_back(0); for (FloatTree::NodeCIter it = tree.cbeginNode(); it; ++it) ++(nodeCount[it.getLevel()]); //for (size_t i=0; i<nodeCount.size(); ++i) {//includes the root node // std::cerr << "Level=" << i << " nodes=" << nodeCount[i] << std::endl; //} {// test tree constructor openvdb::tree::NodeManager<FloatTree> manager(tree); //for (openvdb::Index i=0; i<openvdb::tree::NodeManager<FloatTree>::LEVELS; ++i) { // std::cerr << "Level=" << i << " nodes=" << manager.nodeCount(i) << std::endl; //} Index64 totalCount = 0; for (openvdb::Index i=0; i<FloatTree::RootNodeType::LEVEL; ++i) {//exclude root in nodeCount //std::cerr << "Level=" << i << " expected=" << nodeCount[i] // << " cached=" << manager.nodeCount(i) << std::endl; EXPECT_EQ(nodeCount[i], manager.nodeCount(i)); totalCount += nodeCount[i]; } EXPECT_EQ(totalCount, manager.nodeCount()); // test the map reduce functionality NodeCountOp<FloatTree> bottomUpOp; NodeCountOp<FloatTree> topDownOp; manager.reduceBottomUp(bottomUpOp); manager.reduceTopDown(topDownOp); for (openvdb::Index i=0; i<FloatTree::RootNodeType::LEVEL; ++i) {//exclude root in nodeCount EXPECT_EQ(bottomUpOp.nodeCount[i], manager.nodeCount(i)); EXPECT_EQ(topDownOp.nodeCount[i], manager.nodeCount(i)); } EXPECT_EQ(bottomUpOp.totalCount, manager.nodeCount()); EXPECT_EQ(topDownOp.totalCount, manager.nodeCount()); } {// test LeafManager constructor typedef openvdb::tree::LeafManager<FloatTree> LeafManagerT; LeafManagerT manager1(tree); EXPECT_EQ(nodeCount[0], Index64(manager1.leafCount())); openvdb::tree::NodeManager<LeafManagerT> manager2(manager1); Index64 totalCount = 0; for (openvdb::Index i=0; i<FloatTree::RootNodeType::LEVEL; ++i) {//exclude root in nodeCount //std::cerr << "Level=" << i << " expected=" << nodeCount[i] // << " cached=" << manager2.nodeCount(i) << std::endl; EXPECT_EQ(nodeCount[i], Index64(manager2.nodeCount(i))); totalCount += nodeCount[i]; } EXPECT_EQ(totalCount, Index64(manager2.nodeCount())); // test the map reduce 
functionality NodeCountOp<FloatTree> bottomUpOp; NodeCountOp<FloatTree> topDownOp; manager2.reduceBottomUp(bottomUpOp); manager2.reduceTopDown(topDownOp); for (openvdb::Index i=0; i<FloatTree::RootNodeType::LEVEL; ++i) {//exclude root in nodeCount EXPECT_EQ(bottomUpOp.nodeCount[i], manager2.nodeCount(i)); EXPECT_EQ(topDownOp.nodeCount[i], manager2.nodeCount(i)); } EXPECT_EQ(bottomUpOp.totalCount, manager2.nodeCount()); EXPECT_EQ(topDownOp.totalCount, manager2.nodeCount()); } } TEST_F(TestNodeManager, testConst) { using namespace openvdb; const Vec3f center(0.35f, 0.35f, 0.35f); const int dim = 128, half_width = 5; const float voxel_size = 1.0f/dim; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/half_width*voxel_size); const FloatTree& tree = grid->constTree(); tree::NodeManager<const FloatTree> manager(tree); NodeCountOp<const FloatTree> topDownOp; manager.reduceTopDown(topDownOp); std::vector<Index64> nodeCount; for (openvdb::Index i=0; i<FloatTree::DEPTH; ++i) nodeCount.push_back(0); for (FloatTree::NodeCIter it = tree.cbeginNode(); it; ++it) ++(nodeCount[it.getLevel()]); Index64 totalCount = 0; for (openvdb::Index i=0; i<FloatTree::RootNodeType::LEVEL; ++i) {//exclude root in nodeCount EXPECT_EQ(nodeCount[i], manager.nodeCount(i)); totalCount += nodeCount[i]; } EXPECT_EQ(totalCount, manager.nodeCount()); } namespace { template<typename TreeT> struct ExpandOp { using RootT = typename TreeT::RootNodeType; using LeafT = typename TreeT::LeafNodeType; explicit ExpandOp(bool zeroOnly = false) : mZeroOnly(zeroOnly) { } // do nothing for the root node bool operator()(RootT&, size_t = 1) const { return true; } // count the internal and leaf nodes template<typename NodeT> bool operator()(NodeT& node, size_t idx = 1) const { for (auto iter = node.cbeginValueAll(); iter; ++iter) { const openvdb::Coord ijk = iter.getCoord(); if (ijk.x() < 256 && ijk.y() < 256 && ijk.z() < 256) { node.addChild(new typename NodeT::ChildNodeType(iter.getCoord(), NodeT::LEVEL, true)); } 
} if (mZeroOnly) return idx == 0; return true; } bool operator()(LeafT& leaf, size_t /*idx*/ = 1) const { for (auto iter = leaf.beginValueAll(); iter; ++iter) { iter.setValue(iter.pos()); } return true; } bool mZeroOnly = false; };// ExpandOp template<typename TreeT> struct RootOnlyOp { using RootT = typename TreeT::RootNodeType; RootOnlyOp() = default; RootOnlyOp(const RootOnlyOp&, tbb::split) { } void join(const RootOnlyOp&) { } // do nothing for the root node but return false bool operator()(RootT&, size_t) const { return false; } // throw on internal or leaf nodes template<typename NodeOrLeafT> bool operator()(NodeOrLeafT&, size_t) const { OPENVDB_THROW(openvdb::RuntimeError, "Should not process nodes below root."); } };// RootOnlyOp template<typename TreeT> struct SumOp { using RootT = typename TreeT::RootNodeType; using LeafT = typename TreeT::LeafNodeType; explicit SumOp(bool zeroOnly = false) : mZeroOnly(zeroOnly) { } SumOp(const SumOp& other, tbb::split): totalCount(0), mZeroOnly(other.mZeroOnly) { } void join(const SumOp& other) { totalCount += other.totalCount; } // do nothing for the root node bool operator()(const typename TreeT::RootNodeType&, size_t /*idx*/ = 0) { return true; } // count the internal nodes template<typename NodeT> bool operator()(const NodeT& node, size_t idx = 0) { for (auto iter = node.cbeginValueAll(); iter; ++iter) { totalCount += *iter; } if (mZeroOnly) return idx == 0; return true; } // count the leaf nodes bool operator()(const LeafT& leaf, size_t /*idx*/ = 0) { for (auto iter = leaf.cbeginValueAll(); iter; ++iter) { totalCount += *iter; } return true; } openvdb::Index64 totalCount = openvdb::Index64(0); bool mZeroOnly = false; };// SumOp }//unnamed namespace TEST_F(TestNodeManager, testDynamic) { using openvdb::Coord; using openvdb::Index32; using openvdb::Index64; using openvdb::Int32Tree; using RootNodeType = Int32Tree::RootNodeType; using Internal1NodeType = RootNodeType::ChildNodeType; Int32Tree sourceTree(0); auto child 
= std::make_unique<Internal1NodeType>(Coord(0, 0, 0), /*value=*/1.0f); EXPECT_TRUE(sourceTree.root().addChild(child.release())); EXPECT_EQ(Index32(0), sourceTree.leafCount()); EXPECT_EQ(Index32(2), sourceTree.nonLeafCount()); ExpandOp<Int32Tree> expandOp; { // use NodeManager::foreachTopDown Int32Tree tree(sourceTree); openvdb::tree::NodeManager<Int32Tree> manager(tree); EXPECT_EQ(Index64(1), manager.nodeCount()); manager.foreachTopDown(expandOp); EXPECT_EQ(Index32(0), tree.leafCount()); // first level has been expanded, but node manager cache does not include the new nodes SumOp<Int32Tree> sumOp; manager.reduceBottomUp(sumOp); EXPECT_EQ(Index64(32760), sumOp.totalCount); } { // use DynamicNodeManager::foreachTopDown and filter out nodes below root Int32Tree tree(sourceTree); openvdb::tree::DynamicNodeManager<Int32Tree> manager(tree); RootOnlyOp<Int32Tree> rootOnlyOp; EXPECT_NO_THROW(manager.foreachTopDown(rootOnlyOp)); EXPECT_NO_THROW(manager.reduceTopDown(rootOnlyOp)); } { // use DynamicNodeManager::foreachTopDown Int32Tree tree(sourceTree); openvdb::tree::DynamicNodeManager<Int32Tree> manager(tree); manager.foreachTopDown(expandOp); EXPECT_EQ(Index32(32768), tree.leafCount()); SumOp<Int32Tree> sumOp; manager.reduceTopDown(sumOp); EXPECT_EQ(Index64(4286611448), sumOp.totalCount); SumOp<Int32Tree> zeroSumOp(true); manager.reduceTopDown(zeroSumOp); EXPECT_EQ(Index64(535855096), zeroSumOp.totalCount); } { // use DynamicNodeManager::foreachTopDown but filter nodes with non-zero index Int32Tree tree(sourceTree); openvdb::tree::DynamicNodeManager<Int32Tree> manager(tree); ExpandOp<Int32Tree> zeroExpandOp(true); manager.foreachTopDown(zeroExpandOp); EXPECT_EQ(Index32(32768), tree.leafCount()); SumOp<Int32Tree> sumOp; manager.reduceTopDown(sumOp); EXPECT_EQ(Index64(550535160), sumOp.totalCount); } }
11,429
C++
32.519061
102
0.633039
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestDivergence.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Types.h> #include <openvdb/openvdb.h> #include <openvdb/tools/GridOperators.h> #define ASSERT_DOUBLES_EXACTLY_EQUAL(expected, actual) \ EXPECT_NEAR((expected), (actual), /*tolerance=*/0.0); namespace { const int GRID_DIM = 10; } class TestDivergence: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; TEST_F(TestDivergence, testDivergenceTool) { using namespace openvdb; VectorGrid::Ptr inGrid = VectorGrid::create(); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(x), float(y), 0.f)); } } } EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); FloatGrid::Ptr divGrid = tools::divergence(*inGrid); EXPECT_EQ(math::Pow3(2*dim), int(divGrid->activeVoxelCount())); FloatGrid::ConstAccessor accessor = divGrid->getConstAccessor(); --dim;//ignore boundary divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0, v[2]); const float d = accessor.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); } } } } TEST_F(TestDivergence, testDivergenceMaskedTool) { using namespace openvdb; VectorGrid::Ptr inGrid = VectorGrid::create(); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(x), float(y), 0.f)); } } } 
EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); /// maked region openvdb::CoordBBox maskBBox(openvdb::Coord(0), openvdb::Coord(dim)); BoolGrid::Ptr maskGrid = BoolGrid::create(false); maskGrid->fill(maskBBox, true /*value*/, true /*activate*/); FloatGrid::Ptr divGrid = tools::divergence(*inGrid, *maskGrid); EXPECT_EQ(math::Pow3(dim), int(divGrid->activeVoxelCount())); FloatGrid::ConstAccessor accessor = divGrid->getConstAccessor(); --dim;//ignore boundary divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0, v[2]); const float d = accessor.getValue(xyz); if (maskBBox.isInside(xyz)) { ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); } else { ASSERT_DOUBLES_EXACTLY_EQUAL(0, d); } } } } } TEST_F(TestDivergence, testStaggeredDivergence) { // This test is slightly different than the one above for sanity // checking purposes. 
using namespace openvdb; VectorGrid::Ptr inGrid = VectorGrid::create(); inGrid->setGridClass( GRID_STAGGERED ); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(x), float(y), float(z))); } } } EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); FloatGrid::Ptr divGrid = tools::divergence(*inGrid); EXPECT_EQ(math::Pow3(2*dim), int(divGrid->activeVoxelCount())); FloatGrid::ConstAccessor accessor = divGrid->getConstAccessor(); --dim;//ignore boundary divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(z, v[2]); const float d = accessor.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(3, d); } } } } TEST_F(TestDivergence, testISDivergence) { using namespace openvdb; typedef VectorGrid::ConstAccessor Accessor; VectorGrid::Ptr inGrid = VectorGrid::create(); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(x), float(y), 0.f)); } } } Accessor inAccessor = inGrid->getConstAccessor(); EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); --dim;//ignore boundary divergence // test index space divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0, v[2]); float d; d = 
math::ISDivergence<math::CD_2ND>::result(inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::BD_1ST>::result(inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::FD_1ST>::result(inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); } } } --dim;//ignore boundary divergence // test index space divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0, v[2]); float d; d = math::ISDivergence<math::CD_4TH>::result(inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::FD_2ND>::result(inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::BD_2ND>::result(inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); } } } --dim;//ignore boundary divergence // test index space divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0, v[2]); float d; d = math::ISDivergence<math::CD_6TH>::result(inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::FD_3RD>::result(inAccessor, xyz); EXPECT_NEAR(2, d, /*tolerance=*/0.00001); d = math::ISDivergence<math::BD_3RD>::result(inAccessor, xyz); EXPECT_NEAR(2, d, /*tolerance=*/0.00001); } } } } TEST_F(TestDivergence, testISDivergenceStencil) { using namespace openvdb; VectorGrid::Ptr inGrid = VectorGrid::create(); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { inTree.setValue(Coord(x,y,z), 
VectorTree::ValueType(float(x), float(y), 0.f)); } } } EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); math::SevenPointStencil<VectorGrid> sevenpt(*inGrid); math::ThirteenPointStencil<VectorGrid> thirteenpt(*inGrid); math::NineteenPointStencil<VectorGrid> nineteenpt(*inGrid); --dim;//ignore boundary divergence // test index space divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0, v[2]); sevenpt.moveTo(xyz); float d; d = math::ISDivergence<math::CD_2ND>::result(sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::BD_1ST>::result(sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::FD_1ST>::result(sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); } } } --dim;//ignore boundary divergence // test index space divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0, v[2]); thirteenpt.moveTo(xyz); float d; d = math::ISDivergence<math::CD_4TH>::result(thirteenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::FD_2ND>::result(thirteenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::BD_2ND>::result(thirteenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); } } } --dim;//ignore boundary divergence // test index space divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0, v[2]); 
nineteenpt.moveTo(xyz); float d; d = math::ISDivergence<math::CD_6TH>::result(nineteenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::FD_3RD>::result(nineteenpt); EXPECT_NEAR(2, d, /*tolerance=*/0.00001); d = math::ISDivergence<math::BD_3RD>::result(nineteenpt); EXPECT_NEAR(2, d, /*tolerance=*/0.00001); } } } } TEST_F(TestDivergence, testWSDivergence) { using namespace openvdb; typedef VectorGrid::ConstAccessor Accessor; { // non-unit voxel size double voxel_size = 0.5; VectorGrid::Ptr inGrid = VectorGrid::create(); inGrid->setTransform(math::Transform::createLinearTransform(voxel_size)); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Vec3d location = inGrid->indexToWorld(Vec3d(x,y,z)); inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(location.x()), float(location.y()), 0.f)); } } } Accessor inAccessor = inGrid->getConstAccessor(); EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); --dim;//ignore boundary divergence // test with a map // test with a map math::AffineMap map(voxel_size*math::Mat3d::identity()); math::UniformScaleMap uniform_map(voxel_size); math::UniformScaleTranslateMap uniform_translate_map(voxel_size, Vec3d(0,0,0)); for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { openvdb::Coord xyz(x,y,z); //openvdb::VectorTree::ValueType v = inTree.getValue(xyz); //std::cout << "vec(" << xyz << ")=" << v << std::endl; float d; d = math::Divergence<math::AffineMap, math::CD_2ND>::result( map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::AffineMap, math::BD_1ST>::result( map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::AffineMap, math::FD_1ST>::result( map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = 
math::Divergence<math::UniformScaleMap, math::CD_2ND>::result( uniform_map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleMap, math::BD_1ST>::result( uniform_map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleMap, math::FD_1ST>::result( uniform_map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleTranslateMap, math::CD_2ND>::result( uniform_translate_map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleTranslateMap, math::BD_1ST>::result( uniform_translate_map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleTranslateMap, math::FD_1ST>::result( uniform_translate_map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); } } } } { // non-uniform scaling and rotation Vec3d voxel_sizes(0.25, 0.45, 0.75); VectorGrid::Ptr inGrid = VectorGrid::create(); math::MapBase::Ptr base_map( new math::ScaleMap(voxel_sizes)); // apply rotation math::MapBase::Ptr rotated_map = base_map->preRotate(1.5, math::X_AXIS); inGrid->setTransform(math::Transform::Ptr(new math::Transform(rotated_map))); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Vec3d location = inGrid->indexToWorld(Vec3d(x,y,z)); inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(location.x()), float(location.y()), 0.f)); } } } Accessor inAccessor = inGrid->getConstAccessor(); EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); --dim;//ignore boundary divergence // test with a map math::AffineMap::ConstPtr map = inGrid->transform().map<math::AffineMap>(); for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { openvdb::Coord xyz(x,y,z); //openvdb::VectorTree::ValueType v = 
inTree.getValue(xyz); //std::cout << "vec(" << xyz << ")=" << v << std::endl; float d; d = math::Divergence<math::AffineMap, math::CD_2ND>::result( *map, inAccessor, xyz); EXPECT_NEAR(2.0, d, 0.01); d = math::Divergence<math::AffineMap, math::BD_1ST>::result( *map, inAccessor, xyz); EXPECT_NEAR(2.0, d, 0.01); d = math::Divergence<math::AffineMap, math::FD_1ST>::result( *map, inAccessor, xyz); EXPECT_NEAR(2.0, d, 0.01); } } } } } TEST_F(TestDivergence, testWSDivergenceStencil) { using namespace openvdb; { // non-unit voxel size double voxel_size = 0.5; VectorGrid::Ptr inGrid = VectorGrid::create(); inGrid->setTransform(math::Transform::createLinearTransform(voxel_size)); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Vec3d location = inGrid->indexToWorld(Vec3d(x,y,z)); inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(location.x()), float(location.y()), 0.f)); } } } EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); --dim;//ignore boundary divergence // test with a map math::AffineMap map(voxel_size*math::Mat3d::identity()); math::UniformScaleMap uniform_map(voxel_size); math::UniformScaleTranslateMap uniform_translate_map(voxel_size, Vec3d(0,0,0)); math::SevenPointStencil<VectorGrid> sevenpt(*inGrid); math::SecondOrderDenseStencil<VectorGrid> dense_2ndOrder(*inGrid); for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { openvdb::Coord xyz(x,y,z); //openvdb::VectorTree::ValueType v = inTree.getValue(xyz); //std::cout << "vec(" << xyz << ")=" << v << std::endl; float d; sevenpt.moveTo(xyz); dense_2ndOrder.moveTo(xyz); d = math::Divergence<math::AffineMap, math::CD_2ND>::result( map, dense_2ndOrder); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::AffineMap, math::BD_1ST>::result( map, dense_2ndOrder); 
ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::AffineMap, math::FD_1ST>::result( map, dense_2ndOrder); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleMap, math::CD_2ND>::result( uniform_map, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleMap, math::BD_1ST>::result( uniform_map, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleMap, math::FD_1ST>::result( uniform_map, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleTranslateMap, math::CD_2ND>::result( uniform_translate_map, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleTranslateMap, math::BD_1ST>::result( uniform_translate_map, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleTranslateMap, math::FD_1ST>::result( uniform_translate_map, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); } } } } { // non-uniform scaling and rotation Vec3d voxel_sizes(0.25, 0.45, 0.75); VectorGrid::Ptr inGrid = VectorGrid::create(); math::MapBase::Ptr base_map( new math::ScaleMap(voxel_sizes)); // apply rotation math::MapBase::Ptr rotated_map = base_map->preRotate(1.5, math::X_AXIS); inGrid->setTransform(math::Transform::Ptr(new math::Transform(rotated_map))); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Vec3d location = inGrid->indexToWorld(Vec3d(x,y,z)); inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(location.x()), float(location.y()), 0.f)); } } } //Accessor inAccessor = inGrid->getConstAccessor(); EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); --dim;//ignore boundary divergence // test with a map math::AffineMap::ConstPtr map = inGrid->transform().map<math::AffineMap>(); math::SecondOrderDenseStencil<VectorGrid> 
dense_2ndOrder(*inGrid); for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { openvdb::Coord xyz(x,y,z); dense_2ndOrder.moveTo(xyz); float d; d = math::Divergence<math::AffineMap, math::CD_2ND>::result( *map, dense_2ndOrder); EXPECT_NEAR(2.0, d, 0.01); d = math::Divergence<math::AffineMap, math::BD_1ST>::result( *map, dense_2ndOrder); EXPECT_NEAR(2.0, d, 0.01); d = math::Divergence<math::AffineMap, math::FD_1ST>::result( *map, dense_2ndOrder); EXPECT_NEAR(2.0, d, 0.01); } } } } }
23,628
C++
35.185299
95
0.511427
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestQuat.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/math/Math.h> #include <openvdb/math/Quat.h> #include <openvdb/math/Mat4.h> using namespace openvdb::math; class TestQuat: public ::testing::Test { }; TEST_F(TestQuat, testConstructor) { { Quat<float> qq(1.23f, 2.34f, 3.45f, 4.56f); EXPECT_TRUE( isExactlyEqual(qq.x(), 1.23f) ); EXPECT_TRUE( isExactlyEqual(qq.y(), 2.34f) ); EXPECT_TRUE( isExactlyEqual(qq.z(), 3.45f) ); EXPECT_TRUE( isExactlyEqual(qq.w(), 4.56f) ); } { float a[] = { 1.23f, 2.34f, 3.45f, 4.56f }; Quat<float> qq(a); EXPECT_TRUE( isExactlyEqual(qq.x(), 1.23f) ); EXPECT_TRUE( isExactlyEqual(qq.y(), 2.34f) ); EXPECT_TRUE( isExactlyEqual(qq.z(), 3.45f) ); EXPECT_TRUE( isExactlyEqual(qq.w(), 4.56f) ); } } TEST_F(TestQuat, testAxisAngle) { float TOL = 1e-6f; Quat<float> q1(1.0f, 2.0f, 3.0f, 4.0f); Quat<float> q2(1.2f, 2.3f, 3.4f, 4.5f); Vec3s v(1, 2, 3); v.normalize(); float a = float(M_PI / 4.f); Quat<float> q(v,a); float b = q.angle(); Vec3s vv = q.axis(); EXPECT_TRUE( isApproxEqual(a, b, TOL) ); EXPECT_TRUE( v.eq(vv, TOL) ); q1.setAxisAngle(v,a); b = q1.angle(); vv = q1.axis(); EXPECT_TRUE( isApproxEqual(a, b, TOL) ); EXPECT_TRUE( v.eq(vv, TOL) ); } TEST_F(TestQuat, testOpPlus) { Quat<float> q1(1.0f, 2.0f, 3.0f, 4.0f); Quat<float> q2(1.2f, 2.3f, 3.4f, 4.5f); Quat<float> q = q1 + q2; float x=q1.x()+q2.x(), y=q1.y()+q2.y(), z=q1.z()+q2.z(), w=q1.w()+q2.w(); EXPECT_TRUE( isExactlyEqual(q.x(), x) ); EXPECT_TRUE( isExactlyEqual(q.y(), y) ); EXPECT_TRUE( isExactlyEqual(q.z(), z) ); EXPECT_TRUE( isExactlyEqual(q.w(), w) ); q = q1; q += q2; EXPECT_TRUE( isExactlyEqual(q.x(), x) ); EXPECT_TRUE( isExactlyEqual(q.y(), y) ); EXPECT_TRUE( isExactlyEqual(q.z(), z) ); EXPECT_TRUE( isExactlyEqual(q.w(), w) ); q.add(q1,q2); EXPECT_TRUE( isExactlyEqual(q.x(), x) ); EXPECT_TRUE( isExactlyEqual(q.y(), y) ); EXPECT_TRUE( isExactlyEqual(q.z(), z) ); EXPECT_TRUE( 
isExactlyEqual(q.w(), w) ); } TEST_F(TestQuat, testOpMinus) { Quat<float> q1(1.0f, 2.0f, 3.0f, 4.0f); Quat<float> q2(1.2f, 2.3f, 3.4f, 4.5f); Quat<float> q = q1 - q2; float x=q1.x()-q2.x(), y=q1.y()-q2.y(), z=q1.z()-q2.z(), w=q1.w()-q2.w(); EXPECT_TRUE( isExactlyEqual(q.x(), x) ); EXPECT_TRUE( isExactlyEqual(q.y(), y) ); EXPECT_TRUE( isExactlyEqual(q.z(), z) ); EXPECT_TRUE( isExactlyEqual(q.w(), w) ); q = q1; q -= q2; EXPECT_TRUE( isExactlyEqual(q.x(), x) ); EXPECT_TRUE( isExactlyEqual(q.y(), y) ); EXPECT_TRUE( isExactlyEqual(q.z(), z) ); EXPECT_TRUE( isExactlyEqual(q.w(), w) ); q.sub(q1,q2); EXPECT_TRUE( isExactlyEqual(q.x(), x) ); EXPECT_TRUE( isExactlyEqual(q.y(), y) ); EXPECT_TRUE( isExactlyEqual(q.z(), z) ); EXPECT_TRUE( isExactlyEqual(q.w(), w) ); } TEST_F(TestQuat, testOpMultiply) { Quat<float> q1(1.0f, 2.0f, 3.0f, 4.0f); Quat<float> q2(1.2f, 2.3f, 3.4f, 4.5f); Quat<float> q = q1 * 1.5f; EXPECT_TRUE( isExactlyEqual(q.x(), float(1.5f)*q1.x()) ); EXPECT_TRUE( isExactlyEqual(q.y(), float(1.5f)*q1.y()) ); EXPECT_TRUE( isExactlyEqual(q.z(), float(1.5f)*q1.z()) ); EXPECT_TRUE( isExactlyEqual(q.w(), float(1.5f)*q1.w()) ); q = q1; q *= 1.5f; EXPECT_TRUE( isExactlyEqual(q.x(), float(1.5f)*q1.x()) ); EXPECT_TRUE( isExactlyEqual(q.y(), float(1.5f)*q1.y()) ); EXPECT_TRUE( isExactlyEqual(q.z(), float(1.5f)*q1.z()) ); EXPECT_TRUE( isExactlyEqual(q.w(), float(1.5f)*q1.w()) ); q.scale(1.5f, q1); EXPECT_TRUE( isExactlyEqual(q.x(), float(1.5f)*q1.x()) ); EXPECT_TRUE( isExactlyEqual(q.y(), float(1.5f)*q1.y()) ); EXPECT_TRUE( isExactlyEqual(q.z(), float(1.5f)*q1.z()) ); EXPECT_TRUE( isExactlyEqual(q.w(), float(1.5f)*q1.w()) ); } TEST_F(TestQuat, testInvert) { float TOL = 1e-6f; Quat<float> q1(1.0f, 2.0f, 3.0f, 4.0f); Quat<float> q2(1.2f, 2.3f, 3.4f, 4.5f); q1 = q2; q2 = q2.inverse(); Quat<float> q = q1*q2; EXPECT_TRUE( q.eq( Quat<float>(0,0,0,1), TOL ) ); q1.normalize(); q2 = q1.conjugate(); q = q1*q2; EXPECT_TRUE( q.eq( Quat<float>(0,0,0,1), TOL ) ); } TEST_F(TestQuat, 
testEulerAngles) { { double TOL = 1e-7; Mat4d rx, ry, rz; const double angle1 = 20. * M_PI / 180.; const double angle2 = 64. * M_PI / 180.; const double angle3 = 125. *M_PI / 180.; rx.setToRotation(Vec3d(1,0,0), angle1); ry.setToRotation(Vec3d(0,1,0), angle2); rz.setToRotation(Vec3d(0,0,1), angle3); Mat4d r = rx * ry * rz; const Quat<double> rot(r.getMat3()); Vec3d result = rot.eulerAngles(ZYX_ROTATION); rx.setToRotation(Vec3d(1,0,0), result[0]); ry.setToRotation(Vec3d(0,1,0), result[1]); rz.setToRotation(Vec3d(0,0,1), result[2]); Mat4d rtest = rx * ry * rz; EXPECT_TRUE(r.eq(rtest, TOL)); } { double TOL = 1e-7; Mat4d rx, ry, rz; const double angle1 = 20. * M_PI / 180.; const double angle2 = 64. * M_PI / 180.; const double angle3 = 125. *M_PI / 180.; rx.setToRotation(Vec3d(1,0,0), angle1); ry.setToRotation(Vec3d(0,1,0), angle2); rz.setToRotation(Vec3d(0,0,1), angle3); Mat4d r = rz * ry * rx; const Quat<double> rot(r.getMat3()); Vec3d result = rot.eulerAngles(XYZ_ROTATION); rx.setToRotation(Vec3d(1,0,0), result[0]); ry.setToRotation(Vec3d(0,1,0), result[1]); rz.setToRotation(Vec3d(0,0,1), result[2]); Mat4d rtest = rz * ry * rx; EXPECT_TRUE(r.eq(rtest, TOL)); } { double TOL = 1e-7; Mat4d rx, ry, rz; const double angle1 = 20. * M_PI / 180.; const double angle2 = 64. * M_PI / 180.; const double angle3 = 125. *M_PI / 180.; rx.setToRotation(Vec3d(1,0,0), angle1); ry.setToRotation(Vec3d(0,1,0), angle2); rz.setToRotation(Vec3d(0,0,1), angle3); Mat4d r = rz * rx * ry; const Quat<double> rot(r.getMat3()); Vec3d result = rot.eulerAngles(YXZ_ROTATION); rx.setToRotation(Vec3d(1,0,0), result[0]); ry.setToRotation(Vec3d(0,1,0), result[1]); rz.setToRotation(Vec3d(0,0,1), result[2]); Mat4d rtest = rz * rx * ry; EXPECT_TRUE(r.eq(rtest, TOL)); } { const Quat<float> rot(X_AXIS, 1.0); Vec3s result = rot.eulerAngles(XZY_ROTATION); EXPECT_EQ(result, Vec3s(1,0,0)); } }
6,816
C++
25.628906
75
0.553991
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestPointPartitioner.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/tools/PointPartitioner.h> #include <vector> class TestPointPartitioner: public ::testing::Test { }; //////////////////////////////////////// namespace { struct PointList { typedef openvdb::Vec3s PosType; PointList(const std::vector<PosType>& points) : mPoints(&points) {} size_t size() const { return mPoints->size(); } void getPos(size_t n, PosType& xyz) const { xyz = (*mPoints)[n]; } protected: std::vector<PosType> const * const mPoints; }; // PointList } // namespace //////////////////////////////////////// TEST_F(TestPointPartitioner, testPartitioner) { const size_t pointCount = 10000; const float voxelSize = 0.1f; std::vector<openvdb::Vec3s> points(pointCount, openvdb::Vec3s(0.f)); for (size_t n = 1; n < pointCount; ++n) { points[n].x() = points[n-1].x() + voxelSize; } PointList pointList(points); const openvdb::math::Transform::Ptr transform = openvdb::math::Transform::createLinearTransform(voxelSize); typedef openvdb::tools::UInt32PointPartitioner PointPartitioner; PointPartitioner::Ptr partitioner = PointPartitioner::create(pointList, *transform); EXPECT_TRUE(!partitioner->empty()); // The default interpretation should be cell-centered. 
EXPECT_TRUE(partitioner->usingCellCenteredTransform()); const size_t expectedPageCount = pointCount / (1u << PointPartitioner::LOG2DIM); EXPECT_EQ(expectedPageCount, partitioner->size()); EXPECT_EQ(openvdb::Coord(0), partitioner->origin(0)); PointPartitioner::IndexIterator it = partitioner->indices(0); EXPECT_TRUE(it.test()); EXPECT_EQ(it.size(), size_t(1 << PointPartitioner::LOG2DIM)); PointPartitioner::IndexIterator itB = partitioner->indices(0); EXPECT_EQ(++it, ++itB); EXPECT_TRUE(it != ++itB); std::vector<PointPartitioner::IndexType> indices; for (it.reset(); it; ++it) { indices.push_back(*it); } EXPECT_EQ(it.size(), indices.size()); size_t idx = 0; for (itB.reset(); itB; ++itB) { EXPECT_EQ(indices[idx++], *itB); } }
2,234
C++
23.560439
84
0.636079
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestLinearInterp.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/openvdb.h> #include <openvdb/tools/Interpolation.h> #include <openvdb/math/Stencils.h> namespace { // Absolute tolerance for floating-point equality comparisons const double TOLERANCE = 1.e-6; } class TestLinearInterp: public ::testing::Test { public: template<typename GridType> void test(); template<typename GridType> void testTree(); template<typename GridType> void testAccessor(); template<typename GridType> void testConstantValues(); template<typename GridType> void testFillValues(); template<typename GridType> void testNegativeIndices(); template<typename GridType> void testStencilsMatch(); }; template<typename GridType> void TestLinearInterp::test() { typename GridType::TreeType TreeType; float fillValue = 256.0f; GridType grid(fillValue); typename GridType::TreeType& tree = grid.tree(); tree.setValue(openvdb::Coord(10, 10, 10), 1.0); tree.setValue(openvdb::Coord(11, 10, 10), 2.0); tree.setValue(openvdb::Coord(11, 11, 10), 2.0); tree.setValue(openvdb::Coord(10, 11, 10), 2.0); tree.setValue(openvdb::Coord( 9, 11, 10), 2.0); tree.setValue(openvdb::Coord( 9, 10, 10), 2.0); tree.setValue(openvdb::Coord( 9, 9, 10), 2.0); tree.setValue(openvdb::Coord(10, 9, 10), 2.0); tree.setValue(openvdb::Coord(11, 9, 10), 2.0); tree.setValue(openvdb::Coord(10, 10, 11), 3.0); tree.setValue(openvdb::Coord(11, 10, 11), 3.0); tree.setValue(openvdb::Coord(11, 11, 11), 3.0); tree.setValue(openvdb::Coord(10, 11, 11), 3.0); tree.setValue(openvdb::Coord( 9, 11, 11), 3.0); tree.setValue(openvdb::Coord( 9, 10, 11), 3.0); tree.setValue(openvdb::Coord( 9, 9, 11), 3.0); tree.setValue(openvdb::Coord(10, 9, 11), 3.0); tree.setValue(openvdb::Coord(11, 9, 11), 3.0); tree.setValue(openvdb::Coord(10, 10, 9), 4.0); tree.setValue(openvdb::Coord(11, 10, 9), 4.0); tree.setValue(openvdb::Coord(11, 11, 9), 4.0); tree.setValue(openvdb::Coord(10, 
11, 9), 4.0); tree.setValue(openvdb::Coord( 9, 11, 9), 4.0); tree.setValue(openvdb::Coord( 9, 10, 9), 4.0); tree.setValue(openvdb::Coord( 9, 9, 9), 4.0); tree.setValue(openvdb::Coord(10, 9, 9), 4.0); tree.setValue(openvdb::Coord(11, 9, 9), 4.0); {//using BoxSampler // transform used for worldspace interpolation) openvdb::tools::GridSampler<GridType, openvdb::tools::BoxSampler> interpolator(grid); //openvdb::tools::LinearInterp<GridType> interpolator(*tree); typename GridType::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_NEAR(2.375, val, TOLERANCE); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_NEAR(1.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 10.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 11.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 11.0, 11.0); EXPECT_NEAR(3.0, val, TOLERANCE); val = interpolator.sampleVoxel(9.0, 11.0, 9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(9.0, 10.0, 9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_NEAR(1.1, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_NEAR(2.792, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_NEAR(2.71, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_NEAR(2.01, val, TOLERANCE); } {//using Sampler<1> // transform used for worldspace interpolation) openvdb::tools::GridSampler<GridType, openvdb::tools::Sampler<1> > interpolator(grid); //openvdb::tools::LinearInterp<GridType> interpolator(*tree); typename GridType::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_NEAR(2.375, val, TOLERANCE); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_NEAR(1.0, val, 
TOLERANCE); val = interpolator.sampleVoxel(11.0, 10.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 11.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 11.0, 11.0); EXPECT_NEAR(3.0, val, TOLERANCE); val = interpolator.sampleVoxel(9.0, 11.0, 9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(9.0, 10.0, 9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_NEAR(1.1, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_NEAR(2.792, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_NEAR(2.71, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_NEAR(2.01, val, TOLERANCE); } } TEST_F(TestLinearInterp, testFloat) { test<openvdb::FloatGrid>(); } TEST_F(TestLinearInterp, testDouble) { test<openvdb::DoubleGrid>(); } TEST_F(TestLinearInterp, testVec3S) { using namespace openvdb; Vec3s fillValue = Vec3s(256.0f, 256.0f, 256.0f); Vec3SGrid grid(fillValue); Vec3STree& tree = grid.tree(); tree.setValue(openvdb::Coord(10, 10, 10), Vec3s(1.0, 1.0, 1.0)); tree.setValue(openvdb::Coord(11, 10, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 10, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 10, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(11, 10, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(11, 11, 
11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(10, 11, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( 9, 11, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( 9, 10, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( 9, 9, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(10, 9, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(11, 9, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(10, 10, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(11, 10, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(11, 11, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(10, 11, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( 9, 11, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( 9, 10, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( 9, 9, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(10, 9, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(11, 9, 9), Vec3s(4.0, 4.0, 4.0)); openvdb::tools::GridSampler<Vec3SGrid, openvdb::tools::BoxSampler> interpolator(grid); //openvdb::tools::LinearInterp<Vec3STree> interpolator(*tree); Vec3SGrid::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.375f))); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(1.f))); val = interpolator.sampleVoxel(11.0, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(2.f))); val = interpolator.sampleVoxel(11.0, 11.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(2.f))); val = interpolator.sampleVoxel(11.0, 11.0, 11.0); EXPECT_TRUE(val.eq(Vec3s(3.f))); val = interpolator.sampleVoxel(9.0, 11.0, 9.0); EXPECT_TRUE(val.eq(Vec3s(4.f))); val = interpolator.sampleVoxel(9.0, 10.0, 9.0); EXPECT_TRUE(val.eq(Vec3s(4.f))); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(1.1f))); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_TRUE(val.eq(Vec3s(2.792f))); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.41f))); val = 
interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.41f))); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_TRUE(val.eq(Vec3s(2.71f))); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_TRUE(val.eq(Vec3s(2.01f))); } template<typename GridType> void TestLinearInterp::testTree() { float fillValue = 256.0f; typedef typename GridType::TreeType TreeType; TreeType tree(fillValue); tree.setValue(openvdb::Coord(10, 10, 10), 1.0); tree.setValue(openvdb::Coord(11, 10, 10), 2.0); tree.setValue(openvdb::Coord(11, 11, 10), 2.0); tree.setValue(openvdb::Coord(10, 11, 10), 2.0); tree.setValue(openvdb::Coord( 9, 11, 10), 2.0); tree.setValue(openvdb::Coord( 9, 10, 10), 2.0); tree.setValue(openvdb::Coord( 9, 9, 10), 2.0); tree.setValue(openvdb::Coord(10, 9, 10), 2.0); tree.setValue(openvdb::Coord(11, 9, 10), 2.0); tree.setValue(openvdb::Coord(10, 10, 11), 3.0); tree.setValue(openvdb::Coord(11, 10, 11), 3.0); tree.setValue(openvdb::Coord(11, 11, 11), 3.0); tree.setValue(openvdb::Coord(10, 11, 11), 3.0); tree.setValue(openvdb::Coord( 9, 11, 11), 3.0); tree.setValue(openvdb::Coord( 9, 10, 11), 3.0); tree.setValue(openvdb::Coord( 9, 9, 11), 3.0); tree.setValue(openvdb::Coord(10, 9, 11), 3.0); tree.setValue(openvdb::Coord(11, 9, 11), 3.0); tree.setValue(openvdb::Coord(10, 10, 9), 4.0); tree.setValue(openvdb::Coord(11, 10, 9), 4.0); tree.setValue(openvdb::Coord(11, 11, 9), 4.0); tree.setValue(openvdb::Coord(10, 11, 9), 4.0); tree.setValue(openvdb::Coord( 9, 11, 9), 4.0); tree.setValue(openvdb::Coord( 9, 10, 9), 4.0); tree.setValue(openvdb::Coord( 9, 9, 9), 4.0); tree.setValue(openvdb::Coord(10, 9, 9), 4.0); tree.setValue(openvdb::Coord(11, 9, 9), 4.0); // transform used for worldspace interpolation) openvdb::tools::GridSampler<TreeType, openvdb::tools::BoxSampler> interpolator(tree, openvdb::math::Transform()); typename GridType::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_NEAR(2.375, val, TOLERANCE); val = 
interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_NEAR(1.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 10.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 11.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 11.0, 11.0); EXPECT_NEAR(3.0, val, TOLERANCE); val = interpolator.sampleVoxel(9.0, 11.0, 9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(9.0, 10.0, 9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_NEAR(1.1, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_NEAR(2.792, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_NEAR(2.71, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_NEAR(2.01, val, TOLERANCE); } TEST_F(TestLinearInterp, testTreeFloat) { testTree<openvdb::FloatGrid>(); } TEST_F(TestLinearInterp, testTreeDouble) { testTree<openvdb::DoubleGrid>(); } TEST_F(TestLinearInterp, testTreeVec3S) { using namespace openvdb; Vec3s fillValue = Vec3s(256.0f, 256.0f, 256.0f); Vec3STree tree(fillValue); tree.setValue(openvdb::Coord(10, 10, 10), Vec3s(1.0, 1.0, 1.0)); tree.setValue(openvdb::Coord(11, 10, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 10, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 10, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(11, 10, 11), 
Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(11, 11, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(10, 11, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( 9, 11, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( 9, 10, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( 9, 9, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(10, 9, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(11, 9, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(10, 10, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(11, 10, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(11, 11, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(10, 11, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( 9, 11, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( 9, 10, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( 9, 9, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(10, 9, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(11, 9, 9), Vec3s(4.0, 4.0, 4.0)); openvdb::tools::GridSampler<Vec3STree, openvdb::tools::BoxSampler> interpolator(tree, openvdb::math::Transform()); //openvdb::tools::LinearInterp<Vec3STree> interpolator(*tree); Vec3SGrid::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.375f))); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(1.f))); val = interpolator.sampleVoxel(11.0, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(2.f))); val = interpolator.sampleVoxel(11.0, 11.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(2.f))); val = interpolator.sampleVoxel(11.0, 11.0, 11.0); EXPECT_TRUE(val.eq(Vec3s(3.f))); val = interpolator.sampleVoxel(9.0, 11.0, 9.0); EXPECT_TRUE(val.eq(Vec3s(4.f))); val = interpolator.sampleVoxel(9.0, 10.0, 9.0); EXPECT_TRUE(val.eq(Vec3s(4.f))); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(1.1f))); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_TRUE(val.eq(Vec3s(2.792f))); 
val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.41f))); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.41f))); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_TRUE(val.eq(Vec3s(2.71f))); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_TRUE(val.eq(Vec3s(2.01f))); } template<typename GridType> void TestLinearInterp::testAccessor() { float fillValue = 256.0f; GridType grid(fillValue); typedef typename GridType::Accessor AccessorType; AccessorType acc = grid.getAccessor(); acc.setValue(openvdb::Coord(10, 10, 10), 1.0); acc.setValue(openvdb::Coord(11, 10, 10), 2.0); acc.setValue(openvdb::Coord(11, 11, 10), 2.0); acc.setValue(openvdb::Coord(10, 11, 10), 2.0); acc.setValue(openvdb::Coord( 9, 11, 10), 2.0); acc.setValue(openvdb::Coord( 9, 10, 10), 2.0); acc.setValue(openvdb::Coord( 9, 9, 10), 2.0); acc.setValue(openvdb::Coord(10, 9, 10), 2.0); acc.setValue(openvdb::Coord(11, 9, 10), 2.0); acc.setValue(openvdb::Coord(10, 10, 11), 3.0); acc.setValue(openvdb::Coord(11, 10, 11), 3.0); acc.setValue(openvdb::Coord(11, 11, 11), 3.0); acc.setValue(openvdb::Coord(10, 11, 11), 3.0); acc.setValue(openvdb::Coord( 9, 11, 11), 3.0); acc.setValue(openvdb::Coord( 9, 10, 11), 3.0); acc.setValue(openvdb::Coord( 9, 9, 11), 3.0); acc.setValue(openvdb::Coord(10, 9, 11), 3.0); acc.setValue(openvdb::Coord(11, 9, 11), 3.0); acc.setValue(openvdb::Coord(10, 10, 9), 4.0); acc.setValue(openvdb::Coord(11, 10, 9), 4.0); acc.setValue(openvdb::Coord(11, 11, 9), 4.0); acc.setValue(openvdb::Coord(10, 11, 9), 4.0); acc.setValue(openvdb::Coord( 9, 11, 9), 4.0); acc.setValue(openvdb::Coord( 9, 10, 9), 4.0); acc.setValue(openvdb::Coord( 9, 9, 9), 4.0); acc.setValue(openvdb::Coord(10, 9, 9), 4.0); acc.setValue(openvdb::Coord(11, 9, 9), 4.0); // transform used for worldspace interpolation) openvdb::tools::GridSampler<AccessorType, openvdb::tools::BoxSampler> interpolator(acc, grid.transform()); typename GridType::ValueType val = 
interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_NEAR(2.375, val, TOLERANCE); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_NEAR(1.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 10.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 11.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 11.0, 11.0); EXPECT_NEAR(3.0, val, TOLERANCE); val = interpolator.sampleVoxel(9.0, 11.0, 9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(9.0, 10.0, 9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_NEAR(1.1, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_NEAR(2.792, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_NEAR(2.71, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_NEAR(2.01, val, TOLERANCE); } TEST_F(TestLinearInterp, testAccessorFloat) { testAccessor<openvdb::FloatGrid>(); } TEST_F(TestLinearInterp, testAccessorDouble) { testAccessor<openvdb::DoubleGrid>(); } TEST_F(TestLinearInterp, testAccessorVec3S) { using namespace openvdb; Vec3s fillValue = Vec3s(256.0f, 256.0f, 256.0f); Vec3SGrid grid(fillValue); typedef Vec3SGrid::Accessor AccessorType; AccessorType acc = grid.getAccessor(); acc.setValue(openvdb::Coord(10, 10, 10), Vec3s(1.0, 1.0, 1.0)); acc.setValue(openvdb::Coord(11, 10, 10), Vec3s(2.0, 2.0, 2.0)); acc.setValue(openvdb::Coord(11, 11, 10), Vec3s(2.0, 2.0, 2.0)); acc.setValue(openvdb::Coord(10, 11, 10), Vec3s(2.0, 2.0, 2.0)); acc.setValue(openvdb::Coord( 9, 11, 10), Vec3s(2.0, 2.0, 2.0)); acc.setValue(openvdb::Coord( 9, 10, 10), Vec3s(2.0, 2.0, 2.0)); acc.setValue(openvdb::Coord( 9, 9, 10), Vec3s(2.0, 2.0, 2.0)); acc.setValue(openvdb::Coord(10, 9, 10), Vec3s(2.0, 2.0, 2.0)); 
acc.setValue(openvdb::Coord(11, 9, 10), Vec3s(2.0, 2.0, 2.0)); acc.setValue(openvdb::Coord(10, 10, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord(11, 10, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord(11, 11, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord(10, 11, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord( 9, 11, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord( 9, 10, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord( 9, 9, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord(10, 9, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord(11, 9, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord(10, 10, 9), Vec3s(4.0, 4.0, 4.0)); acc.setValue(openvdb::Coord(11, 10, 9), Vec3s(4.0, 4.0, 4.0)); acc.setValue(openvdb::Coord(11, 11, 9), Vec3s(4.0, 4.0, 4.0)); acc.setValue(openvdb::Coord(10, 11, 9), Vec3s(4.0, 4.0, 4.0)); acc.setValue(openvdb::Coord( 9, 11, 9), Vec3s(4.0, 4.0, 4.0)); acc.setValue(openvdb::Coord( 9, 10, 9), Vec3s(4.0, 4.0, 4.0)); acc.setValue(openvdb::Coord( 9, 9, 9), Vec3s(4.0, 4.0, 4.0)); acc.setValue(openvdb::Coord(10, 9, 9), Vec3s(4.0, 4.0, 4.0)); acc.setValue(openvdb::Coord(11, 9, 9), Vec3s(4.0, 4.0, 4.0)); openvdb::tools::GridSampler<AccessorType, openvdb::tools::BoxSampler> interpolator(acc, grid.transform()); Vec3SGrid::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.375f))); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(1.0f))); val = interpolator.sampleVoxel(11.0, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(2.0f))); val = interpolator.sampleVoxel(11.0, 11.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(2.0f))); val = interpolator.sampleVoxel(11.0, 11.0, 11.0); EXPECT_TRUE(val.eq(Vec3s(3.0f))); val = interpolator.sampleVoxel(9.0, 11.0, 9.0); EXPECT_TRUE(val.eq(Vec3s(4.0f))); val = interpolator.sampleVoxel(9.0, 10.0, 9.0); EXPECT_TRUE(val.eq(Vec3s(4.0f))); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(1.1f))); 
val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_TRUE(val.eq(Vec3s(2.792f))); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.41f))); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.41f))); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_TRUE(val.eq(Vec3s(2.71f))); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_TRUE(val.eq(Vec3s(2.01f))); } template<typename GridType> void TestLinearInterp::testConstantValues() { typedef typename GridType::TreeType TreeType; float fillValue = 256.0f; GridType grid(fillValue); TreeType& tree = grid.tree(); // Add values to buffer zero. tree.setValue(openvdb::Coord(10, 10, 10), 2.0); tree.setValue(openvdb::Coord(11, 10, 10), 2.0); tree.setValue(openvdb::Coord(11, 11, 10), 2.0); tree.setValue(openvdb::Coord(10, 11, 10), 2.0); tree.setValue(openvdb::Coord( 9, 11, 10), 2.0); tree.setValue(openvdb::Coord( 9, 10, 10), 2.0); tree.setValue(openvdb::Coord( 9, 9, 10), 2.0); tree.setValue(openvdb::Coord(10, 9, 10), 2.0); tree.setValue(openvdb::Coord(11, 9, 10), 2.0); tree.setValue(openvdb::Coord(10, 10, 11), 2.0); tree.setValue(openvdb::Coord(11, 10, 11), 2.0); tree.setValue(openvdb::Coord(11, 11, 11), 2.0); tree.setValue(openvdb::Coord(10, 11, 11), 2.0); tree.setValue(openvdb::Coord( 9, 11, 11), 2.0); tree.setValue(openvdb::Coord( 9, 10, 11), 2.0); tree.setValue(openvdb::Coord( 9, 9, 11), 2.0); tree.setValue(openvdb::Coord(10, 9, 11), 2.0); tree.setValue(openvdb::Coord(11, 9, 11), 2.0); tree.setValue(openvdb::Coord(10, 10, 9), 2.0); tree.setValue(openvdb::Coord(11, 10, 9), 2.0); tree.setValue(openvdb::Coord(11, 11, 9), 2.0); tree.setValue(openvdb::Coord(10, 11, 9), 2.0); tree.setValue(openvdb::Coord( 9, 11, 9), 2.0); tree.setValue(openvdb::Coord( 9, 10, 9), 2.0); tree.setValue(openvdb::Coord( 9, 9, 9), 2.0); tree.setValue(openvdb::Coord(10, 9, 9), 2.0); tree.setValue(openvdb::Coord(11, 9, 9), 2.0); openvdb::tools::GridSampler<TreeType, 
openvdb::tools::BoxSampler> interpolator(grid); //openvdb::tools::LinearInterp<GridType> interpolator(*tree); typename GridType::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_NEAR(2.0, val, TOLERANCE); } TEST_F(TestLinearInterp, testConstantValuesFloat) { testConstantValues<openvdb::FloatGrid>(); } TEST_F(TestLinearInterp, testConstantValuesDouble) { testConstantValues<openvdb::DoubleGrid>(); } TEST_F(TestLinearInterp, testConstantValuesVec3S) { using namespace openvdb; Vec3s fillValue = Vec3s(256.0f, 256.0f, 256.0f); Vec3SGrid grid(fillValue); Vec3STree& tree = grid.tree(); // Add values to buffer zero. 
tree.setValue(openvdb::Coord(10, 10, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 10, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 10, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 10, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 10, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 11, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 11, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 11, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 10, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 9, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 9, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 9, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 10, 9), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 10, 9), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 11, 9), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 11, 9), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 11, 9), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 10, 9), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 9, 9), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 9, 9), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 9, 9), Vec3s(2.0, 2.0, 2.0)); openvdb::tools::GridSampler<Vec3STree, openvdb::tools::BoxSampler> interpolator(grid); //openvdb::tools::LinearInterp<Vec3STree> interpolator(*tree); Vec3SGrid::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.0, 2.0, 
2.0))); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(2.0, 2.0, 2.0))); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(2.0, 2.0, 2.0))); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_TRUE(val.eq(Vec3s(2.0, 2.0, 2.0))); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.0, 2.0, 2.0))); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.0, 2.0, 2.0))); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_TRUE(val.eq(Vec3s(2.0, 2.0, 2.0))); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_TRUE(val.eq(Vec3s(2.0, 2.0, 2.0))); } template<typename GridType> void TestLinearInterp::testFillValues() { //typedef typename GridType::TreeType TreeType; float fillValue = 256.0f; GridType grid(fillValue); //typename GridType::TreeType& tree = grid.tree(); openvdb::tools::GridSampler<GridType, openvdb::tools::BoxSampler> interpolator(grid); //openvdb::tools::LinearInterp<GridType> interpolator(*tree); typename GridType::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_NEAR(256.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_NEAR(256.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_NEAR(256.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_NEAR(256.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_NEAR(256.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_NEAR(256.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_NEAR(256.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_NEAR(256.0, val, TOLERANCE); } TEST_F(TestLinearInterp, testFillValuesFloat) { testFillValues<openvdb::FloatGrid>(); } TEST_F(TestLinearInterp, testFillValuesDouble) { testFillValues<openvdb::DoubleGrid>(); } TEST_F(TestLinearInterp, testFillValuesVec3S) { using namespace openvdb; 
Vec3s fillValue = Vec3s(256.0f, 256.0f, 256.0f); Vec3SGrid grid(fillValue); //Vec3STree& tree = grid.tree(); openvdb::tools::GridSampler<Vec3SGrid, openvdb::tools::BoxSampler> interpolator(grid); //openvdb::tools::LinearInterp<Vec3STree> interpolator(*tree); Vec3SGrid::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_TRUE(val.eq(Vec3s(256.0, 256.0, 256.0))); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(256.0, 256.0, 256.0))); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(256.0, 256.0, 256.0))); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_TRUE(val.eq(Vec3s(256.0, 256.0, 256.0))); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_TRUE(val.eq(Vec3s(256.0, 256.0, 256.0))); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_TRUE(val.eq(Vec3s(256.0, 256.0, 256.0))); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_TRUE(val.eq(Vec3s(256.0, 256.0, 256.0))); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_TRUE(val.eq(Vec3s(256.0, 256.0, 256.0))); } template<typename GridType> void TestLinearInterp::testNegativeIndices() { typedef typename GridType::TreeType TreeType; float fillValue = 256.0f; GridType grid(fillValue); TreeType& tree = grid.tree(); tree.setValue(openvdb::Coord(-10, -10, -10), 1.0); tree.setValue(openvdb::Coord(-11, -10, -10), 2.0); tree.setValue(openvdb::Coord(-11, -11, -10), 2.0); tree.setValue(openvdb::Coord(-10, -11, -10), 2.0); tree.setValue(openvdb::Coord( -9, -11, -10), 2.0); tree.setValue(openvdb::Coord( -9, -10, -10), 2.0); tree.setValue(openvdb::Coord( -9, -9, -10), 2.0); tree.setValue(openvdb::Coord(-10, -9, -10), 2.0); tree.setValue(openvdb::Coord(-11, -9, -10), 2.0); tree.setValue(openvdb::Coord(-10, -10, -11), 3.0); tree.setValue(openvdb::Coord(-11, -10, -11), 3.0); tree.setValue(openvdb::Coord(-11, -11, -11), 3.0); tree.setValue(openvdb::Coord(-10, -11, -11), 3.0); tree.setValue(openvdb::Coord( -9, -11, -11), 3.0); 
tree.setValue(openvdb::Coord( -9, -10, -11), 3.0); tree.setValue(openvdb::Coord( -9, -9, -11), 3.0); tree.setValue(openvdb::Coord(-10, -9, -11), 3.0); tree.setValue(openvdb::Coord(-11, -9, -11), 3.0); tree.setValue(openvdb::Coord(-10, -10, -9), 4.0); tree.setValue(openvdb::Coord(-11, -10, -9), 4.0); tree.setValue(openvdb::Coord(-11, -11, -9), 4.0); tree.setValue(openvdb::Coord(-10, -11, -9), 4.0); tree.setValue(openvdb::Coord( -9, -11, -9), 4.0); tree.setValue(openvdb::Coord( -9, -10, -9), 4.0); tree.setValue(openvdb::Coord( -9, -9, -9), 4.0); tree.setValue(openvdb::Coord(-10, -9, -9), 4.0); tree.setValue(openvdb::Coord(-11, -9, -9), 4.0); //openvdb::tools::LinearInterp<GridType> interpolator(*tree); openvdb::tools::GridSampler<TreeType, openvdb::tools::BoxSampler> interpolator(grid); typename GridType::ValueType val = interpolator.sampleVoxel(-10.5, -10.5, -10.5); EXPECT_NEAR(2.375, val, TOLERANCE); val = interpolator.sampleVoxel(-10.0, -10.0, -10.0); EXPECT_NEAR(1.0, val, TOLERANCE); val = interpolator.sampleVoxel(-11.0, -10.0, -10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(-11.0, -11.0, -10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(-11.0, -11.0, -11.0); EXPECT_NEAR(3.0, val, TOLERANCE); val = interpolator.sampleVoxel(-9.0, -11.0, -9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(-9.0, -10.0, -9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(-10.1, -10.0, -10.0); EXPECT_NEAR(1.1, val, TOLERANCE); val = interpolator.sampleVoxel(-10.8, -10.8, -10.8); EXPECT_NEAR(2.792, val, TOLERANCE); val = interpolator.sampleVoxel(-10.1, -10.8, -10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(-10.8, -10.1, -10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(-10.5, -10.1, -10.8); EXPECT_NEAR(2.71, val, TOLERANCE); val = interpolator.sampleVoxel(-10.5, -10.8, -10.1); EXPECT_NEAR(2.01, val, TOLERANCE); } TEST_F(TestLinearInterp, 
testNegativeIndicesFloat) { testNegativeIndices<openvdb::FloatGrid>(); } TEST_F(TestLinearInterp, testNegativeIndicesDouble) { testNegativeIndices<openvdb::DoubleGrid>(); } TEST_F(TestLinearInterp, testNegativeIndicesVec3S) { using namespace openvdb; Vec3s fillValue = Vec3s(256.0f, 256.0f, 256.0f); Vec3SGrid grid(fillValue); Vec3STree& tree = grid.tree(); tree.setValue(openvdb::Coord(-10, -10, -10), Vec3s(1.0, 1.0, 1.0)); tree.setValue(openvdb::Coord(-11, -10, -10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(-11, -11, -10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(-10, -11, -10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( -9, -11, -10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( -9, -10, -10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( -9, -9, -10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(-10, -9, -10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(-11, -9, -10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(-10, -10, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(-11, -10, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(-11, -11, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(-10, -11, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( -9, -11, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( -9, -10, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( -9, -9, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(-10, -9, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(-11, -9, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(-10, -10, -9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(-11, -10, -9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(-11, -11, -9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(-10, -11, -9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( -9, -11, -9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( -9, -10, -9), Vec3s(4.0, 4.0, 4.0)); 
tree.setValue(openvdb::Coord( -9, -9, -9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(-10, -9, -9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(-11, -9, -9), Vec3s(4.0, 4.0, 4.0)); openvdb::tools::GridSampler<Vec3SGrid, openvdb::tools::BoxSampler> interpolator(grid); //openvdb::tools::LinearInterp<Vec3STree> interpolator(*tree); Vec3SGrid::ValueType val = interpolator.sampleVoxel(-10.5, -10.5, -10.5); EXPECT_TRUE(val.eq(Vec3s(2.375f))); val = interpolator.sampleVoxel(-10.0, -10.0, -10.0); EXPECT_TRUE(val.eq(Vec3s(1.0f))); val = interpolator.sampleVoxel(-11.0, -10.0, -10.0); EXPECT_TRUE(val.eq(Vec3s(2.0f))); val = interpolator.sampleVoxel(-11.0, -11.0, -10.0); EXPECT_TRUE(val.eq(Vec3s(2.0f))); val = interpolator.sampleVoxel(-11.0, -11.0, -11.0); EXPECT_TRUE(val.eq(Vec3s(3.0f))); val = interpolator.sampleVoxel(-9.0, -11.0, -9.0); EXPECT_TRUE(val.eq(Vec3s(4.0f))); val = interpolator.sampleVoxel(-9.0, -10.0, -9.0); EXPECT_TRUE(val.eq(Vec3s(4.0f))); val = interpolator.sampleVoxel(-10.1, -10.0, -10.0); EXPECT_TRUE(val.eq(Vec3s(1.1f))); val = interpolator.sampleVoxel(-10.8, -10.8, -10.8); EXPECT_TRUE(val.eq(Vec3s(2.792f))); val = interpolator.sampleVoxel(-10.1, -10.8, -10.5); EXPECT_TRUE(val.eq(Vec3s(2.41f))); val = interpolator.sampleVoxel(-10.8, -10.1, -10.5); EXPECT_TRUE(val.eq(Vec3s(2.41f))); val = interpolator.sampleVoxel(-10.5, -10.1, -10.8); EXPECT_TRUE(val.eq(Vec3s(2.71f))); val = interpolator.sampleVoxel(-10.5, -10.8, -10.1); EXPECT_TRUE(val.eq(Vec3s(2.01f))); } template<typename GridType> void TestLinearInterp::testStencilsMatch() { typedef typename GridType::ValueType ValueType; GridType grid; typename GridType::TreeType& tree = grid.tree(); // using mostly recurring numbers tree.setValue(openvdb::Coord(0, 0, 0), ValueType(1.0/3.0)); tree.setValue(openvdb::Coord(0, 1, 0), ValueType(1.0/11.0)); tree.setValue(openvdb::Coord(0, 0, 1), ValueType(1.0/81.0)); tree.setValue(openvdb::Coord(1, 0, 0), ValueType(1.0/97.0)); tree.setValue(openvdb::Coord(1, 
1, 0), ValueType(1.0/61.0)); tree.setValue(openvdb::Coord(0, 1, 1), ValueType(9.0/7.0)); tree.setValue(openvdb::Coord(1, 0, 1), ValueType(9.0/11.0)); tree.setValue(openvdb::Coord(1, 1, 1), ValueType(22.0/7.0)); const openvdb::Vec3f pos(7.0f/12.0f, 1.0f/3.0f, 2.0f/3.0f); {//using BoxSampler and BoxStencil openvdb::tools::GridSampler<GridType, openvdb::tools::BoxSampler> interpolator(grid); openvdb::math::BoxStencil<const GridType> stencil(grid); typename GridType::ValueType val1 = interpolator.sampleVoxel(pos.x(), pos.y(), pos.z()); stencil.moveTo(pos); typename GridType::ValueType val2 = stencil.interpolation(pos); EXPECT_EQ(val1, val2); } } TEST_F(TestLinearInterp, testStencilsMatchFloat) { testStencilsMatch<openvdb::FloatGrid>(); } TEST_F(TestLinearInterp, testStencilsMatchDouble) { testStencilsMatch<openvdb::DoubleGrid>(); }
39,428
C++
37.022179
99
0.622908
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestLeaf.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/tree/LeafNode.h> #include <openvdb/math/Math.h>// for math::Random01(), math::Pow3() class TestLeaf: public ::testing::Test { public: void testBuffer(); void testGetValue(); }; typedef openvdb::tree::LeafNode<int, 3> LeafType; typedef LeafType::Buffer BufferType; using openvdb::Index; void TestLeaf::testBuffer() { {// access BufferType buf; for (Index i = 0; i < BufferType::size(); ++i) { buf.mData[i] = i; EXPECT_TRUE(buf[i] == buf.mData[i]); } for (Index i = 0; i < BufferType::size(); ++i) { buf[i] = i; EXPECT_EQ(int(i), buf[i]); } } {// swap BufferType buf0, buf1, buf2; int *buf0Data = buf0.mData; int *buf1Data = buf1.mData; for (Index i = 0; i < BufferType::size(); ++i) { buf0[i] = i; buf1[i] = i * 2; } buf0.swap(buf1); EXPECT_TRUE(buf0.mData == buf1Data); EXPECT_TRUE(buf1.mData == buf0Data); buf1.swap(buf0); EXPECT_TRUE(buf0.mData == buf0Data); EXPECT_TRUE(buf1.mData == buf1Data); buf0.swap(buf2); EXPECT_TRUE(buf2.mData == buf0Data); buf2.swap(buf0); EXPECT_TRUE(buf0.mData == buf0Data); } } TEST_F(TestLeaf, testBuffer) { testBuffer(); } void TestLeaf::testGetValue() { LeafType leaf(openvdb::Coord(0, 0, 0)); leaf.mBuffer[0] = 2; leaf.mBuffer[1] = 3; leaf.mBuffer[2] = 4; leaf.mBuffer[65] = 10; EXPECT_EQ(2, leaf.getValue(openvdb::Coord(0, 0, 0))); EXPECT_EQ(3, leaf.getValue(openvdb::Coord(0, 0, 1))); EXPECT_EQ(4, leaf.getValue(openvdb::Coord(0, 0, 2))); EXPECT_EQ(10, leaf.getValue(openvdb::Coord(1, 0, 1))); } TEST_F(TestLeaf, testGetValue) { testGetValue(); } TEST_F(TestLeaf, testSetValue) { LeafType leaf(openvdb::Coord(0, 0, 0), 3); openvdb::Coord xyz(0, 0, 0); leaf.setValueOn(xyz, 10); EXPECT_EQ(10, leaf.getValue(xyz)); xyz.reset(7, 7, 7); leaf.setValueOn(xyz, 7); EXPECT_EQ(7, leaf.getValue(xyz)); leaf.setValueOnly(xyz, 10); EXPECT_EQ(10, leaf.getValue(xyz)); xyz.reset(2, 3, 6); leaf.setValueOn(xyz, 236); 
EXPECT_EQ(236, leaf.getValue(xyz)); leaf.setValueOff(xyz, 1); EXPECT_EQ(1, leaf.getValue(xyz)); EXPECT_TRUE(!leaf.isValueOn(xyz)); } TEST_F(TestLeaf, testIsValueSet) { LeafType leaf(openvdb::Coord(0, 0, 0)); leaf.setValueOn(openvdb::Coord(1, 5, 7), 10); EXPECT_TRUE(leaf.isValueOn(openvdb::Coord(1, 5, 7))); EXPECT_TRUE(!leaf.isValueOn(openvdb::Coord(0, 5, 7))); EXPECT_TRUE(!leaf.isValueOn(openvdb::Coord(1, 6, 7))); EXPECT_TRUE(!leaf.isValueOn(openvdb::Coord(0, 5, 6))); } TEST_F(TestLeaf, testProbeValue) { LeafType leaf(openvdb::Coord(0, 0, 0)); leaf.setValueOn(openvdb::Coord(1, 6, 5), 10); LeafType::ValueType val; EXPECT_TRUE(leaf.probeValue(openvdb::Coord(1, 6, 5), val)); EXPECT_TRUE(!leaf.probeValue(openvdb::Coord(1, 6, 4), val)); } TEST_F(TestLeaf, testIterators) { LeafType leaf(openvdb::Coord(0, 0, 0), 2); leaf.setValueOn(openvdb::Coord(1, 2, 3), -3); leaf.setValueOn(openvdb::Coord(5, 2, 3), 4); LeafType::ValueType sum = 0; for (LeafType::ValueOnIter iter = leaf.beginValueOn(); iter; ++iter) sum += *iter; EXPECT_EQ((-3 + 4), sum); } TEST_F(TestLeaf, testEquivalence) { LeafType leaf( openvdb::Coord(0, 0, 0), 2); LeafType leaf2(openvdb::Coord(0, 0, 0), 3); EXPECT_TRUE(leaf != leaf2); for(openvdb::Index32 i = 0; i < LeafType::size(); ++i) { leaf.setValueOnly(i, i); leaf2.setValueOnly(i, i); } EXPECT_TRUE(leaf == leaf2); // set some values. 
leaf.setValueOn(openvdb::Coord(0, 0, 0), 1); leaf.setValueOn(openvdb::Coord(0, 1, 0), 1); leaf.setValueOn(openvdb::Coord(1, 1, 0), 1); leaf.setValueOn(openvdb::Coord(1, 1, 2), 1); leaf2.setValueOn(openvdb::Coord(0, 0, 0), 1); leaf2.setValueOn(openvdb::Coord(0, 1, 0), 1); leaf2.setValueOn(openvdb::Coord(1, 1, 0), 1); leaf2.setValueOn(openvdb::Coord(1, 1, 2), 1); EXPECT_TRUE(leaf == leaf2); leaf2.setValueOn(openvdb::Coord(0, 0, 1), 1); EXPECT_TRUE(leaf != leaf2); leaf2.setValueOff(openvdb::Coord(0, 0, 1), 1); EXPECT_TRUE(leaf == leaf2); } TEST_F(TestLeaf, testGetOrigin) { { LeafType leaf(openvdb::Coord(1, 0, 0), 1); EXPECT_EQ(openvdb::Coord(0, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(0, 0, 0), 1); EXPECT_EQ(openvdb::Coord(0, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(8, 0, 0), 1); EXPECT_EQ(openvdb::Coord(8, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(8, 1, 0), 1); EXPECT_EQ(openvdb::Coord(8, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(1024, 1, 3), 1); EXPECT_EQ(openvdb::Coord(128*8, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(1023, 1, 3), 1); EXPECT_EQ(openvdb::Coord(127*8, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(512, 512, 512), 1); EXPECT_EQ(openvdb::Coord(512, 512, 512), leaf.origin()); } { LeafType leaf(openvdb::Coord(2, 52, 515), 1); EXPECT_EQ(openvdb::Coord(0, 48, 512), leaf.origin()); } } TEST_F(TestLeaf, testIteratorGetCoord) { using namespace openvdb; LeafType leaf(openvdb::Coord(8, 8, 0), 2); EXPECT_EQ(Coord(8, 8, 0), leaf.origin()); leaf.setValueOn(Coord(1, 2, 3), -3); leaf.setValueOn(Coord(5, 2, 3), 4); LeafType::ValueOnIter iter = leaf.beginValueOn(); Coord xyz = iter.getCoord(); EXPECT_EQ(Coord(9, 10, 3), xyz); ++iter; xyz = iter.getCoord(); EXPECT_EQ(Coord(13, 10, 3), xyz); } TEST_F(TestLeaf, testNegativeIndexing) { using namespace openvdb; LeafType leaf(openvdb::Coord(-9, -2, -8), 1); EXPECT_EQ(Coord(-16, -8, -8), leaf.origin()); leaf.setValueOn(Coord(1, 2, 3), -3); 
leaf.setValueOn(Coord(5, 2, 3), 4); EXPECT_EQ(-3, leaf.getValue(Coord(1, 2, 3))); EXPECT_EQ(4, leaf.getValue(Coord(5, 2, 3))); LeafType::ValueOnIter iter = leaf.beginValueOn(); Coord xyz = iter.getCoord(); EXPECT_EQ(Coord(-15, -6, -5), xyz); ++iter; xyz = iter.getCoord(); EXPECT_EQ(Coord(-11, -6, -5), xyz); } TEST_F(TestLeaf, testIsConstant) { using namespace openvdb; const Coord origin(-9, -2, -8); {// check old version (v3.0 and older) with float // Acceptable range: first-value +/- tolerance const float val = 1.0f, tol = 0.01f; tree::LeafNode<float, 3> leaf(origin, val, true); float v = 0.0f; bool stat = false; EXPECT_TRUE(leaf.isConstant(v, stat, tol)); EXPECT_TRUE(stat); EXPECT_EQ(val, v); leaf.setValueOff(0); EXPECT_TRUE(!leaf.isConstant(v, stat, tol)); leaf.setValueOn(0); EXPECT_TRUE(leaf.isConstant(v, stat, tol)); leaf.setValueOn(0, val + 0.99f*tol); EXPECT_TRUE(leaf.isConstant(v, stat, tol)); EXPECT_TRUE(stat); EXPECT_EQ(val + 0.99f*tol, v); leaf.setValueOn(0, val + 1.01f*tol); EXPECT_TRUE(!leaf.isConstant(v, stat, tol)); } {// check old version (v3.0 and older) with double // Acceptable range: first-value +/- tolerance const double val = 1.0, tol = 0.00001; tree::LeafNode<double, 3> leaf(origin, val, true); double v = 0.0; bool stat = false; EXPECT_TRUE(leaf.isConstant(v, stat, tol)); EXPECT_TRUE(stat); EXPECT_EQ(val, v); leaf.setValueOff(0); EXPECT_TRUE(!leaf.isConstant(v, stat, tol)); leaf.setValueOn(0); EXPECT_TRUE(leaf.isConstant(v, stat, tol)); leaf.setValueOn(0, val + 0.99*tol); EXPECT_TRUE(leaf.isConstant(v, stat, tol)); EXPECT_TRUE(stat); EXPECT_EQ(val + 0.99*tol, v); leaf.setValueOn(0, val + 1.01*tol); EXPECT_TRUE(!leaf.isConstant(v, stat, tol)); } {// check newer version (v3.2 and newer) with float // Acceptable range: max - min <= tolerance const float val = 1.0, tol = 0.01f; tree::LeafNode<float, 3> leaf(origin, val, true); float vmin = 0.0f, vmax = 0.0f; bool stat = false; EXPECT_TRUE(leaf.isConstant(vmin, vmax, stat, tol)); 
EXPECT_TRUE(stat); EXPECT_EQ(val, vmin); EXPECT_EQ(val, vmax); leaf.setValueOff(0); EXPECT_TRUE(!leaf.isConstant(vmin, vmax, stat, tol)); leaf.setValueOn(0); EXPECT_TRUE(leaf.isConstant(vmin, vmax, stat, tol)); leaf.setValueOn(0, val + tol); EXPECT_TRUE(leaf.isConstant(vmin, vmax, stat, tol)); EXPECT_EQ(val, vmin); EXPECT_EQ(val + tol, vmax); leaf.setValueOn(0, val + 1.01f*tol); EXPECT_TRUE(!leaf.isConstant(vmin, vmax, stat, tol)); } {// check newer version (v3.2 and newer) with double // Acceptable range: (max- min) <= tolerance const double val = 1.0, tol = 0.000001; tree::LeafNode<double, 3> leaf(origin, val, true); double vmin = 0.0, vmax = 0.0; bool stat = false; EXPECT_TRUE(leaf.isConstant(vmin, vmax, stat, tol)); EXPECT_TRUE(stat); EXPECT_EQ(val, vmin); EXPECT_EQ(val, vmax); leaf.setValueOff(0); EXPECT_TRUE(!leaf.isConstant(vmin, vmax, stat, tol)); leaf.setValueOn(0); EXPECT_TRUE(leaf.isConstant(vmin, vmax, stat, tol)); leaf.setValueOn(0, val + tol); EXPECT_TRUE(leaf.isConstant(vmin, vmax, stat, tol)); EXPECT_EQ(val, vmin); EXPECT_EQ(val + tol, vmax); leaf.setValueOn(0, val + 1.01*tol); EXPECT_TRUE(!leaf.isConstant(vmin, vmax, stat, tol)); } {// check newer version (v3.2 and newer) with float and random values typedef tree::LeafNode<float,3> LeafNodeT; const float val = 1.0, tol = 1.0f; LeafNodeT leaf(origin, val, true); float min = 2.0f, max = -min; math::Random01 r(145);// random values in the range [0,1] for (Index i=0; i<LeafNodeT::NUM_VALUES; ++i) { const float v = float(r()); if (v < min) min = v; if (v > max) max = v; leaf.setValueOnly(i, v); } float vmin = 0.0f, vmax = 0.0f; bool stat = false; EXPECT_TRUE(leaf.isConstant(vmin, vmax, stat, tol)); EXPECT_TRUE(stat); EXPECT_TRUE(math::isApproxEqual(min, vmin)); EXPECT_TRUE(math::isApproxEqual(max, vmax)); } } TEST_F(TestLeaf, testMedian) { using namespace openvdb; const Coord origin(-9, -2, -8); std::vector<float> v{5, 6, 4, 3, 2, 6, 7, 9, 3}; tree::LeafNode<float, 3> leaf(origin, 1.0f, false); float 
val = 0.0f; EXPECT_EQ(Index(0), leaf.medianOn(val)); EXPECT_EQ(0.0f, val); EXPECT_EQ(leaf.numValues(), leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(0,0,0), v[0]); EXPECT_EQ(Index(1), leaf.medianOn(val)); EXPECT_EQ(v[0], val); EXPECT_EQ(leaf.numValues()-1, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(0,0,1), v[1]); EXPECT_EQ(Index(2), leaf.medianOn(val)); EXPECT_EQ(v[0], val); EXPECT_EQ(leaf.numValues()-2, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(0,2,1), v[2]); EXPECT_EQ(Index(3), leaf.medianOn(val)); EXPECT_EQ(v[0], val); EXPECT_EQ(leaf.numValues()-3, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(1,2,1), v[3]); EXPECT_EQ(Index(4), leaf.medianOn(val)); EXPECT_EQ(v[2], val); EXPECT_EQ(leaf.numValues()-4, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(1,2,3), v[4]); EXPECT_EQ(Index(5), leaf.medianOn(val)); EXPECT_EQ(v[2], val); EXPECT_EQ(leaf.numValues()-5, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(2,2,1), v[5]); EXPECT_EQ(Index(6), leaf.medianOn(val)); EXPECT_EQ(v[2], val); EXPECT_EQ(leaf.numValues()-6, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(2,4,1), v[6]); EXPECT_EQ(Index(7), leaf.medianOn(val)); EXPECT_EQ(v[0], val); EXPECT_EQ(leaf.numValues()-7, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(2,6,1), v[7]); EXPECT_EQ(Index(8), leaf.medianOn(val)); EXPECT_EQ(v[0], val); EXPECT_EQ(leaf.numValues()-8, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(7,2,1), v[8]); EXPECT_EQ(Index(9), leaf.medianOn(val)); EXPECT_EQ(v[0], val); EXPECT_EQ(leaf.numValues()-9, leaf.medianOff(val)); 
EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.fill(2.0f, true); EXPECT_EQ(leaf.numValues(), leaf.medianOn(val)); EXPECT_EQ(2.0f, val); EXPECT_EQ(Index(0), leaf.medianOff(val)); EXPECT_EQ(2.0f, val); EXPECT_EQ(2.0f, leaf.medianAll()); } TEST_F(TestLeaf, testFill) { using namespace openvdb; const Coord origin(-9, -2, -8); const float bg = 0.0f, fg = 1.0f; tree::LeafNode<float, 3> leaf(origin, bg, false); const int bboxDim = 1 + int(leaf.dim() >> 1); auto bbox = CoordBBox::createCube(leaf.origin(), bboxDim); EXPECT_EQ(math::Pow3(bboxDim), int(bbox.volume())); bbox = leaf.getNodeBoundingBox(); leaf.fill(bbox, bg, false); EXPECT_TRUE(leaf.isEmpty()); leaf.fill(bbox, fg, true); EXPECT_TRUE(leaf.isDense()); leaf.fill(bbox, bg, false); EXPECT_TRUE(leaf.isEmpty()); // Fill a region that is larger than the node but that doesn't completely enclose it. bbox.max() = bbox.min() + (bbox.dim() >> 1); bbox.expand(bbox.min() - Coord{10}); leaf.fill(bbox, fg, true); // Verify that fill() correctly clips the fill region to the node. auto clippedBBox = leaf.getNodeBoundingBox(); clippedBBox.intersect(bbox); EXPECT_EQ(int(clippedBBox.volume()), int(leaf.onVoxelCount())); } TEST_F(TestLeaf, testCount) { using namespace openvdb; const Coord origin(-9, -2, -8); tree::LeafNode<float, 3> leaf(origin, 1.0f, false); EXPECT_EQ(Index(3), leaf.log2dim()); EXPECT_EQ(Index(8), leaf.dim()); EXPECT_EQ(Index(512), leaf.size()); EXPECT_EQ(Index(512), leaf.numValues()); EXPECT_EQ(Index(0), leaf.getLevel()); EXPECT_EQ(Index(1), leaf.getChildDim()); EXPECT_EQ(Index(1), leaf.leafCount()); EXPECT_EQ(Index(0), leaf.nonLeafCount()); EXPECT_EQ(Index(0), leaf.childCount()); std::vector<Index> dims; leaf.getNodeLog2Dims(dims); EXPECT_EQ(size_t(1), dims.size()); EXPECT_EQ(Index(3), dims[0]); }
15,258
C++
28.344231
89
0.585463
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestQuantizedUnitVec.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/math/QuantizedUnitVec.h> #include <openvdb/math/Math.h> #include <openvdb/math/Vec3.h> #include <sstream> #include <algorithm> #include <cmath> #include <ctime> class TestQuantizedUnitVec: public ::testing::Test { protected: // Generate a random number in the range [0, 1]. double randNumber() { return double(rand()) / (double(RAND_MAX) + 1.0); } }; //////////////////////////////////////// namespace { const uint16_t MASK_XSIGN = 0x8000, // 1000000000000000 MASK_YSIGN = 0x4000, // 0100000000000000 MASK_ZSIGN = 0x2000; // 0010000000000000 } //////////////////////////////////////// TEST_F(TestQuantizedUnitVec, testQuantization) { using namespace openvdb; using namespace openvdb::math; // // Check sign bits // Vec3s unitVec = Vec3s(-1.0, -1.0, -1.0); unitVec.normalize(); uint16_t quantizedVec = QuantizedUnitVec::pack(unitVec); EXPECT_TRUE((quantizedVec & MASK_XSIGN)); EXPECT_TRUE((quantizedVec & MASK_YSIGN)); EXPECT_TRUE((quantizedVec & MASK_ZSIGN)); unitVec[0] = -unitVec[0]; unitVec[2] = -unitVec[2]; quantizedVec = QuantizedUnitVec::pack(unitVec); EXPECT_TRUE(!(quantizedVec & MASK_XSIGN)); EXPECT_TRUE((quantizedVec & MASK_YSIGN)); EXPECT_TRUE(!(quantizedVec & MASK_ZSIGN)); unitVec[1] = -unitVec[1]; quantizedVec = QuantizedUnitVec::pack(unitVec); EXPECT_TRUE(!(quantizedVec & MASK_XSIGN)); EXPECT_TRUE(!(quantizedVec & MASK_YSIGN)); EXPECT_TRUE(!(quantizedVec & MASK_ZSIGN)); QuantizedUnitVec::flipSignBits(quantizedVec); EXPECT_TRUE((quantizedVec & MASK_XSIGN)); EXPECT_TRUE((quantizedVec & MASK_YSIGN)); EXPECT_TRUE((quantizedVec & MASK_ZSIGN)); unitVec[2] = -unitVec[2]; quantizedVec = QuantizedUnitVec::pack(unitVec); QuantizedUnitVec::flipSignBits(quantizedVec); EXPECT_TRUE((quantizedVec & MASK_XSIGN)); EXPECT_TRUE((quantizedVec & MASK_YSIGN)); EXPECT_TRUE(!(quantizedVec & MASK_ZSIGN)); // // Check conversion error 
// const double tol = 0.05; // component error tolerance const int numNormals = 40000; // init srand(0); const int n = int(std::sqrt(double(numNormals))); const double xScale = (2.0 * M_PI) / double(n); const double yScale = M_PI / double(n); double x, y, theta, phi; Vec3s n0, n1; // generate random normals, by uniformly distributing points on a unit-sphere. // loop over a [0 to n) x [0 to n) grid. for (int a = 0; a < n; ++a) { for (int b = 0; b < n; ++b) { // jitter, move to random pos. inside the current cell x = double(a) + randNumber(); y = double(b) + randNumber(); // remap to a lat/long map theta = y * yScale; // [0 to PI] phi = x * xScale; // [0 to 2PI] // convert to cartesian coordinates on a unit sphere. // spherical coordinate triplet (r=1, theta, phi) n0[0] = float(std::sin(theta)*std::cos(phi)); n0[1] = float(std::sin(theta)*std::sin(phi)); n0[2] = float(std::cos(theta)); EXPECT_NEAR(1.0, n0.length(), 1e-6); n1 = QuantizedUnitVec::unpack(QuantizedUnitVec::pack(n0)); EXPECT_NEAR(1.0, n1.length(), 1e-6); EXPECT_NEAR(n0[0], n1[0], tol); EXPECT_NEAR(n0[1], n1[1], tol); EXPECT_NEAR(n0[2], n1[2], tol); float sumDiff = std::abs(n0[0] - n1[0]) + std::abs(n0[1] - n1[1]) + std::abs(n0[2] - n1[2]); EXPECT_TRUE(sumDiff < (2.0 * tol)); } } }
3,772
C++
26.540146
82
0.587487
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestAttributeArray.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/points/AttributeArray.h> #include <openvdb/points/AttributeSet.h> #include <openvdb/Types.h> #include <openvdb/math/Transform.h> #include <openvdb/io/File.h> #ifdef __clang__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-macros" #endif // Boost.Interprocess uses a header-only portion of Boost.DateTime #define BOOST_DATE_TIME_NO_LIB #ifdef __clang__ #pragma GCC diagnostic pop #endif #include <boost/interprocess/file_mapping.hpp> #include <boost/interprocess/mapped_region.hpp> #include <tbb/tick_count.h> #include <tbb/atomic.h> #include <cstdio> // for std::remove() #include <fstream> #include <sstream> #include <iostream> #ifdef _MSC_VER #include <boost/interprocess/detail/os_file_functions.hpp> // open_existing_file(), close_file() // boost::interprocess::detail was renamed to boost::interprocess::ipcdetail in Boost 1.48. // Ensure that both namespaces exist. namespace boost { namespace interprocess { namespace detail {} namespace ipcdetail {} } } #include <windows.h> #else #include <sys/types.h> // for struct stat #include <sys/stat.h> // for stat() #endif /// @brief io::MappedFile has a private constructor, so declare a class that acts as the friend class TestMappedFile { public: static openvdb::io::MappedFile::Ptr create(const std::string& filename) { return openvdb::SharedPtr<openvdb::io::MappedFile>(new openvdb::io::MappedFile(filename)); } }; /// @brief Functionality similar to openvdb::util::CpuTimer except with prefix padding and no decimals. /// /// @code /// ProfileTimer timer("algorithm 1"); /// // code to be timed goes here /// timer.stop(); /// @endcode class ProfileTimer { public: /// @brief Prints message and starts timer. 
/// /// @note Should normally be followed by a call to stop() ProfileTimer(const std::string& msg) { (void)msg; #ifdef PROFILE // padd string to 50 characters std::string newMsg(msg); if (newMsg.size() < 50) newMsg.insert(newMsg.end(), 50 - newMsg.size(), ' '); std::cerr << newMsg << " ... "; #endif mT0 = tbb::tick_count::now(); } ~ProfileTimer() { this->stop(); } /// Return Time diference in milliseconds since construction or start was called. inline double delta() const { tbb::tick_count::interval_t dt = tbb::tick_count::now() - mT0; return 1000.0*dt.seconds(); } /// @brief Print time in milliseconds since construction or start was called. inline void stop() const { #ifdef PROFILE std::stringstream ss; ss << std::setw(6) << ::round(this->delta()); std::cerr << "completed in " << ss.str() << " ms\n"; #endif } private: tbb::tick_count mT0; };// ProfileTimer struct ScopedFile { explicit ScopedFile(const std::string& s): pathname(s) {} ~ScopedFile() { if (!pathname.empty()) std::remove(pathname.c_str()); } const std::string pathname; }; using namespace openvdb; using namespace openvdb::points; class TestAttributeArray: public ::testing::Test { public: void SetUp() override { AttributeArray::clearRegistry(); } void TearDown() override { AttributeArray::clearRegistry(); } void testRegistry(); void testAccessorEval(); void testDelayedLoad(); }; // class TestAttributeArray //////////////////////////////////////// namespace { bool matchingNamePairs(const openvdb::NamePair& lhs, const openvdb::NamePair& rhs) { if (lhs.first != rhs.first) return false; if (lhs.second != rhs.second) return false; return true; } } // namespace //////////////////////////////////////// TEST_F(TestAttributeArray, testFixedPointConversion) { openvdb::math::Transform::Ptr transform(openvdb::math::Transform::createLinearTransform(/*voxelSize=*/0.1)); const float value = 33.5688040469035f; { // convert to fixed-point value const openvdb::Vec3f worldSpaceValue(value); const openvdb::Vec3f 
indexSpaceValue = transform->worldToIndex(worldSpaceValue); const float voxelSpaceValue = indexSpaceValue.x() - math::Round(indexSpaceValue.x()) + 0.5f; const uint32_t intValue = floatingPointToFixedPoint<uint32_t>(voxelSpaceValue); // convert back to floating-point value const float newVoxelSpaceValue = fixedPointToFloatingPoint<float>(intValue); const openvdb::Vec3f newIndexSpaceValue(newVoxelSpaceValue + math::Round(indexSpaceValue.x()) - 0.5f); const openvdb::Vec3f newWorldSpaceValue = transform->indexToWorld(newIndexSpaceValue); const float newValue = newWorldSpaceValue.x(); EXPECT_NEAR(value, newValue, /*tolerance=*/1e-6); } { // convert to fixed-point value (vector) const openvdb::Vec3f worldSpaceValue(value, value+1, value+2); const openvdb::Vec3f indexSpaceValue = transform->worldToIndex(worldSpaceValue); const float voxelSpaceValueX = indexSpaceValue.x() - math::Round(indexSpaceValue.x()) + 0.5f; const float voxelSpaceValueY = indexSpaceValue.y() - math::Round(indexSpaceValue.y()) + 0.5f; const float voxelSpaceValueZ = indexSpaceValue.z() - math::Round(indexSpaceValue.z()) + 0.5f; const openvdb::Vec3f voxelSpaceValue(voxelSpaceValueX, voxelSpaceValueY, voxelSpaceValueZ); const openvdb::math::Vec3<uint32_t> intValue = floatingPointToFixedPoint<openvdb::math::Vec3<uint32_t>>(voxelSpaceValue); // convert back to floating-point value (vector) const openvdb::Vec3f newVoxelSpaceValue = fixedPointToFloatingPoint<openvdb::Vec3f>(intValue); const float newIndexSpaceValueX = newVoxelSpaceValue.x() + math::Round(indexSpaceValue.x()) - 0.5f; const float newIndexSpaceValueY = newVoxelSpaceValue.y() + math::Round(indexSpaceValue.y()) - 0.5f; const float newIndexSpaceValueZ = newVoxelSpaceValue.z() + math::Round(indexSpaceValue.z()) - 0.5f; const openvdb::Vec3f newIndexSpaceValue(newIndexSpaceValueX, newIndexSpaceValueY, newIndexSpaceValueZ); const openvdb::Vec3f newWorldSpaceValue = transform->indexToWorld(newIndexSpaceValue); EXPECT_NEAR(worldSpaceValue.x(), 
newWorldSpaceValue.x(), /*tolerance=*/1e-6); EXPECT_NEAR(worldSpaceValue.y(), newWorldSpaceValue.y(), /*tolerance=*/1e-6); EXPECT_NEAR(worldSpaceValue.z(), newWorldSpaceValue.z(), /*tolerance=*/1e-6); } } namespace { // use a dummy factory as TypedAttributeArray::factory is private static AttributeArray::Ptr factoryInt(Index n, Index strideOrTotalSize, bool constantStride, const Metadata*) { return TypedAttributeArray<int>::create(n, strideOrTotalSize, constantStride); } } // namespace void TestAttributeArray::testRegistry() { using AttributeF = TypedAttributeArray<float>; using AttributeFTrnc = TypedAttributeArray<float, TruncateCodec>; AttributeArray::clearRegistry(); { // cannot create AttributeArray that is not registered EXPECT_TRUE(!AttributeArray::isRegistered(AttributeF::attributeType())); EXPECT_THROW(AttributeArray::create(AttributeF::attributeType(), Index(5)), LookupError); } { // throw when attempting to register a float type with an integer factory EXPECT_THROW(AttributeArray::registerType( AttributeF::attributeType(), factoryInt), KeyError); } // register the attribute array AttributeF::registerType(); { // can register an AttributeArray with the same value type but different codec EXPECT_NO_THROW(AttributeFTrnc::registerType()); EXPECT_TRUE(AttributeArray::isRegistered(AttributeF::attributeType())); EXPECT_TRUE(AttributeArray::isRegistered(AttributeFTrnc::attributeType())); } { // un-registering AttributeArray::unregisterType(AttributeF::attributeType()); EXPECT_TRUE(!AttributeArray::isRegistered(AttributeF::attributeType())); EXPECT_TRUE(AttributeArray::isRegistered(AttributeFTrnc::attributeType())); } { // clearing registry AttributeF::registerType(); AttributeArray::clearRegistry(); EXPECT_TRUE(!AttributeArray::isRegistered(AttributeF::attributeType())); } } TEST_F(TestAttributeArray, testRegistry) { testRegistry(); } TEST_F(TestAttributeArray, testAttributeArray) { using AttributeArrayF = TypedAttributeArray<float>; using AttributeArrayD = 
TypedAttributeArray<double>; { AttributeArray::Ptr attr(new AttributeArrayD(50)); EXPECT_EQ(Index(50), attr->size()); } { AttributeArray::Ptr attr(new AttributeArrayD(50)); EXPECT_EQ(Index(50), attr->size()); AttributeArrayD& typedAttr = static_cast<AttributeArrayD&>(*attr); typedAttr.set(0, 0.5); double value = 0.0; typedAttr.get(0, value); EXPECT_NEAR(double(0.5), value, /*tolerance=*/double(0.0)); // test unsafe methods for get() and set() typedAttr.setUnsafe(0, 1.5); typedAttr.getUnsafe(0, value); EXPECT_NEAR(double(1.5), value, /*tolerance=*/double(0.0)); // out-of-range get() and set() EXPECT_THROW(typedAttr.set(100, 0.5), openvdb::IndexError); EXPECT_THROW(typedAttr.set(100, 1), openvdb::IndexError); EXPECT_THROW(typedAttr.get(100, value), openvdb::IndexError); EXPECT_THROW(typedAttr.get(100), openvdb::IndexError); } { // test copy constructor and copy assignment operator AttributeArrayD attr1(10); AttributeArrayD attr2(5); attr1.set(9, 4.6); // copy constructor AttributeArrayD attr3(attr1); EXPECT_EQ(Index(10), attr3.size()); EXPECT_EQ(4.6, attr3.get(9)); // copy assignment operator attr2 = attr1; EXPECT_EQ(Index(10), attr2.size()); EXPECT_EQ(4.6, attr2.get(9)); } #ifdef NDEBUG { // test setUnsafe and getUnsafe on uniform arrays AttributeArrayD::Ptr attr(new AttributeArrayD(50)); EXPECT_EQ(Index(50), attr->size()); attr->collapse(5.0); EXPECT_TRUE(attr->isUniform()); EXPECT_NEAR(attr->getUnsafe(10), 5.0, /*tolerance=*/double(0.0)); EXPECT_TRUE(attr->isUniform()); // this is expected behaviour because for performance reasons, array is not implicitly expanded attr->setUnsafe(10, 15.0); EXPECT_TRUE(attr->isUniform()); EXPECT_NEAR(attr->getUnsafe(5), 15.0, /*tolerance=*/double(0.0)); attr->expand(); EXPECT_TRUE(!attr->isUniform()); attr->setUnsafe(10, 25.0); EXPECT_NEAR(attr->getUnsafe(5), 15.0, /*tolerance=*/double(0.0)); EXPECT_NEAR(attr->getUnsafe(10), 25.0, /*tolerance=*/double(0.0)); } #endif using AttributeArrayC = TypedAttributeArray<double, 
FixedPointCodec<false>>; { // test hasValueType() AttributeArray::Ptr attrC(new AttributeArrayC(50)); AttributeArray::Ptr attrD(new AttributeArrayD(50)); AttributeArray::Ptr attrF(new AttributeArrayF(50)); EXPECT_TRUE(attrD->hasValueType<double>()); EXPECT_TRUE(attrC->hasValueType<double>()); EXPECT_TRUE(!attrF->hasValueType<double>()); EXPECT_TRUE(!attrD->hasValueType<float>()); EXPECT_TRUE(!attrC->hasValueType<float>()); EXPECT_TRUE(attrF->hasValueType<float>()); } { // lots of type checking #if OPENVDB_ABI_VERSION_NUMBER >= 6 Index size(50); { TypedAttributeArray<bool> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("bool"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(1), attr.valueTypeSize()); EXPECT_EQ(Index(1), attr.storageTypeSize()); EXPECT_TRUE(!attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<int8_t> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("int8"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(1), attr.valueTypeSize()); EXPECT_EQ(Index(1), attr.storageTypeSize()); EXPECT_TRUE(!attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<int16_t> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("int16"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(2), attr.valueTypeSize()); EXPECT_EQ(Index(2), attr.storageTypeSize()); EXPECT_TRUE(!attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<int32_t> typedAttr(size); 
AttributeArray& attr(typedAttr); EXPECT_EQ(Name("int32"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(4), attr.valueTypeSize()); EXPECT_EQ(Index(4), attr.storageTypeSize()); EXPECT_TRUE(!attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<int64_t> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("int64"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(8), attr.valueTypeSize()); EXPECT_EQ(Index(8), attr.storageTypeSize()); EXPECT_TRUE(!attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { // half is not registered by default, but for complete-ness TypedAttributeArray<half> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("half"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(2), attr.valueTypeSize()); EXPECT_EQ(Index(2), attr.storageTypeSize()); EXPECT_TRUE(attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<float> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("float"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(4), attr.valueTypeSize()); EXPECT_EQ(Index(4), attr.storageTypeSize()); EXPECT_TRUE(attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<double> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("double"), 
attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(8), attr.valueTypeSize()); EXPECT_EQ(Index(8), attr.storageTypeSize()); EXPECT_TRUE(attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<math::Vec3<int32_t>> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("vec3i"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(12), attr.valueTypeSize()); EXPECT_EQ(Index(12), attr.storageTypeSize()); EXPECT_TRUE(!attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(attr.valueTypeIsClass()); EXPECT_TRUE(attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<math::Vec3<double>> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("vec3d"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(24), attr.valueTypeSize()); EXPECT_EQ(Index(24), attr.storageTypeSize()); EXPECT_TRUE(attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(attr.valueTypeIsClass()); EXPECT_TRUE(attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<math::Mat3<float>> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("mat3s"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(36), attr.valueTypeSize()); EXPECT_EQ(Index(36), attr.storageTypeSize()); EXPECT_TRUE(attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(attr.valueTypeIsMatrix()); } { TypedAttributeArray<math::Mat4<double>> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("mat4d"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(128), 
attr.valueTypeSize()); EXPECT_EQ(Index(128), attr.storageTypeSize()); EXPECT_TRUE(attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(attr.valueTypeIsMatrix()); } { TypedAttributeArray<math::Quat<float>> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("quats"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(16), attr.valueTypeSize()); EXPECT_EQ(Index(16), attr.storageTypeSize()); EXPECT_TRUE(attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<float, TruncateCodec> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("float"), attr.valueType()); EXPECT_EQ(Name("trnc"), attr.codecType()); EXPECT_EQ(Index(4), attr.valueTypeSize()); EXPECT_EQ(Index(2), attr.storageTypeSize()); EXPECT_TRUE(attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } #endif } { AttributeArray::Ptr attr(new AttributeArrayC(50)); AttributeArrayC& typedAttr = static_cast<AttributeArrayC&>(*attr); typedAttr.set(0, 0.5); double value = 0.0; typedAttr.get(0, value); EXPECT_NEAR(double(0.5), value, /*tolerance=*/double(0.0001)); // test unsafe methods for get() and set() double value2 = 0.0; typedAttr.setUnsafe(0, double(0.2)); typedAttr.getUnsafe(0, value2); EXPECT_NEAR(double(0.2), value2, /*tolerance=*/double(0.0001)); } using AttributeArrayI = TypedAttributeArray<int32_t>; { // Base class API AttributeArray::Ptr attr(new AttributeArrayI(50)); EXPECT_EQ(Index(50), attr->size()); EXPECT_EQ((sizeof(AttributeArrayI) + sizeof(int)), attr->memUsage()); EXPECT_TRUE(attr->isType<AttributeArrayI>()); 
EXPECT_TRUE(!attr->isType<AttributeArrayD>()); EXPECT_TRUE(*attr == *attr); } { // Typed class API const Index count = 50; const size_t uniformMemUsage = sizeof(AttributeArrayI) + sizeof(int); const size_t expandedMemUsage = sizeof(AttributeArrayI) + count * sizeof(int); AttributeArrayI attr(count); EXPECT_EQ(Index(count), attr.size()); EXPECT_EQ(0, attr.get(0)); EXPECT_EQ(0, attr.get(10)); EXPECT_TRUE(attr.isUniform()); EXPECT_EQ(uniformMemUsage, attr.memUsage()); attr.set(0, 10); EXPECT_TRUE(!attr.isUniform()); EXPECT_EQ(expandedMemUsage, attr.memUsage()); AttributeArrayI attr2(count); attr2.set(0, 10); EXPECT_TRUE(attr == attr2); attr.set(1, 5); EXPECT_TRUE(!attr.compact()); EXPECT_TRUE(!attr.isUniform()); EXPECT_EQ(10, attr.get(0)); EXPECT_EQ(5, attr.get(1)); EXPECT_EQ(0, attr.get(2)); attr.collapse(5); EXPECT_TRUE(attr.isUniform()); EXPECT_EQ(uniformMemUsage, attr.memUsage()); EXPECT_EQ(5, attr.get(0)); EXPECT_EQ(5, attr.get(20)); EXPECT_EQ(5, attr.getUnsafe(20)); attr.expand(/*fill=*/false); EXPECT_TRUE(!attr.isUniform()); EXPECT_EQ(expandedMemUsage, attr.memUsage()); attr.collapse(5); EXPECT_TRUE(attr.isUniform()); attr.expand(); EXPECT_TRUE(!attr.isUniform()); EXPECT_EQ(expandedMemUsage, attr.memUsage()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(5, attr.get(i)); } EXPECT_TRUE(attr.compact()); EXPECT_TRUE(attr.isUniform()); EXPECT_TRUE(attr.compact()); attr.expand(); attr.fill(10); EXPECT_TRUE(!attr.isUniform()); EXPECT_EQ(expandedMemUsage, attr.memUsage()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(10, attr.get(i)); } attr.collapse(7); EXPECT_TRUE(attr.isUniform()); EXPECT_EQ(uniformMemUsage, attr.memUsage()); EXPECT_EQ(7, attr.get(0)); EXPECT_EQ(7, attr.get(20)); attr.fill(5); EXPECT_TRUE(attr.isUniform()); EXPECT_EQ(uniformMemUsage, attr.memUsage()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(5, attr.get(i)); } EXPECT_TRUE(!attr.isTransient()); EXPECT_TRUE(!attr.isHidden()); attr.setTransient(true); 
EXPECT_TRUE(attr.isTransient()); EXPECT_TRUE(!attr.isHidden()); attr.setHidden(true); EXPECT_TRUE(attr.isTransient()); EXPECT_TRUE(attr.isHidden()); attr.setTransient(false); EXPECT_TRUE(!attr.isTransient()); EXPECT_TRUE(attr.isHidden()); attr.setHidden(false); EXPECT_TRUE(!attr.isTransient()); EXPECT_TRUE(!attr.isHidden()); attr.setHidden(true); { // test copy construction AttributeArrayI attrB(attr); EXPECT_TRUE(matchingNamePairs(attr.type(), attrB.type())); EXPECT_EQ(attr.size(), attrB.size()); EXPECT_EQ(attr.memUsage(), attrB.memUsage()); EXPECT_EQ(attr.isUniform(), attrB.isUniform()); EXPECT_EQ(attr.isTransient(), attrB.isTransient()); EXPECT_EQ(attr.isHidden(), attrB.isHidden()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attr.get(i), attrB.get(i)); EXPECT_EQ(attr.get(i), attrB.getUnsafe(i)); EXPECT_EQ(attr.getUnsafe(i), attrB.getUnsafe(i)); } } { // Equality using an unregistered attribute type TypedAttributeArray<half> attr1(50); TypedAttributeArray<half> attr2(50); EXPECT_TRUE(attr1 == attr2); } // attribute array must not be uniform for compression attr.set(1, 7); attr.set(2, 8); attr.set(6, 100); } { // Fixed codec (position range) AttributeArray::Ptr attr1(new AttributeArrayC(50)); AttributeArrayC& fixedPoint = static_cast<AttributeArrayC&>(*attr1); // position range is -0.5 => 0.5 fixedPoint.set(0, -0.6); fixedPoint.set(1, -0.4); fixedPoint.set(2, 0.4); fixedPoint.set(3, 0.6); EXPECT_NEAR(double(-0.5), fixedPoint.get(0), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(-0.4), fixedPoint.get(1), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(0.4), fixedPoint.get(2), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(0.5), fixedPoint.get(3), /*tolerance=*/double(0.0001)); } using UnitFixedPointCodec8 = FixedPointCodec<false, UnitRange>; using AttributeArrayUFxpt8 = TypedAttributeArray<float, UnitFixedPointCodec8>; { // 8-bit fixed codec (unit range) AttributeArray::Ptr attr1(new AttributeArrayUFxpt8(50)); AttributeArrayUFxpt8& 
fixedPoint = static_cast<AttributeArrayUFxpt8&>(*attr1); // unit range is 0.0 => 1.0 fixedPoint.set(0, -0.2); fixedPoint.set(1, 0.3); fixedPoint.set(2, 0.6); fixedPoint.set(3, 1.1); EXPECT_NEAR(double(0.0), fixedPoint.get(0), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(0.3), fixedPoint.get(1), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(0.6), fixedPoint.get(2), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(1.0), fixedPoint.get(3), /*tolerance=*/double(0.0001)); } using UnitFixedPointCodec16 = FixedPointCodec<false, UnitRange>; using AttributeArrayUFxpt16 = TypedAttributeArray<float, UnitFixedPointCodec16>; { // 16-bit fixed codec (unit range) AttributeArray::Ptr attr1(new AttributeArrayUFxpt16(50)); AttributeArrayUFxpt16& fixedPoint = static_cast<AttributeArrayUFxpt16&>(*attr1); // unit range is 0.0 => 1.0 fixedPoint.set(0, -0.2); fixedPoint.set(1, 0.3); fixedPoint.set(2, 0.6); fixedPoint.set(3, 1.1); EXPECT_NEAR(double(0.0), fixedPoint.get(0), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(0.3), fixedPoint.get(1), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(0.6), fixedPoint.get(2), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(1.0), fixedPoint.get(3), /*tolerance=*/double(0.0001)); } using AttributeArrayU = TypedAttributeArray<openvdb::Vec3f, UnitVecCodec>; { // UnitVec codec test AttributeArray::Ptr attr1(new AttributeArrayU(50)); AttributeArrayU& unitVec = static_cast<AttributeArrayU&>(*attr1); // all vectors must be unit length const openvdb::Vec3f vec1(1.0, 0.0, 0.0); const openvdb::Vec3f vec2(openvdb::Vec3f(1.0, 2.0, 3.0).unit()); const openvdb::Vec3f vec3(openvdb::Vec3f(1.0, 2.0, 300000.0).unit()); unitVec.set(0, vec1); unitVec.set(1, vec2); unitVec.set(2, vec3); EXPECT_NEAR(double(vec1.x()), unitVec.get(0).x(), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(vec1.y()), unitVec.get(0).y(), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(vec1.z()), unitVec.get(0).z(), /*tolerance=*/double(0.0001)); 
EXPECT_NEAR(double(vec2.x()), unitVec.get(1).x(), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(vec2.y()), unitVec.get(1).y(), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(vec2.z()), unitVec.get(1).z(), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(vec3.x()), unitVec.get(2).x(), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(vec3.y()), unitVec.get(2).y(), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(vec3.z()), unitVec.get(2).z(), /*tolerance=*/double(0.0001)); } { // IO const Index count = 50; AttributeArrayI attrA(count); for (unsigned i = 0; i < unsigned(count); ++i) { attrA.set(i, int(i)); } attrA.setHidden(true); std::ostringstream ostr(std::ios_base::binary); io::setDataCompression(ostr, io::COMPRESS_BLOSC); attrA.write(ostr); AttributeArrayI attrB; std::istringstream istr(ostr.str(), std::ios_base::binary); attrB.read(istr); EXPECT_TRUE(attrA == attrB); AttributeArrayI attrC(count, 3); attrC.setTransient(true); std::ostringstream ostrC(std::ios_base::binary); attrC.write(ostrC); EXPECT_TRUE(ostrC.str().empty()); std::ostringstream ostrD(std::ios_base::binary); attrC.write(ostrD, /*transient=*/true); EXPECT_TRUE(!ostrD.str().empty()); } // Registry AttributeArrayI::registerType(); AttributeArray::Ptr attr = AttributeArray::create( AttributeArrayI::attributeType(), 34); { // Casting AttributeArray::Ptr array = TypedAttributeArray<float>::create(0); EXPECT_NO_THROW(TypedAttributeArray<float>::cast(*array)); EXPECT_THROW(TypedAttributeArray<int>::cast(*array), TypeError); AttributeArray::ConstPtr constArray = array; EXPECT_NO_THROW(TypedAttributeArray<float>::cast(*constArray)); EXPECT_THROW(TypedAttributeArray<int>::cast(*constArray), TypeError); } } struct VectorWrapper { using T = std::vector<std::pair<Index, Index>>; VectorWrapper(const T& _data) : data(_data) { } operator bool() const { return index < data.size(); } VectorWrapper& operator++() { index++; return *this; } Index sourceIndex() const { assert(*this); return data[index].first; 
} Index targetIndex() const { assert(*this); return data[index].second; } private: const T& data; T::size_type index = 0; }; // struct VectorWrapper TEST_F(TestAttributeArray, testAttributeArrayCopy) { using AttributeArrayD = TypedAttributeArray<double>; Index size(50); // initialize some test data AttributeArrayD sourceTypedAttr(size); AttributeArray& sourceAttr(sourceTypedAttr); EXPECT_EQ(size, sourceAttr.size()); sourceAttr.expand(); for (Index i = 0; i < size; i++) { sourceTypedAttr.set(i, double(i)/2); } // initialize source -> target pairs that reverse the order std::vector<std::pair<Index, Index>> indexPairs; for (Index i = 0; i < size; i++) { indexPairs.push_back(std::make_pair(i, size-i-1)); } // create a new index pair wrapper VectorWrapper wrapper(indexPairs); // build a target attribute array AttributeArrayD targetTypedAttr(size); AttributeArray& targetAttr(targetTypedAttr); for (const auto& pair : indexPairs) { targetTypedAttr.set(pair.second, sourceTypedAttr.get(pair.first)); } #if OPENVDB_ABI_VERSION_NUMBER < 6 { // verify behaviour with slow virtual function (ABI<6) AttributeArrayD typedAttr(size); AttributeArray& attr(typedAttr); for (const auto& pair : indexPairs) { attr.set(pair.second, sourceAttr, pair.first); } EXPECT_TRUE(targetAttr == attr); } #else using AttributeArrayF = TypedAttributeArray<float>; { // use std::vector<std::pair<Index, Index>>::begin() as iterator to AttributeArray::copy() AttributeArrayD typedAttr(size); AttributeArray& attr(typedAttr); attr.copyValues(sourceAttr, wrapper); EXPECT_TRUE(targetAttr == attr); } { // attempt to copy values between attribute arrays with different storage sizes AttributeArrayF typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_THROW(attr.copyValues(sourceAttr, wrapper), TypeError); } { // attempt to copy values between integer and float attribute arrays AttributeArrayF typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_THROW(attr.copyValues(sourceAttr, wrapper), TypeError); } { // 
copy values between attribute arrays with different value types, but the same storage type // target half array TypedAttributeArray<half> targetTypedAttr1(size); AttributeArray& targetAttr1(targetTypedAttr1); for (Index i = 0; i < size; i++) { targetTypedAttr1.set(i, io::RealToHalf<double>::convert(sourceTypedAttr.get(i))); } // truncated float array TypedAttributeArray<float, TruncateCodec> targetTypedAttr2(size); AttributeArray& targetAttr2(targetTypedAttr2); targetAttr2.copyValues(targetAttr1, wrapper); // equality fails as attribute types are not the same EXPECT_TRUE(targetAttr2 != targetAttr); EXPECT_TRUE(targetAttr2.type() != targetAttr.type()); // however testing value equality succeeds for (Index i = 0; i < size; i++) { EXPECT_TRUE(targetTypedAttr2.get(i) == targetTypedAttr.get(i)); } } { // out-of-range checking AttributeArrayD typedAttr(size); AttributeArray& attr(typedAttr); decltype(indexPairs) rangeIndexPairs(indexPairs); rangeIndexPairs[10].first = size+1; VectorWrapper rangeWrapper(rangeIndexPairs); EXPECT_THROW(attr.copyValues(sourceAttr, rangeWrapper), IndexError); rangeIndexPairs[10].first = 0; EXPECT_NO_THROW(attr.copyValues(sourceAttr, rangeWrapper)); rangeIndexPairs[10].second = size+1; EXPECT_THROW(attr.copyValues(sourceAttr, rangeWrapper), IndexError); } { // source attribute array is uniform AttributeArrayD uniformTypedAttr(size); AttributeArray& uniformAttr(uniformTypedAttr); uniformTypedAttr.collapse(5.3); EXPECT_TRUE(uniformAttr.isUniform()); AttributeArrayD typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_TRUE(attr.isUniform()); attr.copyValues(uniformAttr, wrapper); EXPECT_TRUE(attr.isUniform()); attr.copyValues(uniformAttr, wrapper, /*preserveUniformity=*/false); EXPECT_TRUE(!attr.isUniform()); typedAttr.collapse(1.4); EXPECT_TRUE(attr.isUniform()); // resize the vector to be smaller than the size of the array decltype(indexPairs) subsetIndexPairs(indexPairs); subsetIndexPairs.resize(size-1); decltype(wrapper) 
subsetWrapper(subsetIndexPairs); // now copy the values attempting to preserve uniformity attr.copyValues(uniformAttr, subsetWrapper, /*preserveUniformity=*/true); // verify that the array cannot be kept uniform EXPECT_TRUE(!attr.isUniform()); } { // target attribute array is uniform AttributeArrayD uniformTypedAttr(size); AttributeArray& uniformAttr(uniformTypedAttr); uniformTypedAttr.collapse(5.3); EXPECT_TRUE(uniformAttr.isUniform()); AttributeArrayD typedAttr(size); AttributeArray& attr(typedAttr); typedAttr.set(5, 1.2); typedAttr.set(10, 3.1); EXPECT_TRUE(!attr.isUniform()); std::vector<std::pair<Index, Index>> uniformIndexPairs; uniformIndexPairs.push_back(std::make_pair(10, 0)); uniformIndexPairs.push_back(std::make_pair(5, 0)); VectorWrapper uniformWrapper(uniformIndexPairs); // note that calling copyValues() will implicitly expand the uniform target EXPECT_NO_THROW(uniformAttr.copyValuesUnsafe(attr, uniformWrapper)); EXPECT_TRUE(uniformAttr.isUniform()); EXPECT_TRUE(uniformTypedAttr.get(0) == typedAttr.get(5)); } #endif } void TestAttributeArray::testAccessorEval() { using AttributeF = TypedAttributeArray<float>; struct TestAccessor { static float getterError(const AttributeArray* /*array*/, const Index /*n*/) { OPENVDB_THROW(NotImplementedError, ""); } static void setterError [[noreturn]] (AttributeArray* /*array*/, const Index /*n*/, const float& /*value*/) { OPENVDB_THROW(NotImplementedError, ""); } //static float testGetter(const AttributeArray* array, const Index n) { // return AccessorEval<UnknownCodec, float>::get(&getterError, array, n); //} //static void testSetter(AttributeArray* array, const Index n, const float& value) { // AccessorEval<UnknownCodec, float>::set(&setterError, array, n, value); //} }; { // test get and set (NullCodec) AttributeF::Ptr attr = AttributeF::create(10); attr->collapse(5.0f); attr->expand(); AttributeArray& array = *attr; // explicit codec is used here so getter and setter are not called AttributeWriteHandle<float, 
NullCodec> writeHandle(array); writeHandle.mSetter = TestAccessor::setterError; writeHandle.set(4, 15.0f); AttributeHandle<float, NullCodec> handle(array); const AttributeArray& constArray(array); EXPECT_EQ(&constArray, &handle.array()); handle.mGetter = TestAccessor::getterError; const float result1 = handle.get(4); const float result2 = handle.get(6); EXPECT_EQ(15.0f, result1); EXPECT_EQ(5.0f, result2); } { // test get and set (UnknownCodec) AttributeF::Ptr attr = AttributeF::create(10); attr->collapse(5.0f); attr->expand(); AttributeArray& array = *attr; // unknown codec is used here so getter and setter are called AttributeWriteHandle<float, UnknownCodec> writeHandle(array); EXPECT_EQ(&array, &writeHandle.array()); writeHandle.mSetter = TestAccessor::setterError; EXPECT_THROW(writeHandle.set(4, 15.0f), NotImplementedError); AttributeHandle<float, UnknownCodec> handle(array); handle.mGetter = TestAccessor::getterError; EXPECT_THROW(handle.get(4), NotImplementedError); } } TEST_F(TestAttributeArray, testAccessorEval) { testAccessorEval(); } TEST_F(TestAttributeArray, testAttributeHandle) { using namespace openvdb::math; using AttributeI = TypedAttributeArray<int>; using AttributeFH = TypedAttributeArray<float, TruncateCodec>; using AttributeVec3f = TypedAttributeArray<Vec3f>; using AttributeHandleRWI = AttributeWriteHandle<int>; AttributeI::registerType(); AttributeFH::registerType(); AttributeVec3f::registerType(); // create a Descriptor and AttributeSet using Descriptor = AttributeSet::Descriptor; Descriptor::Ptr descr = Descriptor::create(AttributeVec3f::attributeType()); unsigned count = 500; AttributeSet attrSet(descr, /*arrayLength=*/count); attrSet.appendAttribute("truncate", AttributeFH::attributeType()); attrSet.appendAttribute("int", AttributeI::attributeType()); // check uniform value implementation { AttributeArray* array = attrSet.get(2); AttributeHandleRWI nonExpandingHandle(*array, /*expand=*/false); EXPECT_TRUE(nonExpandingHandle.isUniform()); 
AttributeHandleRWI handle(*array); EXPECT_TRUE(!handle.isUniform()); EXPECT_EQ(array->size(), handle.size()); EXPECT_EQ(0, handle.get(0)); EXPECT_EQ(0, handle.get(10)); handle.set(0, 10); EXPECT_TRUE(!handle.isUniform()); handle.collapse(5); EXPECT_TRUE(handle.isUniform()); EXPECT_EQ(5, handle.get(0)); EXPECT_EQ(5, handle.get(20)); handle.expand(); EXPECT_TRUE(!handle.isUniform()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(5, handle.get(i)); } EXPECT_TRUE(handle.compact()); EXPECT_TRUE(handle.isUniform()); handle.expand(); handle.fill(10); EXPECT_TRUE(!handle.isUniform()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(10, handle.get(i)); } handle.collapse(7); EXPECT_TRUE(handle.isUniform()); EXPECT_EQ(7, handle.get(0)); EXPECT_EQ(7, handle.get(20)); handle.fill(5); EXPECT_TRUE(handle.isUniform()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(5, handle.get(i)); } EXPECT_TRUE(handle.isUniform()); } { AttributeArray* array = attrSet.get(0); AttributeWriteHandle<Vec3f> handle(*array); handle.set(5, Vec3f(10)); EXPECT_EQ(Vec3f(10), handle.get(5)); } { AttributeArray* array = attrSet.get(1); AttributeWriteHandle<float> handle(*array); handle.set(6, float(11)); EXPECT_EQ(float(11), handle.get(6)); { AttributeHandle<float> handleRO(*array); EXPECT_EQ(float(11), handleRO.get(6)); } } // check values have been correctly set without using handles { AttributeVec3f* array = static_cast<AttributeVec3f*>(attrSet.get(0)); EXPECT_TRUE(array); EXPECT_EQ(Vec3f(10), array->get(5)); } { AttributeFH* array = static_cast<AttributeFH*>(attrSet.get(1)); EXPECT_TRUE(array); EXPECT_EQ(float(11), array->get(6)); } } TEST_F(TestAttributeArray, testStrided) { using AttributeArrayI = TypedAttributeArray<int>; using StridedHandle = AttributeHandle<int, /*CodecType=*/UnknownCodec>; using StridedWriteHandle = AttributeWriteHandle<int, /*CodecType=*/UnknownCodec>; { // non-strided array AttributeArrayI::Ptr array = AttributeArrayI::create(/*n=*/2, 
/*stride=*/1); EXPECT_TRUE(array->hasConstantStride()); EXPECT_EQ(Index(1), array->stride()); EXPECT_EQ(Index(2), array->size()); EXPECT_EQ(Index(2), array->dataSize()); } { // strided array AttributeArrayI::Ptr array = AttributeArrayI::create(/*n=*/2, /*stride=*/3); EXPECT_TRUE(array->hasConstantStride()); EXPECT_EQ(Index(3), array->stride()); EXPECT_EQ(Index(2), array->size()); EXPECT_EQ(Index(6), array->dataSize()); EXPECT_TRUE(array->isUniform()); EXPECT_EQ(0, array->get(0)); EXPECT_EQ(0, array->get(5)); EXPECT_THROW(array->get(6), IndexError); // out-of-range EXPECT_NO_THROW(StridedHandle::create(*array)); EXPECT_NO_THROW(StridedWriteHandle::create(*array)); array->collapse(10); EXPECT_EQ(int(10), array->get(0)); EXPECT_EQ(int(10), array->get(5)); array->expand(); EXPECT_EQ(int(10), array->get(0)); EXPECT_EQ(int(10), array->get(5)); array->collapse(0); EXPECT_EQ(int(0), array->get(0)); EXPECT_EQ(int(0), array->get(5)); StridedWriteHandle writeHandle(*array); writeHandle.set(0, 2, 5); writeHandle.set(1, 1, 10); EXPECT_EQ(Index(3), writeHandle.stride()); EXPECT_EQ(Index(2), writeHandle.size()); // non-interleaved: 0 0 5 0 10 0 EXPECT_EQ(5, array->get(2)); EXPECT_EQ(10, array->get(4)); EXPECT_EQ(5, writeHandle.get(0, 2)); EXPECT_EQ(10, writeHandle.get(1, 1)); StridedHandle handle(*array); EXPECT_TRUE(handle.hasConstantStride()); EXPECT_EQ(5, handle.get(0, 2)); EXPECT_EQ(10, handle.get(1, 1)); EXPECT_EQ(Index(3), handle.stride()); EXPECT_EQ(Index(2), handle.size()); // as of ABI=6, the base memory requirements of an AttributeArray have been lowered #if OPENVDB_ABI_VERSION_NUMBER >= 6 size_t arrayMem = 40; #else size_t arrayMem = 64; #endif EXPECT_EQ(sizeof(int) * /*size*/3 * /*stride*/2 + arrayMem, array->memUsage()); } { // dynamic stride AttributeArrayI::Ptr array = AttributeArrayI::create( /*n=*/2, /*stride=*/7, /*constantStride=*/false); EXPECT_TRUE(!array->hasConstantStride()); // zero indicates dynamic striding EXPECT_EQ(Index(0), array->stride()); 
EXPECT_EQ(Index(2), array->size()); // the actual array size EXPECT_EQ(Index(7), array->dataSize()); EXPECT_TRUE(array->isUniform()); EXPECT_EQ(0, array->get(0)); EXPECT_EQ(0, array->get(6)); EXPECT_THROW(array->get(7), IndexError); // out-of-range EXPECT_NO_THROW(StridedHandle::create(*array)); EXPECT_NO_THROW(StridedWriteHandle::create(*array)); // handle is bound as if a linear array with stride 1 StridedHandle handle(*array); EXPECT_TRUE(!handle.hasConstantStride()); EXPECT_EQ(Index(1), handle.stride()); EXPECT_EQ(array->dataSize(), handle.size()); } { // IO const Index count = 50, total = 100; AttributeArrayI attrA(count, total, /*constantStride=*/false); for (unsigned i = 0; i < unsigned(total); ++i) { attrA.set(i, int(i)); } std::ostringstream ostr(std::ios_base::binary); io::setDataCompression(ostr, io::COMPRESS_BLOSC); attrA.write(ostr); AttributeArrayI attrB; std::istringstream istr(ostr.str(), std::ios_base::binary); attrB.read(istr); EXPECT_TRUE(attrA == attrB); } } void TestAttributeArray::testDelayedLoad() { using AttributeArrayI = TypedAttributeArray<int>; using AttributeArrayF = TypedAttributeArray<float>; AttributeArrayI::registerType(); AttributeArrayF::registerType(); SharedPtr<io::MappedFile> mappedFile; io::StreamMetadata::Ptr streamMetadata(new io::StreamMetadata); std::string tempDir; if (const char* dir = std::getenv("TMPDIR")) tempDir = dir; #ifdef _MSC_VER if (tempDir.empty()) { char tempDirBuffer[MAX_PATH+1]; int tempDirLen = GetTempPath(MAX_PATH+1, tempDirBuffer); EXPECT_TRUE(tempDirLen > 0 && tempDirLen <= MAX_PATH); tempDir = tempDirBuffer; } #else if (tempDir.empty()) tempDir = P_tmpdir; #endif { // IO const Index count = 50; AttributeArrayI attrA(count); for (unsigned i = 0; i < unsigned(count); ++i) { attrA.set(i, int(i)); } AttributeArrayF attrA2(count); std::string filename; // write out attribute array to a temp file { filename = tempDir + "/openvdb_delayed1"; std::ofstream fileout(filename.c_str(), std::ios_base::binary); 
io::setStreamMetadataPtr(fileout, streamMetadata); io::setDataCompression(fileout, io::COMPRESS_BLOSC); attrA.writeMetadata(fileout, false, /*paged=*/true); compression::PagedOutputStream outputStreamSize(fileout); outputStreamSize.setSizeOnly(true); attrA.writePagedBuffers(outputStreamSize, false); outputStreamSize.flush(); compression::PagedOutputStream outputStream(fileout); outputStream.setSizeOnly(false); attrA.writePagedBuffers(outputStream, false); outputStream.flush(); attrA2.writeMetadata(fileout, false, /*paged=*/true); compression::PagedOutputStream outputStreamSize2(fileout); outputStreamSize2.setSizeOnly(true); attrA2.writePagedBuffers(outputStreamSize2, false); outputStreamSize2.flush(); compression::PagedOutputStream outputStream2(fileout); outputStream2.setSizeOnly(false); attrA2.writePagedBuffers(outputStream2, false); outputStream2.flush(); fileout.close(); } mappedFile = TestMappedFile::create(filename); // read in using delayed load and check manual loading of data { AttributeArrayI attrB; AttributeArrayF attrB2; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(matchingNamePairs(attrA.type(), attrB.type())); EXPECT_EQ(attrA.size(), attrB.size()); EXPECT_EQ(attrA.isUniform(), attrB.isUniform()); EXPECT_EQ(attrA.isTransient(), attrB.isTransient()); EXPECT_EQ(attrA.isHidden(), attrB.isHidden()); AttributeArrayI attrBcopy(attrB); AttributeArrayI attrBequal = attrB; EXPECT_TRUE(attrB.isOutOfCore()); EXPECT_TRUE(attrBcopy.isOutOfCore()); EXPECT_TRUE(attrBequal.isOutOfCore()); #if OPENVDB_ABI_VERSION_NUMBER >= 6 EXPECT_TRUE(!static_cast<AttributeArray&>(attrB).isDataLoaded()); 
EXPECT_TRUE(!static_cast<AttributeArray&>(attrBcopy).isDataLoaded()); EXPECT_TRUE(!static_cast<AttributeArray&>(attrBequal).isDataLoaded()); #endif attrB.loadData(); attrBcopy.loadData(); attrBequal.loadData(); EXPECT_TRUE(!attrB.isOutOfCore()); EXPECT_TRUE(!attrBcopy.isOutOfCore()); EXPECT_TRUE(!attrBequal.isOutOfCore()); #if OPENVDB_ABI_VERSION_NUMBER >= 6 EXPECT_TRUE(static_cast<AttributeArray&>(attrB).isDataLoaded()); EXPECT_TRUE(static_cast<AttributeArray&>(attrBcopy).isDataLoaded()); EXPECT_TRUE(static_cast<AttributeArray&>(attrBequal).isDataLoaded()); #endif EXPECT_EQ(attrA.memUsage(), attrB.memUsage()); EXPECT_EQ(attrA.memUsage(), attrBcopy.memUsage()); EXPECT_EQ(attrA.memUsage(), attrBequal.memUsage()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), attrB.get(i)); EXPECT_EQ(attrA.get(i), attrBcopy.get(i)); EXPECT_EQ(attrA.get(i), attrBequal.get(i)); } attrB2.readMetadata(filein); compression::PagedInputStream inputStream2(filein); inputStream2.setSizeOnly(true); attrB2.readPagedBuffers(inputStream2); inputStream2.setSizeOnly(false); attrB2.readPagedBuffers(inputStream2); EXPECT_TRUE(matchingNamePairs(attrA2.type(), attrB2.type())); EXPECT_EQ(attrA2.size(), attrB2.size()); EXPECT_EQ(attrA2.isUniform(), attrB2.isUniform()); EXPECT_EQ(attrA2.isTransient(), attrB2.isTransient()); EXPECT_EQ(attrA2.isHidden(), attrB2.isHidden()); AttributeArrayF attrB2copy(attrB2); AttributeArrayF attrB2equal = attrB2; EXPECT_TRUE(attrB2.isOutOfCore()); EXPECT_TRUE(attrB2copy.isOutOfCore()); EXPECT_TRUE(attrB2equal.isOutOfCore()); attrB2.loadData(); attrB2copy.loadData(); attrB2equal.loadData(); EXPECT_TRUE(!attrB2.isOutOfCore()); EXPECT_TRUE(!attrB2copy.isOutOfCore()); EXPECT_TRUE(!attrB2equal.isOutOfCore()); EXPECT_EQ(attrA2.memUsage(), attrB2.memUsage()); EXPECT_EQ(attrA2.memUsage(), attrB2copy.memUsage()); EXPECT_EQ(attrA2.memUsage(), attrB2equal.memUsage()); EXPECT_EQ(attrA2.get(0), attrB2.get(0)); EXPECT_EQ(attrA2.get(0), attrB2copy.get(0)); 
EXPECT_EQ(attrA2.get(0), attrB2equal.get(0)); } // read in using delayed load and check fill() { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); EXPECT_TRUE(!attrB.isUniform()); attrB.fill(5); EXPECT_TRUE(!attrB.isOutOfCore()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(5, attrB.get(i)); } } // read in using delayed load and check streaming (write handle) { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); EXPECT_TRUE(!attrB.isUniform()); attrB.setStreaming(true); { AttributeWriteHandle<int> handle(attrB); EXPECT_TRUE(!attrB.isOutOfCore()); EXPECT_TRUE(!attrB.isUniform()); } EXPECT_TRUE(!attrB.isUniform()); } // read in using delayed load and check streaming (read handle) { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); EXPECT_TRUE(!attrB.isUniform()); 
attrB.setStreaming(true); { AttributeHandle<int> handle(attrB); EXPECT_TRUE(!attrB.isOutOfCore()); EXPECT_TRUE(!attrB.isUniform()); } EXPECT_TRUE(attrB.isUniform()); } // read in using delayed load and check implicit load through get() { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); attrB.get(0); EXPECT_TRUE(!attrB.isOutOfCore()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), attrB.get(i)); } } // read in using delayed load and check implicit load through compress() { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); } // read in using delayed load and check copy and assignment constructors { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); AttributeArrayI attrC(attrB); AttributeArrayI attrD = attrB; EXPECT_TRUE(attrB.isOutOfCore()); 
EXPECT_TRUE(attrC.isOutOfCore()); EXPECT_TRUE(attrD.isOutOfCore()); attrB.loadData(); attrC.loadData(); attrD.loadData(); EXPECT_TRUE(!attrB.isOutOfCore()); EXPECT_TRUE(!attrC.isOutOfCore()); EXPECT_TRUE(!attrD.isOutOfCore()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), attrB.get(i)); EXPECT_EQ(attrA.get(i), attrC.get(i)); EXPECT_EQ(attrA.get(i), attrD.get(i)); } } // read in using delayed load and check implicit load through AttributeHandle { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); AttributeHandle<int> handle(attrB); EXPECT_TRUE(!attrB.isOutOfCore()); } // read in using delayed load and check detaching of file (using collapse()) { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); EXPECT_TRUE(!attrB.isUniform()); attrB.collapse(); EXPECT_TRUE(!attrB.isOutOfCore()); EXPECT_TRUE(attrB.isUniform()); EXPECT_EQ(0, attrB.get(0)); } // read in and write out using delayed load to check writing out-of-core attributes { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); 
compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); std::string filename2 = tempDir + "/openvdb_delayed5"; std::ofstream fileout2(filename2.c_str(), std::ios_base::binary); io::setStreamMetadataPtr(fileout2, streamMetadata); io::setDataCompression(fileout2, io::COMPRESS_BLOSC); attrB.writeMetadata(fileout2, false, /*paged=*/true); compression::PagedOutputStream outputStreamSize(fileout2); outputStreamSize.setSizeOnly(true); attrB.writePagedBuffers(outputStreamSize, false); outputStreamSize.flush(); compression::PagedOutputStream outputStream(fileout2); outputStream.setSizeOnly(false); attrB.writePagedBuffers(outputStream, false); outputStream.flush(); fileout2.close(); AttributeArrayI attrB2; std::ifstream filein2(filename2.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein2, streamMetadata); io::setMappedFilePtr(filein2, mappedFile); attrB2.readMetadata(filein2); compression::PagedInputStream inputStream2(filein2); inputStream2.setSizeOnly(true); attrB2.readPagedBuffers(inputStream2); inputStream2.setSizeOnly(false); attrB2.readPagedBuffers(inputStream2); EXPECT_TRUE(attrB2.isOutOfCore()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrB.get(i), attrB2.get(i)); } filein2.close(); } // Clean up temp files. 
std::remove(mappedFile->filename().c_str()); std::remove(filename.c_str()); AttributeArrayI attrUniform(count); // write out uniform attribute array to a temp file { filename = tempDir + "/openvdb_delayed2"; std::ofstream fileout(filename.c_str(), std::ios_base::binary); io::setStreamMetadataPtr(fileout, streamMetadata); io::setDataCompression(fileout, io::COMPRESS_BLOSC); attrUniform.writeMetadata(fileout, false, /*paged=*/true); compression::PagedOutputStream outputStreamSize(fileout); outputStreamSize.setSizeOnly(true); attrUniform.writePagedBuffers(outputStreamSize, false); outputStreamSize.flush(); compression::PagedOutputStream outputStream(fileout); outputStream.setSizeOnly(false); attrUniform.writePagedBuffers(outputStream, false); outputStream.flush(); fileout.close(); } mappedFile = TestMappedFile::create(filename); // read in using delayed load and check fill() { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isUniform()); attrB.fill(5); EXPECT_TRUE(attrB.isUniform()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(5, attrB.get(i)); } } AttributeArrayI attrStrided(count, /*stride=*/3); EXPECT_EQ(Index(3), attrStrided.stride()); // Clean up temp files. 
std::remove(mappedFile->filename().c_str()); std::remove(filename.c_str()); // write out strided attribute array to a temp file { filename = tempDir + "/openvdb_delayed3"; std::ofstream fileout(filename.c_str(), std::ios_base::binary); io::setStreamMetadataPtr(fileout, streamMetadata); io::setDataCompression(fileout, io::COMPRESS_BLOSC); attrStrided.writeMetadata(fileout, false, /*paged=*/true); compression::PagedOutputStream outputStreamSize(fileout); outputStreamSize.setSizeOnly(true); attrStrided.writePagedBuffers(outputStreamSize, false); outputStreamSize.flush(); compression::PagedOutputStream outputStream(fileout); outputStream.setSizeOnly(false); attrStrided.writePagedBuffers(outputStream, false); outputStream.flush(); fileout.close(); } mappedFile = TestMappedFile::create(filename); // read in using delayed load and check fill() { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_EQ(Index(3), attrB.stride()); } // Clean up temp files. 
std::remove(mappedFile->filename().c_str()); std::remove(filename.c_str()); // write out compressed attribute array to a temp file { filename = tempDir + "/openvdb_delayed4"; std::ofstream fileout(filename.c_str(), std::ios_base::binary); io::setStreamMetadataPtr(fileout, streamMetadata); io::setDataCompression(fileout, io::COMPRESS_BLOSC); attrA.writeMetadata(fileout, false, /*paged=*/true); compression::PagedOutputStream outputStreamSize(fileout); outputStreamSize.setSizeOnly(true); attrA.writePagedBuffers(outputStreamSize, false); outputStreamSize.flush(); compression::PagedOutputStream outputStream(fileout); outputStream.setSizeOnly(false); attrA.writePagedBuffers(outputStream, false); outputStream.flush(); fileout.close(); } mappedFile = TestMappedFile::create(filename); // read in using delayed load and check manual loading of data { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); attrB.loadData(); EXPECT_TRUE(!attrB.isOutOfCore()); EXPECT_EQ(attrA.memUsage(), attrB.memUsage()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), attrB.get(i)); } } // read in using delayed load and check partial read state { std::unique_ptr<AttributeArrayI> attrB(new AttributeArrayI); EXPECT_TRUE(!(attrB->flags() & AttributeArray::PARTIALREAD)); std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB->readMetadata(filein); // PARTIALREAD flag should now be set EXPECT_TRUE(attrB->flags() & AttributeArray::PARTIALREAD); // copy-construct and 
assign AttributeArray AttributeArrayI attrC(*attrB); EXPECT_TRUE(attrC.flags() & AttributeArray::PARTIALREAD); AttributeArrayI attrD = *attrB; EXPECT_TRUE(attrD.flags() & AttributeArray::PARTIALREAD); // verify deleting attrB is safe attrB.reset(); // verify data is not valid EXPECT_TRUE(!attrC.validData()); { // attempting to write a partially-read AttributeArray throws std::string filename = tempDir + "/openvdb_partial1"; ScopedFile f(filename); std::ofstream fileout(filename.c_str(), std::ios_base::binary); io::setStreamMetadataPtr(fileout, streamMetadata); io::setDataCompression(fileout, io::COMPRESS_BLOSC); EXPECT_THROW(attrC.writeMetadata(fileout, false, /*paged=*/true), IoError); } // continue loading with copy-constructed AttributeArray compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrC.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrC.readPagedBuffers(inputStream); EXPECT_TRUE(attrC.isOutOfCore()); attrC.loadData(); EXPECT_TRUE(!attrC.isOutOfCore()); // verify data is now valid EXPECT_TRUE(attrC.validData()); EXPECT_EQ(attrA.memUsage(), attrC.memUsage()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), attrC.get(i)); } } // read in using delayed load and check implicit load through get() { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); attrB.get(0); EXPECT_TRUE(!attrB.isOutOfCore()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), attrB.get(i)); } } #ifdef OPENVDB_USE_BLOSC // read in using delayed load and check copy and assignment constructors { 
AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); AttributeArrayI attrC(attrB); AttributeArrayI attrD = attrB; EXPECT_TRUE(attrB.isOutOfCore()); EXPECT_TRUE(attrC.isOutOfCore()); EXPECT_TRUE(attrD.isOutOfCore()); attrB.loadData(); attrC.loadData(); attrD.loadData(); EXPECT_TRUE(!attrB.isOutOfCore()); EXPECT_TRUE(!attrC.isOutOfCore()); EXPECT_TRUE(!attrD.isOutOfCore()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), attrB.get(i)); EXPECT_EQ(attrA.get(i), attrC.get(i)); EXPECT_EQ(attrA.get(i), attrD.get(i)); } } // read in using delayed load and check implicit load through AttributeHandle { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); AttributeHandle<int> handle(attrB); EXPECT_TRUE(!attrB.isOutOfCore()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), handle.get(i)); } } #endif // Clean up temp files. 
std::remove(mappedFile->filename().c_str()); std::remove(filename.c_str()); // write out invalid serialization flags as metadata to a temp file { filename = tempDir + "/openvdb_delayed5"; std::ofstream fileout(filename.c_str(), std::ios_base::binary); io::setStreamMetadataPtr(fileout, streamMetadata); io::setDataCompression(fileout, io::COMPRESS_BLOSC); // write out unknown serialization flags to check forwards-compatibility Index64 bytes(0); uint8_t flags(0); uint8_t serializationFlags(Int16(0x10)); Index size(0); fileout.write(reinterpret_cast<const char*>(&bytes), sizeof(Index64)); fileout.write(reinterpret_cast<const char*>(&flags), sizeof(uint8_t)); fileout.write(reinterpret_cast<const char*>(&serializationFlags), sizeof(uint8_t)); fileout.write(reinterpret_cast<const char*>(&size), sizeof(Index)); fileout.close(); } mappedFile = TestMappedFile::create(filename); // read in using delayed load and check metadata fail due to serialization flags { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); EXPECT_THROW(attrB.readMetadata(filein), openvdb::IoError); } // cleanup temp files std::remove(mappedFile->filename().c_str()); std::remove(filename.c_str()); } } TEST_F(TestAttributeArray, testDelayedLoad) { testDelayedLoad(); } TEST_F(TestAttributeArray, testDefaultValue) { using AttributeArrayF = TypedAttributeArray<float>; using AttributeArrayI = TypedAttributeArray<int>; AttributeArrayI::registerType(); AttributeArrayF::registerType(); TypedMetadata<float> defaultValue(5.4f); Metadata& baseDefaultValue = defaultValue; // default value is same value type AttributeArray::Ptr attr = AttributeArrayF::create(10, 1, true, &baseDefaultValue); EXPECT_TRUE(attr); EXPECT_EQ(5.4f, AttributeArrayF::cast(*attr).get(0)); // default value is different value type, so not used attr = AttributeArrayI::create(10, 1, true, 
&baseDefaultValue); EXPECT_TRUE(attr); EXPECT_EQ(0, AttributeArrayI::cast(*attr).get(0)); } TEST_F(TestAttributeArray, testQuaternions) { using AttributeQF = TypedAttributeArray<math::Quat<float>>; using AttributeQD = TypedAttributeArray<QuatR>; AttributeQF::registerType(); AttributeQD::registerType(); EXPECT_TRUE(AttributeQF::attributeType().first == "quats"); EXPECT_TRUE(AttributeQD::attributeType().first == "quatd"); AttributeQF test(/*size=*/5); AttributeQD orient(/*size=*/10); { // set some quaternion values AttributeWriteHandle<QuatR> orientHandle(orient); orientHandle.set(4, QuatR(1, 2, 3, 4)); orientHandle.set(7, QuatR::identity()); } { // get some quaternion values AttributeHandle<QuatR> orientHandle(orient); EXPECT_EQ(QuatR::zero(), orientHandle.get(3)); EXPECT_EQ(QuatR(1, 2, 3, 4), orientHandle.get(4)); EXPECT_EQ(QuatR::identity(), orientHandle.get(7)); } { // create a quaternion array with a zero uniform value AttributeQD zero(/*size=*/10, /*stride=*/1, /*constantStride=*/true, QuatR::zero()); EXPECT_EQ(QuatR::zero(), zero.get(5)); } } TEST_F(TestAttributeArray, testMatrices) { typedef TypedAttributeArray<Mat4d> AttributeM; AttributeM::registerType(); EXPECT_TRUE(AttributeM::attributeType().first == "mat4d"); AttributeM matrix(/*size=*/10); Mat4d testMatrix(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); { // set some matrix values AttributeWriteHandle<Mat4d> matrixHandle(matrix); matrixHandle.set(4, testMatrix); matrixHandle.set(7, Mat4d::zero()); } { // get some matrix values AttributeHandle<Mat4d> matrixHandle(matrix); EXPECT_EQ(Mat4d::zero(), matrixHandle.get(3)); EXPECT_EQ(testMatrix, matrixHandle.get(4)); EXPECT_EQ(Mat4d::zero(), matrixHandle.get(7)); } { // create a matrix array with a zero uniform value AttributeM zero(/*size=*/10, /*stride=*/1, /*constantStride=*/true, Mat4d::zero()); EXPECT_EQ(Mat4d::zero(), zero.get(5)); } } namespace profile { template <typename AttrT> void expand(const Name& prefix, AttrT& attr) { ProfileTimer 
timer(prefix + ": expand"); attr.expand(); } template <typename AttrT> void set(const Name& prefix, AttrT& attr) { ProfileTimer timer(prefix + ": set"); const Index size = attr.size(); for (Index i = 0; i < size; i++) { attr.setUnsafe(i, typename AttrT::ValueType(i)); } } template <typename CodecT, typename AttrT> void setH(const Name& prefix, AttrT& attr) { using ValueType = typename AttrT::ValueType; ProfileTimer timer(prefix + ": setHandle"); AttributeWriteHandle<ValueType, CodecT> handle(attr); const Index size = attr.size(); for (Index i = 0; i < size; i++) { handle.set(i, ValueType(i)); } } template <typename AttrT> void sum(const Name& prefix, const AttrT& attr) { ProfileTimer timer(prefix + ": sum"); using ValueType = typename AttrT::ValueType; ValueType sum = 0; const Index size = attr.size(); for (Index i = 0; i < size; i++) { sum += attr.getUnsafe(i); } // prevent compiler optimisations removing computation EXPECT_TRUE(sum!=ValueType()); } template <typename CodecT, typename AttrT> void sumH(const Name& prefix, const AttrT& attr) { ProfileTimer timer(prefix + ": sumHandle"); using ValueType = typename AttrT::ValueType; ValueType sum = 0; AttributeHandle<ValueType, CodecT> handle(attr); for (Index i = 0; i < attr.size(); i++) { sum += handle.get(i); } // prevent compiler optimisations removing computation EXPECT_TRUE(sum!=ValueType()); } } // namespace profile TEST_F(TestAttributeArray, testProfile) { using namespace openvdb::util; using namespace openvdb::math; using AttributeArrayF = TypedAttributeArray<float>; using AttributeArrayF16 = TypedAttributeArray<float, FixedPointCodec<false>>; using AttributeArrayF8 = TypedAttributeArray<float, FixedPointCodec<true>>; /////////////////////////////////////////////////// #ifdef PROFILE const size_t elements(1000 * 1000 * 1000); std::cerr << std::endl; #else const size_t elements(10 * 1000 * 1000); #endif // std::vector { std::vector<float> values; { ProfileTimer timer("Vector<float>: resize"); 
values.resize(elements); } { ProfileTimer timer("Vector<float>: set"); for (size_t i = 0; i < elements; i++) { values[i] = float(i); } } { ProfileTimer timer("Vector<float>: sum"); float sum = 0; for (size_t i = 0; i < elements; i++) { sum += float(values[i]); } // to prevent optimisation clean up EXPECT_TRUE(sum!=0.0f); } } // AttributeArray { AttributeArrayF attr(elements); profile::expand("AttributeArray<float>", attr); profile::set("AttributeArray<float>", attr); profile::sum("AttributeArray<float>", attr); } { AttributeArrayF16 attr(elements); profile::expand("AttributeArray<float, fp16>", attr); profile::set("AttributeArray<float, fp16>", attr); profile::sum("AttributeArray<float, fp16>", attr); } { AttributeArrayF8 attr(elements); profile::expand("AttributeArray<float, fp8>", attr); profile::set("AttributeArray<float, fp8>", attr); profile::sum("AttributeArray<float, fp8>", attr); } // AttributeHandle (UnknownCodec) { AttributeArrayF attr(elements); profile::expand("AttributeHandle<float>", attr); profile::setH<UnknownCodec>("AttributeHandle<float>", attr); profile::sumH<UnknownCodec>("AttributeHandle<float>", attr); } { AttributeArrayF16 attr(elements); profile::expand("AttributeHandle<float, fp16>", attr); profile::setH<UnknownCodec>("AttributeHandle<float, fp16>", attr); profile::sumH<UnknownCodec>("AttributeHandle<float, fp16>", attr); } { AttributeArrayF8 attr(elements); profile::expand("AttributeHandle<float, fp8>", attr); profile::setH<UnknownCodec>("AttributeHandle<float, fp8>", attr); profile::sumH<UnknownCodec>("AttributeHandle<float, fp8>", attr); } // AttributeHandle (explicit codec) { AttributeArrayF attr(elements); profile::expand("AttributeHandle<float>", attr); profile::setH<NullCodec>("AttributeHandle<float, Codec>", attr); profile::sumH<NullCodec>("AttributeHandle<float, Codec>", attr); } { AttributeArrayF16 attr(elements); profile::expand("AttributeHandle<float, fp16>", attr); profile::setH<FixedPointCodec<false>>("AttributeHandle<float, 
fp16, Codec>", attr); profile::sumH<FixedPointCodec<false>>("AttributeHandle<float, fp16, Codec>", attr); } { AttributeArrayF8 attr(elements); profile::expand("AttributeHandle<float, fp8>", attr); profile::setH<FixedPointCodec<true>>("AttributeHandle<float, fp8, Codec>", attr); profile::sumH<FixedPointCodec<true>>("AttributeHandle<float, fp8, Codec>", attr); } }
83,779
C++
32.891586
129
0.60884
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestMetadata.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include "gtest/gtest.h"
#include <openvdb/Exceptions.h>
#include <openvdb/Metadata.h>
#include <sstream>

// Fixture for the Metadata type registry and serialization tests.
// Each test starts and ends with an empty registry so that type
// registration state cannot leak between tests.
class TestMetadata: public ::testing::Test
{
public:
    void SetUp() override { openvdb::Metadata::clearRegistry(); }
    void TearDown() override { openvdb::Metadata::clearRegistry(); }
};


// Verify that metadata types can be registered, looked up by type name,
// created through the factory, and unregistered again.
TEST_F(TestMetadata, testMetadataRegistry)
{
    using namespace openvdb;

    Int32Metadata::registerType();

    StringMetadata strMetadata;

    // String metadata has not been registered yet.
    EXPECT_TRUE(!Metadata::isRegisteredType(strMetadata.typeName()));

    StringMetadata::registerType();

    EXPECT_TRUE(Metadata::isRegisteredType(strMetadata.typeName()));
    EXPECT_TRUE(Metadata::isRegisteredType(Int32Metadata::staticTypeName()));

    // The factory should create metadata of the registered type.
    Metadata::Ptr stringMetadata = Metadata::createMetadata(strMetadata.typeName());

    EXPECT_TRUE(stringMetadata->typeName() == strMetadata.typeName());

    // After unregistering, creation by type name should throw.
    StringMetadata::unregisterType();

    EXPECT_THROW(Metadata::createMetadata(strMetadata.typeName()), openvdb::LookupError);
}


// Verify the boolean interpretation of each metadata type: zero /
// empty values convert to false, anything else to true.
TEST_F(TestMetadata, testMetadataAsBool)
{
    using namespace openvdb;

    {
        FloatMetadata meta(0.0);
        EXPECT_TRUE(!meta.asBool());
        meta.setValue(1.0);
        EXPECT_TRUE(meta.asBool());
        meta.setValue(-1.0);
        EXPECT_TRUE(meta.asBool());
        meta.setValue(999.0);
        EXPECT_TRUE(meta.asBool());
    }
    {
        Int32Metadata meta(0);
        EXPECT_TRUE(!meta.asBool());
        meta.setValue(1);
        EXPECT_TRUE(meta.asBool());
        meta.setValue(-1);
        EXPECT_TRUE(meta.asBool());
        meta.setValue(999);
        EXPECT_TRUE(meta.asBool());
    }
    {
        StringMetadata meta("");
        EXPECT_TRUE(!meta.asBool());
        meta.setValue("abc");
        EXPECT_TRUE(meta.asBool());
    }
    {
        Vec3IMetadata meta(Vec3i(0));
        EXPECT_TRUE(!meta.asBool());
        meta.setValue(Vec3i(-1, 0, 1));
        EXPECT_TRUE(meta.asBool());
    }
    {
        Vec3SMetadata meta(Vec3s(0.0));
        EXPECT_TRUE(!meta.asBool());
        meta.setValue(Vec3s(-1.0, 0.0, 1.0));
        EXPECT_TRUE(meta.asBool());
    }
    {
        Vec4DMetadata meta(Vec4d(0.0));
        EXPECT_TRUE(!meta.asBool());
        meta.setValue(Vec4d(1.0));
        EXPECT_TRUE(meta.asBool());
    }
}


// Verify that metadata of an unregistered ("unknown") type survives a
// read/write round trip as raw bytes, and that the bytes match the
// original typed value.
TEST_F(TestMetadata, testCustomMetadata)
{
    using namespace openvdb;

    const Vec3i expected(1, 2, 3);

    std::ostringstream ostr(std::ios_base::binary);
    {
        Vec3IMetadata::registerType();
        Vec3IMetadata meta(expected);
        // Write Vec3I metadata to a byte string.
        meta.write(ostr);
    }

    // Unregister Vec3I metadata.
    Metadata::clearRegistry();

    {
        std::istringstream istr(ostr.str(), std::ios_base::binary);

        UnknownMetadata meta;
        // Verify that metadata of an unregistered type can be read successfully.
        EXPECT_NO_THROW(meta.read(istr));

        // Verify that the metadata matches the original vector value.
        EXPECT_EQ(sizeof(Vec3i), size_t(meta.size()));
        EXPECT_TRUE(meta.value().size() == size_t(meta.size()));
        EXPECT_EQ(expected, *reinterpret_cast<const Vec3i*>(&meta.value()[0]));

        // Re-serialize the unknown metadata for the typed read below.
        ostr.str("");
        meta.write(ostr);

        // Verify that UnknownMetadata can be copied.
        auto metaPtr = meta.copy();
        EXPECT_TRUE(metaPtr.get() != nullptr);
        EXPECT_TRUE(meta == *metaPtr);

        // Verify that typed metadata can be copied into UnknownMetadata.
        meta.copy(Vec3IMetadata(expected));
        EXPECT_EQ(sizeof(expected), size_t(meta.size()));
        const auto* ptr = reinterpret_cast<const uint8_t*>(&expected);
        EXPECT_TRUE(UnknownMetadata::ByteVec(ptr, ptr + sizeof(expected)) == meta.value());
    }

    Vec3IMetadata::registerType();

    {
        // With the type registered again, the bytes written by the
        // unknown metadata must read back as the original typed value.
        std::istringstream istr(ostr.str(), std::ios_base::binary);

        Vec3IMetadata meta;
        meta.read(istr);

        EXPECT_EQ(expected, meta.value());
    }
}
4,024
C++
26.380952
91
0.619781
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestFindActiveValues.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include <cstdio> // for remove()
#include <fstream>
#include <sstream>
#include "gtest/gtest.h"
#include <openvdb/Exceptions.h>
#include <openvdb/Types.h>
#include <openvdb/openvdb.h>
#include <openvdb/util/CpuTimer.h>
#include <openvdb/tools/LevelSetSphere.h>
#include <openvdb/tools/FindActiveValues.h>

#include "util.h" // for unittest_util::makeSphere()

#define ASSERT_DOUBLES_EXACTLY_EQUAL(expected, actual) \
    EXPECT_NEAR((expected), (actual), /*tolerance=*/0.0);

// Fixture for tools::FindActiveValues and the related free functions
// (anyActiveValues, anyActiveVoxels, anyActiveTiles, noActiveValues,
// activeTiles).  OpenVDB is (de)initialized around every test.
class TestFindActiveValues: public ::testing::Test
{
public:
    void SetUp() override { openvdb::initialize(); }
    void TearDown() override { openvdb::uninitialize(); }
};


// Basic inclusion/exclusion semantics on a hand-built tree: values just
// outside the query bbox are ignored, corner voxels count, and
// sparse vs. dense fills are distinguished as tiles vs. voxels.
TEST_F(TestFindActiveValues, testBasic)
{
    const float background = 5.0f;
    openvdb::FloatTree tree(background);
    const openvdb::Coord min(-1,-2,30), max(20,30,55);
    const openvdb::CoordBBox bbox(min[0], min[1], min[2], max[0], max[1], max[2]);

    // An empty tree has no active values anywhere.
    EXPECT_TRUE( openvdb::tools::noActiveValues(tree, bbox));
    EXPECT_TRUE(!openvdb::tools::anyActiveValues(tree, bbox));
    EXPECT_TRUE(!openvdb::tools::anyActiveVoxels(tree, bbox));

    // Voxels one step outside the bbox must not be reported as inside it.
    tree.setValue(min.offsetBy(-1), 1.0f);
    EXPECT_TRUE( openvdb::tools::noActiveValues(tree, bbox));
    EXPECT_TRUE(!openvdb::tools::anyActiveValues(tree, bbox));
    EXPECT_TRUE(!openvdb::tools::anyActiveVoxels(tree, bbox));
    tree.setValue(max.offsetBy( 1), 1.0f);
    EXPECT_TRUE( openvdb::tools::noActiveValues(tree, bbox));
    EXPECT_TRUE(!openvdb::tools::anyActiveValues(tree, bbox));
    EXPECT_TRUE(!openvdb::tools::anyActiveVoxels(tree, bbox));

    // Voxels on the bbox corners are inside it (inclusive bounds).
    tree.setValue(min, 1.0f);
    EXPECT_TRUE(openvdb::tools::anyActiveValues(tree, bbox));
    EXPECT_TRUE(openvdb::tools::anyActiveVoxels(tree, bbox));
    EXPECT_TRUE(!openvdb::tools::anyActiveTiles(tree, bbox));
    tree.setValue(max, 1.0f);
    EXPECT_TRUE(openvdb::tools::anyActiveValues(tree, bbox));
    EXPECT_TRUE(openvdb::tools::anyActiveVoxels(tree, bbox));
    EXPECT_TRUE(!openvdb::tools::anyActiveTiles(tree, bbox));
    auto tiles = openvdb::tools::activeTiles(tree, bbox);
    EXPECT_TRUE( tiles.size() == 0u );

    // sparseFill produces active tiles (level 1, i.e. 8^3 voxels each).
    tree.sparseFill(bbox, 1.0f);
    EXPECT_TRUE(openvdb::tools::anyActiveValues(tree, bbox));
    EXPECT_TRUE(openvdb::tools::anyActiveVoxels(tree, bbox));
    EXPECT_TRUE(openvdb::tools::anyActiveTiles( tree, bbox));
    tiles = openvdb::tools::activeTiles(tree, bbox);
    EXPECT_TRUE( tiles.size() != 0u );
    for (auto &t : tiles) {
        EXPECT_TRUE( t.level == 1);
        EXPECT_TRUE( t.bbox.volume() == openvdb::math::Pow3(uint64_t(8)) );
        //std::cerr << "bbox = " << t.bbox << ", level = " << t.level << std::endl;
    }

    // denseFill voxelizes everything: active voxels, but no active tiles.
    tree.denseFill(bbox, 1.0f);
    EXPECT_TRUE(openvdb::tools::anyActiveValues(tree, bbox));
    EXPECT_TRUE(openvdb::tools::anyActiveVoxels(tree, bbox));
    EXPECT_TRUE(!openvdb::tools::anyActiveTiles(tree, bbox));
    tiles = openvdb::tools::activeTiles(tree, bbox);
    EXPECT_TRUE( tiles.size() == 0u );
}


// Queries against a sparse narrow-band level set sphere built with
// unittest_util::makeSphere: the sphere interior is inactive, the band
// itself is active, and no active tiles exist.
TEST_F(TestFindActiveValues, testSphere1)
{
    const openvdb::Vec3f center(0.5f, 0.5f, 0.5f);
    const float radius = 0.3f;
    const int dim = 100, half_width = 3;
    const float voxel_size = 1.0f/dim;

    openvdb::FloatGrid::Ptr grid = openvdb::FloatGrid::create(/*background=*/half_width*voxel_size);
    const openvdb::FloatTree& tree = grid->tree();
    grid->setTransform(openvdb::math::Transform::createLinearTransform(/*voxel size=*/voxel_size));

    unittest_util::makeSphere<openvdb::FloatGrid>(
        openvdb::Coord(dim), center, radius, *grid, unittest_util::SPHERE_SPARSE_NARROW_BAND);

    // A small cube at the sphere center lies inside the narrow band => inactive.
    const int c = int(0.5f/voxel_size);
    const openvdb::CoordBBox a(openvdb::Coord(c), openvdb::Coord(c+ 8));
    EXPECT_TRUE(!tree.isValueOn(openvdb::Coord(c)));
    EXPECT_TRUE(!openvdb::tools::anyActiveValues(tree, a));

    // A cube centered on the sphere surface overlaps the active band.
    const openvdb::Coord d(c + int(radius/voxel_size), c, c);
    EXPECT_TRUE(tree.isValueOn(d));
    const auto b = openvdb::CoordBBox::createCube(d, 4);
    EXPECT_TRUE(openvdb::tools::anyActiveValues(tree, b));

    // The full domain contains active voxels but no active tiles.
    const openvdb::CoordBBox e(openvdb::Coord(0), openvdb::Coord(dim));
    EXPECT_TRUE(openvdb::tools::anyActiveValues(tree, e));
    EXPECT_TRUE(!openvdb::tools::anyActiveTiles(tree, e));
    auto tiles = openvdb::tools::activeTiles(tree, e);
    EXPECT_TRUE( tiles.size() == 0u );
}


// Queries against a level set sphere from tools::createLevelSetSphere,
// including a search for the largest inscribed cube with no active
// values, done both with the FindActiveValues class and the free function.
TEST_F(TestFindActiveValues, testSphere2)
{
    const openvdb::Vec3f center(0.0f);
    const float radius = 0.5f;
    const int dim = 400, halfWidth = 3;
    const float voxelSize = 2.0f/dim;
    auto grid = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(radius, center, voxelSize, halfWidth);
    openvdb::FloatTree& tree = grid->tree();

    {//test center
        const openvdb::CoordBBox bbox(openvdb::Coord(0), openvdb::Coord(8));
        EXPECT_TRUE(!tree.isValueOn(openvdb::Coord(0)));
        //openvdb::util::CpuTimer timer("\ncenter");
        EXPECT_TRUE(!openvdb::tools::anyActiveValues(tree, bbox));
        //timer.stop();
    }
    {//test on sphere
        const openvdb::Coord d(int(radius/voxelSize), 0, 0);
        EXPECT_TRUE(tree.isValueOn(d));
        const auto bbox = openvdb::CoordBBox::createCube(d, 4);
        //openvdb::util::CpuTimer timer("\non sphere");
        EXPECT_TRUE(openvdb::tools::anyActiveValues(tree, bbox));
        //timer.stop();
    }
    {//test full domain
        const openvdb::CoordBBox bbox(openvdb::Coord(-4000), openvdb::Coord(4000));
        //openvdb::util::CpuTimer timer("\nfull domain");
        EXPECT_TRUE(openvdb::tools::anyActiveValues(tree, bbox));
        //timer.stop();
        // A bbox covering the whole tree must count every active voxel.
        openvdb::tools::FindActiveValues<openvdb::FloatTree> op(tree);
        EXPECT_TRUE(op.count(bbox) == tree.activeVoxelCount());
    }
    {// find largest inscribed cube in index space containing NO active values
        openvdb::tools::FindActiveValues<openvdb::FloatTree> op(tree);
        auto bbox = openvdb::CoordBBox::createCube(openvdb::Coord(0), 1);
        //openvdb::util::CpuTimer timer("\nInscribed cube (class)");
        int count = 0;
        // Grow the cube until it touches the active narrow band.
        while(op.noActiveValues(bbox)) {
            ++count;
            bbox.expand(1);
        }
        //const double t = timer.stop();
        //std::cerr << "Inscribed bbox = " << bbox << std::endl;
        // Analytic half-side of the largest cube inscribed in the
        // inactive interior (radius minus the narrow-band width).
        const int n = int(openvdb::math::Sqrt(openvdb::math::Pow2(radius-halfWidth*voxelSize)/3.0f)/voxelSize) + 1;
        //std::cerr << "n=" << n << std::endl;
        EXPECT_TRUE( bbox.max() == openvdb::Coord( n));
        EXPECT_TRUE( bbox.min() == openvdb::Coord(-n));
        //openvdb::util::printTime(std::cerr, t/count, "time per lookup ", "\n", true, 4, 3);
    }
    {// find largest inscribed cube in index space containing NO active values
        auto bbox = openvdb::CoordBBox::createCube(openvdb::Coord(0), 1);
        //openvdb::util::CpuTimer timer("\nInscribed cube (func)");
        int count = 0;
        // Same search as above, but with the free-function API.
        while(!openvdb::tools::anyActiveValues(tree, bbox)) {
            bbox.expand(1);
            ++count;
        }
        //const double t = timer.stop();
        //std::cerr << "Inscribed bbox = " << bbox << std::endl;
        const int n = int(openvdb::math::Sqrt(openvdb::math::Pow2(radius-halfWidth*voxelSize)/3.0f)/voxelSize) + 1;
        //std::cerr << "n=" << n << std::endl;
        //openvdb::util::printTime(std::cerr, t/count, "time per lookup ", "\n", true, 4, 3);
        EXPECT_TRUE( bbox.max() == openvdb::Coord( n));
        EXPECT_TRUE( bbox.min() == openvdb::Coord(-n));
    }
}


// Active-tile behavior of a sparsely filled box, including the
// requirement that FindActiveValues::update() be called after the tree
// is modified, and that the reported tiles exactly tile the filled bbox.
TEST_F(TestFindActiveValues, testSparseBox)
{
    {//test active tiles in a sparsely filled box
        const int half_dim = 256;
        const openvdb::CoordBBox bbox(openvdb::Coord(-half_dim), openvdb::Coord(half_dim-1));
        openvdb::FloatTree tree;
        EXPECT_TRUE(tree.activeTileCount() == 0);
        EXPECT_TRUE(tree.getValueDepth(openvdb::Coord(0)) == -1);//background value
        openvdb::tools::FindActiveValues<openvdb::FloatTree> op(tree);

        tree.sparseFill(bbox, 1.0f, true);
        op.update(tree);//tree was modified so op needs to be updated
        EXPECT_TRUE(tree.activeTileCount() > 0);
        EXPECT_TRUE(tree.getValueDepth(openvdb::Coord(0)) == 1);//upper internal tile value

        // Growing cubes from a corner hit tile values but never leaf voxels.
        for (int i=1; i<half_dim; ++i) {
            EXPECT_TRUE( op.anyActiveValues(openvdb::CoordBBox::createCube(openvdb::Coord(-half_dim), i)));
            EXPECT_TRUE(!op.anyActiveVoxels(openvdb::CoordBBox::createCube(openvdb::Coord(-half_dim), i)));
        }
        EXPECT_TRUE(op.count(bbox) == bbox.volume());

        // Slide a unit cube diagonally until it exits the filled region.
        auto bbox2 = openvdb::CoordBBox::createCube(openvdb::Coord(-half_dim), 1);
        //double t = 0.0;
        //openvdb::util::CpuTimer timer;
        for (bool test = true; test; ) {
            //timer.restart();
            test = op.anyActiveValues(bbox2);
            //t = std::max(t, timer.restart());
            if (test) bbox2.translate(openvdb::Coord(1));
        }
        //std::cerr << "bbox = " << bbox2 << std::endl;
        //openvdb::util::printTime(std::cout, t, "The slowest sparse test ", "\n", true, 4, 3);
        EXPECT_TRUE(bbox2 == openvdb::CoordBBox::createCube(openvdb::Coord(half_dim), 1));

        EXPECT_TRUE( openvdb::tools::anyActiveTiles(tree, bbox) );
        auto tiles = openvdb::tools::activeTiles(tree, bbox);
        // 4 tiles per axis: {-256, -129} -> {-128, 0} -> {0, 127} -> {128, 255}
        EXPECT_TRUE( tiles.size() == openvdb::math::Pow3(size_t(4)) );
        //std::cerr << "bbox " << bbox << " overlaps with " << tiles.size() << " active tiles " << std::endl;
        openvdb::CoordBBox tmp;
        for (auto &t : tiles) {
            EXPECT_TRUE( t.state );
            EXPECT_TRUE( t.level == 2);// tiles at level 1 are 8^3, at level 2 they are 128^3, and at level 3 they are 4096^3
            EXPECT_TRUE( t.value == 1.0f);
            EXPECT_TRUE( t.bbox.volume() == openvdb::math::Pow3(openvdb::Index64(128)) );
            tmp.expand( t.bbox );
            //std::cerr << t.bbox << std::endl;
        }
        //std::cerr << tmp << std::endl;
        EXPECT_TRUE( tmp == bbox );// union of all the active tiles should equal the bbox of the sparseFill operation!
    }
}// testSparseBox


// Active-voxel behavior of a densely filled box: values live at leaf
// depth, every query sees voxels (not tiles), and activeTiles is empty.
TEST_F(TestFindActiveValues, testDenseBox)
{
    {//test active voxels in a densely filled box
        const int half_dim = 256;
        const openvdb::CoordBBox bbox(openvdb::Coord(-half_dim), openvdb::Coord(half_dim));
        openvdb::FloatTree tree;

        EXPECT_TRUE(tree.activeTileCount() == 0);
        EXPECT_TRUE(tree.getValueDepth(openvdb::Coord(0)) == -1);//background value

        tree.denseFill(bbox, 1.0f, true);

        EXPECT_TRUE(tree.activeTileCount() == 0);
        openvdb::tools::FindActiveValues<openvdb::FloatTree> op(tree);
        EXPECT_TRUE(tree.getValueDepth(openvdb::Coord(0)) == 3);// leaf value

        // Growing cubes from the center hit both values and voxels.
        for (int i=1; i<half_dim; ++i) {
            EXPECT_TRUE(op.anyActiveValues(openvdb::CoordBBox::createCube(openvdb::Coord(0), i)));
            EXPECT_TRUE(op.anyActiveVoxels(openvdb::CoordBBox::createCube(openvdb::Coord(0), i)));
        }
        EXPECT_TRUE(op.count(bbox) == bbox.volume());

        // Slide a unit cube diagonally until it exits the filled region.
        auto bbox2 = openvdb::CoordBBox::createCube(openvdb::Coord(-half_dim), 1);
        //double t = 0.0;
        //openvdb::util::CpuTimer timer;
        for (bool test = true; test; ) {
            //timer.restart();
            test = op.anyActiveValues(bbox2);
            //t = std::max(t, timer.restart());
            if (test) bbox2.translate(openvdb::Coord(1));
        }
        //std::cerr << "bbox = " << bbox2 << std::endl;
        //openvdb::util::printTime(std::cout, t, "The slowest dense test ", "\n", true, 4, 3);
        EXPECT_TRUE(bbox2 == openvdb::CoordBBox::createCube(openvdb::Coord(half_dim + 1), 1));

        auto tiles = openvdb::tools::activeTiles(tree, bbox);
        EXPECT_TRUE( tiles.size() == 0u );
    }
}// testDenseBox


// Timed sweeps (timers left commented out) that double as correctness
// checks: op.count must equal the filled volume in each configuration.
TEST_F(TestFindActiveValues, testBenchmarks)
{
    {//benchmark test against active tiles in a sparsely filled box
        using namespace openvdb;
        const int half_dim = 512, bbox_size = 6;
        const CoordBBox bbox(Coord(-half_dim), Coord(half_dim));
        FloatTree tree;
        tree.sparseFill(bbox, 1.0f, true);
        tools::FindActiveValues<FloatTree> op(tree);
        //double t = 0.0;
        //util::CpuTimer timer;
        for (auto b = CoordBBox::createCube(Coord(-half_dim), bbox_size); true; b.translate(Coord(1))) {
            //timer.restart();
            bool test = op.anyActiveValues(b);
            //t = std::max(t, timer.restart());
            if (!test) break;
        }
        //std::cout << "\n*The slowest sparse test " << t << " milliseconds\n";
        EXPECT_TRUE(op.count(bbox) == bbox.volume());
    }
    {//benchmark test against active voxels in a densely filled box
        using namespace openvdb;
        const int half_dim = 256, bbox_size = 1;
        const CoordBBox bbox(Coord(-half_dim), Coord(half_dim));
        FloatTree tree;
        tree.denseFill(bbox, 1.0f, true);
        tools::FindActiveValues<FloatTree> op(tree);
        //double t = 0.0;
        //openvdb::util::CpuTimer timer;
        for (auto b = CoordBBox::createCube(Coord(-half_dim), bbox_size); true; b.translate(Coord(1))) {
            //timer.restart();
            bool test = op.anyActiveValues(b);
            //t = std::max(t, timer.restart());
            if (!test) break;
        }
        //std::cout << "*The slowest dense test " << t << " milliseconds\n";
        EXPECT_TRUE(op.count(bbox) == bbox.volume());
    }
    {//benchmark test against active voxels in a densely filled box
        using namespace openvdb;
        FloatTree tree;
        tree.denseFill(CoordBBox::createCube(Coord(0), 256), 1.0f, true);
        tools::FindActiveValues<FloatTree> op(tree);
        //openvdb::util::CpuTimer timer("new test");
        // A cube just past the filled region's corner must be empty.
        EXPECT_TRUE(op.noActiveValues(CoordBBox::createCube(Coord(256), 1)));
        //timer.stop();
    }
}// testBenchmarks
13,869
C++
42.208723
125
0.62016
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestMeshToVolume.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <vector> #include "gtest/gtest.h" #include <openvdb/openvdb.h> #include <openvdb/Exceptions.h> #include <openvdb/tools/MeshToVolume.h> #include <openvdb/util/Util.h> class TestMeshToVolume: public ::testing::Test { }; //////////////////////////////////////// TEST_F(TestMeshToVolume, testUtils) { /// Test nearestCoord openvdb::Vec3d xyz(0.7, 2.2, -2.7); openvdb::Coord ijk = openvdb::util::nearestCoord(xyz); EXPECT_TRUE(ijk[0] == 0 && ijk[1] == 2 && ijk[2] == -3); xyz = openvdb::Vec3d(-22.1, 4.6, 202.34); ijk = openvdb::util::nearestCoord(xyz); EXPECT_TRUE(ijk[0] == -23 && ijk[1] == 4 && ijk[2] == 202); /// Test the coordinate offset table for neghbouring voxels openvdb::Coord sum(0, 0, 0); unsigned int pX = 0, pY = 0, pZ = 0, mX = 0, mY = 0, mZ = 0; for (unsigned int i = 0; i < 26; ++i) { ijk = openvdb::util::COORD_OFFSETS[i]; sum += ijk; if (ijk[0] == 1) ++pX; else if (ijk[0] == -1) ++mX; if (ijk[1] == 1) ++pY; else if (ijk[1] == -1) ++mY; if (ijk[2] == 1) ++pZ; else if (ijk[2] == -1) ++mZ; } EXPECT_TRUE(sum == openvdb::Coord(0, 0, 0)); EXPECT_TRUE( pX == 9); EXPECT_TRUE( pY == 9); EXPECT_TRUE( pZ == 9); EXPECT_TRUE( mX == 9); EXPECT_TRUE( mY == 9); EXPECT_TRUE( mZ == 9); } TEST_F(TestMeshToVolume, testConversion) { using namespace openvdb; std::vector<Vec3s> points; std::vector<Vec4I> quads; // cube vertices points.push_back(Vec3s(2, 2, 2)); // 0 6--------7 points.push_back(Vec3s(5, 2, 2)); // 1 /| /| points.push_back(Vec3s(2, 5, 2)); // 2 2--------3 | points.push_back(Vec3s(5, 5, 2)); // 3 | | | | points.push_back(Vec3s(2, 2, 5)); // 4 | 4------|-5 points.push_back(Vec3s(5, 2, 5)); // 5 |/ |/ points.push_back(Vec3s(2, 5, 5)); // 6 0--------1 points.push_back(Vec3s(5, 5, 5)); // 7 // cube faces quads.push_back(Vec4I(0, 1, 3, 2)); // front quads.push_back(Vec4I(5, 4, 6, 7)); // back quads.push_back(Vec4I(0, 2, 6, 4)); // left quads.push_back(Vec4I(1, 5, 7, 3)); // right 
quads.push_back(Vec4I(2, 3, 7, 6)); // top quads.push_back(Vec4I(0, 4, 5, 1)); // bottom math::Transform::Ptr xform = math::Transform::createLinearTransform(); tools::QuadAndTriangleDataAdapter<Vec3s, Vec4I> mesh(points, quads); FloatGrid::Ptr grid = tools::meshToVolume<FloatGrid>(mesh, *xform); EXPECT_TRUE(grid.get() != NULL); EXPECT_EQ(int(GRID_LEVEL_SET), int(grid->getGridClass())); EXPECT_EQ(1, int(grid->baseTree().leafCount())); grid = tools::meshToLevelSet<FloatGrid>(*xform, points, quads); EXPECT_TRUE(grid.get() != NULL); EXPECT_EQ(int(GRID_LEVEL_SET), int(grid->getGridClass())); EXPECT_EQ(1, int(grid->baseTree().leafCount())); } TEST_F(TestMeshToVolume, testCreateLevelSetBox) { typedef openvdb::FloatGrid FloatGrid; typedef openvdb::Vec3s Vec3s; typedef openvdb::math::BBox<Vec3s> BBoxs; typedef openvdb::math::Transform Transform; BBoxs bbox(Vec3s(0.0, 0.0, 0.0), Vec3s(1.0, 1.0, 1.0)); Transform::Ptr transform = Transform::createLinearTransform(0.1); FloatGrid::Ptr grid = openvdb::tools::createLevelSetBox<FloatGrid>(bbox, *transform); double gridBackground = grid->background(); double expectedBackground = transform->voxelSize().x() * double(openvdb::LEVEL_SET_HALF_WIDTH); EXPECT_NEAR(expectedBackground, gridBackground, 1e-6); EXPECT_TRUE(grid->tree().leafCount() > 0); // test inside coord value openvdb::Coord ijk = transform->worldToIndexNodeCentered(openvdb::Vec3d(0.5, 0.5, 0.5)); EXPECT_TRUE(grid->tree().getValue(ijk) < 0.0f); // test outside coord value ijk = transform->worldToIndexNodeCentered(openvdb::Vec3d(1.5, 1.5, 1.5)); EXPECT_TRUE(grid->tree().getValue(ijk) > 0.0f); }
4,063
C++
29.328358
99
0.58159