file_path           string    (length 21–202)
content             string    (length 19–1.02M)
size                int64     (19–1.02M)
lang                string    (8 distinct values)
avg_line_length     float64   (5.88–100)
max_line_length     int64     (12–993)
alphanum_fraction   float64   (0.27–0.93)
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Composite.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file Composite.h /// /// @brief Functions to efficiently perform various compositing operations on grids /// /// @authors Peter Cucka, Mihai Alden, Ken Museth #ifndef OPENVDB_TOOLS_COMPOSITE_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_COMPOSITE_HAS_BEEN_INCLUDED #include <openvdb/Platform.h> #include <openvdb/Exceptions.h> #include <openvdb/Types.h> #include <openvdb/Grid.h> #include <openvdb/math/Math.h> // for isExactlyEqual() #include "Merge.h" #include "ValueTransformer.h" // for transformValues() #include "Prune.h"// for prune #include "SignedFloodFill.h" // for signedFloodFill() #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <tbb/task_group.h> #include <tbb/task_scheduler_init.h> #include <type_traits> #include <functional> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Given two level set grids, replace the A grid with the union of A and B. /// @throw ValueError if the background value of either grid is not greater than zero. /// @note This operation always leaves the B grid empty. template<typename GridOrTreeT> inline void csgUnion(GridOrTreeT& a, GridOrTreeT& b, bool prune = true); /// @brief Given two level set grids, replace the A grid with the intersection of A and B. /// @throw ValueError if the background value of either grid is not greater than zero. /// @note This operation always leaves the B grid empty. template<typename GridOrTreeT> inline void csgIntersection(GridOrTreeT& a, GridOrTreeT& b, bool prune = true); /// @brief Given two level set grids, replace the A grid with the difference A / B. /// @throw ValueError if the background value of either grid is not greater than zero. /// @note This operation always leaves the B grid empty. template<typename GridOrTreeT> inline void csgDifference(GridOrTreeT& a, GridOrTreeT& b, bool prune = true); /// @brief Threaded CSG union operation that produces a new grid or tree from /// immutable inputs. /// @return The CSG union of the @a and @b level set inputs. template<typename GridOrTreeT> inline typename GridOrTreeT::Ptr csgUnionCopy(const GridOrTreeT& a, const GridOrTreeT& b); /// @brief Threaded CSG intersection operation that produces a new grid or tree from /// immutable inputs. /// @return The CSG intersection of the @a and @b level set inputs. template<typename GridOrTreeT> inline typename GridOrTreeT::Ptr csgIntersectionCopy(const GridOrTreeT& a, const GridOrTreeT& b); /// @brief Threaded CSG difference operation that produces a new grid or tree from /// immutable inputs. /// @return The CSG difference of the @a and @b level set inputs. template<typename GridOrTreeT> inline typename GridOrTreeT::Ptr csgDifferenceCopy(const GridOrTreeT& a, const GridOrTreeT& b); /// @brief Given grids A and B, compute max(a, b) per voxel (using sparse traversal). /// Store the result in the A grid and leave the B grid empty. template<typename GridOrTreeT> inline void compMax(GridOrTreeT& a, GridOrTreeT& b); /// @brief Given grids A and B, compute min(a, b) per voxel (using sparse traversal). /// Store the result in the A grid and leave the B grid empty. template<typename GridOrTreeT> inline void compMin(GridOrTreeT& a, GridOrTreeT& b); /// @brief Given grids A and B, compute a + b per voxel (using sparse traversal). /// Store the result in the A grid and leave the B grid empty. 
template<typename GridOrTreeT> inline void compSum(GridOrTreeT& a, GridOrTreeT& b); /// @brief Given grids A and B, compute a * b per voxel (using sparse traversal). /// Store the result in the A grid and leave the B grid empty. template<typename GridOrTreeT> inline void compMul(GridOrTreeT& a, GridOrTreeT& b); /// @brief Given grids A and B, compute a / b per voxel (using sparse traversal). /// Store the result in the A grid and leave the B grid empty. template<typename GridOrTreeT> inline void compDiv(GridOrTreeT& a, GridOrTreeT& b); /// Copy the active voxels of B into A. template<typename GridOrTreeT> inline void compReplace(GridOrTreeT& a, const GridOrTreeT& b); //////////////////////////////////////// namespace composite { // composite::min() and composite::max() for non-vector types compare with operator<(). template<typename T> inline const typename std::enable_if<!VecTraits<T>::IsVec, T>::type& // = T if T is not a vector type min(const T& a, const T& b) { return std::min(a, b); } template<typename T> inline const typename std::enable_if<!VecTraits<T>::IsVec, T>::type& max(const T& a, const T& b) { return std::max(a, b); } // composite::min() and composite::max() for OpenVDB vector types compare by magnitude. template<typename T> inline const typename std::enable_if<VecTraits<T>::IsVec, T>::type& // = T if T is a vector type min(const T& a, const T& b) { const typename T::ValueType aMag = a.lengthSqr(), bMag = b.lengthSqr(); return (aMag < bMag ? a : (bMag < aMag ? b : std::min(a, b))); } template<typename T> inline const typename std::enable_if<VecTraits<T>::IsVec, T>::type& max(const T& a, const T& b) { const typename T::ValueType aMag = a.lengthSqr(), bMag = b.lengthSqr(); return (aMag < bMag ? b : (bMag < aMag ? a : std::max(a, b))); } template<typename T> inline typename std::enable_if<!std::is_integral<T>::value, T>::type // = T if T is not an integer type divide(const T& a, const T& b) { return a / b; } template<typename T> inline typename std::enable_if<std::is_integral<T>::value, T>::type // = T if T is an integer type divide(const T& a, const T& b) { const T zero(0); if (b != zero) return a / b; if (a == zero) return 0; return (a > 0 ? std::numeric_limits<T>::max() : -std::numeric_limits<T>::max()); } // If b is true, return a / 1 = a. // If b is false and a is true, return 1 / 0 = inf = MAX_BOOL = 1 = a. // If b is false and a is false, return 0 / 0 = NaN = 0 = a. 
inline bool divide(bool a, bool /*b*/) { return a; } enum CSGOperation { CSG_UNION, CSG_INTERSECTION, CSG_DIFFERENCE }; template<typename TreeType, CSGOperation Operation> struct BuildPrimarySegment { using ValueType = typename TreeType::ValueType; using TreePtrType = typename TreeType::Ptr; using LeafNodeType = typename TreeType::LeafNodeType; using NodeMaskType = typename LeafNodeType::NodeMaskType; using RootNodeType = typename TreeType::RootNodeType; using NodeChainType = typename RootNodeType::NodeChainType; using InternalNodeType = typename NodeChainType::template Get<1>; BuildPrimarySegment(const TreeType& lhs, const TreeType& rhs) : mSegment(new TreeType(lhs.background())) , mLhsTree(&lhs) , mRhsTree(&rhs) { } void operator()() const { std::vector<const LeafNodeType*> leafNodes; { std::vector<const InternalNodeType*> internalNodes; mLhsTree->getNodes(internalNodes); ProcessInternalNodes op(internalNodes, *mRhsTree, *mSegment, leafNodes); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, internalNodes.size()), op); } ProcessLeafNodes op(leafNodes, *mRhsTree, *mSegment); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, leafNodes.size()), op); } TreePtrType& segment() { return mSegment; } private: struct ProcessInternalNodes { ProcessInternalNodes(std::vector<const InternalNodeType*>& lhsNodes, const TreeType& rhsTree, TreeType& outputTree, std::vector<const LeafNodeType*>& outputLeafNodes) : mLhsNodes(lhsNodes.empty() ? nullptr : &lhsNodes.front()) , mRhsTree(&rhsTree) , mLocalTree(mRhsTree->background()) , mOutputTree(&outputTree) , mLocalLeafNodes() , mOutputLeafNodes(&outputLeafNodes) { } ProcessInternalNodes(ProcessInternalNodes& other, tbb::split) : mLhsNodes(other.mLhsNodes) , mRhsTree(other.mRhsTree) , mLocalTree(mRhsTree->background()) , mOutputTree(&mLocalTree) , mLocalLeafNodes() , mOutputLeafNodes(&mLocalLeafNodes) { } void join(ProcessInternalNodes& other) { mOutputTree->merge(*other.mOutputTree); mOutputLeafNodes->insert(mOutputLeafNodes->end(), other.mOutputLeafNodes->begin(), other.mOutputLeafNodes->end()); } void operator()(const tbb::blocked_range<size_t>& range) { tree::ValueAccessor<const TreeType> rhsAcc(*mRhsTree); tree::ValueAccessor<TreeType> outputAcc(*mOutputTree); std::vector<const LeafNodeType*> tmpLeafNodes; for (size_t n = range.begin(), N = range.end(); n < N; ++n) { const InternalNodeType& lhsNode = *mLhsNodes[n]; const Coord& ijk = lhsNode.origin(); const InternalNodeType * rhsNode = rhsAcc.template probeConstNode<InternalNodeType>(ijk); if (rhsNode) { lhsNode.getNodes(*mOutputLeafNodes); } else { if (Operation == CSG_INTERSECTION) { if (rhsAcc.getValue(ijk) < ValueType(0.0)) { tmpLeafNodes.clear(); lhsNode.getNodes(tmpLeafNodes); for (size_t i = 0, I = tmpLeafNodes.size(); i < I; ++i) { outputAcc.addLeaf(new LeafNodeType(*tmpLeafNodes[i])); } } } else { // Union & Difference if (!(rhsAcc.getValue(ijk) < ValueType(0.0))) { tmpLeafNodes.clear(); lhsNode.getNodes(tmpLeafNodes); for (size_t i = 0, I = tmpLeafNodes.size(); i < I; ++i) { outputAcc.addLeaf(new LeafNodeType(*tmpLeafNodes[i])); } } } } } // end range loop } InternalNodeType const * const * const mLhsNodes; TreeType const * const mRhsTree; TreeType mLocalTree; TreeType * const mOutputTree; std::vector<const LeafNodeType*> mLocalLeafNodes; std::vector<const LeafNodeType*> * const mOutputLeafNodes; }; // struct ProcessInternalNodes struct ProcessLeafNodes { ProcessLeafNodes(std::vector<const LeafNodeType*>& lhsNodes, const TreeType& rhsTree, TreeType& output) : mLhsNodes(lhsNodes.empty() ? 
nullptr : &lhsNodes.front()) , mRhsTree(&rhsTree) , mLocalTree(mRhsTree->background()) , mOutputTree(&output) { } ProcessLeafNodes(ProcessLeafNodes& other, tbb::split) : mLhsNodes(other.mLhsNodes) , mRhsTree(other.mRhsTree) , mLocalTree(mRhsTree->background()) , mOutputTree(&mLocalTree) { } void join(ProcessLeafNodes& rhs) { mOutputTree->merge(*rhs.mOutputTree); } void operator()(const tbb::blocked_range<size_t>& range) { tree::ValueAccessor<const TreeType> rhsAcc(*mRhsTree); tree::ValueAccessor<TreeType> outputAcc(*mOutputTree); for (size_t n = range.begin(), N = range.end(); n < N; ++n) { const LeafNodeType& lhsNode = *mLhsNodes[n]; const Coord& ijk = lhsNode.origin(); const LeafNodeType* rhsNodePt = rhsAcc.probeConstLeaf(ijk); if (rhsNodePt) { // combine overlapping nodes LeafNodeType* outputNode = outputAcc.touchLeaf(ijk); ValueType * outputData = outputNode->buffer().data(); NodeMaskType& outputMask = outputNode->getValueMask(); const ValueType * lhsData = lhsNode.buffer().data(); const NodeMaskType& lhsMask = lhsNode.getValueMask(); const ValueType * rhsData = rhsNodePt->buffer().data(); const NodeMaskType& rhsMask = rhsNodePt->getValueMask(); if (Operation == CSG_INTERSECTION) { for (Index pos = 0; pos < LeafNodeType::SIZE; ++pos) { const bool fromRhs = lhsData[pos] < rhsData[pos]; outputData[pos] = fromRhs ? rhsData[pos] : lhsData[pos]; outputMask.set(pos, fromRhs ? rhsMask.isOn(pos) : lhsMask.isOn(pos)); } } else if (Operation == CSG_DIFFERENCE){ for (Index pos = 0; pos < LeafNodeType::SIZE; ++pos) { const ValueType rhsVal = math::negative(rhsData[pos]); const bool fromRhs = lhsData[pos] < rhsVal; outputData[pos] = fromRhs ? rhsVal : lhsData[pos]; outputMask.set(pos, fromRhs ? rhsMask.isOn(pos) : lhsMask.isOn(pos)); } } else { // Union for (Index pos = 0; pos < LeafNodeType::SIZE; ++pos) { const bool fromRhs = lhsData[pos] > rhsData[pos]; outputData[pos] = fromRhs ? rhsData[pos] : lhsData[pos]; outputMask.set(pos, fromRhs ? 
rhsMask.isOn(pos) : lhsMask.isOn(pos)); } } } else { if (Operation == CSG_INTERSECTION) { if (rhsAcc.getValue(ijk) < ValueType(0.0)) { outputAcc.addLeaf(new LeafNodeType(lhsNode)); } } else { // Union & Difference if (!(rhsAcc.getValue(ijk) < ValueType(0.0))) { outputAcc.addLeaf(new LeafNodeType(lhsNode)); } } } } // end range loop } LeafNodeType const * const * const mLhsNodes; TreeType const * const mRhsTree; TreeType mLocalTree; TreeType * const mOutputTree; }; // struct ProcessLeafNodes TreePtrType mSegment; TreeType const * const mLhsTree; TreeType const * const mRhsTree; }; // struct BuildPrimarySegment template<typename TreeType, CSGOperation Operation> struct BuildSecondarySegment { using ValueType = typename TreeType::ValueType; using TreePtrType = typename TreeType::Ptr; using LeafNodeType = typename TreeType::LeafNodeType; using NodeMaskType = typename LeafNodeType::NodeMaskType; using RootNodeType = typename TreeType::RootNodeType; using NodeChainType = typename RootNodeType::NodeChainType; using InternalNodeType = typename NodeChainType::template Get<1>; BuildSecondarySegment(const TreeType& lhs, const TreeType& rhs) : mSegment(new TreeType(lhs.background())) , mLhsTree(&lhs) , mRhsTree(&rhs) { } void operator()() const { std::vector<const LeafNodeType*> leafNodes; { std::vector<const InternalNodeType*> internalNodes; mRhsTree->getNodes(internalNodes); ProcessInternalNodes op(internalNodes, *mLhsTree, *mSegment, leafNodes); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, internalNodes.size()), op); } ProcessLeafNodes op(leafNodes, *mLhsTree, *mSegment); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, leafNodes.size()), op); } TreePtrType& segment() { return mSegment; } private: struct ProcessInternalNodes { ProcessInternalNodes(std::vector<const InternalNodeType*>& rhsNodes, const TreeType& lhsTree, TreeType& outputTree, std::vector<const LeafNodeType*>& outputLeafNodes) : mRhsNodes(rhsNodes.empty() ? 
nullptr : &rhsNodes.front()) , mLhsTree(&lhsTree) , mLocalTree(mLhsTree->background()) , mOutputTree(&outputTree) , mLocalLeafNodes() , mOutputLeafNodes(&outputLeafNodes) { } ProcessInternalNodes(ProcessInternalNodes& other, tbb::split) : mRhsNodes(other.mRhsNodes) , mLhsTree(other.mLhsTree) , mLocalTree(mLhsTree->background()) , mOutputTree(&mLocalTree) , mLocalLeafNodes() , mOutputLeafNodes(&mLocalLeafNodes) { } void join(ProcessInternalNodes& other) { mOutputTree->merge(*other.mOutputTree); mOutputLeafNodes->insert(mOutputLeafNodes->end(), other.mOutputLeafNodes->begin(), other.mOutputLeafNodes->end()); } void operator()(const tbb::blocked_range<size_t>& range) { tree::ValueAccessor<const TreeType> lhsAcc(*mLhsTree); tree::ValueAccessor<TreeType> outputAcc(*mOutputTree); std::vector<const LeafNodeType*> tmpLeafNodes; for (size_t n = range.begin(), N = range.end(); n < N; ++n) { const InternalNodeType& rhsNode = *mRhsNodes[n]; const Coord& ijk = rhsNode.origin(); const InternalNodeType * lhsNode = lhsAcc.template probeConstNode<InternalNodeType>(ijk); if (lhsNode) { rhsNode.getNodes(*mOutputLeafNodes); } else { if (Operation == CSG_INTERSECTION) { if (lhsAcc.getValue(ijk) < ValueType(0.0)) { tmpLeafNodes.clear(); rhsNode.getNodes(tmpLeafNodes); for (size_t i = 0, I = tmpLeafNodes.size(); i < I; ++i) { outputAcc.addLeaf(new LeafNodeType(*tmpLeafNodes[i])); } } } else if (Operation == CSG_DIFFERENCE) { if (lhsAcc.getValue(ijk) < ValueType(0.0)) { tmpLeafNodes.clear(); rhsNode.getNodes(tmpLeafNodes); for (size_t i = 0, I = tmpLeafNodes.size(); i < I; ++i) { LeafNodeType* outputNode = new LeafNodeType(*tmpLeafNodes[i]); outputNode->negate(); outputAcc.addLeaf(outputNode); } } } else { // Union if (!(lhsAcc.getValue(ijk) < ValueType(0.0))) { tmpLeafNodes.clear(); rhsNode.getNodes(tmpLeafNodes); for (size_t i = 0, I = tmpLeafNodes.size(); i < I; ++i) { outputAcc.addLeaf(new LeafNodeType(*tmpLeafNodes[i])); } } } } } // end range loop } InternalNodeType const * const * const mRhsNodes; TreeType const * const mLhsTree; TreeType mLocalTree; TreeType * const mOutputTree; std::vector<const LeafNodeType*> mLocalLeafNodes; std::vector<const LeafNodeType*> * const mOutputLeafNodes; }; // struct ProcessInternalNodes struct ProcessLeafNodes { ProcessLeafNodes(std::vector<const LeafNodeType*>& rhsNodes, const TreeType& lhsTree, TreeType& output) : mRhsNodes(rhsNodes.empty() ? 
nullptr : &rhsNodes.front()) , mLhsTree(&lhsTree) , mLocalTree(mLhsTree->background()) , mOutputTree(&output) { } ProcessLeafNodes(ProcessLeafNodes& rhs, tbb::split) : mRhsNodes(rhs.mRhsNodes) , mLhsTree(rhs.mLhsTree) , mLocalTree(mLhsTree->background()) , mOutputTree(&mLocalTree) { } void join(ProcessLeafNodes& rhs) { mOutputTree->merge(*rhs.mOutputTree); } void operator()(const tbb::blocked_range<size_t>& range) { tree::ValueAccessor<const TreeType> lhsAcc(*mLhsTree); tree::ValueAccessor<TreeType> outputAcc(*mOutputTree); for (size_t n = range.begin(), N = range.end(); n < N; ++n) { const LeafNodeType& rhsNode = *mRhsNodes[n]; const Coord& ijk = rhsNode.origin(); const LeafNodeType* lhsNode = lhsAcc.probeConstLeaf(ijk); if (!lhsNode) { if (Operation == CSG_INTERSECTION) { if (lhsAcc.getValue(ijk) < ValueType(0.0)) { outputAcc.addLeaf(new LeafNodeType(rhsNode)); } } else if (Operation == CSG_DIFFERENCE) { if (lhsAcc.getValue(ijk) < ValueType(0.0)) { LeafNodeType* outputNode = new LeafNodeType(rhsNode); outputNode->negate(); outputAcc.addLeaf(outputNode); } } else { // Union if (!(lhsAcc.getValue(ijk) < ValueType(0.0))) { outputAcc.addLeaf(new LeafNodeType(rhsNode)); } } } } // end range loop } LeafNodeType const * const * const mRhsNodes; TreeType const * const mLhsTree; TreeType mLocalTree; TreeType * const mOutputTree; }; // struct ProcessLeafNodes TreePtrType mSegment; TreeType const * const mLhsTree; TreeType const * const mRhsTree; }; // struct BuildSecondarySegment template<CSGOperation Operation, typename TreeType> inline typename TreeType::Ptr doCSGCopy(const TreeType& lhs, const TreeType& rhs) { BuildPrimarySegment<TreeType, Operation> primary(lhs, rhs); BuildSecondarySegment<TreeType, Operation> secondary(lhs, rhs); // Exploiting nested parallelism tbb::task_group tasks; tasks.run(primary); tasks.run(secondary); tasks.wait(); primary.segment()->merge(*secondary.segment()); // The leafnode (level = 0) sign is set in the segment construction. tools::signedFloodFill(*primary.segment(), /*threaded=*/true, /*grainSize=*/1, /*minLevel=*/1); return primary.segment(); } //////////////////////////////////////// template<typename TreeType> struct GridOrTreeConstructor { using TreeTypePtr = typename TreeType::Ptr; static TreeTypePtr construct(const TreeType&, TreeTypePtr& tree) { return tree; } }; template<typename TreeType> struct GridOrTreeConstructor<Grid<TreeType> > { using GridType = Grid<TreeType>; using GridTypePtr = typename Grid<TreeType>::Ptr; using TreeTypePtr = typename TreeType::Ptr; static GridTypePtr construct(const GridType& grid, TreeTypePtr& tree) { GridTypePtr maskGrid(GridType::create(tree)); maskGrid->setTransform(grid.transform().copy()); maskGrid->insertMeta(grid); return maskGrid; } }; //////////////////////////////////////// /// @cond COMPOSITE_INTERNAL /// List of pairs of leaf node pointers template <typename LeafT> using LeafPairList = std::vector<std::pair<LeafT*, LeafT*>>; /// @endcond /// @cond COMPOSITE_INTERNAL /// Transfers leaf nodes from a source tree into a /// desitnation tree, unless it already exists in the destination tree /// in which case pointers to both leaf nodes are added to a list for /// subsequent compositing operations. 
template <typename TreeT> inline void transferLeafNodes(TreeT &srcTree, TreeT &dstTree, LeafPairList<typename TreeT::LeafNodeType> &overlapping) { using LeafT = typename TreeT::LeafNodeType; tree::ValueAccessor<TreeT> acc(dstTree);//destination std::vector<LeafT*> srcLeafNodes; srcLeafNodes.reserve(srcTree.leafCount()); srcTree.stealNodes(srcLeafNodes); srcTree.clear(); for (LeafT *srcLeaf : srcLeafNodes) { LeafT *dstLeaf = acc.probeLeaf(srcLeaf->origin()); if (dstLeaf) { overlapping.emplace_back(dstLeaf, srcLeaf);//dst, src } else { acc.addLeaf(srcLeaf); } } } /// @endcond /// @cond COMPOSITE_INTERNAL /// Template specailization of compActiveLeafVoxels template <typename TreeT, typename OpT> inline typename std::enable_if< !std::is_same<typename TreeT::ValueType, bool>::value && !std::is_same<typename TreeT::BuildType, ValueMask>::value && std::is_same<typename TreeT::LeafNodeType::Buffer::ValueType, typename TreeT::LeafNodeType::Buffer::StorageType>::value>::type doCompActiveLeafVoxels(TreeT &srcTree, TreeT &dstTree, OpT op) { using LeafT = typename TreeT::LeafNodeType; LeafPairList<LeafT> overlapping;//dst, src transferLeafNodes(srcTree, dstTree, overlapping); using RangeT = tbb::blocked_range<size_t>; tbb::parallel_for(RangeT(0, overlapping.size()), [op, &overlapping](const RangeT& r) { for (auto i = r.begin(); i != r.end(); ++i) { LeafT *dstLeaf = overlapping[i].first, *srcLeaf = overlapping[i].second; dstLeaf->getValueMask() |= srcLeaf->getValueMask(); auto *ptr = dstLeaf->buffer().data(); for (auto v = srcLeaf->cbeginValueOn(); v; ++v) op(ptr[v.pos()], *v); delete srcLeaf; } }); } /// @endcond /// @cond COMPOSITE_INTERNAL /// Template specailization of compActiveLeafVoxels template <typename TreeT, typename OpT> inline typename std::enable_if< std::is_same<typename TreeT::BuildType, ValueMask>::value && std::is_same<typename TreeT::ValueType, bool>::value>::type doCompActiveLeafVoxels(TreeT &srcTree, TreeT &dstTree, OpT) { using LeafT = typename TreeT::LeafNodeType; LeafPairList<LeafT> overlapping;//dst, src transferLeafNodes(srcTree, dstTree, overlapping); using RangeT = tbb::blocked_range<size_t>; tbb::parallel_for(RangeT(0, overlapping.size()), [&overlapping](const RangeT& r) { for (auto i = r.begin(); i != r.end(); ++i) { overlapping[i].first->getValueMask() |= overlapping[i].second->getValueMask(); delete overlapping[i].second; } }); } /// @cond COMPOSITE_INTERNAL /// Template specailization of compActiveLeafVoxels template <typename TreeT, typename OpT> inline typename std::enable_if< std::is_same<typename TreeT::ValueType, bool>::value && !std::is_same<typename TreeT::BuildType, ValueMask>::value>::type doCompActiveLeafVoxels(TreeT &srcTree, TreeT &dstTree, OpT op) { using LeafT = typename TreeT::LeafNodeType; LeafPairList<LeafT> overlapping;//dst, src transferLeafNodes(srcTree, dstTree, overlapping); using RangeT = tbb::blocked_range<size_t>; using WordT = typename LeafT::Buffer::WordType; tbb::parallel_for(RangeT(0, overlapping.size()), [op, &overlapping](const RangeT& r) { for (auto i = r.begin(); i != r.end(); ++i) { LeafT *dstLeaf = overlapping[i].first, *srcLeaf = overlapping[i].second; WordT *w1 = dstLeaf->buffer().data(); const WordT *w2 = srcLeaf->buffer().data(); const WordT *w3 = &(srcLeaf->getValueMask().template getWord<WordT>(0)); for (Index32 n = LeafT::Buffer::WORD_COUNT; n--; ++w1) { WordT tmp = *w1, state = *w3++; op (tmp, *w2++); *w1 = (state & tmp) | (~state & *w1);//inactive values are unchanged } dstLeaf->getValueMask() |= srcLeaf->getValueMask(); delete 
srcLeaf; } }); } /// @endcond /// @cond COMPOSITE_INTERNAL /// Default functor for compActiveLeafVoxels template <typename TreeT> struct CopyOp { using ValueT = typename TreeT::ValueType; CopyOp() = default; void operator()(ValueT& dst, const ValueT& src) const { dst = src; } }; /// @endcond template <typename TreeT> inline void validateLevelSet(const TreeT& tree, const std::string& gridName = std::string("")) { using ValueT = typename TreeT::ValueType; const ValueT zero = zeroVal<ValueT>(); if (!(tree.background() > zero)) { std::stringstream ss; ss << "expected grid "; if (!gridName.empty()) ss << gridName << " "; ss << "outside value > 0, got " << tree.background(); OPENVDB_THROW(ValueError, ss.str()); } if (!(-tree.background() < zero)) { std::stringstream ss; ss << "expected grid "; if (!gridName.empty()) ss << gridName << " "; ss << "inside value < 0, got " << -tree.background(); OPENVDB_THROW(ValueError, ss.str()); } } } // namespace composite template<typename GridOrTreeT> inline void compMax(GridOrTreeT& aTree, GridOrTreeT& bTree) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; using ValueT = typename TreeT::ValueType; struct Local { static inline void op(CombineArgs<ValueT>& args) { args.setResult(composite::max(args.a(), args.b())); } }; Adapter::tree(aTree).combineExtended(Adapter::tree(bTree), Local::op, /*prune=*/false); } template<typename GridOrTreeT> inline void compMin(GridOrTreeT& aTree, GridOrTreeT& bTree) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; using ValueT = typename TreeT::ValueType; struct Local { static inline void op(CombineArgs<ValueT>& args) { args.setResult(composite::min(args.a(), args.b())); } }; Adapter::tree(aTree).combineExtended(Adapter::tree(bTree), Local::op, /*prune=*/false); } template<typename GridOrTreeT> inline void compSum(GridOrTreeT& aTree, GridOrTreeT& bTree) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; struct Local { static inline void op(CombineArgs<typename TreeT::ValueType>& args) { args.setResult(args.a() + args.b()); } }; Adapter::tree(aTree).combineExtended(Adapter::tree(bTree), Local::op, /*prune=*/false); } template<typename GridOrTreeT> inline void compMul(GridOrTreeT& aTree, GridOrTreeT& bTree) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; struct Local { static inline void op(CombineArgs<typename TreeT::ValueType>& args) { args.setResult(args.a() * args.b()); } }; Adapter::tree(aTree).combineExtended(Adapter::tree(bTree), Local::op, /*prune=*/false); } template<typename GridOrTreeT> inline void compDiv(GridOrTreeT& aTree, GridOrTreeT& bTree) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; struct Local { static inline void op(CombineArgs<typename TreeT::ValueType>& args) { args.setResult(composite::divide(args.a(), args.b())); } }; Adapter::tree(aTree).combineExtended(Adapter::tree(bTree), Local::op, /*prune=*/false); } //////////////////////////////////////// template<typename TreeT> struct CompReplaceOp { TreeT* const aTree; CompReplaceOp(TreeT& _aTree): aTree(&_aTree) {} /// @note fill operation is not thread safe void operator()(const typename TreeT::ValueOnCIter& iter) const { CoordBBox bbox; iter.getBoundingBox(bbox); aTree->fill(bbox, *iter); } void operator()(const typename TreeT::LeafCIter& leafIter) const { tree::ValueAccessor<TreeT> acc(*aTree); for (typename TreeT::LeafCIter::LeafNodeT::ValueOnCIter iter = 
leafIter->cbeginValueOn(); iter; ++iter) { acc.setValue(iter.getCoord(), *iter); } } }; template<typename GridOrTreeT> inline void compReplace(GridOrTreeT& aTree, const GridOrTreeT& bTree) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; using ValueOnCIterT = typename TreeT::ValueOnCIter; // Copy active states (but not values) from B to A. Adapter::tree(aTree).topologyUnion(Adapter::tree(bTree)); CompReplaceOp<TreeT> op(Adapter::tree(aTree)); // Copy all active tile values from B to A. ValueOnCIterT iter = bTree.cbeginValueOn(); iter.setMaxDepth(iter.getLeafDepth() - 1); // don't descend into leaf nodes foreach(iter, op, /*threaded=*/false); // Copy all active voxel values from B to A. foreach(Adapter::tree(bTree).cbeginLeaf(), op); } //////////////////////////////////////// template<typename GridOrTreeT> inline void csgUnion(GridOrTreeT& a, GridOrTreeT& b, bool prune) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; TreeT &aTree = Adapter::tree(a), &bTree = Adapter::tree(b); composite::validateLevelSet(aTree, "A"); composite::validateLevelSet(bTree, "B"); CsgUnionOp<TreeT> op(bTree, Steal()); tree::DynamicNodeManager<TreeT> nodeManager(aTree); nodeManager.foreachTopDown(op); if (prune) tools::pruneLevelSet(aTree); } template<typename GridOrTreeT> inline void csgIntersection(GridOrTreeT& a, GridOrTreeT& b, bool prune) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; TreeT &aTree = Adapter::tree(a), &bTree = Adapter::tree(b); composite::validateLevelSet(aTree, "A"); composite::validateLevelSet(bTree, "B"); CsgIntersectionOp<TreeT> op(bTree, Steal()); tree::DynamicNodeManager<TreeT> nodeManager(aTree); nodeManager.foreachTopDown(op); if (prune) tools::pruneLevelSet(aTree); } template<typename GridOrTreeT> inline void csgDifference(GridOrTreeT& a, GridOrTreeT& b, bool prune) { using Adapter = TreeAdapter<GridOrTreeT>; using TreeT = typename Adapter::TreeType; TreeT &aTree = Adapter::tree(a), &bTree = Adapter::tree(b); composite::validateLevelSet(aTree, "A"); composite::validateLevelSet(bTree, "B"); CsgDifferenceOp<TreeT> op(bTree, Steal()); tree::DynamicNodeManager<TreeT> nodeManager(aTree); nodeManager.foreachTopDown(op); if (prune) tools::pruneLevelSet(aTree); } template<typename GridOrTreeT> inline typename GridOrTreeT::Ptr csgUnionCopy(const GridOrTreeT& a, const GridOrTreeT& b) { using Adapter = TreeAdapter<GridOrTreeT>; using TreePtrT = typename Adapter::TreeType::Ptr; TreePtrT output = composite::doCSGCopy<composite::CSG_UNION>( Adapter::tree(a), Adapter::tree(b)); return composite::GridOrTreeConstructor<GridOrTreeT>::construct(a, output); } template<typename GridOrTreeT> inline typename GridOrTreeT::Ptr csgIntersectionCopy(const GridOrTreeT& a, const GridOrTreeT& b) { using Adapter = TreeAdapter<GridOrTreeT>; using TreePtrT = typename Adapter::TreeType::Ptr; TreePtrT output = composite::doCSGCopy<composite::CSG_INTERSECTION>( Adapter::tree(a), Adapter::tree(b)); return composite::GridOrTreeConstructor<GridOrTreeT>::construct(a, output); } template<typename GridOrTreeT> inline typename GridOrTreeT::Ptr csgDifferenceCopy(const GridOrTreeT& a, const GridOrTreeT& b) { using Adapter = TreeAdapter<GridOrTreeT>; using TreePtrT = typename Adapter::TreeType::Ptr; TreePtrT output = composite::doCSGCopy<composite::CSG_DIFFERENCE>( Adapter::tree(a), Adapter::tree(b)); return composite::GridOrTreeConstructor<GridOrTreeT>::construct(a, output); } 
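// Illustrative usage sketch (hypothetical, not part of the original header):
// the copy-based CSG functions defined above leave both inputs untouched and
// return a new grid. Assumes <openvdb/openvdb.h> and tools/LevelSetSphere.h
// are available for FloatGrid and createLevelSetSphere().
#if 0
inline openvdb::FloatGrid::Ptr exampleCsgUnionCopy()
{
    // Two overlapping narrow-band level-set spheres (voxel size 0.1).
    openvdb::FloatGrid::Ptr a = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/1.0f, /*center=*/openvdb::Vec3f(0.0f, 0.0f, 0.0f), /*voxelSize=*/0.1f);
    openvdb::FloatGrid::Ptr b = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/1.0f, /*center=*/openvdb::Vec3f(0.5f, 0.0f, 0.0f), /*voxelSize=*/0.1f);
    // Neither *a nor *b is modified; their union is returned as a new grid.
    return openvdb::tools::csgUnionCopy(*a, *b);
}
#endif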
////////////////////////////////////////////////////////

/// @brief Composite the active values in leaf nodes, i.e. active
///        voxels, of a source tree into a destination tree.
///
/// @param srcTree source tree from which active voxels are composited.
///
/// @param dstTree destination tree into which active voxels are composited.
///
/// @param op      a functor of the form <tt>void op(T& dst, const T& src)</tt>,
///                where @c T is the @c ValueType of the tree, that composites
///                a source value into a destination value. By default
///                it copies the value from src to dst.
///
/// @details All active voxels in the source tree will
///          be active in the destination tree, and their value is
///          determined by a user-defined functor (OpT op) that operates on the
///          source and destination values. The only exception is when
///          the tree type is MaskTree, in which case no functor is
///          needed since by definition a MaskTree has no values (only topology).
///
/// @warning This function only operates on leaf node values,
///          i.e. tile values are ignored.
template<typename TreeT, typename OpT = composite::CopyOp<TreeT> >
inline void
compActiveLeafVoxels(TreeT &srcTree, TreeT &dstTree, OpT op = composite::CopyOp<TreeT>())
{
    composite::doCompActiveLeafVoxels<TreeT, OpT>(srcTree, dstTree, op);
}

} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TOOLS_COMPOSITE_HAS_BEEN_INCLUDED
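A minimal usage sketch of the in-place compositing entry points declared in this header (the grid names are hypothetical, and createLevelSetSphere() from tools/LevelSetSphere.h is assumed only to produce example inputs):

#include <algorithm>  // for std::min
#include <openvdb/openvdb.h>
#include <openvdb/tools/Composite.h>
#include <openvdb/tools/LevelSetSphere.h>

int main()
{
    openvdb::initialize();

    // Two overlapping narrow-band level-set spheres, voxel size 0.1.
    openvdb::FloatGrid::Ptr a = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/1.0f, /*center=*/openvdb::Vec3f(0.0f, 0.0f, 0.0f), /*voxelSize=*/0.1f);
    openvdb::FloatGrid::Ptr b = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/1.0f, /*center=*/openvdb::Vec3f(0.5f, 0.0f, 0.0f), /*voxelSize=*/0.1f);

    // In-place CSG union: the result replaces *a and *b is left empty.
    openvdb::tools::csgUnion(*a, *b);

    // Composite the active leaf voxels of a source tree into a destination
    // tree with a user-defined functor (here: keep the smaller value, which
    // acts like a union on signed distances). The source tree is emptied.
    openvdb::FloatGrid::Ptr c = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/1.0f, /*center=*/openvdb::Vec3f(-0.5f, 0.0f, 0.0f), /*voxelSize=*/0.1f);
    openvdb::tools::compActiveLeafVoxels(c->tree(), a->tree(),
        [](float& dst, const float& src) { dst = std::min(dst, src); });

    return 0;
}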
size: 37,675
lang: C
avg_line_length: 36.451292
max_line_length: 99
alphanum_fraction: 0.607273
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/FastSweeping.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file FastSweeping.h /// /// @author Ken Museth /// /// @brief Defined the six functions {fog,sdf}To{Sdf,Ext,SdfAndExt} in /// addition to the two functions maskSdf and dilateSdf. Sdf denotes /// a signed-distance field (i.e. negative values are inside), fog /// is a scalar fog volume (i.e. higher values are inside), and Ext is /// a field (of arbitrary type) that is extended off the iso-surface. /// All these functions are implemented with the methods in the class /// named FastSweeping. /// /// @note Solves the (simplified) Eikonal Eq: @f$|\nabla \phi|^2 = 1@f$ and /// performs velocity extension, @f$\nabla f\nabla \phi = 0@f$, both /// by means of the fast sweeping algorithm detailed in: /// "A Fast Sweeping Method For Eikonal Equations" /// by H. Zhao, Mathematics of Computation, Vol 74(230), pp 603-627, 2004 /// /// @details The algorithm used below for parallel fast sweeping was first publised in: /// "New Algorithm for Sparse and Parallel Fast Sweeping: Efficient /// Computation of Sparse Distance Fields" by K. Museth, ACM SIGGRAPH Talk, /// 2017, http://www.museth.org/Ken/Publications_files/Museth_SIG17.pdf #ifndef OPENVDB_TOOLS_FASTSWEEPING_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_FASTSWEEPING_HAS_BEEN_INCLUDED //#define BENCHMARK_FAST_SWEEPING #include <type_traits>// for static_assert #include <cmath> #include <limits> #include <deque> #include <unordered_map> #include <utility>// for std::make_pair #include <tbb/parallel_for.h> #include <tbb/enumerable_thread_specific.h> #include <tbb/task_group.h> #include <openvdb/math/Math.h> // for Abs() and isExactlyEqual() #include <openvdb/math/Stencils.h> // for GradStencil #include <openvdb/tree/LeafManager.h> #include "LevelSetUtil.h" #include "Morphology.h" #include "Statistics.h" #ifdef BENCHMARK_FAST_SWEEPING #include <openvdb/util/CpuTimer.h> #endif namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Converts a scalar fog volume into a signed distance function. Active input voxels /// with scalar values above the given isoValue will have NEGATIVE distance /// values on output, i.e. they are assumed to be INSIDE the iso-surface. /// /// @return A shared pointer to a signed-distance field defined on the active values /// of the input fog volume. /// /// @param fogGrid Scalar (floating-point) volume from which an /// iso-surface can be defined. /// /// @param isoValue A value which defines a smooth iso-surface that /// intersects active voxels in @a fogGrid. /// /// @param nIter Number of iterations of the fast sweeping algorithm. /// Each iteration performs 2^3 = 8 individual sweeps. /// /// @note Strictly speaking a fog volume is normalized to the range [0,1] but this /// method accepts a scalar volume with an arbitary range, as long as the it /// includes the @a isoValue. /// /// @details Topology of output grid is identical to that of the input grid, except /// active tiles in the input grid will be converted to active voxels /// in the output grid! /// /// @warning If @a isoValue does not intersect any active values in /// @a fogGrid then the returned grid has all its active values set to /// plus or minus infinity, depending on if the input values are larger or /// smaller than @a isoValue. 
template<typename GridT> typename GridT::Ptr fogToSdf(const GridT &fogGrid, typename GridT::ValueType isoValue, int nIter = 1); /// @brief Given an existing approximate SDF it solves the Eikonal equation for all its /// active voxels. Active input voxels with a signed distance value above the /// given isoValue will have POSITIVE distance values on output, i.e. they are /// assumed to be OUTSIDE the iso-surface. /// /// @return A shared pointer to a signed-distance field defined on the active values /// of the input sdf volume. /// /// @param sdfGrid An approximate signed distance field to the specified iso-surface. /// /// @param isoValue A value which defines a smooth iso-surface that /// intersects active voxels in @a sdfGrid. /// /// @param nIter Number of iterations of the fast sweeping algorithm. /// Each iteration performs 2^3 = 8 individual sweeps. /// /// @note The only difference between this method and fogToSdf, defined above, is the /// convention of the sign of the output distance field. /// /// @details Topology of output grid is identical to that of the input grid, except /// active tiles in the input grid will be converted to active voxels /// in the output grid! /// /// @warning If @a isoValue does not intersect any active values in /// @a sdfGrid then the returned grid has all its active values set to /// plus or minus infinity, depending on if the input values are larger or /// smaller than @a isoValue. template<typename GridT> typename GridT::Ptr sdfToSdf(const GridT &sdfGrid, typename GridT::ValueType isoValue = 0, int nIter = 1); /// @brief Computes the extension of a field, defined by the specified functor, /// off an iso-surface from an input FOG volume. /// /// @return A shared pointer to the extension field defined from the active values in /// the input fog volume. /// /// @param fogGrid Scalar (floating-point) volume from which an /// iso-surface can be defined. /// /// @param op Functor with signature [](const Vec3R &xyz)->ExtValueT that /// defines the Dirichlet boundary condition, on the iso-surface, /// of the field to be extended. /// /// @param background Background value of return grid with the extension field. /// /// @param isoValue A value which defines a smooth iso-surface that /// intersects active voxels in @a fogGrid. /// /// @param nIter Number of iterations of the fast sweeping algorithm. /// Each iteration performs 2^3 = 8 individual sweeps. /// /// @note Strictly speaking a fog volume is normalized to the range [0,1] but this /// method accepts a scalar volume with an arbitary range, as long as the it /// includes the @a isoValue. /// /// @details Topology of output grid is identical to that of the input grid, except /// active tiles in the input grid will be converted to active voxels /// in the output grid! /// /// @warning If @a isoValue does not intersect any active values in /// @a fogGrid then the returned grid has all its active values set to /// @a background. template<typename FogGridT, typename ExtOpT, typename ExtValueT> typename FogGridT::template ValueConverter<ExtValueT>::Type::Ptr fogToExt(const FogGridT &fogGrid, const ExtOpT &op, const ExtValueT& background, typename FogGridT::ValueType isoValue, int nIter = 1); /// @brief Computes the extension of a scalar field, defined by the specified functor, /// off an iso-surface from an input SDF volume. /// /// @return A shared pointer to the extension field defined on the active values in the /// input signed distance field. 
/// /// @param sdfGrid An approximate signed distance field to the specified iso-surface. /// /// @param op Functor with signature [](const Vec3R &xyz)->float that /// defines the Dirichlet boundary condition, on the iso-surface, /// of the field to be extended. /// /// @param background Background value of return grid with the extension field. /// /// @param isoValue A value which defines a smooth iso-surface that /// intersects active voxels in @a sdfGrid. /// /// @param nIter Number of iterations of the fast sweeping algorithm. /// Each iteration performs 2^3 = 8 individual sweeps. /// /// @note The only difference between this method and fogToEXT, defined above, is the /// convention of the sign of the signed distance field. /// /// @details Topology of output grid is identical to that of the input grid, except /// active tiles in the input grid will be converted to active voxels /// in the output grid! /// /// @warning If @a isoValue does not intersect any active values in /// @a sdfGrid then the returned grid has all its active values set to /// @a background. template<typename SdfGridT, typename ExtOpT, typename ExtValueT> typename SdfGridT::template ValueConverter<ExtValueT>::Type::Ptr sdfToExt(const SdfGridT &sdfGrid, const ExtOpT &op, const ExtValueT &background, typename SdfGridT::ValueType isoValue = 0, int nIter = 1); /// @brief Computes the signed distance field and the extension of a scalar field, /// defined by the specified functor, off an iso-surface from an input FOG volume. /// /// @return An pair of two shared pointers to respectively the SDF and extension field /// /// @param fogGrid Scalar (floating-point) volume from which an /// iso-surface can be defined. /// /// @param op Functor with signature [](const Vec3R &xyz)->float that /// defines the Dirichlet boundary condition, on the iso-surface, /// of the field to be extended. /// /// @param background Background value of return grid with the extension field. /// /// @param isoValue A value which defines a smooth iso-surface that /// intersects active voxels in @a fogGrid. /// /// @param nIter Number of iterations of the fast sweeping algorithm. /// Each iteration performs 2^3 = 8 individual sweeps. /// /// @note Strictly speaking a fog volume is normalized to the range [0,1] but this /// method accepts a scalar volume with an arbitary range, as long as the it /// includes the @a isoValue. /// /// @details Topology of output grids are identical to that of the input grid, except /// active tiles in the input grid will be converted to active voxels /// in the output grids! /// /// @warning If @a isoValue does not intersect any active values in /// @a fogGrid then a pair of the following grids is returned: The first /// is a signed distance grid with its active values set to plus or minus /// infinity depending of whether its input values are above or below @a isoValue. /// The second grid, which represents the extension field, has all its active /// values set to @a background. template<typename FogGridT, typename ExtOpT, typename ExtValueT> std::pair<typename FogGridT::Ptr, typename FogGridT::template ValueConverter<ExtValueT>::Type::Ptr> fogToSdfAndExt(const FogGridT &fogGrid, const ExtOpT &op, const ExtValueT &background, typename FogGridT::ValueType isoValue, int nIter = 1); /// @brief Computes the signed distance field and the extension of a scalar field, /// defined by the specified functor, off an iso-surface from an input SDF volume. 
/// /// @return A pair of two shared pointers to respectively the SDF and extension field /// /// @param sdfGrid Scalar (floating-point) volume from which an /// iso-surface can be defined. /// /// @param op Functor with signature [](const Vec3R &xyz)->float that /// defines the Dirichlet boundary condition, on the iso-surface, /// of the field to be extended. /// /// @param background Background value of return grid with the extension field. /// /// @param isoValue A value which defines a smooth iso-surface that /// intersects active voxels in @a sdfGrid. /// /// @param nIter Number of iterations of the fast sweeping algorithm. /// Each iteration performs 2^3 = 8 individual sweeps. /// /// @note Strictly speaking a fog volume is normalized to the range [0,1] but this /// method accepts a scalar volume with an arbitary range, as long as the it /// includes the @a isoValue. /// /// @details Topology of output grids are identical to that of the input grid, except /// active tiles in the input grid will be converted to active voxels /// in the output grids! /// /// @warning If @a isoValue does not intersect any active values in /// @a sdfGrid then a pair of the following grids is returned: The first /// is a signed distance grid with its active values set to plus or minus /// infinity depending of whether its input values are above or below @a isoValue. /// The second grid, which represents the extension field, has all its active /// values set to @a background. template<typename SdfGridT, typename ExtOpT, typename ExtValueT> std::pair<typename SdfGridT::Ptr, typename SdfGridT::template ValueConverter<ExtValueT>::Type::Ptr> sdfToSdfAndExt(const SdfGridT &sdfGrid, const ExtOpT &op, const ExtValueT &background, typename SdfGridT::ValueType isoValue = 0, int nIter = 1); /// @brief Dilates an existing signed distance filed by a specified number of voxels /// /// @return A shared pointer to the dilated signed distance field. /// /// @param sdfGrid Input signed distance field to be dilated. /// /// @param dilation Numer of voxels that the input SDF will be dilated. /// /// @param nn Stencil-pattern used for dilation /// /// @param nIter Number of iterations of the fast sweeping algorithm. /// Each iteration performs 2^3 = 8 individual sweeps. /// /// @details Topology will change as a result of this dilation. E.g. if /// sdfGrid has a width of 3 and @a dilation = 6 then the grid /// returned by this method is a narrow band signed distance field /// with a total vidth of 9 units. template<typename GridT> typename GridT::Ptr dilateSdf(const GridT &sdfGrid, int dilation, NearestNeighbors nn = NN_FACE, int nIter = 1); /// @brief Fills mask by extending an existing signed distance field into /// the active values of this input ree of arbitrary value type. /// /// @return A shared pointer to the masked signed distance field. /// /// @param sdfGrid Input signed distance field to be extended into the mask. /// /// @param mask Mask used to idetify the topology of the output SDF. /// Note this mask is assume to overlap with the sdfGrid. /// /// @param ignoreActiveTiles If false, active tiles in the mask are treated /// as active voxels. Else they are ignored. /// /// @param nIter Number of iterations of the fast sweeping algorithm. /// Each iteration performs 2^3 = 8 individual sweeps. /// /// @details Topology of the output SDF is determined by the union of the active /// voxels (or optionally values) in @a sdfGrid and @a mask. 
template<typename GridT, typename MaskTreeT> typename GridT::Ptr maskSdf(const GridT &sdfGrid, const Grid<MaskTreeT> &mask, bool ignoreActiveTiles = false, int nIter = 1); //////////////////////////////////////////////////////////////////////////////// /// @brief Computes signed distance values from an initial iso-surface and /// optionally performs velocty extension at the same time. This is /// done by means of a novel sparse and parallel fast sweeping /// algorithm based on a first order Goudonov's scheme. /// /// Solves: @f$|\nabla \phi|^2 = 1 @f$ /// /// @warning Note, it is important to call one of the initialization methods before /// called the sweep function. Failure to do so will throw a RuntimeError. /// Consider instead call one of the many higher-level free-standing functions /// defined above! template<typename SdfGridT, typename ExtValueT = typename SdfGridT::ValueType> class FastSweeping { static_assert(std::is_floating_point<typename SdfGridT::ValueType>::value, "FastSweeping requires SdfGridT to have floating-point values"); // Defined types related to the signed disntance (or fog) grid using SdfValueT = typename SdfGridT::ValueType; using SdfTreeT = typename SdfGridT::TreeType; using SdfAccT = tree::ValueAccessor<SdfTreeT, false>;//don't register accessors // define types related to the extension field using ExtGridT = typename SdfGridT::template ValueConverter<ExtValueT>::Type; using ExtTreeT = typename ExtGridT::TreeType; using ExtAccT = tree::ValueAccessor<ExtTreeT, false>; // define types related to the tree that masks out the active voxels to be solved for using SweepMaskTreeT = typename SdfTreeT::template ValueConverter<ValueMask>::Type; using SweepMaskAccT = tree::ValueAccessor<SweepMaskTreeT, false>;//don't register accessors public: /// @brief Constructor FastSweeping(); /// @brief Destructor. ~FastSweeping() { this->clear(); } /// @brief Disallow copy construction. FastSweeping(const FastSweeping&) = delete; /// @brief Disallow copy assignment. FastSweeping& operator=(const FastSweeping&) = delete; /// @brief Returns a shared pointer to the signed distance field computed /// by this class. /// /// @warning This shared pointer might point to NULL if the grid has not been /// initialize (by one of the init methods) or computed (by the sweep /// method). typename SdfGridT::Ptr sdfGrid() { return mSdfGrid; } /// @brief Returns a shared pointer to the extension field computed /// by this class. /// /// @warning This shared pointer might point to NULL if the grid has not been /// initialize (by one of the init methods) or computed (by the sweep /// method). typename ExtGridT::Ptr extGrid() { return mExtGrid; } /// @brief Initializer for input grids that are either a signed distance /// field or a scalar fog volume. /// /// @return True if the initialization succeeded. /// /// @param sdfGrid Input scalar grid that represents an existing signed distance /// field or a fog volume (signified by @a isInputSdf). /// /// @param isoValue Iso-value to be used to define the Dirichlet boundary condition /// of the fast sweeping algorithm (typically 0 for sdfs and a /// positive value for fog volumes). /// /// @param isInputSdf Used to determine if @a sdfGrid is a sigend distance field (true) /// or a scalar fog volume (false). /// /// @details This, or any of ther other initilization methods, should be called /// before any call to sweep(). Failure to do so will throw a RuntimeError. /// /// @warning Note, if this method fails, i.e. 
returns false, a subsequent call /// to sweep will trow a RuntimeError. Instead call clear and try again. bool initSdf(const SdfGridT &sdfGrid, SdfValueT isoValue, bool isInputSdf); /// @brief Initializer used whenever velocity extension is performed in addition /// to the computation of signed distance fields. /// /// @return True if the initialization succeeded. /// /// /// @param sdfGrid Input scalar grid that represents an existing signed distance /// field or a fog volume (signified by @a isInputSdf). /// /// @param op Functor with signature [](const Vec3R &xyz)->ExtValueT that /// defines the Dirichlet boundary condition, on the iso-surface, /// of the field to be extended. Strictly the return type of this functor /// is only required to be convertible to ExtValueT! /// /// @param background Background value of return grid with the extension field. /// /// @param isoValue Iso-value to be used for the boundary condition of the fast /// sweeping algorithm (typically 0 for sdfs and a positive value /// for fog volumes). /// /// @param isInputSdf Used to determine if @a sdfGrid is a sigend distance field (true) /// or a scalar fog volume (false). /// /// @details This, or any of ther other initilization methods, should be called /// before any call to sweep(). Failure to do so will throw a RuntimeError. /// /// @warning Note, if this method fails, i.e. returns false, a subsequent call /// to sweep will trow a RuntimeError. Instead call clear and try again. template <typename ExtOpT> bool initExt(const SdfGridT &sdfGrid, const ExtOpT &op, const ExtValueT &background, SdfValueT isoValue, bool isInputSdf); /// @brief Initializer used when dilating an exsiting signed distance field. /// /// @return True if the initialization succeeded. /// /// @param sdfGrid Input signed distance field to to be dilated. /// /// @param dilation Numer of voxels that the input SDF will be dilated. /// /// @param nn Stencil-pattern used for dilation /// /// @details This, or any of ther other initilization methods, should be called /// before any call to sweep(). Failure to do so will throw a RuntimeError. /// /// @warning Note, if this method fails, i.e. returns false, a subsequent call /// to sweep will trow a RuntimeError. Instead call clear and try again. bool initDilate(const SdfGridT &sdfGrid, int dilation, NearestNeighbors nn = NN_FACE); /// @brief Initializer used for the extamnsion of an exsiting signed distance field /// into the active values of an input mask of arbitrary value type. /// /// @return True if the initialization succeeded. /// /// @param sdfGrid Input signed distance field to be extended into the mask. /// /// @param mask Mask used to idetify the topology of the output SDF. /// Note this mask is assume to overlap with the sdfGrid. /// /// @param ignoreActiveTiles If false, active tiles in the mask are treated /// as active voxels. Else they are ignored. /// /// @details This, or any of ther other initilization methods, should be called /// before any call to sweep(). Failure to do so will throw a RuntimeError. /// /// @warning Note, if this method fails, i.e. returns false, a subsequent call /// to sweep will trow a RuntimeError. Instead call clear and try again. template<typename MaskTreeT> bool initMask(const SdfGridT &sdfGrid, const Grid<MaskTreeT> &mask, bool ignoreActiveTiles = false); /// @brief Perform @a nIter iterations of the fast sweeping algorithm. /// /// @param nIter Number of iterations of the fast sweeping algorithm. /// Each iteration performs 2^3 = 8 individual sweeps. 
/// /// @param finalize If true the (possibly asymmetric) inside and outside values of the /// resulting signed distance field are properly set. Unless you're /// an expert this should remain true! /// /// @throw RuntimeError if sweepingVoxelCount() or boundaryVoxelCount() return zero. /// This might happen if none of the initialization methods above were called /// or if that initialization failed. void sweep(int nIter = 1, bool finalize = true); /// @brief Clears all the grids and counters so initializtion can be called again. void clear(); /// @brief Return the number of voxels that will be solved for. size_t sweepingVoxelCount() const { return mSweepingVoxelCount; } /// @brief Return the number of voxels that defined the boundary condition. size_t boundaryVoxelCount() const { return mBoundaryVoxelCount; } /// @brief Return true if there are voxels and boundaries to solve for bool isValid() const { return mSweepingVoxelCount > 0 && mBoundaryVoxelCount > 0; } private: /// @brief Private method to prune the sweep mask and cache leaf origins. void computeSweepMaskLeafOrigins(); // Private utility classes template<typename> struct MaskKernel;// initialization to extend a SDF into a mask template<typename> struct InitExt; struct InitSdf; struct DilateKernel;// initialization to dilate a SDF struct MinMaxKernel; struct SweepingKernel;// performs the actual concurrent sparse fast sweeping // Define the topology (i.e. stencil) of the neighboring grid points static const Coord mOffset[6];// = {{-1,0,0},{1,0,0},{0,-1,0},{0,1,0},{0,0,-1},{0,0,1}}; // Private member data of FastSweeping typename SdfGridT::Ptr mSdfGrid; typename ExtGridT::Ptr mExtGrid; SweepMaskTreeT mSweepMask; // mask tree containing all non-boundary active voxels std::vector<Coord> mSweepMaskLeafOrigins; // cache of leaf node origins for mask tree size_t mSweepingVoxelCount, mBoundaryVoxelCount; };// FastSweeping //////////////////////////////////////////////////////////////////////////////// // Static member data initialization template <typename SdfGridT, typename ExtValueT> const Coord FastSweeping<SdfGridT, ExtValueT>::mOffset[6] = {{-1,0,0},{1,0,0}, {0,-1,0},{0,1,0}, {0,0,-1},{0,0,1}}; template <typename SdfGridT, typename ExtValueT> FastSweeping<SdfGridT, ExtValueT>::FastSweeping() : mSdfGrid(nullptr), mExtGrid(nullptr), mSweepingVoxelCount(0), mBoundaryVoxelCount(0) { } template <typename SdfGridT, typename ExtValueT> void FastSweeping<SdfGridT, ExtValueT>::clear() { mSdfGrid.reset(); mExtGrid.reset(); mSweepMask.clear(); mSweepingVoxelCount = mBoundaryVoxelCount = 0; } template <typename SdfGridT, typename ExtValueT> void FastSweeping<SdfGridT, ExtValueT>::computeSweepMaskLeafOrigins() { // replace any inactive leaf nodes with tiles and voxelize any active tiles pruneInactive(mSweepMask); mSweepMask.voxelizeActiveTiles(); using LeafManagerT = tree::LeafManager<SweepMaskTreeT>; using LeafT = typename SweepMaskTreeT::LeafNodeType; LeafManagerT leafManager(mSweepMask); mSweepMaskLeafOrigins.resize(leafManager.leafCount()); tbb::atomic<size_t> sweepingVoxelCount = 0; auto kernel = [&](const LeafT& leaf, size_t leafIdx) { mSweepMaskLeafOrigins[leafIdx] = leaf.origin(); sweepingVoxelCount += leaf.onVoxelCount(); }; leafManager.foreach(kernel, /*threaded=*/true, /*grainsize=*/1024); mBoundaryVoxelCount = 0; mSweepingVoxelCount = sweepingVoxelCount; if (mSdfGrid) { const size_t totalCount = mSdfGrid->constTree().activeVoxelCount(); assert( totalCount >= mSweepingVoxelCount ); mBoundaryVoxelCount = totalCount - 
mSweepingVoxelCount; } }// FastSweeping::computeSweepMaskLeafOrigins template <typename SdfGridT, typename ExtValueT> bool FastSweeping<SdfGridT, ExtValueT>::initSdf(const SdfGridT &fogGrid, SdfValueT isoValue, bool isInputSdf) { this->clear(); mSdfGrid = fogGrid.deepCopy();//very fast InitSdf kernel(*this); kernel.run(isoValue, isInputSdf); return this->isValid(); } template <typename SdfGridT, typename ExtValueT> template <typename OpT> bool FastSweeping<SdfGridT, ExtValueT>::initExt(const SdfGridT &fogGrid, const OpT &op, const ExtValueT &background, SdfValueT isoValue, bool isInputSdf) { this->clear(); mSdfGrid = fogGrid.deepCopy();//very fast mExtGrid = createGrid<ExtGridT>( background ); mExtGrid->topologyUnion( *mSdfGrid );//very fast InitExt<OpT> kernel(*this); kernel.run(isoValue, op, isInputSdf); return this->isValid(); } template <typename SdfGridT, typename ExtValueT> bool FastSweeping<SdfGridT, ExtValueT>::initDilate(const SdfGridT &sdfGrid, int dilate, NearestNeighbors nn) { this->clear(); mSdfGrid = sdfGrid.deepCopy();//very fast DilateKernel kernel(*this); kernel.run(dilate, nn); return this->isValid(); } template <typename SdfGridT, typename ExtValueT> template<typename MaskTreeT> bool FastSweeping<SdfGridT, ExtValueT>::initMask(const SdfGridT &sdfGrid, const Grid<MaskTreeT> &mask, bool ignoreActiveTiles) { this->clear(); mSdfGrid = sdfGrid.deepCopy();//very fast if (mSdfGrid->transform() != mask.transform()) { OPENVDB_THROW(RuntimeError, "FastSweeping: Mask not aligned with the grid!"); } if (mask.getGridClass() == GRID_LEVEL_SET) { using T = typename MaskTreeT::template ValueConverter<bool>::Type; typename Grid<T>::Ptr tmp = sdfInteriorMask(mask);//might have active tiles tmp->tree().voxelizeActiveTiles();//multi-threaded MaskKernel<T> kernel(*this); kernel.run(tmp->tree());//multi-threaded } else { if (ignoreActiveTiles || !mask.tree().hasActiveTiles()) { MaskKernel<MaskTreeT> kernel(*this); kernel.run(mask.tree());//multi-threaded } else { using T = typename MaskTreeT::template ValueConverter<ValueMask>::Type; T tmp(mask.tree(), false, TopologyCopy());//multi-threaded tmp.voxelizeActiveTiles(true);//multi-threaded MaskKernel<T> kernel(*this); kernel.run(tmp);//multi-threaded } } return this->isValid(); }// FastSweeping::initMask template <typename SdfGridT, typename ExtValueT> void FastSweeping<SdfGridT, ExtValueT>::sweep(int nIter, bool finalize) { if (!mSdfGrid) { OPENVDB_THROW(RuntimeError, "FastSweeping::sweep called before initialization"); } if (this->boundaryVoxelCount() == 0) { OPENVDB_THROW(RuntimeError, "FastSweeping: No boundary voxels found!"); } else if (this->sweepingVoxelCount() == 0) { OPENVDB_THROW(RuntimeError, "FastSweeping: No computing voxels found!"); } // note: SweepingKernel is non copy-constructible, so use a deque instead of a vector std::deque<SweepingKernel> kernels; for (int i = 0; i < 4; i++) kernels.emplace_back(*this); { // compute voxel slices #ifdef BENCHMARK_FAST_SWEEPING util::CpuTimer timer("Computing voxel slices"); #endif // Exploiting nested parallelism - all voxel slice data is precomputed tbb::task_group tasks; tasks.run([&] { kernels[0].computeVoxelSlices([](const Coord &a){ return a[0]+a[1]+a[2]; });/*+++ & ---*/ }); tasks.run([&] { kernels[1].computeVoxelSlices([](const Coord &a){ return a[0]+a[1]-a[2]; });/*++- & --+*/ }); tasks.run([&] { kernels[2].computeVoxelSlices([](const Coord &a){ return a[0]-a[1]+a[2]; });/*+-+ & -+-*/ }); tasks.run([&] { kernels[3].computeVoxelSlices([](const Coord &a){ return a[0]-a[1]-a[2]; 
});/*+-- & -++*/ }); tasks.wait(); #ifdef BENCHMARK_FAST_SWEEPING timer.stop(); #endif } // perform nIter iterations of bi-directional sweeping in all directions for (int i = 0; i < nIter; ++i) { for (SweepingKernel& kernel : kernels) kernel.sweep(); } if (finalize) { #ifdef BENCHMARK_FAST_SWEEPING util::CpuTimer timer("Computing extrema values"); #endif MinMaxKernel kernel; auto e = kernel.run(*mSdfGrid);//multi-threaded //auto e = extrema(mGrid->beginValueOn());// 100x slower!!!! #ifdef BENCHMARK_FAST_SWEEPING std::cerr << "Min = " << e.min() << " Max = " << e.max() << std::endl; timer.restart("Changing asymmetric background value"); #endif changeAsymmetricLevelSetBackground(mSdfGrid->tree(), e.max(), e.min());//multi-threaded #ifdef BENCHMARK_FAST_SWEEPING timer.stop(); #endif } }// FastSweeping::sweep /// Private class of FastSweeping to quickly compute the extrema /// values of the active voxels in the leaf nodes. Several orders /// of magnitude faster than tools::extrema! template <typename SdfGridT, typename ExtValueT> struct FastSweeping<SdfGridT, ExtValueT>::MinMaxKernel { using LeafMgr = tree::LeafManager<const SdfTreeT>; using LeafRange = typename LeafMgr::LeafRange; MinMaxKernel() : mMin(std::numeric_limits<SdfValueT>::max()), mMax(-mMin) {} MinMaxKernel(MinMaxKernel& other, tbb::split) : mMin(other.mMin), mMax(other.mMax) {} math::MinMax<SdfValueT> run(const SdfGridT &grid) { LeafMgr mgr(grid.tree());// super fast tbb::parallel_reduce(mgr.leafRange(), *this); return math::MinMax<SdfValueT>(mMin, mMax); } void operator()(const LeafRange& r) { for (auto leafIter = r.begin(); leafIter; ++leafIter) { for (auto voxelIter = leafIter->beginValueOn(); voxelIter; ++voxelIter) { const SdfValueT v = *voxelIter; if (v < mMin) mMin = v; if (v > mMax) mMax = v; } } } void join(const MinMaxKernel& other) { if (other.mMin < mMin) mMin = other.mMin; if (other.mMax > mMax) mMax = other.mMax; } SdfValueT mMin, mMax; };// FastSweeping::MinMaxKernel //////////////////////////////////////////////////////////////////////////////// /// Private class of FastSweeping to perform multi-threaded initialization template <typename SdfGridT, typename ExtValueT> struct FastSweeping<SdfGridT, ExtValueT>::DilateKernel { using LeafRange = typename tree::LeafManager<SdfTreeT>::LeafRange; DilateKernel(FastSweeping &parent) : mParent(&parent), mBackground(parent.mSdfGrid->background()) { } DilateKernel(const DilateKernel &parent) = default;// for tbb::parallel_for DilateKernel& operator=(const DilateKernel&) = delete; void run(int dilation, NearestNeighbors nn) { #ifdef BENCHMARK_FAST_SWEEPING util::CpuTimer timer("Construct LeafManager"); #endif tree::LeafManager<SdfTreeT> mgr(mParent->mSdfGrid->tree());// super fast #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Changing background value"); #endif static const SdfValueT Unknown = std::numeric_limits<SdfValueT>::max(); changeLevelSetBackground(mgr, Unknown);//multi-threaded #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Dilating and updating mgr (parallel)"); //timer.restart("Dilating and updating mgr (serial)"); #endif const int delta = 5; for (int i=0, d = dilation/delta; i<d; ++i) dilateActiveValues(mgr, delta, nn, IGNORE_TILES); dilateActiveValues(mgr, dilation % delta, nn, IGNORE_TILES); //for (int i=0, n=5, d=dilation/n; i<d; ++i) dilateActiveValues(mgr, n, nn, IGNORE_TILES); //dilateVoxels(mgr, dilation, nn); #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Initializing grid and sweep mask"); #endif mParent->mSweepMask.clear(); 
mParent->mSweepMask.topologyUnion(mParent->mSdfGrid->constTree()); using LeafManagerT = tree::LeafManager<typename SdfGridT::TreeType>; using LeafT = typename SdfGridT::TreeType::LeafNodeType; LeafManagerT leafManager(mParent->mSdfGrid->tree()); auto kernel = [&](LeafT& leaf, size_t /*leafIdx*/) { static const SdfValueT Unknown = std::numeric_limits<SdfValueT>::max(); const SdfValueT background = mBackground;//local copy auto* maskLeaf = mParent->mSweepMask.probeLeaf(leaf.origin()); assert(maskLeaf); for (auto voxelIter = leaf.beginValueOn(); voxelIter; ++voxelIter) { const SdfValueT value = *voxelIter; if (math::Abs(value) < background) {// disable boundary voxels from the mask tree maskLeaf->setValueOff(voxelIter.pos()); } else { voxelIter.setValue(value > 0 ? Unknown : -Unknown); } } }; leafManager.foreach( kernel ); // cache the leaf node origins for fast lookup in the sweeping kernels mParent->computeSweepMaskLeafOrigins(); #ifdef BENCHMARK_FAST_SWEEPING timer.stop(); #endif }// FastSweeping::DilateKernel::run // Private member data of DilateKernel FastSweeping *mParent; const SdfValueT mBackground; };// FastSweeping::DilateKernel //////////////////////////////////////////////////////////////////////////////// template <typename SdfGridT, typename ExtValueT> struct FastSweeping<SdfGridT, ExtValueT>::InitSdf { using LeafRange = typename tree::LeafManager<SdfTreeT>::LeafRange; InitSdf(FastSweeping &parent): mParent(&parent), mSdfGrid(parent.mSdfGrid.get()), mIsoValue(0), mAboveSign(0) {} InitSdf(const InitSdf&) = default;// for tbb::parallel_for InitSdf& operator=(const InitSdf&) = delete; void run(SdfValueT isoValue, bool isInputSdf) { mIsoValue = isoValue; mAboveSign = isInputSdf ? SdfValueT(1) : SdfValueT(-1); SdfTreeT &tree = mSdfGrid->tree();//sdf const bool hasActiveTiles = tree.hasActiveTiles(); if (isInputSdf && hasActiveTiles) { OPENVDB_THROW(RuntimeError, "FastSweeping: A SDF should not have active tiles!"); } #ifdef BENCHMARK_FAST_SWEEPING util::CpuTimer timer("Initialize voxels"); #endif mParent->mSweepMask.clear(); mParent->mSweepMask.topologyUnion(mParent->mSdfGrid->constTree()); {// Process all voxels tree::LeafManager<SdfTreeT> mgr(tree, 1);// we need one auxiliary buffer tbb::parallel_for(mgr.leafRange(32), *this);//multi-threaded mgr.swapLeafBuffer(1);//swap voxel values } #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Initialize tiles - new"); #endif // Process all tiles tree::NodeManager<SdfTreeT, SdfTreeT::RootNodeType::LEVEL-1> mgr(tree); mgr.foreachBottomUp(*this);//multi-threaded tree.root().setBackground(std::numeric_limits<SdfValueT>::max(), false); if (hasActiveTiles) tree.voxelizeActiveTiles();//multi-threaded // cache the leaf node origins for fast lookup in the sweeping kernels mParent->computeSweepMaskLeafOrigins(); }// FastSweeping::InitSdf::run void operator()(const LeafRange& r) const { SweepMaskAccT sweepMaskAcc(mParent->mSweepMask); math::GradStencil<SdfGridT, false> stencil(*mSdfGrid); const SdfValueT isoValue = mIsoValue, above = mAboveSign*std::numeric_limits<SdfValueT>::max();//local copy const SdfValueT h = mAboveSign*static_cast<SdfValueT>(mSdfGrid->voxelSize()[0]);//Voxel size for (auto leafIter = r.begin(); leafIter; ++leafIter) { SdfValueT* sdf = leafIter.buffer(1).data(); for (auto voxelIter = leafIter->beginValueAll(); voxelIter; ++voxelIter) { const SdfValueT value = *voxelIter; const bool isAbove = value > isoValue; if (!voxelIter.isValueOn()) {// inactive voxels sdf[voxelIter.pos()] = isAbove ? 
above : -above; } else {// active voxels const Coord ijk = voxelIter.getCoord(); stencil.moveTo(ijk, value); const auto mask = stencil.intersectionMask( isoValue ); if (mask.none()) {// most common case sdf[voxelIter.pos()] = isAbove ? above : -above; } else {// compute distance to iso-surface // disable boundary voxels from the mask tree sweepMaskAcc.setValueOff(ijk); const SdfValueT delta = value - isoValue;//offset relative to iso-value if (math::isApproxZero(delta)) {//voxel is on the iso-surface sdf[voxelIter.pos()] = 0; } else {//voxel is neighboring the iso-surface SdfValueT sum = 0; for (int i=0; i<6;) { SdfValueT d = std::numeric_limits<SdfValueT>::max(), d2; if (mask.test(i++)) d = math::Abs(delta/(value-stencil.getValue(i))); if (mask.test(i++)) { d2 = math::Abs(delta/(value-stencil.getValue(i))); if (d2 < d) d = d2; } if (d < std::numeric_limits<SdfValueT>::max()) sum += 1/(d*d); } sdf[voxelIter.pos()] = isAbove ? h / math::Sqrt(sum) : -h / math::Sqrt(sum); }// voxel is neighboring the iso-surface }// intersecting voxels }// active voxels }// loop over voxels }// loop over leaf nodes }// FastSweeping::InitSdf::operator(const LeafRange&) template<typename RootOrInternalNodeT> void operator()(const RootOrInternalNodeT& node) const { const SdfValueT isoValue = mIsoValue, above = mAboveSign*std::numeric_limits<SdfValueT>::max(); for (auto it = node.cbeginValueAll(); it; ++it) { SdfValueT& v = const_cast<SdfValueT&>(*it); v = v > isoValue ? above : -above; }//loop over all tiles }// FastSweeping::InitSdf::operator()(const RootOrInternalNodeT&) // Public member data FastSweeping *mParent; SdfGridT *mSdfGrid;//raw pointer, i.e. lock free SdfValueT mIsoValue; SdfValueT mAboveSign;//sign of distance values above the iso-value };// FastSweeping::InitSdf /// Private class of FastSweeping to perform multi-threaded initialization template <typename SdfGridT, typename ExtValueT> template <typename OpT> struct FastSweeping<SdfGridT, ExtValueT>::InitExt { using LeafRange = typename tree::LeafManager<SdfTreeT>::LeafRange; using OpPoolT = tbb::enumerable_thread_specific<OpT>; InitExt(FastSweeping &parent) : mParent(&parent), mOpPool(nullptr), mSdfGrid(parent.mSdfGrid.get()), mExtGrid(parent.mExtGrid.get()), mIsoValue(0), mAboveSign(0) {} InitExt(const InitExt&) = default;// for tbb::parallel_for InitExt& operator=(const InitExt&) = delete; void run(SdfValueT isoValue, const OpT &opPrototype, bool isInputSdf) { static_assert(std::is_convertible<decltype(opPrototype(Vec3d(0))),ExtValueT>::value, "Invalid return type of functor"); if (!mExtGrid) { OPENVDB_THROW(RuntimeError, "FastSweeping::InitExt expected an extension grid!"); } mAboveSign = isInputSdf ? 
SdfValueT(1) : SdfValueT(-1); mIsoValue = isoValue; auto &tree1 = mSdfGrid->tree(); auto &tree2 = mExtGrid->tree(); const bool hasActiveTiles = tree1.hasActiveTiles();//very fast if (isInputSdf && hasActiveTiles) { OPENVDB_THROW(RuntimeError, "FastSweeping: A SDF should not have active tiles!"); } #ifdef BENCHMARK_FAST_SWEEPING util::CpuTimer timer("Initialize voxels"); #endif mParent->mSweepMask.clear(); mParent->mSweepMask.topologyUnion(mParent->mSdfGrid->constTree()); {// Process all voxels // Define thread-local operators OpPoolT opPool(opPrototype); mOpPool = &opPool; tree::LeafManager<SdfTreeT> mgr(tree1, 1);// we need one auxiliary buffer tbb::parallel_for(mgr.leafRange(32), *this);//multi-threaded mgr.swapLeafBuffer(1);//swap out auxiliary buffer } #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Initialize tiles"); #endif {// Process all tiles tree::NodeManager<SdfTreeT, SdfTreeT::RootNodeType::LEVEL-1> mgr(tree1); mgr.foreachBottomUp(*this);//multi-threaded tree1.root().setBackground(std::numeric_limits<SdfValueT>::max(), false); if (hasActiveTiles) { #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Voxelizing active tiles"); #endif tree1.voxelizeActiveTiles();//multi-threaded tree2.voxelizeActiveTiles();//multi-threaded } } // cache the leaf node origins for fast lookup in the sweeping kernels mParent->computeSweepMaskLeafOrigins(); #ifdef BENCHMARK_FAST_SWEEPING timer.stop(); #endif }// FastSweeping::InitExt::run void operator()(const LeafRange& r) const { ExtAccT acc(mExtGrid->tree()); SweepMaskAccT sweepMaskAcc(mParent->mSweepMask); math::GradStencil<SdfGridT, false> stencil(*mSdfGrid); const math::Transform& xform = mExtGrid->transform(); typename OpPoolT::reference op = mOpPool->local(); const SdfValueT isoValue = mIsoValue, above = mAboveSign*std::numeric_limits<SdfValueT>::max();//local copy const SdfValueT h = mAboveSign*static_cast<SdfValueT>(mSdfGrid->voxelSize()[0]);//Voxel size for (auto leafIter = r.begin(); leafIter; ++leafIter) { SdfValueT *sdf = leafIter.buffer(1).data(); ExtValueT *ext = acc.probeLeaf(leafIter->origin())->buffer().data();//should be safe! for (auto voxelIter = leafIter->beginValueAll(); voxelIter; ++voxelIter) { const SdfValueT value = *voxelIter; const bool isAbove = value > isoValue; if (!voxelIter.isValueOn()) {// inactive voxels sdf[voxelIter.pos()] = isAbove ? above : -above; } else {// active voxels const Coord ijk = voxelIter.getCoord(); stencil.moveTo(ijk, value); const auto mask = stencil.intersectionMask( isoValue ); if (mask.none()) {// no zero-crossing neighbors, most common case sdf[voxelIter.pos()] = isAbove ? 
above : -above; // the ext grid already has its active values set to the bakground value } else {// compute distance to iso-surface // disable boundary voxels from the mask tree sweepMaskAcc.setValueOff(ijk); const SdfValueT delta = value - isoValue;//offset relative to iso-value if (math::isApproxZero(delta)) {//voxel is on the iso-surface sdf[voxelIter.pos()] = 0; ext[voxelIter.pos()] = ExtValueT(op(xform.indexToWorld(ijk))); } else {//voxel is neighboring the iso-surface SdfValueT sum1 = 0; ExtValueT sum2 = zeroVal<ExtValueT>(); for (int n=0, i=0; i<6;) { SdfValueT d = std::numeric_limits<SdfValueT>::max(), d2; if (mask.test(i++)) { d = math::Abs(delta/(value-stencil.getValue(i))); n = i - 1; } if (mask.test(i++)) { d2 = math::Abs(delta/(value-stencil.getValue(i))); if (d2 < d) { d = d2; n = i - 1; } } if (d < std::numeric_limits<SdfValueT>::max()) { d2 = 1/(d*d); sum1 += d2; const Vec3R xyz(static_cast<SdfValueT>(ijk[0])+d*static_cast<SdfValueT>(FastSweeping::mOffset[n][0]), static_cast<SdfValueT>(ijk[1])+d*static_cast<SdfValueT>(FastSweeping::mOffset[n][1]), static_cast<SdfValueT>(ijk[2])+d*static_cast<SdfValueT>(FastSweeping::mOffset[n][2])); sum2 += d2*ExtValueT(op(xform.indexToWorld(xyz))); } }//look over six cases ext[voxelIter.pos()] = (SdfValueT(1) / sum1) * sum2; sdf[voxelIter.pos()] = isAbove ? h / math::Sqrt(sum1) : -h / math::Sqrt(sum1); }// voxel is neighboring the iso-surface }// intersecting voxels }// active voxels }// loop over voxels }// loop over leaf nodes }// FastSweeping::InitExt::operator(const LeafRange& r) template<typename RootOrInternalNodeT> void operator()(const RootOrInternalNodeT& node) const { const SdfValueT isoValue = mIsoValue, above = mAboveSign*std::numeric_limits<SdfValueT>::max(); for (auto it = node.cbeginValueAll(); it; ++it) { SdfValueT& v = const_cast<SdfValueT&>(*it); v = v > isoValue ? 
above : -above; }//loop over all tiles } // Public member data FastSweeping *mParent; OpPoolT *mOpPool; SdfGridT *mSdfGrid; ExtGridT *mExtGrid; SdfValueT mIsoValue; SdfValueT mAboveSign;//sign of distance values above the iso-value };// FastSweeping::InitExt /// Private class of FastSweeping to perform multi-threaded initialization template <typename SdfGridT, typename ExtValueT> template <typename MaskTreeT> struct FastSweeping<SdfGridT, ExtValueT>::MaskKernel { using LeafRange = typename tree::LeafManager<const MaskTreeT>::LeafRange; MaskKernel(FastSweeping &parent) : mParent(&parent), mSdfGrid(parent.mSdfGrid.get()) {} MaskKernel(const MaskKernel &parent) = default;// for tbb::parallel_for MaskKernel& operator=(const MaskKernel&) = delete; void run(const MaskTreeT &mask) { #ifdef BENCHMARK_FAST_SWEEPING util::CpuTimer timer; #endif auto &lsTree = mSdfGrid->tree(); static const SdfValueT Unknown = std::numeric_limits<SdfValueT>::max(); #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Changing background value"); #endif changeLevelSetBackground(lsTree, Unknown);//multi-threaded #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Union with mask");//multi-threaded #endif lsTree.topologyUnion(mask);//multi-threaded // ignore active tiles since the input grid is assumed to be a level set tree::LeafManager<const MaskTreeT> mgr(mask);// super fast #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Initializing grid and sweep mask"); #endif mParent->mSweepMask.clear(); mParent->mSweepMask.topologyUnion(mParent->mSdfGrid->constTree()); using LeafManagerT = tree::LeafManager<SweepMaskTreeT>; using LeafT = typename SweepMaskTreeT::LeafNodeType; LeafManagerT leafManager(mParent->mSweepMask); auto kernel = [&](LeafT& leaf, size_t /*leafIdx*/) { static const SdfValueT Unknown = std::numeric_limits<SdfValueT>::max(); SdfAccT acc(mSdfGrid->tree()); // The following hack is safe due to the topoloyUnion in // init and the fact that SdfValueT is known to be a floating point! SdfValueT *data = acc.probeLeaf(leaf.origin())->buffer().data(); for (auto voxelIter = leaf.beginValueOn(); voxelIter; ++voxelIter) {// mask voxels if (math::Abs( data[voxelIter.pos()] ) < Unknown ) { // disable boundary voxels from the mask tree voxelIter.setValue(false); } } }; leafManager.foreach( kernel ); // cache the leaf node origins for fast lookup in the sweeping kernels mParent->computeSweepMaskLeafOrigins(); #ifdef BENCHMARK_FAST_SWEEPING timer.stop(); #endif }// FastSweeping::MaskKernel::run // Private member data of MaskKernel FastSweeping *mParent; SdfGridT *mSdfGrid;//raw pointer, i.e. lock free };// FastSweeping::MaskKernel /// @brief Private class of FastSweeping to perform concurrent fast sweeping in two directions template <typename SdfGridT, typename ExtValueT> struct FastSweeping<SdfGridT, ExtValueT>::SweepingKernel { SweepingKernel(FastSweeping &parent) : mParent(&parent) {} SweepingKernel(const SweepingKernel&) = delete; SweepingKernel& operator=(const SweepingKernel&) = delete; /// Main method that performs concurrent bi-directional sweeps template<typename HashOp> void computeVoxelSlices(HashOp hash) { #ifdef BENCHMARK_FAST_SWEEPING util::CpuTimer timer; #endif // mask of the active voxels to be solved for, i.e. 
excluding boundary voxels const SweepMaskTreeT& maskTree = mParent->mSweepMask; using LeafManagerT = typename tree::LeafManager<const SweepMaskTreeT>; using LeafT = typename SweepMaskTreeT::LeafNodeType; LeafManagerT leafManager(maskTree); // compute the leaf node slices that have active voxels in them // the sliding window of the has keys is -14 to 21 (based on an 8x8x8 leaf node // and the extrema hash values i-j-k and i+j+k), but we use a larger mask window here to // easily accomodate any leaf dimension. The mask offset is used to be able to // store this in a fixed-size byte array constexpr int maskOffset = LeafT::DIM * 3; constexpr int maskRange = maskOffset * 2; // mark each possible slice in each leaf node that has one or more active voxels in it std::vector<int8_t> leafSliceMasks(leafManager.leafCount()*maskRange); auto kernel1 = [&](const LeafT& leaf, size_t leafIdx) { const size_t leafOffset = leafIdx * maskRange; for (auto voxelIter = leaf.cbeginValueOn(); voxelIter; ++voxelIter) { const Coord ijk = LeafT::offsetToLocalCoord(voxelIter.pos()); leafSliceMasks[leafOffset + hash(ijk) + maskOffset] = uint8_t(1); } }; leafManager.foreach( kernel1 ); // compute the voxel slice map using a thread-local-storage hash map // the key of the hash map is the slice index of the voxel coord (ijk.x() + ijk.y() + ijk.z()) // the values are an array of indices for every leaf that has active voxels with this slice index using ThreadLocalMap = std::unordered_map</*voxelSliceKey=*/int64_t, /*leafIdx=*/std::deque<size_t>>; tbb::enumerable_thread_specific<ThreadLocalMap> pool; auto kernel2 = [&](const LeafT& leaf, size_t leafIdx) { ThreadLocalMap& map = pool.local(); const Coord& origin = leaf.origin(); const int64_t leafKey = hash(origin); const size_t leafOffset = leafIdx * maskRange; for (int sliceIdx = 0; sliceIdx < maskRange; sliceIdx++) { if (leafSliceMasks[leafOffset + sliceIdx] == uint8_t(1)) { const int64_t voxelSliceKey = leafKey+sliceIdx-maskOffset; map[voxelSliceKey].emplace_back(leafIdx); } } }; leafManager.foreach( kernel2 ); // combine into a single ordered map keyed by the voxel slice key // note that this is now stored in a map ordered by voxel slice key, // so sweep slices can be processed in order for (auto poolIt = pool.begin(); poolIt != pool.end(); ++poolIt) { const ThreadLocalMap& map = *poolIt; for (const auto& it : map) { for (const size_t leafIdx : it.second) { mVoxelSliceMap[it.first].emplace_back(leafIdx, NodeMaskPtrT()); } } } // extract the voxel slice keys for random access into the map mVoxelSliceKeys.reserve(mVoxelSliceMap.size()); for (const auto& it : mVoxelSliceMap) { mVoxelSliceKeys.push_back(it.first); } // allocate the node masks in parallel, as the map is populated in serial auto kernel3 = [&](tbb::blocked_range<size_t>& range) { for (size_t i = range.begin(); i < range.end(); i++) { const int64_t key = mVoxelSliceKeys[i]; for (auto& it : mVoxelSliceMap[key]) { it.second = std::make_unique<NodeMaskT>(); } } }; tbb::parallel_for(tbb::blocked_range<size_t>(0, mVoxelSliceKeys.size()), kernel3); // each voxel slice contains a leafIdx-nodeMask pair, // this routine populates these node masks to select only the active voxels // from the mask tree that have the same voxel slice key // TODO: a small optimization here would be to union this leaf node mask with // a pre-computed one for this particular slice pattern auto kernel4 = [&](tbb::blocked_range<size_t>& range) { for (size_t i = range.begin(); i < range.end(); i++) { const int64_t voxelSliceKey = 
mVoxelSliceKeys[i]; LeafSliceArray& leafSliceArray = mVoxelSliceMap[voxelSliceKey]; for (LeafSlice& leafSlice : leafSliceArray) { const size_t leafIdx = leafSlice.first; NodeMaskPtrT& nodeMask = leafSlice.second; const LeafT& leaf = leafManager.leaf(leafIdx); const Coord& origin = leaf.origin(); const int64_t leafKey = hash(origin); for (auto voxelIter = leaf.cbeginValueOn(); voxelIter; ++voxelIter) { const Index voxelIdx = voxelIter.pos(); const Coord ijk = LeafT::offsetToLocalCoord(voxelIdx); const int64_t key = leafKey + hash(ijk); if (key == voxelSliceKey) { nodeMask->setOn(voxelIdx); } } } } }; tbb::parallel_for(tbb::blocked_range<size_t>(0, mVoxelSliceKeys.size()), kernel4); }// FastSweeping::SweepingKernel::computeVoxelSlices // Private struct for nearest neighbor grid points (very memory light!) struct NN { SdfValueT v; int n; inline static Coord ijk(const Coord &p, int i) { return p + FastSweeping::mOffset[i]; } NN() : v(), n() {} NN(const SdfAccT &a, const Coord &p, int i) : v(math::Abs(a.getValue(ijk(p,i)))), n(i) {} inline Coord operator()(const Coord &p) const { return ijk(p, n); } inline bool operator<(const NN &rhs) const { return v < rhs.v; } inline operator bool() const { return v < SdfValueT(1000); } };// NN void sweep() { typename ExtGridT::TreeType *tree2 = mParent->mExtGrid ? &mParent->mExtGrid->tree() : nullptr; const SdfValueT h = static_cast<SdfValueT>(mParent->mSdfGrid->voxelSize()[0]); const SdfValueT sqrt2h = math::Sqrt(SdfValueT(2))*h; const std::vector<Coord>& leafNodeOrigins = mParent->mSweepMaskLeafOrigins; int64_t voxelSliceIndex(0); auto kernel = [&](const tbb::blocked_range<size_t>& range) { using LeafT = typename SdfGridT::TreeType::LeafNodeType; SdfAccT acc1(mParent->mSdfGrid->tree()); auto acc2 = std::unique_ptr<ExtAccT>(tree2 ? new ExtAccT(*tree2) : nullptr); SdfValueT absV, sign, update, D; NN d1, d2, d3;//distance values and coordinates of closest neighbor points const LeafSliceArray& leafSliceArray = mVoxelSliceMap[voxelSliceIndex]; // Solves Goudonov's scheme: [x-d1]^2 + [x-d2]^2 + [x-d3]^2 = h^2 // where [X] = (X>0?X:0) and ai=min(di+1,di-1) for (size_t i = range.begin(); i < range.end(); ++i) { // iterate over all leafs in the slice and extract the leaf // and node mask for each slice pattern const LeafSlice& leafSlice = leafSliceArray[i]; const size_t leafIdx = leafSlice.first; const NodeMaskPtrT& nodeMask = leafSlice.second; const Coord& origin = leafNodeOrigins[leafIdx]; Coord ijk; for (auto indexIter = nodeMask->beginOn(); indexIter; ++indexIter) { // Get coordinate of center point of the FD stencil ijk = origin + LeafT::offsetToLocalCoord(indexIter.pos()); // Find the closes neighbors in the three axial directions d1 = std::min(NN(acc1, ijk, 0), NN(acc1, ijk, 1)); d2 = std::min(NN(acc1, ijk, 2), NN(acc1, ijk, 3)); d3 = std::min(NN(acc1, ijk, 4), NN(acc1, ijk, 5)); if (!(d1 || d2 || d3)) continue;//no valid neighbors // Get the center point of the FD stencil (assumed to be an active voxel) // Note this const_cast is normally unsafe but by design we know the tree // to be static, of floating-point type and containing active voxels only! SdfValueT &value = const_cast<SdfValueT&>(acc1.getValue(ijk)); // Extract the sign sign = value >= SdfValueT(0) ? 
SdfValueT(1) : SdfValueT(-1); // Absolute value absV = math::Abs(value); // sort values so d1 <= d2 <= d3 if (d2 < d1) std::swap(d1, d2); if (d3 < d2) std::swap(d2, d3); if (d2 < d1) std::swap(d1, d2); // Test if there is a solution depending on ONE of the neighboring voxels // if d2 - d1 >= h => d2 >= d1 + h then: // (x-d1)^2=h^2 => x = d1 + h update = d1.v + h; if (update <= d2.v) { if (update < absV) { value = sign * update; if (acc2) acc2->setValue(ijk, acc2->getValue(d1(ijk)));//update ext? }//update sdf? continue; }// one neighbor case // Test if there is a solution depending on TWO of the neighboring voxels // (x-d1)^2 + (x-d2)^2 = h^2 //D = SdfValueT(2) * h * h - math::Pow2(d1.v - d2.v);// = 2h^2-(d1-d2)^2 //if (D >= SdfValueT(0)) {// non-negative discriminant if (d2.v <= sqrt2h + d1.v) { D = SdfValueT(2) * h * h - math::Pow2(d1.v - d2.v);// = 2h^2-(d1-d2)^2 update = SdfValueT(0.5) * (d1.v + d2.v + std::sqrt(D)); if (update > d2.v && update <= d3.v) { if (update < absV) { value = sign * update; if (acc2) { d1.v -= update; d2.v -= update; // affine combination of two neighboring extension values const SdfValueT w = SdfValueT(1)/(d1.v+d2.v); acc2->setValue(ijk, w*(d1.v*acc2->getValue(d1(ijk)) + d2.v*acc2->getValue(d2(ijk)))); }//update ext? }//update sdf? continue; }//test for two neighbor case }//test for non-negative determinant // Test if there is a solution depending on THREE of the neighboring voxels // (x-d1)^2 + (x-d2)^2 + (x-d3)^2 = h^2 // 3x^2 - 2(d1 + d2 + d3)x + d1^2 + d2^2 + d3^2 = h^2 // ax^2 + bx + c=0, a=3, b=-2(d1+d2+d3), c=d1^2 + d2^2 + d3^2 - h^2 const SdfValueT d123 = d1.v + d2.v + d3.v; D = d123*d123 - SdfValueT(3)*(d1.v*d1.v + d2.v*d2.v + d3.v*d3.v - h * h); if (D >= SdfValueT(0)) {// non-negative discriminant update = SdfValueT(1.0/3.0) * (d123 + std::sqrt(D));//always passes test //if (update > d3.v) {//disabled due to round-off errors if (update < absV) { value = sign * update; if (acc2) { d1.v -= update; d2.v -= update; d3.v -= update; // affine combination of three neighboring extension values const SdfValueT w = SdfValueT(1)/(d1.v+d2.v+d3.v); acc2->setValue(ijk, w*(d1.v*acc2->getValue(d1(ijk)) + d2.v*acc2->getValue(d2(ijk)) + d3.v*acc2->getValue(d3(ijk)))); }//update ext? }//update sdf? 
}//test for non-negative determinant }//loop over coordinates } }; #ifdef BENCHMARK_FAST_SWEEPING util::CpuTimer timer("Forward sweep"); #endif for (size_t i = 0; i < mVoxelSliceKeys.size(); i++) { voxelSliceIndex = mVoxelSliceKeys[i]; tbb::parallel_for(tbb::blocked_range<size_t>(0, mVoxelSliceMap[voxelSliceIndex].size()), kernel); } #ifdef BENCHMARK_FAST_SWEEPING timer.restart("Backward sweeps"); #endif for (size_t i = mVoxelSliceKeys.size(); i > 0; i--) { voxelSliceIndex = mVoxelSliceKeys[i-1]; tbb::parallel_for(tbb::blocked_range<size_t>(0, mVoxelSliceMap[voxelSliceIndex].size()), kernel); } #ifdef BENCHMARK_FAST_SWEEPING timer.stop(); #endif }// FastSweeping::SweepingKernel::sweep private: using NodeMaskT = typename SweepMaskTreeT::LeafNodeType::NodeMaskType; using NodeMaskPtrT = std::unique_ptr<NodeMaskT>; // using a unique ptr for the NodeMask allows for parallel allocation, // but makes this class not copy-constructible using LeafSlice = std::pair</*leafIdx=*/size_t, /*leafMask=*/NodeMaskPtrT>; using LeafSliceArray = std::deque<LeafSlice>; using VoxelSliceMap = std::map</*voxelSliceKey=*/int64_t, LeafSliceArray>; // Private member data of SweepingKernel FastSweeping *mParent; VoxelSliceMap mVoxelSliceMap; std::vector<int64_t> mVoxelSliceKeys; };// FastSweeping::SweepingKernel //////////////////////////////////////////////////////////////////////////////// template<typename GridT> typename GridT::Ptr fogToSdf(const GridT &fogGrid, typename GridT::ValueType isoValue, int nIter) { FastSweeping<GridT> fs; if (fs.initSdf(fogGrid, isoValue, /*isInputSdf*/false)) fs.sweep(nIter); return fs.sdfGrid(); } template<typename GridT> typename GridT::Ptr sdfToSdf(const GridT &sdfGrid, typename GridT::ValueType isoValue, int nIter) { FastSweeping<GridT> fs; if (fs.initSdf(sdfGrid, isoValue, /*isInputSdf*/true)) fs.sweep(nIter); return fs.sdfGrid(); } template<typename FogGridT, typename ExtOpT, typename ExtValueT> typename FogGridT::template ValueConverter<ExtValueT>::Type::Ptr fogToExt(const FogGridT &fogGrid, const ExtOpT &op, const ExtValueT& background, typename FogGridT::ValueType isoValue, int nIter) { FastSweeping<FogGridT, ExtValueT> fs; if (fs.initExt(fogGrid, op, background, isoValue, /*isInputSdf*/false)) fs.sweep(nIter); return fs.extGrid(); } template<typename SdfGridT, typename OpT, typename ExtValueT> typename SdfGridT::template ValueConverter<ExtValueT>::Type::Ptr sdfToExt(const SdfGridT &sdfGrid, const OpT &op, const ExtValueT &background, typename SdfGridT::ValueType isoValue, int nIter) { FastSweeping<SdfGridT> fs; if (fs.initExt(sdfGrid, op, background, isoValue, /*isInputSdf*/true)) fs.sweep(nIter); return fs.extGrid(); } template<typename FogGridT, typename ExtOpT, typename ExtValueT> std::pair<typename FogGridT::Ptr, typename FogGridT::template ValueConverter<ExtValueT>::Type::Ptr> fogToSdfAndExt(const FogGridT &fogGrid, const ExtOpT &op, const ExtValueT &background, typename FogGridT::ValueType isoValue, int nIter) { FastSweeping<FogGridT, ExtValueT> fs; if (fs.initExt(fogGrid, op, background, isoValue, /*isInputSdf*/false)) fs.sweep(nIter); return std::make_pair(fs.sdfGrid(), fs.extGrid()); } template<typename SdfGridT, typename ExtOpT, typename ExtValueT> std::pair<typename SdfGridT::Ptr, typename SdfGridT::template ValueConverter<ExtValueT>::Type::Ptr> sdfToSdfAndExt(const SdfGridT &sdfGrid, const ExtOpT &op, const ExtValueT &background, typename SdfGridT::ValueType isoValue, int nIter) { FastSweeping<SdfGridT, ExtValueT> fs; if (fs.initExt(sdfGrid, op, background, 
isoValue, /*isInputSdf*/true)) fs.sweep(nIter);
    return std::make_pair(fs.sdfGrid(), fs.extGrid());
}

template<typename GridT>
typename GridT::Ptr
dilateSdf(const GridT &sdfGrid, int dilation, NearestNeighbors nn, int nIter)
{
    FastSweeping<GridT> fs;
    if (fs.initDilate(sdfGrid, dilation, nn)) fs.sweep(nIter);
    return fs.sdfGrid();
}

template<typename GridT, typename MaskTreeT>
typename GridT::Ptr
maskSdf(const GridT &sdfGrid, const Grid<MaskTreeT> &mask, bool ignoreActiveTiles, int nIter)
{
    FastSweeping<GridT> fs;
    if (fs.initMask(sdfGrid, mask, ignoreActiveTiles)) fs.sweep(nIter);
    return fs.sdfGrid();
}

} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_TOOLS_FASTSWEEPING_HAS_BEEN_INCLUDED
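// A minimal usage sketch of the convenience functions above (the grid name
// "density", the iso-value and the dilation count are illustrative only):
//
//   // Convert a fog/density volume into a signed distance field:
//   openvdb::FloatGrid::Ptr sdf =
//       openvdb::tools::fogToSdf(*density, /*isoValue=*/0.5f, /*nIter=*/1);
//
//   // Extend the narrow band of an existing SDF by three voxels
//   // (NN_FACE selects face-connected nearest neighbors):
//   openvdb::FloatGrid::Ptr wider =
//       openvdb::tools::dilateSdf(*sdf, /*dilation=*/3,
//                                 openvdb::tools::NN_FACE, /*nIter=*/1);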
70,315
C
43.90166
153
0.613511
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/DenseSparseTools.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#ifndef OPENVDB_TOOLS_DENSESPARSETOOLS_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_DENSESPARSETOOLS_HAS_BEEN_INCLUDED

#include <tbb/parallel_reduce.h>
#include <tbb/blocked_range3d.h>
#include <tbb/blocked_range2d.h>
#include <tbb/blocked_range.h>
#include <openvdb/Types.h>
#include <openvdb/tree/LeafManager.h>
#include "Dense.h"
#include <algorithm> // for std::min()
#include <vector>

namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {

/// @brief Selectively extract and transform data from a dense grid, producing a
/// sparse tree with leaf nodes only (e.g. create a tree from the square
/// of values greater than a cutoff.)
/// @param dense A dense grid that acts as a data source
/// @param functor A functor that selects and transforms data for output
/// @param background The background value of the resulting sparse grid
/// @param threaded Option to use threaded or serial code path
/// @return @c Ptr to tree with the value type and configuration defined
/// by typedefs in the @c functor.
/// @note To achieve optimal sparsity consider calling the prune()
/// method on the result.
/// @note To simply copy all the data from a Dense grid to an
/// OpenVDB Grid, use tools::copyFromDense() for better performance.
///
/// The type of the sparse tree is determined by the specified OpType
/// functor by means of the typedef OpType::ResultTreeType
///
/// The OpType functor is responsible for the transformation of
/// dense grid data to sparse grid data on a per-voxel basis.
///
/// Only leaf nodes with active values will be added to the sparse grid.
///
/// The OpType must be a struct that defines the minimal form
/// @code
/// struct ExampleOp
/// {
///     using ResultTreeType = DesiredTreeType;
///
///     template<typename IndexOrCoord>
///     void OpType::operator() (const DenseValueType a, const IndexOrCoord& ijk,
///         ResultTreeType::LeafNodeType* leaf);
/// };
/// @endcode
///
/// For example, to generate a <ValueType, 5, 4, 3> tree with valuesOn
/// at locations greater than a given mask value
/// @code
/// template<typename ValueType>
/// class Rule
/// {
/// public:
///     // Standard tree type (e.g. MaskTree or FloatTree in openvdb.h)
///     using ResultTreeType = typename openvdb::tree::Tree4<ValueType, 5, 4, 3>::Type;
///
///     using ResultLeafNodeType = typename ResultTreeType::LeafNodeType;
///     using ResultValueType = typename ResultTreeType::ValueType;
///
///     using DenseValueType = float;
///
///     using Index = vdbmath::Coord::ValueType;
///
///     Rule(const DenseValueType& value): mMaskValue(value){};
///
///     template<typename IndexOrCoord>
///     void operator()(const DenseValueType& a, const IndexOrCoord& offset,
///         ResultLeafNodeType* leaf) const
///     {
///         if (a > mMaskValue) {
///             leaf->setValueOn(offset, a);
///         }
///     }
///
/// private:
///     const DenseValueType mMaskValue;
/// };
/// @endcode
template<typename OpType, typename DenseType>
typename OpType::ResultTreeType::Ptr
extractSparseTree(const DenseType& dense, const OpType& functor,
    const typename OpType::ResultValueType& background, bool threaded = true);

/// This struct aids template resolution of a new tree type that
/// has the same configuration as TreeType, but the ValueType from
/// DenseType.
template<typename DenseType, typename TreeType> struct DSConverter { using ValueType = typename DenseType::ValueType; using Type = typename TreeType::template ValueConverter<ValueType>::Type; }; /// @brief Copy data from the intersection of a sparse tree and a dense input grid. /// The resulting tree has the same configuration as the sparse tree, but holds /// the data type specified by the dense input. /// @param dense A dense grid that acts as a data source /// @param mask The active voxels and tiles intersected with dense define iteration mask /// @param background The background value of the resulting sparse grid /// @param threaded Option to use threaded or serial code path /// @return @c Ptr to tree with the same configuration as @c mask but of value type /// defined by @c dense. template<typename DenseType, typename MaskTreeType> typename DSConverter<DenseType, MaskTreeType>::Type::Ptr extractSparseTreeWithMask(const DenseType& dense, const MaskTreeType& mask, const typename DenseType::ValueType& background, bool threaded = true); /// Apply a point-wise functor to the intersection of a dense grid and a given bounding box /// @param dense A dense grid to be transformed /// @param bbox Index space bounding box, define region where the transformation is applied /// @param op A functor that acts on the dense grid value type /// @param parallel Used to select multithreaded or single threaded /// Minimally, the @c op class has to support a @c operator() method, /// @code /// // Square values in a grid /// struct Op /// { /// ValueT operator()(const ValueT& in) const /// { /// // do work /// ValueT result = in * in; /// /// return result; /// } /// }; /// @endcode /// NB: only Dense grids with memory layout zxy are supported template<typename ValueT, typename OpType> void transformDense(Dense<ValueT, openvdb::tools::LayoutZYX>& dense, const openvdb::CoordBBox& bbox, const OpType& op, bool parallel=true); /// We currrently support the following operations when compositing sparse /// data into a dense grid. enum DSCompositeOp { DS_OVER, DS_ADD, DS_SUB, DS_MIN, DS_MAX, DS_MULT, DS_SET }; /// @brief Composite data from a sparse tree into a dense array of the same value type. /// @param dense Dense grid to be altered by the operation /// @param source Sparse data to composite into @c dense /// @param alpha Sparse Alpha mask used in compositing operations. /// @param beta Constant multiplier on src /// @param strength Constant multiplier on alpha /// @param threaded Enable threading for this operation. template<DSCompositeOp, typename TreeT> void compositeToDense(Dense<typename TreeT::ValueType, LayoutZYX>& dense, const TreeT& source, const TreeT& alpha, const typename TreeT::ValueType beta, const typename TreeT::ValueType strength, bool threaded = true); /// @brief Functor-based class used to extract data that satisfies some /// criteria defined by the embedded @c OpType functor. The @c extractSparseTree /// function wraps this class. 
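///
/// @par Example
/// A minimal call sketch (the dense extent, fill value and threshold are
/// illustrative; Rule is the functor shown in the extractSparseTree()
/// documentation above):
/// @code
/// openvdb::tools::Dense<float, openvdb::tools::LayoutZYX>
///     dense(openvdb::CoordBBox(openvdb::Coord(0), openvdb::Coord(63)));
/// dense.fill(0.0f);
/// Rule<float> rule(/*maskValue=*/0.5f);
/// auto resultTree = openvdb::tools::extractSparseTree(dense, rule, /*background=*/0.0f);
/// @endcode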
template<typename OpType, typename DenseType> class SparseExtractor { public: using Index = openvdb::math::Coord::ValueType; using DenseValueType = typename DenseType::ValueType; using ResultTreeType = typename OpType::ResultTreeType; using ResultValueType = typename ResultTreeType::ValueType; using ResultLeafNodeType = typename ResultTreeType::LeafNodeType; using MaskTree = typename ResultTreeType::template ValueConverter<ValueMask>::Type; using Range3d = tbb::blocked_range3d<Index, Index, Index>; private: const DenseType& mDense; const OpType& mFunctor; const ResultValueType mBackground; const openvdb::math::CoordBBox mBBox; const Index mWidth; typename ResultTreeType::Ptr mMask; openvdb::math::Coord mMin; public: SparseExtractor(const DenseType& dense, const OpType& functor, const ResultValueType background) : mDense(dense), mFunctor(functor), mBackground(background), mBBox(dense.bbox()), mWidth(ResultLeafNodeType::DIM), mMask( new ResultTreeType(mBackground)) {} SparseExtractor(const DenseType& dense, const openvdb::math::CoordBBox& bbox, const OpType& functor, const ResultValueType background) : mDense(dense), mFunctor(functor), mBackground(background), mBBox(bbox), mWidth(ResultLeafNodeType::DIM), mMask( new ResultTreeType(mBackground)) { // mBBox must be inside the coordinate rage of the dense grid if (!dense.bbox().isInside(mBBox)) { OPENVDB_THROW(ValueError, "Data extraction window out of bound"); } } SparseExtractor(SparseExtractor& other, tbb::split): mDense(other.mDense), mFunctor(other.mFunctor), mBackground(other.mBackground), mBBox(other.mBBox), mWidth(other.mWidth), mMask(new ResultTreeType(mBackground)), mMin(other.mMin) {} typename ResultTreeType::Ptr extract(bool threaded = true) { // Construct 3D range of leaf nodes that // intersect mBBox. // Snap the bbox to nearest leaf nodes min and max openvdb::math::Coord padded_min = mBBox.min(); openvdb::math::Coord padded_max = mBBox.max(); padded_min &= ~(mWidth - 1); padded_max &= ~(mWidth - 1); padded_max[0] += mWidth - 1; padded_max[1] += mWidth - 1; padded_max[2] += mWidth - 1; // number of leaf nodes in each direction // division by leaf width, e.g. 8 in most cases const Index xleafCount = ( padded_max.x() - padded_min.x() + 1 ) / mWidth; const Index yleafCount = ( padded_max.y() - padded_min.y() + 1 ) / mWidth; const Index zleafCount = ( padded_max.z() - padded_min.z() + 1 ) / mWidth; mMin = padded_min; Range3d leafRange(0, xleafCount, 1, 0, yleafCount, 1, 0, zleafCount, 1); // Iterate over the leafnodes applying *this as a functor. if (threaded) { tbb::parallel_reduce(leafRange, *this); } else { (*this)(leafRange); } return mMask; } void operator()(const Range3d& range) { ResultLeafNodeType* leaf = nullptr; // Unpack the range3d item. const Index imin = range.pages().begin(); const Index imax = range.pages().end(); const Index jmin = range.rows().begin(); const Index jmax = range.rows().end(); const Index kmin = range.cols().begin(); const Index kmax = range.cols().end(); // loop over all the candidate leafs. 
Adding only those with 'true' values // to the tree for (Index i = imin; i < imax; ++i) { for (Index j = jmin; j < jmax; ++j) { for (Index k = kmin; k < kmax; ++k) { // Calculate the origin of candidate leaf const openvdb::math::Coord origin = mMin + openvdb::math::Coord(mWidth * i, mWidth * j, mWidth * k ); if (leaf == nullptr) { leaf = new ResultLeafNodeType(origin, mBackground); } else { leaf->setOrigin(origin); leaf->fill(mBackground); leaf->setValuesOff(); } // The bounding box for this leaf openvdb::math::CoordBBox localBBox = leaf->getNodeBoundingBox(); // Shrink to the intersection with mBBox (i.e. the dense // volume) localBBox.intersect(mBBox); // Early out for non-intersecting leafs if (localBBox.empty()) continue; const openvdb::math::Coord start = localBBox.getStart(); const openvdb::math::Coord end = localBBox.getEnd(); // Order the looping to respect the memory layout in // the Dense source if (mDense.memoryLayout() == openvdb::tools::LayoutZYX) { openvdb::math::Coord ijk; Index offset; const DenseValueType* dp; for (ijk[0] = start.x(); ijk[0] < end.x(); ++ijk[0] ) { for (ijk[1] = start.y(); ijk[1] < end.y(); ++ijk[1] ) { for (ijk[2] = start.z(), offset = ResultLeafNodeType::coordToOffset(ijk), dp = &mDense.getValue(ijk); ijk[2] < end.z(); ++ijk[2], ++offset, ++dp) { mFunctor(*dp, offset, leaf); } } } } else { openvdb::math::Coord ijk; const DenseValueType* dp; for (ijk[2] = start.z(); ijk[2] < end.z(); ++ijk[2]) { for (ijk[1] = start.y(); ijk[1] < end.y(); ++ijk[1]) { for (ijk[0] = start.x(), dp = &mDense.getValue(ijk); ijk[0] < end.x(); ++ijk[0], ++dp) { mFunctor(*dp, ijk, leaf); } } } } // Only add non-empty leafs (empty is defined as all inactive) if (!leaf->isEmpty()) { mMask->addLeaf(leaf); leaf = nullptr; } } } } // Clean up an unused leaf. if (leaf != nullptr) delete leaf; } void join(SparseExtractor& rhs) { mMask->merge(*rhs.mMask); } }; // class SparseExtractor template<typename OpType, typename DenseType> typename OpType::ResultTreeType::Ptr extractSparseTree(const DenseType& dense, const OpType& functor, const typename OpType::ResultValueType& background, bool threaded) { // Construct the mask using a parallel reduce pattern. // Each thread computes disjoint mask-trees. The join merges // into a single tree. SparseExtractor<OpType, DenseType> extractor(dense, functor, background); return extractor.extract(threaded); } /// @brief Functor-based class used to extract data from a dense grid, at /// the index-space intersection with a supplied mask in the form of a sparse tree. /// The @c extractSparseTreeWithMask function wraps this class. 
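///
/// @par Example
/// A minimal call sketch of the extractSparseTreeWithMask() wrapper (the
/// dense extent and the mask coordinate are illustrative):
/// @code
/// openvdb::tools::Dense<float, openvdb::tools::LayoutZYX>
///     dense(openvdb::CoordBBox(openvdb::Coord(0), openvdb::Coord(63)));
/// dense.fill(1.0f);
/// openvdb::FloatTree maskTree(/*background=*/0.0f);
/// maskTree.setValueOn(openvdb::Coord(8, 8, 8), 1.0f);
/// // Copies the dense values that coincide with the mask's active topology.
/// auto resultTree = openvdb::tools::extractSparseTreeWithMask(
///     dense, maskTree, /*background=*/0.0f, /*threaded=*/true);
/// @endcode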
template<typename DenseType, typename MaskTreeType> class SparseMaskedExtractor { public: using _ResultTreeType = typename DSConverter<DenseType, MaskTreeType>::Type; using ResultTreeType = _ResultTreeType; using ResultLeafNodeType = typename ResultTreeType::LeafNodeType; using ResultValueType = typename ResultTreeType::ValueType; using DenseValueType = ResultValueType; using MaskTree = typename ResultTreeType::template ValueConverter<ValueMask>::Type; using MaskLeafCIter = typename MaskTree::LeafCIter; using MaskLeafVec = std::vector<const typename MaskTree::LeafNodeType*>; SparseMaskedExtractor(const DenseType& dense, const ResultValueType& background, const MaskLeafVec& leafVec ): mDense(dense), mBackground(background), mBBox(dense.bbox()), mLeafVec(leafVec), mResult(new ResultTreeType(mBackground)) {} SparseMaskedExtractor(const SparseMaskedExtractor& other, tbb::split): mDense(other.mDense), mBackground(other.mBackground), mBBox(other.mBBox), mLeafVec(other.mLeafVec), mResult( new ResultTreeType(mBackground)) {} typename ResultTreeType::Ptr extract(bool threaded = true) { tbb::blocked_range<size_t> range(0, mLeafVec.size()); if (threaded) { tbb::parallel_reduce(range, *this); } else { (*this)(range); } return mResult; } // Used in looping over leaf nodes in the masked grid // and using the active mask to select data to void operator()(const tbb::blocked_range<size_t>& range) { ResultLeafNodeType* leaf = nullptr; // loop over all the candidate leafs. Adding only those with 'true' values // to the tree for (size_t idx = range.begin(); idx < range.end(); ++ idx) { const typename MaskTree::LeafNodeType* maskLeaf = mLeafVec[idx]; // The bounding box for this leaf openvdb::math::CoordBBox localBBox = maskLeaf->getNodeBoundingBox(); // Shrink to the intersection with the dense volume localBBox.intersect(mBBox); // Early out if there was no intersection if (localBBox.empty()) continue; // Reset or allocate the target leaf if (leaf == nullptr) { leaf = new ResultLeafNodeType(maskLeaf->origin(), mBackground); } else { leaf->setOrigin(maskLeaf->origin()); leaf->fill(mBackground); leaf->setValuesOff(); } // Iterate over the intersecting bounding box // copying active values to the result tree const openvdb::math::Coord start = localBBox.getStart(); const openvdb::math::Coord end = localBBox.getEnd(); openvdb::math::Coord ijk; if (mDense.memoryLayout() == openvdb::tools::LayoutZYX && maskLeaf->isDense()) { Index offset; const DenseValueType* src; for (ijk[0] = start.x(); ijk[0] < end.x(); ++ijk[0] ) { for (ijk[1] = start.y(); ijk[1] < end.y(); ++ijk[1] ) { for (ijk[2] = start.z(), offset = ResultLeafNodeType::coordToOffset(ijk), src = &mDense.getValue(ijk); ijk[2] < end.z(); ++ijk[2], ++offset, ++src) { // copy into leaf leaf->setValueOn(offset, *src); } } } } else { Index offset; for (ijk[0] = start.x(); ijk[0] < end.x(); ++ijk[0] ) { for (ijk[1] = start.y(); ijk[1] < end.y(); ++ijk[1] ) { for (ijk[2] = start.z(), offset = ResultLeafNodeType::coordToOffset(ijk); ijk[2] < end.z(); ++ijk[2], ++offset) { if (maskLeaf->isValueOn(offset)) { const ResultValueType denseValue = mDense.getValue(ijk); leaf->setValueOn(offset, denseValue); } } } } } // Only add non-empty leafs (empty is defined as all inactive) if (!leaf->isEmpty()) { mResult->addLeaf(leaf); leaf = nullptr; } } // Clean up an unused leaf. 
if (leaf != nullptr) delete leaf; } void join(SparseMaskedExtractor& rhs) { mResult->merge(*rhs.mResult); } private: const DenseType& mDense; const ResultValueType mBackground; const openvdb::math::CoordBBox& mBBox; const MaskLeafVec& mLeafVec; typename ResultTreeType::Ptr mResult; }; // class SparseMaskedExtractor /// @brief a simple utility class used by @c extractSparseTreeWithMask template<typename _ResultTreeType, typename DenseValueType> struct ExtractAll { using ResultTreeType = _ResultTreeType; using ResultLeafNodeType = typename ResultTreeType::LeafNodeType; template<typename CoordOrIndex> inline void operator()(const DenseValueType& a, const CoordOrIndex& offset, ResultLeafNodeType* leaf) const { leaf->setValueOn(offset, a); } }; template<typename DenseType, typename MaskTreeType> typename DSConverter<DenseType, MaskTreeType>::Type::Ptr extractSparseTreeWithMask(const DenseType& dense, const MaskTreeType& maskProxy, const typename DenseType::ValueType& background, bool threaded) { using LeafExtractor = SparseMaskedExtractor<DenseType, MaskTreeType>; using DenseValueType = typename LeafExtractor::DenseValueType; using ResultTreeType = typename LeafExtractor::ResultTreeType; using MaskLeafVec = typename LeafExtractor::MaskLeafVec; using MaskTree = typename LeafExtractor::MaskTree; using MaskLeafCIter = typename LeafExtractor::MaskLeafCIter; using ExtractionRule = ExtractAll<ResultTreeType, DenseValueType>; // Use Mask tree to hold the topology MaskTree maskTree(maskProxy, false, TopologyCopy()); // Construct an array of pointers to the mask leafs. const size_t leafCount = maskTree.leafCount(); MaskLeafVec leafarray(leafCount); MaskLeafCIter leafiter = maskTree.cbeginLeaf(); for (size_t n = 0; n != leafCount; ++n, ++leafiter) { leafarray[n] = leafiter.getLeaf(); } // Extract the data that is masked leaf nodes in the mask. LeafExtractor leafextractor(dense, background, leafarray); typename ResultTreeType::Ptr resultTree = leafextractor.extract(threaded); // Extract data that is masked by tiles in the mask. // Loop over the mask tiles, extracting the data into new trees. // These trees will be leaf-orthogonal to the leafTree (i.e. no leaf // nodes will overlap). Merge these trees into the result. typename MaskTreeType::ValueOnCIter tileIter(maskProxy); tileIter.setMaxDepth(MaskTreeType::ValueOnCIter::LEAF_DEPTH - 1); // Return the leaf tree if the mask had no tiles if (!tileIter) return resultTree; ExtractionRule allrule; // Loop over the tiles in series, but the actual data extraction // is in parallel. CoordBBox bbox; for ( ; tileIter; ++tileIter) { // Find the intersection of the tile with the dense grid. tileIter.getBoundingBox(bbox); bbox.intersect(dense.bbox()); if (bbox.empty()) continue; SparseExtractor<ExtractionRule, DenseType> copyData(dense, bbox, allrule, background); typename ResultTreeType::Ptr fromTileTree = copyData.extract(threaded); resultTree->merge(*fromTileTree); } return resultTree; } /// @brief Class that applies a functor to the index space intersection /// of a prescribed bounding box and the dense grid. /// NB: This class only supports DenseGrids with ZYX memory layout. 
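///
/// @par Example
/// A minimal sketch of the transformDense() wrapper that uses this class
/// (the dense extent, sub-box and squaring functor are illustrative):
/// @code
/// struct SquareOp {
///     float operator()(const float& in) const { return in * in; }
/// };
/// openvdb::tools::Dense<float, openvdb::tools::LayoutZYX>
///     dense(openvdb::CoordBBox(openvdb::Coord(0), openvdb::Coord(63)));
/// dense.fill(2.0f);
/// // Square the values inside a sub-box of the dense grid.
/// openvdb::tools::transformDense(dense,
///     openvdb::CoordBBox(openvdb::Coord(8), openvdb::Coord(15)),
///     SquareOp(), /*parallel=*/true);
/// @endcode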
template<typename _ValueT, typename OpType> class DenseTransformer { public: using ValueT = _ValueT; using DenseT = Dense<ValueT, openvdb::tools::LayoutZYX>; using IntType = openvdb::math::Coord::ValueType; using RangeType = tbb::blocked_range2d<IntType, IntType>; private: DenseT& mDense; const OpType& mOp; openvdb::math::CoordBBox mBBox; public: DenseTransformer(DenseT& dense, const openvdb::math::CoordBBox& bbox, const OpType& functor): mDense(dense), mOp(functor), mBBox(dense.bbox()) { // The iteration space is the intersection of the // input bbox and the index-space covered by the dense grid mBBox.intersect(bbox); } DenseTransformer(const DenseTransformer& other) : mDense(other.mDense), mOp(other.mOp), mBBox(other.mBBox) {} void apply(bool threaded = true) { // Early out if the iteration space is empty if (mBBox.empty()) return; const openvdb::math::Coord start = mBBox.getStart(); const openvdb::math::Coord end = mBBox.getEnd(); // The iteration range only the slower two directions. const RangeType range(start.x(), end.x(), 1, start.y(), end.y(), 1); if (threaded) { tbb::parallel_for(range, *this); } else { (*this)(range); } } void operator()(const RangeType& range) const { // The stride in the z-direction. // Note: the bbox is [inclusive, inclusive] const size_t zlength = size_t(mBBox.max().z() - mBBox.min().z() + 1); const IntType imin = range.rows().begin(); const IntType imax = range.rows().end(); const IntType jmin = range.cols().begin(); const IntType jmax = range.cols().end(); openvdb::math::Coord xyz(imin, jmin, mBBox.min().z()); for (xyz[0] = imin; xyz[0] != imax; ++xyz[0]) { for (xyz[1] = jmin; xyz[1] != jmax; ++xyz[1]) { mOp.transform(mDense, xyz, zlength); } } } }; // class DenseTransformer /// @brief a wrapper struct used to avoid unnecessary computation of /// memory access from @c Coord when all offsets are guaranteed to be /// within the dense grid. 
template<typename ValueT, typename PointWiseOp> struct ContiguousOp { ContiguousOp(const PointWiseOp& op) : mOp(op){} using DenseT = Dense<ValueT, openvdb::tools::LayoutZYX>; inline void transform(DenseT& dense, openvdb::math::Coord& ijk, size_t size) const { ValueT* dp = const_cast<ValueT*>(&dense.getValue(ijk)); for (size_t offset = 0; offset < size; ++offset) { dp[offset] = mOp(dp[offset]); } } const PointWiseOp mOp; }; /// Apply a point-wise functor to the intersection of a dense grid and a given bounding box template<typename ValueT, typename PointwiseOpT> void transformDense(Dense<ValueT, openvdb::tools::LayoutZYX>& dense, const openvdb::CoordBBox& bbox, const PointwiseOpT& functor, bool parallel) { using OpT = ContiguousOp<ValueT, PointwiseOpT>; // Convert the Op so it operates on a contiguous line in memory OpT op(functor); // Apply to the index space intersection in the dense grid DenseTransformer<ValueT, OpT> transformer(dense, bbox, op); transformer.apply(parallel); } template<typename CompositeMethod, typename _TreeT> class SparseToDenseCompositor { public: using TreeT = _TreeT; using ValueT = typename TreeT::ValueType; using LeafT = typename TreeT::LeafNodeType; using MaskTreeT = typename TreeT::template ValueConverter<ValueMask>::Type; using MaskLeafT = typename MaskTreeT::LeafNodeType; using DenseT = Dense<ValueT, openvdb::tools::LayoutZYX>; using Index = openvdb::math::Coord::ValueType; using Range3d = tbb::blocked_range3d<Index, Index, Index>; SparseToDenseCompositor(DenseT& dense, const TreeT& source, const TreeT& alpha, const ValueT beta, const ValueT strength) : mDense(dense), mSource(source), mAlpha(alpha), mBeta(beta), mStrength(strength) {} SparseToDenseCompositor(const SparseToDenseCompositor& other): mDense(other.mDense), mSource(other.mSource), mAlpha(other.mAlpha), mBeta(other.mBeta), mStrength(other.mStrength) {} void sparseComposite(bool threaded) { const ValueT beta = mBeta; const ValueT strength = mStrength; // construct a tree that defines the iteration space MaskTreeT maskTree(mSource, false /*background*/, openvdb::TopologyCopy()); maskTree.topologyUnion(mAlpha); // Composite regions that are represented by leafnodes in either mAlpha or mSource // Parallelize over bool-leafs openvdb::tree::LeafManager<const MaskTreeT> maskLeafs(maskTree); maskLeafs.foreach(*this, threaded); // Composite regions that are represented by tiles // Parallelize within each tile. typename MaskTreeT::ValueOnCIter citer = maskTree.cbeginValueOn(); citer.setMaxDepth(MaskTreeT::ValueOnCIter::LEAF_DEPTH - 1); if (!citer) return; typename tree::ValueAccessor<const TreeT> alphaAccessor(mAlpha); typename tree::ValueAccessor<const TreeT> sourceAccessor(mSource); for (; citer; ++citer) { const openvdb::math::Coord org = citer.getCoord(); // Early out if both alpha and source are zero in this tile. const ValueT alphaValue = alphaAccessor.getValue(org); const ValueT sourceValue = sourceAccessor.getValue(org); if (openvdb::math::isZero(alphaValue) && openvdb::math::isZero(sourceValue)) continue; // Compute overlap of tile with the dense grid openvdb::math::CoordBBox localBBox = citer.getBoundingBox(); localBBox.intersect(mDense.bbox()); // Early out if there is no intersection if (localBBox.empty()) continue; // Composite the tile-uniform values into the dense grid. compositeFromTile(mDense, localBBox, sourceValue, alphaValue, beta, strength, threaded); } } // Composites leaf values where the alpha values are active. 
// Used in sparseComposite void inline operator()(const MaskLeafT& maskLeaf, size_t /*i*/) const { using ULeaf = UniformLeaf; openvdb::math::CoordBBox localBBox = maskLeaf.getNodeBoundingBox(); localBBox.intersect(mDense.bbox()); // Early out for non-overlapping leafs if (localBBox.empty()) return; const openvdb::math::Coord org = maskLeaf.origin(); const LeafT* alphaLeaf = mAlpha.probeLeaf(org); const LeafT* sourceLeaf = mSource.probeLeaf(org); if (!sourceLeaf) { // Create a source leaf proxy with the correct value ULeaf uniformSource(mSource.getValue(org)); if (!alphaLeaf) { // Create an alpha leaf proxy with the correct value ULeaf uniformAlpha(mAlpha.getValue(org)); compositeFromLeaf(mDense, localBBox, uniformSource, uniformAlpha, mBeta, mStrength); } else { compositeFromLeaf(mDense, localBBox, uniformSource, *alphaLeaf, mBeta, mStrength); } } else { if (!alphaLeaf) { // Create an alpha leaf proxy with the correct value ULeaf uniformAlpha(mAlpha.getValue(org)); compositeFromLeaf(mDense, localBBox, *sourceLeaf, uniformAlpha, mBeta, mStrength); } else { compositeFromLeaf(mDense, localBBox, *sourceLeaf, *alphaLeaf, mBeta, mStrength); } } } // i.e. it assumes that all valueOff Alpha voxels have value 0. template<typename LeafT1, typename LeafT2> inline static void compositeFromLeaf(DenseT& dense, const openvdb::math::CoordBBox& bbox, const LeafT1& source, const LeafT2& alpha, const ValueT beta, const ValueT strength) { using IntType = openvdb::math::Coord::ValueType; const ValueT sbeta = strength * beta; openvdb::math::Coord ijk = bbox.min(); if (alpha.isDense() /*all active values*/) { // Optimal path for dense alphaLeaf const IntType size = bbox.max().z() + 1 - bbox.min().z(); for (ijk[0] = bbox.min().x(); ijk[0] < bbox.max().x() + 1; ++ijk[0]) { for (ijk[1] = bbox.min().y(); ijk[1] < bbox.max().y() + 1; ++ijk[1]) { ValueT* d = const_cast<ValueT*>(&dense.getValue(ijk)); const ValueT* a = &alpha.getValue(ijk); const ValueT* s = &source.getValue(ijk); for (IntType idx = 0; idx < size; ++idx) { d[idx] = CompositeMethod::apply(d[idx], a[idx], s[idx], strength, beta, sbeta); } } } } else { // AlphaLeaf has non-active cells. for (ijk[0] = bbox.min().x(); ijk[0] < bbox.max().x() + 1; ++ijk[0]) { for (ijk[1] = bbox.min().y(); ijk[1] < bbox.max().y() + 1; ++ijk[1]) { for (ijk[2] = bbox.min().z(); ijk[2] < bbox.max().z() + 1; ++ijk[2]) { if (alpha.isValueOn(ijk)) { dense.setValue(ijk, CompositeMethod::apply(dense.getValue(ijk), alpha.getValue(ijk), source.getValue(ijk), strength, beta, sbeta)); } } } } } } inline static void compositeFromTile(DenseT& dense, openvdb::math::CoordBBox& bbox, const ValueT& sourceValue, const ValueT& alphaValue, const ValueT& beta, const ValueT& strength, bool threaded) { using TileTransformer = UniformTransformer; TileTransformer functor(sourceValue, alphaValue, beta, strength); // Transform the data inside the bbox according to the TileTranformer. transformDense(dense, bbox, functor, threaded); } void denseComposite(bool threaded) { /// Construct a range that corresponds to the /// bounding box of the dense volume const openvdb::math::CoordBBox& bbox = mDense.bbox(); Range3d range(bbox.min().x(), bbox.max().x(), LeafT::DIM, bbox.min().y(), bbox.max().y(), LeafT::DIM, bbox.min().z(), bbox.max().z(), LeafT::DIM); // Iterate over the range, compositing into // the dense grid using value accessors for // sparse the grids. 
if (threaded) { tbb::parallel_for(range, *this); } else { (*this)(range); } } // Composites a dense region using value accessors // into a dense grid void operator()(const Range3d& range) const { // Use value accessors to alpha and source typename tree::ValueAccessor<const TreeT> alphaAccessor(mAlpha); typename tree::ValueAccessor<const TreeT> sourceAccessor(mSource); const ValueT strength = mStrength; const ValueT beta = mBeta; const ValueT sbeta = strength * beta; // Unpack the range3d item. const Index imin = range.pages().begin(); const Index imax = range.pages().end(); const Index jmin = range.rows().begin(); const Index jmax = range.rows().end(); const Index kmin = range.cols().begin(); const Index kmax = range.cols().end(); openvdb::Coord ijk; for (ijk[0] = imin; ijk[0] < imax; ++ijk[0]) { for (ijk[1] = jmin; ijk[1] < jmax; ++ijk[1]) { for (ijk[2] = kmin; ijk[2] < kmax; ++ijk[2]) { const ValueT d_old = mDense.getValue(ijk); const ValueT& alpha = alphaAccessor.getValue(ijk); const ValueT& src = sourceAccessor.getValue(ijk); mDense.setValue(ijk, CompositeMethod::apply(d_old, alpha, src, strength, beta, sbeta)); } } } } private: // Internal class that wraps the templated composite method // for use when both alpha and source are uniform over // a prescribed bbox (e.g. a tile). class UniformTransformer { public: UniformTransformer(const ValueT& source, const ValueT& alpha, const ValueT& _beta, const ValueT& _strength) : mSource(source), mAlpha(alpha), mBeta(_beta), mStrength(_strength), mSBeta(_strength * _beta) {} ValueT operator()(const ValueT& input) const { return CompositeMethod::apply(input, mAlpha, mSource, mStrength, mBeta, mSBeta); } private: const ValueT mSource; const ValueT mAlpha; const ValueT mBeta; const ValueT mStrength; const ValueT mSBeta; }; // Simple Class structure that mimics a leaf // with uniform values. Holds LeafT::DIM copies // of a value in an array. struct Line { ValueT mValues[LeafT::DIM]; }; class UniformLeaf : private Line { public: using ValueT = typename LeafT::ValueType; using BaseT = Line; UniformLeaf(const ValueT& value) : BaseT(init(value)) {} static const BaseT init(const ValueT& value) { BaseT tmp; for (openvdb::Index i = 0; i < LeafT::DIM; ++i) { tmp.mValues[i] = value; } return tmp; } bool isDense() const { return true; } bool isValueOn(openvdb::math::Coord&) const { return true; } const ValueT& getValue(const openvdb::math::Coord&) const { return BaseT::mValues[0]; } }; private: DenseT& mDense; const TreeT& mSource; const TreeT& mAlpha; ValueT mBeta; ValueT mStrength; }; // class SparseToDenseCompositor namespace ds { //@{ /// @brief Point wise methods used to apply various compositing operations. 
template<typename ValueT> struct OpOver { static inline ValueT apply(const ValueT u, const ValueT alpha, const ValueT v, const ValueT strength, const ValueT beta, const ValueT /*sbeta*/) { return (u + strength * alpha * (beta * v - u)); } }; template<typename ValueT> struct OpAdd { static inline ValueT apply(const ValueT u, const ValueT alpha, const ValueT v, const ValueT /*strength*/, const ValueT /*beta*/, const ValueT sbeta) { return (u + sbeta * alpha * v); } }; template<typename ValueT> struct OpSub { static inline ValueT apply(const ValueT u, const ValueT alpha, const ValueT v, const ValueT /*strength*/, const ValueT /*beta*/, const ValueT sbeta) { return (u - sbeta * alpha * v); } }; template<typename ValueT> struct OpMin { static inline ValueT apply(const ValueT u, const ValueT alpha, const ValueT v, const ValueT s /*trength*/, const ValueT beta, const ValueT /*sbeta*/) { return ( ( 1 - s * alpha) * u + s * alpha * std::min(u, beta * v) ); } }; template<typename ValueT> struct OpMax { static inline ValueT apply(const ValueT u, const ValueT alpha, const ValueT v, const ValueT s/*trength*/, const ValueT beta, const ValueT /*sbeta*/) { return ( ( 1 - s * alpha ) * u + s * alpha * std::max(u, beta * v) ); } }; template<typename ValueT> struct OpMult { static inline ValueT apply(const ValueT u, const ValueT alpha, const ValueT v, const ValueT s/*trength*/, const ValueT /*beta*/, const ValueT sbeta) { return ( ( 1 + alpha * (sbeta * v - s)) * u ); } }; //@} //@{ /// Translator that converts an enum to compositing functor types template<DSCompositeOp OP, typename ValueT> struct CompositeFunctorTranslator{}; template<typename ValueT> struct CompositeFunctorTranslator<DS_OVER, ValueT>{ using OpT = OpOver<ValueT>; }; template<typename ValueT> struct CompositeFunctorTranslator<DS_ADD, ValueT>{ using OpT = OpAdd<ValueT>; }; template<typename ValueT> struct CompositeFunctorTranslator<DS_SUB, ValueT>{ using OpT = OpSub<ValueT>; }; template<typename ValueT> struct CompositeFunctorTranslator<DS_MIN, ValueT>{ using OpT = OpMin<ValueT>; }; template<typename ValueT> struct CompositeFunctorTranslator<DS_MAX, ValueT>{ using OpT = OpMax<ValueT>; }; template<typename ValueT> struct CompositeFunctorTranslator<DS_MULT, ValueT>{ using OpT = OpMult<ValueT>; }; //@} } // namespace ds template<DSCompositeOp OpT, typename TreeT> inline void compositeToDense( Dense<typename TreeT::ValueType, LayoutZYX>& dense, const TreeT& source, const TreeT& alpha, const typename TreeT::ValueType beta, const typename TreeT::ValueType strength, bool threaded) { using ValueT = typename TreeT::ValueType; using Translator = ds::CompositeFunctorTranslator<OpT, ValueT>; using Method = typename Translator::OpT; if (openvdb::math::isZero(strength)) return; SparseToDenseCompositor<Method, TreeT> tool(dense, source, alpha, beta, strength); if (openvdb::math::isZero(alpha.background()) && openvdb::math::isZero(source.background())) { // Use the sparsity of (alpha U source) as the iteration space. tool.sparseComposite(threaded); } else { // Use the bounding box of dense as the iteration space. tool.denseComposite(threaded); } } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif //OPENVDB_TOOLS_DENSESPARSETOOLS_HAS_BEEN_INCLUDED
42,215
C
34.386421
99
0.580813
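A minimal usage sketch for the compositeToDense entry point above (not part of the source file): it composites a sparse source/alpha pair into a Dense block with the DS_OVER rule, i.e. d = d + strength * alpha * (beta * s - d) per voxel. The grid contents, box size, and the beta/strength values are illustrative assumptions.

#include <openvdb/openvdb.h>
#include <openvdb/tools/Dense.h>
#include <openvdb/tools/DenseSparseTools.h>

int main()
{
    openvdb::initialize();

    // Sparse source and alpha grids with zero backgrounds, so that
    // compositeToDense takes the sparse (alpha U source) iteration path.
    openvdb::FloatGrid::Ptr source = openvdb::FloatGrid::create(0.0f);
    openvdb::FloatGrid::Ptr alpha  = openvdb::FloatGrid::create(0.0f);
    source->tree().setValue(openvdb::Coord(5, 5, 5), 1.0f);
    alpha->tree().setValue(openvdb::Coord(5, 5, 5), 1.0f);

    // Dense target covering a 16^3 index-space box, initialized to zero.
    const openvdb::CoordBBox bbox(openvdb::Coord(0), openvdb::Coord(15));
    openvdb::tools::Dense<float, openvdb::tools::LayoutZYX> dense(bbox, 0.0f);

    // Apply the "over" rule (ds::OpOver) to every voxel of the overlap.
    openvdb::tools::compositeToDense<openvdb::tools::DS_OVER>(
        dense, source->tree(), alpha->tree(),
        /*beta=*/1.0f, /*strength=*/1.0f, /*threaded=*/true);

    return 0;
}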
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetSphere.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// /// @file LevelSetSphere.h /// /// @brief Generate a narrow-band level set of sphere. /// /// @note By definition a level set has a fixed narrow band width /// (the half width is defined by LEVEL_SET_HALF_WIDTH in Types.h), /// whereas an SDF can have a variable narrow band width. #ifndef OPENVDB_TOOLS_LEVELSETSPHERE_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_LEVELSETSPHERE_HAS_BEEN_INCLUDED #include <openvdb/Grid.h> #include <openvdb/Types.h> #include <openvdb/math/Math.h> #include <openvdb/util/NullInterrupter.h> #include "SignedFloodFill.h" #include <type_traits> #include <tbb/enumerable_thread_specific.h> #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <tbb/blocked_range.h> #include <thread> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a sphere. /// /// @param radius radius of the sphere in world units /// @param center center of the sphere in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// @param interrupt a pointer adhering to the util::NullInterrupter interface /// @param threaded if true multi-threading is enabled (true by default) /// /// @note @c GridType::ValueType must be a floating-point scalar. /// @note The leapfrog algorithm employed in this method is best suited /// for a single large sphere. For multiple small spheres consider /// using the faster algorithm in ParticlesToLevelSet.h template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetSphere(float radius, const openvdb::Vec3f& center, float voxelSize, float halfWidth = float(LEVEL_SET_HALF_WIDTH), InterruptT* interrupt = nullptr, bool threaded = true); /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a sphere. /// /// @param radius radius of the sphere in world units /// @param center center of the sphere in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// @param threaded if true multi-threading is enabled (true by default) /// /// @note @c GridType::ValueType must be a floating-point scalar. /// @note The leapfrog algorithm employed in this method is best suited /// for a single large sphere. For multiple small spheres consider /// using the faster algorithm in ParticlesToLevelSet.h template<typename GridType> typename GridType::Ptr createLevelSetSphere(float radius, const openvdb::Vec3f& center, float voxelSize, float halfWidth = float(LEVEL_SET_HALF_WIDTH), bool threaded = true) { return createLevelSetSphere<GridType, util::NullInterrupter>(radius,center,voxelSize,halfWidth,nullptr,threaded); } //////////////////////////////////////// /// @brief Generates a signed distance field (or narrow band level /// set) to a single sphere. /// /// @note The leapfrog algorithm employed in this class is best /// suited for a single large sphere. 
For multiple small spheres consider /// using the faster algorithm in tools/ParticlesToLevelSet.h template<typename GridT, typename InterruptT = util::NullInterrupter> class LevelSetSphere { public: using TreeT = typename GridT::TreeType; using ValueT = typename GridT::ValueType; using Vec3T = typename math::Vec3<ValueT>; static_assert(std::is_floating_point<ValueT>::value, "level set grids must have scalar, floating-point value types"); /// @brief Constructor /// /// @param radius radius of the sphere in world units /// @param center center of the sphere in world units /// @param interrupt pointer to optional interrupter. Use template /// argument util::NullInterrupter if no interruption is desired. /// /// @note If the radius of the sphere is smaller than /// 1.5*voxelSize, i.e. the sphere is smaller than the Nyquist /// frequency of the grid, it is ignored! LevelSetSphere(ValueT radius, const Vec3T &center, InterruptT* interrupt = nullptr) : mRadius(radius), mCenter(center), mInterrupt(interrupt) { if (mRadius<=0) OPENVDB_THROW(ValueError, "radius must be positive"); } /// @return a narrow-band level set of the sphere /// /// @param voxelSize Size of voxels in world units /// @param halfWidth Half-width of narrow-band in voxel units /// @param threaded If true multi-threading is enabled (true by default) typename GridT::Ptr getLevelSet(ValueT voxelSize, ValueT halfWidth, bool threaded = true) { mGrid = createLevelSet<GridT>(voxelSize, halfWidth); this->rasterSphere(voxelSize, halfWidth, threaded); mGrid->setGridClass(GRID_LEVEL_SET); return mGrid; } private: void rasterSphere(ValueT dx, ValueT w, bool threaded) { if (!(dx>0.0f)) OPENVDB_THROW(ValueError, "voxel size must be positive"); if (!(w>1)) OPENVDB_THROW(ValueError, "half-width must be larger than one"); // Define radius of sphere and narrow-band in voxel units const ValueT r0 = mRadius/dx, rmax = r0 + w; // Radius below the Nyquist frequency if (r0 < 1.5f) return; // Define center of sphere in voxel units const Vec3T c(mCenter[0]/dx, mCenter[1]/dx, mCenter[2]/dx); // Define bounds of the voxel coordinates const int imin=math::Floor(c[0]-rmax), imax=math::Ceil(c[0]+rmax); const int jmin=math::Floor(c[1]-rmax), jmax=math::Ceil(c[1]+rmax); const int kmin=math::Floor(c[2]-rmax), kmax=math::Ceil(c[2]+rmax); // Allocate a ValueAccessor for accelerated random access typename GridT::Accessor accessor = mGrid->getAccessor(); if (mInterrupt) mInterrupt->start("Generating level set of sphere"); tbb::enumerable_thread_specific<TreeT> pool(mGrid->tree()); auto kernel = [&](const tbb::blocked_range<int>& r) { openvdb::Coord ijk; int &i = ijk[0], &j = ijk[1], &k = ijk[2], m=1; TreeT &tree = pool.local(); typename GridT::Accessor acc(tree); // Compute signed distances to sphere using leapfrogging in k for (i = r.begin(); i <= r.end(); ++i) { if (util::wasInterrupted(mInterrupt)) return; const auto x2 = math::Pow2(ValueT(i) - c[0]); for (j = jmin; j <= jmax; ++j) { const auto x2y2 = math::Pow2(ValueT(j) - c[1]) + x2; for (k = kmin; k <= kmax; k += m) { m = 1; // Distance in voxel units to sphere const auto v = math::Sqrt(x2y2 + math::Pow2(ValueT(k)-c[2]))-r0; const auto d = math::Abs(v); if (d < w) { // inside narrow band acc.setValue(ijk, dx*v);// distance in world units } else { // outside narrow band m += math::Floor(d-w);// leapfrog } }//end leapfrog over k }//end loop over j }//end loop over i };// kernel if (threaded) { // The code blow is making use of a TLS container to minimize the number of concurrent trees // initially populated by 
tbb::parallel_for and subsequently merged by tbb::parallel_reduce. // Experiments have demonstrated this approach to outperform others, including serial reduction // and a custom concurrent reduction implementation. tbb::parallel_for(tbb::blocked_range<int>(imin, imax, 128), kernel); using RangeT = tbb::blocked_range<typename tbb::enumerable_thread_specific<TreeT>::iterator>; struct Op { const bool mDelete; TreeT *mTree; Op(TreeT &tree) : mDelete(false), mTree(&tree) {} Op(const Op& other, tbb::split) : mDelete(true), mTree(new TreeT(other.mTree->background())) {} ~Op() { if (mDelete) delete mTree; } void operator()(RangeT &r) { for (auto i=r.begin(); i!=r.end(); ++i) this->merge(*i);} void join(Op &other) { this->merge(*(other.mTree)); } void merge(TreeT &tree) { mTree->merge(tree, openvdb::MERGE_ACTIVE_STATES); } } op( mGrid->tree() ); tbb::parallel_reduce(RangeT(pool.begin(), pool.end(), 4), op); } else { kernel(tbb::blocked_range<int>(imin, imax));//serial mGrid->tree().merge(*pool.begin(), openvdb::MERGE_ACTIVE_STATES); } // Define consistent signed distances outside the narrow-band tools::signedFloodFill(mGrid->tree(), threaded); if (mInterrupt) mInterrupt->end(); } const ValueT mRadius; const Vec3T mCenter; InterruptT* mInterrupt; typename GridT::Ptr mGrid; };// LevelSetSphere //////////////////////////////////////// template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetSphere(float radius, const openvdb::Vec3f& center, float voxelSize, float halfWidth, InterruptT* interrupt, bool threaded) { // GridType::ValueType is required to be a floating-point scalar. static_assert(std::is_floating_point<typename GridType::ValueType>::value, "level set grids must have scalar, floating-point value types"); using ValueT = typename GridType::ValueType; LevelSetSphere<GridType, InterruptT> factory(ValueT(radius), center, interrupt); return factory.getLevelSet(ValueT(voxelSize), ValueT(halfWidth), threaded); } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_LEVELSETSPHERE_HAS_BEEN_INCLUDED
10,109
C
42.205128
117
0.640716
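A short, hedged example of the createLevelSetSphere overload declared above (not part of the source file); the radius, center, and voxel size are arbitrary choices.

#include <openvdb/openvdb.h>
#include <openvdb/tools/LevelSetSphere.h>
#include <iostream>

int main()
{
    openvdb::initialize();

    // 50-unit-radius sphere at the origin, 0.5-unit voxels, default
    // 3-voxel half-width, multi-threaded by default.
    openvdb::FloatGrid::Ptr sphere =
        openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
            /*radius=*/50.0f, /*center=*/openvdb::Vec3f(0.0f), /*voxelSize=*/0.5f);

    std::cout << "active voxels: " << sphere->activeVoxelCount() << std::endl;
    return 0;
}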
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/PointPartitioner.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file PointPartitioner.h /// /// @brief Spatially partitions points using a parallel radix-based /// sorting algorithm. /// /// @details Performs a stable deterministic sort; partitioning the same /// point sequence will produce the same result each time. /// @details The algorithm is unbounded meaning that points may be /// distributed anywhere in index space. /// @details The actual points are never stored in the tool, only /// offsets into an external array. /// /// @author Mihai Alden #ifndef OPENVDB_TOOLS_POINT_PARTITIONER_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_POINT_PARTITIONER_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/math/Transform.h> #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <tbb/task_scheduler_init.h> #include <algorithm> #include <cmath> // for std::isfinite() #include <deque> #include <map> #include <set> #include <utility> // std::pair #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { //////////////////////////////////////// /// @brief Partitions points into @c BucketLog2Dim aligned buckets /// using a parallel radix-based sorting algorithm. /// /// @interface PointArray /// Expected interface for the PointArray container: /// @code /// template<typename VectorType> /// struct PointArray /// { /// // The type used to represent world-space point positions /// using PosType = VectorType; /// /// // Return the number of points in the array /// size_t size() const; /// /// // Return the world-space position of the nth point in the array. /// void getPos(size_t n, PosType& xyz) const; /// }; /// @endcode /// /// @details Performs a stable deterministic sort; partitioning the same /// point sequence will produce the same result each time. /// @details The algorithm is unbounded meaning that points may be /// distributed anywhere in index space. /// @details The actual points are never stored in the tool, only /// offsets into an external array. /// @details @c BucketLog2Dim defines the bucket coordinate dimensions, /// i.e. BucketLog2Dim = 3 corresponds to a bucket that spans /// a (2^3)^3 = 8^3 voxel region. template<typename PointIndexType = uint32_t, Index BucketLog2Dim = 3> class PointPartitioner { public: enum { LOG2DIM = BucketLog2Dim }; using Ptr = SharedPtr<PointPartitioner>; using ConstPtr = SharedPtr<const PointPartitioner>; using IndexType = PointIndexType; static constexpr Index bits = 1 + (3 * BucketLog2Dim); // signed, so if bits is exactly 16, int32 is required using VoxelOffsetType = typename std::conditional<(bits < 16), int16_t, typename std::conditional<(bits < 32), int32_t, int64_t>::type>::type; using VoxelOffsetArray = std::unique_ptr<VoxelOffsetType[]>; class IndexIterator; ////////// PointPartitioner(); /// @brief Partitions point indices into @c BucketLog2Dim aligned buckets. /// /// @param points list of world space points. /// @param xform world to index space transform. /// @param voxelOrder sort point indices by local voxel offsets. /// @param recordVoxelOffsets construct local voxel offsets /// @param cellCenteredTransform toggle the cell-centered interpretation that imagines world /// space as divided into discrete cells (e.g., cubes) centered /// on the image of the index-space lattice points. 
template<typename PointArray> void construct(const PointArray& points, const math::Transform& xform, bool voxelOrder = false, bool recordVoxelOffsets = false, bool cellCenteredTransform = true); /// @brief Partitions point indices into @c BucketLog2Dim aligned buckets. /// /// @param points list of world space points. /// @param xform world to index space transform. /// @param voxelOrder sort point indices by local voxel offsets. /// @param recordVoxelOffsets construct local voxel offsets /// @param cellCenteredTransform toggle the cell-centered interpretation that imagines world /// space as divided into discrete cells (e.g., cubes) centered /// on the image of the index-space lattice points. template<typename PointArray> static Ptr create(const PointArray& points, const math::Transform& xform, bool voxelOrder = false, bool recordVoxelOffsets = false, bool cellCenteredTransform = true); /// @brief Returns the number of buckets. size_t size() const { return mPageCount; } /// @brief true if the container size is 0, false otherwise. bool empty() const { return mPageCount == 0; } /// @brief Removes all data and frees up memory. void clear(); /// @brief Exchanges the content of the container by another. void swap(PointPartitioner&); /// @brief Returns the point indices for bucket @a n IndexIterator indices(size_t n) const; /// @brief Returns the coordinate-aligned bounding box for bucket @a n CoordBBox getBBox(size_t n) const { return CoordBBox::createCube(mPageCoordinates[n], (1u << BucketLog2Dim)); } /// @brief Returns the origin coordinate for bucket @a n const Coord& origin(size_t n) const { return mPageCoordinates[n]; } /// @brief Returns a list of @c LeafNode voxel offsets for the points. /// @note The list is optionally constructed. const VoxelOffsetArray& voxelOffsets() const { return mVoxelOffsets; } /// @brief Returns @c true if this point partitioning was constructed /// using a cell-centered transform. /// @note Cell-centered interpretation is the default behavior. bool usingCellCenteredTransform() const { return mUsingCellCenteredTransform; } private: // Disallow copying PointPartitioner(const PointPartitioner&); PointPartitioner& operator=(const PointPartitioner&); std::unique_ptr<IndexType[]> mPointIndices; VoxelOffsetArray mVoxelOffsets; std::unique_ptr<IndexType[]> mPageOffsets; std::unique_ptr<Coord[]> mPageCoordinates; IndexType mPageCount; bool mUsingCellCenteredTransform; }; // class PointPartitioner using UInt32PointPartitioner = PointPartitioner<uint32_t, 3>; template<typename PointIndexType, Index BucketLog2Dim> class PointPartitioner<PointIndexType, BucketLog2Dim>::IndexIterator { public: using IndexType = PointIndexType; IndexIterator(IndexType* begin = nullptr, IndexType* end = nullptr) : mBegin(begin), mEnd(end), mItem(begin) {} /// @brief Rewind to first item. void reset() { mItem = mBegin; } /// @brief Number of point indices in the iterator range. size_t size() const { return mEnd - mBegin; } /// @brief Returns the item to which this iterator is currently pointing. IndexType& operator*() { assert(mItem != nullptr); return *mItem; } const IndexType& operator*() const { assert(mItem != nullptr); return *mItem; } /// @brief Return @c true if this iterator is not yet exhausted. operator bool() const { return mItem < mEnd; } bool test() const { return mItem < mEnd; } /// @brief Advance to the next item. IndexIterator& operator++() { assert(this->test()); ++mItem; return *this; } /// @brief Advance to the next item. 
bool next() { this->operator++(); return this->test(); } bool increment() { this->next(); return this->test(); } /// @brief Equality operators bool operator==(const IndexIterator& other) const { return mItem == other.mItem; } bool operator!=(const IndexIterator& other) const { return !this->operator==(other); } private: IndexType * const mBegin, * const mEnd; IndexType * mItem; }; // class PointPartitioner::IndexIterator //////////////////////////////////////// //////////////////////////////////////// // Implementation details namespace point_partitioner_internal { template<typename PointIndexType> struct ComputePointOrderOp { ComputePointOrderOp(PointIndexType* pointOrder, const PointIndexType* bucketCounters, const PointIndexType* bucketOffsets) : mPointOrder(pointOrder) , mBucketCounters(bucketCounters) , mBucketOffsets(bucketOffsets) { } void operator()(const tbb::blocked_range<size_t>& range) const { for (size_t n = range.begin(), N = range.end(); n != N; ++n) { mPointOrder[n] += mBucketCounters[mBucketOffsets[n]]; } } PointIndexType * const mPointOrder; PointIndexType const * const mBucketCounters; PointIndexType const * const mBucketOffsets; }; // struct ComputePointOrderOp template<typename PointIndexType> struct CreateOrderedPointIndexArrayOp { CreateOrderedPointIndexArrayOp(PointIndexType* orderedIndexArray, const PointIndexType* pointOrder, const PointIndexType* indices) : mOrderedIndexArray(orderedIndexArray) , mPointOrder(pointOrder) , mIndices(indices) { } void operator()(const tbb::blocked_range<size_t>& range) const { for (size_t n = range.begin(), N = range.end(); n != N; ++n) { mOrderedIndexArray[mPointOrder[n]] = mIndices[n]; } } PointIndexType * const mOrderedIndexArray; PointIndexType const * const mPointOrder; PointIndexType const * const mIndices; }; // struct CreateOrderedPointIndexArrayOp template<typename PointIndexType, Index BucketLog2Dim> struct VoxelOrderOp { static constexpr Index bits = 1 + (3 * BucketLog2Dim); // signed, so if bits is exactly 16, int32 is required using VoxelOffsetType = typename std::conditional<(bits < 16), int16_t, typename std::conditional<(bits < 32), int32_t, int64_t>::type>::type; using VoxelOffsetArray = std::unique_ptr<VoxelOffsetType[]>; using IndexArray = std::unique_ptr<PointIndexType[]>; VoxelOrderOp(IndexArray& indices, const IndexArray& pages,const VoxelOffsetArray& offsets) : mIndices(indices.get()) , mPages(pages.get()) , mVoxelOffsets(offsets.get()) { } void operator()(const tbb::blocked_range<size_t>& range) const { PointIndexType pointCount = 0; for (size_t n(range.begin()), N(range.end()); n != N; ++n) { pointCount = std::max(pointCount, (mPages[n + 1] - mPages[n])); } const PointIndexType voxelCount = 1 << (3 * BucketLog2Dim); // allocate histogram buffers std::unique_ptr<VoxelOffsetType[]> offsets(new VoxelOffsetType[pointCount]); std::unique_ptr<PointIndexType[]> sortedIndices(new PointIndexType[pointCount]); std::unique_ptr<PointIndexType[]> histogram(new PointIndexType[voxelCount]); for (size_t n(range.begin()), N(range.end()); n != N; ++n) { PointIndexType * const indices = mIndices + mPages[n]; pointCount = mPages[n + 1] - mPages[n]; // local copy of voxel offsets. 
for (PointIndexType i = 0; i < pointCount; ++i) { offsets[i] = mVoxelOffsets[ indices[i] ]; } // reset histogram memset(&histogram[0], 0, voxelCount * sizeof(PointIndexType)); // compute histogram for (PointIndexType i = 0; i < pointCount; ++i) { ++histogram[ offsets[i] ]; } PointIndexType count = 0, startOffset; for (int i = 0; i < int(voxelCount); ++i) { if (histogram[i] > 0) { startOffset = count; count += histogram[i]; histogram[i] = startOffset; } } // sort indices based on voxel offset for (PointIndexType i = 0; i < pointCount; ++i) { sortedIndices[ histogram[ offsets[i] ]++ ] = indices[i]; } memcpy(&indices[0], &sortedIndices[0], sizeof(PointIndexType) * pointCount); } } PointIndexType * const mIndices; PointIndexType const * const mPages; VoxelOffsetType const * const mVoxelOffsets; }; // struct VoxelOrderOp //////////////////////////////////////// template<typename T> struct Array { using Ptr = std::unique_ptr<Array>; Array(size_t size) : mSize(size), mData(new T[size]) { } size_t size() const { return mSize; } T* data() { return mData.get(); } const T* data() const { return mData.get(); } void clear() { mSize = 0; mData.reset(); } private: size_t mSize; std::unique_ptr<T[]> mData; }; // struct Array template<typename PointIndexType> struct MoveSegmentDataOp { using SegmentPtr = typename Array<PointIndexType>::Ptr; MoveSegmentDataOp(std::vector<PointIndexType*>& indexLists, SegmentPtr* segments) : mIndexLists(&indexLists[0]), mSegments(segments) { } void operator()(const tbb::blocked_range<size_t>& range) const { for (size_t n(range.begin()), N(range.end()); n != N; ++n) { PointIndexType* indices = mIndexLists[n]; SegmentPtr& segment = mSegments[n]; tbb::parallel_for(tbb::blocked_range<size_t>(0, segment->size()), CopyData(indices, segment->data())); segment.reset(); // clear data } } private: struct CopyData { CopyData(PointIndexType* lhs, const PointIndexType* rhs) : mLhs(lhs), mRhs(rhs) { } void operator()(const tbb::blocked_range<size_t>& range) const { for (size_t n = range.begin(), N = range.end(); n != N; ++n) { mLhs[n] = mRhs[n]; } } PointIndexType * const mLhs; PointIndexType const * const mRhs; }; PointIndexType * const * const mIndexLists; SegmentPtr * const mSegments; }; // struct MoveSegmentDataOp template<typename PointIndexType> struct MergeBinsOp { using Segment = Array<PointIndexType>; using SegmentPtr = typename Segment::Ptr; using IndexPair = std::pair<PointIndexType, PointIndexType>; using IndexPairList = std::deque<IndexPair>; using IndexPairListPtr = std::shared_ptr<IndexPairList>; using IndexPairListMap = std::map<Coord, IndexPairListPtr>; using IndexPairListMapPtr = std::shared_ptr<IndexPairListMap>; MergeBinsOp(IndexPairListMapPtr* bins, SegmentPtr* indexSegments, SegmentPtr* offsetSegments, Coord* coords, size_t numSegments) : mBins(bins) , mIndexSegments(indexSegments) , mOffsetSegments(offsetSegments) , mCoords(coords) , mNumSegments(numSegments) { } void operator()(const tbb::blocked_range<size_t>& range) const { std::vector<IndexPairListPtr*> data; std::vector<PointIndexType> arrayOffsets; for (size_t n = range.begin(), N = range.end(); n != N; ++n) { const Coord& ijk = mCoords[n]; size_t numIndices = 0; data.clear(); for (size_t i = 0, I = mNumSegments; i < I; ++i) { IndexPairListMap& idxMap = *mBins[i]; typename IndexPairListMap::iterator iter = idxMap.find(ijk); if (iter != idxMap.end() && iter->second) { IndexPairListPtr& idxListPtr = iter->second; data.push_back(&idxListPtr); numIndices += idxListPtr->size(); } } if (data.empty() || numIndices == 
0) continue; SegmentPtr& indexSegment = mIndexSegments[n]; SegmentPtr& offsetSegment = mOffsetSegments[n]; indexSegment.reset(new Segment(numIndices)); offsetSegment.reset(new Segment(numIndices)); arrayOffsets.clear(); arrayOffsets.reserve(data.size()); for (size_t i = 0, count = 0, I = data.size(); i < I; ++i) { arrayOffsets.push_back(PointIndexType(count)); count += (*data[i])->size(); } tbb::parallel_for(tbb::blocked_range<size_t>(0, data.size()), CopyData(&data[0], &arrayOffsets[0], indexSegment->data(), offsetSegment->data())); } } private: struct CopyData { CopyData(IndexPairListPtr** indexLists, const PointIndexType* arrayOffsets, PointIndexType* indices, PointIndexType* offsets) : mIndexLists(indexLists) , mArrayOffsets(arrayOffsets) , mIndices(indices) , mOffsets(offsets) { } void operator()(const tbb::blocked_range<size_t>& range) const { using CIter = typename IndexPairList::const_iterator; for (size_t n = range.begin(), N = range.end(); n != N; ++n) { const PointIndexType arrayOffset = mArrayOffsets[n]; PointIndexType* indexPtr = &mIndices[arrayOffset]; PointIndexType* offsetPtr = &mOffsets[arrayOffset]; IndexPairListPtr& list = *mIndexLists[n]; for (CIter it = list->begin(), end = list->end(); it != end; ++it) { const IndexPair& data = *it; *indexPtr++ = data.first; *offsetPtr++ = data.second; } list.reset(); // clear data } } IndexPairListPtr * const * const mIndexLists; PointIndexType const * const mArrayOffsets; PointIndexType * const mIndices; PointIndexType * const mOffsets; }; // struct CopyData IndexPairListMapPtr * const mBins; SegmentPtr * const mIndexSegments; SegmentPtr * const mOffsetSegments; Coord const * const mCoords; size_t const mNumSegments; }; // struct MergeBinsOp template<typename PointArray, typename PointIndexType, typename VoxelOffsetType> struct BinPointIndicesOp { using PosType = typename PointArray::PosType; using IndexPair = std::pair<PointIndexType, PointIndexType>; using IndexPairList = std::deque<IndexPair>; using IndexPairListPtr = std::shared_ptr<IndexPairList>; using IndexPairListMap = std::map<Coord, IndexPairListPtr>; using IndexPairListMapPtr = std::shared_ptr<IndexPairListMap>; BinPointIndicesOp(IndexPairListMapPtr* data, const PointArray& points, VoxelOffsetType* voxelOffsets, const math::Transform& m, Index binLog2Dim, Index bucketLog2Dim, size_t numSegments, bool cellCenteredTransform) : mData(data) , mPoints(&points) , mVoxelOffsets(voxelOffsets) , mXForm(m) , mBinLog2Dim(binLog2Dim) , mBucketLog2Dim(bucketLog2Dim) , mNumSegments(numSegments) , mCellCenteredTransform(cellCenteredTransform) { } void operator()(const tbb::blocked_range<size_t>& range) const { const Index log2dim = mBucketLog2Dim; const Index log2dim2 = 2 * log2dim; const Index bucketMask = (1u << log2dim) - 1u; const Index binLog2dim = mBinLog2Dim; const Index binLog2dim2 = 2 * binLog2dim; const Index binMask = (1u << (log2dim + binLog2dim)) - 1u; const Index invBinMask = ~binMask; IndexPairList * idxList = nullptr; Coord ijk(0, 0, 0), loc(0, 0, 0), binCoord(0, 0, 0), lastBinCoord(1, 2, 3); PosType pos; PointIndexType bucketOffset = 0; VoxelOffsetType voxelOffset = 0; const bool cellCentered = mCellCenteredTransform; const size_t numPoints = mPoints->size(); const size_t segmentSize = numPoints / mNumSegments; for (size_t n = range.begin(), N = range.end(); n != N; ++n) { IndexPairListMapPtr& dataPtr = mData[n]; if (!dataPtr) dataPtr.reset(new IndexPairListMap()); IndexPairListMap& idxMap = *dataPtr; const bool isLastSegment = (n + 1) >= mNumSegments; const size_t 
start = n * segmentSize; const size_t end = isLastSegment ? numPoints : (start + segmentSize); for (size_t i = start; i != end; ++i) { mPoints->getPos(i, pos); if (std::isfinite(pos[0]) && std::isfinite(pos[1]) && std::isfinite(pos[2])) { ijk = cellCentered ? mXForm.worldToIndexCellCentered(pos) : mXForm.worldToIndexNodeCentered(pos); if (mVoxelOffsets) { loc[0] = ijk[0] & bucketMask; loc[1] = ijk[1] & bucketMask; loc[2] = ijk[2] & bucketMask; voxelOffset = VoxelOffsetType( (loc[0] << log2dim2) + (loc[1] << log2dim) + loc[2]); } binCoord[0] = ijk[0] & invBinMask; binCoord[1] = ijk[1] & invBinMask; binCoord[2] = ijk[2] & invBinMask; ijk[0] &= binMask; ijk[1] &= binMask; ijk[2] &= binMask; ijk[0] >>= log2dim; ijk[1] >>= log2dim; ijk[2] >>= log2dim; bucketOffset = PointIndexType( (ijk[0] << binLog2dim2) + (ijk[1] << binLog2dim) + ijk[2]); if (lastBinCoord != binCoord) { lastBinCoord = binCoord; IndexPairListPtr& idxListPtr = idxMap[lastBinCoord]; if (!idxListPtr) idxListPtr.reset(new IndexPairList()); idxList = idxListPtr.get(); } idxList->push_back(IndexPair(PointIndexType(i), bucketOffset)); if (mVoxelOffsets) mVoxelOffsets[i] = voxelOffset; } } } } IndexPairListMapPtr * const mData; PointArray const * const mPoints; VoxelOffsetType * const mVoxelOffsets; math::Transform const mXForm; Index const mBinLog2Dim; Index const mBucketLog2Dim; size_t const mNumSegments; bool const mCellCenteredTransform; }; // struct BinPointIndicesOp template<typename PointIndexType> struct OrderSegmentsOp { using IndexArray = std::unique_ptr<PointIndexType[]>; using SegmentPtr = typename Array<PointIndexType>::Ptr; OrderSegmentsOp(SegmentPtr* indexSegments, SegmentPtr* offsetSegments, IndexArray* pageOffsetArrays, IndexArray* pageIndexArrays, Index binVolume) : mIndexSegments(indexSegments) , mOffsetSegments(offsetSegments) , mPageOffsetArrays(pageOffsetArrays) , mPageIndexArrays(pageIndexArrays) , mBinVolume(binVolume) { } void operator()(const tbb::blocked_range<size_t>& range) const { const size_t bucketCountersSize = size_t(mBinVolume); IndexArray bucketCounters(new PointIndexType[bucketCountersSize]); size_t maxSegmentSize = 0; for (size_t n = range.begin(), N = range.end(); n != N; ++n) { maxSegmentSize = std::max(maxSegmentSize, mIndexSegments[n]->size()); } IndexArray bucketIndices(new PointIndexType[maxSegmentSize]); for (size_t n = range.begin(), N = range.end(); n != N; ++n) { memset(bucketCounters.get(), 0, sizeof(PointIndexType) * bucketCountersSize); const size_t segmentSize = mOffsetSegments[n]->size(); PointIndexType* offsets = mOffsetSegments[n]->data(); // Count the number of points per bucket and assign a local bucket index // to each point. 
for (size_t i = 0; i < segmentSize; ++i) { bucketIndices[i] = bucketCounters[offsets[i]]++; } PointIndexType nonemptyBucketCount = 0; for (size_t i = 0; i < bucketCountersSize; ++i) { nonemptyBucketCount += static_cast<PointIndexType>(bucketCounters[i] != 0); } IndexArray& pageOffsets = mPageOffsetArrays[n]; pageOffsets.reset(new PointIndexType[nonemptyBucketCount + 1]); pageOffsets[0] = nonemptyBucketCount + 1; // stores array size in first element IndexArray& pageIndices = mPageIndexArrays[n]; pageIndices.reset(new PointIndexType[nonemptyBucketCount]); // Compute bucket counter prefix sum PointIndexType count = 0, idx = 0; for (size_t i = 0; i < bucketCountersSize; ++i) { if (bucketCounters[i] != 0) { pageIndices[idx] = static_cast<PointIndexType>(i); pageOffsets[idx+1] = bucketCounters[i]; bucketCounters[i] = count; count += pageOffsets[idx+1]; ++idx; } } PointIndexType* indices = mIndexSegments[n]->data(); const tbb::blocked_range<size_t> segmentRange(0, segmentSize); // Compute final point order by incrementing the local bucket point index // with the prefix sum offset. tbb::parallel_for(segmentRange, ComputePointOrderOp<PointIndexType>( bucketIndices.get(), bucketCounters.get(), offsets)); tbb::parallel_for(segmentRange, CreateOrderedPointIndexArrayOp<PointIndexType>( offsets, bucketIndices.get(), indices)); mIndexSegments[n]->clear(); // clear data } } SegmentPtr * const mIndexSegments; SegmentPtr * const mOffsetSegments; IndexArray * const mPageOffsetArrays; IndexArray * const mPageIndexArrays; Index const mBinVolume; }; // struct OrderSegmentsOp //////////////////////////////////////// /// @brief Segment points using one level of least significant digit radix bins. template<typename PointIndexType, typename VoxelOffsetType, typename PointArray> inline void binAndSegment( const PointArray& points, const math::Transform& xform, std::unique_ptr<typename Array<PointIndexType>::Ptr[]>& indexSegments, std::unique_ptr<typename Array<PointIndexType>::Ptr[]>& offsetSegments, std::vector<Coord>& coords, const Index binLog2Dim, const Index bucketLog2Dim, VoxelOffsetType* voxelOffsets = nullptr, bool cellCenteredTransform = true) { using IndexPair = std::pair<PointIndexType, PointIndexType>; using IndexPairList = std::deque<IndexPair>; using IndexPairListPtr = std::shared_ptr<IndexPairList>; using IndexPairListMap = std::map<Coord, IndexPairListPtr>; using IndexPairListMapPtr = std::shared_ptr<IndexPairListMap>; size_t numTasks = 1, numThreads = size_t(tbb::task_scheduler_init::default_num_threads()); if (points.size() > (numThreads * 2)) numTasks = numThreads * 2; else if (points.size() > numThreads) numTasks = numThreads; std::unique_ptr<IndexPairListMapPtr[]> bins(new IndexPairListMapPtr[numTasks]); using BinOp = BinPointIndicesOp<PointArray, PointIndexType, VoxelOffsetType>; tbb::parallel_for(tbb::blocked_range<size_t>(0, numTasks), BinOp(bins.get(), points, voxelOffsets, xform, binLog2Dim, bucketLog2Dim, numTasks, cellCenteredTransform)); std::set<Coord> uniqueCoords; for (size_t i = 0; i < numTasks; ++i) { IndexPairListMap& idxMap = *bins[i]; for (typename IndexPairListMap::iterator it = idxMap.begin(); it != idxMap.end(); ++it) { uniqueCoords.insert(it->first); } } coords.assign(uniqueCoords.begin(), uniqueCoords.end()); uniqueCoords.clear(); size_t segmentCount = coords.size(); using SegmentPtr = typename Array<PointIndexType>::Ptr; indexSegments.reset(new SegmentPtr[segmentCount]); offsetSegments.reset(new SegmentPtr[segmentCount]); using MergeOp = MergeBinsOp<PointIndexType>; 
tbb::parallel_for(tbb::blocked_range<size_t>(0, segmentCount), MergeOp(bins.get(), indexSegments.get(), offsetSegments.get(), &coords[0], numTasks)); } template<typename PointIndexType, typename VoxelOffsetType, typename PointArray> inline void partition( const PointArray& points, const math::Transform& xform, const Index bucketLog2Dim, std::unique_ptr<PointIndexType[]>& pointIndices, std::unique_ptr<PointIndexType[]>& pageOffsets, std::unique_ptr<Coord[]>& pageCoordinates, PointIndexType& pageCount, std::unique_ptr<VoxelOffsetType[]>& voxelOffsets, bool recordVoxelOffsets, bool cellCenteredTransform) { using SegmentPtr = typename Array<PointIndexType>::Ptr; if (recordVoxelOffsets) voxelOffsets.reset(new VoxelOffsetType[points.size()]); else voxelOffsets.reset(); const Index binLog2Dim = 5u; // note: Bins span a (2^(binLog2Dim + bucketLog2Dim))^3 voxel region, // i.e. bucketLog2Dim = 3 and binLog2Dim = 5 corresponds to a // (2^8)^3 = 256^3 voxel region. std::vector<Coord> segmentCoords; std::unique_ptr<SegmentPtr[]> indexSegments; std::unique_ptr<SegmentPtr[]> offsetSegments; binAndSegment<PointIndexType, VoxelOffsetType, PointArray>(points, xform, indexSegments, offsetSegments, segmentCoords, binLog2Dim, bucketLog2Dim, voxelOffsets.get(), cellCenteredTransform); size_t numSegments = segmentCoords.size(); const tbb::blocked_range<size_t> segmentRange(0, numSegments); using IndexArray = std::unique_ptr<PointIndexType[]>; std::unique_ptr<IndexArray[]> pageOffsetArrays(new IndexArray[numSegments]); std::unique_ptr<IndexArray[]> pageIndexArrays(new IndexArray[numSegments]); const Index binVolume = 1u << (3u * binLog2Dim); tbb::parallel_for(segmentRange, OrderSegmentsOp<PointIndexType> (indexSegments.get(), offsetSegments.get(), pageOffsetArrays.get(), pageIndexArrays.get(), binVolume)); indexSegments.reset(); std::vector<Index> segmentOffsets; segmentOffsets.reserve(numSegments); pageCount = 0; for (size_t n = 0; n < numSegments; ++n) { segmentOffsets.push_back(pageCount); pageCount += pageOffsetArrays[n][0] - 1; } pageOffsets.reset(new PointIndexType[pageCount + 1]); PointIndexType count = 0; for (size_t n = 0, idx = 0; n < numSegments; ++n) { PointIndexType* offsets = pageOffsetArrays[n].get(); size_t size = size_t(offsets[0]); for (size_t i = 1; i < size; ++i) { pageOffsets[idx++] = count; count += offsets[i]; } } pageOffsets[pageCount] = count; pointIndices.reset(new PointIndexType[points.size()]); std::vector<PointIndexType*> indexArray; indexArray.reserve(numSegments); PointIndexType* index = pointIndices.get(); for (size_t n = 0; n < numSegments; ++n) { indexArray.push_back(index); index += offsetSegments[n]->size(); } // compute leaf node origin for each page pageCoordinates.reset(new Coord[pageCount]); tbb::parallel_for(segmentRange, [&](tbb::blocked_range<size_t>& range) { for (size_t n = range.begin(); n < range.end(); n++) { Index segmentOffset = segmentOffsets[n]; PointIndexType* indices = pageIndexArrays[n].get(); const Coord& segmentCoord = segmentCoords[n]; // segment size stored in the first value of the offset array const size_t segmentSize = pageOffsetArrays[n][0] - 1; tbb::blocked_range<size_t> copyRange(0, segmentSize); tbb::parallel_for(copyRange, [&](tbb::blocked_range<size_t>& r) { for (size_t i = r.begin(); i < r.end(); i++) { Index pageIndex = indices[i]; Coord& ijk = pageCoordinates[segmentOffset+i]; ijk[0] = pageIndex >> (2 * binLog2Dim); Index pageIndexModulo = pageIndex - (ijk[0] << (2 * binLog2Dim)); ijk[1] = pageIndexModulo >> binLog2Dim; ijk[2] = 
pageIndexModulo - (ijk[1] << binLog2Dim); ijk = (ijk << bucketLog2Dim) + segmentCoord; } } ); } } ); // move segment data tbb::parallel_for(segmentRange, MoveSegmentDataOp<PointIndexType>(indexArray, offsetSegments.get())); } } // namespace point_partitioner_internal //////////////////////////////////////// template<typename PointIndexType, Index BucketLog2Dim> inline PointPartitioner<PointIndexType, BucketLog2Dim>::PointPartitioner() : mPointIndices(nullptr) , mVoxelOffsets(nullptr) , mPageOffsets(nullptr) , mPageCoordinates(nullptr) , mPageCount(0) , mUsingCellCenteredTransform(true) { } template<typename PointIndexType, Index BucketLog2Dim> inline void PointPartitioner<PointIndexType, BucketLog2Dim>::clear() { mPageCount = 0; mUsingCellCenteredTransform = true; mPointIndices.reset(); mVoxelOffsets.reset(); mPageOffsets.reset(); mPageCoordinates.reset(); } template<typename PointIndexType, Index BucketLog2Dim> inline void PointPartitioner<PointIndexType, BucketLog2Dim>::swap(PointPartitioner& rhs) { const IndexType tmpLhsPageCount = mPageCount; mPageCount = rhs.mPageCount; rhs.mPageCount = tmpLhsPageCount; mPointIndices.swap(rhs.mPointIndices); mVoxelOffsets.swap(rhs.mVoxelOffsets); mPageOffsets.swap(rhs.mPageOffsets); mPageCoordinates.swap(rhs.mPageCoordinates); bool lhsCellCenteredTransform = mUsingCellCenteredTransform; mUsingCellCenteredTransform = rhs.mUsingCellCenteredTransform; rhs.mUsingCellCenteredTransform = lhsCellCenteredTransform; } template<typename PointIndexType, Index BucketLog2Dim> inline typename PointPartitioner<PointIndexType, BucketLog2Dim>::IndexIterator PointPartitioner<PointIndexType, BucketLog2Dim>::indices(size_t n) const { assert(bool(mPointIndices) && bool(mPageCount)); return IndexIterator( mPointIndices.get() + mPageOffsets[n], mPointIndices.get() + mPageOffsets[n + 1]); } template<typename PointIndexType, Index BucketLog2Dim> template<typename PointArray> inline void PointPartitioner<PointIndexType, BucketLog2Dim>::construct( const PointArray& points, const math::Transform& xform, bool voxelOrder, bool recordVoxelOffsets, bool cellCenteredTransform) { mUsingCellCenteredTransform = cellCenteredTransform; point_partitioner_internal::partition(points, xform, BucketLog2Dim, mPointIndices, mPageOffsets, mPageCoordinates, mPageCount, mVoxelOffsets, (voxelOrder || recordVoxelOffsets), cellCenteredTransform); const tbb::blocked_range<size_t> pageRange(0, mPageCount); if (mVoxelOffsets && voxelOrder) { tbb::parallel_for(pageRange, point_partitioner_internal::VoxelOrderOp< IndexType, BucketLog2Dim>(mPointIndices, mPageOffsets, mVoxelOffsets)); } if (mVoxelOffsets && !recordVoxelOffsets) { mVoxelOffsets.reset(); } } template<typename PointIndexType, Index BucketLog2Dim> template<typename PointArray> inline typename PointPartitioner<PointIndexType, BucketLog2Dim>::Ptr PointPartitioner<PointIndexType, BucketLog2Dim>::create( const PointArray& points, const math::Transform& xform, bool voxelOrder, bool recordVoxelOffsets, bool cellCenteredTransform) { Ptr ret(new PointPartitioner()); ret->construct(points, xform, voxelOrder, recordVoxelOffsets, cellCenteredTransform); return ret; } //////////////////////////////////////// } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_POINT_PARTITIONER_HAS_BEEN_INCLUDED
36,569
C
33.828571
99
0.619213
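A sketch of the PointArray interface documented above, fed to the default UInt32PointPartitioner (PointPartitioner<uint32_t, 3>, i.e. 8^3-voxel buckets). The PointList adapter and the sample coordinates are assumptions made for illustration.

#include <openvdb/openvdb.h>
#include <openvdb/tools/PointPartitioner.h>
#include <iostream>
#include <vector>

// Adapter satisfying the documented PointArray interface (hypothetical helper).
struct PointList
{
    using PosType = openvdb::Vec3R;
    std::vector<PosType> positions;
    size_t size() const { return positions.size(); }
    void getPos(size_t n, PosType& xyz) const { xyz = positions[n]; }
};

int main()
{
    openvdb::initialize();

    PointList points;
    points.positions.emplace_back(0.10, 0.20, 0.30);
    points.positions.emplace_back(0.15, 0.25, 0.35);
    points.positions.emplace_back(5.00, 5.00, 5.00);

    // World-to-index transform with 0.5-unit voxels; buckets span 8^3 voxels.
    openvdb::math::Transform::Ptr xform =
        openvdb::math::Transform::createLinearTransform(/*voxelSize=*/0.5);

    openvdb::tools::UInt32PointPartitioner partitioner;
    partitioner.construct(points, *xform);

    // Each bucket stores offsets into the external point array, never the points.
    for (size_t n = 0, N = partitioner.size(); n < N; ++n) {
        std::cout << "bucket " << n << " at " << partitioner.origin(n) << ":";
        for (auto it = partitioner.indices(n); it; ++it) std::cout << ' ' << *it;
        std::cout << std::endl;
    }
    return 0;
}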
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/LevelSetPlatonic.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @author Ken Museth /// /// @file LevelSetPlatonic.h /// /// @brief Generate a narrow-band level sets of the five platonic solids. /// /// @note By definition a level set has a fixed narrow band width /// (the half width is defined by LEVEL_SET_HALF_WIDTH in Types.h), /// whereas an SDF can have a variable narrow band width. #ifndef OPENVDB_TOOLS_LEVELSETPLATONIC_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_LEVELSETPLATONIC_HAS_BEEN_INCLUDED #include <openvdb/Grid.h> #include <openvdb/Types.h> #include <openvdb/math/Math.h> #include <openvdb/math/Transform.h> #include <openvdb/tools/MeshToVolume.h> #include <openvdb/util/NullInterrupter.h> #include <type_traits> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a platonic solid. /// /// @param faceCount number of faces of the platonic solid, i.e. 4, 6, 8, 12 or 20 /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// @param interrupt a pointer adhering to the util::NullInterrupter interface /// /// @details Faces: TETRAHEDRON=4, CUBE=6, OCTAHEDRON=8, DODECAHEDRON=12, ICOSAHEDRON=20 /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetPlatonic( int faceCount, // 4, 6, 8, 12 or 20 float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH), InterruptT* interrupt = nullptr); /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a platonic solid. /// /// @param faceCount number of faces of the platonic solid, i.e. 4, 6, 8, 12 or 20 /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// /// @details Faces: TETRAHEDRON=4, CUBE=6, OCTAHEDRON=8, DODECAHEDRON=12, ICOSAHEDRON=20 /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType> typename GridType::Ptr createLevelSetPlatonic( int faceCount,// 4, 6, 8, 12 or 20 float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH)) { util::NullInterrupter tmp; return createLevelSetPlatonic<GridType>(faceCount, scale, center, voxelSize, halfWidth, &tmp); } //////////////////////////////////////////////////////////////////////////////// /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a tetrahedron. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// @param interrupt a pointer adhering to the util::NullInterrupter interface /// /// @note @c GridType::ValueType must be a floating-point scalar. 
template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetTetrahedron( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH), InterruptT* interrupt = nullptr) { return createLevelSetPlatonic<GridType, InterruptT>( 4, scale, center, voxelSize, halfWidth, interrupt); } /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a tetrahedron. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType> typename GridType::Ptr createLevelSetTetrahedron( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH)) { util::NullInterrupter tmp; return createLevelSetPlatonic<GridType>(4, scale, center, voxelSize, halfWidth, &tmp); } //////////////////////////////////////////////////////////////////////////////// /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a cube. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// @param interrupt a pointer adhering to the util::NullInterrupter interface /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetCube( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH), InterruptT* interrupt = nullptr) { return createLevelSetPlatonic<GridType>(6, scale, center, voxelSize, halfWidth, interrupt); } /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a cube. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType> typename GridType::Ptr createLevelSetCube( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH)) { util::NullInterrupter tmp; return createLevelSetPlatonic<GridType>(6, scale, center, voxelSize, halfWidth, &tmp); } //////////////////////////////////////////////////////////////////////////////// /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of an octahedron. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// @param interrupt a pointer adhering to the util::NullInterrupter interface /// /// @note @c GridType::ValueType must be a floating-point scalar. 
template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetOctahedron( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH), InterruptT* interrupt = nullptr) { return createLevelSetPlatonic<GridType>(8, scale, center, voxelSize, halfWidth, interrupt); } /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of an octahedron. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType> typename GridType::Ptr createLevelSetOctahedron( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH)) { util::NullInterrupter tmp; return createLevelSetPlatonic<GridType>(8, scale, center, voxelSize, halfWidth, &tmp); } //////////////////////////////////////////////////////////////////////////////// /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a dodecahedron. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// @param interrupt a pointer adhering to the util::NullInterrupter interface /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetDodecahedron( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH), InterruptT* interrupt = nullptr) { return createLevelSetPlatonic<GridType>(12, scale, center, voxelSize, halfWidth, interrupt); } /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of a dodecahedron. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType> typename GridType::Ptr createLevelSetDodecahedron( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH)) { util::NullInterrupter tmp; return createLevelSetPlatonic<GridType>(12, scale, center, voxelSize, halfWidth, &tmp); } //////////////////////////////////////////////////////////////////////////////// /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of an icosahedron. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// @param interrupt a pointer adhering to the util::NullInterrupter interface /// /// @note @c GridType::ValueType must be a floating-point scalar. 
template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetIcosahedron( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH), InterruptT* interrupt = nullptr) { return createLevelSetPlatonic<GridType>(20, scale, center, voxelSize, halfWidth, interrupt); } /// @brief Return a grid of type @c GridType containing a narrow-band level set /// representation of an icosahedron. /// /// @param scale scale of the platonic solid in world units /// @param center center of the platonic solid in world units /// @param voxelSize voxel size in world units /// @param halfWidth half the width of the narrow band, in voxel units /// /// @note @c GridType::ValueType must be a floating-point scalar. template<typename GridType> typename GridType::Ptr createLevelSetIcosahedron( float scale = 1.0f, const Vec3f& center = Vec3f(0.0f), float voxelSize = 0.1f, float halfWidth = float(LEVEL_SET_HALF_WIDTH)) { util::NullInterrupter tmp; return createLevelSetPlatonic<GridType>(20, scale, center, voxelSize, halfWidth, &tmp); } //////////////////////////////////////////////////////////////////////////////// template<typename GridType, typename InterruptT> typename GridType::Ptr createLevelSetPlatonic(int faceCount,float scale, const Vec3f& center, float voxelSize, float halfWidth, InterruptT *interrupt) { // GridType::ValueType is required to be a floating-point scalar. static_assert(std::is_floating_point<typename GridType::ValueType>::value, "level set grids must have scalar, floating-point value types"); const math::Transform::Ptr xform = math::Transform::createLinearTransform( voxelSize ); std::vector<Vec3f> vtx; std::vector<Vec3I> tri; std::vector<Vec4I> qua; if (faceCount == 4) {// Tetrahedron vtx.push_back( Vec3f( 0.0f, 1.0f, 0.0f) ); vtx.push_back( Vec3f(-0.942810297f, -0.333329707f, 0.0f) ); vtx.push_back( Vec3f( 0.471405149f, -0.333329707f, 0.816497624f) ); vtx.push_back( Vec3f( 0.471405149f, -0.333329707f, -0.816497624f) ); tri.push_back( Vec3I(0, 2, 3) ); tri.push_back( Vec3I(0, 3, 1) ); tri.push_back( Vec3I(0, 1, 2) ); tri.push_back( Vec3I(1, 3, 2) ); } else if (faceCount == 6) {// Cube vtx.push_back( Vec3f(-0.5f, -0.5f, -0.5f) ); vtx.push_back( Vec3f( 0.5f, -0.5f, -0.5f) ); vtx.push_back( Vec3f( 0.5f, -0.5f, 0.5f) ); vtx.push_back( Vec3f(-0.5f, -0.5f, 0.5f) ); vtx.push_back( Vec3f(-0.5f, 0.5f, -0.5f) ); vtx.push_back( Vec3f( 0.5f, 0.5f, -0.5f) ); vtx.push_back( Vec3f( 0.5f, 0.5f, 0.5f) ); vtx.push_back( Vec3f(-0.5f, 0.5f, 0.5f) ); qua.push_back( Vec4I(1, 0, 4, 5) ); qua.push_back( Vec4I(2, 1, 5, 6) ); qua.push_back( Vec4I(3, 2, 6, 7) ); qua.push_back( Vec4I(0, 3, 7, 4) ); qua.push_back( Vec4I(2, 3, 0, 1) ); qua.push_back( Vec4I(5, 4, 7, 6) ); } else if (faceCount == 8) {// Octahedron vtx.push_back( Vec3f( 0.0f, 0.0f, -1.0f) ); vtx.push_back( Vec3f( 1.0f, 0.0f, 0.0f) ); vtx.push_back( Vec3f( 0.0f, 0.0f, 1.0f) ); vtx.push_back( Vec3f(-1.0f, 0.0f, 0.0f) ); vtx.push_back( Vec3f( 0.0f,-1.0f, 0.0f) ); vtx.push_back( Vec3f( 0.0f, 1.0f, 0.0f) ); tri.push_back( Vec3I(0, 4, 3) ); tri.push_back( Vec3I(0, 1, 4) ); tri.push_back( Vec3I(1, 2, 4) ); tri.push_back( Vec3I(2, 3, 4) ); tri.push_back( Vec3I(0, 3, 5) ); tri.push_back( Vec3I(0, 5, 1) ); tri.push_back( Vec3I(1, 5, 2) ); tri.push_back( Vec3I(2, 5, 3) ); } else if (faceCount == 12) {// Dodecahedron vtx.push_back( Vec3f( 0.354437858f, 0.487842113f, -0.789344311f) ); vtx.push_back( Vec3f( 0.573492587f, -0.186338872f, -0.78934437f) ); vtx.push_back( Vec3f( 0.0f, 
-0.603005826f, -0.78934443f) ); vtx.push_back( Vec3f(-0.573492587f, -0.186338872f, -0.78934437f) ); vtx.push_back( Vec3f(-0.354437858f, 0.487842113f, -0.789344311f) ); vtx.push_back( Vec3f(-0.573492587f, 0.789345026f, -0.186338797f) ); vtx.push_back( Vec3f(-0.927930415f, -0.301502913f, -0.186338872f) ); vtx.push_back( Vec3f( 0.0f, -0.975683928f, -0.186338902f) ); vtx.push_back( Vec3f( 0.927930415f, -0.301502913f, -0.186338872f) ); vtx.push_back( Vec3f( 0.573492587f, 0.789345026f, -0.186338797f) ); vtx.push_back( Vec3f( 0.0f, 0.975683868f, 0.186338902f) ); vtx.push_back( Vec3f(-0.927930415f, 0.301502913f, 0.186338872f) ); vtx.push_back( Vec3f(-0.573492587f, -0.789345026f, 0.186338797f) ); vtx.push_back( Vec3f( 0.573492587f, -0.789345026f, 0.186338797f) ); vtx.push_back( Vec3f( 0.927930415f, 0.301502913f, 0.186338872f) ); vtx.push_back( Vec3f( 0.0f, 0.603005826f, 0.78934443f) ); vtx.push_back( Vec3f( 0.573492587f, 0.186338872f, 0.78934437f) ); vtx.push_back( Vec3f( 0.354437858f, -0.487842113f, 0.789344311f) ); vtx.push_back( Vec3f(-0.354437858f, -0.487842113f, 0.789344311f) ); vtx.push_back( Vec3f(-0.573492587f, 0.186338872f, 0.78934437f) ); qua.push_back( Vec4I(0, 1, 2, 3) ); tri.push_back( Vec3I(0, 3, 4) ); qua.push_back( Vec4I(0, 4, 5, 10) ); tri.push_back( Vec3I(0, 10, 9) ); qua.push_back( Vec4I(0, 9, 14, 8) ); tri.push_back( Vec3I(0, 8, 1) ); qua.push_back( Vec4I(1, 8, 13, 7) ); tri.push_back( Vec3I(1, 7, 2) ); qua.push_back( Vec4I(2, 7, 12, 6) ); tri.push_back( Vec3I(2, 6, 3) ); qua.push_back( Vec4I(3, 6, 11, 5) ); tri.push_back( Vec3I(3, 5, 4) ); qua.push_back( Vec4I(5, 11, 19, 15) ); tri.push_back( Vec3I(5, 15, 10) ); qua.push_back( Vec4I(6, 12, 18, 19) ); tri.push_back( Vec3I(6, 19, 11) ); qua.push_back( Vec4I(7, 13, 17, 18) ); tri.push_back( Vec3I(7, 18, 12) ); qua.push_back( Vec4I(8, 14, 16, 17) ); tri.push_back( Vec3I(8, 17, 13) ); qua.push_back( Vec4I(9, 10, 15, 16) ); tri.push_back( Vec3I(9, 16, 14) ); qua.push_back( Vec4I(15, 19, 18, 17) ); tri.push_back( Vec3I(15, 17, 16) ); } else if (faceCount == 20) {// Icosahedron vtx.push_back( Vec3f(0.0f, 0.0f, -1.0f) ); vtx.push_back( Vec3f(0.0f, 0.894427359f, -0.447213143f) ); vtx.push_back( Vec3f(0.850650847f, 0.276393682f, -0.447213203f) ); vtx.push_back( Vec3f(0.525731206f, -0.723606944f, -0.447213262f) ); vtx.push_back( Vec3f(-0.525731206f, -0.723606944f, -0.447213262f) ); vtx.push_back( Vec3f(-0.850650847f, 0.276393682f, -0.447213203f) ); vtx.push_back( Vec3f(-0.525731206f, 0.723606944f, 0.447213262f) ); vtx.push_back( Vec3f(-0.850650847f, -0.276393682f, 0.447213203f) ); vtx.push_back( Vec3f(0.0f, -0.894427359f, 0.447213143f) ); vtx.push_back( Vec3f(0.850650847f, -0.276393682f, 0.447213203f) ); vtx.push_back( Vec3f(0.525731206f, 0.723606944f, 0.447213262f) ); vtx.push_back( Vec3f(0.0f, 0.0f, 1.0f) ); tri.push_back( Vec3I( 2, 0, 1) ); tri.push_back( Vec3I( 3, 0, 2) ); tri.push_back( Vec3I( 4, 0, 3) ); tri.push_back( Vec3I( 5, 0, 4) ); tri.push_back( Vec3I( 1, 0, 5) ); tri.push_back( Vec3I( 6, 1, 5) ); tri.push_back( Vec3I( 7, 5, 4) ); tri.push_back( Vec3I( 8, 4, 3) ); tri.push_back( Vec3I( 9, 3, 2) ); tri.push_back( Vec3I(10, 2, 1) ); tri.push_back( Vec3I(10, 1, 6) ); tri.push_back( Vec3I( 6, 5, 7) ); tri.push_back( Vec3I( 7, 4, 8) ); tri.push_back( Vec3I( 8, 3, 9) ); tri.push_back( Vec3I( 9, 2, 10) ); tri.push_back( Vec3I( 6, 11, 10) ); tri.push_back( Vec3I(10, 11, 9) ); tri.push_back( Vec3I( 9, 11, 8) ); tri.push_back( Vec3I( 8, 11, 7) ); tri.push_back( Vec3I( 7, 11, 6) ); } else { OPENVDB_THROW(RuntimeError, "Invalid face 
count"); } // Apply scale and translation to all the vertices for ( size_t i = 0; i<vtx.size(); ++i ) vtx[i] = scale * vtx[i] + center; typename GridType::Ptr grid; if (interrupt == nullptr) { util::NullInterrupter tmp; grid = meshToLevelSet<GridType>(tmp, *xform, vtx, tri, qua, halfWidth); } else { grid = meshToLevelSet<GridType>(*interrupt, *xform, vtx, tri, qua, halfWidth); } return grid; } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_LEVELSETPLATONIC_HAS_BEEN_INCLUDED
19,566
C
39.849687
98
0.634417
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/PointsToMask.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @author Ken Museth /// /// @file tools/PointsToMask.h /// /// @brief This tool produces a grid where every voxel that contains a /// point is active. It employes thread-local storage for best performance. /// /// The @c PointListT template argument below refers to any class /// with the following interface (see unittest/TestPointsToMask.cc /// and SOP_OpenVDB_From_Particles.cc for practical examples): /// @code /// /// class PointList { /// ... /// public: /// /// // Return the total number of particles in list. /// size_t size() const; /// /// // Get the world space position of the nth particle. /// void getPos(size_t n, Vec3R& xyz) const; /// }; /// @endcode /// /// @note See unittest/TestPointsToMask.cc for an example. /// /// The @c InterruptT template argument below refers to any class /// with the following interface: /// @code /// class Interrupter { /// ... /// public: /// void start(const char* name = nullptr) // called when computations begin /// void end() // called when computations end /// bool wasInterrupted(int percent = -1) // return true to break computation /// }; /// @endcode /// /// @note If no template argument is provided for this InterruptT /// the util::NullInterrupter is used which implies that all /// interrupter calls are no-ops (i.e. incurs no computational overhead). #ifndef OPENVDB_TOOLS_POINTSTOMASK_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_POINTSTOMASK_HAS_BEEN_INCLUDED #include <tbb/enumerable_thread_specific.h> #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <tbb/blocked_range.h> #include <openvdb/openvdb.h> // for MaskGrid #include <openvdb/Grid.h> #include <openvdb/Types.h> #include <openvdb/util/NullInterrupter.h> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { // Forward declaration of main class template<typename GridT = MaskGrid, typename InterrupterT = util::NullInterrupter> class PointsToMask; /// @brief Makes every voxel of the @c grid active if it contains a point. /// /// @param points points that active the voxels of @c grid /// @param grid on out its voxels with points are active template<typename PointListT, typename GridT> inline void maskPoints(const PointListT& points, GridT& grid) { PointsToMask<GridT, util::NullInterrupter> tmp(grid, nullptr); tmp.addPoints(points); } /// @brief Return a MaskGrid where each binary voxel value /// is on if the voxel contains one (or more) points (i.e. /// the 3D position of a point is closer to this voxel than /// any other voxels). /// /// @param points points that active the voxels in the returned grid. /// @param xform transform from world space to voxels in grid space. template<typename PointListT> inline MaskGrid::Ptr createPointMask(const PointListT& points, const math::Transform& xform) { MaskGrid::Ptr grid = createGrid<MaskGrid>( false ); grid->setTransform( xform.copy() ); maskPoints( points, *grid ); return grid; } //////////////////////////////////////// /// @brief Makes every voxel of a grid active if it contains a point. template<typename GridT, typename InterrupterT> class PointsToMask { public: using ValueT = typename GridT::ValueType; /// @brief Constructor from a grid and optional interrupter /// /// @param grid Grid whoes voxels will have their state activated by points. /// @param interrupter Optional interrupter to prematurely terminate execution. 
explicit PointsToMask(GridT& grid, InterrupterT* interrupter = nullptr) : mGrid(&grid) , mInterrupter(interrupter) { } /// @brief Activates the state of any voxel in the input grid that contains a point. /// /// @param points List of points that active the voxels in the input grid. /// @param grainSize Set the grain-size used for multi-threading. A value of 0 /// disables multi-threading! template<typename PointListT> void addPoints(const PointListT& points, size_t grainSize = 1024) { if (mInterrupter) mInterrupter->start("PointsToMask: adding points"); if (grainSize > 0) { typename GridT::Ptr examplar = mGrid->copyWithNewTree(); PoolType pool( *examplar );//thread local storage pool of grids AddPoints<PointListT> tmp(points, pool, grainSize, *this ); if ( this->interrupt() ) return; ReducePool reducePool(pool, mGrid, size_t(0)); } else { const math::Transform& xform = mGrid->transform(); typename GridT::Accessor acc = mGrid->getAccessor(); Vec3R wPos; for (size_t i = 0, n = points.size(); i < n; ++i) { if ( this->interrupt() ) break; points.getPos(i, wPos); acc.setValueOn( xform.worldToIndexCellCentered( wPos ) ); } } if (mInterrupter) mInterrupter->end(); } private: // Disallow copy construction and copy by assignment! PointsToMask(const PointsToMask&);// not implemented PointsToMask& operator=(const PointsToMask&);// not implemented bool interrupt() const { if (mInterrupter && util::wasInterrupted(mInterrupter)) { tbb::task::self().cancel_group_execution(); return true; } return false; } // Private struct that implements concurrent thread-local // insersion of points into a grid using PoolType = tbb::enumerable_thread_specific<GridT>; template<typename PointListT> struct AddPoints; // Private class that implements concurrent reduction of a thread-local pool struct ReducePool; GridT* mGrid; InterrupterT* mInterrupter; };// PointsToMask // Private member class that implements concurrent thread-local // insersion of points into a grid template<typename GridT, typename InterrupterT> template<typename PointListT> struct PointsToMask<GridT, InterrupterT>::AddPoints { AddPoints(const PointListT& points, PoolType& pool, size_t grainSize, const PointsToMask& parent) : mPoints(&points) , mParent(&parent) , mPool(&pool) { tbb::parallel_for(tbb::blocked_range<size_t>(0, mPoints->size(), grainSize), *this); } void operator()(const tbb::blocked_range<size_t>& range) const { if (mParent->interrupt()) return; GridT& grid = mPool->local(); const math::Transform& xform = grid.transform(); typename GridT::Accessor acc = grid.getAccessor(); Vec3R wPos; for (size_t i=range.begin(), n=range.end(); i!=n; ++i) { mPoints->getPos(i, wPos); acc.setValueOn( xform.worldToIndexCellCentered( wPos ) ); } } const PointListT* mPoints; const PointsToMask* mParent; PoolType* mPool; };// end of private member class AddPoints // Private member class that implements concurrent reduction of a thread-local pool template<typename GridT, typename InterrupterT> struct PointsToMask<GridT, InterrupterT>::ReducePool { using VecT = std::vector<GridT*>; using IterT = typename VecT::iterator; using RangeT = tbb::blocked_range<IterT>; ReducePool(PoolType& pool, GridT* grid, size_t grainSize = 1) : mOwnsGrid(false) , mGrid(grid) { if (grainSize == 0) { for (typename PoolType::const_iterator i = pool.begin(); i != pool.end(); ++i) { mGrid->topologyUnion(*i); } } else { VecT grids( pool.size() ); typename PoolType::iterator i = pool.begin(); for (size_t j=0; j != pool.size(); ++i, ++j) grids[j] = &(*i); tbb::parallel_reduce( RangeT( 
grids.begin(), grids.end(), grainSize ), *this ); } } ReducePool(const ReducePool&, tbb::split) : mOwnsGrid(true) , mGrid(new GridT()) { } ~ReducePool() { if (mOwnsGrid) delete mGrid; } void operator()(const RangeT& r) { for (IterT i=r.begin(); i!=r.end(); ++i) mGrid->topologyUnion( *(*i) ); } void join(ReducePool& other) { mGrid->topologyUnion(*other.mGrid); } const bool mOwnsGrid; GridT* mGrid; };// end of private member class ReducePool } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_POINTSTOMASK_HAS_BEEN_INCLUDED
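A minimal sketch of the PointListT interface documented at the top of this file: a thin wrapper around a std::vector of world-space positions, passed to createPointMask(). The wrapper name VectorPointList and the chosen voxel size are illustrative, not part of the library.

#include <openvdb/openvdb.h>
#include <openvdb/tools/PointsToMask.h>
#include <iostream>
#include <vector>

// Satisfies the PointListT interface: size() and getPos().
struct VectorPointList {
    std::vector<openvdb::Vec3R> positions;
    size_t size() const { return positions.size(); }
    void getPos(size_t n, openvdb::Vec3R& xyz) const { xyz = positions[n]; }
};

int main()
{
    openvdb::initialize();

    VectorPointList points;
    points.positions.emplace_back(0.0, 0.0, 0.0);
    points.positions.emplace_back(1.0, 2.0, 3.0);
    points.positions.emplace_back(1.1, 2.1, 3.1);

    // Every voxel (0.5 world units wide) that contains a point becomes active.
    openvdb::math::Transform::Ptr xform =
        openvdb::math::Transform::createLinearTransform(/*voxelSize=*/0.5);
    openvdb::MaskGrid::Ptr mask = openvdb::tools::createPointMask(points, *xform);

    std::cout << mask->activeVoxelCount() << " active voxels\n";
    return 0;
}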
8,594
C
32.972332
92
0.646847
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/VelocityFields.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /////////////////////////////////////////////////////////////////////////// // /// @author Ken Museth /// /// @file VelocityFields.h /// /// @brief Defines two simple wrapper classes for advection velocity /// fields as well as VelocitySampler and VelocityIntegrator /// /// /// @details DiscreteField wraps a velocity grid and EnrightField is mostly /// intended for debugging (it's an analytical divergence free and /// periodic field). They both share the same API required by the /// LevelSetAdvection class defined in LevelSetAdvect.h. Thus, any /// class with this API should work with LevelSetAdvection. /// /// @warning Note the Field wrapper classes below always assume the velocity /// is represented in the world-frame of reference. For DiscreteField /// this implies the input grid must contain velocities in world /// coordinates. #ifndef OPENVDB_TOOLS_VELOCITY_FIELDS_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_VELOCITY_FIELDS_HAS_BEEN_INCLUDED #include <tbb/parallel_reduce.h> #include <openvdb/Platform.h> #include <openvdb/openvdb.h> #include "Interpolation.h" // for Sampler, etc. #include <openvdb/math/FiniteDifference.h> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Thin wrapper class for a velocity grid /// @note Consider replacing BoxSampler with StaggeredBoxSampler template <typename VelGridT, typename Interpolator = BoxSampler> class DiscreteField { public: typedef typename VelGridT::ValueType VectorType; typedef typename VectorType::ValueType ValueType; static_assert(std::is_floating_point<ValueType>::value, "DiscreteField requires a floating point grid."); DiscreteField(const VelGridT &vel) : mAccessor(vel.tree()) , mTransform(&vel.transform()) { } /// @brief Copy constructor DiscreteField(const DiscreteField& other) : mAccessor(other.mAccessor.tree()) , mTransform(other.mTransform) { } /// @return const reference to the transform between world and index space /// @note Use this method to determine if a client grid is /// aligned with the coordinate space of the velocity grid. const math::Transform& transform() const { return *mTransform; } /// @return the interpolated velocity at the world space position xyz /// /// @warning Not threadsafe since it uses a ValueAccessor! So use /// one instance per thread (which is fine since its lightweight). inline VectorType operator() (const Vec3d& xyz, ValueType/*dummy time*/) const { return Interpolator::sample(mAccessor, mTransform->worldToIndex(xyz)); } /// @return the velocity at the coordinate space position ijk /// /// @warning Not threadsafe since it uses a ValueAccessor! So use /// one instance per thread (which is fine since its lightweight). inline VectorType operator() (const Coord& ijk, ValueType/*dummy time*/) const { return mAccessor.getValue(ijk); } private: const typename VelGridT::ConstAccessor mAccessor;//Not thread-safe const math::Transform* mTransform; }; // end of DiscreteField /////////////////////////////////////////////////////////////////////// /// @brief Analytical, divergence-free and periodic velocity field /// @note Primarily intended for debugging! /// @warning This analytical velocity only produce meaningful values /// in the unit box in world space. In other words make sure any level /// set surface is fully enclosed in the axis aligned bounding box /// spanning 0->1 in world units. 
template <typename ScalarT = float> class EnrightField { public: typedef ScalarT ValueType; typedef math::Vec3<ScalarT> VectorType; static_assert(std::is_floating_point<ScalarT>::value, "EnrightField requires a floating point grid."); EnrightField() {} /// @return const reference to the identity transform between world and index space /// @note Use this method to determine if a client grid is /// aligned with the coordinate space of this velocity field math::Transform transform() const { return math::Transform(); } /// @return the velocity in world units, evaluated at the world /// position xyz and at the specified time inline VectorType operator() (const Vec3d& xyz, ValueType time) const; /// @return the velocity at the coordinate space position ijk inline VectorType operator() (const Coord& ijk, ValueType time) const { return (*this)(ijk.asVec3d(), time); } }; // end of EnrightField template <typename ScalarT> inline math::Vec3<ScalarT> EnrightField<ScalarT>::operator() (const Vec3d& xyz, ValueType time) const { const ScalarT pi = math::pi<ScalarT>(); const ScalarT phase = pi / ScalarT(3); const ScalarT Px = pi * ScalarT(xyz[0]), Py = pi * ScalarT(xyz[1]), Pz = pi * ScalarT(xyz[2]); const ScalarT tr = math::Cos(ScalarT(time) * phase); const ScalarT a = math::Sin(ScalarT(2)*Py); const ScalarT b = -math::Sin(ScalarT(2)*Px); const ScalarT c = math::Sin(ScalarT(2)*Pz); return math::Vec3<ScalarT>( tr * ( ScalarT(2) * math::Pow2(math::Sin(Px)) * a * c ), tr * ( b * math::Pow2(math::Sin(Py)) * c ), tr * ( b * a * math::Pow2(math::Sin(Pz)) )); } /////////////////////////////////////////////////////////////////////// /// Class to hold a Vec3 field interpreted as a velocity field. /// Primarily exists to provide a method(s) that integrate a passive /// point forward in the velocity field for a single time-step (dt) template<typename GridT = Vec3fGrid, bool Staggered = false, size_t Order = 1> class VelocitySampler { public: typedef typename GridT::ConstAccessor AccessorType; typedef typename GridT::ValueType ValueType; /// @brief Constructor from a grid VelocitySampler(const GridT& grid): mGrid(&grid), mAcc(grid.getAccessor()) { } /// @brief Copy-constructor VelocitySampler(const VelocitySampler& other): mGrid(other.mGrid), mAcc(mGrid->getAccessor()) { } /// @brief Samples the velocity at world position onto result. Supports both /// staggered (i.e. MAC) and collocated velocity grids. /// /// @return @c true if any one of the sampled values is active. /// /// @warning Not threadsafe since it uses a ValueAccessor! So use /// one instance per thread (which is fine since its lightweight). template <typename LocationType> inline bool sample(const LocationType& world, ValueType& result) const { const Vec3R xyz = mGrid->worldToIndex(Vec3R(world[0], world[1], world[2])); bool active = Sampler<Order, Staggered>::sample(mAcc, xyz, result); return active; } /// @brief Samples the velocity at world position onto result. Supports both /// staggered (i.e. MAC) and co-located velocity grids. /// /// @warning Not threadsafe since it uses a ValueAccessor! So use /// one instance per thread (which is fine since its lightweight). 
template <typename LocationType> inline ValueType sample(const LocationType& world) const { const Vec3R xyz = mGrid->worldToIndex(Vec3R(world[0], world[1], world[2])); return Sampler<Order, Staggered>::sample(mAcc, xyz); } private: // holding the Grids for the transforms const GridT* mGrid; // Velocity vector field AccessorType mAcc; };// end of VelocitySampler class /////////////////////////////////////////////////////////////////////// /// @brief Performs Runge-Kutta time integration of variable order in /// a static velocity field. /// /// @note Note that the order of the velocity sampling is controlled /// with the SampleOrder template parameter, which defaults /// to one, i.e. a tri-linear interpolation kernel. template<typename GridT = Vec3fGrid, bool Staggered = false, size_t SampleOrder = 1> class VelocityIntegrator { public: typedef typename GridT::ValueType VecType; typedef typename VecType::ValueType ElementType; VelocityIntegrator(const GridT& velGrid): mVelSampler(velGrid) { } /// @brief Variable order Runge-Kutta time integration for a single time step /// /// @param dt Time sub-step for the Runge-Kutte integrator of order OrderRK /// @param world Location in world space coordinates (both input and output) template<size_t OrderRK, typename LocationType> inline void rungeKutta(const ElementType dt, LocationType& world) const { BOOST_STATIC_ASSERT(OrderRK <= 4); VecType P(static_cast<ElementType>(world[0]), static_cast<ElementType>(world[1]), static_cast<ElementType>(world[2])); // Note the if-branching below is optimized away at compile time if (OrderRK == 0) { return;// do nothing } else if (OrderRK == 1) { VecType V0; mVelSampler.sample(P, V0); P = dt * V0; } else if (OrderRK == 2) { VecType V0, V1; mVelSampler.sample(P, V0); mVelSampler.sample(P + ElementType(0.5) * dt * V0, V1); P = dt * V1; } else if (OrderRK == 3) { VecType V0, V1, V2; mVelSampler.sample(P, V0); mVelSampler.sample(P + ElementType(0.5) * dt * V0, V1); mVelSampler.sample(P + dt * (ElementType(2.0) * V1 - V0), V2); P = dt * (V0 + ElementType(4.0) * V1 + V2) * ElementType(1.0 / 6.0); } else if (OrderRK == 4) { VecType V0, V1, V2, V3; mVelSampler.sample(P, V0); mVelSampler.sample(P + ElementType(0.5) * dt * V0, V1); mVelSampler.sample(P + ElementType(0.5) * dt * V1, V2); mVelSampler.sample(P + dt * V2, V3); P = dt * (V0 + ElementType(2.0) * (V1 + V2) + V3) * ElementType(1.0 / 6.0); } typedef typename LocationType::ValueType OutType; world += LocationType(static_cast<OutType>(P[0]), static_cast<OutType>(P[1]), static_cast<OutType>(P[2])); } private: VelocitySampler<GridT, Staggered, SampleOrder> mVelSampler; };// end of VelocityIntegrator class } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_VELOCITY_FIELDS_HAS_BEEN_INCLUDED
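A minimal sketch tying the pieces of this header together: evaluate the analytic EnrightField at a point, then advect a point through a Vec3fGrid with the fourth-order Runge-Kutta integrator. The grid carries only a constant background velocity, a stand-in for real simulation data.

#include <openvdb/openvdb.h>
#include <openvdb/tools/VelocityFields.h>
#include <iostream>

int main()
{
    openvdb::initialize();

    // Analytic, divergence-free test field (only meaningful inside the unit box).
    openvdb::tools::EnrightField<float> enright;
    const openvdb::Vec3f v = enright(openvdb::Vec3d(0.35, 0.35, 0.35), /*time=*/0.0f);
    std::cout << "Enright velocity: " << v << std::endl;

    // Velocity grid whose background value is (0, 0, 1): sampling anywhere returns
    // that constant, so one RK4 step with dt = 0.5 moves the point 0.5 units in +z.
    openvdb::Vec3fGrid::Ptr velGrid =
        openvdb::Vec3fGrid::create(/*background=*/openvdb::Vec3f(0.0f, 0.0f, 1.0f));
    openvdb::tools::VelocityIntegrator<openvdb::Vec3fGrid> rk(*velGrid);

    openvdb::Vec3d p(0.0, 0.0, 0.0);
    rk.rungeKutta<4>(/*dt=*/0.5f, p);
    std::cout << "Advected point: " << p << std::endl;
    return 0;
}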
10,691
C
37.599278
99
0.634272
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/Clip.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file Clip.h /// /// @brief Functions to clip a grid against a bounding box, a camera frustum, /// or another grid's active voxel topology #ifndef OPENVDB_TOOLS_CLIP_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_CLIP_HAS_BEEN_INCLUDED #include <openvdb/Grid.h> #include <openvdb/math/Math.h> // for math::isNegative() #include <openvdb/math/Maps.h> // for math::NonlinearFrustumMap #include <openvdb/tree/LeafManager.h> #include "GridTransformer.h" // for tools::resampleToMatch() #include "Prune.h" #include <tbb/blocked_range.h> #include <tbb/parallel_reduce.h> #include <type_traits> // for std::enable_if, std::is_same #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Clip the given grid against a world-space bounding box /// and return a new grid containing the result. /// @param grid the grid to be clipped /// @param bbox a world-space bounding box /// @param keepInterior if true, discard voxels that lie outside the bounding box; /// if false, discard voxels that lie inside the bounding box /// @warning Clipping a level set will likely produce a grid that is /// no longer a valid level set. template<typename GridType> inline typename GridType::Ptr clip(const GridType& grid, const BBoxd& bbox, bool keepInterior = true); /// @brief Clip the given grid against a frustum and return a new grid containing the result. /// @param grid the grid to be clipped /// @param frustum a frustum map /// @param keepInterior if true, discard voxels that lie outside the frustum; /// if false, discard voxels that lie inside the frustum /// @warning Clipping a level set will likely produce a grid that is /// no longer a valid level set. template<typename GridType> inline typename GridType::Ptr clip(const GridType& grid, const math::NonlinearFrustumMap& frustum, bool keepInterior = true); /// @brief Clip a grid against the active voxels of another grid /// and return a new grid containing the result. /// @param grid the grid to be clipped /// @param mask a grid whose active voxels form a boolean clipping mask /// @param keepInterior if true, discard voxels that do not intersect the mask; /// if false, discard voxels that intersect the mask /// @details The mask grid need not have the same transform as the source grid. /// Also, if the mask grid is a level set, consider using tools::sdfInteriorMask /// to construct a new mask comprising the interior (rather than the narrow band) /// of the level set. /// @warning Clipping a level set will likely produce a grid that is /// no longer a valid level set. template<typename GridType, typename MaskTreeType> inline typename GridType::Ptr clip(const GridType& grid, const Grid<MaskTreeType>& mask, bool keepInterior = true); //////////////////////////////////////// namespace clip_internal { // Use either MaskGrids or BoolGrids internally. // (MaskGrids have a somewhat lower memory footprint.) 
using MaskValueType = ValueMask; //using MaskValueType = bool; template<typename TreeT> class MaskInteriorVoxels { public: using ValueT = typename TreeT::ValueType; using LeafNodeT = typename TreeT::LeafNodeType; MaskInteriorVoxels(const TreeT& tree): mAcc(tree) {} template<typename LeafNodeType> void operator()(LeafNodeType& leaf, size_t /*leafIndex*/) const { const auto* refLeaf = mAcc.probeConstLeaf(leaf.origin()); if (refLeaf) { for (auto iter = leaf.beginValueOff(); iter; ++iter) { const auto pos = iter.pos(); leaf.setActiveState(pos, math::isNegative(refLeaf->getValue(pos))); } } } private: tree::ValueAccessor<const TreeT> mAcc; }; //////////////////////////////////////// template<typename TreeT> class CopyLeafNodes { public: using MaskTreeT = typename TreeT::template ValueConverter<MaskValueType>::Type; using MaskLeafManagerT = tree::LeafManager<const MaskTreeT>; CopyLeafNodes(const TreeT&, const MaskLeafManagerT&); void run(bool threaded = true); typename TreeT::Ptr tree() const { return mNewTree; } CopyLeafNodes(CopyLeafNodes&, tbb::split); void operator()(const tbb::blocked_range<size_t>&); void join(const CopyLeafNodes& rhs) { mNewTree->merge(*rhs.mNewTree); } private: const MaskTreeT* mClipMask; const TreeT* mTree; const MaskLeafManagerT* mLeafNodes; typename TreeT::Ptr mNewTree; }; template<typename TreeT> CopyLeafNodes<TreeT>::CopyLeafNodes(const TreeT& tree, const MaskLeafManagerT& leafNodes) : mTree(&tree) , mLeafNodes(&leafNodes) , mNewTree(new TreeT(mTree->background())) { } template<typename TreeT> CopyLeafNodes<TreeT>::CopyLeafNodes(CopyLeafNodes& rhs, tbb::split) : mTree(rhs.mTree) , mLeafNodes(rhs.mLeafNodes) , mNewTree(new TreeT(mTree->background())) { } template<typename TreeT> void CopyLeafNodes<TreeT>::run(bool threaded) { if (threaded) tbb::parallel_reduce(mLeafNodes->getRange(), *this); else (*this)(mLeafNodes->getRange()); } template<typename TreeT> void CopyLeafNodes<TreeT>::operator()(const tbb::blocked_range<size_t>& range) { tree::ValueAccessor<TreeT> acc(*mNewTree); tree::ValueAccessor<const TreeT> refAcc(*mTree); for (auto n = range.begin(); n != range.end(); ++n) { const auto& maskLeaf = mLeafNodes->leaf(n); const auto& ijk = maskLeaf.origin(); const auto* refLeaf = refAcc.probeConstLeaf(ijk); auto* newLeaf = acc.touchLeaf(ijk); if (refLeaf) { for (auto it = maskLeaf.cbeginValueOn(); it; ++it) { const auto pos = it.pos(); newLeaf->setValueOnly(pos, refLeaf->getValue(pos)); newLeaf->setActiveState(pos, refLeaf->isValueOn(pos)); } } else { typename TreeT::ValueType value; bool isActive = refAcc.probeValue(ijk, value); for (auto it = maskLeaf.cbeginValueOn(); it; ++it) { const auto pos = it.pos(); newLeaf->setValueOnly(pos, value); newLeaf->setActiveState(pos, isActive); } } } } //////////////////////////////////////// struct BoolSampler { static const char* name() { return "bin"; } static int radius() { return 2; } static bool mipmap() { return false; } static bool consistent() { return true; } template<class TreeT> static bool sample(const TreeT& inTree, const Vec3R& inCoord, typename TreeT::ValueType& result) { return inTree.probeValue(Coord::floor(inCoord), result); } }; //////////////////////////////////////// // Convert a grid of one type to a grid of another type template<typename FromGridT, typename ToGridT> struct ConvertGrid { using FromGridCPtrT = typename FromGridT::ConstPtr; using ToGridPtrT = typename ToGridT::Ptr; ToGridPtrT operator()(const FromGridCPtrT& grid) { return ToGridPtrT(new ToGridT(*grid)); } }; // Partial specialization that 
avoids copying when // the input and output grid types are the same template<typename GridT> struct ConvertGrid<GridT, GridT> { using GridCPtrT = typename GridT::ConstPtr; GridCPtrT operator()(const GridCPtrT& grid) { return grid; } }; //////////////////////////////////////// // Convert a grid of arbitrary type to a mask grid with the same tree configuration // and return a pointer to the new grid. /// @private template<typename GridT> inline typename std::enable_if<!std::is_same<MaskValueType, typename GridT::BuildType>::value, typename GridT::template ValueConverter<MaskValueType>::Type::Ptr>::type convertToMaskGrid(const GridT& grid) { using MaskGridT = typename GridT::template ValueConverter<MaskValueType>::Type; auto mask = MaskGridT::create(/*background=*/false); mask->topologyUnion(grid); mask->setTransform(grid.constTransform().copy()); return mask; } // Overload that avoids any processing if the input grid is already a mask grid /// @private template<typename GridT> inline typename std::enable_if<std::is_same<MaskValueType, typename GridT::BuildType>::value, typename GridT::ConstPtr>::type convertToMaskGrid(const GridT& grid) { return grid.copy(); // shallow copy } //////////////////////////////////////// /// @private template<typename GridType> inline typename GridType::Ptr doClip( const GridType& grid, const typename GridType::template ValueConverter<MaskValueType>::Type& clipMask, bool keepInterior) { using TreeT = typename GridType::TreeType; using MaskTreeT = typename GridType::TreeType::template ValueConverter<MaskValueType>::Type; const auto gridClass = grid.getGridClass(); const auto& tree = grid.tree(); MaskTreeT gridMask(false); gridMask.topologyUnion(tree); if (gridClass == GRID_LEVEL_SET) { tree::LeafManager<MaskTreeT> leafNodes(gridMask); leafNodes.foreach(MaskInteriorVoxels<TreeT>(tree)); tree::ValueAccessor<const TreeT> acc(tree); typename MaskTreeT::ValueAllIter iter(gridMask); iter.setMaxDepth(MaskTreeT::ValueAllIter::LEAF_DEPTH - 1); for ( ; iter; ++iter) { iter.setActiveState(math::isNegative(acc.getValue(iter.getCoord()))); } } if (keepInterior) { gridMask.topologyIntersection(clipMask.constTree()); } else { gridMask.topologyDifference(clipMask.constTree()); } auto outGrid = grid.copyWithNewTree(); { // Copy voxel values and states. tree::LeafManager<const MaskTreeT> leafNodes(gridMask); CopyLeafNodes<TreeT> maskOp(tree, leafNodes); maskOp.run(); outGrid->setTree(maskOp.tree()); } { // Copy tile values and states. tree::ValueAccessor<const TreeT> refAcc(tree); tree::ValueAccessor<const MaskTreeT> maskAcc(gridMask); typename TreeT::ValueAllIter it(outGrid->tree()); it.setMaxDepth(TreeT::ValueAllIter::LEAF_DEPTH - 1); for ( ; it; ++it) { Coord ijk = it.getCoord(); if (maskAcc.isValueOn(ijk)) { typename TreeT::ValueType value; bool isActive = refAcc.probeValue(ijk, value); it.setValue(value); if (!isActive) it.setValueOff(); } } } outGrid->setTransform(grid.transform().copy()); if (gridClass != GRID_LEVEL_SET) outGrid->setGridClass(gridClass); return outGrid; } } // namespace clip_internal //////////////////////////////////////// /// @private template<typename GridType> inline typename GridType::Ptr clip(const GridType& grid, const BBoxd& bbox, bool keepInterior) { using MaskValueT = clip_internal::MaskValueType; using MaskGridT = typename GridType::template ValueConverter<MaskValueT>::Type; // Transform the world-space bounding box into the source grid's index space. 
Vec3d idxMin, idxMax; math::calculateBounds(grid.constTransform(), bbox.min(), bbox.max(), idxMin, idxMax); CoordBBox region(Coord::floor(idxMin), Coord::floor(idxMax)); // Construct a boolean mask grid that is true inside the index-space bounding box // and false everywhere else. MaskGridT clipMask(/*background=*/false); clipMask.fill(region, /*value=*/true, /*active=*/true); return clip_internal::doClip(grid, clipMask, keepInterior); } /// @private template<typename SrcGridType, typename ClipTreeType> inline typename SrcGridType::Ptr clip(const SrcGridType& srcGrid, const Grid<ClipTreeType>& clipGrid, bool keepInterior) { using MaskValueT = clip_internal::MaskValueType; using ClipGridType = Grid<ClipTreeType>; using SrcMaskGridType = typename SrcGridType::template ValueConverter<MaskValueT>::Type; using ClipMaskGridType = typename ClipGridType::template ValueConverter<MaskValueT>::Type; // Convert the clipping grid to a boolean-valued mask grid with the same tree configuration. auto maskGrid = clip_internal::convertToMaskGrid(clipGrid); // Resample the mask grid into the source grid's index space. if (srcGrid.constTransform() != maskGrid->constTransform()) { auto resampledMask = ClipMaskGridType::create(/*background=*/false); resampledMask->setTransform(srcGrid.constTransform().copy()); tools::resampleToMatch<clip_internal::BoolSampler>(*maskGrid, *resampledMask); tools::prune(resampledMask->tree()); maskGrid = resampledMask; } // Convert the mask grid to a mask grid with the same tree configuration as the source grid. auto clipMask = clip_internal::ConvertGrid< /*from=*/ClipMaskGridType, /*to=*/SrcMaskGridType>()(maskGrid); // Clip the source grid against the mask grid. return clip_internal::doClip(srcGrid, *clipMask, keepInterior); } /// @private template<typename GridType> inline typename GridType::Ptr clip(const GridType& inGrid, const math::NonlinearFrustumMap& frustumMap, bool keepInterior) { using ValueT = typename GridType::ValueType; using TreeT = typename GridType::TreeType; using LeafT = typename TreeT::LeafNodeType; const auto& gridXform = inGrid.transform(); const auto frustumIndexBBox = frustumMap.getBBox(); // Return true if index-space point (i,j,k) lies inside the frustum. auto frustumContainsCoord = [&](const Coord& ijk) -> bool { auto xyz = gridXform.indexToWorld(ijk); xyz = frustumMap.applyInverseMap(xyz); return frustumIndexBBox.isInside(xyz); }; // Return the frustum index-space bounding box of the corners of // the given grid index-space bounding box. auto toFrustumIndexSpace = [&](const CoordBBox& inBBox) -> BBoxd { const Coord bounds[2] = { inBBox.min(), inBBox.max() }; Coord ijk; BBoxd outBBox; for (int i = 0; i < 8; ++i) { ijk[0] = bounds[(i & 1) >> 0][0]; ijk[1] = bounds[(i & 2) >> 1][1]; ijk[2] = bounds[(i & 4) >> 2][2]; auto xyz = gridXform.indexToWorld(ijk); xyz = frustumMap.applyInverseMap(xyz); outBBox.expand(xyz); } return outBBox; }; // Construct an output grid with the same transform and metadata as the input grid. auto outGrid = inGrid.copyWithNewTree(); if (outGrid->getGridClass() == GRID_LEVEL_SET) { // After clipping, a level set grid might no longer be a valid SDF. outGrid->setGridClass(GRID_UNKNOWN); } const auto& bg = outGrid->background(); auto outAcc = outGrid->getAccessor(); // Copy active and inactive tiles that intersect the clipping region // from the input grid to the output grid. // ("Clipping region" refers to either the interior or the exterior // of the frustum, depending on the value of keepInterior.) 
auto tileIter = inGrid.beginValueAll(); tileIter.setMaxDepth(GridType::ValueAllIter::LEAF_DEPTH - 1); CoordBBox tileBBox; for ( ; tileIter; ++tileIter) { const bool tileActive = tileIter.isValueOn(); const auto& tileValue = tileIter.getValue(); // Skip background tiles. if (!tileActive && math::isApproxEqual(tileValue, bg)) continue; // Transform the tile's bounding box into frustum index space. tileIter.getBoundingBox(tileBBox); const auto tileFrustumBBox = toFrustumIndexSpace(tileBBox); // Determine whether any or all of the tile intersects the clipping region. enum class CopyTile { kNone, kPartial, kFull }; auto copyTile = CopyTile::kNone; if (keepInterior) { if (frustumIndexBBox.isInside(tileFrustumBBox)) { copyTile = CopyTile::kFull; } else if (frustumIndexBBox.hasOverlap(tileFrustumBBox)) { copyTile = CopyTile::kPartial; } } else { if (!frustumIndexBBox.hasOverlap(tileFrustumBBox)) { copyTile = CopyTile::kFull; } else if (!frustumIndexBBox.isInside(tileFrustumBBox)) { copyTile = CopyTile::kPartial; } } switch (copyTile) { case CopyTile::kNone: break; case CopyTile::kFull: // Copy the entire tile. outAcc.addTile(tileIter.getLevel(), tileBBox.min(), tileValue, tileActive); break; case CopyTile::kPartial: // Copy only voxels inside the clipping region. for (std::vector<CoordBBox> bboxVec = { tileBBox }; !bboxVec.empty(); ) { // For efficiency, subdivide sufficiently large tiles and discard // subregions based on additional bounding box intersection tests. // The mimimum subregion size is chosen so that cost of the // bounding box test is comparable to testing every voxel. if (bboxVec.back().volume() > 64 && bboxVec.back().is_divisible()) { // Subdivide this region in-place and append the other half to the list. bboxVec.emplace_back(bboxVec.back(), tbb::split{}); continue; } auto subBBox = bboxVec.back(); bboxVec.pop_back(); // Discard the subregion if it lies completely outside the clipping region. if (keepInterior) { if (!frustumIndexBBox.hasOverlap(toFrustumIndexSpace(subBBox))) continue; } else { if (frustumIndexBBox.isInside(toFrustumIndexSpace(subBBox))) continue; } // Test every voxel within the subregion. for (const auto& ijk: subBBox) { if (frustumContainsCoord(ijk) == keepInterior) { if (tileActive) { outAcc.setValueOn(ijk, tileValue); } else { outAcc.setValueOff(ijk, tileValue); } } } } break; } } tools::prune(outGrid->tree()); // Ensure that the output grid has the same leaf node topology as the input grid, // with the exception of leaf nodes that lie completely outside the clipping region. // (This operation is serial.) for (auto leafIter = inGrid.constTree().beginLeaf(); leafIter; ++leafIter) { const auto leafBBox = leafIter->getNodeBoundingBox(); const auto leafFrustumBBox = toFrustumIndexSpace(leafBBox); if (keepInterior) { if (frustumIndexBBox.hasOverlap(leafFrustumBBox)) { outAcc.touchLeaf(leafBBox.min()); } } else { if (!frustumIndexBBox.hasOverlap(leafFrustumBBox) || !frustumIndexBBox.isInside(leafFrustumBBox)) { outAcc.touchLeaf(leafBBox.min()); } } } // In parallel across output leaf nodes, copy leaf voxels // from the input grid to the output grid. 
tree::LeafManager<TreeT> outLeafNodes{outGrid->tree()}; outLeafNodes.foreach( [&](LeafT& leaf, size_t /*idx*/) { auto inAcc = inGrid.getConstAccessor(); ValueT val; for (auto voxelIter = leaf.beginValueAll(); voxelIter; ++voxelIter) { const auto ijk = voxelIter.getCoord(); if (frustumContainsCoord(ijk) == keepInterior) { const bool active = inAcc.probeValue(ijk, val); voxelIter.setValue(val); voxelIter.setValueOn(active); } } } ); return outGrid; } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_CLIP_HAS_BEEN_INCLUDED
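A minimal sketch of the bounding-box overload declared at the top of this file: clip a level-set sphere to a world-space box. createLevelSetSphere (from tools/LevelSetSphere.h) is used here only to have a grid to clip; the box extents are arbitrary.

#include <openvdb/openvdb.h>
#include <openvdb/tools/Clip.h>
#include <openvdb/tools/LevelSetSphere.h>
#include <iostream>

int main()
{
    openvdb::initialize();

    openvdb::FloatGrid::Ptr sphere =
        openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
            /*radius=*/10.0f, /*center=*/openvdb::Vec3f(0.0f), /*voxelSize=*/0.5f);

    // Keep only voxels inside this world-space box (keepInterior defaults to true).
    const openvdb::BBoxd bbox(openvdb::Vec3d(0.0, -15.0, -15.0),
                              openvdb::Vec3d(15.0, 15.0, 15.0));
    openvdb::FloatGrid::Ptr clipped = openvdb::tools::clip(*sphere, bbox);

    // As the warnings above note, the result is generally no longer a valid level set.
    std::cout << clipped->activeVoxelCount() << " active voxels after clipping\n";
    return 0;
}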
20,145
C
34.46831
97
0.636833
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tools/TopologyToLevelSet.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file TopologyToLevelSet.h /// /// @brief This tool generates a narrow-band signed distance field / level set /// from the interface between active and inactive voxels in a vdb grid. /// /// @par Example: /// Combine with @c tools::PointsToVolume for fast point cloud to level set conversion. #ifndef OPENVDB_TOOLS_TOPOLOGY_TO_LEVELSET_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_TOPOLOGY_TO_LEVELSET_HAS_BEEN_INCLUDED #include "LevelSetFilter.h" #include "Morphology.h" // for erodeVoxels and dilateActiveValues #include "SignedFloodFill.h" #include <openvdb/Grid.h> #include <openvdb/Types.h> #include <openvdb/math/FiniteDifference.h> // for math::BiasedGradientScheme #include <openvdb/util/NullInterrupter.h> #include <tbb/task_group.h> #include <algorithm> // for std::min(), std::max() #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tools { /// @brief Compute the narrow-band signed distance to the interface between /// active and inactive voxels in the input grid. /// /// @return A shared pointer to a new sdf / level set grid of type @c float /// /// @param grid Input grid of arbitrary type whose active voxels are used /// in constructing the level set. /// @param halfWidth Half the width of the narrow band in voxel units. /// @param closingSteps Number of morphological closing steps used to fill gaps /// in the active voxel region. /// @param dilation Number of voxels to expand the active voxel region. /// @param smoothingSteps Number of smoothing interations. template<typename GridT> inline typename GridT::template ValueConverter<float>::Type::Ptr topologyToLevelSet(const GridT& grid, int halfWidth = 3, int closingSteps = 1, int dilation = 0, int smoothingSteps = 0); /// @brief Compute the narrow-band signed distance to the interface between /// active and inactive voxels in the input grid. /// /// @return A shared pointer to a new sdf / level set grid of type @c float /// /// @param grid Input grid of arbitrary type whose active voxels are used /// in constructing the level set. /// @param halfWidth Half the width of the narrow band in voxel units. /// @param closingSteps Number of morphological closing steps used to fill gaps /// in the active voxel region. /// @param dilation Number of voxels to expand the active voxel region. /// @param smoothingSteps Number of smoothing interations. /// @param interrupt Optional object adhering to the util::NullInterrupter interface. 
template<typename GridT, typename InterrupterT> inline typename GridT::template ValueConverter<float>::Type::Ptr topologyToLevelSet(const GridT& grid, int halfWidth = 3, int closingSteps = 1, int dilation = 0, int smoothingSteps = 0, InterrupterT* interrupt = nullptr); //////////////////////////////////////// namespace ttls_internal { template<typename TreeT> struct DilateOp { DilateOp(TreeT& t, int n) : tree(&t), size(n) {} void operator()() const { dilateActiveValues( *tree, size, tools::NN_FACE, tools::IGNORE_TILES); } TreeT* tree; const int size; }; template<typename TreeT> struct ErodeOp { ErodeOp(TreeT& t, int n) : tree(&t), size(n) {} void operator()() const { erodeVoxels( *tree, size); } TreeT* tree; const int size; }; template<typename TreeType> struct OffsetAndMinComp { using LeafNodeType = typename TreeType::LeafNodeType; using ValueType = typename TreeType::ValueType; OffsetAndMinComp(std::vector<LeafNodeType*>& lhsNodes, const TreeType& rhsTree, ValueType offset) : mLhsNodes(lhsNodes.empty() ? nullptr : &lhsNodes[0]), mRhsTree(&rhsTree), mOffset(offset) { } void operator()(const tbb::blocked_range<size_t>& range) const { using Iterator = typename LeafNodeType::ValueOnIter; tree::ValueAccessor<const TreeType> rhsAcc(*mRhsTree); const ValueType offset = mOffset; for (size_t n = range.begin(), N = range.end(); n < N; ++n) { LeafNodeType& lhsNode = *mLhsNodes[n]; const LeafNodeType * rhsNodePt = rhsAcc.probeConstLeaf(lhsNode.origin()); if (!rhsNodePt) continue; for (Iterator it = lhsNode.beginValueOn(); it; ++it) { ValueType& val = const_cast<ValueType&>(it.getValue()); val = std::min(val, offset + rhsNodePt->getValue(it.pos())); } } } private: LeafNodeType * * const mLhsNodes; TreeType const * const mRhsTree; ValueType const mOffset; }; // struct OffsetAndMinComp template<typename GridType, typename InterrupterType> inline void normalizeLevelSet(GridType& grid, const int halfWidthInVoxels, InterrupterType* interrupt = nullptr) { LevelSetFilter<GridType, GridType, InterrupterType> filter(grid, interrupt); filter.setSpatialScheme(math::FIRST_BIAS); filter.setNormCount(halfWidthInVoxels); filter.normalize(); filter.prune(); } template<typename GridType, typename InterrupterType> inline void smoothLevelSet(GridType& grid, int iterations, int halfBandWidthInVoxels, InterrupterType* interrupt = nullptr) { using ValueType = typename GridType::ValueType; using TreeType = typename GridType::TreeType; using LeafNodeType = typename TreeType::LeafNodeType; GridType filterGrid(grid); LevelSetFilter<GridType, GridType, InterrupterType> filter(filterGrid, interrupt); filter.setSpatialScheme(math::FIRST_BIAS); for (int n = 0; n < iterations; ++n) { if (interrupt && interrupt->wasInterrupted()) break; filter.mean(1); } std::vector<LeafNodeType*> nodes; grid.tree().getNodes(nodes); const ValueType offset = ValueType(double(0.5) * grid.transform().voxelSize()[0]); tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()), OffsetAndMinComp<TreeType>(nodes, filterGrid.tree(), -offset)); // Clean up any damanage that was done by the min operation normalizeLevelSet(grid, halfBandWidthInVoxels, interrupt); } } // namespace ttls_internal template<typename GridT, typename InterrupterT> inline typename GridT::template ValueConverter<float>::Type::Ptr topologyToLevelSet(const GridT& grid, int halfWidth, int closingSteps, int dilation, int smoothingSteps, InterrupterT* interrupt) { using MaskTreeT = typename GridT::TreeType::template ValueConverter<ValueMask>::Type; using FloatTreeT = typename 
GridT::TreeType::template ValueConverter<float>::Type; using FloatGridT = Grid<FloatTreeT>; // Check inputs halfWidth = std::max(halfWidth, 1); closingSteps = std::max(closingSteps, 0); dilation = std::max(dilation, 0); if (!grid.hasUniformVoxels()) { OPENVDB_THROW(ValueError, "Non-uniform voxels are not supported!"); } // Copy the topology into a MaskGrid. MaskTreeT maskTree( grid.tree(), false/*background*/, openvdb::TopologyCopy() ); // Morphological closing operation. dilateActiveValues( maskTree, closingSteps + dilation, tools::NN_FACE, tools::IGNORE_TILES ); erodeVoxels( maskTree, closingSteps ); // Generate a volume with an implicit zero crossing at the boundary // between active and inactive values in the input grid. const float background = float(grid.voxelSize()[0]) * float(halfWidth); typename FloatTreeT::Ptr lsTree( new FloatTreeT( maskTree, /*out=*/background, /*in=*/-background, openvdb::TopologyCopy() ) ); tbb::task_group pool; pool.run( ttls_internal::ErodeOp< MaskTreeT >( maskTree, halfWidth ) ); pool.run( ttls_internal::DilateOp<FloatTreeT>( *lsTree , halfWidth ) ); pool.wait();// wait for both tasks to complete lsTree->topologyDifference( maskTree ); tools::pruneLevelSet( *lsTree, /*threading=*/true); // Create a level set grid from the tree typename FloatGridT::Ptr lsGrid = FloatGridT::create( lsTree ); lsGrid->setTransform( grid.transform().copy() ); lsGrid->setGridClass( openvdb::GRID_LEVEL_SET ); // Use a PDE based scheme to propagate distance values from the // implicit zero crossing. ttls_internal::normalizeLevelSet(*lsGrid, 3*halfWidth, interrupt); // Additional filtering if (smoothingSteps > 0) { ttls_internal::smoothLevelSet(*lsGrid, smoothingSteps, halfWidth, interrupt); } return lsGrid; } template<typename GridT> inline typename GridT::template ValueConverter<float>::Type::Ptr topologyToLevelSet(const GridT& grid, int halfWidth, int closingSteps, int dilation, int smoothingSteps) { util::NullInterrupter interrupt; return topologyToLevelSet(grid, halfWidth, closingSteps, dilation, smoothingSteps, &interrupt); } } // namespace tools } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_TOPOLOGY_TO_LEVELSET_HAS_BEEN_INCLUDED
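A minimal sketch of topologyToLevelSet(): any grid whose active voxels outline a region will do, so a BoolGrid with a dense block of active voxels stands in for, e.g., a rasterized point cloud. The voxel size and block extents are arbitrary illustrative values.

#include <openvdb/openvdb.h>
#include <openvdb/tools/TopologyToLevelSet.h>
#include <iostream>

int main()
{
    openvdb::initialize();

    // Only the active-voxel topology matters; the stored values are ignored.
    openvdb::BoolGrid::Ptr topo = openvdb::BoolGrid::create(false);
    topo->setTransform(openvdb::math::Transform::createLinearTransform(/*voxelSize=*/0.25));
    topo->denseFill(openvdb::CoordBBox(openvdb::Coord(0), openvdb::Coord(19)),
        /*value=*/true, /*active=*/true);

    // halfWidth = 3 voxels, one morphological closing step, no dilation or smoothing.
    openvdb::FloatGrid::Ptr sdf =
        openvdb::tools::topologyToLevelSet(*topo, /*halfWidth=*/3, /*closingSteps=*/1);

    std::cout << sdf->activeVoxelCount() << " active voxels in the level set\n";
    return 0;
}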
9,138
C
34.285714
104
0.69118
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/RenderModules.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_VIEWER_RENDERMODULES_HAS_BEEN_INCLUDED #define OPENVDB_VIEWER_RENDERMODULES_HAS_BEEN_INCLUDED #include <openvdb/openvdb.h> #include <openvdb/tools/VolumeToMesh.h> #include <openvdb/tools/MeshToVolume.h> #include <openvdb/tools/PointScatter.h> #include <openvdb/tree/LeafManager.h> #include <openvdb/math/Operators.h> #include <string> #include <vector> #if defined(__APPLE__) || defined(MACOSX) #include <OpenGL/gl.h> #include <OpenGL/glu.h> #elif defined(_WIN32) #include <GL/glew.h> #else #include <GL/gl.h> #include <GL/glu.h> #endif namespace openvdb_viewer { // OpenGL helper objects class BufferObject { public: BufferObject(); ~BufferObject(); void render() const; /// @note accepted @c primType: GL_POINTS, GL_LINE_STRIP, GL_LINE_LOOP, /// GL_LINES, GL_TRIANGLE_STRIP, GL_TRIANGLE_FAN, GL_TRIANGLES, /// GL_QUAD_STRIP, GL_QUADS and GL_POLYGON void genIndexBuffer(const std::vector<GLuint>&, GLenum primType); void genVertexBuffer(const std::vector<GLfloat>&); void genNormalBuffer(const std::vector<GLfloat>&); void genColorBuffer(const std::vector<GLfloat>&); void clear(); private: GLuint mVertexBuffer, mNormalBuffer, mIndexBuffer, mColorBuffer; GLenum mPrimType; GLsizei mPrimNum; }; class ShaderProgram { public: ShaderProgram(); ~ShaderProgram(); void setVertShader(const std::string&); void setFragShader(const std::string&); void build(); void build(const std::vector<GLchar*>& attributes); void startShading() const; void stopShading() const; void clear(); private: GLuint mProgram, mVertShader, mFragShader; }; //////////////////////////////////////// /// @brief interface class class RenderModule { public: virtual ~RenderModule() {} virtual void render() = 0; bool visible() { return mIsVisible; } void setVisible(bool b) { mIsVisible = b; } protected: RenderModule(): mIsVisible(true) {} bool mIsVisible; }; //////////////////////////////////////// /// @brief Basic render module, axis gnomon and ground plane. class ViewportModule: public RenderModule { public: ViewportModule(); ~ViewportModule() override = default; void render() override; private: float mAxisGnomonScale, mGroundPlaneScale; }; //////////////////////////////////////// /// @brief Tree topology render module class TreeTopologyModule: public RenderModule { public: TreeTopologyModule(const openvdb::GridBase::ConstPtr&); ~TreeTopologyModule() override = default; void render() override; private: void init(); const openvdb::GridBase::ConstPtr& mGrid; BufferObject mBufferObject; bool mIsInitialized; ShaderProgram mShader; }; //////////////////////////////////////// /// @brief Module to render active voxels as points class VoxelModule: public RenderModule { public: VoxelModule(const openvdb::GridBase::ConstPtr&); ~VoxelModule() override = default; void render() override; private: void init(); const openvdb::GridBase::ConstPtr& mGrid; BufferObject mInteriorBuffer, mSurfaceBuffer, mVectorBuffer; bool mIsInitialized; ShaderProgram mFlatShader, mSurfaceShader; }; //////////////////////////////////////// /// @brief Surfacing render module class MeshModule: public RenderModule { public: MeshModule(const openvdb::GridBase::ConstPtr&); ~MeshModule() override = default; void render() override; private: void init(); const openvdb::GridBase::ConstPtr& mGrid; BufferObject mBufferObject; bool mIsInitialized; ShaderProgram mShader; }; } // namespace openvdb_viewer #endif // OPENVDB_VIEWER_RENDERMODULES_HAS_BEEN_INCLUDED
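A minimal sketch of the BufferObject helper declared above, assuming an OpenGL context is already current (vdb_view creates one through GLFW); without a context the GL calls below are invalid. drawTriangle is an illustrative free function, not part of the viewer.

#include "RenderModules.h"
#include <vector>

// Upload one triangle into a BufferObject and draw it with the stored primitive type.
void drawTriangle(openvdb_viewer::BufferObject& buffer)
{
    const std::vector<GLfloat> points = {
        0.f, 0.f, 0.f,   // vertex 0
        1.f, 0.f, 0.f,   // vertex 1
        0.f, 1.f, 0.f }; // vertex 2
    const std::vector<GLuint> indices = { 0, 1, 2 };

    buffer.genVertexBuffer(points);
    buffer.genIndexBuffer(indices, GL_TRIANGLES); // GL_TRIANGLES is among the accepted primType values
    buffer.render();
}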
3,803
C
19.562162
75
0.670523
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/Viewer.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "Viewer.h" #include "Camera.h" #include "ClipBox.h" #include "Font.h" #include "RenderModules.h" #include <openvdb/util/Formats.h> // for formattedInt() #include <openvdb/util/logging.h> #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointCount.h> #include <openvdb/version.h> // for OPENVDB_LIBRARY_MAJOR_VERSION, etc. #include <tbb/atomic.h> #include <tbb/mutex.h> #include <cmath> // for fabs() #include <iomanip> // for std::setprecision() #include <iostream> #include <memory> #include <sstream> #include <vector> #include <limits> #include <thread> #include <chrono> #if defined(_WIN32) #include <GL/glew.h> #endif #include <GLFW/glfw3.h> namespace openvdb_viewer { class ViewerImpl { public: using CameraPtr = std::shared_ptr<Camera>; using ClipBoxPtr = std::shared_ptr<ClipBox>; using RenderModulePtr = std::shared_ptr<RenderModule>; ViewerImpl(); void init(const std::string& progName); std::string getVersionString() const; bool isOpen() const; bool open(int width = 900, int height = 800); void view(const openvdb::GridCPtrVec&); void handleEvents(); void close(); void resize(int width, int height); void showPrevGrid(); void showNextGrid(); bool needsDisplay(); void setNeedsDisplay(); void toggleRenderModule(size_t n); void toggleInfoText(); // Internal void render(); void interrupt(); void setWindowTitle(double fps = 0.0); void showNthGrid(size_t n); void updateCutPlanes(int wheelPos); void swapBuffers(); void keyCallback(int key, int action); void mouseButtonCallback(int button, int action); void mousePosCallback(int x, int y); void mouseWheelCallback(int pos); void windowSizeCallback(int width, int height); void windowRefreshCallback(); static openvdb::BBoxd worldSpaceBBox(const openvdb::math::Transform&, const openvdb::CoordBBox&); static void sleep(double seconds); private: bool mDidInit; CameraPtr mCamera; ClipBoxPtr mClipBox; RenderModulePtr mViewportModule; std::vector<RenderModulePtr> mRenderModules; openvdb::GridCPtrVec mGrids; size_t mGridIdx, mUpdates; std::string mGridName, mProgName, mGridInfo, mTransformInfo, mTreeInfo; int mWheelPos; bool mShiftIsDown, mCtrlIsDown, mShowInfo; bool mInterrupt; GLFWwindow* mWindow; }; // class ViewerImpl class ThreadManager { public: ThreadManager(); void view(const openvdb::GridCPtrVec& gridList); void close(); void resize(int width, int height); private: void doView(); static void* doViewTask(void* arg); tbb::atomic<bool> mRedisplay; bool mClose, mHasThread; std::thread mThread; openvdb::GridCPtrVec mGrids; }; //////////////////////////////////////// namespace { ViewerImpl* sViewer = nullptr; ThreadManager* sThreadMgr = nullptr; tbb::mutex sLock; void keyCB(GLFWwindow*, int key, int /*scancode*/, int action, int /*modifiers*/) { if (sViewer) sViewer->keyCallback(key, action); } void mouseButtonCB(GLFWwindow*, int button, int action, int /*modifiers*/) { if (sViewer) sViewer->mouseButtonCallback(button, action); } void mousePosCB(GLFWwindow*, double x, double y) { if (sViewer) sViewer->mousePosCallback(int(x), int(y)); } void mouseWheelCB(GLFWwindow*, double /*xoffset*/, double yoffset) { if (sViewer) sViewer->mouseWheelCallback(int(yoffset)); } void windowSizeCB(GLFWwindow*, int width, int height) { if (sViewer) sViewer->windowSizeCallback(width, height); } void windowRefreshCB(GLFWwindow*) { if (sViewer) sViewer->windowRefreshCallback(); } } // unnamed namespace //////////////////////////////////////// Viewer init(const std::string& 
progName, bool background) { if (sViewer == nullptr) { tbb::mutex::scoped_lock lock(sLock); if (sViewer == nullptr) { OPENVDB_START_THREADSAFE_STATIC_WRITE sViewer = new ViewerImpl; OPENVDB_FINISH_THREADSAFE_STATIC_WRITE } } sViewer->init(progName); if (background) { if (sThreadMgr == nullptr) { tbb::mutex::scoped_lock lock(sLock); if (sThreadMgr == nullptr) { OPENVDB_START_THREADSAFE_STATIC_WRITE sThreadMgr = new ThreadManager; OPENVDB_FINISH_THREADSAFE_STATIC_WRITE } } } else { if (sThreadMgr != nullptr) { tbb::mutex::scoped_lock lock(sLock); delete sThreadMgr; OPENVDB_START_THREADSAFE_STATIC_WRITE sThreadMgr = nullptr; OPENVDB_FINISH_THREADSAFE_STATIC_WRITE } } return Viewer(); } void exit() { glfwTerminate(); } //////////////////////////////////////// Viewer::Viewer() { OPENVDB_LOG_DEBUG_RUNTIME("constructed Viewer from thread " << std::this_thread::get_id()); } void Viewer::open(int width, int height) { if (sViewer) sViewer->open(width, height); } void Viewer::view(const openvdb::GridCPtrVec& grids) { if (sThreadMgr) { sThreadMgr->view(grids); } else if (sViewer) { sViewer->view(grids); } } void Viewer::handleEvents() { if (sViewer) sViewer->handleEvents(); } void Viewer::close() { if (sThreadMgr) sThreadMgr->close(); else if (sViewer) sViewer->close(); } void Viewer::resize(int width, int height) { if (sViewer) sViewer->resize(width, height); } std::string Viewer::getVersionString() const { std::string version; if (sViewer) version = sViewer->getVersionString(); return version; } //////////////////////////////////////// ThreadManager::ThreadManager() : mClose(false) , mHasThread(false) { mRedisplay = false; } void ThreadManager::view(const openvdb::GridCPtrVec& gridList) { if (!sViewer) return; mGrids = gridList; mClose = false; mRedisplay = true; if (!mHasThread) { mThread = std::thread(doViewTask, this); mHasThread = true; } } void ThreadManager::close() { if (!sViewer) return; // Tell the viewer thread to exit. mRedisplay = false; mClose = true; // Tell the viewer to terminate its event loop. sViewer->interrupt(); if (mHasThread) { mThread.join(); mHasThread = false; } // Tell the viewer to close its window. sViewer->close(); } void ThreadManager::doView() { // This function runs in its own thread. // The mClose and mRedisplay flags are set from the main thread. while (!mClose) { if (mRedisplay.compare_and_swap(/*set to*/false, /*if*/true)) { if (sViewer) sViewer->view(mGrids); } sViewer->sleep(0.5/*sec*/); } } //static void* ThreadManager::doViewTask(void* arg) { if (ThreadManager* self = static_cast<ThreadManager*>(arg)) { self->doView(); } return nullptr; } //////////////////////////////////////// ViewerImpl::ViewerImpl() : mDidInit(false) , mCamera(new Camera) , mClipBox(new ClipBox) , mGridIdx(0) , mUpdates(0) , mWheelPos(0) , mShiftIsDown(false) , mCtrlIsDown(false) , mShowInfo(true) , mInterrupt(false) , mWindow(nullptr) { } void ViewerImpl::init(const std::string& progName) { mProgName = progName; if (!mDidInit) { struct Local { static void errorCB(int error, const char* descr) { OPENVDB_LOG_ERROR("GLFW Error " << error << ": " << descr); } }; glfwSetErrorCallback(Local::errorCB); if (glfwInit() == GL_TRUE) { OPENVDB_LOG_DEBUG_RUNTIME("initialized GLFW from thread " << std::this_thread::get_id()); mDidInit = true; } else { OPENVDB_LOG_ERROR("GLFW initialization failed"); } } mViewportModule.reset(new ViewportModule); } std::string ViewerImpl::getVersionString() const { std::ostringstream ostr; ostr << "OpenVDB: " << openvdb::OPENVDB_LIBRARY_MAJOR_VERSION << "." 
<< openvdb::OPENVDB_LIBRARY_MINOR_VERSION << "." << openvdb::OPENVDB_LIBRARY_PATCH_VERSION; int major, minor, rev; glfwGetVersion(&major, &minor, &rev); ostr << ", " << "GLFW: " << major << "." << minor << "." << rev; if (mDidInit) { ostr << ", " << "OpenGL: "; std::shared_ptr<GLFWwindow> wPtr; GLFWwindow* w = mWindow; if (!w) { wPtr.reset(glfwCreateWindow(100, 100, "", nullptr, nullptr), &glfwDestroyWindow); w = wPtr.get(); } if (w) { ostr << glfwGetWindowAttrib(w, GLFW_CONTEXT_VERSION_MAJOR) << "." << glfwGetWindowAttrib(w, GLFW_CONTEXT_VERSION_MINOR) << "." << glfwGetWindowAttrib(w, GLFW_CONTEXT_REVISION); } } return ostr.str(); } bool ViewerImpl::open(int width, int height) { if (mWindow == nullptr) { glfwWindowHint(GLFW_RED_BITS, 8); glfwWindowHint(GLFW_GREEN_BITS, 8); glfwWindowHint(GLFW_BLUE_BITS, 8); glfwWindowHint(GLFW_ALPHA_BITS, 8); glfwWindowHint(GLFW_DEPTH_BITS, 32); glfwWindowHint(GLFW_STENCIL_BITS, 0); mWindow = glfwCreateWindow( width, height, mProgName.c_str(), /*monitor=*/nullptr, /*share=*/nullptr); OPENVDB_LOG_DEBUG_RUNTIME("created window " << std::hex << mWindow << std::dec << " from thread " << std::this_thread::get_id()); if (mWindow != nullptr) { // Temporarily make the new window the current context, then create a font. std::shared_ptr<GLFWwindow> curWindow( glfwGetCurrentContext(), glfwMakeContextCurrent); glfwMakeContextCurrent(mWindow); BitmapFont13::initialize(); } } mCamera->setWindow(mWindow); if (mWindow != nullptr) { glfwSetKeyCallback(mWindow, keyCB); glfwSetMouseButtonCallback(mWindow, mouseButtonCB); glfwSetCursorPosCallback(mWindow, mousePosCB); glfwSetScrollCallback(mWindow, mouseWheelCB); glfwSetWindowSizeCallback(mWindow, windowSizeCB); glfwSetWindowRefreshCallback(mWindow, windowRefreshCB); } return (mWindow != nullptr); } bool ViewerImpl::isOpen() const { return (mWindow != nullptr); } // Set a flag so as to break out of the event loop on the next iteration. // (Useful only if the event loop is running in a separate thread.) void ViewerImpl::interrupt() { mInterrupt = true; if (mWindow) glfwSetWindowShouldClose(mWindow, true); } void ViewerImpl::handleEvents() { glfwPollEvents(); } void ViewerImpl::close() { OPENVDB_LOG_DEBUG_RUNTIME("about to close window " << std::hex << mWindow << std::dec << " from thread " << std::this_thread::get_id()); mViewportModule.reset(); mRenderModules.clear(); mCamera->setWindow(nullptr); GLFWwindow* win = mWindow; mWindow = nullptr; glfwDestroyWindow(win); OPENVDB_LOG_DEBUG_RUNTIME("destroyed window " << std::hex << win << std::dec << " from thread " << std::this_thread::get_id()); } //////////////////////////////////////// void ViewerImpl::view(const openvdb::GridCPtrVec& gridList) { if (!isOpen()) return; mGrids = gridList; mGridIdx = size_t(-1); mGridName.clear(); // Compute the combined bounding box of all the grids. openvdb::BBoxd bbox(openvdb::Vec3d(0.0), openvdb::Vec3d(0.0)); if (!gridList.empty()) { bbox = worldSpaceBBox( gridList[0]->transform(), gridList[0]->evalActiveVoxelBoundingBox()); openvdb::Vec3d voxelSize = gridList[0]->voxelSize(); for (size_t n = 1; n < gridList.size(); ++n) { bbox.expand(worldSpaceBBox(gridList[n]->transform(), gridList[n]->evalActiveVoxelBoundingBox())); voxelSize = minComponent(voxelSize, gridList[n]->voxelSize()); } mClipBox->setStepSize(voxelSize); } mClipBox->setBBox(bbox); // Prepare window for rendering. 
glfwMakeContextCurrent(mWindow); #if defined(_WIN32) // This must come after glfwMakeContextCurrent if (GLEW_OK != glewInit()) { OPENVDB_LOG_ERROR("GLEW initialization failed"); } #endif { // set up camera openvdb::Vec3d extents = bbox.extents(); double maxExtent = std::max(extents[0], std::max(extents[1], extents[2])); mCamera->setTarget(bbox.getCenter(), maxExtent); mCamera->lookAtTarget(); mCamera->setSpeed(); } swapBuffers(); setNeedsDisplay(); ////////// // Screen color glClearColor(0.85f, 0.85f, 0.85f, 0.0f); glDepthFunc(GL_LESS); glEnable(GL_DEPTH_TEST); glShadeModel(GL_SMOOTH); glPointSize(4); glLineWidth(2); ////////// // construct render modules showNthGrid(/*n=*/0); // main loop size_t frame = 0; double time = glfwGetTime(); glfwSwapInterval(1); OPENVDB_LOG_DEBUG_RUNTIME("starting to render in window " << std::hex << mWindow << std::dec << " from thread " << std::this_thread::get_id()); mInterrupt = false; for (bool stop = false; !stop; ) { if (needsDisplay()) render(); // eval fps ++frame; double elapsed = glfwGetTime() - time; if (elapsed > 1.0) { time = glfwGetTime(); setWindowTitle(/*fps=*/double(frame) / elapsed); frame = 0; } // Swap front and back buffers swapBuffers(); sleep(0.01/*sec*/); // Exit if the Esc key is pressed or the window is closed. handleEvents(); stop = (mInterrupt || glfwWindowShouldClose(mWindow)); } if (glfwGetCurrentContext() == mWindow) { ///< @todo not thread-safe // Detach this viewer's GL context. glfwMakeContextCurrent(nullptr); OPENVDB_LOG_DEBUG_RUNTIME("detached window " << std::hex << mWindow << std::dec << " from thread " << std::this_thread::get_id()); } OPENVDB_LOG_DEBUG_RUNTIME("finished rendering in window " << std::hex << mWindow << std::dec << " from thread " << std::this_thread::get_id()); } //////////////////////////////////////// void ViewerImpl::resize(int width, int height) { if (mWindow) glfwSetWindowSize(mWindow, width, height); } //////////////////////////////////////// void ViewerImpl::render() { if (mWindow == nullptr) return; // Prepare window for rendering. glfwMakeContextCurrent(mWindow); mCamera->aim(); // draw scene mViewportModule->render(); // ground plane. mClipBox->render(); mClipBox->enableClipping(); for (size_t n = 0, N = mRenderModules.size(); n < N; ++n) { mRenderModules[n]->render(); } mClipBox->disableClipping(); // Render text if (mShowInfo) { BitmapFont13::enableFontRendering(); glColor3d(0.2, 0.2, 0.2); int width, height; glfwGetFramebufferSize(mWindow, &width, &height); BitmapFont13::print(10, height - 13 - 10, mGridInfo); BitmapFont13::print(10, height - 13 - 30, mTransformInfo); BitmapFont13::print(10, height - 13 - 50, mTreeInfo); // Indicate via their hotkeys which render modules are enabled. 
std::string keys = "123"; for (auto n: {0, 1, 2}) { if (!mRenderModules[n]->visible()) keys[n] = ' '; } BitmapFont13::print(width - 10 - 30, 10, keys); glColor3d(0.75, 0.75, 0.75); BitmapFont13::print(width - 10 - 30, 10, "123"); BitmapFont13::disableFontRendering(); } } //////////////////////////////////////// //static void ViewerImpl::sleep(double secs) { secs = fabs(secs); int isecs = int(secs); std::this_thread::sleep_for(std::chrono::seconds(isecs)); } //////////////////////////////////////// //static openvdb::BBoxd ViewerImpl::worldSpaceBBox(const openvdb::math::Transform& xform, const openvdb::CoordBBox& bbox) { openvdb::Vec3d pMin = openvdb::Vec3d(std::numeric_limits<double>::max()); openvdb::Vec3d pMax = -pMin; const openvdb::Coord& min = bbox.min(); const openvdb::Coord& max = bbox.max(); openvdb::Coord ijk; // corner 1 openvdb::Vec3d ptn = xform.indexToWorld(min); for (int i = 0; i < 3; ++i) { if (ptn[i] < pMin[i]) pMin[i] = ptn[i]; if (ptn[i] > pMax[i]) pMax[i] = ptn[i]; } // corner 2 ijk[0] = min.x(); ijk[1] = min.y(); ijk[2] = max.z(); ptn = xform.indexToWorld(ijk); for (int i = 0; i < 3; ++i) { if (ptn[i] < pMin[i]) pMin[i] = ptn[i]; if (ptn[i] > pMax[i]) pMax[i] = ptn[i]; } // corner 3 ijk[0] = max.x(); ijk[1] = min.y(); ijk[2] = max.z(); ptn = xform.indexToWorld(ijk); for (int i = 0; i < 3; ++i) { if (ptn[i] < pMin[i]) pMin[i] = ptn[i]; if (ptn[i] > pMax[i]) pMax[i] = ptn[i]; } // corner 4 ijk[0] = max.x(); ijk[1] = min.y(); ijk[2] = min.z(); ptn = xform.indexToWorld(ijk); for (int i = 0; i < 3; ++i) { if (ptn[i] < pMin[i]) pMin[i] = ptn[i]; if (ptn[i] > pMax[i]) pMax[i] = ptn[i]; } // corner 5 ijk[0] = min.x(); ijk[1] = max.y(); ijk[2] = min.z(); ptn = xform.indexToWorld(ijk); for (int i = 0; i < 3; ++i) { if (ptn[i] < pMin[i]) pMin[i] = ptn[i]; if (ptn[i] > pMax[i]) pMax[i] = ptn[i]; } // corner 6 ijk[0] = min.x(); ijk[1] = max.y(); ijk[2] = max.z(); ptn = xform.indexToWorld(ijk); for (int i = 0; i < 3; ++i) { if (ptn[i] < pMin[i]) pMin[i] = ptn[i]; if (ptn[i] > pMax[i]) pMax[i] = ptn[i]; } // corner 7 ptn = xform.indexToWorld(max); for (int i = 0; i < 3; ++i) { if (ptn[i] < pMin[i]) pMin[i] = ptn[i]; if (ptn[i] > pMax[i]) pMax[i] = ptn[i]; } // corner 8 ijk[0] = max.x(); ijk[1] = max.y(); ijk[2] = min.z(); ptn = xform.indexToWorld(ijk); for (int i = 0; i < 3; ++i) { if (ptn[i] < pMin[i]) pMin[i] = ptn[i]; if (ptn[i] > pMax[i]) pMax[i] = ptn[i]; } return openvdb::BBoxd(pMin, pMax); } //////////////////////////////////////// void ViewerImpl::updateCutPlanes(int wheelPos) { double speed = std::abs(mWheelPos - wheelPos); if (mWheelPos < wheelPos) mClipBox->update(speed); else mClipBox->update(-speed); setNeedsDisplay(); } //////////////////////////////////////// void ViewerImpl::swapBuffers() { glfwSwapBuffers(mWindow); } //////////////////////////////////////// void ViewerImpl::setWindowTitle(double fps) { std::ostringstream ss; ss << mProgName << ": " << (mGridName.empty() ? 
std::string("OpenVDB") : mGridName) << " (" << (mGridIdx + 1) << " of " << mGrids.size() << ") @ " << std::setprecision(1) << std::fixed << fps << " fps"; if (mWindow) glfwSetWindowTitle(mWindow, ss.str().c_str()); } //////////////////////////////////////// void ViewerImpl::showPrevGrid() { if (const size_t numGrids = mGrids.size()) { size_t idx = ((numGrids + mGridIdx) - 1) % numGrids; showNthGrid(idx); } } void ViewerImpl::showNextGrid() { if (const size_t numGrids = mGrids.size()) { size_t idx = (mGridIdx + 1) % numGrids; showNthGrid(idx); } } void ViewerImpl::showNthGrid(size_t n) { if (mGrids.empty()) return; n = n % mGrids.size(); if (n == mGridIdx) return; mGridName = mGrids[n]->getName(); mGridIdx = n; // save render settings std::vector<bool> active(mRenderModules.size()); for (size_t i = 0, I = active.size(); i < I; ++i) { active[i] = mRenderModules[i]->visible(); } mRenderModules.clear(); mRenderModules.push_back(RenderModulePtr(new TreeTopologyModule(mGrids[n]))); mRenderModules.push_back(RenderModulePtr(new MeshModule(mGrids[n]))); mRenderModules.push_back(RenderModulePtr(new VoxelModule(mGrids[n]))); if (active.empty()) { for (size_t i = 1, I = mRenderModules.size(); i < I; ++i) { mRenderModules[i]->setVisible(false); } } else { for (size_t i = 0, I = active.size(); i < I; ++i) { mRenderModules[i]->setVisible(active[i]); } } // Collect info { std::ostringstream ostrm; std::string s = mGrids[n]->getName(); const openvdb::GridClass cls = mGrids[n]->getGridClass(); if (!s.empty()) ostrm << s << " / "; ostrm << mGrids[n]->valueType() << " / "; if (cls == openvdb::GRID_UNKNOWN) ostrm << " class unknown"; else ostrm << " " << openvdb::GridBase::gridClassToString(cls); mGridInfo = ostrm.str(); } { openvdb::Coord dim = mGrids[n]->evalActiveVoxelDim(); std::ostringstream ostrm; ostrm << dim[0] << " x " << dim[1] << " x " << dim[2] << " / voxel size " << std::setprecision(4) << mGrids[n]->voxelSize()[0] << " (" << mGrids[n]->transform().mapType() << ")"; mTransformInfo = ostrm.str(); } { std::ostringstream ostrm; const openvdb::Index64 count = mGrids[n]->activeVoxelCount(); ostrm << openvdb::util::formattedInt(count) << " active voxel" << (count == 1 ? "" : "s"); mTreeInfo = ostrm.str(); } { if (mGrids[n]->isType<openvdb::points::PointDataGrid>()) { const openvdb::points::PointDataGrid::ConstPtr points = openvdb::gridConstPtrCast<openvdb::points::PointDataGrid>(mGrids[n]); const openvdb::Index64 count = openvdb::points::pointCount(points->tree()); std::ostringstream ostrm; ostrm << " / " << openvdb::util::formattedInt(count) << " point" << (count == 1 ? "" : "s"); mTreeInfo.append(ostrm.str()); } } setWindowTitle(); } //////////////////////////////////////// void ViewerImpl::keyCallback(int key, int action) { mCamera->keyCallback(key, action); if (mWindow == nullptr) return; const bool keyPress = (glfwGetKey(mWindow, key) == GLFW_PRESS); /// @todo Should use "modifiers" argument to keyCB(). 
mShiftIsDown = glfwGetKey(mWindow, GLFW_KEY_LEFT_SHIFT); mCtrlIsDown = glfwGetKey(mWindow, GLFW_KEY_LEFT_CONTROL); if (keyPress) { switch (key) { case '1': case GLFW_KEY_KP_1: toggleRenderModule(0); break; case '2': case GLFW_KEY_KP_2: toggleRenderModule(1); break; case '3': case GLFW_KEY_KP_3: toggleRenderModule(2); break; case 'c': case 'C': mClipBox->reset(); break; case 'h': case 'H': // center home mCamera->lookAt(openvdb::Vec3d(0.0), 10.0); break; case 'g': case 'G': // center geometry mCamera->lookAtTarget(); break; case 'i': case 'I': toggleInfoText(); break; case GLFW_KEY_LEFT: showPrevGrid(); break; case GLFW_KEY_RIGHT: showNextGrid(); break; case GLFW_KEY_ESCAPE: glfwSetWindowShouldClose(mWindow, true); break; } } switch (key) { case 'x': case 'X': mClipBox->activateXPlanes() = keyPress; break; case 'y': case 'Y': mClipBox->activateYPlanes() = keyPress; break; case 'z': case 'Z': mClipBox->activateZPlanes() = keyPress; break; } mClipBox->shiftIsDown() = mShiftIsDown; mClipBox->ctrlIsDown() = mCtrlIsDown; setNeedsDisplay(); } void ViewerImpl::mouseButtonCallback(int button, int action) { mCamera->mouseButtonCallback(button, action); mClipBox->mouseButtonCallback(button, action); if (mCamera->needsDisplay()) setNeedsDisplay(); } void ViewerImpl::mousePosCallback(int x, int y) { bool handled = mClipBox->mousePosCallback(x, y); if (!handled) mCamera->mousePosCallback(x, y); if (mCamera->needsDisplay()) setNeedsDisplay(); } void ViewerImpl::mouseWheelCallback(int pos) { pos += mWheelPos; if (mClipBox->isActive()) { updateCutPlanes(pos); } else { mCamera->mouseWheelCallback(pos, mWheelPos); if (mCamera->needsDisplay()) setNeedsDisplay(); } mWheelPos = pos; } void ViewerImpl::windowSizeCallback(int, int) { setNeedsDisplay(); } void ViewerImpl::windowRefreshCallback() { setNeedsDisplay(); } //////////////////////////////////////// bool ViewerImpl::needsDisplay() { if (mUpdates < 2) { mUpdates += 1; return true; } return false; } void ViewerImpl::setNeedsDisplay() { mUpdates = 0; } void ViewerImpl::toggleRenderModule(size_t n) { mRenderModules[n]->setVisible(!mRenderModules[n]->visible()); } void ViewerImpl::toggleInfoText() { mShowInfo = !mShowInfo; } } // namespace openvdb_viewer
25,156
C++
22.511215
97
0.574018
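The eight-corner expansion in ViewerImpl::worldSpaceBBox above can equivalently be expressed as a loop over the corner combinations. The following is only an illustrative sketch of that equivalence, not the file's implementation; the name worldSpaceBBoxSketch is made up for the example.

// Sketch: world-space bounding box of an index-space CoordBBox obtained by
// transforming all eight corners, equivalent in spirit to the unrolled
// version in Viewer.cc above. Illustration only.
#include <openvdb/openvdb.h>

openvdb::BBoxd
worldSpaceBBoxSketch(const openvdb::math::Transform& xform,
                     const openvdb::CoordBBox& bbox)
{
    openvdb::BBoxd result;
    bool first = true;
    const openvdb::Coord &mn = bbox.min(), &mx = bbox.max();
    for (int i = 0; i < 8; ++i) {
        // Pick min or max for each axis from the three low bits of i.
        const openvdb::Coord ijk(
            (i & 1) ? mx.x() : mn.x(),
            (i & 2) ? mx.y() : mn.y(),
            (i & 4) ? mx.z() : mn.z());
        const openvdb::Vec3d p = xform.indexToWorld(ijk);
        if (first) { result = openvdb::BBoxd(p, p); first = false; }
        else       { result.expand(p); }
    }
    return result;
}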
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/Font.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_VIEWER_FONT_HAS_BEEN_INCLUDED #define OPENVDB_VIEWER_FONT_HAS_BEEN_INCLUDED #include <string> #if defined(__APPLE__) || defined(MACOSX) #include <OpenGL/gl.h> #include <OpenGL/glu.h> #elif defined(_WIN32) #include <GL/glew.h> #else #include <GL/gl.h> #include <GL/glu.h> #endif namespace openvdb_viewer { class BitmapFont13 { public: BitmapFont13() {} static void initialize(); static void enableFontRendering(); static void disableFontRendering(); static void print(GLint px, GLint py, const std::string&); private: static GLuint sOffset; static GLubyte sCharacters[95][13]; }; } // namespace openvdb_viewer #endif // OPENVDB_VIEWER_FONT_HAS_BEEN_INCLUDED
798
C++
18.023809
62
0.715539
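The BitmapFont13 interface declared above is driven from ViewerImpl::render() in Viewer.cc. Below is a minimal sketch of that call sequence, assuming a current GL context; the function drawOverlay and its parameters are illustrative, not part of the library.

// Sketch: text overlay with BitmapFont13, mirroring ViewerImpl::render().
// Assumes a current GL context; frameHeight and info are illustrative.
#include "Font.h"
#include <string>

void drawOverlay(int frameHeight, const std::string& info)
{
    static bool fontReady = false;
    if (!fontReady) {                                     // one-time display-list setup
        openvdb_viewer::BitmapFont13::initialize();
        fontReady = true;
    }
    openvdb_viewer::BitmapFont13::enableFontRendering();  // switch to a 2D ortho projection
    glColor3d(0.2, 0.2, 0.2);                             // dark grey text
    openvdb_viewer::BitmapFont13::print(10, frameHeight - 13 - 10, info);
    openvdb_viewer::BitmapFont13::disableFontRendering(); // restore matrix state
}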
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/ClipBox.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "ClipBox.h" namespace openvdb_viewer { ClipBox::ClipBox() : mStepSize(1.0) , mBBox() , mXIsActive(false) , mYIsActive(false) , mZIsActive(false) , mShiftIsDown(false) , mCtrlIsDown(false) { GLdouble front [] = { 0.0, 0.0, 1.0, 0.0}; std::copy(front, front + 4, mFrontPlane); GLdouble back [] = { 0.0, 0.0,-1.0, 0.0}; std::copy(back, back + 4, mBackPlane); GLdouble left [] = { 1.0, 0.0, 0.0, 0.0}; std::copy(left, left + 4, mLeftPlane); GLdouble right [] = {-1.0, 0.0, 0.0, 0.0}; std::copy(right, right + 4, mRightPlane); GLdouble top [] = { 0.0, 1.0, 0.0, 0.0}; std::copy(top, top + 4, mTopPlane); GLdouble bottom [] = { 0.0,-1.0, 0.0, 0.0}; std::copy(bottom, bottom + 4, mBottomPlane); } void ClipBox::setBBox(const openvdb::BBoxd& bbox) { mBBox = bbox; reset(); } void ClipBox::update(double steps) { if (mXIsActive) { GLdouble s = steps * mStepSize.x() * 4.0; if (mShiftIsDown || mCtrlIsDown) { mLeftPlane[3] -= s; mLeftPlane[3] = -std::min(-mLeftPlane[3], (mRightPlane[3] - mStepSize.x())); mLeftPlane[3] = -std::max(-mLeftPlane[3], mBBox.min().x()); } if (!mShiftIsDown || mCtrlIsDown) { mRightPlane[3] += s; mRightPlane[3] = std::min(mRightPlane[3], mBBox.max().x()); mRightPlane[3] = std::max(mRightPlane[3], (-mLeftPlane[3] + mStepSize.x())); } } if (mYIsActive) { GLdouble s = steps * mStepSize.y() * 4.0; if (mShiftIsDown || mCtrlIsDown) { mTopPlane[3] -= s; mTopPlane[3] = -std::min(-mTopPlane[3], (mBottomPlane[3] - mStepSize.y())); mTopPlane[3] = -std::max(-mTopPlane[3], mBBox.min().y()); } if (!mShiftIsDown || mCtrlIsDown) { mBottomPlane[3] += s; mBottomPlane[3] = std::min(mBottomPlane[3], mBBox.max().y()); mBottomPlane[3] = std::max(mBottomPlane[3], (-mTopPlane[3] + mStepSize.y())); } } if (mZIsActive) { GLdouble s = steps * mStepSize.z() * 4.0; if (mShiftIsDown || mCtrlIsDown) { mFrontPlane[3] -= s; mFrontPlane[3] = -std::min(-mFrontPlane[3], (mBackPlane[3] - mStepSize.z())); mFrontPlane[3] = -std::max(-mFrontPlane[3], mBBox.min().z()); } if (!mShiftIsDown || mCtrlIsDown) { mBackPlane[3] += s; mBackPlane[3] = std::min(mBackPlane[3], mBBox.max().z()); mBackPlane[3] = std::max(mBackPlane[3], (-mFrontPlane[3] + mStepSize.z())); } } } void ClipBox::reset() { mFrontPlane[3] = std::abs(mBBox.min().z()); mBackPlane[3] = mBBox.max().z(); mLeftPlane[3] = std::abs(mBBox.min().x()); mRightPlane[3] = mBBox.max().x(); mTopPlane[3] = std::abs(mBBox.min().y()); mBottomPlane[3] = mBBox.max().y(); } void ClipBox::update() const { glClipPlane(GL_CLIP_PLANE0, mFrontPlane); glClipPlane(GL_CLIP_PLANE1, mBackPlane); glClipPlane(GL_CLIP_PLANE2, mLeftPlane); glClipPlane(GL_CLIP_PLANE3, mRightPlane); glClipPlane(GL_CLIP_PLANE4, mTopPlane); glClipPlane(GL_CLIP_PLANE5, mBottomPlane); } void ClipBox::enableClipping() const { update(); if (-mFrontPlane[3] > mBBox.min().z()) glEnable(GL_CLIP_PLANE0); if (mBackPlane[3] < mBBox.max().z()) glEnable(GL_CLIP_PLANE1); if (-mLeftPlane[3] > mBBox.min().x()) glEnable(GL_CLIP_PLANE2); if (mRightPlane[3] < mBBox.max().x()) glEnable(GL_CLIP_PLANE3); if (-mTopPlane[3] > mBBox.min().y()) glEnable(GL_CLIP_PLANE4); if (mBottomPlane[3] < mBBox.max().y()) glEnable(GL_CLIP_PLANE5); } void ClipBox::disableClipping() const { glDisable(GL_CLIP_PLANE0); glDisable(GL_CLIP_PLANE1); glDisable(GL_CLIP_PLANE2); glDisable(GL_CLIP_PLANE3); glDisable(GL_CLIP_PLANE4); glDisable(GL_CLIP_PLANE5); } void ClipBox::render() { bool drawBbox = false; const GLenum geoMode = GL_LINE_LOOP; glColor3d(0.1, 0.1, 0.9); if 
(-mFrontPlane[3] > mBBox.min().z()) { glBegin(geoMode); glVertex3d(mBBox.min().x(), mBBox.min().y(), -mFrontPlane[3]); glVertex3d(mBBox.min().x(), mBBox.max().y(), -mFrontPlane[3]); glVertex3d(mBBox.max().x(), mBBox.max().y(), -mFrontPlane[3]); glVertex3d(mBBox.max().x(), mBBox.min().y(), -mFrontPlane[3]); glEnd(); drawBbox = true; } if (mBackPlane[3] < mBBox.max().z()) { glBegin(geoMode); glVertex3d(mBBox.min().x(), mBBox.min().y(), mBackPlane[3]); glVertex3d(mBBox.min().x(), mBBox.max().y(), mBackPlane[3]); glVertex3d(mBBox.max().x(), mBBox.max().y(), mBackPlane[3]); glVertex3d(mBBox.max().x(), mBBox.min().y(), mBackPlane[3]); glEnd(); drawBbox = true; } glColor3d(0.9, 0.1, 0.1); if (-mLeftPlane[3] > mBBox.min().x()) { glBegin(geoMode); glVertex3d(-mLeftPlane[3], mBBox.min().y(), mBBox.min().z()); glVertex3d(-mLeftPlane[3], mBBox.max().y(), mBBox.min().z()); glVertex3d(-mLeftPlane[3], mBBox.max().y(), mBBox.max().z()); glVertex3d(-mLeftPlane[3], mBBox.min().y(), mBBox.max().z()); glEnd(); drawBbox = true; } if (mRightPlane[3] < mBBox.max().x()) { glBegin(geoMode); glVertex3d(mRightPlane[3], mBBox.min().y(), mBBox.min().z()); glVertex3d(mRightPlane[3], mBBox.max().y(), mBBox.min().z()); glVertex3d(mRightPlane[3], mBBox.max().y(), mBBox.max().z()); glVertex3d(mRightPlane[3], mBBox.min().y(), mBBox.max().z()); glEnd(); drawBbox = true; } glColor3d(0.1, 0.9, 0.1); if (-mTopPlane[3] > mBBox.min().y()) { glBegin(geoMode); glVertex3d(mBBox.min().x(), -mTopPlane[3], mBBox.min().z()); glVertex3d(mBBox.min().x(), -mTopPlane[3], mBBox.max().z()); glVertex3d(mBBox.max().x(), -mTopPlane[3], mBBox.max().z()); glVertex3d(mBBox.max().x(), -mTopPlane[3], mBBox.min().z()); glEnd(); drawBbox = true; } if (mBottomPlane[3] < mBBox.max().y()) { glBegin(geoMode); glVertex3d(mBBox.min().x(), mBottomPlane[3], mBBox.min().z()); glVertex3d(mBBox.min().x(), mBottomPlane[3], mBBox.max().z()); glVertex3d(mBBox.max().x(), mBottomPlane[3], mBBox.max().z()); glVertex3d(mBBox.max().x(), mBottomPlane[3], mBBox.min().z()); glEnd(); drawBbox = true; } if (drawBbox) { glColor3d(0.5, 0.5, 0.5); glBegin(GL_LINE_LOOP); glVertex3d(mBBox.min().x(), mBBox.min().y(), mBBox.min().z()); glVertex3d(mBBox.min().x(), mBBox.min().y(), mBBox.max().z()); glVertex3d(mBBox.max().x(), mBBox.min().y(), mBBox.max().z()); glVertex3d(mBBox.max().x(), mBBox.min().y(), mBBox.min().z()); glEnd(); glBegin(GL_LINE_LOOP); glVertex3d(mBBox.min().x(), mBBox.max().y(), mBBox.min().z()); glVertex3d(mBBox.min().x(), mBBox.max().y(), mBBox.max().z()); glVertex3d(mBBox.max().x(), mBBox.max().y(), mBBox.max().z()); glVertex3d(mBBox.max().x(), mBBox.max().y(), mBBox.min().z()); glEnd(); glBegin(GL_LINES); glVertex3d(mBBox.min().x(), mBBox.min().y(), mBBox.min().z()); glVertex3d(mBBox.min().x(), mBBox.max().y(), mBBox.min().z()); glVertex3d(mBBox.min().x(), mBBox.min().y(), mBBox.max().z()); glVertex3d(mBBox.min().x(), mBBox.max().y(), mBBox.max().z()); glVertex3d(mBBox.max().x(), mBBox.min().y(), mBBox.max().z()); glVertex3d(mBBox.max().x(), mBBox.max().y(), mBBox.max().z()); glVertex3d(mBBox.max().x(), mBBox.min().y(), mBBox.min().z()); glVertex3d(mBBox.max().x(), mBBox.max().y(), mBBox.min().z()); glEnd(); } } //////////////////////////////////////// bool ClipBox::mouseButtonCallback(int /*button*/, int /*action*/) { return false; // unhandled } bool ClipBox::mousePosCallback(int /*x*/, int /*y*/) { return false; // unhandled } } // namespace openvdb_viewer
8,209
C++
29.749064
89
0.545621
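The six GLdouble[4] members manipulated above follow the standard glClipPlane half-space convention. A tiny sketch of that test, included only to document the sign convention; insideHalfSpace is an illustrative name, not part of ClipBox.

// Sketch: the half-space test glClipPlane applies to each plane array above.
// A point (x, y, z) is kept when a*x + b*y + c*z + d >= 0. For example, after
// ClipBox::reset(), mRightPlane = {-1, 0, 0, maxX}, so only x <= maxX survives.
inline bool insideHalfSpace(const double plane[4], double x, double y, double z)
{
    return plane[0] * x + plane[1] * y + plane[2] * z + plane[3] >= 0.0;
}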
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/Viewer.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_VIEWER_VIEWER_HAS_BEEN_INCLUDED #define OPENVDB_VIEWER_VIEWER_HAS_BEEN_INCLUDED #include <openvdb/openvdb.h> #include <string> namespace openvdb_viewer { class Viewer; enum { DEFAULT_WIDTH = 900, DEFAULT_HEIGHT = 800 }; /// @brief Initialize and return a viewer. /// @param progName the name of the calling program (for use in info displays) /// @param background if true, run the viewer in a separate thread /// @note Currently, the viewer window is a singleton (but that might change /// in the future), so although this function returns a new Viewer instance /// on each call, all instances are associated with the same window. Viewer init(const std::string& progName, bool background); /// @brief Destroy all viewer windows and release resources. /// @details This should be called from the main thread before your program exits. void exit(); /// Manager for a window that displays OpenVDB grids class Viewer { public: /// Set the size of and open the window associated with this viewer. void open(int width = DEFAULT_WIDTH, int height = DEFAULT_HEIGHT); /// Display the given grids. void view(const openvdb::GridCPtrVec&); /// @brief Process any pending user input (keyboard, mouse, etc.) /// in the window associated with this viewer. void handleEvents(); /// @brief Close the window associated with this viewer. /// @warning The window associated with this viewer might be shared with other viewers. void close(); /// Resize the window associated with this viewer. void resize(int width, int height); /// Return a string with version number information. std::string getVersionString() const; private: friend Viewer init(const std::string&, bool); Viewer(); }; } // namespace openvdb_viewer #endif // OPENVDB_VIEWER_VIEWER_HAS_BEEN_INCLUDED
1,939
C++
29.79365
91
0.720474
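A minimal sketch of a client of the public API declared above, modeled on how the viewer is normally driven; grid loading is elided and the progName/background values are illustrative.

// Sketch: minimal openvdb_viewer client. Grid loading is elided; "grids"
// stands for any populated openvdb::GridCPtrVec.
#include "Viewer.h"
#include <openvdb/openvdb.h>

int main(int, char* argv[])
{
    openvdb::initialize();

    openvdb::GridCPtrVec grids;   // fill from a .vdb file, procedurally, etc.

    // background=false: view() runs the render loop on this thread and
    // returns when the window is closed or Esc is pressed.
    openvdb_viewer::Viewer viewer =
        openvdb_viewer::init(/*progName=*/argv[0], /*background=*/false);

    viewer.open();        // DEFAULT_WIDTH x DEFAULT_HEIGHT window
    viewer.view(grids);
    viewer.close();

    openvdb_viewer::exit();  // release GLFW resources before exiting
    return 0;
}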
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/ClipBox.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_VIEWER_CLIPBOX_HAS_BEEN_INCLUDED #define OPENVDB_VIEWER_CLIPBOX_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #if defined(__APPLE__) || defined(MACOSX) #include <OpenGL/gl.h> #include <OpenGL/glu.h> #elif defined(_WIN32) #include <GL/glew.h> #else #include <GL/gl.h> #include <GL/glu.h> #endif namespace openvdb_viewer { class ClipBox { public: ClipBox(); void enableClipping() const; void disableClipping() const; void setBBox(const openvdb::BBoxd&); void setStepSize(const openvdb::Vec3d& s) { mStepSize = s; } void render(); void update(double steps); void reset(); bool isActive() const { return (mXIsActive || mYIsActive ||mZIsActive); } bool& activateXPlanes() { return mXIsActive; } bool& activateYPlanes() { return mYIsActive; } bool& activateZPlanes() { return mZIsActive; } bool& shiftIsDown() { return mShiftIsDown; } bool& ctrlIsDown() { return mCtrlIsDown; } bool mouseButtonCallback(int button, int action); bool mousePosCallback(int x, int y); private: void update() const; openvdb::Vec3d mStepSize; openvdb::BBoxd mBBox; bool mXIsActive, mYIsActive, mZIsActive, mShiftIsDown, mCtrlIsDown; GLdouble mFrontPlane[4], mBackPlane[4], mLeftPlane[4], mRightPlane[4], mTopPlane[4], mBottomPlane[4]; }; // class ClipBox } // namespace openvdb_viewer #endif // OPENVDB_VIEWER_CLIPBOX_HAS_BEEN_INCLUDED
1,521
C++
23.15873
77
0.69428
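For reference, a condensed sketch of the call sequence ViewerImpl uses to drive the ClipBox interface above (in Viewer.cc the setup calls happen once in view() and the render calls once per frame); the helper function names are illustrative.

// Sketch: driving ClipBox as ViewerImpl does. setStepSize()/setBBox() are
// one-time setup; the render/enable/disable calls bracket the scene geometry.
#include "ClipBox.h"
#include <openvdb/Types.h>

void setupClipBox(openvdb_viewer::ClipBox& clipBox,
                  const openvdb::BBoxd& sceneBounds,
                  const openvdb::Vec3d& voxelSize)
{
    clipBox.setStepSize(voxelSize);   // wheel-scroll step used by update()
    clipBox.setBBox(sceneBounds);     // also resets the six clip planes
}

void renderClipped(openvdb_viewer::ClipBox& clipBox /*, render modules ... */)
{
    clipBox.render();            // outlines of any active cut planes
    clipBox.enableClipping();    // enables GL_CLIP_PLANE0..5 as needed
    // ... draw grid geometry here ...
    clipBox.disableClipping();
}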
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/Camera.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file Camera.h /// @brief Basic GL camera class #ifndef OPENVDB_VIEWER_CAMERA_HAS_BEEN_INCLUDED #define OPENVDB_VIEWER_CAMERA_HAS_BEEN_INCLUDED #include <openvdb/Types.h> struct GLFWwindow; // forward declaration namespace openvdb_viewer { class Camera { public: Camera(); void setWindow(GLFWwindow* w) { mWindow = w; } void aim(); void lookAt(const openvdb::Vec3d& p, double dist = 1.0); void lookAtTarget(); void setTarget(const openvdb::Vec3d& p, double dist = 1.0); void setNearFarPlanes(double n, double f) { mNearPlane = n; mFarPlane = f; } void setFieldOfView(double degrees) { mFov = degrees; } void setSpeed(double zoomSpeed = 0.1, double strafeSpeed = 0.002, double tumblingSpeed = 0.02); void keyCallback(int key, int action); void mouseButtonCallback(int button, int action); void mousePosCallback(int x, int y); void mouseWheelCallback(int pos, int prevPos); bool needsDisplay() const { return mNeedsDisplay; } private: // Camera parameters double mFov, mNearPlane, mFarPlane; openvdb::Vec3d mTarget, mLookAt, mUp, mForward, mRight, mEye; double mTumblingSpeed, mZoomSpeed, mStrafeSpeed; double mHead, mPitch, mTargetDistance, mDistance; // Input states bool mMouseDown, mStartTumbling, mZoomMode, mChanged, mNeedsDisplay; double mMouseXPos, mMouseYPos; GLFWwindow* mWindow; static const double sDeg2rad; }; // class Camera } // namespace openvdb_viewer #endif // OPENVDB_VIEWER_CAMERA_HAS_BEEN_INCLUDED
1,624
C++
25.639344
99
0.710591
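A condensed sketch of the sequence ViewerImpl uses to frame a scene with the Camera interface above; the window and bounding box are assumed to come from the host application, and frameScene is an illustrative name.

// Sketch: frame a scene and aim the GL view, mirroring ViewerImpl (Viewer.cc).
#include "Camera.h"
#include <openvdb/Types.h>
#include <algorithm>

void frameScene(openvdb_viewer::Camera& camera,
                GLFWwindow* window,
                const openvdb::BBoxd& bbox)
{
    camera.setWindow(window);

    const openvdb::Vec3d extents = bbox.extents();
    const double maxExtent = std::max(extents[0], std::max(extents[1], extents[2]));

    camera.setTarget(bbox.getCenter(), maxExtent); // what to orbit around
    camera.lookAtTarget();                         // place the eye
    camera.setSpeed();                             // default zoom/strafe/tumble speeds

    camera.aim();   // per frame: set up projection and modelview matrices
}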
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/Font.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "Font.h" #include <openvdb/Types.h> // for OPENVDB_START_THREADSAFE_STATIC_WRITE namespace openvdb_viewer { GLuint BitmapFont13::sOffset = 0; GLubyte BitmapFont13::sCharacters[95][13] = { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36 }, { 0x00, 0x00, 0x00, 0x66, 0x66, 0xFF, 0x66, 0x66, 0xFF, 0x66, 0x66, 0x00, 0x00 }, { 0x00, 0x00, 0x18, 0x7E, 0xFF, 0x1B, 0x1F, 0x7E, 0xF8, 0xD8, 0xFF, 0x7E, 0x18 }, { 0x00, 0x00, 0x0E, 0x1B, 0xDB, 0x6E, 0x30, 0x18, 0x0C, 0x76, 0xDB, 0xD8, 0x70 }, { 0x00, 0x00, 0x7F, 0xC6, 0xCF, 0xD8, 0x70, 0x70, 0xD8, 0xCC, 0xCC, 0x6C, 0x38 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x1C, 0x0C, 0x0E }, { 0x00, 0x00, 0x0C, 0x18, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x18, 0x0C }, { 0x00, 0x00, 0x30, 0x18, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x18, 0x30 }, { 0x00, 0x00, 0x00, 0x00, 0x99, 0x5A, 0x3C, 0xFF, 0x3C, 0x5A, 0x99, 0x00, 0x00 }, { 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0xFF, 0xFF, 0x18, 0x18, 0x18, 0x00, 0x00 }, { 0x00, 0x00, 0x30, 0x18, 0x1C, 0x1C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x00, 0x38, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x60, 0x60, 0x30, 0x30, 0x18, 0x18, 0x0C, 0x0C, 0x06, 0x06, 0x03, 0x03 }, { 0x00, 0x00, 0x3C, 0x66, 0xC3, 0xE3, 0xF3, 0xDB, 0xCF, 0xC7, 0xC3, 0x66, 0x3C }, { 0x00, 0x00, 0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x78, 0x38, 0x18 }, { 0x00, 0x00, 0xFF, 0xC0, 0xC0, 0x60, 0x30, 0x18, 0x0C, 0x06, 0x03, 0xE7, 0x7E }, { 0x00, 0x00, 0x7E, 0xE7, 0x03, 0x03, 0x07, 0x7E, 0x07, 0x03, 0x03, 0xE7, 0x7E }, { 0x00, 0x00, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0xFF, 0xCC, 0x6C, 0x3C, 0x1C, 0x0C }, { 0x00, 0x00, 0x7E, 0xE7, 0x03, 0x03, 0x07, 0xFE, 0xC0, 0xC0, 0xC0, 0xC0, 0xFF }, { 0x00, 0x00, 0x7E, 0xE7, 0xC3, 0xC3, 0xC7, 0xFE, 0xC0, 0xC0, 0xC0, 0xE7, 0x7E }, { 0x00, 0x00, 0x30, 0x30, 0x30, 0x30, 0x18, 0x0C, 0x06, 0x03, 0x03, 0x03, 0xFF }, { 0x00, 0x00, 0x7E, 0xE7, 0xC3, 0xC3, 0xE7, 0x7E, 0xE7, 0xC3, 0xC3, 0xE7, 0x7E }, { 0x00, 0x00, 0x7E, 0xE7, 0x03, 0x03, 0x03, 0x7F, 0xE7, 0xC3, 0xC3, 0xE7, 0x7E }, { 0x00, 0x00, 0x00, 0x38, 0x38, 0x00, 0x00, 0x38, 0x38, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x30, 0x18, 0x1C, 0x1C, 0x00, 0x00, 0x1C, 0x1C, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0x60, 0x30, 0x18, 0x0C, 0x06 }, { 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x60, 0x30, 0x18, 0x0C, 0x06, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60 }, { 0x00, 0x00, 0x18, 0x00, 0x00, 0x18, 0x18, 0x0C, 0x06, 0x03, 0xC3, 0xC3, 0x7E }, { 0x00, 0x00, 0x3F, 0x60, 0xCF, 0xDB, 0xD3, 0xDD, 0xC3, 0x7E, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xC3, 0xC3, 0xC3, 0xC3, 0xFF, 0xC3, 0xC3, 0xC3, 0x66, 0x3C, 0x18 }, { 0x00, 0x00, 0xFE, 0xC7, 0xC3, 0xC3, 0xC7, 0xFE, 0xC7, 0xC3, 0xC3, 0xC7, 0xFE }, { 0x00, 0x00, 0x7E, 0xE7, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xE7, 0x7E }, { 0x00, 0x00, 0xFC, 0xCE, 0xC7, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC7, 0xCE, 0xFC }, { 0x00, 0x00, 0xFF, 0xC0, 0xC0, 0xC0, 0xC0, 0xFC, 0xC0, 0xC0, 0xC0, 0xC0, 0xFF }, { 0x00, 0x00, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xFC, 0xC0, 0xC0, 0xC0, 0xFF }, { 0x00, 0x00, 0x7E, 0xE7, 0xC3, 0xC3, 0xCF, 0xC0, 0xC0, 0xC0, 0xC0, 0xE7, 
0x7E }, { 0x00, 0x00, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xFF, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3 }, { 0x00, 0x00, 0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x7E }, { 0x00, 0x00, 0x7C, 0xEE, 0xC6, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 }, { 0x00, 0x00, 0xC3, 0xC6, 0xCC, 0xD8, 0xF0, 0xE0, 0xF0, 0xD8, 0xCC, 0xC6, 0xC3 }, { 0x00, 0x00, 0xFF, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0 }, { 0x00, 0x00, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xDB, 0xFF, 0xFF, 0xE7, 0xC3 }, { 0x00, 0x00, 0xC7, 0xC7, 0xCF, 0xCF, 0xDF, 0xDB, 0xFB, 0xF3, 0xF3, 0xE3, 0xE3 }, { 0x00, 0x00, 0x7E, 0xE7, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xE7, 0x7E }, { 0x00, 0x00, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xFE, 0xC7, 0xC3, 0xC3, 0xC7, 0xFE }, { 0x00, 0x00, 0x3F, 0x6E, 0xDF, 0xDB, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0x66, 0x3C }, { 0x00, 0x00, 0xC3, 0xC6, 0xCC, 0xD8, 0xF0, 0xFE, 0xC7, 0xC3, 0xC3, 0xC7, 0xFE }, { 0x00, 0x00, 0x7E, 0xE7, 0x03, 0x03, 0x07, 0x7E, 0xE0, 0xC0, 0xC0, 0xE7, 0x7E }, { 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xFF }, { 0x00, 0x00, 0x7E, 0xE7, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3 }, { 0x00, 0x00, 0x18, 0x3C, 0x3C, 0x66, 0x66, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3 }, { 0x00, 0x00, 0xC3, 0xE7, 0xFF, 0xFF, 0xDB, 0xDB, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3 }, { 0x00, 0x00, 0xC3, 0x66, 0x66, 0x3C, 0x3C, 0x18, 0x3C, 0x3C, 0x66, 0x66, 0xC3 }, { 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3C, 0x3C, 0x66, 0x66, 0xC3 }, { 0x00, 0x00, 0xFF, 0xC0, 0xC0, 0x60, 0x30, 0x7E, 0x0C, 0x06, 0x03, 0x03, 0xFF }, { 0x00, 0x00, 0x3C, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x3C }, { 0x00, 0x03, 0x03, 0x06, 0x06, 0x0C, 0x0C, 0x18, 0x18, 0x30, 0x30, 0x60, 0x60 }, { 0x00, 0x00, 0x3C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x3C }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC3, 0x66, 0x3C, 0x18 }, { 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x30, 0x70 }, { 0x00, 0x00, 0x7F, 0xC3, 0xC3, 0x7F, 0x03, 0xC3, 0x7E, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xFE, 0xC3, 0xC3, 0xC3, 0xC3, 0xFE, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0 }, { 0x00, 0x00, 0x7E, 0xC3, 0xC0, 0xC0, 0xC0, 0xC3, 0x7E, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x7F, 0xC3, 0xC3, 0xC3, 0xC3, 0x7F, 0x03, 0x03, 0x03, 0x03, 0x03 }, { 0x00, 0x00, 0x7F, 0xC0, 0xC0, 0xFE, 0xC3, 0xC3, 0x7E, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x30, 0x30, 0x30, 0x30, 0x30, 0xFC, 0x30, 0x30, 0x30, 0x33, 0x1E }, { 0x7E, 0xC3, 0x03, 0x03, 0x7F, 0xC3, 0xC3, 0xC3, 0x7E, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xFE, 0xC0, 0xC0, 0xC0, 0xC0 }, { 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x18, 0x00 }, { 0x38, 0x6C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x00, 0x00, 0x0C, 0x00 }, { 0x00, 0x00, 0xC6, 0xCC, 0xF8, 0xF0, 0xD8, 0xCC, 0xC6, 0xC0, 0xC0, 0xC0, 0xC0 }, { 0x00, 0x00, 0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x78 }, { 0x00, 0x00, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xFE, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xFC, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x7C, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0x7C, 0x00, 0x00, 0x00, 0x00 }, { 0xC0, 0xC0, 0xC0, 0xFE, 0xC3, 0xC3, 0xC3, 0xC3, 0xFE, 0x00, 0x00, 0x00, 0x00 }, { 0x03, 0x03, 0x03, 0x7F, 0xC3, 0xC3, 0xC3, 0xC3, 0x7F, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xE0, 0xFE, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xFE, 
0x03, 0x03, 0x7E, 0xC0, 0xC0, 0x7F, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x1C, 0x36, 0x30, 0x30, 0x30, 0x30, 0xFC, 0x30, 0x30, 0x30, 0x00 }, { 0x00, 0x00, 0x7E, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x18, 0x3C, 0x3C, 0x66, 0x66, 0xC3, 0xC3, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xC3, 0xE7, 0xFF, 0xDB, 0xC3, 0xC3, 0xC3, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xC3, 0x66, 0x3C, 0x18, 0x3C, 0x66, 0xC3, 0x00, 0x00, 0x00, 0x00 }, { 0xC0, 0x60, 0x60, 0x30, 0x18, 0x3C, 0x66, 0x66, 0xC3, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0xFF, 0x60, 0x30, 0x18, 0x0C, 0x06, 0xFF, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x0F, 0x18, 0x18, 0x18, 0x38, 0xF0, 0x38, 0x18, 0x18, 0x18, 0x0F }, { 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18 }, { 0x00, 0x00, 0xF0, 0x18, 0x18, 0x18, 0x1C, 0x0F, 0x1C, 0x18, 0x18, 0x18, 0xF0 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x8F, 0xF1, 0x60, 0x00, 0x00, 0x00 } }; // sCharacters void BitmapFont13::initialize() { OPENVDB_START_THREADSAFE_STATIC_WRITE glShadeModel(GL_FLAT); glPixelStorei(GL_UNPACK_ALIGNMENT, 1); BitmapFont13::sOffset = glGenLists(128); for (GLuint c = 32; c < 127; ++c) { glNewList(c + BitmapFont13::sOffset, GL_COMPILE); glBitmap(8, 13, 0.0, 2.0, 10.0, 0.0, BitmapFont13::sCharacters[c-32]); glEndList(); } OPENVDB_FINISH_THREADSAFE_STATIC_WRITE } void BitmapFont13::enableFontRendering() { glPushMatrix(); GLint vp[4] = { 0, 0, 0, 0 }; glGetIntegerv(GL_VIEWPORT, vp); const int width = vp[2], height = std::max(1, vp[3]); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(0, width, 0, height, -1.0, 1.0); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); //glShadeModel(GL_FLAT); glPixelStorei(GL_UNPACK_ALIGNMENT, 1); } void BitmapFont13::disableFontRendering() { glFlush(); glPopMatrix(); } void BitmapFont13::print(GLint px, GLint py, const std::string& str) { glRasterPos2i(px, py); glPushAttrib(GL_LIST_BIT); glListBase(BitmapFont13::sOffset); glCallLists(GLsizei(str.length()), GL_UNSIGNED_BYTE, reinterpret_cast<const GLubyte*>(str.c_str())); glPopAttrib(); } } // namespace openvdb_viewer
9,760
C++
56.417647
85
0.614344
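The table above packs each glyph as 13 bytes, one byte per 8-pixel-wide row, bottom row first (the order glBitmap consumes, with the most significant bit as the leftmost pixel). A small self-contained sketch that decodes one glyph to ASCII art; the sample bytes are copied from the '+' entry (table index 11, ASCII 43), since sCharacters itself is private.

// Sketch: decode one 8x13 glyph from the table above into ASCII art.
// glBitmap() consumes rows bottom-up, so printing starts from the last byte.
#include <cstdio>

int main()
{
    const unsigned char plus[13] = {
        0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0xFF,
        0xFF, 0x18, 0x18, 0x18, 0x00, 0x00 };

    for (int row = 12; row >= 0; --row) {           // top row of the glyph first
        for (int bit = 7; bit >= 0; --bit) {        // leftmost pixel first
            std::putchar(((plus[row] >> bit) & 1) ? '#' : '.');
        }
        std::putchar('\n');
    }
    return 0;
}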
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/RenderModules.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "RenderModules.h" #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointCount.h> #include <openvdb/points/PointConversion.h> #include <openvdb/tools/Morphology.h> #include <openvdb/tools/Prune.h> #include <openvdb/tree/LeafManager.h> #include <openvdb/util/logging.h> #include <algorithm> // for std::min() #include <cmath> // for std::abs(), std::fabs(), std::floor() #include <limits> #include <type_traits> // for std::is_const namespace openvdb_viewer { namespace util { /// Helper class used internally by processTypedGrid() template<typename GridType, typename OpType, bool IsConst/*=false*/> struct GridProcessor { static inline void call(OpType& op, openvdb::GridBase::Ptr grid) { op.template operator()<GridType>(openvdb::gridPtrCast<GridType>(grid)); } }; /// Helper class used internally by processTypedGrid() template<typename GridType, typename OpType> struct GridProcessor<GridType, OpType, /*IsConst=*/true> { static inline void call(OpType& op, openvdb::GridBase::ConstPtr grid) { op.template operator()<GridType>(openvdb::gridConstPtrCast<GridType>(grid)); } }; /// Helper function used internally by processTypedGrid() template<typename GridType, typename OpType, typename GridPtrType> inline void doProcessTypedGrid(GridPtrType grid, OpType& op) { GridProcessor<GridType, OpType, std::is_const<typename GridPtrType::element_type>::value>::call(op, grid); } //////////////////////////////////////// /// @brief Utility function that, given a generic grid pointer, /// calls a functor on the fully-resolved grid /// /// Usage: /// @code /// struct PruneOp { /// template<typename GridT> /// void operator()(typename GridT::Ptr grid) const { grid->tree()->prune(); } /// }; /// /// processTypedGrid(myGridPtr, PruneOp()); /// @endcode /// /// @return @c false if the grid type is unknown or unhandled. 
template<typename GridPtrType, typename OpType> bool processTypedGrid(GridPtrType grid, OpType& op) { using namespace openvdb; if (grid->template isType<BoolGrid>()) doProcessTypedGrid<BoolGrid>(grid, op); else if (grid->template isType<FloatGrid>()) doProcessTypedGrid<FloatGrid>(grid, op); else if (grid->template isType<DoubleGrid>()) doProcessTypedGrid<DoubleGrid>(grid, op); else if (grid->template isType<Int32Grid>()) doProcessTypedGrid<Int32Grid>(grid, op); else if (grid->template isType<Int64Grid>()) doProcessTypedGrid<Int64Grid>(grid, op); else if (grid->template isType<Vec3IGrid>()) doProcessTypedGrid<Vec3IGrid>(grid, op); else if (grid->template isType<Vec3SGrid>()) doProcessTypedGrid<Vec3SGrid>(grid, op); else if (grid->template isType<Vec3DGrid>()) doProcessTypedGrid<Vec3DGrid>(grid, op); else if (grid->template isType<points::PointDataGrid>()) { doProcessTypedGrid<points::PointDataGrid>(grid, op); } else return false; return true; } /// @brief Utility function that, given a generic grid pointer, calls /// a functor on the fully-resolved grid, provided that the grid's /// voxel values are scalars template<typename GridPtrType, typename OpType> bool processTypedScalarGrid(GridPtrType grid, OpType& op) { using namespace openvdb; if (grid->template isType<FloatGrid>()) doProcessTypedGrid<FloatGrid>(grid, op); else if (grid->template isType<DoubleGrid>()) doProcessTypedGrid<DoubleGrid>(grid, op); else if (grid->template isType<Int32Grid>()) doProcessTypedGrid<Int32Grid>(grid, op); else if (grid->template isType<Int64Grid>()) doProcessTypedGrid<Int64Grid>(grid, op); else return false; return true; } /// @brief Utility function that, given a generic grid pointer, calls /// a functor on the fully-resolved grid, provided that the grid's /// voxel values are scalars or PointIndex objects template<typename GridPtrType, typename OpType> bool processTypedScalarOrPointDataGrid(GridPtrType grid, OpType& op) { using namespace openvdb; if (processTypedScalarGrid(grid, op)) return true; if (grid->template isType<points::PointDataGrid>()) { doProcessTypedGrid<points::PointDataGrid>(grid, op); return true; } return false; } /// @brief Utility function that, given a generic grid pointer, calls /// a functor on the fully-resolved grid, provided that the grid's /// voxel values are vectors template<typename GridPtrType, typename OpType> bool processTypedVectorGrid(GridPtrType grid, OpType& op) { using namespace openvdb; if (grid->template isType<Vec3IGrid>()) doProcessTypedGrid<Vec3IGrid>(grid, op); else if (grid->template isType<Vec3SGrid>()) doProcessTypedGrid<Vec3SGrid>(grid, op); else if (grid->template isType<Vec3DGrid>()) doProcessTypedGrid<Vec3DGrid>(grid, op); else return false; return true; } template<class TreeType> class MinMaxVoxel { public: using LeafArray = openvdb::tree::LeafManager<TreeType>; using ValueType = typename TreeType::ValueType; // LeafArray = openvdb::tree::LeafManager<TreeType> leafs(myTree) MinMaxVoxel(LeafArray&); void runParallel(); void runSerial(); const ValueType& minVoxel() const { return mMin; } const ValueType& maxVoxel() const { return mMax; } inline MinMaxVoxel(const MinMaxVoxel<TreeType>&, tbb::split); inline void operator()(const tbb::blocked_range<size_t>&); inline void join(const MinMaxVoxel<TreeType>&); private: LeafArray& mLeafArray; ValueType mMin, mMax; }; template <class TreeType> MinMaxVoxel<TreeType>::MinMaxVoxel(LeafArray& leafs) : mLeafArray(leafs) , mMin(std::numeric_limits<ValueType>::max()) , mMax(std::numeric_limits<ValueType>::lowest()) { } 
template <class TreeType> inline MinMaxVoxel<TreeType>::MinMaxVoxel(const MinMaxVoxel<TreeType>& rhs, tbb::split) : mLeafArray(rhs.mLeafArray) , mMin(std::numeric_limits<ValueType>::max()) , mMax(std::numeric_limits<ValueType>::lowest()) { } template <class TreeType> void MinMaxVoxel<TreeType>::runParallel() { tbb::parallel_reduce(mLeafArray.getRange(), *this); } template <class TreeType> void MinMaxVoxel<TreeType>::runSerial() { (*this)(mLeafArray.getRange()); } template <class TreeType> inline void MinMaxVoxel<TreeType>::operator()(const tbb::blocked_range<size_t>& range) { typename TreeType::LeafNodeType::ValueOnCIter iter; for (size_t n = range.begin(); n < range.end(); ++n) { iter = mLeafArray.leaf(n).cbeginValueOn(); for (; iter; ++iter) { const ValueType value = iter.getValue(); mMin = std::min(mMin, value); mMax = std::max(mMax, value); } } } template <class TreeType> inline void MinMaxVoxel<TreeType>::join(const MinMaxVoxel<TreeType>& rhs) { mMin = std::min(mMin, rhs.mMin); mMax = std::max(mMax, rhs.mMax); } } // namespace util //////////////////////////////////////// // BufferObject BufferObject::BufferObject(): mVertexBuffer(0), mNormalBuffer(0), mIndexBuffer(0), mColorBuffer(0), mPrimType(GL_POINTS), mPrimNum(0) { } BufferObject::~BufferObject() { clear(); } void BufferObject::render() const { if (mPrimNum == 0 || !glIsBuffer(mIndexBuffer) || !glIsBuffer(mVertexBuffer)) { OPENVDB_LOG_DEBUG_RUNTIME("request to render empty or uninitialized buffer"); return; } const bool usesColorBuffer = glIsBuffer(mColorBuffer); const bool usesNormalBuffer = glIsBuffer(mNormalBuffer); glBindBuffer(GL_ARRAY_BUFFER, mVertexBuffer); glEnableClientState(GL_VERTEX_ARRAY); glVertexPointer(3, GL_FLOAT, 0, nullptr); if (usesColorBuffer) { glBindBuffer(GL_ARRAY_BUFFER, mColorBuffer); glEnableClientState(GL_COLOR_ARRAY); glColorPointer(3, GL_FLOAT, 0, nullptr); } if (usesNormalBuffer) { glEnableClientState(GL_NORMAL_ARRAY); glBindBuffer(GL_ARRAY_BUFFER, mNormalBuffer); glNormalPointer(GL_FLOAT, 0, nullptr); } glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndexBuffer); glDrawElements(mPrimType, mPrimNum, GL_UNSIGNED_INT, nullptr); // disable client-side capabilities if (usesColorBuffer) glDisableClientState(GL_COLOR_ARRAY); if (usesNormalBuffer) glDisableClientState(GL_NORMAL_ARRAY); // release vbo's glBindBuffer(GL_ARRAY_BUFFER, 0); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); } void BufferObject::genIndexBuffer(const std::vector<GLuint>& v, GLenum primType) { // clear old buffer if (glIsBuffer(mIndexBuffer) == GL_TRUE) glDeleteBuffers(1, &mIndexBuffer); // gen new buffer glGenBuffers(1, &mIndexBuffer); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndexBuffer); if (glIsBuffer(mIndexBuffer) == GL_FALSE) throw "Error: Unable to create index buffer"; // upload data glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLuint) * v.size(), &v[0], GL_STATIC_DRAW); // upload data if (GL_NO_ERROR != glGetError()) throw "Error: Unable to upload index buffer data"; // release buffer glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); mPrimNum = GLsizei(v.size()); mPrimType = primType; } void BufferObject::genVertexBuffer(const std::vector<GLfloat>& v) { if (glIsBuffer(mVertexBuffer) == GL_TRUE) glDeleteBuffers(1, &mVertexBuffer); glGenBuffers(1, &mVertexBuffer); glBindBuffer(GL_ARRAY_BUFFER, mVertexBuffer); if (glIsBuffer(mVertexBuffer) == GL_FALSE) throw "Error: Unable to create vertex buffer"; glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * v.size(), &v[0], GL_STATIC_DRAW); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to upload vertex 
buffer data"; glBindBuffer(GL_ARRAY_BUFFER, 0); } void BufferObject::genNormalBuffer(const std::vector<GLfloat>& v) { if (glIsBuffer(mNormalBuffer) == GL_TRUE) glDeleteBuffers(1, &mNormalBuffer); glGenBuffers(1, &mNormalBuffer); glBindBuffer(GL_ARRAY_BUFFER, mNormalBuffer); if (glIsBuffer(mNormalBuffer) == GL_FALSE) throw "Error: Unable to create normal buffer"; glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * v.size(), &v[0], GL_STATIC_DRAW); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to upload normal buffer data"; glBindBuffer(GL_ARRAY_BUFFER, 0); } void BufferObject::genColorBuffer(const std::vector<GLfloat>& v) { if (glIsBuffer(mColorBuffer) == GL_TRUE) glDeleteBuffers(1, &mColorBuffer); glGenBuffers(1, &mColorBuffer); glBindBuffer(GL_ARRAY_BUFFER, mColorBuffer); if (glIsBuffer(mColorBuffer) == GL_FALSE) throw "Error: Unable to create color buffer"; glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * v.size(), &v[0], GL_STATIC_DRAW); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to upload color buffer data"; glBindBuffer(GL_ARRAY_BUFFER, 0); } void BufferObject::clear() { if (glIsBuffer(mIndexBuffer) == GL_TRUE) glDeleteBuffers(1, &mIndexBuffer); if (glIsBuffer(mVertexBuffer) == GL_TRUE) glDeleteBuffers(1, &mVertexBuffer); if (glIsBuffer(mColorBuffer) == GL_TRUE) glDeleteBuffers(1, &mColorBuffer); if (glIsBuffer(mNormalBuffer) == GL_TRUE) glDeleteBuffers(1, &mNormalBuffer); mPrimType = GL_POINTS; mPrimNum = 0; } //////////////////////////////////////// ShaderProgram::ShaderProgram(): mProgram(0), mVertShader(0), mFragShader(0) { } ShaderProgram::~ShaderProgram() { clear(); } void ShaderProgram::setVertShader(const std::string& s) { mVertShader = glCreateShader(GL_VERTEX_SHADER); if (glIsShader(mVertShader) == GL_FALSE) throw "Error: Unable to create shader program."; GLint length = GLint(s.length()); const char *str = s.c_str(); glShaderSource(mVertShader, 1, &str, &length); glCompileShader(mVertShader); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to compile vertex shader."; } void ShaderProgram::setFragShader(const std::string& s) { mFragShader = glCreateShader(GL_FRAGMENT_SHADER); if (glIsShader(mFragShader) == GL_FALSE) throw "Error: Unable to create shader program."; GLint length = GLint(s.length()); const char *str = s.c_str(); glShaderSource(mFragShader, 1, &str, &length); glCompileShader(mFragShader); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to compile fragment shader."; } void ShaderProgram::build() { mProgram = glCreateProgram(); if (glIsProgram(mProgram) == GL_FALSE) throw "Error: Unable to create shader program."; if (glIsShader(mVertShader) == GL_TRUE) glAttachShader(mProgram, mVertShader); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to attach vertex shader."; if (glIsShader(mFragShader) == GL_TRUE) glAttachShader(mProgram, mFragShader); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to attach fragment shader."; glLinkProgram(mProgram); GLint linked = 0; glGetProgramiv(mProgram, GL_LINK_STATUS, &linked); if (!linked) throw "Error: Unable to link shader program."; } void ShaderProgram::build(const std::vector<GLchar*>& attributes) { mProgram = glCreateProgram(); if (glIsProgram(mProgram) == GL_FALSE) throw "Error: Unable to create shader program."; for (GLuint n = 0, N = GLuint(attributes.size()); n < N; ++n) { glBindAttribLocation(mProgram, n, attributes[n]); } if (glIsShader(mVertShader) == GL_TRUE) glAttachShader(mProgram, mVertShader); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to attach vertex shader."; if 
(glIsShader(mFragShader) == GL_TRUE) glAttachShader(mProgram, mFragShader); if (GL_NO_ERROR != glGetError()) throw "Error: Unable to attach fragment shader."; glLinkProgram(mProgram); GLint linked; glGetProgramiv(mProgram, GL_LINK_STATUS, &linked); if (!linked) throw "Error: Unable to link shader program."; } void ShaderProgram::startShading() const { if (glIsProgram(mProgram) == GL_FALSE) { throw "Error: called startShading() on uncompiled shader program."; } glUseProgram(mProgram); } void ShaderProgram::stopShading() const { glUseProgram(0); } void ShaderProgram::clear() { GLsizei numShaders = 0; GLuint shaders[2] = { 0, 0 }; glGetAttachedShaders(mProgram, 2, &numShaders, shaders); // detach and remove shaders for (GLsizei n = 0; n < numShaders; ++n) { glDetachShader(mProgram, shaders[n]); if (glIsShader(shaders[n]) == GL_TRUE) glDeleteShader(shaders[n]); } // remove program if (glIsProgram(mProgram)) glDeleteProgram(mProgram); } //////////////////////////////////////// // ViewportModule ViewportModule::ViewportModule(): mAxisGnomonScale(1.5), mGroundPlaneScale(8.0) { } void ViewportModule::render() { if (!mIsVisible) return; /// @todo use VBO's // Ground plane glPushMatrix(); glScalef(mGroundPlaneScale, mGroundPlaneScale, mGroundPlaneScale); glColor3d(0.6, 0.6, 0.6); OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN float step = 0.125; for (float x = -1; x < 1.125; x+=step) { if (std::fabs(x) == 0.5 || std::fabs(x) == 0.0) { glLineWidth(1.5); } else { glLineWidth(1.0); } glBegin(GL_LINES); glVertex3f(x, 0, 1); glVertex3f(x, 0, -1); glVertex3f(1, 0, x); glVertex3f(-1, 0, x); glEnd(); } OPENVDB_NO_FP_EQUALITY_WARNING_END glPopMatrix(); // Axis gnomon GLfloat modelview[16]; glGetFloatv(GL_MODELVIEW_MATRIX, &modelview[0]); // Stash current viewport settigs. GLint viewport[4]; glGetIntegerv(GL_VIEWPORT, &viewport[0]); GLint width = viewport[2] / 20; GLint height = viewport[3] / 20; glViewport(0, 0, width, height); glPushMatrix(); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); GLfloat campos[3] = { modelview[2], modelview[6], modelview[10] }; GLfloat up[3] = { modelview[1], modelview[5], modelview[9] }; gluLookAt(campos[0], campos[1], campos[2], 0.0, 0.0, 0.0, up[0], up[1], up[2]); glScalef(mAxisGnomonScale, mAxisGnomonScale, mAxisGnomonScale); glLineWidth(1.0); glBegin(GL_LINES); glColor3f(1.0f, 0.0f, 0.0f); glVertex3f(0, 0, 0); glVertex3f(1, 0, 0); glColor3f(0.0f, 1.0f, 0.0f ); glVertex3f(0, 0, 0); glVertex3f(0, 1, 0); glColor3f(0.0f, 0.0f, 1.0f); glVertex3f(0, 0, 0); glVertex3f(0, 0, 1); glEnd(); glLineWidth(1.0); // reset viewport glPopMatrix(); glViewport(viewport[0], viewport[1], viewport[2], viewport[3]); } //////////////////////////////////////// class TreeTopologyOp { public: TreeTopologyOp(BufferObject& buffer) : mBuffer(&buffer) {} template<typename GridType> void operator()(typename GridType::ConstPtr grid) { using openvdb::Index64; Index64 nodeCount = grid->tree().leafCount() + grid->tree().nonLeafCount(); const Index64 N = nodeCount * 8 * 3; std::vector<GLfloat> points(N); std::vector<GLfloat> colors(N); std::vector<GLuint> indices(N); openvdb::Vec3d ptn; openvdb::Vec3s color; openvdb::CoordBBox bbox; Index64 pOffset = 0, iOffset = 0, cOffset = 0, idx = 0; for (typename GridType::TreeType::NodeCIter iter = grid->tree().cbeginNode(); iter; ++iter) { iter.getBoundingBox(bbox); // Nodes are rendered as cell-centered const openvdb::Vec3d min(bbox.min().x()-0.5, bbox.min().y()-0.5, bbox.min().z()-0.5); const openvdb::Vec3d max(bbox.max().x()+0.5, bbox.max().y()+0.5, bbox.max().z()+0.5); // corner 
1 ptn = grid->indexToWorld(min); points[pOffset++] = static_cast<GLfloat>(ptn[0]); points[pOffset++] = static_cast<GLfloat>(ptn[1]); points[pOffset++] = static_cast<GLfloat>(ptn[2]); // corner 2 ptn = openvdb::Vec3d(min.x(), min.y(), max.z()); ptn = grid->indexToWorld(ptn); points[pOffset++] = static_cast<GLfloat>(ptn[0]); points[pOffset++] = static_cast<GLfloat>(ptn[1]); points[pOffset++] = static_cast<GLfloat>(ptn[2]); // corner 3 ptn = openvdb::Vec3d(max.x(), min.y(), max.z()); ptn = grid->indexToWorld(ptn); points[pOffset++] = static_cast<GLfloat>(ptn[0]); points[pOffset++] = static_cast<GLfloat>(ptn[1]); points[pOffset++] = static_cast<GLfloat>(ptn[2]); // corner 4 ptn = openvdb::Vec3d(max.x(), min.y(), min.z()); ptn = grid->indexToWorld(ptn); points[pOffset++] = static_cast<GLfloat>(ptn[0]); points[pOffset++] = static_cast<GLfloat>(ptn[1]); points[pOffset++] = static_cast<GLfloat>(ptn[2]); // corner 5 ptn = openvdb::Vec3d(min.x(), max.y(), min.z()); ptn = grid->indexToWorld(ptn); points[pOffset++] = static_cast<GLfloat>(ptn[0]); points[pOffset++] = static_cast<GLfloat>(ptn[1]); points[pOffset++] = static_cast<GLfloat>(ptn[2]); // corner 6 ptn = openvdb::Vec3d(min.x(), max.y(), max.z()); ptn = grid->indexToWorld(ptn); points[pOffset++] = static_cast<GLfloat>(ptn[0]); points[pOffset++] = static_cast<GLfloat>(ptn[1]); points[pOffset++] = static_cast<GLfloat>(ptn[2]); // corner 7 ptn = grid->indexToWorld(max); points[pOffset++] = static_cast<GLfloat>(ptn[0]); points[pOffset++] = static_cast<GLfloat>(ptn[1]); points[pOffset++] = static_cast<GLfloat>(ptn[2]); // corner 8 ptn = openvdb::Vec3d(max.x(), max.y(), min.z()); ptn = grid->indexToWorld(ptn); points[pOffset++] = static_cast<GLfloat>(ptn[0]); points[pOffset++] = static_cast<GLfloat>(ptn[1]); points[pOffset++] = static_cast<GLfloat>(ptn[2]); // edge 1 indices[iOffset++] = GLuint(idx); indices[iOffset++] = GLuint(idx + 1); // edge 2 indices[iOffset++] = GLuint(idx + 1); indices[iOffset++] = GLuint(idx + 2); // edge 3 indices[iOffset++] = GLuint(idx + 2); indices[iOffset++] = GLuint(idx + 3); // edge 4 indices[iOffset++] = GLuint(idx + 3); indices[iOffset++] = GLuint(idx); // edge 5 indices[iOffset++] = GLuint(idx + 4); indices[iOffset++] = GLuint(idx + 5); // edge 6 indices[iOffset++] = GLuint(idx + 5); indices[iOffset++] = GLuint(idx + 6); // edge 7 indices[iOffset++] = GLuint(idx + 6); indices[iOffset++] = GLuint(idx + 7); // edge 8 indices[iOffset++] = GLuint(idx + 7); indices[iOffset++] = GLuint(idx + 4); // edge 9 indices[iOffset++] = GLuint(idx); indices[iOffset++] = GLuint(idx + 4); // edge 10 indices[iOffset++] = GLuint(idx + 1); indices[iOffset++] = GLuint(idx + 5); // edge 11 indices[iOffset++] = GLuint(idx + 2); indices[iOffset++] = GLuint(idx + 6); // edge 12 indices[iOffset++] = GLuint(idx + 3); indices[iOffset++] = GLuint(idx + 7); // node vertex color const int level = iter.getLevel(); color = sNodeColors[(level == 0) ? 3 : (level == 1) ? 
2 : 1]; for (Index64 n = 0; n < 8; ++n) { colors[cOffset++] = color[0]; colors[cOffset++] = color[1]; colors[cOffset++] = color[2]; } idx += 8; } // end node iteration // gen buffers and upload data to GPU mBuffer->genVertexBuffer(points); mBuffer->genColorBuffer(colors); mBuffer->genIndexBuffer(indices, GL_LINES); } private: BufferObject *mBuffer; static openvdb::Vec3s sNodeColors[]; }; // TreeTopologyOp openvdb::Vec3s TreeTopologyOp::sNodeColors[] = { openvdb::Vec3s(0.045f, 0.045f, 0.045f), // root openvdb::Vec3s(0.0432f, 0.33f, 0.0411023f), // first internal node level openvdb::Vec3s(0.871f, 0.394f, 0.01916f), // intermediate internal node levels openvdb::Vec3s(0.00608299f, 0.279541f, 0.625f) // leaf nodes }; //////////////////////////////////////// // Tree topology render module TreeTopologyModule::TreeTopologyModule(const openvdb::GridBase::ConstPtr& grid): mGrid(grid), mIsInitialized(false) { mShader.setVertShader( "#version 120\n" "void main() {\n" "gl_FrontColor = gl_Color;\n" "gl_Position = ftransform();\n" "gl_ClipVertex = gl_ModelViewMatrix * gl_Vertex;\n" "}\n"); mShader.setFragShader( "#version 120\n" "void main() {\n" "gl_FragColor = gl_Color;}\n"); mShader.build(); } void TreeTopologyModule::init() { mIsInitialized = true; // extract grid topology TreeTopologyOp drawTopology(mBufferObject); if (!util::processTypedGrid(mGrid, drawTopology)) { OPENVDB_LOG_INFO("Ignoring unrecognized grid type" " during tree topology module initialization."); } } void TreeTopologyModule::render() { if (!mIsVisible) return; if (!mIsInitialized) init(); mShader.startShading(); mBufferObject.render(); mShader.stopShading(); } //////////////////////////////////////// template<typename TreeType> class PointGenerator { public: using LeafManagerType = openvdb::tree::LeafManager<TreeType>; PointGenerator( std::vector<GLfloat>& points, std::vector<GLuint>& indices, LeafManagerType& leafs, std::vector<size_t>& indexMap, const openvdb::math::Transform& transform, openvdb::Index64 voxelsPerLeaf = TreeType::LeafNodeType::NUM_VOXELS) : mPoints(points) , mIndices(indices) , mLeafs(leafs) , mIndexMap(indexMap) , mTransform(transform) , mVoxelsPerLeaf(voxelsPerLeaf) { } void runParallel() { tbb::parallel_for(mLeafs.getRange(), *this); } inline void operator()(const typename LeafManagerType::RangeType& range) const { using openvdb::Index64; using ValueOnCIter = typename TreeType::LeafNodeType::ValueOnCIter; openvdb::Vec3d pos; size_t index = 0; Index64 activeVoxels = 0; for (size_t n = range.begin(); n < range.end(); ++n) { index = mIndexMap[n]; ValueOnCIter it = mLeafs.leaf(n).cbeginValueOn(); activeVoxels = mLeafs.leaf(n).onVoxelCount(); if (activeVoxels <= mVoxelsPerLeaf) { for ( ; it; ++it) { pos = mTransform.indexToWorld(it.getCoord()); insertPoint(pos, index); ++index; } } else if (1 == mVoxelsPerLeaf) { pos = mTransform.indexToWorld(it.getCoord()); insertPoint(pos, index); } else { std::vector<openvdb::Coord> coords; coords.reserve(static_cast<size_t>(activeVoxels)); for ( ; it; ++it) { coords.push_back(it.getCoord()); } pos = mTransform.indexToWorld(coords[0]); insertPoint(pos, index); ++index; pos = mTransform.indexToWorld(coords[static_cast<size_t>(activeVoxels-1)]); insertPoint(pos, index); ++index; Index64 r = Index64(std::floor(double(mVoxelsPerLeaf) / double(activeVoxels))); for (Index64 i = 1, I = mVoxelsPerLeaf - 2; i < I; ++i) { pos = mTransform.indexToWorld(coords[static_cast<size_t>(i * r)]); insertPoint(pos, index); ++index; } } } } private: void insertPoint(const openvdb::Vec3d& pos, size_t 
index) const { mIndices[index] = GLuint(index); const size_t element = index * 3; mPoints[element ] = static_cast<GLfloat>(pos[0]); mPoints[element + 1] = static_cast<GLfloat>(pos[1]); mPoints[element + 2] = static_cast<GLfloat>(pos[2]); } std::vector<GLfloat>& mPoints; std::vector<GLuint>& mIndices; LeafManagerType& mLeafs; std::vector<size_t>& mIndexMap; const openvdb::math::Transform& mTransform; const openvdb::Index64 mVoxelsPerLeaf; }; // PointGenerator template<typename GridType> class NormalGenerator { public: using AccessorType = typename GridType::ConstAccessor; using Grad = openvdb::math::ISGradient<openvdb::math::CD_2ND>; NormalGenerator(const AccessorType& acc): mAccessor(acc) {} NormalGenerator(const NormalGenerator&) = delete; NormalGenerator& operator=(const NormalGenerator&) = delete; void operator()(const openvdb::Coord& ijk, openvdb::Vec3d& normal) { openvdb::Vec3d v{Grad::result(mAccessor, ijk)}; const double length = v.length(); if (length > 1.0e-7) { v *= 1.0 / length; normal = v; } } private: const AccessorType& mAccessor; }; // class NormalGenerator // Specialization for PointDataGrids, for which normals are not generated template<> class NormalGenerator<openvdb::points::PointDataGrid> { public: NormalGenerator(const openvdb::points::PointDataGrid::ConstAccessor&) {} NormalGenerator(const NormalGenerator&) = delete; NormalGenerator& operator=(const NormalGenerator&) = delete; void operator()(const openvdb::Coord&, openvdb::Vec3d&) {} }; template<typename GridType> class PointAttributeGenerator { public: using ValueType = typename GridType::ValueType; PointAttributeGenerator( std::vector<GLfloat>& points, std::vector<GLfloat>& colors, const GridType& grid, ValueType minValue, ValueType maxValue, openvdb::Vec3s (&colorMap)[4], bool isLevelSet = false) : mPoints(points) , mColors(colors) , mNormals(nullptr) , mGrid(grid) , mAccessor(grid.tree()) , mMinValue(minValue) , mMaxValue(maxValue) , mColorMap(colorMap) , mIsLevelSet(isLevelSet) , mZeroValue(openvdb::zeroVal<ValueType>()) { init(); } PointAttributeGenerator( std::vector<GLfloat>& points, std::vector<GLfloat>& colors, std::vector<GLfloat>& normals, const GridType& grid, ValueType minValue, ValueType maxValue, openvdb::Vec3s (&colorMap)[4], bool isLevelSet = false) : mPoints(points) , mColors(colors) , mNormals(&normals) , mGrid(grid) , mAccessor(grid.tree()) , mMinValue(minValue) , mMaxValue(maxValue) , mColorMap(colorMap) , mIsLevelSet(isLevelSet) , mZeroValue(openvdb::zeroVal<ValueType>()) { init(); } void runParallel() { tbb::parallel_for(tbb::blocked_range<size_t>(0, (mPoints.size() / 3)), *this); } inline void operator()(const tbb::blocked_range<size_t>& range) const { openvdb::Coord ijk; openvdb::Vec3d pos, normal(0.0, -1.0, 0.0); openvdb::Vec3s color(0.9f, 0.3f, 0.3f); float w = 0.0; NormalGenerator<GridType> computeNormal{mAccessor}; size_t e1, e2, e3, voxelNum = 0; for (size_t n = range.begin(); n < range.end(); ++n) { e1 = 3 * n; e2 = e1 + 1; e3 = e2 + 1; pos[0] = mPoints[e1]; pos[1] = mPoints[e2]; pos[2] = mPoints[e3]; pos = mGrid.worldToIndex(pos); ijk[0] = int(pos[0]); ijk[1] = int(pos[1]); ijk[2] = int(pos[2]); const ValueType& value = mAccessor.getValue(ijk); if (value < mZeroValue) { // is negative if (mIsLevelSet) { color = mColorMap[1]; } else { w = (float(value) - mOffset[1]) * mScale[1]; color = openvdb::Vec3s(w * mColorMap[0] + (1.0 - w) * mColorMap[1]); } } else { if (mIsLevelSet) { color = mColorMap[2]; } else { w = (float(value) - mOffset[0]) * mScale[0]; color = openvdb::Vec3s(w * 
mColorMap[2] + (1.0 - w) * mColorMap[3]); } } mColors[e1] = color[0]; mColors[e2] = color[1]; mColors[e3] = color[2]; if (mNormals) { if ((voxelNum % 2) == 0) { computeNormal(ijk, normal); } ++voxelNum; (*mNormals)[e1] = static_cast<GLfloat>(normal[0]); (*mNormals)[e2] = static_cast<GLfloat>(normal[1]); (*mNormals)[e3] = static_cast<GLfloat>(normal[2]); } } } private: void init() { mOffset[0] = static_cast<float>(std::min(mZeroValue, mMinValue)); mScale[0] = static_cast<float>( 1.0 / (std::abs(float(std::max(mZeroValue, mMaxValue)) - mOffset[0]))); mOffset[1] = static_cast<float>(std::min(mZeroValue, mMinValue)); mScale[1] = static_cast<float>( 1.0 / (std::abs(float(std::max(mZeroValue, mMaxValue)) - mOffset[1]))); } std::vector<GLfloat>& mPoints; std::vector<GLfloat>& mColors; std::vector<GLfloat>* mNormals; const GridType& mGrid; openvdb::tree::ValueAccessor<const typename GridType::TreeType> mAccessor; ValueType mMinValue, mMaxValue; openvdb::Vec3s (&mColorMap)[4]; const bool mIsLevelSet; ValueType mZeroValue; float mOffset[2], mScale[2]; }; // PointAttributeGenerator //////////////////////////////////////// class ActiveScalarValuesOp { public: ActiveScalarValuesOp( BufferObject& interiorBuffer, BufferObject& surfaceBuffer) : mInteriorBuffer(&interiorBuffer) , mSurfaceBuffer(&surfaceBuffer) { } template<typename GridType> void operator()(typename GridType::ConstPtr grid) { using openvdb::Index64; const Index64 maxVoxelPoints = 26000000; openvdb::Vec3s colorMap[4]; colorMap[0] = openvdb::Vec3s(0.3f, 0.9f, 0.3f); // green colorMap[1] = openvdb::Vec3s(0.9f, 0.3f, 0.3f); // red colorMap[2] = openvdb::Vec3s(0.9f, 0.9f, 0.3f); // yellow colorMap[3] = openvdb::Vec3s(0.3f, 0.3f, 0.9f); // blue ////////// using ValueType = typename GridType::ValueType; using TreeType = typename GridType::TreeType; using BoolTreeT = typename TreeType::template ValueConverter<bool>::Type; const TreeType& tree = grid->tree(); const bool isLevelSetGrid = grid->getGridClass() == openvdb::GRID_LEVEL_SET; ValueType minValue, maxValue; openvdb::tree::LeafManager<const TreeType> leafs(tree); { util::MinMaxVoxel<const TreeType> minmax(leafs); minmax.runParallel(); minValue = minmax.minVoxel(); maxValue = minmax.maxVoxel(); } openvdb::Index64 voxelsPerLeaf = TreeType::LeafNodeType::NUM_VOXELS; if (!isLevelSetGrid) { typename BoolTreeT::Ptr interiorMask(new BoolTreeT(false)); { // Generate Interior Points interiorMask->topologyUnion(tree); interiorMask->voxelizeActiveTiles(); if (interiorMask->activeLeafVoxelCount() > maxVoxelPoints) { voxelsPerLeaf = std::max<Index64>(1, (maxVoxelPoints / interiorMask->leafCount())); } openvdb::tools::erodeVoxels(*interiorMask, 2); openvdb::tree::LeafManager<BoolTreeT> maskleafs(*interiorMask); std::vector<size_t> indexMap(maskleafs.leafCount()); size_t voxelCount = 0; for (Index64 l = 0, L = maskleafs.leafCount(); l < L; ++l) { indexMap[l] = voxelCount; voxelCount += std::min(maskleafs.leaf(l).onVoxelCount(), voxelsPerLeaf); } std::vector<GLfloat> points(voxelCount * 3), colors(voxelCount * 3); std::vector<GLuint> indices(voxelCount); PointGenerator<BoolTreeT> pointGen( points, indices, maskleafs, indexMap, grid->transform(), voxelsPerLeaf); pointGen.runParallel(); PointAttributeGenerator<GridType> attributeGen( points, colors, *grid, minValue, maxValue, colorMap); attributeGen.runParallel(); // gen buffers and upload data to GPU mInteriorBuffer->genVertexBuffer(points); mInteriorBuffer->genColorBuffer(colors); mInteriorBuffer->genIndexBuffer(indices, GL_POINTS); } { // Generate Surface 
Points typename BoolTreeT::Ptr surfaceMask(new BoolTreeT(false)); surfaceMask->topologyUnion(tree); surfaceMask->voxelizeActiveTiles(); openvdb::tree::ValueAccessor<BoolTreeT> interiorAcc(*interiorMask); for (typename BoolTreeT::LeafIter leafIt = surfaceMask->beginLeaf(); leafIt; ++leafIt) { const typename BoolTreeT::LeafNodeType* leaf = interiorAcc.probeConstLeaf(leafIt->origin()); if (leaf) leafIt->topologyDifference(*leaf, false); } openvdb::tools::pruneInactive(*surfaceMask); openvdb::tree::LeafManager<BoolTreeT> maskleafs(*surfaceMask); std::vector<size_t> indexMap(maskleafs.leafCount()); size_t voxelCount = 0; for (Index64 l = 0, L = maskleafs.leafCount(); l < L; ++l) { indexMap[l] = voxelCount; voxelCount += std::min(maskleafs.leaf(l).onVoxelCount(), voxelsPerLeaf); } std::vector<GLfloat> points(voxelCount * 3), colors(voxelCount * 3), normals(voxelCount * 3); std::vector<GLuint> indices(voxelCount); PointGenerator<BoolTreeT> pointGen( points, indices, maskleafs, indexMap, grid->transform(), voxelsPerLeaf); pointGen.runParallel(); PointAttributeGenerator<GridType> attributeGen( points, colors, normals, *grid, minValue, maxValue, colorMap); attributeGen.runParallel(); mSurfaceBuffer->genVertexBuffer(points); mSurfaceBuffer->genColorBuffer(colors); mSurfaceBuffer->genNormalBuffer(normals); mSurfaceBuffer->genIndexBuffer(indices, GL_POINTS); } return; } // Level set rendering if (tree.activeLeafVoxelCount() > maxVoxelPoints) { voxelsPerLeaf = std::max<Index64>(1, (maxVoxelPoints / tree.leafCount())); } std::vector<size_t> indexMap(leafs.leafCount()); size_t voxelCount = 0; for (Index64 l = 0, L = leafs.leafCount(); l < L; ++l) { indexMap[l] = voxelCount; voxelCount += std::min(leafs.leaf(l).onVoxelCount(), voxelsPerLeaf); } std::vector<GLfloat> points(voxelCount * 3), colors(voxelCount * 3), normals(voxelCount * 3); std::vector<GLuint> indices(voxelCount); PointGenerator<const TreeType> pointGen( points, indices, leafs, indexMap, grid->transform(), voxelsPerLeaf); pointGen.runParallel(); PointAttributeGenerator<GridType> attributeGen( points, colors, normals, *grid, minValue, maxValue, colorMap, isLevelSetGrid); attributeGen.runParallel(); mSurfaceBuffer->genVertexBuffer(points); mSurfaceBuffer->genColorBuffer(colors); mSurfaceBuffer->genNormalBuffer(normals); mSurfaceBuffer->genIndexBuffer(indices, GL_POINTS); } private: BufferObject *mInteriorBuffer; BufferObject *mSurfaceBuffer; }; // ActiveScalarValuesOp class ActiveVectorValuesOp { public: ActiveVectorValuesOp(BufferObject& vectorBuffer) : mVectorBuffer(&vectorBuffer) { } template<typename GridType> void operator()(typename GridType::ConstPtr grid) { using openvdb::Index64; using ValueType = typename GridType::ValueType; using TreeType = typename GridType::TreeType; using BoolTreeT = typename TreeType::template ValueConverter<bool>::Type; const TreeType& tree = grid->tree(); double length = 0.0; { ValueType minVal, maxVal; tree.evalMinMax(minVal, maxVal); length = maxVal.length(); } typename BoolTreeT::Ptr mask(new BoolTreeT(false)); mask->topologyUnion(tree); mask->voxelizeActiveTiles(); ///@todo thread and restructure. 
const Index64 voxelCount = mask->activeLeafVoxelCount(); const Index64 pointCount = voxelCount * 2; std::vector<GLfloat> points(pointCount*3), colors(pointCount*3); std::vector<GLuint> indices(pointCount); openvdb::Coord ijk; openvdb::Vec3d pos, color; openvdb::tree::LeafManager<BoolTreeT> leafs(*mask); openvdb::tree::ValueAccessor<const TreeType> acc(tree); Index64 idx = 0, pt = 0, cc = 0; for (Index64 l = 0, L = leafs.leafCount(); l < L; ++l) { typename BoolTreeT::LeafNodeType::ValueOnIter iter = leafs.leaf(l).beginValueOn(); for (; iter; ++iter) { ijk = iter.getCoord(); ValueType vec = acc.getValue(ijk); pos = grid->indexToWorld(ijk); points[idx++] = static_cast<GLfloat>(pos[0]); points[idx++] = static_cast<GLfloat>(pos[1]); points[idx++] = static_cast<GLfloat>(pos[2]); indices[pt] = GLuint(pt); ++pt; indices[pt] = GLuint(pt); ++pt; double w = vec.length() / length; vec.normalize(); pos += grid->voxelSize()[0] * 0.9 * vec; points[idx++] = static_cast<GLfloat>(pos[0]); points[idx++] = static_cast<GLfloat>(pos[1]); points[idx++] = static_cast<GLfloat>(pos[2]); color = w * openvdb::Vec3d(0.9, 0.3, 0.3) + (1.0 - w) * openvdb::Vec3d(0.3, 0.3, 0.9); colors[cc++] = static_cast<GLfloat>(color[0] * 0.3); colors[cc++] = static_cast<GLfloat>(color[1] * 0.3); colors[cc++] = static_cast<GLfloat>(color[2] * 0.3); colors[cc++] = static_cast<GLfloat>(color[0]); colors[cc++] = static_cast<GLfloat>(color[1]); colors[cc++] = static_cast<GLfloat>(color[2]); } } mVectorBuffer->genVertexBuffer(points); mVectorBuffer->genColorBuffer(colors); mVectorBuffer->genIndexBuffer(indices, GL_LINES); } private: BufferObject *mVectorBuffer; }; // ActiveVectorValuesOp class PointDataOp { public: using GLfloatVec = std::vector<GLfloat>; using GLuintVec = std::vector<GLuint>; private: struct VectorAttributeWrapper { using ValueType = openvdb::Vec3f; struct Handle { explicit Handle(VectorAttributeWrapper& attribute): mValues(attribute.mValues), mIndices(attribute.mIndices) {} void set(openvdb::Index offset, openvdb::Index/*unused*/, const ValueType& value) { if (mIndices) (*mIndices)[offset] = static_cast<GLuint>(offset); offset *= 3; for (int i = 0; i < 3; ++i, ++offset) { mValues[offset] = value[i]; } } private: GLfloatVec& mValues; GLuintVec* mIndices; }; // struct Handle explicit VectorAttributeWrapper(GLfloatVec& values, GLuintVec* indices = nullptr): mValues(values), mIndices(indices) {} void expand() {} void compact() {} private: GLfloatVec& mValues; GLuintVec* mIndices; }; // struct VectorAttributeWrapper public: explicit PointDataOp(BufferObject& buffer) : mBuffer(&buffer) {} template<typename GridType> void operator()(typename GridType::ConstPtr grid) { const typename GridType::TreeType& tree = grid->tree(); // obtain cumulative point offsets and total points std::vector<openvdb::Index64> pointOffsets; const openvdb::Index64 total = openvdb::points::pointOffsets(pointOffsets, tree); // @todo use glDrawArrays with GL_POINTS to avoid generating indices GLfloatVec values(total * 3); GLuintVec indices(total); VectorAttributeWrapper positionWrapper{values, &indices}; openvdb::points::convertPointDataGridPosition(positionWrapper, *grid, pointOffsets, 0); // gen buffers and upload data to GPU mBuffer->genVertexBuffer(values); mBuffer->genIndexBuffer(indices, GL_POINTS); const auto leafIter = tree.cbeginLeaf(); if (!leafIter) return; const size_t colorIdx = leafIter->attributeSet().find("Cd"); if (colorIdx == openvdb::points::AttributeSet::INVALID_POS) return; const auto& colorArray = 
leafIter->constAttributeArray(colorIdx); if (colorArray.template hasValueType<openvdb::Vec3f>()) { VectorAttributeWrapper colorWrapper{values}; openvdb::points::convertPointDataGridAttribute(colorWrapper, tree, pointOffsets, /*startOffset=*/0, static_cast<unsigned>(colorIdx)); // gen color buffer mBuffer->genColorBuffer(values); } } private: BufferObject* mBuffer; }; // PointDataOp //////////////////////////////////////// // Active value render module VoxelModule::VoxelModule(const openvdb::GridBase::ConstPtr& grid): mGrid(grid), mIsInitialized(false) { mFlatShader.setVertShader( "#version 120\n" "void main() {\n" "gl_FrontColor = gl_Color;\n" "gl_Position = ftransform();\n" "gl_ClipVertex = gl_ModelViewMatrix * gl_Vertex;\n" "}\n"); mFlatShader.setFragShader( "#version 120\n" "void main() {\n" "gl_FragColor = gl_Color;}\n"); mFlatShader.build(); mSurfaceShader.setVertShader( "#version 120\n" "varying vec3 normal;\n" "void main() {\n" "gl_FrontColor = gl_Color;\n" "normal = normalize(gl_NormalMatrix * gl_Normal);\n" "gl_Position = ftransform();\n" "gl_ClipVertex = gl_ModelViewMatrix * gl_Vertex;\n" "}\n"); mSurfaceShader.setFragShader( "#version 120\n" "varying vec3 normal;\n" "void main() {\n" "vec3 normalized_normal = normalize(normal);\n" "float w = 0.5 * (1.0 + dot(normalized_normal, vec3(0.0, 1.0, 0.0)));\n" "vec4 diffuseColor = w * gl_Color + (1.0 - w) * (gl_Color * 0.3);\n" "gl_FragColor = diffuseColor;\n" "}\n"); mSurfaceShader.build(); } void VoxelModule::init() { mIsInitialized = true; if (mGrid->isType<openvdb::points::PointDataGrid>()) { mSurfaceBuffer.clear(); PointDataOp drawPoints(mInteriorBuffer); util::doProcessTypedGrid<openvdb::points::PointDataGrid>(mGrid, drawPoints); } else { ActiveScalarValuesOp drawScalars(mInteriorBuffer, mSurfaceBuffer); if (!util::processTypedScalarOrPointDataGrid(mGrid, drawScalars)) { ActiveVectorValuesOp drawVectors(mVectorBuffer); if (!util::processTypedVectorGrid(mGrid, drawVectors)) { OPENVDB_LOG_INFO("Ignoring unrecognized grid type " << mGrid->type() << " during active value module initialization."); } } } } void VoxelModule::render() { if (!mIsVisible) return; if (!mIsInitialized) init(); mFlatShader.startShading(); mInteriorBuffer.render(); mVectorBuffer.render(); mFlatShader.stopShading(); mSurfaceShader.startShading(); mSurfaceBuffer.render(); mSurfaceShader.stopShading(); } //////////////////////////////////////// class MeshOp { public: MeshOp(BufferObject& buffer) : mBuffer(&buffer) {} template<typename GridType> void operator()(typename GridType::ConstPtr grid) { using openvdb::Index64; openvdb::tools::VolumeToMesh mesher( grid->getGridClass() == openvdb::GRID_LEVEL_SET ? 0.0 : 0.01); mesher(*grid); // Copy points and generate point normals. 
std::vector<GLfloat> points(mesher.pointListSize() * 3); std::vector<GLfloat> normals(mesher.pointListSize() * 3); openvdb::tree::ValueAccessor<const typename GridType::TreeType> acc(grid->tree()); openvdb::math::GenericMap map(grid->transform()); openvdb::Coord ijk; for (Index64 n = 0, i = 0, N = mesher.pointListSize(); n < N; ++n) { const openvdb::Vec3s& p = mesher.pointList()[n]; points[i++] = p[0]; points[i++] = p[1]; points[i++] = p[2]; } // Copy primitives openvdb::tools::PolygonPoolList& polygonPoolList = mesher.polygonPoolList(); Index64 numQuads = 0; for (Index64 n = 0, N = mesher.polygonPoolListSize(); n < N; ++n) { numQuads += polygonPoolList[n].numQuads(); } std::vector<GLuint> indices; indices.reserve(numQuads * 4); openvdb::Vec3d normal, e1, e2; for (Index64 n = 0, N = mesher.polygonPoolListSize(); n < N; ++n) { const openvdb::tools::PolygonPool& polygons = polygonPoolList[n]; for (Index64 i = 0, I = polygons.numQuads(); i < I; ++i) { const openvdb::Vec4I& quad = polygons.quad(i); indices.push_back(quad[0]); indices.push_back(quad[1]); indices.push_back(quad[2]); indices.push_back(quad[3]); e1 = mesher.pointList()[quad[1]]; e1 -= mesher.pointList()[quad[0]]; e2 = mesher.pointList()[quad[2]]; e2 -= mesher.pointList()[quad[1]]; normal = e1.cross(e2); const double length = normal.length(); if (length > 1.0e-7) normal *= (1.0 / length); for (int v = 0; v < 4; ++v) { normals[quad[v]*3] = static_cast<GLfloat>(-normal[0]); normals[quad[v]*3+1] = static_cast<GLfloat>(-normal[1]); normals[quad[v]*3+2] = static_cast<GLfloat>(-normal[2]); } } } // Construct and transfer GPU buffers. mBuffer->genVertexBuffer(points); mBuffer->genNormalBuffer(normals); mBuffer->genIndexBuffer(indices, GL_QUADS); } private: BufferObject *mBuffer; static openvdb::Vec3s sNodeColors[]; }; // MeshOp //////////////////////////////////////// // Meshing module MeshModule::MeshModule(const openvdb::GridBase::ConstPtr& grid): mGrid(grid), mIsInitialized(false) { mShader.setVertShader( "#version 120\n" "varying vec3 normal;\n" "void main() {\n" "normal = normalize(gl_NormalMatrix * gl_Normal);\n" "gl_Position = ftransform();\n" "gl_ClipVertex = gl_ModelViewMatrix * gl_Vertex;\n" "}\n"); mShader.setFragShader( "#version 120\n" "varying vec3 normal;\n" "const vec4 skyColor = vec4(0.9, 0.9, 1.0, 1.0);\n" "const vec4 groundColor = vec4(0.3, 0.3, 0.2, 1.0);\n" "void main() {\n" "vec3 normalized_normal = normalize(normal);\n" "float w = 0.5 * (1.0 + dot(normalized_normal, vec3(0.0, 1.0, 0.0)));\n" "vec4 diffuseColor = w * skyColor + (1.0 - w) * groundColor;\n" "gl_FragColor = diffuseColor;\n" "}\n"); mShader.build(); } void MeshModule::init() { mIsInitialized = true; MeshOp drawMesh(mBufferObject); if (!util::processTypedScalarGrid(mGrid, drawMesh)) { OPENVDB_LOG_INFO( "Ignoring non-scalar grid type during mesh module initialization."); } } void MeshModule::render() { if (!mIsVisible) return; if (!mIsInitialized) init(); mShader.startShading(); mBufferObject.render(); mShader.stopShading(); } } // namespace openvdb_viewer
51,351
C++
29.972256
99
0.59251
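The viewer modules above share one pattern: construct the module with a grid pointer, let the first render() call trigger init() to build the GL vertex/color/index buffers, and skip drawing while the module is hidden. The short sketch below is illustrative only; the included header name and the drawGridOnce() helper are assumptions, not taken from the source, and only the constructors and render() calls shown above are relied on.

// Hypothetical usage sketch (not part of the file above): driving the three
// render modules for a single grid. Header name and function are assumed.
#include <openvdb/openvdb.h>
#include "RenderModules.h" // assumed to declare the module classes shown above

void
drawGridOnce(const openvdb::GridBase::ConstPtr& grid)
{
    // Construction is cheap; each module defers its expensive buffer
    // generation to init(), which runs on the first render() call
    // (see the mIsInitialized checks above).
    openvdb_viewer::TreeTopologyModule topology(grid);
    openvdb_viewer::VoxelModule voxels(grid);
    openvdb_viewer::MeshModule mesh(grid);

    // In a real viewer these objects would persist and render() would be
    // called once per frame with a current GL context; render() is a no-op
    // while a module's mIsVisible flag is false.
    topology.render();
    voxels.render();
    mesh.render();
}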
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/viewer/Camera.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include "Camera.h"

#include <cmath>

#define GLFW_INCLUDE_GLU
#include <GLFW/glfw3.h>


namespace openvdb_viewer {

const double Camera::sDeg2rad = M_PI / 180.0;


Camera::Camera()
    : mFov(65.0)
    , mNearPlane(0.1)
    , mFarPlane(10000.0)
    , mTarget(openvdb::Vec3d(0.0))
    , mLookAt(mTarget)
    , mUp(openvdb::Vec3d(0.0, 1.0, 0.0))
    , mForward(openvdb::Vec3d(0.0, 0.0, 1.0))
    , mRight(openvdb::Vec3d(1.0, 0.0, 0.0))
    , mEye(openvdb::Vec3d(0.0, 0.0, -1.0))
    , mTumblingSpeed(0.5)
    , mZoomSpeed(0.2)
    , mStrafeSpeed(0.05)
    , mHead(30.0)
    , mPitch(45.0)
    , mTargetDistance(25.0)
    , mDistance(mTargetDistance)
    , mMouseDown(false)
    , mStartTumbling(false)
    , mZoomMode(false)
    , mChanged(true)
    , mNeedsDisplay(true)
    , mMouseXPos(0.0)
    , mMouseYPos(0.0)
    , mWindow(nullptr)
{
}


void
Camera::lookAt(const openvdb::Vec3d& p, double dist)
{
    mLookAt = p;
    mDistance = dist;
    mNeedsDisplay = true;
}


void
Camera::lookAtTarget()
{
    mLookAt = mTarget;
    mDistance = mTargetDistance;
    mNeedsDisplay = true;
}


void
Camera::setSpeed(double zoomSpeed, double strafeSpeed, double tumblingSpeed)
{
    mZoomSpeed = std::max(0.0001, mDistance * zoomSpeed);
    mStrafeSpeed = std::max(0.0001, mDistance * strafeSpeed);
    mTumblingSpeed = std::max(0.2, mDistance * tumblingSpeed);
    mTumblingSpeed = std::min(1.0, mDistance * tumblingSpeed);
}


void
Camera::setTarget(const openvdb::Vec3d& p, double dist)
{
    mTarget = p;
    mTargetDistance = dist;
}


void
Camera::aim()
{
    if (mWindow == nullptr) return;

    // Get the window size
    int width, height;
    glfwGetFramebufferSize(mWindow, &width, &height);

    // Make sure that height is non-zero to avoid division by zero
    height = std::max(1, height);

    glViewport(0, 0, width, height);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Set up the projection matrix
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();

    // Window aspect (assumes square pixels)
    double aspectRatio = double(width) / double(height);

    // Set perspective view (fov is in degrees in the y direction.)
    gluPerspective(mFov, aspectRatio, mNearPlane, mFarPlane);

    if (mChanged) {
        mChanged = false;

        mEye[0] = mLookAt[0] + mDistance * std::cos(mHead * sDeg2rad)
            * std::cos(mPitch * sDeg2rad);
        mEye[1] = mLookAt[1] + mDistance * std::sin(mHead * sDeg2rad);
        mEye[2] = mLookAt[2] + mDistance * std::cos(mHead * sDeg2rad)
            * std::sin(mPitch * sDeg2rad);

        mForward = mLookAt - mEye;
        mForward.normalize();

        mUp[1] = std::cos(mHead * sDeg2rad) > 0 ? 1.0 : -1.0;
        mRight = mForward.cross(mUp);
    }

    // Set up modelview matrix
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    gluLookAt(mEye[0], mEye[1], mEye[2],
              mLookAt[0], mLookAt[1], mLookAt[2],
              mUp[0], mUp[1], mUp[2]);
    mNeedsDisplay = false;
}


void
Camera::keyCallback(int key, int)
{
    if (mWindow == nullptr) return;

    int state = glfwGetKey(mWindow, key);

    switch (state) {
        case GLFW_PRESS:
            switch(key) {
                case GLFW_KEY_SPACE:
                    mZoomMode = true;
                    break;
            }
            break;
        case GLFW_RELEASE:
            switch(key) {
                case GLFW_KEY_SPACE:
                    mZoomMode = false;
                    break;
            }
            break;
    }
    mChanged = true;
}


void
Camera::mouseButtonCallback(int button, int action)
{
    if (button == GLFW_MOUSE_BUTTON_LEFT) {
        if (action == GLFW_PRESS) mMouseDown = true;
        else if (action == GLFW_RELEASE) mMouseDown = false;
    } else if (button == GLFW_MOUSE_BUTTON_RIGHT) {
        if (action == GLFW_PRESS) {
            mMouseDown = true;
            mZoomMode = true;
        } else if (action == GLFW_RELEASE) {
            mMouseDown = false;
            mZoomMode = false;
        }
    }
    if (action == GLFW_RELEASE) mMouseDown = false;

    mStartTumbling = true;
    mChanged = true;
}


void
Camera::mousePosCallback(int x, int y)
{
    if (mStartTumbling) {
        mMouseXPos = x;
        mMouseYPos = y;
        mStartTumbling = false;
    }

    double dx, dy;
    dx = x - mMouseXPos;
    dy = y - mMouseYPos;

    if (mMouseDown && !mZoomMode) {
        mNeedsDisplay = true;
        mHead += dy * mTumblingSpeed;
        mPitch += dx * mTumblingSpeed;
    } else if (mMouseDown && mZoomMode) {
        mNeedsDisplay = true;
        mLookAt += (dy * mUp - dx * mRight) * mStrafeSpeed;
    }

    mMouseXPos = x;
    mMouseYPos = y;
    mChanged = true;
}


void
Camera::mouseWheelCallback(int pos, int prevPos)
{
    double speed = std::abs(prevPos - pos);

    if (prevPos < pos) {
        mDistance += speed * mZoomSpeed;
    } else {
        double temp = mDistance - speed * mZoomSpeed;
        mDistance = std::max(0.0, temp);
    }
    setSpeed();

    mChanged = true;
    mNeedsDisplay = true;
}

} // namespace openvdb_viewer
5,144
C++
21.765487
100
0.586897
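Camera::aim() maps the (head, pitch, distance) orbit parameters to an eye position on a sphere around the look-at point and then hands the result to gluLookAt(). The standalone sketch below reproduces only that spherical math with the constructor's default values; main(), the literal deg2rad constant, and the printed output are illustrative assumptions, not part of the viewer code.

// Standalone check of the orbital eye-position math used in Camera::aim():
// the eye sits on a sphere of radius `distance` around `lookAt`, parameterized
// by the head (elevation) and pitch (azimuth) angles in degrees.
#include <openvdb/openvdb.h>
#include <cmath>
#include <iostream>

int
main()
{
    const double deg2rad = 3.141592653589793 / 180.0; // same value as Camera::sDeg2rad
    const openvdb::Vec3d lookAt(0.0);                  // orbit center
    const double distance = 25.0, head = 30.0, pitch = 45.0; // constructor defaults above

    openvdb::Vec3d eye;
    eye[0] = lookAt[0] + distance * std::cos(head * deg2rad) * std::cos(pitch * deg2rad);
    eye[1] = lookAt[1] + distance * std::sin(head * deg2rad);
    eye[2] = lookAt[2] + distance * std::cos(head * deg2rad) * std::sin(pitch * deg2rad);

    // The forward vector fed to gluLookAt() is simply lookAt - eye, normalized.
    openvdb::Vec3d forward = lookAt - eye;
    forward.normalize();

    std::cout << "eye = " << eye << ", forward = " << forward << std::endl;
    return 0;
}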
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/Tree.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file tree/Tree.h #ifndef OPENVDB_TREE_TREE_HAS_BEEN_INCLUDED #define OPENVDB_TREE_TREE_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/Metadata.h> #include <openvdb/math/Math.h> #include <openvdb/math/BBox.h> #include <openvdb/util/Formats.h> #include <openvdb/util/logging.h> #include <openvdb/Platform.h> #include "RootNode.h" #include "InternalNode.h" #include "LeafNode.h" #include "TreeIterator.h" #include "ValueAccessor.h" #include <tbb/concurrent_hash_map.h> #include <cstdint> #include <iostream> #include <mutex> #include <sstream> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { /// @brief Base class for typed trees class OPENVDB_API TreeBase { public: using Ptr = SharedPtr<TreeBase>; using ConstPtr = SharedPtr<const TreeBase>; TreeBase() = default; TreeBase(const TreeBase&) = default; TreeBase& operator=(const TreeBase&) = delete; // disallow assignment virtual ~TreeBase() = default; /// Return the name of this tree's type. virtual const Name& type() const = 0; /// Return the name of the type of a voxel's value (e.g., "float" or "vec3d"). virtual Name valueType() const = 0; /// Return a pointer to a deep copy of this tree virtual TreeBase::Ptr copy() const = 0; // // Tree methods // /// @brief Return this tree's background value wrapped as metadata. /// @note Query the metadata object for the value's type. virtual Metadata::Ptr getBackgroundValue() const { return Metadata::Ptr(); } /// @brief Return in @a bbox the axis-aligned bounding box of all /// active tiles and leaf nodes with active values. /// @details This is faster than calling evalActiveVoxelBoundingBox, /// which visits the individual active voxels, and hence /// evalLeafBoundingBox produces a less tight, i.e. approximate, bbox. /// @return @c false if the bounding box is empty (in which case /// the bbox is set to its default value). virtual bool evalLeafBoundingBox(CoordBBox& bbox) const = 0; /// @brief Return in @a dim the dimensions of the axis-aligned bounding box /// of all leaf nodes. /// @return @c false if the bounding box is empty. virtual bool evalLeafDim(Coord& dim) const = 0; /// @brief Return in @a bbox the axis-aligned bounding box of all /// active voxels and tiles. /// @details This method produces a more accurate, i.e. tighter, /// bounding box than evalLeafBoundingBox which is approximate but /// faster. /// @return @c false if the bounding box is empty (in which case /// the bbox is set to its default value). virtual bool evalActiveVoxelBoundingBox(CoordBBox& bbox) const = 0; /// @brief Return in @a dim the dimensions of the axis-aligned bounding box of all /// active voxels. This is a tighter bounding box than the leaf node bounding box. /// @return @c false if the bounding box is empty. virtual bool evalActiveVoxelDim(Coord& dim) const = 0; virtual void getIndexRange(CoordBBox& bbox) const = 0; /// @brief Replace with background tiles any nodes whose voxel buffers /// have not yet been allocated. /// @details Typically, unallocated nodes are leaf nodes whose voxel buffers /// are not yet resident in memory because delayed loading is in effect. /// @sa readNonresidentBuffers, io::File::open virtual void clipUnallocatedNodes() = 0; /// Return the total number of unallocated leaf nodes residing in this tree. virtual Index32 unallocatedLeafCount() const = 0; // // Statistics // /// @brief Return the depth of this tree. 
/// /// A tree with only a root node and leaf nodes has depth 2, for example. virtual Index treeDepth() const = 0; /// Return the number of leaf nodes. virtual Index32 leafCount() const = 0; #if OPENVDB_ABI_VERSION_NUMBER >= 7 /// Return a vector with node counts. The number of nodes of type NodeType /// is given as element NodeType::LEVEL in the return vector. Thus, the size /// of this vector corresponds to the height (or depth) of this tree. virtual std::vector<Index32> nodeCount() const = 0; #endif /// Return the number of non-leaf nodes. virtual Index32 nonLeafCount() const = 0; /// Return the number of active voxels stored in leaf nodes. virtual Index64 activeLeafVoxelCount() const = 0; /// Return the number of inactive voxels stored in leaf nodes. virtual Index64 inactiveLeafVoxelCount() const = 0; /// Return the total number of active voxels. virtual Index64 activeVoxelCount() const = 0; /// Return the number of inactive voxels within the bounding box of all active voxels. virtual Index64 inactiveVoxelCount() const = 0; /// Return the total number of active tiles. virtual Index64 activeTileCount() const = 0; /// Return the total amount of memory in bytes occupied by this tree. virtual Index64 memUsage() const { return 0; } // // I/O methods // /// @brief Read the tree topology from a stream. /// /// This will read the tree structure and tile values, but not voxel data. virtual void readTopology(std::istream&, bool saveFloatAsHalf = false); /// @brief Write the tree topology to a stream. /// /// This will write the tree structure and tile values, but not voxel data. virtual void writeTopology(std::ostream&, bool saveFloatAsHalf = false) const; /// Read all data buffers for this tree. virtual void readBuffers(std::istream&, bool saveFloatAsHalf = false) = 0; /// Read all of this tree's data buffers that intersect the given bounding box. virtual void readBuffers(std::istream&, const CoordBBox&, bool saveFloatAsHalf = false) = 0; /// @brief Read all of this tree's data buffers that are not yet resident in memory /// (because delayed loading is in effect). /// @details If this tree was read from a memory-mapped file, this operation /// disconnects the tree from the file. /// @sa clipUnallocatedNodes, io::File::open, io::MappedFile virtual void readNonresidentBuffers() const = 0; /// Write out all the data buffers for this tree. virtual void writeBuffers(std::ostream&, bool saveFloatAsHalf = false) const = 0; /// @brief Print statistics, memory usage and other information about this tree. /// @param os a stream to which to write textual information /// @param verboseLevel 1: print tree configuration only; /// 2: include node and voxel statistics; /// 3: include memory usage; /// 4: include minimum and maximum voxel values /// @warning @a verboseLevel 4 forces loading of any unallocated nodes. virtual void print(std::ostream& os = std::cout, int verboseLevel = 1) const; }; //////////////////////////////////////// template<typename _RootNodeType> class Tree: public TreeBase { public: using Ptr = SharedPtr<Tree>; using ConstPtr = SharedPtr<const Tree>; using RootNodeType = _RootNodeType; using ValueType = typename RootNodeType::ValueType; using BuildType = typename RootNodeType::BuildType; using LeafNodeType = typename RootNodeType::LeafNodeType; static const Index DEPTH = RootNodeType::LEVEL + 1; /// @brief ValueConverter<T>::Type is the type of a tree having the same /// hierarchy as this tree but a different value type, T. 
/// /// For example, FloatTree::ValueConverter<double>::Type is equivalent to DoubleTree. /// @note If the source tree type is a template argument, it might be necessary /// to write "typename SourceTree::template ValueConverter<T>::Type". template<typename OtherValueType> struct ValueConverter { using Type = Tree<typename RootNodeType::template ValueConverter<OtherValueType>::Type>; }; Tree() {} Tree& operator=(const Tree&) = delete; // disallow assignment /// Deep copy constructor Tree(const Tree& other): TreeBase(other), mRoot(other.mRoot) { } /// @brief Value conversion deep copy constructor /// /// Deep copy a tree of the same configuration as this tree type but a different /// ValueType, casting the other tree's values to this tree's ValueType. /// @throw TypeError if the other tree's configuration doesn't match this tree's /// or if this tree's ValueType is not constructible from the other tree's ValueType. template<typename OtherRootType> explicit Tree(const Tree<OtherRootType>& other): TreeBase(other), mRoot(other.root()) { } /// @brief Topology copy constructor from a tree of a different type /// /// Copy the structure, i.e., the active states of tiles and voxels, of another /// tree of a possibly different type, but don't copy any tile or voxel values. /// Instead, initialize tiles and voxels with the given active and inactive values. /// @param other a tree having (possibly) a different ValueType /// @param inactiveValue background value for this tree, and the value to which /// all inactive tiles and voxels are initialized /// @param activeValue value to which active tiles and voxels are initialized /// @throw TypeError if the other tree's configuration doesn't match this tree's. template<typename OtherTreeType> Tree(const OtherTreeType& other, const ValueType& inactiveValue, const ValueType& activeValue, TopologyCopy): TreeBase(other), mRoot(other.root(), inactiveValue, activeValue, TopologyCopy()) { } /// @brief Topology copy constructor from a tree of a different type /// /// @note This topology copy constructor is generally faster than /// the one that takes both a foreground and a background value. /// /// Copy the structure, i.e., the active states of tiles and voxels, of another /// tree of a possibly different type, but don't copy any tile or voxel values. /// Instead, initialize tiles and voxels with the given background value. /// @param other a tree having (possibly) a different ValueType /// @param background the value to which tiles and voxels are initialized /// @throw TypeError if the other tree's configuration doesn't match this tree's. template<typename OtherTreeType> Tree(const OtherTreeType& other, const ValueType& background, TopologyCopy): TreeBase(other), mRoot(other.root(), background, TopologyCopy()) { } /// Empty tree constructor Tree(const ValueType& background): mRoot(background) {} ~Tree() override { this->clear(); releaseAllAccessors(); } /// Return a pointer to a deep copy of this tree TreeBase::Ptr copy() const override { return TreeBase::Ptr(new Tree(*this)); } /// Return the name of the type of a voxel's value (e.g., "float" or "vec3d") Name valueType() const override { return typeNameAsString<ValueType>(); } /// Return the name of this type of tree. static const Name& treeType(); /// Return the name of this type of tree. 
const Name& type() const override { return this->treeType(); } bool operator==(const Tree&) const { OPENVDB_THROW(NotImplementedError, ""); } bool operator!=(const Tree&) const { OPENVDB_THROW(NotImplementedError, ""); } //@{ /// Return this tree's root node. RootNodeType& root() { return mRoot; } const RootNodeType& root() const { return mRoot; } //@} // // Tree methods // /// @brief Return @c true if the given tree has the same node and active value /// topology as this tree, whether or not it has the same @c ValueType. template<typename OtherRootNodeType> bool hasSameTopology(const Tree<OtherRootNodeType>& other) const; bool evalLeafBoundingBox(CoordBBox& bbox) const override; bool evalActiveVoxelBoundingBox(CoordBBox& bbox) const override; bool evalActiveVoxelDim(Coord& dim) const override; bool evalLeafDim(Coord& dim) const override; /// @brief Traverse the type hierarchy of nodes, and return, in @a dims, a list /// of the Log2Dims of nodes in order from RootNode to LeafNode. /// @note Because RootNodes are resizable, the RootNode Log2Dim is 0 for all trees. static void getNodeLog2Dims(std::vector<Index>& dims); // // I/O methods // /// @brief Read the tree topology from a stream. /// /// This will read the tree structure and tile values, but not voxel data. void readTopology(std::istream&, bool saveFloatAsHalf = false) override; /// @brief Write the tree topology to a stream. /// /// This will write the tree structure and tile values, but not voxel data. void writeTopology(std::ostream&, bool saveFloatAsHalf = false) const override; /// Read all data buffers for this tree. void readBuffers(std::istream&, bool saveFloatAsHalf = false) override; /// Read all of this tree's data buffers that intersect the given bounding box. void readBuffers(std::istream&, const CoordBBox&, bool saveFloatAsHalf = false) override; /// @brief Read all of this tree's data buffers that are not yet resident in memory /// (because delayed loading is in effect). /// @details If this tree was read from a memory-mapped file, this operation /// disconnects the tree from the file. /// @sa clipUnallocatedNodes, io::File::open, io::MappedFile void readNonresidentBuffers() const override; /// Write out all data buffers for this tree. void writeBuffers(std::ostream&, bool saveFloatAsHalf = false) const override; void print(std::ostream& os = std::cout, int verboseLevel = 1) const override; // // Statistics // /// @brief Return the depth of this tree. /// /// A tree with only a root node and leaf nodes has depth 2, for example. Index treeDepth() const override { return DEPTH; } /// Return the number of leaf nodes. Index32 leafCount() const override { return mRoot.leafCount(); } #if OPENVDB_ABI_VERSION_NUMBER >= 7 /// Return a vector with node counts. The number of nodes of type NodeType /// is given as element NodeType::LEVEL in the return vector. Thus, the size /// of this vector corresponds to the height (or depth) of this tree. std::vector<Index32> nodeCount() const override { std::vector<Index32> vec(DEPTH, 0); mRoot.nodeCount( vec ); return vec;// Named Return Value Optimization } #endif /// Return the number of non-leaf nodes. Index32 nonLeafCount() const override { return mRoot.nonLeafCount(); } /// Return the number of active voxels stored in leaf nodes. Index64 activeLeafVoxelCount() const override { return mRoot.onLeafVoxelCount(); } /// Return the number of inactive voxels stored in leaf nodes. 
Index64 inactiveLeafVoxelCount() const override { return mRoot.offLeafVoxelCount(); } /// Return the total number of active voxels. Index64 activeVoxelCount() const override { return mRoot.onVoxelCount(); } /// Return the number of inactive voxels within the bounding box of all active voxels. Index64 inactiveVoxelCount() const override; /// Return the total number of active tiles. Index64 activeTileCount() const override { return mRoot.onTileCount(); } /// Return the minimum and maximum active values in this tree. void evalMinMax(ValueType &min, ValueType &max) const; Index64 memUsage() const override { return sizeof(*this) + mRoot.memUsage(); } // // Voxel access methods (using signed indexing) // /// Return the value of the voxel at the given coordinates. const ValueType& getValue(const Coord& xyz) const; /// @brief Return the value of the voxel at the given coordinates /// and update the given accessor's node cache. template<typename AccessT> const ValueType& getValue(const Coord& xyz, AccessT&) const; /// @brief Return the tree depth (0 = root) at which the value of voxel (x, y, z) resides. /// @details If (x, y, z) isn't explicitly represented in the tree (i.e., it is /// implicitly a background voxel), return -1. int getValueDepth(const Coord& xyz) const; /// Set the active state of the voxel at the given coordinates but don't change its value. void setActiveState(const Coord& xyz, bool on); /// Set the value of the voxel at the given coordinates but don't change its active state. void setValueOnly(const Coord& xyz, const ValueType& value); /// Mark the voxel at the given coordinates as active but don't change its value. void setValueOn(const Coord& xyz); /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValueOn(const Coord& xyz, const ValueType& value); /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValue(const Coord& xyz, const ValueType& value); /// @brief Set the value of the voxel at the given coordinates, mark the voxel as active, /// and update the given accessor's node cache. template<typename AccessT> void setValue(const Coord& xyz, const ValueType& value, AccessT&); /// Mark the voxel at the given coordinates as inactive but don't change its value. void setValueOff(const Coord& xyz); /// Set the value of the voxel at the given coordinates and mark the voxel as inactive. void setValueOff(const Coord& xyz, const ValueType& value); /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. /// @details Provided that the functor can be inlined, this is typically /// significantly faster than calling getValue() followed by setValueOn(). /// @param xyz the coordinates of a voxel whose value is to be modified /// @param op a functor of the form <tt>void op(ValueType&) const</tt> that modifies /// its argument in place /// @par Example: /// @code /// Coord xyz(1, 0, -2); /// // Multiply the value of a voxel by a constant and mark the voxel as active. /// floatTree.modifyValue(xyz, [](float& f) { f *= 0.25; }); // C++11 /// // Set the value of a voxel to the maximum of its current value and 0.25, /// // and mark the voxel as active. /// floatTree.modifyValue(xyz, [](float& f) { f = std::max(f, 0.25f); }); // C++11 /// @endcode /// @note The functor is not guaranteed to be called only once. 
/// @see tools::foreach() template<typename ModifyOp> void modifyValue(const Coord& xyz, const ModifyOp& op); /// @brief Apply a functor to the voxel at the given coordinates. /// @details Provided that the functor can be inlined, this is typically /// significantly faster than calling getValue() followed by setValue(). /// @param xyz the coordinates of a voxel to be modified /// @param op a functor of the form <tt>void op(ValueType&, bool&) const</tt> that /// modifies its arguments, a voxel's value and active state, in place /// @par Example: /// @code /// Coord xyz(1, 0, -2); /// // Multiply the value of a voxel by a constant and mark the voxel as inactive. /// floatTree.modifyValueAndActiveState(xyz, /// [](float& f, bool& b) { f *= 0.25; b = false; }); // C++11 /// // Set the value of a voxel to the maximum of its current value and 0.25, /// // but don't change the voxel's active state. /// floatTree.modifyValueAndActiveState(xyz, /// [](float& f, bool&) { f = std::max(f, 0.25f); }); // C++11 /// @endcode /// @note The functor is not guaranteed to be called only once. /// @see tools::foreach() template<typename ModifyOp> void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op); /// @brief Get the value of the voxel at the given coordinates. /// @return @c true if the value is active. bool probeValue(const Coord& xyz, ValueType& value) const; /// Return @c true if the value at the given coordinates is active. bool isValueOn(const Coord& xyz) const { return mRoot.isValueOn(xyz); } /// Return @c true if the value at the given coordinates is inactive. bool isValueOff(const Coord& xyz) const { return !this->isValueOn(xyz); } /// Return @c true if this tree has any active tiles. bool hasActiveTiles() const { return mRoot.hasActiveTiles(); } /// Set all voxels that lie outside the given axis-aligned box to the background. void clip(const CoordBBox&); /// @brief Replace with background tiles any nodes whose voxel buffers /// have not yet been allocated. /// @details Typically, unallocated nodes are leaf nodes whose voxel buffers /// are not yet resident in memory because delayed loading is in effect. /// @sa readNonresidentBuffers, io::File::open void clipUnallocatedNodes() override; /// Return the total number of unallocated leaf nodes residing in this tree. Index32 unallocatedLeafCount() const override; //@{ /// @brief Set all voxels within a given axis-aligned box to a constant value. /// @param bbox inclusive coordinates of opposite corners of an axis-aligned box /// @param value the value to which to set voxels within the box /// @param active if true, mark voxels within the box as active, /// otherwise mark them as inactive /// @note This operation generates a sparse, but not always optimally sparse, /// representation of the filled box. Follow fill operations with a prune() /// operation for optimal sparseness. void sparseFill(const CoordBBox& bbox, const ValueType& value, bool active = true); void fill(const CoordBBox& bbox, const ValueType& value, bool active = true) { this->sparseFill(bbox, value, active); } //@} /// @brief Set all voxels within a given axis-aligned box to a constant value /// and ensure that those voxels are all represented at the leaf level. /// @param bbox inclusive coordinates of opposite corners of an axis-aligned box. /// @param value the value to which to set voxels within the box. /// @param active if true, mark voxels within the box as active, /// otherwise mark them as inactive. 
/// @sa voxelizeActiveTiles() void denseFill(const CoordBBox& bbox, const ValueType& value, bool active = true); /// @brief Densify active tiles, i.e., replace them with leaf-level active voxels. /// /// @param threaded if true, this operation is multi-threaded (over the internal nodes). /// /// @warning This method can explode the tree's memory footprint, especially if it /// contains active tiles at the upper levels (in particular the root level)! /// /// @sa denseFill() void voxelizeActiveTiles(bool threaded = true); /// @brief Reduce the memory footprint of this tree by replacing with tiles /// any nodes whose values are all the same (optionally to within a tolerance) /// and have the same active state. /// @warning Will soon be deprecated! void prune(const ValueType& tolerance = zeroVal<ValueType>()) { this->clearAllAccessors(); mRoot.prune(tolerance); } /// @brief Add the given leaf node to this tree, creating a new branch if necessary. /// If a leaf node with the same origin already exists, replace it. /// /// @warning Ownership of the leaf is transferred to the tree so /// the client code should not attempt to delete the leaf pointer! void addLeaf(LeafNodeType* leaf) { assert(leaf); mRoot.addLeaf(leaf); } /// @brief Add a tile containing voxel (x, y, z) at the specified tree level, /// creating a new branch if necessary. Delete any existing lower-level nodes /// that contain (x, y, z). /// @note @a level must be less than this tree's depth. void addTile(Index level, const Coord& xyz, const ValueType& value, bool active); /// @brief Return a pointer to the node of type @c NodeT that contains voxel (x, y, z) /// and replace it with a tile of the specified value and state. /// If no such node exists, leave the tree unchanged and return @c nullptr. /// @note The caller takes ownership of the node and is responsible for deleting it. template<typename NodeT> NodeT* stealNode(const Coord& xyz, const ValueType& value, bool active); /// @brief Return a pointer to the leaf node that contains voxel (x, y, z). /// If no such node exists, create one that preserves the values and /// active states of all voxels. /// @details Use this method to preallocate a static tree topology over which to /// safely perform multithreaded processing. LeafNodeType* touchLeaf(const Coord& xyz); //@{ /// @brief Return a pointer to the node of type @c NodeType that contains /// voxel (x, y, z). If no such node exists, return @c nullptr. template<typename NodeType> NodeType* probeNode(const Coord& xyz); template<typename NodeType> const NodeType* probeConstNode(const Coord& xyz) const; template<typename NodeType> const NodeType* probeNode(const Coord& xyz) const; //@} //@{ /// @brief Return a pointer to the leaf node that contains voxel (x, y, z). /// If no such node exists, return @c nullptr. 
LeafNodeType* probeLeaf(const Coord& xyz); const LeafNodeType* probeConstLeaf(const Coord& xyz) const; const LeafNodeType* probeLeaf(const Coord& xyz) const { return this->probeConstLeaf(xyz); } //@} //@{ /// @brief Adds all nodes of a certain type to a container with the following API: /// @code /// struct ArrayT { /// using value_type = ...; // the type of node to be added to the array /// void push_back(value_type nodePtr); // add a node to the array /// }; /// @endcode /// @details An example of a wrapper around a c-style array is: /// @code /// struct MyArray { /// using value_type = LeafType*; /// value_type* ptr; /// MyArray(value_type* array) : ptr(array) {} /// void push_back(value_type leaf) { *ptr++ = leaf; } ///}; /// @endcode /// @details An example that constructs a list of pointer to all leaf nodes is: /// @code /// std::vector<const LeafNodeType*> array;//most std contains have the required API /// array.reserve(tree.leafCount());//this is a fast preallocation. /// tree.getNodes(array); /// @endcode template<typename ArrayT> void getNodes(ArrayT& array) { mRoot.getNodes(array); } template<typename ArrayT> void getNodes(ArrayT& array) const { mRoot.getNodes(array); } //@} /// @brief Steals all nodes of a certain type from the tree and /// adds them to a container with the following API: /// @code /// struct ArrayT { /// using value_type = ...; // the type of node to be added to the array /// void push_back(value_type nodePtr); // add a node to the array /// }; /// @endcode /// @details An example of a wrapper around a c-style array is: /// @code /// struct MyArray { /// using value_type = LeafType*; /// value_type* ptr; /// MyArray(value_type* array) : ptr(array) {} /// void push_back(value_type leaf) { *ptr++ = leaf; } ///}; /// @endcode /// @details An example that constructs a list of pointer to all leaf nodes is: /// @code /// std::vector<const LeafNodeType*> array;//most std contains have the required API /// array.reserve(tree.leafCount());//this is a fast preallocation. /// tree.stealNodes(array); /// @endcode template<typename ArrayT> void stealNodes(ArrayT& array) { this->clearAllAccessors(); mRoot.stealNodes(array); } template<typename ArrayT> void stealNodes(ArrayT& array, const ValueType& value, bool state) { this->clearAllAccessors(); mRoot.stealNodes(array, value, state); } // // Aux methods // /// @brief Return @c true if this tree contains no nodes other than /// the root node and no tiles other than background tiles. bool empty() const { return mRoot.empty(); } /// Remove all tiles from this tree and all nodes other than the root node. void clear(); /// Clear all registered accessors. void clearAllAccessors(); //@{ /// @brief Register an accessor for this tree. Registered accessors are /// automatically cleared whenever one of this tree's nodes is deleted. void attachAccessor(ValueAccessorBase<Tree, true>&) const; void attachAccessor(ValueAccessorBase<const Tree, true>&) const; //@} //@{ /// Dummy implementations void attachAccessor(ValueAccessorBase<Tree, false>&) const {} void attachAccessor(ValueAccessorBase<const Tree, false>&) const {} //@} //@{ /// Deregister an accessor so that it is no longer automatically cleared. 
    void releaseAccessor(ValueAccessorBase<Tree, true>&) const;
    void releaseAccessor(ValueAccessorBase<const Tree, true>&) const;
    //@}
    //@{
    /// Dummy implementations
    void releaseAccessor(ValueAccessorBase<Tree, false>&) const {}
    void releaseAccessor(ValueAccessorBase<const Tree, false>&) const {}
    //@}

    /// @brief Return this tree's background value wrapped as metadata.
    /// @note Query the metadata object for the value's type.
    Metadata::Ptr getBackgroundValue() const override;

    /// @brief Return this tree's background value.
    ///
    /// @note Use tools::changeBackground to efficiently modify the
    /// background values. Else use tree.root().setBackground, which
    /// is serial and hence slower.
    const ValueType& background() const { return mRoot.background(); }

    /// Min and max are both inclusive.
    void getIndexRange(CoordBBox& bbox) const override { mRoot.getIndexRange(bbox); }

    /// @brief Efficiently merge another tree into this tree using one of several schemes.
    /// @details This operation is primarily intended to combine trees that are mostly
    /// non-overlapping (for example, intermediate trees from computations that are
    /// parallelized across disjoint regions of space).
    /// @note This operation is not guaranteed to produce an optimally sparse tree.
    /// Follow merge() with prune() for optimal sparseness.
    /// @warning This operation always empties the other tree.
    void merge(Tree& other, MergePolicy = MERGE_ACTIVE_STATES);

    /// @brief Union this tree's set of active values with the active values
    /// of the other tree, whose @c ValueType may be different.
    /// @details The resulting state of a value is active if the corresponding value
    /// was already active OR if it is active in the other tree. Also, a resulting
    /// value maps to a voxel if the corresponding value already mapped to a voxel
    /// OR if it is a voxel in the other tree. Thus, a resulting value can only
    /// map to a tile if the corresponding value already mapped to a tile
    /// AND if it is a tile value in the other tree.
    ///
    /// @note This operation modifies only active states, not values.
    /// Specifically, active tiles and voxels in this tree are not changed, and
    /// tiles or voxels that were inactive in this tree but active in the other tree
    /// are marked as active in this tree but left with their original values.
    template<typename OtherRootNodeType>
    void topologyUnion(const Tree<OtherRootNodeType>& other);

    /// @brief Intersects this tree's set of active values with the active values
    /// of the other tree, whose @c ValueType may be different.
    /// @details The resulting state of a value is active only if the corresponding
    /// value was already active AND if it is active in the other tree. Also, a
    /// resulting value maps to a voxel if the corresponding value
    /// already mapped to an active voxel in either of the two grids
    /// and it maps to an active tile or voxel in the other grid.
    ///
    /// @note This operation can delete branches in this grid if they
    /// overlap with inactive tiles in the other grid. Likewise, active
    /// voxels can be turned into inactive voxels, resulting in leaf
    /// nodes with no active values. Thus, it is recommended to
    /// subsequently call tools::pruneInactive.
    template<typename OtherRootNodeType>
    void topologyIntersection(const Tree<OtherRootNodeType>& other);

    /// @brief Difference this tree's set of active values with the active values
    /// of the other tree, whose @c ValueType may be different. So a
    /// resulting voxel will be active only if the original voxel is
    /// active in this tree and inactive in the other tree.
/// /// @note This operation can delete branches in this grid if they /// overlap with active tiles in the other grid. Likewise active /// voxels can be turned into inactive voxels resulting in leaf /// nodes with no active values. Thus, it is recommended to /// subsequently call tools::pruneInactive. template<typename OtherRootNodeType> void topologyDifference(const Tree<OtherRootNodeType>& other); /// For a given function @c f, use sparse traversal to compute <tt>f(this, other)</tt> /// over all corresponding pairs of values (tile or voxel) of this tree and the other tree /// and store the result in this tree. /// This method is typically more space-efficient than the two-tree combine2(), /// since it moves rather than copies nodes from the other tree into this tree. /// @note This operation always empties the other tree. /// @param other a tree of the same type as this tree /// @param op a functor of the form <tt>void op(const T& a, const T& b, T& result)</tt>, /// where @c T is this tree's @c ValueType, that computes /// <tt>result = f(a, b)</tt> /// @param prune if true, prune the resulting tree one branch at a time (this is usually /// more space-efficient than pruning the entire tree in one pass) /// /// @par Example: /// Compute the per-voxel difference between two floating-point trees, /// @c aTree and @c bTree, and store the result in @c aTree (leaving @c bTree empty). /// @code /// { /// struct Local { /// static inline void diff(const float& a, const float& b, float& result) { /// result = a - b; /// } /// }; /// aTree.combine(bTree, Local::diff); /// } /// @endcode /// /// @par Example: /// Compute <tt>f * a + (1 - f) * b</tt> over all voxels of two floating-point trees, /// @c aTree and @c bTree, and store the result in @c aTree (leaving @c bTree empty). /// @code /// namespace { /// struct Blend { /// Blend(float f): frac(f) {} /// inline void operator()(const float& a, const float& b, float& result) const { /// result = frac * a + (1.0 - frac) * b; /// } /// float frac; /// }; /// } /// { /// aTree.combine(bTree, Blend(0.25)); // 0.25 * a + 0.75 * b /// } /// @endcode template<typename CombineOp> void combine(Tree& other, CombineOp& op, bool prune = false); #ifndef _MSC_VER template<typename CombineOp> void combine(Tree& other, const CombineOp& op, bool prune = false); #endif /// Like combine(), but with /// @param other a tree of the same type as this tree /// @param op a functor of the form <tt>void op(CombineArgs<ValueType>& args)</tt> that /// computes <tt>args.setResult(f(args.a(), args.b()))</tt> and, optionally, /// <tt>args.setResultIsActive(g(args.aIsActive(), args.bIsActive()))</tt> /// for some functions @c f and @c g /// @param prune if true, prune the resulting tree one branch at a time (this is usually /// more space-efficient than pruning the entire tree in one pass) /// /// This variant passes not only the @em a and @em b values but also the active states /// of the @em a and @em b values to the functor, which may then return, by calling /// @c args.setResultIsActive(), a computed active state for the result value. /// By default, the result is active if either the @em a or the @em b value is active. /// /// @see openvdb/Types.h for the definition of the CombineArgs struct. /// /// @par Example: /// Replace voxel values in floating-point @c aTree with corresponding values /// from floating-point @c bTree (leaving @c bTree empty) wherever the @c bTree /// values are larger. Also, preserve the active states of any transferred values. 
/// @code /// { /// struct Local { /// static inline void max(CombineArgs<float>& args) { /// if (args.b() > args.a()) { /// // Transfer the B value and its active state. /// args.setResult(args.b()); /// args.setResultIsActive(args.bIsActive()); /// } else { /// // Preserve the A value and its active state. /// args.setResult(args.a()); /// args.setResultIsActive(args.aIsActive()); /// } /// } /// }; /// aTree.combineExtended(bTree, Local::max); /// } /// @endcode template<typename ExtendedCombineOp> void combineExtended(Tree& other, ExtendedCombineOp& op, bool prune = false); #ifndef _MSC_VER template<typename ExtendedCombineOp> void combineExtended(Tree& other, const ExtendedCombineOp& op, bool prune = false); #endif /// For a given function @c f, use sparse traversal to compute <tt>f(a, b)</tt> over all /// corresponding pairs of values (tile or voxel) of trees A and B and store the result /// in this tree. /// @param a,b two trees with the same configuration (levels and node dimensions) /// as this tree but with the B tree possibly having a different value type /// @param op a functor of the form <tt>void op(const T1& a, const T2& b, T1& result)</tt>, /// where @c T1 is this tree's and the A tree's @c ValueType and @c T2 is the /// B tree's @c ValueType, that computes <tt>result = f(a, b)</tt> /// @param prune if true, prune the resulting tree one branch at a time (this is usually /// more space-efficient than pruning the entire tree in one pass) /// /// @throw TypeError if the B tree's configuration doesn't match this tree's /// or if this tree's ValueType is not constructible from the B tree's ValueType. /// /// @par Example: /// Compute the per-voxel difference between two floating-point trees, /// @c aTree and @c bTree, and store the result in a third tree. /// @code /// { /// struct Local { /// static inline void diff(const float& a, const float& b, float& result) { /// result = a - b; /// } /// }; /// FloatTree resultTree; /// resultTree.combine2(aTree, bTree, Local::diff); /// } /// @endcode template<typename CombineOp, typename OtherTreeType /*= Tree*/> void combine2(const Tree& a, const OtherTreeType& b, CombineOp& op, bool prune = false); #ifndef _MSC_VER template<typename CombineOp, typename OtherTreeType /*= Tree*/> void combine2(const Tree& a, const OtherTreeType& b, const CombineOp& op, bool prune = false); #endif /// Like combine2(), but with /// @param a,b two trees with the same configuration (levels and node dimensions) /// as this tree but with the B tree possibly having a different value type /// @param op a functor of the form <tt>void op(CombineArgs<T1, T2>& args)</tt>, where /// @c T1 is this tree's and the A tree's @c ValueType and @c T2 is the B tree's /// @c ValueType, that computes <tt>args.setResult(f(args.a(), args.b()))</tt> /// and, optionally, /// <tt>args.setResultIsActive(g(args.aIsActive(), args.bIsActive()))</tt> /// for some functions @c f and @c g /// @param prune if true, prune the resulting tree one branch at a time (this is usually /// more space-efficient than pruning the entire tree in one pass) /// This variant passes not only the @em a and @em b values but also the active states /// of the @em a and @em b values to the functor, which may then return, by calling /// <tt>args.setResultIsActive()</tt>, a computed active state for the result value. /// By default, the result is active if either the @em a or the @em b value is active. 
/// /// @throw TypeError if the B tree's configuration doesn't match this tree's /// or if this tree's ValueType is not constructible from the B tree's ValueType. /// /// @see openvdb/Types.h for the definition of the CombineArgs struct. /// /// @par Example: /// Compute the per-voxel maximum values of two single-precision floating-point trees, /// @c aTree and @c bTree, and store the result in a third tree. Set the active state /// of each output value to that of the larger of the two input values. /// @code /// { /// struct Local { /// static inline void max(CombineArgs<float>& args) { /// if (args.b() > args.a()) { /// // Transfer the B value and its active state. /// args.setResult(args.b()); /// args.setResultIsActive(args.bIsActive()); /// } else { /// // Preserve the A value and its active state. /// args.setResult(args.a()); /// args.setResultIsActive(args.aIsActive()); /// } /// } /// }; /// FloatTree aTree = ...; /// FloatTree bTree = ...; /// FloatTree resultTree; /// resultTree.combine2Extended(aTree, bTree, Local::max); /// } /// @endcode /// /// @par Example: /// Compute the per-voxel maximum values of a double-precision and a single-precision /// floating-point tree, @c aTree and @c bTree, and store the result in a third, /// double-precision tree. Set the active state of each output value to that of /// the larger of the two input values. /// @code /// { /// struct Local { /// static inline void max(CombineArgs<double, float>& args) { /// if (args.b() > args.a()) { /// // Transfer the B value and its active state. /// args.setResult(args.b()); /// args.setResultIsActive(args.bIsActive()); /// } else { /// // Preserve the A value and its active state. /// args.setResult(args.a()); /// args.setResultIsActive(args.aIsActive()); /// } /// } /// }; /// DoubleTree aTree = ...; /// FloatTree bTree = ...; /// DoubleTree resultTree; /// resultTree.combine2Extended(aTree, bTree, Local::max); /// } /// @endcode template<typename ExtendedCombineOp, typename OtherTreeType /*= Tree*/> void combine2Extended(const Tree& a, const OtherTreeType& b, ExtendedCombineOp& op, bool prune = false); #ifndef _MSC_VER template<typename ExtendedCombineOp, typename OtherTreeType /*= Tree*/> void combine2Extended(const Tree& a, const OtherTreeType& b, const ExtendedCombineOp&, bool prune = false); #endif template<typename BBoxOp> [[deprecated("Use DynamicNodeManager instead")]] void visitActiveBBox(BBoxOp& op) const { mRoot.visitActiveBBox(op); } template<typename VisitorOp> [[deprecated("Use DynamicNodeManager instead")]] void visit(VisitorOp& op); template<typename VisitorOp> [[deprecated("Use DynamicNodeManager instead")]] void visit(const VisitorOp& op); template<typename VisitorOp> [[deprecated("Use DynamicNodeManager instead")]] void visit(VisitorOp& op) const; template<typename VisitorOp> [[deprecated("Use DynamicNodeManager instead")]] void visit(const VisitorOp& op) const; template<typename OtherTreeType, typename VisitorOp> [[deprecated("Use DynamicNodeManager instead")]] void visit2(OtherTreeType& other, VisitorOp& op); template<typename OtherTreeType, typename VisitorOp> [[deprecated("Use DynamicNodeManager instead")]] void visit2(OtherTreeType& other, const VisitorOp& op); template<typename OtherTreeType, typename VisitorOp> [[deprecated("Use DynamicNodeManager instead")]] void visit2(OtherTreeType& other, VisitorOp& op) const; template<typename OtherTreeType, typename VisitorOp> [[deprecated("Use DynamicNodeManager instead")]] void visit2(OtherTreeType& other, const VisitorOp& op) const; // 
// Iteration // //@{ /// Return an iterator over children of the root node. typename RootNodeType::ChildOnCIter beginRootChildren() const { return mRoot.cbeginChildOn(); } typename RootNodeType::ChildOnCIter cbeginRootChildren() const { return mRoot.cbeginChildOn(); } typename RootNodeType::ChildOnIter beginRootChildren() { return mRoot.beginChildOn(); } //@} //@{ /// Return an iterator over non-child entries of the root node's table. typename RootNodeType::ChildOffCIter beginRootTiles() const { return mRoot.cbeginChildOff(); } typename RootNodeType::ChildOffCIter cbeginRootTiles() const { return mRoot.cbeginChildOff(); } typename RootNodeType::ChildOffIter beginRootTiles() { return mRoot.beginChildOff(); } //@} //@{ /// Return an iterator over all entries of the root node's table. typename RootNodeType::ChildAllCIter beginRootDense() const { return mRoot.cbeginChildAll(); } typename RootNodeType::ChildAllCIter cbeginRootDense() const { return mRoot.cbeginChildAll(); } typename RootNodeType::ChildAllIter beginRootDense() { return mRoot.beginChildAll(); } //@} //@{ /// Iterator over all nodes in this tree using NodeIter = NodeIteratorBase<Tree, typename RootNodeType::ChildOnIter>; using NodeCIter = NodeIteratorBase<const Tree, typename RootNodeType::ChildOnCIter>; //@} //@{ /// Iterator over all leaf nodes in this tree using LeafIter = LeafIteratorBase<Tree, typename RootNodeType::ChildOnIter>; using LeafCIter = LeafIteratorBase<const Tree, typename RootNodeType::ChildOnCIter>; //@} //@{ /// Return an iterator over all nodes in this tree. NodeIter beginNode() { return NodeIter(*this); } NodeCIter beginNode() const { return NodeCIter(*this); } NodeCIter cbeginNode() const { return NodeCIter(*this); } //@} //@{ /// Return an iterator over all leaf nodes in this tree. LeafIter beginLeaf() { return LeafIter(*this); } LeafCIter beginLeaf() const { return LeafCIter(*this); } LeafCIter cbeginLeaf() const { return LeafCIter(*this); } //@} using ValueAllIter = TreeValueIteratorBase<Tree, typename RootNodeType::ValueAllIter>; using ValueAllCIter = TreeValueIteratorBase<const Tree, typename RootNodeType::ValueAllCIter>; using ValueOnIter = TreeValueIteratorBase<Tree, typename RootNodeType::ValueOnIter>; using ValueOnCIter = TreeValueIteratorBase<const Tree, typename RootNodeType::ValueOnCIter>; using ValueOffIter = TreeValueIteratorBase<Tree, typename RootNodeType::ValueOffIter>; using ValueOffCIter = TreeValueIteratorBase<const Tree, typename RootNodeType::ValueOffCIter>; //@{ /// Return an iterator over all values (tile and voxel) across all nodes. ValueAllIter beginValueAll() { return ValueAllIter(*this); } ValueAllCIter beginValueAll() const { return ValueAllCIter(*this); } ValueAllCIter cbeginValueAll() const { return ValueAllCIter(*this); } //@} //@{ /// Return an iterator over active values (tile and voxel) across all nodes. ValueOnIter beginValueOn() { return ValueOnIter(*this); } ValueOnCIter beginValueOn() const { return ValueOnCIter(*this); } ValueOnCIter cbeginValueOn() const { return ValueOnCIter(*this); } //@} //@{ /// Return an iterator over inactive values (tile and voxel) across all nodes. ValueOffIter beginValueOff() { return ValueOffIter(*this); } ValueOffCIter beginValueOff() const { return ValueOffCIter(*this); } ValueOffCIter cbeginValueOff() const { return ValueOffCIter(*this); } //@} /// @brief Return an iterator of type @c IterT (for example, begin<ValueOnIter>() is /// equivalent to beginValueOn()). 
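///
/// @par Example:
/// An illustrative sketch (the tree and its contents are assumptions) of iterating
/// over the active values of a tree with the iterators defined above:
/// @code
/// openvdb::FloatTree tree(/*background=*/0.0f);
/// tree.setValueOn(openvdb::Coord(1, 2, 3), 5.0f);
/// for (auto iter = tree.cbeginValueOn(); iter; ++iter) {
///     const openvdb::Coord xyz = iter.getCoord(); // coordinates of the tile or voxel
///     const float value = *iter;                  // its value
///     // process (xyz, value) ...
/// }
/// @endcode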
template<typename IterT> IterT begin(); /// @brief Return a const iterator of type CIterT (for example, cbegin<ValueOnCIter>() /// is equivalent to cbeginValueOn()). template<typename CIterT> CIterT cbegin() const; protected: using AccessorRegistry = tbb::concurrent_hash_map<ValueAccessorBase<Tree, true>*, bool>; using ConstAccessorRegistry = tbb::concurrent_hash_map<ValueAccessorBase<const Tree, true>*, bool>; /// @brief Notify all registered accessors, by calling ValueAccessor::release(), /// that this tree is about to be deleted. void releaseAllAccessors(); // TBB body object used to deallocate nodes in parallel. template<typename NodeType> struct DeallocateNodes { DeallocateNodes(std::vector<NodeType*>& nodes) : mNodes(nodes.empty() ? nullptr : &nodes.front()) { } void operator()(const tbb::blocked_range<size_t>& range) const { for (size_t n = range.begin(), N = range.end(); n < N; ++n) { delete mNodes[n]; mNodes[n] = nullptr; } } NodeType ** const mNodes; }; // // Data members // RootNodeType mRoot; // root node of the tree mutable AccessorRegistry mAccessorRegistry; mutable ConstAccessorRegistry mConstAccessorRegistry; static std::unique_ptr<const Name> sTreeTypeName; }; // end of Tree class template<typename _RootNodeType> std::unique_ptr<const Name> Tree<_RootNodeType>::sTreeTypeName; /// @brief Tree3<T, N1, N2>::Type is the type of a three-level tree /// (Root, Internal, Leaf) with value type T and /// internal and leaf node log dimensions N1 and N2, respectively. /// @note This is NOT the standard tree configuration (Tree4 is). template<typename T, Index N1=4, Index N2=3> struct Tree3 { using Type = Tree<RootNode<InternalNode<LeafNode<T, N2>, N1>>>; }; /// @brief Tree4<T, N1, N2, N3>::Type is the type of a four-level tree /// (Root, Internal, Internal, Leaf) with value type T and /// internal and leaf node log dimensions N1, N2 and N3, respectively. /// @note This is the standard tree configuration. template<typename T, Index N1=5, Index N2=4, Index N3=3> struct Tree4 { using Type = Tree<RootNode<InternalNode<InternalNode<LeafNode<T, N3>, N2>, N1>>>; }; /// @brief Tree5<T, N1, N2, N3, N4>::Type is the type of a five-level tree /// (Root, Internal, Internal, Internal, Leaf) with value type T and /// internal and leaf node log dimensions N1, N2, N3 and N4, respectively. /// @note This is NOT the standard tree configuration (Tree4 is).
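///
/// @par Example:
/// A brief sketch (the alias names are assumptions) showing how these helpers are
/// typically used to name a tree configuration:
/// @code
/// // The standard configuration: openvdb::FloatTree is Tree4<float, 5, 4, 3>::Type.
/// using StandardFloatTree = openvdb::tree::Tree4<float, 5, 4, 3>::Type;
/// // A deeper, non-standard five-level configuration:
/// using DeepFloatTree = openvdb::tree::Tree5<float, 6, 5, 4, 3>::Type;
/// StandardFloatTree tree(/*background=*/0.0f);
/// @endcode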
template<typename T, Index N1=6, Index N2=5, Index N3=4, Index N4=3> struct Tree5 { using Type = Tree<RootNode<InternalNode<InternalNode<InternalNode<LeafNode<T, N4>, N3>, N2>, N1>>>; }; //////////////////////////////////////// inline void TreeBase::readTopology(std::istream& is, bool /*saveFloatAsHalf*/) { int32_t bufferCount; is.read(reinterpret_cast<char*>(&bufferCount), sizeof(int32_t)); if (bufferCount != 1) OPENVDB_LOG_WARN("multi-buffer trees are no longer supported"); } inline void TreeBase::writeTopology(std::ostream& os, bool /*saveFloatAsHalf*/) const { int32_t bufferCount = 1; os.write(reinterpret_cast<char*>(&bufferCount), sizeof(int32_t)); } inline void TreeBase::print(std::ostream& os, int /*verboseLevel*/) const { os << " Tree Type: " << type() << " Active Voxel Count: " << activeVoxelCount() << std::endl << " Active tile Count: " << activeTileCount() << std::endl << " Inactive Voxel Count: " << inactiveVoxelCount() << std::endl << " Leaf Node Count: " << leafCount() << std::endl << " Non-leaf Node Count: " << nonLeafCount() << std::endl; } //////////////////////////////////////// // // Type traits for tree iterators // /// @brief TreeIterTraits provides, for all tree iterators, a begin(tree) function /// that returns an iterator over a tree of arbitrary type. template<typename TreeT, typename IterT> struct TreeIterTraits; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::RootNodeType::ChildOnIter> { static typename TreeT::RootNodeType::ChildOnIter begin(TreeT& tree) { return tree.beginRootChildren(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::RootNodeType::ChildOnCIter> { static typename TreeT::RootNodeType::ChildOnCIter begin(const TreeT& tree) { return tree.cbeginRootChildren(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::RootNodeType::ChildOffIter> { static typename TreeT::RootNodeType::ChildOffIter begin(TreeT& tree) { return tree.beginRootTiles(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::RootNodeType::ChildOffCIter> { static typename TreeT::RootNodeType::ChildOffCIter begin(const TreeT& tree) { return tree.cbeginRootTiles(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::RootNodeType::ChildAllIter> { static typename TreeT::RootNodeType::ChildAllIter begin(TreeT& tree) { return tree.beginRootDense(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::RootNodeType::ChildAllCIter> { static typename TreeT::RootNodeType::ChildAllCIter begin(const TreeT& tree) { return tree.cbeginRootDense(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::NodeIter> { static typename TreeT::NodeIter begin(TreeT& tree) { return tree.beginNode(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::NodeCIter> { static typename TreeT::NodeCIter begin(const TreeT& tree) { return tree.cbeginNode(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::LeafIter> { static typename TreeT::LeafIter begin(TreeT& tree) { return tree.beginLeaf(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::LeafCIter> { static typename TreeT::LeafCIter begin(const TreeT& tree) { return tree.cbeginLeaf(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::ValueOnIter> { static typename TreeT::ValueOnIter begin(TreeT& tree) { return tree.beginValueOn(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename 
TreeT::ValueOnCIter> { static typename TreeT::ValueOnCIter begin(const TreeT& tree) { return tree.cbeginValueOn(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::ValueOffIter> { static typename TreeT::ValueOffIter begin(TreeT& tree) { return tree.beginValueOff(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::ValueOffCIter> { static typename TreeT::ValueOffCIter begin(const TreeT& tree) { return tree.cbeginValueOff(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::ValueAllIter> { static typename TreeT::ValueAllIter begin(TreeT& tree) { return tree.beginValueAll(); } }; template<typename TreeT> struct TreeIterTraits<TreeT, typename TreeT::ValueAllCIter> { static typename TreeT::ValueAllCIter begin(const TreeT& tree) { return tree.cbeginValueAll(); } }; template<typename RootNodeType> template<typename IterT> inline IterT Tree<RootNodeType>::begin() { return TreeIterTraits<Tree, IterT>::begin(*this); } template<typename RootNodeType> template<typename IterT> inline IterT Tree<RootNodeType>::cbegin() const { return TreeIterTraits<Tree, IterT>::begin(*this); } //////////////////////////////////////// template<typename RootNodeType> void Tree<RootNodeType>::readTopology(std::istream& is, bool saveFloatAsHalf) { this->clearAllAccessors(); TreeBase::readTopology(is, saveFloatAsHalf); mRoot.readTopology(is, saveFloatAsHalf); } template<typename RootNodeType> void Tree<RootNodeType>::writeTopology(std::ostream& os, bool saveFloatAsHalf) const { TreeBase::writeTopology(os, saveFloatAsHalf); mRoot.writeTopology(os, saveFloatAsHalf); } template<typename RootNodeType> inline void Tree<RootNodeType>::readBuffers(std::istream &is, bool saveFloatAsHalf) { this->clearAllAccessors(); mRoot.readBuffers(is, saveFloatAsHalf); } template<typename RootNodeType> inline void Tree<RootNodeType>::readBuffers(std::istream &is, const CoordBBox& bbox, bool saveFloatAsHalf) { this->clearAllAccessors(); mRoot.readBuffers(is, bbox, saveFloatAsHalf); } template<typename RootNodeType> inline void Tree<RootNodeType>::readNonresidentBuffers() const { for (LeafCIter it = this->cbeginLeaf(); it; ++it) { // Retrieving the value of a leaf voxel forces loading of the leaf node's voxel buffer. 
it->getValue(Index(0)); } } template<typename RootNodeType> inline void Tree<RootNodeType>::writeBuffers(std::ostream &os, bool saveFloatAsHalf) const { mRoot.writeBuffers(os, saveFloatAsHalf); } template<typename RootNodeType> inline void Tree<RootNodeType>::clear() { std::vector<LeafNodeType*> leafnodes; this->stealNodes(leafnodes); tbb::parallel_for(tbb::blocked_range<size_t>(0, leafnodes.size()), DeallocateNodes<LeafNodeType>(leafnodes)); std::vector<typename RootNodeType::ChildNodeType*> internalNodes; this->stealNodes(internalNodes); tbb::parallel_for(tbb::blocked_range<size_t>(0, internalNodes.size()), DeallocateNodes<typename RootNodeType::ChildNodeType>(internalNodes)); mRoot.clear(); this->clearAllAccessors(); } //////////////////////////////////////// template<typename RootNodeType> inline void Tree<RootNodeType>::attachAccessor(ValueAccessorBase<Tree, true>& accessor) const { typename AccessorRegistry::accessor a; mAccessorRegistry.insert(a, &accessor); } template<typename RootNodeType> inline void Tree<RootNodeType>::attachAccessor(ValueAccessorBase<const Tree, true>& accessor) const { typename ConstAccessorRegistry::accessor a; mConstAccessorRegistry.insert(a, &accessor); } template<typename RootNodeType> inline void Tree<RootNodeType>::releaseAccessor(ValueAccessorBase<Tree, true>& accessor) const { mAccessorRegistry.erase(&accessor); } template<typename RootNodeType> inline void Tree<RootNodeType>::releaseAccessor(ValueAccessorBase<const Tree, true>& accessor) const { mConstAccessorRegistry.erase(&accessor); } template<typename RootNodeType> inline void Tree<RootNodeType>::clearAllAccessors() { for (typename AccessorRegistry::iterator it = mAccessorRegistry.begin(); it != mAccessorRegistry.end(); ++it) { if (it->first) it->first->clear(); } for (typename ConstAccessorRegistry::iterator it = mConstAccessorRegistry.begin(); it != mConstAccessorRegistry.end(); ++it) { if (it->first) it->first->clear(); } } template<typename RootNodeType> inline void Tree<RootNodeType>::releaseAllAccessors() { mAccessorRegistry.erase(nullptr); for (typename AccessorRegistry::iterator it = mAccessorRegistry.begin(); it != mAccessorRegistry.end(); ++it) { it->first->release(); } mAccessorRegistry.clear(); mAccessorRegistry.erase(nullptr); for (typename ConstAccessorRegistry::iterator it = mConstAccessorRegistry.begin(); it != mConstAccessorRegistry.end(); ++it) { it->first->release(); } mConstAccessorRegistry.clear(); } //////////////////////////////////////// template<typename RootNodeType> inline const typename RootNodeType::ValueType& Tree<RootNodeType>::getValue(const Coord& xyz) const { return mRoot.getValue(xyz); } template<typename RootNodeType> template<typename AccessT> inline const typename RootNodeType::ValueType& Tree<RootNodeType>::getValue(const Coord& xyz, AccessT& accessor) const { return accessor.getValue(xyz); } template<typename RootNodeType> inline int Tree<RootNodeType>::getValueDepth(const Coord& xyz) const { return mRoot.getValueDepth(xyz); } template<typename RootNodeType> inline void Tree<RootNodeType>::setValueOff(const Coord& xyz) { mRoot.setValueOff(xyz); } template<typename RootNodeType> inline void Tree<RootNodeType>::setValueOff(const Coord& xyz, const ValueType& value) { mRoot.setValueOff(xyz, value); } template<typename RootNodeType> inline void Tree<RootNodeType>::setActiveState(const Coord& xyz, bool on) { mRoot.setActiveState(xyz, on); } template<typename RootNodeType> inline void Tree<RootNodeType>::setValue(const Coord& xyz, const ValueType& value) { 
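    // setValue() both assigns the value and marks the voxel active by forwarding to the
    // root node's setValueOn(); use setValueOnly() below to leave the active state unchanged.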
mRoot.setValueOn(xyz, value); } template<typename RootNodeType> inline void Tree<RootNodeType>::setValueOnly(const Coord& xyz, const ValueType& value) { mRoot.setValueOnly(xyz, value); } template<typename RootNodeType> template<typename AccessT> inline void Tree<RootNodeType>::setValue(const Coord& xyz, const ValueType& value, AccessT& accessor) { accessor.setValue(xyz, value); } template<typename RootNodeType> inline void Tree<RootNodeType>::setValueOn(const Coord& xyz) { mRoot.setActiveState(xyz, true); } template<typename RootNodeType> inline void Tree<RootNodeType>::setValueOn(const Coord& xyz, const ValueType& value) { mRoot.setValueOn(xyz, value); } template<typename RootNodeType> template<typename ModifyOp> inline void Tree<RootNodeType>::modifyValue(const Coord& xyz, const ModifyOp& op) { mRoot.modifyValue(xyz, op); } template<typename RootNodeType> template<typename ModifyOp> inline void Tree<RootNodeType>::modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op) { mRoot.modifyValueAndActiveState(xyz, op); } template<typename RootNodeType> inline bool Tree<RootNodeType>::probeValue(const Coord& xyz, ValueType& value) const { return mRoot.probeValue(xyz, value); } //////////////////////////////////////// template<typename RootNodeType> inline void Tree<RootNodeType>::addTile(Index level, const Coord& xyz, const ValueType& value, bool active) { mRoot.addTile(level, xyz, value, active); } template<typename RootNodeType> template<typename NodeT> inline NodeT* Tree<RootNodeType>::stealNode(const Coord& xyz, const ValueType& value, bool active) { this->clearAllAccessors(); return mRoot.template stealNode<NodeT>(xyz, value, active); } template<typename RootNodeType> inline typename RootNodeType::LeafNodeType* Tree<RootNodeType>::touchLeaf(const Coord& xyz) { return mRoot.touchLeaf(xyz); } template<typename RootNodeType> inline typename RootNodeType::LeafNodeType* Tree<RootNodeType>::probeLeaf(const Coord& xyz) { return mRoot.probeLeaf(xyz); } template<typename RootNodeType> inline const typename RootNodeType::LeafNodeType* Tree<RootNodeType>::probeConstLeaf(const Coord& xyz) const { return mRoot.probeConstLeaf(xyz); } template<typename RootNodeType> template<typename NodeType> inline NodeType* Tree<RootNodeType>::probeNode(const Coord& xyz) { return mRoot.template probeNode<NodeType>(xyz); } template<typename RootNodeType> template<typename NodeType> inline const NodeType* Tree<RootNodeType>::probeNode(const Coord& xyz) const { return this->template probeConstNode<NodeType>(xyz); } template<typename RootNodeType> template<typename NodeType> inline const NodeType* Tree<RootNodeType>::probeConstNode(const Coord& xyz) const { return mRoot.template probeConstNode<NodeType>(xyz); } //////////////////////////////////////// template<typename RootNodeType> inline void Tree<RootNodeType>::clip(const CoordBBox& bbox) { this->clearAllAccessors(); return mRoot.clip(bbox); } template<typename RootNodeType> inline void Tree<RootNodeType>::clipUnallocatedNodes() { this->clearAllAccessors(); for (LeafIter it = this->beginLeaf(); it; ) { const LeafNodeType* leaf = it.getLeaf(); ++it; // advance the iterator before deleting the leaf node if (!leaf->isAllocated()) { this->addTile(/*level=*/0, leaf->origin(), this->background(), /*active=*/false); } } } template<typename RootNodeType> inline Index32 Tree<RootNodeType>::unallocatedLeafCount() const { Index32 sum = 0; for (auto it = this->cbeginLeaf(); it; ++it) if (!it->isAllocated()) ++sum; return sum; } template<typename RootNodeType> inline void 
Tree<RootNodeType>::sparseFill(const CoordBBox& bbox, const ValueType& value, bool active) { this->clearAllAccessors(); return mRoot.sparseFill(bbox, value, active); } template<typename RootNodeType> inline void Tree<RootNodeType>::denseFill(const CoordBBox& bbox, const ValueType& value, bool active) { this->clearAllAccessors(); return mRoot.denseFill(bbox, value, active); } template<typename RootNodeType> inline void Tree<RootNodeType>::voxelizeActiveTiles(bool threaded) { this->clearAllAccessors(); mRoot.voxelizeActiveTiles(threaded); } template<typename RootNodeType> Metadata::Ptr Tree<RootNodeType>::getBackgroundValue() const { Metadata::Ptr result; if (Metadata::isRegisteredType(valueType())) { using MetadataT = TypedMetadata<ValueType>; result = Metadata::createMetadata(valueType()); if (result->typeName() == MetadataT::staticTypeName()) { MetadataT* m = static_cast<MetadataT*>(result.get()); m->value() = mRoot.background(); } } return result; } //////////////////////////////////////// template<typename RootNodeType> inline void Tree<RootNodeType>::merge(Tree& other, MergePolicy policy) { this->clearAllAccessors(); other.clearAllAccessors(); switch (policy) { case MERGE_ACTIVE_STATES: mRoot.template merge<MERGE_ACTIVE_STATES>(other.mRoot); break; case MERGE_NODES: mRoot.template merge<MERGE_NODES>(other.mRoot); break; case MERGE_ACTIVE_STATES_AND_NODES: mRoot.template merge<MERGE_ACTIVE_STATES_AND_NODES>(other.mRoot); break; } } template<typename RootNodeType> template<typename OtherRootNodeType> inline void Tree<RootNodeType>::topologyUnion(const Tree<OtherRootNodeType>& other) { this->clearAllAccessors(); mRoot.topologyUnion(other.root()); } template<typename RootNodeType> template<typename OtherRootNodeType> inline void Tree<RootNodeType>::topologyIntersection(const Tree<OtherRootNodeType>& other) { this->clearAllAccessors(); mRoot.topologyIntersection(other.root()); } template<typename RootNodeType> template<typename OtherRootNodeType> inline void Tree<RootNodeType>::topologyDifference(const Tree<OtherRootNodeType>& other) { this->clearAllAccessors(); mRoot.topologyDifference(other.root()); } //////////////////////////////////////// /// @brief Helper class to adapt a three-argument (a, b, result) CombineOp functor /// into a single-argument functor that accepts a CombineArgs struct template<typename AValueT, typename CombineOp, typename BValueT = AValueT> struct CombineOpAdapter { CombineOpAdapter(CombineOp& _op): op(_op) {} void operator()(CombineArgs<AValueT, BValueT>& args) const { op(args.a(), args.b(), args.result()); } CombineOp& op; }; template<typename RootNodeType> template<typename CombineOp> inline void Tree<RootNodeType>::combine(Tree& other, CombineOp& op, bool prune) { CombineOpAdapter<ValueType, CombineOp> extendedOp(op); this->combineExtended(other, extendedOp, prune); } /// @internal This overload is needed (for ICC and GCC, but not for VC) to disambiguate /// code like this: <tt>aTree.combine(bTree, MyCombineOp(...))</tt>. 
#ifndef _MSC_VER template<typename RootNodeType> template<typename CombineOp> inline void Tree<RootNodeType>::combine(Tree& other, const CombineOp& op, bool prune) { CombineOpAdapter<ValueType, const CombineOp> extendedOp(op); this->combineExtended(other, extendedOp, prune); } #endif template<typename RootNodeType> template<typename ExtendedCombineOp> inline void Tree<RootNodeType>::combineExtended(Tree& other, ExtendedCombineOp& op, bool prune) { this->clearAllAccessors(); mRoot.combine(other.root(), op, prune); } /// @internal This overload is needed (for ICC and GCC, but not for VC) to disambiguate /// code like this: <tt>aTree.combineExtended(bTree, MyCombineOp(...))</tt>. #ifndef _MSC_VER template<typename RootNodeType> template<typename ExtendedCombineOp> inline void Tree<RootNodeType>::combineExtended(Tree& other, const ExtendedCombineOp& op, bool prune) { this->clearAllAccessors(); mRoot.template combine<const ExtendedCombineOp>(other.mRoot, op, prune); } #endif template<typename RootNodeType> template<typename CombineOp, typename OtherTreeType> inline void Tree<RootNodeType>::combine2(const Tree& a, const OtherTreeType& b, CombineOp& op, bool prune) { CombineOpAdapter<ValueType, CombineOp, typename OtherTreeType::ValueType> extendedOp(op); this->combine2Extended(a, b, extendedOp, prune); } /// @internal This overload is needed (for ICC and GCC, but not for VC) to disambiguate /// code like this: <tt>tree.combine2(aTree, bTree, MyCombineOp(...))</tt>. #ifndef _MSC_VER template<typename RootNodeType> template<typename CombineOp, typename OtherTreeType> inline void Tree<RootNodeType>::combine2(const Tree& a, const OtherTreeType& b, const CombineOp& op, bool prune) { CombineOpAdapter<ValueType, const CombineOp, typename OtherTreeType::ValueType> extendedOp(op); this->combine2Extended(a, b, extendedOp, prune); } #endif template<typename RootNodeType> template<typename ExtendedCombineOp, typename OtherTreeType> inline void Tree<RootNodeType>::combine2Extended(const Tree& a, const OtherTreeType& b, ExtendedCombineOp& op, bool prune) { this->clearAllAccessors(); mRoot.combine2(a.root(), b.root(), op, prune); } /// @internal This overload is needed (for ICC and GCC, but not for VC) to disambiguate /// code like the following, where the functor argument is a temporary: /// <tt>tree.combine2Extended(aTree, bTree, MyCombineOp(...))</tt>. #ifndef _MSC_VER template<typename RootNodeType> template<typename ExtendedCombineOp, typename OtherTreeType> inline void Tree<RootNodeType>::combine2Extended(const Tree& a, const OtherTreeType& b, const ExtendedCombineOp& op, bool prune) { this->clearAllAccessors(); mRoot.template combine2<const ExtendedCombineOp>(a.root(), b.root(), op, prune); } #endif //////////////////////////////////////// template<typename RootNodeType> template<typename VisitorOp> inline void Tree<RootNodeType>::visit(VisitorOp& op) { this->clearAllAccessors(); mRoot.template visit<VisitorOp>(op); } template<typename RootNodeType> template<typename VisitorOp> inline void Tree<RootNodeType>::visit(VisitorOp& op) const { mRoot.template visit<VisitorOp>(op); } /// @internal This overload is needed (for ICC and GCC, but not for VC) to disambiguate /// code like this: <tt>tree.visit(MyVisitorOp(...))</tt>. 
template<typename RootNodeType> template<typename VisitorOp> inline void Tree<RootNodeType>::visit(const VisitorOp& op) { this->clearAllAccessors(); mRoot.template visit<const VisitorOp>(op); } /// @internal This overload is needed (for ICC and GCC, but not for VC) to disambiguate /// code like this: <tt>tree.visit(MyVisitorOp(...))</tt>. template<typename RootNodeType> template<typename VisitorOp> inline void Tree<RootNodeType>::visit(const VisitorOp& op) const { mRoot.template visit<const VisitorOp>(op); } //////////////////////////////////////// template<typename RootNodeType> template<typename OtherTreeType, typename VisitorOp> inline void Tree<RootNodeType>::visit2(OtherTreeType& other, VisitorOp& op) { this->clearAllAccessors(); using OtherRootNodeType = typename OtherTreeType::RootNodeType; mRoot.template visit2<OtherRootNodeType, VisitorOp>(other.root(), op); } template<typename RootNodeType> template<typename OtherTreeType, typename VisitorOp> inline void Tree<RootNodeType>::visit2(OtherTreeType& other, VisitorOp& op) const { using OtherRootNodeType = typename OtherTreeType::RootNodeType; mRoot.template visit2<OtherRootNodeType, VisitorOp>(other.root(), op); } /// @internal This overload is needed (for ICC and GCC, but not for VC) to disambiguate /// code like this: <tt>aTree.visit2(bTree, MyVisitorOp(...))</tt>. template<typename RootNodeType> template<typename OtherTreeType, typename VisitorOp> inline void Tree<RootNodeType>::visit2(OtherTreeType& other, const VisitorOp& op) { this->clearAllAccessors(); using OtherRootNodeType = typename OtherTreeType::RootNodeType; mRoot.template visit2<OtherRootNodeType, const VisitorOp>(other.root(), op); } /// @internal This overload is needed (for ICC and GCC, but not for VC) to disambiguate /// code like this: <tt>aTree.visit2(bTree, MyVisitorOp(...))</tt>. 
template<typename RootNodeType> template<typename OtherTreeType, typename VisitorOp> inline void Tree<RootNodeType>::visit2(OtherTreeType& other, const VisitorOp& op) const { using OtherRootNodeType = typename OtherTreeType::RootNodeType; mRoot.template visit2<OtherRootNodeType, const VisitorOp>(other.root(), op); } //////////////////////////////////////// template<typename RootNodeType> inline const Name& Tree<RootNodeType>::treeType() { static std::once_flag once; std::call_once(once, []() { std::vector<Index> dims; Tree::getNodeLog2Dims(dims); std::ostringstream ostr; ostr << "Tree_" << typeNameAsString<BuildType>(); for (size_t i = 1, N = dims.size(); i < N; ++i) { // start from 1 to skip the RootNode ostr << "_" << dims[i]; } sTreeTypeName.reset(new Name(ostr.str())); }); return *sTreeTypeName; } template<typename RootNodeType> template<typename OtherRootNodeType> inline bool Tree<RootNodeType>::hasSameTopology(const Tree<OtherRootNodeType>& other) const { return mRoot.hasSameTopology(other.root()); } template<typename RootNodeType> Index64 Tree<RootNodeType>::inactiveVoxelCount() const { Coord dim(0, 0, 0); this->evalActiveVoxelDim(dim); const Index64 totalVoxels = dim.x() * dim.y() * dim.z(), activeVoxels = this->activeVoxelCount(); assert(totalVoxels >= activeVoxels); return totalVoxels - activeVoxels; } template<typename RootNodeType> inline bool Tree<RootNodeType>::evalLeafBoundingBox(CoordBBox& bbox) const { bbox.reset(); // default invalid bbox if (this->empty()) return false; // empty mRoot.evalActiveBoundingBox(bbox, false); return !bbox.empty(); } template<typename RootNodeType> inline bool Tree<RootNodeType>::evalActiveVoxelBoundingBox(CoordBBox& bbox) const { bbox.reset(); // default invalid bbox if (this->empty()) return false; // empty mRoot.evalActiveBoundingBox(bbox, true); return !bbox.empty(); } template<typename RootNodeType> inline bool Tree<RootNodeType>::evalActiveVoxelDim(Coord& dim) const { CoordBBox bbox; bool notEmpty = this->evalActiveVoxelBoundingBox(bbox); dim = bbox.extents(); return notEmpty; } template<typename RootNodeType> inline bool Tree<RootNodeType>::evalLeafDim(Coord& dim) const { CoordBBox bbox; bool notEmpty = this->evalLeafBoundingBox(bbox); dim = bbox.extents(); return notEmpty; } template<typename RootNodeType> inline void Tree<RootNodeType>::evalMinMax(ValueType& minVal, ValueType& maxVal) const { /// @todo optimize minVal = maxVal = zeroVal<ValueType>(); if (ValueOnCIter iter = this->cbeginValueOn()) { minVal = maxVal = *iter; for (++iter; iter; ++iter) { const ValueType& val = *iter; if (math::cwiseLessThan(val, minVal)) minVal = val; if (math::cwiseGreaterThan(val, maxVal)) maxVal = val; } } } template<typename RootNodeType> inline void Tree<RootNodeType>::getNodeLog2Dims(std::vector<Index>& dims) { dims.clear(); RootNodeType::getNodeLog2Dims(dims); } template<typename RootNodeType> inline void Tree<RootNodeType>::print(std::ostream& os, int verboseLevel) const { if (verboseLevel <= 0) return; /// @todo Consider using boost::io::ios_precision_saver instead. struct OnExit { std::ostream& os; std::streamsize savedPrecision; OnExit(std::ostream& _os): os(_os), savedPrecision(os.precision()) {} ~OnExit() { os.precision(savedPrecision); } }; OnExit restorePrecision(os); std::vector<Index> dims; Tree::getNodeLog2Dims(dims);// leaf is the last element os << "Information about Tree:\n" << " Type: " << this->type() << "\n"; os << " Configuration:\n"; if (verboseLevel <= 1) { // Print node types and sizes. 
os << " Root(" << mRoot.getTableSize() << ")"; if (dims.size() > 1) { for (size_t i = 1, N = dims.size() - 1; i < N; ++i) { os << ", Internal(" << (1 << dims[i]) << "^3)"; } os << ", Leaf(" << (1 << dims.back()) << "^3)\n"; } os << " Background value: " << mRoot.background() << "\n"; return; } // The following is tree information that is expensive to extract. ValueType minVal = zeroVal<ValueType>(), maxVal = zeroVal<ValueType>(); if (verboseLevel > 3) { // This forces loading of all non-resident nodes. this->evalMinMax(minVal, maxVal); } #if OPENVDB_ABI_VERSION_NUMBER >= 7 const auto nodeCount = this->nodeCount();//fast const Index32 leafCount = nodeCount.front();// leaf is the first element #else std::vector<Index64> nodeCount(dims.size()); for (NodeCIter it = cbeginNode(); it; ++it) ++(nodeCount[it.getDepth()]);//slow const Index64 leafCount = *nodeCount.rbegin();// leaf is the last element #endif assert(dims.size() == nodeCount.size()); Index64 totalNodeCount = 0; for (size_t i = 0; i < nodeCount.size(); ++i) totalNodeCount += nodeCount[i]; // Print node types, counts and sizes. os << " Root(1 x " << mRoot.getTableSize() << ")"; if (dims.size() >= 2) { for (size_t i = 1, N = dims.size() - 1; i < N; ++i) { #if OPENVDB_ABI_VERSION_NUMBER >= 7 os << ", Internal(" << util::formattedInt(nodeCount[N - i]); #else os << ", Internal(" << util::formattedInt(nodeCount[i]); #endif os << " x " << (1 << dims[i]) << "^3)"; } os << ", Leaf(" << util::formattedInt(leafCount); os << " x " << (1 << dims.back()) << "^3)\n"; } os << " Background value: " << mRoot.background() << "\n"; // Statistics of topology and values if (verboseLevel > 3) { os << " Min value: " << minVal << "\n"; os << " Max value: " << maxVal << "\n"; } const Index64 numActiveVoxels = this->activeVoxelCount(), numActiveLeafVoxels = this->activeLeafVoxelCount(), numActiveTiles = this->activeTileCount(); os << " Number of active voxels: " << util::formattedInt(numActiveVoxels) << "\n"; os << " Number of active tiles: " << util::formattedInt(numActiveTiles) << "\n"; Coord dim(0, 0, 0); Index64 totalVoxels = 0; if (numActiveVoxels) { // nonempty CoordBBox bbox; this->evalActiveVoxelBoundingBox(bbox); dim = bbox.extents(); totalVoxels = dim.x() * uint64_t(dim.y()) * dim.z(); os << " Bounding box of active voxels: " << bbox << "\n"; os << " Dimensions of active voxels: " << dim[0] << " x " << dim[1] << " x " << dim[2] << "\n"; const double activeRatio = (100.0 * double(numActiveVoxels)) / double(totalVoxels); os << " Percentage of active voxels: " << std::setprecision(3) << activeRatio << "%\n"; if (leafCount > 0) { const double fillRatio = (100.0 * double(numActiveLeafVoxels)) / (double(leafCount) * double(LeafNodeType::NUM_VOXELS)); os << " Average leaf node fill ratio: " << fillRatio << "%\n"; } if (verboseLevel > 2) { Index64 sum = 0;// count the number of unallocated leaf nodes for (auto it = this->cbeginLeaf(); it; ++it) if (!it->isAllocated()) ++sum; os << " Number of unallocated nodes: " << util::formattedInt(sum) << " (" << (100.0 * double(sum) / double(totalNodeCount)) << "%)\n"; } } else { os << " Tree is empty!\n"; } os << std::flush; if (verboseLevel == 2) return; // Memory footprint in bytes const Index64 actualMem = this->memUsage(), denseMem = sizeof(ValueType) * totalVoxels, voxelsMem = sizeof(ValueType) * numActiveLeafVoxels; ///< @todo not accurate for BoolTree (and probably should count tile values) os << "Memory footprint:\n"; util::printBytes(os, actualMem, " Actual: "); util::printBytes(os, voxelsMem, " Active leaf 
voxels: "); if (numActiveVoxels) { util::printBytes(os, denseMem, " Dense equivalent: "); os << " Actual footprint is " << (100.0 * double(actualMem) / double(denseMem)) << "% of an equivalent dense volume\n"; os << " Leaf voxel footprint is " << (100.0 * double(voxelsMem) / double(actualMem)) << "% of actual footprint\n"; } } } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_TREE_HAS_BEEN_INCLUDED
82,743
C
36.474638
103
0.668757
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/ValueAccessor.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file tree/ValueAccessor.h /// /// When traversing a grid in a spatially coherent pattern (e.g., iterating /// over neighboring voxels), request a @c ValueAccessor from the grid /// (with Grid::getAccessor()) and use the accessor's @c getValue() and /// @c setValue() methods. These will typically be significantly faster /// than accessing voxels directly in the grid's tree. /// /// @par Example: /// /// @code /// FloatGrid grid; /// FloatGrid::Accessor acc = grid.getAccessor(); /// // First access is slow: /// acc.setValue(Coord(0, 0, 0), 100); /// // Subsequent nearby accesses are fast, since the accessor now holds pointers /// // to nodes that contain (0, 0, 0) along the path from the root of the grid's /// // tree to the leaf: /// acc.setValue(Coord(0, 0, 1), 100); /// acc.getValue(Coord(0, 2, 0)); /// // Slow, because the accessor must be repopulated: /// acc.getValue(Coord(-1, -1, -1)); /// // Fast: /// acc.getValue(Coord(-1, -1, -2)); /// acc.setValue(Coord(-1, -2, 0), -100); /// @endcode #ifndef OPENVDB_TREE_VALUEACCESSOR_HAS_BEEN_INCLUDED #define OPENVDB_TREE_VALUEACCESSOR_HAS_BEEN_INCLUDED #include <tbb/null_mutex.h> #include <tbb/spin_mutex.h> #include <openvdb/version.h> #include <openvdb/Types.h> #include <cassert> #include <limits> #include <type_traits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { // Forward declarations of local classes that are not intended for general use // The IsSafe template parameter is explained in the warning below. template<typename TreeType, bool IsSafe = true> class ValueAccessor0; template<typename TreeType, bool IsSafe = true, Index L0 = 0> class ValueAccessor1; template<typename TreeType, bool IsSafe = true, Index L0 = 0, Index L1 = 1> class ValueAccessor2; template<typename TreeType, bool IsSafe = true, Index L0 = 0, Index L1 = 1, Index L2 = 2> class ValueAccessor3; template<typename TreeCacheT, typename NodeVecT, bool AtRoot> class CacheItem; /// @brief This base class for ValueAccessors manages registration of an accessor /// with a tree so that the tree can automatically clear the accessor whenever /// one of its nodes is deleted. /// /// @internal A base class is needed because ValueAccessor is templated on both /// a Tree type and a mutex type. The various instantiations of the template /// are distinct, unrelated types, so they can't easily be stored in a container /// such as the Tree's CacheRegistry. This base class, in contrast, is templated /// only on the Tree type, so for any given Tree, only two distinct instantiations /// are possible, ValueAccessorBase<Tree> and ValueAccessorBase<const Tree>. /// /// @warning If IsSafe = false then the ValueAccessor will not register itself /// with the tree from which it is constructed. While in some rare cases this can /// lead to better performance (since it avoids the small overhead of insertion /// on creation and deletion on destruction) it is also unsafe if the tree is /// modified. So unless you're an expert, it is highly recommended to set /// IsSafe = true, which is the default in all derived ValueAccessors defined /// below. However, if you know that the tree is not being modified for the lifespan /// of the ValueAccessor AND the work performed per ValueAccessor is small relative /// to the overhead of registering it, you should consider setting IsSafe = false.
If /// this turns out to improve performance, you should really rewrite your code so as /// to better amortize the construction of the ValueAccessor, i.e. reuse it as much /// as possible! template<typename TreeType, bool IsSafe> class ValueAccessorBase { public: static const bool IsConstTree = std::is_const<TreeType>::value; /// @brief Return true if this accessor is safe, i.e. registered /// with the tree from which it is constructed. Un-registered /// accessors can in rare cases be faster because they avoid the /// (small) overhead of registration, but they are unsafe if the /// tree is modified. So unless you're an expert, it is highly /// recommended to set IsSafe = true (which is the default). static bool isSafe() { return IsSafe; } ValueAccessorBase(TreeType& tree): mTree(&tree) { if (IsSafe) tree.attachAccessor(*this); } virtual ~ValueAccessorBase() { if (IsSafe && mTree) mTree->releaseAccessor(*this); } /// @brief Return a pointer to the tree associated with this accessor. /// @details The pointer will be null only if the tree from which this accessor /// was constructed was subsequently deleted (which generally leaves the /// accessor in an unsafe state). TreeType* getTree() const { return mTree; } /// Return a reference to the tree associated with this accessor. TreeType& tree() const { assert(mTree); return *mTree; } ValueAccessorBase(const ValueAccessorBase& other): mTree(other.mTree) { if (IsSafe && mTree) mTree->attachAccessor(*this); } ValueAccessorBase& operator=(const ValueAccessorBase& other) { if (&other != this) { if (IsSafe && mTree) mTree->releaseAccessor(*this); mTree = other.mTree; if (IsSafe && mTree) mTree->attachAccessor(*this); } return *this; } virtual void clear() = 0; protected: // Allow trees to deregister themselves. template<typename> friend class Tree; virtual void release() { mTree = nullptr; } TreeType* mTree; }; // class ValueAccessorBase //////////////////////////////////////// /// When traversing a grid in a spatially coherent pattern (e.g., iterating /// over neighboring voxels), request a @c ValueAccessor from the grid /// (with Grid::getAccessor()) and use the accessor's @c getValue() and /// @c setValue() methods. These will typically be significantly faster /// than accessing voxels directly in the grid's tree. /// /// A ValueAccessor caches pointers to tree nodes along the path to a voxel (x, y, z). /// A subsequent access to voxel (x', y', z') starts from the cached leaf node and /// moves up until a cached node that encloses (x', y', z') is found, then traverses /// down the tree from that node to a leaf, updating the cache with the new path. /// This leads to significant acceleration of spatially-coherent accesses. /// /// @param _TreeType the type of the tree to be accessed [required] /// @param IsSafe if IsSafe = false then the ValueAccessor will /// not register itself with the tree from which /// it is constructed (see warning). /// @param CacheLevels the number of nodes to be cached, starting from the leaf level /// and not including the root (i.e., CacheLevels < DEPTH), /// and defaulting to all non-root nodes /// @param MutexType the type of mutex to use (see note) /// /// @warning If IsSafe = false then the ValueAccessor will not register itself /// with the tree from which it is constructed. While in some rare cases this can /// lead to better performance (since it avoids the small overhead of insertion /// on creation and deletion on destruction) it is also unsafe if the tree is /// modified.
So unless you're an expert, it is highly recommended to set /// IsSafe = true, which is the default. However, if you know that the tree is not /// being modified for the lifespan of the ValueAccessor AND the work performed /// per ValueAccessor is small relative to the overhead of registering it, you should /// consider setting IsSafe = false. If this improves performance, you should /// really rewrite your code so as to better amortize the construction of the /// ValueAccessor, i.e. reuse it as much as possible! /// /// @note If @c MutexType is a TBB-compatible mutex, then multiple threads may /// safely access a single, shared accessor. However, it is highly recommended /// that, instead, each thread be assigned its own, non-mutex-protected accessor. template<typename _TreeType, bool IsSafe = true, Index CacheLevels = _TreeType::DEPTH-1, typename MutexType = tbb::null_mutex> class ValueAccessor: public ValueAccessorBase<_TreeType, IsSafe> { public: static_assert(CacheLevels < _TreeType::DEPTH, "cache size exceeds tree depth"); using TreeType = _TreeType; using RootNodeT = typename TreeType::RootNodeType; using LeafNodeT = typename TreeType::LeafNodeType; using ValueType = typename RootNodeT::ValueType; using BaseT = ValueAccessorBase<TreeType, IsSafe>; using LockT = typename MutexType::scoped_lock; using BaseT::IsConstTree; ValueAccessor(TreeType& tree): BaseT(tree), mCache(*this) { mCache.insert(Coord(), &tree.root()); } ValueAccessor(const ValueAccessor& other): BaseT(other), mCache(*this, other.mCache) {} ValueAccessor& operator=(const ValueAccessor& other) { if (&other != this) { this->BaseT::operator=(other); mCache.copy(*this, other.mCache); } return *this; } ~ValueAccessor() override = default; /// Return the number of cache levels employed by this accessor. static Index numCacheLevels() { return CacheLevels; } /// Return @c true if nodes along the path to the given voxel have been cached. bool isCached(const Coord& xyz) const { LockT lock(mMutex); return mCache.isCached(xyz); } /// Return the value of the voxel at the given coordinates. const ValueType& getValue(const Coord& xyz) const { LockT lock(mMutex); return mCache.getValue(xyz); } /// Return the active state of the voxel at the given coordinates. bool isValueOn(const Coord& xyz) const { LockT lock(mMutex); return mCache.isValueOn(xyz); } /// Return the active state of the voxel as well as its value. bool probeValue(const Coord& xyz, ValueType& value) const { LockT lock(mMutex); return mCache.probeValue(xyz, value); } /// Return the tree depth (0 = root) at which the value of voxel (x, y, z) resides, /// or -1 if (x, y, z) isn't explicitly represented in the tree (i.e., if it is /// implicitly a background voxel). int getValueDepth(const Coord& xyz) const { LockT lock(mMutex); return mCache.getValueDepth(xyz); } /// Return @c true if the value of voxel (x, y, z) resides at the leaf level /// of the tree, i.e., if it is not a tile value. bool isVoxel(const Coord& xyz) const { LockT lock(mMutex); return mCache.isVoxel(xyz); } //@{ /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValue(const Coord& xyz, const ValueType& value) { LockT lock(mMutex); mCache.setValue(xyz, value); } void setValueOn(const Coord& xyz, const ValueType& value) { this->setValue(xyz, value); } //@} /// Set the value of the voxel at the given coordinate but don't change its active state.
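///
/// @par Example:
/// An illustrative sketch (the grid and values are assumptions) contrasting this
/// method with setValue():
/// @code
/// openvdb::FloatGrid grid(/*background=*/0.0f);
/// openvdb::FloatGrid::Accessor acc = grid.getAccessor();
/// acc.setValue(openvdb::Coord(1, 2, 3), 5.0f);     // assigns 5 and activates the voxel
/// acc.setValueOnly(openvdb::Coord(1, 2, 3), 7.0f); // assigns 7, active state unchanged
/// @endcode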
void setValueOnly(const Coord& xyz, const ValueType& value) { LockT lock(mMutex); mCache.setValueOnly(xyz, value); } /// Set the value of the voxel at the given coordinates and mark the voxel as inactive. void setValueOff(const Coord& xyz, const ValueType& value) { LockT lock(mMutex); mCache.setValueOff(xyz, value); } /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. /// @details See Tree::modifyValue() for details. template<typename ModifyOp> void modifyValue(const Coord& xyz, const ModifyOp& op) { LockT lock(mMutex); mCache.modifyValue(xyz, op); } /// @brief Apply a functor to the voxel at the given coordinates. /// @details See Tree::modifyValueAndActiveState() for details. template<typename ModifyOp> void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op) { LockT lock(mMutex); mCache.modifyValueAndActiveState(xyz, op); } /// Set the active state of the voxel at the given coordinates but don't change its value. void setActiveState(const Coord& xyz, bool on = true) { LockT lock(mMutex); mCache.setActiveState(xyz, on); } /// Mark the voxel at the given coordinates as active but don't change its value. void setValueOn(const Coord& xyz) { this->setActiveState(xyz, true); } /// Mark the voxel at the given coordinates as inactive but don't change its value. void setValueOff(const Coord& xyz) { this->setActiveState(xyz, false); } /// Return the cached node of type @a NodeType. [Mainly for internal use] template<typename NodeType> NodeType* getNode() { LockT lock(mMutex); NodeType* node = nullptr; mCache.getNode(node); return node; } /// Cache the given node, which should lie along the path from the root node to /// the node containing voxel (x, y, z). [Mainly for internal use] template<typename NodeType> void insertNode(const Coord& xyz, NodeType& node) { LockT lock(mMutex); mCache.insert(xyz, &node); } /// If a node of the given type exists in the cache, remove it, so that /// isCached(xyz) returns @c false for any voxel (x, y, z) contained in /// that node. [Mainly for internal use] template<typename NodeType> void eraseNode() { LockT lock(mMutex); NodeType* node = nullptr; mCache.erase(node); } /// @brief Add the specified leaf to this tree, possibly creating a child branch /// in the process. If the leaf node already exists, replace it. void addLeaf(LeafNodeT* leaf) { LockT lock(mMutex); mCache.addLeaf(leaf); } /// @brief Add a tile at the specified tree level that contains voxel (x, y, z), /// possibly deleting existing nodes or creating new nodes in the process. void addTile(Index level, const Coord& xyz, const ValueType& value, bool state) { LockT lock(mMutex); mCache.addTile(level, xyz, value, state); } /// @brief Return a pointer to the leaf node that contains voxel (x, y, z). /// If no such node exists, create one, but preserve the values and /// active states of all voxels. /// @details Use this method to preallocate a static tree topology /// over which to safely perform multithreaded processing. LeafNodeT* touchLeaf(const Coord& xyz) { LockT lock(mMutex); return mCache.touchLeaf(xyz); } //@{ /// @brief Return a pointer to the node of the specified type that contains /// voxel (x, y, z), or @c nullptr if no such node exists. 
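///
/// @par Example:
/// A sketch (the accessor @c acc and the coordinate are assumptions) of querying
/// for the leaf node that covers a voxel:
/// @code
/// using LeafT = openvdb::FloatTree::LeafNodeType;
/// if (const LeafT* leaf = acc.probeConstNode<LeafT>(openvdb::Coord(0, 0, 0))) {
///     // the voxel is resolved down to the leaf level
/// }
/// @endcode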
template<typename NodeT> NodeT* probeNode(const Coord& xyz) { LockT lock(mMutex); return mCache.template probeNode<NodeT>(xyz); } template<typename NodeT> const NodeT* probeConstNode(const Coord& xyz) const { LockT lock(mMutex); return mCache.template probeConstNode<NodeT>(xyz); } template<typename NodeT> const NodeT* probeNode(const Coord& xyz) const { return this->template probeConstNode<NodeT>(xyz); } //@} //@{ /// @brief Return a pointer to the leaf node that contains voxel (x, y, z), /// or @c nullptr if no such node exists. LeafNodeT* probeLeaf(const Coord& xyz) { LockT lock(mMutex); return mCache.probeLeaf(xyz); } const LeafNodeT* probeConstLeaf(const Coord& xyz) const { LockT lock(mMutex); return mCache.probeConstLeaf(xyz); } const LeafNodeT* probeLeaf(const Coord& xyz) const { return this->probeConstLeaf(xyz); } //@} /// Remove all nodes from this cache, then reinsert the root node. void clear() override { LockT lock(mMutex); mCache.clear(); if (this->mTree) mCache.insert(Coord(), &(this->mTree->root())); } private: // Allow nodes to insert themselves into the cache. template<typename> friend class RootNode; template<typename, Index> friend class InternalNode; template<typename, Index> friend class LeafNode; // Allow trees to deregister themselves. template<typename> friend class Tree; /// Prevent this accessor from calling Tree::releaseCache() on a tree that /// no longer exists. (Called by mTree when it is destroyed.) void release() override { LockT lock(mMutex); this->BaseT::release(); mCache.clear(); } /// Cache the given node, which should lie along the path from the root node to /// the node containing voxel (x, y, z). /// @note This operation is not mutex-protected and is intended to be called /// only by nodes and only in the context of a getValue() or setValue() call. template<typename NodeType> void insert(const Coord& xyz, NodeType* node) { mCache.insert(xyz, node); } // Define a list of all tree node types from LeafNode to RootNode using InvTreeT = typename RootNodeT::NodeChainType; // Remove all tree node types that are excluded from the cache static constexpr int64_t First = CacheLevels; static constexpr int64_t Last = InvTreeT::template Index<RootNodeT>; using SubtreeT = typename InvTreeT::template RemoveByIndex<First, Last-1>; using CacheItemT = CacheItem<ValueAccessor, SubtreeT, SubtreeT::Size==1>; // Private member data mutable CacheItemT mCache; mutable MutexType mMutex; }; // class ValueAccessor /// @brief Template specialization of the ValueAccessor with no mutex and no cache levels /// @details This specialization is provided mainly for benchmarking. /// Accessors with caching will almost always be faster. 
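///
/// @par Example:
/// A sketch (the tree is an assumption) of explicitly selecting the number of cache
/// levels; requesting zero levels selects this specialization:
/// @code
/// openvdb::FloatTree tree(/*background=*/0.0f);
/// // No caching: every access traverses the tree from the root.
/// openvdb::tree::ValueAccessor<openvdb::FloatTree, /*IsSafe=*/true, /*CacheLevels=*/0> acc(tree);
/// acc.setValue(openvdb::Coord(0, 0, 0), 1.0f);
/// @endcode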
template<typename TreeType, bool IsSafe> class ValueAccessor<TreeType, IsSafe, 0, tbb::null_mutex> : public ValueAccessor0<TreeType, IsSafe> { public: ValueAccessor(TreeType& tree): ValueAccessor0<TreeType, IsSafe>(tree) {} ValueAccessor(const ValueAccessor& other): ValueAccessor0<TreeType, IsSafe>(other) {} ~ValueAccessor() override = default; }; /// Template specialization of the ValueAccessor with no mutex and one cache level template<typename TreeType, bool IsSafe> class ValueAccessor<TreeType, IsSafe, 1, tbb::null_mutex> : public ValueAccessor1<TreeType, IsSafe> { public: ValueAccessor(TreeType& tree): ValueAccessor1<TreeType, IsSafe>(tree) {} ValueAccessor(const ValueAccessor& other): ValueAccessor1<TreeType, IsSafe>(other) {} ~ValueAccessor() override = default; }; /// Template specialization of the ValueAccessor with no mutex and two cache levels template<typename TreeType, bool IsSafe> class ValueAccessor<TreeType, IsSafe, 2, tbb::null_mutex> : public ValueAccessor2<TreeType, IsSafe> { public: ValueAccessor(TreeType& tree): ValueAccessor2<TreeType, IsSafe>(tree) {} ValueAccessor(const ValueAccessor& other): ValueAccessor2<TreeType, IsSafe>(other) {} ~ValueAccessor() override = default; }; /// Template specialization of the ValueAccessor with no mutex and three cache levels template<typename TreeType, bool IsSafe> class ValueAccessor<TreeType, IsSafe, 3, tbb::null_mutex>: public ValueAccessor3<TreeType, IsSafe> { public: ValueAccessor(TreeType& tree): ValueAccessor3<TreeType, IsSafe>(tree) {} ValueAccessor(const ValueAccessor&) = default; ValueAccessor& operator=(const ValueAccessor&) = default; ~ValueAccessor() override = default; }; //////////////////////////////////////// /// @brief This accessor is thread-safe (at the cost of speed) for both reading and /// writing to a tree. That is, multiple threads may safely access a single, /// shared ValueAccessorRW. /// /// @warning Since the mutex-locking employed by the ValueAccessorRW /// can seriously impair performance of multithreaded applications, it /// is recommended that, instead, each thread be assigned its own /// (non-mutex protected) accessor. template<typename TreeType, bool IsSafe = true> class ValueAccessorRW: public ValueAccessor<TreeType, IsSafe, TreeType::DEPTH-1, tbb::spin_mutex> { public: ValueAccessorRW(TreeType& tree) : ValueAccessor<TreeType, IsSafe, TreeType::DEPTH-1, tbb::spin_mutex>(tree) { } }; //////////////////////////////////////// // // The classes below are for internal use and should rarely be used directly. // // An element of a compile-time linked list of node pointers, ordered from LeafNode to RootNode template<typename TreeCacheT, typename NodeVecT, bool AtRoot> class CacheItem { public: using NodeType = typename NodeVecT::Front; using ValueType = typename NodeType::ValueType; using LeafNodeType = typename NodeType::LeafNodeType; using CoordLimits = std::numeric_limits<Int32>; CacheItem(TreeCacheT& parent): mParent(&parent), mHash(CoordLimits::max()), mNode(nullptr), mNext(parent) { } //@{ /// Copy another CacheItem's node pointers and hash keys, but not its parent pointer. 
CacheItem(TreeCacheT& parent, const CacheItem& other): mParent(&parent), mHash(other.mHash), mNode(other.mNode), mNext(parent, other.mNext) { } CacheItem& copy(TreeCacheT& parent, const CacheItem& other) { mParent = &parent; mHash = other.mHash; mNode = other.mNode; mNext.copy(parent, other.mNext); return *this; } //@} bool isCached(const Coord& xyz) const { return (this->isHashed(xyz) || mNext.isCached(xyz)); } /// Cache the given node at this level. void insert(const Coord& xyz, const NodeType* node) { mHash = (node != nullptr) ? xyz & ~(NodeType::DIM-1) : Coord::max(); mNode = node; } /// Forward the given node to another level of the cache. template<typename OtherNodeType> void insert(const Coord& xyz, const OtherNodeType* node) { mNext.insert(xyz, node); } /// Erase the node at this level. void erase(const NodeType*) { mHash = Coord::max(); mNode = nullptr; } /// Erase the node at another level of the cache. template<typename OtherNodeType> void erase(const OtherNodeType* node) { mNext.erase(node); } /// Erase the nodes at this and lower levels of the cache. void clear() { mHash = Coord::max(); mNode = nullptr; mNext.clear(); } /// Return the cached node (if any) at this level. void getNode(const NodeType*& node) const { node = mNode; } void getNode(const NodeType*& node) { node = mNode; } void getNode(NodeType*& node) { // This combination of a static assertion and a const_cast might not be elegant, // but it is a lot simpler than specializing TreeCache for const Trees. static_assert(!TreeCacheT::IsConstTree, "can't get a non-const node from a const tree"); node = const_cast<NodeType*>(mNode); } /// Forward the request to another level of the cache. template<typename OtherNodeType> void getNode(OtherNodeType*& node) { mNext.getNode(node); } /// Return the value of the voxel at the given coordinates. 
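/// @par Cache-hit example
/// Purely illustrative: insert() stores the node origin, xyz & ~(DIM-1), as the hash key.
/// For a node with DIM = 8, the voxel (13, -7, 22) hashes to the origin (8, -8, 16),
/// so any subsequent lookup inside that 8^3 block is answered from this cache level.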
const ValueType& getValue(const Coord& xyz) { if (this->isHashed(xyz)) { assert(mNode); return mNode->getValueAndCache(xyz, *mParent); } return mNext.getValue(xyz); } void addLeaf(LeafNodeType* leaf) { static_assert(!TreeCacheT::IsConstTree, "can't add a node to a const tree"); if (NodeType::LEVEL == 0) return; if (this->isHashed(leaf->origin())) { assert(mNode); return const_cast<NodeType*>(mNode)->addLeafAndCache(leaf, *mParent); } mNext.addLeaf(leaf); } void addTile(Index level, const Coord& xyz, const ValueType& value, bool state) { static_assert(!TreeCacheT::IsConstTree, "can't add a tile to a const tree"); if (NodeType::LEVEL < level) return; if (this->isHashed(xyz)) { assert(mNode); return const_cast<NodeType*>(mNode)->addTileAndCache( level, xyz, value, state, *mParent); } mNext.addTile(level, xyz, value, state); } LeafNodeType* touchLeaf(const Coord& xyz) { static_assert(!TreeCacheT::IsConstTree, "can't get a non-const node from a const tree"); if (this->isHashed(xyz)) { assert(mNode); return const_cast<NodeType*>(mNode)->touchLeafAndCache(xyz, *mParent); } return mNext.touchLeaf(xyz); } LeafNodeType* probeLeaf(const Coord& xyz) { static_assert(!TreeCacheT::IsConstTree, "can't get a non-const node from a const tree"); if (this->isHashed(xyz)) { assert(mNode); return const_cast<NodeType*>(mNode)->probeLeafAndCache(xyz, *mParent); } return mNext.probeLeaf(xyz); } const LeafNodeType* probeConstLeaf(const Coord& xyz) { if (this->isHashed(xyz)) { assert(mNode); return mNode->probeConstLeafAndCache(xyz, *mParent); } return mNext.probeConstLeaf(xyz); } template<typename NodeT> NodeT* probeNode(const Coord& xyz) { static_assert(!TreeCacheT::IsConstTree, "can't get a non-const node from a const tree"); OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (this->isHashed(xyz)) { if ((std::is_same<NodeT, NodeType>::value)) { assert(mNode); return reinterpret_cast<NodeT*>(const_cast<NodeType*>(mNode)); } return const_cast<NodeType*>(mNode)->template probeNodeAndCache<NodeT>(xyz, *mParent); } return mNext.template probeNode<NodeT>(xyz); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } template<typename NodeT> const NodeT* probeConstNode(const Coord& xyz) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (this->isHashed(xyz)) { if ((std::is_same<NodeT, NodeType>::value)) { assert(mNode); return reinterpret_cast<const NodeT*>(mNode); } return mNode->template probeConstNodeAndCache<NodeT>(xyz, *mParent); } return mNext.template probeConstNode<NodeT>(xyz); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } /// Return the active state of the voxel at the given coordinates. bool isValueOn(const Coord& xyz) { if (this->isHashed(xyz)) { assert(mNode); return mNode->isValueOnAndCache(xyz, *mParent); } return mNext.isValueOn(xyz); } /// Return the active state and value of the voxel at the given coordinates. bool probeValue(const Coord& xyz, ValueType& value) { if (this->isHashed(xyz)) { assert(mNode); return mNode->probeValueAndCache(xyz, value, *mParent); } return mNext.probeValue(xyz, value); } int getValueDepth(const Coord& xyz) { if (this->isHashed(xyz)) { assert(mNode); return static_cast<int>(TreeCacheT::RootNodeT::LEVEL) - static_cast<int>(mNode->getValueLevelAndCache(xyz, *mParent)); } else { return mNext.getValueDepth(xyz); } } bool isVoxel(const Coord& xyz) { if (this->isHashed(xyz)) { assert(mNode); return mNode->getValueLevelAndCache(xyz, *mParent)==0; } else { return mNext.isVoxel(xyz); } } /// Set the value of the voxel at the given coordinates and mark the voxel as active. 
void setValue(const Coord& xyz, const ValueType& value) { if (this->isHashed(xyz)) { assert(mNode); static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values"); const_cast<NodeType*>(mNode)->setValueAndCache(xyz, value, *mParent); } else { mNext.setValue(xyz, value); } } void setValueOnly(const Coord& xyz, const ValueType& value) { if (this->isHashed(xyz)) { assert(mNode); static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values"); const_cast<NodeType*>(mNode)->setValueOnlyAndCache(xyz, value, *mParent); } else { mNext.setValueOnly(xyz, value); } } void setValueOn(const Coord& xyz, const ValueType& value) { this->setValue(xyz, value); } /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. /// @details See Tree::modifyValue() for details. template<typename ModifyOp> void modifyValue(const Coord& xyz, const ModifyOp& op) { if (this->isHashed(xyz)) { assert(mNode); static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values"); const_cast<NodeType*>(mNode)->modifyValueAndCache(xyz, op, *mParent); } else { mNext.modifyValue(xyz, op); } } /// @brief Apply a functor to the voxel at the given coordinates. /// @details See Tree::modifyValueAndActiveState() for details. template<typename ModifyOp> void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op) { if (this->isHashed(xyz)) { assert(mNode); static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values"); const_cast<NodeType*>(mNode)->modifyValueAndActiveStateAndCache(xyz, op, *mParent); } else { mNext.modifyValueAndActiveState(xyz, op); } } /// Set the value of the voxel at the given coordinates and mark the voxel as inactive. void setValueOff(const Coord& xyz, const ValueType& value) { if (this->isHashed(xyz)) { assert(mNode); static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values"); const_cast<NodeType*>(mNode)->setValueOffAndCache(xyz, value, *mParent); } else { mNext.setValueOff(xyz, value); } } /// Set the active state of the voxel at the given coordinates. 
void setActiveState(const Coord& xyz, bool on) { if (this->isHashed(xyz)) { assert(mNode); static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values"); const_cast<NodeType*>(mNode)->setActiveStateAndCache(xyz, on, *mParent); } else { mNext.setActiveState(xyz, on); } } private: CacheItem(const CacheItem&); CacheItem& operator=(const CacheItem&); bool isHashed(const Coord& xyz) const { return (xyz[0] & ~Coord::ValueType(NodeType::DIM-1)) == mHash[0] && (xyz[1] & ~Coord::ValueType(NodeType::DIM-1)) == mHash[1] && (xyz[2] & ~Coord::ValueType(NodeType::DIM-1)) == mHash[2]; } TreeCacheT* mParent; Coord mHash; const NodeType* mNode; using RestT = typename NodeVecT::PopFront; CacheItem<TreeCacheT, RestT, /*AtRoot=*/RestT::Size == 1> mNext; };// end of CacheItem /// The tail of a compile-time list of cached node pointers, ordered from LeafNode to RootNode template<typename TreeCacheT, typename NodeVecT> class CacheItem<TreeCacheT, NodeVecT, /*AtRoot=*/true> { public: using RootNodeType = typename NodeVecT::Front; using ValueType = typename RootNodeType::ValueType; using LeafNodeType = typename RootNodeType::LeafNodeType; CacheItem(TreeCacheT& parent): mParent(&parent), mRoot(nullptr) {} CacheItem(TreeCacheT& parent, const CacheItem& other): mParent(&parent), mRoot(other.mRoot) {} CacheItem& copy(TreeCacheT& parent, const CacheItem& other) { mParent = &parent; mRoot = other.mRoot; return *this; } bool isCached(const Coord& xyz) const { return this->isHashed(xyz); } void insert(const Coord&, const RootNodeType* root) { mRoot = root; } // Needed for node types that are not cached template<typename OtherNodeType> void insert(const Coord&, const OtherNodeType*) {} void erase(const RootNodeType*) { mRoot = nullptr; } void clear() { mRoot = nullptr; } void getNode(RootNodeType*& node) { static_assert(!TreeCacheT::IsConstTree, "can't get a non-const node from a const tree"); node = const_cast<RootNodeType*>(mRoot); } void getNode(const RootNodeType*& node) const { node = mRoot; } void addLeaf(LeafNodeType* leaf) { assert(mRoot); static_assert(!TreeCacheT::IsConstTree, "can't add a node to a const tree"); const_cast<RootNodeType*>(mRoot)->addLeafAndCache(leaf, *mParent); } void addTile(Index level, const Coord& xyz, const ValueType& value, bool state) { assert(mRoot); static_assert(!TreeCacheT::IsConstTree, "can't add a tile to a const tree"); const_cast<RootNodeType*>(mRoot)->addTileAndCache(level, xyz, value, state, *mParent); } LeafNodeType* touchLeaf(const Coord& xyz) { assert(mRoot); static_assert(!TreeCacheT::IsConstTree, "can't get a non-const node from a const tree"); return const_cast<RootNodeType*>(mRoot)->touchLeafAndCache(xyz, *mParent); } LeafNodeType* probeLeaf(const Coord& xyz) { assert(mRoot); static_assert(!TreeCacheT::IsConstTree, "can't get a non-const node from a const tree"); return const_cast<RootNodeType*>(mRoot)->probeLeafAndCache(xyz, *mParent); } const LeafNodeType* probeConstLeaf(const Coord& xyz) { assert(mRoot); return mRoot->probeConstLeafAndCache(xyz, *mParent); } template<typename NodeType> NodeType* probeNode(const Coord& xyz) { assert(mRoot); static_assert(!TreeCacheT::IsConstTree, "can't get a non-const node from a const tree"); return const_cast<RootNodeType*>(mRoot)-> template probeNodeAndCache<NodeType>(xyz, *mParent); } template<typename NodeType> const NodeType* probeConstNode(const Coord& xyz) { assert(mRoot); return mRoot->template probeConstNodeAndCache<NodeType>(xyz, *mParent); } int getValueDepth(const Coord& xyz) { assert(mRoot); return 
mRoot->getValueDepthAndCache(xyz, *mParent); } bool isValueOn(const Coord& xyz) { assert(mRoot); return mRoot->isValueOnAndCache(xyz, *mParent); } bool probeValue(const Coord& xyz, ValueType& value) { assert(mRoot); return mRoot->probeValueAndCache(xyz, value, *mParent); } bool isVoxel(const Coord& xyz) { assert(mRoot); return mRoot->getValueDepthAndCache(xyz, *mParent) == static_cast<int>(RootNodeType::LEVEL); } const ValueType& getValue(const Coord& xyz) { assert(mRoot); return mRoot->getValueAndCache(xyz, *mParent); } void setValue(const Coord& xyz, const ValueType& value) { assert(mRoot); static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values"); const_cast<RootNodeType*>(mRoot)->setValueAndCache(xyz, value, *mParent); } void setValueOnly(const Coord& xyz, const ValueType& value) { assert(mRoot); static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values"); const_cast<RootNodeType*>(mRoot)->setValueOnlyAndCache(xyz, value, *mParent); } void setValueOn(const Coord& xyz, const ValueType& value) { this->setValue(xyz, value); } template<typename ModifyOp> void modifyValue(const Coord& xyz, const ModifyOp& op) { assert(mRoot); static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values"); const_cast<RootNodeType*>(mRoot)->modifyValueAndCache(xyz, op, *mParent); } template<typename ModifyOp> void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op) { assert(mRoot); static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values"); const_cast<RootNodeType*>(mRoot)->modifyValueAndActiveStateAndCache(xyz, op, *mParent); } void setValueOff(const Coord& xyz, const ValueType& value) { assert(mRoot); static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values"); const_cast<RootNodeType*>(mRoot)->setValueOffAndCache(xyz, value, *mParent); } void setActiveState(const Coord& xyz, bool on) { assert(mRoot); static_assert(!TreeCacheT::IsConstTree, "can't modify a const tree's values"); const_cast<RootNodeType*>(mRoot)->setActiveStateAndCache(xyz, on, *mParent); } private: CacheItem(const CacheItem&); CacheItem& operator=(const CacheItem&); bool isHashed(const Coord&) const { return false; } TreeCacheT* mParent; const RootNodeType* mRoot; };// end of CacheItem specialized for RootNode //////////////////////////////////////// /// @brief ValueAccessor with no mutex and no node caching. /// @details This specialization is provided mainly for benchmarking. /// Accessors with caching will almost always be faster. template<typename _TreeType, bool IsSafe> class ValueAccessor0: public ValueAccessorBase<_TreeType, IsSafe> { public: using TreeType = _TreeType; using ValueType = typename TreeType::ValueType; using RootNodeT = typename TreeType::RootNodeType; using LeafNodeT = typename TreeType::LeafNodeType; using BaseT = ValueAccessorBase<TreeType, IsSafe>; ValueAccessor0(TreeType& tree): BaseT(tree) {} ValueAccessor0(const ValueAccessor0& other): BaseT(other) {} /// Return the number of cache levels employed by this accessor. static Index numCacheLevels() { return 0; } ValueAccessor0& operator=(const ValueAccessor0& other) { if (&other != this) this->BaseT::operator=(other); return *this; } ~ValueAccessor0() override = default; /// Return @c true if nodes along the path to the given voxel have been cached. bool isCached(const Coord&) const { return false; } /// Return the value of the voxel at the given coordinates. 
const ValueType& getValue(const Coord& xyz) const { assert(BaseT::mTree); return BaseT::mTree->getValue(xyz); } /// Return the active state of the voxel at the given coordinates. bool isValueOn(const Coord& xyz) const { assert(BaseT::mTree); return BaseT::mTree->isValueOn(xyz); } /// Return the active state and, in @a value, the value of the voxel at the given coordinates. bool probeValue(const Coord& xyz, ValueType& value) const { assert(BaseT::mTree); return BaseT::mTree->probeValue(xyz, value); } /// Return the tree depth (0 = root) at which the value of voxel (x, y, z) resides, /// or -1 if (x, y, z) isn't explicitly represented in the tree (i.e., if it is /// implicitly a background voxel). int getValueDepth(const Coord& xyz) const { assert(BaseT::mTree); return BaseT::mTree->getValueDepth(xyz); } /// Return @c true if the value of voxel (x, y, z) resides at the leaf level /// of the tree, i.e., if it is not a tile value. bool isVoxel(const Coord& xyz) const { assert(BaseT::mTree); return BaseT::mTree->getValueDepth(xyz) == static_cast<int>(RootNodeT::LEVEL); } //@{ /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValue(const Coord& xyz, const ValueType& value) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); BaseT::mTree->setValue(xyz, value); } void setValueOn(const Coord& xyz, const ValueType& value) { this->setValue(xyz, value); } //@} /// Set the value of the voxel at the given coordinate but don't change its active state. void setValueOnly(const Coord& xyz, const ValueType& value) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); BaseT::mTree->setValueOnly(xyz, value); } /// Set the value of the voxel at the given coordinates and mark the voxel as inactive. void setValueOff(const Coord& xyz, const ValueType& value) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); BaseT::mTree->root().setValueOff(xyz, value); } /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. /// @details See Tree::modifyValue() for details. template<typename ModifyOp> void modifyValue(const Coord& xyz, const ModifyOp& op) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); BaseT::mTree->modifyValue(xyz, op); } /// @brief Apply a functor to the voxel at the given coordinates. /// @details See Tree::modifyValueAndActiveState() for details. template<typename ModifyOp> void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); BaseT::mTree->modifyValueAndActiveState(xyz, op); } /// Set the active state of the voxel at the given coordinates but don't change its value. void setActiveState(const Coord& xyz, bool on = true) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); BaseT::mTree->setActiveState(xyz, on); } /// Mark the voxel at the given coordinates as active but don't change its value. void setValueOn(const Coord& xyz) { this->setActiveState(xyz, true); } /// Mark the voxel at the given coordinates as inactive but don't change its value. void setValueOff(const Coord& xyz) { this->setActiveState(xyz, false); } /// Return the cached node of type @a NodeType. 
[Mainly for internal use] template<typename NodeT> NodeT* getNode() { return nullptr; } /// Cache the given node, which should lie along the path from the root node to /// the node containing voxel (x, y, z). [Mainly for internal use] template<typename NodeT> void insertNode(const Coord&, NodeT&) {} /// @brief Add the specified leaf to this tree, possibly creating a child branch /// in the process. If the leaf node already exists, replace it. void addLeaf(LeafNodeT* leaf) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't add a node to a const tree"); BaseT::mTree->root().addLeaf(leaf); } /// @brief Add a tile at the specified tree level that contains voxel (x, y, z), /// possibly deleting existing nodes or creating new nodes in the process. void addTile(Index level, const Coord& xyz, const ValueType& value, bool state) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't add a tile to a const tree"); BaseT::mTree->root().addTile(level, xyz, value, state); } /// If a node of the given type exists in the cache, remove it, so that /// isCached(xyz) returns @c false for any voxel (x, y, z) contained in /// that node. [Mainly for internal use] template<typename NodeT> void eraseNode() {} LeafNodeT* touchLeaf(const Coord& xyz) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't get a non-const node from a const tree"); return BaseT::mTree->touchLeaf(xyz); } template<typename NodeT> NodeT* probeNode(const Coord& xyz) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't get a non-const node from a const tree"); return BaseT::mTree->template probeNode<NodeT>(xyz); } template<typename NodeT> const NodeT* probeConstNode(const Coord& xyz) const { assert(BaseT::mTree); return BaseT::mTree->template probeConstNode<NodeT>(xyz); } LeafNodeT* probeLeaf(const Coord& xyz) { return this->template probeNode<LeafNodeT>(xyz); } const LeafNodeT* probeConstLeaf(const Coord& xyz) const { return this->template probeConstNode<LeafNodeT>(xyz); } const LeafNodeT* probeLeaf(const Coord& xyz) const { return this->probeConstLeaf(xyz); } /// Remove all nodes from this cache, then reinsert the root node. void clear() override {} private: // Allow trees to deregister themselves. template<typename> friend class Tree; /// Prevent this accessor from calling Tree::releaseCache() on a tree that /// no longer exists. (Called by mTree when it is destroyed.) void release() override { this->BaseT::release(); } }; // ValueAccessor0 /// @brief Value accessor with one level of node caching. /// @details The node cache level is specified by L0 with the default value 0 /// (defined in the forward declaration) corresponding to a LeafNode. /// /// @note This class is for experts only and should rarely be used /// directly. Instead use ValueAccessor with its default template arguments. 
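/// @par Example
/// An illustrative sketch only; the variable names are assumptions, not part of this
/// header. The partial specialization ValueAccessor<TreeType, IsSafe, 1, tbb::null_mutex>
/// derives from this class.
/// @code
/// openvdb::FloatTree tree(/*background=*/0.0f);
/// // One level of node caching (by default the LeafNode level):
/// openvdb::tree::ValueAccessor<openvdb::FloatTree, true, 1> acc1(tree);
/// acc1.setValue(openvdb::Coord(1, 2, 3), 5.0f);
/// @endcode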
template<typename _TreeType, bool IsSafe, Index L0> class ValueAccessor1 : public ValueAccessorBase<_TreeType, IsSafe> { public: static_assert(_TreeType::DEPTH >= 2, "cache size exceeds tree depth"); static_assert(L0 < _TreeType::RootNodeType::LEVEL, "invalid cache level"); using TreeType = _TreeType; using ValueType = typename TreeType::ValueType; using RootNodeT = typename TreeType::RootNodeType; using LeafNodeT = typename TreeType::LeafNodeType; using BaseT = ValueAccessorBase<TreeType, IsSafe>; using InvTreeT = typename RootNodeT::NodeChainType; using NodeT0 = typename InvTreeT::template Get<L0>; /// Constructor from a tree ValueAccessor1(TreeType& tree) : BaseT(tree), mKey0(Coord::max()), mNode0(nullptr) { } /// Copy constructor ValueAccessor1(const ValueAccessor1& other) : BaseT(other) { this->copy(other); } /// Return the number of cache levels employed by this ValueAccessor static Index numCacheLevels() { return 1; } /// Assignment operator ValueAccessor1& operator=(const ValueAccessor1& other) { if (&other != this) { this->BaseT::operator=(other); this->copy(other); } return *this; } /// Virtual destructor ~ValueAccessor1() override = default; /// Return @c true if any of the nodes along the path to the given /// voxel have been cached. bool isCached(const Coord& xyz) const { assert(BaseT::mTree); return this->isHashed(xyz); } /// Return the value of the voxel at the given coordinates. const ValueType& getValue(const Coord& xyz) const { assert(BaseT::mTree); if (this->isHashed(xyz)) { assert(mNode0); return mNode0->getValueAndCache(xyz, this->self()); } return BaseT::mTree->root().getValueAndCache(xyz, this->self()); } /// Return the active state of the voxel at the given coordinates. bool isValueOn(const Coord& xyz) const { assert(BaseT::mTree); if (this->isHashed(xyz)) { assert(mNode0); return mNode0->isValueOnAndCache(xyz, this->self()); } return BaseT::mTree->root().isValueOnAndCache(xyz, this->self()); } /// Return the active state of the voxel as well as its value bool probeValue(const Coord& xyz, ValueType& value) const { assert(BaseT::mTree); if (this->isHashed(xyz)) { assert(mNode0); return mNode0->probeValueAndCache(xyz, value, this->self()); } return BaseT::mTree->root().probeValueAndCache(xyz, value, this->self()); } /// Return the tree depth (0 = root) at which the value of voxel (x, y, z) resides, /// or -1 if (x, y, z) isn't explicitly represented in the tree (i.e., if it is /// implicitly a background voxel). int getValueDepth(const Coord& xyz) const { assert(BaseT::mTree); if (this->isHashed(xyz)) { assert(mNode0); return RootNodeT::LEVEL - mNode0->getValueLevelAndCache(xyz, this->self()); } return BaseT::mTree->root().getValueDepthAndCache(xyz, this->self()); } /// Return @c true if the value of voxel (x, y, z) resides at the leaf level /// of the tree, i.e., if it is not a tile value. bool isVoxel(const Coord& xyz) const { assert(BaseT::mTree); if (this->isHashed(xyz)) { assert(mNode0); return mNode0->getValueLevelAndCache(xyz, this->self()) == 0; } return BaseT::mTree->root().getValueDepthAndCache(xyz, this->self()) == static_cast<int>(RootNodeT::LEVEL); } //@{ /// Set the value of the voxel at the given coordinates and mark the voxel as active.
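/// @par Example
/// An illustrative sketch comparing the write methods; @c acc is assumed to be a
/// float-valued accessor.
/// @code
/// const openvdb::Coord ijk(0, 1, 2);
/// acc.setValue(ijk, 1.0f);     // set the value and mark the voxel as active
/// acc.setValueOnly(ijk, 2.0f); // change the value but keep the current active state
/// acc.setValueOff(ijk, 0.0f);  // set the value and mark the voxel as inactive
/// @endcode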
void setValue(const Coord& xyz, const ValueType& value) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->setValueAndCache(xyz, value, *this); } else { BaseT::mTree->root().setValueAndCache(xyz, value, *this); } } void setValueOn(const Coord& xyz, const ValueType& value) { this->setValue(xyz, value); } //@} /// Set the value of the voxel at the given coordinate but preserves its active state. void setValueOnly(const Coord& xyz, const ValueType& value) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->setValueOnlyAndCache(xyz, value, *this); } else { BaseT::mTree->root().setValueOnlyAndCache(xyz, value, *this); } } /// Set the value of the voxel at the given coordinates and mark the voxel as inactive. void setValueOff(const Coord& xyz, const ValueType& value) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->setValueOffAndCache(xyz, value, *this); } else { BaseT::mTree->root().setValueOffAndCache(xyz, value, *this); } } /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. /// @details See Tree::modifyValue() for details. template<typename ModifyOp> void modifyValue(const Coord& xyz, const ModifyOp& op) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->modifyValueAndCache(xyz, op, *this); } else { BaseT::mTree->root().modifyValueAndCache(xyz, op, *this); } } /// @brief Apply a functor to the voxel at the given coordinates. /// @details See Tree::modifyValueAndActiveState() for details. template<typename ModifyOp> void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->modifyValueAndActiveStateAndCache(xyz, op, *this); } else { BaseT::mTree->root().modifyValueAndActiveStateAndCache(xyz, op, *this); } } /// Set the active state of the voxel at the given coordinates but don't change its value. void setActiveState(const Coord& xyz, bool on = true) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->setActiveStateAndCache(xyz, on, *this); } else { BaseT::mTree->root().setActiveStateAndCache(xyz, on, *this); } } /// Mark the voxel at the given coordinates as active but don't change its value. void setValueOn(const Coord& xyz) { this->setActiveState(xyz, true); } /// Mark the voxel at the given coordinates as inactive but don't change its value. void setValueOff(const Coord& xyz) { this->setActiveState(xyz, false); } /// Return the cached node of type @a NodeType. [Mainly for internal use] template<typename NodeT> NodeT* getNode() { const NodeT* node = nullptr; this->getNode(node); return const_cast<NodeT*>(node); } /// Cache the given node, which should lie along the path from the root node to /// the node containing voxel (x, y, z). 
[Mainly for internal use] template<typename NodeT> void insertNode(const Coord& xyz, NodeT& node) { this->insert(xyz, &node); } /// If a node of the given type exists in the cache, remove it, so that /// isCached(xyz) returns @c false for any voxel (x, y, z) contained in /// that node. [Mainly for internal use] template<typename NodeT> void eraseNode() { const NodeT* node = nullptr; this->eraseNode(node); } /// @brief Add the specified leaf to this tree, possibly creating a child branch /// in the process. If the leaf node already exists, replace it. void addLeaf(LeafNodeT* leaf) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't add a node to a const tree"); BaseT::mTree->root().addLeaf(leaf); } /// @brief Add a tile at the specified tree level that contains voxel (x, y, z), /// possibly deleting existing nodes or creating new nodes in the process. void addTile(Index level, const Coord& xyz, const ValueType& value, bool state) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't add a tile to a const tree"); BaseT::mTree->root().addTile(level, xyz, value, state); } /// @brief @return the leaf node that contains voxel (x, y, z) and /// if it doesn't exist, create it, but preserve the values and /// active states of all voxels. /// /// Use this method to preallocate a static tree topology over which to /// safely perform multithreaded processing. LeafNodeT* touchLeaf(const Coord& xyz) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't get a non-const node from a const tree"); if (this->isHashed(xyz)) { assert(mNode0); return const_cast<NodeT0*>(mNode0)->touchLeafAndCache(xyz, *this); } return BaseT::mTree->root().touchLeafAndCache(xyz, *this); } /// @brief @return a pointer to the node of the specified type that contains /// voxel (x, y, z) and if it doesn't exist, return @c nullptr. template<typename NodeT> NodeT* probeNode(const Coord& xyz) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't get a non-const node from a const tree"); OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if ((std::is_same<NodeT, NodeT0>::value)) { if (this->isHashed(xyz)) { assert(mNode0); return reinterpret_cast<NodeT*>(const_cast<NodeT0*>(mNode0)); } return BaseT::mTree->root().template probeNodeAndCache<NodeT>(xyz, *this); } return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } LeafNodeT* probeLeaf(const Coord& xyz) { return this->template probeNode<LeafNodeT>(xyz); } /// @brief @return a const pointer to the node of the specified type that contains /// voxel (x, y, z) and if it doesn't exist, return @c nullptr. template<typename NodeT> const NodeT* probeConstNode(const Coord& xyz) const { assert(BaseT::mTree); OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if ((std::is_same<NodeT, NodeT0>::value)) { if (this->isHashed(xyz)) { assert(mNode0); return reinterpret_cast<const NodeT*>(mNode0); } return BaseT::mTree->root().template probeConstNodeAndCache<NodeT>(xyz, this->self()); } return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } const LeafNodeT* probeConstLeaf(const Coord& xyz) const { return this->template probeConstNode<LeafNodeT>(xyz); } const LeafNodeT* probeLeaf(const Coord& xyz) const { return this->probeConstLeaf(xyz); } /// Remove all the cached nodes and invalidate the corresponding hash-keys. void clear() override { mKey0 = Coord::max(); mNode0 = nullptr; } private: // Allow nodes to insert themselves into the cache.
template<typename> friend class RootNode; template<typename, Index> friend class InternalNode; template<typename, Index> friend class LeafNode; // Allow trees to deregister themselves. template<typename> friend class Tree; // This private method is merely for convenience. inline ValueAccessor1& self() const { return const_cast<ValueAccessor1&>(*this); } void getNode(const NodeT0*& node) { node = mNode0; } void getNode(const RootNodeT*& node) { node = (BaseT::mTree ? &BaseT::mTree->root() : nullptr); } template<typename OtherNodeType> void getNode(const OtherNodeType*& node) { node = nullptr; } void eraseNode(const NodeT0*) { mKey0 = Coord::max(); mNode0 = nullptr; } template<typename OtherNodeType> void eraseNode(const OtherNodeType*) {} /// Private copy method inline void copy(const ValueAccessor1& other) { mKey0 = other.mKey0; mNode0 = other.mNode0; } /// Prevent this accessor from calling Tree::releaseCache() on a tree that /// no longer exists. (Called by mTree when it is destroyed.) void release() override { this->BaseT::release(); this->clear(); } /// Cache the given node, which should lie along the path from the root node to /// the node containing voxel (x, y, z). /// @note This operation is not mutex-protected and is intended to be called /// only by nodes and only in the context of a getValue() or setValue() call. inline void insert(const Coord& xyz, const NodeT0* node) { assert(node); mKey0 = xyz & ~(NodeT0::DIM-1); mNode0 = node; } /// No-op in case a tree traversal attempts to insert a node that /// is not cached by the ValueAccessor template<typename OtherNodeType> inline void insert(const Coord&, const OtherNodeType*) {} inline bool isHashed(const Coord& xyz) const { return (xyz[0] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[0] && (xyz[1] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[1] && (xyz[2] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[2]; } mutable Coord mKey0; mutable const NodeT0* mNode0; }; // ValueAccessor1 /// @brief Value accessor with two levels of node caching. /// @details The node cache levels are specified by L0 and L1 /// with the default values 0 and 1 (defined in the forward declaration) /// corresponding to a LeafNode and its parent InternalNode. /// /// @note This class is for experts only and should rarely be used directly. /// Instead use ValueAccessor with its default template arguments.
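/// @par Example
/// An illustrative sketch only (variable names are assumptions). The partial
/// specialization ValueAccessor<TreeType, IsSafe, 2, tbb::null_mutex> derives from
/// this class and caches a LeafNode and its parent InternalNode.
/// @code
/// openvdb::FloatTree tree(/*background=*/0.0f);
/// openvdb::tree::ValueAccessor<openvdb::FloatTree, true, 2> acc2(tree);
/// const float v = acc2.getValue(openvdb::Coord(10, 20, 30)); // 0.0f, the background
/// @endcode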
template<typename _TreeType, bool IsSafe, Index L0, Index L1> class ValueAccessor2 : public ValueAccessorBase<_TreeType, IsSafe> { public: static_assert(_TreeType::DEPTH >= 3, "cache size exceeds tree depth"); static_assert(L0 < L1, "invalid cache level"); static_assert(L1 < _TreeType::RootNodeType::LEVEL, "invalid cache level"); using TreeType = _TreeType; using ValueType = typename TreeType::ValueType; using RootNodeT = typename TreeType::RootNodeType; using LeafNodeT = typename TreeType::LeafNodeType; using BaseT = ValueAccessorBase<TreeType, IsSafe>; using InvTreeT = typename RootNodeT::NodeChainType; using NodeT0 = typename InvTreeT::template Get<L0>; using NodeT1 = typename InvTreeT::template Get<L1>; /// Constructor from a tree ValueAccessor2(TreeType& tree) : BaseT(tree), mKey0(Coord::max()), mNode0(nullptr), mKey1(Coord::max()), mNode1(nullptr) {} /// Copy constructor ValueAccessor2(const ValueAccessor2& other) : BaseT(other) { this->copy(other); } /// Return the number of cache levels employed by this ValueAccessor static Index numCacheLevels() { return 2; } /// Asignment operator ValueAccessor2& operator=(const ValueAccessor2& other) { if (&other != this) { this->BaseT::operator=(other); this->copy(other); } return *this; } /// Virtual destructor ~ValueAccessor2() override = default; /// Return @c true if any of the nodes along the path to the given /// voxel have been cached. bool isCached(const Coord& xyz) const { assert(BaseT::mTree); return this->isHashed1(xyz) || this->isHashed0(xyz); } /// Return the value of the voxel at the given coordinates. const ValueType& getValue(const Coord& xyz) const { assert(BaseT::mTree); if (this->isHashed0(xyz)) { assert(mNode0); return mNode0->getValueAndCache(xyz, this->self()); } else if (this->isHashed1(xyz)) { assert(mNode1); return mNode1->getValueAndCache(xyz, this->self()); } return BaseT::mTree->root().getValueAndCache(xyz, this->self()); } /// Return the active state of the voxel at the given coordinates. bool isValueOn(const Coord& xyz) const { assert(BaseT::mTree); if (this->isHashed0(xyz)) { assert(mNode0); return mNode0->isValueOnAndCache(xyz, this->self()); } else if (this->isHashed1(xyz)) { assert(mNode1); return mNode1->isValueOnAndCache(xyz, this->self()); } return BaseT::mTree->root().isValueOnAndCache(xyz, this->self()); } /// Return the active state of the voxel as well as its value bool probeValue(const Coord& xyz, ValueType& value) const { assert(BaseT::mTree); if (this->isHashed0(xyz)) { assert(mNode0); return mNode0->probeValueAndCache(xyz, value, this->self()); } else if (this->isHashed1(xyz)) { assert(mNode1); return mNode1->probeValueAndCache(xyz, value, this->self()); } return BaseT::mTree->root().probeValueAndCache(xyz, value, this->self()); } /// Return the tree depth (0 = root) at which the value of voxel (x, y, z) resides, /// or -1 if (x, y, z) isn't explicitly represented in the tree (i.e., if it is /// implicitly a background voxel). int getValueDepth(const Coord& xyz) const { assert(BaseT::mTree); if (this->isHashed0(xyz)) { assert(mNode0); return RootNodeT::LEVEL - mNode0->getValueLevelAndCache(xyz, this->self()); } else if (this->isHashed1(xyz)) { assert(mNode1); return RootNodeT::LEVEL - mNode1->getValueLevelAndCache(xyz, this->self()); } return BaseT::mTree->root().getValueDepthAndCache(xyz, this->self()); } /// Return @c true if the value of voxel (x, y, z) resides at the leaf level /// of the tree, i.e., if it is not a tile value. 
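/// @par Example
/// An illustrative sketch; @c acc is assumed to be a float-valued accessor over a
/// default-depth tree.
/// @code
/// acc.setValue(openvdb::Coord(0, 0, 0), 1.0f);
/// acc.isVoxel(openvdb::Coord(0, 0, 0));    // true: resolved at the leaf level
/// acc.isVoxel(openvdb::Coord(1000, 0, 0)); // false: covered by a tile or background value
/// @endcode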
bool isVoxel(const Coord& xyz) const { assert(BaseT::mTree); if (this->isHashed0(xyz)) { assert(mNode0); return mNode0->getValueLevelAndCache(xyz, this->self())==0; } else if (this->isHashed1(xyz)) { assert(mNode1); return mNode1->getValueLevelAndCache(xyz, this->self())==0; } return BaseT::mTree->root().getValueDepthAndCache(xyz, this->self()) == static_cast<int>(RootNodeT::LEVEL); } //@{ /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValue(const Coord& xyz, const ValueType& value) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed0(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->setValueAndCache(xyz, value, *this); } else if (this->isHashed1(xyz)) { assert(mNode1); const_cast<NodeT1*>(mNode1)->setValueAndCache(xyz, value, *this); } else { BaseT::mTree->root().setValueAndCache(xyz, value, *this); } } void setValueOn(const Coord& xyz, const ValueType& value) { this->setValue(xyz, value); } //@} /// Set the value of the voxel at the given coordinate but preserves its active state. void setValueOnly(const Coord& xyz, const ValueType& value) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed0(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->setValueOnlyAndCache(xyz, value, *this); } else if (this->isHashed1(xyz)) { assert(mNode1); const_cast<NodeT1*>(mNode1)->setValueOnlyAndCache(xyz, value, *this); } else { BaseT::mTree->root().setValueOnlyAndCache(xyz, value, *this); } } /// Set the value of the voxel at the given coordinates and mark the voxel as inactive. void setValueOff(const Coord& xyz, const ValueType& value) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed0(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->setValueOffAndCache(xyz, value, *this); } else if (this->isHashed1(xyz)) { assert(mNode1); const_cast<NodeT1*>(mNode1)->setValueOffAndCache(xyz, value, *this); } else { BaseT::mTree->root().setValueOffAndCache(xyz, value, *this); } } /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. /// @details See Tree::modifyValue() for details. template<typename ModifyOp> void modifyValue(const Coord& xyz, const ModifyOp& op) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed0(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->modifyValueAndCache(xyz, op, *this); } else if (this->isHashed1(xyz)) { assert(mNode1); const_cast<NodeT1*>(mNode1)->modifyValueAndCache(xyz, op, *this); } else { BaseT::mTree->root().modifyValueAndCache(xyz, op, *this); } } /// @brief Apply a functor to the voxel at the given coordinates. /// @details See Tree::modifyValueAndActiveState() for details. template<typename ModifyOp> void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed0(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->modifyValueAndActiveStateAndCache(xyz, op, *this); } else if (this->isHashed1(xyz)) { assert(mNode1); const_cast<NodeT1*>(mNode1)->modifyValueAndActiveStateAndCache(xyz, op, *this); } else { BaseT::mTree->root().modifyValueAndActiveStateAndCache(xyz, op, *this); } } /// Set the active state of the voxel at the given coordinates without changing its value. 
void setActiveState(const Coord& xyz, bool on = true) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed0(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->setActiveStateAndCache(xyz, on, *this); } else if (this->isHashed1(xyz)) { assert(mNode1); const_cast<NodeT1*>(mNode1)->setActiveStateAndCache(xyz, on, *this); } else { BaseT::mTree->root().setActiveStateAndCache(xyz, on, *this); } } /// Mark the voxel at the given coordinates as active without changing its value. void setValueOn(const Coord& xyz) { this->setActiveState(xyz, true); } /// Mark the voxel at the given coordinates as inactive without changing its value. void setValueOff(const Coord& xyz) { this->setActiveState(xyz, false); } /// Return the cached node of type @a NodeType. [Mainly for internal use] template<typename NodeT> NodeT* getNode() { const NodeT* node = nullptr; this->getNode(node); return const_cast<NodeT*>(node); } /// Cache the given node, which should lie along the path from the root node to /// the node containing voxel (x, y, z). [Mainly for internal use] template<typename NodeT> void insertNode(const Coord& xyz, NodeT& node) { this->insert(xyz, &node); } /// If a node of the given type exists in the cache, remove it, so that /// isCached(xyz) returns @c false for any voxel (x, y, z) contained in /// that node. [Mainly for internal use] template<typename NodeT> void eraseNode() { const NodeT* node = nullptr; this->eraseNode(node); } /// @brief Add the specified leaf to this tree, possibly creating a child branch /// in the process. If the leaf node already exists, replace it. void addLeaf(LeafNodeT* leaf) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't add a node to a const tree"); if (this->isHashed1(leaf->origin())) { assert(mNode1); return const_cast<NodeT1*>(mNode1)->addLeafAndCache(leaf, *this); } BaseT::mTree->root().addLeafAndCache(leaf, *this); } /// @brief Add a tile at the specified tree level that contains voxel (x, y, z), /// possibly deleting existing nodes or creating new nodes in the process. void addTile(Index level, const Coord& xyz, const ValueType& value, bool state) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't add a tile to a const tree"); if (this->isHashed1(xyz)) { assert(mNode1); return const_cast<NodeT1*>(mNode1)->addTileAndCache(level, xyz, value, state, *this); } BaseT::mTree->root().addTileAndCache(level, xyz, value, state, *this); } /// @brief @return the leaf node that contains voxel (x, y, z) and /// if it doesn't exist, create it, but preserve the values and /// active states of all voxels. /// /// Use this method to preallocate a static tree topology over which to /// safely perform multithreaded processing. LeafNodeT* touchLeaf(const Coord& xyz) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't get a non-const node from a const tree"); if (this->isHashed0(xyz)) { assert(mNode0); return const_cast<NodeT0*>(mNode0)->touchLeafAndCache(xyz, *this); } else if (this->isHashed1(xyz)) { assert(mNode1); return const_cast<NodeT1*>(mNode1)->touchLeafAndCache(xyz, *this); } return BaseT::mTree->root().touchLeafAndCache(xyz, *this); } /// @brief @return a pointer to the node of the specified type that contains /// voxel (x, y, z) and if it doesn't exist, return @c nullptr. 
template<typename NodeT> NodeT* probeNode(const Coord& xyz) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't get a non-const node from a const tree"); OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if ((std::is_same<NodeT, NodeT0>::value)) { if (this->isHashed0(xyz)) { assert(mNode0); return reinterpret_cast<NodeT*>(const_cast<NodeT0*>(mNode0)); } else if (this->isHashed1(xyz)) { assert(mNode1); return const_cast<NodeT1*>(mNode1)->template probeNodeAndCache<NodeT>(xyz, *this); } return BaseT::mTree->root().template probeNodeAndCache<NodeT>(xyz, *this); } else if ((std::is_same<NodeT, NodeT1>::value)) { if (this->isHashed1(xyz)) { assert(mNode1); return reinterpret_cast<NodeT*>(const_cast<NodeT1*>(mNode1)); } return BaseT::mTree->root().template probeNodeAndCache<NodeT>(xyz, *this); } return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } /// @brief @return a pointer to the leaf node that contains /// voxel (x, y, z) and if it doesn't exist, return @c nullptr. LeafNodeT* probeLeaf(const Coord& xyz) { return this->template probeNode<LeafNodeT>(xyz); } /// @brief @return a const pointer to the node of the specified type that contains /// voxel (x, y, z) and if it doesn't exist, return @c nullptr. template<typename NodeT> const NodeT* probeConstLeaf(const Coord& xyz) const { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if ((std::is_same<NodeT, NodeT0>::value)) { if (this->isHashed0(xyz)) { assert(mNode0); return reinterpret_cast<const NodeT*>(mNode0); } else if (this->isHashed1(xyz)) { assert(mNode1); return mNode1->template probeConstNodeAndCache<NodeT>(xyz, this->self()); } return BaseT::mTree->root().template probeConstNodeAndCache<NodeT>(xyz, this->self()); } else if ((std::is_same<NodeT, NodeT1>::value)) { if (this->isHashed1(xyz)) { assert(mNode1); return reinterpret_cast<const NodeT*>(mNode1); } return BaseT::mTree->root().template probeConstNodeAndCache<NodeT>(xyz, this->self()); } return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } /// @brief @return a const pointer to the leaf node that contains /// voxel (x, y, z) and if it doesn't exist, return @c nullptr. const LeafNodeT* probeConstLeaf(const Coord& xyz) const { return this->template probeConstNode<LeafNodeT>(xyz); } const LeafNodeT* probeLeaf(const Coord& xyz) const { return this->probeConstLeaf(xyz); } /// @brief @return a const pointer to the node of the specified type that contains /// voxel (x, y, z) and if it doesn't exist, return @c nullptr. template<typename NodeT> const NodeT* probeConstNode(const Coord& xyz) const { assert(BaseT::mTree); OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if ((std::is_same<NodeT, NodeT0>::value)) { if (this->isHashed0(xyz)) { assert(mNode0); return reinterpret_cast<const NodeT*>(mNode0); } else if (this->isHashed1(xyz)) { assert(mNode1); return mNode1->template probeConstNodeAndCache<NodeT>(xyz, this->self()); } return BaseT::mTree->root().template probeConstNodeAndCache<NodeT>(xyz, this->self()); } else if ((std::is_same<NodeT, NodeT1>::value)) { if (this->isHashed1(xyz)) { assert(mNode1); return reinterpret_cast<const NodeT*>(mNode1); } return BaseT::mTree->root().template probeConstNodeAndCache<NodeT>(xyz, this->self()); } return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } /// Remove all the cached nodes and invalidate the corresponding hash-keys. void clear() override { mKey0 = Coord::max(); mNode0 = nullptr; mKey1 = Coord::max(); mNode1 = nullptr; } private: // Allow nodes to insert themselves into the cache. 
template<typename> friend class RootNode; template<typename, Index> friend class InternalNode; template<typename, Index> friend class LeafNode; // Allow trees to deregister themselves. template<typename> friend class Tree; // This private method is merely for convenience. inline ValueAccessor2& self() const { return const_cast<ValueAccessor2&>(*this); } void getNode(const NodeT0*& node) { node = mNode0; } void getNode(const NodeT1*& node) { node = mNode1; } void getNode(const RootNodeT*& node) { node = (BaseT::mTree ? &BaseT::mTree->root() : nullptr); } template<typename OtherNodeType> void getNode(const OtherNodeType*& node) { node = nullptr; } void eraseNode(const NodeT0*) { mKey0 = Coord::max(); mNode0 = nullptr; } void eraseNode(const NodeT1*) { mKey1 = Coord::max(); mNode1 = nullptr; } template<typename OtherNodeType> void eraseNode(const OtherNodeType*) {} /// Private copy method inline void copy(const ValueAccessor2& other) { mKey0 = other.mKey0; mNode0 = other.mNode0; mKey1 = other.mKey1; mNode1 = other.mNode1; } /// Prevent this accessor from calling Tree::releaseCache() on a tree that /// no longer exists. (Called by mTree when it is destroyed.) void release() override { this->BaseT::release(); this->clear(); } /// Cache the given node, which should lie along the path from the root node to /// the node containing voxel (x, y, z). /// @note This operation is not mutex-protected and is intended to be called /// only by nodes and only in the context of a getValue() or setValue() call. inline void insert(const Coord& xyz, const NodeT0* node) { assert(node); mKey0 = xyz & ~(NodeT0::DIM-1); mNode0 = node; } inline void insert(const Coord& xyz, const NodeT1* node) { assert(node); mKey1 = xyz & ~(NodeT1::DIM-1); mNode1 = node; } /// No-op in case a tree traversal attemps to insert a node that /// is not cached by the ValueAccessor template<typename NodeT> inline void insert(const Coord&, const NodeT*) {} inline bool isHashed0(const Coord& xyz) const { return (xyz[0] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[0] && (xyz[1] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[1] && (xyz[2] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[2]; } inline bool isHashed1(const Coord& xyz) const { return (xyz[0] & ~Coord::ValueType(NodeT1::DIM-1)) == mKey1[0] && (xyz[1] & ~Coord::ValueType(NodeT1::DIM-1)) == mKey1[1] && (xyz[2] & ~Coord::ValueType(NodeT1::DIM-1)) == mKey1[2]; } mutable Coord mKey0; mutable const NodeT0* mNode0; mutable Coord mKey1; mutable const NodeT1* mNode1; }; // ValueAccessor2 /// @brief Value accessor with three levels of node caching. /// @details The node cache levels are specified by L0, L1, and L2 /// with the default values 0, 1 and 2 (defined in the forward declaration) /// corresponding to a LeafNode, its parent InternalNode, and its parent InternalNode. /// Since the default configuration of all typed trees and grids, e.g., /// FloatTree or FloatGrid, has a depth of four, this value accessor is the one /// used by default. /// /// @note This class is for experts only and should rarely be used /// directly. 
Instead use ValueAccessor with its default template arguments template<typename _TreeType, bool IsSafe, Index L0, Index L1, Index L2> class ValueAccessor3 : public ValueAccessorBase<_TreeType, IsSafe> { public: static_assert(_TreeType::DEPTH >= 4, "cache size exceeds tree depth"); static_assert(L0 < L1, "invalid cache level"); static_assert(L1 < L2, "invalid cache level"); static_assert(L2 < _TreeType::RootNodeType::LEVEL, "invalid cache level"); using TreeType = _TreeType; using ValueType = typename TreeType::ValueType; using RootNodeT = typename TreeType::RootNodeType; using LeafNodeT = typename TreeType::LeafNodeType; using BaseT = ValueAccessorBase<TreeType, IsSafe>; using InvTreeT = typename RootNodeT::NodeChainType; using NodeT0 = typename InvTreeT::template Get<L0>; using NodeT1 = typename InvTreeT::template Get<L1>; using NodeT2 = typename InvTreeT::template Get<L2>; /// Constructor from a tree ValueAccessor3(TreeType& tree) : BaseT(tree), mKey0(Coord::max()), mNode0(nullptr), mKey1(Coord::max()), mNode1(nullptr), mKey2(Coord::max()), mNode2(nullptr) {} /// Copy constructor ValueAccessor3(const ValueAccessor3& other) : BaseT(other) { this->copy(other); } /// Asignment operator ValueAccessor3& operator=(const ValueAccessor3& other) { if (&other != this) { this->BaseT::operator=(other); this->copy(other); } return *this; } /// Return the number of cache levels employed by this ValueAccessor static Index numCacheLevels() { return 3; } /// Virtual destructor ~ValueAccessor3() override = default; /// Return @c true if any of the nodes along the path to the given /// voxel have been cached. bool isCached(const Coord& xyz) const { assert(BaseT::mTree); return this->isHashed2(xyz) || this->isHashed1(xyz) || this->isHashed0(xyz); } /// Return the value of the voxel at the given coordinates. const ValueType& getValue(const Coord& xyz) const { assert(BaseT::mTree); if (this->isHashed0(xyz)) { assert(mNode0); return mNode0->getValueAndCache(xyz, this->self()); } else if (this->isHashed1(xyz)) { assert(mNode1); return mNode1->getValueAndCache(xyz, this->self()); } else if (this->isHashed2(xyz)) { assert(mNode2); return mNode2->getValueAndCache(xyz, this->self()); } return BaseT::mTree->root().getValueAndCache(xyz, this->self()); } /// Return the active state of the voxel at the given coordinates. bool isValueOn(const Coord& xyz) const { assert(BaseT::mTree); if (this->isHashed0(xyz)) { assert(mNode0); return mNode0->isValueOnAndCache(xyz, this->self()); } else if (this->isHashed1(xyz)) { assert(mNode1); return mNode1->isValueOnAndCache(xyz, this->self()); } else if (this->isHashed2(xyz)) { assert(mNode2); return mNode2->isValueOnAndCache(xyz, this->self()); } return BaseT::mTree->root().isValueOnAndCache(xyz, this->self()); } /// Return the active state of the voxel as well as its value bool probeValue(const Coord& xyz, ValueType& value) const { assert(BaseT::mTree); if (this->isHashed0(xyz)) { assert(mNode0); return mNode0->probeValueAndCache(xyz, value, this->self()); } else if (this->isHashed1(xyz)) { assert(mNode1); return mNode1->probeValueAndCache(xyz, value, this->self()); } else if (this->isHashed2(xyz)) { assert(mNode2); return mNode2->probeValueAndCache(xyz, value, this->self()); } return BaseT::mTree->root().probeValueAndCache(xyz, value, this->self()); } /// Return the tree depth (0 = root) at which the value of voxel (x, y, z) resides, /// or -1 if (x, y, z) isn't explicitly represented in the tree (i.e., if it is /// implicitly a background voxel). 
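/// @par Example
/// An illustrative sketch, assuming the default four-level FloatTree configuration
/// and a float-valued accessor @c acc:
/// @code
/// acc.setValue(openvdb::Coord(0, 0, 0), 1.0f);
/// acc.getValueDepth(openvdb::Coord(0, 0, 0));     // 3: the value is stored in a LeafNode
/// acc.getValueDepth(openvdb::Coord(-1000, 0, 0)); // -1: implicit background value
/// @endcode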
int getValueDepth(const Coord& xyz) const { assert(BaseT::mTree); if (this->isHashed0(xyz)) { assert(mNode0); return RootNodeT::LEVEL - mNode0->getValueLevelAndCache(xyz, this->self()); } else if (this->isHashed1(xyz)) { assert(mNode1); return RootNodeT::LEVEL - mNode1->getValueLevelAndCache(xyz, this->self()); } else if (this->isHashed2(xyz)) { assert(mNode2); return RootNodeT::LEVEL - mNode2->getValueLevelAndCache(xyz, this->self()); } return BaseT::mTree->root().getValueDepthAndCache(xyz, this->self()); } /// Return @c true if the value of voxel (x, y, z) resides at the leaf level /// of the tree, i.e., if it is not a tile value. bool isVoxel(const Coord& xyz) const { assert(BaseT::mTree); if (this->isHashed0(xyz)) { assert(mNode0); return mNode0->getValueLevelAndCache(xyz, this->self())==0; } else if (this->isHashed1(xyz)) { assert(mNode1); return mNode1->getValueLevelAndCache(xyz, this->self())==0; } else if (this->isHashed2(xyz)) { assert(mNode2); return mNode2->getValueLevelAndCache(xyz, this->self())==0; } return BaseT::mTree->root().getValueDepthAndCache(xyz, this->self()) == static_cast<int>(RootNodeT::LEVEL); } //@{ /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValue(const Coord& xyz, const ValueType& value) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed0(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->setValueAndCache(xyz, value, *this); } else if (this->isHashed1(xyz)) { assert(mNode1); const_cast<NodeT1*>(mNode1)->setValueAndCache(xyz, value, *this); } else if (this->isHashed2(xyz)) { assert(mNode2); const_cast<NodeT2*>(mNode2)->setValueAndCache(xyz, value, *this); } else { BaseT::mTree->root().setValueAndCache(xyz, value, *this); } } void setValueOn(const Coord& xyz, const ValueType& value) { this->setValue(xyz, value); } //@} /// Set the value of the voxel at the given coordinate but preserves its active state. void setValueOnly(const Coord& xyz, const ValueType& value) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed0(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->setValueOnlyAndCache(xyz, value, *this); } else if (this->isHashed1(xyz)) { assert(mNode1); const_cast<NodeT1*>(mNode1)->setValueOnlyAndCache(xyz, value, *this); } else if (this->isHashed2(xyz)) { assert(mNode2); const_cast<NodeT2*>(mNode2)->setValueOnlyAndCache(xyz, value, *this); } else { BaseT::mTree->root().setValueOnlyAndCache(xyz, value, *this); } } /// Set the value of the voxel at the given coordinates and mark the voxel as inactive. void setValueOff(const Coord& xyz, const ValueType& value) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed0(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->setValueOffAndCache(xyz, value, *this); } else if (this->isHashed1(xyz)) { assert(mNode1); const_cast<NodeT1*>(mNode1)->setValueOffAndCache(xyz, value, *this); } else if (this->isHashed2(xyz)) { assert(mNode2); const_cast<NodeT2*>(mNode2)->setValueOffAndCache(xyz, value, *this); } else { BaseT::mTree->root().setValueOffAndCache(xyz, value, *this); } } /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. /// @details See Tree::modifyValue() for details. 
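/// @par Example
/// An illustrative sketch; @c acc is assumed to be a float-valued accessor.
/// @code
/// // Add 1 to the current value of the voxel and mark it as active:
/// acc.modifyValue(openvdb::Coord(1, 2, 3), [](float& v) { v += 1.0f; });
/// @endcode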
template<typename ModifyOp> void modifyValue(const Coord& xyz, const ModifyOp& op) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed0(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->modifyValueAndCache(xyz, op, *this); } else if (this->isHashed1(xyz)) { assert(mNode1); const_cast<NodeT1*>(mNode1)->modifyValueAndCache(xyz, op, *this); } else if (this->isHashed2(xyz)) { assert(mNode2); const_cast<NodeT2*>(mNode2)->modifyValueAndCache(xyz, op, *this); } else { BaseT::mTree->root().modifyValueAndCache(xyz, op, *this); } } /// @brief Apply a functor to the voxel at the given coordinates. /// @details See Tree::modifyValueAndActiveState() for details. template<typename ModifyOp> void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed0(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->modifyValueAndActiveStateAndCache(xyz, op, *this); } else if (this->isHashed1(xyz)) { assert(mNode1); const_cast<NodeT1*>(mNode1)->modifyValueAndActiveStateAndCache(xyz, op, *this); } else if (this->isHashed2(xyz)) { assert(mNode2); const_cast<NodeT2*>(mNode2)->modifyValueAndActiveStateAndCache(xyz, op, *this); } else { BaseT::mTree->root().modifyValueAndActiveStateAndCache(xyz, op, *this); } } /// Set the active state of the voxel at the given coordinates without changing its value. void setActiveState(const Coord& xyz, bool on = true) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't modify a const tree's values"); if (this->isHashed0(xyz)) { assert(mNode0); const_cast<NodeT0*>(mNode0)->setActiveStateAndCache(xyz, on, *this); } else if (this->isHashed1(xyz)) { assert(mNode1); const_cast<NodeT1*>(mNode1)->setActiveStateAndCache(xyz, on, *this); } else if (this->isHashed2(xyz)) { assert(mNode2); const_cast<NodeT2*>(mNode2)->setActiveStateAndCache(xyz, on, *this); } else { BaseT::mTree->root().setActiveStateAndCache(xyz, on, *this); } } /// Mark the voxel at the given coordinates as active without changing its value. void setValueOn(const Coord& xyz) { this->setActiveState(xyz, true); } /// Mark the voxel at the given coordinates as inactive without changing its value. void setValueOff(const Coord& xyz) { this->setActiveState(xyz, false); } /// Return the cached node of type @a NodeType. [Mainly for internal use] template<typename NodeT> NodeT* getNode() { const NodeT* node = nullptr; this->getNode(node); return const_cast<NodeT*>(node); } /// Cache the given node, which should lie along the path from the root node to /// the node containing voxel (x, y, z). [Mainly for internal use] template<typename NodeT> void insertNode(const Coord& xyz, NodeT& node) { this->insert(xyz, &node); } /// If a node of the given type exists in the cache, remove it, so that /// isCached(xyz) returns @c false for any voxel (x, y, z) contained in /// that node. [Mainly for internal use] template<typename NodeT> void eraseNode() { const NodeT* node = nullptr; this->eraseNode(node); } /// @brief Add the specified leaf to this tree, possibly creating a child branch /// in the process. If the leaf node already exists, replace it. 
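/// @par Example
/// @note Editorial usage sketch, not part of the original header; @c acc is assumed
/// to be a ValueAccessor3 for a non-const FloatTree, and the tree is assumed to take
/// ownership of the heap-allocated leaf.
/// @code
/// using LeafT = openvdb::FloatTree::LeafNodeType;
/// // Insert an empty, inactive leaf covering the 8^3 block that contains the origin.
/// acc.addLeaf(new LeafT(openvdb::Coord(0, 0, 0), /*value=*/0.0f, /*active=*/false));
/// @endcode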
void addLeaf(LeafNodeT* leaf) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't add a node to a const tree"); if (this->isHashed1(leaf->origin())) { assert(mNode1); return const_cast<NodeT1*>(mNode1)->addLeafAndCache(leaf, *this); } else if (this->isHashed2(leaf->origin())) { assert(mNode2); return const_cast<NodeT2*>(mNode2)->addLeafAndCache(leaf, *this); } BaseT::mTree->root().addLeafAndCache(leaf, *this); } /// @brief Add a tile at the specified tree level that contains voxel (x, y, z), /// possibly deleting existing nodes or creating new nodes in the process. void addTile(Index level, const Coord& xyz, const ValueType& value, bool state) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't add a tile to a const tree"); if (this->isHashed1(xyz)) { assert(mNode1); return const_cast<NodeT1*>(mNode1)->addTileAndCache(level, xyz, value, state, *this); } if (this->isHashed2(xyz)) { assert(mNode2); return const_cast<NodeT2*>(mNode2)->addTileAndCache(level, xyz, value, state, *this); } BaseT::mTree->root().addTileAndCache(level, xyz, value, state, *this); } /// @brief @return the leaf node that contains voxel (x, y, z) and /// if it doesn't exist, create it, but preserve the values and /// active states of all voxels. /// /// Use this method to preallocate a static tree topology over which to /// safely perform multithreaded processing. LeafNodeT* touchLeaf(const Coord& xyz) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't get a non-const node from a const tree"); if (this->isHashed0(xyz)) { assert(mNode0); return const_cast<NodeT0*>(mNode0); } else if (this->isHashed1(xyz)) { assert(mNode1); return const_cast<NodeT1*>(mNode1)->touchLeafAndCache(xyz, *this); } else if (this->isHashed2(xyz)) { assert(mNode2); return const_cast<NodeT2*>(mNode2)->touchLeafAndCache(xyz, *this); } return BaseT::mTree->root().touchLeafAndCache(xyz, *this); } /// @brief @return a pointer to the node of the specified type that contains /// voxel (x, y, z) and if it doesn't exist, return @c nullptr. template<typename NodeT> NodeT* probeNode(const Coord& xyz) { assert(BaseT::mTree); static_assert(!BaseT::IsConstTree, "can't get a non-const node from a const tree"); OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if ((std::is_same<NodeT, NodeT0>::value)) { if (this->isHashed0(xyz)) { assert(mNode0); return reinterpret_cast<NodeT*>(const_cast<NodeT0*>(mNode0)); } else if (this->isHashed1(xyz)) { assert(mNode1); return const_cast<NodeT1*>(mNode1)->template probeNodeAndCache<NodeT>(xyz, *this); } else if (this->isHashed2(xyz)) { assert(mNode2); return const_cast<NodeT2*>(mNode2)->template probeNodeAndCache<NodeT>(xyz, *this); } return BaseT::mTree->root().template probeNodeAndCache<NodeT>(xyz, *this); } else if ((std::is_same<NodeT, NodeT1>::value)) { if (this->isHashed1(xyz)) { assert(mNode1); return reinterpret_cast<NodeT*>(const_cast<NodeT1*>(mNode1)); } else if (this->isHashed2(xyz)) { assert(mNode2); return const_cast<NodeT2*>(mNode2)->template probeNodeAndCache<NodeT>(xyz, *this); } return BaseT::mTree->root().template probeNodeAndCache<NodeT>(xyz, *this); } else if ((std::is_same<NodeT, NodeT2>::value)) { if (this->isHashed2(xyz)) { assert(mNode2); return reinterpret_cast<NodeT*>(const_cast<NodeT2*>(mNode2)); } return BaseT::mTree->root().template probeNodeAndCache<NodeT>(xyz, *this); } return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } /// @brief @return a pointer to the leaf node that contains /// voxel (x, y, z) and if it doesn't exist, return @c nullptr. 
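/// @par Example
/// @note Editorial usage sketch, not part of the original header; @c acc is assumed
/// to be a ValueAccessor3 for a non-const FloatTree.
/// @code
/// const openvdb::Coord ijk(1, 2, 3);
/// if (openvdb::FloatTree::LeafNodeType* leaf = acc.probeLeaf(ijk)) {
///     leaf->setValueOn(ijk, 5.0f); // the voxel already lives in an allocated leaf
/// } // otherwise no leaf covers ijk, and probing does not create one
/// @endcode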
LeafNodeT* probeLeaf(const Coord& xyz) { return this->template probeNode<LeafNodeT>(xyz); } /// @brief @return a const pointer to the node of the specified type that contains /// voxel (x, y, z) and if it doesn't exist, return @c nullptr. template<typename NodeT> const NodeT* probeConstNode(const Coord& xyz) const { assert(BaseT::mTree); OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if ((std::is_same<NodeT, NodeT0>::value)) { if (this->isHashed0(xyz)) { assert(mNode0); return reinterpret_cast<const NodeT*>(mNode0); } else if (this->isHashed1(xyz)) { assert(mNode1); return mNode1->template probeConstNodeAndCache<NodeT>(xyz, this->self()); } else if (this->isHashed2(xyz)) { assert(mNode2); return mNode2->template probeConstNodeAndCache<NodeT>(xyz, this->self()); } return BaseT::mTree->root().template probeConstNodeAndCache<NodeT>(xyz, this->self()); } else if ((std::is_same<NodeT, NodeT1>::value)) { if (this->isHashed1(xyz)) { assert(mNode1); return reinterpret_cast<const NodeT*>(mNode1); } else if (this->isHashed2(xyz)) { assert(mNode2); return mNode2->template probeConstNodeAndCache<NodeT>(xyz, this->self()); } return BaseT::mTree->root().template probeConstNodeAndCache<NodeT>(xyz, this->self()); } else if ((std::is_same<NodeT, NodeT2>::value)) { if (this->isHashed2(xyz)) { assert(mNode2); return reinterpret_cast<const NodeT*>(mNode2); } return BaseT::mTree->root().template probeConstNodeAndCache<NodeT>(xyz, this->self()); } return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } /// @brief @return a const pointer to the leaf node that contains /// voxel (x, y, z) and if it doesn't exist, return @c nullptr. const LeafNodeT* probeConstLeaf(const Coord& xyz) const { return this->template probeConstNode<LeafNodeT>(xyz); } const LeafNodeT* probeLeaf(const Coord& xyz) const { return this->probeConstLeaf(xyz); } /// Remove all the cached nodes and invalidate the corresponding hash-keys. void clear() override { mKey0 = Coord::max(); mNode0 = nullptr; mKey1 = Coord::max(); mNode1 = nullptr; mKey2 = Coord::max(); mNode2 = nullptr; } private: // Allow nodes to insert themselves into the cache. template<typename> friend class RootNode; template<typename, Index> friend class InternalNode; template<typename, Index> friend class LeafNode; // Allow trees to deregister themselves. template<typename> friend class Tree; // This private method is merely for convenience. inline ValueAccessor3& self() const { return const_cast<ValueAccessor3&>(*this); } /// Private copy method inline void copy(const ValueAccessor3& other) { mKey0 = other.mKey0; mNode0 = other.mNode0; mKey1 = other.mKey1; mNode1 = other.mNode1; mKey2 = other.mKey2; mNode2 = other.mNode2; } /// Prevent this accessor from calling Tree::releaseCache() on a tree that /// no longer exists. (Called by mTree when it is destroyed.) void release() override { this->BaseT::release(); this->clear(); } void getNode(const NodeT0*& node) { node = mNode0; } void getNode(const NodeT1*& node) { node = mNode1; } void getNode(const NodeT2*& node) { node = mNode2; } void getNode(const RootNodeT*& node) { node = (BaseT::mTree ? 
&BaseT::mTree->root() : nullptr); } template<typename OtherNodeType> void getNode(const OtherNodeType*& node) { node = nullptr; } void eraseNode(const NodeT0*) { mKey0 = Coord::max(); mNode0 = nullptr; } void eraseNode(const NodeT1*) { mKey1 = Coord::max(); mNode1 = nullptr; } void eraseNode(const NodeT2*) { mKey2 = Coord::max(); mNode2 = nullptr; } template<typename OtherNodeType> void eraseNode(const OtherNodeType*) {} /// Cache the given node, which should lie along the path from the root node to /// the node containing voxel (x, y, z). /// @note This operation is not mutex-protected and is intended to be called /// only by nodes and only in the context of a getValue() or setValue() call. inline void insert(const Coord& xyz, const NodeT0* node) { assert(node); mKey0 = xyz & ~(NodeT0::DIM-1); mNode0 = node; } inline void insert(const Coord& xyz, const NodeT1* node) { assert(node); mKey1 = xyz & ~(NodeT1::DIM-1); mNode1 = node; } inline void insert(const Coord& xyz, const NodeT2* node) { assert(node); mKey2 = xyz & ~(NodeT2::DIM-1); mNode2 = node; } /// No-op in case a tree traversal attempts to insert a node that /// is not cached by the ValueAccessor template<typename OtherNodeType> inline void insert(const Coord&, const OtherNodeType*) { } inline bool isHashed0(const Coord& xyz) const { return (xyz[0] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[0] && (xyz[1] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[1] && (xyz[2] & ~Coord::ValueType(NodeT0::DIM-1)) == mKey0[2]; } inline bool isHashed1(const Coord& xyz) const { return (xyz[0] & ~Coord::ValueType(NodeT1::DIM-1)) == mKey1[0] && (xyz[1] & ~Coord::ValueType(NodeT1::DIM-1)) == mKey1[1] && (xyz[2] & ~Coord::ValueType(NodeT1::DIM-1)) == mKey1[2]; } inline bool isHashed2(const Coord& xyz) const { return (xyz[0] & ~Coord::ValueType(NodeT2::DIM-1)) == mKey2[0] && (xyz[1] & ~Coord::ValueType(NodeT2::DIM-1)) == mKey2[1] && (xyz[2] & ~Coord::ValueType(NodeT2::DIM-1)) == mKey2[2]; } mutable Coord mKey0; mutable const NodeT0* mNode0; mutable Coord mKey1; mutable const NodeT1* mNode1; mutable Coord mKey2; mutable const NodeT2* mNode2; }; // ValueAccessor3 } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_VALUEACCESSOR_HAS_BEEN_INCLUDED
103,262
C
38.368281
98
0.62997
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/LeafManager.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file LeafManager.h /// /// @brief A LeafManager manages a linear array of pointers to a given tree's /// leaf nodes, as well as optional auxiliary buffers (one or more per leaf) /// that can be swapped with the leaf nodes' voxel data buffers. /// @details The leaf array is useful for multithreaded computations over /// leaf voxels in a tree with static topology but varying voxel values. /// The auxiliary buffers are convenient for temporal integration. /// Efficient methods are provided for multithreaded swapping and synching /// (i.e., copying the contents) of these buffers. #ifndef OPENVDB_TREE_LEAFMANAGER_HAS_BEEN_INCLUDED #define OPENVDB_TREE_LEAFMANAGER_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/tree/RootNode.h> // for NodeChain #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <deque> #include <functional> #include <type_traits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { namespace leafmgr { //@{ /// Useful traits for Tree types template<typename TreeT> struct TreeTraits { static const bool IsConstTree = false; using LeafIterType = typename TreeT::LeafIter; }; template<typename TreeT> struct TreeTraits<const TreeT> { static const bool IsConstTree = true; using LeafIterType = typename TreeT::LeafCIter; }; //@} } // namespace leafmgr /// This helper class implements LeafManager methods that need to be /// specialized for const vs. non-const trees. template<typename ManagerT> struct LeafManagerImpl { using RangeT = typename ManagerT::RangeType; using LeafT = typename ManagerT::LeafType; using BufT = typename ManagerT::BufferType; static inline void doSwapLeafBuffer(const RangeT& r, size_t auxBufferIdx, LeafT** leafs, BufT* bufs, size_t bufsPerLeaf) { for (size_t n = r.begin(), m = r.end(), N = bufsPerLeaf; n != m; ++n) { leafs[n]->swap(bufs[n * N + auxBufferIdx]); } } }; //////////////////////////////////////// /// @brief This class manages a linear array of pointers to a given tree's /// leaf nodes, as well as optional auxiliary buffers (one or more per leaf) /// that can be swapped with the leaf nodes' voxel data buffers. /// @details The leaf array is useful for multithreaded computations over /// leaf voxels in a tree with static topology but varying voxel values. /// The auxiliary buffers are convenient for temporal integration. /// Efficient methods are provided for multithreaded swapping and sync'ing /// (i.e., copying the contents) of these buffers. /// /// @note Buffer index 0 denotes a leaf node's internal voxel data buffer. /// Any auxiliary buffers are indexed starting from one. 
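/// @par Example
/// @note Editorial usage sketch, not part of the original header; @c tree is a
/// hypothetical, pre-populated FloatTree.
/// @code
/// // Manage the tree's leaf nodes with one auxiliary buffer per leaf.
/// openvdb::tree::LeafManager<openvdb::FloatTree> leafs(tree, /*auxBuffersPerLeaf=*/1);
/// // ... write intermediate results into buffer 1, e.g. via leafs.getBuffer(n, 1) ...
/// leafs.swapLeafBuffer(1); // adopt the auxiliary buffers as the new voxel data
/// @endcode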
template<typename TreeT> class LeafManager { public: using TreeType = TreeT; using ValueType = typename TreeT::ValueType; using RootNodeType = typename TreeT::RootNodeType; using NonConstLeafType = typename TreeType::LeafNodeType; using LeafType = typename CopyConstness<TreeType, NonConstLeafType>::Type; using LeafNodeType = LeafType; using LeafIterType = typename leafmgr::TreeTraits<TreeT>::LeafIterType; using NonConstBufferType = typename LeafType::Buffer; using BufferType = typename CopyConstness<TreeType, NonConstBufferType>::Type; using RangeType = tbb::blocked_range<size_t>; // leaf index range static const Index DEPTH = 2; // root + leaf nodes static const bool IsConstTree = leafmgr::TreeTraits<TreeT>::IsConstTree; class LeafRange { public: class Iterator { public: Iterator(const LeafRange& range, size_t pos): mRange(range), mPos(pos) { assert(this->isValid()); } Iterator(const Iterator&) = default; Iterator& operator=(const Iterator&) = default; /// Advance to the next leaf node. Iterator& operator++() { ++mPos; return *this; } /// Return a reference to the leaf node to which this iterator is pointing. LeafType& operator*() const { return mRange.mLeafManager.leaf(mPos); } /// Return a pointer to the leaf node to which this iterator is pointing. LeafType* operator->() const { return &(this->operator*()); } /// @brief Return the nth buffer for the leaf node to which this iterator is pointing, /// where n = @a bufferIdx and n = 0 corresponds to the leaf node's own buffer. BufferType& buffer(size_t bufferIdx) { return mRange.mLeafManager.getBuffer(mPos, bufferIdx); } /// Return the index into the leaf array of the current leaf node. size_t pos() const { return mPos; } /// Return @c true if the position of this iterator is in a valid range. bool isValid() const { return mPos>=mRange.mBegin && mPos<=mRange.mEnd; } /// Return @c true if this iterator is not yet exhausted. bool test() const { return mPos < mRange.mEnd; } /// Return @c true if this iterator is not yet exhausted. operator bool() const { return this->test(); } /// Return @c true if this iterator is exhausted. 
bool empty() const { return !this->test(); } bool operator!=(const Iterator& other) const { return (mPos != other.mPos) || (&mRange != &other.mRange); } bool operator==(const Iterator& other) const { return !(*this != other); } const LeafRange& leafRange() const { return mRange; } private: const LeafRange& mRange; size_t mPos; };// end Iterator LeafRange(size_t begin, size_t end, const LeafManager& leafManager, size_t grainSize=1) : mEnd(end) , mBegin(begin) , mGrainSize(grainSize) , mLeafManager(leafManager) { } Iterator begin() const {return Iterator(*this, mBegin);} Iterator end() const {return Iterator(*this, mEnd);} size_t size() const { return mEnd - mBegin; } size_t grainsize() const { return mGrainSize; } const LeafManager& leafManager() const { return mLeafManager; } bool empty() const {return !(mBegin < mEnd);} bool is_divisible() const {return mGrainSize < this->size();} LeafRange(LeafRange& r, tbb::split) : mEnd(r.mEnd) , mBegin(doSplit(r)) , mGrainSize(r.mGrainSize) , mLeafManager(r.mLeafManager) { } private: size_t mEnd, mBegin, mGrainSize; const LeafManager& mLeafManager; static size_t doSplit(LeafRange& r) { assert(r.is_divisible()); size_t middle = r.mBegin + (r.mEnd - r.mBegin) / 2u; r.mEnd = middle; return middle; } };// end of LeafRange /// @brief Constructor from a tree reference and an auxiliary buffer count /// @note The default is no auxiliary buffers LeafManager(TreeType& tree, size_t auxBuffersPerLeaf=0, bool serial=false) : mTree(&tree) , mLeafCount(0) , mAuxBufferCount(0) , mAuxBuffersPerLeaf(auxBuffersPerLeaf) { this->rebuild(serial); } /// @brief Construct directly from an existing array of leafnodes. /// @warning The leafnodes are implicitly assumed to exist in the /// input @a tree. LeafManager(TreeType& tree, LeafType** begin, LeafType** end, size_t auxBuffersPerLeaf=0, bool serial=false) : mTree(&tree) , mLeafCount(end-begin) , mAuxBufferCount(0) , mAuxBuffersPerLeaf(auxBuffersPerLeaf) , mLeafPtrs(new LeafType*[mLeafCount]) , mLeafs(mLeafPtrs.get()) { size_t n = mLeafCount; LeafType **target = mLeafs, **source = begin; while (n--) *target++ = *source++; if (auxBuffersPerLeaf) this->initAuxBuffers(serial); } /// Shallow copy constructor called by tbb::parallel_for() threads /// /// @note This should never get called directly LeafManager(const LeafManager& other) : mTree(other.mTree) , mLeafCount(other.mLeafCount) , mAuxBufferCount(other.mAuxBufferCount) , mAuxBuffersPerLeaf(other.mAuxBuffersPerLeaf) , mLeafs(other.mLeafs) , mAuxBuffers(other.mAuxBuffers) , mTask(other.mTask) { } /// @brief (Re)initialize by resizing (if necessary) and repopulating the leaf array /// and by deleting existing auxiliary buffers and allocating new ones. /// @details Call this method if the tree's topology, and therefore the number /// of leaf nodes, changes. New auxiliary buffers are initialized with copies /// of corresponding leaf node buffers. void rebuild(bool serial=false) { this->initLeafArray(serial); this->initAuxBuffers(serial); } //@{ /// Repopulate the leaf array and delete and reallocate auxiliary buffers. void rebuild(size_t auxBuffersPerLeaf, bool serial=false) { mAuxBuffersPerLeaf = auxBuffersPerLeaf; this->rebuild(serial); } void rebuild(TreeType& tree, bool serial=false) { mTree = &tree; this->rebuild(serial); } void rebuild(TreeType& tree, size_t auxBuffersPerLeaf, bool serial=false) { mTree = &tree; mAuxBuffersPerLeaf = auxBuffersPerLeaf; this->rebuild(serial); } //@} /// @brief Change the number of auxiliary buffers. 
/// @details If auxBuffersPerLeaf is 0, all existing auxiliary buffers are deleted. /// New auxiliary buffers are initialized with copies of corresponding leaf node buffers. /// This method does not rebuild the leaf array. void rebuildAuxBuffers(size_t auxBuffersPerLeaf, bool serial=false) { mAuxBuffersPerLeaf = auxBuffersPerLeaf; this->initAuxBuffers(serial); } /// @brief Remove the auxiliary buffers, but don't rebuild the leaf array. void removeAuxBuffers() { this->rebuildAuxBuffers(0); } /// @brief Remove the auxiliary buffers and rebuild the leaf array. void rebuildLeafArray(bool serial = false) { this->removeAuxBuffers(); this->initLeafArray(serial); } /// @brief Return the total number of allocated auxiliary buffers. size_t auxBufferCount() const { return mAuxBufferCount; } /// @brief Return the number of auxiliary buffers per leaf node. size_t auxBuffersPerLeaf() const { return mAuxBuffersPerLeaf; } /// @brief Return the number of leaf nodes. size_t leafCount() const { return mLeafCount; } /// @brief Return the number of active voxels in the leaf nodes. /// @note Multi-threaded for better performance than Tree::activeLeafVoxelCount Index64 activeLeafVoxelCount() const { return tbb::parallel_reduce(this->leafRange(), Index64(0), [] (const LeafRange& range, Index64 sum) -> Index64 { for (const auto& leaf: range) { sum += leaf.onVoxelCount(); } return sum; }, [] (Index64 n, Index64 m) -> Index64 { return n + m; }); } /// Return a const reference to tree associated with this manager. const TreeType& tree() const { return *mTree; } /// Return a reference to the tree associated with this manager. TreeType& tree() { return *mTree; } /// Return a const reference to root node associated with this manager. const RootNodeType& root() const { return mTree->root(); } /// Return a reference to the root node associated with this manager. RootNodeType& root() { return mTree->root(); } /// Return @c true if the tree associated with this manager is immutable. bool isConstTree() const { return this->IsConstTree; } /// @brief Return a pointer to the leaf node at index @a leafIdx in the array. /// @note For performance reasons no range check is performed (other than an assertion)! LeafType& leaf(size_t leafIdx) const { assert(leafIdx<mLeafCount); return *mLeafs[leafIdx]; } /// @brief Return the leaf or auxiliary buffer for the leaf node at index @a leafIdx. /// If @a bufferIdx is zero, return the leaf buffer, otherwise return the nth /// auxiliary buffer, where n = @a bufferIdx - 1. /// /// @note For performance reasons no range checks are performed on the inputs /// (other than assertions)! Since auxiliary buffers, unlike leaf buffers, /// might not exist, be especially careful when specifying the @a bufferIdx. /// @note For const trees, this method always returns a reference to a const buffer. /// It is safe to @c const_cast and modify any auxiliary buffer (@a bufferIdx > 0), /// but it is not safe to modify the leaf buffer (@a bufferIdx = 0). BufferType& getBuffer(size_t leafIdx, size_t bufferIdx) const { assert(leafIdx < mLeafCount); assert(bufferIdx == 0 || bufferIdx - 1 < mAuxBuffersPerLeaf); return bufferIdx == 0 ? mLeafs[leafIdx]->buffer() : mAuxBuffers[leafIdx * mAuxBuffersPerLeaf + bufferIdx - 1]; } /// @brief Return a @c tbb::blocked_range of leaf array indices. /// /// @note Consider using leafRange() instead, which provides access methods /// to leaf nodes and buffers. 
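/// @par Example
/// @note Editorial usage sketch, not part of the original header; @c tree is a
/// hypothetical, pre-populated FloatTree.
/// @code
/// using LeafMgrT = openvdb::tree::LeafManager<openvdb::FloatTree>;
/// LeafMgrT leafs(tree);
/// // Deactivate every voxel of every leaf node in parallel via the LeafRange interface.
/// tbb::parallel_for(leafs.leafRange(/*grainsize=*/8),
///     [](const LeafMgrT::LeafRange& range) {
///         for (auto it = range.begin(); it; ++it) it->setValuesOff();
///     });
/// @endcode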
RangeType getRange(size_t grainsize = 1) const { return RangeType(0, mLeafCount, grainsize); } /// Return a TBB-compatible LeafRange. LeafRange leafRange(size_t grainsize = 1) const { return LeafRange(0, mLeafCount, *this, grainsize); } /// @brief Swap each leaf node's buffer with the nth corresponding auxiliary buffer, /// where n = @a bufferIdx. /// @return @c true if the swap was successful /// @param bufferIdx index of the buffer that will be swapped with /// the corresponding leaf node buffer /// @param serial if false, swap buffers in parallel using multiple threads. /// @note Recall that the indexing of auxiliary buffers is 1-based, since /// buffer index 0 denotes the leaf node buffer. So buffer index 1 denotes /// the first auxiliary buffer. bool swapLeafBuffer(size_t bufferIdx, bool serial = false) { namespace ph = std::placeholders; if (bufferIdx == 0 || bufferIdx > mAuxBuffersPerLeaf || this->isConstTree()) return false; mTask = std::bind(&LeafManager::doSwapLeafBuffer, ph::_1, ph::_2, bufferIdx - 1); this->cook(serial ? 0 : 512); return true;//success } /// @brief Swap any two buffers for each leaf node. /// @note Recall that the indexing of auxiliary buffers is 1-based, since /// buffer index 0 denotes the leaf node buffer. So buffer index 1 denotes /// the first auxiliary buffer. bool swapBuffer(size_t bufferIdx1, size_t bufferIdx2, bool serial = false) { namespace ph = std::placeholders; const size_t b1 = std::min(bufferIdx1, bufferIdx2); const size_t b2 = std::max(bufferIdx1, bufferIdx2); if (b1 == b2 || b2 > mAuxBuffersPerLeaf) return false; if (b1 == 0) { if (this->isConstTree()) return false; mTask = std::bind(&LeafManager::doSwapLeafBuffer, ph::_1, ph::_2, b2-1); } else { mTask = std::bind(&LeafManager::doSwapAuxBuffer, ph::_1, ph::_2, b1-1, b2-1); } this->cook(serial ? 0 : 512); return true;//success } /// @brief Sync up the specified auxiliary buffer with the corresponding leaf node buffer. /// @return @c true if the sync was successful /// @param bufferIdx index of the buffer that will contain a /// copy of the corresponding leaf node buffer /// @param serial if false, sync buffers in parallel using multiple threads. /// @note Recall that the indexing of auxiliary buffers is 1-based, since /// buffer index 0 denotes the leaf node buffer. So buffer index 1 denotes /// the first auxiliary buffer. bool syncAuxBuffer(size_t bufferIdx, bool serial = false) { namespace ph = std::placeholders; if (bufferIdx == 0 || bufferIdx > mAuxBuffersPerLeaf) return false; mTask = std::bind(&LeafManager::doSyncAuxBuffer, ph::_1, ph::_2, bufferIdx - 1); this->cook(serial ? 0 : 64); return true;//success } /// @brief Sync up all auxiliary buffers with their corresponding leaf node buffers. /// @return true if the sync was successful /// @param serial if false, sync buffers in parallel using multiple threads. bool syncAllBuffers(bool serial = false) { namespace ph = std::placeholders; switch (mAuxBuffersPerLeaf) { case 0: return false;//nothing to do case 1: mTask = std::bind(&LeafManager::doSyncAllBuffers1, ph::_1, ph::_2); break; case 2: mTask = std::bind(&LeafManager::doSyncAllBuffers2, ph::_1, ph::_2); break; default: mTask = std::bind(&LeafManager::doSyncAllBuffersN, ph::_1, ph::_2); break; } this->cook(serial ? 0 : 64); return true;//success } /// @brief Threaded method that applies a user-supplied functor /// to each leaf node in the LeafManager. /// /// @details The user-supplied functor needs to define the methods /// required for tbb::parallel_for. 
/// /// @param op user-supplied functor, see examples for interface details. /// @param threaded optional toggle to disable threading, on by default. /// @param grainSize optional parameter to specify the grainsize /// for threading, one by default. /// /// @warning The functor object is deep-copied to create TBB tasks. /// This allows the function to use non-thread-safe members /// like a ValueAccessor. /// /// @par Example: /// @code /// // Functor to offset a tree's voxel values with values from another tree. /// template<typename TreeType> /// struct OffsetOp /// { /// using Accessor = tree::ValueAccessor<const TreeType>; /// /// OffsetOp(const TreeType& tree): mRhsTreeAcc(tree) {} /// /// template <typename LeafNodeType> /// void operator()(LeafNodeType &lhsLeaf, size_t) const /// { /// const LeafNodeType *rhsLeaf = mRhsTreeAcc.probeConstLeaf(lhsLeaf.origin()); /// if (rhsLeaf) { /// typename LeafNodeType::ValueOnIter iter = lhsLeaf.beginValueOn(); /// for (; iter; ++iter) { /// iter.setValue(iter.getValue() + rhsLeaf->getValue(iter.pos())); /// } /// } /// } /// Accessor mRhsTreeAcc; /// }; /// /// // usage: /// tree::LeafManager<FloatTree> leafNodes(lhsTree); /// leafNodes.foreach(OffsetOp<FloatTree>(rhsTree)); /// /// // A functor that performs a min operation between different auxiliary buffers. /// template<typename LeafManagerType> /// struct MinOp /// { /// using BufferType = typename LeafManagerType::BufferType; /// /// MinOp(LeafManagerType& leafNodes): mLeafs(leafNodes) {} /// /// template <typename LeafNodeType> /// void operator()(LeafNodeType &leaf, size_t leafIndex) const /// { /// // get the first buffer /// BufferType& buffer = mLeafs.getBuffer(leafIndex, 1); /// /// // min ... /// } /// LeafManagerType& mLeafs; /// }; /// @endcode template<typename LeafOp> void foreach(const LeafOp& op, bool threaded = true, size_t grainSize=1) { LeafTransformer<LeafOp> transform(op); transform.run(this->leafRange(grainSize), threaded); } /// @brief Threaded method that applies a user-supplied functor /// to each leaf node in the LeafManager. Unlike foreach /// (defined above) this method performs a reduction on /// all the leaf nodes. /// /// @details The user-supplied functor needs to define the methods /// required for tbb::parallel_reduce. /// /// @param op user-supplied functor, see examples for interface details. /// @param threaded optional toggle to disable threading, on by default. /// @param grainSize optional parameter to specify the grainsize /// for threading, one by default. /// /// @warning The functor object is deep-copied to create TBB tasks. /// This allows the function to use non-thread-safe members /// like a ValueAccessor. 
/// /// @par Example: /// @code /// // Functor to count the number of negative (active) leaf values /// struct CountOp /// { /// CountOp() : mCounter(0) {} /// CountOp(const CountOp &other) : mCounter(other.mCounter) {} /// CountOp(const CountOp &other, tbb::split) : mCounter(0) {} /// template <typename LeafNodeType> /// void operator()(LeafNodeType &leaf, size_t) /// { /// typename LeafNodeType::ValueOnIter iter = leaf.beginValueOn(); /// for (; iter; ++iter) if (*iter < 0.0f) ++mCounter; /// } /// void join(const CountOp &other) {mCounter += other.mCounter;} /// size_t mCounter; /// }; /// /// // usage: /// tree::LeafManager<FloatTree> leafNodes(tree); /// CountOp count; /// leafNodes.reduce(count); /// std::cerr << "Number of negative active voxels = " << count.mCounter << std::endl; /// /// @endcode template<typename LeafOp> void reduce(LeafOp& op, bool threaded = true, size_t grainSize=1) { LeafReducer<LeafOp> transform(op); transform.run(this->leafRange(grainSize), threaded); } template<typename ArrayT> [[deprecated("Use Tree::getNodes()")]] void getNodes(ArrayT& array) { using T = typename ArrayT::value_type; static_assert(std::is_pointer<T>::value, "argument to getNodes() must be a pointer array"); using LeafT = typename std::conditional<std::is_const< typename std::remove_pointer<T>::type>::value, const LeafType, LeafType>::type; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (std::is_same<T, LeafT*>::value) { array.resize(mLeafCount); for (size_t i=0; i<mLeafCount; ++i) array[i] = reinterpret_cast<T>(mLeafs[i]); } else { mTree->getNodes(array); } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } template<typename ArrayT> [[deprecated("Use Tree::getNodes()")]] void getNodes(ArrayT& array) const { using T = typename ArrayT::value_type; static_assert(std::is_pointer<T>::value, "argument to getNodes() must be a pointer array"); static_assert(std::is_const<typename std::remove_pointer<T>::type>::value, "argument to getNodes() must be an array of const node pointers"); OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (std::is_same<T, const LeafType*>::value) { array.resize(mLeafCount); for (size_t i=0; i<mLeafCount; ++i) array[i] = reinterpret_cast<T>(mLeafs[i]); } else { mTree->getNodes(array); } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } /// @brief Generate a linear array of prefix sums of offsets into the /// active voxels in the leafs. So @a offsets[n]+m is the offset to the /// mth active voxel in the nth leaf node (useful for /// user-managed value buffers, e.g. in tools/LevelSetAdvect.h). /// @return The total number of active values in the leaf nodes /// @param offsets array of prefix sums of offsets to active voxels /// @param size on input, the size of @a offsets; on output, its new size /// @param grainSize optional grain size for threading /// @details If @a offsets is @c nullptr or @a size is smaller than the /// number of leaf nodes, then @a offsets is reallocated and @a size is /// set to the number of leaf nodes.
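/// @par Example
/// @note Editorial usage sketch, not part of the original header; @c leafs is a
/// hypothetical LeafManager<FloatTree>, and the offset array is owned by the caller.
/// @code
/// size_t* offsets = nullptr;
/// size_t size = 0;
/// const size_t activeCount = leafs.getPrefixSum(offsets, size);
/// // offsets[n] + m now indexes the m-th active voxel of the n-th leaf node
/// // within a caller-managed linear value array of length activeCount.
/// delete [] offsets;
/// @endcode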
size_t getPrefixSum(size_t*& offsets, size_t& size, size_t grainSize=1) const { if (offsets == nullptr || size < mLeafCount) { delete [] offsets; offsets = new size_t[mLeafCount]; size = mLeafCount; } size_t prefix = 0; if ( grainSize > 0 ) { PrefixSum tmp(this->leafRange( grainSize ), offsets, prefix); } else {// serial for (size_t i=0; i<mLeafCount; ++i) { offsets[i] = prefix; prefix += mLeafs[i]->onVoxelCount(); } } return prefix; } //////////////////////////////////////////////////////////////////////////////////// // All methods below are for internal use only and should never be called directly /// Used internally by tbb::parallel_for() - never call it directly! void operator()(const RangeType& r) const { if (mTask) mTask(const_cast<LeafManager*>(this), r); else OPENVDB_THROW(ValueError, "task is undefined"); } private: void initLeafArray(bool serial = false) { // Build an array of all nodes that have leaf nodes as their immediate children using NodeChainT = typename NodeChain<RootNodeType, RootNodeType::LEVEL>::Type; using NonConstLeafParentT = typename NodeChainT::template Get</*Level=*/1>; using LeafParentT = typename CopyConstness<TreeType, NonConstLeafParentT>::Type; std::deque<LeafParentT*> leafParents; mTree->getNodes(leafParents); // Compute the leaf counts for each node std::vector<Index32> leafCounts; if (serial) { leafCounts.reserve(leafParents.size()); for (LeafParentT* leafParent : leafParents) { leafCounts.push_back(leafParent->childCount()); } } else { leafCounts.resize(leafParents.size()); tbb::parallel_for( // with typical node sizes and SSE enabled, there are only a handful // of instructions executed per-operation with a default grainsize // of 1, so increase to 64 to reduce parallel scheduling overhead tbb::blocked_range<size_t>(0, leafParents.size(), /*grainsize=*/64), [&](tbb::blocked_range<size_t>& range) { for (size_t i = range.begin(); i < range.end(); i++) { leafCounts[i] = leafParents[i]->childCount(); } } ); } // Turn leaf counts into a cumulative histogram and obtain total leaf count for (size_t i = 1; i < leafCounts.size(); i++) { leafCounts[i] += leafCounts[i-1]; } const size_t leafCount = leafCounts.empty() ? 
0 : leafCounts.back(); // Allocate (or deallocate) the leaf pointer array if (leafCount != mLeafCount) { if (leafCount > 0) { mLeafPtrs.reset(new LeafType*[leafCount]); mLeafs = mLeafPtrs.get(); } else { mLeafPtrs.reset(); mLeafs = nullptr; } mLeafCount = leafCount; } if (mLeafCount == 0) return; // Populate the leaf node pointers if (serial) { LeafType** leafPtr = mLeafs; for (LeafParentT* leafParent : leafParents) { for (auto iter = leafParent->beginChildOn(); iter; ++iter) { *leafPtr++ = &iter.getValue(); } } } else { tbb::parallel_for( tbb::blocked_range<size_t>(0, leafParents.size()), [&](tbb::blocked_range<size_t>& range) { size_t i = range.begin(); LeafType** leafPtr = mLeafs; if (i > 0) leafPtr += leafCounts[i-1]; for ( ; i < range.end(); i++) { for (auto iter = leafParents[i]->beginChildOn(); iter; ++iter) { *leafPtr++ = &iter.getValue(); } } } ); } } void initAuxBuffers(bool serial) { const size_t auxBufferCount = mLeafCount * mAuxBuffersPerLeaf; if (auxBufferCount != mAuxBufferCount) { if (auxBufferCount > 0) { mAuxBufferPtrs.reset(new NonConstBufferType[auxBufferCount]); mAuxBuffers = mAuxBufferPtrs.get(); } else { mAuxBufferPtrs.reset(); mAuxBuffers = nullptr; } mAuxBufferCount = auxBufferCount; } this->syncAllBuffers(serial); } void cook(size_t grainsize) { if (grainsize>0) { tbb::parallel_for(this->getRange(grainsize), *this); } else { (*this)(this->getRange()); } } void doSwapLeafBuffer(const RangeType& r, size_t auxBufferIdx) { LeafManagerImpl<LeafManager>::doSwapLeafBuffer( r, auxBufferIdx, mLeafs, mAuxBuffers, mAuxBuffersPerLeaf); } void doSwapAuxBuffer(const RangeType& r, size_t auxBufferIdx1, size_t auxBufferIdx2) { for (size_t N = mAuxBuffersPerLeaf, n = N*r.begin(), m = N*r.end(); n != m; n+=N) { mAuxBuffers[n + auxBufferIdx1].swap(mAuxBuffers[n + auxBufferIdx2]); } } void doSyncAuxBuffer(const RangeType& r, size_t auxBufferIdx) { for (size_t n = r.begin(), m = r.end(), N = mAuxBuffersPerLeaf; n != m; ++n) { mAuxBuffers[n*N + auxBufferIdx] = mLeafs[n]->buffer(); } } void doSyncAllBuffers1(const RangeType& r) { for (size_t n = r.begin(), m = r.end(); n != m; ++n) { mAuxBuffers[n] = mLeafs[n]->buffer(); } } void doSyncAllBuffers2(const RangeType& r) { for (size_t n = r.begin(), m = r.end(); n != m; ++n) { const BufferType& leafBuffer = mLeafs[n]->buffer(); mAuxBuffers[2*n ] = leafBuffer; mAuxBuffers[2*n+1] = leafBuffer; } } void doSyncAllBuffersN(const RangeType& r) { for (size_t n = r.begin(), m = r.end(), N = mAuxBuffersPerLeaf; n != m; ++n) { const BufferType& leafBuffer = mLeafs[n]->buffer(); for (size_t i=n*N, j=i+N; i!=j; ++i) mAuxBuffers[i] = leafBuffer; } } /// @brief Private member class that applies a user-defined /// functor to perform parallel_for on all the leaf nodes. template<typename LeafOp> struct LeafTransformer { LeafTransformer(const LeafOp &leafOp) : mLeafOp(leafOp) { } void run(const LeafRange &range, bool threaded) const { threaded ? tbb::parallel_for(range, *this) : (*this)(range); } void operator()(const LeafRange &range) const { for (typename LeafRange::Iterator it = range.begin(); it; ++it) mLeafOp(*it, it.pos()); } const LeafOp mLeafOp; };// LeafTransformer /// @brief Private member class that applies a user-defined /// functor to perform parallel_reduce on all the leaf nodes. 
template<typename LeafOp> struct LeafReducer { LeafReducer(LeafOp &leafOp) : mLeafOp(&leafOp) { } LeafReducer(const LeafReducer &other, tbb::split) : mLeafOpPtr(std::make_unique<LeafOp>(*(other.mLeafOp), tbb::split())) , mLeafOp(mLeafOpPtr.get()) { } void run(const LeafRange& range, bool threaded) { threaded ? tbb::parallel_reduce(range, *this) : (*this)(range); } void operator()(const LeafRange& range) { LeafOp &op = *mLeafOp;//local registry for (typename LeafRange::Iterator it = range.begin(); it; ++it) op(*it, it.pos()); } void join(const LeafReducer& other) { mLeafOp->join(*(other.mLeafOp)); } std::unique_ptr<LeafOp> mLeafOpPtr; LeafOp *mLeafOp = nullptr; };// LeafReducer // Helper class to compute a prefix sum of offsets to active voxels struct PrefixSum { PrefixSum(const LeafRange& r, size_t* offsets, size_t& prefix) : mOffsets(offsets) { tbb::parallel_for( r, *this); for (size_t i=0, leafCount = r.size(); i<leafCount; ++i) { size_t tmp = offsets[i]; offsets[i] = prefix; prefix += tmp; } } inline void operator()(const LeafRange& r) const { for (typename LeafRange::Iterator i = r.begin(); i; ++i) { mOffsets[i.pos()] = i->onVoxelCount(); } } size_t* mOffsets; };// PrefixSum using FuncType = typename std::function<void (LeafManager*, const RangeType&)>; TreeType* mTree; size_t mLeafCount, mAuxBufferCount, mAuxBuffersPerLeaf; std::unique_ptr<LeafType*[]> mLeafPtrs; LeafType** mLeafs = nullptr;//array of LeafNode pointers std::unique_ptr<NonConstBufferType[]> mAuxBufferPtrs; NonConstBufferType* mAuxBuffers = nullptr;//array of auxiliary buffers FuncType mTask = nullptr; };//end of LeafManager class // Partial specializations of LeafManager methods for const trees template<typename TreeT> struct LeafManagerImpl<LeafManager<const TreeT> > { using ManagerT = LeafManager<const TreeT>; using RangeT = typename ManagerT::RangeType; using LeafT = typename ManagerT::LeafType; using BufT = typename ManagerT::BufferType; static inline void doSwapLeafBuffer(const RangeT&, size_t /*auxBufferIdx*/, LeafT**, BufT*, size_t /*bufsPerLeaf*/) { // Buffers can't be swapped into const trees. } }; } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_LEAFMANAGER_HAS_BEEN_INCLUDED
34,615
C
38.788506
99
0.600722
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/LeafNode.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_TREE_LEAFNODE_HAS_BEEN_INCLUDED #define OPENVDB_TREE_LEAFNODE_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/util/NodeMasks.h> #include <openvdb/io/Compression.h> // for io::readData(), etc. #include "Iterator.h" #include "LeafBuffer.h" #include <algorithm> // for std::nth_element() #include <iostream> #include <memory> #include <sstream> #include <string> #include <type_traits> #include <vector> class TestLeaf; template<typename> class TestLeafIO; namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { template<Index, typename> struct SameLeafConfig; // forward declaration /// @brief Templated block class to hold specific data types and a fixed /// number of values determined by Log2Dim. The actual coordinate /// dimension of the block is 2^Log2Dim, i.e. Log2Dim=3 corresponds to /// a LeafNode that spans a 8^3 block. template<typename T, Index Log2Dim> class LeafNode { public: using BuildType = T; using ValueType = T; using Buffer = LeafBuffer<ValueType, Log2Dim>; using LeafNodeType = LeafNode<ValueType, Log2Dim>; using NodeMaskType = util::NodeMask<Log2Dim>; using Ptr = SharedPtr<LeafNode>; static const Index LOG2DIM = Log2Dim, // needed by parent nodes TOTAL = Log2Dim, // needed by parent nodes DIM = 1 << TOTAL, // dimension along one coordinate direction NUM_VALUES = 1 << 3 * Log2Dim, NUM_VOXELS = NUM_VALUES, // total number of voxels represented by this node SIZE = NUM_VALUES, LEVEL = 0; // level 0 = leaf /// @brief ValueConverter<T>::Type is the type of a LeafNode having the same /// dimensions as this node but a different value type, T. template<typename OtherValueType> struct ValueConverter { using Type = LeafNode<OtherValueType, Log2Dim>; }; /// @brief SameConfiguration<OtherNodeType>::value is @c true if and only if /// OtherNodeType is the type of a LeafNode with the same dimensions as this node. template<typename OtherNodeType> struct SameConfiguration { static const bool value = SameLeafConfig<LOG2DIM, OtherNodeType>::value; }; /// Default constructor LeafNode(); /// @brief Constructor /// @param coords the grid index coordinates of a voxel /// @param value a value with which to fill the buffer /// @param active the active state to which to initialize all voxels explicit LeafNode(const Coord& coords, const ValueType& value = zeroVal<ValueType>(), bool active = false); /// @brief "Partial creation" constructor used during file input /// @param coords the grid index coordinates of a voxel /// @param value a value with which to fill the buffer /// @param active the active state to which to initialize all voxels /// @details This constructor does not allocate memory for voxel values. LeafNode(PartialCreate, const Coord& coords, const ValueType& value = zeroVal<ValueType>(), bool active = false); /// Deep copy constructor LeafNode(const LeafNode&); /// Deep assignment operator LeafNode& operator=(const LeafNode&) = default; /// Value conversion copy constructor template<typename OtherValueType> explicit LeafNode(const LeafNode<OtherValueType, Log2Dim>& other); /// Topology copy constructor template<typename OtherValueType> LeafNode(const LeafNode<OtherValueType, Log2Dim>& other, const ValueType& offValue, const ValueType& onValue, TopologyCopy); /// Topology copy constructor template<typename OtherValueType> LeafNode(const LeafNode<OtherValueType, Log2Dim>& other, const ValueType& background, TopologyCopy); /// Destructor. 
~LeafNode(); // // Statistics // /// Return log2 of the dimension of this LeafNode, e.g. 3 if dimensions are 8^3 static Index log2dim() { return Log2Dim; } /// Return the number of voxels in each coordinate dimension. static Index dim() { return DIM; } /// Return the total number of voxels represented by this LeafNode static Index size() { return SIZE; } /// Return the total number of voxels represented by this LeafNode static Index numValues() { return SIZE; } /// Return the level of this node, which by definition is zero for LeafNodes static Index getLevel() { return LEVEL; } /// Append the Log2Dim of this LeafNode to the specified vector static void getNodeLog2Dims(std::vector<Index>& dims) { dims.push_back(Log2Dim); } /// Return the dimension of child nodes of this LeafNode, which is one for voxels. static Index getChildDim() { return 1; } /// Return the leaf count for this node, which is one. static Index32 leafCount() { return 1; } /// no-op void nodeCount(std::vector<Index32> &) const {} /// Return the non-leaf count for this node, which is zero. static Index32 nonLeafCount() { return 0; } /// Return the child count for this node, which is zero. static Index32 childCount() { return 0; } /// Return the number of voxels marked On. Index64 onVoxelCount() const { return mValueMask.countOn(); } /// Return the number of voxels marked Off. Index64 offVoxelCount() const { return mValueMask.countOff(); } Index64 onLeafVoxelCount() const { return onVoxelCount(); } Index64 offLeafVoxelCount() const { return offVoxelCount(); } static Index64 onTileCount() { return 0; } static Index64 offTileCount() { return 0; } /// Return @c true if this node has no active voxels. bool isEmpty() const { return mValueMask.isOff(); } /// Return @c true if this node contains only active voxels. bool isDense() const { return mValueMask.isOn(); } /// Return @c true if memory for this node's buffer has been allocated. bool isAllocated() const { return !mBuffer.isOutOfCore() && !mBuffer.empty(); } /// Allocate memory for this node's buffer if it has not already been allocated. bool allocate() { return mBuffer.allocate(); } /// Return the memory in bytes occupied by this node. Index64 memUsage() const; /// Expand the given bounding box so that it includes this leaf node's active voxels. /// If visitVoxels is false this LeafNode will be approximated as dense, i.e. with all /// voxels active. Else the individual active voxels are visited to produce a tight bbox. void evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels = true) const; /// @brief Return the bounding box of this node, i.e., the full index space /// spanned by this leaf node. CoordBBox getNodeBoundingBox() const { return CoordBBox::createCube(mOrigin, DIM); } /// Set the grid index coordinates of this node's local origin. void setOrigin(const Coord& origin) { mOrigin = origin; } //@{ /// Return the grid index coordinates of this node's local origin. const Coord& origin() const { return mOrigin; } void getOrigin(Coord& origin) const { origin = mOrigin; } void getOrigin(Int32& x, Int32& y, Int32& z) const { mOrigin.asXYZ(x, y, z); } //@} /// Return the linear table offset of the given global or local coordinates. static Index coordToOffset(const Coord& xyz); /// @brief Return the local coordinates for a linear table offset, /// where offset 0 has coordinates (0, 0, 0). static Coord offsetToLocalCoord(Index n); /// Return the global coordinates for a linear table offset. 
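/// @par Example
/// @note Editorial sketch, not part of the original header, illustrating the
/// offset/coordinate round trip for an 8^3 leaf (Log2Dim = 3).
/// @code
/// using LeafT = openvdb::tree::LeafNode<float, 3>;
/// LeafT leaf(openvdb::Coord(8, 0, 0)); // node origin becomes (8, 0, 0)
/// const openvdb::Index n = LeafT::coordToOffset(openvdb::Coord(9, 1, 2));
/// const openvdb::Coord local = LeafT::offsetToLocalCoord(n); // (1, 1, 2)
/// const openvdb::Coord global = leaf.offsetToGlobalCoord(n); // (9, 1, 2)
/// @endcode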
Coord offsetToGlobalCoord(Index n) const; /// Return a string representation of this node. std::string str() const; /// @brief Return @c true if the given node (which may have a different @c ValueType /// than this node) has the same active value topology as this node. template<typename OtherType, Index OtherLog2Dim> bool hasSameTopology(const LeafNode<OtherType, OtherLog2Dim>* other) const; /// Check for buffer, state and origin equivalence. bool operator==(const LeafNode& other) const; bool operator!=(const LeafNode& other) const { return !(other == *this); } protected: using MaskOnIterator = typename NodeMaskType::OnIterator; using MaskOffIterator = typename NodeMaskType::OffIterator; using MaskDenseIterator = typename NodeMaskType::DenseIterator; // Type tags to disambiguate template instantiations struct ValueOn {}; struct ValueOff {}; struct ValueAll {}; struct ChildOn {}; struct ChildOff {}; struct ChildAll {}; template<typename MaskIterT, typename NodeT, typename ValueT, typename TagT> struct ValueIter: // Derives from SparseIteratorBase, but can also be used as a dense iterator, // if MaskIterT is a dense mask iterator type. public SparseIteratorBase< MaskIterT, ValueIter<MaskIterT, NodeT, ValueT, TagT>, NodeT, ValueT> { using BaseT = SparseIteratorBase<MaskIterT, ValueIter, NodeT, ValueT>; ValueIter() {} ValueIter(const MaskIterT& iter, NodeT* parent): BaseT(iter, parent) {} ValueT& getItem(Index pos) const { return this->parent().getValue(pos); } ValueT& getValue() const { return this->parent().getValue(this->pos()); } // Note: setItem() can't be called on const iterators. void setItem(Index pos, const ValueT& value) const { this->parent().setValueOnly(pos, value); } // Note: setValue() can't be called on const iterators. void setValue(const ValueT& value) const { this->parent().setValueOnly(this->pos(), value); } // Note: modifyItem() can't be called on const iterators. template<typename ModifyOp> void modifyItem(Index n, const ModifyOp& op) const { this->parent().modifyValue(n, op); } // Note: modifyValue() can't be called on const iterators. template<typename ModifyOp> void modifyValue(const ModifyOp& op) const { this->parent().modifyValue(this->pos(), op); } }; /// Leaf nodes have no children, so their child iterators have no get/set accessors. template<typename MaskIterT, typename NodeT, typename TagT> struct ChildIter: public SparseIteratorBase<MaskIterT, ChildIter<MaskIterT, NodeT, TagT>, NodeT, ValueType> { ChildIter() {} ChildIter(const MaskIterT& iter, NodeT* parent): SparseIteratorBase< MaskIterT, ChildIter<MaskIterT, NodeT, TagT>, NodeT, ValueType>(iter, parent) {} }; template<typename NodeT, typename ValueT, typename TagT> struct DenseIter: public DenseIteratorBase< MaskDenseIterator, DenseIter<NodeT, ValueT, TagT>, NodeT, /*ChildT=*/void, ValueT> { using BaseT = DenseIteratorBase<MaskDenseIterator, DenseIter, NodeT, void, ValueT>; using NonConstValueT = typename BaseT::NonConstValueType; DenseIter() {} DenseIter(const MaskDenseIterator& iter, NodeT* parent): BaseT(iter, parent) {} bool getItem(Index pos, void*& child, NonConstValueT& value) const { value = this->parent().getValue(pos); child = nullptr; return false; // no child } // Note: setItem() can't be called on const iterators. //void setItem(Index pos, void* child) const {} // Note: unsetItem() can't be called on const iterators. 
void unsetItem(Index pos, const ValueT& value) const { this->parent().setValueOnly(pos, value); } }; public: using ValueOnIter = ValueIter<MaskOnIterator, LeafNode, const ValueType, ValueOn>; using ValueOnCIter = ValueIter<MaskOnIterator, const LeafNode, const ValueType, ValueOn>; using ValueOffIter = ValueIter<MaskOffIterator, LeafNode, const ValueType, ValueOff>; using ValueOffCIter = ValueIter<MaskOffIterator,const LeafNode,const ValueType,ValueOff>; using ValueAllIter = ValueIter<MaskDenseIterator, LeafNode, const ValueType, ValueAll>; using ValueAllCIter = ValueIter<MaskDenseIterator,const LeafNode,const ValueType,ValueAll>; using ChildOnIter = ChildIter<MaskOnIterator, LeafNode, ChildOn>; using ChildOnCIter = ChildIter<MaskOnIterator, const LeafNode, ChildOn>; using ChildOffIter = ChildIter<MaskOffIterator, LeafNode, ChildOff>; using ChildOffCIter = ChildIter<MaskOffIterator, const LeafNode, ChildOff>; using ChildAllIter = DenseIter<LeafNode, ValueType, ChildAll>; using ChildAllCIter = DenseIter<const LeafNode, const ValueType, ChildAll>; ValueOnCIter cbeginValueOn() const { return ValueOnCIter(mValueMask.beginOn(), this); } ValueOnCIter beginValueOn() const { return ValueOnCIter(mValueMask.beginOn(), this); } ValueOnIter beginValueOn() { return ValueOnIter(mValueMask.beginOn(), this); } ValueOffCIter cbeginValueOff() const { return ValueOffCIter(mValueMask.beginOff(), this); } ValueOffCIter beginValueOff() const { return ValueOffCIter(mValueMask.beginOff(), this); } ValueOffIter beginValueOff() { return ValueOffIter(mValueMask.beginOff(), this); } ValueAllCIter cbeginValueAll() const { return ValueAllCIter(mValueMask.beginDense(), this); } ValueAllCIter beginValueAll() const { return ValueAllCIter(mValueMask.beginDense(), this); } ValueAllIter beginValueAll() { return ValueAllIter(mValueMask.beginDense(), this); } ValueOnCIter cendValueOn() const { return ValueOnCIter(mValueMask.endOn(), this); } ValueOnCIter endValueOn() const { return ValueOnCIter(mValueMask.endOn(), this); } ValueOnIter endValueOn() { return ValueOnIter(mValueMask.endOn(), this); } ValueOffCIter cendValueOff() const { return ValueOffCIter(mValueMask.endOff(), this); } ValueOffCIter endValueOff() const { return ValueOffCIter(mValueMask.endOff(), this); } ValueOffIter endValueOff() { return ValueOffIter(mValueMask.endOff(), this); } ValueAllCIter cendValueAll() const { return ValueAllCIter(mValueMask.endDense(), this); } ValueAllCIter endValueAll() const { return ValueAllCIter(mValueMask.endDense(), this); } ValueAllIter endValueAll() { return ValueAllIter(mValueMask.endDense(), this); } // Note that [c]beginChildOn() and [c]beginChildOff() actually return end iterators, // because leaf nodes have no children. 
ChildOnCIter cbeginChildOn() const { return ChildOnCIter(mValueMask.endOn(), this); } ChildOnCIter beginChildOn() const { return ChildOnCIter(mValueMask.endOn(), this); } ChildOnIter beginChildOn() { return ChildOnIter(mValueMask.endOn(), this); } ChildOffCIter cbeginChildOff() const { return ChildOffCIter(mValueMask.endOff(), this); } ChildOffCIter beginChildOff() const { return ChildOffCIter(mValueMask.endOff(), this); } ChildOffIter beginChildOff() { return ChildOffIter(mValueMask.endOff(), this); } ChildAllCIter cbeginChildAll() const { return ChildAllCIter(mValueMask.beginDense(), this); } ChildAllCIter beginChildAll() const { return ChildAllCIter(mValueMask.beginDense(), this); } ChildAllIter beginChildAll() { return ChildAllIter(mValueMask.beginDense(), this); } ChildOnCIter cendChildOn() const { return ChildOnCIter(mValueMask.endOn(), this); } ChildOnCIter endChildOn() const { return ChildOnCIter(mValueMask.endOn(), this); } ChildOnIter endChildOn() { return ChildOnIter(mValueMask.endOn(), this); } ChildOffCIter cendChildOff() const { return ChildOffCIter(mValueMask.endOff(), this); } ChildOffCIter endChildOff() const { return ChildOffCIter(mValueMask.endOff(), this); } ChildOffIter endChildOff() { return ChildOffIter(mValueMask.endOff(), this); } ChildAllCIter cendChildAll() const { return ChildAllCIter(mValueMask.endDense(), this); } ChildAllCIter endChildAll() const { return ChildAllCIter(mValueMask.endDense(), this); } ChildAllIter endChildAll() { return ChildAllIter(mValueMask.endDense(), this); } // // Buffer management // /// @brief Exchange this node's data buffer with the given data buffer /// without changing the active states of the values. void swap(Buffer& other) { mBuffer.swap(other); } const Buffer& buffer() const { return mBuffer; } Buffer& buffer() { return mBuffer; } // // I/O methods // /// @brief Read in just the topology. /// @param is the stream from which to read /// @param fromHalf if true, floating-point input values are assumed to be 16-bit void readTopology(std::istream& is, bool fromHalf = false); /// @brief Write out just the topology. /// @param os the stream to which to write /// @param toHalf if true, output floating-point values as 16-bit half floats void writeTopology(std::ostream& os, bool toHalf = false) const; /// @brief Read buffers from a stream. /// @param is the stream from which to read /// @param fromHalf if true, floating-point input values are assumed to be 16-bit void readBuffers(std::istream& is, bool fromHalf = false); /// @brief Read buffers that intersect the given bounding box. /// @param is the stream from which to read /// @param bbox an index-space bounding box /// @param fromHalf if true, floating-point input values are assumed to be 16-bit void readBuffers(std::istream& is, const CoordBBox& bbox, bool fromHalf = false); /// @brief Write buffers to a stream. /// @param os the stream to which to write /// @param toHalf if true, output floating-point values as 16-bit half floats void writeBuffers(std::ostream& os, bool toHalf = false) const; size_t streamingSize(bool toHalf = false) const; // // Accessor methods // /// Return the value of the voxel at the given coordinates. const ValueType& getValue(const Coord& xyz) const; /// Return the value of the voxel at the given linear offset. const ValueType& getValue(Index offset) const; /// @brief Return @c true if the voxel at the given coordinates is active. 
/// @param xyz the coordinates of the voxel to be probed /// @param[out] val the value of the voxel at the given coordinates bool probeValue(const Coord& xyz, ValueType& val) const; /// @brief Return @c true if the voxel at the given offset is active. /// @param offset the linear offset of the voxel to be probed /// @param[out] val the value of the voxel at the given coordinates bool probeValue(Index offset, ValueType& val) const; /// Return the level (i.e., 0) at which leaf node values reside. static Index getValueLevel(const Coord&) { return LEVEL; } /// Set the active state of the voxel at the given coordinates but don't change its value. void setActiveState(const Coord& xyz, bool on); /// Set the active state of the voxel at the given offset but don't change its value. void setActiveState(Index offset, bool on) { assert(offset<SIZE); mValueMask.set(offset, on); } /// Set the value of the voxel at the given coordinates but don't change its active state. void setValueOnly(const Coord& xyz, const ValueType& val); /// Set the value of the voxel at the given offset but don't change its active state. void setValueOnly(Index offset, const ValueType& val); /// Mark the voxel at the given coordinates as inactive but don't change its value. void setValueOff(const Coord& xyz) { mValueMask.setOff(LeafNode::coordToOffset(xyz)); } /// Mark the voxel at the given offset as inactive but don't change its value. void setValueOff(Index offset) { assert(offset < SIZE); mValueMask.setOff(offset); } /// Set the value of the voxel at the given coordinates and mark the voxel as inactive. void setValueOff(const Coord& xyz, const ValueType& val); /// Set the value of the voxel at the given offset and mark the voxel as inactive. void setValueOff(Index offset, const ValueType& val); /// Mark the voxel at the given coordinates as active but don't change its value. void setValueOn(const Coord& xyz) { mValueMask.setOn(LeafNode::coordToOffset(xyz)); } /// Mark the voxel at the given offset as active but don't change its value. void setValueOn(Index offset) { assert(offset < SIZE); mValueMask.setOn(offset); } /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValueOn(const Coord& xyz, const ValueType& val) { this->setValueOn(LeafNode::coordToOffset(xyz), val); } /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValue(const Coord& xyz, const ValueType& val) { this->setValueOn(xyz, val); } /// Set the value of the voxel at the given offset and mark the voxel as active. void setValueOn(Index offset, const ValueType& val) { mBuffer.setValue(offset, val); mValueMask.setOn(offset); } /// @brief Apply a functor to the value of the voxel at the given offset /// and mark the voxel as active. template<typename ModifyOp> void modifyValue(Index offset, const ModifyOp& op) { mBuffer.loadValues(); if (!mBuffer.empty()) { // in-place modify value ValueType& val = const_cast<ValueType&>(mBuffer[offset]); op(val); mValueMask.setOn(offset); } } /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. template<typename ModifyOp> void modifyValue(const Coord& xyz, const ModifyOp& op) { this->modifyValue(this->coordToOffset(xyz), op); } /// Apply a functor to the voxel at the given coordinates. 
template<typename ModifyOp> void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op) { mBuffer.loadValues(); if (!mBuffer.empty()) { const Index offset = this->coordToOffset(xyz); bool state = mValueMask.isOn(offset); // in-place modify value ValueType& val = const_cast<ValueType&>(mBuffer[offset]); op(val, state); mValueMask.set(offset, state); } } /// Mark all voxels as active but don't change their values. void setValuesOn() { mValueMask.setOn(); } /// Mark all voxels as inactive but don't change their values. void setValuesOff() { mValueMask.setOff(); } /// Return @c true if the voxel at the given coordinates is active. bool isValueOn(const Coord& xyz) const {return this->isValueOn(LeafNode::coordToOffset(xyz));} /// Return @c true if the voxel at the given offset is active. bool isValueOn(Index offset) const { return mValueMask.isOn(offset); } /// Return @c false since leaf nodes never contain tiles. static bool hasActiveTiles() { return false; } /// Set all voxels that lie outside the given axis-aligned box to the background. void clip(const CoordBBox&, const ValueType& background); /// Set all voxels within an axis-aligned box to the specified value and active state. void fill(const CoordBBox& bbox, const ValueType&, bool active = true); /// Set all voxels within an axis-aligned box to the specified value and active state. void denseFill(const CoordBBox& bbox, const ValueType& value, bool active = true) { this->fill(bbox, value, active); } /// Set all voxels to the specified value but don't change their active states. void fill(const ValueType& value); /// Set all voxels to the specified value and active state. void fill(const ValueType& value, bool active); /// @brief Copy into a dense grid the values of the voxels that lie within /// a given bounding box. /// /// @param bbox inclusive bounding box of the voxels to be copied into the dense grid /// @param dense dense grid with a stride in @e z of one (see tools::Dense /// in tools/Dense.h for the required API) /// /// @note @a bbox is assumed to be identical to or contained in the coordinate domains /// of both the dense grid and this node, i.e., no bounds checking is performed. /// @note Consider using tools::CopyToDense in tools/Dense.h /// instead of calling this method directly. template<typename DenseT> void copyToDense(const CoordBBox& bbox, DenseT& dense) const; /// @brief Copy from a dense grid into this node the values of the voxels /// that lie within a given bounding box. /// @details Only values that are different (by more than the given tolerance) /// from the background value will be active. Other values are inactive /// and truncated to the background value. /// /// @param bbox inclusive bounding box of the voxels to be copied into this node /// @param dense dense grid with a stride in @e z of one (see tools::Dense /// in tools/Dense.h for the required API) /// @param background background value of the tree that this node belongs to /// @param tolerance tolerance within which a value equals the background value /// /// @note @a bbox is assumed to be identical to or contained in the coordinate domains /// of both the dense grid and this node, i.e., no bounds checking is performed. /// @note Consider using tools::CopyFromDense in tools/Dense.h /// instead of calling this method directly. template<typename DenseT> void copyFromDense(const CoordBBox& bbox, const DenseT& dense, const ValueType& background, const ValueType& tolerance); /// @brief Return the value of the voxel at the given coordinates. 
/// @note Used internally by ValueAccessor.
    template<typename AccessorT>
    const ValueType& getValueAndCache(const Coord& xyz, AccessorT&) const
    {
        return this->getValue(xyz);
    }

    /// @brief Return @c true if the voxel at the given coordinates is active.
    /// @note Used internally by ValueAccessor.
    template<typename AccessorT>
    bool isValueOnAndCache(const Coord& xyz, AccessorT&) const { return this->isValueOn(xyz); }

    /// @brief Change the value of the voxel at the given coordinates and mark it as active.
    /// @note Used internally by ValueAccessor.
    template<typename AccessorT>
    void setValueAndCache(const Coord& xyz, const ValueType& val, AccessorT&)
    {
        this->setValueOn(xyz, val);
    }

    /// @brief Change the value of the voxel at the given coordinates
    /// but preserve its state.
    /// @note Used internally by ValueAccessor.
    template<typename AccessorT>
    void setValueOnlyAndCache(const Coord& xyz, const ValueType& val, AccessorT&)
    {
        this->setValueOnly(xyz, val);
    }

    /// @brief Apply a functor to the value of the voxel at the given coordinates
    /// and mark the voxel as active.
    /// @note Used internally by ValueAccessor.
    template<typename ModifyOp, typename AccessorT>
    void modifyValueAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&)
    {
        this->modifyValue(xyz, op);
    }

    /// @brief Apply a functor to the voxel at the given coordinates.
    /// @note Used internally by ValueAccessor.
    template<typename ModifyOp, typename AccessorT>
    void modifyValueAndActiveStateAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&)
    {
        this->modifyValueAndActiveState(xyz, op);
    }

    /// @brief Change the value of the voxel at the given coordinates and mark it as inactive.
    /// @note Used internally by ValueAccessor.
    template<typename AccessorT>
    void setValueOffAndCache(const Coord& xyz, const ValueType& value, AccessorT&)
    {
        this->setValueOff(xyz, value);
    }

    /// @brief Set the active state of the voxel at the given coordinates
    /// without changing its value.
    /// @note Used internally by ValueAccessor.
    template<typename AccessorT>
    void setActiveStateAndCache(const Coord& xyz, bool on, AccessorT&)
    {
        this->setActiveState(xyz, on);
    }

    /// @brief Return @c true if the voxel at the given coordinates is active
    /// and return the voxel value in @a val.
    /// @note Used internally by ValueAccessor.
    template<typename AccessorT>
    bool probeValueAndCache(const Coord& xyz, ValueType& val, AccessorT&) const
    {
        return this->probeValue(xyz, val);
    }

    /// @brief Return the value of the voxel at the given coordinates and return
    /// its active state and level (i.e., 0) in @a state and @a level.
    /// @note Used internally by ValueAccessor.
    template<typename AccessorT>
    const ValueType& getValue(const Coord& xyz, bool& state, int& level, AccessorT&) const
    {
        const Index offset = this->coordToOffset(xyz);
        state = mValueMask.isOn(offset);
        level = LEVEL;
        return mBuffer[offset];
    }

    /// @brief Return the LEVEL (=0) at which leaf node values reside.
    /// @note Used internally by ValueAccessor (note that the last argument is a dummy).
    template<typename AccessorT>
    static Index getValueLevelAndCache(const Coord&, AccessorT&) { return LEVEL; }

    /// @brief Return a const reference to the first value in the buffer.
    /// @note Though it is potentially risky, this can be converted to a non-const pointer
    /// by means of const_cast<ValueType*>.
    const ValueType& getFirstValue() const { return mBuffer[0]; }
    /// Return a const reference to the last value in the buffer.
const ValueType& getLastValue() const { return mBuffer[SIZE - 1]; }

    /// @brief Replace inactive occurrences of @a oldBackground with @a newBackground,
    /// and inactive occurrences of @a -oldBackground with @a -newBackground.
    void resetBackground(const ValueType& oldBackground, const ValueType& newBackground);

    void negate();

    /// @brief No-op
    /// @details This function exists only to enable template instantiation.
    void voxelizeActiveTiles(bool = true) {}

    template<MergePolicy Policy> void merge(const LeafNode&);
    template<MergePolicy Policy> void merge(const ValueType& tileValue, bool tileActive);
    template<MergePolicy Policy>
    void merge(const LeafNode& other, const ValueType& /*bg*/, const ValueType& /*otherBG*/);

    /// @brief Union this node's set of active values with the active values
    /// of the other node, whose @c ValueType may be different. As a result,
    /// a voxel will be active if either of the original voxels was active.
    ///
    /// @note This operation modifies only active states, not values.
    template<typename OtherType>
    void topologyUnion(const LeafNode<OtherType, Log2Dim>& other);

    /// @brief Intersect this node's set of active values with the active values
    /// of the other node, whose @c ValueType may be different. As a result,
    /// a voxel will be active only if both of the original voxels were active.
    ///
    /// @details The last dummy argument is required to match the signature
    /// for InternalNode::topologyIntersection.
    ///
    /// @note This operation modifies only active states, not
    /// values. Also note that this operation can result in all voxels
    /// being inactive, so consider subsequently calling prune.
    template<typename OtherType>
    void topologyIntersection(const LeafNode<OtherType, Log2Dim>& other, const ValueType&);

    /// @brief Difference this node's set of active values with the active values
    /// of the other node, whose @c ValueType may be different. As a result,
    /// a voxel will be active only if the original voxel is
    /// active in this LeafNode and inactive in the other LeafNode.
    ///
    /// @details The last dummy argument is required to match the signature
    /// for InternalNode::topologyDifference.
    ///
    /// @note This operation modifies only active states, not values.
    /// Also, because it can deactivate all of this node's voxels,
    /// consider subsequently calling prune.
    template<typename OtherType>
    void topologyDifference(const LeafNode<OtherType, Log2Dim>& other, const ValueType&);

    template<typename CombineOp>
    void combine(const LeafNode& other, CombineOp& op);
    template<typename CombineOp>
    void combine(const ValueType& value, bool valueIsActive, CombineOp& op);

    template<typename CombineOp, typename OtherType /*= ValueType*/>
    void combine2(const LeafNode& other, const OtherType&, bool valueIsActive, CombineOp&);
    template<typename CombineOp, typename OtherNodeT /*= LeafNode*/>
    void combine2(const ValueType&, const OtherNodeT& other, bool valueIsActive, CombineOp&);
    template<typename CombineOp, typename OtherNodeT /*= LeafNode*/>
    void combine2(const LeafNode& b0, const OtherNodeT& b1, CombineOp&);

    /// @brief Call the templated functor BBoxOp with bounding box
    /// information. An additional level argument is provided to the
    /// callback.
    ///
    /// @note The bounding boxes are guaranteed to be non-overlapping.
template<typename BBoxOp> void visitActiveBBox(BBoxOp&) const;

    template<typename VisitorOp> void visit(VisitorOp&);
    template<typename VisitorOp> void visit(VisitorOp&) const;

    template<typename OtherLeafNodeType, typename VisitorOp>
    void visit2Node(OtherLeafNodeType& other, VisitorOp&);
    template<typename OtherLeafNodeType, typename VisitorOp>
    void visit2Node(OtherLeafNodeType& other, VisitorOp&) const;
    template<typename IterT, typename VisitorOp>
    void visit2(IterT& otherIter, VisitorOp&, bool otherIsLHS = false);
    template<typename IterT, typename VisitorOp>
    void visit2(IterT& otherIter, VisitorOp&, bool otherIsLHS = false) const;

    //@{
    /// This function exists only to enable template instantiation.
    void prune(const ValueType& /*tolerance*/ = zeroVal<ValueType>()) {}
    void addLeaf(LeafNode*) {}
    template<typename AccessorT>
    void addLeafAndCache(LeafNode*, AccessorT&) {}
    template<typename NodeT>
    NodeT* stealNode(const Coord&, const ValueType&, bool) { return nullptr; }
    template<typename NodeT>
    NodeT* probeNode(const Coord&) { return nullptr; }
    template<typename NodeT>
    const NodeT* probeConstNode(const Coord&) const { return nullptr; }
    template<typename ArrayT> void getNodes(ArrayT&) const {}
    template<typename ArrayT> void stealNodes(ArrayT&, const ValueType&, bool) {}
    //@}

    void addTile(Index level, const Coord&, const ValueType&, bool);
    void addTile(Index offset, const ValueType&, bool);
    template<typename AccessorT>
    void addTileAndCache(Index, const Coord&, const ValueType&, bool, AccessorT&);

    //@{
    /// @brief Return a pointer to this node.
    LeafNode* touchLeaf(const Coord&) { return this; }
    template<typename AccessorT>
    LeafNode* touchLeafAndCache(const Coord&, AccessorT&) { return this; }
    template<typename NodeT, typename AccessorT>
    NodeT* probeNodeAndCache(const Coord&, AccessorT&)
    {
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        if (!(std::is_same<NodeT, LeafNode>::value)) return nullptr;
        return reinterpret_cast<NodeT*>(this);
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }
    LeafNode* probeLeaf(const Coord&) { return this; }
    template<typename AccessorT>
    LeafNode* probeLeafAndCache(const Coord&, AccessorT&) { return this; }
    //@}

    //@{
    /// @brief Return a @c const pointer to this node.
    const LeafNode* probeConstLeaf(const Coord&) const { return this; }
    template<typename AccessorT>
    const LeafNode* probeConstLeafAndCache(const Coord&, AccessorT&) const { return this; }
    template<typename AccessorT>
    const LeafNode* probeLeafAndCache(const Coord&, AccessorT&) const { return this; }
    const LeafNode* probeLeaf(const Coord&) const { return this; }
    template<typename NodeT, typename AccessorT>
    const NodeT* probeConstNodeAndCache(const Coord&, AccessorT&) const
    {
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN
        if (!(std::is_same<NodeT, LeafNode>::value)) return nullptr;
        return reinterpret_cast<const NodeT*>(this);
        OPENVDB_NO_UNREACHABLE_CODE_WARNING_END
    }
    //@}

    /// Return @c true if all of this node's values have the same active state
    /// and are in the range this->getFirstValue() +/- @a tolerance.
    ///
    /// @param firstValue Is updated with the first value of this leaf node.
    /// @param state      Is updated with the state of all values IF the method
    ///                   returns @c true. Otherwise the value is undefined!
    /// @param tolerance  The tolerance used to determine if values are
    ///                   approximately equal to the first value.
bool isConstant(ValueType& firstValue, bool& state,
                    const ValueType& tolerance = zeroVal<ValueType>()) const;

    /// Return @c true if all of this node's values have the same active state
    /// and the range (@a maxValue - @a minValue) < @a tolerance.
    ///
    /// @param minValue  Is updated with the minimum of all values IF the method
    ///                  returns @c true. Otherwise the value is undefined!
    /// @param maxValue  Is updated with the maximum of all values IF the method
    ///                  returns @c true. Otherwise the value is undefined!
    /// @param state     Is updated with the state of all values IF the method
    ///                  returns @c true. Otherwise the value is undefined!
    /// @param tolerance The tolerance used to determine if values are
    ///                  approximately constant.
    bool isConstant(ValueType& minValue, ValueType& maxValue,
                    bool& state, const ValueType& tolerance = zeroVal<ValueType>()) const;

    /// @brief Compute the median value of all the active AND inactive voxels in this node.
    /// @return The median value of all values in this node.
    ///
    /// @param tmp Optional temporary storage that can hold at least NUM_VALUES values.
    ///            Use of this temporary storage can improve performance
    ///            when this method is called multiple times.
    ///
    /// @note If tmp = this->buffer().data(), then the median
    ///       value is computed very efficiently (in place), but
    ///       the voxel values in this node are re-shuffled!
    ///
    /// @warning If tmp != nullptr, then it is the responsibility of
    ///          the client code to ensure that it points to enough memory to
    ///          hold NUM_VALUES elements of type ValueType.
    ValueType medianAll(ValueType *tmp = nullptr) const;

    /// @brief Compute the median value of all the active voxels in this node.
    /// @return The number of active voxels.
    ///
    /// @param value If the return value is nonzero, @a value is updated
    ///              with the median value.
    ///
    /// @param tmp Optional temporary storage that can hold at least
    ///            as many values as there are active voxels in this node.
    ///            Use of this temporary storage can improve performance
    ///            when this method is called multiple times.
    ///
    /// @warning If tmp != nullptr, then it is the responsibility of
    ///          the client code to ensure that it points to enough memory to
    ///          hold the number of active voxels of type ValueType.
    Index medianOn(ValueType &value, ValueType *tmp = nullptr) const;

    /// @brief Compute the median value of all the inactive voxels in this node.
    /// @return The number of inactive voxels.
    ///
    /// @param value If the return value is nonzero, @a value is updated
    ///              with the median value.
    ///
    /// @param tmp Optional temporary storage that can hold at least
    ///            as many values as there are inactive voxels in this node.
    ///            Use of this temporary storage can improve performance
    ///            when this method is called multiple times.
    ///
    /// @warning If tmp != nullptr, then it is the responsibility of
    ///          the client code to ensure that it points to enough memory to
    ///          hold the number of inactive voxels of type ValueType.
    Index medianOff(ValueType &value, ValueType *tmp = nullptr) const;

    /// Return @c true if all of this node's values are inactive.
    bool isInactive() const { return mValueMask.isOff(); }

protected:
    friend class ::TestLeaf;
    template<typename> friend class ::TestLeafIO;

    // During topology-only construction, access is needed
    // to protected/private members of other template instances.
template<typename, Index> friend class LeafNode; friend struct ValueIter<MaskOnIterator, LeafNode, ValueType, ValueOn>; friend struct ValueIter<MaskOffIterator, LeafNode, ValueType, ValueOff>; friend struct ValueIter<MaskDenseIterator, LeafNode, ValueType, ValueAll>; friend struct ValueIter<MaskOnIterator, const LeafNode, ValueType, ValueOn>; friend struct ValueIter<MaskOffIterator, const LeafNode, ValueType, ValueOff>; friend struct ValueIter<MaskDenseIterator, const LeafNode, ValueType, ValueAll>; // Allow iterators to call mask accessor methods (see below). /// @todo Make mask accessors public? friend class IteratorBase<MaskOnIterator, LeafNode>; friend class IteratorBase<MaskOffIterator, LeafNode>; friend class IteratorBase<MaskDenseIterator, LeafNode>; // Mask accessors public: bool isValueMaskOn(Index n) const { return mValueMask.isOn(n); } bool isValueMaskOn() const { return mValueMask.isOn(); } bool isValueMaskOff(Index n) const { return mValueMask.isOff(n); } bool isValueMaskOff() const { return mValueMask.isOff(); } const NodeMaskType& getValueMask() const { return mValueMask; } NodeMaskType& getValueMask() { return mValueMask; } const NodeMaskType& valueMask() const { return mValueMask; } void setValueMask(const NodeMaskType& mask) { mValueMask = mask; } bool isChildMaskOn(Index) const { return false; } // leaf nodes have no children bool isChildMaskOff(Index) const { return true; } bool isChildMaskOff() const { return true; } protected: void setValueMask(Index n, bool on) { mValueMask.set(n, on); } void setValueMaskOn(Index n) { mValueMask.setOn(n); } void setValueMaskOff(Index n) { mValueMask.setOff(n); } inline void skipCompressedValues(bool seekable, std::istream&, bool fromHalf); /// Compute the origin of the leaf node that contains the voxel with the given coordinates. static void evalNodeOrigin(Coord& xyz) { xyz &= ~(DIM - 1); } template<typename NodeT, typename VisitorOp, typename ChildAllIterT> static inline void doVisit(NodeT&, VisitorOp&); template<typename NodeT, typename OtherNodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> static inline void doVisit2Node(NodeT& self, OtherNodeT& other, VisitorOp&); template<typename NodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> static inline void doVisit2(NodeT& self, OtherChildAllIterT&, VisitorOp&, bool otherIsLHS); private: /// Buffer containing the actual data values Buffer mBuffer; /// Bitmask that determines which voxels are active NodeMaskType mValueMask; /// Global grid index coordinates (x,y,z) of the local origin of this node Coord mOrigin; }; // end of LeafNode class //////////////////////////////////////// //@{ /// Helper metafunction used to implement LeafNode::SameConfiguration /// (which, as an inner class, can't be independently specialized) template<Index Dim1, typename NodeT2> struct SameLeafConfig { static const bool value = false; }; template<Index Dim1, typename T2> struct SameLeafConfig<Dim1, LeafNode<T2, Dim1> > { static const bool value = true; }; //@} //////////////////////////////////////// template<typename T, Index Log2Dim> inline LeafNode<T, Log2Dim>::LeafNode(): mValueMask(),//default is off! 
mOrigin(0, 0, 0) { } template<typename T, Index Log2Dim> inline LeafNode<T, Log2Dim>::LeafNode(const Coord& xyz, const ValueType& val, bool active): mBuffer(val), mValueMask(active), mOrigin(xyz & (~(DIM - 1))) { } template<typename T, Index Log2Dim> inline LeafNode<T, Log2Dim>::LeafNode(PartialCreate, const Coord& xyz, const ValueType& val, bool active): mBuffer(PartialCreate(), val), mValueMask(active), mOrigin(xyz & (~(DIM - 1))) { } template<typename T, Index Log2Dim> inline LeafNode<T, Log2Dim>::LeafNode(const LeafNode& other): mBuffer(other.mBuffer), mValueMask(other.valueMask()), mOrigin(other.mOrigin) { } // Copy-construct from a leaf node with the same configuration but a different ValueType. template<typename T, Index Log2Dim> template<typename OtherValueType> inline LeafNode<T, Log2Dim>::LeafNode(const LeafNode<OtherValueType, Log2Dim>& other): mValueMask(other.valueMask()), mOrigin(other.mOrigin) { struct Local { /// @todo Consider using a value conversion functor passed as an argument instead. static inline ValueType convertValue(const OtherValueType& val) { return ValueType(val); } }; for (Index i = 0; i < SIZE; ++i) { mBuffer[i] = Local::convertValue(other.mBuffer[i]); } } template<typename T, Index Log2Dim> template<typename OtherValueType> inline LeafNode<T, Log2Dim>::LeafNode(const LeafNode<OtherValueType, Log2Dim>& other, const ValueType& background, TopologyCopy): mBuffer(background), mValueMask(other.valueMask()), mOrigin(other.mOrigin) { } template<typename T, Index Log2Dim> template<typename OtherValueType> inline LeafNode<T, Log2Dim>::LeafNode(const LeafNode<OtherValueType, Log2Dim>& other, const ValueType& offValue, const ValueType& onValue, TopologyCopy): mValueMask(other.valueMask()), mOrigin(other.mOrigin) { for (Index i = 0; i < SIZE; ++i) { mBuffer[i] = (mValueMask.isOn(i) ? 
onValue : offValue); } } template<typename T, Index Log2Dim> inline LeafNode<T, Log2Dim>::~LeafNode() { } template<typename T, Index Log2Dim> inline std::string LeafNode<T, Log2Dim>::str() const { std::ostringstream ostr; ostr << "LeafNode @" << mOrigin << ": " << mBuffer; return ostr.str(); } //////////////////////////////////////// template<typename T, Index Log2Dim> inline Index LeafNode<T, Log2Dim>::coordToOffset(const Coord& xyz) { assert ((xyz[0] & (DIM-1u)) < DIM && (xyz[1] & (DIM-1u)) < DIM && (xyz[2] & (DIM-1u)) < DIM); return ((xyz[0] & (DIM-1u)) << 2*Log2Dim) + ((xyz[1] & (DIM-1u)) << Log2Dim) + (xyz[2] & (DIM-1u)); } template<typename T, Index Log2Dim> inline Coord LeafNode<T, Log2Dim>::offsetToLocalCoord(Index n) { assert(n<(1<< 3*Log2Dim)); Coord xyz; xyz.setX(n >> 2*Log2Dim); n &= ((1<<2*Log2Dim)-1); xyz.setY(n >> Log2Dim); xyz.setZ(n & ((1<<Log2Dim)-1)); return xyz; } template<typename T, Index Log2Dim> inline Coord LeafNode<T, Log2Dim>::offsetToGlobalCoord(Index n) const { return (this->offsetToLocalCoord(n) + this->origin()); } //////////////////////////////////////// template<typename ValueT, Index Log2Dim> inline const ValueT& LeafNode<ValueT, Log2Dim>::getValue(const Coord& xyz) const { return this->getValue(LeafNode::coordToOffset(xyz)); } template<typename ValueT, Index Log2Dim> inline const ValueT& LeafNode<ValueT, Log2Dim>::getValue(Index offset) const { assert(offset < SIZE); return mBuffer[offset]; } template<typename T, Index Log2Dim> inline bool LeafNode<T, Log2Dim>::probeValue(const Coord& xyz, ValueType& val) const { return this->probeValue(LeafNode::coordToOffset(xyz), val); } template<typename T, Index Log2Dim> inline bool LeafNode<T, Log2Dim>::probeValue(Index offset, ValueType& val) const { assert(offset < SIZE); val = mBuffer[offset]; return mValueMask.isOn(offset); } template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::setValueOff(const Coord& xyz, const ValueType& val) { this->setValueOff(LeafNode::coordToOffset(xyz), val); } template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::setValueOff(Index offset, const ValueType& val) { assert(offset < SIZE); mBuffer.setValue(offset, val); mValueMask.setOff(offset); } template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::setActiveState(const Coord& xyz, bool on) { mValueMask.set(this->coordToOffset(xyz), on); } template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::setValueOnly(const Coord& xyz, const ValueType& val) { this->setValueOnly(LeafNode::coordToOffset(xyz), val); } template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::setValueOnly(Index offset, const ValueType& val) { assert(offset<SIZE); mBuffer.setValue(offset, val); } //////////////////////////////////////// template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::clip(const CoordBBox& clipBBox, const T& background) { CoordBBox nodeBBox = this->getNodeBoundingBox(); if (!clipBBox.hasOverlap(nodeBBox)) { // This node lies completely outside the clipping region. Fill it with the background. this->fill(background, /*active=*/false); } else if (clipBBox.isInside(nodeBBox)) { // This node lies completely inside the clipping region. Leave it intact. return; } // This node isn't completely contained inside the clipping region. // Set any voxels that lie outside the region to the background value. // Construct a boolean mask that is on inside the clipping region and off outside it. 
NodeMaskType mask; nodeBBox.intersect(clipBBox); Coord xyz; int &x = xyz.x(), &y = xyz.y(), &z = xyz.z(); for (x = nodeBBox.min().x(); x <= nodeBBox.max().x(); ++x) { for (y = nodeBBox.min().y(); y <= nodeBBox.max().y(); ++y) { for (z = nodeBBox.min().z(); z <= nodeBBox.max().z(); ++z) { mask.setOn(static_cast<Index32>(this->coordToOffset(xyz))); } } } // Set voxels that lie in the inactive region of the mask (i.e., outside // the clipping region) to the background value. for (MaskOffIterator maskIter = mask.beginOff(); maskIter; ++maskIter) { this->setValueOff(maskIter.pos(), background); } } //////////////////////////////////////// template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::fill(const CoordBBox& bbox, const ValueType& value, bool active) { if (!this->allocate()) return; auto clippedBBox = this->getNodeBoundingBox(); clippedBBox.intersect(bbox); if (!clippedBBox) return; for (Int32 x = clippedBBox.min().x(); x <= clippedBBox.max().x(); ++x) { const Index offsetX = (x & (DIM-1u)) << 2*Log2Dim; for (Int32 y = clippedBBox.min().y(); y <= clippedBBox.max().y(); ++y) { const Index offsetXY = offsetX + ((y & (DIM-1u)) << Log2Dim); for (Int32 z = clippedBBox.min().z(); z <= clippedBBox.max().z(); ++z) { const Index offset = offsetXY + (z & (DIM-1u)); mBuffer[offset] = value; mValueMask.set(offset, active); } } } } template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::fill(const ValueType& value) { mBuffer.fill(value); } template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::fill(const ValueType& value, bool active) { mBuffer.fill(value); mValueMask.set(active); } //////////////////////////////////////// template<typename T, Index Log2Dim> template<typename DenseT> inline void LeafNode<T, Log2Dim>::copyToDense(const CoordBBox& bbox, DenseT& dense) const { mBuffer.loadValues(); using DenseValueType = typename DenseT::ValueType; const size_t xStride = dense.xStride(), yStride = dense.yStride(), zStride = dense.zStride(); const Coord& min = dense.bbox().min(); DenseValueType* t0 = dense.data() + zStride * (bbox.min()[2] - min[2]); // target array const T* s0 = &mBuffer[bbox.min()[2] & (DIM-1u)]; // source array for (Int32 x = bbox.min()[0], ex = bbox.max()[0] + 1; x < ex; ++x) { DenseValueType* t1 = t0 + xStride * (x - min[0]); const T* s1 = s0 + ((x & (DIM-1u)) << 2*Log2Dim); for (Int32 y = bbox.min()[1], ey = bbox.max()[1] + 1; y < ey; ++y) { DenseValueType* t2 = t1 + yStride * (y - min[1]); const T* s2 = s1 + ((y & (DIM-1u)) << Log2Dim); for (Int32 z = bbox.min()[2], ez = bbox.max()[2] + 1; z < ez; ++z, t2 += zStride) { *t2 = DenseValueType(*s2++); } } } } template<typename T, Index Log2Dim> template<typename DenseT> inline void LeafNode<T, Log2Dim>::copyFromDense(const CoordBBox& bbox, const DenseT& dense, const ValueType& background, const ValueType& tolerance) { if (!this->allocate()) return; using DenseValueType = typename DenseT::ValueType; const size_t xStride = dense.xStride(), yStride = dense.yStride(), zStride = dense.zStride(); const Coord& min = dense.bbox().min(); const DenseValueType* s0 = dense.data() + zStride * (bbox.min()[2] - min[2]); // source const Int32 n0 = bbox.min()[2] & (DIM-1u); for (Int32 x = bbox.min()[0], ex = bbox.max()[0]+1; x < ex; ++x) { const DenseValueType* s1 = s0 + xStride * (x - min[0]); const Int32 n1 = n0 + ((x & (DIM-1u)) << 2*LOG2DIM); for (Int32 y = bbox.min()[1], ey = bbox.max()[1]+1; y < ey; ++y) { const DenseValueType* s2 = s1 + yStride * (y - min[1]); Int32 n2 = n1 + ((y & (DIM-1u)) << 
LOG2DIM); for (Int32 z = bbox.min()[2], ez = bbox.max()[2]+1; z < ez; ++z, ++n2, s2 += zStride) { if (math::isApproxEqual(background, ValueType(*s2), tolerance)) { mValueMask.setOff(n2); mBuffer[n2] = background; } else { mValueMask.setOn(n2); mBuffer[n2] = ValueType(*s2); } } } } } //////////////////////////////////////// template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::readTopology(std::istream& is, bool /*fromHalf*/) { mValueMask.load(is); } template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::writeTopology(std::ostream& os, bool /*toHalf*/) const { mValueMask.save(os); } //////////////////////////////////////// template<typename T, Index Log2Dim> inline void LeafNode<T,Log2Dim>::skipCompressedValues(bool seekable, std::istream& is, bool fromHalf) { if (seekable) { // Seek over voxel values. io::readCompressedValues<ValueType, NodeMaskType>( is, nullptr, SIZE, mValueMask, fromHalf); } else { // Read and discard voxel values. Buffer temp; io::readCompressedValues(is, temp.mData, SIZE, mValueMask, fromHalf); } } template<typename T, Index Log2Dim> inline void LeafNode<T,Log2Dim>::readBuffers(std::istream& is, bool fromHalf) { this->readBuffers(is, CoordBBox::inf(), fromHalf); } template<typename T, Index Log2Dim> inline void LeafNode<T,Log2Dim>::readBuffers(std::istream& is, const CoordBBox& clipBBox, bool fromHalf) { SharedPtr<io::StreamMetadata> meta = io::getStreamMetadataPtr(is); const bool seekable = meta && meta->seekable(); std::streamoff maskpos = is.tellg(); if (seekable) { // Seek over the value mask. mValueMask.seek(is); } else { // Read in the value mask. mValueMask.load(is); } int8_t numBuffers = 1; if (io::getFormatVersion(is) < OPENVDB_FILE_VERSION_NODE_MASK_COMPRESSION) { // Read in the origin. is.read(reinterpret_cast<char*>(&mOrigin), sizeof(Coord::ValueType) * 3); // Read in the number of buffers, which should now always be one. is.read(reinterpret_cast<char*>(&numBuffers), sizeof(int8_t)); } CoordBBox nodeBBox = this->getNodeBoundingBox(); if (!clipBBox.hasOverlap(nodeBBox)) { // This node lies completely outside the clipping region. skipCompressedValues(seekable, is, fromHalf); mValueMask.setOff(); mBuffer.setOutOfCore(false); } else { // If this node lies completely inside the clipping region and it is being read // from a memory-mapped file, delay loading of its buffer until the buffer // is actually accessed. (If this node requires clipping, its buffer // must be accessed and therefore must be loaded.) io::MappedFile::Ptr mappedFile = io::getMappedFilePtr(is); const bool delayLoad = ((mappedFile.get() != nullptr) && clipBBox.isInside(nodeBBox)); if (delayLoad) { mBuffer.setOutOfCore(true); mBuffer.mFileInfo = new typename Buffer::FileInfo; mBuffer.mFileInfo->meta = meta; mBuffer.mFileInfo->bufpos = is.tellg(); mBuffer.mFileInfo->mapping = mappedFile; // Save the offset to the value mask, because the in-memory copy // might change before the value buffer gets read. mBuffer.mFileInfo->maskpos = maskpos; // Skip over voxel values. skipCompressedValues(seekable, is, fromHalf); } else { mBuffer.allocate(); io::readCompressedValues(is, mBuffer.mData, SIZE, mValueMask, fromHalf); mBuffer.setOutOfCore(false); // Get this tree's background value. T background = zeroVal<T>(); if (const void* bgPtr = io::getGridBackgroundValuePtr(is)) { background = *static_cast<const T*>(bgPtr); } this->clip(clipBBox, background); } } if (numBuffers > 1) { // Read in and discard auxiliary buffers that were created with earlier // versions of the library. 
(Auxiliary buffers are not mask compressed.) const bool zipped = io::getDataCompression(is) & io::COMPRESS_ZIP; Buffer temp; for (int i = 1; i < numBuffers; ++i) { if (fromHalf) { io::HalfReader<io::RealToHalf<T>::isReal, T>::read(is, temp.mData, SIZE, zipped); } else { io::readData<T>(is, temp.mData, SIZE, zipped); } } } // increment the leaf number if (meta) meta->setLeaf(meta->leaf() + 1); } template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::writeBuffers(std::ostream& os, bool toHalf) const { // Write out the value mask. mValueMask.save(os); mBuffer.loadValues(); io::writeCompressedValues(os, mBuffer.mData, SIZE, mValueMask, /*childMask=*/NodeMaskType(), toHalf); } //////////////////////////////////////// template<typename T, Index Log2Dim> inline bool LeafNode<T, Log2Dim>::operator==(const LeafNode& other) const { return mOrigin == other.mOrigin && mValueMask == other.valueMask() && mBuffer == other.mBuffer; } template<typename T, Index Log2Dim> inline Index64 LeafNode<T, Log2Dim>::memUsage() const { // Use sizeof(*this) to capture alignment-related padding // (but note that sizeof(*this) includes sizeof(mBuffer)). return sizeof(*this) + mBuffer.memUsage() - sizeof(mBuffer); } template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels) const { CoordBBox this_bbox = this->getNodeBoundingBox(); if (bbox.isInside(this_bbox)) return;//this LeafNode is already enclosed in the bbox if (ValueOnCIter iter = this->cbeginValueOn()) {//any active values? if (visitVoxels) {//use voxel granularity? this_bbox.reset(); for(; iter; ++iter) this_bbox.expand(this->offsetToLocalCoord(iter.pos())); this_bbox.translate(this->origin()); } bbox.expand(this_bbox); } } template<typename T, Index Log2Dim> template<typename OtherType, Index OtherLog2Dim> inline bool LeafNode<T, Log2Dim>::hasSameTopology(const LeafNode<OtherType, OtherLog2Dim>* other) const { assert(other); return (Log2Dim == OtherLog2Dim && mValueMask == other->getValueMask()); } template<typename T, Index Log2Dim> inline bool LeafNode<T, Log2Dim>::isConstant(ValueType& firstValue, bool& state, const ValueType& tolerance) const { if (!mValueMask.isConstant(state)) return false;// early termination firstValue = mBuffer[0]; for (Index i = 1; i < SIZE; ++i) { if ( !math::isApproxEqual(mBuffer[i], firstValue, tolerance) ) return false;// early termination } return true; } template<typename T, Index Log2Dim> inline bool LeafNode<T, Log2Dim>::isConstant(ValueType& minValue, ValueType& maxValue, bool& state, const ValueType& tolerance) const { if (!mValueMask.isConstant(state)) return false;// early termination minValue = maxValue = mBuffer[0]; for (Index i = 1; i < SIZE; ++i) { const T& v = mBuffer[i]; if (v < minValue) { if ((maxValue - v) > tolerance) return false;// early termination minValue = v; } else if (v > maxValue) { if ((v - minValue) > tolerance) return false;// early termination maxValue = v; } } return true; } template<typename T, Index Log2Dim> inline T LeafNode<T, Log2Dim>::medianAll(T *tmp) const { std::unique_ptr<T[]> data(nullptr); if (tmp == nullptr) {//allocate temporary storage data.reset(new T[NUM_VALUES]); tmp = data.get(); } if (tmp != mBuffer.data()) { const T* src = mBuffer.data(); for (T* dst = tmp; dst-tmp < NUM_VALUES;) *dst++ = *src++; } static const size_t midpoint = (NUM_VALUES - 1) >> 1; std::nth_element(tmp, tmp + midpoint, tmp + NUM_VALUES); return tmp[midpoint]; } template<typename T, Index Log2Dim> inline Index LeafNode<T, 
Log2Dim>::medianOn(T &value, T *tmp) const { const Index count = mValueMask.countOn(); if (count == NUM_VALUES) {//special case: all voxels are active value = this->medianAll(tmp); return NUM_VALUES; } else if (count == 0) { return 0; } std::unique_ptr<T[]> data(nullptr); if (tmp == nullptr) {//allocate temporary storage data.reset(new T[count]);// 0 < count < NUM_VALUES tmp = data.get(); } for (auto iter=this->cbeginValueOn(); iter; ++iter) *tmp++ = *iter; T *begin = tmp - count; const size_t midpoint = (count - 1) >> 1; std::nth_element(begin, begin + midpoint, tmp); value = begin[midpoint]; return count; } template<typename T, Index Log2Dim> inline Index LeafNode<T, Log2Dim>::medianOff(T &value, T *tmp) const { const Index count = mValueMask.countOff(); if (count == NUM_VALUES) {//special case: all voxels are inactive value = this->medianAll(tmp); return NUM_VALUES; } else if (count == 0) { return 0; } std::unique_ptr<T[]> data(nullptr); if (tmp == nullptr) {//allocate temporary storage data.reset(new T[count]);// 0 < count < NUM_VALUES tmp = data.get(); } for (auto iter=this->cbeginValueOff(); iter; ++iter) *tmp++ = *iter; T *begin = tmp - count; const size_t midpoint = (count - 1) >> 1; std::nth_element(begin, begin + midpoint, tmp); value = begin[midpoint]; return count; } //////////////////////////////////////// template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::addTile(Index /*level*/, const Coord& xyz, const ValueType& val, bool active) { this->addTile(this->coordToOffset(xyz), val, active); } template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::addTile(Index offset, const ValueType& val, bool active) { assert(offset < SIZE); setValueOnly(offset, val); setActiveState(offset, active); } template<typename T, Index Log2Dim> template<typename AccessorT> inline void LeafNode<T, Log2Dim>::addTileAndCache(Index level, const Coord& xyz, const ValueType& val, bool active, AccessorT&) { this->addTile(level, xyz, val, active); } //////////////////////////////////////// template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::resetBackground(const ValueType& oldBackground, const ValueType& newBackground) { if (!this->allocate()) return; typename NodeMaskType::OffIterator iter; // For all inactive values... 
for (iter = this->mValueMask.beginOff(); iter; ++iter) { ValueType &inactiveValue = mBuffer[iter.pos()]; if (math::isApproxEqual(inactiveValue, oldBackground)) { inactiveValue = newBackground; } else if (math::isApproxEqual(inactiveValue, math::negative(oldBackground))) { inactiveValue = math::negative(newBackground); } } } template<typename T, Index Log2Dim> template<MergePolicy Policy> inline void LeafNode<T, Log2Dim>::merge(const LeafNode& other) { if (!this->allocate()) return; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (Policy == MERGE_NODES) return; typename NodeMaskType::OnIterator iter = other.valueMask().beginOn(); for (; iter; ++iter) { const Index n = iter.pos(); if (mValueMask.isOff(n)) { mBuffer[n] = other.mBuffer[n]; mValueMask.setOn(n); } } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } template<typename T, Index Log2Dim> template<MergePolicy Policy> inline void LeafNode<T, Log2Dim>::merge(const LeafNode& other, const ValueType& /*bg*/, const ValueType& /*otherBG*/) { this->template merge<Policy>(other); } template<typename T, Index Log2Dim> template<MergePolicy Policy> inline void LeafNode<T, Log2Dim>::merge(const ValueType& tileValue, bool tileActive) { if (!this->allocate()) return; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (Policy != MERGE_ACTIVE_STATES_AND_NODES) return; if (!tileActive) return; // Replace all inactive values with the active tile value. for (typename NodeMaskType::OffIterator iter = mValueMask.beginOff(); iter; ++iter) { const Index n = iter.pos(); mBuffer[n] = tileValue; mValueMask.setOn(n); } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } template<typename T, Index Log2Dim> template<typename OtherType> inline void LeafNode<T, Log2Dim>::topologyUnion(const LeafNode<OtherType, Log2Dim>& other) { mValueMask |= other.valueMask(); } template<typename T, Index Log2Dim> template<typename OtherType> inline void LeafNode<T, Log2Dim>::topologyIntersection(const LeafNode<OtherType, Log2Dim>& other, const ValueType&) { mValueMask &= other.valueMask(); } template<typename T, Index Log2Dim> template<typename OtherType> inline void LeafNode<T, Log2Dim>::topologyDifference(const LeafNode<OtherType, Log2Dim>& other, const ValueType&) { mValueMask &= !other.valueMask(); } template<typename T, Index Log2Dim> inline void LeafNode<T, Log2Dim>::negate() { if (!this->allocate()) return; for (Index i = 0; i < SIZE; ++i) { mBuffer[i] = -mBuffer[i]; } } //////////////////////////////////////// template<typename T, Index Log2Dim> template<typename CombineOp> inline void LeafNode<T, Log2Dim>::combine(const LeafNode& other, CombineOp& op) { if (!this->allocate()) return; CombineArgs<T> args; for (Index i = 0; i < SIZE; ++i) { op(args.setARef(mBuffer[i]) .setAIsActive(mValueMask.isOn(i)) .setBRef(other.mBuffer[i]) .setBIsActive(other.valueMask().isOn(i)) .setResultRef(mBuffer[i])); mValueMask.set(i, args.resultIsActive()); } } template<typename T, Index Log2Dim> template<typename CombineOp> inline void LeafNode<T, Log2Dim>::combine(const ValueType& value, bool valueIsActive, CombineOp& op) { if (!this->allocate()) return; CombineArgs<T> args; args.setBRef(value).setBIsActive(valueIsActive); for (Index i = 0; i < SIZE; ++i) { op(args.setARef(mBuffer[i]) .setAIsActive(mValueMask.isOn(i)) .setResultRef(mBuffer[i])); mValueMask.set(i, args.resultIsActive()); } } //////////////////////////////////////// template<typename T, Index Log2Dim> template<typename CombineOp, typename OtherType> inline void LeafNode<T, Log2Dim>::combine2(const LeafNode& other, const OtherType& value, bool 
valueIsActive, CombineOp& op) { if (!this->allocate()) return; CombineArgs<T, OtherType> args; args.setBRef(value).setBIsActive(valueIsActive); for (Index i = 0; i < SIZE; ++i) { op(args.setARef(other.mBuffer[i]) .setAIsActive(other.valueMask().isOn(i)) .setResultRef(mBuffer[i])); mValueMask.set(i, args.resultIsActive()); } } template<typename T, Index Log2Dim> template<typename CombineOp, typename OtherNodeT> inline void LeafNode<T, Log2Dim>::combine2(const ValueType& value, const OtherNodeT& other, bool valueIsActive, CombineOp& op) { if (!this->allocate()) return; CombineArgs<T, typename OtherNodeT::ValueType> args; args.setARef(value).setAIsActive(valueIsActive); for (Index i = 0; i < SIZE; ++i) { op(args.setBRef(other.mBuffer[i]) .setBIsActive(other.valueMask().isOn(i)) .setResultRef(mBuffer[i])); mValueMask.set(i, args.resultIsActive()); } } template<typename T, Index Log2Dim> template<typename CombineOp, typename OtherNodeT> inline void LeafNode<T, Log2Dim>::combine2(const LeafNode& b0, const OtherNodeT& b1, CombineOp& op) { if (!this->allocate()) return; CombineArgs<T, typename OtherNodeT::ValueType> args; for (Index i = 0; i < SIZE; ++i) { mValueMask.set(i, b0.valueMask().isOn(i) || b1.valueMask().isOn(i)); op(args.setARef(b0.mBuffer[i]) .setAIsActive(b0.valueMask().isOn(i)) .setBRef(b1.mBuffer[i]) .setBIsActive(b1.valueMask().isOn(i)) .setResultRef(mBuffer[i])); mValueMask.set(i, args.resultIsActive()); } } //////////////////////////////////////// template<typename T, Index Log2Dim> template<typename BBoxOp> inline void LeafNode<T, Log2Dim>::visitActiveBBox(BBoxOp& op) const { if (op.template descent<LEVEL>()) { for (ValueOnCIter i=this->cbeginValueOn(); i; ++i) { op.template operator()<LEVEL>(CoordBBox::createCube(i.getCoord(), 1)); } } else { op.template operator()<LEVEL>(this->getNodeBoundingBox()); } } template<typename T, Index Log2Dim> template<typename VisitorOp> inline void LeafNode<T, Log2Dim>::visit(VisitorOp& op) { doVisit<LeafNode, VisitorOp, ChildAllIter>(*this, op); } template<typename T, Index Log2Dim> template<typename VisitorOp> inline void LeafNode<T, Log2Dim>::visit(VisitorOp& op) const { doVisit<const LeafNode, VisitorOp, ChildAllCIter>(*this, op); } template<typename T, Index Log2Dim> template<typename NodeT, typename VisitorOp, typename ChildAllIterT> inline void LeafNode<T, Log2Dim>::doVisit(NodeT& self, VisitorOp& op) { for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { op(iter); } } //////////////////////////////////////// template<typename T, Index Log2Dim> template<typename OtherLeafNodeType, typename VisitorOp> inline void LeafNode<T, Log2Dim>::visit2Node(OtherLeafNodeType& other, VisitorOp& op) { doVisit2Node<LeafNode, OtherLeafNodeType, VisitorOp, ChildAllIter, typename OtherLeafNodeType::ChildAllIter>(*this, other, op); } template<typename T, Index Log2Dim> template<typename OtherLeafNodeType, typename VisitorOp> inline void LeafNode<T, Log2Dim>::visit2Node(OtherLeafNodeType& other, VisitorOp& op) const { doVisit2Node<const LeafNode, OtherLeafNodeType, VisitorOp, ChildAllCIter, typename OtherLeafNodeType::ChildAllCIter>(*this, other, op); } template<typename T, Index Log2Dim> template< typename NodeT, typename OtherNodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> inline void LeafNode<T, Log2Dim>::doVisit2Node(NodeT& self, OtherNodeT& other, VisitorOp& op) { // Allow the two nodes to have different ValueTypes, but not different dimensions. 
static_assert(OtherNodeT::SIZE == NodeT::SIZE, "can't visit nodes of different sizes simultaneously"); static_assert(OtherNodeT::LEVEL == NodeT::LEVEL, "can't visit nodes at different tree levels simultaneously"); ChildAllIterT iter = self.beginChildAll(); OtherChildAllIterT otherIter = other.beginChildAll(); for ( ; iter && otherIter; ++iter, ++otherIter) { op(iter, otherIter); } } //////////////////////////////////////// template<typename T, Index Log2Dim> template<typename IterT, typename VisitorOp> inline void LeafNode<T, Log2Dim>::visit2(IterT& otherIter, VisitorOp& op, bool otherIsLHS) { doVisit2<LeafNode, VisitorOp, ChildAllIter, IterT>( *this, otherIter, op, otherIsLHS); } template<typename T, Index Log2Dim> template<typename IterT, typename VisitorOp> inline void LeafNode<T, Log2Dim>::visit2(IterT& otherIter, VisitorOp& op, bool otherIsLHS) const { doVisit2<const LeafNode, VisitorOp, ChildAllCIter, IterT>( *this, otherIter, op, otherIsLHS); } template<typename T, Index Log2Dim> template< typename NodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> inline void LeafNode<T, Log2Dim>::doVisit2(NodeT& self, OtherChildAllIterT& otherIter, VisitorOp& op, bool otherIsLHS) { if (!otherIter) return; if (otherIsLHS) { for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { op(otherIter, iter); } } else { for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { op(iter, otherIter); } } } //////////////////////////////////////// template<typename T, Index Log2Dim> inline std::ostream& operator<<(std::ostream& os, const typename LeafNode<T, Log2Dim>::Buffer& buf) { for (Index32 i = 0, N = buf.size(); i < N; ++i) os << buf.mData[i] << ", "; return os; } } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb //////////////////////////////////////// // Specialization for LeafNodes of type bool #include "LeafNodeBool.h" // Specialization for LeafNodes with mask information only #include "LeafNodeMask.h" #endif // OPENVDB_TREE_LEAFNODE_HAS_BEEN_INCLUDED
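// ----------------------------------------------------------------------------
// Editorial sketch (not part of the original header): a minimal example of the
// LeafNode accessor and iterator API documented above. It assumes the standard
// openvdb::FloatTree leaf configuration (ValueType = float, Log2Dim = 3) and an
// initialized library; the coordinates and values are illustrative only. The
// code is kept inside comments so that it cannot affect compilation.
//
//     #include <openvdb/openvdb.h>
//     #include <iostream>
//
//     int main()
//     {
//         openvdb::initialize();
//         using LeafT = openvdb::FloatTree::LeafNodeType; // LeafNode<float, 3>
//
//         // A leaf covering the 8^3 block at the origin, filled with an
//         // inactive background value of 0.
//         LeafT leaf(openvdb::Coord(0, 0, 0), /*value=*/0.0f, /*active=*/false);
//
//         // setValueOn() writes the value and switches the voxel's mask bit on.
//         leaf.setValueOn(openvdb::Coord(1, 2, 3), 5.0f);
//
//         // coordToOffset() maps local (x,y,z) to the linear buffer offset.
//         const openvdb::Index offset = LeafT::coordToOffset(openvdb::Coord(1, 2, 3));
//         std::cout << "value at offset " << offset << " = "
//                   << leaf.getValue(offset) << "\n";
//
//         // Value iterators visit only the voxels selected by the value mask.
//         for (auto iter = leaf.cbeginValueOn(); iter; ++iter) {
//             std::cout << iter.getCoord() << " -> " << *iter << "\n";
//         }
//         return 0;
//     }
// ----------------------------------------------------------------------------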
75,094
C
36.831234
104
0.661504
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/Iterator.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file tree/Iterator.h /// /// @author Peter Cucka and Ken Museth #ifndef OPENVDB_TREE_ITERATOR_HAS_BEEN_INCLUDED #define OPENVDB_TREE_ITERATOR_HAS_BEEN_INCLUDED #include <sstream> #include <type_traits> #include <openvdb/util/NodeMasks.h> #include <openvdb/Exceptions.h> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { /// @brief Base class for iterators over internal and leaf nodes /// /// This class is typically not instantiated directly, since it doesn't provide methods /// to dereference the iterator. Those methods (@vdblink::tree::SparseIteratorBase::operator*() /// operator*()@endlink, @vdblink::tree::SparseIteratorBase::setValue() setValue()@endlink, etc.) /// are implemented in the @vdblink::tree::SparseIteratorBase sparse@endlink and /// @vdblink::tree::DenseIteratorBase dense@endlink iterator subclasses. template<typename MaskIterT, typename NodeT> class IteratorBase { public: IteratorBase(): mParentNode(nullptr) {} IteratorBase(const MaskIterT& iter, NodeT* parent): mParentNode(parent), mMaskIter(iter) {} IteratorBase(const IteratorBase&) = default; IteratorBase& operator=(const IteratorBase&) = default; bool operator==(const IteratorBase& other) const { return (mParentNode == other.mParentNode) && (mMaskIter == other.mMaskIter); } bool operator!=(const IteratorBase& other) const { return !(*this == other); } /// Return a pointer to the node (if any) over which this iterator is iterating. NodeT* getParentNode() const { return mParentNode; } /// @brief Return a reference to the node over which this iterator is iterating. /// @throw ValueError if there is no parent node. NodeT& parent() const { if (!mParentNode) OPENVDB_THROW(ValueError, "iterator references a null node"); return *mParentNode; } /// Return this iterator's position as an index into the parent node's table. Index offset() const { return mMaskIter.offset(); } /// Identical to offset Index pos() const { return mMaskIter.offset(); } /// Return @c true if this iterator is not yet exhausted. bool test() const { return mMaskIter.test(); } /// Return @c true if this iterator is not yet exhausted. operator bool() const { return this->test(); } /// Advance to the next item in the parent node's table. bool next() { return mMaskIter.next(); } /// Advance to the next item in the parent node's table. void increment() { mMaskIter.increment(); } /// Advance to the next item in the parent node's table. IteratorBase& operator++() { this->increment(); return *this; } /// Advance @a n items in the parent node's table. void increment(Index n) { mMaskIter.increment(n); } /// @brief Return @c true if this iterator is pointing to an active value. /// Return @c false if it is pointing to either an inactive value or a child node. bool isValueOn() const { return parent().isValueMaskOn(this->pos()); } /// @brief If this iterator is pointing to a value, set the value's active state. /// Otherwise, do nothing. void setValueOn(bool on = true) const { parent().setValueMask(this->pos(), on); } /// @brief If this iterator is pointing to a value, mark the value as inactive. /// @details If this iterator is pointing to a child node, then the current item /// in the parent node's table is required to be inactive. In that case, /// this method has no effect. void setValueOff() const { parent().mValueMask.setOff(this->pos()); } /// Return the coordinates of the item to which this iterator is pointing. 
Coord getCoord() const { return parent().offsetToGlobalCoord(this->pos()); } /// Return in @a xyz the coordinates of the item to which this iterator is pointing. void getCoord(Coord& xyz) const { xyz = this->getCoord(); } private: /// @note This parent node pointer is mutable, because setValueOn() and /// setValueOff(), though const, need to call non-const methods on the parent. /// There is a distinction between a const iterator (e.g., const ValueOnIter), /// which is an iterator that can't be incremented, and an iterator over /// a const node (e.g., ValueOnCIter), which might be const or non-const itself /// but can't call non-const methods like setValue() on the node. mutable NodeT* mParentNode; MaskIterT mMaskIter; }; // class IteratorBase //////////////////////////////////////// /// @brief Base class for sparse iterators over internal and leaf nodes template< typename MaskIterT, // mask iterator type (OnIterator, OffIterator, etc.) typename IterT, // SparseIteratorBase subclass (the "Curiously Recurring Template Pattern") typename NodeT, // type of node over which to iterate typename ItemT> // type of value to which this iterator points struct SparseIteratorBase: public IteratorBase<MaskIterT, NodeT> { using NodeType = NodeT; using ValueType = ItemT; using NonConstNodeType = typename std::remove_const<NodeT>::type; using NonConstValueType = typename std::remove_const<ItemT>::type; static const bool IsSparseIterator = true, IsDenseIterator = false; SparseIteratorBase() {} SparseIteratorBase(const MaskIterT& iter, NodeT* parent): IteratorBase<MaskIterT, NodeT>(iter, parent) {} /// @brief Return the item at the given index in the parent node's table. /// @note All subclasses must implement this accessor. ItemT& getItem(Index) const; /// @brief Set the value of the item at the given index in the parent node's table. /// @note All non-const iterator subclasses must implement this accessor. void setItem(Index, const ItemT&) const; /// Return a reference to the item to which this iterator is pointing. ItemT& operator*() const { return this->getValue(); } /// Return a pointer to the item to which this iterator is pointing. ItemT* operator->() const { return &(this->operator*()); } /// Return the item to which this iterator is pointing. ItemT& getValue() const { return static_cast<const IterT*>(this)->getItem(this->pos()); // static polymorphism } /// @brief Set the value of the item to which this iterator is pointing. /// (Not valid for const iterators.) void setValue(const ItemT& value) const { static_assert(!std::is_const<NodeT>::value, "setValue() not allowed for const iterators"); static_cast<const IterT*>(this)->setItem(this->pos(), value); // static polymorphism } /// @brief Apply a functor to the item to which this iterator is pointing. /// (Not valid for const iterators.) 
/// @param op a functor of the form <tt>void op(ValueType&) const</tt> that modifies /// its argument in place /// @see Tree::modifyValue() template<typename ModifyOp> void modifyValue(const ModifyOp& op) const { static_assert(!std::is_const<NodeT>::value, "modifyValue() not allowed for const iterators"); static_cast<const IterT*>(this)->modifyItem(this->pos(), op); // static polymorphism } }; // class SparseIteratorBase //////////////////////////////////////// /// @brief Base class for dense iterators over internal and leaf nodes /// @note Dense iterators have no @c %operator*() or @c %operator->(), /// because their return type would have to vary depending on whether /// the iterator is pointing to a value or a child node. template< typename MaskIterT, // mask iterator type (typically a DenseIterator) typename IterT, // DenseIteratorBase subclass (the "Curiously Recurring Template Pattern") typename NodeT, // type of node over which to iterate typename SetItemT, // type of set value (ChildNodeType, for non-leaf nodes) typename UnsetItemT> // type of unset value (ValueType, usually) struct DenseIteratorBase: public IteratorBase<MaskIterT, NodeT> { using NodeType = NodeT; using ValueType = UnsetItemT; using ChildNodeType = SetItemT; using NonConstNodeType = typename std::remove_const<NodeT>::type; using NonConstValueType = typename std::remove_const<UnsetItemT>::type; using NonConstChildNodeType = typename std::remove_const<SetItemT>::type; static const bool IsSparseIterator = false, IsDenseIterator = true; DenseIteratorBase() {} DenseIteratorBase(const MaskIterT& iter, NodeT* parent): IteratorBase<MaskIterT, NodeT>(iter, parent) {} /// @brief Return @c true if the item at the given index in the parent node's table /// is a set value and return either the set value in @a child or the unset value /// in @a value. /// @note All subclasses must implement this accessor. bool getItem(Index, SetItemT*& child, NonConstValueType& value) const; /// @brief Set the value of the item at the given index in the parent node's table. /// @note All non-const iterator subclasses must implement this accessor. void setItem(Index, SetItemT*) const; /// @brief "Unset" the value of the item at the given index in the parent node's table. /// @note All non-const iterator subclasses must implement this accessor. void unsetItem(Index, const UnsetItemT&) const; /// Return @c true if this iterator is pointing to a child node. bool isChildNode() const { return this->parent().isChildMaskOn(this->pos()); } /// @brief If this iterator is pointing to a child node, return a pointer to the node. /// Otherwise, return nullptr and, in @a value, the value to which this iterator is pointing. SetItemT* probeChild(NonConstValueType& value) const { SetItemT* child = nullptr; static_cast<const IterT*>(this)->getItem(this->pos(), child, value); // static polymorphism return child; } /// @brief If this iterator is pointing to a child node, return @c true and return /// a pointer to the child node in @a child. Otherwise, return @c false and return /// the value to which this iterator is pointing in @a value. bool probeChild(SetItemT*& child, NonConstValueType& value) const { child = probeChild(value); return (child != nullptr); } /// @brief Return @c true if this iterator is pointing to a value and return /// the value in @a value. Otherwise, return @c false. 
bool probeValue(NonConstValueType& value) const { SetItemT* child = nullptr; const bool isChild = static_cast<const IterT*>(this)-> // static polymorphism getItem(this->pos(), child, value); return !isChild; } /// @brief Replace with the given child node the item in the parent node's table /// to which this iterator is pointing. void setChild(SetItemT* child) const { static_cast<const IterT*>(this)->setItem(this->pos(), child); // static polymorphism } /// @brief Replace with the given value the item in the parent node's table /// to which this iterator is pointing. void setValue(const UnsetItemT& value) const { static_cast<const IterT*>(this)->unsetItem(this->pos(), value); // static polymorphism } }; // struct DenseIteratorBase } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_ITERATOR_HAS_BEEN_INCLUDED
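////////////////////////////////////////

// Illustrative usage sketch (not part of this header): the iterator bases defined
// above are surfaced through the value and child iterators of the tree's leaf and
// internal nodes. The snippet below assumes the standard openvdb::FloatGrid type
// and the tree- and leaf-level iterator accessors declared elsewhere in the
// library; it simply shows how the getCoord(), operator*() and setValue() methods
// documented above are typically reached in client code.
//
// @code
// #include <openvdb/openvdb.h>
// #include <iostream>
//
// openvdb::FloatGrid::Ptr grid = openvdb::FloatGrid::create(/*background=*/0.0f);
// grid->tree().setValue(openvdb::Coord(0, 0, 0), 1.0f);
//
// // Visit each leaf node, then each active ("on") value within it.
// for (auto leafIter = grid->tree().beginLeaf(); leafIter; ++leafIter) {
//     for (auto valueIter = leafIter->beginValueOn(); valueIter; ++valueIter) {
//         const openvdb::Coord xyz = valueIter.getCoord();  // IteratorBase::getCoord()
//         std::cout << xyz << " = " << *valueIter << "\n";  // SparseIteratorBase::operator*()
//         valueIter.setValue(*valueIter * 2.0f);            // SparseIteratorBase::setValue()
//     }
// }
// @endcode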
11,473
C
44.173228
99
0.683692
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/NodeManager.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file tree/NodeManager.h /// /// @authors Ken Museth, Dan Bailey /// /// @brief NodeManager produces linear arrays of all tree nodes /// allowing for efficient threading and bottom-up processing. /// /// @note A NodeManager can be constructed from a Tree or LeafManager. #ifndef OPENVDB_TREE_NODEMANAGER_HAS_BEEN_INCLUDED #define OPENVDB_TREE_NODEMANAGER_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <deque> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { // Produce linear arrays of all tree nodes, to facilitate efficient threading // and bottom-up processing. template<typename TreeOrLeafManagerT, Index LEVELS = TreeOrLeafManagerT::RootNodeType::LEVEL> class NodeManager; // Produce linear arrays of all tree nodes lazily, to facilitate efficient threading // of topology-changing top-down workflows. template<typename TreeOrLeafManagerT, Index _LEVELS = TreeOrLeafManagerT::RootNodeType::LEVEL> class DynamicNodeManager; //////////////////////////////////////// // This is a dummy node filtering class used by the NodeManager class to match // the internal filtering interface used by the DynamicNodeManager. struct NodeFilter { static bool valid(size_t) { return true; } }; // struct NodeFilter /// @brief This class caches tree nodes of a specific type in a linear array. /// /// @note It is for internal use and should rarely be used directly. template<typename NodeT> class NodeList { public: NodeList() = default; NodeT& operator()(size_t n) const { assert(n<mNodeCount); return *(mNodes[n]); } NodeT*& operator[](size_t n) { assert(n<mNodeCount); return mNodes[n]; } Index64 nodeCount() const { return mNodeCount; } void clear() { mNodePtrs.reset(); mNodes = nullptr; mNodeCount = 0; } // initialize this node list from the provided root node template <typename RootT> bool initRootChildren(RootT& root) { // Allocate (or deallocate) the node pointer array size_t nodeCount = root.childCount(); if (nodeCount != mNodeCount) { if (nodeCount > 0) { mNodePtrs.reset(new NodeT*[nodeCount]); mNodes = mNodePtrs.get(); } else { mNodePtrs.reset(); mNodes = nullptr; } mNodeCount = nodeCount; } if (mNodeCount == 0) return false; // Populate the node pointers NodeT** nodePtr = mNodes; for (auto iter = root.beginChildOn(); iter; ++iter) { *nodePtr++ = &iter.getValue(); } return true; } // initialize this node list from another node list containing the parent nodes template <typename ParentsT, typename NodeFilterT> bool initNodeChildren(ParentsT& parents, const NodeFilterT& nodeFilter = NodeFilterT(), bool serial = false) { // Compute the node counts for each node std::vector<Index32> nodeCounts; if (serial) { nodeCounts.reserve(parents.nodeCount()); for (size_t i = 0; i < parents.nodeCount(); i++) { if (!nodeFilter.valid(i)) nodeCounts.push_back(0); else nodeCounts.push_back(parents(i).childCount()); } } else { nodeCounts.resize(parents.nodeCount()); tbb::parallel_for( // with typical node sizes and SSE enabled, there are only a handful // of instructions executed per-operation with a default grainsize // of 1, so increase to 64 to reduce parallel scheduling overhead tbb::blocked_range<Index64>(0, parents.nodeCount(), /*grainsize=*/64), [&](tbb::blocked_range<Index64>& range) { for (Index64 i = range.begin(); i < range.end(); i++) { if (!nodeFilter.valid(i)) nodeCounts[i] = 0; else nodeCounts[i] = 
parents(i).childCount(); } } ); } // Turn node counts into a cumulative histogram and obtain total node count for (size_t i = 1; i < nodeCounts.size(); i++) { nodeCounts[i] += nodeCounts[i-1]; } const size_t nodeCount = nodeCounts.empty() ? 0 : nodeCounts.back(); // Allocate (or deallocate) the node pointer array if (nodeCount != mNodeCount) { if (nodeCount > 0) { mNodePtrs.reset(new NodeT*[nodeCount]); mNodes = mNodePtrs.get(); } else { mNodePtrs.reset(); mNodes = nullptr; } mNodeCount = nodeCount; } if (mNodeCount == 0) return false; // Populate the node pointers if (serial) { NodeT** nodePtr = mNodes; for (size_t i = 0; i < parents.nodeCount(); i++) { if (!nodeFilter.valid(i)) continue; for (auto iter = parents(i).beginChildOn(); iter; ++iter) { *nodePtr++ = &iter.getValue(); } } } else { tbb::parallel_for( tbb::blocked_range<Index64>(0, parents.nodeCount()), [&](tbb::blocked_range<Index64>& range) { Index64 i = range.begin(); NodeT** nodePtr = mNodes; if (i > 0) nodePtr += nodeCounts[i-1]; for ( ; i < range.end(); i++) { if (!nodeFilter.valid(i)) continue; for (auto iter = parents(i).beginChildOn(); iter; ++iter) { *nodePtr++ = &iter.getValue(); } } } ); } return true; } class NodeRange { public: NodeRange(size_t begin, size_t end, const NodeList& nodeList, size_t grainSize=1): mEnd(end), mBegin(begin), mGrainSize(grainSize), mNodeList(nodeList) {} NodeRange(NodeRange& r, tbb::split): mEnd(r.mEnd), mBegin(doSplit(r)), mGrainSize(r.mGrainSize), mNodeList(r.mNodeList) {} size_t size() const { return mEnd - mBegin; } size_t grainsize() const { return mGrainSize; } const NodeList& nodeList() const { return mNodeList; } bool empty() const {return !(mBegin < mEnd);} bool is_divisible() const {return mGrainSize < this->size();} class Iterator { public: Iterator(const NodeRange& range, size_t pos): mRange(range), mPos(pos) { assert(this->isValid()); } Iterator(const Iterator&) = default; Iterator& operator=(const Iterator&) = default; /// Advance to the next node. Iterator& operator++() { ++mPos; return *this; } /// Return a reference to the node to which this iterator is pointing. NodeT& operator*() const { return mRange.mNodeList(mPos); } /// Return a pointer to the node to which this iterator is pointing. NodeT* operator->() const { return &(this->operator*()); } /// Return the index into the list of the current node. size_t pos() const { return mPos; } bool isValid() const { return mPos>=mRange.mBegin && mPos<=mRange.mEnd; } /// Return @c true if this iterator is not yet exhausted. bool test() const { return mPos < mRange.mEnd; } /// Return @c true if this iterator is not yet exhausted. operator bool() const { return this->test(); } /// Return @c true if this iterator is exhausted. bool empty() const { return !this->test(); } bool operator!=(const Iterator& other) const { return (mPos != other.mPos) || (&mRange != &other.mRange); } bool operator==(const Iterator& other) const { return !(*this != other); } const NodeRange& nodeRange() const { return mRange; } private: const NodeRange& mRange; size_t mPos; };// NodeList::NodeRange::Iterator Iterator begin() const {return Iterator(*this, mBegin);} Iterator end() const {return Iterator(*this, mEnd);} private: size_t mEnd, mBegin, mGrainSize; const NodeList& mNodeList; static size_t doSplit(NodeRange& r) { assert(r.is_divisible()); size_t middle = r.mBegin + (r.mEnd - r.mBegin) / 2u; r.mEnd = middle; return middle; } };// NodeList::NodeRange /// Return a TBB-compatible NodeRange. 
NodeRange nodeRange(size_t grainsize = 1) const { return NodeRange(0, this->nodeCount(), *this, grainsize); } template<typename NodeOp> void foreach(const NodeOp& op, bool threaded = true, size_t grainSize=1) { NodeTransformer<NodeOp> transform(op); transform.run(this->nodeRange(grainSize), threaded); } template<typename NodeOp> void reduce(NodeOp& op, bool threaded = true, size_t grainSize=1) { NodeReducer<NodeOp> transform(op); transform.run(this->nodeRange(grainSize), threaded); } // identical to foreach except the operator() method has a node index template<typename NodeOp> void foreachWithIndex(const NodeOp& op, bool threaded = true, size_t grainSize=1) { NodeTransformer<NodeOp, OpWithIndex> transform(op); transform.run(this->nodeRange(grainSize), threaded); } // identical to reduce except the operator() method has a node index template<typename NodeOp> void reduceWithIndex(NodeOp& op, bool threaded = true, size_t grainSize=1) { NodeReducer<NodeOp, OpWithIndex> transform(op); transform.run(this->nodeRange(grainSize), threaded); } private: // default execution in the NodeManager ignores the node index // given by the iterator position struct OpWithoutIndex { template <typename T> static void eval(T& node, typename NodeRange::Iterator& iter) { node(*iter); } }; // execution in the DynamicNodeManager matches that of the LeafManager in // passing through the node index given by the iterator position struct OpWithIndex { template <typename T> static void eval(T& node, typename NodeRange::Iterator& iter) { node(*iter, iter.pos()); } }; // Private struct of NodeList that performs parallel_for template<typename NodeOp, typename OpT = OpWithoutIndex> struct NodeTransformer { NodeTransformer(const NodeOp& nodeOp) : mNodeOp(nodeOp) { } void run(const NodeRange& range, bool threaded = true) { threaded ? tbb::parallel_for(range, *this) : (*this)(range); } void operator()(const NodeRange& range) const { for (typename NodeRange::Iterator it = range.begin(); it; ++it) { OpT::template eval(mNodeOp, it); } } const NodeOp mNodeOp; };// NodeList::NodeTransformer // Private struct of NodeList that performs parallel_reduce template<typename NodeOp, typename OpT = OpWithoutIndex> struct NodeReducer { NodeReducer(NodeOp& nodeOp) : mNodeOp(&nodeOp) { } NodeReducer(const NodeReducer& other, tbb::split) : mNodeOpPtr(std::make_unique<NodeOp>(*(other.mNodeOp), tbb::split())) , mNodeOp(mNodeOpPtr.get()) { } void run(const NodeRange& range, bool threaded = true) { threaded ? tbb::parallel_reduce(range, *this) : (*this)(range); } void operator()(const NodeRange& range) { for (typename NodeRange::Iterator it = range.begin(); it; ++it) { OpT::template eval(*mNodeOp, it); } } void join(const NodeReducer& other) { mNodeOp->join(*(other.mNodeOp)); } std::unique_ptr<NodeOp> mNodeOpPtr; NodeOp *mNodeOp = nullptr; };// NodeList::NodeReducer protected: size_t mNodeCount = 0; std::unique_ptr<NodeT*[]> mNodePtrs; NodeT** mNodes = nullptr; };// NodeList ///////////////////////////////////////////// /// @brief This class is a link in a chain that each caches tree nodes /// of a specific type in a linear array. /// /// @note It is for internal use and should rarely be used directly. 
template<typename NodeT, Index LEVEL> class NodeManagerLink { public: using NonConstChildNodeType = typename NodeT::ChildNodeType; using ChildNodeType = typename CopyConstness<NodeT, NonConstChildNodeType>::Type; NodeManagerLink() = default; void clear() { mList.clear(); mNext.clear(); } template <typename RootT> void initRootChildren(RootT& root, bool serial = false) { mList.initRootChildren(root); mNext.initNodeChildren(mList, serial); } template<typename ParentsT> void initNodeChildren(ParentsT& parents, bool serial = false) { mList.initNodeChildren(parents, NodeFilter(), serial); mNext.initNodeChildren(mList, serial); } Index64 nodeCount() const { return mList.nodeCount() + mNext.nodeCount(); } Index64 nodeCount(Index i) const { return i==NodeT::LEVEL ? mList.nodeCount() : mNext.nodeCount(i); } template<typename NodeOp> void foreachBottomUp(const NodeOp& op, bool threaded, size_t grainSize) { mNext.foreachBottomUp(op, threaded, grainSize); mList.foreach(op, threaded, grainSize); } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded, size_t grainSize) { mList.foreach(op, threaded, grainSize); mNext.foreachTopDown(op, threaded, grainSize); } template<typename NodeOp> void reduceBottomUp(NodeOp& op, bool threaded, size_t grainSize) { mNext.reduceBottomUp(op, threaded, grainSize); mList.reduce(op, threaded, grainSize); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded, size_t grainSize) { mList.reduce(op, threaded, grainSize); mNext.reduceTopDown(op, threaded, grainSize); } protected: NodeList<NodeT> mList; NodeManagerLink<ChildNodeType, LEVEL-1> mNext; };// NodeManagerLink class //////////////////////////////////////// /// @private /// @brief Specialization that terminates the chain of cached tree nodes /// @note It is for internal use and should rarely be used directly. template<typename NodeT> class NodeManagerLink<NodeT, 0> { public: NodeManagerLink() = default; /// @brief Clear all the cached tree nodes void clear() { mList.clear(); } template <typename RootT> void initRootChildren(RootT& root, bool /*serial*/ = false) { mList.initRootChildren(root); } template<typename ParentsT> void initNodeChildren(ParentsT& parents, bool serial = false) { mList.initNodeChildren(parents, NodeFilter(), serial); } Index64 nodeCount() const { return mList.nodeCount(); } Index64 nodeCount(Index) const { return mList.nodeCount(); } template<typename NodeOp> void foreachBottomUp(const NodeOp& op, bool threaded, size_t grainSize) { mList.foreach(op, threaded, grainSize); } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded, size_t grainSize) { mList.foreach(op, threaded, grainSize); } template<typename NodeOp> void reduceBottomUp(NodeOp& op, bool threaded, size_t grainSize) { mList.reduce(op, threaded, grainSize); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded, size_t grainSize) { mList.reduce(op, threaded, grainSize); } protected: NodeList<NodeT> mList; };// NodeManagerLink class //////////////////////////////////////// /// @brief To facilitate threading over the nodes of a tree, cache /// node pointers in linear arrays, one for each level of the tree. /// /// @details This implementation works with trees of any depth, but /// optimized specializations are provided for the most typical tree depths. 
template<typename TreeOrLeafManagerT, Index _LEVELS> class NodeManager { public: static const Index LEVELS = _LEVELS; static_assert(LEVELS > 0, "expected instantiation of template specialization"); // see specialization below using NonConstRootNodeType = typename TreeOrLeafManagerT::RootNodeType; using RootNodeType = typename CopyConstness<TreeOrLeafManagerT, NonConstRootNodeType>::Type; using NonConstChildNodeType = typename RootNodeType::ChildNodeType; using ChildNodeType = typename CopyConstness<TreeOrLeafManagerT, NonConstChildNodeType>::Type; static_assert(RootNodeType::LEVEL >= LEVELS, "number of levels exceeds root node height"); NodeManager(TreeOrLeafManagerT& tree, bool serial = false) : mRoot(tree.root()) { this->rebuild(serial); } NodeManager(const NodeManager&) = delete; /// @brief Clear all the cached tree nodes void clear() { mChain.clear(); } /// @brief Clear and recache all the tree nodes from the /// tree. This is required if tree nodes have been added or removed. void rebuild(bool serial = false) { mChain.initRootChildren(mRoot, serial); } /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } /// @brief Return the total number of cached nodes (excluding the root node) Index64 nodeCount() const { return mChain.nodeCount(); } /// @brief Return the number of cached nodes at level @a i, where /// 0 corresponds to the lowest level. Index64 nodeCount(Index i) const { return mChain.nodeCount(i); } //@{ /// @brief Threaded method that applies a user-supplied functor /// to all the nodes in the tree. /// /// @param op user-supplied functor, see examples for interface details. /// @param threaded optional toggle to disable threading, on by default. /// @param grainSize optional parameter to specify the grainsize /// for threading, one by default. /// /// @warning The functor object is deep-copied to create TBB tasks. /// /// @par Example: /// @code /// // Functor to offset all the inactive values of a tree. Note /// // this implementation also illustrates how different /// // computation can be applied to the different node types. /// template<typename TreeType> /// struct OffsetOp /// { /// using ValueT = typename TreeT::ValueType; /// using RootT = typename TreeT::RootNodeType; /// using LeafT = typename TreeT::LeafNodeType; /// OffsetOp(const ValueT& v) : mOffset(v) {} /// /// // Processes the root node. Required by the NodeManager /// void operator()(RootT& root) const /// { /// for (typename RootT::ValueOffIter i = root.beginValueOff(); i; ++i) *i += mOffset; /// } /// // Processes the leaf nodes. Required by the NodeManager /// void operator()(LeafT& leaf) const /// { /// for (typename LeafT::ValueOffIter i = leaf.beginValueOff(); i; ++i) *i += mOffset; /// } /// // Processes the internal nodes. 
Required by the NodeManager /// template<typename NodeT> /// void operator()(NodeT& node) const /// { /// for (typename NodeT::ValueOffIter i = node.beginValueOff(); i; ++i) *i += mOffset; /// } /// private: /// const ValueT mOffset; /// }; /// /// // usage: /// OffsetOp<FloatTree> op(3.0f); /// tree::NodeManager<FloatTree> nodes(tree); /// nodes.foreachBottomUp(op); /// /// // or if a LeafManager already exists /// using T = tree::LeafManager<FloatTree>; /// OffsetOp<T> op(3.0f); /// tree::NodeManager<T> nodes(leafManager); /// nodes.foreachBottomUp(op); /// /// @endcode template<typename NodeOp> void foreachBottomUp(const NodeOp& op, bool threaded = true, size_t grainSize=1) { mChain.foreachBottomUp(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mChain.foreachTopDown(op, threaded, grainSize); } //@} //@{ /// @brief Threaded method that processes nodes with a user supplied functor /// /// @param op user-supplied functor, see examples for interface details. /// @param threaded optional toggle to disable threading, on by default. /// @param grainSize optional parameter to specify the grainsize /// for threading, one by default. /// /// @warning The functor object is deep-copied to create TBB tasks. /// /// @par Example: /// @code /// // Functor to count nodes in a tree /// template<typename TreeType> /// struct NodeCountOp /// { /// NodeCountOp() : nodeCount(TreeType::DEPTH, 0), totalCount(0) /// { /// } /// NodeCountOp(const NodeCountOp& other, tbb::split) : /// nodeCount(TreeType::DEPTH, 0), totalCount(0) /// { /// } /// void join(const NodeCountOp& other) /// { /// for (size_t i = 0; i < nodeCount.size(); ++i) { /// nodeCount[i] += other.nodeCount[i]; /// } /// totalCount += other.totalCount; /// } /// // do nothing for the root node /// void operator()(const typename TreeT::RootNodeType& node) /// { /// } /// // count the internal and leaf nodes /// template<typename NodeT> /// void operator()(const NodeT& node) /// { /// ++(nodeCount[NodeT::LEVEL]); /// ++totalCount; /// } /// std::vector<openvdb::Index64> nodeCount; /// openvdb::Index64 totalCount; /// }; /// /// // usage: /// NodeCountOp<FloatTree> op; /// tree::NodeManager<FloatTree> nodes(tree); /// nodes.reduceBottomUp(op); /// /// // or if a LeafManager already exists /// NodeCountOp<FloatTree> op; /// using T = tree::LeafManager<FloatTree>; /// T leafManager(tree); /// tree::NodeManager<T> nodes(leafManager); /// nodes.reduceBottomUp(op); /// /// @endcode template<typename NodeOp> void reduceBottomUp(NodeOp& op, bool threaded = true, size_t grainSize=1) { mChain.reduceBottomUp(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mChain.reduceTopDown(op, threaded, grainSize); } //@} protected: RootNodeType& mRoot; NodeManagerLink<ChildNodeType, LEVELS-1> mChain; };// NodeManager class //////////////////////////////////////////// // Wraps a user-supplied DynamicNodeManager operator and stores the return // value of the operator() method to the index of the node in a bool array template <typename OpT> struct ForeachFilterOp { explicit ForeachFilterOp(const OpT& op, openvdb::Index64 size) : mOp(op) , mValidPtr(std::make_unique<bool[]>(size)) , mValid(mValidPtr.get()) { } ForeachFilterOp(const ForeachFilterOp& other) : mOp(other.mOp) , mValid(other.mValid) { } template<typename NodeT> void operator()(NodeT& node, size_t idx) 
const { mValid[idx] = mOp(node, idx); } bool valid(size_t idx) const { return mValid[idx]; } const OpT& op() const { return mOp; } private: const OpT& mOp; std::unique_ptr<bool[]> mValidPtr; bool* mValid = nullptr; }; // struct ForeachFilterOp // Wraps a user-supplied DynamicNodeManager operator and stores the return // value of the operator() method to the index of the node in a bool array template <typename OpT> struct ReduceFilterOp { ReduceFilterOp(OpT& op, openvdb::Index64 size) : mOp(&op) , mValidPtr(std::make_unique<bool[]>(size)) , mValid(mValidPtr.get()) { } ReduceFilterOp(const ReduceFilterOp& other) : mOp(other.mOp) , mValid(other.mValid) { } ReduceFilterOp(const ReduceFilterOp& other, tbb::split) : mOpPtr(std::make_unique<OpT>(*(other.mOp), tbb::split())) , mOp(mOpPtr.get()) , mValid(other.mValid) { } template<typename NodeT> void operator()(NodeT& node, size_t idx) const { mValid[idx] = (*mOp)(node, idx); } void join(const ReduceFilterOp& other) { mOp->join(*(other.mOp)); } bool valid(size_t idx) const { return mValid[idx]; } OpT& op() { return *mOp; } private: std::unique_ptr<OpT> mOpPtr; OpT* mOp = nullptr; std::unique_ptr<bool[]> mValidPtr; bool* mValid = nullptr; }; // struct ReduceFilterOp /// @brief This class is a link in a chain that each caches tree nodes /// of a specific type in a linear array. /// /// @note It is for internal use and should rarely be used directly. template<typename NodeT, Index LEVEL> class DynamicNodeManagerLink { public: DynamicNodeManagerLink() = default; template<typename NodeOpT, typename RootT> void foreachTopDown(const NodeOpT& op, RootT& root, bool threaded, size_t grainSize) { if (!op(root, /*index=*/0)) return; if (!mList.initRootChildren(root)) return; ForeachFilterOp<NodeOpT> filterOp(op, mList.nodeCount()); mList.foreachWithIndex(filterOp, threaded, grainSize); mNext.foreachTopDownRecurse(filterOp, mList, threaded, grainSize); } template<typename FilterOpT, typename ParentT> void foreachTopDownRecurse(const FilterOpT& filterOp, ParentT& parent, bool threaded, size_t grainSize) { if (!mList.initNodeChildren(parent, filterOp, !threaded)) return; FilterOpT childFilterOp(filterOp.op(), mList.nodeCount()); mList.foreachWithIndex(childFilterOp, threaded, grainSize); } template<typename NodeOpT, typename RootT> void reduceTopDown(NodeOpT& op, RootT& root, bool threaded, size_t grainSize) { if (!op(root, /*index=*/0)) return; if (!mList.initRootChildren(root)) return; ReduceFilterOp<NodeOpT> filterOp(op, mList.nodeCount()); mList.reduceWithIndex(filterOp, threaded, grainSize); mNext.reduceTopDownRecurse(filterOp, mList, threaded, grainSize); } template<typename FilterOpT, typename ParentT> void reduceTopDownRecurse(FilterOpT& filterOp, ParentT& parent, bool threaded, size_t grainSize) { if (!mList.initNodeChildren(parent, filterOp, !threaded)) return; FilterOpT childFilterOp(filterOp.op(), mList.nodeCount()); mList.reduceWithIndex(childFilterOp, threaded, grainSize); } protected: NodeList<NodeT> mList; DynamicNodeManagerLink<typename NodeT::ChildNodeType, LEVEL-1> mNext; };// DynamicNodeManagerLink class /// @private /// @brief Specialization that terminates the chain of cached tree nodes /// @note It is for internal use and should rarely be used directly. 
template<typename NodeT> class DynamicNodeManagerLink<NodeT, 0> { public: DynamicNodeManagerLink() = default; template<typename NodeFilterOp, typename ParentT> void foreachTopDownRecurse(const NodeFilterOp& nodeFilterOp, ParentT& parent, bool threaded, size_t grainSize) { if (!mList.initNodeChildren(parent, nodeFilterOp, !threaded)) return; mList.foreachWithIndex(nodeFilterOp.op(), threaded, grainSize); } template<typename NodeFilterOp, typename ParentT> void reduceTopDownRecurse(NodeFilterOp& nodeFilterOp, ParentT& parent, bool threaded, size_t grainSize) { if (!mList.initNodeChildren(parent, nodeFilterOp, !threaded)) return; mList.reduceWithIndex(nodeFilterOp.op(), threaded, grainSize); } protected: NodeList<NodeT> mList; };// DynamicNodeManagerLink class template<typename TreeOrLeafManagerT, Index _LEVELS> class DynamicNodeManager { public: static const Index LEVELS = _LEVELS; static_assert(LEVELS > 0, "expected instantiation of template specialization"); // see specialization below using RootNodeType = typename TreeOrLeafManagerT::RootNodeType; static_assert(RootNodeType::LEVEL >= LEVELS, "number of levels exceeds root node height"); explicit DynamicNodeManager(TreeOrLeafManagerT& tree) : mRoot(tree.root()) { } DynamicNodeManager(const DynamicNodeManager&) = delete; /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } /// @brief Threaded method that applies a user-supplied functor /// to all the nodes in the tree. /// /// @param op user-supplied functor, see examples for interface details. /// @param threaded optional toggle to disable threading, on by default. /// @param grainSize optional parameter to specify the grainsize /// for threading, one by default. /// /// @note There are two key differences to the interface of the /// user-supplied functor to the NodeManager class - (1) the operator() /// method aligns with the LeafManager class in expecting the index of the /// node in a linear array of identical node types, (2) the operator() /// method returns a boolean termination value with true indicating that /// children of this node should be processed, false indicating the /// early-exit termination should occur. /// /// @par Example: /// @code /// // Functor to densify the first child node in a linear array. Note /// // this implementation also illustrates how different /// // computation can be applied to the different node types. /// /// template<typename TreeT> /// struct DensifyOp /// { /// using RootT = typename TreeT::RootNodeType; /// using LeafT = typename TreeT::LeafNodeType; /// /// DensifyOp() = default; /// /// // Processes the root node. Required by the DynamicNodeManager /// bool operator()(RootT&, size_t) const { return true; } /// /// // Processes the internal nodes. Required by the DynamicNodeManager /// template<typename NodeT> /// bool operator()(NodeT& node, size_t idx) const /// { /// // densify child /// for (auto iter = node.cbeginValueAll(); iter; ++iter) { /// const openvdb::Coord ijk = iter.getCoord(); /// node.addChild(new typename NodeT::ChildNodeType(iter.getCoord(), NodeT::LEVEL, true)); /// } /// // early-exit termination for all non-zero index children /// return idx == 0; /// } /// // Processes the leaf nodes. 
Required by the DynamicNodeManager /// bool operator()(LeafT&, size_t) const /// { /// return true; /// } /// };// DensifyOp /// /// // usage: /// DensifyOp<FloatTree> op; /// tree::DynamicNodeManager<FloatTree> nodes(tree); /// nodes.foreachTopDown(op); /// /// @endcode template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { mChain.foreachTopDown(op, mRoot, threaded, grainSize); } /// @brief Threaded method that processes nodes with a user supplied functor /// /// @param op user-supplied functor, see examples for interface details. /// @param threaded optional toggle to disable threading, on by default. /// @param grainSize optional parameter to specify the grainsize /// for threading, one by default. /// /// @note There are two key differences to the interface of the /// user-supplied functor to the NodeManager class - (1) the operator() /// method aligns with the LeafManager class in expecting the index of the /// node in a linear array of identical node types, (2) the operator() /// method returns a boolean termination value with true indicating that /// children of this node should be processed, false indicating the /// early-exit termination should occur. /// /// @par Example: /// @code /// // Functor to count nodes in a tree /// template<typename TreeType> /// struct NodeCountOp /// { /// NodeCountOp() : nodeCount(TreeType::DEPTH, 0), totalCount(0) /// { /// } /// NodeCountOp(const NodeCountOp& other, tbb::split) : /// nodeCount(TreeType::DEPTH, 0), totalCount(0) /// { /// } /// void join(const NodeCountOp& other) /// { /// for (size_t i = 0; i < nodeCount.size(); ++i) { /// nodeCount[i] += other.nodeCount[i]; /// } /// totalCount += other.totalCount; /// } /// // do nothing for the root node /// bool operator()(const typename TreeT::RootNodeType& node, size_t) /// { /// return true; /// } /// // count the internal and leaf nodes /// template<typename NodeT> /// bool operator()(const NodeT& node, size_t) /// { /// ++(nodeCount[NodeT::LEVEL]); /// ++totalCount; /// return true; /// } /// std::vector<openvdb::Index64> nodeCount; /// openvdb::Index64 totalCount; /// }; /// /// // usage: /// NodeCountOp<FloatTree> op; /// tree::DynamicNodeManager<FloatTree> nodes(tree); /// nodes.reduceTopDown(op); /// /// @endcode template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { mChain.reduceTopDown(op, mRoot, threaded, grainSize); } protected: RootNodeType& mRoot; DynamicNodeManagerLink<typename RootNodeType::ChildNodeType, LEVELS-1> mChain; };// DynamicNodeManager class //////////////////////////////////////////// /// @private /// Template specialization of the NodeManager with no caching of nodes template<typename TreeOrLeafManagerT> class NodeManager<TreeOrLeafManagerT, 0> { public: using NonConstRootNodeType = typename TreeOrLeafManagerT::RootNodeType; using RootNodeType = typename CopyConstness<TreeOrLeafManagerT, NonConstRootNodeType>::Type; static const Index LEVELS = 0; NodeManager(TreeOrLeafManagerT& tree, bool /*serial*/ = false) : mRoot(tree.root()) { } NodeManager(const NodeManager&) = delete; /// @brief Clear all the cached tree nodes void clear() {} /// @brief Clear and recache all the tree nodes from the /// tree. This is required if tree nodes have been added or removed. void rebuild(bool /*serial*/ = false) { } /// @brief Return a reference to the root node. 
const RootNodeType& root() const { return mRoot; } /// @brief Return the total number of cached nodes (excluding the root node) Index64 nodeCount() const { return 0; } Index64 nodeCount(Index) const { return 0; } template<typename NodeOp> void foreachBottomUp(const NodeOp& op, bool, size_t) { op(mRoot); } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool, size_t) { op(mRoot); } template<typename NodeOp> void reduceBottomUp(NodeOp& op, bool, size_t) { op(mRoot); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool, size_t) { op(mRoot); } protected: RootNodeType& mRoot; }; // NodeManager<0> //////////////////////////////////////////// /// @private /// Template specialization of the NodeManager with one level of nodes template<typename TreeOrLeafManagerT> class NodeManager<TreeOrLeafManagerT, 1> { public: using NonConstRootNodeType = typename TreeOrLeafManagerT::RootNodeType; using RootNodeType = typename CopyConstness<TreeOrLeafManagerT, NonConstRootNodeType>::Type; static_assert(RootNodeType::LEVEL > 0, "expected instantiation of template specialization"); static const Index LEVELS = 1; NodeManager(TreeOrLeafManagerT& tree, bool serial = false) : mRoot(tree.root()) { this->rebuild(serial); } NodeManager(const NodeManager&) = delete; /// @brief Clear all the cached tree nodes void clear() { mList0.clear(); } /// @brief Clear and recache all the tree nodes from the /// tree. This is required if tree nodes have been added or removed. void rebuild(bool /*serial*/ = false) { mList0.initRootChildren(mRoot); } /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } /// @brief Return the total number of cached nodes (excluding the root node) Index64 nodeCount() const { return mList0.nodeCount(); } /// @brief Return the number of cached nodes at level @a i, where /// 0 corresponds to the lowest level. Index64 nodeCount(Index i) const { return i==0 ? 
mList0.nodeCount() : 0; } template<typename NodeOp> void foreachBottomUp(const NodeOp& op, bool threaded = true, size_t grainSize=1) { mList0.foreach(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mList0.foreach(op, threaded, grainSize); } template<typename NodeOp> void reduceBottomUp(NodeOp& op, bool threaded = true, size_t grainSize=1) { mList0.reduce(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mList0.reduce(op, threaded, grainSize); } protected: using NodeT1 = RootNodeType; using NonConstNodeT0 = typename NodeT1::ChildNodeType; using NodeT0 = typename CopyConstness<RootNodeType, NonConstNodeT0>::Type; using ListT0 = NodeList<NodeT0>; NodeT1& mRoot; ListT0 mList0; }; // NodeManager<1> //////////////////////////////////////////// /// @private /// Template specialization of the NodeManager with two levels of nodes template<typename TreeOrLeafManagerT> class NodeManager<TreeOrLeafManagerT, 2> { public: using NonConstRootNodeType = typename TreeOrLeafManagerT::RootNodeType; using RootNodeType = typename CopyConstness<TreeOrLeafManagerT, NonConstRootNodeType>::Type; static_assert(RootNodeType::LEVEL > 1, "expected instantiation of template specialization"); static const Index LEVELS = 2; NodeManager(TreeOrLeafManagerT& tree, bool serial = false) : mRoot(tree.root()) { this->rebuild(serial); } NodeManager(const NodeManager&) = delete; /// @brief Clear all the cached tree nodes void clear() { mList0.clear(); mList1.clear(); } /// @brief Clear and recache all the tree nodes from the /// tree. This is required if tree nodes have been added or removed. void rebuild(bool serial = false) { mList1.initRootChildren(mRoot); mList0.initNodeChildren(mList1, NodeFilter(), serial); } /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } /// @brief Return the total number of cached nodes (excluding the root node) Index64 nodeCount() const { return mList0.nodeCount() + mList1.nodeCount(); } /// @brief Return the number of cached nodes at level @a i, where /// 0 corresponds to the lowest level. Index64 nodeCount(Index i) const { return i==0 ? mList0.nodeCount() : i==1 ? 
mList1.nodeCount() : 0; } template<typename NodeOp> void foreachBottomUp(const NodeOp& op, bool threaded = true, size_t grainSize=1) { mList0.foreach(op, threaded, grainSize); mList1.foreach(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mList1.foreach(op, threaded, grainSize); mList0.foreach(op, threaded, grainSize); } template<typename NodeOp> void reduceBottomUp(NodeOp& op, bool threaded = true, size_t grainSize=1) { mList0.reduce(op, threaded, grainSize); mList1.reduce(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mList1.reduce(op, threaded, grainSize); mList0.reduce(op, threaded, grainSize); } protected: using NodeT2 = RootNodeType; using NonConstNodeT1 = typename NodeT2::ChildNodeType; using NodeT1 = typename CopyConstness<RootNodeType, NonConstNodeT1>::Type; // upper level using NonConstNodeT0 = typename NodeT1::ChildNodeType; using NodeT0 = typename CopyConstness<RootNodeType, NonConstNodeT0>::Type; // lower level using ListT1 = NodeList<NodeT1>; // upper level using ListT0 = NodeList<NodeT0>; // lower level NodeT2& mRoot; ListT1 mList1; ListT0 mList0; }; // NodeManager<2> //////////////////////////////////////////// /// @private /// Template specialization of the NodeManager with three levels of nodes template<typename TreeOrLeafManagerT> class NodeManager<TreeOrLeafManagerT, 3> { public: using NonConstRootNodeType = typename TreeOrLeafManagerT::RootNodeType; using RootNodeType = typename CopyConstness<TreeOrLeafManagerT, NonConstRootNodeType>::Type; static_assert(RootNodeType::LEVEL > 2, "expected instantiation of template specialization"); static const Index LEVELS = 3; NodeManager(TreeOrLeafManagerT& tree, bool serial = false) : mRoot(tree.root()) { this->rebuild(serial); } NodeManager(const NodeManager&) = delete; /// @brief Clear all the cached tree nodes void clear() { mList0.clear(); mList1.clear(); mList2.clear(); } /// @brief Clear and recache all the tree nodes from the /// tree. This is required if tree nodes have been added or removed. void rebuild(bool serial = false) { mList2.initRootChildren(mRoot); mList1.initNodeChildren(mList2, NodeFilter(), serial); mList0.initNodeChildren(mList1, NodeFilter(), serial); } /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } /// @brief Return the total number of cached nodes (excluding the root node) Index64 nodeCount() const { return mList0.nodeCount()+mList1.nodeCount()+mList2.nodeCount(); } /// @brief Return the number of cached nodes at level @a i, where /// 0 corresponds to the lowest level. Index64 nodeCount(Index i) const { return i==0 ? mList0.nodeCount() : i==1 ? mList1.nodeCount() : i==2 ? 
mList2.nodeCount() : 0; } template<typename NodeOp> void foreachBottomUp(const NodeOp& op, bool threaded = true, size_t grainSize=1) { mList0.foreach(op, threaded, grainSize); mList1.foreach(op, threaded, grainSize); mList2.foreach(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mList2.foreach(op, threaded, grainSize); mList1.foreach(op, threaded, grainSize); mList0.foreach(op, threaded, grainSize); } template<typename NodeOp> void reduceBottomUp(NodeOp& op, bool threaded = true, size_t grainSize=1) { mList0.reduce(op, threaded, grainSize); mList1.reduce(op, threaded, grainSize); mList2.reduce(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mList2.reduce(op, threaded, grainSize); mList1.reduce(op, threaded, grainSize); mList0.reduce(op, threaded, grainSize); } protected: using NodeT3 = RootNodeType; using NonConstNodeT2 = typename NodeT3::ChildNodeType; using NodeT2 = typename CopyConstness<RootNodeType, NonConstNodeT2>::Type; // upper level using NonConstNodeT1 = typename NodeT2::ChildNodeType; using NodeT1 = typename CopyConstness<RootNodeType, NonConstNodeT1>::Type; // mid level using NonConstNodeT0 = typename NodeT1::ChildNodeType; using NodeT0 = typename CopyConstness<RootNodeType, NonConstNodeT0>::Type; // lower level using ListT2 = NodeList<NodeT2>; // upper level of internal nodes using ListT1 = NodeList<NodeT1>; // lower level of internal nodes using ListT0 = NodeList<NodeT0>; // lower level of internal nodes or leafs NodeT3& mRoot; ListT2 mList2; ListT1 mList1; ListT0 mList0; }; // NodeManager<3> //////////////////////////////////////////// /// @private /// Template specialization of the NodeManager with four levels of nodes template<typename TreeOrLeafManagerT> class NodeManager<TreeOrLeafManagerT, 4> { public: using NonConstRootNodeType = typename TreeOrLeafManagerT::RootNodeType; using RootNodeType = typename CopyConstness<TreeOrLeafManagerT, NonConstRootNodeType>::Type; static_assert(RootNodeType::LEVEL > 3, "expected instantiation of template specialization"); static const Index LEVELS = 4; NodeManager(TreeOrLeafManagerT& tree, bool serial = false) : mRoot(tree.root()) { this->rebuild(serial); } NodeManager(const NodeManager&) = delete; // disallow copy-construction /// @brief Clear all the cached tree nodes void clear() { mList0.clear(); mList1.clear(); mList2.clear(); mList3.clear(); } /// @brief Clear and recache all the tree nodes from the /// tree. This is required if tree nodes have been added or removed. void rebuild(bool serial = false) { mList3.initRootChildren(mRoot); mList2.initNodeChildren(mList3, NodeFilter(), serial); mList1.initNodeChildren(mList2, NodeFilter(), serial); mList0.initNodeChildren(mList1, NodeFilter(), serial); } /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } /// @brief Return the total number of cached nodes (excluding the root node) Index64 nodeCount() const { return mList0.nodeCount() + mList1.nodeCount() + mList2.nodeCount() + mList3.nodeCount(); } /// @brief Return the number of cached nodes at level @a i, where /// 0 corresponds to the lowest level. Index64 nodeCount(Index i) const { return i==0 ? mList0.nodeCount() : i==1 ? mList1.nodeCount() : i==2 ? mList2.nodeCount() : i==3 ? 
mList3.nodeCount() : 0; } template<typename NodeOp> void foreachBottomUp(const NodeOp& op, bool threaded = true, size_t grainSize=1) { mList0.foreach(op, threaded, grainSize); mList1.foreach(op, threaded, grainSize); mList2.foreach(op, threaded, grainSize); mList3.foreach(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mList3.foreach(op, threaded, grainSize); mList2.foreach(op, threaded, grainSize); mList1.foreach(op, threaded, grainSize); mList0.foreach(op, threaded, grainSize); } template<typename NodeOp> void reduceBottomUp(NodeOp& op, bool threaded = true, size_t grainSize=1) { mList0.reduce(op, threaded, grainSize); mList1.reduce(op, threaded, grainSize); mList2.reduce(op, threaded, grainSize); mList3.reduce(op, threaded, grainSize); op(mRoot); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { op(mRoot); mList3.reduce(op, threaded, grainSize); mList2.reduce(op, threaded, grainSize); mList1.reduce(op, threaded, grainSize); mList0.reduce(op, threaded, grainSize); } protected: using NodeT4 = RootNodeType; using NonConstNodeT3 = typename NodeT4::ChildNodeType; using NodeT3 = typename CopyConstness<RootNodeType, NonConstNodeT3>::Type; // upper level using NonConstNodeT2 = typename NodeT3::ChildNodeType; using NodeT2 = typename CopyConstness<RootNodeType, NonConstNodeT2>::Type; // upper mid level using NonConstNodeT1 = typename NodeT2::ChildNodeType; using NodeT1 = typename CopyConstness<RootNodeType, NonConstNodeT1>::Type; // lower mid level using NonConstNodeT0 = typename NodeT1::ChildNodeType; using NodeT0 = typename CopyConstness<RootNodeType, NonConstNodeT0>::Type; // lower level using ListT3 = NodeList<NodeT3>; // upper level of internal nodes using ListT2 = NodeList<NodeT2>; // upper mid level of internal nodes using ListT1 = NodeList<NodeT1>; // lower mid level of internal nodes using ListT0 = NodeList<NodeT0>; // lower level of internal nodes or leafs NodeT4& mRoot; ListT3 mList3; ListT2 mList2; ListT1 mList1; ListT0 mList0; }; // NodeManager<4> //////////////////////////////////////////// /// @private /// Template specialization of the DynamicNodeManager with one level of nodes template<typename TreeOrLeafManagerT> class DynamicNodeManager<TreeOrLeafManagerT, 1> { public: using RootNodeType = typename TreeOrLeafManagerT::RootNodeType; static_assert(RootNodeType::LEVEL > 0, "expected instantiation of template specialization"); static const Index LEVELS = 1; explicit DynamicNodeManager(TreeOrLeafManagerT& tree) : mRoot(tree.root()) { } DynamicNodeManager(const DynamicNodeManager&) = delete; /// @brief Return a reference to the root node. 
const RootNodeType& root() const { return mRoot; } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { // root if (!op(mRoot, /*index=*/0)) return; // list0 if (!mList0.initRootChildren(mRoot)) return; ForeachFilterOp<NodeOp> nodeOp(op, mList0.nodeCount()); mList0.foreachWithIndex(nodeOp, threaded, grainSize); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { // root if (!op(mRoot, /*index=*/0)) return; // list0 if (!mList0.initRootChildren(mRoot)) return; ReduceFilterOp<NodeOp> nodeOp(op, mList0.nodeCount()); mList0.reduceWithIndex(nodeOp, threaded, grainSize); } protected: using NodeT1 = RootNodeType; using NodeT0 = typename NodeT1::ChildNodeType; using ListT0 = NodeList<NodeT0>; NodeT1& mRoot; ListT0 mList0; };// DynamicNodeManager<1> class //////////////////////////////////////////// /// @private /// Template specialization of the DynamicNodeManager with two levels of nodes template<typename TreeOrLeafManagerT> class DynamicNodeManager<TreeOrLeafManagerT, 2> { public: using RootNodeType = typename TreeOrLeafManagerT::RootNodeType; static_assert(RootNodeType::LEVEL > 1, "expected instantiation of template specialization"); static const Index LEVELS = 2; explicit DynamicNodeManager(TreeOrLeafManagerT& tree) : mRoot(tree.root()) { } DynamicNodeManager(const DynamicNodeManager&) = delete; /// @brief Return a reference to the root node. const RootNodeType& root() const { return mRoot; } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { // root if (!op(mRoot, /*index=*/0)) return; // list1 if (!mList1.initRootChildren(mRoot)) return; ForeachFilterOp<NodeOp> nodeOp(op, mList1.nodeCount()); mList1.foreachWithIndex(nodeOp, threaded, grainSize); // list0 if (!mList0.initNodeChildren(mList1, nodeOp, !threaded)) return; mList0.foreachWithIndex(op, threaded, grainSize); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { // root if (!op(mRoot, /*index=*/0)) return; // list1 if (!mList1.initRootChildren(mRoot)) return; ReduceFilterOp<NodeOp> nodeOp(op, mList1.nodeCount()); mList1.reduceWithIndex(nodeOp, threaded, grainSize); // list0 if (!mList0.initNodeChildren(mList1, nodeOp, !threaded)) return; mList0.reduceWithIndex(op, threaded, grainSize); } protected: using NodeT2 = RootNodeType; using NodeT1 = typename NodeT2::ChildNodeType; // upper level using NodeT0 = typename NodeT1::ChildNodeType; // lower level using ListT1 = NodeList<NodeT1>; // upper level of internal nodes using ListT0 = NodeList<NodeT0>; // lower level of internal nodes or leafs NodeT2& mRoot; ListT1 mList1; ListT0 mList0; };// DynamicNodeManager<2> class //////////////////////////////////////////// /// @private /// Template specialization of the DynamicNodeManager with three levels of nodes template<typename TreeOrLeafManagerT> class DynamicNodeManager<TreeOrLeafManagerT, 3> { public: using RootNodeType = typename TreeOrLeafManagerT::RootNodeType; static_assert(RootNodeType::LEVEL > 2, "expected instantiation of template specialization"); static const Index LEVELS = 3; explicit DynamicNodeManager(TreeOrLeafManagerT& tree) : mRoot(tree.root()) { } DynamicNodeManager(const DynamicNodeManager&) = delete; /// @brief Return a reference to the root node. 
const RootNodeType& root() const { return mRoot; } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { // root if (!op(mRoot, /*index=*/0)) return; // list2 if (!mList2.initRootChildren(mRoot)) return; ForeachFilterOp<NodeOp> nodeOp2(op, mList2.nodeCount()); mList2.foreachWithIndex(nodeOp2, threaded, grainSize); // list1 if (!mList1.initNodeChildren(mList2, nodeOp2, !threaded)) return; ForeachFilterOp<NodeOp> nodeOp1(op, mList1.nodeCount()); mList1.foreachWithIndex(nodeOp1, threaded, grainSize); // list0 if (!mList0.initNodeChildren(mList1, nodeOp1, !threaded)) return; mList0.foreachWithIndex(op, threaded, grainSize); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { // root if (!op(mRoot, /*index=*/0)) return; // list2 if (!mList2.initRootChildren(mRoot)) return; ReduceFilterOp<NodeOp> nodeOp2(op, mList2.nodeCount()); mList2.reduceWithIndex(nodeOp2, threaded, grainSize); // list1 if (!mList1.initNodeChildren(mList2, nodeOp2, !threaded)) return; ReduceFilterOp<NodeOp> nodeOp1(op, mList1.nodeCount()); mList1.reduceWithIndex(nodeOp1, threaded, grainSize); // list0 if (!mList0.initNodeChildren(mList1, nodeOp1, !threaded)) return; mList0.reduceWithIndex(op, threaded, grainSize); } protected: using NodeT3 = RootNodeType; using NodeT2 = typename NodeT3::ChildNodeType; // upper level using NodeT1 = typename NodeT2::ChildNodeType; // mid level using NodeT0 = typename NodeT1::ChildNodeType; // lower level using ListT2 = NodeList<NodeT2>; // upper level of internal nodes using ListT1 = NodeList<NodeT1>; // lower level of internal nodes using ListT0 = NodeList<NodeT0>; // lower level of internal nodes or leafs NodeT3& mRoot; ListT2 mList2; ListT1 mList1; ListT0 mList0; };// DynamicNodeManager<3> class //////////////////////////////////////////// /// @private /// Template specialization of the DynamicNodeManager with four levels of nodes template<typename TreeOrLeafManagerT> class DynamicNodeManager<TreeOrLeafManagerT, 4> { public: using RootNodeType = typename TreeOrLeafManagerT::RootNodeType; static_assert(RootNodeType::LEVEL > 3, "expected instantiation of template specialization"); static const Index LEVELS = 4; explicit DynamicNodeManager(TreeOrLeafManagerT& tree) : mRoot(tree.root()) { } DynamicNodeManager(const DynamicNodeManager&) = delete; /// @brief Return a reference to the root node. 
const RootNodeType& root() const { return mRoot; } template<typename NodeOp> void foreachTopDown(const NodeOp& op, bool threaded = true, size_t grainSize=1) { // root if (!op(mRoot, /*index=*/0)) return; // list3 if (!mList3.initRootChildren(mRoot)) return; ForeachFilterOp<NodeOp> nodeOp3(op, mList3.nodeCount()); mList3.foreachWithIndex(nodeOp3, threaded, grainSize); // list2 if (!mList2.initNodeChildren(mList3, nodeOp3, !threaded)) return; ForeachFilterOp<NodeOp> nodeOp2(op, mList2.nodeCount()); mList2.foreachWithIndex(nodeOp2, threaded, grainSize); // list1 if (!mList1.initNodeChildren(mList2, nodeOp2, !threaded)) return; ForeachFilterOp<NodeOp> nodeOp1(op, mList1.nodeCount()); mList1.foreachWithIndex(nodeOp1, threaded, grainSize); // list0 if (!mList0.initNodeChildren(mList1, nodeOp1, !threaded)) return; mList0.foreachWithIndex(op, threaded, grainSize); } template<typename NodeOp> void reduceTopDown(NodeOp& op, bool threaded = true, size_t grainSize=1) { // root if (!op(mRoot, /*index=*/0)) return; // list3 if (!mList3.initRootChildren(mRoot)) return; ReduceFilterOp<NodeOp> nodeOp3(op, mList3.nodeCount()); mList3.reduceWithIndex(nodeOp3, threaded, grainSize); // list2 if (!mList2.initNodeChildren(mList3, nodeOp3, !threaded)) return; ReduceFilterOp<NodeOp> nodeOp2(op, mList2.nodeCount()); mList2.reduceWithIndex(nodeOp2, threaded, grainSize); // list1 if (!mList1.initNodeChildren(mList2, nodeOp2, !threaded)) return; ReduceFilterOp<NodeOp> nodeOp1(op, mList1.nodeCount()); mList1.reduceWithIndex(nodeOp1, threaded, grainSize); // list0 if (!mList0.initNodeChildren(mList1, nodeOp1, !threaded)) return; mList0.reduceWithIndex(op, threaded, grainSize); } protected: using NodeT4 = RootNodeType; using NodeT3 = typename NodeT4::ChildNodeType; // upper level using NodeT2 = typename NodeT3::ChildNodeType; // upper mid level using NodeT1 = typename NodeT2::ChildNodeType; // lower mid level using NodeT0 = typename NodeT1::ChildNodeType; // lower level using ListT3 = NodeList<NodeT3>; // upper level of internal nodes using ListT2 = NodeList<NodeT2>; // upper mid level of internal nodes using ListT1 = NodeList<NodeT1>; // lower mid level of internal nodes using ListT0 = NodeList<NodeT0>; // lower level of internal nodes or leafs NodeT4& mRoot; ListT3 mList3; ListT2 mList2; ListT1 mList1; ListT0 mList0; };// DynamicNodeManager<4> class } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_NODEMANAGER_HAS_BEEN_INCLUDED
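////////////////////////////////////////

// Minimal end-to-end sketch of the NodeManager in use (illustrative only; the
// particular tree values and the console output are assumptions made for this
// example, not anything required by this header). It builds a small FloatTree,
// caches its nodes, and reports how many nodes were cached on each level below
// the root.
//
// @code
// #include <openvdb/openvdb.h>
// #include <openvdb/tree/NodeManager.h>
// #include <iostream>
//
// int main()
// {
//     openvdb::initialize();
//
//     openvdb::FloatTree tree(/*background=*/0.0f);
//     tree.setValue(openvdb::Coord(0, 0, 0), 1.0f);
//     tree.setValue(openvdb::Coord(1000, 0, 0), 2.0f);
//
//     // Cache all nodes below the root in per-level linear arrays.
//     openvdb::tree::NodeManager<openvdb::FloatTree> nodes(tree);
//
//     // nodeCount(i) reports the number of cached nodes at level i,
//     // where level 0 is the leaf level.
//     for (openvdb::Index i = 0; i < nodes.LEVELS; ++i) {
//         std::cout << "level " << i << ": " << nodes.nodeCount(i) << " nodes\n";
//     }
//     std::cout << "total: " << nodes.nodeCount() << " nodes\n";
// }
// @endcode
//
// From here, foreachBottomUp()/foreachTopDown() (or their reduce counterparts)
// can be used to apply a functor, such as the OffsetOp and NodeCountOp examples
// documented above, to every cached node plus the root.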
60,466
C
34.073666
124
0.628155
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/RootNode.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// /// @file RootNode.h /// /// @brief The root node of an OpenVDB tree #ifndef OPENVDB_TREE_ROOTNODE_HAS_BEEN_INCLUDED #define OPENVDB_TREE_ROOTNODE_HAS_BEEN_INCLUDED #include <openvdb/Exceptions.h> #include <openvdb/Types.h> #include <openvdb/io/Compression.h> // for truncateRealToHalf() #include <openvdb/math/Math.h> // for isZero(), isExactlyEqual(), etc. #include <openvdb/math/BBox.h> #include <openvdb/util/NodeMasks.h> // for backward compatibility only (see readTopology()) #include <openvdb/version.h> #include <tbb/parallel_for.h> #include <map> #include <set> #include <sstream> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { // Forward declarations template<typename HeadType, int HeadLevel> struct NodeChain; template<typename, typename> struct SameRootConfig; template<typename, typename, bool> struct RootNodeCopyHelper; template<typename, typename, typename, bool> struct RootNodeCombineHelper; template<typename ChildType> class RootNode { public: using ChildNodeType = ChildType; using LeafNodeType = typename ChildType::LeafNodeType; using ValueType = typename ChildType::ValueType; using BuildType = typename ChildType::BuildType; static const Index LEVEL = 1 + ChildType::LEVEL; // level 0 = leaf /// NodeChainType is a list of this tree's node types, from LeafNodeType to RootNode. using NodeChainType = typename NodeChain<RootNode, LEVEL>::Type; static_assert(NodeChainType::Size == LEVEL + 1, "wrong number of entries in RootNode node chain"); /// @brief ValueConverter<T>::Type is the type of a RootNode having the same /// child hierarchy as this node but a different value type, T. template<typename OtherValueType> struct ValueConverter { using Type = RootNode<typename ChildType::template ValueConverter<OtherValueType>::Type>; }; /// @brief SameConfiguration<OtherNodeType>::value is @c true if and only if /// OtherNodeType is the type of a RootNode whose ChildNodeType has the same /// configuration as this node's ChildNodeType. template<typename OtherNodeType> struct SameConfiguration { static const bool value = SameRootConfig<ChildNodeType, OtherNodeType>::value; }; /// Construct a new tree with a background value of 0. RootNode(); /// Construct a new tree with the given background value. explicit RootNode(const ValueType& background); RootNode(const RootNode& other) { *this = other; } /// @brief Construct a new tree that reproduces the topology and active states /// of a tree of a different ValueType but the same configuration (levels, /// node dimensions and branching factors). Cast the other tree's values to /// this tree's ValueType. /// @throw TypeError if the other tree's configuration doesn't match this tree's /// or if this tree's ValueType is not constructible from the other tree's ValueType. template<typename OtherChildType> explicit RootNode(const RootNode<OtherChildType>& other) { *this = other; } /// @brief Construct a new tree that reproduces the topology and active states of /// another tree (which may have a different ValueType), but not the other tree's values. /// @details All tiles and voxels that are active in the other tree are set to /// @a foreground in the new tree, and all inactive tiles and voxels are set to @a background. 
/// @param other the root node of a tree having (possibly) a different ValueType /// @param background the value to which inactive tiles and voxels are initialized /// @param foreground the value to which active tiles and voxels are initialized /// @throw TypeError if the other tree's configuration doesn't match this tree's. template<typename OtherChildType> RootNode(const RootNode<OtherChildType>& other, const ValueType& background, const ValueType& foreground, TopologyCopy); /// @brief Construct a new tree that reproduces the topology and active states of /// another tree (which may have a different ValueType), but not the other tree's values. /// All tiles and voxels in the new tree are set to @a background regardless of /// their active states in the other tree. /// @param other the root node of a tree having (possibly) a different ValueType /// @param background the value to which inactive tiles and voxels are initialized /// @note This copy constructor is generally faster than the one that takes both /// a foreground and a background value. Its main application is in multithreaded /// operations where the topology of the output tree exactly matches the input tree. /// @throw TypeError if the other tree's configuration doesn't match this tree's. template<typename OtherChildType> RootNode(const RootNode<OtherChildType>& other, const ValueType& background, TopologyCopy); /// @brief Copy a root node of the same type as this node. RootNode& operator=(const RootNode& other); /// @brief Copy a root node of the same tree configuration as this node /// but a different ValueType. /// @throw TypeError if the other tree's configuration doesn't match this tree's. /// @note This node's ValueType must be constructible from the other node's ValueType. /// For example, a root node with values of type float can be assigned to a root node /// with values of type Vec3s, because a Vec3s can be constructed from a float. /// But a Vec3s root node cannot be assigned to a float root node. template<typename OtherChildType> RootNode& operator=(const RootNode<OtherChildType>& other); ~RootNode() { this->clear(); } private: struct Tile { Tile(): value(zeroVal<ValueType>()), active(false) {} Tile(const ValueType& v, bool b): value(v), active(b) {} ValueType value; bool active; }; // This lightweight struct pairs child pointers and tiles. 
struct NodeStruct { ChildType* child; Tile tile; NodeStruct(): child(nullptr) {} NodeStruct(ChildType& c): child(&c) {} NodeStruct(const Tile& t): child(nullptr), tile(t) {} NodeStruct(const NodeStruct&) = default; NodeStruct& operator=(const NodeStruct&) = default; ~NodeStruct() {} ///< @note doesn't delete child bool isChild() const { return child != nullptr; } bool isTile() const { return child == nullptr; } bool isTileOff() const { return isTile() && !tile.active; } bool isTileOn() const { return isTile() && tile.active; } void set(ChildType& c) { delete child; child = &c; } void set(const Tile& t) { delete child; child = nullptr; tile = t; } ChildType& steal(const Tile& t) { ChildType* c=child; child=nullptr; tile=t; return *c; } }; using MapType = std::map<Coord, NodeStruct>; using MapIter = typename MapType::iterator; using MapCIter = typename MapType::const_iterator; using CoordSet = std::set<Coord>; using CoordSetIter = typename CoordSet::iterator; using CoordSetCIter = typename CoordSet::const_iterator; static void setTile(const MapIter& i, const Tile& t) { i->second.set(t); } static void setChild(const MapIter& i, ChildType& c) { i->second.set(c); } static Tile& getTile(const MapIter& i) { return i->second.tile; } static const Tile& getTile(const MapCIter& i) { return i->second.tile; } static ChildType& getChild(const MapIter& i) { return *(i->second.child); } static const ChildType& getChild(const MapCIter& i) { return *(i->second.child); } static ChildType& stealChild(const MapIter& i, const Tile& t) {return i->second.steal(t);} static const ChildType& stealChild(const MapCIter& i,const Tile& t) {return i->second.steal(t);} static bool isChild(const MapCIter& i) { return i->second.isChild(); } static bool isChild(const MapIter& i) { return i->second.isChild(); } static bool isTile(const MapCIter& i) { return i->second.isTile(); } static bool isTile(const MapIter& i) { return i->second.isTile(); } static bool isTileOff(const MapCIter& i) { return i->second.isTileOff(); } static bool isTileOff(const MapIter& i) { return i->second.isTileOff(); } static bool isTileOn(const MapCIter& i) { return i->second.isTileOn(); } static bool isTileOn(const MapIter& i) { return i->second.isTileOn(); } struct NullPred { static inline bool test(const MapIter&) { return true; } static inline bool test(const MapCIter&) { return true; } }; struct ValueOnPred { static inline bool test(const MapIter& i) { return isTileOn(i); } static inline bool test(const MapCIter& i) { return isTileOn(i); } }; struct ValueOffPred { static inline bool test(const MapIter& i) { return isTileOff(i); } static inline bool test(const MapCIter& i) { return isTileOff(i); } }; struct ValueAllPred { static inline bool test(const MapIter& i) { return isTile(i); } static inline bool test(const MapCIter& i) { return isTile(i); } }; struct ChildOnPred { static inline bool test(const MapIter& i) { return isChild(i); } static inline bool test(const MapCIter& i) { return isChild(i); } }; struct ChildOffPred { static inline bool test(const MapIter& i) { return isTile(i); } static inline bool test(const MapCIter& i) { return isTile(i); } }; template<typename _RootNodeT, typename _MapIterT, typename FilterPredT> class BaseIter { public: using RootNodeT = _RootNodeT; using MapIterT = _MapIterT; // either MapIter or MapCIter bool operator==(const BaseIter& other) const { return (mParentNode == other.mParentNode) && (mIter == other.mIter); } bool operator!=(const BaseIter& other) const { return !(*this == other); } RootNodeT* 
getParentNode() const { return mParentNode; } /// Return a reference to the node over which this iterator iterates. RootNodeT& parent() const { if (!mParentNode) OPENVDB_THROW(ValueError, "iterator references a null parent node"); return *mParentNode; } bool test() const { assert(mParentNode); return mIter != mParentNode->mTable.end(); } operator bool() const { return this->test(); } void increment() { if (this->test()) { ++mIter; } this->skip(); } bool next() { this->increment(); return this->test(); } void increment(Index n) { for (Index i = 0; i < n && this->next(); ++i) {} } /// @brief Return this iterator's position as an offset from /// the beginning of the parent node's map. Index pos() const { return !mParentNode ? 0U : Index(std::distance(mParentNode->mTable.begin(), mIter)); } bool isValueOn() const { return RootNodeT::isTileOn(mIter); } bool isValueOff() const { return RootNodeT::isTileOff(mIter); } void setValueOn(bool on = true) const { mIter->second.tile.active = on; } void setValueOff() const { mIter->second.tile.active = false; } /// Return the coordinates of the item to which this iterator is pointing. Coord getCoord() const { return mIter->first; } /// Return in @a xyz the coordinates of the item to which this iterator is pointing. void getCoord(Coord& xyz) const { xyz = this->getCoord(); } protected: BaseIter(): mParentNode(nullptr) {} BaseIter(RootNodeT& parent, const MapIterT& iter): mParentNode(&parent), mIter(iter) {} void skip() { while (this->test() && !FilterPredT::test(mIter)) ++mIter; } RootNodeT* mParentNode; MapIterT mIter; }; // BaseIter template<typename RootNodeT, typename MapIterT, typename FilterPredT, typename ChildNodeT> class ChildIter: public BaseIter<RootNodeT, MapIterT, FilterPredT> { public: using BaseT = BaseIter<RootNodeT, MapIterT, FilterPredT>; using NodeType = RootNodeT; using ValueType = NodeType; using ChildNodeType = ChildNodeT; using NonConstNodeType = typename std::remove_const<NodeType>::type; using NonConstValueType = typename std::remove_const<ValueType>::type; using NonConstChildNodeType = typename std::remove_const<ChildNodeType>::type; using BaseT::mIter; ChildIter() {} ChildIter(RootNodeT& parent, const MapIterT& iter): BaseT(parent, iter) { BaseT::skip(); } ChildIter& operator++() { BaseT::increment(); return *this; } ChildNodeT& getValue() const { return getChild(mIter); } ChildNodeT& operator*() const { return this->getValue(); } ChildNodeT* operator->() const { return &this->getValue(); } }; // ChildIter template<typename RootNodeT, typename MapIterT, typename FilterPredT, typename ValueT> class ValueIter: public BaseIter<RootNodeT, MapIterT, FilterPredT> { public: using BaseT = BaseIter<RootNodeT, MapIterT, FilterPredT>; using NodeType = RootNodeT; using ValueType = ValueT; using NonConstNodeType = typename std::remove_const<NodeType>::type; using NonConstValueType = typename std::remove_const<ValueT>::type; using BaseT::mIter; ValueIter() {} ValueIter(RootNodeT& parent, const MapIterT& iter): BaseT(parent, iter) { BaseT::skip(); } ValueIter& operator++() { BaseT::increment(); return *this; } ValueT& getValue() const { return getTile(mIter).value; } ValueT& operator*() const { return this->getValue(); } ValueT* operator->() const { return &(this->getValue()); } void setValue(const ValueT& v) const { assert(isTile(mIter)); getTile(mIter).value = v; } template<typename ModifyOp> void modifyValue(const ModifyOp& op) const { assert(isTile(mIter)); op(getTile(mIter).value); } }; // ValueIter template<typename RootNodeT, typename 
MapIterT, typename ChildNodeT, typename ValueT> class DenseIter: public BaseIter<RootNodeT, MapIterT, NullPred> { public: using BaseT = BaseIter<RootNodeT, MapIterT, NullPred>; using NodeType = RootNodeT; using ValueType = ValueT; using ChildNodeType = ChildNodeT; using NonConstNodeType = typename std::remove_const<NodeType>::type; using NonConstValueType = typename std::remove_const<ValueT>::type; using NonConstChildNodeType = typename std::remove_const<ChildNodeT>::type; using BaseT::mIter; DenseIter() {} DenseIter(RootNodeT& parent, const MapIterT& iter): BaseT(parent, iter) {} DenseIter& operator++() { BaseT::increment(); return *this; } bool isChildNode() const { return isChild(mIter); } ChildNodeT* probeChild(NonConstValueType& value) const { if (isChild(mIter)) return &getChild(mIter); value = getTile(mIter).value; return nullptr; } bool probeChild(ChildNodeT*& child, NonConstValueType& value) const { child = this->probeChild(value); return child != nullptr; } bool probeValue(NonConstValueType& value) const { return !this->probeChild(value); } void setChild(ChildNodeT& c) const { RootNodeT::setChild(mIter, c); } void setChild(ChildNodeT* c) const { assert(c != nullptr); RootNodeT::setChild(mIter, *c); } void setValue(const ValueT& v) const { if (isTile(mIter)) getTile(mIter).value = v; /// @internal For consistency with iterators for other node types /// (see, e.g., InternalNode::DenseIter::unsetItem()), we don't call /// setTile() here, because that would also delete the child. else stealChild(mIter, Tile(v, /*active=*/true)); } }; // DenseIter public: using ChildOnIter = ChildIter<RootNode, MapIter, ChildOnPred, ChildType>; using ChildOnCIter = ChildIter<const RootNode, MapCIter, ChildOnPred, const ChildType>; using ChildOffIter = ValueIter<RootNode, MapIter, ChildOffPred, const ValueType>; using ChildOffCIter = ValueIter<const RootNode, MapCIter, ChildOffPred, ValueType>; using ChildAllIter = DenseIter<RootNode, MapIter, ChildType, ValueType>; using ChildAllCIter = DenseIter<const RootNode, MapCIter, const ChildType, const ValueType>; using ValueOnIter = ValueIter<RootNode, MapIter, ValueOnPred, ValueType>; using ValueOnCIter = ValueIter<const RootNode, MapCIter, ValueOnPred, const ValueType>; using ValueOffIter = ValueIter<RootNode, MapIter, ValueOffPred, ValueType>; using ValueOffCIter = ValueIter<const RootNode, MapCIter, ValueOffPred, const ValueType>; using ValueAllIter = ValueIter<RootNode, MapIter, ValueAllPred, ValueType>; using ValueAllCIter = ValueIter<const RootNode, MapCIter, ValueAllPred, const ValueType>; ChildOnCIter cbeginChildOn() const { return ChildOnCIter(*this, mTable.begin()); } ChildOffCIter cbeginChildOff() const { return ChildOffCIter(*this, mTable.begin()); } ChildAllCIter cbeginChildAll() const { return ChildAllCIter(*this, mTable.begin()); } ChildOnCIter beginChildOn() const { return cbeginChildOn(); } ChildOffCIter beginChildOff() const { return cbeginChildOff(); } ChildAllCIter beginChildAll() const { return cbeginChildAll(); } ChildOnIter beginChildOn() { return ChildOnIter(*this, mTable.begin()); } ChildOffIter beginChildOff() { return ChildOffIter(*this, mTable.begin()); } ChildAllIter beginChildAll() { return ChildAllIter(*this, mTable.begin()); } ValueOnCIter cbeginValueOn() const { return ValueOnCIter(*this, mTable.begin()); } ValueOffCIter cbeginValueOff() const { return ValueOffCIter(*this, mTable.begin()); } ValueAllCIter cbeginValueAll() const { return ValueAllCIter(*this, mTable.begin()); } ValueOnCIter beginValueOn() const { return 
cbeginValueOn(); } ValueOffCIter beginValueOff() const { return cbeginValueOff(); } ValueAllCIter beginValueAll() const { return cbeginValueAll(); } ValueOnIter beginValueOn() { return ValueOnIter(*this, mTable.begin()); } ValueOffIter beginValueOff() { return ValueOffIter(*this, mTable.begin()); } ValueAllIter beginValueAll() { return ValueAllIter(*this, mTable.begin()); } /// Return the total amount of memory in bytes occupied by this node and its children. Index64 memUsage() const; /// @brief Expand the specified bbox so it includes the active tiles of /// this root node as well as all the active values in its child /// nodes. If visitVoxels is false LeafNodes will be approximated /// as dense, i.e. with all voxels active. Else the individual /// active voxels are visited to produce a tight bbox. void evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels = true) const; /// Return the bounding box of this RootNode, i.e., an infinite bounding box. static CoordBBox getNodeBoundingBox() { return CoordBBox::inf(); } /// @brief Change inactive tiles or voxels with a value equal to +/- the /// old background to the specified value (with the same sign). Active values /// are unchanged. /// /// @param value The new background value /// @param updateChildNodes If true the background values of the /// child nodes is also updated. Else only the background value /// stored in the RootNode itself is changed. /// /// @note Instead of setting @a updateChildNodes to true, consider /// using tools::changeBackground or /// tools::changeLevelSetBackground which are multi-threaded! void setBackground(const ValueType& value, bool updateChildNodes); /// Return this node's background value. const ValueType& background() const { return mBackground; } /// Return @c true if the given tile is inactive and has the background value. bool isBackgroundTile(const Tile&) const; //@{ /// Return @c true if the given iterator points to an inactive tile with the background value. bool isBackgroundTile(const MapIter&) const; bool isBackgroundTile(const MapCIter&) const; //@} /// Return the number of background tiles. size_t numBackgroundTiles() const; /// @brief Remove all background tiles. /// @return the number of tiles removed. size_t eraseBackgroundTiles(); inline void clear(); /// Return @c true if this node's table is either empty or contains only background tiles. bool empty() const { return mTable.size() == numBackgroundTiles(); } /// @brief Expand this node's table so that (x, y, z) is included in the index range. /// @return @c true if an expansion was performed (i.e., if (x, y, z) was not already /// included in the index range). bool expand(const Coord& xyz); static Index getLevel() { return LEVEL; } static void getNodeLog2Dims(std::vector<Index>& dims); static Index getChildDim() { return ChildType::DIM; } /// Return the number of entries in this node's table. Index getTableSize() const { return static_cast<Index>(mTable.size()); } Index getWidth() const { return this->getMaxIndex()[0] - this->getMinIndex()[0]; } Index getHeight() const { return this->getMaxIndex()[1] - this->getMinIndex()[1]; } Index getDepth() const { return this->getMaxIndex()[2] - this->getMinIndex()[2]; } /// Return the smallest index of the current tree. Coord getMinIndex() const; /// Return the largest index of the current tree. Coord getMaxIndex() const; /// Return the current index range. Both min and max are inclusive. 
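    /// @par Example (editor's sketch)
    /// Assuming an @c openvdb::FloatTree named @c tree, contrast the root table's
    /// index range with the tight active bounding box computed by
    /// evalActiveBoundingBox() above:
    /// @code
    /// openvdb::CoordBBox range;
    /// tree.root().getIndexRange(range);              // inclusive range spanned by the root table
    ///
    /// openvdb::CoordBBox activeBBox;
    /// tree.root().evalActiveBoundingBox(activeBBox); // expanded to enclose all active values
    /// @endcode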
void getIndexRange(CoordBBox& bbox) const; /// @brief Return @c true if the given tree has the same node and active value /// topology as this tree (but possibly a different @c ValueType). template<typename OtherChildType> bool hasSameTopology(const RootNode<OtherChildType>& other) const; /// Return @c false if the other node's dimensions don't match this node's. template<typename OtherChildType> static bool hasSameConfiguration(const RootNode<OtherChildType>& other); /// Return @c true if values of the other node's ValueType can be converted /// to values of this node's ValueType. template<typename OtherChildType> static bool hasCompatibleValueType(const RootNode<OtherChildType>& other); Index32 leafCount() const; Index32 nonLeafCount() const; Index32 childCount() const; Index64 onVoxelCount() const; Index64 offVoxelCount() const; Index64 onLeafVoxelCount() const; Index64 offLeafVoxelCount() const; Index64 onTileCount() const; void nodeCount(std::vector<Index32> &vec) const; bool isValueOn(const Coord& xyz) const; /// Return @c true if this root node, or any of its child nodes, have active tiles. bool hasActiveTiles() const; const ValueType& getValue(const Coord& xyz) const; bool probeValue(const Coord& xyz, ValueType& value) const; /// @brief Return the tree depth (0 = root) at which the value of voxel (x, y, z) resides. /// @details If (x, y, z) isn't explicitly represented in the tree (i.e., /// it is implicitly a background voxel), return -1. int getValueDepth(const Coord& xyz) const; /// Set the active state of the voxel at the given coordinates but don't change its value. void setActiveState(const Coord& xyz, bool on); /// Set the value of the voxel at the given coordinates but don't change its active state. void setValueOnly(const Coord& xyz, const ValueType& value); /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValueOn(const Coord& xyz, const ValueType& value); /// Mark the voxel at the given coordinates as inactive but don't change its value. void setValueOff(const Coord& xyz); /// Set the value of the voxel at the given coordinates and mark the voxel as inactive. void setValueOff(const Coord& xyz, const ValueType& value); /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. template<typename ModifyOp> void modifyValue(const Coord& xyz, const ModifyOp& op); /// Apply a functor to the voxel at the given coordinates. template<typename ModifyOp> void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op); //@{ /// @brief Set all voxels within a given axis-aligned box to a constant value. /// @param bbox inclusive coordinates of opposite corners of an axis-aligned box /// @param value the value to which to set voxels within the box /// @param active if true, mark voxels within the box as active, /// otherwise mark them as inactive /// @note This operation generates a sparse, but not always optimally sparse, /// representation of the filled box. Follow fill operations with a prune() /// operation for optimal sparseness. void fill(const CoordBBox& bbox, const ValueType& value, bool active = true); void sparseFill(const CoordBBox& bbox, const ValueType& value, bool active = true) { this->fill(bbox, value, active); } //@} /// @brief Set all voxels within a given axis-aligned box to a constant value /// and ensure that those voxels are all represented at the leaf level. /// @param bbox inclusive coordinates of opposite corners of an axis-aligned box. 
/// @param value the value to which to set voxels within the box. /// @param active if true, mark voxels within the box as active, /// otherwise mark them as inactive. /// @sa voxelizeActiveTiles() void denseFill(const CoordBBox& bbox, const ValueType& value, bool active = true); /// @brief Densify active tiles, i.e., replace them with leaf-level active voxels. /// /// @param threaded if true, this operation is multi-threaded (over the internal nodes). /// /// @warning This method can explode the tree's memory footprint, especially if it /// contains active tiles at the upper levels (in particular the root level)! /// /// @sa denseFill() void voxelizeActiveTiles(bool threaded = true); /// @brief Copy into a dense grid the values of all voxels, both active and inactive, /// that intersect a given bounding box. /// @param bbox inclusive bounding box of the voxels to be copied into the dense grid /// @param dense dense grid with a stride in @e z of one (see tools::Dense /// in tools/Dense.h for the required API) template<typename DenseT> void copyToDense(const CoordBBox& bbox, DenseT& dense) const; // // I/O // bool writeTopology(std::ostream&, bool toHalf = false) const; bool readTopology(std::istream&, bool fromHalf = false); void writeBuffers(std::ostream&, bool toHalf = false) const; void readBuffers(std::istream&, bool fromHalf = false); void readBuffers(std::istream&, const CoordBBox&, bool fromHalf = false); // // Voxel access // /// Return the value of the voxel at the given coordinates and, if necessary, update /// the accessor with pointers to the nodes along the path from the root node to /// the node containing the voxel. /// @note Used internally by ValueAccessor. template<typename AccessorT> const ValueType& getValueAndCache(const Coord& xyz, AccessorT&) const; /// Return @c true if the voxel at the given coordinates is active and, if necessary, /// update the accessor with pointers to the nodes along the path from the root node /// to the node containing the voxel. /// @note Used internally by ValueAccessor. template<typename AccessorT> bool isValueOnAndCache(const Coord& xyz, AccessorT&) const; /// Change the value of the voxel at the given coordinates and mark it as active. /// If necessary, update the accessor with pointers to the nodes along the path /// from the root node to the node containing the voxel. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setValueAndCache(const Coord& xyz, const ValueType& value, AccessorT&); /// Set the value of the voxel at the given coordinates without changing its active state. /// If necessary, update the accessor with pointers to the nodes along the path /// from the root node to the node containing the voxel. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setValueOnlyAndCache(const Coord& xyz, const ValueType& value, AccessorT&); /// Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. /// If necessary, update the accessor with pointers to the nodes along the path /// from the root node to the node containing the voxel. /// @note Used internally by ValueAccessor. template<typename ModifyOp, typename AccessorT> void modifyValueAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&); /// Apply a functor to the voxel at the given coordinates. /// If necessary, update the accessor with pointers to the nodes along the path /// from the root node to the node containing the voxel. /// @note Used internally by ValueAccessor. 
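    /// @par Example (editor's sketch)
    /// These @c *AndCache() methods are not usually called directly; they are reached
    /// through a tree::ValueAccessor, which supplies and maintains the node cache
    /// (assumes an @c openvdb::FloatTree):
    /// @code
    /// openvdb::FloatTree tree(/*background=*/0.0f);
    /// openvdb::tree::ValueAccessor<openvdb::FloatTree> acc(tree);
    /// acc.setValue(openvdb::Coord(1, 2, 3), 1.0f);   // cached, active write
    /// acc.modifyValue(openvdb::Coord(1, 2, 3), [](float& v) { v += 0.5f; });
    /// @endcode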
template<typename ModifyOp, typename AccessorT> void modifyValueAndActiveStateAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&); /// Change the value of the voxel at the given coordinates and mark it as inactive. /// If necessary, update the accessor with pointers to the nodes along the path /// from the root node to the node containing the voxel. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setValueOffAndCache(const Coord& xyz, const ValueType& value, AccessorT&); /// Set the active state of the voxel at the given coordinates without changing its value. /// If necessary, update the accessor with pointers to the nodes along the path /// from the root node to the node containing the voxel. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setActiveStateAndCache(const Coord& xyz, bool on, AccessorT&); /// Return, in @a value, the value of the voxel at the given coordinates and, /// if necessary, update the accessor with pointers to the nodes along /// the path from the root node to the node containing the voxel. /// @return @c true if the voxel at the given coordinates is active /// @note Used internally by ValueAccessor. template<typename AccessorT> bool probeValueAndCache(const Coord& xyz, ValueType& value, AccessorT&) const; /// Return the tree depth (0 = root) at which the value of voxel (x, y, z) resides. /// If (x, y, z) isn't explicitly represented in the tree (i.e., it is implicitly /// a background voxel), return -1. If necessary, update the accessor with pointers /// to the nodes along the path from the root node to the node containing the voxel. /// @note Used internally by ValueAccessor. template<typename AccessorT> int getValueDepthAndCache(const Coord& xyz, AccessorT&) const; /// Set all voxels that lie outside the given axis-aligned box to the background. void clip(const CoordBBox&); /// @brief Reduce the memory footprint of this tree by replacing with tiles /// any nodes whose values are all the same (optionally to within a tolerance) /// and have the same active state. /// /// @note Consider instead using tools::prune which is multi-threaded! void prune(const ValueType& tolerance = zeroVal<ValueType>()); /// @brief Add the given leaf node to this tree, creating a new branch if necessary. /// If a leaf node with the same origin already exists, replace it. void addLeaf(LeafNodeType* leaf); /// @brief Same as addLeaf() but, if necessary, update the given accessor with pointers /// to the nodes along the path from the root node to the node containing the coordinate. template<typename AccessorT> void addLeafAndCache(LeafNodeType* leaf, AccessorT&); /// @brief Return a pointer to the node of type @c NodeT that contains voxel (x, y, z) /// and replace it with a tile of the specified value and state. /// If no such node exists, leave the tree unchanged and return @c nullptr. /// /// @note The caller takes ownership of the node and is responsible for deleting it. /// /// @warning Since this method potentially removes nodes and branches of the tree, /// it is important to clear the caches of all ValueAccessors associated with this tree. template<typename NodeT> NodeT* stealNode(const Coord& xyz, const ValueType& value, bool state); /// @brief Add the given child node at the root level. /// If a child node with the same origin already exists, delete the old node and add /// the new node in its place (i.e. ownership of the new child node is transferred /// to this RootNode). 
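    /// @par Example (editor's sketch)
    /// Assuming an @c openvdb::FloatTree named @c tree:
    /// @code
    /// using ChildT = openvdb::FloatTree::RootNodeType::ChildNodeType;
    /// // The origin should be aligned to the child node's dimensions.
    /// auto* child = new ChildT(openvdb::Coord(0, 0, 0), /*value=*/0.0f, /*active=*/false);
    /// tree.root().addChild(child); // the root node takes ownership of the child
    /// @endcode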
/// @return @c true (for consistency with InternalNode::addChild) bool addChild(ChildType* child); /// @brief Add a tile containing voxel (x, y, z) at the root level, /// deleting the existing branch if necessary. void addTile(const Coord& xyz, const ValueType& value, bool state); /// @brief Add a tile containing voxel (x, y, z) at the specified tree level, /// creating a new branch if necessary. Delete any existing lower-level nodes /// that contain (x, y, z). void addTile(Index level, const Coord& xyz, const ValueType& value, bool state); /// @brief Same as addTile() but, if necessary, update the given accessor with pointers /// to the nodes along the path from the root node to the node containing the coordinate. template<typename AccessorT> void addTileAndCache(Index level, const Coord& xyz, const ValueType&, bool state, AccessorT&); /// @brief Return a pointer to the leaf node that contains voxel (x, y, z). /// If no such node exists, create one that preserves the values and /// active states of all voxels. /// @details Use this method to preallocate a static tree topology /// over which to safely perform multithreaded processing. LeafNodeType* touchLeaf(const Coord& xyz); /// @brief Same as touchLeaf() but, if necessary, update the given accessor with pointers /// to the nodes along the path from the root node to the node containing the coordinate. template<typename AccessorT> LeafNodeType* touchLeafAndCache(const Coord& xyz, AccessorT& acc); //@{ /// @brief Return a pointer to the node that contains voxel (x, y, z). /// If no such node exists, return @c nullptr. template <typename NodeT> NodeT* probeNode(const Coord& xyz); template <typename NodeT> const NodeT* probeConstNode(const Coord& xyz) const; //@} //@{ /// @brief Same as probeNode() but, if necessary, update the given accessor with pointers /// to the nodes along the path from the root node to the node containing the coordinate. template<typename NodeT, typename AccessorT> NodeT* probeNodeAndCache(const Coord& xyz, AccessorT& acc); template<typename NodeT, typename AccessorT> const NodeT* probeConstNodeAndCache(const Coord& xyz, AccessorT& acc) const; //@} //@{ /// @brief Return a pointer to the leaf node that contains voxel (x, y, z). /// If no such node exists, return @c nullptr. LeafNodeType* probeLeaf(const Coord& xyz); const LeafNodeType* probeConstLeaf(const Coord& xyz) const; const LeafNodeType* probeLeaf(const Coord& xyz) const; //@} //@{ /// @brief Same as probeLeaf() but, if necessary, update the given accessor with pointers /// to the nodes along the path from the root node to the node containing the coordinate. 
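    /// @par Example (editor's sketch)
    /// Pre-allocate leaf topology with touchLeaf(), then read it back with
    /// probeConstLeaf() (assumes an @c openvdb::FloatTree named @c tree):
    /// @code
    /// auto& root = tree.root();
    /// root.touchLeaf(openvdb::Coord(0, 0, 0));             // ensure a leaf node exists here
    /// if (const auto* leaf = root.probeConstLeaf(openvdb::Coord(0, 0, 0))) {
    ///     const openvdb::Index64 n = leaf->onVoxelCount(); // inspect without modifying
    /// }
    /// @endcode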
template<typename AccessorT> LeafNodeType* probeLeafAndCache(const Coord& xyz, AccessorT& acc); template<typename AccessorT> const LeafNodeType* probeConstLeafAndCache(const Coord& xyz, AccessorT& acc) const; template<typename AccessorT> const LeafNodeType* probeLeafAndCache(const Coord& xyz, AccessorT& acc) const; //@} // // Aux methods // //@{ /// @brief Adds all nodes of a certain type to a container with the following API: /// @code /// struct ArrayT { /// using value_type = ...;// defines the type of nodes to be added to the array /// void push_back(value_type nodePtr);// method that add nodes to the array /// }; /// @endcode /// @details An example of a wrapper around a c-style array is: /// @code /// struct MyArray { /// using value_type = LeafType*; /// value_type* ptr; /// MyArray(value_type* array) : ptr(array) {} /// void push_back(value_type leaf) { *ptr++ = leaf; } ///}; /// @endcode /// @details An example that constructs a list of pointer to all leaf nodes is: /// @code /// std::vector<const LeafNodeType*> array;//most std contains have the required API /// array.reserve(tree.leafCount());//this is a fast preallocation. /// tree.getNodes(array); /// @endcode template<typename ArrayT> void getNodes(ArrayT& array); template<typename ArrayT> void getNodes(ArrayT& array) const; //@} //@{ /// @brief Steals all nodes of a certain type from the tree and /// adds them to a container with the following API: /// @code /// struct ArrayT { /// using value_type = ...;// defines the type of nodes to be added to the array /// void push_back(value_type nodePtr);// method that add nodes to the array /// }; /// @endcode /// @details An example of a wrapper around a c-style array is: /// @code /// struct MyArray { /// using value_type = LeafType*; /// value_type* ptr; /// MyArray(value_type* array) : ptr(array) {} /// void push_back(value_type leaf) { *ptr++ = leaf; } ///}; /// @endcode /// @details An example that constructs a list of pointer to all leaf nodes is: /// @code /// std::vector<const LeafNodeType*> array;//most std contains have the required API /// array.reserve(tree.leafCount());//this is a fast preallocation. /// tree.stealNodes(array); /// @endcode template<typename ArrayT> void stealNodes(ArrayT& array, const ValueType& value, bool state); template<typename ArrayT> void stealNodes(ArrayT& array) { this->stealNodes(array, mBackground, false); } //@} /// @brief Efficiently merge another tree into this tree using one of several schemes. /// @details This operation is primarily intended to combine trees that are mostly /// non-overlapping (for example, intermediate trees from computations that are /// parallelized across disjoint regions of space). /// @note This operation is not guaranteed to produce an optimally sparse tree. /// Follow merge() with prune() for optimal sparseness. /// @warning This operation always empties the other tree. template<MergePolicy Policy> void merge(RootNode& other); /// @brief Union this tree's set of active values with the active values /// of the other tree, whose @c ValueType may be different. /// @details The resulting state of a value is active if the corresponding value /// was already active OR if it is active in the other tree. Also, a resulting /// value maps to a voxel if the corresponding value already mapped to a voxel /// OR if it is a voxel in the other tree. Thus, a resulting value can only /// map to a tile if the corresponding value already mapped to a tile /// AND if it is a tile value in other tree. 
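    /// @par Example (editor's sketch)
    /// The two trees may have different value types, as long as their configurations match:
    /// @code
    /// openvdb::FloatTree a(/*background=*/0.0f);
    /// openvdb::BoolTree  b(false);
    /// b.setValueOn(openvdb::Coord(10, 0, 0), true);
    /// a.root().topologyUnion(b.root());
    /// // The voxel at (10, 0, 0) is now active in a but keeps a's background value.
    /// @endcode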
/// /// @note This operation modifies only active states, not values. /// Specifically, active tiles and voxels in this tree are not changed, and /// tiles or voxels that were inactive in this tree but active in the other tree /// are marked as active in this tree but left with their original values. template<typename OtherChildType> void topologyUnion(const RootNode<OtherChildType>& other); /// @brief Intersects this tree's set of active values with the active values /// of the other tree, whose @c ValueType may be different. /// @details The resulting state of a value is active only if the corresponding /// value was already active AND if it is active in the other tree. Also, a /// resulting value maps to a voxel if the corresponding value /// already mapped to an active voxel in either of the two grids /// and it maps to an active tile or voxel in the other grid. /// /// @note This operation can delete branches in this grid if they /// overlap with inactive tiles in the other grid. Likewise active /// voxels can be turned into inactive voxels resulting in leaf /// nodes with no active values. Thus, it is recommended to /// subsequently call prune. template<typename OtherChildType> void topologyIntersection(const RootNode<OtherChildType>& other); /// @brief Difference this tree's set of active values with the active values /// of the other tree, whose @c ValueType may be different. So a /// resulting voxel will be active only if the original voxel is /// active in this tree and inactive in the other tree. /// /// @note This operation can delete branches in this grid if they /// overlap with active tiles in the other grid. Likewise active /// voxels can be turned into inactive voxels resulting in leaf /// nodes with no active values. Thus, it is recommended to /// subsequently call prune. template<typename OtherChildType> void topologyDifference(const RootNode<OtherChildType>& other); template<typename CombineOp> void combine(RootNode& other, CombineOp&, bool prune = false); template<typename CombineOp, typename OtherRootNode /*= RootNode*/> void combine2(const RootNode& other0, const OtherRootNode& other1, CombineOp& op, bool prune = false); /// @brief Call the templated functor BBoxOp with bounding box /// information for all active tiles and leaf nodes in the tree. /// An additional level argument is provided for each callback. /// /// @note The bounding boxes are guaranteed to be non-overlapping. template<typename BBoxOp> void visitActiveBBox(BBoxOp&) const; template<typename VisitorOp> void visit(VisitorOp&); template<typename VisitorOp> void visit(VisitorOp&) const; template<typename OtherRootNodeType, typename VisitorOp> void visit2(OtherRootNodeType& other, VisitorOp&); template<typename OtherRootNodeType, typename VisitorOp> void visit2(OtherRootNodeType& other, VisitorOp&) const; private: /// During topology-only construction, access is needed /// to protected/private members of other template instances. template<typename> friend class RootNode; template<typename, typename, bool> friend struct RootNodeCopyHelper; template<typename, typename, typename, bool> friend struct RootNodeCombineHelper; /// Currently no-op, but can be used to define empty and delete keys for mTable void initTable() {} //@{ /// @internal Used by doVisit2(). 
void resetTable(MapType& table) { mTable.swap(table); table.clear(); } void resetTable(const MapType&) const {} //@} #if OPENVDB_ABI_VERSION_NUMBER < 8 Index getChildCount() const; #endif Index getTileCount() const; Index getActiveTileCount() const; Index getInactiveTileCount() const; /// Return a MapType key for the given coordinates. static Coord coordToKey(const Coord& xyz) { return xyz & ~(ChildType::DIM - 1); } /// Insert this node's mTable keys into the given set. void insertKeys(CoordSet&) const; /// Return @c true if this node's mTable contains the given key. bool hasKey(const Coord& key) const { return mTable.find(key) != mTable.end(); } //@{ /// @brief Look up the given key in this node's mTable. /// @return an iterator pointing to the matching mTable entry or to mTable.end(). MapIter findKey(const Coord& key) { return mTable.find(key); } MapCIter findKey(const Coord& key) const { return mTable.find(key); } //@} //@{ /// @brief Convert the given coordinates to a key and look the key up in this node's mTable. /// @return an iterator pointing to the matching mTable entry or to mTable.end(). MapIter findCoord(const Coord& xyz) { return mTable.find(coordToKey(xyz)); } MapCIter findCoord(const Coord& xyz) const { return mTable.find(coordToKey(xyz)); } //@} /// @brief Convert the given coordinates to a key and look the key up in this node's mTable. /// @details If the key is not found, insert a background tile with that key. /// @return an iterator pointing to the matching mTable entry. MapIter findOrAddCoord(const Coord& xyz); /// @brief Verify that the tree rooted at @a other has the same configuration /// (levels, branching factors and node dimensions) as this tree, but allow /// their ValueTypes to differ. /// @throw TypeError if the other tree's configuration doesn't match this tree's. template<typename OtherChildType> static void enforceSameConfiguration(const RootNode<OtherChildType>& other); /// @brief Verify that @a other has values of a type that can be converted /// to this node's ValueType. /// @details For example, values of type float are compatible with values of type Vec3s, /// because a Vec3s can be constructed from a float. But the reverse is not true. /// @throw TypeError if the other node's ValueType is not convertible into this node's. template<typename OtherChildType> static void enforceCompatibleValueTypes(const RootNode<OtherChildType>& other); template<typename CombineOp, typename OtherRootNode /*= RootNode*/> void doCombine2(const RootNode&, const OtherRootNode&, CombineOp&, bool prune); template<typename RootNodeT, typename VisitorOp, typename ChildAllIterT> static inline void doVisit(RootNodeT&, VisitorOp&); template<typename RootNodeT, typename OtherRootNodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> static inline void doVisit2(RootNodeT&, OtherRootNodeT&, VisitorOp&); MapType mTable; ValueType mBackground; }; // end of RootNode class //////////////////////////////////////// /// @brief NodeChain<RootNodeType, RootNodeType::LEVEL>::Type is a openvdb::TypeList /// that lists the types of the nodes of the tree rooted at RootNodeType in reverse order, /// from LeafNode to RootNode. 
/// @details For example, if RootNodeType is /// @code /// RootNode<InternalNode<InternalNode<LeafNode> > > /// @endcode /// then NodeChain::Type is /// @code /// openvdb::TypeList< /// LeafNode, /// InternalNode<LeafNode>, /// InternalNode<InternalNode<LeafNode> >, /// RootNode<InternalNode<InternalNode<LeafNode> > > > /// @endcode /// /// @note Use the following to get the Nth node type, where N=0 is the LeafNodeType: /// @code /// NodeChainType::Get<N>; /// @endcode template<typename HeadT, int HeadLevel> struct NodeChain { using SubtreeT = typename NodeChain<typename HeadT::ChildNodeType, HeadLevel-1>::Type; using Type = typename SubtreeT::template Append<HeadT>; }; /// Specialization to terminate NodeChain template<typename HeadT> struct NodeChain<HeadT, /*HeadLevel=*/1> { using Type = TypeList<typename HeadT::ChildNodeType, HeadT>; }; //////////////////////////////////////// //@{ /// Helper metafunction used to implement RootNode::SameConfiguration /// (which, as an inner class, can't be independently specialized) template<typename ChildT1, typename NodeT2> struct SameRootConfig { static const bool value = false; }; template<typename ChildT1, typename ChildT2> struct SameRootConfig<ChildT1, RootNode<ChildT2> > { static const bool value = ChildT1::template SameConfiguration<ChildT2>::value; }; //@} //////////////////////////////////////// template<typename ChildT> inline RootNode<ChildT>::RootNode(): mBackground(zeroVal<ValueType>()) { this->initTable(); } template<typename ChildT> inline RootNode<ChildT>::RootNode(const ValueType& background): mBackground(background) { this->initTable(); } template<typename ChildT> template<typename OtherChildType> inline RootNode<ChildT>::RootNode(const RootNode<OtherChildType>& other, const ValueType& backgd, const ValueType& foregd, TopologyCopy): mBackground(backgd) { using OtherRootT = RootNode<OtherChildType>; enforceSameConfiguration(other); const Tile bgTile(backgd, /*active=*/false), fgTile(foregd, true); this->initTable(); for (typename OtherRootT::MapCIter i=other.mTable.begin(), e=other.mTable.end(); i != e; ++i) { mTable[i->first] = OtherRootT::isTile(i) ? NodeStruct(OtherRootT::isTileOn(i) ? fgTile : bgTile) : NodeStruct(*(new ChildT(OtherRootT::getChild(i), backgd, foregd, TopologyCopy()))); } } template<typename ChildT> template<typename OtherChildType> inline RootNode<ChildT>::RootNode(const RootNode<OtherChildType>& other, const ValueType& backgd, TopologyCopy): mBackground(backgd) { using OtherRootT = RootNode<OtherChildType>; enforceSameConfiguration(other); const Tile bgTile(backgd, /*active=*/false), fgTile(backgd, true); this->initTable(); for (typename OtherRootT::MapCIter i=other.mTable.begin(), e=other.mTable.end(); i != e; ++i) { mTable[i->first] = OtherRootT::isTile(i) ? NodeStruct(OtherRootT::isTileOn(i) ? fgTile : bgTile) : NodeStruct(*(new ChildT(OtherRootT::getChild(i), backgd, TopologyCopy()))); } } //////////////////////////////////////// // This helper class is a friend of RootNode and is needed so that assignment // with value conversion can be specialized for compatible and incompatible // pairs of RootNode types. template<typename RootT, typename OtherRootT, bool Compatible = false> struct RootNodeCopyHelper { static inline void copyWithValueConversion(RootT& self, const OtherRootT& other) { // If the two root nodes have different configurations or incompatible ValueTypes, // throw an exception. 
self.enforceSameConfiguration(other); self.enforceCompatibleValueTypes(other); // One of the above two tests should throw, so we should never get here: std::ostringstream ostr; ostr << "cannot convert a " << typeid(OtherRootT).name() << " to a " << typeid(RootT).name(); OPENVDB_THROW(TypeError, ostr.str()); } }; // Specialization for root nodes of compatible types template<typename RootT, typename OtherRootT> struct RootNodeCopyHelper<RootT, OtherRootT, /*Compatible=*/true> { static inline void copyWithValueConversion(RootT& self, const OtherRootT& other) { using ValueT = typename RootT::ValueType; using ChildT = typename RootT::ChildNodeType; using NodeStruct = typename RootT::NodeStruct; using Tile = typename RootT::Tile; using OtherValueT = typename OtherRootT::ValueType; using OtherMapCIter = typename OtherRootT::MapCIter; using OtherTile = typename OtherRootT::Tile; struct Local { /// @todo Consider using a value conversion functor passed as an argument instead. static inline ValueT convertValue(const OtherValueT& val) { return ValueT(val); } }; self.mBackground = Local::convertValue(other.mBackground); self.clear(); self.initTable(); for (OtherMapCIter i = other.mTable.begin(), e = other.mTable.end(); i != e; ++i) { if (other.isTile(i)) { // Copy the other node's tile, but convert its value to this node's ValueType. const OtherTile& otherTile = other.getTile(i); self.mTable[i->first] = NodeStruct( Tile(Local::convertValue(otherTile.value), otherTile.active)); } else { // Copy the other node's child, but convert its values to this node's ValueType. self.mTable[i->first] = NodeStruct(*(new ChildT(other.getChild(i)))); } } } }; // Overload for root nodes of the same type as this node template<typename ChildT> inline RootNode<ChildT>& RootNode<ChildT>::operator=(const RootNode& other) { if (&other != this) { mBackground = other.mBackground; this->clear(); this->initTable(); for (MapCIter i = other.mTable.begin(), e = other.mTable.end(); i != e; ++i) { mTable[i->first] = isTile(i) ? NodeStruct(getTile(i)) : NodeStruct(*(new ChildT(getChild(i)))); } } return *this; } // Overload for root nodes of different types template<typename ChildT> template<typename OtherChildType> inline RootNode<ChildT>& RootNode<ChildT>::operator=(const RootNode<OtherChildType>& other) { using OtherRootT = RootNode<OtherChildType>; using OtherValueT = typename OtherRootT::ValueType; static const bool compatible = (SameConfiguration<OtherRootT>::value && CanConvertType</*from=*/OtherValueT, /*to=*/ValueType>::value); RootNodeCopyHelper<RootNode, OtherRootT, compatible>::copyWithValueConversion(*this, other); return *this; } //////////////////////////////////////// template<typename ChildT> inline void RootNode<ChildT>::setBackground(const ValueType& background, bool updateChildNodes) { if (math::isExactlyEqual(background, mBackground)) return; if (updateChildNodes) { // Traverse the tree, replacing occurrences of mBackground with background // and -mBackground with -background. 
for (MapIter iter=mTable.begin(); iter!=mTable.end(); ++iter) { ChildT *child = iter->second.child; if (child) { child->resetBackground(/*old=*/mBackground, /*new=*/background); } else { Tile& tile = getTile(iter); if (tile.active) continue;//only change inactive tiles if (math::isApproxEqual(tile.value, mBackground)) { tile.value = background; } else if (math::isApproxEqual(tile.value, math::negative(mBackground))) { tile.value = math::negative(background); } } } } mBackground = background; } template<typename ChildT> inline bool RootNode<ChildT>::isBackgroundTile(const Tile& tile) const { return !tile.active && math::isApproxEqual(tile.value, mBackground); } template<typename ChildT> inline bool RootNode<ChildT>::isBackgroundTile(const MapIter& iter) const { return isTileOff(iter) && math::isApproxEqual(getTile(iter).value, mBackground); } template<typename ChildT> inline bool RootNode<ChildT>::isBackgroundTile(const MapCIter& iter) const { return isTileOff(iter) && math::isApproxEqual(getTile(iter).value, mBackground); } template<typename ChildT> inline size_t RootNode<ChildT>::numBackgroundTiles() const { size_t count = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (this->isBackgroundTile(i)) ++count; } return count; } template<typename ChildT> inline size_t RootNode<ChildT>::eraseBackgroundTiles() { std::set<Coord> keysToErase; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (this->isBackgroundTile(i)) keysToErase.insert(i->first); } for (std::set<Coord>::iterator i = keysToErase.begin(), e = keysToErase.end(); i != e; ++i) { mTable.erase(*i); } return keysToErase.size(); } //////////////////////////////////////// template<typename ChildT> inline void RootNode<ChildT>::insertKeys(CoordSet& keys) const { for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { keys.insert(i->first); } } template<typename ChildT> inline typename RootNode<ChildT>::MapIter RootNode<ChildT>::findOrAddCoord(const Coord& xyz) { const Coord key = coordToKey(xyz); std::pair<MapIter, bool> result = mTable.insert( typename MapType::value_type(key, NodeStruct(Tile(mBackground, /*active=*/false)))); return result.first; } template<typename ChildT> inline bool RootNode<ChildT>::expand(const Coord& xyz) { const Coord key = coordToKey(xyz); std::pair<MapIter, bool> result = mTable.insert( typename MapType::value_type(key, NodeStruct(Tile(mBackground, /*active=*/false)))); return result.second; // return true if the key did not already exist } //////////////////////////////////////// template<typename ChildT> inline void RootNode<ChildT>::getNodeLog2Dims(std::vector<Index>& dims) { dims.push_back(0); // magic number; RootNode has no Log2Dim ChildT::getNodeLog2Dims(dims); } template<typename ChildT> inline Coord RootNode<ChildT>::getMinIndex() const { return mTable.empty() ? Coord(0) : mTable.begin()->first; } template<typename ChildT> inline Coord RootNode<ChildT>::getMaxIndex() const { return mTable.empty() ? 
Coord(0) : mTable.rbegin()->first + Coord(ChildT::DIM - 1); } template<typename ChildT> inline void RootNode<ChildT>::getIndexRange(CoordBBox& bbox) const { bbox.min() = this->getMinIndex(); bbox.max() = this->getMaxIndex(); } //////////////////////////////////////// template<typename ChildT> template<typename OtherChildType> inline bool RootNode<ChildT>::hasSameTopology(const RootNode<OtherChildType>& other) const { using OtherRootT = RootNode<OtherChildType>; using OtherMapT = typename OtherRootT::MapType; using OtherIterT = typename OtherRootT::MapIter; using OtherCIterT = typename OtherRootT::MapCIter; if (!hasSameConfiguration(other)) return false; // Create a local copy of the other node's table. OtherMapT copyOfOtherTable = other.mTable; // For each entry in this node's table... for (MapCIter thisIter = mTable.begin(); thisIter != mTable.end(); ++thisIter) { if (this->isBackgroundTile(thisIter)) continue; // ignore background tiles // Fail if there is no corresponding entry in the other node's table. OtherCIterT otherIter = other.findKey(thisIter->first); if (otherIter == other.mTable.end()) return false; // Fail if this entry is a tile and the other is a child or vice-versa. if (isChild(thisIter)) {//thisIter points to a child if (OtherRootT::isTile(otherIter)) return false; // Fail if both entries are children, but the children have different topology. if (!getChild(thisIter).hasSameTopology(&OtherRootT::getChild(otherIter))) return false; } else {//thisIter points to a tile if (OtherRootT::isChild(otherIter)) return false; if (getTile(thisIter).active != OtherRootT::getTile(otherIter).active) return false; } // Remove tiles and child nodes with matching topology from // the copy of the other node's table. This is required since // the two root tables can include an arbitrary number of // background tiles and still have the same topology! copyOfOtherTable.erase(otherIter->first); } // Fail if the remaining entries in copyOfOtherTable are not all background tiles. for (OtherIterT i = copyOfOtherTable.begin(), e = copyOfOtherTable.end(); i != e; ++i) { if (!other.isBackgroundTile(i)) return false; } return true; } template<typename ChildT> template<typename OtherChildType> inline bool RootNode<ChildT>::hasSameConfiguration(const RootNode<OtherChildType>&) { std::vector<Index> thisDims, otherDims; RootNode::getNodeLog2Dims(thisDims); RootNode<OtherChildType>::getNodeLog2Dims(otherDims); return (thisDims == otherDims); } template<typename ChildT> template<typename OtherChildType> inline void RootNode<ChildT>::enforceSameConfiguration(const RootNode<OtherChildType>&) { std::vector<Index> thisDims, otherDims; RootNode::getNodeLog2Dims(thisDims); RootNode<OtherChildType>::getNodeLog2Dims(otherDims); if (thisDims != otherDims) { std::ostringstream ostr; ostr << "grids have incompatible configurations (" << thisDims[0]; for (size_t i = 1, N = thisDims.size(); i < N; ++i) ostr << " x " << thisDims[i]; ostr << " vs. 
" << otherDims[0]; for (size_t i = 1, N = otherDims.size(); i < N; ++i) ostr << " x " << otherDims[i]; ostr << ")"; OPENVDB_THROW(TypeError, ostr.str()); } } template<typename ChildT> template<typename OtherChildType> inline bool RootNode<ChildT>::hasCompatibleValueType(const RootNode<OtherChildType>&) { using OtherValueType = typename OtherChildType::ValueType; return CanConvertType</*from=*/OtherValueType, /*to=*/ValueType>::value; } template<typename ChildT> template<typename OtherChildType> inline void RootNode<ChildT>::enforceCompatibleValueTypes(const RootNode<OtherChildType>&) { using OtherValueType = typename OtherChildType::ValueType; if (!CanConvertType</*from=*/OtherValueType, /*to=*/ValueType>::value) { std::ostringstream ostr; ostr << "values of type " << typeNameAsString<OtherValueType>() << " cannot be converted to type " << typeNameAsString<ValueType>(); OPENVDB_THROW(TypeError, ostr.str()); } } //////////////////////////////////////// template<typename ChildT> inline Index64 RootNode<ChildT>::memUsage() const { Index64 sum = sizeof(*this); for (MapCIter iter=mTable.begin(); iter!=mTable.end(); ++iter) { if (const ChildT *child = iter->second.child) { sum += child->memUsage(); } } return sum; } template<typename ChildT> inline void RootNode<ChildT>::clear() { for (MapIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { delete i->second.child; } mTable.clear(); } template<typename ChildT> inline void RootNode<ChildT>::evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels) const { for (MapCIter iter=mTable.begin(); iter!=mTable.end(); ++iter) { if (const ChildT *child = iter->second.child) { child->evalActiveBoundingBox(bbox, visitVoxels); } else if (isTileOn(iter)) { bbox.expand(iter->first, ChildT::DIM); } } } #if OPENVDB_ABI_VERSION_NUMBER < 8 template<typename ChildT> inline Index RootNode<ChildT>::getChildCount() const { return this->childCount(); } #endif template<typename ChildT> inline Index RootNode<ChildT>::getTileCount() const { Index sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isTile(i)) ++sum; } return sum; } template<typename ChildT> inline Index RootNode<ChildT>::getActiveTileCount() const { Index sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isTileOn(i)) ++sum; } return sum; } template<typename ChildT> inline Index RootNode<ChildT>::getInactiveTileCount() const { Index sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isTileOff(i)) ++sum; } return sum; } template<typename ChildT> inline Index32 RootNode<ChildT>::leafCount() const { Index32 sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) sum += getChild(i).leafCount(); } return sum; } template<typename ChildT> inline Index32 RootNode<ChildT>::nonLeafCount() const { Index32 sum = 1; if (ChildT::LEVEL != 0) { for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) sum += getChild(i).nonLeafCount(); } } return sum; } template<typename ChildT> inline Index32 RootNode<ChildT>::childCount() const { Index sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) ++sum; } return sum; } template<typename ChildT> inline Index64 RootNode<ChildT>::onVoxelCount() const { Index64 sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) { sum += getChild(i).onVoxelCount(); } else if (isTileOn(i)) { sum += ChildT::NUM_VOXELS; } } return sum; } template<typename ChildT> inline Index64 
RootNode<ChildT>::offVoxelCount() const { Index64 sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) { sum += getChild(i).offVoxelCount(); } else if (isTileOff(i) && !this->isBackgroundTile(i)) { sum += ChildT::NUM_VOXELS; } } return sum; } template<typename ChildT> inline Index64 RootNode<ChildT>::onLeafVoxelCount() const { Index64 sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) sum += getChild(i).onLeafVoxelCount(); } return sum; } template<typename ChildT> inline Index64 RootNode<ChildT>::offLeafVoxelCount() const { Index64 sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) sum += getChild(i).offLeafVoxelCount(); } return sum; } template<typename ChildT> inline Index64 RootNode<ChildT>::onTileCount() const { Index64 sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) { sum += getChild(i).onTileCount(); } else if (isTileOn(i)) { sum += 1; } } return sum; } template<typename ChildT> inline void RootNode<ChildT>::nodeCount(std::vector<Index32> &vec) const { assert(vec.size() > LEVEL); Index32 sum = 0; for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) { ++sum; getChild(i).nodeCount(vec); } } vec[LEVEL] = 1;// one root node vec[ChildNodeType::LEVEL] = sum; } //////////////////////////////////////// template<typename ChildT> inline bool RootNode<ChildT>::isValueOn(const Coord& xyz) const { MapCIter iter = this->findCoord(xyz); if (iter == mTable.end() || isTileOff(iter)) return false; return isTileOn(iter) ? true : getChild(iter).isValueOn(xyz); } template<typename ChildT> inline bool RootNode<ChildT>::hasActiveTiles() const { for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i) ? getChild(i).hasActiveTiles() : getTile(i).active) return true; } return false; } template<typename ChildT> template<typename AccessorT> inline bool RootNode<ChildT>::isValueOnAndCache(const Coord& xyz, AccessorT& acc) const { MapCIter iter = this->findCoord(xyz); if (iter == mTable.end() || isTileOff(iter)) return false; if (isTileOn(iter)) return true; acc.insert(xyz, &getChild(iter)); return getChild(iter).isValueOnAndCache(xyz, acc); } template<typename ChildT> inline const typename ChildT::ValueType& RootNode<ChildT>::getValue(const Coord& xyz) const { MapCIter iter = this->findCoord(xyz); return iter == mTable.end() ? mBackground : (isTile(iter) ? getTile(iter).value : getChild(iter).getValue(xyz)); } template<typename ChildT> template<typename AccessorT> inline const typename ChildT::ValueType& RootNode<ChildT>::getValueAndCache(const Coord& xyz, AccessorT& acc) const { MapCIter iter = this->findCoord(xyz); if (iter == mTable.end()) return mBackground; if (isChild(iter)) { acc.insert(xyz, &getChild(iter)); return getChild(iter).getValueAndCache(xyz, acc); } return getTile(iter).value; } template<typename ChildT> inline int RootNode<ChildT>::getValueDepth(const Coord& xyz) const { MapCIter iter = this->findCoord(xyz); return iter == mTable.end() ? -1 : (isTile(iter) ? 
0 : int(LEVEL) - int(getChild(iter).getValueLevel(xyz))); } template<typename ChildT> template<typename AccessorT> inline int RootNode<ChildT>::getValueDepthAndCache(const Coord& xyz, AccessorT& acc) const { MapCIter iter = this->findCoord(xyz); if (iter == mTable.end()) return -1; if (isTile(iter)) return 0; acc.insert(xyz, &getChild(iter)); return int(LEVEL) - int(getChild(iter).getValueLevelAndCache(xyz, acc)); } template<typename ChildT> inline void RootNode<ChildT>::setValueOff(const Coord& xyz) { MapIter iter = this->findCoord(xyz); if (iter != mTable.end() && !isTileOff(iter)) { if (isTileOn(iter)) { setChild(iter, *new ChildT(xyz, getTile(iter).value, /*active=*/true)); } getChild(iter).setValueOff(xyz); } } template<typename ChildT> inline void RootNode<ChildT>::setActiveState(const Coord& xyz, bool on) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { if (on) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else { // Nothing to do; (x, y, z) is background and therefore already inactive. } } else if (isChild(iter)) { child = &getChild(iter); } else if (on != getTile(iter).active) { child = new ChildT(xyz, getTile(iter).value, !on); setChild(iter, *child); } if (child) child->setActiveState(xyz, on); } template<typename ChildT> template<typename AccessorT> inline void RootNode<ChildT>::setActiveStateAndCache(const Coord& xyz, bool on, AccessorT& acc) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { if (on) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else { // Nothing to do; (x, y, z) is background and therefore already inactive. } } else if (isChild(iter)) { child = &getChild(iter); } else if (on != getTile(iter).active) { child = new ChildT(xyz, getTile(iter).value, !on); setChild(iter, *child); } if (child) { acc.insert(xyz, child); child->setActiveStateAndCache(xyz, on, acc); } } template<typename ChildT> inline void RootNode<ChildT>::setValueOff(const Coord& xyz, const ValueType& value) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { if (!math::isExactlyEqual(mBackground, value)) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } } else if (isChild(iter)) { child = &getChild(iter); } else if (isTileOn(iter) || !math::isExactlyEqual(getTile(iter).value, value)) { child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); setChild(iter, *child); } if (child) child->setValueOff(xyz, value); } template<typename ChildT> template<typename AccessorT> inline void RootNode<ChildT>::setValueOffAndCache(const Coord& xyz, const ValueType& value, AccessorT& acc) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { if (!math::isExactlyEqual(mBackground, value)) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } } else if (isChild(iter)) { child = &getChild(iter); } else if (isTileOn(iter) || !math::isExactlyEqual(getTile(iter).value, value)) { child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); setChild(iter, *child); } if (child) { acc.insert(xyz, child); child->setValueOffAndCache(xyz, value, acc); } } template<typename ChildT> inline void RootNode<ChildT>::setValueOn(const Coord& xyz, const ValueType& value) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { child = new ChildT(xyz, 
mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else if (isChild(iter)) { child = &getChild(iter); } else if (isTileOff(iter) || !math::isExactlyEqual(getTile(iter).value, value)) { child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); setChild(iter, *child); } if (child) child->setValueOn(xyz, value); } template<typename ChildT> template<typename AccessorT> inline void RootNode<ChildT>::setValueAndCache(const Coord& xyz, const ValueType& value, AccessorT& acc) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else if (isChild(iter)) { child = &getChild(iter); } else if (isTileOff(iter) || !math::isExactlyEqual(getTile(iter).value, value)) { child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); setChild(iter, *child); } if (child) { acc.insert(xyz, child); child->setValueAndCache(xyz, value, acc); } } template<typename ChildT> inline void RootNode<ChildT>::setValueOnly(const Coord& xyz, const ValueType& value) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else if (isChild(iter)) { child = &getChild(iter); } else if (!math::isExactlyEqual(getTile(iter).value, value)) { child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); setChild(iter, *child); } if (child) child->setValueOnly(xyz, value); } template<typename ChildT> template<typename AccessorT> inline void RootNode<ChildT>::setValueOnlyAndCache(const Coord& xyz, const ValueType& value, AccessorT& acc) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else if (isChild(iter)) { child = &getChild(iter); } else if (!math::isExactlyEqual(getTile(iter).value, value)) { child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); setChild(iter, *child); } if (child) { acc.insert(xyz, child); child->setValueOnlyAndCache(xyz, value, acc); } } template<typename ChildT> template<typename ModifyOp> inline void RootNode<ChildT>::modifyValue(const Coord& xyz, const ModifyOp& op) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else if (isChild(iter)) { child = &getChild(iter); } else { // Need to create a child if the tile is inactive, // in order to activate voxel (x, y, z). bool createChild = isTileOff(iter); if (!createChild) { // Need to create a child if applying the functor // to the tile value produces a different value. 
const ValueType& tileVal = getTile(iter).value; ValueType modifiedVal = tileVal; op(modifiedVal); createChild = !math::isExactlyEqual(tileVal, modifiedVal); } if (createChild) { child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); setChild(iter, *child); } } if (child) child->modifyValue(xyz, op); } template<typename ChildT> template<typename ModifyOp, typename AccessorT> inline void RootNode<ChildT>::modifyValueAndCache(const Coord& xyz, const ModifyOp& op, AccessorT& acc) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else if (isChild(iter)) { child = &getChild(iter); } else { // Need to create a child if the tile is inactive, // in order to activate voxel (x, y, z). bool createChild = isTileOff(iter); if (!createChild) { // Need to create a child if applying the functor // to the tile value produces a different value. const ValueType& tileVal = getTile(iter).value; ValueType modifiedVal = tileVal; op(modifiedVal); createChild = !math::isExactlyEqual(tileVal, modifiedVal); } if (createChild) { child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); setChild(iter, *child); } } if (child) { acc.insert(xyz, child); child->modifyValueAndCache(xyz, op, acc); } } template<typename ChildT> template<typename ModifyOp> inline void RootNode<ChildT>::modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else if (isChild(iter)) { child = &getChild(iter); } else { const Tile& tile = getTile(iter); bool modifiedState = tile.active; ValueType modifiedVal = tile.value; op(modifiedVal, modifiedState); // Need to create a child if applying the functor to the tile // produces a different value or active state. if (modifiedState != tile.active || !math::isExactlyEqual(modifiedVal, tile.value)) { child = new ChildT(xyz, tile.value, tile.active); setChild(iter, *child); } } if (child) child->modifyValueAndActiveState(xyz, op); } template<typename ChildT> template<typename ModifyOp, typename AccessorT> inline void RootNode<ChildT>::modifyValueAndActiveStateAndCache( const Coord& xyz, const ModifyOp& op, AccessorT& acc) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { child = new ChildT(xyz, mBackground); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else if (isChild(iter)) { child = &getChild(iter); } else { const Tile& tile = getTile(iter); bool modifiedState = tile.active; ValueType modifiedVal = tile.value; op(modifiedVal, modifiedState); // Need to create a child if applying the functor to the tile // produces a different value or active state. 
if (modifiedState != tile.active || !math::isExactlyEqual(modifiedVal, tile.value)) { child = new ChildT(xyz, tile.value, tile.active); setChild(iter, *child); } } if (child) { acc.insert(xyz, child); child->modifyValueAndActiveStateAndCache(xyz, op, acc); } } template<typename ChildT> inline bool RootNode<ChildT>::probeValue(const Coord& xyz, ValueType& value) const { MapCIter iter = this->findCoord(xyz); if (iter == mTable.end()) { value = mBackground; return false; } else if (isChild(iter)) { return getChild(iter).probeValue(xyz, value); } value = getTile(iter).value; return isTileOn(iter); } template<typename ChildT> template<typename AccessorT> inline bool RootNode<ChildT>::probeValueAndCache(const Coord& xyz, ValueType& value, AccessorT& acc) const { MapCIter iter = this->findCoord(xyz); if (iter == mTable.end()) { value = mBackground; return false; } else if (isChild(iter)) { acc.insert(xyz, &getChild(iter)); return getChild(iter).probeValueAndCache(xyz, value, acc); } value = getTile(iter).value; return isTileOn(iter); } //////////////////////////////////////// template<typename ChildT> inline void RootNode<ChildT>::fill(const CoordBBox& bbox, const ValueType& value, bool active) { if (bbox.empty()) return; // Iterate over the fill region in axis-aligned, tile-sized chunks. // (The first and last chunks along each axis might be smaller than a tile.) Coord xyz, tileMax; for (int x = bbox.min().x(); x <= bbox.max().x(); x = tileMax.x() + 1) { xyz.setX(x); for (int y = bbox.min().y(); y <= bbox.max().y(); y = tileMax.y() + 1) { xyz.setY(y); for (int z = bbox.min().z(); z <= bbox.max().z(); z = tileMax.z() + 1) { xyz.setZ(z); // Get the bounds of the tile that contains voxel (x, y, z). Coord tileMin = coordToKey(xyz); tileMax = tileMin.offsetBy(ChildT::DIM - 1); if (xyz != tileMin || Coord::lessThan(bbox.max(), tileMax)) { // If the box defined by (xyz, bbox.max()) doesn't completely enclose // the tile to which xyz belongs, create a child node (or retrieve // the existing one). ChildT* child = nullptr; MapIter iter = this->findKey(tileMin); if (iter == mTable.end()) { // No child or tile exists. Create a child and initialize it // with the background value. child = new ChildT(xyz, mBackground); mTable[tileMin] = NodeStruct(*child); } else if (isTile(iter)) { // Replace the tile with a newly-created child that is filled // with the tile's value and active state. const Tile& tile = getTile(iter); child = new ChildT(xyz, tile.value, tile.active); mTable[tileMin] = NodeStruct(*child); } else if (isChild(iter)) { child = &getChild(iter); } // Forward the fill request to the child. if (child) { const Coord tmp = Coord::minComponent(bbox.max(), tileMax); child->fill(CoordBBox(xyz, tmp), value, active); } } else { // If the box given by (xyz, bbox.max()) completely encloses // the tile to which xyz belongs, create the tile (if it // doesn't already exist) and give it the fill value. MapIter iter = this->findOrAddCoord(tileMin); setTile(iter, Tile(value, active)); } } } } } template<typename ChildT> inline void RootNode<ChildT>::denseFill(const CoordBBox& bbox, const ValueType& value, bool active) { if (bbox.empty()) return; if (active && mTable.empty()) { // If this tree is empty, then a sparse fill followed by (threaded) // densification of active tiles is the more efficient approach. sparseFill(bbox, value, active); voxelizeActiveTiles(/*threaded=*/true); return; } // Iterate over the fill region in axis-aligned, tile-sized chunks. 
// (The first and last chunks along each axis might be smaller than a tile.) Coord xyz, tileMin, tileMax; for (int x = bbox.min().x(); x <= bbox.max().x(); x = tileMax.x() + 1) { xyz.setX(x); for (int y = bbox.min().y(); y <= bbox.max().y(); y = tileMax.y() + 1) { xyz.setY(y); for (int z = bbox.min().z(); z <= bbox.max().z(); z = tileMax.z() + 1) { xyz.setZ(z); // Get the bounds of the tile that contains voxel (x, y, z). tileMin = coordToKey(xyz); tileMax = tileMin.offsetBy(ChildT::DIM - 1); // Retrieve the table entry for the tile that contains xyz, // or, if there is no table entry, add a background tile. const auto iter = findOrAddCoord(tileMin); if (isTile(iter)) { // If the table entry is a tile, replace it with a child node // that is filled with the tile's value and active state. const auto& tile = getTile(iter); auto* child = new ChildT{tileMin, tile.value, tile.active}; setChild(iter, *child); } // Forward the fill request to the child. getChild(iter).denseFill(bbox, value, active); } } } } //////////////////////////////////////// template<typename ChildT> inline void RootNode<ChildT>::voxelizeActiveTiles(bool threaded) { // There is little point in threading over the root table since each tile // spans a huge index space (by default 4096^3) and hence we expect few // active tiles if any at all. In fact, you're very likely to run out of // memory if this method is called on a tree with root-level active tiles! for (MapIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (this->isTileOff(i)) continue; ChildT* child = i->second.child; if (child == nullptr) { // If this table entry is an active tile (i.e., not off and not a child node), // replace it with a child node filled with active tiles of the same value. child = new ChildT{i->first, this->getTile(i).value, true}; i->second.child = child; } child->voxelizeActiveTiles(threaded); } } //////////////////////////////////////// template<typename ChildT> template<typename DenseT> inline void RootNode<ChildT>::copyToDense(const CoordBBox& bbox, DenseT& dense) const { using DenseValueType = typename DenseT::ValueType; const size_t xStride = dense.xStride(), yStride = dense.yStride(), zStride = dense.zStride(); const Coord& min = dense.bbox().min(); CoordBBox nodeBBox; for (Coord xyz = bbox.min(); xyz[0] <= bbox.max()[0]; xyz[0] = nodeBBox.max()[0] + 1) { for (xyz[1] = bbox.min()[1]; xyz[1] <= bbox.max()[1]; xyz[1] = nodeBBox.max()[1] + 1) { for (xyz[2] = bbox.min()[2]; xyz[2] <= bbox.max()[2]; xyz[2] = nodeBBox.max()[2] + 1) { // Get the coordinate bbox of the child node that contains voxel xyz. nodeBBox = CoordBBox::createCube(coordToKey(xyz), ChildT::DIM); // Get the coordinate bbox of the interection of inBBox and nodeBBox CoordBBox sub(xyz, Coord::minComponent(bbox.max(), nodeBBox.max())); MapCIter iter = this->findKey(nodeBBox.min()); if (iter != mTable.end() && isChild(iter)) {//is a child getChild(iter).copyToDense(sub, dense); } else {//is background or a tile value const ValueType value = iter==mTable.end() ? 
mBackground : getTile(iter).value; sub.translate(-min); DenseValueType* a0 = dense.data() + zStride*sub.min()[2]; for (Int32 x=sub.min()[0], ex=sub.max()[0]+1; x<ex; ++x) { DenseValueType* a1 = a0 + x*xStride; for (Int32 y=sub.min()[1], ey=sub.max()[1]+1; y<ey; ++y) { DenseValueType* a2 = a1 + y*yStride; for (Int32 z=sub.min()[2], ez=sub.max()[2]+1; z<ez; ++z, a2 += zStride) { *a2 = DenseValueType(value); } } } } } } } } //////////////////////////////////////// template<typename ChildT> inline bool RootNode<ChildT>::writeTopology(std::ostream& os, bool toHalf) const { if (!toHalf) { os.write(reinterpret_cast<const char*>(&mBackground), sizeof(ValueType)); } else { ValueType truncatedVal = io::truncateRealToHalf(mBackground); os.write(reinterpret_cast<const char*>(&truncatedVal), sizeof(ValueType)); } io::setGridBackgroundValuePtr(os, &mBackground); const Index numTiles = this->getTileCount(), numChildren = this->childCount(); os.write(reinterpret_cast<const char*>(&numTiles), sizeof(Index)); os.write(reinterpret_cast<const char*>(&numChildren), sizeof(Index)); if (numTiles == 0 && numChildren == 0) return false; // Write tiles. for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) continue; os.write(reinterpret_cast<const char*>(i->first.asPointer()), 3 * sizeof(Int32)); os.write(reinterpret_cast<const char*>(&getTile(i).value), sizeof(ValueType)); os.write(reinterpret_cast<const char*>(&getTile(i).active), sizeof(bool)); } // Write child nodes. for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isTile(i)) continue; os.write(reinterpret_cast<const char*>(i->first.asPointer()), 3 * sizeof(Int32)); getChild(i).writeTopology(os, toHalf); } return true; // not empty } template<typename ChildT> inline bool RootNode<ChildT>::readTopology(std::istream& is, bool fromHalf) { // Delete the existing tree. this->clear(); if (io::getFormatVersion(is) < OPENVDB_FILE_VERSION_ROOTNODE_MAP) { // Read and convert an older-format RootNode. // For backward compatibility with older file formats, read both // outside and inside background values. is.read(reinterpret_cast<char*>(&mBackground), sizeof(ValueType)); ValueType inside; is.read(reinterpret_cast<char*>(&inside), sizeof(ValueType)); io::setGridBackgroundValuePtr(is, &mBackground); // Read the index range. Coord rangeMin, rangeMax; is.read(reinterpret_cast<char*>(rangeMin.asPointer()), 3 * sizeof(Int32)); is.read(reinterpret_cast<char*>(rangeMax.asPointer()), 3 * sizeof(Int32)); this->initTable(); Index tableSize = 0, log2Dim[4] = { 0, 0, 0, 0 }; Int32 offset[3]; for (int i = 0; i < 3; ++i) { offset[i] = rangeMin[i] >> ChildT::TOTAL; rangeMin[i] = offset[i] << ChildT::TOTAL; log2Dim[i] = 1 + util::FindHighestOn((rangeMax[i] >> ChildT::TOTAL) - offset[i]); tableSize += log2Dim[i]; rangeMax[i] = (((1 << log2Dim[i]) + offset[i]) << ChildT::TOTAL) - 1; } log2Dim[3] = log2Dim[1] + log2Dim[2]; tableSize = 1U << tableSize; // Read masks. util::RootNodeMask childMask(tableSize), valueMask(tableSize); childMask.load(is); valueMask.load(is); // Read child nodes/values. for (Index i = 0; i < tableSize; ++i) { // Compute origin = offset2coord(i). Index n = i; Coord origin; origin[0] = (n >> log2Dim[3]) + offset[0]; n &= (1U << log2Dim[3]) - 1; origin[1] = (n >> log2Dim[2]) + offset[1]; origin[2] = (n & ((1U << log2Dim[2]) - 1)) + offset[1]; origin <<= ChildT::TOTAL; if (childMask.isOn(i)) { // Read in and insert a child node. 
ChildT* child = new ChildT(PartialCreate(), origin, mBackground); child->readTopology(is); mTable[origin] = NodeStruct(*child); } else { // Read in a tile value and insert a tile, but only if the value // is either active or non-background. ValueType value; is.read(reinterpret_cast<char*>(&value), sizeof(ValueType)); if (valueMask.isOn(i) || (!math::isApproxEqual(value, mBackground))) { mTable[origin] = NodeStruct(Tile(value, valueMask.isOn(i))); } } } return true; } // Read a RootNode that was stored in the current format. is.read(reinterpret_cast<char*>(&mBackground), sizeof(ValueType)); io::setGridBackgroundValuePtr(is, &mBackground); Index numTiles = 0, numChildren = 0; is.read(reinterpret_cast<char*>(&numTiles), sizeof(Index)); is.read(reinterpret_cast<char*>(&numChildren), sizeof(Index)); if (numTiles == 0 && numChildren == 0) return false; Int32 vec[3]; ValueType value; bool active; // Read tiles. for (Index n = 0; n < numTiles; ++n) { is.read(reinterpret_cast<char*>(vec), 3 * sizeof(Int32)); is.read(reinterpret_cast<char*>(&value), sizeof(ValueType)); is.read(reinterpret_cast<char*>(&active), sizeof(bool)); mTable[Coord(vec)] = NodeStruct(Tile(value, active)); } // Read child nodes. for (Index n = 0; n < numChildren; ++n) { is.read(reinterpret_cast<char*>(vec), 3 * sizeof(Int32)); Coord origin(vec); ChildT* child = new ChildT(PartialCreate(), origin, mBackground); child->readTopology(is, fromHalf); mTable[Coord(vec)] = NodeStruct(*child); } return true; // not empty } template<typename ChildT> inline void RootNode<ChildT>::writeBuffers(std::ostream& os, bool toHalf) const { for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) getChild(i).writeBuffers(os, toHalf); } } template<typename ChildT> inline void RootNode<ChildT>::readBuffers(std::istream& is, bool fromHalf) { for (MapIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) getChild(i).readBuffers(is, fromHalf); } } template<typename ChildT> inline void RootNode<ChildT>::readBuffers(std::istream& is, const CoordBBox& clipBBox, bool fromHalf) { const Tile bgTile(mBackground, /*active=*/false); for (MapIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (isChild(i)) { // Stream in and clip the branch rooted at this child. // (We can't skip over children that lie outside the clipping region, // because buffers are serialized in depth-first order and need to be // unserialized in the same order.) ChildT& child = getChild(i); child.readBuffers(is, clipBBox, fromHalf); } } // Clip root-level tiles and prune children that were clipped. this->clip(clipBBox); } //////////////////////////////////////// template<typename ChildT> inline void RootNode<ChildT>::clip(const CoordBBox& clipBBox) { const Tile bgTile(mBackground, /*active=*/false); // Iterate over a copy of this node's table so that we can modify the original. // (Copying the table copies child node pointers, not the nodes themselves.) MapType copyOfTable(mTable); for (MapIter i = copyOfTable.begin(), e = copyOfTable.end(); i != e; ++i) { const Coord& xyz = i->first; // tile or child origin CoordBBox tileBBox(xyz, xyz.offsetBy(ChildT::DIM - 1)); // tile or child bounds if (!clipBBox.hasOverlap(tileBBox)) { // This table entry lies completely outside the clipping region. Delete it. setTile(this->findCoord(xyz), bgTile); // delete any existing child node first mTable.erase(xyz); } else if (!clipBBox.isInside(tileBBox)) { // This table entry does not lie completely inside the clipping region // and must be clipped. 
if (isChild(i)) { getChild(i).clip(clipBBox, mBackground); } else { // Replace this tile with a background tile, then fill the clip region // with the tile's original value. (This might create a child branch.) tileBBox.intersect(clipBBox); const Tile& origTile = getTile(i); setTile(this->findCoord(xyz), bgTile); this->sparseFill(tileBBox, origTile.value, origTile.active); } } else { // This table entry lies completely inside the clipping region. Leave it intact. } } this->prune(); // also erases root-level background tiles } //////////////////////////////////////// template<typename ChildT> inline void RootNode<ChildT>::prune(const ValueType& tolerance) { bool state = false; ValueType value = zeroVal<ValueType>(); for (MapIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (this->isTile(i)) continue; this->getChild(i).prune(tolerance); if (this->getChild(i).isConstant(value, state, tolerance)) { this->setTile(i, Tile(value, state)); } } this->eraseBackgroundTiles(); } //////////////////////////////////////// template<typename ChildT> template<typename NodeT> inline NodeT* RootNode<ChildT>::stealNode(const Coord& xyz, const ValueType& value, bool state) { if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) || NodeT::LEVEL > ChildT::LEVEL) return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN MapIter iter = this->findCoord(xyz); if (iter == mTable.end() || isTile(iter)) return nullptr; return (std::is_same<NodeT, ChildT>::value) ? reinterpret_cast<NodeT*>(&stealChild(iter, Tile(value, state))) : getChild(iter).template stealNode<NodeT>(xyz, value, state); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } //////////////////////////////////////// template<typename ChildT> inline void RootNode<ChildT>::addLeaf(LeafNodeType* leaf) { if (leaf == nullptr) return; ChildT* child = nullptr; const Coord& xyz = leaf->origin(); MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { if (ChildT::LEVEL>0) { child = new ChildT(xyz, mBackground, false); } else { child = reinterpret_cast<ChildT*>(leaf); } mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else if (isChild(iter)) { if (ChildT::LEVEL>0) { child = &getChild(iter); } else { child = reinterpret_cast<ChildT*>(leaf); setChild(iter, *child);//this also deletes the existing child node } } else {//tile if (ChildT::LEVEL>0) { child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); } else { child = reinterpret_cast<ChildT*>(leaf); } setChild(iter, *child); } child->addLeaf(leaf); } template<typename ChildT> template<typename AccessorT> inline void RootNode<ChildT>::addLeafAndCache(LeafNodeType* leaf, AccessorT& acc) { if (leaf == nullptr) return; ChildT* child = nullptr; const Coord& xyz = leaf->origin(); MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { if (ChildT::LEVEL>0) { child = new ChildT(xyz, mBackground, false); } else { child = reinterpret_cast<ChildT*>(leaf); } mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else if (isChild(iter)) { if (ChildT::LEVEL>0) { child = &getChild(iter); } else { child = reinterpret_cast<ChildT*>(leaf); setChild(iter, *child);//this also deletes the existing child node } } else {//tile if (ChildT::LEVEL>0) { child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); } else { child = reinterpret_cast<ChildT*>(leaf); } setChild(iter, *child); } acc.insert(xyz, child); child->addLeafAndCache(leaf, acc); } template<typename ChildT> inline bool RootNode<ChildT>::addChild(ChildT* child) { if (!child) return false; const Coord& xyz = child->origin(); 
MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) {//background mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else {//child or tile setChild(iter, *child);//this also deletes the existing child node } return true; } template<typename ChildT> inline void RootNode<ChildT>::addTile(const Coord& xyz, const ValueType& value, bool state) { MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) {//background mTable[this->coordToKey(xyz)] = NodeStruct(Tile(value, state)); } else {//child or tile setTile(iter, Tile(value, state));//this also deletes the existing child node } } template<typename ChildT> inline void RootNode<ChildT>::addTile(Index level, const Coord& xyz, const ValueType& value, bool state) { if (LEVEL >= level) { MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) {//background if (LEVEL > level) { ChildT* child = new ChildT(xyz, mBackground, false); mTable[this->coordToKey(xyz)] = NodeStruct(*child); child->addTile(level, xyz, value, state); } else { mTable[this->coordToKey(xyz)] = NodeStruct(Tile(value, state)); } } else if (isChild(iter)) {//child if (LEVEL > level) { getChild(iter).addTile(level, xyz, value, state); } else { setTile(iter, Tile(value, state));//this also deletes the existing child node } } else {//tile if (LEVEL > level) { ChildT* child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); setChild(iter, *child); child->addTile(level, xyz, value, state); } else { setTile(iter, Tile(value, state)); } } } } template<typename ChildT> template<typename AccessorT> inline void RootNode<ChildT>::addTileAndCache(Index level, const Coord& xyz, const ValueType& value, bool state, AccessorT& acc) { if (LEVEL >= level) { MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) {//background if (LEVEL > level) { ChildT* child = new ChildT(xyz, mBackground, false); acc.insert(xyz, child); mTable[this->coordToKey(xyz)] = NodeStruct(*child); child->addTileAndCache(level, xyz, value, state, acc); } else { mTable[this->coordToKey(xyz)] = NodeStruct(Tile(value, state)); } } else if (isChild(iter)) {//child if (LEVEL > level) { ChildT* child = &getChild(iter); acc.insert(xyz, child); child->addTileAndCache(level, xyz, value, state, acc); } else { setTile(iter, Tile(value, state));//this also deletes the existing child node } } else {//tile if (LEVEL > level) { ChildT* child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); acc.insert(xyz, child); setChild(iter, *child); child->addTileAndCache(level, xyz, value, state, acc); } else { setTile(iter, Tile(value, state)); } } } } //////////////////////////////////////// template<typename ChildT> inline typename ChildT::LeafNodeType* RootNode<ChildT>::touchLeaf(const Coord& xyz) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { child = new ChildT(xyz, mBackground, false); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else if (isChild(iter)) { child = &getChild(iter); } else { child = new ChildT(xyz, getTile(iter).value, isTileOn(iter)); setChild(iter, *child); } return child->touchLeaf(xyz); } template<typename ChildT> template<typename AccessorT> inline typename ChildT::LeafNodeType* RootNode<ChildT>::touchLeafAndCache(const Coord& xyz, AccessorT& acc) { ChildT* child = nullptr; MapIter iter = this->findCoord(xyz); if (iter == mTable.end()) { child = new ChildT(xyz, mBackground, false); mTable[this->coordToKey(xyz)] = NodeStruct(*child); } else if (isChild(iter)) { child = &getChild(iter); } else { child = new ChildT(xyz, 
getTile(iter).value, isTileOn(iter)); setChild(iter, *child); } acc.insert(xyz, child); return child->touchLeafAndCache(xyz, acc); } //////////////////////////////////////// template<typename ChildT> template<typename NodeT> inline NodeT* RootNode<ChildT>::probeNode(const Coord& xyz) { if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) || NodeT::LEVEL > ChildT::LEVEL) return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN MapIter iter = this->findCoord(xyz); if (iter == mTable.end() || isTile(iter)) return nullptr; ChildT* child = &getChild(iter); return (std::is_same<NodeT, ChildT>::value) ? reinterpret_cast<NodeT*>(child) : child->template probeNode<NodeT>(xyz); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } template<typename ChildT> template<typename NodeT> inline const NodeT* RootNode<ChildT>::probeConstNode(const Coord& xyz) const { if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) || NodeT::LEVEL > ChildT::LEVEL) return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN MapCIter iter = this->findCoord(xyz); if (iter == mTable.end() || isTile(iter)) return nullptr; const ChildT* child = &getChild(iter); return (std::is_same<NodeT, ChildT>::value) ? reinterpret_cast<const NodeT*>(child) : child->template probeConstNode<NodeT>(xyz); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } template<typename ChildT> inline typename ChildT::LeafNodeType* RootNode<ChildT>::probeLeaf(const Coord& xyz) { return this->template probeNode<LeafNodeType>(xyz); } template<typename ChildT> inline const typename ChildT::LeafNodeType* RootNode<ChildT>::probeConstLeaf(const Coord& xyz) const { return this->template probeConstNode<LeafNodeType>(xyz); } template<typename ChildT> template<typename AccessorT> inline typename ChildT::LeafNodeType* RootNode<ChildT>::probeLeafAndCache(const Coord& xyz, AccessorT& acc) { return this->template probeNodeAndCache<LeafNodeType>(xyz, acc); } template<typename ChildT> template<typename AccessorT> inline const typename ChildT::LeafNodeType* RootNode<ChildT>::probeConstLeafAndCache(const Coord& xyz, AccessorT& acc) const { return this->template probeConstNodeAndCache<LeafNodeType>(xyz, acc); } template<typename ChildT> template<typename AccessorT> inline const typename ChildT::LeafNodeType* RootNode<ChildT>::probeLeafAndCache(const Coord& xyz, AccessorT& acc) const { return this->probeConstLeafAndCache(xyz, acc); } template<typename ChildT> template<typename NodeT, typename AccessorT> inline NodeT* RootNode<ChildT>::probeNodeAndCache(const Coord& xyz, AccessorT& acc) { if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) || NodeT::LEVEL > ChildT::LEVEL) return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN MapIter iter = this->findCoord(xyz); if (iter == mTable.end() || isTile(iter)) return nullptr; ChildT* child = &getChild(iter); acc.insert(xyz, child); return (std::is_same<NodeT, ChildT>::value) ? 
reinterpret_cast<NodeT*>(child) : child->template probeNodeAndCache<NodeT>(xyz, acc); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } template<typename ChildT> template<typename NodeT,typename AccessorT> inline const NodeT* RootNode<ChildT>::probeConstNodeAndCache(const Coord& xyz, AccessorT& acc) const { if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) || NodeT::LEVEL > ChildT::LEVEL) return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN MapCIter iter = this->findCoord(xyz); if (iter == mTable.end() || isTile(iter)) return nullptr; const ChildT* child = &getChild(iter); acc.insert(xyz, child); return (std::is_same<NodeT, ChildT>::value) ? reinterpret_cast<const NodeT*>(child) : child->template probeConstNodeAndCache<NodeT>(xyz, acc); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } //////////////////////////////////////// template<typename ChildT> template<typename ArrayT> inline void RootNode<ChildT>::getNodes(ArrayT& array) { using NodePtr = typename ArrayT::value_type; static_assert(std::is_pointer<NodePtr>::value, "argument to getNodes() must be a pointer array"); using NodeType = typename std::remove_pointer<NodePtr>::type; using NonConstNodeType = typename std::remove_const<NodeType>::type; static_assert(NodeChainType::template Contains<NonConstNodeType>, "can't extract non-const nodes from a const tree"); using ArrayChildT = typename std::conditional< std::is_const<NodeType>::value, const ChildT, ChildT>::type; for (MapIter iter=mTable.begin(); iter!=mTable.end(); ++iter) { if (ChildT* child = iter->second.child) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (std::is_same<NodePtr, ArrayChildT*>::value) { array.push_back(reinterpret_cast<NodePtr>(iter->second.child)); } else { child->getNodes(array);//descent } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } } } template<typename ChildT> template<typename ArrayT> inline void RootNode<ChildT>::getNodes(ArrayT& array) const { using NodePtr = typename ArrayT::value_type; static_assert(std::is_pointer<NodePtr>::value, "argument to getNodes() must be a pointer array"); using NodeType = typename std::remove_pointer<NodePtr>::type; static_assert(std::is_const<NodeType>::value, "argument to getNodes() must be an array of const node pointers"); using NonConstNodeType = typename std::remove_const<NodeType>::type; static_assert(NodeChainType::template Contains<NonConstNodeType>, "can't extract non-const nodes from a const tree"); for (MapCIter iter=mTable.begin(); iter!=mTable.end(); ++iter) { if (const ChildNodeType *child = iter->second.child) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (std::is_same<NodePtr, const ChildT*>::value) { array.push_back(reinterpret_cast<NodePtr>(iter->second.child)); } else { child->getNodes(array);//descent } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } } } //////////////////////////////////////// template<typename ChildT> template<typename ArrayT> inline void RootNode<ChildT>::stealNodes(ArrayT& array, const ValueType& value, bool state) { using NodePtr = typename ArrayT::value_type; static_assert(std::is_pointer<NodePtr>::value, "argument to stealNodes() must be a pointer array"); using NodeType = typename std::remove_pointer<NodePtr>::type; using NonConstNodeType = typename std::remove_const<NodeType>::type; static_assert(NodeChainType::template Contains<NonConstNodeType>, "can't extract non-const nodes from a const tree"); using ArrayChildT = typename std::conditional< std::is_const<NodeType>::value, const ChildT, ChildT>::type; for (MapIter iter=mTable.begin(); iter!=mTable.end(); ++iter) { 
if (ChildT* child = iter->second.child) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (std::is_same<NodePtr, ArrayChildT*>::value) { array.push_back(reinterpret_cast<NodePtr>(&stealChild(iter, Tile(value, state)))); } else { child->stealNodes(array, value, state);//descent } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } } } //////////////////////////////////////// template<typename ChildT> template<MergePolicy Policy> inline void RootNode<ChildT>::merge(RootNode& other) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN switch (Policy) { default: case MERGE_ACTIVE_STATES: for (MapIter i = other.mTable.begin(), e = other.mTable.end(); i != e; ++i) { MapIter j = mTable.find(i->first); if (other.isChild(i)) { if (j == mTable.end()) { // insert other node's child ChildNodeType& child = stealChild(i, Tile(other.mBackground, /*on=*/false)); child.resetBackground(other.mBackground, mBackground); mTable[i->first] = NodeStruct(child); } else if (isTile(j)) { if (isTileOff(j)) { // replace inactive tile with other node's child ChildNodeType& child = stealChild(i, Tile(other.mBackground, /*on=*/false)); child.resetBackground(other.mBackground, mBackground); setChild(j, child); } } else { // merge both child nodes getChild(j).template merge<MERGE_ACTIVE_STATES>(getChild(i), other.mBackground, mBackground); } } else if (other.isTileOn(i)) { if (j == mTable.end()) { // insert other node's active tile mTable[i->first] = i->second; } else if (!isTileOn(j)) { // Replace anything except an active tile with the other node's active tile. setTile(j, Tile(other.getTile(i).value, true)); } } } break; case MERGE_NODES: for (MapIter i = other.mTable.begin(), e = other.mTable.end(); i != e; ++i) { MapIter j = mTable.find(i->first); if (other.isChild(i)) { if (j == mTable.end()) { // insert other node's child ChildNodeType& child = stealChild(i, Tile(other.mBackground, /*on=*/false)); child.resetBackground(other.mBackground, mBackground); mTable[i->first] = NodeStruct(child); } else if (isTile(j)) { // replace tile with other node's child ChildNodeType& child = stealChild(i, Tile(other.mBackground, /*on=*/false)); child.resetBackground(other.mBackground, mBackground); setChild(j, child); } else { // merge both child nodes getChild(j).template merge<MERGE_NODES>( getChild(i), other.mBackground, mBackground); } } } break; case MERGE_ACTIVE_STATES_AND_NODES: for (MapIter i = other.mTable.begin(), e = other.mTable.end(); i != e; ++i) { MapIter j = mTable.find(i->first); if (other.isChild(i)) { if (j == mTable.end()) { // Steal and insert the other node's child. ChildNodeType& child = stealChild(i, Tile(other.mBackground, /*on=*/false)); child.resetBackground(other.mBackground, mBackground); mTable[i->first] = NodeStruct(child); } else if (isTile(j)) { // Replace this node's tile with the other node's child. ChildNodeType& child = stealChild(i, Tile(other.mBackground, /*on=*/false)); child.resetBackground(other.mBackground, mBackground); const Tile tile = getTile(j); setChild(j, child); if (tile.active) { // Merge the other node's child with this node's active tile. child.template merge<MERGE_ACTIVE_STATES_AND_NODES>( tile.value, tile.active); } } else /*if (isChild(j))*/ { // Merge the other node's child into this node's child. getChild(j).template merge<MERGE_ACTIVE_STATES_AND_NODES>(getChild(i), other.mBackground, mBackground); } } else if (other.isTileOn(i)) { if (j == mTable.end()) { // Insert a copy of the other node's active tile. 
mTable[i->first] = i->second; } else if (isTileOff(j)) { // Replace this node's inactive tile with a copy of the other's active tile. setTile(j, Tile(other.getTile(i).value, true)); } else if (isChild(j)) { // Merge the other node's active tile into this node's child. const Tile& tile = getTile(i); getChild(j).template merge<MERGE_ACTIVE_STATES_AND_NODES>( tile.value, tile.active); } } // else if (other.isTileOff(i)) {} // ignore the other node's inactive tiles } break; } // Empty the other tree so as not to leave it in a partially cannibalized state. other.clear(); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } //////////////////////////////////////// template<typename ChildT> template<typename OtherChildType> inline void RootNode<ChildT>::topologyUnion(const RootNode<OtherChildType>& other) { using OtherRootT = RootNode<OtherChildType>; using OtherCIterT = typename OtherRootT::MapCIter; enforceSameConfiguration(other); for (OtherCIterT i = other.mTable.begin(), e = other.mTable.end(); i != e; ++i) { MapIter j = mTable.find(i->first); if (other.isChild(i)) { if (j == mTable.end()) { // create child branch with identical topology mTable[i->first] = NodeStruct( *(new ChildT(other.getChild(i), mBackground, TopologyCopy()))); } else if (this->isChild(j)) { // union with child branch this->getChild(j).topologyUnion(other.getChild(i)); } else {// this is a tile so replace it with a child branch with identical topology ChildT* child = new ChildT( other.getChild(i), this->getTile(j).value, TopologyCopy()); if (this->isTileOn(j)) child->setValuesOn();//this is an active tile this->setChild(j, *child); } } else if (other.isTileOn(i)) { // other is an active tile if (j == mTable.end()) { // insert an active tile mTable[i->first] = NodeStruct(Tile(mBackground, true)); } else if (this->isChild(j)) { this->getChild(j).setValuesOn(); } else if (this->isTileOff(j)) { this->setTile(j, Tile(this->getTile(j).value, true)); } } } } template<typename ChildT> template<typename OtherChildType> inline void RootNode<ChildT>::topologyIntersection(const RootNode<OtherChildType>& other) { using OtherRootT = RootNode<OtherChildType>; using OtherCIterT = typename OtherRootT::MapCIter; enforceSameConfiguration(other); std::set<Coord> tmp;//keys to erase for (MapIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { OtherCIterT j = other.mTable.find(i->first); if (this->isChild(i)) { if (j == other.mTable.end() || other.isTileOff(j)) { tmp.insert(i->first);//delete child branch } else if (other.isChild(j)) { // intersect with child branch this->getChild(i).topologyIntersection(other.getChild(j), mBackground); } } else if (this->isTileOn(i)) { if (j == other.mTable.end() || other.isTileOff(j)) { this->setTile(i, Tile(this->getTile(i).value, false));//turn inactive } else if (other.isChild(j)) { //replace with a child branch with identical topology ChildT* child = new ChildT(other.getChild(j), this->getTile(i).value, TopologyCopy()); this->setChild(i, *child); } } } for (std::set<Coord>::iterator i = tmp.begin(), e = tmp.end(); i != e; ++i) { MapIter it = this->findCoord(*i); setTile(it, Tile()); // delete any existing child node first mTable.erase(it); } } template<typename ChildT> template<typename OtherChildType> inline void RootNode<ChildT>::topologyDifference(const RootNode<OtherChildType>& other) { using OtherRootT = RootNode<OtherChildType>; using OtherCIterT = typename OtherRootT::MapCIter; enforceSameConfiguration(other); for (OtherCIterT i = other.mTable.begin(), e = other.mTable.end(); i != e; ++i) { MapIter j = 
mTable.find(i->first); if (other.isChild(i)) { if (j == mTable.end() || this->isTileOff(j)) { //do nothing } else if (this->isChild(j)) { // difference with child branch this->getChild(j).topologyDifference(other.getChild(i), mBackground); } else if (this->isTileOn(j)) { // this is an active tile so create a child node and descent ChildT* child = new ChildT(j->first, this->getTile(j).value, true); child->topologyDifference(other.getChild(i), mBackground); this->setChild(j, *child); } } else if (other.isTileOn(i)) { // other is an active tile if (j == mTable.end() || this->isTileOff(j)) { // do nothing } else if (this->isChild(j)) { setTile(j, Tile()); // delete any existing child node first mTable.erase(j); } else if (this->isTileOn(j)) { this->setTile(j, Tile(this->getTile(j).value, false)); } } } } //////////////////////////////////////// template<typename ChildT> template<typename CombineOp> inline void RootNode<ChildT>::combine(RootNode& other, CombineOp& op, bool prune) { CombineArgs<ValueType> args; CoordSet keys; this->insertKeys(keys); other.insertKeys(keys); for (CoordSetCIter i = keys.begin(), e = keys.end(); i != e; ++i) { MapIter iter = findOrAddCoord(*i), otherIter = other.findOrAddCoord(*i); if (isTile(iter) && isTile(otherIter)) { // Both this node and the other node have constant values (tiles). // Combine the two values and store the result as this node's new tile value. op(args.setARef(getTile(iter).value) .setAIsActive(isTileOn(iter)) .setBRef(getTile(otherIter).value) .setBIsActive(isTileOn(otherIter))); setTile(iter, Tile(args.result(), args.resultIsActive())); } else if (isChild(iter) && isTile(otherIter)) { // Combine this node's child with the other node's constant value. ChildT& child = getChild(iter); child.combine(getTile(otherIter).value, isTileOn(otherIter), op); } else if (isTile(iter) && isChild(otherIter)) { // Combine this node's constant value with the other node's child, // but use a new functor in which the A and B values are swapped, // since the constant value is the A value, not the B value. SwappedCombineOp<ValueType, CombineOp> swappedOp(op); ChildT& child = getChild(otherIter); child.combine(getTile(iter).value, isTileOn(iter), swappedOp); // Steal the other node's child. setChild(iter, stealChild(otherIter, Tile())); } else /*if (isChild(iter) && isChild(otherIter))*/ { // Combine this node's child with the other node's child. ChildT &child = getChild(iter), &otherChild = getChild(otherIter); child.combine(otherChild, op); } if (prune && isChild(iter)) getChild(iter).prune(); } // Combine background values. op(args.setARef(mBackground).setBRef(other.mBackground)); mBackground = args.result(); // Empty the other tree so as not to leave it in a partially cannibalized state. other.clear(); } //////////////////////////////////////// // This helper class is a friend of RootNode and is needed so that combine2 // can be specialized for compatible and incompatible pairs of RootNode types. template<typename CombineOp, typename RootT, typename OtherRootT, bool Compatible = false> struct RootNodeCombineHelper { static inline void combine2(RootT& self, const RootT&, const OtherRootT& other1, CombineOp&, bool) { // If the two root nodes have different configurations or incompatible ValueTypes, // throw an exception. 
self.enforceSameConfiguration(other1); self.enforceCompatibleValueTypes(other1); // One of the above two tests should throw, so we should never get here: std::ostringstream ostr; ostr << "cannot combine a " << typeid(OtherRootT).name() << " into a " << typeid(RootT).name(); OPENVDB_THROW(TypeError, ostr.str()); } }; // Specialization for root nodes of compatible types template<typename CombineOp, typename RootT, typename OtherRootT> struct RootNodeCombineHelper<CombineOp, RootT, OtherRootT, /*Compatible=*/true> { static inline void combine2(RootT& self, const RootT& other0, const OtherRootT& other1, CombineOp& op, bool prune) { self.doCombine2(other0, other1, op, prune); } }; template<typename ChildT> template<typename CombineOp, typename OtherRootNode> inline void RootNode<ChildT>::combine2(const RootNode& other0, const OtherRootNode& other1, CombineOp& op, bool prune) { using OtherValueType = typename OtherRootNode::ValueType; static const bool compatible = (SameConfiguration<OtherRootNode>::value && CanConvertType</*from=*/OtherValueType, /*to=*/ValueType>::value); RootNodeCombineHelper<CombineOp, RootNode, OtherRootNode, compatible>::combine2( *this, other0, other1, op, prune); } template<typename ChildT> template<typename CombineOp, typename OtherRootNode> inline void RootNode<ChildT>::doCombine2(const RootNode& other0, const OtherRootNode& other1, CombineOp& op, bool prune) { enforceSameConfiguration(other1); using OtherValueT = typename OtherRootNode::ValueType; using OtherTileT = typename OtherRootNode::Tile; using OtherNodeStructT = typename OtherRootNode::NodeStruct; using OtherMapCIterT = typename OtherRootNode::MapCIter; CombineArgs<ValueType, OtherValueT> args; CoordSet keys; other0.insertKeys(keys); other1.insertKeys(keys); const NodeStruct bg0(Tile(other0.mBackground, /*active=*/false)); const OtherNodeStructT bg1(OtherTileT(other1.mBackground, /*active=*/false)); for (CoordSetCIter i = keys.begin(), e = keys.end(); i != e; ++i) { MapIter thisIter = this->findOrAddCoord(*i); MapCIter iter0 = other0.findKey(*i); OtherMapCIterT iter1 = other1.findKey(*i); const NodeStruct& ns0 = (iter0 != other0.mTable.end()) ? iter0->second : bg0; const OtherNodeStructT& ns1 = (iter1 != other1.mTable.end()) ? iter1->second : bg1; if (ns0.isTile() && ns1.isTile()) { // Both input nodes have constant values (tiles). // Combine the two values and add a new tile to this node with the result. op(args.setARef(ns0.tile.value) .setAIsActive(ns0.isTileOn()) .setBRef(ns1.tile.value) .setBIsActive(ns1.isTileOn())); setTile(thisIter, Tile(args.result(), args.resultIsActive())); } else { if (!isChild(thisIter)) { // Add a new child with the same coordinates, etc. as the other node's child. const Coord& childOrigin = ns0.isChild() ? ns0.child->origin() : ns1.child->origin(); setChild(thisIter, *(new ChildT(childOrigin, getTile(thisIter).value))); } ChildT& child = getChild(thisIter); if (ns0.isTile()) { // Combine node1's child with node0's constant value // and write the result into this node's child. child.combine2(ns0.tile.value, *ns1.child, ns0.isTileOn(), op); } else if (ns1.isTile()) { // Combine node0's child with node1's constant value // and write the result into this node's child. child.combine2(*ns0.child, ns1.tile.value, ns1.isTileOn(), op); } else { // Combine node0's child with node1's child // and write the result into this node's child. child.combine2(*ns0.child, *ns1.child, op); } } if (prune && isChild(thisIter)) getChild(thisIter).prune(); } // Combine background values. 
op(args.setARef(other0.mBackground).setBRef(other1.mBackground)); mBackground = args.result(); } //////////////////////////////////////// template<typename ChildT> template<typename BBoxOp> inline void RootNode<ChildT>::visitActiveBBox(BBoxOp& op) const { const bool descent = op.template descent<LEVEL>(); for (MapCIter i = mTable.begin(), e = mTable.end(); i != e; ++i) { if (this->isTileOff(i)) continue; if (this->isChild(i) && descent) { this->getChild(i).visitActiveBBox(op); } else { op.template operator()<LEVEL>(CoordBBox::createCube(i->first, ChildT::DIM)); } } } template<typename ChildT> template<typename VisitorOp> inline void RootNode<ChildT>::visit(VisitorOp& op) { doVisit<RootNode, VisitorOp, ChildAllIter>(*this, op); } template<typename ChildT> template<typename VisitorOp> inline void RootNode<ChildT>::visit(VisitorOp& op) const { doVisit<const RootNode, VisitorOp, ChildAllCIter>(*this, op); } template<typename ChildT> template<typename RootNodeT, typename VisitorOp, typename ChildAllIterT> inline void RootNode<ChildT>::doVisit(RootNodeT& self, VisitorOp& op) { typename RootNodeT::ValueType val; for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { if (op(iter)) continue; if (typename ChildAllIterT::ChildNodeType* child = iter.probeChild(val)) { child->visit(op); } } } //////////////////////////////////////// template<typename ChildT> template<typename OtherRootNodeType, typename VisitorOp> inline void RootNode<ChildT>::visit2(OtherRootNodeType& other, VisitorOp& op) { doVisit2<RootNode, OtherRootNodeType, VisitorOp, ChildAllIter, typename OtherRootNodeType::ChildAllIter>(*this, other, op); } template<typename ChildT> template<typename OtherRootNodeType, typename VisitorOp> inline void RootNode<ChildT>::visit2(OtherRootNodeType& other, VisitorOp& op) const { doVisit2<const RootNode, OtherRootNodeType, VisitorOp, ChildAllCIter, typename OtherRootNodeType::ChildAllCIter>(*this, other, op); } template<typename ChildT> template< typename RootNodeT, typename OtherRootNodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> inline void RootNode<ChildT>::doVisit2(RootNodeT& self, OtherRootNodeT& other, VisitorOp& op) { enforceSameConfiguration(other); typename RootNodeT::ValueType val; typename OtherRootNodeT::ValueType otherVal; // The two nodes are required to have corresponding table entries, // but since that might require background tiles to be added to one or both, // and the nodes might be const, we operate on shallow copies of the nodes instead. RootNodeT copyOfSelf(self.mBackground); copyOfSelf.mTable = self.mTable; OtherRootNodeT copyOfOther(other.mBackground); copyOfOther.mTable = other.mTable; // Add background tiles to both nodes as needed. CoordSet keys; self.insertKeys(keys); other.insertKeys(keys); for (CoordSetCIter i = keys.begin(), e = keys.end(); i != e; ++i) { copyOfSelf.findOrAddCoord(*i); copyOfOther.findOrAddCoord(*i); } ChildAllIterT iter = copyOfSelf.beginChildAll(); OtherChildAllIterT otherIter = copyOfOther.beginChildAll(); for ( ; iter && otherIter; ++iter, ++otherIter) { const size_t skipBranch = static_cast<size_t>(op(iter, otherIter)); typename ChildAllIterT::ChildNodeType* child = (skipBranch & 1U) ? nullptr : iter.probeChild(val); typename OtherChildAllIterT::ChildNodeType* otherChild = (skipBranch & 2U) ? 
nullptr : otherIter.probeChild(otherVal); if (child != nullptr && otherChild != nullptr) { child->visit2Node(*otherChild, op); } else if (child != nullptr) { child->visit2(otherIter, op); } else if (otherChild != nullptr) { otherChild->visit2(iter, op, /*otherIsLHS=*/true); } } // Remove any background tiles that were added above, // as well as any that were created by the visitors. copyOfSelf.eraseBackgroundTiles(); copyOfOther.eraseBackgroundTiles(); // If either input node is non-const, replace its table with // the (possibly modified) copy. self.resetTable(copyOfSelf.mTable); other.resetTable(copyOfOther.mTable); } } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_ROOTNODE_HAS_BEEN_INCLUDED
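The RootNode methods implemented above are normally reached through a tree's root() accessor. Below is a minimal, standalone usage sketch (not part of RootNode.h): it assumes an installed OpenVDB with <openvdb/openvdb.h> available, and the function name rootNodeUsageSketch is made up for illustration.

#include <openvdb/openvdb.h>

// Minimal sketch of the root-level API: fill(), addTile(), getValueDepth() and
// voxelizeActiveTiles(), called on the RootNode owned by a FloatTree.
inline void rootNodeUsageSketch()
{
    openvdb::initialize();

    openvdb::FloatTree tree(/*background=*/0.0f);
    auto& root = tree.root(); // the tree's RootNode

    // fill() covers the interior of the box with constant tiles where possible
    // and only creates (or descends into) child nodes along the partially
    // covered boundary of the box.
    root.fill(openvdb::CoordBBox(openvdb::Coord(0), openvdb::Coord(63)),
              /*value=*/1.0f, /*active=*/true);

    // addTile() installs a root-level constant tile, deleting any child node
    // previously registered under the same table key. It is kept inactive here
    // so that voxelizeActiveTiles() below does not densify a 4096^3 region,
    // matching the warning in the implementation above.
    root.addTile(openvdb::Coord(8192, 0, 0), /*value=*/2.0f, /*state=*/false);

    // getValueDepth() returns -1 for background, 0 for a root-level tile, and
    // larger values the deeper the node that stores the value.
    const int depth = root.getValueDepth(openvdb::Coord(8192, 0, 0)); // 0 here

    // voxelizeActiveTiles() replaces active tiles with dense child branches;
    // threading happens below the root table.
    root.voxelizeActiveTiles(/*threaded=*/true);

    (void)depth;
}

For a dense fill of the same region, denseFill() (also defined above) would populate boundary and interior in one call, at the cost of allocating every voxel rather than leaving constant tiles in place.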
133,960
C
37.395242
101
0.633577
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/LeafNodeMask.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_TREE_LEAF_NODE_MASK_HAS_BEEN_INCLUDED #define OPENVDB_TREE_LEAF_NODE_MASK_HAS_BEEN_INCLUDED #include <openvdb/version.h> #include <openvdb/Types.h> #include <openvdb/io/Compression.h> // for io::readData(), etc. #include <openvdb/math/Math.h> // for math::isZero() #include <openvdb/util/NodeMasks.h> #include "LeafNode.h" #include "Iterator.h" #include <iostream> #include <sstream> #include <string> #include <type_traits> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { /// @brief LeafNode specialization for values of type ValueMask that encodes both /// the active states and the boolean values of (2^Log2Dim)^3 voxels /// in a single bit mask, i.e. voxel values and states are indistinguishable! template<Index Log2Dim> class LeafNode<ValueMask, Log2Dim> { public: using LeafNodeType = LeafNode<ValueMask, Log2Dim>; using BuildType = ValueMask;// this is a rare case where using ValueType = bool;// value type != build type using Buffer = LeafBuffer<ValueType, Log2Dim>;// buffer uses the bool specialization using NodeMaskType = util::NodeMask<Log2Dim>; using Ptr = SharedPtr<LeafNodeType>; // These static declarations must be on separate lines to avoid VC9 compiler errors. static const Index LOG2DIM = Log2Dim; // needed by parent nodes static const Index TOTAL = Log2Dim; // needed by parent nodes static const Index DIM = 1 << TOTAL; // dimension along one coordinate direction static const Index NUM_VALUES = 1 << 3 * Log2Dim; static const Index NUM_VOXELS = NUM_VALUES; // total number of voxels represented by this node static const Index SIZE = NUM_VALUES; static const Index LEVEL = 0; // level 0 = leaf /// @brief ValueConverter<T>::Type is the type of a LeafNode having the same /// dimensions as this node but a different value type, T. template<typename OtherValueType> struct ValueConverter { using Type = LeafNode<OtherValueType, Log2Dim>; }; /// @brief SameConfiguration<OtherNodeType>::value is @c true if and only if /// OtherNodeType is the type of a LeafNode with the same dimensions as this node. template<typename OtherNodeType> struct SameConfiguration { static const bool value = SameLeafConfig<LOG2DIM, OtherNodeType>::value; }; /// Default constructor LeafNode(); /// Constructor /// @param xyz the coordinates of a voxel that lies within the node /// @param value the initial value = state for all of this node's voxels /// @param dummy dummy value explicit LeafNode(const Coord& xyz, bool value = false, bool dummy = false); /// "Partial creation" constructor used during file input LeafNode(PartialCreate, const Coord& xyz, bool value = false, bool dummy = false); /// Deep copy constructor LeafNode(const LeafNode&); /// Value conversion copy constructor template<typename OtherValueType> explicit LeafNode(const LeafNode<OtherValueType, Log2Dim>& other); /// Topology copy constructor template<typename ValueType> LeafNode(const LeafNode<ValueType, Log2Dim>& other, TopologyCopy); //@{ /// @brief Topology copy constructor /// @note This variant exists mainly to enable template instantiation. template<typename ValueType> LeafNode(const LeafNode<ValueType, Log2Dim>& other, bool offValue, bool onValue, TopologyCopy); template<typename ValueType> LeafNode(const LeafNode<ValueType, Log2Dim>& other, bool background, TopologyCopy); //@} /// Destructor ~LeafNode(); // // Statistics // /// Return log2 of the size of the buffer storage. 
static Index log2dim() { return Log2Dim; } /// Return the number of voxels in each dimension. static Index dim() { return DIM; } /// Return the total number of voxels represented by this LeafNode static Index size() { return SIZE; } /// Return the total number of voxels represented by this LeafNode static Index numValues() { return SIZE; } /// Return the level of this node, which by definition is zero for LeafNodes static Index getLevel() { return LEVEL; } /// Append the Log2Dim of this LeafNode to the specified vector static void getNodeLog2Dims(std::vector<Index>& dims) { dims.push_back(Log2Dim); } /// Return the dimension of child nodes of this LeafNode, which is one for voxels. static Index getChildDim() { return 1; } /// Return the leaf count for this node, which is one. static Index32 leafCount() { return 1; } /// no-op void nodeCount(std::vector<Index32> &) const {} /// Return the non-leaf count for this node, which is zero. static Index32 nonLeafCount() { return 0; } /// Return the number of active voxels. Index64 onVoxelCount() const { return mBuffer.mData.countOn(); } /// Return the number of inactive voxels. Index64 offVoxelCount() const { return mBuffer.mData.countOff(); } Index64 onLeafVoxelCount() const { return this->onVoxelCount(); } Index64 offLeafVoxelCount() const { return this->offVoxelCount(); } static Index64 onTileCount() { return 0; } static Index64 offTileCount() { return 0; } /// Return @c true if this node has no active voxels. bool isEmpty() const { return mBuffer.mData.isOff(); } /// Return @c true if this node only contains active voxels. bool isDense() const { return mBuffer.mData.isOn(); } /// @brief Return @c true if memory for this node's buffer has been allocated. /// @details Currently, boolean leaf nodes don't support partial creation, /// so this always returns @c true. bool isAllocated() const { return true; } /// @brief Allocate memory for this node's buffer if it has not already been allocated. /// @details Currently, boolean leaf nodes don't support partial creation, /// so this has no effect. bool allocate() { return true; } /// Return the memory in bytes occupied by this node. Index64 memUsage() const; /// Expand the given bounding box so that it includes this leaf node's active voxels. /// If visitVoxels is false this LeafNode will be approximated as dense, i.e. with all /// voxels active. Else the individual active voxels are visited to produce a tight bbox. void evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels = true) const; /// @brief Return the bounding box of this node, i.e., the full index space /// spanned by this leaf node. CoordBBox getNodeBoundingBox() const { return CoordBBox::createCube(mOrigin, DIM); } /// Set the grid index coordinates of this node's local origin. void setOrigin(const Coord& origin) { mOrigin = origin; } //@{ /// Return the grid index coordinates of this node's local origin. const Coord& origin() const { return mOrigin; } void getOrigin(Coord& origin) const { origin = mOrigin; } void getOrigin(Int32& x, Int32& y, Int32& z) const { mOrigin.asXYZ(x, y, z); } //@} /// Return the linear table offset of the given global or local coordinates. static Index coordToOffset(const Coord& xyz); /// @brief Return the local coordinates for a linear table offset, /// where offset 0 has coordinates (0, 0, 0). static Coord offsetToLocalCoord(Index n); /// Return the global coordinates for a linear table offset. Coord offsetToGlobalCoord(Index n) const; /// Return a string representation of this node. 
std::string str() const; /// @brief Return @c true if the given node (which may have a different @c ValueType /// than this node) has the same active value topology as this node. template<typename OtherType, Index OtherLog2Dim> bool hasSameTopology(const LeafNode<OtherType, OtherLog2Dim>* other) const; /// Check for buffer equivalence by value. bool operator==(const LeafNode&) const; bool operator!=(const LeafNode&) const; // // Buffer management // /// @brief Exchange this node's data buffer with the given data buffer /// without changing the active states of the values. void swap(Buffer& other) { mBuffer.swap(other); } const Buffer& buffer() const { return mBuffer; } Buffer& buffer() { return mBuffer; } // // I/O methods // /// Read in just the topology. void readTopology(std::istream&, bool fromHalf = false); /// Write out just the topology. void writeTopology(std::ostream&, bool toHalf = false) const; /// Read in the topology and the origin. void readBuffers(std::istream&, bool fromHalf = false); void readBuffers(std::istream& is, const CoordBBox&, bool fromHalf = false); /// Write out the topology and the origin. void writeBuffers(std::ostream&, bool toHalf = false) const; // // Accessor methods // /// Return the value of the voxel at the given coordinates. const bool& getValue(const Coord& xyz) const; /// Return the value of the voxel at the given offset. const bool& getValue(Index offset) const; /// @brief Return @c true if the voxel at the given coordinates is active. /// @param xyz the coordinates of the voxel to be probed /// @param[out] val the value of the voxel at the given coordinates bool probeValue(const Coord& xyz, bool& val) const; /// Return the level (0) at which leaf node values reside. static Index getValueLevel(const Coord&) { return LEVEL; } /// Set the active state of the voxel at the given coordinates but don't change its value. void setActiveState(const Coord& xyz, bool on); /// Set the active state of the voxel at the given offset but don't change its value. void setActiveState(Index offset, bool on) { assert(offset<SIZE); mBuffer.mData.set(offset, on); } /// Set the value of the voxel at the given coordinates but don't change its active state. void setValueOnly(const Coord& xyz, bool val); /// Set the value of the voxel at the given offset but don't change its active state. void setValueOnly(Index offset, bool val) { assert(offset<SIZE); mBuffer.setValue(offset,val); } /// Mark the voxel at the given coordinates as inactive but don't change its value. void setValueOff(const Coord& xyz) { mBuffer.mData.setOff(this->coordToOffset(xyz)); } /// Mark the voxel at the given offset as inactive but don't change its value. void setValueOff(Index offset) { assert(offset < SIZE); mBuffer.mData.setOff(offset); } /// Set the value of the voxel at the given coordinates and mark the voxel as inactive. void setValueOff(const Coord& xyz, bool val); /// Set the value of the voxel at the given offset and mark the voxel as inactive. void setValueOff(Index offset, bool val); /// Mark the voxel at the given coordinates as active but don't change its value. void setValueOn(const Coord& xyz) { mBuffer.mData.setOn(this->coordToOffset(xyz)); } /// Mark the voxel at the given offset as active but don't change its value. void setValueOn(Index offset) { assert(offset < SIZE); mBuffer.mData.setOn(offset); } /// Set the value of the voxel at the given coordinates and mark the voxel as active. 
void setValueOn(const Coord& xyz, bool val); /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValue(const Coord& xyz, bool val) { this->setValueOn(xyz, val); } /// Set the value of the voxel at the given offset and mark the voxel as active. void setValueOn(Index offset, bool val); /// @brief Apply a functor to the value of the voxel at the given offset /// and mark the voxel as active. template<typename ModifyOp> void modifyValue(Index offset, const ModifyOp& op); /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. template<typename ModifyOp> void modifyValue(const Coord& xyz, const ModifyOp& op); /// Apply a functor to the voxel at the given coordinates. template<typename ModifyOp> void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op); /// Mark all voxels as active but don't change their values. void setValuesOn() { mBuffer.mData.setOn(); } /// Mark all voxels as inactive but don't change their values. void setValuesOff() { mBuffer.mData.setOff(); } /// Return @c true if the voxel at the given coordinates is active. bool isValueOn(const Coord& xyz) const { return mBuffer.mData.isOn(this->coordToOffset(xyz)); } /// Return @c true if the voxel at the given offset is active. bool isValueOn(Index offset) const { assert(offset < SIZE); return mBuffer.mData.isOn(offset); } /// Return @c false since leaf nodes never contain tiles. static bool hasActiveTiles() { return false; } /// Set all voxels that lie outside the given axis-aligned box to the background. void clip(const CoordBBox&, bool background); /// Set all voxels within an axis-aligned box to the specified value. void fill(const CoordBBox& bbox, bool value, bool = false); /// Set all voxels within an axis-aligned box to the specified value. void denseFill(const CoordBBox& bbox, bool value, bool = false) { this->fill(bbox, value); } /// Set the state of all voxels to the specified active state. void fill(const bool& value, bool dummy = false); /// @brief Copy into a dense grid the values of the voxels that lie within /// a given bounding box. /// /// @param bbox inclusive bounding box of the voxels to be copied into the dense grid /// @param dense dense grid with a stride in @e z of one (see tools::Dense /// in tools/Dense.h for the required API) /// /// @note @a bbox is assumed to be identical to or contained in the coordinate domains /// of both the dense grid and this node, i.e., no bounds checking is performed. /// @note Consider using tools::CopyToDense in tools/Dense.h /// instead of calling this method directly. template<typename DenseT> void copyToDense(const CoordBBox& bbox, DenseT& dense) const; /// @brief Copy from a dense grid into this node the values of the voxels /// that lie within a given bounding box. /// @details Only values that are different (by more than the given tolerance) /// from the background value will be active. Other values are inactive /// and truncated to the background value. 
/// /// @param bbox inclusive bounding box of the voxels to be copied into this node /// @param dense dense grid with a stride in @e z of one (see tools::Dense /// in tools/Dense.h for the required API) /// @param background background value of the tree that this node belongs to /// @param tolerance tolerance within which a value equals the background value /// /// @note @a bbox is assumed to be identical to or contained in the coordinate domains /// of both the dense grid and this node, i.e., no bounds checking is performed. /// @note Consider using tools::CopyFromDense in tools/Dense.h /// instead of calling this method directly. template<typename DenseT> void copyFromDense(const CoordBBox& bbox, const DenseT& dense, bool background, bool tolerance); /// @brief Return the value of the voxel at the given coordinates. /// @note Used internally by ValueAccessor. template<typename AccessorT> const bool& getValueAndCache(const Coord& xyz, AccessorT&) const {return this->getValue(xyz);} /// @brief Return @c true if the voxel at the given coordinates is active. /// @note Used internally by ValueAccessor. template<typename AccessorT> bool isValueOnAndCache(const Coord& xyz, AccessorT&) const { return this->isValueOn(xyz); } /// @brief Change the value of the voxel at the given coordinates and mark it as active. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setValueAndCache(const Coord& xyz, bool val, AccessorT&) { this->setValueOn(xyz, val); } /// @brief Change the value of the voxel at the given coordinates /// but preserve its state. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setValueOnlyAndCache(const Coord& xyz, bool val, AccessorT&) {this->setValueOnly(xyz,val);} /// @brief Change the value of the voxel at the given coordinates and mark it as inactive. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setValueOffAndCache(const Coord& xyz, bool value, AccessorT&) { this->setValueOff(xyz, value); } /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. /// @note Used internally by ValueAccessor. template<typename ModifyOp, typename AccessorT> void modifyValueAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&) { this->modifyValue(xyz, op); } /// Apply a functor to the voxel at the given coordinates. /// @note Used internally by ValueAccessor. template<typename ModifyOp, typename AccessorT> void modifyValueAndActiveStateAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&) { this->modifyValueAndActiveState(xyz, op); } /// @brief Set the active state of the voxel at the given coordinates /// without changing its value. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setActiveStateAndCache(const Coord& xyz, bool on, AccessorT&) { this->setActiveState(xyz, on); } /// @brief Return @c true if the voxel at the given coordinates is active /// and return the voxel value in @a val. /// @note Used internally by ValueAccessor. template<typename AccessorT> bool probeValueAndCache(const Coord& xyz, bool& val, AccessorT&) const { return this->probeValue(xyz, val); } /// @brief Return the LEVEL (=0) at which leaf node values reside. /// @note Used internally by ValueAccessor. template<typename AccessorT> static Index getValueLevelAndCache(const Coord&, AccessorT&) { return LEVEL; } /// @brief Return a const reference to the first entry in the buffer. 
/// @note Since it's actually a reference to a static data member /// it should not be converted to a non-const pointer! const bool& getFirstValue() const { if (mBuffer.mData.isOn(0)) return Buffer::sOn; else return Buffer::sOff; } /// @brief Return a const reference to the last entry in the buffer. /// @note Since it's actually a reference to a static data member /// it should not be converted to a non-const pointer! const bool& getLastValue() const { if (mBuffer.mData.isOn(SIZE-1)) return Buffer::sOn; else return Buffer::sOff; } /// Return @c true if all of this node's voxels have the same active state /// and are equal to within the given tolerance, and return the value in /// @a constValue and the active state in @a state. bool isConstant(bool& constValue, bool& state, bool tolerance = 0) const; /// @brief Computes the median value of all the active and inactive voxels in this node. /// @return The median value. /// /// @details The median for boolean values is defined as the mode /// of the values, i.e. the value that occurs most often. bool medianAll() const; /// @brief Computes the median value of all the active voxels in this node. /// @return The number of active voxels. /// /// @param value Updated with the median value of all the active voxels. /// /// @note Since the value and state are shared for this /// specialization of the LeafNode the @a value will always be true! Index medianOn(ValueType &value) const; /// @brief Computes the median value of all the inactive voxels in this node. /// @return The number of inactive voxels. /// /// @param value Updated with the median value of all the inactive /// voxels. /// /// @note Since the value and state are shared for this /// specialization of the LeafNode the @a value will always be false! Index medianOff(ValueType &value) const; /// Return @c true if all of this node's values are inactive. bool isInactive() const { return mBuffer.mData.isOff(); } /// @brief no-op since for this template specialization voxel /// values and states are indistinguishable. void resetBackground(bool, bool) {} /// @brief Invert the bits of the voxels, i.e. states and values void negate() { mBuffer.mData.toggle(); } template<MergePolicy Policy> void merge(const LeafNode& other, bool bg = false, bool otherBG = false); template<MergePolicy Policy> void merge(bool tileValue, bool tileActive=false); /// @brief No-op /// @details This function exists only to enable template instantiation. void voxelizeActiveTiles(bool = true) {} /// @brief Union this node's set of active values with the active values /// of the other node, whose @c ValueType may be different. So a /// resulting voxel will be active if either of the original voxels /// was active. /// /// @note This operation modifies only active states, not values. template<typename OtherType> void topologyUnion(const LeafNode<OtherType, Log2Dim>& other); /// @brief Intersect this node's set of active values with the active values /// of the other node, whose @c ValueType may be different. So a /// resulting voxel will be active only if both of the original voxels /// were active. /// /// @details The last dummy argument is required to match the signature /// for InternalNode::topologyIntersection. /// /// @note This operation modifies only active states, not /// values. Also note that this operation can result in all voxels /// being inactive so consider subsequently calling prune.
template<typename OtherType> void topologyIntersection(const LeafNode<OtherType, Log2Dim>& other, const bool&); /// @brief Difference this node's set of active values with the active values /// of the other node, whose @c ValueType may be different. So a /// resulting voxel will be active only if the original voxel is /// active in this LeafNode and inactive in the other LeafNode. /// /// @details The last dummy argument is required to match the signature /// for InternalNode::topologyDifference. /// /// @note This operation modifies only active states, not values. /// Also, because it can deactivate all of this node's voxels, /// consider subsequently calling prune. template<typename OtherType> void topologyDifference(const LeafNode<OtherType, Log2Dim>& other, const bool&); template<typename CombineOp> void combine(const LeafNode& other, CombineOp& op); template<typename CombineOp> void combine(bool, bool valueIsActive, CombineOp& op); template<typename CombineOp, typename OtherType /*= bool*/> void combine2(const LeafNode& other, const OtherType&, bool valueIsActive, CombineOp&); template<typename CombineOp, typename OtherNodeT /*= LeafNode*/> void combine2(bool, const OtherNodeT& other, bool valueIsActive, CombineOp&); template<typename CombineOp, typename OtherNodeT /*= LeafNode*/> void combine2(const LeafNode& b0, const OtherNodeT& b1, CombineOp&); /// @brief Calls the templated functor BBoxOp with bounding box information. /// An additional level argument is provided to the callback. /// /// @note The bounding boxes are guaranteed to be non-overlapping. template<typename BBoxOp> void visitActiveBBox(BBoxOp&) const; template<typename VisitorOp> void visit(VisitorOp&); template<typename VisitorOp> void visit(VisitorOp&) const; template<typename OtherLeafNodeType, typename VisitorOp> void visit2Node(OtherLeafNodeType& other, VisitorOp&); template<typename OtherLeafNodeType, typename VisitorOp> void visit2Node(OtherLeafNodeType& other, VisitorOp&) const; template<typename IterT, typename VisitorOp> void visit2(IterT& otherIter, VisitorOp&, bool otherIsLHS = false); template<typename IterT, typename VisitorOp> void visit2(IterT& otherIter, VisitorOp&, bool otherIsLHS = false) const; //@{ /// This function exists only to enable template instantiation. void prune(const ValueType& /*tolerance*/ = zeroVal<ValueType>()) {} void addLeaf(LeafNode*) {} template<typename AccessorT> void addLeafAndCache(LeafNode*, AccessorT&) {} template<typename NodeT> NodeT* stealNode(const Coord&, const ValueType&, bool) { return nullptr; } template<typename NodeT> NodeT* probeNode(const Coord&) { return nullptr; } template<typename NodeT> const NodeT* probeConstNode(const Coord&) const { return nullptr; } template<typename ArrayT> void getNodes(ArrayT&) const {} template<typename ArrayT> void stealNodes(ArrayT&, const ValueType&, bool) {} //@} void addTile(Index level, const Coord&, bool val, bool active); void addTile(Index offset, bool val, bool active); template<typename AccessorT> void addTileAndCache(Index level, const Coord&, bool val, bool active, AccessorT&); //@{ /// @brief Return a pointer to this node.
LeafNode* touchLeaf(const Coord&) { return this; } template<typename AccessorT> LeafNode* touchLeafAndCache(const Coord&, AccessorT&) { return this; } LeafNode* probeLeaf(const Coord&) { return this; } template<typename AccessorT> LeafNode* probeLeafAndCache(const Coord&, AccessorT&) { return this; } template<typename NodeT, typename AccessorT> NodeT* probeNodeAndCache(const Coord&, AccessorT&) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (!(std::is_same<NodeT, LeafNode>::value)) return nullptr; return reinterpret_cast<NodeT*>(this); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } //@} //@{ /// @brief Return a @const pointer to this node. const LeafNode* probeLeaf(const Coord&) const { return this; } template<typename AccessorT> const LeafNode* probeLeafAndCache(const Coord&, AccessorT&) const { return this; } const LeafNode* probeConstLeaf(const Coord&) const { return this; } template<typename AccessorT> const LeafNode* probeConstLeafAndCache(const Coord&, AccessorT&) const { return this; } template<typename NodeT, typename AccessorT> const NodeT* probeConstNodeAndCache(const Coord&, AccessorT&) const { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (!(std::is_same<NodeT, LeafNode>::value)) return nullptr; return reinterpret_cast<const NodeT*>(this); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } //@} // // Iterators // protected: using MaskOnIter = typename NodeMaskType::OnIterator; using MaskOffIter = typename NodeMaskType::OffIterator; using MaskDenseIter = typename NodeMaskType::DenseIterator; template<typename MaskIterT, typename NodeT, typename ValueT> struct ValueIter: // Derives from SparseIteratorBase, but can also be used as a dense iterator, // if MaskIterT is a dense mask iterator type. public SparseIteratorBase<MaskIterT, ValueIter<MaskIterT, NodeT, ValueT>, NodeT, ValueT> { using BaseT = SparseIteratorBase<MaskIterT, ValueIter, NodeT, ValueT>; ValueIter() {} ValueIter(const MaskIterT& iter, NodeT* parent): BaseT(iter, parent) {} const bool& getItem(Index pos) const { return this->parent().getValue(pos); } const bool& getValue() const { return this->getItem(this->pos()); } // Note: setItem() can't be called on const iterators. void setItem(Index pos, bool value) const { this->parent().setValueOnly(pos, value); } // Note: setValue() can't be called on const iterators. void setValue(bool value) const { this->setItem(this->pos(), value); } // Note: modifyItem() can't be called on const iterators. template<typename ModifyOp> void modifyItem(Index n, const ModifyOp& op) const { this->parent().modifyValue(n, op); } // Note: modifyValue() can't be called on const iterators. template<typename ModifyOp> void modifyValue(const ModifyOp& op) const { this->modifyItem(this->pos(), op); } }; /// Leaf nodes have no children, so their child iterators have no get/set accessors. 
template<typename MaskIterT, typename NodeT> struct ChildIter: public SparseIteratorBase<MaskIterT, ChildIter<MaskIterT, NodeT>, NodeT, bool> { ChildIter() {} ChildIter(const MaskIterT& iter, NodeT* parent): SparseIteratorBase< MaskIterT, ChildIter<MaskIterT, NodeT>, NodeT, bool>(iter, parent) {} }; template<typename NodeT, typename ValueT> struct DenseIter: public DenseIteratorBase< MaskDenseIter, DenseIter<NodeT, ValueT>, NodeT, /*ChildT=*/void, ValueT> { using BaseT = DenseIteratorBase<MaskDenseIter, DenseIter, NodeT, void, ValueT>; using NonConstValueT = typename BaseT::NonConstValueType; DenseIter() {} DenseIter(const MaskDenseIter& iter, NodeT* parent): BaseT(iter, parent) {} bool getItem(Index pos, void*& child, NonConstValueT& value) const { value = this->parent().getValue(pos); child = nullptr; return false; // no child } // Note: setItem() can't be called on const iterators. //void setItem(Index pos, void* child) const {} // Note: unsetItem() can't be called on const iterators. void unsetItem(Index pos, const ValueT& val) const {this->parent().setValueOnly(pos, val);} }; public: using ValueOnIter = ValueIter<MaskOnIter, LeafNode, const bool>; using ValueOnCIter = ValueIter<MaskOnIter, const LeafNode, const bool>; using ValueOffIter = ValueIter<MaskOffIter, LeafNode, const bool>; using ValueOffCIter = ValueIter<MaskOffIter, const LeafNode, const bool>; using ValueAllIter = ValueIter<MaskDenseIter, LeafNode, const bool>; using ValueAllCIter = ValueIter<MaskDenseIter, const LeafNode, const bool>; using ChildOnIter = ChildIter<MaskOnIter, LeafNode>; using ChildOnCIter = ChildIter<MaskOnIter, const LeafNode>; using ChildOffIter = ChildIter<MaskOffIter, LeafNode>; using ChildOffCIter = ChildIter<MaskOffIter, const LeafNode>; using ChildAllIter = DenseIter<LeafNode, bool>; using ChildAllCIter = DenseIter<const LeafNode, const bool>; ValueOnCIter cbeginValueOn() const { return ValueOnCIter(mBuffer.mData.beginOn(), this); } ValueOnCIter beginValueOn() const { return ValueOnCIter(mBuffer.mData.beginOn(), this); } ValueOnIter beginValueOn() { return ValueOnIter(mBuffer.mData.beginOn(), this); } ValueOffCIter cbeginValueOff() const { return ValueOffCIter(mBuffer.mData.beginOff(), this); } ValueOffCIter beginValueOff() const { return ValueOffCIter(mBuffer.mData.beginOff(), this); } ValueOffIter beginValueOff() { return ValueOffIter(mBuffer.mData.beginOff(), this); } ValueAllCIter cbeginValueAll() const { return ValueAllCIter(mBuffer.mData.beginDense(), this); } ValueAllCIter beginValueAll() const { return ValueAllCIter(mBuffer.mData.beginDense(), this); } ValueAllIter beginValueAll() { return ValueAllIter(mBuffer.mData.beginDense(), this); } ValueOnCIter cendValueOn() const { return ValueOnCIter(mBuffer.mData.endOn(), this); } ValueOnCIter endValueOn() const { return ValueOnCIter(mBuffer.mData.endOn(), this); } ValueOnIter endValueOn() { return ValueOnIter(mBuffer.mData.endOn(), this); } ValueOffCIter cendValueOff() const { return ValueOffCIter(mBuffer.mData.endOff(), this); } ValueOffCIter endValueOff() const { return ValueOffCIter(mBuffer.mData.endOff(), this); } ValueOffIter endValueOff() { return ValueOffIter(mBuffer.mData.endOff(), this); } ValueAllCIter cendValueAll() const { return ValueAllCIter(mBuffer.mData.endDense(), this); } ValueAllCIter endValueAll() const { return ValueAllCIter(mBuffer.mData.endDense(), this); } ValueAllIter endValueAll() { return ValueAllIter(mBuffer.mData.endDense(), this); } // Note that [c]beginChildOn() and [c]beginChildOff() actually return end iterators, 
// because leaf nodes have no children. ChildOnCIter cbeginChildOn() const { return ChildOnCIter(mBuffer.mData.endOn(), this); } ChildOnCIter beginChildOn() const { return ChildOnCIter(mBuffer.mData.endOn(), this); } ChildOnIter beginChildOn() { return ChildOnIter(mBuffer.mData.endOn(), this); } ChildOffCIter cbeginChildOff() const { return ChildOffCIter(mBuffer.mData.endOff(), this); } ChildOffCIter beginChildOff() const { return ChildOffCIter(mBuffer.mData.endOff(), this); } ChildOffIter beginChildOff() { return ChildOffIter(mBuffer.mData.endOff(), this); } ChildAllCIter cbeginChildAll() const { return ChildAllCIter(mBuffer.mData.beginDense(), this); } ChildAllCIter beginChildAll() const { return ChildAllCIter(mBuffer.mData.beginDense(), this); } ChildAllIter beginChildAll() { return ChildAllIter(mBuffer.mData.beginDense(), this); } ChildOnCIter cendChildOn() const { return ChildOnCIter(mBuffer.mData.endOn(), this); } ChildOnCIter endChildOn() const { return ChildOnCIter(mBuffer.mData.endOn(), this); } ChildOnIter endChildOn() { return ChildOnIter(mBuffer.mData.endOn(), this); } ChildOffCIter cendChildOff() const { return ChildOffCIter(mBuffer.mData.endOff(), this); } ChildOffCIter endChildOff() const { return ChildOffCIter(mBuffer.mData.endOff(), this); } ChildOffIter endChildOff() { return ChildOffIter(mBuffer.mData.endOff(), this); } ChildAllCIter cendChildAll() const { return ChildAllCIter(mBuffer.mData.endDense(), this); } ChildAllCIter endChildAll() const { return ChildAllCIter(mBuffer.mData.endDense(), this); } ChildAllIter endChildAll() { return ChildAllIter(mBuffer.mData.endDense(), this); } // // Mask accessors // bool isValueMaskOn(Index n) const { return mBuffer.mData.isOn(n); } bool isValueMaskOn() const { return mBuffer.mData.isOn(); } bool isValueMaskOff(Index n) const { return mBuffer.mData.isOff(n); } bool isValueMaskOff() const { return mBuffer.mData.isOff(); } const NodeMaskType& getValueMask() const { return mBuffer.mData; } const NodeMaskType& valueMask() const { return mBuffer.mData; } NodeMaskType& getValueMask() { return mBuffer.mData; } void setValueMask(const NodeMaskType& mask) { mBuffer.mData = mask; } bool isChildMaskOn(Index) const { return false; } // leaf nodes have no children bool isChildMaskOff(Index) const { return true; } bool isChildMaskOff() const { return true; } protected: void setValueMask(Index n, bool on) { mBuffer.mData.set(n, on); } void setValueMaskOn(Index n) { mBuffer.mData.setOn(n); } void setValueMaskOff(Index n) { mBuffer.mData.setOff(n); } /// Compute the origin of the leaf node that contains the voxel with the given coordinates. static void evalNodeOrigin(Coord& xyz) { xyz &= ~(DIM - 1); } template<typename NodeT, typename VisitorOp, typename ChildAllIterT> static inline void doVisit(NodeT&, VisitorOp&); template<typename NodeT, typename OtherNodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> static inline void doVisit2Node(NodeT& self, OtherNodeT& other, VisitorOp&); template<typename NodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> static inline void doVisit2(NodeT& self, OtherChildAllIterT&, VisitorOp&, bool otherIsLHS); /// Bitmask representing the values AND state of voxels Buffer mBuffer; /// Global grid index coordinates (x,y,z) of the local origin of this node Coord mOrigin; private: /// @brief During topology-only construction, access is needed /// to protected/private members of other template instances. 
template<typename, Index> friend class LeafNode; friend struct ValueIter<MaskOnIter, LeafNode, bool>; friend struct ValueIter<MaskOffIter, LeafNode, bool>; friend struct ValueIter<MaskDenseIter, LeafNode, bool>; friend struct ValueIter<MaskOnIter, const LeafNode, bool>; friend struct ValueIter<MaskOffIter, const LeafNode, bool>; friend struct ValueIter<MaskDenseIter, const LeafNode, bool>; //@{ /// Allow iterators to call mask accessor methods (see below). /// @todo Make mask accessors public? friend class IteratorBase<MaskOnIter, LeafNode>; friend class IteratorBase<MaskOffIter, LeafNode>; friend class IteratorBase<MaskDenseIter, LeafNode>; //@} template<typename, Index> friend class LeafBuffer; }; // class LeafNode<ValueMask> //////////////////////////////////////// template<Index Log2Dim> inline LeafNode<ValueMask, Log2Dim>::LeafNode() : mOrigin(0, 0, 0) { } template<Index Log2Dim> inline LeafNode<ValueMask, Log2Dim>::LeafNode(const Coord& xyz, bool value, bool active) : mBuffer(value || active) , mOrigin(xyz & (~(DIM - 1))) { } template<Index Log2Dim> inline LeafNode<ValueMask, Log2Dim>::LeafNode(PartialCreate, const Coord& xyz, bool value, bool active) : mBuffer(value || active) , mOrigin(xyz & (~(DIM - 1))) { } template<Index Log2Dim> inline LeafNode<ValueMask, Log2Dim>::LeafNode(const LeafNode &other) : mBuffer(other.mBuffer) , mOrigin(other.mOrigin) { } // Copy-construct from a leaf node with the same configuration but a different ValueType. template<Index Log2Dim> template<typename ValueT> inline LeafNode<ValueMask, Log2Dim>::LeafNode(const LeafNode<ValueT, Log2Dim>& other) : mBuffer(other.valueMask()) , mOrigin(other.origin()) { } template<Index Log2Dim> template<typename ValueT> inline LeafNode<ValueMask, Log2Dim>::LeafNode(const LeafNode<ValueT, Log2Dim>& other, bool, TopologyCopy) : mBuffer(other.valueMask())// value = active state , mOrigin(other.origin()) { } template<Index Log2Dim> template<typename ValueT> inline LeafNode<ValueMask, Log2Dim>::LeafNode(const LeafNode<ValueT, Log2Dim>& other, TopologyCopy) : mBuffer(other.valueMask())// value = active state , mOrigin(other.origin()) { } template<Index Log2Dim> template<typename ValueT> inline LeafNode<ValueMask, Log2Dim>::LeafNode(const LeafNode<ValueT, Log2Dim>& other, bool offValue, bool onValue, TopologyCopy) : mBuffer(other.valueMask()) , mOrigin(other.origin()) { if (offValue==true) { if (onValue==false) { mBuffer.mData.toggle(); } else { mBuffer.mData.setOn(); } } } template<Index Log2Dim> inline LeafNode<ValueMask, Log2Dim>::~LeafNode() { } //////////////////////////////////////// template<Index Log2Dim> inline Index64 LeafNode<ValueMask, Log2Dim>::memUsage() const { // Use sizeof(*this) to capture alignment-related padding return sizeof(*this); } template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels) const { CoordBBox this_bbox = this->getNodeBoundingBox(); if (bbox.isInside(this_bbox)) return;//this LeafNode is already enclosed in the bbox if (ValueOnCIter iter = this->cbeginValueOn()) {//any active values? if (visitVoxels) {//use voxel granularity? 
this_bbox.reset(); for(; iter; ++iter) this_bbox.expand(this->offsetToLocalCoord(iter.pos())); this_bbox.translate(this->origin()); } bbox.expand(this_bbox); } } template<Index Log2Dim> template<typename OtherType, Index OtherLog2Dim> inline bool LeafNode<ValueMask, Log2Dim>::hasSameTopology(const LeafNode<OtherType, OtherLog2Dim>* other) const { assert(other); return (Log2Dim == OtherLog2Dim && mBuffer.mData == other->getValueMask()); } template<Index Log2Dim> inline std::string LeafNode<ValueMask, Log2Dim>::str() const { std::ostringstream ostr; ostr << "LeafNode @" << mOrigin << ": "; for (Index32 n = 0; n < SIZE; ++n) ostr << (mBuffer.mData.isOn(n) ? '#' : '.'); return ostr.str(); } //////////////////////////////////////// template<Index Log2Dim> inline Index LeafNode<ValueMask, Log2Dim>::coordToOffset(const Coord& xyz) { assert ((xyz[0] & (DIM-1u)) < DIM && (xyz[1] & (DIM-1u)) < DIM && (xyz[2] & (DIM-1u)) < DIM); return ((xyz[0] & (DIM-1u)) << 2*Log2Dim) + ((xyz[1] & (DIM-1u)) << Log2Dim) + (xyz[2] & (DIM-1u)); } template<Index Log2Dim> inline Coord LeafNode<ValueMask, Log2Dim>::offsetToLocalCoord(Index n) { assert(n < (1 << 3*Log2Dim)); Coord xyz; xyz.setX(n >> 2*Log2Dim); n &= ((1 << 2*Log2Dim) - 1); xyz.setY(n >> Log2Dim); xyz.setZ(n & ((1 << Log2Dim) - 1)); return xyz; } template<Index Log2Dim> inline Coord LeafNode<ValueMask, Log2Dim>::offsetToGlobalCoord(Index n) const { return (this->offsetToLocalCoord(n) + this->origin()); } //////////////////////////////////////// template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::readTopology(std::istream& is, bool /*fromHalf*/) { mBuffer.mData.load(is); } template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::writeTopology(std::ostream& os, bool /*toHalf*/) const { mBuffer.mData.save(os); } template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::readBuffers(std::istream& is, const CoordBBox& clipBBox, bool fromHalf) { // Boolean LeafNodes don't currently implement lazy loading. // Instead, load the full buffer, then clip it. this->readBuffers(is, fromHalf); // Get this tree's background value. bool background = false; if (const void* bgPtr = io::getGridBackgroundValuePtr(is)) { background = *static_cast<const bool*>(bgPtr); } this->clip(clipBBox, background); } template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::readBuffers(std::istream& is, bool /*fromHalf*/) { // Read in the value mask = buffer. mBuffer.mData.load(is); // Read in the origin. is.read(reinterpret_cast<char*>(&mOrigin), sizeof(Coord::ValueType) * 3); } template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::writeBuffers(std::ostream& os, bool /*toHalf*/) const { // Write out the value mask = buffer. mBuffer.mData.save(os); // Write out the origin. 
os.write(reinterpret_cast<const char*>(&mOrigin), sizeof(Coord::ValueType) * 3); } //////////////////////////////////////// template<Index Log2Dim> inline bool LeafNode<ValueMask, Log2Dim>::operator==(const LeafNode& other) const { return mOrigin == other.mOrigin && mBuffer == other.mBuffer; } template<Index Log2Dim> inline bool LeafNode<ValueMask, Log2Dim>::operator!=(const LeafNode& other) const { return !(this->operator==(other)); } //////////////////////////////////////// template<Index Log2Dim> inline bool LeafNode<ValueMask, Log2Dim>::isConstant(bool& constValue, bool& state, bool) const { if (!mBuffer.mData.isConstant(state)) return false; constValue = state; return true; } //////////////////////////////////////// template<Index Log2Dim> inline bool LeafNode<ValueMask, Log2Dim>::medianAll() const { const Index countTrue = mBuffer.mData.countOn(); return countTrue > (NUM_VALUES >> 1); } template<Index Log2Dim> inline Index LeafNode<ValueMask, Log2Dim>::medianOn(bool& state) const { const Index countTrueOn = mBuffer.mData.countOn(); state = true;//since value and state are the same for this specialization of the leaf node return countTrueOn; } template<Index Log2Dim> inline Index LeafNode<ValueMask, Log2Dim>::medianOff(bool& state) const { const Index countFalseOff = mBuffer.mData.countOff(); state = false;//since value and state are the same for this specialization of the leaf node return countFalseOff; } //////////////////////////////////////// template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::addTile(Index /*level*/, const Coord& xyz, bool val, bool active) { this->addTile(this->coordToOffset(xyz), val, active); } template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::addTile(Index offset, bool val, bool active) { assert(offset < SIZE); this->setValueOnly(offset, val); this->setActiveState(offset, active); } template<Index Log2Dim> template<typename AccessorT> inline void LeafNode<ValueMask, Log2Dim>::addTileAndCache(Index level, const Coord& xyz, bool val, bool active, AccessorT&) { this->addTile(level, xyz, val, active); } //////////////////////////////////////// template<Index Log2Dim> inline const bool& LeafNode<ValueMask, Log2Dim>::getValue(const Coord& xyz) const { // This *CANNOT* use operator ? because Visual C++ if (mBuffer.mData.isOn(this->coordToOffset(xyz))) return Buffer::sOn; else return Buffer::sOff; } template<Index Log2Dim> inline const bool& LeafNode<ValueMask, Log2Dim>::getValue(Index offset) const { assert(offset < SIZE); // This *CANNOT* use operator ? 
for Windows if (mBuffer.mData.isOn(offset)) return Buffer::sOn; else return Buffer::sOff; } template<Index Log2Dim> inline bool LeafNode<ValueMask, Log2Dim>::probeValue(const Coord& xyz, bool& val) const { const Index offset = this->coordToOffset(xyz); val = mBuffer.mData.isOn(offset); return val; } template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::setValueOn(const Coord& xyz, bool val) { this->setValueOn(this->coordToOffset(xyz), val); } template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::setValueOn(Index offset, bool val) { assert(offset < SIZE); mBuffer.mData.set(offset, val); } template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::setValueOnly(const Coord& xyz, bool val) { this->setValueOnly(this->coordToOffset(xyz), val); } template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::setActiveState(const Coord& xyz, bool on) { mBuffer.mData.set(this->coordToOffset(xyz), on); } template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::setValueOff(const Coord& xyz, bool val) { this->setValueOff(this->coordToOffset(xyz), val); } template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::setValueOff(Index offset, bool val) { assert(offset < SIZE); mBuffer.mData.set(offset, val); } template<Index Log2Dim> template<typename ModifyOp> inline void LeafNode<ValueMask, Log2Dim>::modifyValue(Index offset, const ModifyOp& op) { bool val = mBuffer.mData.isOn(offset); op(val); mBuffer.mData.set(offset, val); } template<Index Log2Dim> template<typename ModifyOp> inline void LeafNode<ValueMask, Log2Dim>::modifyValue(const Coord& xyz, const ModifyOp& op) { this->modifyValue(this->coordToOffset(xyz), op); } template<Index Log2Dim> template<typename ModifyOp> inline void LeafNode<ValueMask, Log2Dim>::modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op) { const Index offset = this->coordToOffset(xyz); bool val = mBuffer.mData.isOn(offset), state = val; op(val, state); mBuffer.mData.set(offset, val); } //////////////////////////////////////// template<Index Log2Dim> template<MergePolicy Policy> inline void LeafNode<ValueMask, Log2Dim>::merge(const LeafNode& other, bool /*bg*/, bool /*otherBG*/) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (Policy == MERGE_NODES) return; mBuffer.mData |= other.mBuffer.mData; OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } template<Index Log2Dim> template<MergePolicy Policy> inline void LeafNode<ValueMask, Log2Dim>::merge(bool tileValue, bool) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (Policy != MERGE_ACTIVE_STATES_AND_NODES) return; if (tileValue) mBuffer.mData.setOn(); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } //////////////////////////////////////// template<Index Log2Dim> template<typename OtherType> inline void LeafNode<ValueMask, Log2Dim>::topologyUnion(const LeafNode<OtherType, Log2Dim>& other) { mBuffer.mData |= other.valueMask(); } template<Index Log2Dim> template<typename OtherType> inline void LeafNode<ValueMask, Log2Dim>::topologyIntersection(const LeafNode<OtherType, Log2Dim>& other, const bool&) { mBuffer.mData &= other.valueMask(); } template<Index Log2Dim> template<typename OtherType> inline void LeafNode<ValueMask, Log2Dim>::topologyDifference(const LeafNode<OtherType, Log2Dim>& other, const bool&) { mBuffer.mData &= !other.valueMask(); } //////////////////////////////////////// template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::clip(const CoordBBox& clipBBox, bool background) { CoordBBox nodeBBox = this->getNodeBoundingBox(); if (!clipBBox.hasOverlap(nodeBBox)) { // 
This node lies completely outside the clipping region. Fill it with background tiles. this->fill(nodeBBox, background, /*active=*/false); } else if (clipBBox.isInside(nodeBBox)) { // This node lies completely inside the clipping region. Leave it intact. return; } // This node isn't completely contained inside the clipping region. // Set any voxels that lie outside the region to the background value. // Construct a boolean mask that is on inside the clipping region and off outside it. NodeMaskType mask; nodeBBox.intersect(clipBBox); Coord xyz; int &x = xyz.x(), &y = xyz.y(), &z = xyz.z(); for (x = nodeBBox.min().x(); x <= nodeBBox.max().x(); ++x) { for (y = nodeBBox.min().y(); y <= nodeBBox.max().y(); ++y) { for (z = nodeBBox.min().z(); z <= nodeBBox.max().z(); ++z) { mask.setOn(static_cast<Index32>(this->coordToOffset(xyz))); } } } // Set voxels that lie in the inactive region of the mask (i.e., outside // the clipping region) to the background value. for (MaskOffIter maskIter = mask.beginOff(); maskIter; ++maskIter) { this->setValueOff(maskIter.pos(), background); } } //////////////////////////////////////// template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::fill(const CoordBBox& bbox, bool value, bool) { auto clippedBBox = this->getNodeBoundingBox(); clippedBBox.intersect(bbox); if (!clippedBBox) return; for (Int32 x = clippedBBox.min().x(); x <= clippedBBox.max().x(); ++x) { const Index offsetX = (x & (DIM-1u))<<2*Log2Dim; for (Int32 y = clippedBBox.min().y(); y <= clippedBBox.max().y(); ++y) { const Index offsetXY = offsetX + ((y & (DIM-1u))<< Log2Dim); for (Int32 z = clippedBBox.min().z(); z <= clippedBBox.max().z(); ++z) { const Index offset = offsetXY + (z & (DIM-1u)); mBuffer.mData.set(offset, value); } } } } template<Index Log2Dim> inline void LeafNode<ValueMask, Log2Dim>::fill(const bool& value, bool) { mBuffer.fill(value); } //////////////////////////////////////// template<Index Log2Dim> template<typename DenseT> inline void LeafNode<ValueMask, Log2Dim>::copyToDense(const CoordBBox& bbox, DenseT& dense) const { using DenseValueType = typename DenseT::ValueType; const size_t xStride = dense.xStride(), yStride = dense.yStride(), zStride = dense.zStride(); const Coord& min = dense.bbox().min(); DenseValueType* t0 = dense.data() + zStride * (bbox.min()[2] - min[2]); // target array const Int32 n0 = bbox.min()[2] & (DIM-1u); for (Int32 x = bbox.min()[0], ex = bbox.max()[0] + 1; x < ex; ++x) { DenseValueType* t1 = t0 + xStride * (x - min[0]); const Int32 n1 = n0 + ((x & (DIM-1u)) << 2*LOG2DIM); for (Int32 y = bbox.min()[1], ey = bbox.max()[1] + 1; y < ey; ++y) { DenseValueType* t2 = t1 + yStride * (y - min[1]); Int32 n2 = n1 + ((y & (DIM-1u)) << LOG2DIM); for (Int32 z = bbox.min()[2], ez = bbox.max()[2] + 1; z < ez; ++z, t2 += zStride) { *t2 = DenseValueType(mBuffer.mData.isOn(n2++)); } } } } template<Index Log2Dim> template<typename DenseT> inline void LeafNode<ValueMask, Log2Dim>::copyFromDense(const CoordBBox& bbox, const DenseT& dense, bool background, bool tolerance) { using DenseValueType = typename DenseT::ValueType; struct Local { inline static bool toBool(const DenseValueType& v) { return !math::isZero(v); } }; const size_t xStride = dense.xStride(), yStride = dense.yStride(), zStride = dense.zStride(); const Coord& min = dense.bbox().min(); const DenseValueType* s0 = dense.data() + zStride * (bbox.min()[2] - min[2]); // source const Int32 n0 = bbox.min()[2] & (DIM-1u); for (Int32 x = bbox.min()[0], ex = bbox.max()[0] + 1; x < ex; ++x) { const DenseValueType* 
s1 = s0 + xStride * (x - min[0]); const Int32 n1 = n0 + ((x & (DIM-1u)) << 2*LOG2DIM); for (Int32 y = bbox.min()[1], ey = bbox.max()[1] + 1; y < ey; ++y) { const DenseValueType* s2 = s1 + yStride * (y - min[1]); Int32 n2 = n1 + ((y & (DIM-1u)) << LOG2DIM); for (Int32 z = bbox.min()[2], ez = bbox.max()[2]+1; z < ez; ++z, ++n2, s2 += zStride) { // Note: if tolerance is true (i.e., 1), then all boolean values compare equal. if (tolerance || (background == Local::toBool(*s2))) { mBuffer.mData.set(n2, background); } else { mBuffer.mData.set(n2, Local::toBool(*s2)); } } } } } //////////////////////////////////////// template<Index Log2Dim> template<typename CombineOp> inline void LeafNode<ValueMask, Log2Dim>::combine(const LeafNode& other, CombineOp& op) { CombineArgs<bool> args; for (Index i = 0; i < SIZE; ++i) { bool result = false, aVal = mBuffer.mData.isOn(i), bVal = other.mBuffer.mData.isOn(i); op(args.setARef(aVal) .setAIsActive(aVal) .setBRef(bVal) .setBIsActive(bVal) .setResultRef(result)); mBuffer.mData.set(i, result); } } template<Index Log2Dim> template<typename CombineOp> inline void LeafNode<ValueMask, Log2Dim>::combine(bool value, bool valueIsActive, CombineOp& op) { CombineArgs<bool> args; args.setBRef(value).setBIsActive(valueIsActive); for (Index i = 0; i < SIZE; ++i) { bool result = false, aVal = mBuffer.mData.isOn(i); op(args.setARef(aVal) .setAIsActive(aVal) .setResultRef(result)); mBuffer.mData.set(i, result); } } //////////////////////////////////////// template<Index Log2Dim> template<typename CombineOp, typename OtherType> inline void LeafNode<ValueMask, Log2Dim>::combine2(const LeafNode& other, const OtherType& value, bool valueIsActive, CombineOp& op) { CombineArgs<bool, OtherType> args; args.setBRef(value).setBIsActive(valueIsActive); for (Index i = 0; i < SIZE; ++i) { bool result = false, aVal = other.mBuffer.mData.isOn(i); op(args.setARef(aVal) .setAIsActive(aVal) .setResultRef(result)); mBuffer.mData.set(i, result); } } template<Index Log2Dim> template<typename CombineOp, typename OtherNodeT> inline void LeafNode<ValueMask, Log2Dim>::combine2(bool value, const OtherNodeT& other, bool valueIsActive, CombineOp& op) { CombineArgs<bool, typename OtherNodeT::ValueType> args; args.setARef(value).setAIsActive(valueIsActive); for (Index i = 0; i < SIZE; ++i) { bool result = false, bVal = other.mBuffer.mData.isOn(i); op(args.setBRef(bVal) .setBIsActive(bVal) .setResultRef(result)); mBuffer.mData.set(i, result); } } template<Index Log2Dim> template<typename CombineOp, typename OtherNodeT> inline void LeafNode<ValueMask, Log2Dim>::combine2(const LeafNode& b0, const OtherNodeT& b1, CombineOp& op) { CombineArgs<bool, typename OtherNodeT::ValueType> args; for (Index i = 0; i < SIZE; ++i) { bool result = false, b0Val = b0.mBuffer.mData.isOn(i), b1Val = b1.mBuffer.mData.isOn(i); op(args.setARef(b0Val) .setAIsActive(b0Val) .setBRef(b1Val) .setBIsActive(b1Val) .setResultRef(result)); mBuffer.mData.set(i, result); } } //////////////////////////////////////// template<Index Log2Dim> template<typename BBoxOp> inline void LeafNode<ValueMask, Log2Dim>::visitActiveBBox(BBoxOp& op) const { if (op.template descent<LEVEL>()) { for (ValueOnCIter i=this->cbeginValueOn(); i; ++i) { op.template operator()<LEVEL>(CoordBBox::createCube(i.getCoord(), 1)); } } else { op.template operator()<LEVEL>(this->getNodeBoundingBox()); } } template<Index Log2Dim> template<typename VisitorOp> inline void LeafNode<ValueMask, Log2Dim>::visit(VisitorOp& op) { doVisit<LeafNode, VisitorOp, ChildAllIter>(*this, op); } 
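// --- Editorial sketch (not part of the original OpenVDB header) -----------------------
// A minimal CombineOp functor showing how the combine() overloads above drive a user
// callback per voxel for this mask specialization.  It assumes only the CombineArgs
// accessors a(), b() and setResult() from openvdb/Types.h; the functor name is
// hypothetical.  Because value and active state share a single bit here, a logical-OR
// functor reproduces the effect of topologyUnion()/merge().
struct MaskOrCombineOpExample
{
    void operator()(CombineArgs<bool>& args) const
    {
        // a() and b() are the two input bits; setResult() writes the output bit,
        // which the calling LeafNode stores back into its value mask.
        args.setResult(args.a() || args.b());
    }
};
// Usage sketch: given two mask leaves a and b with the same Log2Dim,
//     MaskOrCombineOpExample op;
//     a.combine(b, op);   // afterwards a's mask is the bitwise OR of the two masks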
template<Index Log2Dim> template<typename VisitorOp> inline void LeafNode<ValueMask, Log2Dim>::visit(VisitorOp& op) const { doVisit<const LeafNode, VisitorOp, ChildAllCIter>(*this, op); } template<Index Log2Dim> template<typename NodeT, typename VisitorOp, typename ChildAllIterT> inline void LeafNode<ValueMask, Log2Dim>::doVisit(NodeT& self, VisitorOp& op) { for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { op(iter); } } //////////////////////////////////////// template<Index Log2Dim> template<typename OtherLeafNodeType, typename VisitorOp> inline void LeafNode<ValueMask, Log2Dim>::visit2Node(OtherLeafNodeType& other, VisitorOp& op) { doVisit2Node<LeafNode, OtherLeafNodeType, VisitorOp, ChildAllIter, typename OtherLeafNodeType::ChildAllIter>(*this, other, op); } template<Index Log2Dim> template<typename OtherLeafNodeType, typename VisitorOp> inline void LeafNode<ValueMask, Log2Dim>::visit2Node(OtherLeafNodeType& other, VisitorOp& op) const { doVisit2Node<const LeafNode, OtherLeafNodeType, VisitorOp, ChildAllCIter, typename OtherLeafNodeType::ChildAllCIter>(*this, other, op); } template<Index Log2Dim> template< typename NodeT, typename OtherNodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> inline void LeafNode<ValueMask, Log2Dim>::doVisit2Node(NodeT& self, OtherNodeT& other, VisitorOp& op) { // Allow the two nodes to have different ValueTypes, but not different dimensions. static_assert(OtherNodeT::SIZE == NodeT::SIZE, "can't visit nodes of different sizes simultaneously"); static_assert(OtherNodeT::LEVEL == NodeT::LEVEL, "can't visit nodes at different tree levels simultaneously"); ChildAllIterT iter = self.beginChildAll(); OtherChildAllIterT otherIter = other.beginChildAll(); for ( ; iter && otherIter; ++iter, ++otherIter) { op(iter, otherIter); } } //////////////////////////////////////// template<Index Log2Dim> template<typename IterT, typename VisitorOp> inline void LeafNode<ValueMask, Log2Dim>::visit2(IterT& otherIter, VisitorOp& op, bool otherIsLHS) { doVisit2<LeafNode, VisitorOp, ChildAllIter, IterT>(*this, otherIter, op, otherIsLHS); } template<Index Log2Dim> template<typename IterT, typename VisitorOp> inline void LeafNode<ValueMask, Log2Dim>::visit2(IterT& otherIter, VisitorOp& op, bool otherIsLHS) const { doVisit2<const LeafNode, VisitorOp, ChildAllCIter, IterT>(*this, otherIter, op, otherIsLHS); } template<Index Log2Dim> template< typename NodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> inline void LeafNode<ValueMask, Log2Dim>::doVisit2(NodeT& self, OtherChildAllIterT& otherIter, VisitorOp& op, bool otherIsLHS) { if (!otherIter) return; if (otherIsLHS) { for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { op(otherIter, iter); } } else { for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { op(iter, otherIter); } } } } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_LEAF_NODE_MASK_HAS_BEEN_INCLUDED
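// --- Editorial usage sketch (not part of the original OpenVDB header) -----------------
// A small, self-contained example of the ValueMask leaf specialization, assuming the
// Log2Dim = 3 configuration used by openvdb::MaskGrid.  The guard macro is hypothetical
// and exists only so this sketch never affects real builds.  Because values and active
// states share one bit mask, setValueOn()/setValueOff() and setActiveState() are
// interchangeable, and getValue() always agrees with isValueOn().
#ifdef OPENVDB_LEAF_NODE_MASK_USAGE_SKETCH
#include <cassert>
inline void leafNodeMaskUsageSketch()
{
    using MaskLeaf = openvdb::tree::LeafNode<openvdb::ValueMask, 3>;
    MaskLeaf leaf(openvdb::Coord(0, 0, 0), /*value=*/false);

    leaf.setValueOn(openvdb::Coord(1, 2, 3));     // one bit set: value == state == true
    assert(leaf.isValueOn(openvdb::Coord(1, 2, 3)));
    assert(leaf.getValue(openvdb::Coord(1, 2, 3)) == true);
    assert(leaf.onVoxelCount() == 1);

    leaf.negate();                                // toggles every bit (values and states)
    assert(leaf.onVoxelCount() == MaskLeaf::NUM_VOXELS - 1);
}
#endif // OPENVDB_LEAF_NODE_MASK_USAGE_SKETCH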
60,854
C
36.197433
118
0.676997
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/NodeUnion.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file NodeUnion.h /// /// @details NodeUnion is a templated helper class that controls access to either /// the child node pointer or the value for a particular element of a root /// or internal node. For space efficiency, the child pointer and the value /// are unioned when possible, since the two are never in use simultaneously. #ifndef OPENVDB_TREE_NODEUNION_HAS_BEEN_INCLUDED #define OPENVDB_TREE_NODEUNION_HAS_BEEN_INCLUDED #include <openvdb/version.h> #include <openvdb/Types.h> #include <cstring> // for std::memcpy() #include <type_traits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { #if OPENVDB_ABI_VERSION_NUMBER >= 8 /// @brief Default implementation of a NodeUnion that stores the child pointer /// and the value separately (i.e., not in a union). Types which select this /// specialization usually do not conform to the requirements of a union /// member, that is that the type ValueT is not trivially copyable. This /// implementation is thus NOT used for POD, math::Vec, math::Mat, math::Quat /// or math::Coord types, but is used (for example) with std::string template<typename ValueT, typename ChildT, typename Enable = void> class NodeUnion { private: ChildT* mChild; ValueT mValue; public: NodeUnion(): mChild(nullptr), mValue() {} ChildT* getChild() const { return mChild; } void setChild(ChildT* child) { mChild = child; } const ValueT& getValue() const { return mValue; } ValueT& getValue() { return mValue; } void setValue(const ValueT& val) { mValue = val; } // Small check to ensure this class isn't // selected for some expected types static_assert(!ValueTraits<ValueT>::IsVec && !ValueTraits<ValueT>::IsMat && !ValueTraits<ValueT>::IsQuat && !std::is_same<ValueT, math::Coord>::value && !std::is_arithmetic<ValueT>::value, "Unexpected instantiation of NodeUnion"); }; /// @brief Template specialization of a NodeUnion that stores the child pointer /// and the value together (int, float, pointer, etc.) template<typename ValueT, typename ChildT> class NodeUnion<ValueT, ChildT, typename std::enable_if<std::is_trivially_copyable<ValueT>::value>::type> { private: union { ChildT* mChild; ValueT mValue; }; public: NodeUnion(): mChild(nullptr) {} ChildT* getChild() const { return mChild; } void setChild(ChildT* child) { mChild = child; } const ValueT& getValue() const { return mValue; } ValueT& getValue() { return mValue; } void setValue(const ValueT& val) { mValue = val; } }; #else // Forward declaration of traits class template<typename T> struct CopyTraits; // Default implementation that stores the child pointer and the value separately // (i.e., not in a union) // This implementation is not used for POD, math::Vec or math::Coord value types. template<typename ValueT, typename ChildT, typename Enable = void> class NodeUnion { private: ChildT* mChild; ValueT mValue; public: NodeUnion(): mChild(nullptr), mValue() {} ChildT* getChild() const { return mChild; } void setChild(ChildT* child) { mChild = child; } const ValueT& getValue() const { return mValue; } ValueT& getValue() { return mValue; } void setValue(const ValueT& val) { mValue = val; } }; // Template specialization for values of POD types (int, float, pointer, etc.) 
template<typename ValueT, typename ChildT> class NodeUnion<ValueT, ChildT, typename std::enable_if<std::is_pod<ValueT>::value>::type> { private: union { ChildT* mChild; ValueT mValue; }; public: NodeUnion(): mChild(nullptr) {} ChildT* getChild() const { return mChild; } void setChild(ChildT* child) { mChild = child; } const ValueT& getValue() const { return mValue; } ValueT& getValue() { return mValue; } void setValue(const ValueT& val) { mValue = val; } }; // Template specialization for values of types such as math::Vec3f and math::Coord // for which CopyTraits<T>::IsCopyable is true template<typename ValueT, typename ChildT> class NodeUnion<ValueT, ChildT, typename std::enable_if<CopyTraits<ValueT>::IsCopyable>::type> { private: union { ChildT* mChild; ValueT mValue; }; public: NodeUnion(): mChild(nullptr) {} NodeUnion(const NodeUnion& other): mChild(nullptr) { std::memcpy(static_cast<void*>(this), &other, sizeof(*this)); } NodeUnion& operator=(const NodeUnion& rhs) { std::memcpy(static_cast<void*>(this), &rhs, sizeof(*this)); return *this; } ChildT* getChild() const { return mChild; } void setChild(ChildT* child) { mChild = child; } const ValueT& getValue() const { return mValue; } ValueT& getValue() { return mValue; } void setValue(const ValueT& val) { mValue = val; } }; /// @details A type T is copyable if /// # T stores member values by value (vs. by pointer or reference) /// and T's true byte size is given by sizeof(T). /// # T has a trivial destructor /// # T has a default constructor /// # T has an assignment operator template<typename T> struct CopyTraits { static const bool IsCopyable = false; }; template<typename T> struct CopyTraits<math::Vec2<T>> { static const bool IsCopyable = true; }; template<typename T> struct CopyTraits<math::Vec3<T>> { static const bool IsCopyable = true; }; template<typename T> struct CopyTraits<math::Vec4<T>> { static const bool IsCopyable = true; }; template<> struct CopyTraits<math::Coord> { static const bool IsCopyable = true; }; #endif //////////////////////////////////////// } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_NODEUNION_HAS_BEEN_INCLUDED
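// --- Editorial layout sketch (not part of the original OpenVDB header) ----------------
// Illustrates the space saving that motivates the union: for a trivially copyable value
// type such as float the child pointer and the value overlap, whereas for std::string
// (mentioned above) the two members are stored side by side.  Assumptions: 64-bit
// pointers, and ExampleChild is a hypothetical node type of which only a pointer is
// stored, so a forward declaration suffices.  The guard macro is hypothetical.
#ifdef OPENVDB_NODEUNION_LAYOUT_SKETCH
#include <string>
struct ExampleChild;

static_assert(sizeof(openvdb::tree::NodeUnion<float, ExampleChild>) == sizeof(ExampleChild*),
    "trivially copyable values share storage with the child pointer");
static_assert(sizeof(openvdb::tree::NodeUnion<std::string, ExampleChild>) > sizeof(ExampleChild*),
    "non-trivially-copyable values are stored alongside the child pointer");
#endif // OPENVDB_NODEUNION_LAYOUT_SKETCH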
5,784
C
32.830409
95
0.6926
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/LeafBuffer.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_TREE_LEAFBUFFER_HAS_BEEN_INCLUDED #define OPENVDB_TREE_LEAFBUFFER_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/io/Compression.h> // for io::readCompressedValues(), etc #include <openvdb/util/NodeMasks.h> #include <tbb/atomic.h> #include <tbb/spin_mutex.h> #include <algorithm> // for std::swap #include <cstddef> // for offsetof() #include <iostream> #include <memory> #include <type_traits> class TestLeaf; namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { namespace internal { /// @internal For delayed loading to be threadsafe, LeafBuffer::mOutOfCore must be /// memory-fenced when it is set in LeafBuffer::doLoad(), otherwise that operation /// could be reordered ahead of others in doLoad(), with the possible result that /// other threads could see the buffer as in-core before it has been fully loaded. /// Making mOutOfCore a TBB atomic solves the problem, since TBB atomics are release-fenced /// by default (unlike STL atomics, which are not even guaranteed to be lock-free). /// However, TBB atomics have stricter alignment requirements than their underlying value_types, /// so a LeafBuffer with an atomic mOutOfCore is potentially ABI-incompatible with /// its non-atomic counterpart. /// This helper class conditionally declares mOutOfCore as an atomic only if doing so /// doesn't break ABI compatibility. template<typename T> struct LeafBufferFlags { /// The type of LeafBuffer::mOutOfCore using type = tbb::atomic<Index32>; static constexpr bool IsAtomic = true; }; } // namespace internal /// @brief Array of fixed size 2<SUP>3<I>Log2Dim</I></SUP> that stores /// the voxel values of a LeafNode template<typename T, Index Log2Dim> class LeafBuffer { public: using ValueType = T; using StorageType = ValueType; using NodeMaskType = util::NodeMask<Log2Dim>; static const Index SIZE = 1 << 3 * Log2Dim; struct FileInfo { FileInfo(): bufpos(0) , maskpos(0) {} std::streamoff bufpos; std::streamoff maskpos; io::MappedFile::Ptr mapping; SharedPtr<io::StreamMetadata> meta; }; /// Default constructor inline LeafBuffer(): mData(new ValueType[SIZE]) { mOutOfCore = 0; } /// Construct a buffer populated with the specified value. explicit inline LeafBuffer(const ValueType&); /// Copy constructor inline LeafBuffer(const LeafBuffer&); /// Construct a buffer but don't allocate memory for the full array of values. LeafBuffer(PartialCreate, const ValueType&): mData(nullptr) { mOutOfCore = 0; } /// Destructor inline ~LeafBuffer(); /// Return @c true if this buffer's values have not yet been read from disk. bool isOutOfCore() const { return bool(mOutOfCore); } /// Return @c true if memory for this buffer has not yet been allocated. bool empty() const { return !mData || this->isOutOfCore(); } /// Allocate memory for this buffer if it has not already been allocated. bool allocate() { if (mData == nullptr) mData = new ValueType[SIZE]; return true; } /// Populate this buffer with a constant value. inline void fill(const ValueType&); /// Return a const reference to the i'th element of this buffer. const ValueType& getValue(Index i) const { return this->at(i); } /// Return a const reference to the i'th element of this buffer. const ValueType& operator[](Index i) const { return this->at(i); } /// Set the i'th value of this buffer to the specified value. inline void setValue(Index i, const ValueType&); /// Copy the other buffer's values into this buffer. 
inline LeafBuffer& operator=(const LeafBuffer&); /// @brief Return @c true if the contents of the other buffer /// exactly equal the contents of this buffer. inline bool operator==(const LeafBuffer&) const; /// @brief Return @c true if the contents of the other buffer /// are not exactly equal to the contents of this buffer. inline bool operator!=(const LeafBuffer& other) const { return !(other == *this); } /// Exchange this buffer's values with the other buffer's values. inline void swap(LeafBuffer&); /// Return the memory footprint of this buffer in bytes. inline Index memUsage() const; /// Return the number of values contained in this buffer. static Index size() { return SIZE; } /// @brief Return a const pointer to the array of voxel values. /// @details This method guarantees that the buffer is allocated and loaded. /// @warning This method should only be used by experts seeking low-level optimizations. const ValueType* data() const; /// @brief Return a pointer to the array of voxel values. /// @details This method guarantees that the buffer is allocated and loaded. /// @warning This method should only be used by experts seeking low-level optimizations. ValueType* data(); private: /// If this buffer is empty, return zero, otherwise return the value at index @ i. inline const ValueType& at(Index i) const; /// @brief Return a non-const reference to the value at index @a i. /// @details This method is private since it makes assumptions about the /// buffer's memory layout. LeafBuffers associated with custom leaf node types /// (e.g., a bool buffer implemented as a bitmask) might not be able to /// return non-const references to their values. ValueType& operator[](Index i) { return const_cast<ValueType&>(this->at(i)); } bool deallocate(); inline void setOutOfCore(bool b) { mOutOfCore = b; } // To facilitate inlining in the common case in which the buffer is in-core, // the loading logic is split into a separate function, doLoad(). inline void loadValues() const { if (this->isOutOfCore()) this->doLoad(); } inline void doLoad() const; inline bool detachFromFile(); using FlagsType = typename internal::LeafBufferFlags<ValueType>::type; union { ValueType* mData; FileInfo* mFileInfo; }; FlagsType mOutOfCore; // interpreted as bool; extra bits reserved for future use tbb::spin_mutex mMutex; // 1 byte //int8_t mReserved[3]; // padding for alignment static const ValueType sZero; friend class ::TestLeaf; // Allow the parent LeafNode to access this buffer's data pointer. 
template<typename, Index> friend class LeafNode; }; // class LeafBuffer //////////////////////////////////////// template<typename T, Index Log2Dim> const T LeafBuffer<T, Log2Dim>::sZero = zeroVal<T>(); template<typename T, Index Log2Dim> inline LeafBuffer<T, Log2Dim>::LeafBuffer(const ValueType& val) : mData(new ValueType[SIZE]) { mOutOfCore = 0; this->fill(val); } template<typename T, Index Log2Dim> inline LeafBuffer<T, Log2Dim>::~LeafBuffer() { if (this->isOutOfCore()) { this->detachFromFile(); } else { this->deallocate(); } } template<typename T, Index Log2Dim> inline LeafBuffer<T, Log2Dim>::LeafBuffer(const LeafBuffer& other) : mData(nullptr) , mOutOfCore(other.mOutOfCore) { if (other.isOutOfCore()) { mFileInfo = new FileInfo(*other.mFileInfo); } else if (other.mData != nullptr) { this->allocate(); ValueType* target = mData; const ValueType* source = other.mData; Index n = SIZE; while (n--) *target++ = *source++; } } template<typename T, Index Log2Dim> inline void LeafBuffer<T, Log2Dim>::setValue(Index i, const ValueType& val) { assert(i < SIZE); this->loadValues(); if (mData) mData[i] = val; } template<typename T, Index Log2Dim> inline LeafBuffer<T, Log2Dim>& LeafBuffer<T, Log2Dim>::operator=(const LeafBuffer& other) { if (&other != this) { if (this->isOutOfCore()) { this->detachFromFile(); } else { if (other.isOutOfCore()) this->deallocate(); } if (other.isOutOfCore()) { mOutOfCore = other.mOutOfCore; mFileInfo = new FileInfo(*other.mFileInfo); } else if (other.mData != nullptr) { this->allocate(); ValueType* target = mData; const ValueType* source = other.mData; Index n = SIZE; while (n--) *target++ = *source++; } } return *this; } template<typename T, Index Log2Dim> inline void LeafBuffer<T, Log2Dim>::fill(const ValueType& val) { this->detachFromFile(); if (mData != nullptr) { ValueType* target = mData; Index n = SIZE; while (n--) *target++ = val; } } template<typename T, Index Log2Dim> inline bool LeafBuffer<T, Log2Dim>::operator==(const LeafBuffer& other) const { this->loadValues(); other.loadValues(); const ValueType *target = mData, *source = other.mData; if (!target && !source) return true; if (!target || !source) return false; Index n = SIZE; while (n && math::isExactlyEqual(*target++, *source++)) --n; return n == 0; } template<typename T, Index Log2Dim> inline void LeafBuffer<T, Log2Dim>::swap(LeafBuffer& other) { std::swap(mData, other.mData); std::swap(mOutOfCore, other.mOutOfCore); } template<typename T, Index Log2Dim> inline Index LeafBuffer<T, Log2Dim>::memUsage() const { size_t n = sizeof(*this); if (this->isOutOfCore()) n += sizeof(FileInfo); else if (mData) n += SIZE * sizeof(ValueType); return static_cast<Index>(n); } template<typename T, Index Log2Dim> inline const typename LeafBuffer<T, Log2Dim>::ValueType* LeafBuffer<T, Log2Dim>::data() const { this->loadValues(); if (mData == nullptr) { LeafBuffer* self = const_cast<LeafBuffer*>(this); // This lock will be contended at most once. tbb::spin_mutex::scoped_lock lock(self->mMutex); if (mData == nullptr) self->mData = new ValueType[SIZE]; } return mData; } template<typename T, Index Log2Dim> inline typename LeafBuffer<T, Log2Dim>::ValueType* LeafBuffer<T, Log2Dim>::data() { this->loadValues(); if (mData == nullptr) { // This lock will be contended at most once. 
tbb::spin_mutex::scoped_lock lock(mMutex); if (mData == nullptr) mData = new ValueType[SIZE]; } return mData; } template<typename T, Index Log2Dim> inline const typename LeafBuffer<T, Log2Dim>::ValueType& LeafBuffer<T, Log2Dim>::at(Index i) const { assert(i < SIZE); this->loadValues(); // We can't use the ternary operator here, otherwise Visual C++ returns // a reference to a temporary. if (mData) return mData[i]; else return sZero; } template<typename T, Index Log2Dim> inline bool LeafBuffer<T, Log2Dim>::deallocate() { if (mData != nullptr && !this->isOutOfCore()) { delete[] mData; mData = nullptr; return true; } return false; } template<typename T, Index Log2Dim> inline void LeafBuffer<T, Log2Dim>::doLoad() const { if (!this->isOutOfCore()) return; LeafBuffer<T, Log2Dim>* self = const_cast<LeafBuffer<T, Log2Dim>*>(this); // This lock will be contended at most once, after which this buffer // will no longer be out-of-core. tbb::spin_mutex::scoped_lock lock(self->mMutex); if (!this->isOutOfCore()) return; std::unique_ptr<FileInfo> info(self->mFileInfo); assert(info.get() != nullptr); assert(info->mapping.get() != nullptr); assert(info->meta.get() != nullptr); /// @todo For now, we have to clear the mData pointer in order for allocate() to take effect. self->mData = nullptr; self->allocate(); SharedPtr<std::streambuf> buf = info->mapping->createBuffer(); std::istream is(buf.get()); io::setStreamMetadataPtr(is, info->meta, /*transfer=*/true); NodeMaskType mask; is.seekg(info->maskpos); mask.load(is); is.seekg(info->bufpos); io::readCompressedValues(is, self->mData, SIZE, mask, io::getHalfFloat(is)); self->setOutOfCore(false); } template<typename T, Index Log2Dim> inline bool LeafBuffer<T, Log2Dim>::detachFromFile() { if (this->isOutOfCore()) { delete mFileInfo; mFileInfo = nullptr; this->setOutOfCore(false); return true; } return false; } //////////////////////////////////////// // Partial specialization for bool ValueType template<Index Log2Dim> class LeafBuffer<bool, Log2Dim> { public: using NodeMaskType = util::NodeMask<Log2Dim>; using WordType = typename NodeMaskType::Word; using ValueType = bool; using StorageType = WordType; static const Index WORD_COUNT = NodeMaskType::WORD_COUNT; static const Index SIZE = 1 << 3 * Log2Dim; // These static declarations must be on separate lines to avoid VC9 compiler errors. static const bool sOn; static const bool sOff; LeafBuffer() {} LeafBuffer(bool on): mData(on) {} LeafBuffer(const NodeMaskType& other): mData(other) {} LeafBuffer(const LeafBuffer& other): mData(other.mData) {} ~LeafBuffer() {} void fill(bool val) { mData.set(val); } LeafBuffer& operator=(const LeafBuffer& b) { if (&b != this) { mData=b.mData; } return *this; } const bool& getValue(Index i) const { assert(i < SIZE); // We can't use the ternary operator here, otherwise Visual C++ returns // a reference to a temporary. if (mData.isOn(i)) return sOn; else return sOff; } const bool& operator[](Index i) const { return this->getValue(i); } bool operator==(const LeafBuffer& other) const { return mData == other.mData; } bool operator!=(const LeafBuffer& other) const { return mData != other.mData; } void setValue(Index i, bool val) { assert(i < SIZE); mData.set(i, val); } void swap(LeafBuffer& other) { if (&other != this) std::swap(mData, other.mData); } Index memUsage() const { return sizeof(*this); } static Index size() { return SIZE; } /// @brief Return a pointer to the C-style array of words encoding the bits. 
/// @warning This method should only be used by experts seeking low-level optimizations. WordType* data() { return &(mData.template getWord<WordType>(0)); } /// @brief Return a const pointer to the C-style array of words encoding the bits. /// @warning This method should only be used by experts seeking low-level optimizations. const WordType* data() const { return const_cast<LeafBuffer*>(this)->data(); } private: // Allow the parent LeafNode to access this buffer's data. template<typename, Index> friend class LeafNode; NodeMaskType mData; }; // class LeafBuffer /// @internal For consistency with other nodes and with iterators, methods like /// LeafNode::getValue() return a reference to a value. Since it's not possible /// to return a reference to a bit in a node mask, we return a reference to one /// of the following static values instead. template<Index Log2Dim> const bool LeafBuffer<bool, Log2Dim>::sOn = true; template<Index Log2Dim> const bool LeafBuffer<bool, Log2Dim>::sOff = false; } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_LEAFBUFFER_HAS_BEEN_INCLUDED
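LeafBuffer::data() and doLoad() above follow the same double-checked pattern: a cheap check (a null data pointer or the atomic out-of-core flag) on the fast path, and a spin mutex that is contended at most once on first access. Below is a minimal standalone sketch of that pattern using std::atomic and std::mutex rather than the TBB types in the header; the LazyBuffer class and its members are illustrative names, not OpenVDB API.

#include <atomic>
#include <iostream>
#include <mutex>
#include <vector>

class LazyBuffer {
public:
    // Return a pointer to the values, populating them on first use.
    float* data()
    {
        if (!mLoaded.load(std::memory_order_acquire)) {        // fast path
            std::lock_guard<std::mutex> lock(mMutex);           // slow path, first access only
            if (!mLoaded.load(std::memory_order_relaxed)) {     // re-check under the lock
                mValues.assign(512, 0.0f);                      // 512 = 8^3 voxels; stands in for a disk read
                mLoaded.store(true, std::memory_order_release); // publish the loaded data
            }
        }
        return mValues.data();
    }

private:
    std::vector<float> mValues;
    std::mutex mMutex;
    std::atomic<bool> mLoaded{false};
};

int main()
{
    LazyBuffer buf;
    std::cout << buf.data()[0] << '\n'; // triggers the one-time load, prints 0
    return 0;
}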
15,253
C
31.317797
99
0.668459
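The internal::LeafBufferFlags comment in the header above argues that an atomic out-of-core flag may only replace the plain one where it does not change the node's layout. The sketch below expresses that constraint with std::atomic and compile-time checks; PlainFlags and AtomicFlags are illustrative names, and the header itself reasons about tbb::atomic rather than std::atomic.

#include <atomic>
#include <cstdint>

struct PlainFlags  { std::uint32_t outOfCore; };
struct AtomicFlags { std::atomic<std::uint32_t> outOfCore; };

// On mainstream platforms both checks pass, so an atomic flag can stand in for
// the plain one without shifting any other members; on a platform where they
// fail, the non-atomic layout would have to be kept for ABI compatibility.
static_assert(sizeof(AtomicFlags) == sizeof(PlainFlags),
              "atomic flag changes the struct size");
static_assert(alignof(AtomicFlags) == alignof(PlainFlags),
              "atomic flag changes the struct alignment");

int main() { return 0; }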
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/InternalNode.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file InternalNode.h /// /// @brief Internal table nodes for OpenVDB trees #ifndef OPENVDB_TREE_INTERNALNODE_HAS_BEEN_INCLUDED #define OPENVDB_TREE_INTERNALNODE_HAS_BEEN_INCLUDED #include <openvdb/Platform.h> #include <openvdb/util/NodeMasks.h> #include <openvdb/io/Compression.h> // for io::readCompressedValues(), etc. #include <openvdb/math/Math.h> // for math::isExactlyEqual(), etc. #include <openvdb/version.h> #include <openvdb/Types.h> #include "Iterator.h" #include "NodeUnion.h" #include <tbb/parallel_for.h> #include <memory> #include <type_traits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { template<typename, Index, typename> struct SameInternalConfig; // forward declaration template<typename _ChildNodeType, Index Log2Dim> class InternalNode { public: using ChildNodeType = _ChildNodeType; using LeafNodeType = typename ChildNodeType::LeafNodeType; using ValueType = typename ChildNodeType::ValueType; using BuildType = typename ChildNodeType::BuildType; using UnionType = NodeUnion<ValueType, ChildNodeType>; using NodeMaskType = util::NodeMask<Log2Dim>; static const Index LOG2DIM = Log2Dim, // log2 of tile count in one dimension TOTAL = Log2Dim + ChildNodeType::TOTAL, // log2 of voxel count in one dimension DIM = 1 << TOTAL, // total voxel count in one dimension NUM_VALUES = 1 << (3 * Log2Dim), // total voxel count represented by this node LEVEL = 1 + ChildNodeType::LEVEL; // level 0 = leaf static const Index64 NUM_VOXELS = uint64_t(1) << (3 * TOTAL); // total voxel count represented by this node /// @brief ValueConverter<T>::Type is the type of an InternalNode having the same /// child hierarchy and dimensions as this node but a different value type, T. template<typename OtherValueType> struct ValueConverter { using Type = InternalNode<typename ChildNodeType::template ValueConverter< OtherValueType>::Type, Log2Dim>; }; /// @brief SameConfiguration<OtherNodeType>::value is @c true if and only if OtherNodeType /// is the type of an InternalNode with the same dimensions as this node and whose /// ChildNodeType has the same configuration as this node's ChildNodeType. template<typename OtherNodeType> struct SameConfiguration { static const bool value = SameInternalConfig<ChildNodeType, Log2Dim, OtherNodeType>::value; }; /// @brief Default constructor /// @warning The resulting InternalNode is uninitialized InternalNode() {} /// @brief Constructor of an InternalNode with dense inactive tiles of the specified value. /// @param offValue Background value used for inactive values explicit InternalNode(const ValueType& offValue); /// @brief Constructs an InternalNode with dense tiles /// @param origin The location in index space of the fist tile value /// @param fillValue Value assigned to all the tiles /// @param active State assigned to all the tiles InternalNode(const Coord& origin, const ValueType& fillValue, bool active = false); InternalNode(PartialCreate, const Coord&, const ValueType& fillValue, bool active = false); /// @brief Deep copy constructor /// /// @note This method is multi-threaded! InternalNode(const InternalNode&); /// @brief Value conversion copy constructor /// /// @note This method is multi-threaded! template<typename OtherChildNodeType> explicit InternalNode(const InternalNode<OtherChildNodeType, Log2Dim>& other); /// @brief Topology copy constructor /// /// @note This method is multi-threaded! 
template<typename OtherChildNodeType> InternalNode(const InternalNode<OtherChildNodeType, Log2Dim>& other, const ValueType& background, TopologyCopy); /// @brief Topology copy constructor /// /// @note This method is multi-threaded! template<typename OtherChildNodeType> InternalNode(const InternalNode<OtherChildNodeType, Log2Dim>& other, const ValueType& offValue, const ValueType& onValue, TopologyCopy); ~InternalNode(); protected: using MaskOnIterator = typename NodeMaskType::OnIterator; using MaskOffIterator = typename NodeMaskType::OffIterator; using MaskDenseIterator = typename NodeMaskType::DenseIterator; // Type tags to disambiguate template instantiations struct ValueOn {}; struct ValueOff {}; struct ValueAll {}; struct ChildOn {}; struct ChildOff {}; struct ChildAll {}; // The following class templates implement the iterator interfaces specified in Iterator.h // by providing getItem(), setItem() and/or modifyItem() methods. // Sparse iterator that visits child nodes of an InternalNode template<typename NodeT, typename ChildT, typename MaskIterT, typename TagT> struct ChildIter: public SparseIteratorBase< MaskIterT, ChildIter<NodeT, ChildT, MaskIterT, TagT>, NodeT, ChildT> { ChildIter() {} ChildIter(const MaskIterT& iter, NodeT* parent): SparseIteratorBase< MaskIterT, ChildIter<NodeT, ChildT, MaskIterT, TagT>, NodeT, ChildT>(iter, parent) {} ChildT& getItem(Index pos) const { assert(this->parent().isChildMaskOn(pos)); return *(this->parent().getChildNode(pos)); } // Note: setItem() can't be called on const iterators. void setItem(Index pos, const ChildT& c) const { this->parent().resetChildNode(pos, &c); } // Note: modifyItem() isn't implemented, since it's not useful for child node pointers. };// ChildIter // Sparse iterator that visits tile values of an InternalNode template<typename NodeT, typename ValueT, typename MaskIterT, typename TagT> struct ValueIter: public SparseIteratorBase< MaskIterT, ValueIter<NodeT, ValueT, MaskIterT, TagT>, NodeT, ValueT> { ValueIter() {} ValueIter(const MaskIterT& iter, NodeT* parent): SparseIteratorBase< MaskIterT, ValueIter<NodeT, ValueT, MaskIterT, TagT>, NodeT, ValueT>(iter, parent) {} const ValueT& getItem(Index pos) const { return this->parent().mNodes[pos].getValue(); } // Note: setItem() can't be called on const iterators. void setItem(Index pos, const ValueT& v) const { this->parent().mNodes[pos].setValue(v); } // Note: modifyItem() can't be called on const iterators. template<typename ModifyOp> void modifyItem(Index pos, const ModifyOp& op) const { op(this->parent().mNodes[pos].getValue()); } };// ValueIter // Dense iterator that visits both tiles and child nodes of an InternalNode template<typename NodeT, typename ChildT, typename ValueT, typename TagT> struct DenseIter: public DenseIteratorBase< MaskDenseIterator, DenseIter<NodeT, ChildT, ValueT, TagT>, NodeT, ChildT, ValueT> { using BaseT = DenseIteratorBase<MaskDenseIterator, DenseIter, NodeT, ChildT, ValueT>; using NonConstValueT = typename BaseT::NonConstValueType; DenseIter() {} DenseIter(const MaskDenseIterator& iter, NodeT* parent): DenseIteratorBase<MaskDenseIterator, DenseIter, NodeT, ChildT, ValueT>(iter, parent) {} bool getItem(Index pos, ChildT*& child, NonConstValueT& value) const { if (this->parent().isChildMaskOn(pos)) { child = this->parent().getChildNode(pos); return true; } child = nullptr; value = this->parent().mNodes[pos].getValue(); return false; } // Note: setItem() can't be called on const iterators. 
void setItem(Index pos, ChildT* child) const { this->parent().resetChildNode(pos, child); } // Note: unsetItem() can't be called on const iterators. void unsetItem(Index pos, const ValueT& value) const { this->parent().unsetChildNode(pos, value); } };// DenseIter public: // Iterators (see Iterator.h for usage) using ChildOnIter = ChildIter<InternalNode, ChildNodeType, MaskOnIterator, ChildOn>; using ChildOnCIter = ChildIter<const InternalNode,const ChildNodeType,MaskOnIterator,ChildOn>; using ChildOffIter = ValueIter<InternalNode, const ValueType, MaskOffIterator, ChildOff>; using ChildOffCIter = ValueIter<const InternalNode,const ValueType,MaskOffIterator,ChildOff>; using ChildAllIter = DenseIter<InternalNode, ChildNodeType, ValueType, ChildAll>; using ChildAllCIter = DenseIter<const InternalNode,const ChildNodeType, ValueType, ChildAll>; using ValueOnIter = ValueIter<InternalNode, const ValueType, MaskOnIterator, ValueOn>; using ValueOnCIter = ValueIter<const InternalNode, const ValueType, MaskOnIterator, ValueOn>; using ValueOffIter = ValueIter<InternalNode, const ValueType, MaskOffIterator, ValueOff>; using ValueOffCIter = ValueIter<const InternalNode,const ValueType,MaskOffIterator,ValueOff>; using ValueAllIter = ValueIter<InternalNode, const ValueType, MaskOffIterator, ValueAll>; using ValueAllCIter = ValueIter<const InternalNode,const ValueType,MaskOffIterator,ValueAll>; ChildOnCIter cbeginChildOn() const { return ChildOnCIter(mChildMask.beginOn(), this); } ChildOffCIter cbeginChildOff() const { return ChildOffCIter(mChildMask.beginOff(), this); } ChildAllCIter cbeginChildAll() const { return ChildAllCIter(mChildMask.beginDense(), this); } ChildOnCIter beginChildOn() const { return cbeginChildOn(); } ChildOffCIter beginChildOff() const { return cbeginChildOff(); } ChildAllCIter beginChildAll() const { return cbeginChildAll(); } ChildOnIter beginChildOn() { return ChildOnIter(mChildMask.beginOn(), this); } ChildOffIter beginChildOff() { return ChildOffIter(mChildMask.beginOff(), this); } ChildAllIter beginChildAll() { return ChildAllIter(mChildMask.beginDense(), this); } ValueOnCIter cbeginValueOn() const { return ValueOnCIter(mValueMask.beginOn(), this); } /// @warning This iterator will also visit child nodes so use isChildMaskOn to skip them! ValueOffCIter cbeginValueOff() const { return ValueOffCIter(mValueMask.beginOff(), this); } ValueAllCIter cbeginValueAll() const { return ValueAllCIter(mChildMask.beginOff(), this); } ValueOnCIter beginValueOn() const { return cbeginValueOn(); } /// @warning This iterator will also visit child nodes so use isChildMaskOn to skip them! ValueOffCIter beginValueOff() const { return cbeginValueOff(); } ValueAllCIter beginValueAll() const { return cbeginValueAll(); } ValueOnIter beginValueOn() { return ValueOnIter(mValueMask.beginOn(), this); } /// @warning This iterator will also visit child nodes so use isChildMaskOn to skip them! ValueOffIter beginValueOff() { return ValueOffIter(mValueMask.beginOff(), this); } ValueAllIter beginValueAll() { return ValueAllIter(mChildMask.beginOff(), this); } /// @return The dimension of this InternalNode /// @details The number of voxels in one coordinate direction covered by this node static Index dim() { return DIM; } /// @return The level of this node /// @details Level 0 is by definition the level of the leaf nodes static Index getLevel() { return LEVEL; } /// @brief Populated an stil::vector with the dimension of all the /// nodes in the branch starting with this node. 
static void getNodeLog2Dims(std::vector<Index>& dims); /// @return The dimension of the child nodes of this node. /// @details The number of voxels in one coordinate direction /// covered by a child node of this node. static Index getChildDim() { return ChildNodeType::DIM; } /// Return the linear table offset of the given global or local coordinates. static Index coordToOffset(const Coord& xyz); /// @brief Return the local coordinates for a linear table offset, /// where offset 0 has coordinates (0, 0, 0). static void offsetToLocalCoord(Index n, Coord& xyz); /// Return the global coordinates for a linear table offset. Coord offsetToGlobalCoord(Index n) const; /// Return the grid index coordinates of this node's local origin. const Coord& origin() const { return mOrigin; } /// Set the grid index coordinates of this node's local origin. void setOrigin(const Coord& origin) { mOrigin = origin; } Index32 leafCount() const; void nodeCount(std::vector<Index32> &vec) const; Index32 nonLeafCount() const; Index32 childCount() const; Index64 onVoxelCount() const; Index64 offVoxelCount() const; Index64 onLeafVoxelCount() const; Index64 offLeafVoxelCount() const; Index64 onTileCount() const; /// Return the total amount of memory in bytes occupied by this node and its children. Index64 memUsage() const; /// @brief Expand the specified bounding box so that it includes the active tiles /// of this internal node as well as all the active values in its child nodes. /// If visitVoxels is false LeafNodes will be approximated as dense, i.e. with all /// voxels active. Else the individual active voxels are visited to produce a tight bbox. void evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels = true) const; /// @brief Return the bounding box of this node, i.e., the full index space /// spanned by the node regardless of its content. CoordBBox getNodeBoundingBox() const { return CoordBBox::createCube(mOrigin, DIM); } /// @return True if this node contains no child nodes. bool isEmpty() const { return mChildMask.isOff(); } /// Return @c true if all of this node's table entries have the same active state /// and the same constant value to within the given tolerance, /// and return that value in @a firstValue and the active state in @a state. /// /// @note This method also returns @c false if this node contains any child nodes. bool isConstant(ValueType& firstValue, bool& state, const ValueType& tolerance = zeroVal<ValueType>()) const; /// Return @c true if all of this node's tables entries have /// the same active @a state and the range of its values satisfy /// (@a maxValue - @a minValue) <= @a tolerance. /// /// @param minValue Is updated with the minimum of all values IF method /// returns @c true. Else the value is undefined! /// @param maxValue Is updated with the maximum of all values IF method /// returns @c true. Else the value is undefined! /// @param state Is updated with the state of all values IF method /// returns @c true. Else the value is undefined! /// @param tolerance The tolerance used to determine if values are /// approximatly constant. /// /// @note This method also returns @c false if this node contains any child nodes. bool isConstant(ValueType& minValue, ValueType& maxValue, bool& state, const ValueType& tolerance = zeroVal<ValueType>()) const; /// Return @c true if this node has no children and only contains inactive values. bool isInactive() const { return this->isChildMaskOff() && this->isValueMaskOff(); } /// Return @c true if the voxel at the given coordinates is active. 
bool isValueOn(const Coord& xyz) const; /// Return @c true if the voxel at the given offset is active. bool isValueOn(Index offset) const { return mValueMask.isOn(offset); } /// Return @c true if this node or any of its child nodes have any active tiles. bool hasActiveTiles() const; const ValueType& getValue(const Coord& xyz) const; bool probeValue(const Coord& xyz, ValueType& value) const; /// @brief Return the level of the tree (0 = leaf) at which the value /// at the given coordinates resides. Index getValueLevel(const Coord& xyz) const; /// @brief If the first entry in this node's table is a tile, return the tile's value. /// Otherwise, return the result of calling getFirstValue() on the child. const ValueType& getFirstValue() const; /// @brief If the last entry in this node's table is a tile, return the tile's value. /// Otherwise, return the result of calling getLastValue() on the child. const ValueType& getLastValue() const; /// Set the active state of the voxel at the given coordinates but don't change its value. void setActiveState(const Coord& xyz, bool on); /// Set the value of the voxel at the given coordinates but don't change its active state. void setValueOnly(const Coord& xyz, const ValueType& value); /// Mark the voxel at the given coordinates as active but don't change its value. void setValueOn(const Coord& xyz); /// Set the value of the voxel at the given coordinates and mark the voxel as active. void setValueOn(const Coord& xyz, const ValueType& value); /// Mark the voxel at the given coordinates as inactive but don't change its value. void setValueOff(const Coord& xyz); /// Set the value of the voxel at the given coordinates and mark the voxel as inactive. void setValueOff(const Coord& xyz, const ValueType& value); /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. template<typename ModifyOp> void modifyValue(const Coord& xyz, const ModifyOp& op); /// Apply a functor to the voxel at the given coordinates. template<typename ModifyOp> void modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op); /// Return the value of the voxel at the given coordinates and, if necessary, update /// the accessor with pointers to the nodes along the path from the root node to /// the node containing the voxel. /// @note Used internally by ValueAccessor. template<typename AccessorT> const ValueType& getValueAndCache(const Coord& xyz, AccessorT&) const; /// Return @c true if the voxel at the given coordinates is active and, if necessary, /// update the accessor with pointers to the nodes along the path from the root node /// to the node containing the voxel. /// @note Used internally by ValueAccessor. template<typename AccessorT> bool isValueOnAndCache(const Coord& xyz, AccessorT&) const; /// Change the value of the voxel at the given coordinates and mark it as active. /// If necessary, update the accessor with pointers to the nodes along the path /// from the root node to the node containing the voxel. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setValueAndCache(const Coord& xyz, const ValueType& value, AccessorT&); /// Set the value of the voxel at the given coordinate but preserves its active state. /// If necessary, update the accessor with pointers to the nodes along the path /// from the root node to the node containing the voxel. /// @note Used internally by ValueAccessor. 
template<typename AccessorT> void setValueOnlyAndCache(const Coord& xyz, const ValueType& value, AccessorT&); /// @brief Apply a functor to the value of the voxel at the given coordinates /// and mark the voxel as active. /// If necessary, update the accessor with pointers to the nodes along the path /// from the root node to the node containing the voxel. /// @note Used internally by ValueAccessor. template<typename ModifyOp, typename AccessorT> void modifyValueAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&); /// Apply a functor to the voxel at the given coordinates. /// If necessary, update the accessor with pointers to the nodes along the path /// from the root node to the node containing the voxel. /// @note Used internally by ValueAccessor. template<typename ModifyOp, typename AccessorT> void modifyValueAndActiveStateAndCache(const Coord& xyz, const ModifyOp& op, AccessorT&); /// Change the value of the voxel at the given coordinates and mark it as inactive. /// If necessary, update the accessor with pointers to the nodes along the path /// from the root node to the node containing the voxel. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setValueOffAndCache(const Coord& xyz, const ValueType& value, AccessorT&); /// Set the active state of the voxel at the given coordinates without changing its value. /// If necessary, update the accessor with pointers to the nodes along the path /// from the root node to the node containing the voxel. /// @note Used internally by ValueAccessor. template<typename AccessorT> void setActiveStateAndCache(const Coord& xyz, bool on, AccessorT&); /// Return, in @a value, the value of the voxel at the given coordinates and, /// if necessary, update the accessor with pointers to the nodes along /// the path from the root node to the node containing the voxel. /// @return @c true if the voxel at the given coordinates is active /// @note Used internally by ValueAccessor. template<typename AccessorT> bool probeValueAndCache(const Coord& xyz, ValueType& value, AccessorT&) const; /// @brief Return the level of the tree (0 = leaf) at which the value /// at the given coordinates resides. /// /// If necessary, update the accessor with pointers to the nodes along the path /// from the root node to the node containing the voxel. /// @note Used internally by ValueAccessor. template<typename AccessorT> Index getValueLevelAndCache(const Coord& xyz, AccessorT&) const; /// Mark all values (both tiles and voxels) as active. void setValuesOn(); // // I/O // void writeTopology(std::ostream&, bool toHalf = false) const; void readTopology(std::istream&, bool fromHalf = false); void writeBuffers(std::ostream&, bool toHalf = false) const; void readBuffers(std::istream&, bool fromHalf = false); void readBuffers(std::istream&, const CoordBBox&, bool fromHalf = false); // // Aux methods // /// Change the sign of all the values represented in this node and its child nodes. void negate(); /// @brief Set all voxels within a given axis-aligned box to a constant value. /// @param bbox inclusive coordinates of opposite corners of an axis-aligned box /// @param value the value to which to set voxels within the box /// @param active if true, mark voxels within the box as active, /// otherwise mark them as inactive /// @note This operation generates a sparse, but not always optimally sparse, /// representation of the filled box. Follow fill operations with a prune() /// operation for optimal sparseness. 
void fill(const CoordBBox& bbox, const ValueType& value, bool active = true); /// @brief Set all voxels within a given axis-aligned box to a constant value /// and ensure that those voxels are all represented at the leaf level. /// @param bbox inclusive coordinates of opposite corners of an axis-aligned box. /// @param value the value to which to set voxels within the box. /// @param active if true, mark voxels within the box as active, /// otherwise mark them as inactive. /// @sa voxelizeActiveTiles() void denseFill(const CoordBBox& bbox, const ValueType& value, bool active = true); /// @brief Densify active tiles, i.e., replace them with leaf-level active voxels. /// @param threaded if true, this operation is multi-threaded (over the internal nodes). /// @sa denseFill() void voxelizeActiveTiles(bool threaded = true); /// @brief Copy into a dense grid the values of the voxels that lie within /// a given bounding box. /// @param bbox inclusive bounding box of the voxels to be copied into the dense grid /// @param dense dense grid with a stride in @e z of one (see tools::Dense /// in tools/Dense.h for the required API) /// @note @a bbox is assumed to be identical to or contained in the coordinate domains /// of both the dense grid and this node, i.e., no bounds checking is performed. template<typename DenseT> void copyToDense(const CoordBBox& bbox, DenseT& dense) const; /// @brief Efficiently merge another tree into this tree using one of several schemes. /// @warning This operation cannibalizes the other tree. template<MergePolicy Policy> void merge(InternalNode& other, const ValueType& background, const ValueType& otherBackground); /// @brief Merge, using one of several schemes, this node (and its descendants) /// with a tile of the same dimensions and the given value and active state. template<MergePolicy Policy> void merge(const ValueType& tileValue, bool tileActive); /// @brief Union this branch's set of active values with the other branch's /// active values. The value type of the other branch can be different. /// @details The resulting state of a value is active if the corresponding value /// was already active OR if it is active in the other tree. Also, a resulting /// value maps to a voxel if the corresponding value already mapped to a voxel /// OR if it is a voxel in the other tree. Thus, a resulting value can only /// map to a tile if the corresponding value already mapped to a tile /// AND if it is a tile value in other tree. /// /// Specifically, active tiles and voxels in this branch are not changed, and /// tiles or voxels that were inactive in this branch but active in the other branch /// are marked as active in this branch but left with their original values. template<typename OtherChildNodeType> void topologyUnion(const InternalNode<OtherChildNodeType, Log2Dim>& other); /// @brief Intersects this tree's set of active values with the active values /// of the other tree, whose @c ValueType may be different. /// @details The resulting state of a value is active only if the corresponding /// value was already active AND if it is active in the other tree. Also, a /// resulting value maps to a voxel if the corresponding value /// already mapped to an active voxel in either of the two grids /// and it maps to an active tile or voxel in the other grid. /// /// @note This operation can delete branches in this grid if they /// overlap with inactive tiles in the other grid. 
Likewise active /// voxels can be turned into unactive voxels resulting in leaf /// nodes with no active values. Thus, it is recommended to /// subsequently call prune. template<typename OtherChildNodeType> void topologyIntersection(const InternalNode<OtherChildNodeType, Log2Dim>& other, const ValueType& background); /// @brief Difference this node's set of active values with the active values /// of the other node, whose @c ValueType may be different. So a /// resulting voxel will be active only if the original voxel is /// active in this node and inactive in the other node. /// /// @details The last dummy argument is required to match the signature /// for InternalNode::topologyDifference. /// /// @note This operation modifies only active states, not /// values. Also note that this operation can result in all voxels /// being inactive so consider subsequnetly calling prune. template<typename OtherChildNodeType> void topologyDifference(const InternalNode<OtherChildNodeType, Log2Dim>& other, const ValueType& background); template<typename CombineOp> void combine(InternalNode& other, CombineOp&); template<typename CombineOp> void combine(const ValueType& value, bool valueIsActive, CombineOp&); template<typename CombineOp, typename OtherNodeType /*= InternalNode*/> void combine2(const InternalNode& other0, const OtherNodeType& other1, CombineOp&); template<typename CombineOp, typename OtherNodeType /*= InternalNode*/> void combine2(const ValueType& value, const OtherNodeType& other, bool valIsActive, CombineOp&); template<typename CombineOp, typename OtherValueType> void combine2(const InternalNode& other, const OtherValueType&, bool valIsActive, CombineOp&); /// @brief Calls the templated functor BBoxOp with bounding box /// information for all active tiles and leaf nodes in this node. /// An additional level argument is provided for each callback. /// /// @note The bounding boxes are guarenteed to be non-overlapping. template<typename BBoxOp> void visitActiveBBox(BBoxOp&) const; template<typename VisitorOp> void visit(VisitorOp&); template<typename VisitorOp> void visit(VisitorOp&) const; template<typename OtherNodeType, typename VisitorOp> void visit2Node(OtherNodeType& other, VisitorOp&); template<typename OtherNodeType, typename VisitorOp> void visit2Node(OtherNodeType& other, VisitorOp&) const; template<typename IterT, typename VisitorOp> void visit2(IterT& otherIter, VisitorOp&, bool otherIsLHS = false); template<typename IterT, typename VisitorOp> void visit2(IterT& otherIter, VisitorOp&, bool otherIsLHS = false) const; /// Set all voxels that lie outside the given axis-aligned box to the background. void clip(const CoordBBox&, const ValueType& background); /// @brief Reduce the memory footprint of this tree by replacing with tiles /// any nodes whose values are all the same (optionally to within a tolerance) /// and have the same active state. void prune(const ValueType& tolerance = zeroVal<ValueType>()); /// @brief Add the specified leaf to this node, possibly creating a child branch /// in the process. If the leaf node already exists, replace it. void addLeaf(LeafNodeType* leaf); /// @brief Same as addLeaf() except, if necessary, update the accessor with pointers /// to the nodes along the path from the root node to the node containing the coordinate. 
template<typename AccessorT> void addLeafAndCache(LeafNodeType* leaf, AccessorT&); /// @brief Return a pointer to the node of type @c NodeT that contains voxel (x, y, z) /// and replace it with a tile of the specified value and state. /// If no such node exists, leave the tree unchanged and return @c nullptr. /// /// @note The caller takes ownership of the node and is responsible for deleting it. /// /// @warning Since this method potentially removes nodes and branches of the tree, /// it is important to clear the caches of all ValueAccessors associated with this tree. template<typename NodeT> NodeT* stealNode(const Coord& xyz, const ValueType& value, bool state); /// @brief Add the given child node at this level deducing the offset from it's origin. /// If a child node with this offset already exists, delete the old node and add the /// new node in its place (i.e. ownership of the new child node is transferred to /// this InternalNode) /// @return @c true if inserting the child has been successful, otherwise the caller /// retains ownership of the node and is responsible for deleting it. bool addChild(ChildNodeType* child); /// @brief Add a tile at the specified tree level that contains voxel (x, y, z), /// possibly creating a parent branch or deleting a child branch in the process. void addTile(Index level, const Coord& xyz, const ValueType& value, bool state); /// @brief Delete any existing child branch at the specified offset and add a tile. void addTile(Index offset, const ValueType& value, bool state); /// @brief Same as addTile() except, if necessary, update the accessor with pointers /// to the nodes along the path from the root node to the node containing (x, y, z). template<typename AccessorT> void addTileAndCache(Index level, const Coord& xyz, const ValueType&, bool state, AccessorT&); //@{ /// @brief Return a pointer to the node that contains voxel (x, y, z). /// If no such node exists, return nullptr. template<typename NodeType> NodeType* probeNode(const Coord& xyz); template<typename NodeType> const NodeType* probeConstNode(const Coord& xyz) const; //@} //@{ /// @brief Same as probeNode() except, if necessary, update the accessor with pointers /// to the nodes along the path from the root node to the node containing (x, y, z). template<typename NodeType, typename AccessorT> NodeType* probeNodeAndCache(const Coord& xyz, AccessorT&); template<typename NodeType, typename AccessorT> const NodeType* probeConstNodeAndCache(const Coord& xyz, AccessorT&) const; //@} //@{ /// @brief Return a pointer to the leaf node that contains voxel (x, y, z). /// If no such node exists, return @c nullptr. LeafNodeType* probeLeaf(const Coord& xyz); const LeafNodeType* probeConstLeaf(const Coord& xyz) const; const LeafNodeType* probeLeaf(const Coord& xyz) const; //@} //@{ /// @brief Same as probeLeaf() except, if necessary, update the accessor with pointers /// to the nodes along the path from the root node to the node containing (x, y, z). template<typename AccessorT> LeafNodeType* probeLeafAndCache(const Coord& xyz, AccessorT& acc); template<typename AccessorT> const LeafNodeType* probeConstLeafAndCache(const Coord& xyz, AccessorT& acc) const; template<typename AccessorT> const LeafNodeType* probeLeafAndCache(const Coord& xyz, AccessorT& acc) const; //@} /// @brief Return the leaf node that contains voxel (x, y, z). /// If no such node exists, create one, but preserve the values and /// active states of all voxels. 
/// /// @details Use this method to preallocate a static tree topology /// over which to safely perform multithreaded processing. LeafNodeType* touchLeaf(const Coord& xyz); /// @brief Same as touchLeaf() except, if necessary, update the accessor with pointers /// to the nodes along the path from the root node to the node containing the coordinate. template<typename AccessorT> LeafNodeType* touchLeafAndCache(const Coord& xyz, AccessorT&); //@{ /// @brief Adds all nodes of a certain type to a container with the following API: /// @code /// struct ArrayT { /// using value_type = ...;// defines the type of nodes to be added to the array /// void push_back(value_type nodePtr);// method that add nodes to the array /// }; /// @endcode /// @details An example of a wrapper around a c-style array is: /// @code /// struct MyArray { /// using value_type = LeafType*; /// value_type* ptr; /// MyArray(value_type* array) : ptr(array) {} /// void push_back(value_type leaf) { *ptr++ = leaf; } ///}; /// @endcode /// @details An example that constructs a list of pointer to all leaf nodes is: /// @code /// std::vector<const LeafNodeType*> array;//most std contains have the required API /// array.reserve(tree.leafCount());//this is a fast preallocation. /// tree.getNodes(array); /// @endcode template<typename ArrayT> void getNodes(ArrayT& array); template<typename ArrayT> void getNodes(ArrayT& array) const; //@} /// @brief Steals all nodes of a certain type from the tree and /// adds them to a container with the following API: /// @code /// struct ArrayT { /// using value_type = ...;// defines the type of nodes to be added to the array /// void push_back(value_type nodePtr);// method that add nodes to the array /// }; /// @endcode /// @details An example of a wrapper around a c-style array is: /// @code /// struct MyArray { /// using value_type = LeafType*; /// value_type* ptr; /// MyArray(value_type* array) : ptr(array) {} /// void push_back(value_type leaf) { *ptr++ = leaf; } ///}; /// @endcode /// @details An example that constructs a list of pointer to all leaf nodes is: /// @code /// std::vector<const LeafNodeType*> array;//most std contains have the required API /// array.reserve(tree.leafCount());//this is a fast preallocation. /// tree.stealNodes(array); /// @endcode template<typename ArrayT> void stealNodes(ArrayT& array, const ValueType& value, bool state); /// @brief Change inactive tiles or voxels with value oldBackground to newBackground /// or -oldBackground to -newBackground. Active values are unchanged. void resetBackground(const ValueType& oldBackground, const ValueType& newBackground); /// @brief Return @c true if the given tree branch has the same node and active value /// topology as this tree branch (but possibly a different @c ValueType). template<typename OtherChildNodeType, Index OtherLog2Dim> bool hasSameTopology(const InternalNode<OtherChildNodeType, OtherLog2Dim>* other) const; protected: //@{ /// Allow iterators to call mask accessor methods (setValueMask(), setChildMask(), etc.). /// @todo Make mask accessors public? friend class IteratorBase<MaskOnIterator, InternalNode>; friend class IteratorBase<MaskOffIterator, InternalNode>; friend class IteratorBase<MaskDenseIterator, InternalNode>; //@} /// @brief During topology-only construction, access is needed /// to protected/private members of other template instances. 
template<typename, Index> friend class InternalNode; // Mask accessors public: bool isValueMaskOn(Index n) const { return mValueMask.isOn(n); } bool isValueMaskOn() const { return mValueMask.isOn(); } bool isValueMaskOff(Index n) const { return mValueMask.isOff(n); } bool isValueMaskOff() const { return mValueMask.isOff(); } bool isChildMaskOn(Index n) const { return mChildMask.isOn(n); } bool isChildMaskOff(Index n) const { return mChildMask.isOff(n); } bool isChildMaskOff() const { return mChildMask.isOff(); } const NodeMaskType& getValueMask() const { return mValueMask; } const NodeMaskType& getChildMask() const { return mChildMask; } NodeMaskType getValueOffMask() const { NodeMaskType mask = mValueMask; mask |= mChildMask; mask.toggle(); return mask; } const UnionType* getTable() const { return mNodes; } protected: //@{ /// Use a mask accessor to ensure consistency between the child and value masks; /// i.e., the value mask should always be off wherever the child mask is on. void setValueMask(Index n, bool on) { mValueMask.set(n, mChildMask.isOn(n) ? false : on); } //@} void makeChildNodeEmpty(Index n, const ValueType& value); void setChildNode( Index i, ChildNodeType* child);//assumes a tile void resetChildNode(Index i, ChildNodeType* child);//checks for an existing child ChildNodeType* unsetChildNode(Index i, const ValueType& value); template<typename NodeT, typename VisitorOp, typename ChildAllIterT> static inline void doVisit(NodeT&, VisitorOp&); template<typename NodeT, typename OtherNodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> static inline void doVisit2Node(NodeT&, OtherNodeT&, VisitorOp&); template<typename NodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> static inline void doVisit2(NodeT&, OtherChildAllIterT&, VisitorOp&, bool otherIsLHS); ///@{ /// @brief Returns a pointer to the child node at the linear offset n. /// @warning This protected method assumes that a child node exists at /// the specified linear offset! 
ChildNodeType* getChildNode(Index n); const ChildNodeType* getChildNode(Index n) const; ///@} ///@{ /// @brief Protected member classes for recursive multi-threading struct VoxelizeActiveTiles; template<typename OtherInternalNode> struct DeepCopy; template<typename OtherInternalNode> struct TopologyCopy1; template<typename OtherInternalNode> struct TopologyCopy2; template<typename OtherInternalNode> struct TopologyUnion; template<typename OtherInternalNode> struct TopologyDifference; template<typename OtherInternalNode> struct TopologyIntersection; ///@} UnionType mNodes[NUM_VALUES]; NodeMaskType mChildMask, mValueMask; /// Global grid index coordinates (x,y,z) of the local origin of this node Coord mOrigin; }; // class InternalNode //////////////////////////////////////// //@{ /// Helper metafunction used to implement InternalNode::SameConfiguration /// (which, as an inner class, can't be independently specialized) template<typename ChildT1, Index Dim1, typename NodeT2> struct SameInternalConfig { static const bool value = false; }; template<typename ChildT1, Index Dim1, typename ChildT2> struct SameInternalConfig<ChildT1, Dim1, InternalNode<ChildT2, Dim1> > { static const bool value = ChildT1::template SameConfiguration<ChildT2>::value; }; //@} //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline InternalNode<ChildT, Log2Dim>::InternalNode(const ValueType& background) { for (Index i = 0; i < NUM_VALUES; ++i) mNodes[i].setValue(background); } template<typename ChildT, Index Log2Dim> inline InternalNode<ChildT, Log2Dim>::InternalNode(const Coord& origin, const ValueType& val, bool active): mOrigin(origin[0] & ~(DIM - 1), // zero out the low-order bits origin[1] & ~(DIM - 1), origin[2] & ~(DIM - 1)) { if (active) mValueMask.setOn(); for (Index i = 0; i < NUM_VALUES; ++i) mNodes[i].setValue(val); } // For InternalNodes, the PartialCreate constructor is identical to its // non-PartialCreate counterpart. template<typename ChildT, Index Log2Dim> inline InternalNode<ChildT, Log2Dim>::InternalNode(PartialCreate, const Coord& origin, const ValueType& val, bool active) : mOrigin(origin[0] & ~(DIM-1), origin[1] & ~(DIM-1), origin[2] & ~(DIM-1)) { if (active) mValueMask.setOn(); for (Index i = 0; i < NUM_VALUES; ++i) mNodes[i].setValue(val); } template<typename ChildT, Index Log2Dim> template<typename OtherInternalNode> struct InternalNode<ChildT, Log2Dim>::DeepCopy { DeepCopy(const OtherInternalNode* source, InternalNode* target) : s(source), t(target) { tbb::parallel_for(tbb::blocked_range<Index>(0, NUM_VALUES), *this); //(*this)(tbb::blocked_range<Index>(0, NUM_VALUES));//serial } void operator()(const tbb::blocked_range<Index> &r) const { for (Index i = r.begin(), end=r.end(); i!=end; ++i) { if (s->mChildMask.isOff(i)) { t->mNodes[i].setValue(ValueType(s->mNodes[i].getValue())); } else { t->mNodes[i].setChild(new ChildNodeType(*(s->mNodes[i].getChild()))); } } } const OtherInternalNode* s; InternalNode* t; };// DeepCopy template<typename ChildT, Index Log2Dim> inline InternalNode<ChildT, Log2Dim>::InternalNode(const InternalNode& other): mChildMask(other.mChildMask), mValueMask(other.mValueMask), mOrigin(other.mOrigin) { DeepCopy<InternalNode<ChildT, Log2Dim> > tmp(&other, this); } // Copy-construct from a node with the same configuration but a different ValueType. 
template<typename ChildT, Index Log2Dim> template<typename OtherChildNodeType> inline InternalNode<ChildT, Log2Dim>::InternalNode(const InternalNode<OtherChildNodeType, Log2Dim>& other) : mChildMask(other.mChildMask) , mValueMask(other.mValueMask) , mOrigin(other.mOrigin) { DeepCopy<InternalNode<OtherChildNodeType, Log2Dim> > tmp(&other, this); } template<typename ChildT, Index Log2Dim> template<typename OtherInternalNode> struct InternalNode<ChildT, Log2Dim>::TopologyCopy1 { TopologyCopy1(const OtherInternalNode* source, InternalNode* target, const ValueType& background) : s(source), t(target), b(background) { tbb::parallel_for(tbb::blocked_range<Index>(0, NUM_VALUES), *this); //(*this)(tbb::blocked_range<Index>(0, NUM_VALUES));//serial } void operator()(const tbb::blocked_range<Index> &r) const { for (Index i = r.begin(), end=r.end(); i!=end; ++i) { if (s->isChildMaskOn(i)) { t->mNodes[i].setChild(new ChildNodeType(*(s->mNodes[i].getChild()), b, TopologyCopy())); } else { t->mNodes[i].setValue(b); } } } const OtherInternalNode* s; InternalNode* t; const ValueType &b; };// TopologyCopy1 template<typename ChildT, Index Log2Dim> template<typename OtherChildNodeType> inline InternalNode<ChildT, Log2Dim>::InternalNode(const InternalNode<OtherChildNodeType, Log2Dim>& other, const ValueType& background, TopologyCopy): mChildMask(other.mChildMask), mValueMask(other.mValueMask), mOrigin(other.mOrigin) { TopologyCopy1<InternalNode<OtherChildNodeType, Log2Dim> > tmp(&other, this, background); } template<typename ChildT, Index Log2Dim> template<typename OtherInternalNode> struct InternalNode<ChildT, Log2Dim>::TopologyCopy2 { TopologyCopy2(const OtherInternalNode* source, InternalNode* target, const ValueType& offValue, const ValueType& onValue) : s(source), t(target), offV(offValue), onV(onValue) { tbb::parallel_for(tbb::blocked_range<Index>(0, NUM_VALUES), *this); } void operator()(const tbb::blocked_range<Index> &r) const { for (Index i = r.begin(), end=r.end(); i!=end; ++i) { if (s->isChildMaskOn(i)) { t->mNodes[i].setChild(new ChildNodeType(*(s->mNodes[i].getChild()), offV, onV, TopologyCopy())); } else { t->mNodes[i].setValue(s->isValueMaskOn(i) ? 
onV : offV); } } } const OtherInternalNode* s; InternalNode* t; const ValueType &offV, &onV; };// TopologyCopy2 template<typename ChildT, Index Log2Dim> template<typename OtherChildNodeType> inline InternalNode<ChildT, Log2Dim>::InternalNode(const InternalNode<OtherChildNodeType, Log2Dim>& other, const ValueType& offValue, const ValueType& onValue, TopologyCopy): mChildMask(other.mChildMask), mValueMask(other.mValueMask), mOrigin(other.mOrigin) { TopologyCopy2<InternalNode<OtherChildNodeType, Log2Dim> > tmp(&other, this, offValue, onValue); } template<typename ChildT, Index Log2Dim> inline InternalNode<ChildT, Log2Dim>::~InternalNode() { for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) { delete mNodes[iter.pos()].getChild(); } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline Index32 InternalNode<ChildT, Log2Dim>::leafCount() const { if (ChildNodeType::getLevel() == 0) return mChildMask.countOn(); Index32 sum = 0; for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) { sum += iter->leafCount(); } return sum; } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::nodeCount(std::vector<Index32> &vec) const { assert(vec.size() > ChildNodeType::LEVEL); const auto count = mChildMask.countOn(); if (ChildNodeType::LEVEL > 0 && count > 0) { for (auto iter = this->cbeginChildOn(); iter; ++iter) iter->nodeCount(vec); } vec[ChildNodeType::LEVEL] += count; } template<typename ChildT, Index Log2Dim> inline Index32 InternalNode<ChildT, Log2Dim>::nonLeafCount() const { Index32 sum = 1; if (ChildNodeType::getLevel() == 0) return sum; for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) { sum += iter->nonLeafCount(); } return sum; } template<typename ChildT, Index Log2Dim> inline Index32 InternalNode<ChildT, Log2Dim>::childCount() const { return this->getChildMask().countOn(); } template<typename ChildT, Index Log2Dim> inline Index64 InternalNode<ChildT, Log2Dim>::onVoxelCount() const { Index64 sum = ChildT::NUM_VOXELS * mValueMask.countOn(); for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) { sum += iter->onVoxelCount(); } return sum; } template<typename ChildT, Index Log2Dim> inline Index64 InternalNode<ChildT, Log2Dim>::offVoxelCount() const { Index64 sum = ChildT::NUM_VOXELS * (NUM_VALUES-mValueMask.countOn()-mChildMask.countOn()); for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) { sum += iter->offVoxelCount(); } return sum; } template<typename ChildT, Index Log2Dim> inline Index64 InternalNode<ChildT, Log2Dim>::onLeafVoxelCount() const { Index64 sum = 0; for (ChildOnCIter iter = this->beginChildOn(); iter; ++iter) { sum += mNodes[iter.pos()].getChild()->onLeafVoxelCount(); } return sum; } template<typename ChildT, Index Log2Dim> inline Index64 InternalNode<ChildT, Log2Dim>::offLeafVoxelCount() const { Index64 sum = 0; for (ChildOnCIter iter = this->beginChildOn(); iter; ++iter) { sum += mNodes[iter.pos()].getChild()->offLeafVoxelCount(); } return sum; } template<typename ChildT, Index Log2Dim> inline Index64 InternalNode<ChildT, Log2Dim>::onTileCount() const { Index64 sum = mValueMask.countOn(); for (ChildOnCIter iter = this->cbeginChildOn(); LEVEL>1 && iter; ++iter) { sum += iter->onTileCount(); } return sum; } template<typename ChildT, Index Log2Dim> inline Index64 InternalNode<ChildT, Log2Dim>::memUsage() const { Index64 sum = NUM_VALUES * sizeof(UnionType) + mChildMask.memUsage() + mValueMask.memUsage() + sizeof(mOrigin); for (ChildOnCIter iter = this->cbeginChildOn(); iter; 
++iter) { sum += iter->memUsage(); } return sum; } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels) const { if (bbox.isInside(this->getNodeBoundingBox())) return; for (ValueOnCIter i = this->cbeginValueOn(); i; ++i) { bbox.expand(i.getCoord(), ChildT::DIM); } for (ChildOnCIter i = this->cbeginChildOn(); i; ++i) { i->evalActiveBoundingBox(bbox, visitVoxels); } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::prune(const ValueType& tolerance) { bool state = false; ValueType value = zeroVal<ValueType>(); for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) { const Index i = iter.pos(); ChildT* child = mNodes[i].getChild(); child->prune(tolerance); if (child->isConstant(value, state, tolerance)) { delete child; mChildMask.setOff(i); mValueMask.set(i, state); mNodes[i].setValue(value); } } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<typename NodeT> inline NodeT* InternalNode<ChildT, Log2Dim>::stealNode(const Coord& xyz, const ValueType& value, bool state) { if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) || NodeT::LEVEL > ChildT::LEVEL) return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN const Index n = this->coordToOffset(xyz); if (mChildMask.isOff(n)) return nullptr; ChildT* child = mNodes[n].getChild(); if (std::is_same<NodeT, ChildT>::value) { mChildMask.setOff(n); mValueMask.set(n, state); mNodes[n].setValue(value); } return (std::is_same<NodeT, ChildT>::value) ? reinterpret_cast<NodeT*>(child) : child->template stealNode<NodeT>(xyz, value, state); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<typename NodeT> inline NodeT* InternalNode<ChildT, Log2Dim>::probeNode(const Coord& xyz) { if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) || NodeT::LEVEL > ChildT::LEVEL) return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN const Index n = this->coordToOffset(xyz); if (mChildMask.isOff(n)) return nullptr; ChildT* child = mNodes[n].getChild(); return (std::is_same<NodeT, ChildT>::value) ? reinterpret_cast<NodeT*>(child) : child->template probeNode<NodeT>(xyz); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } template<typename ChildT, Index Log2Dim> template<typename NodeT, typename AccessorT> inline NodeT* InternalNode<ChildT, Log2Dim>::probeNodeAndCache(const Coord& xyz, AccessorT& acc) { if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) || NodeT::LEVEL > ChildT::LEVEL) return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN const Index n = this->coordToOffset(xyz); if (mChildMask.isOff(n)) return nullptr; ChildT* child = mNodes[n].getChild(); acc.insert(xyz, child); return (std::is_same<NodeT, ChildT>::value) ? 
reinterpret_cast<NodeT*>(child) : child->template probeNodeAndCache<NodeT>(xyz, acc); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } template<typename ChildT, Index Log2Dim> template<typename NodeT> inline const NodeT* InternalNode<ChildT, Log2Dim>::probeConstNode(const Coord& xyz) const { if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) || NodeT::LEVEL > ChildT::LEVEL) return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN const Index n = this->coordToOffset(xyz); if (mChildMask.isOff(n)) return nullptr; const ChildT* child = mNodes[n].getChild(); return (std::is_same<NodeT, ChildT>::value) ? reinterpret_cast<const NodeT*>(child) : child->template probeConstNode<NodeT>(xyz); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } template<typename ChildT, Index Log2Dim> template<typename NodeT, typename AccessorT> inline const NodeT* InternalNode<ChildT, Log2Dim>::probeConstNodeAndCache(const Coord& xyz, AccessorT& acc) const { if ((NodeT::LEVEL == ChildT::LEVEL && !(std::is_same<NodeT, ChildT>::value)) || NodeT::LEVEL > ChildT::LEVEL) return nullptr; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN const Index n = this->coordToOffset(xyz); if (mChildMask.isOff(n)) return nullptr; const ChildT* child = mNodes[n].getChild(); acc.insert(xyz, child); return (std::is_same<NodeT, ChildT>::value) ? reinterpret_cast<const NodeT*>(child) : child->template probeConstNodeAndCache<NodeT>(xyz, acc); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline typename ChildT::LeafNodeType* InternalNode<ChildT, Log2Dim>::probeLeaf(const Coord& xyz) { return this->template probeNode<LeafNodeType>(xyz); } template<typename ChildT, Index Log2Dim> template<typename AccessorT> inline typename ChildT::LeafNodeType* InternalNode<ChildT, Log2Dim>::probeLeafAndCache(const Coord& xyz, AccessorT& acc) { return this->template probeNodeAndCache<LeafNodeType>(xyz, acc); } template<typename ChildT, Index Log2Dim> template<typename AccessorT> inline const typename ChildT::LeafNodeType* InternalNode<ChildT, Log2Dim>::probeLeafAndCache(const Coord& xyz, AccessorT& acc) const { return this->probeConstLeafAndCache(xyz, acc); } template<typename ChildT, Index Log2Dim> inline const typename ChildT::LeafNodeType* InternalNode<ChildT, Log2Dim>::probeConstLeaf(const Coord& xyz) const { return this->template probeConstNode<LeafNodeType>(xyz); } template<typename ChildT, Index Log2Dim> template<typename AccessorT> inline const typename ChildT::LeafNodeType* InternalNode<ChildT, Log2Dim>::probeConstLeafAndCache(const Coord& xyz, AccessorT& acc) const { return this->template probeConstNodeAndCache<LeafNodeType>(xyz, acc); } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::addLeaf(LeafNodeType* leaf) { assert(leaf != nullptr); const Coord& xyz = leaf->origin(); const Index n = this->coordToOffset(xyz); ChildT* child = nullptr; if (mChildMask.isOff(n)) { if (ChildT::LEVEL>0) { child = new ChildT(xyz, mNodes[n].getValue(), mValueMask.isOn(n)); } else { child = reinterpret_cast<ChildT*>(leaf); } this->setChildNode(n, child); } else { if (ChildT::LEVEL>0) { child = mNodes[n].getChild(); } else { delete mNodes[n].getChild(); child = reinterpret_cast<ChildT*>(leaf); mNodes[n].setChild(child); } } child->addLeaf(leaf); } template<typename ChildT, Index Log2Dim> template<typename AccessorT> inline void InternalNode<ChildT, Log2Dim>::addLeafAndCache(LeafNodeType* leaf, AccessorT& acc) { 
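// Identical to addLeaf() above, except that the internal nodes visited or created
// on the way down are also recorded in the value accessor's cache.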
assert(leaf != nullptr); const Coord& xyz = leaf->origin(); const Index n = this->coordToOffset(xyz); ChildT* child = nullptr; if (mChildMask.isOff(n)) { if (ChildT::LEVEL>0) { child = new ChildT(xyz, mNodes[n].getValue(), mValueMask.isOn(n)); acc.insert(xyz, child);//we only cache internal nodes } else { child = reinterpret_cast<ChildT*>(leaf); } this->setChildNode(n, child); } else { if (ChildT::LEVEL>0) { child = mNodes[n].getChild(); acc.insert(xyz, child);//we only cache internal nodes } else { delete mNodes[n].getChild(); child = reinterpret_cast<ChildT*>(leaf); mNodes[n].setChild(child); } } child->addLeafAndCache(leaf, acc); } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline bool InternalNode<ChildT, Log2Dim>::addChild(ChildT* child) { assert(child); const Coord& xyz = child->origin(); // verify that the child belongs in this internal node if (Coord((xyz & ~(DIM-1))) != this->origin()) return false; // compute the offset and insert the child node const Index n = this->coordToOffset(xyz); // this also deletes an existing child node this->resetChildNode(n, child); return true; } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::addTile(Index n, const ValueType& value, bool state) { assert(n < NUM_VALUES); this->makeChildNodeEmpty(n, value); mValueMask.set(n, state); } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::addTile(Index level, const Coord& xyz, const ValueType& value, bool state) { if (LEVEL >= level) { const Index n = this->coordToOffset(xyz); if (mChildMask.isOff(n)) {// tile case if (LEVEL > level) { ChildT* child = new ChildT(xyz, mNodes[n].getValue(), mValueMask.isOn(n)); this->setChildNode(n, child); child->addTile(level, xyz, value, state); } else { mValueMask.set(n, state); mNodes[n].setValue(value); } } else {// child branch case ChildT* child = mNodes[n].getChild(); if (LEVEL > level) { child->addTile(level, xyz, value, state); } else { delete child; mChildMask.setOff(n); mValueMask.set(n, state); mNodes[n].setValue(value); } } } } template<typename ChildT, Index Log2Dim> template<typename AccessorT> inline void InternalNode<ChildT, Log2Dim>::addTileAndCache(Index level, const Coord& xyz, const ValueType& value, bool state, AccessorT& acc) { if (LEVEL >= level) { const Index n = this->coordToOffset(xyz); if (mChildMask.isOff(n)) {// tile case if (LEVEL > level) { ChildT* child = new ChildT(xyz, mNodes[n].getValue(), mValueMask.isOn(n)); this->setChildNode(n, child); acc.insert(xyz, child); child->addTileAndCache(level, xyz, value, state, acc); } else { mValueMask.set(n, state); mNodes[n].setValue(value); } } else {// child branch case ChildT* child = mNodes[n].getChild(); if (LEVEL > level) { acc.insert(xyz, child); child->addTileAndCache(level, xyz, value, state, acc); } else { delete child; mChildMask.setOff(n); mValueMask.set(n, state); mNodes[n].setValue(value); } } } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline typename ChildT::LeafNodeType* InternalNode<ChildT, Log2Dim>::touchLeaf(const Coord& xyz) { const Index n = this->coordToOffset(xyz); ChildT* child = nullptr; if (mChildMask.isOff(n)) { child = new ChildT(xyz, mNodes[n].getValue(), mValueMask.isOn(n)); this->setChildNode(n, child); } else { child = mNodes[n].getChild(); } return child->touchLeaf(xyz); } template<typename ChildT, Index Log2Dim> template<typename AccessorT> inline typename ChildT::LeafNodeType* InternalNode<ChildT, 
Log2Dim>::touchLeafAndCache(const Coord& xyz, AccessorT& acc) { const Index n = this->coordToOffset(xyz); if (mChildMask.isOff(n)) { this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), mValueMask.isOn(n))); } acc.insert(xyz, mNodes[n].getChild()); return mNodes[n].getChild()->touchLeafAndCache(xyz, acc); } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline bool InternalNode<ChildT, Log2Dim>::isConstant(ValueType& firstValue, bool& state, const ValueType& tolerance) const { if (!mChildMask.isOff() || !mValueMask.isConstant(state)) return false;// early termination firstValue = mNodes[0].getValue(); for (Index i = 1; i < NUM_VALUES; ++i) { if (!math::isApproxEqual(mNodes[i].getValue(), firstValue, tolerance)) { return false; // early termination } } return true; } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline bool InternalNode<ChildT, Log2Dim>::isConstant(ValueType& minValue, ValueType& maxValue, bool& state, const ValueType& tolerance) const { if (!mChildMask.isOff() || !mValueMask.isConstant(state)) return false;// early termination minValue = maxValue = mNodes[0].getValue(); for (Index i = 1; i < NUM_VALUES; ++i) { const ValueType& v = mNodes[i].getValue(); if (v < minValue) { if ((maxValue - v) > tolerance) return false;// early termination minValue = v; } else if (v > maxValue) { if ((v - minValue) > tolerance) return false;// early termination maxValue = v; } } return true; } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline bool InternalNode<ChildT, Log2Dim>::hasActiveTiles() const { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN const bool anyActiveTiles = !mValueMask.isOff(); if (LEVEL==1 || anyActiveTiles) return anyActiveTiles; for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) { if (iter->hasActiveTiles()) return true; } return false; OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } template<typename ChildT, Index Log2Dim> inline bool InternalNode<ChildT, Log2Dim>::isValueOn(const Coord& xyz) const { const Index n = this->coordToOffset(xyz); if (this->isChildMaskOff(n)) return this->isValueMaskOn(n); return mNodes[n].getChild()->isValueOn(xyz); } template<typename ChildT, Index Log2Dim> template<typename AccessorT> inline bool InternalNode<ChildT, Log2Dim>::isValueOnAndCache(const Coord& xyz, AccessorT& acc) const { const Index n = this->coordToOffset(xyz); if (this->isChildMaskOff(n)) return this->isValueMaskOn(n); acc.insert(xyz, mNodes[n].getChild()); return mNodes[n].getChild()->isValueOnAndCache(xyz, acc); } template<typename ChildT, Index Log2Dim> inline const typename ChildT::ValueType& InternalNode<ChildT, Log2Dim>::getValue(const Coord& xyz) const { const Index n = this->coordToOffset(xyz); return this->isChildMaskOff(n) ? mNodes[n].getValue() : mNodes[n].getChild()->getValue(xyz); } template<typename ChildT, Index Log2Dim> template<typename AccessorT> inline const typename ChildT::ValueType& InternalNode<ChildT, Log2Dim>::getValueAndCache(const Coord& xyz, AccessorT& acc) const { const Index n = this->coordToOffset(xyz); if (this->isChildMaskOn(n)) { acc.insert(xyz, mNodes[n].getChild()); return mNodes[n].getChild()->getValueAndCache(xyz, acc); } return mNodes[n].getValue(); } template<typename ChildT, Index Log2Dim> inline Index InternalNode<ChildT, Log2Dim>::getValueLevel(const Coord& xyz) const { const Index n = this->coordToOffset(xyz); return this->isChildMaskOff(n) ? 
LEVEL : mNodes[n].getChild()->getValueLevel(xyz); } template<typename ChildT, Index Log2Dim> template<typename AccessorT> inline Index InternalNode<ChildT, Log2Dim>::getValueLevelAndCache(const Coord& xyz, AccessorT& acc) const { const Index n = this->coordToOffset(xyz); if (this->isChildMaskOn(n)) { acc.insert(xyz, mNodes[n].getChild()); return mNodes[n].getChild()->getValueLevelAndCache(xyz, acc); } return LEVEL; } template<typename ChildT, Index Log2Dim> inline bool InternalNode<ChildT, Log2Dim>::probeValue(const Coord& xyz, ValueType& value) const { const Index n = this->coordToOffset(xyz); if (this->isChildMaskOff(n)) { value = mNodes[n].getValue(); return this->isValueMaskOn(n); } return mNodes[n].getChild()->probeValue(xyz, value); } template<typename ChildT, Index Log2Dim> template<typename AccessorT> inline bool InternalNode<ChildT, Log2Dim>::probeValueAndCache(const Coord& xyz, ValueType& value, AccessorT& acc) const { const Index n = this->coordToOffset(xyz); if (this->isChildMaskOn(n)) { acc.insert(xyz, mNodes[n].getChild()); return mNodes[n].getChild()->probeValueAndCache(xyz, value, acc); } value = mNodes[n].getValue(); return this->isValueMaskOn(n); } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::setValueOff(const Coord& xyz) { const Index n = this->coordToOffset(xyz); bool hasChild = this->isChildMaskOn(n); if (!hasChild && this->isValueMaskOn(n)) { // If the voxel belongs to a constant tile that is active, // a child subtree must be constructed. hasChild = true; this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), /*active=*/true)); } if (hasChild) mNodes[n].getChild()->setValueOff(xyz); } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::setValueOn(const Coord& xyz) { const Index n = this->coordToOffset(xyz); bool hasChild = this->isChildMaskOn(n); if (!hasChild && !this->isValueMaskOn(n)) { // If the voxel belongs to a constant tile that is inactive, // a child subtree must be constructed. hasChild = true; this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), /*active=*/false)); } if (hasChild) mNodes[n].getChild()->setValueOn(xyz); } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::setValueOff(const Coord& xyz, const ValueType& value) { const Index n = InternalNode::coordToOffset(xyz); bool hasChild = this->isChildMaskOn(n); if (!hasChild) { const bool active = this->isValueMaskOn(n); if (active || !math::isExactlyEqual(mNodes[n].getValue(), value)) { // If the voxel belongs to a tile that is either active or that // has a constant value that is different from the one provided, // a child subtree must be constructed. hasChild = true; this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), active)); } } if (hasChild) mNodes[n].getChild()->setValueOff(xyz, value); } template<typename ChildT, Index Log2Dim> template<typename AccessorT> inline void InternalNode<ChildT, Log2Dim>::setValueOffAndCache(const Coord& xyz, const ValueType& value, AccessorT& acc) { const Index n = InternalNode::coordToOffset(xyz); bool hasChild = this->isChildMaskOn(n); if (!hasChild) { const bool active = this->isValueMaskOn(n); if (active || !math::isExactlyEqual(mNodes[n].getValue(), value)) { // If the voxel belongs to a tile that is either active or that // has a constant value that is different from the one provided, // a child subtree must be constructed. 
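// Voxelizing the tile preserves its value and active state; only the single voxel
// at xyz is then turned off with the requested value.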
hasChild = true; this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), active)); } } if (hasChild) { ChildT* child = mNodes[n].getChild(); acc.insert(xyz, child); child->setValueOffAndCache(xyz, value, acc); } } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::setValueOn(const Coord& xyz, const ValueType& value) { const Index n = this->coordToOffset(xyz); bool hasChild = this->isChildMaskOn(n); if (!hasChild) { const bool active = this->isValueMaskOn(n); // tile's active state if (!active || !math::isExactlyEqual(mNodes[n].getValue(), value)) { // If the voxel belongs to a tile that is either inactive or that // has a constant value that is different from the one provided, // a child subtree must be constructed. hasChild = true; this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), active)); } } if (hasChild) mNodes[n].getChild()->setValueOn(xyz, value); } template<typename ChildT, Index Log2Dim> template<typename AccessorT> inline void InternalNode<ChildT, Log2Dim>::setValueAndCache(const Coord& xyz, const ValueType& value, AccessorT& acc) { const Index n = this->coordToOffset(xyz); bool hasChild = this->isChildMaskOn(n); if (!hasChild) { const bool active = this->isValueMaskOn(n); if (!active || !math::isExactlyEqual(mNodes[n].getValue(), value)) { // If the voxel belongs to a tile that is either inactive or that // has a constant value that is different from the one provided, // a child subtree must be constructed. hasChild = true; this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), active)); } } if (hasChild) { acc.insert(xyz, mNodes[n].getChild()); mNodes[n].getChild()->setValueAndCache(xyz, value, acc); } } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::setValueOnly(const Coord& xyz, const ValueType& value) { const Index n = this->coordToOffset(xyz); bool hasChild = this->isChildMaskOn(n); if (!hasChild && !math::isExactlyEqual(mNodes[n].getValue(), value)) { // If the voxel has a tile value that is different from the one provided, // a child subtree must be constructed. const bool active = this->isValueMaskOn(n); hasChild = true; this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), active)); } if (hasChild) mNodes[n].getChild()->setValueOnly(xyz, value); } template<typename ChildT, Index Log2Dim> template<typename AccessorT> inline void InternalNode<ChildT, Log2Dim>::setValueOnlyAndCache(const Coord& xyz, const ValueType& value, AccessorT& acc) { const Index n = this->coordToOffset(xyz); bool hasChild = this->isChildMaskOn(n); if (!hasChild && !math::isExactlyEqual(mNodes[n].getValue(), value)) { // If the voxel has a tile value that is different from the one provided, // a child subtree must be constructed. const bool active = this->isValueMaskOn(n); hasChild = true; this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), active)); } if (hasChild) { acc.insert(xyz, mNodes[n].getChild()); mNodes[n].getChild()->setValueOnlyAndCache(xyz, value, acc); } } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::setActiveState(const Coord& xyz, bool on) { const Index n = this->coordToOffset(xyz); bool hasChild = this->isChildMaskOn(n); if (!hasChild) { if (on != this->isValueMaskOn(n)) { // If the voxel belongs to a tile with the wrong active state, // then a child subtree must be constructed. 
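// (The new child inherits the tile's value; only the state of the voxel at xyz changes.)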
// 'on' is the voxel's new state, therefore '!on' is the tile's current state hasChild = true; this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), !on)); } } if (hasChild) mNodes[n].getChild()->setActiveState(xyz, on); } template<typename ChildT, Index Log2Dim> template<typename AccessorT> inline void InternalNode<ChildT, Log2Dim>::setActiveStateAndCache(const Coord& xyz, bool on, AccessorT& acc) { const Index n = this->coordToOffset(xyz); bool hasChild = this->isChildMaskOn(n); if (!hasChild) { if (on != this->isValueMaskOn(n)) { // If the voxel belongs to a tile with the wrong active state, // then a child subtree must be constructed. // 'on' is the voxel's new state, therefore '!on' is the tile's current state hasChild = true; this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), !on)); } } if (hasChild) { ChildT* child = mNodes[n].getChild(); acc.insert(xyz, child); child->setActiveStateAndCache(xyz, on, acc); } } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::setValuesOn() { mValueMask = !mChildMask; for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) { mNodes[iter.pos()].getChild()->setValuesOn(); } } template<typename ChildT, Index Log2Dim> template<typename ModifyOp> inline void InternalNode<ChildT, Log2Dim>::modifyValue(const Coord& xyz, const ModifyOp& op) { const Index n = InternalNode::coordToOffset(xyz); bool hasChild = this->isChildMaskOn(n); if (!hasChild) { // Need to create a child if the tile is inactive, // in order to activate voxel (x, y, z). const bool active = this->isValueMaskOn(n); bool createChild = !active; if (!createChild) { // Need to create a child if applying the functor // to the tile value produces a different value. const ValueType& tileVal = mNodes[n].getValue(); ValueType modifiedVal = tileVal; op(modifiedVal); createChild = !math::isExactlyEqual(tileVal, modifiedVal); } if (createChild) { hasChild = true; this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), active)); } } if (hasChild) mNodes[n].getChild()->modifyValue(xyz, op); } template<typename ChildT, Index Log2Dim> template<typename ModifyOp, typename AccessorT> inline void InternalNode<ChildT, Log2Dim>::modifyValueAndCache(const Coord& xyz, const ModifyOp& op, AccessorT& acc) { const Index n = InternalNode::coordToOffset(xyz); bool hasChild = this->isChildMaskOn(n); if (!hasChild) { // Need to create a child if the tile is inactive, // in order to activate voxel (x, y, z). const bool active = this->isValueMaskOn(n); bool createChild = !active; if (!createChild) { // Need to create a child if applying the functor // to the tile value produces a different value. 
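// Test this by applying the functor to a copy of the tile value.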
const ValueType& tileVal = mNodes[n].getValue(); ValueType modifiedVal = tileVal; op(modifiedVal); createChild = !math::isExactlyEqual(tileVal, modifiedVal); } if (createChild) { hasChild = true; this->setChildNode(n, new ChildNodeType(xyz, mNodes[n].getValue(), active)); } } if (hasChild) { ChildNodeType* child = mNodes[n].getChild(); acc.insert(xyz, child); child->modifyValueAndCache(xyz, op, acc); } } template<typename ChildT, Index Log2Dim> template<typename ModifyOp> inline void InternalNode<ChildT, Log2Dim>::modifyValueAndActiveState(const Coord& xyz, const ModifyOp& op) { const Index n = InternalNode::coordToOffset(xyz); bool hasChild = this->isChildMaskOn(n); if (!hasChild) { const bool tileState = this->isValueMaskOn(n); const ValueType& tileVal = mNodes[n].getValue(); bool modifiedState = !tileState; ValueType modifiedVal = tileVal; op(modifiedVal, modifiedState); // Need to create a child if applying the functor to the tile // produces a different value or active state. if (modifiedState != tileState || !math::isExactlyEqual(modifiedVal, tileVal)) { hasChild = true; this->setChildNode(n, new ChildNodeType(xyz, tileVal, tileState)); } } if (hasChild) mNodes[n].getChild()->modifyValueAndActiveState(xyz, op); } template<typename ChildT, Index Log2Dim> template<typename ModifyOp, typename AccessorT> inline void InternalNode<ChildT, Log2Dim>::modifyValueAndActiveStateAndCache( const Coord& xyz, const ModifyOp& op, AccessorT& acc) { const Index n = InternalNode::coordToOffset(xyz); bool hasChild = this->isChildMaskOn(n); if (!hasChild) { const bool tileState = this->isValueMaskOn(n); const ValueType& tileVal = mNodes[n].getValue(); bool modifiedState = !tileState; ValueType modifiedVal = tileVal; op(modifiedVal, modifiedState); // Need to create a child if applying the functor to the tile // produces a different value or active state. if (modifiedState != tileState || !math::isExactlyEqual(modifiedVal, tileVal)) { hasChild = true; this->setChildNode(n, new ChildNodeType(xyz, tileVal, tileState)); } } if (hasChild) { ChildNodeType* child = mNodes[n].getChild(); acc.insert(xyz, child); child->modifyValueAndActiveStateAndCache(xyz, op, acc); } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::clip(const CoordBBox& clipBBox, const ValueType& background) { CoordBBox nodeBBox = this->getNodeBoundingBox(); if (!clipBBox.hasOverlap(nodeBBox)) { // This node lies completely outside the clipping region. Fill it with background tiles. this->fill(nodeBBox, background, /*active=*/false); } else if (clipBBox.isInside(nodeBBox)) { // This node lies completely inside the clipping region. Leave it intact. return; } // This node isn't completely contained inside the clipping region. // Clip tiles and children, and replace any that lie outside the region // with background tiles. for (Index pos = 0; pos < NUM_VALUES; ++pos) { const Coord xyz = this->offsetToGlobalCoord(pos); // tile or child origin CoordBBox tileBBox(xyz, xyz.offsetBy(ChildT::DIM - 1)); // tile or child bounds if (!clipBBox.hasOverlap(tileBBox)) { // This table entry lies completely outside the clipping region. // Replace it with a background tile. this->makeChildNodeEmpty(pos, background); mValueMask.setOff(pos); } else if (!clipBBox.isInside(tileBBox)) { // This table entry does not lie completely inside the clipping region // and must be clipped. 
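// A child is clipped recursively; a tile is reset to background and then refilled
// only inside its intersection with the clipping region (see below).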
if (this->isChildMaskOn(pos)) { mNodes[pos].getChild()->clip(clipBBox, background); } else { // Replace this tile with a background tile, then fill the clip region // with the tile's original value. (This might create a child branch.) tileBBox.intersect(clipBBox); const ValueType val = mNodes[pos].getValue(); const bool on = this->isValueMaskOn(pos); mNodes[pos].setValue(background); mValueMask.setOff(pos); this->fill(tileBBox, val, on); } } else { // This table entry lies completely inside the clipping region. Leave it intact. } } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::fill(const CoordBBox& bbox, const ValueType& value, bool active) { auto clippedBBox = this->getNodeBoundingBox(); clippedBBox.intersect(bbox); if (!clippedBBox) return; // Iterate over the fill region in axis-aligned, tile-sized chunks. // (The first and last chunks along each axis might be smaller than a tile.) Coord xyz, tileMin, tileMax; for (int x = clippedBBox.min().x(); x <= clippedBBox.max().x(); x = tileMax.x() + 1) { xyz.setX(x); for (int y = clippedBBox.min().y(); y <= clippedBBox.max().y(); y = tileMax.y() + 1) { xyz.setY(y); for (int z = clippedBBox.min().z(); z <= clippedBBox.max().z(); z = tileMax.z() + 1) { xyz.setZ(z); // Get the bounds of the tile that contains voxel (x, y, z). const Index n = this->coordToOffset(xyz); tileMin = this->offsetToGlobalCoord(n); tileMax = tileMin.offsetBy(ChildT::DIM - 1); if (xyz != tileMin || Coord::lessThan(clippedBBox.max(), tileMax)) { // If the box defined by (xyz, clippedBBox.max()) doesn't completely enclose // the tile to which xyz belongs, create a child node (or retrieve // the existing one). ChildT* child = nullptr; if (this->isChildMaskOff(n)) { // Replace the tile with a newly-created child that is initialized // with the tile's value and active state. child = new ChildT{xyz, mNodes[n].getValue(), this->isValueMaskOn(n)}; this->setChildNode(n, child); } else { child = mNodes[n].getChild(); } // Forward the fill request to the child. if (child) { const Coord tmp = Coord::minComponent(clippedBBox.max(), tileMax); child->fill(CoordBBox(xyz, tmp), value, active); } } else { // If the box given by (xyz, clippedBBox.max()) completely encloses // the tile to which xyz belongs, create the tile (if it // doesn't already exist) and give it the fill value. this->makeChildNodeEmpty(n, value); mValueMask.set(n, active); } } } } } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::denseFill(const CoordBBox& bbox, const ValueType& value, bool active) { auto clippedBBox = this->getNodeBoundingBox(); clippedBBox.intersect(bbox); if (!clippedBBox) return; // Iterate over the fill region in axis-aligned, tile-sized chunks. // (The first and last chunks along each axis might be smaller than a tile.) Coord xyz, tileMin, tileMax; for (int x = clippedBBox.min().x(); x <= clippedBBox.max().x(); x = tileMax.x() + 1) { xyz.setX(x); for (int y = clippedBBox.min().y(); y <= clippedBBox.max().y(); y = tileMax.y() + 1) { xyz.setY(y); for (int z = clippedBBox.min().z(); z <= clippedBBox.max().z(); z = tileMax.z() + 1) { xyz.setZ(z); // Get the table index of the tile that contains voxel (x, y, z). const auto n = this->coordToOffset(xyz); // Retrieve the child node at index n, or replace the tile at index n with a child. 
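                // (denseFill() always descends to the leaf level, so a tile here is
                // unconditionally voxelized.)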
                ChildT* child = nullptr;
                if (this->isChildMaskOn(n)) {
                    child = mNodes[n].getChild();
                } else {
                    // Replace the tile with a newly-created child that is filled
                    // with the tile's value and active state.
                    child = new ChildT{xyz, mNodes[n].getValue(), this->isValueMaskOn(n)};
                    this->setChildNode(n, child);
                }

                // Get the bounds of the tile that contains voxel (x, y, z).
                tileMin = this->offsetToGlobalCoord(n);
                tileMax = tileMin.offsetBy(ChildT::DIM - 1);

                // Forward the fill request to the child.
                child->denseFill(CoordBBox{xyz, clippedBBox.max()}, value, active);
            }
        }
    }
}


////////////////////////////////////////


template<typename ChildT, Index Log2Dim>
template<typename DenseT>
inline void
InternalNode<ChildT, Log2Dim>::copyToDense(const CoordBBox& bbox, DenseT& dense) const
{
    using DenseValueType = typename DenseT::ValueType;

    const size_t xStride = dense.xStride(), yStride = dense.yStride(), zStride = dense.zStride();
    const Coord& min = dense.bbox().min();
    for (Coord xyz = bbox.min(), max; xyz[0] <= bbox.max()[0]; xyz[0] = max[0] + 1) {
        for (xyz[1] = bbox.min()[1]; xyz[1] <= bbox.max()[1]; xyz[1] = max[1] + 1) {
            for (xyz[2] = bbox.min()[2]; xyz[2] <= bbox.max()[2]; xyz[2] = max[2] + 1) {
                const Index n = this->coordToOffset(xyz);
                // Get max coordinates of the child node that contains voxel xyz.
                max = this->offsetToGlobalCoord(n).offsetBy(ChildT::DIM-1);
                // Get the bbox of the intersection of bbox and the child node
                CoordBBox sub(xyz, Coord::minComponent(bbox.max(), max));
                if (this->isChildMaskOn(n)) {//is a child
                    mNodes[n].getChild()->copyToDense(sub, dense);
                } else {//a tile value
                    const ValueType value = mNodes[n].getValue();
                    sub.translate(-min);
                    DenseValueType* a0 = dense.data() + zStride*sub.min()[2];
                    for (Int32 x=sub.min()[0], ex=sub.max()[0]+1; x<ex; ++x) {
                        DenseValueType* a1 = a0 + x*xStride;
                        for (Int32 y=sub.min()[1], ey=sub.max()[1]+1; y<ey; ++y) {
                            DenseValueType* a2 = a1 + y*yStride;
                            for (Int32 z = sub.min()[2], ez = sub.max()[2]+1;
                                z < ez; ++z, a2 += zStride)
                            {
                                *a2 = DenseValueType(value);
                            }
                        }
                    }
                }
            }
        }
    }
}


////////////////////////////////////////


template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::writeTopology(std::ostream& os, bool toHalf) const
{
    mChildMask.save(os);
    mValueMask.save(os);

    {
        // Copy all of this node's values into an array.
        std::unique_ptr<ValueType[]> valuePtr(new ValueType[NUM_VALUES]);
        ValueType* values = valuePtr.get();
        const ValueType zero = zeroVal<ValueType>();
        for (Index i = 0; i < NUM_VALUES; ++i) {
            values[i] = (mChildMask.isOff(i) ? mNodes[i].getValue() : zero);
        }
        // Compress (optionally) and write out the contents of the array.
        io::writeCompressedValues(os, values, NUM_VALUES, mValueMask, mChildMask, toHalf);
    }
    // Write out the child nodes in order.
    for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) {
        iter->writeTopology(os, toHalf);
    }
}


template<typename ChildT, Index Log2Dim>
inline void
InternalNode<ChildT, Log2Dim>::readTopology(std::istream& is, bool fromHalf)
{
    const ValueType background = (!io::getGridBackgroundValuePtr(is) ?
zeroVal<ValueType>() : *static_cast<const ValueType*>(io::getGridBackgroundValuePtr(is))); mChildMask.load(is); mValueMask.load(is); if (io::getFormatVersion(is) < OPENVDB_FILE_VERSION_INTERNALNODE_COMPRESSION) { for (Index i = 0; i < NUM_VALUES; ++i) { if (this->isChildMaskOn(i)) { ChildNodeType* child = new ChildNodeType(PartialCreate(), offsetToGlobalCoord(i), background); mNodes[i].setChild(child); child->readTopology(is); } else { ValueType value; is.read(reinterpret_cast<char*>(&value), sizeof(ValueType)); mNodes[i].setValue(value); } } } else { const bool oldVersion = (io::getFormatVersion(is) < OPENVDB_FILE_VERSION_NODE_MASK_COMPRESSION); const Index numValues = (oldVersion ? mChildMask.countOff() : NUM_VALUES); { // Read in (and uncompress, if necessary) all of this node's values // into a contiguous array. std::unique_ptr<ValueType[]> valuePtr(new ValueType[numValues]); ValueType* values = valuePtr.get(); io::readCompressedValues(is, values, numValues, mValueMask, fromHalf); // Copy values from the array into this node's table. if (oldVersion) { Index n = 0; for (ValueAllIter iter = this->beginValueAll(); iter; ++iter) { mNodes[iter.pos()].setValue(values[n++]); } assert(n == numValues); } else { for (ValueAllIter iter = this->beginValueAll(); iter; ++iter) { mNodes[iter.pos()].setValue(values[iter.pos()]); } } } // Read in all child nodes and insert them into the table at their proper locations. for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) { ChildNodeType* child = new ChildNodeType(PartialCreate(), iter.getCoord(), background); mNodes[iter.pos()].setChild(child); child->readTopology(is, fromHalf); } } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline const typename ChildT::ValueType& InternalNode<ChildT, Log2Dim>::getFirstValue() const { return (this->isChildMaskOn(0) ? mNodes[0].getChild()->getFirstValue() : mNodes[0].getValue()); } template<typename ChildT, Index Log2Dim> inline const typename ChildT::ValueType& InternalNode<ChildT, Log2Dim>::getLastValue() const { const Index n = NUM_VALUES - 1; return (this->isChildMaskOn(n) ? 
mNodes[n].getChild()->getLastValue() : mNodes[n].getValue()); } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::negate() { for (Index i = 0; i < NUM_VALUES; ++i) { if (this->isChildMaskOn(i)) { mNodes[i].getChild()->negate(); } else { mNodes[i].setValue(math::negative(mNodes[i].getValue())); } } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> struct InternalNode<ChildT, Log2Dim>::VoxelizeActiveTiles { VoxelizeActiveTiles(InternalNode &node) : mNode(&node) { //(*this)(tbb::blocked_range<Index>(0, NUM_VALUES));//single thread for debugging tbb::parallel_for(tbb::blocked_range<Index>(0, NUM_VALUES), *this); node.mChildMask |= node.mValueMask; node.mValueMask.setOff(); } void operator()(const tbb::blocked_range<Index> &r) const { for (Index i = r.begin(), end=r.end(); i!=end; ++i) { if (mNode->mChildMask.isOn(i)) {// Loop over node's child nodes mNode->mNodes[i].getChild()->voxelizeActiveTiles(true); } else if (mNode->mValueMask.isOn(i)) {// Loop over node's active tiles const Coord &ijk = mNode->offsetToGlobalCoord(i); ChildNodeType *child = new ChildNodeType(ijk, mNode->mNodes[i].getValue(), true); child->voxelizeActiveTiles(true); mNode->mNodes[i].setChild(child); } } } InternalNode* mNode; };// VoxelizeActiveTiles template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::voxelizeActiveTiles(bool threaded) { if (threaded) { VoxelizeActiveTiles tmp(*this); } else { for (ValueOnIter iter = this->beginValueOn(); iter; ++iter) { this->setChildNode(iter.pos(), new ChildNodeType(iter.getCoord(), iter.getValue(), true)); } for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) iter->voxelizeActiveTiles(false); } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<MergePolicy Policy> inline void InternalNode<ChildT, Log2Dim>::merge(InternalNode& other, const ValueType& background, const ValueType& otherBackground) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN switch (Policy) { case MERGE_ACTIVE_STATES: default: { for (ChildOnIter iter = other.beginChildOn(); iter; ++iter) { const Index n = iter.pos(); if (mChildMask.isOn(n)) { // Merge this node's child with the other node's child. mNodes[n].getChild()->template merge<MERGE_ACTIVE_STATES>(*iter, background, otherBackground); } else if (mValueMask.isOff(n)) { // Replace this node's inactive tile with the other node's child // and replace the other node's child with a tile of undefined value // (which is okay since the other tree is assumed to be cannibalized // in the process of merging). ChildNodeType* child = other.mNodes[n].getChild(); other.mChildMask.setOff(n); child->resetBackground(otherBackground, background); this->setChildNode(n, child); } } // Copy active tile values. for (ValueOnCIter iter = other.cbeginValueOn(); iter; ++iter) { const Index n = iter.pos(); if (mValueMask.isOff(n)) { // Replace this node's child or inactive tile with the other node's active tile. this->makeChildNodeEmpty(n, iter.getValue()); mValueMask.setOn(n); } } break; } case MERGE_NODES: { for (ChildOnIter iter = other.beginChildOn(); iter; ++iter) { const Index n = iter.pos(); if (mChildMask.isOn(n)) { // Merge this node's child with the other node's child. 
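// (The recursion continues under the same MERGE_NODES policy, which transfers
// nodes only and ignores tile values.)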
mNodes[n].getChild()->template merge<Policy>(*iter, background, otherBackground); } else { // Replace this node's tile (regardless of its active state) with // the other node's child and replace the other node's child with // a tile of undefined value (which is okay since the other tree // is assumed to be cannibalized in the process of merging). ChildNodeType* child = other.mNodes[n].getChild(); other.mChildMask.setOff(n); child->resetBackground(otherBackground, background); this->setChildNode(n, child); } } break; } case MERGE_ACTIVE_STATES_AND_NODES: { // Transfer children from the other tree to this tree. for (ChildOnIter iter = other.beginChildOn(); iter; ++iter) { const Index n = iter.pos(); if (mChildMask.isOn(n)) { // Merge this node's child with the other node's child. mNodes[n].getChild()->template merge<Policy>(*iter, background, otherBackground); } else { // Replace this node's tile with the other node's child, leaving the other // node with an inactive tile of undefined value (which is okay since // the other tree is assumed to be cannibalized in the process of merging). ChildNodeType* child = other.mNodes[n].getChild(); other.mChildMask.setOff(n); child->resetBackground(otherBackground, background); if (mValueMask.isOn(n)) { // Merge the child with this node's active tile. child->template merge<Policy>(mNodes[n].getValue(), /*on=*/true); mValueMask.setOff(n); } mChildMask.setOn(n); mNodes[n].setChild(child); } } // Merge active tiles into this tree. for (ValueOnCIter iter = other.cbeginValueOn(); iter; ++iter) { const Index n = iter.pos(); if (mChildMask.isOn(n)) { // Merge the other node's active tile into this node's child. mNodes[n].getChild()->template merge<Policy>(iter.getValue(), /*on=*/true); } else if (mValueMask.isOff(n)) { // Replace this node's inactive tile with the other node's active tile. mNodes[n].setValue(iter.getValue()); mValueMask.setOn(n); } } break; } } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } template<typename ChildT, Index Log2Dim> template<MergePolicy Policy> inline void InternalNode<ChildT, Log2Dim>::merge(const ValueType& tileValue, bool tileActive) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (Policy != MERGE_ACTIVE_STATES_AND_NODES) return; // For MERGE_ACTIVE_STATES_AND_NODES, inactive tiles in the other tree are ignored. if (!tileActive) return; // Iterate over this node's children and inactive tiles. for (ValueOffIter iter = this->beginValueOff(); iter; ++iter) { const Index n = iter.pos(); if (mChildMask.isOn(n)) { // Merge the other node's active tile into this node's child. mNodes[n].getChild()->template merge<Policy>(tileValue, /*on=*/true); } else { // Replace this node's inactive tile with the other node's active tile. iter.setValue(tileValue); mValueMask.setOn(n); } } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<typename OtherInternalNode> struct InternalNode<ChildT, Log2Dim>::TopologyUnion { using W = typename NodeMaskType::Word; struct A { inline void operator()(W &tV, const W& sV, const W& tC) const { tV = (tV | sV) & ~tC; } }; TopologyUnion(const OtherInternalNode* source, InternalNode* target) : s(source), t(target) { //(*this)(tbb::blocked_range<Index>(0, NUM_VALUES));//single thread for debugging tbb::parallel_for(tbb::blocked_range<Index>(0, NUM_VALUES), *this); // Bit processing is done in a single thread! 
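// The word-by-word update tV = (tV | sV) & ~tC below guarantees that active tile
// bits never overlap child entries (verified by the assert).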
t->mChildMask |= s->mChildMask;//serial but very fast bitwise post-process A op; t->mValueMask.foreach(s->mValueMask, t->mChildMask, op); assert((t->mValueMask & t->mChildMask).isOff());//no overlapping active tiles or child nodes } void operator()(const tbb::blocked_range<Index> &r) const { for (Index i = r.begin(), end=r.end(); i!=end; ++i) { if (s->mChildMask.isOn(i)) {// Loop over other node's child nodes const typename OtherInternalNode::ChildNodeType& other = *(s->mNodes[i].getChild()); if (t->mChildMask.isOn(i)) {//this has a child node t->mNodes[i].getChild()->topologyUnion(other); } else {// this is a tile so replace it with a child branch with identical topology ChildT* child = new ChildT(other, t->mNodes[i].getValue(), TopologyCopy()); if (t->mValueMask.isOn(i)) child->setValuesOn();//activate all values t->mNodes[i].setChild(child); } } else if (s->mValueMask.isOn(i) && t->mChildMask.isOn(i)) { t->mNodes[i].getChild()->setValuesOn(); } } } const OtherInternalNode* s; InternalNode* t; };// TopologyUnion template<typename ChildT, Index Log2Dim> template<typename OtherChildT> inline void InternalNode<ChildT, Log2Dim>::topologyUnion(const InternalNode<OtherChildT, Log2Dim>& other) { TopologyUnion<InternalNode<OtherChildT, Log2Dim> > tmp(&other, this); } template<typename ChildT, Index Log2Dim> template<typename OtherInternalNode> struct InternalNode<ChildT, Log2Dim>::TopologyIntersection { using W = typename NodeMaskType::Word; struct A { inline void operator()(W &tC, const W& sC, const W& sV, const W& tV) const { tC = (tC & (sC | sV)) | (tV & sC); } }; TopologyIntersection(const OtherInternalNode* source, InternalNode* target, const ValueType& background) : s(source), t(target), b(background) { //(*this)(tbb::blocked_range<Index>(0, NUM_VALUES));//single thread for debugging tbb::parallel_for(tbb::blocked_range<Index>(0, NUM_VALUES), *this); // Bit processing is done in a single thread! 
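// Serial post-process: rebuild the child mask word by word as
// tC = (tC & (sC | sV)) | (tV & sC), then intersect the value mask with the source's.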
A op; t->mChildMask.foreach(s->mChildMask, s->mValueMask, t->mValueMask, op); t->mValueMask &= s->mValueMask; assert((t->mValueMask & t->mChildMask).isOff());//no overlapping active tiles or child nodes } void operator()(const tbb::blocked_range<Index> &r) const { for (Index i = r.begin(), end=r.end(); i!=end; ++i) { if (t->mChildMask.isOn(i)) {// Loop over this node's child nodes ChildT* child = t->mNodes[i].getChild(); if (s->mChildMask.isOn(i)) {//other also has a child node child->topologyIntersection(*(s->mNodes[i].getChild()), b); } else if (s->mValueMask.isOff(i)) {//other is an inactive tile delete child;//convert child to an inactive tile t->mNodes[i].setValue(b); } } else if (t->mValueMask.isOn(i) && s->mChildMask.isOn(i)) {//active tile -> a branch t->mNodes[i].setChild(new ChildT(*(s->mNodes[i].getChild()), t->mNodes[i].getValue(), TopologyCopy())); } } } const OtherInternalNode* s; InternalNode* t; const ValueType& b; };// TopologyIntersection template<typename ChildT, Index Log2Dim> template<typename OtherChildT> inline void InternalNode<ChildT, Log2Dim>::topologyIntersection( const InternalNode<OtherChildT, Log2Dim>& other, const ValueType& background) { TopologyIntersection<InternalNode<OtherChildT, Log2Dim> > tmp(&other, this, background); } template<typename ChildT, Index Log2Dim> template<typename OtherInternalNode> struct InternalNode<ChildT, Log2Dim>::TopologyDifference { using W = typename NodeMaskType::Word; struct A {inline void operator()(W &tC, const W& sC, const W& sV, const W& tV) const { tC = (tC & (sC | ~sV)) | (tV & sC); } }; struct B {inline void operator()(W &tV, const W& sC, const W& sV, const W& tC) const { tV &= ~((tC & sV) | (sC | sV)); } }; TopologyDifference(const OtherInternalNode* source, InternalNode* target, const ValueType& background) : s(source), t(target), b(background) { //(*this)(tbb::blocked_range<Index>(0, NUM_VALUES));//single thread for debugging tbb::parallel_for(tbb::blocked_range<Index>(0, NUM_VALUES), *this); // Bit processing is done in a single thread! 
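// Serial post-process: snapshot the original child mask first, so the two
// word-by-word mask updates below do not feed into each other.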
const NodeMaskType oldChildMask(t->mChildMask);//important to avoid cross pollution A op1; t->mChildMask.foreach(s->mChildMask, s->mValueMask, t->mValueMask, op1); B op2; t->mValueMask.foreach(t->mChildMask, s->mValueMask, oldChildMask, op2); assert((t->mValueMask & t->mChildMask).isOff());//no overlapping active tiles or child nodes } void operator()(const tbb::blocked_range<Index> &r) const { for (Index i = r.begin(), end=r.end(); i!=end; ++i) { if (t->mChildMask.isOn(i)) {// Loop over this node's child nodes ChildT* child = t->mNodes[i].getChild(); if (s->mChildMask.isOn(i)) { child->topologyDifference(*(s->mNodes[i].getChild()), b); } else if (s->mValueMask.isOn(i)) { delete child;//convert child to an inactive tile t->mNodes[i].setValue(b); } } else if (t->mValueMask.isOn(i)) {//this is an active tile if (s->mChildMask.isOn(i)) { const typename OtherInternalNode::ChildNodeType& other = *(s->mNodes[i].getChild()); ChildT* child = new ChildT(other.origin(), t->mNodes[i].getValue(), true); child->topologyDifference(other, b); t->mNodes[i].setChild(child);//replace the active tile with a child branch } } } } const OtherInternalNode* s; InternalNode* t; const ValueType& b; };// TopologyDifference template<typename ChildT, Index Log2Dim> template<typename OtherChildT> inline void InternalNode<ChildT, Log2Dim>::topologyDifference(const InternalNode<OtherChildT, Log2Dim>& other, const ValueType& background) { TopologyDifference<InternalNode<OtherChildT, Log2Dim> > tmp(&other, this, background); } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<typename CombineOp> inline void InternalNode<ChildT, Log2Dim>::combine(InternalNode& other, CombineOp& op) { const ValueType zero = zeroVal<ValueType>(); CombineArgs<ValueType> args; for (Index i = 0; i < NUM_VALUES; ++i) { if (this->isChildMaskOff(i) && other.isChildMaskOff(i)) { // Both this node and the other node have constant values (tiles). // Combine the two values and store the result as this node's new tile value. op(args.setARef(mNodes[i].getValue()) .setAIsActive(isValueMaskOn(i)) .setBRef(other.mNodes[i].getValue()) .setBIsActive(other.isValueMaskOn(i))); mNodes[i].setValue(args.result()); mValueMask.set(i, args.resultIsActive()); } else if (this->isChildMaskOn(i) && other.isChildMaskOff(i)) { // Combine this node's child with the other node's constant value. ChildNodeType* child = mNodes[i].getChild(); assert(child); if (child) { child->combine(other.mNodes[i].getValue(), other.isValueMaskOn(i), op); } } else if (this->isChildMaskOff(i) && other.isChildMaskOn(i)) { // Combine this node's constant value with the other node's child. ChildNodeType* child = other.mNodes[i].getChild(); assert(child); if (child) { // Combine this node's constant value with the other node's child, // but use a new functor in which the A and B values are swapped, // since the constant value is the A value, not the B value. SwappedCombineOp<ValueType, CombineOp> swappedOp(op); child->combine(mNodes[i].getValue(), isValueMaskOn(i), swappedOp); // Steal the other node's child. other.mChildMask.setOff(i); other.mNodes[i].setValue(zero); this->setChildNode(i, child); } } else /*if (isChildMaskOn(i) && other.isChildMaskOn(i))*/ { // Combine this node's child with the other node's child. 
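// (Both table entries are child nodes; recurse so that op is applied throughout
// the two subtrees.)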
ChildNodeType *child = mNodes[i].getChild(), *otherChild = other.mNodes[i].getChild(); assert(child); assert(otherChild); if (child && otherChild) { child->combine(*otherChild, op); } } } } template<typename ChildT, Index Log2Dim> template<typename CombineOp> inline void InternalNode<ChildT, Log2Dim>::combine(const ValueType& value, bool valueIsActive, CombineOp& op) { CombineArgs<ValueType> args; for (Index i = 0; i < NUM_VALUES; ++i) { if (this->isChildMaskOff(i)) { // Combine this node's constant value with the given constant value. op(args.setARef(mNodes[i].getValue()) .setAIsActive(isValueMaskOn(i)) .setBRef(value) .setBIsActive(valueIsActive)); mNodes[i].setValue(args.result()); mValueMask.set(i, args.resultIsActive()); } else /*if (isChildMaskOn(i))*/ { // Combine this node's child with the given constant value. ChildNodeType* child = mNodes[i].getChild(); assert(child); if (child) child->combine(value, valueIsActive, op); } } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<typename CombineOp, typename OtherNodeType> inline void InternalNode<ChildT, Log2Dim>::combine2(const InternalNode& other0, const OtherNodeType& other1, CombineOp& op) { CombineArgs<ValueType, typename OtherNodeType::ValueType> args; for (Index i = 0; i < NUM_VALUES; ++i) { if (other0.isChildMaskOff(i) && other1.isChildMaskOff(i)) { op(args.setARef(other0.mNodes[i].getValue()) .setAIsActive(other0.isValueMaskOn(i)) .setBRef(other1.mNodes[i].getValue()) .setBIsActive(other1.isValueMaskOn(i))); // Replace child i with a constant value. this->makeChildNodeEmpty(i, args.result()); mValueMask.set(i, args.resultIsActive()); } else { if (this->isChildMaskOff(i)) { // Add a new child with the same coordinates, etc. as the other node's child. const Coord& childOrigin = other0.isChildMaskOn(i) ? other0.mNodes[i].getChild()->origin() : other1.mNodes[i].getChild()->origin(); this->setChildNode(i, new ChildNodeType(childOrigin, mNodes[i].getValue())); } if (other0.isChildMaskOff(i)) { // Combine node1's child with node0's constant value // and write the result into child i. mNodes[i].getChild()->combine2(other0.mNodes[i].getValue(), *other1.mNodes[i].getChild(), other0.isValueMaskOn(i), op); } else if (other1.isChildMaskOff(i)) { // Combine node0's child with node1's constant value // and write the result into child i. mNodes[i].getChild()->combine2(*other0.mNodes[i].getChild(), other1.mNodes[i].getValue(), other1.isValueMaskOn(i), op); } else { // Combine node0's child with node1's child // and write the result into child i. mNodes[i].getChild()->combine2(*other0.mNodes[i].getChild(), *other1.mNodes[i].getChild(), op); } } } } template<typename ChildT, Index Log2Dim> template<typename CombineOp, typename OtherNodeType> inline void InternalNode<ChildT, Log2Dim>::combine2(const ValueType& value, const OtherNodeType& other, bool valueIsActive, CombineOp& op) { CombineArgs<ValueType, typename OtherNodeType::ValueType> args; for (Index i = 0; i < NUM_VALUES; ++i) { if (other.isChildMaskOff(i)) { op(args.setARef(value) .setAIsActive(valueIsActive) .setBRef(other.mNodes[i].getValue()) .setBIsActive(other.isValueMaskOn(i))); // Replace child i with a constant value. this->makeChildNodeEmpty(i, args.result()); mValueMask.set(i, args.resultIsActive()); } else { typename OtherNodeType::ChildNodeType* otherChild = other.mNodes[i].getChild(); assert(otherChild); if (this->isChildMaskOff(i)) { // Add a new child with the same coordinates, etc. // as the other node's child. 
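// (The deep copy provides the correct origin and topology; its values are
// overwritten by the combine2() call below.)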
this->setChildNode(i, new ChildNodeType(*otherChild)); } // Combine the other node's child with a constant value // and write the result into child i. mNodes[i].getChild()->combine2(value, *otherChild, valueIsActive, op); } } } template<typename ChildT, Index Log2Dim> template<typename CombineOp, typename OtherValueType> inline void InternalNode<ChildT, Log2Dim>::combine2(const InternalNode& other, const OtherValueType& value, bool valueIsActive, CombineOp& op) { CombineArgs<ValueType, OtherValueType> args; for (Index i = 0; i < NUM_VALUES; ++i) { if (other.isChildMaskOff(i)) { op(args.setARef(other.mNodes[i].getValue()) .setAIsActive(other.isValueMaskOn(i)) .setBRef(value) .setBIsActive(valueIsActive)); // Replace child i with a constant value. this->makeChildNodeEmpty(i, args.result()); mValueMask.set(i, args.resultIsActive()); } else { ChildNodeType* otherChild = other.mNodes[i].getChild(); assert(otherChild); if (this->isChildMaskOff(i)) { // Add a new child with the same coordinates, etc. as the other node's child. this->setChildNode(i, new ChildNodeType(otherChild->origin(), mNodes[i].getValue())); } // Combine the other node's child with a constant value // and write the result into child i. mNodes[i].getChild()->combine2(*otherChild, value, valueIsActive, op); } } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<typename BBoxOp> inline void InternalNode<ChildT, Log2Dim>::visitActiveBBox(BBoxOp& op) const { for (ValueOnCIter i = this->cbeginValueOn(); i; ++i) { op.template operator()<LEVEL>(CoordBBox::createCube(i.getCoord(), ChildNodeType::DIM)); } if (op.template descent<LEVEL>()) { for (ChildOnCIter i = this->cbeginChildOn(); i; ++i) i->visitActiveBBox(op); } else { for (ChildOnCIter i = this->cbeginChildOn(); i; ++i) { op.template operator()<LEVEL>(i->getNodeBoundingBox()); } } } template<typename ChildT, Index Log2Dim> template<typename VisitorOp> inline void InternalNode<ChildT, Log2Dim>::visit(VisitorOp& op) { doVisit<InternalNode, VisitorOp, ChildAllIter>(*this, op); } template<typename ChildT, Index Log2Dim> template<typename VisitorOp> inline void InternalNode<ChildT, Log2Dim>::visit(VisitorOp& op) const { doVisit<const InternalNode, VisitorOp, ChildAllCIter>(*this, op); } template<typename ChildT, Index Log2Dim> template<typename NodeT, typename VisitorOp, typename ChildAllIterT> inline void InternalNode<ChildT, Log2Dim>::doVisit(NodeT& self, VisitorOp& op) { typename NodeT::ValueType val; for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { if (op(iter)) continue; if (typename ChildAllIterT::ChildNodeType* child = iter.probeChild(val)) { child->visit(op); } } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<typename OtherNodeType, typename VisitorOp> inline void InternalNode<ChildT, Log2Dim>::visit2Node(OtherNodeType& other, VisitorOp& op) { doVisit2Node<InternalNode, OtherNodeType, VisitorOp, ChildAllIter, typename OtherNodeType::ChildAllIter>(*this, other, op); } template<typename ChildT, Index Log2Dim> template<typename OtherNodeType, typename VisitorOp> inline void InternalNode<ChildT, Log2Dim>::visit2Node(OtherNodeType& other, VisitorOp& op) const { doVisit2Node<const InternalNode, OtherNodeType, VisitorOp, ChildAllCIter, typename OtherNodeType::ChildAllCIter>(*this, other, op); } template<typename ChildT, Index Log2Dim> template< typename NodeT, typename OtherNodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> inline void 
InternalNode<ChildT, Log2Dim>::doVisit2Node(NodeT& self, OtherNodeT& other, VisitorOp& op) { // Allow the two nodes to have different ValueTypes, but not different dimensions. static_assert(OtherNodeT::NUM_VALUES == NodeT::NUM_VALUES, "visit2() requires nodes to have the same dimensions"); static_assert(OtherNodeT::LEVEL == NodeT::LEVEL, "visit2() requires nodes to be at the same tree level"); typename NodeT::ValueType val; typename OtherNodeT::ValueType otherVal; ChildAllIterT iter = self.beginChildAll(); OtherChildAllIterT otherIter = other.beginChildAll(); for ( ; iter && otherIter; ++iter, ++otherIter) { const size_t skipBranch = static_cast<size_t>(op(iter, otherIter)); typename ChildAllIterT::ChildNodeType* child = (skipBranch & 1U) ? nullptr : iter.probeChild(val); typename OtherChildAllIterT::ChildNodeType* otherChild = (skipBranch & 2U) ? nullptr : otherIter.probeChild(otherVal); if (child != nullptr && otherChild != nullptr) { child->visit2Node(*otherChild, op); } else if (child != nullptr) { child->visit2(otherIter, op); } else if (otherChild != nullptr) { otherChild->visit2(iter, op, /*otherIsLHS=*/true); } } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<typename OtherChildAllIterType, typename VisitorOp> inline void InternalNode<ChildT, Log2Dim>::visit2(OtherChildAllIterType& otherIter, VisitorOp& op, bool otherIsLHS) { doVisit2<InternalNode, VisitorOp, ChildAllIter, OtherChildAllIterType>( *this, otherIter, op, otherIsLHS); } template<typename ChildT, Index Log2Dim> template<typename OtherChildAllIterType, typename VisitorOp> inline void InternalNode<ChildT, Log2Dim>::visit2(OtherChildAllIterType& otherIter, VisitorOp& op, bool otherIsLHS) const { doVisit2<const InternalNode, VisitorOp, ChildAllCIter, OtherChildAllIterType>( *this, otherIter, op, otherIsLHS); } template<typename ChildT, Index Log2Dim> template<typename NodeT, typename VisitorOp, typename ChildAllIterT, typename OtherChildAllIterT> inline void InternalNode<ChildT, Log2Dim>::doVisit2(NodeT& self, OtherChildAllIterT& otherIter, VisitorOp& op, bool otherIsLHS) { if (!otherIter) return; const size_t skipBitMask = (otherIsLHS ? 2U : 1U); typename NodeT::ValueType val; for (ChildAllIterT iter = self.beginChildAll(); iter; ++iter) { const size_t skipBranch = static_cast<size_t>( otherIsLHS ? op(otherIter, iter) : op(iter, otherIter)); typename ChildAllIterT::ChildNodeType* child = (skipBranch & skipBitMask) ? nullptr : iter.probeChild(val); if (child != nullptr) child->visit2(otherIter, op, otherIsLHS); } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::writeBuffers(std::ostream& os, bool toHalf) const { for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) { iter->writeBuffers(os, toHalf); } } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::readBuffers(std::istream& is, bool fromHalf) { for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) { iter->readBuffers(is, fromHalf); } } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::readBuffers(std::istream& is, const CoordBBox& clipBBox, bool fromHalf) { for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) { // Stream in the branch rooted at this child. // (We can't skip over children that lie outside the clipping region, // because buffers are serialized in depth-first order and need to be // unserialized in the same order.) 
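// Every branch is therefore read in full; the clip() call below then discards
// data that lies outside clipBBox.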
iter->readBuffers(is, clipBBox, fromHalf); } // Get this tree's background value. ValueType background = zeroVal<ValueType>(); if (const void* bgPtr = io::getGridBackgroundValuePtr(is)) { background = *static_cast<const ValueType*>(bgPtr); } this->clip(clipBBox, background); } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> void InternalNode<ChildT, Log2Dim>::getNodeLog2Dims(std::vector<Index>& dims) { dims.push_back(Log2Dim); ChildNodeType::getNodeLog2Dims(dims); } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::offsetToLocalCoord(Index n, Coord &xyz) { assert(n<(1<<3*Log2Dim)); xyz.setX(n >> 2*Log2Dim); n &= ((1<<2*Log2Dim)-1); xyz.setY(n >> Log2Dim); xyz.setZ(n & ((1<<Log2Dim)-1)); } template<typename ChildT, Index Log2Dim> inline Index InternalNode<ChildT, Log2Dim>::coordToOffset(const Coord& xyz) { return (((xyz[0] & (DIM-1u)) >> ChildNodeType::TOTAL) << 2*Log2Dim) + (((xyz[1] & (DIM-1u)) >> ChildNodeType::TOTAL) << Log2Dim) + ((xyz[2] & (DIM-1u)) >> ChildNodeType::TOTAL); } template<typename ChildT, Index Log2Dim> inline Coord InternalNode<ChildT, Log2Dim>::offsetToGlobalCoord(Index n) const { Coord local; this->offsetToLocalCoord(n, local); local <<= ChildT::TOTAL; return local + this->origin(); } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<typename ArrayT> inline void InternalNode<ChildT, Log2Dim>::getNodes(ArrayT& array) { using T = typename ArrayT::value_type; static_assert(std::is_pointer<T>::value, "argument to getNodes() must be a pointer array"); using ArrayChildT = typename std::conditional< std::is_const<typename std::remove_pointer<T>::type>::value, const ChildT, ChildT>::type; for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (std::is_same<T, ArrayChildT*>::value) { array.push_back(reinterpret_cast<T>(mNodes[iter.pos()].getChild())); } else { iter->getNodes(array);//descent } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } } template<typename ChildT, Index Log2Dim> template<typename ArrayT> inline void InternalNode<ChildT, Log2Dim>::getNodes(ArrayT& array) const { using T = typename ArrayT::value_type; static_assert(std::is_pointer<T>::value, "argument to getNodes() must be a pointer array"); static_assert(std::is_const<typename std::remove_pointer<T>::type>::value, "argument to getNodes() must be an array of const node pointers"); for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (std::is_same<T, const ChildT*>::value) { array.push_back(reinterpret_cast<T>(mNodes[iter.pos()].getChild())); } else { iter->getNodes(array);//descent } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> template<typename ArrayT> inline void InternalNode<ChildT, Log2Dim>::stealNodes(ArrayT& array, const ValueType& value, bool state) { using T = typename ArrayT::value_type; static_assert(std::is_pointer<T>::value, "argument to stealNodes() must be a pointer array"); using ArrayChildT = typename std::conditional< std::is_const<typename std::remove_pointer<T>::type>::value, const ChildT, ChildT>::type; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN for (ChildOnIter iter = this->beginChildOn(); iter; ++iter) { const Index n = iter.pos(); if (std::is_same<T, ArrayChildT*>::value) { array.push_back(reinterpret_cast<T>(mNodes[n].getChild())); mValueMask.set(n, state); mNodes[n].setValue(value); } else { 
iter->stealNodes(array, value, state);//descent } } if (std::is_same<T, ArrayChildT*>::value) mChildMask.setOff(); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } //////////////////////////////////////// template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::resetBackground(const ValueType& oldBackground, const ValueType& newBackground) { if (math::isExactlyEqual(oldBackground, newBackground)) return; for (Index i = 0; i < NUM_VALUES; ++i) { if (this->isChildMaskOn(i)) { mNodes[i].getChild()->resetBackground(oldBackground, newBackground); } else if (this->isValueMaskOff(i)) { if (math::isApproxEqual(mNodes[i].getValue(), oldBackground)) { mNodes[i].setValue(newBackground); } else if (math::isApproxEqual(mNodes[i].getValue(), math::negative(oldBackground))) { mNodes[i].setValue(math::negative(newBackground)); } } } } template<typename ChildT, Index Log2Dim> template<typename OtherChildNodeType, Index OtherLog2Dim> inline bool InternalNode<ChildT, Log2Dim>::hasSameTopology( const InternalNode<OtherChildNodeType, OtherLog2Dim>* other) const { if (Log2Dim != OtherLog2Dim || mChildMask != other->mChildMask || mValueMask != other->mValueMask) return false; for (ChildOnCIter iter = this->cbeginChildOn(); iter; ++iter) { if (!iter->hasSameTopology(other->mNodes[iter.pos()].getChild())) return false; } return true; } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::resetChildNode(Index i, ChildNodeType* child) { assert(child); if (this->isChildMaskOn(i)) { delete mNodes[i].getChild(); } else { mChildMask.setOn(i); mValueMask.setOff(i); } mNodes[i].setChild(child); } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::setChildNode(Index i, ChildNodeType* child) { assert(child); assert(mChildMask.isOff(i)); mChildMask.setOn(i); mValueMask.setOff(i); mNodes[i].setChild(child); } template<typename ChildT, Index Log2Dim> inline ChildT* InternalNode<ChildT, Log2Dim>::unsetChildNode(Index i, const ValueType& value) { if (this->isChildMaskOff(i)) { mNodes[i].setValue(value); return nullptr; } ChildNodeType* child = mNodes[i].getChild(); mChildMask.setOff(i); mNodes[i].setValue(value); return child; } template<typename ChildT, Index Log2Dim> inline void InternalNode<ChildT, Log2Dim>::makeChildNodeEmpty(Index n, const ValueType& value) { delete this->unsetChildNode(n, value); } template<typename ChildT, Index Log2Dim> inline ChildT* InternalNode<ChildT, Log2Dim>::getChildNode(Index n) { assert(this->isChildMaskOn(n)); return mNodes[n].getChild(); } template<typename ChildT, Index Log2Dim> inline const ChildT* InternalNode<ChildT, Log2Dim>::getChildNode(Index n) const { assert(this->isChildMaskOn(n)); return mNodes[n].getChild(); } } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_INTERNALNODE_HAS_BEEN_INCLUDED
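// A hedged usage sketch (illustrative, not taken verbatim from the upstream
// sources) of the getNodes() machinery defined above: callers normally reach
// it through Tree::getNodes(), which forwards to InternalNode::getNodes().
// The "grid" variable below is an assumed, pre-existing openvdb::FloatGrid.
//
// @code
// #include <openvdb/openvdb.h>
// #include <vector>
//
// std::vector<const openvdb::FloatTree::LeafNodeType*> leafNodes;
// const openvdb::FloatTree& tree = grid.tree();
// tree.getNodes(leafNodes); // collect pointers to every leaf node in the tree
// for (const auto* leaf : leafNodes) {
//     // e.g., inspect leaf->origin() or leaf->onVoxelCount()
// }
// @endcode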
129,215
C
38.239599
100
0.640529
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/tree/TreeIterator.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file tree/TreeIterator.h #ifndef OPENVDB_TREE_TREEITERATOR_HAS_BEEN_INCLUDED #define OPENVDB_TREE_TREEITERATOR_HAS_BEEN_INCLUDED #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <openvdb/version.h> #include <openvdb/Types.h> #include <algorithm> #include <sstream> #include <string> #include <type_traits> // Prior to 0.96.1, depth-bounded value iterators always descended to the leaf level // and iterated past leaf nodes. Now, they never descend past the maximum depth. // Comment out the following line to restore the older, less-efficient behavior: #define ENABLE_TREE_VALUE_DEPTH_BOUND_OPTIMIZATION namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace tree { namespace iter { template<typename HeadT, int HeadLevel> struct InvertedTree { using SubtreeT = typename InvertedTree<typename HeadT::ChildNodeType, HeadLevel-1>::Type; using Type = typename SubtreeT::template Append<HeadT>; }; template<typename HeadT> struct InvertedTree<HeadT, /*HeadLevel=*/1> { using Type = TypeList<typename HeadT::ChildNodeType, HeadT>; }; } // namespace iter //////////////////////////////////////// /// IterTraits provides the following for iterators of the standard types, /// i.e., for {Child,Value}{On,Off,All}{Iter,CIter}: /// - a NodeConverter template to convert an iterator for one type of node /// to an iterator of the same type for another type of node; for example, /// IterTraits<RootNode, RootNode::ValueOnIter>::NodeConverter<LeafNode>::Type /// is synonymous with LeafNode::ValueOnIter. /// - a begin(node) function that returns a begin iterator for a node of arbitrary type; /// for example, IterTraits<LeafNode, LeafNode::ValueOnIter>::begin(leaf) returns /// leaf.beginValueOn() /// - a getChild() function that returns a pointer to the child node to which the iterator /// is currently pointing (always null if the iterator is a Value iterator) template<typename NodeT, typename IterT> struct IterTraits { template<typename ChildT> static ChildT* getChild(const IterT&) { return nullptr; } }; template<typename NodeT> struct IterTraits<NodeT, typename NodeT::ChildOnIter> { using IterT = typename NodeT::ChildOnIter; static IterT begin(NodeT& node) { return node.beginChildOn(); } template<typename ChildT> static ChildT* getChild(const IterT& iter) { return &iter.getValue(); } template<typename OtherNodeT> struct NodeConverter { using Type = typename OtherNodeT::ChildOnIter; }; }; template<typename NodeT> struct IterTraits<NodeT, typename NodeT::ChildOnCIter> { using IterT = typename NodeT::ChildOnCIter; static IterT begin(const NodeT& node) { return node.cbeginChildOn(); } template<typename ChildT> static const ChildT* getChild(const IterT& iter) { return &iter.getValue(); } template<typename OtherNodeT> struct NodeConverter { using Type = typename OtherNodeT::ChildOnCIter; }; }; template<typename NodeT> struct IterTraits<NodeT, typename NodeT::ChildOffIter> { using IterT = typename NodeT::ChildOffIter; static IterT begin(NodeT& node) { return node.beginChildOff(); } template<typename OtherNodeT> struct NodeConverter { using Type = typename OtherNodeT::ChildOffIter; }; }; template<typename NodeT> struct IterTraits<NodeT, typename NodeT::ChildOffCIter> { using IterT = typename NodeT::ChildOffCIter; static IterT begin(const NodeT& node) { return node.cbeginChildOff(); } template<typename OtherNodeT> struct NodeConverter { using Type = typename OtherNodeT::ChildOffCIter; 
}; }; template<typename NodeT> struct IterTraits<NodeT, typename NodeT::ChildAllIter> { using IterT = typename NodeT::ChildAllIter; static IterT begin(NodeT& node) { return node.beginChildAll(); } template<typename ChildT> static ChildT* getChild(const IterT& iter) { typename IterT::NonConstValueType val; return iter.probeChild(val); } template<typename OtherNodeT> struct NodeConverter { using Type = typename OtherNodeT::ChildAllIter; }; }; template<typename NodeT> struct IterTraits<NodeT, typename NodeT::ChildAllCIter> { using IterT = typename NodeT::ChildAllCIter; static IterT begin(const NodeT& node) { return node.cbeginChildAll(); } template<typename ChildT> static ChildT* getChild(const IterT& iter) { typename IterT::NonConstValueType val; return iter.probeChild(val); } template<typename OtherNodeT> struct NodeConverter { using Type = typename OtherNodeT::ChildAllCIter; }; }; template<typename NodeT> struct IterTraits<NodeT, typename NodeT::ValueOnIter> { using IterT = typename NodeT::ValueOnIter; static IterT begin(NodeT& node) { return node.beginValueOn(); } template<typename OtherNodeT> struct NodeConverter { using Type = typename OtherNodeT::ValueOnIter; }; }; template<typename NodeT> struct IterTraits<NodeT, typename NodeT::ValueOnCIter> { using IterT = typename NodeT::ValueOnCIter; static IterT begin(const NodeT& node) { return node.cbeginValueOn(); } template<typename OtherNodeT> struct NodeConverter { using Type = typename OtherNodeT::ValueOnCIter; }; }; template<typename NodeT> struct IterTraits<NodeT, typename NodeT::ValueOffIter> { using IterT = typename NodeT::ValueOffIter; static IterT begin(NodeT& node) { return node.beginValueOff(); } template<typename OtherNodeT> struct NodeConverter { using Type = typename OtherNodeT::ValueOffIter; }; }; template<typename NodeT> struct IterTraits<NodeT, typename NodeT::ValueOffCIter> { using IterT = typename NodeT::ValueOffCIter; static IterT begin(const NodeT& node) { return node.cbeginValueOff(); } template<typename OtherNodeT> struct NodeConverter { using Type = typename OtherNodeT::ValueOffCIter; }; }; template<typename NodeT> struct IterTraits<NodeT, typename NodeT::ValueAllIter> { using IterT = typename NodeT::ValueAllIter; static IterT begin(NodeT& node) { return node.beginValueAll(); } template<typename OtherNodeT> struct NodeConverter { using Type = typename OtherNodeT::ValueAllIter; }; }; template<typename NodeT> struct IterTraits<NodeT, typename NodeT::ValueAllCIter> { using IterT = typename NodeT::ValueAllCIter; static IterT begin(const NodeT& node) { return node.cbeginValueAll(); } template<typename OtherNodeT> struct NodeConverter { using Type = typename OtherNodeT::ValueAllCIter; }; }; //////////////////////////////////////// /// @brief An IterListItem is an element of a compile-time linked list of iterators /// to nodes of different types. /// /// The list is constructed by traversing the template hierarchy of a Tree in reverse order, /// so typically the elements will be a LeafNode iterator of some type (e.g., ValueOnCIter), /// followed by one or more InternalNode iterators of the same type, followed by a RootNode /// iterator of the same type. /// /// The length of the list is fixed at compile time, and because it is implemented using /// nested, templated classes, much of the list traversal logic can be optimized away. 
template<typename PrevItemT, typename NodeVecT, size_t VecSize, Index _Level> class IterListItem { public: /// The type of iterator stored in the previous list item using PrevIterT = typename PrevItemT::IterT; /// The type of node (non-const) whose iterator is stored in this list item using _NodeT = typename NodeVecT::Front; /// The type of iterator stored in this list item (e.g., InternalNode::ValueOnCIter) using IterT = typename IterTraits<typename PrevIterT::NonConstNodeType, PrevIterT>::template NodeConverter<_NodeT>::Type; /// The type of node (const or non-const) over which IterT iterates (e.g., const RootNode<...>) using NodeT = typename IterT::NodeType; /// The type of the node with const qualifiers removed ("Non-Const") using NCNodeT = typename IterT::NonConstNodeType; /// The type of value (with const qualifiers removed) to which the iterator points using NCValueT = typename IterT::NonConstValueType; /// NodeT's child node type, with the same constness (e.g., const InternalNode<...>) using ChildT = typename CopyConstness<NodeT, typename NodeT::ChildNodeType>::Type; /// NodeT's child node type with const qualifiers removed using NCChildT = typename CopyConstness<NCNodeT, typename NCNodeT::ChildNodeType>::Type; using ITraits = IterTraits<NCNodeT, IterT>; /// NodeT's level in its tree (0 = LeafNode) static const Index Level = _Level; IterListItem(PrevItemT* prev): mNext(this), mPrev(prev) {} IterListItem(const IterListItem& other): mIter(other.mIter), mNext(other.mNext), mPrev(nullptr) {} IterListItem& operator=(const IterListItem& other) { if (&other != this) { mIter = other.mIter; mNext = other.mNext; mPrev = nullptr; ///< @note external call to updateBackPointers() required } return *this; } void updateBackPointers(PrevItemT* prev) { mPrev = prev; mNext.updateBackPointers(this); } void setIter(const IterT& iter) { mIter = iter; } template<typename OtherIterT> void setIter(const OtherIterT& iter) { mNext.setIter(iter); } /// Return the node over which this list element's iterator iterates. void getNode(Index lvl, NodeT*& node) const { node = (lvl <= Level) ? mIter.getParentNode() : nullptr; } /// Return the node over which one of the following list elements' iterator iterates. template<typename OtherNodeT> void getNode(Index lvl, OtherNodeT*& node) const { mNext.getNode(lvl, node); } /// @brief Initialize the iterator for level @a lvl of the tree with the node /// over which the corresponding iterator of @a otherListItem is iterating. /// /// For example, if @a otherListItem contains a LeafNode::ValueOnIter, /// initialize this list's leaf iterator with the same LeafNode. template<typename OtherIterListItemT> void initLevel(Index lvl, OtherIterListItemT& otherListItem) { if (lvl == Level) { const NodeT* node = nullptr; otherListItem.getNode(lvl, node); mIter = (node == nullptr) ? IterT() : ITraits::begin(*const_cast<NodeT*>(node)); } else { // Forward to one of the following list elements. mNext.initLevel(lvl, otherListItem); } } /// Return The table offset of the iterator at level @a lvl of the tree. Index pos(Index lvl) const { return (lvl == Level) ? mIter.pos() : mNext.pos(lvl); } /// Return @c true if the iterator at level @a lvl of the tree has not yet reached its end. bool test(Index lvl) const { return (lvl == Level) ? mIter.test() : mNext.test(lvl); } /// Increment the iterator at level @a lvl of the tree. bool next(Index lvl) { return (lvl == Level) ? 
mIter.next() : mNext.next(lvl); } /// @brief If the iterator at level @a lvl of the tree points to a child node, /// initialize the next iterator in this list with that child node. bool down(Index lvl) { if (lvl == Level && mPrev != nullptr && mIter) { if (ChildT* child = ITraits::template getChild<ChildT>(mIter)) { mPrev->setIter(PrevItemT::ITraits::begin(*child)); return true; } } return (lvl > Level) ? mNext.down(lvl) : false; } /// @brief Return the global coordinates of the voxel or tile to which the iterator /// at level @a lvl of the tree is currently pointing. Coord getCoord(Index lvl) const { return (lvl == Level) ? mIter.getCoord() : mNext.getCoord(lvl); } Index getChildDim(Index lvl) const { return (lvl == Level) ? NodeT::getChildDim() : mNext.getChildDim(lvl); } /// Return the number of (virtual) voxels spanned by a tile value or child node Index64 getVoxelCount(Index lvl) const { return (lvl == Level) ? ChildT::NUM_VOXELS : mNext.getVoxelCount(lvl); } /// Return @c true if the iterator at level @a lvl of the tree points to an active value. bool isValueOn(Index lvl) const { return (lvl == Level) ? mIter.isValueOn() : mNext.isValueOn(lvl); } /// Return the value to which the iterator at level @a lvl of the tree points. const NCValueT& getValue(Index lvl) const { if (lvl == Level) return mIter.getValue(); return mNext.getValue(lvl); } /// @brief Set the value (to @a val) to which the iterator at level @a lvl /// of the tree points and mark the value as active. /// @note Not valid when @c IterT is a const iterator type void setValue(Index lvl, const NCValueT& val) const { if (lvl == Level) mIter.setValue(val); else mNext.setValue(lvl, val); } /// @brief Set the value (to @a val) to which the iterator at level @a lvl of the tree /// points and mark the value as active if @a on is @c true, or inactive otherwise. /// @note Not valid when @c IterT is a const iterator type void setValueOn(Index lvl, bool on = true) const { if (lvl == Level) mIter.setValueOn(on); else mNext.setValueOn(lvl, on); } /// @brief Mark the value to which the iterator at level @a lvl of the tree points /// as inactive. /// @note Not valid when @c IterT is a const iterator type void setValueOff(Index lvl) const { if (lvl == Level) mIter.setValueOff(); else mNext.setValueOff(lvl); } /// @brief Apply a functor to the item to which this iterator is pointing. 
/// @note Not valid when @c IterT is a const iterator type template<typename ModifyOp> void modifyValue(Index lvl, const ModifyOp& op) const { if (lvl == Level) mIter.modifyValue(op); else mNext.modifyValue(lvl, op); } private: using RestT = typename NodeVecT::PopFront; // NodeVecT minus its first item using NextItem = IterListItem<IterListItem, RestT, VecSize - 1, Level + 1>; IterT mIter; NextItem mNext; PrevItemT* mPrev; }; /// The initial element of a compile-time linked list of iterators to nodes of different types template<typename PrevItemT, typename NodeVecT, size_t VecSize> class IterListItem<PrevItemT, NodeVecT, VecSize, /*Level=*/0U> { public: /// The type of iterator stored in the previous list item using PrevIterT = typename PrevItemT::IterT; /// The type of node (non-const) whose iterator is stored in this list item using _NodeT = typename NodeVecT::Front; /// The type of iterator stored in this list item (e.g., InternalNode::ValueOnCIter) using IterT = typename IterTraits<typename PrevIterT::NonConstNodeType, PrevIterT>::template NodeConverter<_NodeT>::Type; /// The type of node (const or non-const) over which IterT iterates (e.g., const RootNode<...>) using NodeT = typename IterT::NodeType; /// The type of the node with const qualifiers removed ("Non-Const") using NCNodeT = typename IterT::NonConstNodeType; /// The type of value (with const qualifiers removed) to which the iterator points using NCValueT = typename IterT::NonConstValueType; using ITraits = IterTraits<NCNodeT, IterT>; /// NodeT's level in its tree (0 = LeafNode) static const Index Level = 0; IterListItem(PrevItemT*): mNext(this), mPrev(nullptr) {} IterListItem(const IterListItem& other): mIter(other.mIter), mNext(other.mNext), mPrev(nullptr) {} IterListItem& operator=(const IterListItem& other) { if (&other != this) { mIter = other.mIter; mNext = other.mNext; mPrev = nullptr; } return *this; } void updateBackPointers(PrevItemT* = nullptr) { mPrev = nullptr; mNext.updateBackPointers(this); } void setIter(const IterT& iter) { mIter = iter; } template<typename OtherIterT> void setIter(const OtherIterT& iter) { mNext.setIter(iter); } void getNode(Index lvl, NodeT*& node) const { node = (lvl == 0) ? mIter.getParentNode() : nullptr; } template<typename OtherNodeT> void getNode(Index lvl, OtherNodeT*& node) const { mNext.getNode(lvl, node); } template<typename OtherIterListItemT> void initLevel(Index lvl, OtherIterListItemT& otherListItem) { if (lvl == 0) { const NodeT* node = nullptr; otherListItem.getNode(lvl, node); mIter = (node == nullptr) ? IterT() : ITraits::begin(*const_cast<NodeT*>(node)); } else { mNext.initLevel(lvl, otherListItem); } } Index pos(Index lvl) const { return (lvl == 0) ? mIter.pos() : mNext.pos(lvl); } bool test(Index lvl) const { return (lvl == 0) ? mIter.test() : mNext.test(lvl); } bool next(Index lvl) { return (lvl == 0) ? mIter.next() : mNext.next(lvl); } bool down(Index lvl) { return (lvl == 0) ? false : mNext.down(lvl); } Coord getCoord(Index lvl) const { return (lvl == 0) ? mIter.getCoord() : mNext.getCoord(lvl); } Index getChildDim(Index lvl) const { return (lvl == 0) ? NodeT::getChildDim() : mNext.getChildDim(lvl); } Index64 getVoxelCount(Index lvl) const { return (lvl == 0) ? 1 : mNext.getVoxelCount(lvl); } bool isValueOn(Index lvl) const { return (lvl == 0) ? 
mIter.isValueOn() : mNext.isValueOn(lvl); } const NCValueT& getValue(Index lvl) const { if (lvl == 0) return mIter.getValue(); return mNext.getValue(lvl); } void setValue(Index lvl, const NCValueT& val) const { if (lvl == 0) mIter.setValue(val); else mNext.setValue(lvl, val); } void setValueOn(Index lvl, bool on = true) const { if (lvl == 0) mIter.setValueOn(on); else mNext.setValueOn(lvl, on); } void setValueOff(Index lvl) const { if (lvl == 0) mIter.setValueOff(); else mNext.setValueOff(lvl); } template<typename ModifyOp> void modifyValue(Index lvl, const ModifyOp& op) const { if (lvl == 0) mIter.modifyValue(op); else mNext.modifyValue(lvl, op); } private: using RestT = typename NodeVecT::PopFront; // NodeVecT minus its first item using NextItem = IterListItem<IterListItem, RestT, VecSize - 1, /*Level=*/1>; IterT mIter; NextItem mNext; PrevItemT* mPrev; }; /// The final element of a compile-time linked list of iterators to nodes of different types template<typename PrevItemT, typename NodeVecT, Index _Level> class IterListItem<PrevItemT, NodeVecT, /*VecSize=*/1, _Level> { public: using _NodeT = typename NodeVecT::Front; /// The type of iterator stored in the previous list item using PrevIterT = typename PrevItemT::IterT; /// The type of iterator stored in this list item (e.g., RootNode::ValueOnCIter) using IterT = typename IterTraits<typename PrevIterT::NonConstNodeType, PrevIterT>::template NodeConverter<_NodeT>::Type; /// The type of node over which IterT iterates (e.g., const RootNode<...>) using NodeT = typename IterT::NodeType; /// The type of the node with const qualifiers removed ("Non-Const") using NCNodeT = typename IterT::NonConstNodeType; /// The type of value (with const qualifiers removed) to which the iterator points using NCValueT = typename IterT::NonConstValueType; /// NodeT's child node type, with the same constness (e.g., const InternalNode<...>) using ChildT = typename CopyConstness<NodeT, typename NodeT::ChildNodeType>::Type; /// NodeT's child node type with const qualifiers removed using NCChildT = typename CopyConstness<NCNodeT, typename NCNodeT::ChildNodeType>::Type; using ITraits = IterTraits<NCNodeT, IterT>; /// NodeT's level in its tree (0 = LeafNode) static const Index Level = _Level; IterListItem(PrevItemT* prev): mPrev(prev) {} IterListItem(const IterListItem& other): mIter(other.mIter), mPrev(nullptr) {} IterListItem& operator=(const IterListItem& other) { if (&other != this) { mIter = other.mIter; mPrev = nullptr; ///< @note external call to updateBackPointers() required } return *this; } void updateBackPointers(PrevItemT* prev) { mPrev = prev; } // The following method specializations differ from the default template // implementations mainly in that they don't forward. void setIter(const IterT& iter) { mIter = iter; } void getNode(Index lvl, NodeT*& node) const { node = (lvl <= Level) ? mIter.getParentNode() : nullptr; } template<typename OtherIterListItemT> void initLevel(Index lvl, OtherIterListItemT& otherListItem) { if (lvl == Level) { const NodeT* node = nullptr; otherListItem.getNode(lvl, node); mIter = (node == nullptr) ? IterT() : ITraits::begin(*const_cast<NodeT*>(node)); } } Index pos(Index lvl) const { return (lvl == Level) ? mIter.pos() : Index(-1); } bool test(Index lvl) const { return (lvl == Level) ? mIter.test() : false; } bool next(Index lvl) { return (lvl == Level) ? 
mIter.next() : false; } bool down(Index lvl) { if (lvl == Level && mPrev != nullptr && mIter) { if (ChildT* child = ITraits::template getChild<ChildT>(mIter)) { mPrev->setIter(PrevItemT::ITraits::begin(*child)); return true; } } return false; } Coord getCoord(Index lvl) const { return (lvl == Level) ? mIter.getCoord() : Coord(); } Index getChildDim(Index lvl) const { return (lvl == Level) ? NodeT::getChildDim() : 0; } Index64 getVoxelCount(Index lvl) const { return (lvl == Level) ? ChildT::NUM_VOXELS : 0; } bool isValueOn(Index lvl) const { return (lvl == Level) ? mIter.isValueOn() : false; } const NCValueT& getValue(Index lvl) const { assert(lvl == Level); (void)lvl; // avoid unused variable warning in optimized builds return mIter.getValue(); } void setValue(Index lvl, const NCValueT& val) const { if (lvl == Level) mIter.setValue(val); } void setValueOn(Index lvl, bool on = true) const { if (lvl == Level) mIter.setValueOn(on); } void setValueOff(Index lvl) const { if (lvl == Level) mIter.setValueOff(); } template<typename ModifyOp> void modifyValue(Index lvl, const ModifyOp& op) const { if (lvl == Level) mIter.modifyValue(op); } private: IterT mIter; PrevItemT* mPrev; }; //////////////////////////////////////// //#define DEBUG_TREE_VALUE_ITERATOR /// @brief Base class for tree-traversal iterators over tile and voxel values template<typename _TreeT, typename _ValueIterT> class TreeValueIteratorBase { public: using TreeT = _TreeT; using ValueIterT = _ValueIterT; using NodeT = typename ValueIterT::NodeType; using ValueT = typename ValueIterT::NonConstValueType; using ChildOnIterT = typename NodeT::ChildOnCIter; static const Index ROOT_LEVEL = NodeT::LEVEL; static_assert(ValueIterT::NodeType::LEVEL == ROOT_LEVEL, "invalid value iterator node type"); static const Index LEAF_LEVEL = 0, ROOT_DEPTH = 0, LEAF_DEPTH = ROOT_LEVEL; TreeValueIteratorBase(TreeT&); TreeValueIteratorBase(const TreeValueIteratorBase& other); TreeValueIteratorBase& operator=(const TreeValueIteratorBase& other); /// Specify the depth of the highest level of the tree to which to ascend (depth 0 = root). void setMinDepth(Index minDepth); /// Return the depth of the highest level of the tree to which this iterator ascends. Index getMinDepth() const { return ROOT_LEVEL - Index(mMaxLevel); } /// Specify the depth of the lowest level of the tree to which to descend (depth 0 = root). void setMaxDepth(Index maxDepth); /// Return the depth of the lowest level of the tree to which this iterator ascends. Index getMaxDepth() const { return ROOT_LEVEL - Index(mMinLevel); } //@{ /// Return @c true if this iterator is not yet exhausted. bool test() const { return mValueIterList.test(mLevel); } operator bool() const { return this->test(); } //@} /// @brief Advance to the next tile or voxel value. /// Return @c true if this iterator is not yet exhausted. bool next(); /// Advance to the next tile or voxel value. TreeValueIteratorBase& operator++() { this->next(); return *this; } /// @brief Return the level in the tree (0 = leaf) of the node to which /// this iterator is currently pointing. Index getLevel() const { return mLevel; } /// @brief Return the depth in the tree (0 = root) of the node to which /// this iterator is currently pointing. Index getDepth() const { return ROOT_LEVEL - mLevel; } static Index getLeafDepth() { return LEAF_DEPTH; } /// @brief Return in @a node a pointer to the node over which this iterator is /// currently iterating or one of that node's parents, as determined by @a NodeType. 
/// @return a null pointer if @a NodeType specifies a node at a lower level /// of the tree than that given by getLevel(). template<typename NodeType> void getNode(NodeType*& node) const { mValueIterList.getNode(mLevel, node); } /// @brief Return the global coordinates of the voxel or tile to which /// this iterator is currently pointing. Coord getCoord() const { return mValueIterList.getCoord(mLevel); } /// @brief Return in @a bbox the axis-aligned bounding box of /// the voxel or tile to which this iterator is currently pointing. /// @return false if the bounding box is empty. bool getBoundingBox(CoordBBox&) const; /// @brief Return the axis-aligned bounding box of the voxel or tile to which /// this iterator is currently pointing. CoordBBox getBoundingBox() const { CoordBBox b; this->getBoundingBox(b); return b; } /// Return the number of (virtual) voxels corresponding to the value Index64 getVoxelCount() const { return mValueIterList.getVoxelCount(mLevel);} /// Return @c true if this iterator is currently pointing to a (non-leaf) tile value. bool isTileValue() const { return mLevel != 0 && this->test(); } /// Return @c true if this iterator is currently pointing to a (leaf) voxel value. bool isVoxelValue() const { return mLevel == 0 && this->test(); } /// Return @c true if the value to which this iterator is currently pointing is active. bool isValueOn() const { return mValueIterList.isValueOn(mLevel); } //@{ /// Return the tile or voxel value to which this iterator is currently pointing. const ValueT& getValue() const { return mValueIterList.getValue(mLevel); } const ValueT& operator*() const { return this->getValue(); } const ValueT* operator->() const { return &(this->operator*()); } //@} /// @brief Change the tile or voxel value to which this iterator is currently pointing /// and mark it as active. void setValue(const ValueT& val) const { mValueIterList.setValue(mLevel, val); } /// @brief Change the active/inactive state of the tile or voxel value to which /// this iterator is currently pointing. void setActiveState(bool on) const { mValueIterList.setValueOn(mLevel, on); } /// Mark the tile or voxel value to which this iterator is currently pointing as inactive. void setValueOff() const { mValueIterList.setValueOff(mLevel); } /// @brief Apply a functor to the item to which this iterator is pointing. /// (Not valid for const iterators.) /// @param op a functor of the form <tt>void op(ValueType&) const</tt> that modifies /// its argument in place /// @see Tree::modifyValue() template<typename ModifyOp> void modifyValue(const ModifyOp& op) const { mValueIterList.modifyValue(mLevel, op); } /// Return a pointer to the tree over which this iterator is iterating. TreeT* getTree() const { return mTree; } /// Return a string (for debugging, mainly) describing this iterator's current state. 
std::string summary() const; private: bool advance(bool dontIncrement = false); using InvTreeT = typename iter::InvertedTree<NodeT, NodeT::LEVEL>::Type; struct PrevChildItem { using IterT = ChildOnIterT; }; struct PrevValueItem { using IterT = ValueIterT; }; IterListItem<PrevChildItem, InvTreeT, /*VecSize=*/ROOT_LEVEL+1, /*Level=*/0> mChildIterList; IterListItem<PrevValueItem, InvTreeT, /*VecSize=*/ROOT_LEVEL+1, /*Level=*/0> mValueIterList; Index mLevel; int mMinLevel, mMaxLevel; TreeT* mTree; }; // class TreeValueIteratorBase template<typename TreeT, typename ValueIterT> inline TreeValueIteratorBase<TreeT, ValueIterT>::TreeValueIteratorBase(TreeT& tree): mChildIterList(nullptr), mValueIterList(nullptr), mLevel(ROOT_LEVEL), mMinLevel(int(LEAF_LEVEL)), mMaxLevel(int(ROOT_LEVEL)), mTree(&tree) { mChildIterList.setIter(IterTraits<NodeT, ChildOnIterT>::begin(tree.root())); mValueIterList.setIter(IterTraits<NodeT, ValueIterT>::begin(tree.root())); this->advance(/*dontIncrement=*/true); } template<typename TreeT, typename ValueIterT> inline TreeValueIteratorBase<TreeT, ValueIterT>::TreeValueIteratorBase(const TreeValueIteratorBase& other): mChildIterList(other.mChildIterList), mValueIterList(other.mValueIterList), mLevel(other.mLevel), mMinLevel(other.mMinLevel), mMaxLevel(other.mMaxLevel), mTree(other.mTree) { mChildIterList.updateBackPointers(); mValueIterList.updateBackPointers(); } template<typename TreeT, typename ValueIterT> inline TreeValueIteratorBase<TreeT, ValueIterT>& TreeValueIteratorBase<TreeT, ValueIterT>::operator=(const TreeValueIteratorBase& other) { if (&other != this) { mChildIterList = other.mChildIterList; mValueIterList = other.mValueIterList; mLevel = other.mLevel; mMinLevel = other.mMinLevel; mMaxLevel = other.mMaxLevel; mTree = other.mTree; mChildIterList.updateBackPointers(); mValueIterList.updateBackPointers(); } return *this; } template<typename TreeT, typename ValueIterT> inline void TreeValueIteratorBase<TreeT, ValueIterT>::setMinDepth(Index minDepth) { mMaxLevel = int(ROOT_LEVEL - minDepth); // level = ROOT_LEVEL - depth if (int(mLevel) > mMaxLevel) this->next(); } template<typename TreeT, typename ValueIterT> inline void TreeValueIteratorBase<TreeT, ValueIterT>::setMaxDepth(Index maxDepth) { // level = ROOT_LEVEL - depth mMinLevel = int(ROOT_LEVEL - std::min(maxDepth, this->getLeafDepth())); if (int(mLevel) < mMinLevel) this->next(); } template<typename TreeT, typename ValueIterT> inline bool TreeValueIteratorBase<TreeT, ValueIterT>::next() { do { if (!this->advance()) return false; } while (int(mLevel) < mMinLevel || int(mLevel) > mMaxLevel); return true; } template<typename TreeT, typename ValueIterT> inline bool TreeValueIteratorBase<TreeT, ValueIterT>::advance(bool dontIncrement) { bool recurse = false; do { recurse = false; Index vPos = mValueIterList.pos(mLevel), cPos = mChildIterList.pos(mLevel); if (vPos == cPos && mChildIterList.test(mLevel)) { /// @todo Once ValueOff iterators properly skip child pointers, remove this block. mValueIterList.next(mLevel); vPos = mValueIterList.pos(mLevel); } if (vPos < cPos) { if (dontIncrement) return true; if (mValueIterList.next(mLevel)) { if (mValueIterList.pos(mLevel) == cPos && mChildIterList.test(mLevel)) { /// @todo Once ValueOff iterators properly skip child pointers, /// remove this block. mValueIterList.next(mLevel); } // If there is a next value and it precedes the next child, return. 
if (mValueIterList.pos(mLevel) < cPos) return true; } } else { // Advance to the next child, which may or may not precede the next value. if (!dontIncrement) mChildIterList.next(mLevel); } #ifdef DEBUG_TREE_VALUE_ITERATOR std::cout << "\n" << this->summary() << std::flush; #endif // Descend to the lowest level at which the next value precedes the next child. while (mChildIterList.pos(mLevel) < mValueIterList.pos(mLevel)) { #ifdef ENABLE_TREE_VALUE_DEPTH_BOUND_OPTIMIZATION if (int(mLevel) == mMinLevel) { // If the current node lies at the lowest allowed level, none of its // children can be visited, so just advance its child iterator. mChildIterList.next(mLevel); if (mValueIterList.pos(mLevel) == mChildIterList.pos(mLevel) && mChildIterList.test(mLevel)) { /// @todo Once ValueOff iterators properly skip child pointers, /// remove this block. mValueIterList.next(mLevel); } } else #endif if (mChildIterList.down(mLevel)) { --mLevel; // descend one level mValueIterList.initLevel(mLevel, mChildIterList); if (mValueIterList.pos(mLevel) == mChildIterList.pos(mLevel) && mChildIterList.test(mLevel)) { /// @todo Once ValueOff iterators properly skip child pointers, /// remove this block. mValueIterList.next(mLevel); } } else break; #ifdef DEBUG_TREE_VALUE_ITERATOR std::cout << "\n" << this->summary() << std::flush; #endif } // Ascend to the nearest level at which one of the iterators is not yet exhausted. while (!mChildIterList.test(mLevel) && !mValueIterList.test(mLevel)) { if (mLevel == ROOT_LEVEL) return false; ++mLevel; mChildIterList.next(mLevel); dontIncrement = true; recurse = true; } } while (recurse); return true; } template<typename TreeT, typename ValueIterT> inline bool TreeValueIteratorBase<TreeT, ValueIterT>::getBoundingBox(CoordBBox& bbox) const { if (!this->test()) { bbox = CoordBBox(); return false; } bbox.min() = mValueIterList.getCoord(mLevel); bbox.max() = bbox.min().offsetBy(mValueIterList.getChildDim(mLevel) - 1); return true; } template<typename TreeT, typename ValueIterT> inline std::string TreeValueIteratorBase<TreeT, ValueIterT>::summary() const { std::ostringstream ostr; for (int lvl = int(ROOT_LEVEL); lvl >= 0 && lvl >= int(mLevel); --lvl) { if (lvl == 0) ostr << "leaf"; else if (lvl == int(ROOT_LEVEL)) ostr << "root"; else ostr << "int" << (ROOT_LEVEL - lvl); ostr << " v" << mValueIterList.pos(lvl) << " c" << mChildIterList.pos(lvl); if (lvl > int(mLevel)) ostr << " / "; } if (this->test() && mValueIterList.pos(mLevel) < mChildIterList.pos(mLevel)) { if (mLevel == 0) { ostr << " " << this->getCoord(); } else { ostr << " " << this->getBoundingBox(); } } return ostr.str(); } //////////////////////////////////////// /// @brief Base class for tree-traversal iterators over all nodes template<typename _TreeT, typename RootChildOnIterT> class NodeIteratorBase { public: using TreeT = _TreeT; using RootIterT = RootChildOnIterT; using RootNodeT = typename RootIterT::NodeType; using NCRootNodeT = typename RootIterT::NonConstNodeType; static const Index ROOT_LEVEL = RootNodeT::LEVEL; using InvTreeT = typename iter::InvertedTree<NCRootNodeT, ROOT_LEVEL>::Type; static const Index LEAF_LEVEL = 0, ROOT_DEPTH = 0, LEAF_DEPTH = ROOT_LEVEL; using RootIterTraits = IterTraits<NCRootNodeT, RootIterT>; NodeIteratorBase(); NodeIteratorBase(TreeT&); NodeIteratorBase(const NodeIteratorBase& other); NodeIteratorBase& operator=(const NodeIteratorBase& other); /// Specify the depth of the highest level of the tree to which to ascend (depth 0 = root). 
void setMinDepth(Index minDepth); /// Return the depth of the highest level of the tree to which this iterator ascends. Index getMinDepth() const { return ROOT_LEVEL - Index(mMaxLevel); } /// Specify the depth of the lowest level of the tree to which to descend (depth 0 = root). void setMaxDepth(Index maxDepth); /// Return the depth of the lowest level of the tree to which this iterator ascends. Index getMaxDepth() const { return ROOT_LEVEL - Index(mMinLevel); } //@{ /// Return @c true if this iterator is not yet exhausted. bool test() const { return !mDone; } operator bool() const { return this->test(); } //@} /// @brief Advance to the next tile or voxel value. /// @return @c true if this iterator is not yet exhausted. bool next(); /// Advance the iterator to the next leaf node. void increment() { this->next(); } NodeIteratorBase& operator++() { this->increment(); return *this; } /// Increment the iterator n times. void increment(Index n) { for (Index i = 0; i < n && this->next(); ++i) {} } /// @brief Return the level in the tree (0 = leaf) of the node to which /// this iterator is currently pointing. Index getLevel() const { return mLevel; } /// @brief Return the depth in the tree (0 = root) of the node to which /// this iterator is currently pointing. Index getDepth() const { return ROOT_LEVEL - mLevel; } static Index getLeafDepth() { return LEAF_DEPTH; } /// @brief Return the global coordinates of the voxel or tile to which /// this iterator is currently pointing. Coord getCoord() const; /// @brief Return in @a bbox the axis-aligned bounding box of /// the voxel or tile to which this iterator is currently pointing. /// @return false if the bounding box is empty. bool getBoundingBox(CoordBBox& bbox) const; /// @brief Return the axis-aligned bounding box of the voxel or tile to which /// this iterator is currently pointing. CoordBBox getBoundingBox() const { CoordBBox b; this->getBoundingBox(b); return b; } //@{ /// @brief Return the node to which the iterator is pointing. /// @note This iterator doesn't have the usual dereference operators (* and ->), /// because they would have to be overloaded by the returned node type. 
template<typename NodeT> void getNode(NodeT*& node) const { node = nullptr; mIterList.getNode(mLevel, node); } template<typename NodeT> void getNode(const NodeT*& node) const { node = nullptr; mIterList.getNode(mLevel, node); } //@} TreeT* getTree() const { return mTree; } std::string summary() const; private: struct PrevItem { using IterT = RootIterT; }; IterListItem<PrevItem, InvTreeT, /*VecSize=*/ROOT_LEVEL+1, LEAF_LEVEL> mIterList; Index mLevel; int mMinLevel, mMaxLevel; bool mDone; TreeT* mTree; }; // class NodeIteratorBase template<typename TreeT, typename RootChildOnIterT> inline NodeIteratorBase<TreeT, RootChildOnIterT>::NodeIteratorBase(): mIterList(nullptr), mLevel(ROOT_LEVEL), mMinLevel(int(LEAF_LEVEL)), mMaxLevel(int(ROOT_LEVEL)), mDone(true), mTree(nullptr) { } template<typename TreeT, typename RootChildOnIterT> inline NodeIteratorBase<TreeT, RootChildOnIterT>::NodeIteratorBase(TreeT& tree): mIterList(nullptr), mLevel(ROOT_LEVEL), mMinLevel(int(LEAF_LEVEL)), mMaxLevel(int(ROOT_LEVEL)), mDone(false), mTree(&tree) { mIterList.setIter(RootIterTraits::begin(tree.root())); } template<typename TreeT, typename RootChildOnIterT> inline NodeIteratorBase<TreeT, RootChildOnIterT>::NodeIteratorBase(const NodeIteratorBase& other): mIterList(other.mIterList), mLevel(other.mLevel), mMinLevel(other.mMinLevel), mMaxLevel(other.mMaxLevel), mDone(other.mDone), mTree(other.mTree) { mIterList.updateBackPointers(); } template<typename TreeT, typename RootChildOnIterT> inline NodeIteratorBase<TreeT, RootChildOnIterT>& NodeIteratorBase<TreeT, RootChildOnIterT>::operator=(const NodeIteratorBase& other) { if (&other != this) { mLevel = other.mLevel; mMinLevel = other.mMinLevel; mMaxLevel = other.mMaxLevel; mDone = other.mDone; mTree = other.mTree; mIterList = other.mIterList; mIterList.updateBackPointers(); } return *this; } template<typename TreeT, typename RootChildOnIterT> inline void NodeIteratorBase<TreeT, RootChildOnIterT>::setMinDepth(Index minDepth) { mMaxLevel = int(ROOT_LEVEL - minDepth); // level = ROOT_LEVEL - depth if (int(mLevel) > mMaxLevel) this->next(); } template<typename TreeT, typename RootChildOnIterT> inline void NodeIteratorBase<TreeT, RootChildOnIterT>::setMaxDepth(Index maxDepth) { // level = ROOT_LEVEL - depth mMinLevel = int(ROOT_LEVEL - std::min(maxDepth, this->getLeafDepth())); if (int(mLevel) < mMinLevel) this->next(); } template<typename TreeT, typename RootChildOnIterT> inline bool NodeIteratorBase<TreeT, RootChildOnIterT>::next() { do { if (mDone) return false; // If the iterator over the current node points to a child, // descend to the child (depth-first traversal). if (int(mLevel) > mMinLevel && mIterList.test(mLevel)) { if (!mIterList.down(mLevel)) return false; --mLevel; } else { // Ascend to the nearest ancestor that has other children. while (!mIterList.test(mLevel)) { if (mLevel == ROOT_LEVEL) { // Can't ascend higher than the root. mDone = true; return false; } ++mLevel; // ascend one level mIterList.next(mLevel); // advance to the next child, if there is one } // Descend to the child. if (!mIterList.down(mLevel)) return false; --mLevel; } } while (int(mLevel) < mMinLevel || int(mLevel) > mMaxLevel); return true; } template<typename TreeT, typename RootChildOnIterT> inline Coord NodeIteratorBase<TreeT, RootChildOnIterT>::getCoord() const { if (mLevel != ROOT_LEVEL) return mIterList.getCoord(mLevel + 1); RootNodeT* root = nullptr; this->getNode(root); return root ? 
root->getMinIndex() : Coord::min(); } template<typename TreeT, typename RootChildOnIterT> inline bool NodeIteratorBase<TreeT, RootChildOnIterT>::getBoundingBox(CoordBBox& bbox) const { if (mLevel == ROOT_LEVEL) { RootNodeT* root = nullptr; this->getNode(root); if (root == nullptr) { bbox = CoordBBox(); return false; } root->getIndexRange(bbox); return true; } bbox.min() = mIterList.getCoord(mLevel + 1); bbox.max() = bbox.min().offsetBy(mIterList.getChildDim(mLevel + 1) - 1); return true; } template<typename TreeT, typename RootChildOnIterT> inline std::string NodeIteratorBase<TreeT, RootChildOnIterT>::summary() const { std::ostringstream ostr; for (int lvl = int(ROOT_LEVEL); lvl >= 0 && lvl >= int(mLevel); --lvl) { if (lvl == 0) ostr << "leaf"; else if (lvl == int(ROOT_LEVEL)) ostr << "root"; else ostr << "int" << (ROOT_LEVEL - lvl); ostr << " c" << mIterList.pos(lvl); if (lvl > int(mLevel)) ostr << " / "; } CoordBBox bbox; this->getBoundingBox(bbox); ostr << " " << bbox; return ostr.str(); } //////////////////////////////////////// /// @brief Base class for tree-traversal iterators over all leaf nodes (but not leaf voxels) template<typename TreeT, typename RootChildOnIterT> class LeafIteratorBase { public: using RootIterT = RootChildOnIterT; using RootNodeT = typename RootIterT::NodeType; using NCRootNodeT = typename RootIterT::NonConstNodeType; static const Index ROOT_LEVEL = RootNodeT::LEVEL; using InvTreeT = typename iter::InvertedTree<NCRootNodeT, ROOT_LEVEL>::Type; using NCLeafNodeT = typename InvTreeT::Front; using LeafNodeT = typename CopyConstness<RootNodeT, NCLeafNodeT>::Type; static const Index LEAF_LEVEL = 0, LEAF_PARENT_LEVEL = LEAF_LEVEL + 1; using RootIterTraits = IterTraits<NCRootNodeT, RootIterT>; LeafIteratorBase(): mIterList(nullptr), mTree(nullptr) {} LeafIteratorBase(TreeT& tree): mIterList(nullptr), mTree(&tree) { // Initialize the iterator list with a root node iterator. mIterList.setIter(RootIterTraits::begin(tree.root())); // Descend along the first branch, initializing the node iterator at each level. Index lvl = ROOT_LEVEL; for ( ; lvl > 0 && mIterList.down(lvl); --lvl) {} // If the first branch terminated above the leaf level, backtrack to the next branch. if (lvl > 0) this->next(); } LeafIteratorBase(const LeafIteratorBase& other): mIterList(other.mIterList), mTree(other.mTree) { mIterList.updateBackPointers(); } LeafIteratorBase& operator=(const LeafIteratorBase& other) { if (&other != this) { mTree = other.mTree; mIterList = other.mIterList; mIterList.updateBackPointers(); } return *this; } //@{ /// Return the leaf node to which the iterator is pointing. LeafNodeT* getLeaf() const { LeafNodeT* n = nullptr; mIterList.getNode(LEAF_LEVEL, n); return n; } LeafNodeT& operator*() const { return *this->getLeaf(); } LeafNodeT* operator->() const { return this->getLeaf(); } //@} bool test() const { return mIterList.test(LEAF_PARENT_LEVEL); } operator bool() const { return this->test(); } //@{ /// Advance the iterator to the next leaf node. bool next(); void increment() { this->next(); } LeafIteratorBase& operator++() { this->increment(); return *this; } //@} /// Increment the iterator n times. 
void increment(Index n) { for (Index i = 0; i < n && this->next(); ++i) {} } TreeT* getTree() const { return mTree; } private: struct PrevItem { using IterT = RootIterT; }; /// @note Even though a LeafIterator doesn't iterate over leaf voxels, /// the first item of this linked list of node iterators is a leaf node iterator, /// whose purpose is only to provide access to its parent leaf node. IterListItem<PrevItem, InvTreeT, /*VecSize=*/ROOT_LEVEL+1, LEAF_LEVEL> mIterList; TreeT* mTree; }; // class LeafIteratorBase template<typename TreeT, typename RootChildOnIterT> inline bool LeafIteratorBase<TreeT, RootChildOnIterT>::next() { // If the iterator is valid for the current node one level above the leaf level, // advance the iterator to the node's next child. if (mIterList.test(LEAF_PARENT_LEVEL) && mIterList.next(LEAF_PARENT_LEVEL)) { mIterList.down(LEAF_PARENT_LEVEL); // initialize the leaf iterator return true; } Index lvl = LEAF_PARENT_LEVEL; while (!mIterList.test(LEAF_PARENT_LEVEL)) { if (mIterList.test(lvl)) { mIterList.next(lvl); } else { do { // Ascend to the nearest level at which // one of the iterators is not yet exhausted. if (lvl == ROOT_LEVEL) return false; ++lvl; if (mIterList.test(lvl)) mIterList.next(lvl); } while (!mIterList.test(lvl)); } // Descend to the lowest child, but not as far as the leaf iterator. while (lvl > LEAF_PARENT_LEVEL && mIterList.down(lvl)) --lvl; } mIterList.down(LEAF_PARENT_LEVEL); // initialize the leaf iterator return true; } //////////////////////////////////////// /// An IteratorRange wraps a tree or node iterator, giving the iterator TBB /// splittable range semantics. template<typename IterT> class IteratorRange { public: IteratorRange(const IterT& iter, size_t grainSize = 8): mIter(iter), mGrainSize(grainSize), mSize(0) { mSize = this->size(); } IteratorRange(IteratorRange& other, tbb::split): mIter(other.mIter), mGrainSize(other.mGrainSize), mSize(other.mSize >> 1) { other.increment(mSize); } /// @brief Return a reference to this range's iterator. /// @note The reference is const, because the iterator should not be /// incremented directly. Use this range object's increment() instead. const IterT& iterator() const { return mIter; } bool empty() const { return mSize == 0 || !mIter.test(); } bool test() const { return !this->empty(); } operator bool() const { return !this->empty(); } /// @brief Return @c true if this range is splittable (i.e., if the iterator /// can be advanced more than mGrainSize times). bool is_divisible() const { return mSize > mGrainSize; } /// Advance the iterator @a n times. void increment(Index n = 1) { for ( ; n > 0 && mSize > 0; --n, --mSize, ++mIter) {} } /// Advance the iterator to the next item. IteratorRange& operator++() { this->increment(); return *this; } /// @brief Advance the iterator to the next item. /// @return @c true if the iterator is not yet exhausted. bool next() { this->increment(); return this->test(); } private: Index size() const { Index n = 0; for (IterT it(mIter); it.test(); ++n, ++it) {} return n; } IterT mIter; size_t mGrainSize; /// @note mSize is only an estimate of the number of times mIter can be incremented /// before it is exhausted (because the topology of the underlying tree could change /// during iteration). For the purpose of range splitting, though, that should be /// sufficient, since the two halves need not be of exactly equal size. 
Index mSize; }; //////////////////////////////////////// /// @brief Base class for tree-traversal iterators over real and virtual voxel values /// @todo class TreeVoxelIteratorBase; } // namespace tree } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TREE_TREEITERATOR_HAS_BEEN_INCLUDED
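// A hedged usage sketch (illustrative, not taken verbatim from the upstream
// sources) of the iterator classes defined above.  Tree::ValueOnCIter is an
// instantiation of TreeValueIteratorBase, and IteratorRange adapts node or
// leaf iterators to TBB's splittable-range protocol.  The "tree" variable and
// the MyLeafOp functor are assumptions made for the sake of the example.
//
// @code
// // Sparse traversal of all active tile and voxel values:
// for (openvdb::FloatTree::ValueOnCIter it = tree.cbeginValueOn(); it; ++it) {
//     if (it.isTileValue()) {
//         // this value spans it.getVoxelCount() voxels; it.getBoundingBox()
//         // returns the tile's axis-aligned index-space bounds
//     }
// }
//
// // Wrapping a leaf iterator in an IteratorRange for tbb::parallel_for
// // (MyLeafOp must provide void operator()(RangeT&) const):
// using RangeT = openvdb::tree::IteratorRange<openvdb::FloatTree::LeafCIter>;
// tbb::parallel_for(RangeT(tree.cbeginLeaf()), MyLeafOp());
// @endcode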
50,775
C
36.198535
100
0.657115
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyMetadata.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <boost/python.hpp> #include "openvdb/openvdb.h" namespace py = boost::python; using namespace openvdb::OPENVDB_VERSION_NAME; namespace { class MetadataWrap: public Metadata, public py::wrapper<Metadata> { public: Name typeName() const { return static_cast<const Name&>(this->get_override("typeName")()); } Metadata::Ptr copy() const { return static_cast<const Metadata::Ptr&>(this->get_override("copy")()); } void copy(const Metadata& other) { this->get_override("copy")(other); } std::string str() const {return static_cast<const std::string&>(this->get_override("str")());} bool asBool() const { return static_cast<const bool&>(this->get_override("asBool")()); } Index32 size() const { return static_cast<const Index32&>(this->get_override("size")()); } protected: void readValue(std::istream& is, Index32 numBytes) { this->get_override("readValue")(is, numBytes); } void writeValue(std::ostream& os) const { this->get_override("writeValue")(os); } }; // aliases disambiguate the different versions of copy Metadata::Ptr (MetadataWrap::*copy0)() const = &MetadataWrap::copy; void (MetadataWrap::*copy1)(const Metadata&) = &MetadataWrap::copy; } // end anonymous namespace void exportMetadata(); void exportMetadata() { py::class_<MetadataWrap, boost::noncopyable> clss( /*classname=*/"Metadata", /*docstring=*/ "Class that holds the value of a single item of metadata of a type\n" "for which no Python equivalent exists (typically a custom type)", /*ctor=*/py::no_init // can only be instantiated from C++, not from Python ); clss.def("copy", py::pure_virtual(copy0), "copy() -> Metadata\n\nReturn a copy of this value.") .def("copy", py::pure_virtual(copy1), "copy() -> Metadata\n\nReturn a copy of this value.") .def("type", py::pure_virtual(&Metadata::typeName), "type() -> str\n\nReturn the name of this value's type.") .def("size", py::pure_virtual(&Metadata::size), "size() -> int\n\nReturn the size of this value in bytes.") .def("__nonzero__", py::pure_virtual(&Metadata::asBool)) .def("__str__", py::pure_virtual(&Metadata::str)) ; py::register_ptr_to_python<Metadata::Ptr>(); }
2,422
C++
36.276923
98
0.639141
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyIntGrid.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file pyIntGrid.cc /// @brief Boost.Python wrappers for scalar, integer-valued openvdb::Grid types #include "pyGrid.h" void exportIntGrid(); void exportIntGrid() { pyGrid::exportGrid<BoolGrid>(); #ifdef PY_OPENVDB_WRAP_ALL_GRID_TYPES pyGrid::exportGrid<Int32Grid>(); pyGrid::exportGrid<Int64Grid>(); #endif }
418
C++
18.045454
79
0.717703
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyopenvdb.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file pyopenvdb.h /// /// @brief Glue functions for access to pyOpenVDB objects from C++ code /// @details Use these functions in your own Python function implementations /// to extract an OpenVDB grid from or wrap a grid in a @c PyObject. /// For example (using Boost.Python), /// @code /// #include <openvdb.h> /// #include <pyopenvdb.h> /// #include <boost/python.hpp> /// /// // Implementation of a Python function that processes pyOpenVDB grids /// boost::python::object /// processGrid(boost::python::object inObj) /// { /// boost::python::object outObj; /// try { /// // Extract an OpenVDB grid from the input argument. /// if (openvdb::GridBase::Ptr grid = /// pyopenvdb::getGridFromPyObject(inObj)) /// { /// grid = grid->deepCopyGrid(); /// /// // Process the grid... /// /// // Wrap the processed grid in a PyObject. /// outObj = pyopenvdb::getPyObjectFromGrid(grid); /// } /// } catch (openvdb::TypeError& e) { /// PyErr_Format(PyExc_TypeError, e.what()); /// boost::python::throw_error_already_set(); /// } /// return outObj; /// } /// /// BOOST_PYTHON_MODULE(mymodule) /// { /// openvdb::initialize(); /// /// // Definition of a Python function that processes pyOpenVDB grids /// boost::python::def(/*name=*/"processGrid", &processGrid, /*argname=*/"grid"); /// } /// @endcode /// Then, from Python, /// @code /// import openvdb /// import mymodule /// /// grid = openvdb.read('myGrid.vdb', 'MyGrid') /// grid = mymodule.processGrid(grid) /// openvdb.write('myProcessedGrid.vdb', [grid]) /// @endcode #ifndef PYOPENVDB_HAS_BEEN_INCLUDED #define PYOPENVDB_HAS_BEEN_INCLUDED #include <boost/python.hpp> #include "openvdb/Grid.h" namespace pyopenvdb { //@{ /// @brief Return a pointer to the OpenVDB grid held by the given Python object. /// @throw openvdb::TypeError if the Python object is not one of the pyOpenVDB grid types. /// (See the Python module's GridTypes global variable for the list of supported grid types.) openvdb::GridBase::Ptr getGridFromPyObject(PyObject*); openvdb::GridBase::Ptr getGridFromPyObject(const boost::python::object&); //@} /// @brief Return a new Python object that holds the given OpenVDB grid. /// @return @c None if the given grid pointer is null. /// @throw openvdb::TypeError if the grid is not of a supported type. /// (See the Python module's GridTypes global variable for the list of supported grid types.) boost::python::object getPyObjectFromGrid(const openvdb::GridBase::Ptr&); } // namespace pyopenvdb #endif // PYOPENVDB_HAS_BEEN_INCLUDED
2,745
C
32.084337
97
0.655738
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyFloatGrid.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file pyFloatGrid.cc /// @author Peter Cucka /// @brief Boost.Python wrappers for scalar, floating-point openvdb::Grid types #include "pyGrid.h" void exportFloatGrid(); /// Create a Python wrapper for each supported Grid type. void exportFloatGrid() { // Add a module-level list that gives the types of all supported Grid classes. py::scope().attr("GridTypes") = py::list(); #if defined(PY_OPENVDB_USE_NUMPY) && !defined(PY_OPENVDB_USE_BOOST_PYTHON_NUMPY) // Specify that py::numeric::array should refer to the Python type numpy.ndarray // (rather than the older Numeric.array). py::numeric::array::set_module_and_type("numpy", "ndarray"); #endif pyGrid::exportGrid<FloatGrid>(); #ifdef PY_OPENVDB_WRAP_ALL_GRID_TYPES pyGrid::exportGrid<DoubleGrid>(); #endif py::def("createLevelSetSphere", &pyGrid::createLevelSetSphere<FloatGrid>, (py::arg("radius"), py::arg("center")=openvdb::Coord(), py::arg("voxelSize")=1.0, py::arg("halfWidth")=openvdb::LEVEL_SET_HALF_WIDTH), "createLevelSetSphere(radius, center, voxelSize, halfWidth) -> FloatGrid\n\n" "Return a grid containing a narrow-band level set representation\n" "of a sphere."); }
1,318
C++
31.974999
89
0.685888
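A short Python sketch of what this file wires up: the module-level GridTypes list and the createLevelSetSphere() factory with the defaults declared in the binding (the module name pyopenvdb is an assumption).

import pyopenvdb as vdb  # assumed module name; some builds install it as "openvdb"

# GridTypes is filled in as each grid class is exported.
print([cls.__name__ for cls in vdb.GridTypes])

# Omitted arguments fall back to the bound defaults:
# center=(0, 0, 0), voxelSize=1.0, halfWidth=LEVEL_SET_HALF_WIDTH.
sphere = vdb.createLevelSetSphere(radius=10.0, voxelSize=0.5)
print(type(sphere).__name__)   # FloatGrid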
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyAccessor.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_PYACCESSOR_HAS_BEEN_INCLUDED #define OPENVDB_PYACCESSOR_HAS_BEEN_INCLUDED #include <boost/python.hpp> #include "openvdb/openvdb.h" #include "pyutil.h" namespace pyAccessor { namespace py = boost::python; using namespace openvdb::OPENVDB_VERSION_NAME; //@{ /// Type traits for grid accessors template<typename _GridT> struct AccessorTraits { using GridT = _GridT; using NonConstGridT = GridT; using GridPtrT = typename NonConstGridT::Ptr; using AccessorT = typename NonConstGridT::Accessor; using ValueT = typename AccessorT::ValueType; static const bool IsConst = false; static const char* typeName() { return "Accessor"; } static void setActiveState(AccessorT& acc, const Coord& ijk, bool on) { acc.setActiveState(ijk, on); } static void setValueOnly(AccessorT& acc, const Coord& ijk, const ValueT& val) { acc.setValueOnly(ijk, val); } static void setValueOn(AccessorT& acc, const Coord& ijk) { acc.setValueOn(ijk); } static void setValueOn(AccessorT& acc, const Coord& ijk, const ValueT& val) { acc.setValueOn(ijk, val); } static void setValueOff(AccessorT& acc, const Coord& ijk) { acc.setValueOff(ijk); } static void setValueOff(AccessorT& acc, const Coord& ijk, const ValueT& val) { acc.setValueOff(ijk, val); } }; // Partial specialization for const accessors template<typename _GridT> struct AccessorTraits<const _GridT> { using GridT = const _GridT; using NonConstGridT = _GridT; using GridPtrT = typename NonConstGridT::ConstPtr; using AccessorT = typename NonConstGridT::ConstAccessor; using ValueT = typename AccessorT::ValueType; static const bool IsConst = true; static const char* typeName() { return "ConstAccessor"; } static void setActiveState(AccessorT&, const Coord&, bool) { notWritable(); } static void setValueOnly(AccessorT&, const Coord&, const ValueT&) { notWritable(); } static void setValueOn(AccessorT&, const Coord&) { notWritable(); } static void setValueOn(AccessorT&, const Coord&, const ValueT&) { notWritable(); } static void setValueOff(AccessorT&, const Coord&) { notWritable(); } static void setValueOff(AccessorT&, const Coord&, const ValueT&) { notWritable(); } static void notWritable() { PyErr_SetString(PyExc_TypeError, "accessor is read-only"); py::throw_error_already_set(); } }; //@} //////////////////////////////////////// /// Variant of pyutil::extractArg() that extracts a Coord from a py::object /// argument to a given ValueAccessor method template<typename GridT> inline Coord extractCoordArg(py::object obj, const char* functionName, int argIdx = 0) { return pyutil::extractArg<Coord>(obj, functionName, AccessorTraits<GridT>::typeName(), argIdx, "tuple(int, int, int)"); } /// Variant of pyutil::extractArg() that extracts a value of type /// ValueAccessor::ValueType from an argument to a ValueAccessor method template<typename GridT> inline typename GridT::ValueType extractValueArg( py::object obj, const char* functionName, int argIdx = 0, // args are numbered starting from 1 const char* expectedType = nullptr) { return pyutil::extractArg<typename GridT::ValueType>( obj, functionName, AccessorTraits<GridT>::typeName(), argIdx, expectedType); } //////////////////////////////////////// /// @brief ValueAccessor wrapper class that also stores a grid pointer, /// so that the grid doesn't get deleted as long as the accessor is live /// /// @internal This class could have just been made to inherit from ValueAccessor, /// but the method wrappers allow for more Pythonic error messages. 
For example, /// if we constructed the Python getValue() method directly from the corresponding /// ValueAccessor method, as follows, /// /// .def("getValue", &Accessor::getValue, ...) /// /// then the conversion from a Python type to a Coord& would be done /// automatically. But if the Python method were called with an object of /// a type that is not convertible to a Coord, then the TypeError message /// would say something like "TypeError: No registered converter was able to /// produce a C++ rvalue of type openvdb::math::Coord...". /// Handling the type conversion manually is more work, but it allows us to /// instead generate messages like "TypeError: expected tuple(int, int, int), /// found str as argument to FloatGridAccessor.getValue()". template<typename _GridType> class AccessorWrap { public: using Traits = AccessorTraits<_GridType>; using Accessor = typename Traits::AccessorT; using ValueType = typename Traits::ValueT; using GridType = typename Traits::NonConstGridT; using GridPtrType = typename Traits::GridPtrT; AccessorWrap(GridPtrType grid): mGrid(grid), mAccessor(grid->getAccessor()) {} AccessorWrap copy() const { return *this; } void clear() { mAccessor.clear(); } GridPtrType parent() const { return mGrid; } ValueType getValue(py::object coordObj) { const Coord ijk = extractCoordArg<GridType>(coordObj, "getValue"); return mAccessor.getValue(ijk); } int getValueDepth(py::object coordObj) { const Coord ijk = extractCoordArg<GridType>(coordObj, "getValueDepth"); return mAccessor.getValueDepth(ijk); } int isVoxel(py::object coordObj) { const Coord ijk = extractCoordArg<GridType>(coordObj, "isVoxel"); return mAccessor.isVoxel(ijk); } py::tuple probeValue(py::object coordObj) { const Coord ijk = extractCoordArg<GridType>(coordObj, "probeValue"); ValueType value; bool on = mAccessor.probeValue(ijk, value); return py::make_tuple(value, on); } bool isValueOn(py::object coordObj) { const Coord ijk = extractCoordArg<GridType>(coordObj, "isValueOn"); return mAccessor.isValueOn(ijk); } void setActiveState(py::object coordObj, bool on) { const Coord ijk = extractCoordArg<GridType>(coordObj, "setActiveState", /*argIdx=*/1); Traits::setActiveState(mAccessor, ijk, on); } void setValueOnly(py::object coordObj, py::object valObj) { Coord ijk = extractCoordArg<GridType>(coordObj, "setValueOnly", 1); ValueType val = extractValueArg<GridType>(valObj, "setValueOnly", 2); Traits::setValueOnly(mAccessor, ijk, val); } void setValueOn(py::object coordObj, py::object valObj) { Coord ijk = extractCoordArg<GridType>(coordObj, "setValueOn", 1); if (valObj.is_none()) { Traits::setValueOn(mAccessor, ijk); } else { ValueType val = extractValueArg<GridType>(valObj, "setValueOn", 2); Traits::setValueOn(mAccessor, ijk, val); } } void setValueOff(py::object coordObj, py::object valObj) { Coord ijk = extractCoordArg<GridType>(coordObj, "setValueOff", 1); if (valObj.is_none()) { Traits::setValueOff(mAccessor, ijk); } else { ValueType val = extractValueArg<GridType>(valObj, "setValueOff", 2); Traits::setValueOff(mAccessor, ijk, val); } } int isCached(py::object coordObj) { const Coord ijk = extractCoordArg<GridType>(coordObj, "isCached"); return mAccessor.isCached(ijk); } /// @brief Define a Python wrapper class for this C++ class. 
static void wrap() { const std::string pyGridTypeName = pyutil::GridTraits<GridType>::name(), pyValueTypeName = openvdb::typeNameAsString<typename GridType::ValueType>(), pyAccessorTypeName = Traits::typeName(); py::class_<AccessorWrap> clss( pyAccessorTypeName.c_str(), (std::string(Traits::IsConst ? "Read-only" : "Read/write") + " access by (i, j, k) index coordinates to the voxels\nof a " + pyGridTypeName).c_str(), py::no_init); clss.def("copy", &AccessorWrap::copy, ("copy() -> " + pyAccessorTypeName + "\n\n" "Return a copy of this accessor.").c_str()) .def("clear", &AccessorWrap::clear, "clear()\n\n" "Clear this accessor of all cached data.") .add_property("parent", &AccessorWrap::parent, ("this accessor's parent " + pyGridTypeName).c_str()) // // Voxel access // .def("getValue", &AccessorWrap::getValue, py::arg("ijk"), ("getValue(ijk) -> " + pyValueTypeName + "\n\n" "Return the value of the voxel at coordinates (i, j, k).").c_str()) .def("getValueDepth", &AccessorWrap::getValueDepth, py::arg("ijk"), "getValueDepth(ijk) -> int\n\n" "Return the tree depth (0 = root) at which the value of voxel\n" "(i, j, k) resides. If (i, j, k) isn't explicitly represented in\n" "the tree (i.e., it is implicitly a background voxel), return -1.") .def("isVoxel", &AccessorWrap::isVoxel, py::arg("ijk"), "isVoxel(ijk) -> bool\n\n" "Return True if voxel (i, j, k) resides at the leaf level of the tree.") .def("probeValue", &AccessorWrap::probeValue, py::arg("ijk"), "probeValue(ijk) -> value, bool\n\n" "Return the value of the voxel at coordinates (i, j, k)\n" "together with the voxel's active state.") .def("isValueOn", &AccessorWrap::isValueOn, py::arg("ijk"), "isValueOn(ijk) -> bool\n\n" "Return the active state of the voxel at coordinates (i, j, k).") .def("setActiveState", &AccessorWrap::setActiveState, (py::arg("ijk"), py::arg("on")), "setActiveState(ijk, on)\n\n" "Mark voxel (i, j, k) as either active or inactive (True or False),\n" "but don't change its value.") .def("setValueOnly", &AccessorWrap::setValueOnly, (py::arg("ijk"), py::arg("value")), "setValueOnly(ijk, value)\n\n" "Set the value of voxel (i, j, k), but don't change its active state.") .def("setValueOn", &AccessorWrap::setValueOn, (py::arg("ijk"), py::arg("value") = py::object()), "setValueOn(ijk, value=None)\n\n" "Mark voxel (i, j, k) as active and, if the given value\n" "is not None, set the voxel's value.\n") .def("setValueOff", &AccessorWrap::setValueOff, (py::arg("ijk"), py::arg("value") = py::object()), "setValueOff(ijk, value=None)\n\n" "Mark voxel (i, j, k) as inactive and, if the given value\n" "is not None, set the voxel's value.") .def("isCached", &AccessorWrap::isCached, py::arg("ijk"), "isCached(ijk) -> bool\n\n" "Return True if this accessor has cached the path to voxel (i, j, k).") ; // py::class_<ValueAccessor> } private: const GridPtrType mGrid; Accessor mAccessor; }; // class AccessorWrap } // namespace pyAccessor #endif // OPENVDB_PYACCESSOR_HAS_BEEN_INCLUDED
11,521
C
35.811501
94
0.618002
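A Python usage sketch of the accessor interface bound above. The method names and signatures follow the docstrings in this file; how an accessor is obtained from a grid (getAccessor()/getConstAccessor()) is defined in pyGrid.h rather than here, so that part, like the module name, is an assumption.

import pyopenvdb as vdb  # assumed module name

grid = vdb.createLevelSetSphere(radius=5.0)
acc = grid.getAccessor()            # assumed accessor factory on the grid wrapper

ijk = (0, 0, 5)
print(acc.getValue(ijk))            # voxel value at (i, j, k)
print(acc.probeValue(ijk))          # (value, active state) tuple
print(acc.isValueOn(ijk), acc.getValueDepth(ijk), acc.isVoxel(ijk))

acc.setValueOn(ijk, 0.0)            # mark active and set the value
acc.setActiveState(ijk, False)      # change the active state, keep the value
acc.clear()                         # drop cached tree nodes
print(acc.parent)                   # the accessor keeps its parent grid alive

racc = grid.getConstAccessor()      # a ConstAccessor raises TypeError on any write
print(racc.isCached(ijk))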
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyOpenVDBModule.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <iostream> // must be included before python on macos #include <cstring> // for strncmp(), strrchr(), etc. #include <limits> #include <string> #include <utility> // for std::make_pair() #include <boost/python.hpp> #include <boost/python/stl_iterator.hpp> #include <boost/python/exception_translator.hpp> #include "openvdb/openvdb.h" #include "pyopenvdb.h" #include "pyGrid.h" #include "pyutil.h" namespace py = boost::python; // Forward declarations void exportTransform(); void exportMetadata(); void exportFloatGrid(); void exportIntGrid(); void exportVec3Grid(); void exportPointGrid(); namespace _openvdbmodule { using namespace openvdb; /// Helper class to convert between a Python numeric sequence /// (tuple, list, etc.) and an openvdb::Coord struct CoordConverter { /// @return a Python tuple object equivalent to the given Coord. static PyObject* convert(const openvdb::Coord& xyz) { py::object obj = py::make_tuple(xyz[0], xyz[1], xyz[2]); Py_INCREF(obj.ptr()); ///< @todo is this the right way to ensure that the object ///< doesn't get freed on exit? return obj.ptr(); } /// @return nullptr if the given Python object is not convertible to a Coord. static void* convertible(PyObject* obj) { if (!PySequence_Check(obj)) return nullptr; // not a Python sequence Py_ssize_t len = PySequence_Length(obj); if (len != 3 && len != 1) return nullptr; // not the right length return obj; } /// Convert from a Python object to a Coord. static void construct(PyObject* obj, py::converter::rvalue_from_python_stage1_data* data) { // Construct a Coord in the provided memory location. using StorageT = py::converter::rvalue_from_python_storage<openvdb::Coord>; void* storage = reinterpret_cast<StorageT*>(data)->storage.bytes; new (storage) openvdb::Coord; // placement new data->convertible = storage; openvdb::Coord* xyz = static_cast<openvdb::Coord*>(storage); // Populate the Coord. switch (PySequence_Length(obj)) { case 1: xyz->reset(pyutil::getSequenceItem<openvdb::Int32>(obj, 0)); break; case 3: xyz->reset( pyutil::getSequenceItem<openvdb::Int32>(obj, 0), pyutil::getSequenceItem<openvdb::Int32>(obj, 1), pyutil::getSequenceItem<openvdb::Int32>(obj, 2)); break; default: PyErr_Format(PyExc_ValueError, "expected a sequence of three integers"); py::throw_error_already_set(); break; } } /// Register both the Coord-to-tuple and the sequence-to-Coord converters. static void registerConverter() { py::to_python_converter<openvdb::Coord, CoordConverter>(); py::converter::registry::push_back( &CoordConverter::convertible, &CoordConverter::construct, py::type_id<openvdb::Coord>()); } }; // struct CoordConverter /// @todo CoordBBoxConverter? //////////////////////////////////////// /// Helper class to convert between a Python numeric sequence /// (tuple, list, etc.) 
and an openvdb::Vec template<typename VecT> struct VecConverter { static PyObject* convert(const VecT& v) { py::object obj; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN switch (VecT::size) { // compile-time constant case 2: obj = py::make_tuple(v[0], v[1]); break; case 3: obj = py::make_tuple(v[0], v[1], v[2]); break; case 4: obj = py::make_tuple(v[0], v[1], v[2], v[3]); break; default: { py::list lst; for (int n = 0; n < VecT::size; ++n) lst.append(v[n]); obj = lst; } } OPENVDB_NO_UNREACHABLE_CODE_WARNING_END Py_INCREF(obj.ptr()); return obj.ptr(); } static void* convertible(PyObject* obj) { if (!PySequence_Check(obj)) return nullptr; // not a Python sequence Py_ssize_t len = PySequence_Length(obj); if (len != VecT::size) return nullptr; // Check that all elements of the Python sequence are convertible // to the Vec's value type. py::object seq = pyutil::pyBorrow(obj); for (int i = 0; i < VecT::size; ++i) { if (!py::extract<typename VecT::value_type>(seq[i]).check()) { return nullptr; } } return obj; } static void construct(PyObject* obj, py::converter::rvalue_from_python_stage1_data* data) { // Construct a Vec in the provided memory location. using StorageT = py::converter::rvalue_from_python_storage<VecT>; void* storage = reinterpret_cast<StorageT*>(data)->storage.bytes; new (storage) VecT; // placement new data->convertible = storage; VecT* v = static_cast<VecT*>(storage); // Populate the vector. for (int n = 0; n < VecT::size; ++n) { (*v)[n] = pyutil::getSequenceItem<typename VecT::value_type>(obj, n); } } static void registerConverter() { py::to_python_converter<VecT, VecConverter<VecT> >(); py::converter::registry::push_back( &VecConverter<VecT>::convertible, &VecConverter<VecT>::construct, py::type_id<VecT>()); } }; // struct VecConverter //////////////////////////////////////// /// Helper class to convert between a 2D Python numeric sequence /// (tuple, list, etc.) and an openvdb::Mat template<typename MatT> struct MatConverter { /// Return the given matrix as a Python list of lists. static py::object toList(const MatT& m) { py::list obj; for (int i = 0; i < MatT::size; ++i) { py::list rowObj; for (int j = 0; j < MatT::size; ++j) { rowObj.append(m(i, j)); } obj.append(rowObj); } return std::move(obj); } /// Extract a matrix from a Python sequence of numeric sequences. static MatT fromSeq(py::object obj) { MatT m = MatT::zero(); if (py::len(obj) == MatT::size) { for (int i = 0; i < MatT::size; ++i) { py::object rowObj = obj[i]; if (py::len(rowObj) != MatT::size) return MatT::zero(); for (int j = 0; j < MatT::size; ++j) { m(i, j) = py::extract<typename MatT::value_type>(rowObj[j]); } } } return m; } static PyObject* convert(const MatT& m) { py::object obj = toList(m); Py_INCREF(obj.ptr()); return obj.ptr(); } static void* convertible(PyObject* obj) { if (!PySequence_Check(obj)) return nullptr; // not a Python sequence Py_ssize_t len = PySequence_Length(obj); if (len != MatT::size) return nullptr; py::object seq = pyutil::pyBorrow(obj); for (int i = 0; i < MatT::size; ++i) { py::object rowObj = seq[i]; if (py::len(rowObj) != MatT::size) return nullptr; // Check that all elements of the Python sequence are convertible // to the Mat's value type. for (int j = 0; j < MatT::size; ++j) { if (!py::extract<typename MatT::value_type>(rowObj[j]).check()) { return nullptr; } } } return obj; } static void construct(PyObject* obj, py::converter::rvalue_from_python_stage1_data* data) { // Construct a Mat in the provided memory location. 
using StorageT = py::converter::rvalue_from_python_storage<MatT>; void* storage = reinterpret_cast<StorageT*>(data)->storage.bytes; new (storage) MatT; // placement new data->convertible = storage; *(static_cast<MatT*>(storage)) = fromSeq(pyutil::pyBorrow(obj)); } static void registerConverter() { py::to_python_converter<MatT, MatConverter<MatT> >(); py::converter::registry::push_back( &MatConverter<MatT>::convertible, &MatConverter<MatT>::construct, py::type_id<MatT>()); } }; // struct MatConverter //////////////////////////////////////// /// Helper class to convert between a Python integer and a openvdb::PointIndex template <typename PointIndexT> struct PointIndexConverter { using IntType = typename PointIndexT::IntType; /// @return a Python integer object equivalent to the given PointIndex. static PyObject* convert(const PointIndexT& index) { py::object obj(static_cast<IntType>(index)); Py_INCREF(obj.ptr()); return obj.ptr(); } /// @return nullptr if the given Python object is not convertible to the PointIndex. static void* convertible(PyObject* obj) { #if PY_MAJOR_VERSION >= 3 if (!PyLong_Check(obj)) return nullptr; // not a Python integer #else if (!PyInt_Check(obj)) return nullptr; // not a Python integer #endif return obj; } /// Convert from a Python object to a PointIndex. static void construct(PyObject* obj, py::converter::rvalue_from_python_stage1_data* data) { // Construct a PointIndex in the provided memory location. using StorageT = py::converter::rvalue_from_python_storage<PointIndexT>; void* storage = reinterpret_cast<StorageT*>(data)->storage.bytes; new (storage) PointIndexT; // placement new data->convertible = storage; // Extract the PointIndex from the python integer PointIndexT* index = static_cast<PointIndexT*>(storage); #if PY_MAJOR_VERSION >= 3 *index = static_cast<IntType>(PyLong_AsLong(obj)); #else *index = static_cast<IntType>(PyInt_AsLong(obj)); #endif } /// Register both the PointIndex-to-integer and the integer-to-PointIndex converters. static void registerConverter() { py::to_python_converter<PointIndexT, PointIndexConverter>(); py::converter::registry::push_back( &PointIndexConverter::convertible, &PointIndexConverter::construct, py::type_id<PointIndexT>()); } }; // struct PointIndexConverter //////////////////////////////////////// /// Helper class to convert between a Python dict and an openvdb::MetaMap /// @todo Consider implementing a separate, templated converter for /// the various Metadata types. 
struct MetaMapConverter { static PyObject* convert(const MetaMap& metaMap) { py::dict ret; for (MetaMap::ConstMetaIterator it = metaMap.beginMeta(); it != metaMap.endMeta(); ++it) { if (Metadata::Ptr meta = it->second) { py::object obj(meta); const std::string typeName = meta->typeName(); if (typeName == StringMetadata::staticTypeName()) { obj = py::str(static_cast<StringMetadata&>(*meta).value()); } else if (typeName == DoubleMetadata::staticTypeName()) { obj = py::object(static_cast<DoubleMetadata&>(*meta).value()); } else if (typeName == FloatMetadata::staticTypeName()) { obj = py::object(static_cast<FloatMetadata&>(*meta).value()); } else if (typeName == Int32Metadata::staticTypeName()) { obj = py::object(static_cast<Int32Metadata&>(*meta).value()); } else if (typeName == Int64Metadata::staticTypeName()) { obj = py::object(static_cast<Int64Metadata&>(*meta).value()); } else if (typeName == BoolMetadata::staticTypeName()) { obj = py::object(static_cast<BoolMetadata&>(*meta).value()); } else if (typeName == Vec2DMetadata::staticTypeName()) { const Vec2d v = static_cast<Vec2DMetadata&>(*meta).value(); obj = py::make_tuple(v[0], v[1]); } else if (typeName == Vec2IMetadata::staticTypeName()) { const Vec2i v = static_cast<Vec2IMetadata&>(*meta).value(); obj = py::make_tuple(v[0], v[1]); } else if (typeName == Vec2SMetadata::staticTypeName()) { const Vec2s v = static_cast<Vec2SMetadata&>(*meta).value(); obj = py::make_tuple(v[0], v[1]); } else if (typeName == Vec3DMetadata::staticTypeName()) { const Vec3d v = static_cast<Vec3DMetadata&>(*meta).value(); obj = py::make_tuple(v[0], v[1], v[2]); } else if (typeName == Vec3IMetadata::staticTypeName()) { const Vec3i v = static_cast<Vec3IMetadata&>(*meta).value(); obj = py::make_tuple(v[0], v[1], v[2]); } else if (typeName == Vec3SMetadata::staticTypeName()) { const Vec3s v = static_cast<Vec3SMetadata&>(*meta).value(); obj = py::make_tuple(v[0], v[1], v[2]); } else if (typeName == Vec4DMetadata::staticTypeName()) { const Vec4d v = static_cast<Vec4DMetadata&>(*meta).value(); obj = py::make_tuple(v[0], v[1], v[2], v[3]); } else if (typeName == Vec4IMetadata::staticTypeName()) { const Vec4i v = static_cast<Vec4IMetadata&>(*meta).value(); obj = py::make_tuple(v[0], v[1], v[2], v[3]); } else if (typeName == Vec4SMetadata::staticTypeName()) { const Vec4s v = static_cast<Vec4SMetadata&>(*meta).value(); obj = py::make_tuple(v[0], v[1], v[2], v[3]); } else if (typeName == Mat4SMetadata::staticTypeName()) { const Mat4s m = static_cast<Mat4SMetadata&>(*meta).value(); obj = MatConverter<Mat4s>::toList(m); } else if (typeName == Mat4DMetadata::staticTypeName()) { const Mat4d m = static_cast<Mat4DMetadata&>(*meta).value(); obj = MatConverter<Mat4d>::toList(m); } ret[it->first] = obj; } } Py_INCREF(ret.ptr()); return ret.ptr(); } static void* convertible(PyObject* obj) { return (PyMapping_Check(obj) ? obj : nullptr); } static void construct(PyObject* obj, py::converter::rvalue_from_python_stage1_data* data) { // Construct a MetaMap in the provided memory location. using StorageT = py::converter::rvalue_from_python_storage<MetaMap>; void* storage = reinterpret_cast<StorageT*>(data)->storage.bytes; new (storage) MetaMap; // placement new data->convertible = storage; MetaMap* metaMap = static_cast<MetaMap*>(storage); // Populate the map. 
py::dict pyDict(pyutil::pyBorrow(obj)); py::list keys = pyDict.keys(); for (size_t i = 0, N = py::len(keys); i < N; ++i) { std::string name; py::object key = keys[i]; if (py::extract<std::string>(key).check()) { name = py::extract<std::string>(key); } else { const std::string keyAsStr = py::extract<std::string>(key.attr("__str__")()), keyType = pyutil::className(key); PyErr_Format(PyExc_TypeError, "expected string as metadata name, found object" " \"%s\" of type %s", keyAsStr.c_str(), keyType.c_str()); py::throw_error_already_set(); } // Note: the order of the following tests is significant, as it // avoids unnecessary type promotion (e.g., of ints to floats). py::object val = pyDict[keys[i]]; Metadata::Ptr value; if (py::extract<std::string>(val).check()) { value.reset(new StringMetadata(py::extract<std::string>(val))); } else if (bool(PyBool_Check(val.ptr()))) { value.reset(new BoolMetadata(py::extract<bool>(val))); } else if (py::extract<Int64>(val).check()) { const Int64 n = py::extract<Int64>(val); if (n <= std::numeric_limits<Int32>::max() && n >= std::numeric_limits<Int32>::min()) { value.reset(new Int32Metadata(static_cast<Int32>(n))); } else { value.reset(new Int64Metadata(n)); } //} else if (py::extract<float>(val).check()) { // value.reset(new FloatMetadata(py::extract<float>(val))); } else if (py::extract<double>(val).check()) { value.reset(new DoubleMetadata(py::extract<double>(val))); } else if (py::extract<Vec2i>(val).check()) { value.reset(new Vec2IMetadata(py::extract<Vec2i>(val))); } else if (py::extract<Vec2d>(val).check()) { value.reset(new Vec2DMetadata(py::extract<Vec2d>(val))); } else if (py::extract<Vec2s>(val).check()) { value.reset(new Vec2SMetadata(py::extract<Vec2s>(val))); } else if (py::extract<Vec3i>(val).check()) { value.reset(new Vec3IMetadata(py::extract<Vec3i>(val))); } else if (py::extract<Vec3d>(val).check()) { value.reset(new Vec3DMetadata(py::extract<Vec3d>(val))); } else if (py::extract<Vec3s>(val).check()) { value.reset(new Vec3SMetadata(py::extract<Vec3s>(val))); } else if (py::extract<Vec4i>(val).check()) { value.reset(new Vec4IMetadata(py::extract<Vec4i>(val))); } else if (py::extract<Vec4d>(val).check()) { value.reset(new Vec4DMetadata(py::extract<Vec4d>(val))); } else if (py::extract<Vec4s>(val).check()) { value.reset(new Vec4SMetadata(py::extract<Vec4s>(val))); } else if (py::extract<Mat4d>(val).check()) { value.reset(new Mat4DMetadata(py::extract<Mat4d>(val))); } else if (py::extract<Mat4s>(val).check()) { value.reset(new Mat4SMetadata(py::extract<Mat4s>(val))); } else if (py::extract<Metadata::Ptr>(val).check()) { value = py::extract<Metadata::Ptr>(val); } else { const std::string valAsStr = py::extract<std::string>(val.attr("__str__")()), valType = pyutil::className(val); PyErr_Format(PyExc_TypeError, "metadata value \"%s\" of type %s is not allowed", valAsStr.c_str(), valType.c_str()); py::throw_error_already_set(); } if (value) metaMap->insertMeta(name, *value); } } static void registerConverter() { py::to_python_converter<MetaMap, MetaMapConverter>(); py::converter::registry::push_back( &MetaMapConverter::convertible, &MetaMapConverter::construct, py::type_id<MetaMap>()); } }; // struct MetaMapConverter //////////////////////////////////////// template<typename T> void translateException(const T&) {} /// @brief Define a function that translates an OpenVDB exception into /// the equivalent Python exception. /// @details openvdb::Exception::what() typically returns a string of the form /// "<exception>: <description>". 
To avoid duplication of the exception name in Python /// stack traces, the function strips off the "<exception>: " prefix. To do that, /// it needs the class name in the form of a string, hence the preprocessor macro. #define PYOPENVDB_CATCH(_openvdbname, _pyname) \ template<> \ void translateException<_openvdbname>(const _openvdbname& e) \ { \ const char* name = #_openvdbname; \ if (const char* c = std::strrchr(name, ':')) name = c + 1; \ const int namelen = int(std::strlen(name)); \ const char* msg = e.what(); \ if (0 == std::strncmp(msg, name, namelen)) msg += namelen; \ if (0 == std::strncmp(msg, ": ", 2)) msg += 2; \ PyErr_SetString(_pyname, msg); \ } /// Define an overloaded function that translate all OpenVDB exceptions into /// their Python equivalents. /// @todo LookupError is redundant and should someday be removed. PYOPENVDB_CATCH(openvdb::ArithmeticError, PyExc_ArithmeticError) PYOPENVDB_CATCH(openvdb::IndexError, PyExc_IndexError) PYOPENVDB_CATCH(openvdb::IoError, PyExc_IOError) PYOPENVDB_CATCH(openvdb::KeyError, PyExc_KeyError) PYOPENVDB_CATCH(openvdb::LookupError, PyExc_LookupError) PYOPENVDB_CATCH(openvdb::NotImplementedError, PyExc_NotImplementedError) PYOPENVDB_CATCH(openvdb::ReferenceError, PyExc_ReferenceError) PYOPENVDB_CATCH(openvdb::RuntimeError, PyExc_RuntimeError) PYOPENVDB_CATCH(openvdb::TypeError, PyExc_TypeError) PYOPENVDB_CATCH(openvdb::ValueError, PyExc_ValueError) #undef PYOPENVDB_CATCH //////////////////////////////////////// py::object readFromFile(const std::string&, const std::string&); py::tuple readAllFromFile(const std::string&); py::dict readFileMetadata(const std::string&); py::object readGridMetadataFromFile(const std::string&, const std::string&); py::list readAllGridMetadataFromFile(const std::string&); void writeToFile(const std::string&, py::object, py::object); py::object readFromFile(const std::string& filename, const std::string& gridName) { io::File vdbFile(filename); vdbFile.open(); if (!vdbFile.hasGrid(gridName)) { PyErr_Format(PyExc_KeyError, "file %s has no grid named \"%s\"", filename.c_str(), gridName.c_str()); py::throw_error_already_set(); } return pyGrid::getGridFromGridBase(vdbFile.readGrid(gridName)); } py::tuple readAllFromFile(const std::string& filename) { io::File vdbFile(filename); vdbFile.open(); GridPtrVecPtr grids = vdbFile.getGrids(); MetaMap::Ptr metadata = vdbFile.getMetadata(); vdbFile.close(); py::list gridList; for (GridPtrVec::const_iterator it = grids->begin(); it != grids->end(); ++it) { gridList.append(pyGrid::getGridFromGridBase(*it)); } return py::make_tuple(gridList, py::dict(*metadata)); } py::dict readFileMetadata(const std::string& filename) { io::File vdbFile(filename); vdbFile.open(); MetaMap::Ptr metadata = vdbFile.getMetadata(); vdbFile.close(); return py::dict(*metadata); } py::object readGridMetadataFromFile(const std::string& filename, const std::string& gridName) { io::File vdbFile(filename); vdbFile.open(); if (!vdbFile.hasGrid(gridName)) { PyErr_Format(PyExc_KeyError, "file %s has no grid named \"%s\"", filename.c_str(), gridName.c_str()); py::throw_error_already_set(); } return pyGrid::getGridFromGridBase(vdbFile.readGridMetadata(gridName)); } py::list readAllGridMetadataFromFile(const std::string& filename) { io::File vdbFile(filename); vdbFile.open(); GridPtrVecPtr grids = vdbFile.readAllGridMetadata(); vdbFile.close(); py::list gridList; for (GridPtrVec::const_iterator it = grids->begin(); it != grids->end(); ++it) { gridList.append(pyGrid::getGridFromGridBase(*it)); } return gridList; } void 
writeToFile(const std::string& filename, py::object gridOrSeqObj, py::object dictObj) { GridPtrVec gridVec; try { GridBase::Ptr base = pyopenvdb::getGridFromPyObject(gridOrSeqObj); gridVec.push_back(base); } catch (openvdb::TypeError&) { for (py::stl_input_iterator<py::object> it(gridOrSeqObj), end; it != end; ++it) { if (GridBase::Ptr base = pyGrid::getGridBaseFromGrid(*it)) { gridVec.push_back(base); } } } io::File vdbFile(filename); if (dictObj.is_none()) { vdbFile.write(gridVec); } else { MetaMap metadata = py::extract<MetaMap>(dictObj); vdbFile.write(gridVec, metadata); } vdbFile.close(); } //////////////////////////////////////// std::string getLoggingLevel(); void setLoggingLevel(py::object); void setProgramName(py::object, bool); std::string getLoggingLevel() { switch (logging::getLevel()) { case logging::Level::Debug: return "debug"; case logging::Level::Info: return "info"; case logging::Level::Warn: return "warn"; case logging::Level::Error: return "error"; case logging::Level::Fatal: break; } return "fatal"; } void setLoggingLevel(py::object pyLevelObj) { std::string levelStr; if (!py::extract<py::str>(pyLevelObj).check()) { levelStr = py::extract<std::string>(pyLevelObj.attr("__str__")()); } else { const py::str pyLevelStr = py::extract<py::str>(pyLevelObj.attr("lower")().attr("lstrip")("-")); levelStr = py::extract<std::string>(pyLevelStr); if (levelStr == "debug") { logging::setLevel(logging::Level::Debug); return; } else if (levelStr == "info") { logging::setLevel(logging::Level::Info); return; } else if (levelStr == "warn") { logging::setLevel(logging::Level::Warn); return; } else if (levelStr == "error") { logging::setLevel(logging::Level::Error); return; } else if (levelStr == "fatal") { logging::setLevel(logging::Level::Fatal); return; } } PyErr_Format(PyExc_ValueError, "expected logging level \"debug\", \"info\", \"warn\", \"error\", or \"fatal\"," " got \"%s\"", levelStr.c_str()); py::throw_error_already_set(); } void setProgramName(py::object nameObj, bool color) { if (py::extract<std::string>(nameObj).check()) { logging::setProgramName(py::extract<std::string>(nameObj), color); } else { const std::string str = py::extract<std::string>(nameObj.attr("__str__")()), typ = pyutil::className(nameObj).c_str(); PyErr_Format(PyExc_TypeError, "expected string as program name, got \"%s\" of type %s", str.c_str(), typ.c_str()); py::throw_error_already_set(); } } //////////////////////////////////////// // Descriptor for the openvdb::GridClass enum (for use with pyutil::StringEnum) struct GridClassDescr { static const char* name() { return "GridClass"; } static const char* doc() { return "Classes of volumetric data (level set, fog volume, etc.)"; } static pyutil::CStringPair item(int i) { static const int sCount = 4; static const char* const sStrings[sCount][2] = { { "UNKNOWN", strdup(GridBase::gridClassToString(GRID_UNKNOWN).c_str()) }, { "LEVEL_SET", strdup(GridBase::gridClassToString(GRID_LEVEL_SET).c_str()) }, { "FOG_VOLUME", strdup(GridBase::gridClassToString(GRID_FOG_VOLUME).c_str()) }, { "STAGGERED", strdup(GridBase::gridClassToString(GRID_STAGGERED).c_str()) } }; if (i >= 0 && i < sCount) return pyutil::CStringPair(&sStrings[i][0], &sStrings[i][1]); return pyutil::CStringPair(static_cast<char**>(nullptr), static_cast<char**>(nullptr)); } }; // Descriptor for the openvdb::VecType enum (for use with pyutil::StringEnum) struct VecTypeDescr { static const char* name() { return "VectorType"; } static const char* doc() { return "The type of a vector determines how transforms 
are applied to it.\n" " - INVARIANT:\n" " does not transform (e.g., tuple, uvw, color)\n" " - COVARIANT:\n" " apply inverse-transpose transformation with w = 0\n" " and ignore translation (e.g., gradient/normal)\n" " - COVARIANT_NORMALIZE:\n" " apply inverse-transpose transformation with w = 0\n" " and ignore translation, vectors are renormalized\n" " (e.g., unit normal)\n" " - CONTRAVARIANT_RELATIVE:\n" " apply \"regular\" transformation with w = 0 and ignore\n" " translation (e.g., displacement, velocity, acceleration)\n" " - CONTRAVARIANT_ABSOLUTE:\n" " apply \"regular\" transformation with w = 1 so that\n" " vector translates (e.g., position)\n"; } static pyutil::CStringPair item(int i) { static const int sCount = 5; static const char* const sStrings[sCount][2] = { { "INVARIANT", strdup(GridBase::vecTypeToString(openvdb::VEC_INVARIANT).c_str()) }, { "COVARIANT", strdup(GridBase::vecTypeToString(openvdb::VEC_COVARIANT).c_str()) }, { "COVARIANT_NORMALIZE", strdup(GridBase::vecTypeToString(openvdb::VEC_COVARIANT_NORMALIZE).c_str()) }, { "CONTRAVARIANT_RELATIVE", strdup(GridBase::vecTypeToString(openvdb::VEC_CONTRAVARIANT_RELATIVE).c_str()) }, { "CONTRAVARIANT_ABSOLUTE", strdup(GridBase::vecTypeToString(openvdb::VEC_CONTRAVARIANT_ABSOLUTE).c_str()) } }; if (i >= 0 && i < sCount) return std::make_pair(&sStrings[i][0], &sStrings[i][1]); return pyutil::CStringPair(static_cast<char**>(nullptr), static_cast<char**>(nullptr)); } }; } // namespace _openvdbmodule //////////////////////////////////////// #ifdef DWA_OPENVDB #define PY_OPENVDB_MODULE_NAME _openvdb extern "C" { void init_openvdb(); } #else #define PY_OPENVDB_MODULE_NAME pyopenvdb extern "C" { void initpyopenvdb(); } #endif BOOST_PYTHON_MODULE(PY_OPENVDB_MODULE_NAME) { // Don't auto-generate ugly, C++-style function signatures. py::docstring_options docOptions; docOptions.disable_signatures(); docOptions.enable_user_defined(); #ifdef PY_OPENVDB_USE_NUMPY // Initialize NumPy. #ifdef PY_OPENVDB_USE_BOOST_PYTHON_NUMPY boost::python::numpy::initialize(); #else #if PY_MAJOR_VERSION >= 3 if (_import_array()) {} #else import_array(); #endif #endif #endif using namespace openvdb::OPENVDB_VERSION_NAME; // Initialize OpenVDB. 
initialize(); _openvdbmodule::CoordConverter::registerConverter(); _openvdbmodule::VecConverter<Vec2i>::registerConverter(); _openvdbmodule::VecConverter<Vec2I>::registerConverter(); _openvdbmodule::VecConverter<Vec2s>::registerConverter(); _openvdbmodule::VecConverter<Vec2d>::registerConverter(); _openvdbmodule::VecConverter<Vec3i>::registerConverter(); _openvdbmodule::VecConverter<Vec3I>::registerConverter(); _openvdbmodule::VecConverter<Vec3s>::registerConverter(); _openvdbmodule::VecConverter<Vec3d>::registerConverter(); _openvdbmodule::VecConverter<Vec4i>::registerConverter(); _openvdbmodule::VecConverter<Vec4I>::registerConverter(); _openvdbmodule::VecConverter<Vec4s>::registerConverter(); _openvdbmodule::VecConverter<Vec4d>::registerConverter(); _openvdbmodule::MatConverter<Mat4s>::registerConverter(); _openvdbmodule::MatConverter<Mat4d>::registerConverter(); _openvdbmodule::PointIndexConverter<PointDataIndex32>::registerConverter(); _openvdbmodule::MetaMapConverter::registerConverter(); #define PYOPENVDB_TRANSLATE_EXCEPTION(_classname) \ py::register_exception_translator<_classname>(&_openvdbmodule::translateException<_classname>) PYOPENVDB_TRANSLATE_EXCEPTION(ArithmeticError); PYOPENVDB_TRANSLATE_EXCEPTION(IndexError); PYOPENVDB_TRANSLATE_EXCEPTION(IoError); PYOPENVDB_TRANSLATE_EXCEPTION(KeyError); PYOPENVDB_TRANSLATE_EXCEPTION(LookupError); PYOPENVDB_TRANSLATE_EXCEPTION(NotImplementedError); PYOPENVDB_TRANSLATE_EXCEPTION(ReferenceError); PYOPENVDB_TRANSLATE_EXCEPTION(RuntimeError); PYOPENVDB_TRANSLATE_EXCEPTION(TypeError); PYOPENVDB_TRANSLATE_EXCEPTION(ValueError); #undef PYOPENVDB_TRANSLATE_EXCEPTION // Export the python bindings. exportTransform(); exportMetadata(); exportFloatGrid(); exportIntGrid(); exportVec3Grid(); exportPointGrid(); py::def("read", &_openvdbmodule::readFromFile, (py::arg("filename"), py::arg("gridname")), "read(filename, gridname) -> Grid\n\n" "Read a single grid from a .vdb file."); py::def("readAll", &_openvdbmodule::readAllFromFile, py::arg("filename"), "readAll(filename) -> list, dict\n\n" "Read a .vdb file and return a list of grids and\n" "a dict of file-level metadata."); py::def("readMetadata", &_openvdbmodule::readFileMetadata, py::arg("filename"), "readMetadata(filename) -> dict\n\n" "Read file-level metadata from a .vdb file."); py::def("readGridMetadata", &_openvdbmodule::readGridMetadataFromFile, (py::arg("filename"), py::arg("gridname")), "readGridMetadata(filename, gridname) -> Grid\n\n" "Read a single grid's metadata and transform (but not its tree)\n" "from a .vdb file."); py::def("readAllGridMetadata", &_openvdbmodule::readAllGridMetadataFromFile, py::arg("filename"), "readAllGridMetadata(filename) -> list\n\n" "Read a .vdb file and return a list of grids populated with\n" "their metadata and transforms, but not their trees."); py::def("write", &_openvdbmodule::writeToFile, (py::arg("filename"), py::arg("grids"), py::arg("metadata") = py::object()), "write(filename, grids, metadata=None)\n\n" "Write a grid or a sequence of grids and, optionally, a dict\n" "of (name, value) metadata pairs to a .vdb file."); py::def("getLoggingLevel", &_openvdbmodule::getLoggingLevel, "getLoggingLevel() -> str\n\n" "Return the severity threshold (\"debug\", \"info\", \"warn\", \"error\",\n" "or \"fatal\") for error messages."); py::def("setLoggingLevel", &_openvdbmodule::setLoggingLevel, (py::arg("level")), "setLoggingLevel(level)\n\n" "Specify the severity threshold (\"debug\", \"info\", \"warn\", \"error\",\n" "or \"fatal\") for error messages. 
Messages of lower severity\n" "will be suppressed."); py::def("setProgramName", &_openvdbmodule::setProgramName, (py::arg("name"), py::arg("color") = true), "setProgramName(name, color=True)\n\n" "Specify the program name to be displayed in error messages,\n" "and optionally specify whether to print error messages in color."); // Add some useful module-level constants. py::scope().attr("LIBRARY_VERSION") = py::make_tuple( openvdb::OPENVDB_LIBRARY_MAJOR_VERSION, openvdb::OPENVDB_LIBRARY_MINOR_VERSION, openvdb::OPENVDB_LIBRARY_PATCH_VERSION); py::scope().attr("FILE_FORMAT_VERSION") = openvdb::OPENVDB_FILE_VERSION; py::scope().attr("COORD_MIN") = openvdb::Coord::min(); py::scope().attr("COORD_MAX") = openvdb::Coord::max(); py::scope().attr("LEVEL_SET_HALF_WIDTH") = openvdb::LEVEL_SET_HALF_WIDTH; pyutil::StringEnum<_openvdbmodule::GridClassDescr>::wrap(); pyutil::StringEnum<_openvdbmodule::VecTypeDescr>::wrap(); } // BOOST_PYTHON_MODULE
35,818
C++
36.903704
98
0.58465
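A Python sketch exercising the module-level functions, constants, and converters registered above. The module name and file name are assumptions; everything else follows the def() docstrings and the converters in this file (metadata dicts accept strings, bools, ints, floats, 2-, 3- and 4-tuples, and 4 x 4 nested lists).

import pyopenvdb as vdb  # assumed module name; some builds install it as "openvdb"

vdb.setProgramName('example-tool')   # name shown in error messages
vdb.setLoggingLevel('warn')          # one of debug, info, warn, error, fatal
print(vdb.getLoggingLevel(), vdb.LIBRARY_VERSION, vdb.FILE_FORMAT_VERSION)

grid = vdb.createLevelSetSphere(radius=4.0)
vdb.write('sphere.vdb', grids=[grid],
          metadata={'creator': 'example-tool', 'count': 1, 'offset': (0.0, 1.0, 2.0)})

grids, filemeta = vdb.readAll('sphere.vdb')      # full trees plus file-level metadata
print(len(grids), filemeta['creator'])
headers = vdb.readAllGridMetadata('sphere.vdb')  # grid metadata and transforms, no trees
print(len(headers), vdb.readMetadata('sphere.vdb') == filemeta)

# String enums and module constants.
print(vdb.GridClass.LEVEL_SET, vdb.VectorType.INVARIANT)
print(vdb.COORD_MIN, vdb.COORD_MAX, vdb.LEVEL_SET_HALF_WIDTH)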
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyTransform.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <boost/python.hpp> #include "openvdb/openvdb.h" #include "pyutil.h" namespace py = boost::python; using namespace openvdb::OPENVDB_VERSION_NAME; namespace pyTransform { inline void scale1(math::Transform& t, double s) { t.preScale(s); } inline void scale3(math::Transform& t, const Vec3d& xyz) { t.preScale(xyz); } inline Vec3d voxelDim0(math::Transform& t) { return t.voxelSize(); } inline Vec3d voxelDim1(math::Transform& t, const Vec3d& p) { return t.voxelSize(p); } inline double voxelVolume0(math::Transform& t) { return t.voxelVolume(); } inline double voxelVolume1(math::Transform& t, const Vec3d& p) { return t.voxelVolume(p); } inline Vec3d indexToWorld(math::Transform& t, const Vec3d& p) { return t.indexToWorld(p); } inline Vec3d worldToIndex(math::Transform& t, const Vec3d& p) { return t.worldToIndex(p); } inline Coord worldToIndexCellCentered(math::Transform& t, const Vec3d& p) { return t.worldToIndexCellCentered(p); } inline Coord worldToIndexNodeCentered(math::Transform& t, const Vec3d& p) { return t.worldToIndexNodeCentered(p); } inline std::string info(math::Transform& t) { std::ostringstream ostr; t.print(ostr); return ostr.str(); } inline math::Transform::Ptr createLinearFromDim(double dim) { return math::Transform::createLinearTransform(dim); } inline math::Transform::Ptr createLinearFromMat(py::object obj) { Mat4R m; // Verify that obj is a four-element sequence. bool is4x4Seq = (PySequence_Check(obj.ptr()) && PySequence_Length(obj.ptr()) == 4); if (is4x4Seq) { for (int row = 0; is4x4Seq && row < 4; ++row) { // Verify that each element of obj is itself a four-element sequence. py::object rowObj = obj[row]; if (PySequence_Check(rowObj.ptr()) && PySequence_Length(rowObj.ptr()) == 4) { // Extract four numeric values from this row of the sequence. for (int col = 0; is4x4Seq && col < 4; ++col) { if (py::extract<double>(rowObj[col]).check()) { m[row][col] = py::extract<double>(rowObj[col]); } else { is4x4Seq = false; } } } else { is4x4Seq = false; } } } if (!is4x4Seq) { PyErr_Format(PyExc_ValueError, "expected a 4 x 4 sequence of numeric values"); py::throw_error_already_set(); } return math::Transform::createLinearTransform(m); } inline math::Transform::Ptr createFrustum(const Coord& xyzMin, const Coord& xyzMax, double taper, double depth, double voxelDim = 1.0) { return math::Transform::createFrustumTransform( BBoxd(xyzMin.asVec3d(), xyzMax.asVec3d()), taper, depth, voxelDim); } //////////////////////////////////////// struct PickleSuite: public py::pickle_suite { enum { STATE_DICT = 0, STATE_MAJOR, STATE_MINOR, STATE_FORMAT, STATE_XFORM }; /// Return @c true, indicating that this pickler preserves a Transform's __dict__. static bool getstate_manages_dict() { return true; } /// Return a tuple representing the state of the given Transform. static py::tuple getstate(py::object xformObj) { py::tuple state; py::extract<math::Transform> x(xformObj); if (x.check()) { // Extract a Transform from the Python object. math::Transform xform = x(); std::ostringstream ostr(std::ios_base::binary); // Serialize the Transform to a string. xform.write(ostr); // Construct a state tuple comprising the Python object's __dict__, // the version numbers of the serialization format, // and the serialized Transform. #if PY_MAJOR_VERSION >= 3 // Convert the byte string to a "bytes" sequence. 
const std::string s = ostr.str(); py::object bytesObj = pyutil::pyBorrow(PyBytes_FromStringAndSize(s.data(), s.size())); #else py::str bytesObj(ostr.str()); #endif state = py::make_tuple( xformObj.attr("__dict__"), uint32_t(OPENVDB_LIBRARY_MAJOR_VERSION), uint32_t(OPENVDB_LIBRARY_MINOR_VERSION), uint32_t(OPENVDB_FILE_VERSION), bytesObj); } return state; } /// Restore the given Transform to a saved state. static void setstate(py::object xformObj, py::object stateObj) { math::Transform* xform = nullptr; { py::extract<math::Transform*> x(xformObj); if (x.check()) xform = x(); else return; } py::tuple state; { py::extract<py::tuple> x(stateObj); if (x.check()) state = x(); } bool badState = (py::len(state) != 5); if (!badState) { // Restore the object's __dict__. py::extract<py::dict> x(state[int(STATE_DICT)]); if (x.check()) { py::dict d = py::extract<py::dict>(xformObj.attr("__dict__"))(); d.update(x()); } else { badState = true; } } openvdb::VersionId libVersion; uint32_t formatVersion = 0; if (!badState) { // Extract the serialization format version numbers. const int idx[3] = { STATE_MAJOR, STATE_MINOR, STATE_FORMAT }; uint32_t version[3] = { 0, 0, 0 }; for (int i = 0; i < 3 && !badState; ++i) { py::extract<uint32_t> x(state[idx[i]]); if (x.check()) version[i] = x(); else badState = true; } libVersion.first = version[0]; libVersion.second = version[1]; formatVersion = version[2]; } std::string serialized; if (!badState) { // Extract the sequence containing the serialized Transform. py::object bytesObj = state[int(STATE_XFORM)]; #if PY_MAJOR_VERSION >= 3 badState = true; if (PyBytes_Check(bytesObj.ptr())) { // Convert the "bytes" sequence to a byte string. char* buf = NULL; Py_ssize_t length = 0; if (-1 != PyBytes_AsStringAndSize(bytesObj.ptr(), &buf, &length)) { if (buf != NULL && length > 0) { serialized.assign(buf, buf + length); badState = false; } } } #else py::extract<std::string> x(bytesObj); if (x.check()) serialized = x(); else badState = true; #endif } if (badState) { PyErr_SetObject(PyExc_ValueError, #if PY_MAJOR_VERSION >= 3 ("expected (dict, int, int, int, bytes) tuple in call to __setstate__; found %s" #else ("expected (dict, int, int, int, str) tuple in call to __setstate__; found %s" #endif % stateObj.attr("__repr__")()).ptr()); py::throw_error_already_set(); } // Restore the internal state of the C++ object. std::istringstream istr(serialized, std::ios_base::binary); io::setVersion(istr, libVersion, formatVersion); xform->read(istr); } }; // struct PickleSuite } // namespace pyTransform void exportTransform(); void exportTransform() { py::enum_<math::Axis>("Axis") .value("X", math::X_AXIS) .value("Y", math::Y_AXIS) .value("Z", math::Z_AXIS); py::class_<math::Transform>("Transform", py::init<>()) .def("deepCopy", &math::Transform::copy, "deepCopy() -> Transform\n\n" "Return a copy of this transform.") /// @todo Should this also be __str__()? 
.def("info", &pyTransform::info, "info() -> str\n\n" "Return a string containing a description of this transform.\n") .def_pickle(pyTransform::PickleSuite()) .add_property("typeName", &math::Transform::mapType, "name of this transform's type") .add_property("isLinear", &math::Transform::isLinear, "True if this transform is linear") .def("rotate", &math::Transform::preRotate, (py::arg("radians"), py::arg("axis") = math::X_AXIS), "rotate(radians, axis)\n\n" "Accumulate a rotation about either Axis.X, Axis.Y or Axis.Z.") .def("translate", &math::Transform::postTranslate, py::arg("xyz"), "translate((x, y, z))\n\n" "Accumulate a translation.") .def("scale", &pyTransform::scale1, py::arg("s"), "scale(s)\n\n" "Accumulate a uniform scale.") .def("scale", &pyTransform::scale3, py::arg("sxyz"), "scale((sx, sy, sz))\n\n" "Accumulate a nonuniform scale.") .def("shear", &math::Transform::preShear, (py::arg("s"), py::arg("axis0"), py::arg("axis1")), "shear(s, axis0, axis1)\n\n" "Accumulate a shear (axis0 and axis1 are either\n" "Axis.X, Axis.Y or Axis.Z).") .def("voxelSize", &pyTransform::voxelDim0, "voxelSize() -> (dx, dy, dz)\n\n" "Return the size of voxels of the linear component of this transform.") .def("voxelSize", &pyTransform::voxelDim1, py::arg("xyz"), "voxelSize((x, y, z)) -> (dx, dy, dz)\n\n" "Return the size of the voxel at position (x, y, z).") .def("voxelVolume", &pyTransform::voxelVolume0, "voxelVolume() -> float\n\n" "Return the voxel volume of the linear component of this transform.") .def("voxelVolume", &pyTransform::voxelVolume1, py::arg("xyz"), "voxelVolume((x, y, z)) -> float\n\n" "Return the voxel volume at position (x, y, z).") .def("indexToWorld", &pyTransform::indexToWorld, py::arg("xyz"), "indexToWorld((x, y, z)) -> (x', y', z')\n\n" "Apply this transformation to the given coordinates.") .def("worldToIndex", &pyTransform::worldToIndex, py::arg("xyz"), "worldToIndex((x, y, z)) -> (x', y', z')\n\n" "Apply the inverse of this transformation to the given coordinates.") .def("worldToIndexCellCentered", &pyTransform::worldToIndexCellCentered, py::arg("xyz"), "worldToIndexCellCentered((x, y, z)) -> (i, j, k)\n\n" "Apply the inverse of this transformation to the given coordinates\n" "and round the result to the nearest integer coordinates.") .def("worldToIndexNodeCentered", &pyTransform::worldToIndexNodeCentered, py::arg("xyz"), "worldToIndexNodeCentered((x, y, z)) -> (i, j, k)\n\n" "Apply the inverse of this transformation to the given coordinates\n" "and round the result down to the nearest integer coordinates.") // Allow Transforms to be compared for equality and inequality. 
.def(py::self == py::other<math::Transform>()) .def(py::self != py::other<math::Transform>()) ; py::def("createLinearTransform", &pyTransform::createLinearFromMat, py::arg("matrix"), "createLinearTransform(matrix) -> Transform\n\n" "Create a new linear transform from a 4 x 4 matrix given as a sequence\n" "of the form [[a, b, c, d], [e, f, g, h], [i, j, k, l], [m, n, o, p]],\n" "where [m, n, o, p] is the translation component."); py::def("createLinearTransform", &pyTransform::createLinearFromDim, (py::arg("voxelSize") = 1.0), "createLinearTransform(voxelSize) -> Transform\n\n" "Create a new linear transform with the given uniform voxel size."); py::def("createFrustumTransform", &pyTransform::createFrustum, (py::arg("xyzMin"), py::arg("xyzMax"), py::arg("taper"), py::arg("depth"), py::arg("voxelSize") = 1.0), "createFrustumTransform(xyzMin, xyzMax, taper, depth, voxelSize) -> Transform\n\n" "Create a new frustum transform with unit bounding box (xyzMin, xyzMax)\n" "and the given taper, depth and uniform voxel size."); // allows Transform::Ptr Grid::getTransform() to work py::register_ptr_to_python<math::Transform::Ptr>(); }
12,443
C++
37.055046
98
0.566021
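A Python sketch of the Transform bindings above (module name assumed). It accumulates a few operations on a linear transform, maps a point both ways, and builds the matrix and frustum variants.

import math
import pyopenvdb as vdb  # assumed module name

xform = vdb.createLinearTransform(voxelSize=0.5)
xform.rotate(math.pi / 4, vdb.Axis.Z)
xform.translate((1.0, 2.0, 3.0))
xform.scale(2.0)
print(xform.typeName, xform.isLinear, xform.voxelSize(), xform.voxelVolume())

p = xform.worldToIndex((1.0, 0.0, 0.0))
print(xform.indexToWorld(p), xform.worldToIndexCellCentered((1.0, 0.0, 0.0)))

# From a 4 x 4 row-major sequence ([m, n, o, p] is the translation row) or as a frustum.
ident = vdb.createLinearTransform([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0],
                                   [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
frustum = vdb.createFrustumTransform((0, 0, 0), (100, 100, 100),
                                     taper=0.5, depth=100.0, voxelSize=0.25)
print(ident == xform, xform.deepCopy() == xform)
print(frustum.info())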
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyPointGrid.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file pyPointGrid.cc /// @brief Boost.Python wrappers for point openvdb::Grid types #include <boost/python.hpp> #include "pyGrid.h" namespace py = boost::python; void exportPointGrid(); void exportPointGrid() { #ifdef PY_OPENVDB_WRAP_ALL_GRID_TYPES pyGrid::exportGrid<points::PointDataGrid>(); #endif }
406
C++
15.958333
62
0.729064
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyutil.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_PYUTIL_HAS_BEEN_INCLUDED #define OPENVDB_PYUTIL_HAS_BEEN_INCLUDED #include "openvdb/openvdb.h" #include "openvdb/points/PointDataGrid.h" #include <boost/python.hpp> #include <tbb/mutex.h> #include <map> // for std::pair #include <string> #include <sstream> namespace pyutil { /// Return a new @c boost::python::object that borrows (i.e., doesn't /// take over ownership of) the given @c PyObject's reference. inline boost::python::object pyBorrow(PyObject* obj) { return boost::python::object(boost::python::handle<>(boost::python::borrowed(obj))); } /// @brief Given a @c PyObject that implements the sequence protocol /// (e.g., a @c PyListObject), return the value of type @c ValueT /// at index @a idx in the sequence. /// @details Raise a Python @c TypeError exception if the value /// at index @a idx is not convertible to type @c ValueT. template<typename ValueT> inline ValueT getSequenceItem(PyObject* obj, int idx) { return boost::python::extract<ValueT>(pyBorrow(obj)[idx]); } //////////////////////////////////////// template<class GridType> struct GridTraitsBase { /// @brief Return the name of the Python class that wraps this grid type /// (e.g., "FloatGrid" for openvdb::FloatGrid). /// /// @note This name is not the same as GridType::type(). /// The latter returns a name like "Tree_float_5_4_3". static const char* name(); /// Return the name of this grid type's value type ("bool", "float", "vec3s", etc.). static const char* valueTypeName() { return openvdb::typeNameAsString<typename GridType::ValueType>(); } /// @brief Return a description of this grid type. /// /// @note This name is generated at runtime for each call to descr(). static const std::string descr() { return std::string("OpenVDB grid with voxels of type ") + valueTypeName(); } }; // struct GridTraitsBase template<class GridType> struct GridTraits: public GridTraitsBase<GridType> { }; /// Map a grid type to a traits class that derives from GridTraitsBase /// and that defines a name() method. #define GRID_TRAITS(_typ, _name) \ template<> struct GridTraits<_typ>: public GridTraitsBase<_typ> { \ static const char* name() { return _name; } \ } GRID_TRAITS(openvdb::FloatGrid, "FloatGrid"); GRID_TRAITS(openvdb::Vec3SGrid, "Vec3SGrid"); GRID_TRAITS(openvdb::BoolGrid, "BoolGrid"); #ifdef PY_OPENVDB_WRAP_ALL_GRID_TYPES GRID_TRAITS(openvdb::DoubleGrid, "DoubleGrid"); GRID_TRAITS(openvdb::Int32Grid, "Int32Grid"); GRID_TRAITS(openvdb::Int64Grid, "Int64Grid"); GRID_TRAITS(openvdb::Vec3IGrid, "Vec3IGrid"); GRID_TRAITS(openvdb::Vec3DGrid, "Vec3DGrid"); GRID_TRAITS(openvdb::points::PointDataGrid, "PointDataGrid"); #endif #undef GRID_TRAITS //////////////////////////////////////// // Note that the elements are pointers to C strings (char**), because // boost::python::class_::def_readonly() requires a pointer to a static member. 
typedef std::pair<const char* const*, const char* const*> CStringPair; /// @brief Enum-like mapping from string keys to string values, with characteristics /// of both (Python) classes and class instances (as well as NamedTuples) /// @details /// - (@e key, @e value) pairs can be accessed as class attributes (\"<tt>MyClass.MY_KEY</tt>\") /// - (@e key, @e value) pairs can be accessed via dict lookup on instances /// (\"<tt>MyClass()['MY_KEY']</tt>\") /// - (@e key, @e value) pairs can't be modified or reassigned /// - instances are iterable (\"<tt>for key in MyClass(): ...</tt>\") /// /// A @c Descr class must implement the following interface: /// @code /// struct MyDescr /// { /// // Return the Python name for the enum class. /// static const char* name(); /// // Return the docstring for the enum class. /// static const char* doc(); /// // Return the ith (key, value) pair, in the form of /// // a pair of *pointers* to C strings /// static CStringPair item(int i); /// }; /// @endcode template<typename Descr> struct StringEnum { /// Return the (key, value) map as a Python dict. static boost::python::dict items() { static tbb::mutex sMutex; static boost::python::dict itemDict; if (!itemDict) { // The first time this function is called, populate // the static dict with (key, value) pairs. tbb::mutex::scoped_lock lock(sMutex); if (!itemDict) { for (int i = 0; ; ++i) { const CStringPair item = Descr::item(i); OPENVDB_START_THREADSAFE_STATIC_WRITE if (item.first) { itemDict[boost::python::str(*item.first)] = boost::python::str(*item.second); } OPENVDB_FINISH_THREADSAFE_STATIC_WRITE else break; } } } return itemDict; } /// Return the keys as a Python list of strings. static boost::python::object keys() { return items().attr("keys")(); } /// Return the number of keys as a Python int. boost::python::object numItems() const { return boost::python::object(boost::python::len(items())); } /// Return the value (as a Python string) for the given key. boost::python::object getItem(boost::python::object keyObj) const { return items()[keyObj]; } /// Return a Python iterator over the keys. boost::python::object iter() const { return items().attr("__iter__")(); } /// Register this enum. static void wrap() { boost::python::class_<StringEnum> cls( /*classname=*/Descr::name(), /*docstring=*/Descr::doc()); cls.def("keys", &StringEnum::keys, "keys() -> list") .staticmethod("keys") .def("__len__", &StringEnum::numItems, "__len__() -> int") .def("__iter__", &StringEnum::iter, "__iter__() -> iterator") .def("__getitem__", &StringEnum::getItem, "__getitem__(str) -> str") /*end*/; // Add a read-only, class-level attribute for each (key, value) pair. for (int i = 0; ; ++i) { const CStringPair item = Descr::item(i); if (item.first) cls.def_readonly(*item.first, item.second); else break; } } }; //////////////////////////////////////// /// @brief From the given Python object, extract a value of type @c T. /// /// If the object cannot be converted to type @c T, raise a @c TypeError with a more /// Pythonic error message (incorporating the provided class and function names, etc.) /// than the one that would be generated by boost::python::extract(), e.g., /// "TypeError: expected float, found str as argument 2 to FloatGrid.prune()" instead of /// "TypeError: No registered converter was able to produce a C++ rvalue of type /// boost::shared_ptr<openvdb::Grid<openvdb::tree::Tree<openvdb::tree::RootNode<...". 
template<typename T> inline T extractArg( boost::python::object obj, const char* functionName, const char* className = nullptr, int argIdx = 0, // args are numbered starting from 1 const char* expectedType = nullptr) { boost::python::extract<T> val(obj); if (!val.check()) { // Generate an error string of the form // "expected <expectedType>, found <actualType> as argument <argIdx> // to <className>.<functionName>()", where <argIdx> and <className> // are optional. std::ostringstream os; os << "expected "; if (expectedType) os << expectedType; else os << openvdb::typeNameAsString<T>(); const std::string actualType = boost::python::extract<std::string>(obj.attr("__class__").attr("__name__")); os << ", found " << actualType << " as argument"; if (argIdx > 0) os << " " << argIdx; os << " to "; if (className) os << className << "."; os << functionName << "()"; PyErr_SetString(PyExc_TypeError, os.str().c_str()); boost::python::throw_error_already_set(); } return val(); } //////////////////////////////////////// /// Return str(val) for the given value. template<typename T> inline std::string str(const T& val) { return boost::python::extract<std::string>(boost::python::str(val)); } /// Return the name of the given Python object's class. inline std::string className(boost::python::object obj) { std::string s = boost::python::extract<std::string>( obj.attr("__class__").attr("__name__")); return s; } } // namespace pyutil #endif // OPENVDB_PYUTIL_HAS_BEEN_INCLUDED
8,741
C
33.148437
97
0.611715
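// Illustrative sketch (not part of pyutil.h above): one way the pyutil::StringEnum
// helper documented in that header might be instantiated. The GridClassDescr name,
// its keys, and the registration one-liner are hypothetical examples chosen for this
// sketch, not code reproduced from the repository.
#include "pyutil.h"

struct GridClassDescr
{
    static const char* name() { return "GridClass"; }
    static const char* doc() { return "String constants for grid classes"; }
    static pyutil::CStringPair item(int i)
    {
        static const int sCount = 2;
        static const char* const sStrings[sCount][2] = {
            { "LEVEL_SET",  "level set" },
            { "FOG_VOLUME", "fog volume" }
        };
        // Past the last item, return a pair of null pointers so that
        // StringEnum::items() and StringEnum::wrap() know when to stop iterating.
        if (i >= 0 && i < sCount) return pyutil::CStringPair(&sStrings[i][0], &sStrings[i][1]);
        return pyutil::CStringPair(nullptr, nullptr);
    }
};

// Registering the enum from a module initialization function is then a one-liner:
//     pyutil::StringEnum<GridClassDescr>::wrap();
// After that, Python code can use GridClass.LEVEL_SET, GridClass()["FOG_VOLUME"],
// and iterate over GridClass() as described in the StringEnum docstring above.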
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyVec3Grid.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file pyVec3Grid.cc /// @brief Boost.Python wrappers for vector-valued openvdb::Grid types #include "pyGrid.h" void exportVec3Grid(); void exportVec3Grid() { pyGrid::exportGrid<Vec3SGrid>(); #ifdef PY_OPENVDB_WRAP_ALL_GRID_TYPES pyGrid::exportGrid<Vec3IGrid>(); pyGrid::exportGrid<Vec3DGrid>(); #endif }
413
C++
17.818181
70
0.719128
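// Illustrative sketch (not part of pyVec3Grid.cc above): exportVec3Grid() only has an
// effect when something calls it while a Boost.Python module is being initialized.
// The module name "vdbsketch" and the surrounding structure are assumptions made for
// this sketch; the project's real module source is not shown in this excerpt.
#include <boost/python.hpp>
#include "openvdb/openvdb.h"

void exportVec3Grid(); // defined in pyVec3Grid.cc

BOOST_PYTHON_MODULE(vdbsketch)
{
    // Register grid, metadata, and transform types with OpenVDB before
    // exposing any of them to Python.
    openvdb::initialize();

    // Register the Boost.Python wrappers for the vector-valued grid types.
    // A complete module would also export the scalar grid types, transforms,
    // metadata converters, file I/O functions, and so on.
    exportVec3Grid();
}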
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/pyGrid.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file pyGrid.h /// @author Peter Cucka /// @brief Boost.Python wrapper for openvdb::Grid #ifndef OPENVDB_PYGRID_HAS_BEEN_INCLUDED #define OPENVDB_PYGRID_HAS_BEEN_INCLUDED #include <boost/python.hpp> #ifdef PY_OPENVDB_USE_NUMPY // boost::python::numeric was replaced with boost::python::numpy in Boost 1.65. // (boost::python::numpy requires NumPy 1.7 or later.) #include <boost/python/numpy.hpp> //#include <arrayobject.h> // for PyArray_Descr (see pyGrid::arrayTypeId()) #define PY_OPENVDB_USE_BOOST_PYTHON_NUMPY #include "openvdb/tools/MeshToVolume.h" #include "openvdb/tools/VolumeToMesh.h" // for tools::volumeToMesh() #endif #include "openvdb/openvdb.h" #include "openvdb/io/Stream.h" #include "openvdb/math/Math.h" // for math::isExactlyEqual() #include "openvdb/points/PointDataGrid.h" #include "openvdb/tools/LevelSetSphere.h" #include "openvdb/tools/Dense.h" #include "openvdb/tools/ChangeBackground.h" #include "openvdb/tools/Prune.h" #include "openvdb/tools/SignedFloodFill.h" #include "pyutil.h" #include "pyAccessor.h" // for pyAccessor::AccessorWrap #include "pyopenvdb.h" #include <algorithm> // for std::max() #include <cstring> // for memcpy() #include <iostream> #include <memory> #include <sstream> #include <string> #include <vector> namespace py = boost::python; #ifdef __clang__ // This is a private header, so it's OK to include a "using namespace" directive. #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wheader-hygiene" #endif using namespace openvdb::OPENVDB_VERSION_NAME; #ifdef __clang__ #pragma clang diagnostic pop #endif namespace pyopenvdb { inline py::object getPyObjectFromGrid(const GridBase::Ptr& grid) { if (!grid) return py::object(); #define CONVERT_BASE_TO_GRID(GridType, grid) \ if (grid->isType<GridType>()) { \ return py::object(gridPtrCast<GridType>(grid)); \ } CONVERT_BASE_TO_GRID(FloatGrid, grid); CONVERT_BASE_TO_GRID(Vec3SGrid, grid); CONVERT_BASE_TO_GRID(BoolGrid, grid); #ifdef PY_OPENVDB_WRAP_ALL_GRID_TYPES CONVERT_BASE_TO_GRID(DoubleGrid, grid); CONVERT_BASE_TO_GRID(Int32Grid, grid); CONVERT_BASE_TO_GRID(Int64Grid, grid); CONVERT_BASE_TO_GRID(Vec3IGrid, grid); CONVERT_BASE_TO_GRID(Vec3DGrid, grid); CONVERT_BASE_TO_GRID(points::PointDataGrid, grid); #endif #undef CONVERT_BASE_TO_GRID OPENVDB_THROW(TypeError, grid->type() + " is not a supported OpenVDB grid type"); } inline openvdb::GridBase::Ptr getGridFromPyObject(const boost::python::object& gridObj) { if (!gridObj) return GridBase::Ptr(); #define CONVERT_GRID_TO_BASE(GridPtrType) \ { \ py::extract<GridPtrType> x(gridObj); \ if (x.check()) return x(); \ } // Extract a grid pointer of one of the supported types // from the input object, then cast it to a base pointer. 
CONVERT_GRID_TO_BASE(FloatGrid::Ptr); CONVERT_GRID_TO_BASE(Vec3SGrid::Ptr); CONVERT_GRID_TO_BASE(BoolGrid::Ptr); #ifdef PY_OPENVDB_WRAP_ALL_GRID_TYPES CONVERT_GRID_TO_BASE(DoubleGrid::Ptr); CONVERT_GRID_TO_BASE(Int32Grid::Ptr); CONVERT_GRID_TO_BASE(Int64Grid::Ptr); CONVERT_GRID_TO_BASE(Vec3IGrid::Ptr); CONVERT_GRID_TO_BASE(Vec3DGrid::Ptr); CONVERT_GRID_TO_BASE(points::PointDataGrid::Ptr); #endif #undef CONVERT_GRID_TO_BASE OPENVDB_THROW(TypeError, pyutil::className(gridObj) + " is not a supported OpenVDB grid type"); } inline openvdb::GridBase::Ptr getGridFromPyObject(PyObject* gridObj) { return getGridFromPyObject(pyutil::pyBorrow(gridObj)); } } // namespace pyopenvdb //////////////////////////////////////// namespace pyGrid { inline py::object getGridFromGridBase(GridBase::Ptr grid) { py::object obj; try { obj = pyopenvdb::getPyObjectFromGrid(grid); } catch (openvdb::TypeError& e) { PyErr_SetString(PyExc_TypeError, e.what()); py::throw_error_already_set(); return py::object(); } return obj; } /// GridBase is not exposed in Python because it isn't really needed /// (and because exposing it would be complicated, requiring wrapping /// pure virtual functions like GridBase::baseTree()), but there are /// a few cases where, internally, we need to extract a GridBase::Ptr /// from a py::object. Hence this converter. inline GridBase::Ptr getGridBaseFromGrid(py::object gridObj) { GridBase::Ptr grid; try { grid = pyopenvdb::getGridFromPyObject(gridObj); } catch (openvdb::TypeError& e) { PyErr_SetString(PyExc_TypeError, e.what()); py::throw_error_already_set(); return GridBase::Ptr(); } return grid; } //////////////////////////////////////// /// Variant of pyutil::extractArg() that uses the class name of a given grid type template<typename GridType, typename T> inline T extractValueArg( py::object obj, const char* functionName, int argIdx = 0, // args are numbered starting from 1 const char* expectedType = nullptr) { return pyutil::extractArg<T>(obj, functionName, pyutil::GridTraits<GridType>::name(), argIdx, expectedType); } /// @brief Variant of pyutil::extractArg() that uses the class name /// and @c ValueType of a given grid type template<typename GridType> inline typename GridType::ValueType extractValueArg( py::object obj, const char* functionName, int argIdx = 0, // args are numbered starting from 1 const char* expectedType = nullptr) { return extractValueArg<GridType, typename GridType::ValueType>( obj, functionName, argIdx, expectedType); } //////////////////////////////////////// template<typename GridType> inline typename GridType::Ptr copyGrid(GridType& grid) { return grid.copy(); } template<typename GridType> inline bool sharesWith(const GridType& grid, py::object other) { py::extract<typename GridType::Ptr> x(other); if (x.check()) { typename GridType::ConstPtr otherGrid = x(); return (&otherGrid->tree() == &grid.tree()); } return false; } //////////////////////////////////////// template<typename GridType> inline std::string getValueType() { return pyutil::GridTraits<GridType>::valueTypeName(); } template<typename GridType> inline typename GridType::ValueType getZeroValue() { return openvdb::zeroVal<typename GridType::ValueType>(); } template<typename GridType> inline typename GridType::ValueType getOneValue() { using ValueT = typename GridType::ValueType; return ValueT(openvdb::zeroVal<ValueT>() + 1); } template<typename GridType> inline bool notEmpty(const GridType& grid) { return !grid.empty(); } template<typename GridType> inline typename GridType::ValueType 
getGridBackground(const GridType& grid) { return grid.background(); } template<typename GridType> inline void setGridBackground(GridType& grid, py::object obj) { tools::changeBackground(grid.tree(), extractValueArg<GridType>(obj, "setBackground")); } inline void setGridName(GridBase::Ptr grid, py::object strObj) { if (grid) { if (!strObj) { // if name is None grid->removeMeta(GridBase::META_GRID_NAME); } else { const std::string name = pyutil::extractArg<std::string>( strObj, "setName", /*className=*/nullptr, /*argIdx=*/1, "str"); grid->setName(name); } } } inline void setGridCreator(GridBase::Ptr grid, py::object strObj) { if (grid) { if (!strObj) { // if name is None grid->removeMeta(GridBase::META_GRID_CREATOR); } else { const std::string name = pyutil::extractArg<std::string>( strObj, "setCreator", /*className=*/nullptr, /*argIdx=*/1, "str"); grid->setCreator(name); } } } inline std::string getGridClass(GridBase::ConstPtr grid) { return GridBase::gridClassToString(grid->getGridClass()); } inline void setGridClass(GridBase::Ptr grid, py::object strObj) { if (!strObj) { grid->clearGridClass(); } else { const std::string name = pyutil::extractArg<std::string>( strObj, "setGridClass", /*className=*/nullptr, /*argIdx=*/1, "str"); grid->setGridClass(GridBase::stringToGridClass(name)); } } inline std::string getVecType(GridBase::ConstPtr grid) { return GridBase::vecTypeToString(grid->getVectorType()); } inline void setVecType(GridBase::Ptr grid, py::object strObj) { if (!strObj) { grid->clearVectorType(); } else { const std::string name = pyutil::extractArg<std::string>( strObj, "setVectorType", /*className=*/nullptr, /*argIdx=*/1, "str"); grid->setVectorType(GridBase::stringToVecType(name)); } } inline std::string gridInfo(GridBase::ConstPtr grid, int verbosity) { std::ostringstream ostr; grid->print(ostr, std::max<int>(1, verbosity)); return ostr.str(); } //////////////////////////////////////// inline void setGridTransform(GridBase::Ptr grid, py::object xformObj) { if (grid) { if (math::Transform::Ptr xform = pyutil::extractArg<math::Transform::Ptr>( xformObj, "setTransform", /*className=*/nullptr, /*argIdx=*/1, "Transform")) { grid->setTransform(xform); } else { PyErr_SetString(PyExc_ValueError, "null transform"); py::throw_error_already_set(); } } } //////////////////////////////////////// // Helper class to construct a pyAccessor::AccessorWrap for a given grid, // permitting partial specialization for const vs. non-const grids template<typename GridType> struct AccessorHelper { using Wrapper = typename pyAccessor::AccessorWrap<GridType>; static Wrapper wrap(typename GridType::Ptr grid) { if (!grid) { PyErr_SetString(PyExc_ValueError, "null grid"); py::throw_error_already_set(); } return Wrapper(grid); } }; // Specialization for const grids template<typename GridType> struct AccessorHelper<const GridType> { using Wrapper = typename pyAccessor::AccessorWrap<const GridType>; static Wrapper wrap(typename GridType::ConstPtr grid) { if (!grid) { PyErr_SetString(PyExc_ValueError, "null grid"); py::throw_error_already_set(); } return Wrapper(grid); } }; /// Return a non-const accessor (wrapped in a pyAccessor::AccessorWrap) for the given grid. template<typename GridType> inline typename AccessorHelper<GridType>::Wrapper getAccessor(typename GridType::Ptr grid) { return AccessorHelper<GridType>::wrap(grid); } /// @brief Return a const accessor (wrapped in a pyAccessor::AccessorWrap) for the given grid. 
/// @internal Note that the grid pointer is non-const, even though the grid is /// treated as const. This is because we don't expose a const grid type in Python. template<typename GridType> inline typename AccessorHelper<const GridType>::Wrapper getConstAccessor(typename GridType::Ptr grid) { return AccessorHelper<const GridType>::wrap(grid); } //////////////////////////////////////// template<typename GridType> inline py::tuple evalLeafBoundingBox(const GridType& grid) { CoordBBox bbox; grid.tree().evalLeafBoundingBox(bbox); return py::make_tuple(bbox.min(), bbox.max()); } template<typename GridType> inline Coord evalLeafDim(const GridType& grid) { Coord dim; grid.tree().evalLeafDim(dim); return dim; } template<typename GridType> inline py::tuple evalActiveVoxelBoundingBox(const GridType& grid) { CoordBBox bbox = grid.evalActiveVoxelBoundingBox(); return py::make_tuple(bbox.min(), bbox.max()); } template<typename GridType> inline py::tuple getNodeLog2Dims(const GridType& grid) { std::vector<Index> dims; grid.tree().getNodeLog2Dims(dims); py::list lst; for (size_t i = 0, N = dims.size(); i < N; ++i) { lst.append(dims[i]); } return py::tuple(lst); } template<typename GridType> inline Index treeDepth(const GridType& grid) { return grid.tree().treeDepth(); } template<typename GridType> inline Index32 leafCount(const GridType& grid) { return grid.tree().leafCount(); } template<typename GridType> inline Index32 nonLeafCount(const GridType& grid) { return grid.tree().nonLeafCount(); } template<typename GridType> inline Index64 activeLeafVoxelCount(const GridType& grid) { return grid.tree().activeLeafVoxelCount(); } template<typename GridType> inline py::tuple evalMinMax(const GridType& grid) { typename GridType::ValueType vmin, vmax; grid.tree().evalMinMax(vmin, vmax); return py::make_tuple(vmin, vmax); } template<typename GridType> inline py::tuple getIndexRange(const GridType& grid) { CoordBBox bbox; grid.tree().getIndexRange(bbox); return py::make_tuple(bbox.min(), bbox.max()); } //template<typename GridType> //inline void //expandIndexRange(GridType& grid, py::object coordObj) //{ // Coord xyz = extractValueArg<GridType, Coord>( // coordObj, "expand", 0, "tuple(int, int, int)"); // grid.tree().expand(xyz); //} //////////////////////////////////////// inline py::dict getAllMetadata(GridBase::ConstPtr grid) { if (grid) return py::dict(static_cast<const MetaMap&>(*grid)); return py::dict(); } inline void replaceAllMetadata(GridBase::Ptr grid, const MetaMap& metadata) { if (grid) { grid->clearMetadata(); for (MetaMap::ConstMetaIterator it = metadata.beginMeta(); it != metadata.endMeta(); ++it) { if (it->second) grid->insertMeta(it->first, *it->second); } } } inline void updateMetadata(GridBase::Ptr grid, const MetaMap& metadata) { if (grid) { for (MetaMap::ConstMetaIterator it = metadata.beginMeta(); it != metadata.endMeta(); ++it) { if (it->second) { grid->removeMeta(it->first); grid->insertMeta(it->first, *it->second); } } } } inline py::dict getStatsMetadata(GridBase::ConstPtr grid) { MetaMap::ConstPtr metadata; if (grid) metadata = grid->getStatsMetadata(); if (metadata) return py::dict(*metadata); return py::dict(); } inline py::object getMetadataKeys(GridBase::ConstPtr grid) { if (grid) { #if PY_MAJOR_VERSION >= 3 // Return an iterator over the "keys" view of a dict. 
return py::import("builtins").attr("iter")( py::dict(static_cast<const MetaMap&>(*grid)).keys()); #else return py::dict(static_cast<const MetaMap&>(*grid)).iterkeys(); #endif } return py::object(); } inline py::object getMetadata(GridBase::ConstPtr grid, py::object nameObj) { if (!grid) return py::object(); const std::string name = pyutil::extractArg<std::string>( nameObj, "__getitem__", nullptr, /*argIdx=*/1, "str"); Metadata::ConstPtr metadata = (*grid)[name]; if (!metadata) { PyErr_SetString(PyExc_KeyError, name.c_str()); py::throw_error_already_set(); } // Use the MetaMap-to-dict converter (see pyOpenVDBModule.cc) to convert // the Metadata value to a Python object of the appropriate type. /// @todo Would be more efficient to convert the Metadata object /// directly to a Python object. MetaMap metamap; metamap.insertMeta(name, *metadata); return py::dict(metamap)[name]; } inline void setMetadata(GridBase::Ptr grid, py::object nameObj, py::object valueObj) { if (!grid) return; const std::string name = pyutil::extractArg<std::string>( nameObj, "__setitem__", nullptr, /*argIdx=*/1, "str"); // Insert the Python object into a Python dict, then use the dict-to-MetaMap // converter (see pyOpenVDBModule.cc) to convert the dict to a MetaMap // containing a Metadata object of the appropriate type. /// @todo Would be more efficient to convert the Python object /// directly to a Metadata object. py::dict dictObj; dictObj[name] = valueObj; MetaMap metamap = py::extract<MetaMap>(dictObj); if (Metadata::Ptr metadata = metamap[name]) { grid->removeMeta(name); grid->insertMeta(name, *metadata); } } inline void removeMetadata(GridBase::Ptr grid, const std::string& name) { if (grid) { Metadata::Ptr metadata = (*grid)[name]; if (!metadata) { PyErr_SetString(PyExc_KeyError, name.c_str()); py::throw_error_already_set(); } grid->removeMeta(name); } } inline bool hasMetadata(GridBase::ConstPtr grid, const std::string& name) { if (grid) return ((*grid)[name].get() != nullptr); return false; } //////////////////////////////////////// template<typename GridType> inline void prune(GridType& grid, py::object tolerance) { tools::prune(grid.tree(), extractValueArg<GridType>(tolerance, "prune")); } template<typename GridType> inline void pruneInactive(GridType& grid, py::object valObj) { if (valObj.is_none()) { tools::pruneInactive(grid.tree()); } else { tools::pruneInactiveWithValue( grid.tree(), extractValueArg<GridType>(valObj, "pruneInactive")); } } template<typename GridType> inline void fill(GridType& grid, py::object minObj, py::object maxObj, py::object valObj, bool active) { const Coord bmin = extractValueArg<GridType, Coord>(minObj, "fill", 1, "tuple(int, int, int)"), bmax = extractValueArg<GridType, Coord>(maxObj, "fill", 2, "tuple(int, int, int)"); grid.fill(CoordBBox(bmin, bmax), extractValueArg<GridType>(valObj, "fill", 3), active); } template<typename GridType> inline void signedFloodFill(GridType& grid) { tools::signedFloodFill(grid.tree()); } //////////////////////////////////////// #ifndef PY_OPENVDB_USE_NUMPY template<typename GridType> inline void copyFromArray(GridType&, const py::object&, py::object, py::object) { PyErr_SetString(PyExc_NotImplementedError, "this module was built without NumPy support"); boost::python::throw_error_already_set(); } template<typename GridType> inline void copyToArray(GridType&, const py::object&, py::object) { PyErr_SetString(PyExc_NotImplementedError, "this module was built without NumPy support"); boost::python::throw_error_already_set(); } #else // if 
defined(PY_OPENVDB_USE_NUMPY) using ArrayDimVec = std::vector<size_t>; #ifdef PY_OPENVDB_USE_BOOST_PYTHON_NUMPY // ID numbers for supported value types enum class DtId { NONE, FLOAT, DOUBLE, BOOL, INT16, INT32, INT64, UINT32, UINT64/*, HALF*/ }; using NumPyArrayType = py::numpy::ndarray; #else // if !defined PY_OPENVDB_USE_BOOST_PYTHON_NUMPY // NumPy type numbers for supported value types enum class DtId { NONE = NPY_NOTYPE, FLOAT = NPY_FLOAT, DOUBLE = NPY_DOUBLE, BOOL = NPY_BOOL, INT16 = NPY_INT16, INT32 = NPY_INT32, INT64 = NPY_INT64, UINT32 = NPY_UINT32, UINT64 = NPY_UINT64, //HALF = NPY_HALF }; using NumPyArrayType = py::numeric::array; #endif // PY_OPENVDB_USE_BOOST_PYTHON_NUMPY template<DtId TypeId> struct NumPyToCpp {}; template<> struct NumPyToCpp<DtId::FLOAT> { using type = float; }; template<> struct NumPyToCpp<DtId::DOUBLE> { using type = double; }; template<> struct NumPyToCpp<DtId::BOOL> { using type = bool; }; template<> struct NumPyToCpp<DtId::INT16> { using type = Int16; }; template<> struct NumPyToCpp<DtId::INT32> { using type = Int32; }; template<> struct NumPyToCpp<DtId::INT64> { using type = Int64; }; template<> struct NumPyToCpp<DtId::UINT32> { using type = Index32; }; template<> struct NumPyToCpp<DtId::UINT64> { using type = Index64; }; //template<> struct NumPyToCpp<DtId::HALF> { using type = half; }; #if 0 template<typename T> struct CppToNumPy { static const DtId typeId = DtId::NONE; }; template<> struct CppToNumPy<float> { static const DtId typeId = DtId::FLOAT; }; template<> struct CppToNumPy<double> { static const DtId typeId = DtId::DOUBLE; }; template<> struct CppToNumPy<bool> { static const DtId typeId = DtId::BOOL; }; template<> struct CppToNumPy<Int16> { static const DtId typeId = DtId::INT16; }; template<> struct CppToNumPy<Int32> { static const DtId typeId = DtId::INT32; }; template<> struct CppToNumPy<Int64> { static const DtId typeId = DtId::INT64; }; template<> struct CppToNumPy<Index32> { static const DtId typeId = DtId::UINT32; }; template<> struct CppToNumPy<Index64> { static const DtId typeId = DtId::UINT64; }; //template<> struct CppToNumPy<half> { static const DtId typeId = DtId::HALF; }; #endif #ifdef PY_OPENVDB_USE_BOOST_PYTHON_NUMPY // Return the ID number of the given NumPy array's data type. /// @todo Revisit this if and when py::numpy::dtype ever provides a type number accessor. inline DtId arrayTypeId(const py::numpy::ndarray& arrayObj) { namespace np = py::numpy; const auto dtype = arrayObj.get_dtype(); #if 0 // More efficient than np::equivalent(), but requires NumPy headers. 
if (const auto* descr = reinterpret_cast<const PyArray_Descr*>(dtype.ptr())) { const auto typeId = static_cast<DtId>(descr->type_num); switch (typeId) { case DtId::NONE: break; case DtId::FLOAT: case DtId::DOUBLE: case DtId::BOOL: case DtId::INT16: case DtId::INT32: case DtId::INT64: case DtId::UINT32: case DtId::UINT64: return typeId; } throw openvdb::TypeError{}; } #else if (np::equivalent(dtype, np::dtype::get_builtin<float>())) return DtId::FLOAT; if (np::equivalent(dtype, np::dtype::get_builtin<double>())) return DtId::DOUBLE; if (np::equivalent(dtype, np::dtype::get_builtin<bool>())) return DtId::BOOL; if (np::equivalent(dtype, np::dtype::get_builtin<Int16>())) return DtId::INT16; if (np::equivalent(dtype, np::dtype::get_builtin<Int32>())) return DtId::INT32; if (np::equivalent(dtype, np::dtype::get_builtin<Int64>())) return DtId::INT64; if (np::equivalent(dtype, np::dtype::get_builtin<Index32>())) return DtId::UINT32; if (np::equivalent(dtype, np::dtype::get_builtin<Index64>())) return DtId::UINT64; //if (np::equivalent(dtype, np::dtype::get_builtin<half>())) return DtId::HALF; #endif throw openvdb::TypeError{}; } // Return a string description of the given NumPy array's data type. inline std::string arrayTypeName(const py::numpy::ndarray& arrayObj) { return pyutil::str(arrayObj.get_dtype()); } // Return the dimensions of the given NumPy array. inline ArrayDimVec arrayDimensions(const py::numpy::ndarray& arrayObj) { ArrayDimVec dims; for (int i = 0, N = arrayObj.get_nd(); i < N; ++i) { dims.push_back(static_cast<size_t>(arrayObj.shape(i))); } return dims; } #else // !defined PY_OPENVDB_USE_BOOST_PYTHON_NUMPY // Return the ID number of the given NumPy array's data type. inline DtId arrayTypeId(const py::numeric::array& arrayObj) { const PyArray_Descr* dtype = nullptr; if (PyArrayObject* arrayObjPtr = reinterpret_cast<PyArrayObject*>(arrayObj.ptr())) { dtype = PyArray_DESCR(arrayObjPtr); } if (dtype) return static_cast<DtId>(dtype->type_num); throw openvdb::TypeError{}; } // Return a string description of the given NumPy array's data type. inline std::string arrayTypeName(const py::numeric::array& arrayObj) { std::string name; if (PyObject_HasAttrString(arrayObj.ptr(), "dtype")) { name = pyutil::str(arrayObj.attr("dtype")); } else { name = "'_'"; PyArrayObject* arrayObjPtr = reinterpret_cast<PyArrayObject*>(arrayObj.ptr()); name[1] = PyArray_DESCR(arrayObjPtr)->kind; } return name; } // Return the dimensions of the given NumPy array. inline ArrayDimVec arrayDimensions(const py::numeric::array& arrayObj) { const py::object shape = arrayObj.attr("shape"); ArrayDimVec dims; for (long i = 0, N = py::len(shape); i < N; ++i) { dims.push_back(py::extract<size_t>(shape[i])); } return dims; } inline py::object copyNumPyArray(PyArrayObject* arrayObj, NPY_ORDER order = NPY_CORDER) { #ifdef __GNUC__ // Silence GCC "casting between pointer-to-function and pointer-to-object" warnings. 
__extension__ #endif auto obj = pyutil::pyBorrow(PyArray_NewCopy(arrayObj, order)); return obj; } #endif // PY_OPENVDB_USE_BOOST_PYTHON_NUMPY // Abstract base class for helper classes that copy data between // NumPy arrays of various types and grids of various types template<typename GridType> class CopyOpBase { public: using ValueT = typename GridType::ValueType; CopyOpBase(bool toGrid, GridType& grid, py::object arrObj, py::object coordObj, py::object tolObj) : mToGrid(toGrid) , mGrid(&grid) { const char* const opName[2] = { "copyToArray", "copyFromArray" }; // Extract the coordinates (i, j, k) of the voxel at which to start populating data. // Voxel (i, j, k) will correspond to array element (0, 0, 0). const Coord origin = extractValueArg<GridType, Coord>( coordObj, opName[toGrid], 1, "tuple(int, int, int)"); // Extract a reference to (not a copy of) the NumPy array, // or throw an exception if arrObj is not a NumPy array object. const auto arrayObj = pyutil::extractArg<NumPyArrayType>( arrObj, opName[toGrid], pyutil::GridTraits<GridType>::name(), /*argIdx=*/1, "numpy.ndarray"); #ifdef PY_OPENVDB_USE_BOOST_PYTHON_NUMPY mArray = arrayObj.get_data(); #else mArray = PyArray_DATA(reinterpret_cast<PyArrayObject*>(arrayObj.ptr())); #endif mArrayTypeName = arrayTypeName(arrayObj); mArrayTypeId = arrayTypeId(arrayObj); mArrayDims = arrayDimensions(arrayObj); mTolerance = extractValueArg<GridType>(tolObj, opName[toGrid], 2); // Compute the bounding box of the region of the grid that is to be copied from or to. Coord bboxMax = origin; for (size_t n = 0, N = std::min<size_t>(mArrayDims.size(), 3); n < N; ++n) { bboxMax[n] += int(mArrayDims[n]) - 1; } mBBox.reset(origin, bboxMax); } virtual ~CopyOpBase() {} void operator()() const { try { if (mToGrid) { copyFromArray(); // copy data from the array to the grid } else { copyToArray(); // copy data from the grid to the array } } catch (openvdb::TypeError&) { PyErr_Format(PyExc_TypeError, "unsupported NumPy data type %s", mArrayTypeName.c_str()); boost::python::throw_error_already_set(); } } protected: virtual void validate() const = 0; virtual void copyFromArray() const = 0; virtual void copyToArray() const = 0; template<typename ArrayValueType> void fromArray() const { validate(); tools::Dense<ArrayValueType> valArray(mBBox, static_cast<ArrayValueType*>(mArray)); tools::copyFromDense(valArray, *mGrid, mTolerance); } template<typename ArrayValueType> void toArray() const { validate(); tools::Dense<ArrayValueType> valArray(mBBox, static_cast<ArrayValueType*>(mArray)); tools::copyToDense(*mGrid, valArray); } bool mToGrid; // if true, copy from the array to the grid, else vice-versa void* mArray; GridType* mGrid; DtId mArrayTypeId; ArrayDimVec mArrayDims; std::string mArrayTypeName; CoordBBox mBBox; ValueT mTolerance; }; // class CopyOpBase // Helper subclass that can be specialized for various grid and NumPy array types template<typename GridType, int VecSize> class CopyOp: public CopyOpBase<GridType> {}; // Specialization for scalar grids template<typename GridType> class CopyOp<GridType, /*VecSize=*/1>: public CopyOpBase<GridType> { public: CopyOp(bool toGrid, GridType& grid, py::object arrObj, py::object coordObj, py::object tolObj = py::object(zeroVal<typename GridType::ValueType>())): CopyOpBase<GridType>(toGrid, grid, arrObj, coordObj, tolObj) { } protected: void validate() const override { if (this->mArrayDims.size() != 3) { std::ostringstream os; os << "expected 3-dimensional array, found " << this->mArrayDims.size() << "-dimensional array"; 
PyErr_SetString(PyExc_ValueError, os.str().c_str()); boost::python::throw_error_already_set(); } } #ifdef __clang__ // Suppress "enum value not explicitly handled" warnings PRAGMA(clang diagnostic push) PRAGMA(clang diagnostic ignored "-Wswitch-enum") #endif void copyFromArray() const override { switch (this->mArrayTypeId) { case DtId::FLOAT: this->template fromArray<typename NumPyToCpp<DtId::FLOAT>::type>(); break; case DtId::DOUBLE:this->template fromArray<typename NumPyToCpp<DtId::DOUBLE>::type>();break; case DtId::BOOL: this->template fromArray<typename NumPyToCpp<DtId::BOOL>::type>(); break; case DtId::INT16: this->template fromArray<typename NumPyToCpp<DtId::INT16>::type>(); break; case DtId::INT32: this->template fromArray<typename NumPyToCpp<DtId::INT32>::type>(); break; case DtId::INT64: this->template fromArray<typename NumPyToCpp<DtId::INT64>::type>(); break; case DtId::UINT32:this->template fromArray<typename NumPyToCpp<DtId::UINT32>::type>();break; case DtId::UINT64:this->template fromArray<typename NumPyToCpp<DtId::UINT64>::type>();break; default: throw openvdb::TypeError(); break; } } void copyToArray() const override { switch (this->mArrayTypeId) { case DtId::FLOAT: this->template toArray<typename NumPyToCpp<DtId::FLOAT>::type>(); break; case DtId::DOUBLE: this->template toArray<typename NumPyToCpp<DtId::DOUBLE>::type>(); break; case DtId::BOOL: this->template toArray<typename NumPyToCpp<DtId::BOOL>::type>(); break; case DtId::INT16: this->template toArray<typename NumPyToCpp<DtId::INT16>::type>(); break; case DtId::INT32: this->template toArray<typename NumPyToCpp<DtId::INT32>::type>(); break; case DtId::INT64: this->template toArray<typename NumPyToCpp<DtId::INT64>::type>(); break; case DtId::UINT32: this->template toArray<typename NumPyToCpp<DtId::UINT32>::type>(); break; case DtId::UINT64: this->template toArray<typename NumPyToCpp<DtId::UINT64>::type>(); break; default: throw openvdb::TypeError(); break; } } #ifdef __clang__ PRAGMA(clang diagnostic pop) #endif }; // class CopyOp // Specialization for Vec3 grids template<typename GridType> class CopyOp<GridType, /*VecSize=*/3>: public CopyOpBase<GridType> { public: CopyOp(bool toGrid, GridType& grid, py::object arrObj, py::object coordObj, py::object tolObj = py::object(zeroVal<typename GridType::ValueType>())): CopyOpBase<GridType>(toGrid, grid, arrObj, coordObj, tolObj) { } protected: void validate() const override { if (this->mArrayDims.size() != 4) { std::ostringstream os; os << "expected 4-dimensional array, found " << this->mArrayDims.size() << "-dimensional array"; PyErr_SetString(PyExc_ValueError, os.str().c_str()); boost::python::throw_error_already_set(); } if (this->mArrayDims[3] != 3) { std::ostringstream os; os << "expected " << this->mArrayDims[0] << "x" << this->mArrayDims[1] << "x" << this->mArrayDims[2] << "x3 array, found " << this->mArrayDims[0] << "x" << this->mArrayDims[1] << "x" << this->mArrayDims[2] << "x" << this->mArrayDims[3] << " array"; PyErr_SetString(PyExc_ValueError, os.str().c_str()); boost::python::throw_error_already_set(); } } #ifdef __clang__ // Suppress "enum value not explicitly handled" warnings PRAGMA(clang diagnostic push) PRAGMA(clang diagnostic ignored "-Wswitch-enum") #endif void copyFromArray() const override { switch (this->mArrayTypeId) { case DtId::FLOAT: this->template fromArray<math::Vec3<typename NumPyToCpp<DtId::FLOAT>::type>>(); break; case DtId::DOUBLE: this->template fromArray<math::Vec3<typename NumPyToCpp<DtId::DOUBLE>::type>>(); break; case DtId::BOOL: 
this->template fromArray<math::Vec3<typename NumPyToCpp<DtId::BOOL>::type>>(); break; case DtId::INT16: this->template fromArray<math::Vec3<typename NumPyToCpp<DtId::INT16>::type>>(); break; case DtId::INT32: this->template fromArray<math::Vec3<typename NumPyToCpp<DtId::INT32>::type>>(); break; case DtId::INT64: this->template fromArray<math::Vec3<typename NumPyToCpp<DtId::INT64>::type>>(); break; case DtId::UINT32: this->template fromArray<math::Vec3<typename NumPyToCpp<DtId::UINT32>::type>>(); break; case DtId::UINT64: this->template fromArray<math::Vec3<typename NumPyToCpp<DtId::UINT64>::type>>(); break; default: throw openvdb::TypeError(); break; } } void copyToArray() const override { switch (this->mArrayTypeId) { case DtId::FLOAT: this->template toArray<math::Vec3<typename NumPyToCpp<DtId::FLOAT>::type>>(); break; case DtId::DOUBLE: this->template toArray<math::Vec3<typename NumPyToCpp<DtId::DOUBLE>::type>>(); break; case DtId::BOOL: this->template toArray<math::Vec3<typename NumPyToCpp<DtId::BOOL>::type>>(); break; case DtId::INT16: this->template toArray<math::Vec3<typename NumPyToCpp<DtId::INT16>::type>>(); break; case DtId::INT32: this->template toArray<math::Vec3<typename NumPyToCpp<DtId::INT32>::type>>(); break; case DtId::INT64: this->template toArray<math::Vec3<typename NumPyToCpp<DtId::INT64>::type>>(); break; case DtId::UINT32: this->template toArray<math::Vec3<typename NumPyToCpp<DtId::UINT32>::type>>(); break; case DtId::UINT64: this->template toArray<math::Vec3<typename NumPyToCpp<DtId::UINT64>::type>>(); break; default: throw openvdb::TypeError(); break; } } #ifdef __clang__ PRAGMA(clang diagnostic pop) #endif }; // class CopyOp template<typename GridType> inline void copyFromArray(GridType& grid, py::object arrayObj, py::object coordObj, py::object toleranceObj) { using ValueT = typename GridType::ValueType; CopyOp<GridType, VecTraits<ValueT>::Size> op(/*toGrid=*/true, grid, arrayObj, coordObj, toleranceObj); op(); } template<typename GridType> inline void copyToArray(GridType& grid, py::object arrayObj, py::object coordObj) { using ValueT = typename GridType::ValueType; CopyOp<GridType, VecTraits<ValueT>::Size> op(/*toGrid=*/false, grid, arrayObj, coordObj); op(); } template<> inline void copyFromArray(points::PointDataGrid& /*grid*/, py::object /*arrayObj*/, py::object /*coordObj*/, py::object /*toleranceObj*/) { PyErr_SetString(PyExc_NotImplementedError, "copying NumPy arrays for PointDataGrids is not supported"); boost::python::throw_error_already_set(); } template<> inline void copyToArray(points::PointDataGrid& /*grid*/, py::object /*arrayObj*/, py::object /*coordObj*/) { PyErr_SetString(PyExc_NotImplementedError, "copying NumPy arrays for PointDataGrids is not supported"); boost::python::throw_error_already_set(); } #endif // defined(PY_OPENVDB_USE_NUMPY) //////////////////////////////////////// #ifndef PY_OPENVDB_USE_NUMPY template<typename GridType> inline typename GridType::Ptr meshToLevelSet(py::object, py::object, py::object, py::object, py::object) { PyErr_SetString(PyExc_NotImplementedError, "this module was built without NumPy support"); boost::python::throw_error_already_set(); return typename GridType::Ptr(); } template<typename GridType> inline py::object volumeToQuadMesh(const GridType&, py::object) { PyErr_SetString(PyExc_NotImplementedError, "this module was built without NumPy support"); boost::python::throw_error_already_set(); return py::object(); } template<typename GridType> inline py::object volumeToMesh(const GridType&, py::object, 
py::object) { PyErr_SetString(PyExc_NotImplementedError, "this module was built without NumPy support"); boost::python::throw_error_already_set(); return py::object(); } #else // if defined(PY_OPENVDB_USE_NUMPY) // Helper class for meshToLevelSet() template<typename SrcT, typename DstT> struct CopyVecOp { void operator()(const void* srcPtr, DstT* dst, size_t count) { const SrcT* src = static_cast<const SrcT*>(srcPtr); for (size_t i = count; i > 0; --i, ++src, ++dst) { *dst = static_cast<DstT>(*src); } } }; // Partial specialization for source and destination arrays of the same type template<typename T> struct CopyVecOp<T, T> { void operator()(const void* srcPtr, T* dst, size_t count) { const T* src = static_cast<const T*>(srcPtr); ::memcpy(dst, src, count * sizeof(T)); } }; // Helper function for use with meshToLevelSet() to copy vectors of various types // and sizes from NumPy arrays to STL vectors template<typename VecT> inline void copyVecArray(NumPyArrayType& arrayObj, std::vector<VecT>& vec) { using ValueT = typename VecT::ValueType; // Get the input array dimensions. const auto dims = arrayDimensions(arrayObj); const size_t M = dims.empty() ? 0 : dims[0]; const size_t N = VecT().numElements(); if (M == 0 || N == 0) return; // Preallocate the output vector. vec.resize(M); // Copy values from the input array to the output vector (with type conversion, if necessary). #ifdef PY_OPENVDB_USE_BOOST_PYTHON_NUMPY const void* src = arrayObj.get_data(); #else PyArrayObject* arrayObjPtr = reinterpret_cast<PyArrayObject*>(arrayObj.ptr()); const void* src = PyArray_DATA(arrayObjPtr); #endif ValueT* dst = &vec[0][0]; switch (arrayTypeId(arrayObj)) { case DtId::FLOAT: CopyVecOp<NumPyToCpp<DtId::FLOAT>::type, ValueT>()(src, dst, M*N); break; case DtId::DOUBLE: CopyVecOp<NumPyToCpp<DtId::DOUBLE>::type, ValueT>()(src, dst, M*N); break; case DtId::INT16: CopyVecOp<NumPyToCpp<DtId::INT16>::type, ValueT>()(src, dst, M*N); break; case DtId::INT32: CopyVecOp<NumPyToCpp<DtId::INT32>::type, ValueT>()(src, dst, M*N); break; case DtId::INT64: CopyVecOp<NumPyToCpp<DtId::INT64>::type, ValueT>()(src, dst, M*N); break; case DtId::UINT32: CopyVecOp<NumPyToCpp<DtId::UINT32>::type, ValueT>()(src, dst, M*N); break; case DtId::UINT64: CopyVecOp<NumPyToCpp<DtId::UINT64>::type, ValueT>()(src, dst, M*N); break; default: break; } } /// @brief Given NumPy arrays of points, triangle indices and quad indices, /// call tools::meshToLevelSet() to generate a level set grid. template<typename GridType> inline typename GridType::Ptr meshToLevelSet(py::object pointsObj, py::object trianglesObj, py::object quadsObj, py::object xformObj, py::object halfWidthObj) { struct Local { // Return the name of the Python grid method (for use in error messages). static const char* methodName() { return "createLevelSetFromPolygons"; } // Raise a Python exception if the given NumPy array does not have dimensions M x N // or does not have an integer or floating-point data type. static void validate2DNumPyArray(NumPyArrayType arrayObj, const size_t N, const char* desiredType) { const auto dims = arrayDimensions(arrayObj); bool wrongArrayType = false; // Check array dimensions. if (dims.size() != 2 || dims[1] != N) { wrongArrayType = true; } else { // Check array data type. 
switch (arrayTypeId(arrayObj)) { case DtId::FLOAT: case DtId::DOUBLE: //case DtId::HALF: case DtId::INT16: case DtId::INT32: case DtId::INT64: case DtId::UINT32: case DtId::UINT64: break; default: wrongArrayType = true; break; } } if (wrongArrayType) { // Generate an error message and raise a Python TypeError. std::ostringstream os; os << "expected N x 3 numpy.ndarray of " << desiredType << ", found "; switch (dims.size()) { case 0: os << "zero-dimensional"; break; case 1: os << "one-dimensional"; break; default: os << dims[0]; for (size_t i = 1; i < dims.size(); ++i) { os << " x " << dims[i]; } break; } os << " " << arrayTypeName(arrayObj) << " array as argument 1 to " << pyutil::GridTraits<GridType>::name() << "." << methodName() << "()"; PyErr_SetString(PyExc_TypeError, os.str().c_str()); py::throw_error_already_set(); } } }; // Extract the narrow band half width from the arguments to this method. const float halfWidth = extractValueArg<GridType, float>( halfWidthObj, Local::methodName(), /*argIdx=*/5, "float"); // Extract the transform from the arguments to this method. math::Transform::Ptr xform = math::Transform::createLinearTransform(); if (!xformObj.is_none()) { xform = extractValueArg<GridType, math::Transform::Ptr>( xformObj, Local::methodName(), /*argIdx=*/4, "Transform"); } // Extract the list of mesh vertices from the arguments to this method. std::vector<Vec3s> points; if (!pointsObj.is_none()) { // Extract a reference to (not a copy of) a NumPy array argument, // or throw an exception if the argument is not a NumPy array object. auto arrayObj = extractValueArg<GridType, NumPyArrayType>( pointsObj, Local::methodName(), /*argIdx=*/1, "numpy.ndarray"); // Throw an exception if the array has the wrong type or dimensions. Local::validate2DNumPyArray(arrayObj, /*N=*/3, /*desiredType=*/"float"); // Copy values from the array to the vector. copyVecArray(arrayObj, points); } // Extract the list of triangle indices from the arguments to this method. std::vector<Vec3I> triangles; if (!trianglesObj.is_none()) { auto arrayObj = extractValueArg<GridType, NumPyArrayType>( trianglesObj, Local::methodName(), /*argIdx=*/2, "numpy.ndarray"); Local::validate2DNumPyArray(arrayObj, /*N=*/3, /*desiredType=*/"int"); copyVecArray(arrayObj, triangles); } // Extract the list of quad indices from the arguments to this method. std::vector<Vec4I> quads; if (!quadsObj.is_none()) { auto arrayObj = extractValueArg<GridType, NumPyArrayType>( quadsObj, Local::methodName(), /*argIdx=*/3, "numpy.ndarray"); Local::validate2DNumPyArray(arrayObj, /*N=*/4, /*desiredType=*/"int"); copyVecArray(arrayObj, quads); } // Generate and return a level set grid. return tools::meshToLevelSet<GridType>(*xform, points, triangles, quads, halfWidth); } template<typename GridType> inline py::object volumeToQuadMesh(const GridType& grid, py::object isovalueObj) { const double isovalue = pyutil::extractArg<double>( isovalueObj, "convertToQuads", /*className=*/nullptr, /*argIdx=*/2, "float"); // Mesh the input grid and populate lists of mesh vertices and face vertex indices. 
std::vector<Vec3s> points; std::vector<Vec4I> quads; tools::volumeToMesh(grid, points, quads, isovalue); #ifdef PY_OPENVDB_USE_BOOST_PYTHON_NUMPY const py::object own; auto dtype = py::numpy::dtype::get_builtin<Vec3s::value_type>(); auto shape = py::make_tuple(points.size(), 3); auto stride = py::make_tuple(3 * sizeof(Vec3s::value_type), sizeof(Vec3s::value_type)); // Create a deep copy of the array (because the point vector will be destroyed // when this function returns). auto pointArrayObj = py::numpy::from_data(points.data(), dtype, shape, stride, own).copy(); dtype = py::numpy::dtype::get_builtin<Vec4I::value_type>(); shape = py::make_tuple(quads.size(), 4); stride = py::make_tuple(4 * sizeof(Vec4I::value_type), sizeof(Vec4I::value_type)); auto quadArrayObj = py::numpy::from_data( quads.data(), dtype, shape, stride, own).copy(); // deep copy #else // !defined PY_OPENVDB_USE_BOOST_PYTHON_NUMPY // Copy vertices into an N x 3 NumPy array. py::object pointArrayObj = py::numeric::array(py::list(), "float32"); if (!points.empty()) { npy_intp dims[2] = { npy_intp(points.size()), 3 }; // Construct a NumPy array that wraps the point vector. if (PyArrayObject* arrayObj = reinterpret_cast<PyArrayObject*>( PyArray_SimpleNewFromData(/*nd=*/2, dims, NPY_FLOAT, &points[0]))) { // Create a deep copy of the array (because the point vector will be // destroyed when this function returns). pointArrayObj = copyNumPyArray(arrayObj, NPY_CORDER); } } // Copy face indices into an N x 4 NumPy array. py::object quadArrayObj = py::numeric::array(py::list(), "uint32"); if (!quads.empty()) { npy_intp dims[2] = { npy_intp(quads.size()), 4 }; if (PyArrayObject* arrayObj = reinterpret_cast<PyArrayObject*>( PyArray_SimpleNewFromData(/*dims=*/2, dims, NPY_UINT32, &quads[0]))) { quadArrayObj = copyNumPyArray(arrayObj, NPY_CORDER); } } #endif // PY_OPENVDB_USE_BOOST_PYTHON_NUMPY return py::make_tuple(pointArrayObj, quadArrayObj); } template<typename GridType> inline py::object volumeToMesh(const GridType& grid, py::object isovalueObj, py::object adaptivityObj) { const double isovalue = pyutil::extractArg<double>( isovalueObj, "convertToPolygons", /*className=*/nullptr, /*argIdx=*/2, "float"); const double adaptivity = pyutil::extractArg<double>( adaptivityObj, "convertToPolygons", /*className=*/nullptr, /*argIdx=*/3, "float"); // Mesh the input grid and populate lists of mesh vertices and face vertex indices. std::vector<Vec3s> points; std::vector<Vec3I> triangles; std::vector<Vec4I> quads; tools::volumeToMesh(grid, points, triangles, quads, isovalue, adaptivity); #ifdef PY_OPENVDB_USE_BOOST_PYTHON_NUMPY const py::object own; auto dtype = py::numpy::dtype::get_builtin<Vec3s::value_type>(); auto shape = py::make_tuple(points.size(), 3); auto stride = py::make_tuple(3 * sizeof(Vec3s::value_type), sizeof(Vec3s::value_type)); // Create a deep copy of the array (because the point vector will be destroyed // when this function returns). 
auto pointArrayObj = py::numpy::from_data(points.data(), dtype, shape, stride, own).copy(); dtype = py::numpy::dtype::get_builtin<Vec3I::value_type>(); shape = py::make_tuple(triangles.size(), 3); stride = py::make_tuple(3 * sizeof(Vec3I::value_type), sizeof(Vec3I::value_type)); auto triangleArrayObj = py::numpy::from_data( triangles.data(), dtype, shape, stride, own).copy(); // deep copy dtype = py::numpy::dtype::get_builtin<Vec4I::value_type>(); shape = py::make_tuple(quads.size(), 4); stride = py::make_tuple(4 * sizeof(Vec4I::value_type), sizeof(Vec4I::value_type)); auto quadArrayObj = py::numpy::from_data( quads.data(), dtype, shape, stride, own).copy(); // deep copy #else // !defined PY_OPENVDB_USE_BOOST_PYTHON_NUMPY // Copy vertices into an N x 3 NumPy array. py::object pointArrayObj = py::numeric::array(py::list(), "float32"); if (!points.empty()) { npy_intp dims[2] = { npy_intp(points.size()), 3 }; // Construct a NumPy array that wraps the point vector. if (PyArrayObject* arrayObj = reinterpret_cast<PyArrayObject*>( PyArray_SimpleNewFromData(/*dims=*/2, dims, NPY_FLOAT, &points[0]))) { // Create a deep copy of the array (because the point vector will be // destroyed when this function returns). pointArrayObj = copyNumPyArray(arrayObj, NPY_CORDER); } } // Copy triangular face indices into an N x 3 NumPy array. py::object triangleArrayObj = py::numeric::array(py::list(), "uint32"); if (!triangles.empty()) { npy_intp dims[2] = { npy_intp(triangles.size()), 3 }; if (PyArrayObject* arrayObj = reinterpret_cast<PyArrayObject*>( PyArray_SimpleNewFromData(/*dims=*/2, dims, NPY_UINT32, &triangles[0]))) { triangleArrayObj = copyNumPyArray(arrayObj, NPY_CORDER); } } // Copy quadrilateral face indices into an N x 4 NumPy array. py::object quadArrayObj = py::numeric::array(py::list(), "uint32"); if (!quads.empty()) { npy_intp dims[2] = { npy_intp(quads.size()), 4 }; if (PyArrayObject* arrayObj = reinterpret_cast<PyArrayObject*>( PyArray_SimpleNewFromData(/*dims=*/2, dims, NPY_UINT32, &quads[0]))) { quadArrayObj = copyNumPyArray(arrayObj, NPY_CORDER); } } #endif // PY_OPENVDB_USE_BOOST_PYTHON_NUMPY return py::make_tuple(pointArrayObj, triangleArrayObj, quadArrayObj); } #endif // defined(PY_OPENVDB_USE_NUMPY) //////////////////////////////////////// template<typename GridType, typename IterType> inline void applyMap(const char* methodName, GridType& grid, py::object funcObj) { using ValueT = typename GridType::ValueType; for (IterType it = grid.tree().template begin<IterType>(); it; ++it) { // Evaluate the functor. py::object result = funcObj(*it); // Verify that the result is of type GridType::ValueType. 
py::extract<ValueT> val(result); if (!val.check()) { PyErr_Format(PyExc_TypeError, "expected callable argument to %s.%s() to return %s, found %s", pyutil::GridTraits<GridType>::name(), methodName, openvdb::typeNameAsString<ValueT>(), pyutil::className(result).c_str()); py::throw_error_already_set(); } it.setValue(val()); } } template<typename GridType> inline void mapOn(GridType& grid, py::object funcObj) { applyMap<GridType, typename GridType::ValueOnIter>("mapOn", grid, funcObj); } template<typename GridType> inline void mapOff(GridType& grid, py::object funcObj) { applyMap<GridType, typename GridType::ValueOffIter>("mapOff", grid, funcObj); } template<typename GridType> inline void mapAll(GridType& grid, py::object funcObj) { applyMap<GridType, typename GridType::ValueAllIter>("mapAll", grid, funcObj); } //////////////////////////////////////// template<typename GridType> struct TreeCombineOp { using TreeT = typename GridType::TreeType; using ValueT = typename GridType::ValueType; TreeCombineOp(py::object _op): op(_op) {} void operator()(const ValueT& a, const ValueT& b, ValueT& result) { py::object resultObj = op(a, b); py::extract<ValueT> val(resultObj); if (!val.check()) { PyErr_Format(PyExc_TypeError, "expected callable argument to %s.combine() to return %s, found %s", pyutil::GridTraits<GridType>::name(), openvdb::typeNameAsString<ValueT>(), pyutil::className(resultObj).c_str()); py::throw_error_already_set(); } result = val(); } py::object op; }; template<typename GridType> inline void combine(GridType& grid, py::object otherGridObj, py::object funcObj) { using GridPtr = typename GridType::Ptr; GridPtr otherGrid = extractValueArg<GridType, GridPtr>(otherGridObj, "combine", 1, pyutil::GridTraits<GridType>::name()); TreeCombineOp<GridType> op(funcObj); grid.tree().combine(otherGrid->tree(), op, /*prune=*/true); } //////////////////////////////////////// template<typename GridType> inline typename GridType::Ptr createLevelSetSphere(float radius, const openvdb::Vec3f& center, float voxelSize, float halfWidth) { return tools::createLevelSetSphere<GridType>(radius, center, voxelSize, halfWidth); } //////////////////////////////////////// template<typename GridT, typename IterT> class IterWrap; // forward declaration // // Type traits for various iterators // template<typename GridT, typename IterT> struct IterTraits { // IterT the type of the iterator // name() function returning the base name of the iterator type (e.g., "ValueOffIter") // descr() function returning a string describing the iterator // begin() function returning a begin iterator for a given grid }; template<typename GridT> struct IterTraits<GridT, typename GridT::ValueOnCIter> { using IterT = typename GridT::ValueOnCIter; static std::string name() { return "ValueOnCIter"; } static std::string descr() { return std::string("Read-only iterator over the active values (tile and voxel)\nof a ") + pyutil::GridTraits<typename std::remove_const<GridT>::type>::name(); } static IterWrap<const GridT, IterT> begin(typename GridT::Ptr g) { return IterWrap<const GridT, IterT>(g, g->cbeginValueOn()); } }; // IterTraits<ValueOnCIter> template<typename GridT> struct IterTraits<GridT, typename GridT::ValueOffCIter> { using IterT = typename GridT::ValueOffCIter; static std::string name() { return "ValueOffCIter"; } static std::string descr() { return std::string("Read-only iterator over the inactive values (tile and voxel)\nof a ") + pyutil::GridTraits<typename std::remove_const<GridT>::type>::name(); } static IterWrap<const GridT, IterT> 
begin(typename GridT::Ptr g) { return IterWrap<const GridT, IterT>(g, g->cbeginValueOff()); } }; // IterTraits<ValueOffCIter> template<typename GridT> struct IterTraits<GridT, typename GridT::ValueAllCIter> { using IterT = typename GridT::ValueAllCIter; static std::string name() { return "ValueAllCIter"; } static std::string descr() { return std::string("Read-only iterator over all tile and voxel values of a ") + pyutil::GridTraits<typename std::remove_const<GridT>::type>::name(); } static IterWrap<const GridT, IterT> begin(typename GridT::Ptr g) { return IterWrap<const GridT, IterT>(g, g->cbeginValueAll()); } }; // IterTraits<ValueAllCIter> template<typename GridT> struct IterTraits<GridT, typename GridT::ValueOnIter> { using IterT = typename GridT::ValueOnIter; static std::string name() { return "ValueOnIter"; } static std::string descr() { return std::string("Read/write iterator over the active values (tile and voxel)\nof a ") + pyutil::GridTraits<typename std::remove_const<GridT>::type>::name(); } static IterWrap<GridT, IterT> begin(typename GridT::Ptr g) { return IterWrap<GridT, IterT>(g, g->beginValueOn()); } }; // IterTraits<ValueOnIter> template<typename GridT> struct IterTraits<GridT, typename GridT::ValueOffIter> { using IterT = typename GridT::ValueOffIter; static std::string name() { return "ValueOffIter"; } static std::string descr() { return std::string("Read/write iterator over the inactive values (tile and voxel)\nof a ") + pyutil::GridTraits<typename std::remove_const<GridT>::type>::name(); } static IterWrap<GridT, IterT> begin(typename GridT::Ptr g) { return IterWrap<GridT, IterT>(g, g->beginValueOff()); } }; // IterTraits<ValueOffIter> template<typename GridT> struct IterTraits<GridT, typename GridT::ValueAllIter> { using IterT = typename GridT::ValueAllIter; static std::string name() { return "ValueAllIter"; } static std::string descr() { return std::string("Read/write iterator over all tile and voxel values of a ") + pyutil::GridTraits<typename std::remove_const<GridT>::type>::name(); } static IterWrap<GridT, IterT> begin(typename GridT::Ptr g) { return IterWrap<GridT, IterT>(g, g->beginValueAll()); } }; // IterTraits<ValueAllIter> //////////////////////////////////////// // Helper class to modify a grid through a non-const iterator template<typename GridT, typename IterT> struct IterItemSetter { using ValueT = typename GridT::ValueType; static void setValue(const IterT& iter, const ValueT& val) { iter.setValue(val); } static void setActive(const IterT& iter, bool on) { iter.setActiveState(on); } }; // Partial specialization for const iterators template<typename GridT, typename IterT> struct IterItemSetter<const GridT, IterT> { using ValueT = typename GridT::ValueType; static void setValue(const IterT&, const ValueT&) { PyErr_SetString(PyExc_AttributeError, "can't set attribute 'value'"); py::throw_error_already_set(); } static void setActive(const IterT&, bool /*on*/) { PyErr_SetString(PyExc_AttributeError, "can't set attribute 'active'"); py::throw_error_already_set(); } }; /// @brief Value returned by the next() method of a grid's value iterator /// @details This class allows both dictionary-style (e.g., items["depth"]) and /// attribute access (e.g., items.depth) to the items returned by an iterator. /// @todo Create a reusable base class for "named dicts" like this? 
template<typename _GridT, typename _IterT> class IterValueProxy { public: using GridT = _GridT; using IterT = _IterT; using ValueT = typename GridT::ValueType; using SetterT = IterItemSetter<GridT, IterT>; IterValueProxy(typename GridT::ConstPtr grid, const IterT& iter): mGrid(grid), mIter(iter) {} IterValueProxy copy() const { return *this; } typename GridT::ConstPtr parent() const { return mGrid; } ValueT getValue() const { return *mIter; } bool getActive() const { return mIter.isValueOn(); } Index getDepth() const { return mIter.getDepth(); } Coord getBBoxMin() const { return mIter.getBoundingBox().min(); } Coord getBBoxMax() const { return mIter.getBoundingBox().max(); } Index64 getVoxelCount() const { return mIter.getVoxelCount(); } void setValue(const ValueT& val) { SetterT::setValue(mIter, val); } void setActive(bool on) { SetterT::setActive(mIter, on); } /// Return this dictionary's keys as a list of C strings. static const char* const * keys() { static const char* const sKeys[] = { "value", "active", "depth", "min", "max", "count", nullptr }; return sKeys; } /// Return @c true if the given string is a valid key. static bool hasKey(const std::string& key) { for (int i = 0; keys()[i] != nullptr; ++i) { if (key == keys()[i]) return true; } return false; } /// Return this dictionary's keys as a Python list of Python strings. static py::list getKeys() { py::list keyList; for (int i = 0; keys()[i] != nullptr; ++i) keyList.append(keys()[i]); return keyList; } /// @brief Return the value for the given key. /// @throw KeyError if the key is invalid py::object getItem(py::object keyObj) const { py::extract<std::string> x(keyObj); if (x.check()) { const std::string key = x(); if (key == "value") return py::object(this->getValue()); else if (key == "active") return py::object(this->getActive()); else if (key == "depth") return py::object(this->getDepth()); else if (key == "min") return py::object(this->getBBoxMin()); else if (key == "max") return py::object(this->getBBoxMax()); else if (key == "count") return py::object(this->getVoxelCount()); } PyErr_SetObject(PyExc_KeyError, ("%s" % keyObj.attr("__repr__")()).ptr()); py::throw_error_already_set(); return py::object(); } /// @brief Set the value for the given key. /// @throw KeyError if the key is invalid /// @throw AttributeError if the key refers to a read-only item void setItem(py::object keyObj, py::object valObj) { py::extract<std::string> x(keyObj); if (x.check()) { const std::string key = x(); if (key == "value") { this->setValue(py::extract<ValueT>(valObj)); return; } else if (key == "active") { this->setActive(py::extract<bool>(valObj)); return; } else if (this->hasKey(key)) { PyErr_SetObject(PyExc_AttributeError, ("can't set attribute '%s'" % keyObj.attr("__repr__")()).ptr()); py::throw_error_already_set(); } } PyErr_SetObject(PyExc_KeyError, ("'%s'" % keyObj.attr("__repr__")()).ptr()); py::throw_error_already_set(); } bool operator==(const IterValueProxy& other) const { return (other.getActive() == this->getActive() && other.getDepth() == this->getDepth() && math::isExactlyEqual(other.getValue(), this->getValue()) && other.getBBoxMin() == this->getBBoxMin() && other.getBBoxMax() == this->getBBoxMax() && other.getVoxelCount() == this->getVoxelCount()); } bool operator!=(const IterValueProxy& other) const { return !(*this == other); } /// Print this dictionary to a stream. 
std::ostream& put(std::ostream& os) const { // valuesAsStrings = ["%s: %s" % key, repr(this[key]) for key in this.keys()] py::list valuesAsStrings; for (int i = 0; this->keys()[i] != nullptr; ++i) { py::str key(this->keys()[i]), val(this->getItem(key).attr("__repr__")()); valuesAsStrings.append("'%s': %s" % py::make_tuple(key, val)); } // print ", ".join(valuesAsStrings) py::object joined = py::str(", ").attr("join")(valuesAsStrings); std::string s = py::extract<std::string>(joined); os << "{" << s << "}"; return os; } /// Return a string describing this dictionary. std::string info() const { std::ostringstream os; os << *this; return os.str(); } private: // To keep the iterator's grid from being deleted (leaving the iterator dangling), // store a shared pointer to the grid. const typename GridT::ConstPtr mGrid; const IterT mIter; // the iterator may not be incremented }; // class IterValueProxy template<typename GridT, typename IterT> inline std::ostream& operator<<(std::ostream& os, const IterValueProxy<GridT, IterT>& iv) { return iv.put(os); } //////////////////////////////////////// /// Wrapper for a grid's value iterator classes template<typename _GridT, typename _IterT> class IterWrap { public: using GridT = _GridT; using IterT = _IterT; using ValueT = typename GridT::ValueType; using IterValueProxyT = IterValueProxy<GridT, IterT>; using Traits = IterTraits<GridT, IterT>; IterWrap(typename GridT::ConstPtr grid, const IterT& iter): mGrid(grid), mIter(iter) {} typename GridT::ConstPtr parent() const { return mGrid; } /// Return an IterValueProxy for the current iterator position. IterValueProxyT next() { if (!mIter) { PyErr_SetString(PyExc_StopIteration, "no more values"); py::throw_error_already_set(); } IterValueProxyT result(mGrid, mIter); ++mIter; return result; } static py::object returnSelf(const py::object& obj) { return obj; } /// @brief Define a Python wrapper class for this C++ class and another for /// the IterValueProxy class returned by iterators of this type. 
static void wrap() { const std::string gridClassName = pyutil::GridTraits<typename std::remove_const<GridT>::type>::name(), iterClassName = /*gridClassName +*/ Traits::name(), valueClassName = /*gridClassName +*/ "Value"; py::class_<IterWrap>( iterClassName.c_str(), /*docstring=*/Traits::descr().c_str(), /*ctor=*/py::no_init) // can only be instantiated from C++, not from Python .add_property("parent", &IterWrap::parent, ("the " + gridClassName + " over which to iterate").c_str()) .def("next", &IterWrap::next, ("next() -> " + valueClassName).c_str()) .def("__next__", &IterWrap::next, ("__next__() -> " + valueClassName).c_str()) .def("__iter__", &returnSelf); py::class_<IterValueProxyT>( valueClassName.c_str(), /*docstring=*/("Proxy for a tile or voxel value in a " + gridClassName).c_str(), /*ctor=*/py::no_init) // can only be instantiated from C++, not from Python .def("copy", &IterValueProxyT::copy, ("copy() -> " + valueClassName + "\n\n" "Return a shallow copy of this value, i.e., one that shares\n" "its data with the original.").c_str()) .add_property("parent", &IterValueProxyT::parent, ("the " + gridClassName + " to which this value belongs").c_str()) .def("__str__", &IterValueProxyT::info) .def("__repr__", &IterValueProxyT::info) .def("__eq__", &IterValueProxyT::operator==) .def("__ne__", &IterValueProxyT::operator!=) .add_property("value", &IterValueProxyT::getValue, &IterValueProxyT::setValue, "value of this tile or voxel") .add_property("active", &IterValueProxyT::getActive, &IterValueProxyT::setActive, "active state of this tile or voxel") .add_property("depth", &IterValueProxyT::getDepth, "tree depth at which this value is stored") .add_property("min", &IterValueProxyT::getBBoxMin, "lower bound of the axis-aligned bounding box of this tile or voxel") .add_property("max", &IterValueProxyT::getBBoxMax, "upper bound of the axis-aligned bounding box of this tile or voxel") .add_property("count", &IterValueProxyT::getVoxelCount, "number of voxels spanned by this value") .def("keys", &IterValueProxyT::getKeys, "keys() -> list\n\n" "Return a list of keys for this tile or voxel.") .staticmethod("keys") .def("__contains__", &IterValueProxyT::hasKey, "__contains__(key) -> bool\n\n" "Return True if the given key exists.") .staticmethod("__contains__") .def("__getitem__", &IterValueProxyT::getItem, "__getitem__(key) -> value\n\n" "Return the value of the item with the given key.") .def("__setitem__", &IterValueProxyT::setItem, "__setitem__(key, value)\n\n" "Set the value of the item with the given key."); } private: // To keep this iterator's grid from being deleted, leaving the iterator dangling, // store a shared pointer to the grid. const typename GridT::ConstPtr mGrid; IterT mIter; }; // class IterWrap //////////////////////////////////////// template<typename GridT> struct PickleSuite: public py::pickle_suite { using GridPtrT = typename GridT::Ptr; /// Return @c true, indicating that this pickler preserves a Grid's __dict__. static bool getstate_manages_dict() { return true; } /// Return a tuple representing the state of the given Grid. static py::tuple getstate(py::object gridObj) { py::tuple state; // Extract a Grid from the Python object. GridPtrT grid; py::extract<GridPtrT> x(gridObj); if (x.check()) grid = x(); if (grid) { // Serialize the Grid to a string.
std::ostringstream ostr(std::ios_base::binary); { openvdb::io::Stream strm(ostr); strm.setGridStatsMetadataEnabled(false); strm.write(openvdb::GridPtrVec(1, grid)); } // Construct a state tuple comprising the Python object's __dict__ // and the serialized Grid. #if PY_MAJOR_VERSION >= 3 // Convert the byte string to a "bytes" sequence. const std::string s = ostr.str(); py::object bytesObj = pyutil::pyBorrow(PyBytes_FromStringAndSize(s.data(), s.size())); #else py::str bytesObj(ostr.str()); #endif state = py::make_tuple(gridObj.attr("__dict__"), bytesObj); } return state; } /// Restore the given Grid to a saved state. static void setstate(py::object gridObj, py::object stateObj) { GridPtrT grid; { py::extract<GridPtrT> x(gridObj); if (x.check()) grid = x(); } if (!grid) return; py::tuple state; { py::extract<py::tuple> x(stateObj); if (x.check()) state = x(); } bool badState = (py::len(state) != 2); if (!badState) { // Restore the object's __dict__. py::extract<py::dict> x(state[0]); if (x.check()) { py::dict d = py::extract<py::dict>(gridObj.attr("__dict__"))(); d.update(x()); } else { badState = true; } } std::string serialized; if (!badState) { // Extract the sequence containing the serialized Grid. py::object bytesObj = state[1]; #if PY_MAJOR_VERSION >= 3 badState = true; if (PyBytes_Check(bytesObj.ptr())) { // Convert the "bytes" sequence to a byte string. char* buf = nullptr; Py_ssize_t length = 0; if (-1 != PyBytes_AsStringAndSize(bytesObj.ptr(), &buf, &length)) { if (buf != nullptr && length > 0) { serialized.assign(buf, buf + length); badState = false; } } } #else py::extract<std::string> x(bytesObj); if (x.check()) serialized = x(); else badState = true; #endif } if (badState) { PyErr_SetObject(PyExc_ValueError, #if PY_MAJOR_VERSION >= 3 ("expected (dict, bytes) tuple in call to __setstate__; found %s" #else ("expected (dict, str) tuple in call to __setstate__; found %s" #endif % stateObj.attr("__repr__")()).ptr()); py::throw_error_already_set(); } // Restore the internal state of the C++ object. GridPtrVecPtr grids; { std::istringstream istr(serialized, std::ios_base::binary); io::Stream strm(istr); grids = strm.getGrids(); // (note: file-level metadata is ignored) } if (grids && !grids->empty()) { if (GridPtrT savedGrid = gridPtrCast<GridT>((*grids)[0])) { grid->MetaMap::operator=(*savedGrid); ///< @todo add a Grid::setMetadata() method? grid->setTransform(savedGrid->transformPtr()); grid->setTree(savedGrid->treePtr()); } } } }; // struct PickleSuite //////////////////////////////////////// /// Create a Python wrapper for a particular template instantiation of Grid. template<typename GridType> inline void exportGrid() { using ValueT = typename GridType::ValueType; using GridPtr = typename GridType::Ptr; using Traits = pyutil::GridTraits<GridType>; using ValueOnCIterT = typename GridType::ValueOnCIter; using ValueOffCIterT = typename GridType::ValueOffCIter; using ValueAllCIterT = typename GridType::ValueAllCIter; using ValueOnIterT = typename GridType::ValueOnIter; using ValueOffIterT = typename GridType::ValueOffIter; using ValueAllIterT = typename GridType::ValueAllIter; math::Transform::Ptr (GridType::*getTransform)() = &GridType::transformPtr; const std::string pyGridTypeName = Traits::name(); const std::string defaultCtorDescr = "Initialize with a background value of " + pyutil::str(pyGrid::getZeroValue<GridType>()) + "."; // Define the Grid wrapper class and make it the current scope. 
{ py::class_<GridType, /*HeldType=*/GridPtr> clss( /*classname=*/pyGridTypeName.c_str(), /*docstring=*/(Traits::descr()).c_str(), /*ctor=*/py::init<>(defaultCtorDescr.c_str()) ); py::scope gridClassScope = clss; clss.def(py::init<const ValueT&>(py::args("background"), "Initialize with the given background value.")) .def("copy", &pyGrid::copyGrid<GridType>, ("copy() -> " + pyGridTypeName + "\n\n" "Return a shallow copy of this grid, i.e., a grid\n" "that shares its voxel data with this grid.").c_str()) .def("deepCopy", &GridType::deepCopy, ("deepCopy() -> " + pyGridTypeName + "\n\n" "Return a deep copy of this grid.\n").c_str()) .def_pickle(pyGrid::PickleSuite<GridType>()) .def("sharesWith", &pyGrid::sharesWith<GridType>, ("sharesWith(" + pyGridTypeName + ") -> bool\n\n" "Return True if this grid shares its voxel data with the given grid.").c_str()) /// @todo Any way to set a docstring for a class property? .add_static_property("valueTypeName", &pyGrid::getValueType<GridType>) /// @todo docstring = "name of this grid's value type" .add_static_property("zeroValue", &pyGrid::getZeroValue<GridType>) /// @todo docstring = "zero, as expressed in this grid's value type" .add_static_property("oneValue", &pyGrid::getOneValue<GridType>) /// @todo docstring = "one, as expressed in this grid's value type" /// @todo Is Grid.typeName ever needed? //.add_static_property("typeName", &GridType::gridType) /// @todo docstring = to "name of this grid's type" .add_property("background", &pyGrid::getGridBackground<GridType>, &pyGrid::setGridBackground<GridType>, "value of this grid's background voxels") .add_property("name", &GridType::getName, &pyGrid::setGridName, "this grid's name") .add_property("creator", &GridType::getCreator, &pyGrid::setGridCreator, "description of this grid's creator") .add_property("transform", getTransform, &pyGrid::setGridTransform, "transform associated with this grid") .add_property("gridClass", &pyGrid::getGridClass, &pyGrid::setGridClass, "the class of volumetric data (level set, fog volume, etc.)\nstored in this grid") .add_property("vectorType", &pyGrid::getVecType, &pyGrid::setVecType, "how transforms are applied to values stored in this grid") .def("getAccessor", &pyGrid::getAccessor<GridType>, ("getAccessor() -> " + pyGridTypeName + "Accessor\n\n" "Return an accessor that provides random read and write access\n" "to this grid's voxels.").c_str()) .def("getConstAccessor", &pyGrid::getConstAccessor<GridType>, ("getConstAccessor() -> " + pyGridTypeName + "Accessor\n\n" "Return an accessor that provides random read-only access\n" "to this grid's voxels.").c_str()) // // Metadata // .add_property("metadata", &pyGrid::getAllMetadata, &pyGrid::replaceAllMetadata, "dict of this grid's metadata\n\n" "Setting this attribute replaces all of this grid's metadata,\n" "but mutating it in place has no effect on the grid, since\n" "the value of this attribute is a only a copy of the metadata.\n" "Use either indexing or updateMetadata() to mutate metadata in place.") .def("updateMetadata", &pyGrid::updateMetadata, "updateMetadata(dict)\n\n" "Add metadata to this grid, replacing any existing items\n" "having the same names as the new items.") .def("addStatsMetadata", &GridType::addStatsMetadata, "addStatsMetadata()\n\n" "Add metadata to this grid comprising the current values\n" "of statistics like the active voxel count and bounding box.\n" "(This metadata is not automatically kept up-to-date with\n" "changes to this grid.)") .def("getStatsMetadata", &pyGrid::getStatsMetadata, 
"getStatsMetadata() -> dict\n\n" "Return a (possibly empty) dict containing just the metadata\n" "that was added to this grid with addStatsMetadata().") .def("__getitem__", &pyGrid::getMetadata, "__getitem__(name) -> value\n\n" "Return the metadata value associated with the given name.") .def("__setitem__", &pyGrid::setMetadata, "__setitem__(name, value)\n\n" "Add metadata to this grid, replacing any existing item having\n" "the same name as the new item.") .def("__delitem__", &pyGrid::removeMetadata, "__delitem__(name)\n\n" "Remove the metadata with the given name.") .def("__contains__", &pyGrid::hasMetadata, "__contains__(name) -> bool\n\n" "Return True if this grid contains metadata with the given name.") .def("__iter__", &pyGrid::getMetadataKeys, "__iter__() -> iterator\n\n" "Return an iterator over this grid's metadata keys.") .def("iterkeys", &pyGrid::getMetadataKeys, "iterkeys() -> iterator\n\n" "Return an iterator over this grid's metadata keys.") .add_property("saveFloatAsHalf", &GridType::saveFloatAsHalf, &GridType::setSaveFloatAsHalf, "if True, write floating-point voxel values as 16-bit half floats") // // Statistics // .def("memUsage", &GridType::memUsage, "memUsage() -> int\n\n" "Return the memory usage of this grid in bytes.") .def("evalLeafBoundingBox", &pyGrid::evalLeafBoundingBox<GridType>, "evalLeafBoundingBox() -> xyzMin, xyzMax\n\n" "Return the coordinates of opposite corners of the axis-aligned\n" "bounding box of all leaf nodes.") .def("evalLeafDim", &pyGrid::evalLeafDim<GridType>, "evalLeafDim() -> x, y, z\n\n" "Return the dimensions of the axis-aligned bounding box\n" "of all leaf nodes.") .def("evalActiveVoxelBoundingBox", &pyGrid::evalActiveVoxelBoundingBox<GridType>, "evalActiveVoxelBoundingBox() -> xyzMin, xyzMax\n\n" "Return the coordinates of opposite corners of the axis-aligned\n" "bounding box of all active voxels.") .def("evalActiveVoxelDim", &GridType::evalActiveVoxelDim, "evalActiveVoxelDim() -> x, y, z\n\n" "Return the dimensions of the axis-aligned bounding box of all\n" "active voxels.") .add_property("treeDepth", &pyGrid::treeDepth<GridType>, "depth of this grid's tree from root node to leaf node") .def("nodeLog2Dims", &pyGrid::getNodeLog2Dims<GridType>, "list of Log2Dims of the nodes of this grid's tree\n" "in order from root to leaf") .def("leafCount", &pyGrid::leafCount<GridType>, "leafCount() -> int\n\n" "Return the number of leaf nodes in this grid's tree.") .def("nonLeafCount", &pyGrid::nonLeafCount<GridType>, "nonLeafCount() -> int\n\n" "Return the number of non-leaf nodes in this grid's tree.") .def("activeVoxelCount", &GridType::activeVoxelCount, "activeVoxelCount() -> int\n\n" "Return the number of active voxels in this grid.") .def("activeLeafVoxelCount", &pyGrid::activeLeafVoxelCount<GridType>, "activeLeafVoxelCount() -> int\n\n" "Return the number of active voxels that are stored\n" "in the leaf nodes of this grid's tree.") .def("evalMinMax", &pyGrid::evalMinMax<GridType>, "evalMinMax() -> min, max\n\n" "Return the minimum and maximum active values in this grid.") .def("getIndexRange", &pyGrid::getIndexRange<GridType>, "getIndexRange() -> min, max\n\n" "Return the minimum and maximum coordinates that are represented\n" "in this grid. 
These might include background voxels.") //.def("expand", &pyGrid::expandIndexRange<GridType>, // py::arg("xyz"), // "expand(xyz)\n\n" // "Expand this grid's index range to include the given coordinates.") .def("info", &pyGrid::gridInfo, py::arg("verbosity")=1, "info(verbosity=1) -> str\n\n" "Return a string containing information about this grid\n" "with a specified level of verbosity.\n") // // Tools // .def("fill", &pyGrid::fill<GridType>, (py::arg("min"), py::arg("max"), py::arg("value"), py::arg("active")=true), "fill(min, max, value, active=True)\n\n" "Set all voxels within a given axis-aligned box to\n" "a constant value (either active or inactive).") .def("signedFloodFill", &pyGrid::signedFloodFill<GridType>, "signedFloodFill()\n\n" "Propagate the sign from a narrow-band level set into inactive\n" "voxels and tiles.") .def("copyFromArray", &pyGrid::copyFromArray<GridType>, (py::arg("array"), py::arg("ijk")=Coord(0), py::arg("tolerance")=pyGrid::getZeroValue<GridType>()), ("copyFromArray(array, ijk=(0, 0, 0), tolerance=0)\n\n" "Populate this grid, starting at voxel (i, j, k), with values\nfrom a " + std::string(openvdb::VecTraits<ValueT>::IsVec ? "four" : "three") + "-dimensional array. Mark voxels as inactive\n" "if and only if their values are equal to this grid's\n" "background value within the given tolerance.").c_str()) .def("copyToArray", &pyGrid::copyToArray<GridType>, (py::arg("array"), py::arg("ijk")=Coord(0)), ("copyToArray(array, ijk=(0, 0, 0))\n\nPopulate a " + std::string(openvdb::VecTraits<ValueT>::IsVec ? "four" : "three") + "-dimensional array with values\n" "from this grid, starting at voxel (i, j, k).").c_str()) .def("convertToQuads", &pyGrid::volumeToQuadMesh<GridType>, (py::arg("isovalue")=0), "convertToQuads(isovalue=0) -> points, quads\n\n" "Uniformly mesh a scalar grid that has a continuous isosurface\n" "at the given isovalue. Return a NumPy array of world-space\n" "points and a NumPy array of 4-tuples of point indices, which\n" "specify the vertices of the quadrilaterals that form the mesh.") .def("convertToPolygons", &pyGrid::volumeToMesh<GridType>, (py::arg("isovalue")=0, py::arg("adaptivity")=0), "convertToPolygons(isovalue=0, adaptivity=0) -> points, triangles, quads\n\n" "Adaptively mesh a scalar grid that has a continuous isosurface\n" "at the given isovalue. Return a NumPy array of world-space\n" "points and NumPy arrays of 3- and 4-tuples of point indices,\n" "which specify the vertices of the triangles and quadrilaterals\n" "that form the mesh. 
Adaptivity can vary from 0 to 1, where 0\n" "produces a high-polygon-count mesh that closely approximates\n" "the isosurface, and 1 produces a lower-polygon-count mesh\n" "with some loss of surface detail.") .def("createLevelSetFromPolygons", &pyGrid::meshToLevelSet<GridType>, (py::arg("points"), py::arg("triangles")=py::object(), py::arg("quads")=py::object(), py::arg("transform")=py::object(), py::arg("halfWidth")=openvdb::LEVEL_SET_HALF_WIDTH), ("createLevelSetFromPolygons(points, triangles=None, quads=None,\n" " transform=None, halfWidth=" + std::to_string(openvdb::LEVEL_SET_HALF_WIDTH) + ") -> " + pyGridTypeName + "\n\n" "Convert a triangle and/or quad mesh to a narrow-band level set volume.\n" "The mesh must form a closed surface, but the surface need not be\n" "manifold and may have self intersections and degenerate faces.\n" "The mesh is described by a NumPy array of world-space points\n" "and NumPy arrays of 3- and 4-tuples of point indices that specify\n" "the vertices of the triangles and quadrilaterals that form the mesh.\n" "Either the triangle or the quad array may be empty or None.\n" "The resulting volume will have the given transform (or the identity\n" "transform if no transform is given) and a narrow band width of\n" "2 x halfWidth voxels.").c_str()) .staticmethod("createLevelSetFromPolygons") .def("prune", &pyGrid::prune<GridType>, (py::arg("tolerance")=0), "prune(tolerance=0)\n\n" "Remove nodes whose values all have the same active state\n" "and are equal to within a given tolerance.") .def("pruneInactive", &pyGrid::pruneInactive<GridType>, (py::arg("value")=py::object()), "pruneInactive(value=None)\n\n" "Remove nodes whose values are all inactive and replace them\n" "with either background tiles or tiles of the given value\n" "(if the value is not None).") .def("empty", &GridType::empty, "empty() -> bool\n\n" "Return True if this grid contains only background voxels.") .def("__nonzero__", &pyGrid::notEmpty<GridType>) .def("clear", &GridType::clear, "clear()\n\n" "Remove all tiles from this grid and all nodes other than the root node.") .def("merge", &GridType::merge, ("merge(" + pyGridTypeName + ")\n\n" "Move child nodes from the other grid into this grid wherever\n" "those nodes correspond to constant-value tiles in this grid,\n" "and replace leaf-level inactive voxels in this grid with\n" "corresponding voxels in the other grid that are active.\n\n" "Note: this operation always empties the other grid.").c_str()) .def("mapOn", &pyGrid::mapOn<GridType>, py::arg("function"), "mapOn(function)\n\n" "Iterate over all the active (\"on\") values (tile and voxel)\n" "of this grid and replace each value with function(value).\n\n" "Example: grid.mapOn(lambda x: x * 2 if x < 0.5 else x)") .def("mapOff", &pyGrid::mapOff<GridType>, py::arg("function"), "mapOff(function)\n\n" "Iterate over all the inactive (\"off\") values (tile and voxel)\n" "of this grid and replace each value with function(value).\n\n" "Example: grid.mapOff(lambda x: x * 2 if x < 0.5 else x)") .def("mapAll", &pyGrid::mapAll<GridType>, py::arg("function"), "mapAll(function)\n\n" "Iterate over all values (tile and voxel) of this grid\n" "and replace each value with function(value).\n\n" "Example: grid.mapAll(lambda x: x * 2 if x < 0.5 else x)") .def("combine", &pyGrid::combine<GridType>, (py::arg("grid"), py::arg("function")), "combine(grid, function)\n\n" "Compute function(self, other) over all corresponding pairs\n" "of values (tile or voxel) of this grid and the other grid\n" "and store the result in this 
grid.\n\n" "Note: this operation always empties the other grid.\n\n" "Example: grid.combine(otherGrid, lambda a, b: min(a, b))") // // Iterators // .def("citerOnValues", &pyGrid::IterTraits<GridType, ValueOnCIterT>::begin, "citerOnValues() -> iterator\n\n" "Return a read-only iterator over this grid's active\ntile and voxel values.") .def("citerOffValues", &pyGrid::IterTraits<GridType, ValueOffCIterT>::begin, "iterOffValues() -> iterator\n\n" "Return a read-only iterator over this grid's inactive\ntile and voxel values.") .def("citerAllValues", &pyGrid::IterTraits<GridType, ValueAllCIterT>::begin, "iterAllValues() -> iterator\n\n" "Return a read-only iterator over all of this grid's\ntile and voxel values.") .def("iterOnValues", &pyGrid::IterTraits<GridType, ValueOnIterT>::begin, "iterOnValues() -> iterator\n\n" "Return a read/write iterator over this grid's active\ntile and voxel values.") .def("iterOffValues", &pyGrid::IterTraits<GridType, ValueOffIterT>::begin, "iterOffValues() -> iterator\n\n" "Return a read/write iterator over this grid's inactive\ntile and voxel values.") .def("iterAllValues", &pyGrid::IterTraits<GridType, ValueAllIterT>::begin, "iterAllValues() -> iterator\n\n" "Return a read/write iterator over all of this grid's\ntile and voxel values.") ; // py::class_<Grid> // Register the GridPtr-to-Python object converter explicitly // if it is not already implicitly registered. try { py::object testObj{GridPtr()}; } catch (py::error_already_set&) { PyErr_Clear(); py::register_ptr_to_python<GridPtr>(); } py::implicitly_convertible<GridPtr, GridBase::Ptr>(); py::implicitly_convertible<GridPtr, GridBase::ConstPtr>(); /// @todo Is there a way to implicitly convert GridType references to GridBase /// references without wrapping the GridBase class? The following doesn't compile, /// because GridBase has pure virtual functions: /// @code /// py::implicitly_convertible<GridType&, GridBase&>(); /// @endcode // Wrap const and non-const value accessors and expose them // as nested classes of the Grid class. pyAccessor::AccessorWrap<const GridType>::wrap(); pyAccessor::AccessorWrap<GridType>::wrap(); // Wrap tree value iterators and expose them as nested classes of the Grid class. IterWrap<const GridType, ValueOnCIterT>::wrap(); IterWrap<const GridType, ValueOffCIterT>::wrap(); IterWrap<const GridType, ValueAllCIterT>::wrap(); IterWrap<GridType, ValueOnIterT>::wrap(); IterWrap<GridType, ValueOffIterT>::wrap(); IterWrap<GridType, ValueAllIterT>::wrap(); } // gridClassScope // Add the Python type object for this grid type to the module-level list. py::extract<py::list>(py::scope().attr("GridTypes"))().append( py::scope().attr(pyGridTypeName.c_str())); } } // namespace pyGrid #endif // OPENVDB_PYGRID_HAS_BEEN_INCLUDED
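The IterWrap and IterValueProxy wrappers above present each tile or voxel visited by a grid iterator as a small dict-like object whose keys are those returned by IterValueProxy::keys(). The following usage sketch shows the resulting Python-side interface; it assumes the module is importable as pyopenvdb (the test file further below also tries studio.openvdb) and is illustrative rather than exhaustive.

import pyopenvdb as vdb  # assumed import name

grid = vdb.FloatGrid()
grid.fill((0, 0, 0), (7, 7, 7), 1.0, active=True)

# Each item yielded by iterOnValues() is backed by IterValueProxy: its fields
# can be read either as attributes or by key, and written through a
# non-const (iter*) iterator.
for item in grid.iterOnValues():
    assert set(item.keys()) == {'value', 'active', 'depth', 'min', 'max', 'count'}
    assert item['value'] == item.value
    item.value = 2.0          # attribute-style write
    item['active'] = True     # dict-style write via __setitem__

# Read-only (citer*) iterators reject writes with AttributeError.
readonly = grid.citerOnValues().next()
try:
    readonly.value = 3.0
except AttributeError:
    pass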
93,311
C
35.549941
100
0.614247
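The PickleSuite above makes grids picklable by packaging the wrapper's __dict__ together with the grid serialized to an in-memory .vdb stream, so a standard pickle round trip preserves metadata, transform and voxel data alike. A minimal sketch of what that enables from Python (import name assumed to be pyopenvdb):

import pickle
import pyopenvdb as vdb  # assumed import name

grid = vdb.FloatGrid(background=1.0)
grid.fill((0, 0, 0), (20, 20, 20), 2.5)
grid['note'] = 'pickled grid'      # grid metadata travels with the grid

data = pickle.dumps(grid)          # getstate(): __dict__ plus serialized .vdb bytes
restored = pickle.loads(data)      # setstate(): metadata, transform and tree restored

assert restored.metadata == grid.metadata
assert restored.activeVoxelCount() == grid.activeVoxelCount()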
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/python/test/TestOpenVDB.py
#!/usr/local/bin/python # Copyright Contributors to the OpenVDB Project # SPDX-License-Identifier: MPL-2.0 """ Unit tests for the OpenVDB Python module These are intended primarily to test the Python-to-C++ and C++-to-Python bindings, not the OpenVDB library itself. """ import os, os.path import sys import unittest try: from studio import openvdb except ImportError: import pyopenvdb as openvdb def valueFactory(zeroValue, elemValue): """ Return elemValue converted to a value of the same type as zeroValue. If zeroValue is a sequence, return a sequence of the same type and length, with each element set to elemValue. """ val = zeroValue typ = type(val) try: # If the type is a sequence type, return a sequence of the appropriate length. size = len(val) val = typ([elemValue]) * size except TypeError: # Return a scalar value of the appropriate type. val = typ(elemValue) return val class TestOpenVDB(unittest.TestCase): def run(self, result=None, *args, **kwargs): super(TestOpenVDB, self).run(result, *args, **kwargs) def setUp(self): # Make output files and directories world-writable. self.umask = os.umask(0) def tearDown(self): os.umask(self.umask) def testModule(self): # At a minimum, BoolGrid, FloatGrid and Vec3SGrid should exist. self.assertTrue(openvdb.BoolGrid in openvdb.GridTypes) self.assertTrue(openvdb.FloatGrid in openvdb.GridTypes) self.assertTrue(openvdb.Vec3SGrid in openvdb.GridTypes) # Verify that it is possible to construct a grid of each supported type. for cls in openvdb.GridTypes: grid = cls() acc = grid.getAccessor() acc.setValueOn((-1, -2, 3)) self.assertEqual(grid.activeVoxelCount(), 1) def testTransform(self): xform1 = openvdb.createLinearTransform( [[.5, 0, 0, 0], [0, 1, 0, 0], [0, 0, 2, 0], [1, 2, 3, 1]]) self.assertTrue(xform1.typeName != '') self.assertEqual(xform1.indexToWorld((1, 1, 1)), (1.5, 3, 5)) xform2 = xform1 self.assertEqual(xform2, xform1) xform2 = xform1.deepCopy() self.assertEqual(xform2, xform1) xform2 = openvdb.createFrustumTransform(taper=0.5, depth=100, xyzMin=(0, 0, 0), xyzMax=(100, 100, 100), voxelSize=0.25) self.assertNotEqual(xform2, xform1) worldp = xform2.indexToWorld((10, 10, 10)) worldp = [int(round(x * 1000000)) for x in worldp] self.assertEqual(worldp, [-110000, -110000, 2500000]) grid = openvdb.FloatGrid() self.assertEqual(grid.transform, openvdb.createLinearTransform()) grid.transform = openvdb.createLinearTransform(2.0) self.assertEqual(grid.transform, openvdb.createLinearTransform(2.0)) def testGridCopy(self): grid = openvdb.FloatGrid() self.assertTrue(grid.sharesWith(grid)) self.assertFalse(grid.sharesWith([])) # wrong type; Grid expected copyOfGrid = grid.copy() self.assertTrue(copyOfGrid.sharesWith(grid)) deepCopyOfGrid = grid.deepCopy() self.assertFalse(deepCopyOfGrid.sharesWith(grid)) self.assertFalse(deepCopyOfGrid.sharesWith(copyOfGrid)) def testGridProperties(self): expected = { openvdb.BoolGrid: ('bool', False, True), openvdb.FloatGrid: ('float', 0.0, 1.0), openvdb.Vec3SGrid: ('vec3s', (0, 0, 0), (-1, 0, 1)), } for factory in expected: valType, bg, newbg = expected[factory] grid = factory() self.assertEqual(grid.valueTypeName, valType) def setValueType(obj): obj.valueTypeName = 'double' # Grid.valueTypeName is read-only, so setting it raises an exception. 
self.assertRaises(AttributeError, lambda obj=grid: setValueType(obj)) self.assertEqual(grid.background, bg) grid.background = newbg self.assertEqual(grid.background, newbg) self.assertEqual(grid.name, '') grid.name = 'test' self.assertEqual(grid.name, 'test') self.assertFalse(grid.saveFloatAsHalf) grid.saveFloatAsHalf = True self.assertTrue(grid.saveFloatAsHalf) self.assertTrue(grid.treeDepth > 2) def testGridMetadata(self): grid = openvdb.BoolGrid() self.assertEqual(grid.metadata, {}) meta = { 'name': 'test', 'xyz': (-1, 0, 1), 'xyzw': (1.0, 2.25, 3.5, 4.0), 'intval': 42, 'floatval': 1.25, 'mat4val': [[1]*4]*4, 'saveFloatAsHalf': True, } grid.metadata = meta self.assertEqual(grid.metadata, meta) meta['xyz'] = (-100, 100, 0) grid.updateMetadata(meta) self.assertEqual(grid.metadata, meta) self.assertEqual(set(grid.iterkeys()), set(meta.keys())) for name in meta: self.assertTrue(name in grid) self.assertEqual(grid[name], meta[name]) self.assertEqual(type(grid[name]), type(meta[name])) for name in grid: self.assertTrue(name in grid) self.assertEqual(grid[name], meta[name]) self.assertEqual(type(grid[name]), type(meta[name])) self.assertTrue('xyz' in grid) del grid['xyz'] self.assertFalse('xyz' in grid) grid['xyz'] = meta['xyz'] self.assertTrue('xyz' in grid) grid.addStatsMetadata() meta = grid.getStatsMetadata() self.assertEqual(0, meta["file_voxel_count"]) def testGridFill(self): grid = openvdb.FloatGrid() acc = grid.getAccessor() ijk = (1, 1, 1) self.assertRaises(TypeError, lambda: grid.fill("", (7, 7, 7), 1, False)) self.assertRaises(TypeError, lambda: grid.fill((0, 0, 0), "", 1, False)) self.assertRaises(TypeError, lambda: grid.fill((0, 0, 0), (7, 7, 7), "", False)) self.assertFalse(acc.isValueOn(ijk)) grid.fill((0, 0, 0), (7, 7, 7), 1, active=False) self.assertEqual(acc.getValue(ijk), 1) self.assertFalse(acc.isValueOn(ijk)) grid.fill((0, 0, 0), (7, 7, 7), 2, active=True) self.assertEqual(acc.getValue(ijk), 2) self.assertTrue(acc.isValueOn(ijk)) activeCount = grid.activeVoxelCount() acc.setValueOn(ijk, 2.125) self.assertEqual(grid.activeVoxelCount(), activeCount) grid.fill(ijk, ijk, 2.125, active=True) self.assertEqual(acc.getValue(ijk), 2.125) self.assertTrue(acc.isValueOn(ijk)) self.assertEqual(grid.activeVoxelCount(), activeCount) leafCount = grid.leafCount() grid.prune() self.assertAlmostEqual(acc.getValue(ijk), 2.125) self.assertTrue(acc.isValueOn(ijk)) self.assertEqual(grid.leafCount(), leafCount) self.assertEqual(grid.activeVoxelCount(), activeCount) grid.prune(tolerance=0.2) self.assertEqual(grid.activeVoxelCount(), activeCount) self.assertEqual(acc.getValue(ijk), 2.0) # median self.assertTrue(acc.isValueOn(ijk)) self.assertTrue(grid.leafCount() < leafCount) def testGridIterators(self): onCoords = set([(-10, -10, -10), (0, 0, 0), (1, 1, 1)]) for factory in openvdb.GridTypes: grid = factory() acc = grid.getAccessor() for c in onCoords: acc.setValueOn(c) coords = set(value.min for value in grid.iterOnValues()) self.assertEqual(coords, onCoords) n = 0 for _ in grid.iterAllValues(): n += 1 for _ in grid.iterOffValues(): n -= 1 self.assertEqual(n, len(onCoords)) grid = factory() grid.fill((0, 0, 1), (18, 18, 18), grid.oneValue) # make active activeCount = grid.activeVoxelCount() # Iterate over active values (via a const iterator) and verify # that the cumulative active voxel count matches the grid's. count = 0 for value in grid.citerOnValues(): count += value.count self.assertEqual(count, activeCount) # Via a non-const iterator, turn off every other active value. 
# Then verify that the cumulative active voxel count is half the original count. state = True for value in grid.iterOnValues(): count -= value.count value.active = state state = not state self.assertEqual(grid.activeVoxelCount(), activeCount / 2) # Verify that writing through a const iterator is not allowed. value = grid.citerOnValues().next() self.assertRaises(AttributeError, lambda: setattr(value, 'active', 0)) self.assertRaises(AttributeError, lambda: setattr(value, 'depth', 0)) # Verify that some value attributes are immutable, even given a non-const iterator. value = grid.iterOnValues().next() self.assertRaises(AttributeError, lambda: setattr(value, 'min', (0, 0, 0))) self.assertRaises(AttributeError, lambda: setattr(value, 'max', (0, 0, 0))) self.assertRaises(AttributeError, lambda: setattr(value, 'count', 1)) def testMap(self): grid = openvdb.BoolGrid() grid.fill((-4, -4, -4), (5, 5, 5), grid.zeroValue) # make active grid.mapOn(lambda x: not x) # replace active False values with True n = sum(item.value for item in grid.iterOnValues()) self.assertEqual(n, 10 * 10 * 10) grid = openvdb.FloatGrid() grid.fill((-4, -4, -4), (5, 5, 5), grid.oneValue) grid.mapOn(lambda x: x * 2) n = sum(item.value for item in grid.iterOnValues()) self.assertEqual(n, 10 * 10 * 10 * 2) grid = openvdb.Vec3SGrid() grid.fill((-4, -4, -4), (5, 5, 5), grid.zeroValue) grid.mapOn(lambda x: (0, 1, 0)) n = sum(item.value[1] for item in grid.iterOnValues()) self.assertEqual(n, 10 * 10 * 10) def testValueAccessor(self): coords = set([(-10, -10, -10), (0, 0, 0), (1, 1, 1)]) for factory in openvdb.GridTypes: # skip value accessor tests for PointDataGrids (value setting methods are disabled) if factory.valueTypeName.startswith('ptdataidx'): continue grid = factory() zero, one = grid.zeroValue, grid.oneValue acc = grid.getAccessor() cacc = grid.getConstAccessor() leafDepth = grid.treeDepth - 1 self.assertRaises(TypeError, lambda: cacc.setValueOn((5, 5, 5), zero)) self.assertRaises(TypeError, lambda: cacc.setValueOff((5, 5, 5), zero)) self.assertRaises(TypeError, lambda: cacc.setActiveState((5, 5, 5), True)) self.assertRaises(TypeError, lambda: acc.setValueOn("", zero)) self.assertRaises(TypeError, lambda: acc.setValueOff("", zero)) if grid.valueTypeName != "bool": self.assertRaises(TypeError, lambda: acc.setValueOn((5, 5, 5), object())) self.assertRaises(TypeError, lambda: acc.setValueOff((5, 5, 5), object())) for c in coords: grid.clear() # All voxels are inactive, background (0), and stored at the root. self.assertEqual(acc.getValue(c), zero) self.assertEqual(cacc.getValue(c), zero) self.assertFalse(acc.isValueOn(c)) self.assertFalse(cacc.isValueOn(c)) self.assertEqual(acc.getValueDepth(c), -1) self.assertEqual(cacc.getValueDepth(c), -1) acc.setValueOn(c) # active / 0 / leaf self.assertEqual(acc.getValue(c), zero) self.assertEqual(cacc.getValue(c), zero) self.assertTrue(acc.isValueOn(c)) self.assertTrue(cacc.isValueOn(c)) self.assertEqual(acc.getValueDepth(c), leafDepth) self.assertEqual(cacc.getValueDepth(c), leafDepth) acc.setValueOff(c, grid.oneValue) # inactive / 1 / leaf self.assertEqual(acc.getValue(c), one) self.assertEqual(cacc.getValue(c), one) self.assertFalse(acc.isValueOn(c)) self.assertFalse(cacc.isValueOn(c)) self.assertEqual(acc.getValueDepth(c), leafDepth) self.assertEqual(cacc.getValueDepth(c), leafDepth) # Verify that an accessor remains valid even after its grid is deleted # (because the C++ wrapper retains a reference to the C++ grid). 
def scoped(): grid = factory() acc = grid.getAccessor() cacc = grid.getConstAccessor() one = grid.oneValue acc.setValueOn((0, 0, 0), one) del grid self.assertEqual(acc.getValue((0, 0, 0)), one) self.assertEqual(cacc.getValue((0, 0, 0)), one) scoped() def testValueAccessorCopy(self): xyz = (0, 0, 0) grid = openvdb.BoolGrid() acc = grid.getAccessor() self.assertEqual(acc.getValue(xyz), False) self.assertFalse(acc.isValueOn(xyz)) copyOfAcc = acc.copy() self.assertEqual(copyOfAcc.getValue(xyz), False) self.assertFalse(copyOfAcc.isValueOn(xyz)) # Verify that changes made to the grid through one accessor are reflected in the other. acc.setValueOn(xyz, True) self.assertEqual(acc.getValue(xyz), True) self.assertTrue(acc.isValueOn(xyz)) self.assertEqual(copyOfAcc.getValue(xyz), True) self.assertTrue(copyOfAcc.isValueOn(xyz)) copyOfAcc.setValueOff(xyz) self.assertEqual(acc.getValue(xyz), True) self.assertFalse(acc.isValueOn(xyz)) self.assertEqual(copyOfAcc.getValue(xyz), True) self.assertFalse(copyOfAcc.isValueOn(xyz)) # Verify that the two accessors are distinct, by checking that they # have cached different sets of nodes. xyz2 = (-1, -1, -1) copyOfAcc.setValueOn(xyz2) self.assertTrue(copyOfAcc.isCached(xyz2)) self.assertFalse(copyOfAcc.isCached(xyz)) self.assertTrue(acc.isCached(xyz)) self.assertFalse(acc.isCached(xyz2)) def testPickle(self): import pickle # Test pickling of transforms of various types. testXforms = [ openvdb.createLinearTransform(voxelSize=0.1), openvdb.createLinearTransform(matrix=[[1,0,0,0],[0,2,0,0],[0,0,3,0],[4,3,2,1]]), openvdb.createFrustumTransform((0,0,0), (10,10,10), taper=0.8, depth=10.0), ] for xform in testXforms: s = pickle.dumps(xform) restoredXform = pickle.loads(s) self.assertEqual(restoredXform, xform) # Test pickling of grids of various types. for factory in openvdb.GridTypes: # Construct a grid. grid = factory() # Add some metadata to the grid. meta = { 'name': 'test', 'saveFloatAsHalf': True, 'xyz': (-1, 0, 1) } grid.metadata = meta # Add some voxel data to the grid. active = True for width in range(63, 0, -10): val = valueFactory(grid.zeroValue, width) grid.fill((0, 0, 0), (width,)*3, val, active) active = not active # Pickle the grid to a string, then unpickle the string. s = pickle.dumps(grid) restoredGrid = pickle.loads(s) # Verify that the original and unpickled grids' metadata are equal. self.assertEqual(restoredGrid.metadata, meta) # Verify that the original and unpickled grids have the same active values. for restored, original in zip(restoredGrid.iterOnValues(), grid.iterOnValues()): self.assertEqual(restored, original) # Verify that the original and unpickled grids have the same inactive values. for restored, original in zip(restoredGrid.iterOffValues(), grid.iterOffValues()): self.assertEqual(restored, original) def testGridCombine(self): # Construct two grids and add some voxel data to each. aGrid, bGrid = openvdb.FloatGrid(), openvdb.FloatGrid(background=1.0) for width in range(63, 1, -10): aGrid.fill((0, 0, 0), (width,)*3, width) bGrid.fill((0, 0, 0), (width,)*3, 2 * width) # Save a copy of grid A. copyOfAGrid = aGrid.deepCopy() # Combine corresponding values of the two grids, storing the result in grid A. # (Since the grids have the same topology and B's active values are twice A's, # the function computes 2*min(a, 2*a) + 3*max(a, 2*a) = 2*a + 3*(2*a) = 8*a # for active values, and 2*min(0, 1) + 3*max(0, 1) = 2*0 + 3*1 = 3 # for inactive values.) 
aGrid.combine(bGrid, lambda a, b: 2 * min(a, b) + 3 * max(a, b)) self.assertTrue(bGrid.empty()) # Verify that the resulting grid's values are as expected. for original, combined in zip(copyOfAGrid.iterOnValues(), aGrid.iterOnValues()): self.assertEqual(combined.min, original.min) self.assertEqual(combined.max, original.max) self.assertEqual(combined.depth, original.depth) self.assertEqual(combined.value, 8 * original.value) for original, combined in zip(copyOfAGrid.iterOffValues(), aGrid.iterOffValues()): self.assertEqual(combined.min, original.min) self.assertEqual(combined.max, original.max) self.assertEqual(combined.depth, original.depth) self.assertEqual(combined.value, 3) def testLevelSetSphere(self): HALF_WIDTH = 4 sphere = openvdb.createLevelSetSphere(halfWidth=HALF_WIDTH, voxelSize=1.0, radius=100.0) lo, hi = sphere.evalMinMax() self.assertTrue(lo >= -HALF_WIDTH) self.assertTrue(hi <= HALF_WIDTH) def testCopyFromArray(self): import random import time # Skip this test if NumPy is not available. try: import numpy as np except ImportError: return # Skip this test if the OpenVDB module was built without NumPy support. arr = np.zeros((1, 2, 1)) grid = openvdb.FloatGrid() try: grid.copyFromArray(arr) except NotImplementedError: return # Verify that a non-three-dimensional array can't be copied into a grid. grid = openvdb.FloatGrid() self.assertRaises(TypeError, lambda: grid.copyFromArray('abc')) arr = np.zeros((1, 2)) self.assertRaises(ValueError, lambda: grid.copyFromArray(arr)) # Verify that complex-valued arrays are not supported. arr = np.zeros((1, 2, 1), dtype = complex) grid = openvdb.FloatGrid() self.assertRaises(TypeError, lambda: grid.copyFromArray(arr)) ARRAY_DIM = 201 BG, FG = 0, 1 # Generate some random voxel coordinates. random.seed(0) def randCoord(): return tuple(random.randint(0, ARRAY_DIM-1) for i in range(3)) coords = set(randCoord() for i in range(200)) def createArrays(): # Test both scalar- and vec3-valued (i.e., four-dimensional) arrays. for shape in ( (ARRAY_DIM, ARRAY_DIM, ARRAY_DIM), # scalar array (ARRAY_DIM, ARRAY_DIM, ARRAY_DIM, 3) # vec3 array ): for dtype in (np.float32, np.int32, np.float64, np.int64, np.uint32, np.bool): # Create a NumPy array, fill it with the background value, # then set some elements to the foreground value. arr = np.ndarray(shape, dtype) arr.fill(BG) bg = arr[0, 0, 0] for c in coords: arr[c] = FG yield arr # Test copying from arrays of various types to grids of various types. for cls in openvdb.GridTypes: # skip copying test for PointDataGrids if cls.valueTypeName.startswith('ptdataidx'): continue for arr in createArrays(): isScalarArray = (len(arr.shape) == 3) isScalarGrid = False try: len(cls.zeroValue) # values of vector grids are sequences, which have a length except TypeError: isScalarGrid = True # values of scalar grids have no length gridBG = valueFactory(cls.zeroValue, BG) gridFG = valueFactory(cls.zeroValue, FG) # Create an empty grid. grid = cls(gridBG) acc = grid.getAccessor() # Verify that scalar arrays can't be copied into vector grids # and vector arrays can't be copied into scalar grids. if isScalarGrid != isScalarArray: self.assertRaises(ValueError, lambda: grid.copyFromArray(arr)) continue # Copy values from the NumPy array to the grid, marking # background values as inactive and all other values as active. 
now = time.clock() grid.copyFromArray(arr) elapsed = time.clock() - now #print 'copied %d voxels from %s array to %s in %f sec' % ( # arr.shape[0] * arr.shape[1] * arr.shape[2], # str(arr.dtype) + ('' if isScalarArray else '[]'), # grid.__class__.__name__, elapsed) # Verify that the grid's active voxels match the array's foreground elements. self.assertEqual(grid.activeVoxelCount(), len(coords)) for c in coords: self.assertEqual(acc.getValue(c), gridFG) for value in grid.iterOnValues(): self.assertTrue(value.min in coords) def testCopyToArray(self): import random import time # Skip this test if NumPy is not available. try: import numpy as np except ImportError: return # Skip this test if the OpenVDB module was built without NumPy support. arr = np.zeros((1, 2, 1)) grid = openvdb.FloatGrid() try: grid.copyFromArray(arr) except NotImplementedError: return # Verify that a grid can't be copied into a non-three-dimensional array. grid = openvdb.FloatGrid() self.assertRaises(TypeError, lambda: grid.copyToArray('abc')) arr = np.zeros((1, 2)) self.assertRaises(ValueError, lambda: grid.copyToArray(arr)) # Verify that complex-valued arrays are not supported. arr = np.zeros((1, 2, 1), dtype = complex) grid = openvdb.FloatGrid() self.assertRaises(TypeError, lambda: grid.copyToArray(arr)) ARRAY_DIM = 201 BG, FG = 0, 1 # Generate some random voxel coordinates. random.seed(0) def randCoord(): return tuple(random.randint(0, ARRAY_DIM-1) for i in range(3)) coords = set(randCoord() for i in range(200)) def createArrays(): # Test both scalar- and vec3-valued (i.e., four-dimensional) arrays. for shape in ( (ARRAY_DIM, ARRAY_DIM, ARRAY_DIM), # scalar array (ARRAY_DIM, ARRAY_DIM, ARRAY_DIM, 3) # vec3 array ): for dtype in (np.float32, np.int32, np.float64, np.int64, np.uint32, np.bool): # Return a new NumPy array. arr = np.ndarray(shape, dtype) arr.fill(-100) yield arr # Test copying from arrays of various types to grids of various types. for cls in openvdb.GridTypes: # skip copying test for PointDataGrids if cls.valueTypeName.startswith('ptdataidx'): continue for arr in createArrays(): isScalarArray = (len(arr.shape) == 3) isScalarGrid = False try: len(cls.zeroValue) # values of vector grids are sequences, which have a length except TypeError: isScalarGrid = True # values of scalar grids have no length gridBG = valueFactory(cls.zeroValue, BG) gridFG = valueFactory(cls.zeroValue, FG) # Create an empty grid, fill it with the background value, # then set some elements to the foreground value. grid = cls(gridBG) acc = grid.getAccessor() for c in coords: acc.setValueOn(c, gridFG) # Verify that scalar grids can't be copied into vector arrays # and vector grids can't be copied into scalar arrays. if isScalarGrid != isScalarArray: self.assertRaises(ValueError, lambda: grid.copyToArray(arr)) continue # Copy values from the grid to the NumPy array. now = time.clock() grid.copyToArray(arr) elapsed = time.clock() - now #print 'copied %d voxels from %s to %s array in %f sec' % ( # arr.shape[0] * arr.shape[1] * arr.shape[2], grid.__class__.__name__, # str(arr.dtype) + ('' if isScalarArray else '[]'), elapsed) # Verify that the grid's active voxels match the array's foreground elements. for c in coords: self.assertEqual(arr[c] if isScalarArray else tuple(arr[c]), gridFG) arr[c] = gridBG self.assertEqual(np.amin(arr), BG) self.assertEqual(np.amax(arr), BG) def testMeshConversion(self): import time # Skip this test if NumPy is not available. try: import numpy as np except ImportError: return # Test mesh to volume conversion. 
# Generate the vertices of a cube. cubeVertices = [(x, y, z) for x in (0, 100) for y in (0, 100) for z in (0, 100)] cubePoints = np.array(cubeVertices, float) # Generate the faces of a cube. cubeQuads = np.array([ (0, 1, 3, 2), # left (0, 2, 6, 4), # front (4, 6, 7, 5), # right (5, 7, 3, 1), # back (2, 3, 7, 6), # top (0, 4, 5, 1), # bottom ], float) voxelSize = 2.0 halfWidth = 3.0 xform = openvdb.createLinearTransform(voxelSize) # Only scalar, floating-point grids support createLevelSetFromPolygons() # (and the OpenVDB module might have been compiled without DoubleGrid support). grids = [] for gridType in [n for n in openvdb.GridTypes if n.__name__ in ('FloatGrid', 'DoubleGrid')]: # Skip this test if the OpenVDB module was built without NumPy support. try: grid = gridType.createLevelSetFromPolygons( cubePoints, quads=cubeQuads, transform=xform, halfWidth=halfWidth) except NotImplementedError: return #openvdb.write('/tmp/testMeshConversion.vdb', grid) self.assertEqual(grid.transform, xform) self.assertEqual(grid.background, halfWidth * voxelSize) dim = grid.evalActiveVoxelDim() self.assertTrue(50 < dim[0] < 58) self.assertTrue(50 < dim[1] < 58) self.assertTrue(50 < dim[2] < 58) grids.append(grid) # Boolean-valued grids can't be used to store level sets. self.assertRaises(TypeError, lambda: openvdb.BoolGrid.createLevelSetFromPolygons( cubePoints, quads=cubeQuads, transform=xform, halfWidth=halfWidth)) # Vector-valued grids can't be used to store level sets. self.assertRaises(TypeError, lambda: openvdb.Vec3SGrid.createLevelSetFromPolygons( cubePoints, quads=cubeQuads, transform=xform, halfWidth=halfWidth)) # The "points" argument to createLevelSetFromPolygons() must be a NumPy array. self.assertRaises(TypeError, lambda: openvdb.FloatGrid.createLevelSetFromPolygons( cubeVertices, quads=cubeQuads, transform=xform, halfWidth=halfWidth)) # The "points" argument to createLevelSetFromPolygons() must be a NumPy float or int array. self.assertRaises(TypeError, lambda: openvdb.FloatGrid.createLevelSetFromPolygons( np.array(cubeVertices, bool), quads=cubeQuads, transform=xform, halfWidth=halfWidth)) # The "triangles" argument to createLevelSetFromPolygons() must be an N x 3 NumPy array. self.assertRaises(TypeError, lambda: openvdb.FloatGrid.createLevelSetFromPolygons( cubePoints, triangles=cubeQuads, transform=xform, halfWidth=halfWidth)) # Test volume to mesh conversion. # Vector-valued grids can't be meshed. self.assertRaises(TypeError, lambda: openvdb.Vec3SGrid().convertToQuads()) for grid in grids: points, quads = grid.convertToQuads() # These checks are intended mainly to test the Python/C++ bindings, # not the OpenVDB volume to mesh converter. 
self.assertTrue(len(points) > 8) self.assertTrue(len(quads) > 6) pmin, pmax = points.min(0), points.max(0) self.assertTrue(-2 < pmin[0] < 2) self.assertTrue(-2 < pmin[1] < 2) self.assertTrue(-2 < pmin[2] < 2) self.assertTrue(98 < pmax[0] < 102) self.assertTrue(98 < pmax[1] < 102) self.assertTrue(98 < pmax[2] < 102) points, triangles, quads = grid.convertToPolygons(adaptivity=1) self.assertTrue(len(points) > 8) pmin, pmax = points.min(0), points.max(0) self.assertTrue(-2 < pmin[0] < 2) self.assertTrue(-2 < pmin[1] < 2) self.assertTrue(-2 < pmin[2] < 2) self.assertTrue(98 < pmax[0] < 102) self.assertTrue(98 < pmax[1] < 102) self.assertTrue(98 < pmax[2] < 102) if __name__ == '__main__': print('Testing %s' % os.path.dirname(openvdb.__file__)) sys.stdout.flush() args = sys.argv # PyUnit doesn't use the "-t" flag to identify test names, # so for consistency, strip out any "-t" arguments, # so that, e.g., "TestOpenVDB.py -t TestOpenVDB.testTransform" # is equivalent to "TestOpenVDB.py TestOpenVDB.testTransform". args = [a for a in args if a != '-t'] unittest.main(argv=args)
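The copyFromArray/copyToArray tests above reduce to the round trip sketched below. This assumes the module was built with NumPy support (otherwise copyFromArray raises NotImplementedError, which the tests themselves check for) and that it is importable as pyopenvdb.

import numpy as np
import pyopenvdb as vdb  # assumed import name

# Dense NumPy array -> sparse grid: elements equal to the grid's background
# (within the tolerance) become inactive background voxels.
arr = np.zeros((64, 64, 64), dtype=np.float32)
arr[10:20, 10:20, 10:20] = 1.0

grid = vdb.FloatGrid()
grid.copyFromArray(arr, ijk=(0, 0, 0), tolerance=0.0)
assert grid.activeVoxelCount() == 10 * 10 * 10

# Sparse grid -> dense NumPy array, reading from the same origin voxel.
out = np.zeros((64, 64, 64), dtype=np.float32)
grid.copyToArray(out, ijk=(0, 0, 0))
assert np.array_equal(out, arr)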
31,342
Python
38.978316
99
0.579925
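The arithmetic asserted in testGridCombine can be checked in isolation: grid B holds twice grid A's active values and the backgrounds are 0 and 1, so the combining function 2*min(a, b) + 3*max(a, b) reduces to 8*a for active voxels and to 3 for inactive ones. A standalone check (plain Python, no OpenVDB required):

def combine(a, b):
    return 2 * min(a, b) + 3 * max(a, b)

# Active voxels: b == 2*a with a > 0, so min == a and max == 2*a, giving 2*a + 6*a == 8*a.
for a in (3, 13, 23, 33, 43, 53, 63):
    assert combine(a, 2 * a) == 8 * a

# Inactive voxels: A's background is 0 and B's is 1, giving 2*0 + 3*1 == 3.
assert combine(0.0, 1.0) == 3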
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/Formats.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @author Ken Museth /// /// @file Formats.h /// /// @brief Utility routines to output nicely-formatted numeric values #ifndef OPENVDB_UTIL_FORMATS_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_FORMATS_HAS_BEEN_INCLUDED #include <iosfwd> #include <sstream> #include <string> #include <openvdb/version.h> #include <openvdb/Platform.h> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { /// Output a byte count with the correct binary suffix (KB, MB, GB or TB). /// @param os the output stream /// @param bytes the byte count to be output /// @param head a string to be output before the numeric text /// @param tail a string to be output after the numeric text /// @param exact if true, also output the unmodified count, e.g., "4.6 KB (4620 Bytes)" /// @param width a fixed width for the numeric text /// @param precision the number of digits after the decimal point /// @return 0, 1, 2, 3 or 4, denoting the order of magnitude of the count. OPENVDB_API int printBytes(std::ostream& os, uint64_t bytes, const std::string& head = "", const std::string& tail = "\n", bool exact = false, int width = 8, int precision = 3); /// Output a number with the correct SI suffix (thousand, million, billion or trillion) /// @param os the output stream /// @param number the number to be output /// @param head a string to be output before the numeric text /// @param tail a string to be output after the numeric text /// @param exact if true, also output the unmodified count, e.g., "4.6 Thousand (4620)" /// @param width a fixed width for the numeric text /// @param precision the number of digits after the decimal point /// @return 0, 1, 2, 3 or 4, denoting the order of magnitude of the number. OPENVDB_API int printNumber(std::ostream& os, uint64_t number, const std::string& head = "", const std::string& tail = "\n", bool exact = true, int width = 8, int precision = 3); /// Output a time in milliseconds with the correct suffix (days, hours, minutes, seconds and milliseconds) /// @param os the output stream /// @param milliseconds the time to be output /// @param head a string to be output before the time /// @param tail a string to be output after the time /// @param width a fixed width for the numeric text /// @param precision the number of digits after the decimal point /// @param verbose verbose level, 0 is compact format and 1 is long format /// @return 0, 1, 2, 3, or 4 denoting the order of magnitude of the time. OPENVDB_API int printTime(std::ostream& os, double milliseconds, const std::string& head = "", const std::string& tail = "\n", int width = 4, int precision = 1, int verbose = 0); //////////////////////////////////////// /// @brief I/O manipulator that formats integer values with thousands separators template<typename IntT> class FormattedInt { public: static char sep() { return ','; } FormattedInt(IntT n): mInt(n) {} std::ostream& put(std::ostream& os) const { // Convert the integer to a string. std::ostringstream ostr; ostr << mInt; std::string s = ostr.str(); // Prefix the string with spaces if its length is not a multiple of three. size_t padding = (s.size() % 3) ? 3 - (s.size() % 3) : 0; s = std::string(padding, ' ') + s; // Construct a new string in which groups of three digits are followed // by a separator character. 
ostr.str(""); for (size_t i = 0, N = s.size(); i < N; ) { ostr << s[i]; ++i; if (i >= padding && i % 3 == 0 && i < s.size()) { ostr << sep(); } } // Remove any padding that was added and output the string. s = ostr.str(); os << s.substr(padding, s.size()); return os; } private: IntT mInt; }; template<typename IntT> std::ostream& operator<<(std::ostream& os, const FormattedInt<IntT>& n) { return n.put(os); } /// @return an I/O manipulator that formats the given integer value for output to a stream. template<typename IntT> FormattedInt<IntT> formattedInt(IntT n) { return FormattedInt<IntT>(n); } } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_FORMATS_HAS_BEEN_INCLUDED
4,518
C
35.152
106
0.63745
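The digit-grouping algorithm in FormattedInt::put (left-pad the decimal string to a multiple of three, emit a separator after every third character past the padding, then strip the padding) can be sketched in Python for clarity. This mirrors the algorithm only; it is not part of the library.

def formatted_int(n, sep=','):
    """Group the digits of an integer in threes, as FormattedInt::put does."""
    s = str(n)
    padding = (3 - len(s) % 3) % 3   # pad so the length is a multiple of three
    s = ' ' * padding + s
    out = []
    for i, ch in enumerate(s, start=1):
        out.append(ch)
        if i >= padding and i % 3 == 0 and i < len(s):
            out.append(sep)
    return ''.join(out)[padding:]    # strip the padding again

assert formatted_int(1234567) == '1,234,567'
assert formatted_int(123) == '123'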
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/Util.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "Util.h" #include <limits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { const Index32 INVALID_IDX = std::numeric_limits<Index32>::max(); const Coord COORD_OFFSETS[26] = { Coord( 1, 0, 0), /// Voxel-face adjacent neighbours Coord(-1, 0, 0), /// 0 to 5 Coord( 0, 1, 0), Coord( 0, -1, 0), Coord( 0, 0, 1), Coord( 0, 0, -1), Coord( 1, 0, -1), /// Voxel-edge adjacent neighbours Coord(-1, 0, -1), /// 6 to 17 Coord( 1, 0, 1), Coord(-1, 0, 1), Coord( 1, 1, 0), Coord(-1, 1, 0), Coord( 1, -1, 0), Coord(-1, -1, 0), Coord( 0, -1, 1), Coord( 0, -1, -1), Coord( 0, 1, 1), Coord( 0, 1, -1), Coord(-1, -1, -1), /// Voxel-corner adjacent neighbours Coord(-1, -1, 1), /// 18 to 25 Coord( 1, -1, 1), Coord( 1, -1, -1), Coord(-1, 1, -1), Coord(-1, 1, 1), Coord( 1, 1, 1), Coord( 1, 1, -1) }; } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb
1,144
C++
23.361702
64
0.527972
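COORD_OFFSETS above lists the 26 neighbours of a voxel in three groups: 6 face-adjacent offsets (exactly one nonzero component), 12 edge-adjacent offsets (two nonzero components) and 8 corner-adjacent offsets (all three nonzero). A small Python sketch, independent of the library, reproduces that grouping:

from itertools import product

# All offsets in the 3x3x3 neighborhood except the center voxel itself.
offsets = [c for c in product((-1, 0, 1), repeat=3) if c != (0, 0, 0)]
assert len(offsets) == 26

# Classify by the number of nonzero components, matching the three
# blocks of COORD_OFFSETS (indices 0-5, 6-17 and 18-25).
face   = [c for c in offsets if sum(v != 0 for v in c) == 1]
edge   = [c for c in offsets if sum(v != 0 for v in c) == 2]
corner = [c for c in offsets if sum(v != 0 for v in c) == 3]

assert (len(face), len(edge), len(corner)) == (6, 12, 8)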
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/logging.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_UTIL_LOGGING_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_LOGGING_HAS_BEEN_INCLUDED #include <openvdb/version.h> #ifdef OPENVDB_USE_LOG4CPLUS #include <log4cplus/appender.h> #include <log4cplus/configurator.h> #include <log4cplus/consoleappender.h> #include <log4cplus/layout.h> #include <log4cplus/logger.h> #include <log4cplus/spi/loggingevent.h> #include <algorithm> // for std::remove() #include <cstring> // for ::strrchr() #include <memory> #include <sstream> #include <string> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace logging { /// @brief Message severity level enum class Level { Debug = log4cplus::DEBUG_LOG_LEVEL, Info = log4cplus::INFO_LOG_LEVEL, Warn = log4cplus::WARN_LOG_LEVEL, Error = log4cplus::ERROR_LOG_LEVEL, Fatal = log4cplus::FATAL_LOG_LEVEL }; namespace internal { /// @brief log4cplus layout that outputs text in different colors /// for different log levels, using ANSI escape codes class ColoredPatternLayout: public log4cplus::PatternLayout { public: explicit ColoredPatternLayout(const std::string& progName_, bool useColor = true) : log4cplus::PatternLayout( progName_.empty() ? std::string{"%5p: %m%n"} : (progName_ + " %5p: %m%n")) , mUseColor(useColor) , mProgName(progName_) { } ~ColoredPatternLayout() override {} const std::string& progName() const { return mProgName; } void formatAndAppend(log4cplus::tostream& strm, const log4cplus::spi::InternalLoggingEvent& event) override { if (!mUseColor) { log4cplus::PatternLayout::formatAndAppend(strm, event); return; } log4cplus::tostringstream s; switch (event.getLogLevel()) { case log4cplus::DEBUG_LOG_LEVEL: s << "\033[32m"; break; // green case log4cplus::ERROR_LOG_LEVEL: case log4cplus::FATAL_LOG_LEVEL: s << "\033[31m"; break; // red case log4cplus::INFO_LOG_LEVEL: s << "\033[36m"; break; // cyan case log4cplus::WARN_LOG_LEVEL: s << "\033[35m"; break; // magenta } log4cplus::PatternLayout::formatAndAppend(s, event); strm << s.str() << "\033[0m" << std::flush; } // Disable deprecation warnings for std::auto_ptr. #if defined(__ICC) #pragma warning push #pragma warning disable:1478 #elif defined(__clang__) #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wdeprecated-declarations" #elif defined(__GNUC__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wdeprecated-declarations" #endif #if defined(LOG4CPLUS_VERSION) && defined(LOG4CPLUS_MAKE_VERSION) #if LOG4CPLUS_VERSION >= LOG4CPLUS_MAKE_VERSION(2, 0, 0) // In log4cplus 2.0.0, std::auto_ptr was replaced with std::unique_ptr. using Ptr = std::unique_ptr<log4cplus::Layout>; #else using Ptr = std::auto_ptr<log4cplus::Layout>; #endif #else using Ptr = std::auto_ptr<log4cplus::Layout>; #endif static Ptr create(const std::string& progName_, bool useColor = true) { return Ptr{new ColoredPatternLayout{progName_, useColor}}; } #if defined(__ICC) #pragma warning pop #elif defined(__clang__) #pragma clang diagnostic pop #elif defined(__GNUC__) #pragma GCC diagnostic pop #endif private: bool mUseColor = true; std::string mProgName; }; // class ColoredPatternLayout inline log4cplus::Logger getLogger() { return log4cplus::Logger::getInstance(LOG4CPLUS_TEXT("openvdb")); } inline log4cplus::SharedAppenderPtr getAppender() { return getLogger().getAppender(LOG4CPLUS_TEXT("OPENVDB")); } } // namespace internal /// @brief Return the current logging level. 
inline Level getLevel() { switch (internal::getLogger().getLogLevel()) { case log4cplus::DEBUG_LOG_LEVEL: return Level::Debug; case log4cplus::INFO_LOG_LEVEL: return Level::Info; case log4cplus::WARN_LOG_LEVEL: return Level::Warn; case log4cplus::ERROR_LOG_LEVEL: return Level::Error; case log4cplus::FATAL_LOG_LEVEL: break; } return Level::Fatal; } /// @brief Set the logging level. (Lower-level messages will be suppressed.) inline void setLevel(Level lvl) { internal::getLogger().setLogLevel(static_cast<log4cplus::LogLevel>(lvl)); } /// @brief If "-debug", "-info", "-warn", "-error" or "-fatal" is found /// in the given array of command-line arguments, set the logging level /// appropriately and remove the relevant argument(s) from the array. inline void setLevel(int& argc, char* argv[]) { for (int i = 1; i < argc; ++i) { // note: skip argv[0] const std::string arg{argv[i]}; bool remove = true; if (arg == "-debug") { setLevel(Level::Debug); } else if (arg == "-error") { setLevel(Level::Error); } else if (arg == "-fatal") { setLevel(Level::Fatal); } else if (arg == "-info") { setLevel(Level::Info); } else if (arg == "-warn") { setLevel(Level::Warn); } else { remove = false; } if (remove) argv[i] = nullptr; } auto end = std::remove(argv + 1, argv + argc, nullptr); argc = static_cast<int>(end - argv); } /// @brief Specify a program name to be displayed in log messages. inline void setProgramName(const std::string& progName, bool useColor = true) { // Change the layout of the OpenVDB appender to use colored text // and to incorporate the supplied program name. if (auto appender = internal::getAppender()) { appender->setLayout(internal::ColoredPatternLayout::create(progName, useColor)); } } /// @brief Initialize the logging system if it is not already initialized. inline void initialize(bool useColor = true) { log4cplus::initialize(); if (internal::getAppender()) return; // already initialized // Create the OpenVDB logger if it doesn't already exist. auto logger = internal::getLogger(); // Disable "additivity", so that OpenVDB-related messages are directed // to the OpenVDB logger only and are not forwarded up the logger tree. logger.setAdditivity(false); // Attach a console appender to the OpenVDB logger. if (auto appender = log4cplus::SharedAppenderPtr{new log4cplus::ConsoleAppender}) { appender->setName(LOG4CPLUS_TEXT("OPENVDB")); logger.addAppender(appender); } setLevel(Level::Warn); setProgramName("", useColor); } /// @brief Initialize the logging system from command-line arguments. /// @details If "-debug", "-info", "-warn", "-error" or "-fatal" is found /// in the given array of command-line arguments, set the logging level /// appropriately and remove the relevant argument(s) from the array. inline void initialize(int& argc, char* argv[], bool useColor = true) { initialize(); setLevel(argc, argv); auto progName = (argc > 0 ? argv[0] : ""); if (const char* ptr = ::strrchr(progName, '/')) progName = ptr + 1; setProgramName(progName, useColor); } } // namespace logging } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #define OPENVDB_LOG(level, message) \ do { \ auto _log = openvdb::logging::internal::getLogger(); \ if (_log.isEnabledFor(log4cplus::level##_LOG_LEVEL)) { \ std::ostringstream _buf; \ _buf << message; \ _log.forcedLog(log4cplus::level##_LOG_LEVEL, _buf.str(), __FILE__, __LINE__); \ } \ } while (0); /// Log an info message of the form '<TT>someVar << "some text" << ...</TT>'. 
#define OPENVDB_LOG_INFO(message) OPENVDB_LOG(INFO, message) /// Log a warning message of the form '<TT>someVar << "some text" << ...</TT>'. #define OPENVDB_LOG_WARN(message) OPENVDB_LOG(WARN, message) /// Log an error message of the form '<TT>someVar << "some text" << ...</TT>'. #define OPENVDB_LOG_ERROR(message) OPENVDB_LOG(ERROR, message) /// Log a fatal error message of the form '<TT>someVar << "some text" << ...</TT>'. #define OPENVDB_LOG_FATAL(message) OPENVDB_LOG(FATAL, message) #ifdef DEBUG /// In debug builds only, log a debugging message of the form '<TT>someVar << "text" << ...</TT>'. #define OPENVDB_LOG_DEBUG(message) OPENVDB_LOG(DEBUG, message) #else /// In debug builds only, log a debugging message of the form '<TT>someVar << "text" << ...</TT>'. #define OPENVDB_LOG_DEBUG(message) #endif /// @brief Log a debugging message in both debug and optimized builds. /// @warning Don't use this in performance-critical code. #define OPENVDB_LOG_DEBUG_RUNTIME(message) OPENVDB_LOG(DEBUG, message) #else // ifdef OPENVDB_USE_LOG4CPLUS #include <iostream> #define OPENVDB_LOG_INFO(mesg) #define OPENVDB_LOG_WARN(mesg) do { std::cerr << "WARNING: " << mesg << std::endl; } while (0); #define OPENVDB_LOG_ERROR(mesg) do { std::cerr << "ERROR: " << mesg << std::endl; } while (0); #define OPENVDB_LOG_FATAL(mesg) do { std::cerr << "FATAL: " << mesg << std::endl; } while (0); #define OPENVDB_LOG_DEBUG(mesg) #define OPENVDB_LOG_DEBUG_RUNTIME(mesg) namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace logging { enum class Level { Debug, Info, Warn, Error, Fatal }; inline Level getLevel() { return Level::Warn; } inline void setLevel(Level) {} inline void setLevel(int&, char*[]) {} inline void setProgramName(const std::string&, bool = true) {} inline void initialize() {} inline void initialize(int&, char*[], bool = true) {} } // namespace logging } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_USE_LOG4CPLUS namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace logging { /// @brief A LevelScope object sets the logging level to a given level /// and restores it to the current level when the object goes out of scope. struct LevelScope { Level level; explicit LevelScope(Level newLevel): level(getLevel()) { setLevel(newLevel); } ~LevelScope() { setLevel(level); } }; } // namespace logging } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_LOGGING_HAS_BEEN_INCLUDED
10,256
C
31.053125
100
0.666537
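The logging header above is consumed almost entirely through the OPENVDB_LOG_* macros and the free functions in openvdb::logging, and the same calls compile whether or not OpenVDB was built with log4cplus (the fallbacks are no-ops). The sketch below is one plausible way to wire it into a command-line tool; the main() scaffolding is illustrative, but every call it makes (initialize, LevelScope, and the macros) is declared in the header above.

#include <openvdb/openvdb.h>
#include <openvdb/util/logging.h>

int main(int argc, char* argv[])
{
    openvdb::initialize();
    // Strips -debug/-info/-warn/-error/-fatal from argv, sets the log level
    // accordingly, and uses argv[0] as the program name in log output.
    openvdb::logging::initialize(argc, argv);

    OPENVDB_LOG_INFO("starting with " << (argc - 1) << " remaining argument(s)");
    {
        // Temporarily switch to Debug verbosity; the previous level is
        // restored when the scope object is destroyed.
        openvdb::logging::LevelScope verbose(openvdb::logging::Level::Debug);
        OPENVDB_LOG_DEBUG_RUNTIME("extra diagnostics are visible here");
    }
    OPENVDB_LOG_WARN("this warning is printed at the default level");
    return 0;
}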
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/NodeMasks.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @author Ken Museth /// /// @file NodeMasks.h #ifndef OPENVDB_UTIL_NODEMASKS_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_NODEMASKS_HAS_BEEN_INCLUDED #include <algorithm> // for std::min() #include <cassert> #include <cstring> #include <iostream>// for cout #include <openvdb/Platform.h> #include <openvdb/Types.h> //#include <strings.h> // for ffs namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { /// Return the number of on bits in the given 8-bit value. inline Index32 CountOn(Byte v) { #if defined(OPENVDB_USE_SSE42) && defined(_MSC_VER) return __popcnt16(v); #elif defined(OPENVDB_USE_SSE42) && (defined(__GNUC__) || defined(__clang__)) return __builtin_popcount(v); #else // Software Implementation - Simple LUT static const Byte numBits[256] = { #define COUNTONB2(n) n, n+1, n+1, n+2 #define COUNTONB4(n) COUNTONB2(n), COUNTONB2(n+1), COUNTONB2(n+1), COUNTONB2(n+2) #define COUNTONB6(n) COUNTONB4(n), COUNTONB4(n+1), COUNTONB4(n+1), COUNTONB4(n+2) COUNTONB6(0), COUNTONB6(1), COUNTONB6(1), COUNTONB6(2) }; return numBits[v]; #undef COUNTONB6 #undef COUNTONB4 #undef COUNTONB2 #endif } /// Return the number of off bits in the given 8-bit value. inline Index32 CountOff(Byte v) { return CountOn(static_cast<Byte>(~v)); } /// Return the number of on bits in the given 32-bit value. inline Index32 CountOn(Index32 v) { v = v - ((v >> 1) & 0x55555555U); v = (v & 0x33333333U) + ((v >> 2) & 0x33333333U); return (((v + (v >> 4)) & 0xF0F0F0FU) * 0x1010101U) >> 24; } /// Return the number of off bits in the given 32-bit value. inline Index32 CountOff(Index32 v) { return CountOn(~v); } /// Return the number of on bits in the given 64-bit value. inline Index32 CountOn(Index64 v) { #if defined(OPENVDB_USE_SSE42) && defined(_MSC_VER) && defined(_M_X64) v = __popcnt64(v); #elif defined(OPENVDB_USE_SSE42) && (defined(__GNUC__) || defined(__clang__)) v = __builtin_popcountll(v); #else // Software Implementation v = v - ((v >> 1) & UINT64_C(0x5555555555555555)); v = (v & UINT64_C(0x3333333333333333)) + ((v >> 2) & UINT64_C(0x3333333333333333)); v = (((v + (v >> 4)) & UINT64_C(0xF0F0F0F0F0F0F0F)) * UINT64_C(0x101010101010101)) >> 56; #endif return static_cast<Index32>(v); } /// Return the number of off bits in the given 64-bit value. inline Index32 CountOff(Index64 v) { return CountOn(~v); } /// Return the least significant on bit of the given 8-bit value. inline Index32 FindLowestOn(Byte v) { assert(v); #if defined(OPENVDB_USE_SSE42) && defined(_MSC_VER) unsigned long index; _BitScanForward(&index, static_cast<Index32>(v)); return static_cast<Index32>(index); #elif defined(OPENVDB_USE_SSE42) && (defined(__GNUC__) || defined(__clang__)) return __builtin_ctz(v); #else // Software Implementation static const Byte DeBruijn[8] = {0, 1, 6, 2, 7, 5, 4, 3}; return DeBruijn[Byte((v & -v) * 0x1DU) >> 5]; #endif } /// Return the least significant on bit of the given 32-bit value. inline Index32 FindLowestOn(Index32 v) { assert(v); //return ffs(v); static const Byte DeBruijn[32] = { 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 }; // disable unary minus on unsigned warning #if defined(_MSC_VER) #pragma warning(push) #pragma warning(disable:4146) #endif return DeBruijn[Index32((v & -v) * 0x077CB531U) >> 27]; #if defined(_MSC_VER) #pragma warning(pop) #endif } /// Return the least significant on bit of the given 64-bit value. 
inline Index32 FindLowestOn(Index64 v) { assert(v); #if defined(OPENVDB_USE_SSE42) && defined(_MSC_VER) unsigned long index; _BitScanForward64(&index, v); return static_cast<Index32>(index); #elif defined(OPENVDB_USE_SSE42) && (defined(__GNUC__) || defined(__clang__)) return static_cast<Index32>(__builtin_ctzll(v)); #else // Software Implementation static const Byte DeBruijn[64] = { 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28, 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11, 63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10, 51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12, }; // disable unary minus on unsigned warning #if defined(_MSC_VER) #pragma warning(push) #pragma warning(disable:4146) #endif return DeBruijn[Index64((v & -v) * UINT64_C(0x022FDD63CC95386D)) >> 58]; #if defined(_MSC_VER) #pragma warning(pop) #endif #endif } /// Return the most significant on bit of the given 32-bit value. inline Index32 FindHighestOn(Index32 v) { static const Byte DeBruijn[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; v |= v >> 1; // first round down to one less than a power of 2 v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; return DeBruijn[Index32(v * 0x07C4ACDDU) >> 27]; } //////////////////////////////////////// /// Base class for the bit mask iterators template<typename NodeMask> class BaseMaskIterator { protected: Index32 mPos; // bit position const NodeMask* mParent; // this iterator can't change the parent_mask! public: BaseMaskIterator(): mPos(NodeMask::SIZE), mParent(nullptr) {} BaseMaskIterator(const BaseMaskIterator&) = default; BaseMaskIterator(Index32 pos, const NodeMask* parent): mPos(pos), mParent(parent) { assert((parent == nullptr && pos == 0) || (parent != nullptr && pos <= NodeMask::SIZE)); } bool operator==(const BaseMaskIterator &iter) const {return mPos == iter.mPos;} bool operator!=(const BaseMaskIterator &iter) const {return mPos != iter.mPos;} bool operator< (const BaseMaskIterator &iter) const {return mPos < iter.mPos;} BaseMaskIterator& operator=(const BaseMaskIterator& iter) { mPos = iter.mPos; mParent = iter.mParent; return *this; } Index32 offset() const { return mPos; } Index32 pos() const { return mPos; } bool test() const { assert(mPos <= NodeMask::SIZE); return (mPos != NodeMask::SIZE); } operator bool() const { return this->test(); } }; // class BaseMaskIterator /// @note This happens to be a const-iterator! template <typename NodeMask> class OnMaskIterator: public BaseMaskIterator<NodeMask> { private: using BaseType = BaseMaskIterator<NodeMask>; using BaseType::mPos;//bit position; using BaseType::mParent;//this iterator can't change the parent_mask! public: OnMaskIterator() : BaseType() {} OnMaskIterator(Index32 pos,const NodeMask *parent) : BaseType(pos,parent) {} void increment() { assert(mParent != nullptr); mPos = mParent->findNextOn(mPos+1); assert(mPos <= NodeMask::SIZE); } void increment(Index n) { while(n-- && this->next()) ; } bool next() { this->increment(); return this->test(); } bool operator*() const {return true;} OnMaskIterator& operator++() { this->increment(); return *this; } }; // class OnMaskIterator template <typename NodeMask> class OffMaskIterator: public BaseMaskIterator<NodeMask> { private: using BaseType = BaseMaskIterator<NodeMask>; using BaseType::mPos;//bit position; using BaseType::mParent;//this iterator can't change the parent_mask! 
public: OffMaskIterator() : BaseType() {} OffMaskIterator(Index32 pos,const NodeMask *parent) : BaseType(pos,parent) {} void increment() { assert(mParent != nullptr); mPos=mParent->findNextOff(mPos+1); assert(mPos <= NodeMask::SIZE); } void increment(Index n) { while(n-- && this->next()) ; } bool next() { this->increment(); return this->test(); } bool operator*() const {return false;} OffMaskIterator& operator++() { this->increment(); return *this; } }; // class OffMaskIterator template <typename NodeMask> class DenseMaskIterator: public BaseMaskIterator<NodeMask> { private: using BaseType = BaseMaskIterator<NodeMask>; using BaseType::mPos;//bit position; using BaseType::mParent;//this iterator can't change the parent_mask! public: DenseMaskIterator() : BaseType() {} DenseMaskIterator(Index32 pos,const NodeMask *parent) : BaseType(pos,parent) {} void increment() { assert(mParent != nullptr); mPos += 1;//careful - the increment might go beyond the end assert(mPos<= NodeMask::SIZE); } void increment(Index n) { while(n-- && this->next()) ; } bool next() { this->increment(); return this->test(); } bool operator*() const {return mParent->isOn(mPos);} DenseMaskIterator& operator++() { this->increment(); return *this; } }; // class DenseMaskIterator /// @brief Bit mask for the internal and leaf nodes of VDB. This /// is a 64-bit implementation. /// /// @note A template specialization for Log2Dim=1 and Log2Dim=2 are /// given below. template<Index Log2Dim> class NodeMask { public: static_assert(Log2Dim > 2, "expected NodeMask template specialization, got base template"); static const Index32 LOG2DIM = Log2Dim; static const Index32 DIM = 1<<Log2Dim; static const Index32 SIZE = 1<<3*Log2Dim; static const Index32 WORD_COUNT = SIZE >> 6;// 2^6=64 using Word = Index64; private: // The bits are represented as a linear array of Words, and the // size of a Word is 32 or 64 bits depending on the platform. // The BIT_MASK is defined as the number of bits in a Word - 1 //static const Index32 BIT_MASK = sizeof(void*) == 8 ? 63 : 31; //static const Index32 LOG2WORD = BIT_MASK == 63 ? 6 : 5; //static const Index32 WORD_COUNT = SIZE >> LOG2WORD; //using Word = boost::mpl::if_c<BIT_MASK == 63, Index64, Index32>::type; Word mWords[WORD_COUNT];//only member data! 
public: /// Default constructor sets all bits off NodeMask() { this->setOff(); } /// All bits are set to the specified state NodeMask(bool on) { this->set(on); } /// Copy constructor NodeMask(const NodeMask &other) { *this = other; } /// Destructor ~NodeMask() {} /// Assignment operator NodeMask& operator=(const NodeMask& other) { Index32 n = WORD_COUNT; const Word* w2 = other.mWords; for (Word* w1 = mWords; n--; ++w1, ++w2) *w1 = *w2; return *this; } using OnIterator = OnMaskIterator<NodeMask>; using OffIterator = OffMaskIterator<NodeMask>; using DenseIterator = DenseMaskIterator<NodeMask>; OnIterator beginOn() const { return OnIterator(this->findFirstOn(),this); } OnIterator endOn() const { return OnIterator(SIZE,this); } OffIterator beginOff() const { return OffIterator(this->findFirstOff(),this); } OffIterator endOff() const { return OffIterator(SIZE,this); } DenseIterator beginDense() const { return DenseIterator(0,this); } DenseIterator endDense() const { return DenseIterator(SIZE,this); } bool operator == (const NodeMask &other) const { int n = WORD_COUNT; for (const Word *w1=mWords, *w2=other.mWords; n-- && *w1++ == *w2++;) ; return n == -1; } bool operator != (const NodeMask &other) const { return !(*this == other); } // // Bitwise logical operations // /// @brief Apply a functor to the words of the this and the other mask. /// /// @details An example that implements the "operator&=" method: /// @code /// struct Op { inline void operator()(W &w1, const W& w2) const { w1 &= w2; } }; /// @endcode template<typename WordOp> const NodeMask& foreach(const NodeMask& other, const WordOp& op) { Word *w1 = mWords; const Word *w2 = other.mWords; for (Index32 n = WORD_COUNT; n--; ++w1, ++w2) op( *w1, *w2); return *this; } template<typename WordOp> const NodeMask& foreach(const NodeMask& other1, const NodeMask& other2, const WordOp& op) { Word *w1 = mWords; const Word *w2 = other1.mWords, *w3 = other2.mWords; for (Index32 n = WORD_COUNT; n--; ++w1, ++w2, ++w3) op( *w1, *w2, *w3); return *this; } template<typename WordOp> const NodeMask& foreach(const NodeMask& other1, const NodeMask& other2, const NodeMask& other3, const WordOp& op) { Word *w1 = mWords; const Word *w2 = other1.mWords, *w3 = other2.mWords, *w4 = other3.mWords; for (Index32 n = WORD_COUNT; n--; ++w1, ++w2, ++w3, ++w4) op( *w1, *w2, *w3, *w4); return *this; } /// @brief Bitwise intersection const NodeMask& operator&=(const NodeMask& other) { Word *w1 = mWords; const Word *w2 = other.mWords; for (Index32 n = WORD_COUNT; n--; ++w1, ++w2) *w1 &= *w2; return *this; } /// @brief Bitwise union const NodeMask& operator|=(const NodeMask& other) { Word *w1 = mWords; const Word *w2 = other.mWords; for (Index32 n = WORD_COUNT; n--; ++w1, ++w2) *w1 |= *w2; return *this; } /// @brief Bitwise difference const NodeMask& operator-=(const NodeMask& other) { Word *w1 = mWords; const Word *w2 = other.mWords; for (Index32 n = WORD_COUNT; n--; ++w1, ++w2) *w1 &= ~*w2; return *this; } /// @brief Bitwise XOR const NodeMask& operator^=(const NodeMask& other) { Word *w1 = mWords; const Word *w2 = other.mWords; for (Index32 n = WORD_COUNT; n--; ++w1, ++w2) *w1 ^= *w2; return *this; } NodeMask operator!() const { NodeMask m(*this); m.toggle(); return m; } NodeMask operator&(const NodeMask& other) const { NodeMask m(*this); m &= other; return m; } NodeMask operator|(const NodeMask& other) const { NodeMask m(*this); m |= other; return m; } NodeMask operator^(const NodeMask& other) const { NodeMask m(*this); m ^= other; return m; } /// Return the byte size of 
this NodeMask static Index32 memUsage() { return static_cast<Index32>(WORD_COUNT*sizeof(Word)); } /// Return the total number of on bits Index32 countOn() const { Index32 sum = 0, n = WORD_COUNT; for (const Word* w = mWords; n--; ++w) sum += CountOn(*w); return sum; } /// Return the total number of on bits Index32 countOff() const { return SIZE-this->countOn(); } /// Set the <i>n</i>th bit on void setOn(Index32 n) { assert( (n >> 6) < WORD_COUNT ); mWords[n >> 6] |= Word(1) << (n & 63); } /// Set the <i>n</i>th bit off void setOff(Index32 n) { assert( (n >> 6) < WORD_COUNT ); mWords[n >> 6] &= ~(Word(1) << (n & 63)); } /// Set the <i>n</i>th bit to the specified state void set(Index32 n, bool On) { On ? this->setOn(n) : this->setOff(n); } /// Set all bits to the specified state void set(bool on) { const Word state = on ? ~Word(0) : Word(0); Index32 n = WORD_COUNT; for (Word* w = mWords; n--; ++w) *w = state; } /// Set all bits on void setOn() { Index32 n = WORD_COUNT; for (Word* w = mWords; n--; ++w) *w = ~Word(0); } /// Set all bits off void setOff() { Index32 n = WORD_COUNT; for (Word* w = mWords; n--; ++w) *w = Word(0); } /// Toggle the state of the <i>n</i>th bit void toggle(Index32 n) { assert( (n >> 6) < WORD_COUNT ); mWords[n >> 6] ^= Word(1) << (n & 63); } /// Toggle the state of all bits in the mask void toggle() { Index32 n = WORD_COUNT; for (Word* w = mWords; n--; ++w) *w = ~*w; } /// Set the first bit on void setFirstOn() { this->setOn(0); } /// Set the last bit on void setLastOn() { this->setOn(SIZE-1); } /// Set the first bit off void setFirstOff() { this->setOff(0); } /// Set the last bit off void setLastOff() { this->setOff(SIZE-1); } /// Return @c true if the <i>n</i>th bit is on bool isOn(Index32 n) const { assert( (n >> 6) < WORD_COUNT ); return 0 != (mWords[n >> 6] & (Word(1) << (n & 63))); } /// Return @c true if the <i>n</i>th bit is off bool isOff(Index32 n) const {return !this->isOn(n); } /// Return @c true if all the bits are on bool isOn() const { int n = WORD_COUNT; for (const Word *w = mWords; n-- && *w++ == ~Word(0);) ; return n == -1; } /// Return @c true if all the bits are off bool isOff() const { int n = WORD_COUNT; for (const Word *w = mWords; n-- && *w++ == Word(0);) ; return n == -1; } /// Return @c true if bits are either all off OR all on. /// @param isOn Takes on the values of all bits if the method /// returns true - else it is undefined. bool isConstant(bool &isOn) const { isOn = (mWords[0] == ~Word(0));//first word has all bits on if ( !isOn && mWords[0] != Word(0)) return false;//early out const Word *w = mWords + 1, *n = mWords + WORD_COUNT; while( w<n && *w == mWords[0] ) ++w; return w == n; } Index32 findFirstOn() const { Index32 n = 0; const Word* w = mWords; for (; n<WORD_COUNT && !*w; ++w, ++n) ; return n==WORD_COUNT ? SIZE : (n << 6) + FindLowestOn(*w); } Index32 findFirstOff() const { Index32 n = 0; const Word* w = mWords; for (; n<WORD_COUNT && !~*w; ++w, ++n) ; return n==WORD_COUNT ? SIZE : (n << 6) + FindLowestOn(~*w); } //@{ /// Return the <i>n</i>th word of the bit mask, for a word of arbitrary size. 
template<typename WordT> WordT getWord(Index n) const { assert(n*8*sizeof(WordT) < SIZE); return reinterpret_cast<const WordT*>(mWords)[n]; } template<typename WordT> WordT& getWord(Index n) { assert(n*8*sizeof(WordT) < SIZE); return reinterpret_cast<WordT*>(mWords)[n]; } //@} void save(std::ostream& os) const { os.write(reinterpret_cast<const char*>(mWords), this->memUsage()); } void load(std::istream& is) { is.read(reinterpret_cast<char*>(mWords), this->memUsage()); } void seek(std::istream& is) const { is.seekg(this->memUsage(), std::ios_base::cur); } /// @brief simple print method for debugging void printInfo(std::ostream& os=std::cout) const { os << "NodeMask: Dim=" << DIM << " Log2Dim=" << Log2Dim << " Bit count=" << SIZE << " word count=" << WORD_COUNT << std::endl; } void printBits(std::ostream& os=std::cout, Index32 max_out=80u) const { const Index32 n=(SIZE>max_out ? max_out : SIZE); for (Index32 i=0; i < n; ++i) { if ( !(i & 63) ) os << "||"; else if ( !(i%8) ) os << "|"; os << this->isOn(i); } os << "|" << std::endl; } void printAll(std::ostream& os=std::cout, Index32 max_out=80u) const { this->printInfo(os); this->printBits(os, max_out); } Index32 findNextOn(Index32 start) const { Index32 n = start >> 6;//initiate if (n >= WORD_COUNT) return SIZE; // check for out of bounds Index32 m = start & 63; Word b = mWords[n]; if (b & (Word(1) << m)) return start;//simpel case: start is on b &= ~Word(0) << m;// mask out lower bits while(!b && ++n<WORD_COUNT) b = mWords[n];// find next none-zero word return (!b ? SIZE : (n << 6) + FindLowestOn(b));//catch last word=0 } Index32 findNextOff(Index32 start) const { Index32 n = start >> 6;//initiate if (n >= WORD_COUNT) return SIZE; // check for out of bounds Index32 m = start & 63; Word b = ~mWords[n]; if (b & (Word(1) << m)) return start;//simpel case: start is on b &= ~Word(0) << m;// mask out lower bits while(!b && ++n<WORD_COUNT) b = ~mWords[n];// find next none-zero word return (!b ? SIZE : (n << 6) + FindLowestOn(b));//catch last word=0 } };// NodeMask /// @brief Template specialization of NodeMask for Log2Dim=1, i.e. 2^3 nodes template<> class NodeMask<1> { public: static const Index32 LOG2DIM = 1; static const Index32 DIM = 2; static const Index32 SIZE = 8; static const Index32 WORD_COUNT = 1; using Word = Byte; private: Byte mByte;//only member data! public: /// Default constructor sets all bits off NodeMask() : mByte(0x00U) {} /// All bits are set to the specified state NodeMask(bool on) : mByte(on ? 0xFFU : 0x00U) {} /// Copy constructor NodeMask(const NodeMask &other) : mByte(other.mByte) {} /// Destructor ~NodeMask() {} /// Assignment operator void operator = (const NodeMask &other) { mByte = other.mByte; } using OnIterator = OnMaskIterator<NodeMask>; using OffIterator = OffMaskIterator<NodeMask>; using DenseIterator = DenseMaskIterator<NodeMask>; OnIterator beginOn() const { return OnIterator(this->findFirstOn(),this); } OnIterator endOn() const { return OnIterator(SIZE,this); } OffIterator beginOff() const { return OffIterator(this->findFirstOff(),this); } OffIterator endOff() const { return OffIterator(SIZE,this); } DenseIterator beginDense() const { return DenseIterator(0,this); } DenseIterator endDense() const { return DenseIterator(SIZE,this); } bool operator == (const NodeMask &other) const { return mByte == other.mByte; } bool operator != (const NodeMask &other) const {return mByte != other.mByte; } // // Bitwise logical operations // /// @brief Apply a functor to the words of the this and the other mask. 
/// /// @details An example that implements the "operator&=" method: /// @code /// struct Op { inline void operator()(Word &w1, const Word& w2) const { w1 &= w2; } }; /// @endcode template<typename WordOp> const NodeMask& foreach(const NodeMask& other, const WordOp& op) { op(mByte, other.mByte); return *this; } template<typename WordOp> const NodeMask& foreach(const NodeMask& other1, const NodeMask& other2, const WordOp& op) { op(mByte, other1.mByte, other2.mByte); return *this; } template<typename WordOp> const NodeMask& foreach(const NodeMask& other1, const NodeMask& other2, const NodeMask& other3, const WordOp& op) { op(mByte, other1.mByte, other2.mByte, other3.mByte); return *this; } /// @brief Bitwise intersection const NodeMask& operator&=(const NodeMask& other) { mByte &= other.mByte; return *this; } /// @brief Bitwise union const NodeMask& operator|=(const NodeMask& other) { mByte |= other.mByte; return *this; } /// @brief Bitwise difference const NodeMask& operator-=(const NodeMask& other) { mByte &= static_cast<Byte>(~other.mByte); return *this; } /// @brief Bitwise XOR const NodeMask& operator^=(const NodeMask& other) { mByte ^= other.mByte; return *this; } NodeMask operator!() const { NodeMask m(*this); m.toggle(); return m; } NodeMask operator&(const NodeMask& other) const { NodeMask m(*this); m &= other; return m; } NodeMask operator|(const NodeMask& other) const { NodeMask m(*this); m |= other; return m; } NodeMask operator^(const NodeMask& other) const { NodeMask m(*this); m ^= other; return m; } /// Return the byte size of this NodeMask static Index32 memUsage() { return 1; } /// Return the total number of on bits Index32 countOn() const { return CountOn(mByte); } /// Return the total number of on bits Index32 countOff() const { return CountOff(mByte); } /// Set the <i>n</i>th bit on void setOn(Index32 n) { assert( n < 8 ); mByte = static_cast<Byte>(mByte | 0x01U << (n & 7)); } /// Set the <i>n</i>th bit off void setOff(Index32 n) { assert( n < 8 ); mByte = static_cast<Byte>(mByte & ~(0x01U << (n & 7))); } /// Set the <i>n</i>th bit to the specified state void set(Index32 n, bool On) { On ? this->setOn(n) : this->setOff(n); } /// Set all bits to the specified state void set(bool on) { mByte = on ? 0xFFU : 0x00U; } /// Set all bits on void setOn() { mByte = 0xFFU; } /// Set all bits off void setOff() { mByte = 0x00U; } /// Toggle the state of the <i>n</i>th bit void toggle(Index32 n) { assert( n < 8 ); mByte = static_cast<Byte>(mByte ^ 0x01U << (n & 7)); } /// Toggle the state of all bits in the mask void toggle() { mByte = static_cast<Byte>(~mByte); } /// Set the first bit on void setFirstOn() { this->setOn(0); } /// Set the last bit on void setLastOn() { this->setOn(7); } /// Set the first bit off void setFirstOff() { this->setOff(0); } /// Set the last bit off void setLastOff() { this->setOff(7); } /// Return true if the <i>n</i>th bit is on bool isOn(Index32 n) const { assert( n < 8 ); return mByte & (0x01U << (n & 7)); } /// Return true if the <i>n</i>th bit is off bool isOff(Index32 n) const {return !this->isOn(n); } /// Return true if all the bits are on bool isOn() const { return mByte == 0xFFU; } /// Return true if all the bits are off bool isOff() const { return mByte == 0; } /// Return @c true if bits are either all off OR all on. /// @param isOn Takes on the values of all bits if the method /// returns true - else it is undefined. bool isConstant(bool &isOn) const { isOn = this->isOn(); return isOn || this->isOff(); } Index32 findFirstOn() const { return mByte ? 
FindLowestOn(mByte) : 8; } Index32 findFirstOff() const { const Byte b = static_cast<Byte>(~mByte); return b ? FindLowestOn(b) : 8; } /* //@{ /// Return the <i>n</i>th word of the bit mask, for a word of arbitrary size. /// @note This version assumes WordT=Byte and n=0! template<typename WordT> WordT getWord(Index n) const { static_assert(sizeof(WordT) == sizeof(Byte), "expected word size to be one byte"); assert(n == 0); return reinterpret_cast<WordT>(mByte); } template<typename WordT> WordT& getWord(Index n) { static_assert(sizeof(WordT) == sizeof(Byte), "expected word size to be one byte"); assert(n == 0); return reinterpret_cast<WordT&>(mByte); } //@} */ void save(std::ostream& os) const { os.write(reinterpret_cast<const char*>(&mByte), 1); } void load(std::istream& is) { is.read(reinterpret_cast<char*>(&mByte), 1); } void seek(std::istream& is) const { is.seekg(1, std::ios_base::cur); } /// @brief simple print method for debugging void printInfo(std::ostream& os=std::cout) const { os << "NodeMask: Dim=2, Log2Dim=1, Bit count=8, Word count=1"<<std::endl; } void printBits(std::ostream& os=std::cout) const { os << "||"; for (Index32 i=0; i < 8; ++i) os << this->isOn(i); os << "||" << std::endl; } void printAll(std::ostream& os=std::cout) const { this->printInfo(os); this->printBits(os); } Index32 findNextOn(Index32 start) const { if (start>=8) return 8; const Byte b = static_cast<Byte>(mByte & (0xFFU << start)); return b ? FindLowestOn(b) : 8; } Index32 findNextOff(Index32 start) const { if (start>=8) return 8; const Byte b = static_cast<Byte>(~mByte & (0xFFU << start)); return b ? FindLowestOn(b) : 8; } };// NodeMask<1> /// @brief Template specialization of NodeMask for Log2Dim=2, i.e. 4^3 nodes template<> class NodeMask<2> { public: static const Index32 LOG2DIM = 2; static const Index32 DIM = 4; static const Index32 SIZE = 64; static const Index32 WORD_COUNT = 1; using Word = Index64; private: Word mWord;//only member data! public: /// Default constructor sets all bits off NodeMask() : mWord(UINT64_C(0x00)) {} /// All bits are set to the specified state NodeMask(bool on) : mWord(on ? UINT64_C(0xFFFFFFFFFFFFFFFF) : UINT64_C(0x00)) {} /// Copy constructor NodeMask(const NodeMask &other) : mWord(other.mWord) {} /// Destructor ~NodeMask() {} /// Assignment operator void operator = (const NodeMask &other) { mWord = other.mWord; } using OnIterator = OnMaskIterator<NodeMask>; using OffIterator = OffMaskIterator<NodeMask>; using DenseIterator = DenseMaskIterator<NodeMask>; OnIterator beginOn() const { return OnIterator(this->findFirstOn(),this); } OnIterator endOn() const { return OnIterator(SIZE,this); } OffIterator beginOff() const { return OffIterator(this->findFirstOff(),this); } OffIterator endOff() const { return OffIterator(SIZE,this); } DenseIterator beginDense() const { return DenseIterator(0,this); } DenseIterator endDense() const { return DenseIterator(SIZE,this); } bool operator == (const NodeMask &other) const { return mWord == other.mWord; } bool operator != (const NodeMask &other) const {return mWord != other.mWord; } // // Bitwise logical operations // /// @brief Apply a functor to the words of the this and the other mask. 
/// /// @details An example that implements the "operator&=" method: /// @code /// struct Op { inline void operator()(Word &w1, const Word& w2) const { w1 &= w2; } }; /// @endcode template<typename WordOp> const NodeMask& foreach(const NodeMask& other, const WordOp& op) { op(mWord, other.mWord); return *this; } template<typename WordOp> const NodeMask& foreach(const NodeMask& other1, const NodeMask& other2, const WordOp& op) { op(mWord, other1.mWord, other2.mWord); return *this; } template<typename WordOp> const NodeMask& foreach(const NodeMask& other1, const NodeMask& other2, const NodeMask& other3, const WordOp& op) { op(mWord, other1.mWord, other2.mWord, other3.mWord); return *this; } /// @brief Bitwise intersection const NodeMask& operator&=(const NodeMask& other) { mWord &= other.mWord; return *this; } /// @brief Bitwise union const NodeMask& operator|=(const NodeMask& other) { mWord |= other.mWord; return *this; } /// @brief Bitwise difference const NodeMask& operator-=(const NodeMask& other) { mWord &= ~other.mWord; return *this; } /// @brief Bitwise XOR const NodeMask& operator^=(const NodeMask& other) { mWord ^= other.mWord; return *this; } NodeMask operator!() const { NodeMask m(*this); m.toggle(); return m; } NodeMask operator&(const NodeMask& other) const { NodeMask m(*this); m &= other; return m; } NodeMask operator|(const NodeMask& other) const { NodeMask m(*this); m |= other; return m; } NodeMask operator^(const NodeMask& other) const { NodeMask m(*this); m ^= other; return m; } /// Return the byte size of this NodeMask static Index32 memUsage() { return 8; } /// Return the total number of on bits Index32 countOn() const { return CountOn(mWord); } /// Return the total number of on bits Index32 countOff() const { return CountOff(mWord); } /// Set the <i>n</i>th bit on void setOn(Index32 n) { assert( n < 64 ); mWord |= UINT64_C(0x01) << (n & 63); } /// Set the <i>n</i>th bit off void setOff(Index32 n) { assert( n < 64 ); mWord &= ~(UINT64_C(0x01) << (n & 63)); } /// Set the <i>n</i>th bit to the specified state void set(Index32 n, bool On) { On ? this->setOn(n) : this->setOff(n); } /// Set all bits to the specified state void set(bool on) { mWord = on ? UINT64_C(0xFFFFFFFFFFFFFFFF) : UINT64_C(0x00); } /// Set all bits on void setOn() { mWord = UINT64_C(0xFFFFFFFFFFFFFFFF); } /// Set all bits off void setOff() { mWord = UINT64_C(0x00); } /// Toggle the state of the <i>n</i>th bit void toggle(Index32 n) { assert( n < 64 ); mWord ^= UINT64_C(0x01) << (n & 63); } /// Toggle the state of all bits in the mask void toggle() { mWord = ~mWord; } /// Set the first bit on void setFirstOn() { this->setOn(0); } /// Set the last bit on void setLastOn() { this->setOn(63); } /// Set the first bit off void setFirstOff() { this->setOff(0); } /// Set the last bit off void setLastOff() { this->setOff(63); } /// Return true if the <i>n</i>th bit is on bool isOn(Index32 n) const { assert( n < 64 ); return 0 != (mWord & (UINT64_C(0x01) << (n & 63))); } /// Return true if the <i>n</i>th bit is off bool isOff(Index32 n) const {return !this->isOn(n); } /// Return true if all the bits are on bool isOn() const { return mWord == UINT64_C(0xFFFFFFFFFFFFFFFF); } /// Return true if all the bits are off bool isOff() const { return mWord == 0; } /// Return @c true if bits are either all off OR all on. /// @param isOn Takes on the values of all bits if the method /// returns true - else it is undefined. 
bool isConstant(bool &isOn) const { isOn = this->isOn(); return isOn || this->isOff(); } Index32 findFirstOn() const { return mWord ? FindLowestOn(mWord) : 64; } Index32 findFirstOff() const { const Word w = ~mWord; return w ? FindLowestOn(w) : 64; } //@{ /// Return the <i>n</i>th word of the bit mask, for a word of arbitrary size. template<typename WordT> WordT getWord(Index n) const { assert(n*8*sizeof(WordT) < SIZE); return reinterpret_cast<const WordT*>(&mWord)[n]; } template<typename WordT> WordT& getWord(Index n) { assert(n*8*sizeof(WordT) < SIZE); return reinterpret_cast<WordT*>(&mWord)[n]; } //@} void save(std::ostream& os) const { os.write(reinterpret_cast<const char*>(&mWord), 8); } void load(std::istream& is) { is.read(reinterpret_cast<char*>(&mWord), 8); } void seek(std::istream& is) const { is.seekg(8, std::ios_base::cur); } /// @brief simple print method for debugging void printInfo(std::ostream& os=std::cout) const { os << "NodeMask: Dim=4, Log2Dim=2, Bit count=64, Word count=1"<<std::endl; } void printBits(std::ostream& os=std::cout) const { os << "|"; for (Index32 i=0; i < 64; ++i) { if ( !(i%8) ) os << "|"; os << this->isOn(i); } os << "||" << std::endl; } void printAll(std::ostream& os=std::cout) const { this->printInfo(os); this->printBits(os); } Index32 findNextOn(Index32 start) const { if (start>=64) return 64; const Word w = mWord & (UINT64_C(0xFFFFFFFFFFFFFFFF) << start); return w ? FindLowestOn(w) : 64; } Index32 findNextOff(Index32 start) const { if (start>=64) return 64; const Word w = ~mWord & (UINT64_C(0xFFFFFFFFFFFFFFFF) << start); return w ? FindLowestOn(w) : 64; } };// NodeMask<2> // Unlike NodeMask above this RootNodeMask has a run-time defined size. // It is only included for backward compatibility and will likely be // deprecated in the future! // This class is 32-bit specific, hence the use of Index32 vs Index! class RootNodeMask { protected: Index32 mBitSize, mIntSize; Index32 *mBits; public: RootNodeMask(): mBitSize(0), mIntSize(0), mBits(nullptr) {} RootNodeMask(Index32 bit_size): mBitSize(bit_size), mIntSize(((bit_size-1)>>5)+1), mBits(new Index32[mIntSize]) { for (Index32 i=0; i<mIntSize; ++i) mBits[i]=0x00000000; } RootNodeMask(const RootNodeMask& B): mBitSize(B.mBitSize), mIntSize(B.mIntSize), mBits(new Index32[mIntSize]) { for (Index32 i=0; i<mIntSize; ++i) mBits[i]=B.mBits[i]; } ~RootNodeMask() {delete [] mBits;} void init(Index32 bit_size) { mBitSize = bit_size; mIntSize =((bit_size-1)>>5)+1; delete [] mBits; mBits = new Index32[mIntSize]; for (Index32 i=0; i<mIntSize; ++i) mBits[i]=0x00000000; } Index getBitSize() const {return mBitSize;} Index getIntSize() const {return mIntSize;} RootNodeMask& operator=(const RootNodeMask& B) { if (mBitSize!=B.mBitSize) { mBitSize=B.mBitSize; mIntSize=B.mIntSize; delete [] mBits; mBits = new Index32[mIntSize]; } for (Index32 i=0; i<mIntSize; ++i) mBits[i]=B.mBits[i]; return *this; } class BaseIterator { protected: Index32 mPos;//bit position Index32 mBitSize; const RootNodeMask* mParent;//this iterator can't change the parent_mask!
public: BaseIterator() : mPos(0), mBitSize(0), mParent(nullptr) {} BaseIterator(const BaseIterator&) = default; BaseIterator(Index32 pos, const RootNodeMask* parent): mPos(pos), mBitSize(parent->getBitSize()), mParent(parent) { assert(pos <= mBitSize); } bool operator==(const BaseIterator &iter) const {return mPos == iter.mPos;} bool operator!=(const BaseIterator &iter) const {return mPos != iter.mPos;} bool operator< (const BaseIterator &iter) const {return mPos < iter.mPos;} BaseIterator& operator=(const BaseIterator& iter) { mPos = iter.mPos; mBitSize = iter.mBitSize; mParent = iter.mParent; return *this; } Index32 offset() const {return mPos;} Index32 pos() const {return mPos;} bool test() const { assert(mPos <= mBitSize); return (mPos != mBitSize); } operator bool() const {return this->test();} }; // class BaseIterator /// @note This happens to be a const-iterator! class OnIterator: public BaseIterator { protected: using BaseIterator::mPos;//bit position; using BaseIterator::mBitSize;//bit size; using BaseIterator::mParent;//this iterator can't change the parent_mask! public: OnIterator() : BaseIterator() {} OnIterator(Index32 pos,const RootNodeMask *parent) : BaseIterator(pos,parent) {} void increment() { assert(mParent != nullptr); mPos=mParent->findNextOn(mPos+1); assert(mPos <= mBitSize); } void increment(Index n) { for (Index i=0; i<n && this->next(); ++i) {} } bool next() { this->increment(); return this->test(); } bool operator*() const {return true;} OnIterator& operator++() { this->increment(); return *this; } }; // class OnIterator class OffIterator: public BaseIterator { protected: using BaseIterator::mPos;//bit position; using BaseIterator::mBitSize;//bit size; using BaseIterator::mParent;//this iterator can't change the parent_mask! public: OffIterator() : BaseIterator() {} OffIterator(Index32 pos,const RootNodeMask *parent) : BaseIterator(pos,parent) {} void increment() { assert(mParent != nullptr); mPos=mParent->findNextOff(mPos+1); assert(mPos <= mBitSize); } void increment(Index n) { for (Index i=0; i<n && this->next(); ++i) {} } bool next() { this->increment(); return this->test(); } bool operator*() const {return true;} OffIterator& operator++() { this->increment(); return *this; } }; // class OffIterator class DenseIterator: public BaseIterator { protected: using BaseIterator::mPos;//bit position; using BaseIterator::mBitSize;//bit size; using BaseIterator::mParent;//this iterator can't change the parent_mask! 
public: DenseIterator() : BaseIterator() {} DenseIterator(Index32 pos,const RootNodeMask *parent) : BaseIterator(pos,parent) {} void increment() { assert(mParent != nullptr); mPos += 1;//carefull - the increament might go beyond the end assert(mPos<= mBitSize); } void increment(Index n) { for (Index i=0; i<n && this->next(); ++i) {} } bool next() { this->increment(); return this->test(); } bool operator*() const {return mParent->isOn(mPos);} DenseIterator& operator++() { this->increment(); return *this; } }; // class DenseIterator OnIterator beginOn() const { return OnIterator(this->findFirstOn(),this); } OnIterator endOn() const { return OnIterator(mBitSize,this); } OffIterator beginOff() const { return OffIterator(this->findFirstOff(),this); } OffIterator endOff() const { return OffIterator(mBitSize,this); } DenseIterator beginDense() const { return DenseIterator(0,this); } DenseIterator endDense() const { return DenseIterator(mBitSize,this); } bool operator == (const RootNodeMask &B) const { if (mBitSize != B.mBitSize) return false; for (Index32 i=0; i<mIntSize; ++i) if (mBits[i] != B.mBits[i]) return false; return true; } bool operator != (const RootNodeMask &B) const { if (mBitSize != B.mBitSize) return true; for (Index32 i=0; i<mIntSize; ++i) if (mBits[i] != B.mBits[i]) return true; return false; } // // Bitwise logical operations // RootNodeMask operator!() const { RootNodeMask m = *this; m.toggle(); return m; } const RootNodeMask& operator&=(const RootNodeMask& other) { assert(mIntSize == other.mIntSize); for (Index32 i = 0, N = std::min(mIntSize, other.mIntSize); i < N; ++i) { mBits[i] &= other.mBits[i]; } for (Index32 i = other.mIntSize; i < mIntSize; ++i) mBits[i] = 0x00000000; return *this; } const RootNodeMask& operator|=(const RootNodeMask& other) { assert(mIntSize == other.mIntSize); for (Index32 i = 0, N = std::min(mIntSize, other.mIntSize); i < N; ++i) { mBits[i] |= other.mBits[i]; } return *this; } const RootNodeMask& operator^=(const RootNodeMask& other) { assert(mIntSize == other.mIntSize); for (Index32 i = 0, N = std::min(mIntSize, other.mIntSize); i < N; ++i) { mBits[i] ^= other.mBits[i]; } return *this; } RootNodeMask operator&(const RootNodeMask& other) const { RootNodeMask m(*this); m &= other; return m; } RootNodeMask operator|(const RootNodeMask& other) const { RootNodeMask m(*this); m |= other; return m; } RootNodeMask operator^(const RootNodeMask& other) const { RootNodeMask m(*this); m ^= other; return m; } Index32 getMemUsage() const { return static_cast<Index32>(mIntSize*sizeof(Index32) + sizeof(*this)); } Index32 countOn() const { assert(mBits); Index32 n=0; for (Index32 i=0; i< mIntSize; ++i) n += CountOn(mBits[i]); return n; } Index32 countOff() const { return mBitSize-this->countOn(); } void setOn(Index32 i) { assert(mBits); assert( (i>>5) < mIntSize); mBits[i>>5] |= 1<<(i&31); } void setOff(Index32 i) { assert(mBits); assert( (i>>5) < mIntSize); mBits[i>>5] &= ~(1<<(i&31)); } void set(Index32 i, bool On) { On ? 
this->setOn(i) : this->setOff(i); } void setOn() { assert(mBits); for (Index32 i=0; i<mIntSize; ++i) mBits[i]=0xFFFFFFFF; } void setOff() { assert(mBits); for (Index32 i=0; i<mIntSize; ++i) mBits[i]=0x00000000; } void toggle(Index32 i) { assert(mBits); assert( (i>>5) < mIntSize); mBits[i>>5] ^= 1<<(i&31); } void toggle() { assert(mBits); for (Index32 i=0; i<mIntSize; ++i) mBits[i]=~mBits[i]; } void setFirstOn() { this->setOn(0); } void setLastOn() { this->setOn(mBitSize-1); } void setFirstOff() { this->setOff(0); } void setLastOff() { this->setOff(mBitSize-1); } bool isOn(Index32 i) const { assert(mBits); assert( (i>>5) < mIntSize); return ( mBits[i >> 5] & (1<<(i&31)) ); } bool isOff(Index32 i) const { assert(mBits); assert( (i>>5) < mIntSize); return ( ~mBits[i >> 5] & (1<<(i&31)) ); } bool isOn() const { if (!mBits) return false;//undefined is off for (Index32 i=0; i<mIntSize; ++i) if (mBits[i] != 0xFFFFFFFF) return false; return true; } bool isOff() const { if (!mBits) return true;//undefined is off for (Index32 i=0; i<mIntSize; ++i) if (mBits[i] != 0) return false; return true; } Index32 findFirstOn() const { assert(mBits); Index32 i=0; while(!mBits[i]) if (++i == mIntSize) return mBitSize;//reached end return 32*i + FindLowestOn(mBits[i]); } Index32 findFirstOff() const { assert(mBits); Index32 i=0; while(!(~mBits[i])) if (++i == mIntSize) return mBitSize;//reached end return 32*i + FindLowestOn(~mBits[i]); } void save(std::ostream& os) const { assert(mBits); os.write(reinterpret_cast<const char*>(mBits), mIntSize * sizeof(Index32)); } void load(std::istream& is) { assert(mBits); is.read(reinterpret_cast<char*>(mBits), mIntSize * sizeof(Index32)); } void seek(std::istream& is) const { assert(mBits); is.seekg(mIntSize * sizeof(Index32), std::ios_base::cur); } /// @brief simple print method for debugging void printInfo(std::ostream& os=std::cout) const { os << "RootNodeMask: Bit-size="<<mBitSize<<" Int-size="<<mIntSize<<std::endl; } void printBits(std::ostream& os=std::cout, Index32 max_out=80u) const { const Index32 n=(mBitSize>max_out?max_out:mBitSize); for (Index32 i=0; i < n; ++i) { if ( !(i&31) ) os << "||"; else if ( !(i%8) ) os << "|"; os << this->isOn(i); } os << "|" << std::endl; } void printAll(std::ostream& os=std::cout, Index32 max_out=80u) const { this->printInfo(os); this->printBits(os,max_out); } Index32 findNextOn(Index32 start) const { assert(mBits); Index32 n = start >> 5, m = start & 31;//initiate if (n>=mIntSize) return mBitSize; // check for out of bounds Index32 b = mBits[n]; if (b & (1<<m)) return start;//simple case b &= 0xFFFFFFFF << m;// mask lower bits while(!b && ++n<mIntSize) b = mBits[n];// find next nonzero int return (!b ? mBitSize : 32*n + FindLowestOn(b));//catch last-int=0 } Index32 findNextOff(Index32 start) const { assert(mBits); Index32 n = start >> 5, m = start & 31;//initiate if (n>=mIntSize) return mBitSize; // check for out of bounds Index32 b = ~mBits[n]; if (b & (1<<m)) return start;//simple case b &= 0xFFFFFFFF<<m;// mask lower bits while(!b && ++n<mIntSize) b = ~mBits[n];// find next nonzero int return (!b ? mBitSize : 32*n + FindLowestOn(b));//catch last-int=0 } Index32 memUsage() const { assert(mBits); return static_cast<Index32>(sizeof(Index32*)+(2+mIntSize)*sizeof(Index32));//in bytes } }; // class RootNodeMask } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_NODEMASKS_HAS_BEEN_INCLUDED
48,829
C
33.027875
99
0.5824
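NodeMask is the bit mask that backs the internal and leaf nodes of a VDB tree, but it can also be used on its own. The sketch below is a minimal, self-contained example of the API shown above (setOn, countOn, the bitwise operators, and sparse iteration with OnIterator); the choice of NodeMask<3>, which matches the default 8^3 leaf node, is purely illustrative.

#include <openvdb/util/NodeMasks.h>
#include <iostream>

int main()
{
    using Mask = openvdb::util::NodeMask<3>; // 512 bits, stored in 8 x 64-bit words

    Mask a;            // default constructor: all bits off
    a.setOn(0);
    a.setOn(17);
    a.setOn(511);

    Mask b(false);     // explicit all-off construction
    b.setOn(17);

    Mask c = a & b;    // bitwise intersection
    std::cout << "on bits in a: " << a.countOn() << "\n";   // 3
    std::cout << "on bits in c: " << c.countOn() << "\n";   // 1

    // Sparse iteration over set bits only.
    for (Mask::OnIterator it = a.beginOn(); it; ++it) {
        std::cout << "bit " << it.pos() << " is on\n";      // 0, 17, 511
    }
    return 0;
}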
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/MapsUtil.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file MapsUtil.h #ifndef OPENVDB_UTIL_MAPSUTIL_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_MAPSUTIL_HAS_BEEN_INCLUDED #include <openvdb/math/Maps.h> #include <algorithm> // for std::min(), std::max() #include <cmath> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { // Utility methods for calculating bounding boxes /// @brief Calculate an axis-aligned bounding box in the given map's domain /// (e.g., index space) from an axis-aligned bounding box in its range /// (e.g., world space) template<typename MapType> inline void calculateBounds(const MapType& map, const BBoxd& in, BBoxd& out) { const Vec3d& min = in.min(); const Vec3d& max = in.max(); // the pre-image of the 8 corners of the box Vec3d corners[8]; corners[0] = in.min(); corners[1] = Vec3d(max(0), min(1), min(2)); corners[2] = Vec3d(max(0), max(1), min(2)); corners[3] = Vec3d(min(0), max(1), min(2)); corners[4] = Vec3d(min(0), min(1), max(2)); corners[5] = Vec3d(max(0), min(1), max(2)); corners[6] = max; corners[7] = Vec3d(min(0), max(1), max(2)); Vec3d pre_image; Vec3d& out_min = out.min(); Vec3d& out_max = out.max(); out_min = map.applyInverseMap(corners[0]); out_max = out_min; for (int i = 1; i < 8; ++i) { pre_image = map.applyInverseMap(corners[i]); for (int j = 0; j < 3; ++j) { out_min(j) = std::min( out_min(j), pre_image(j)); out_max(j) = std::max( out_max(j), pre_image(j)); } } } /// @brief Calculate an axis-aligned bounding box in the given map's domain /// from a spherical bounding box in its range. template<typename MapType> inline void calculateBounds(const MapType& map, const Vec3d& center, const Real radius, BBoxd& out) { // On return, out gives a bounding box in continuous index space // that encloses the sphere. // // the image of a sphere under the inverse of the linearMap will be an ellipsoid. if (math::is_linear<MapType>::value) { // I want to find extrema for three functions f(x', y', z') = x', or = y', or = z' // with the constraint that g = (x-xo)^2 + (y-yo)^2 + (z-zo)^2 = r^2. // Where the point x,y,z is the image of x',y',z' // Solve: \lambda Grad(g) = Grad(f) and g = r^2. // Note: here (x,y,z) is the image of (x',y',z'), and the gradient // is w.r.t the (') space. // // This can be solved exactly: e_a^T (x' -xo') =\pm r\sqrt(e_a^T J^(-1)J^(-T)e_a) // where e_a is one of the three unit vectors. - djh. /// find the image of the center of the sphere Vec3d center_pre_image = map.applyInverseMap(center); std::vector<Vec3d> coordinate_units; coordinate_units.push_back(Vec3d(1,0,0)); coordinate_units.push_back(Vec3d(0,1,0)); coordinate_units.push_back(Vec3d(0,0,1)); Vec3d& out_min = out.min(); Vec3d& out_max = out.max(); for (int direction = 0; direction < 3; ++direction) { Vec3d temp = map.applyIJT(coordinate_units[direction]); double offset = radius * sqrt(temp.x()*temp.x() + temp.y()*temp.y() + temp.z()*temp.z()); out_min(direction) = center_pre_image(direction) - offset; out_max(direction) = center_pre_image(direction) + offset; } } else { // This is some unknown map type. In this case, we form an axis-aligned // bounding box for the sphere in world space and find the pre-images of // the corners in index space. From these corners we compute an axis-aligned // bounding box in index space.
BBoxd bounding_box(center - radius*Vec3d(1,1,1), center + radius*Vec3d(1,1,1)); calculateBounds<MapType>(map, bounding_box, out); } } namespace { // anonymous namespace for this helper function /// @brief Find the intersection of a line passing through the point /// (<I>x</I>=0,&nbsp;<I>z</I>=&minus;1/<I>g</I>) with the circle /// (<I>x</I> &minus; <I>xo</I>)&sup2; + (<I>z</I> &minus; <I>zo</I>)&sup2; = <I>r</I>&sup2; /// at a point tangent to the circle. /// @return 0 if the focal point (0, -1/<I>g</I>) is inside the circle, /// 1 if the focal point touches the circle, or 2 when both points are found. inline int findTangentPoints(const double g, const double xo, const double zo, const double r, double& xp, double& zp, double& xm, double& zm) { double x2 = xo * xo; double r2 = r * r; double xd = g * xo; double xd2 = xd*xd; double zd = g * zo + 1.; double zd2 = zd*zd; double rd2 = r2*g*g; double distA = xd2 + zd2; double distB = distA - rd2; if (distB > 0) { double discriminate = sqrt(distB); xp = xo - xo*rd2/distA + r * zd *discriminate / distA; xm = xo - xo*rd2/distA - r * zd *discriminate / distA; zp = (zo*zd2 + zd*g*(x2 - r2) - xo*xo*g - r*xd*discriminate) / distA; zm = (zo*zd2 + zd*g*(x2 - r2) - xo*xo*g + r*xd*discriminate) / distA; return 2; } if (0 >= distB && distB >= -1e-9) { // the circle touches the focal point (x=0, z = -1/g) xp = 0; xm = 0; zp = -1/g; zm = -1/g; return 1; } return 0; } } // end anonymous namespace /// @brief Calculate an axis-aligned bounding box in index space /// from a spherical bounding box in world space. /// @note This specialization is optimized for a frustum map template<> inline void calculateBounds<math::NonlinearFrustumMap>(const math::NonlinearFrustumMap& frustum, const Vec3d& center, const Real radius, BBoxd& out) { // The frustum is a nonlinear map followed by a uniform scale, rotation, translation. // First we invert the translation, rotation and scale to find the spherical pre-image // of the sphere in "local" coordinates where the frustum is aligned with the near plane // on the z=0 plane and the "camera" is located at (x=0, y=0, z=-1/g). // check that the internal map has no shear. const math::AffineMap& secondMap = frustum.secondMap(); // test if the linear part has shear or non-uniform scaling if (!frustum.hasSimpleAffine()) { // In this case, we form an axis-aligned bounding box for sphere in world space // and find the pre_images of the corners in voxel space. 
From these corners we // compute an axis-algined bounding box in voxel-spae BBoxd bounding_box(center - radius*Vec3d(1,1,1), center + radius*Vec3d(1,1,1)); calculateBounds<math::NonlinearFrustumMap>(frustum, bounding_box, out); return; } // for convenience Vec3d& out_min = out.min(); Vec3d& out_max = out.max(); Vec3d centerLS = secondMap.applyInverseMap(center); Vec3d voxelSize = secondMap.voxelSize(); // all the voxels have the same size since we know this is a simple affine map double radiusLS = radius / voxelSize(0); double gamma = frustum.getGamma(); double xp; double zp; double xm; double zm; int soln_number; // the bounding box in index space for the points in the frustum const BBoxd& bbox = frustum.getBBox(); // initialize min and max const double x_min = bbox.min().x(); const double y_min = bbox.min().y(); const double z_min = bbox.min().z(); const double x_max = bbox.max().x(); const double y_max = bbox.max().y(); const double z_max = bbox.max().z(); out_min.x() = x_min; out_max.x() = x_max; out_min.y() = y_min; out_max.y() = y_max; Vec3d extreme; Vec3d extreme2; Vec3d pre_image; // find the x-range soln_number = findTangentPoints(gamma, centerLS.x(), centerLS.z(), radiusLS, xp, zp, xm, zm); if (soln_number == 2) { extreme.x() = xp; extreme.y() = centerLS.y(); extreme.z() = zp; // location in world space of the tangent point extreme2 = secondMap.applyMap(extreme); // convert back to voxel space pre_image = frustum.applyInverseMap(extreme2); out_max.x() = std::max(x_min, std::min(x_max, pre_image.x())); extreme.x() = xm; extreme.y() = centerLS.y(); extreme.z() = zm; // location in world space of the tangent point extreme2 = secondMap.applyMap(extreme); // convert back to voxel space pre_image = frustum.applyInverseMap(extreme2); out_min.x() = std::max(x_min, std::min(x_max, pre_image.x())); } else if (soln_number == 1) { // the circle was tangent at the focal point } else if (soln_number == 0) { // the focal point was inside the circle } // find the y-range soln_number = findTangentPoints(gamma, centerLS.y(), centerLS.z(), radiusLS, xp, zp, xm, zm); if (soln_number == 2) { extreme.x() = centerLS.x(); extreme.y() = xp; extreme.z() = zp; // location in world space of the tangent point extreme2 = secondMap.applyMap(extreme); // convert back to voxel space pre_image = frustum.applyInverseMap(extreme2); out_max.y() = std::max(y_min, std::min(y_max, pre_image.y())); extreme.x() = centerLS.x(); extreme.y() = xm; extreme.z() = zm; extreme2 = secondMap.applyMap(extreme); // convert back to voxel space pre_image = frustum.applyInverseMap(extreme2); out_min.y() = std::max(y_min, std::min(y_max, pre_image.y())); } else if (soln_number == 1) { // the circle was tangent at the focal point } else if (soln_number == 0) { // the focal point was inside the circle } // the near and far // the closest point. The front of the frustum is at 0 in index space double near_dist = std::max(centerLS.z() - radiusLS, 0.); // the farthest point. The back of the frustum is at mDepth in index space double far_dist = std::min(centerLS.z() + radiusLS, frustum.getDepth() ); Vec3d near_point(0.f, 0.f, near_dist); Vec3d far_point(0.f, 0.f, far_dist); out_min.z() = std::max(z_min, frustum.applyInverseMap(secondMap.applyMap(near_point)).z()); out_max.z() = std::min(z_max, frustum.applyInverseMap(secondMap.applyMap(far_point)).z()); } } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_MAPSUTIL_HAS_BEEN_INCLUDED
10511
C
34.633898
97
0.608315
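The frustum specialization of calculateBounds above converts a world-space sphere into an index-space bounding box. The following is a minimal usage sketch, not taken from the file itself: the NonlinearFrustumMap constructor arguments (index-space bounding box, taper, depth) and the sphere values are illustrative assumptions.

#include <openvdb/math/Maps.h>
#include <openvdb/util/MapsUtil.h>

void sphereToIndexBounds()
{
    using namespace openvdb;

    // Hypothetical frustum: a 100^3 index-space region with taper 0.5 and depth 100
    math::NonlinearFrustumMap frustum(
        BBoxd(Vec3d(0, 0, 0), Vec3d(100, 100, 100)), /*taper=*/0.5, /*depth=*/100.0);

    const Vec3d center(0.0, 0.0, 10.0); // sphere center in world space
    const Real radius = 2.0;            // sphere radius in world space

    BBoxd indexBounds;
    util::calculateBounds<math::NonlinearFrustumMap>(frustum, center, radius, indexBounds);
    // indexBounds now holds the index-space box, clipped against the frustum's bounding box
}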
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/Name.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_UTIL_NAME_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_NAME_HAS_BEEN_INCLUDED #include <openvdb/Platform.h> #include <openvdb/version.h> #include <string> #include <iostream> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { typedef std::string Name; inline Name readString(std::istream& is) { uint32_t size; is.read(reinterpret_cast<char*>(&size), sizeof(uint32_t)); std::string buffer(size, ' '); if (size>0) is.read(&buffer[0], size); return buffer; } inline void writeString(std::ostream& os, const Name& name) { uint32_t size = uint32_t(name.size()); os.write(reinterpret_cast<char*>(&size), sizeof(uint32_t)); os.write(&name[0], size); } } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_NAME_HAS_BEEN_INCLUDED
936
C
21.309523
63
0.707265
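readString and writeString above implement a simple length-prefixed string encoding. A small round-trip sketch follows; the stream and the string value are illustrative.

#include <openvdb/util/Name.h>
#include <cassert>
#include <sstream>

void nameRoundTrip()
{
    std::stringstream ss(std::ios_base::in | std::ios_base::out | std::ios_base::binary);
    openvdb::writeString(ss, "density");                 // uint32 length followed by the characters
    const openvdb::Name name = openvdb::readString(ss);  // reads the length, then the payload
    assert(name == "density");
}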
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/NullInterrupter.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file NullInterrupter.h #ifndef OPENVDB_UTIL_NULL_INTERRUPTER_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_NULL_INTERRUPTER_HAS_BEEN_INCLUDED #include <openvdb/version.h> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { /// @brief Dummy NOOP interrupter class defining interface /// /// This shows the required interface for the @c InterrupterType template argument /// using by several threaded applications (e.g. tools/PointAdvect.h). The host /// application calls start() at the beginning of an interruptible operation, end() /// at the end of the operation, and wasInterrupted() periodically during the operation. /// If any call to wasInterrupted() returns @c true, the operation will be aborted. /// @note This Dummy interrupter will NEVER interrupt since wasInterrupted() always /// returns false! struct NullInterrupter { /// Default constructor NullInterrupter () {} /// Signal the start of an interruptible operation. /// @param name an optional descriptive name for the operation void start(const char* name = nullptr) { (void)name; } /// Signal the end of an interruptible operation. void end() {} /// Check if an interruptible operation should be aborted. /// @param percent an optional (when >= 0) percentage indicating /// the fraction of the operation that has been completed /// @note this method is assumed to be thread-safe. The current /// implementation is clearly a NOOP and should compile out during /// optimization! inline bool wasInterrupted(int percent = -1) { (void)percent; return false; } }; /// This method allows NullInterrupter::wasInterrupted to be compiled /// out when client code only has a pointer (vs reference) to the interrupter. /// /// @note This is a free-standing function since C++ doesn't allow for /// partial template specialization (in client code of the interrupter). template <typename T> inline bool wasInterrupted(T* i, int percent = -1) { return i && i->wasInterrupted(percent); } /// Specialization for NullInterrupter template<> inline bool wasInterrupted<util::NullInterrupter>(util::NullInterrupter*, int) { return false; } } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_NULL_INTERRUPTER_HAS_BEEN_INCLUDED
2428
C
39.483333
96
0.73682
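NullInterrupter above documents the three-method contract (start, end, wasInterrupted) expected from an InterrupterType. Below is a sketch of a custom interrupter honouring that contract; the wall-clock budget and the class name are made-up illustrations, not part of OpenVDB.

#include <chrono>

// Hypothetical interrupter that requests an abort once a wall-clock budget is spent.
// Only the start()/end()/wasInterrupted() interface matters; any class providing it
// can be passed where the InterrupterType template argument is expected.
struct TimeoutInterrupter
{
    using Clock = std::chrono::steady_clock;

    explicit TimeoutInterrupter(double seconds) : mT0(Clock::now()), mBudget(seconds) {}
    void start(const char* /*name*/ = nullptr) { mT0 = Clock::now(); }
    void end() {}
    bool wasInterrupted(int /*percent*/ = -1) const
    {
        return std::chrono::duration<double>(Clock::now() - mT0).count() > mBudget;
    }

private:
    Clock::time_point mT0;
    double mBudget;
};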
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/CpuTimer.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_UTIL_CPUTIMER_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_CPUTIMER_HAS_BEEN_INCLUDED #include <openvdb/version.h> #include <string> #include <chrono> #include <iostream>// for std::cerr #include <sstream>// for ostringstream #include <iomanip>// for setprecision #include "Formats.h"// for printTime namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { /// @brief Simple timer for basic profiling. /// /// @code /// util::CpuTimer timer; /// // code here will not be timed! /// timer.start("algorithm"); /// // code to be timed goes here /// timer.stop(); /// @endcode /// /// or to time multiple blocks of code /// /// @code /// util::CpuTimer timer("algorithm 1"); /// // code to be timed goes here /// timer.restart("algorithm 2"); /// // code to be timed goes here /// timer.stop(); /// @endcode /// /// or to measure speedup between multiple runs /// /// @code /// util::CpuTimer timer("algorithm 1"); /// // code for the first run goes here /// const double t1 = timer.restart("algorithm 2"); /// // code for the second run goes here /// const double t2 = timer.stop(); /// std::cerr << "Algorithm 1 is " << (t2/t1) /// << " timers faster than algorithm 2\n"; /// @endcode /// /// or to measure multiple blocks of code with deferred output /// /// @code /// util::CpuTimer timer(); /// // code here will not be timed! /// timer.start(); /// // code for the first run goes here /// const double t1 = timer.restart();//time in milliseconds /// // code for the second run goes here /// const double t2 = timer.restart();//time in milliseconds /// // code here will not be timed! /// util::printTime(std::cout, t1, "Algorithm 1 completed in "); /// util::printTime(std::cout, t2, "Algorithm 2 completed in "); /// @endcode class CpuTimer { public: /// @brief Initiate timer CpuTimer(std::ostream& os = std::cerr) : mOutStream(os), mT0(this->now()) {} /// @brief Prints message and start timer. /// /// @note Should normally be followed by a call to stop() CpuTimer(const std::string& msg, std::ostream& os = std::cerr) : mOutStream(os) { this->start(msg); } /// @brief Start timer. /// /// @note Should normally be followed by a call to milliseconds() or stop(std::string) inline void start() { mT0 = this->now(); } /// @brief Print message and start timer. /// /// @note Should normally be followed by a call to stop() inline void start(const std::string& msg) { mOutStream << msg << " ..."; this->start(); } /// @brief Return Time difference in microseconds since construction or start was called. /// /// @note Combine this method with start() to get timing without any outputs. inline int64_t microseconds() const { return (this->now() - mT0); } /// @brief Return Time difference in milliseconds since construction or start was called. /// /// @note Combine this method with start() to get timing without any outputs. inline double milliseconds() const { static constexpr double resolution = 1.0 / 1E3; return static_cast<double>(this->microseconds()) * resolution; } /// @brief Return Time difference in seconds since construction or start was called. /// /// @note Combine this method with start() to get timing without any outputs. 
inline double seconds() const { static constexpr double resolution = 1.0 / 1E6; return static_cast<double>(this->microseconds()) * resolution; } inline std::string time() const { const double msec = this->milliseconds(); std::ostringstream os; printTime(os, msec, "", "", 4, 1, 1); return os.str(); } /// @brief Returns and prints time in milliseconds since construction or start was called. /// /// @note Combine this method with start(std::string) to print at start and stop of task being timed. inline double stop() const { const double msec = this->milliseconds(); printTime(mOutStream, msec, " completed in ", "\n", 4, 3, 1); return msec; } /// @brief Returns and prints time in milliseconds since construction or start was called. /// /// @note Combine this method with start() to delay output of task being timed. inline double stop(const std::string& msg) const { const double msec = this->milliseconds(); mOutStream << msg << " ..."; printTime(mOutStream, msec, " completed in ", "\n", 4, 3, 1); return msec; } /// @brief Re-start timer. /// @return time in milliseconds since previous start or restart. /// /// @note Should normally be followed by a call to stop() or restart() inline double restart() { const double msec = this->milliseconds(); this->start(); return msec; } /// @brief Stop previous timer, print message and re-start timer. /// @return time in milliseconds since previous start or restart. /// /// @note Should normally be followed by a call to stop() or restart() inline double restart(const std::string& msg) { const double delta = this->stop(); this->start(msg); return delta; } private: static int64_t now() { // steady_clock is a monotonically increasing clock designed for timing duration // note that high_resolution_clock is aliased to either steady_clock or system_clock // depending on the platform, so it is preferrable to use steady_clock const auto time_since_epoch = std::chrono::steady_clock::now().time_since_epoch(); // cast time since epoch into microseconds (1 / 1000000 seconds) const auto microseconds = std::chrono::duration_cast<std::chrono::microseconds>(time_since_epoch).count(); // cast to a a 64-bit signed integer as this will overflow in 2262! return static_cast<int64_t>(microseconds); } std::ostream& mOutStream; int64_t mT0{0}; };// CpuTimer } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_CPUTIMER_HAS_BEEN_INCLUDED
6397
C
32.150259
105
0.626231
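In addition to the printing start()/stop() pairs shown in the comments above, the timer can be queried silently via milliseconds(). A small sketch; the workload is an arbitrary placeholder.

#include <openvdb/util/CpuTimer.h>
#include <iostream>
#include <numeric>
#include <vector>

void silentTiming()
{
    std::vector<double> data(1000000, 1.0);

    openvdb::util::CpuTimer timer;  // timing starts at construction
    timer.start();                  // restart without printing a message
    const double sum = std::accumulate(data.begin(), data.end(), 0.0);
    const double ms = timer.milliseconds();  // elapsed time in ms, nothing printed

    std::cout << "sum = " << sum << ", took " << ms << " ms\n";
}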
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/Util.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #ifndef OPENVDB_UTIL_UTIL_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_UTIL_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/tree/Tree.h> #include <openvdb/tools/ValueTransformer.h> #include <openvdb/tools/Prune.h>// for tree::pruneInactive namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { OPENVDB_API extern const Index32 INVALID_IDX; /// @brief coordinate offset table for neighboring voxels OPENVDB_API extern const Coord COORD_OFFSETS[26]; //////////////////////////////////////// /// Return @a voxelCoord rounded to the closest integer coordinates. inline Coord nearestCoord(const Vec3d& voxelCoord) { Coord ijk; ijk[0] = int(std::floor(voxelCoord[0])); ijk[1] = int(std::floor(voxelCoord[1])); ijk[2] = int(std::floor(voxelCoord[2])); return ijk; } //////////////////////////////////////// /// @brief Functor for use with tools::foreach() to compute the boolean intersection /// between the value masks of corresponding leaf nodes in two trees template<class TreeType1, class TreeType2> class LeafTopologyIntOp { public: LeafTopologyIntOp(const TreeType2& tree): mOtherTree(&tree) {} inline void operator()(const typename TreeType1::LeafIter& lIter) const { const Coord xyz = lIter->origin(); const typename TreeType2::LeafNodeType* leaf = mOtherTree->probeConstLeaf(xyz); if (leaf) {//leaf node lIter->topologyIntersection(*leaf, zeroVal<typename TreeType1::ValueType>()); } else if (!mOtherTree->isValueOn(xyz)) {//inactive tile lIter->setValuesOff(); } } private: const TreeType2* mOtherTree; }; /// @brief Functor for use with tools::foreach() to compute the boolean difference /// between the value masks of corresponding leaf nodes in two trees template<class TreeType1, class TreeType2> class LeafTopologyDiffOp { public: LeafTopologyDiffOp(const TreeType2& tree): mOtherTree(&tree) {} inline void operator()(const typename TreeType1::LeafIter& lIter) const { const Coord xyz = lIter->origin(); const typename TreeType2::LeafNodeType* leaf = mOtherTree->probeConstLeaf(xyz); if (leaf) {//leaf node lIter->topologyDifference(*leaf, zeroVal<typename TreeType1::ValueType>()); } else if (mOtherTree->isValueOn(xyz)) {//active tile lIter->setValuesOff(); } } private: const TreeType2* mOtherTree; }; //////////////////////////////////////// /// @brief Perform a boolean intersection between two leaf nodes' topology masks. /// @return a pointer to a new, boolean-valued tree containing the overlapping voxels. template<class TreeType1, class TreeType2> inline typename TreeType1::template ValueConverter<bool>::Type::Ptr leafTopologyIntersection(const TreeType1& lhs, const TreeType2& rhs, bool threaded = true) { typedef typename TreeType1::template ValueConverter<bool>::Type BoolTreeType; typename BoolTreeType::Ptr topologyTree(new BoolTreeType( lhs, /*inactiveValue=*/false, /*activeValue=*/true, TopologyCopy())); tools::foreach(topologyTree->beginLeaf(), LeafTopologyIntOp<BoolTreeType, TreeType2>(rhs), threaded); tools::pruneInactive(*topologyTree, threaded); return topologyTree; } /// @brief Perform a boolean difference between two leaf nodes' topology masks. /// @return a pointer to a new, boolean-valued tree containing the non-overlapping /// voxels from the lhs. 
template<class TreeType1, class TreeType2> inline typename TreeType1::template ValueConverter<bool>::Type::Ptr leafTopologyDifference(const TreeType1& lhs, const TreeType2& rhs, bool threaded = true) { typedef typename TreeType1::template ValueConverter<bool>::Type BoolTreeType; typename BoolTreeType::Ptr topologyTree(new BoolTreeType( lhs, /*inactiveValue=*/false, /*activeValue=*/true, TopologyCopy())); tools::foreach(topologyTree->beginLeaf(), LeafTopologyDiffOp<BoolTreeType, TreeType2>(rhs), threaded); tools::pruneInactive(*topologyTree, threaded); return topologyTree; } } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_UTIL_HAS_BEEN_INCLUDED
4333
C
30.867647
90
0.701823
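leafTopologyIntersection and leafTopologyDifference above combine the value masks of two trees into a boolean tree. A minimal sketch, assuming the standard openvdb::FloatTree API; the coordinates and values are illustrative.

#include <openvdb/openvdb.h>
#include <openvdb/util/Util.h>
#include <cassert>

void topologyCombineSketch()
{
    using namespace openvdb;

    FloatTree a(/*background=*/0.0f), b(/*background=*/0.0f);
    a.setValue(Coord(0, 0, 0), 1.0f);
    a.setValue(Coord(1, 0, 0), 1.0f);
    b.setValue(Coord(1, 0, 0), 2.0f);

    // Active in both inputs -> (1,0,0) only
    auto intersection = util::leafTopologyIntersection(a, b);
    // Active in a but not in b -> (0,0,0) only
    auto difference = util::leafTopologyDifference(a, b);

    assert(intersection->activeVoxelCount() == 1);
    assert(difference->activeVoxelCount() == 1);
}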
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/PagedArray.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// /// @file PagedArray.h /// /// @author Ken Museth /// /// @brief Concurrent, page-based, dynamically-sized linear data /// structure with O(1) random access and STL-compliant /// iterators. It is primarily intended for applications /// that involve multi-threading push_back of (a possibly /// unkown number of) elements into a dynamically growing /// linear array, and fast random access to said elements. #ifndef OPENVDB_UTIL_PAGED_ARRAY_HAS_BEEN_INCLUDED #define OPENVDB_UTIL_PAGED_ARRAY_HAS_BEEN_INCLUDED #include <openvdb/version.h> #include <openvdb/Types.h>// SharedPtr #include <deque> #include <cassert> #include <iostream> #include <algorithm>// std::swap #include <tbb/atomic.h> #include <tbb/spin_mutex.h> #include <tbb/parallel_for.h> #include <tbb/parallel_sort.h> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { //////////////////////////////////////// /// @brief Concurrent, page-based, dynamically-sized linear data structure /// with O(1) random access and STL-compliant iterators. It is /// primarily intended for applications that concurrently insert /// (a possibly unkown number of) elements into a dynamically /// growing linear array, and fast random access to said elements. /// /// @note Multiple threads can grow the page-table and push_back /// new elements concurrently. A ValueBuffer provides accelerated /// and threadsafe push_back at the cost of potentially re-ordering /// elements (when multiple instances are used). /// /// @details This data structure employes contiguous pages of elements /// (a std::deque) which avoids moving data when the /// capacity is out-grown and new pages are allocated. The /// size of the pages can be controlled with the Log2PageSize /// template parameter (defaults to 1024 elements of type ValueT). /// /// There are three fundamentally different ways to insert elements to /// this container - each with different advanteges and disadvanteges. 
/// /// The simplest way to insert elements is to use PagedArray::push_back_unsafe /// which is @a not thread-safe: /// @code /// PagedArray<size_t> array; /// for (size_t i=0; i<100000; ++i) array.push_back_unsafe(i); /// @endcode /// /// The fastest way (by far) to insert elements is by means of a PagedArray::ValueBuffer: /// @code /// PagedArray<size_t> array; /// auto buffer = array.getBuffer(); /// for (size_t i=0; i<100000; ++i) buffer.push_back(i); /// buffer.flush(); /// @endcode /// or /// @code /// PagedArray<size_t> array; /// { /// //local scope of a single thread /// auto buffer = array.getBuffer(); /// for (size_t i=0; i<100000; ++i) buffer.push_back(i); /// } /// @endcode /// or with TBB task-based multi-threading: /// @code /// PagedArray<size_t> array; /// tbb::parallel_for( /// tbb::blocked_range<size_t>(0, 10000, array.pageSize()), /// [&array](const tbb::blocked_range<size_t>& range) { /// auto buffer = array.getBuffer(); /// for (size_t i=range.begin(); i!=range.end(); ++i) buffer.push_back(i); /// } /// ); /// @endcode /// or with TBB thread-local storage for even better performance (due /// to fewer concurrent instantiations of partially full ValueBuffers) /// @code /// PagedArray<size_t> array; /// auto exemplar = array.getBuffer();//dummy used for initialization /// tbb::enumerable_thread_specific<PagedArray<size_t>::ValueBuffer> /// pool(exemplar);//thread local storage pool of ValueBuffers /// tbb::parallel_for( /// tbb::blocked_range<size_t>(0, 10000, array.pageSize()), /// [&pool](const tbb::blocked_range<size_t>& range) { /// auto &buffer = pool.local(); /// for (size_t i=range.begin(); i!=range.end(); ++i) buffer.push_back(i); /// } /// ); /// for (auto i=pool.begin(); i!=pool.end(); ++i) i->flush(); /// @endcode /// This technique generally outperforms PagedArray::push_back_unsafe, /// std::vector::push_back, std::deque::push_back and even /// tbb::concurrent_vector::push_back. Additionally it /// is thread-safe as long as each thread has it's own instance of a /// PagedArray::ValueBuffer. The only disadvantage is the ordering of /// the elements is undefined if multiple instance of a /// PagedArray::ValueBuffer are employed. This is typically the case /// in the context of multi-threading, where the /// ordering of inserts are undefined anyway. Note that a local scope /// can be used to guarentee that the ValueBuffer has inserted all its /// elements by the time the scope ends. Alternatively the ValueBuffer /// can be explicitly flushed by calling ValueBuffer::flush. /// /// The third way to insert elements is to resize the container and use /// random access, e.g. /// @code /// PagedArray<int> array; /// array.resize(100000); /// for (int i=0; i<100000; ++i) array[i] = i; /// @endcode /// or in terms of the random access iterator /// @code /// PagedArray<int> array; /// array.resize(100000); /// for (auto i=array.begin(); i!=array.end(); ++i) *i = i.pos(); /// @endcode /// While this approach is both fast and thread-safe it suffers from the /// major disadvantage that the problem size, i.e. number of elements, needs to /// be known in advance. If that's the case you might as well consider /// using std::vector or a raw c-style array! In other words the /// PagedArray is most useful in the context of applications that /// involve multi-threading of dynamically growing linear arrays that /// require fast random access. 
template<typename ValueT, size_t Log2PageSize = 10UL> class PagedArray { private: static_assert(Log2PageSize > 1UL, "Expected Log2PageSize > 1"); class Page; // must allow mutiple threads to call operator[] as long as only one thread calls push_back using PageTableT = std::deque<Page*>; public: using ValueType = ValueT; using Ptr = SharedPtr<PagedArray>; /// @brief Default constructor PagedArray() : mCapacity{0} { mSize = 0; } /// @brief Destructor removed all allocated pages ~PagedArray() { this->clear(); } // Disallow copy construction and assignment PagedArray(const PagedArray&) = delete; PagedArray& operator=(const PagedArray&) = delete; /// @brief Return a shared pointer to a new instance of this class static Ptr create() { return Ptr(new PagedArray); } /// @brief Caches values into a local memory Page to improve /// performance of push_back into a PagedArray. /// /// @note The ordering of inserted elements is undefined when /// multiple ValueBuffers are used! /// /// @warning By design this ValueBuffer is not threadsafe so /// make sure to create an instance per thread! class ValueBuffer; /// @return a new instance of a ValueBuffer which supports thread-safe push_back! ValueBuffer getBuffer() { return ValueBuffer(*this); } /// Const std-compliant iterator class ConstIterator; /// Non-const std-compliant iterator class Iterator; /// @brief This method is deprecated and will be removed shortly! [[deprecated]] size_t push_back(const ValueType& value) { return this->push_back_unsafe(value); } /// @param value value to be added to this PagedArray /// /// @note For best performance consider using the ValueBuffer! /// /// @warning Not thread-safe and mostly intended for debugging! size_t push_back_unsafe(const ValueType& value) { const size_t index = mSize.fetch_and_increment(); if (index >= mCapacity) { mPageTable.push_back( new Page() ); mCapacity += Page::Size; } (*mPageTable[index >> Log2PageSize])[index] = value; return index; } /// @brief Reduce the page table to fix the current size. /// /// @warning Not thread-safe! void shrink_to_fit(); /// @brief Return a reference to the value at the specified offset /// /// @param i linear offset of the value to be accessed. /// /// @note This random access has constant time complexity. /// /// @warning It is assumed that the i'th element is already allocated! ValueType& operator[](size_t i) { assert(i<mCapacity); return (*mPageTable[i>>Log2PageSize])[i]; } /// @brief Return a const-reference to the value at the specified offset /// /// @param i linear offset of the value to be accessed. /// /// @note This random access has constant time complexity. /// /// @warning It is assumed that the i'th element is already allocated! const ValueType& operator[](size_t i) const { assert(i<mCapacity); return (*mPageTable[i>>Log2PageSize])[i]; } /// @brief Set all elements in the page table to the specified value /// /// @param v value to be filled in all the existing pages of this PagedArray. /// /// @note Multi-threaded void fill(const ValueType& v) { auto op = [&](const tbb::blocked_range<size_t>& r){ for(size_t i=r.begin(); i!=r.end(); ++i) mPageTable[i]->fill(v); }; tbb::parallel_for(tbb::blocked_range<size_t>(0, this->pageCount()), op); } /// @brief Copy the first @a count values in this PageArray into /// a raw c-style array, assuming it to be at least @a count /// elements long. /// /// @param p pointer to an array that will used as the destination of the copy. /// @param count number of elements to be copied. 
/// bool copy(ValueType *p, size_t count) const { size_t last_page = count >> Log2PageSize; if (last_page >= this->pageCount()) return false; auto op = [&](const tbb::blocked_range<size_t>& r){ for (size_t i=r.begin(); i!=r.end(); ++i) { mPageTable[i]->copy(p+i*Page::Size, Page::Size); } }; if (size_t m = count & Page::Mask) {//count is not divisible by page size tbb::parallel_for(tbb::blocked_range<size_t>(0, last_page, 32), op); mPageTable[last_page]->copy(p+last_page*Page::Size, m); } else { tbb::parallel_for(tbb::blocked_range<size_t>(0, last_page+1, 32), op); } return true; } void copy(ValueType *p) const { this->copy(p, mSize); } /// @brief Resize this array to the specified size. /// /// @param size number of elements that this PageArray will contain. /// /// @details Will grow or shrink the page table to contain /// the specified number of elements. It will affect the size(), /// iteration will go over all those elements, push_back will /// insert after them and operator[] can be used directly access /// them. /// /// @note No reserve method is implemented due to efficiency concerns /// (especially for the ValueBuffer) from having to deal with empty pages. /// /// @warning Not thread-safe! void resize(size_t size) { mSize = size; if (size > mCapacity) { this->grow(size-1); } else { this->shrink_to_fit(); } } /// @brief Resize this array to the specified size and initialize /// all values to @a v. /// /// @param size number of elements that this PageArray will contain. /// @param v value of all the @a size values. /// /// @details Will grow or shrink the page table to contain /// the specified number of elements. It will affect the size(), /// iteration will go over all those elements, push_back will /// insert after them and operator[] can be used directly access them. /// /// @note No reserve method is implemented due to efficiency concerns /// (especially for the ValueBuffer) from having to deal with empty pages. /// /// @warning Not thread-safe! void resize(size_t size, const ValueType& v) { this->resize(size); this->fill(v); } /// @brief Return the number of elements in this array. size_t size() const { return mSize; } /// @brief Return the maximum number of elements that this array /// can contain without allocating more memory pages. size_t capacity() const { return mCapacity; } /// @brief Return the number of additional elements that can be /// added to this array without allocating more memory pages. size_t freeCount() const { return mCapacity - mSize; } /// @brief Return the number of allocated memory pages. size_t pageCount() const { return mPageTable.size(); } /// @brief Return the number of elements per memory page. static size_t pageSize() { return Page::Size; } /// @brief Return log2 of the number of elements per memory page. static size_t log2PageSize() { return Log2PageSize; } /// @brief Return the memory footprint of this array in bytes. size_t memUsage() const { return sizeof(*this) + mPageTable.size() * Page::memUsage(); } /// @brief Return true if the container contains no elements. bool isEmpty() const { return mSize == 0; } /// @brief Return true if the page table is partially full, i.e. the /// last non-empty page contains less than pageSize() elements. /// /// @details When the page table is partially full calling merge() /// or using a ValueBuffer will rearrange the ordering of /// existing elements. bool isPartiallyFull() const { return (mSize & Page::Mask) > 0; } /// @brief Removes all elements from the array and delete all pages. 
/// /// @warning Not thread-safe! void clear() { for (size_t i=0, n=mPageTable.size(); i<n; ++i) delete mPageTable[i]; PageTableT().swap(mPageTable); mSize = 0; mCapacity = 0; } /// @brief Return a non-const iterator pointing to the first element Iterator begin() { return Iterator(*this, 0); } /// @brief Return a non-const iterator pointing to the /// past-the-last element. /// /// @warning Iterator does not point to a valid element and should not /// be dereferenced! Iterator end() { return Iterator(*this, mSize); } //@{ /// @brief Return a const iterator pointing to the first element ConstIterator cbegin() const { return ConstIterator(*this, 0); } ConstIterator begin() const { return ConstIterator(*this, 0); } //@} //@{ /// @brief Return a const iterator pointing to the /// past-the-last element. /// /// @warning Iterator does not point to a valid element and should not /// be dereferenced! ConstIterator cend() const { return ConstIterator(*this, mSize); } ConstIterator end() const { return ConstIterator(*this, mSize); } //@} /// @brief Parallel sort of all the elements in ascending order. void sort() { tbb::parallel_sort(this->begin(), this->end(), std::less<ValueT>() ); } /// @brief Parallel sort of all the elements in descending order. void invSort() { tbb::parallel_sort(this->begin(), this->end(), std::greater<ValueT>()); } //@{ /// @brief Parallel sort of all the elements based on a custom /// functor with the api: /// @code bool operator()(const ValueT& a, const ValueT& b) @endcode /// which returns true if a comes before b. template <typename Functor> void sort(Functor func) { tbb::parallel_sort(this->begin(), this->end(), func ); } //@} /// @brief Transfer all the elements (and pages) from the other array to this array. /// /// @param other non-const reference to the PagedArray that will be merged into this PagedArray. /// /// @note The other PagedArray is empty on return. /// /// @warning The ordering of elements is undefined if this page table is partially full! 
void merge(PagedArray& other); /// @brief Print information for debugging void print(std::ostream& os = std::cout) const { os << "PagedArray:\n" << "\tSize: " << this->size() << " elements\n" << "\tPage table: " << this->pageCount() << " pages\n" << "\tPage size: " << this->pageSize() << " elements\n" << "\tCapacity: " << this->capacity() << " elements\n" << "\tFootprint: " << this->memUsage() << " bytes\n"; } private: friend class ValueBuffer; void grow(size_t index) { tbb::spin_mutex::scoped_lock lock(mGrowthMutex); while(index >= mCapacity) { mPageTable.push_back( new Page() ); mCapacity += Page::Size; } } void add_full(Page*& page, size_t size); void add_partially_full(Page*& page, size_t size); void add(Page*& page, size_t size) { tbb::spin_mutex::scoped_lock lock(mGrowthMutex); if (size == Page::Size) {//page is full this->add_full(page, size); } else if (size>0) {//page is only partially full this->add_partially_full(page, size); } } PageTableT mPageTable;//holds points to allocated pages tbb::atomic<size_t> mSize;// current number of elements in array size_t mCapacity;//capacity of array given the current page count tbb::spin_mutex mGrowthMutex;//Mutex-lock required to grow pages }; // Public class PagedArray //////////////////////////////////////////////////////////////////////////////// template <typename ValueT, size_t Log2PageSize> void PagedArray<ValueT, Log2PageSize>::shrink_to_fit() { if (mPageTable.size() > (mSize >> Log2PageSize) + 1) { tbb::spin_mutex::scoped_lock lock(mGrowthMutex); const size_t pageCount = (mSize >> Log2PageSize) + 1; if (mPageTable.size() > pageCount) { delete mPageTable.back(); mPageTable.pop_back(); mCapacity -= Page::Size; } } } template <typename ValueT, size_t Log2PageSize> void PagedArray<ValueT, Log2PageSize>::merge(PagedArray& other) { if (&other != this && !other.isEmpty()) { tbb::spin_mutex::scoped_lock lock(mGrowthMutex); // extract last partially full page if it exists Page* page = nullptr; const size_t size = mSize & Page::Mask; //number of elements in the last page if ( size > 0 ) { page = mPageTable.back(); mPageTable.pop_back(); mSize -= size; } // transfer all pages from the other page table mPageTable.insert(mPageTable.end(), other.mPageTable.begin(), other.mPageTable.end()); mSize += other.mSize; mCapacity = Page::Size*mPageTable.size(); other.mSize = 0; other.mCapacity = 0; PageTableT().swap(other.mPageTable); // add back last partially full page if (page) this->add_partially_full(page, size); } } template <typename ValueT, size_t Log2PageSize> void PagedArray<ValueT, Log2PageSize>::add_full(Page*& page, size_t size) { assert(size == Page::Size);//page must be full if (mSize & Page::Mask) {//page-table is partially full Page*& tmp = mPageTable.back(); std::swap(tmp, page);//swap last table entry with page } mPageTable.push_back(page); mCapacity += Page::Size; mSize += size; page = nullptr; } template <typename ValueT, size_t Log2PageSize> void PagedArray<ValueT, Log2PageSize>::add_partially_full(Page*& page, size_t size) { assert(size > 0 && size < Page::Size);//page must be partially full if (size_t m = mSize & Page::Mask) {//page table is also partially full ValueT *s = page->data(), *t = mPageTable.back()->data() + m; for (size_t i=std::min(mSize+size, mCapacity)-mSize; i; --i) *t++ = *s++; if (mSize+size > mCapacity) {//grow page table mPageTable.push_back( new Page() ); t = mPageTable.back()->data(); for (size_t i=mSize+size-mCapacity; i; --i) *t++ = *s++; mCapacity += Page::Size; } } else {//page table is full so simply append 
page mPageTable.push_back( page ); mCapacity += Page::Size; page = nullptr; } mSize += size; } //////////////////////////////////////////////////////////////////////////////// // Public member-class of PagedArray template <typename ValueT, size_t Log2PageSize> class PagedArray<ValueT, Log2PageSize>:: ValueBuffer { public: using PagedArrayType = PagedArray<ValueT, Log2PageSize>; /// @brief Constructor from a PageArray ValueBuffer(PagedArray& parent) : mParent(&parent), mPage(new Page()), mSize(0) {} /// @warning This copy-constructor is shallow in the sense that no /// elements are copied, i.e. size = 0. ValueBuffer(const ValueBuffer& other) : mParent(other.mParent), mPage(new Page()), mSize(0) {} /// @brief Destructor that transfers an buffered values to the parent PagedArray. ~ValueBuffer() { mParent->add(mPage, mSize); delete mPage; } ValueBuffer& operator=(const ValueBuffer&) = delete;// disallow copy assignment /// @brief Add a value to the buffer and increment the size. /// /// @details If the internal memory page is full it will /// automaically flush the page to the parent PagedArray. void push_back(const ValueT& v) { (*mPage)[mSize++] = v; if (mSize == Page::Size) this->flush(); } /// @brief Manually transfers the values in this buffer to the parent PagedArray. /// /// @note This method is also called by the destructor and /// push_back so it should only be called if one manually wants to /// sync up the buffer with the array, e.g. during debugging. void flush() { mParent->add(mPage, mSize); if (mPage == nullptr) mPage = new Page(); mSize = 0; } /// @brief Return a reference to the parent PagedArray PagedArrayType& parent() const { return *mParent; } /// @brief Return the current number of elements cached in this buffer. size_t size() const { return mSize; } static size_t pageSize() { return 1UL << Log2PageSize; } private: PagedArray* mParent; Page* mPage; size_t mSize; };// Public class PagedArray::ValueBuffer //////////////////////////////////////////////////////////////////////////////// // Const std-compliant iterator // Public member-class of PagedArray template <typename ValueT, size_t Log2PageSize> class PagedArray<ValueT, Log2PageSize>:: ConstIterator : public std::iterator<std::random_access_iterator_tag, ValueT> { public: using BaseT = std::iterator<std::random_access_iterator_tag, ValueT>; using difference_type = typename BaseT::difference_type; // constructors and assignment ConstIterator() : mPos(0), mParent(nullptr) {} ConstIterator(const PagedArray& parent, size_t pos=0) : mPos(pos), mParent(&parent) {} ConstIterator(const ConstIterator& other) : mPos(other.mPos), mParent(other.mParent) {} ConstIterator& operator=(const ConstIterator& other) { mPos=other.mPos; mParent=other.mParent; return *this; } // prefix ConstIterator& operator++() { ++mPos; return *this; } ConstIterator& operator--() { --mPos; return *this; } // postfix ConstIterator operator++(int) { ConstIterator tmp(*this); ++mPos; return tmp; } ConstIterator operator--(int) { ConstIterator tmp(*this); --mPos; return tmp; } // value access const ValueT& operator*() const { return (*mParent)[mPos]; } const ValueT* operator->() const { return &(this->operator*()); } const ValueT& operator[](const difference_type& pos) const { return (*mParent)[mPos+pos]; } // offset ConstIterator& operator+=(const difference_type& pos) { mPos += pos; return *this; } ConstIterator& operator-=(const difference_type& pos) { mPos -= pos; return *this; } ConstIterator operator+(const difference_type &pos) const { return 
Iterator(*mParent,mPos+pos); } ConstIterator operator-(const difference_type &pos) const { return Iterator(*mParent,mPos-pos); } difference_type operator-(const ConstIterator& other) const { return mPos - other.pos(); } // comparisons bool operator==(const ConstIterator& other) const { return mPos == other.mPos; } bool operator!=(const ConstIterator& other) const { return mPos != other.mPos; } bool operator>=(const ConstIterator& other) const { return mPos >= other.mPos; } bool operator<=(const ConstIterator& other) const { return mPos <= other.mPos; } bool operator< (const ConstIterator& other) const { return mPos < other.mPos; } bool operator> (const ConstIterator& other) const { return mPos > other.mPos; } // non-std methods bool isValid() const { return mParent != nullptr && mPos < mParent->size(); } size_t pos() const { return mPos; } private: size_t mPos; const PagedArray* mParent; };// Public class PagedArray::ConstIterator //////////////////////////////////////////////////////////////////////////////// // Non-const std-compliant iterator // Public member-class of PagedArray template <typename ValueT, size_t Log2PageSize> class PagedArray<ValueT, Log2PageSize>:: Iterator : public std::iterator<std::random_access_iterator_tag, ValueT> { public: using BaseT = std::iterator<std::random_access_iterator_tag, ValueT>; using difference_type = typename BaseT::difference_type; // constructors and assignment Iterator() : mPos(0), mParent(nullptr) {} Iterator(PagedArray& parent, size_t pos=0) : mPos(pos), mParent(&parent) {} Iterator(const Iterator& other) : mPos(other.mPos), mParent(other.mParent) {} Iterator& operator=(const Iterator& other) { mPos=other.mPos; mParent=other.mParent; return *this; } // prefix Iterator& operator++() { ++mPos; return *this; } Iterator& operator--() { --mPos; return *this; } // postfix Iterator operator++(int) { Iterator tmp(*this); ++mPos; return tmp; } Iterator operator--(int) { Iterator tmp(*this); --mPos; return tmp; } // value access ValueT& operator*() const { return (*mParent)[mPos]; } ValueT* operator->() const { return &(this->operator*()); } ValueT& operator[](const difference_type& pos) const { return (*mParent)[mPos+pos]; } // offset Iterator& operator+=(const difference_type& pos) { mPos += pos; return *this; } Iterator& operator-=(const difference_type& pos) { mPos -= pos; return *this; } Iterator operator+(const difference_type &pos) const { return Iterator(*mParent, mPos+pos); } Iterator operator-(const difference_type &pos) const { return Iterator(*mParent, mPos-pos); } difference_type operator-(const Iterator& other) const { return mPos - other.pos(); } // comparisons bool operator==(const Iterator& other) const { return mPos == other.mPos; } bool operator!=(const Iterator& other) const { return mPos != other.mPos; } bool operator>=(const Iterator& other) const { return mPos >= other.mPos; } bool operator<=(const Iterator& other) const { return mPos <= other.mPos; } bool operator< (const Iterator& other) const { return mPos < other.mPos; } bool operator> (const Iterator& other) const { return mPos > other.mPos; } // non-std methods bool isValid() const { return mParent != nullptr && mPos < mParent->size(); } size_t pos() const { return mPos; } private: size_t mPos; PagedArray* mParent; };// Public class PagedArray::Iterator //////////////////////////////////////////////////////////////////////////////// // Private member-class of PagedArray implementing a memory page template <typename ValueT, size_t Log2PageSize> class PagedArray<ValueT, 
Log2PageSize>:: Page { public: static const size_t Size = 1UL << Log2PageSize; static const size_t Mask = Size - 1UL; static size_t memUsage() { return sizeof(ValueT)*Size; } // Raw memory allocation without any initialization Page() : mData(reinterpret_cast<ValueT*>(new char[sizeof(ValueT)*Size])) {} ~Page() { delete [] mData; } Page(const Page&) = delete;//copy construction is not implemented Page& operator=(const Page&) = delete;//copy assignment is not implemented ValueT& operator[](const size_t i) { return mData[i & Mask]; } const ValueT& operator[](const size_t i) const { return mData[i & Mask]; } void fill(const ValueT& v) { ValueT* dst = mData; for (size_t i=Size; i; --i) *dst++ = v; } ValueT* data() { return mData; } // Copy the first n elements of this Page to dst (which is assumed to large // enough to hold the n elements). void copy(ValueType *dst, size_t n) const { const ValueT* src = mData; for (size_t i=n; i; --i) *dst++ = *src++; } protected: ValueT* mData; };// Private class PagedArray::Page //////////////////////////////////////////////////////////////////////////////// } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_UTIL_PAGED_ARRAY_HAS_BEEN_INCLUDED
29348
C
39.20411
101
0.626755
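A condensed version of the ValueBuffer usage documented above, followed by a parallel sort; the element count is arbitrary.

#include <openvdb/util/PagedArray.h>
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>

void pagedArraySketch()
{
    openvdb::util::PagedArray<size_t> array;

    // One ValueBuffer per task gives thread-safe push_back (element order is undefined)
    tbb::parallel_for(
        tbb::blocked_range<size_t>(0, 100000, array.pageSize()),
        [&array](const tbb::blocked_range<size_t>& r) {
            auto buffer = array.getBuffer();
            for (size_t i = r.begin(); i != r.end(); ++i) buffer.push_back(i);
        });  // each buffer flushes its page when it goes out of scope

    array.sort();  // parallel sort restores ascending order, so array[i] == i
}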
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/util/Formats.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "Formats.h" #include <openvdb/Platform.h> #include <iostream> #include <iomanip> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace util { int printBytes(std::ostream& os, uint64_t bytes, const std::string& head, const std::string& tail, bool exact, int width, int precision) { const uint64_t one = 1; int group = 0; // Write to a string stream so that I/O manipulators like // std::setprecision() don't alter the output stream. std::ostringstream ostr; ostr << head; ostr << std::setprecision(precision) << std::setiosflags(std::ios::fixed); if (bytes >> 40) { ostr << std::setw(width) << (double(bytes) / double(one << 40)) << " TB"; group = 4; } else if (bytes >> 30) { ostr << std::setw(width) << (double(bytes) / double(one << 30)) << " GB"; group = 3; } else if (bytes >> 20) { ostr << std::setw(width) << (double(bytes) / double(one << 20)) << " MB"; group = 2; } else if (bytes >> 10) { ostr << std::setw(width) << (double(bytes) / double(one << 10)) << " KB"; group = 1; } else { ostr << std::setw(width) << bytes << " Bytes"; } if (exact && group) ostr << " (" << bytes << " Bytes)"; ostr << tail; os << ostr.str(); return group; } int printNumber(std::ostream& os, uint64_t number, const std::string& head, const std::string& tail, bool exact, int width, int precision) { int group = 0; // Write to a string stream so that I/O manipulators like // std::setprecision() don't alter the output stream. std::ostringstream ostr; ostr << head; ostr << std::setprecision(precision) << std::setiosflags(std::ios::fixed); if (number / UINT64_C(1000000000000)) { ostr << std::setw(width) << (double(number) / 1000000000000.0) << " trillion"; group = 4; } else if (number / UINT64_C(1000000000)) { ostr << std::setw(width) << (double(number) / 1000000000.0) << " billion"; group = 3; } else if (number / UINT64_C(1000000)) { ostr << std::setw(width) << (double(number) / 1000000.0) << " million"; group = 2; } else if (number / UINT64_C(1000)) { ostr << std::setw(width) << (double(number) / 1000.0) << " thousand"; group = 1; } else { ostr << std::setw(width) << number; } if (exact && group) ostr << " (" << number << ")"; ostr << tail; os << ostr.str(); return group; } int printTime(std::ostream& os, double milliseconds, const std::string& head, const std::string& tail, int width, int precision, int verbose) { int group = 0; // Write to a string stream so that I/O manipulators like // std::setprecision() don't alter the output stream. std::ostringstream ostr; ostr << head; ostr << std::setprecision(precision) << std::setiosflags(std::ios::fixed); if (milliseconds >= 1000.0) {// one second or longer const uint32_t seconds = static_cast<uint32_t>(milliseconds / 1000.0) % 60 ; const uint32_t minutes = static_cast<uint32_t>(milliseconds / (1000.0*60)) % 60; const uint32_t hours = static_cast<uint32_t>(milliseconds / (1000.0*60*60)) % 24; const uint32_t days = static_cast<uint32_t>(milliseconds / (1000.0*60*60*24)); if (days>0) { ostr << days << (verbose==0 ? "d " : days>1 ? " days, " : " day, "); group = 4; } if (hours>0) { ostr << hours << (verbose==0 ? "h " : hours>1 ? " hours, " : " hour, "); if (!group) group = 3; } if (minutes>0) { ostr << minutes << (verbose==0 ? "m " : minutes>1 ? " minutes, " : " minute, "); if (!group) group = 2; } if (seconds>0) { if (verbose) { ostr << seconds << (seconds>1 ? 
" seconds and " : " second and "); const double msec = milliseconds - (seconds + (minutes + (hours + days * 24) * 60) * 60) * 1000.0; ostr << std::setw(width) << msec << " milliseconds (" << milliseconds << "ms)"; } else { const double sec = milliseconds/1000.0 - (minutes + (hours + days * 24) * 60) * 60; ostr << std::setw(width) << sec << "s"; } } else {// zero seconds const double msec = milliseconds - (minutes + (hours + days * 24) * 60) * 60 * 1000.0; if (verbose) { ostr << std::setw(width) << msec << " milliseconds (" << milliseconds << "ms)"; } else { ostr << std::setw(width) << msec << "ms"; } } if (!group) group = 1; } else {// less than a second ostr << std::setw(width) << milliseconds << (verbose ? " milliseconds" : "ms"); } ostr << tail; os << ostr.str(); return group; } } // namespace util } // namespace OPENVDB_VERSION_NAME } // namespace openvdb
4970
C++
32.362416
108
0.551911
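A short usage sketch of the formatting helpers defined above; all arguments are passed explicitly since the header's default values are not shown here, and the sample quantities are arbitrary.

#include <openvdb/util/Formats.h>
#include <iostream>

void formatsSketch()
{
    // Prints something like "buffer: 1.500 MB (1572864 Bytes)" and returns the
    // magnitude group (2 for MB)
    openvdb::util::printBytes(std::cout, 1572864, "buffer: ", "\n",
                              /*exact=*/true, /*width=*/6, /*precision=*/3);

    // Prints something like "elapsed: 1m 5.0s" for 65000 milliseconds
    openvdb::util::printTime(std::cout, 65000.0, "elapsed: ", "\n",
                             /*width=*/4, /*precision=*/1, /*verbose=*/0);
}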
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestNodeMask.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/util/NodeMasks.h> #include <openvdb/io/Compression.h> using openvdb::Index; template<typename MaskType> void TestAll(); class TestNodeMask: public ::testing::Test { }; template<typename MaskType> void TestAll() { EXPECT_TRUE(MaskType::memUsage() == MaskType::SIZE/8); const Index SIZE = MaskType::SIZE > 512 ? 512 : MaskType::SIZE; {// default constructor MaskType m;//all bits are off for (Index i=0; i<SIZE; ++i) EXPECT_TRUE(m.isOff(i)); for (Index i=0; i<SIZE; ++i) EXPECT_TRUE(!m.isOn(i)); EXPECT_TRUE(m.isOff()); EXPECT_TRUE(!m.isOn()); EXPECT_TRUE(m.countOn() == 0); EXPECT_TRUE(m.countOff()== MaskType::SIZE); m.toggle();//all bits are on EXPECT_TRUE(m.isOn()); EXPECT_TRUE(!m.isOff()); EXPECT_TRUE(m.countOn() == MaskType::SIZE); EXPECT_TRUE(m.countOff()== 0); for (Index i=0; i<SIZE; ++i) EXPECT_TRUE(!m.isOff(i)); for (Index i=0; i<SIZE; ++i) EXPECT_TRUE(m.isOn(i)); } {// On constructor MaskType m(true);//all bits are on EXPECT_TRUE(m.isOn()); EXPECT_TRUE(!m.isOff()); EXPECT_TRUE(m.countOn() == MaskType::SIZE); EXPECT_TRUE(m.countOff()== 0); for (Index i=0; i<SIZE; ++i) EXPECT_TRUE(!m.isOff(i)); for (Index i=0; i<SIZE; ++i) EXPECT_TRUE(m.isOn(i)); m.toggle(); for (Index i=0; i<SIZE; ++i) EXPECT_TRUE(m.isOff(i)); for (Index i=0; i<SIZE; ++i) EXPECT_TRUE(!m.isOn(i)); EXPECT_TRUE(m.isOff()); EXPECT_TRUE(!m.isOn()); EXPECT_TRUE(m.countOn() == 0); EXPECT_TRUE(m.countOff()== MaskType::SIZE); } {// Off constructor MaskType m(false); EXPECT_TRUE(m.isOff()); EXPECT_TRUE(!m.isOn()); EXPECT_TRUE(m.countOn() == 0); EXPECT_TRUE(m.countOff()== MaskType::SIZE); m.setOn(); EXPECT_TRUE(m.isOn()); EXPECT_TRUE(!m.isOff()); EXPECT_TRUE(m.countOn() == MaskType::SIZE); EXPECT_TRUE(m.countOff()== 0); m = MaskType();//copy asignment EXPECT_TRUE(m.isOff()); EXPECT_TRUE(!m.isOn()); EXPECT_TRUE(m.countOn() == 0); EXPECT_TRUE(m.countOff()== MaskType::SIZE); } {// test setOn, setOff, findFirstOn and findFiratOff MaskType m; for (Index i=0; i<SIZE; ++i) { m.setOn(i); EXPECT_TRUE(m.countOn() == 1); EXPECT_TRUE(m.findFirstOn() == i); EXPECT_TRUE(m.findFirstOff() == (i==0 ? 1 : 0)); for (Index j=0; j<SIZE; ++j) { EXPECT_TRUE( i==j ? 
m.isOn(j) : m.isOff(j) ); } m.setOff(i); EXPECT_TRUE(m.countOn() == 0); EXPECT_TRUE(m.findFirstOn() == MaskType::SIZE); } } {// OnIterator MaskType m; for (Index i=0; i<SIZE; ++i) { m.setOn(i); for (typename MaskType::OnIterator iter=m.beginOn(); iter; ++iter) { EXPECT_TRUE( iter.pos() == i ); } EXPECT_TRUE(m.countOn() == 1); m.setOff(i); EXPECT_TRUE(m.countOn() == 0); } } {// OffIterator MaskType m(true); for (Index i=0; i<SIZE; ++i) { m.setOff(i); EXPECT_TRUE(m.countOff() == 1); for (typename MaskType::OffIterator iter=m.beginOff(); iter; ++iter) { EXPECT_TRUE( iter.pos() == i ); } EXPECT_TRUE(m.countOn() == MaskType::SIZE-1); m.setOn(i); EXPECT_TRUE(m.countOff() == 0); EXPECT_TRUE(m.countOn() == MaskType::SIZE); } } {// isConstant MaskType m(true);//all bits are on bool isOn = false; EXPECT_TRUE(!m.isOff()); EXPECT_TRUE(m.isOn()); EXPECT_TRUE(m.isConstant(isOn)); EXPECT_TRUE(isOn); m.setOff(MaskType::SIZE-1);//sets last bit off EXPECT_TRUE(!m.isOff()); EXPECT_TRUE(!m.isOn()); EXPECT_TRUE(!m.isConstant(isOn)); m.setOff();//sets all bits off EXPECT_TRUE(m.isOff()); EXPECT_TRUE(!m.isOn()); EXPECT_TRUE(m.isConstant(isOn)); EXPECT_TRUE(!isOn); } {// DenseIterator MaskType m(false); for (Index i=0; i<SIZE; ++i) { m.setOn(i); EXPECT_TRUE(m.countOn() == 1); for (typename MaskType::DenseIterator iter=m.beginDense(); iter; ++iter) { EXPECT_TRUE( iter.pos()==i ? *iter : !*iter ); } m.setOff(i); EXPECT_TRUE(m.countOn() == 0); } } } TEST_F(TestNodeMask, testCompress) { using namespace openvdb; using ValueT = int; using MaskT = openvdb::util::NodeMask<1>; { // no inactive values MaskT valueMask(true); MaskT childMask; std::vector<int> values = {0,1,2,3,4,5,6,7}; int background = 0; EXPECT_EQ(valueMask.countOn(), Index32(8)); EXPECT_EQ(childMask.countOn(), Index32(0)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(maskCompress.metadata, int8_t(openvdb::io::NO_MASK_OR_INACTIVE_VALS)); EXPECT_EQ(maskCompress.inactiveVal[0], background); EXPECT_EQ(maskCompress.inactiveVal[1], background); } { // all inactive values are +background MaskT valueMask; MaskT childMask; std::vector<int> values = {10,10,10,10,10,10,10,10}; int background = 10; EXPECT_EQ(valueMask.countOn(), Index32(0)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(maskCompress.metadata, int8_t(openvdb::io::NO_MASK_OR_INACTIVE_VALS)); EXPECT_EQ(maskCompress.inactiveVal[0], background); EXPECT_EQ(maskCompress.inactiveVal[1], background); } { // all inactive values are -background MaskT valueMask; MaskT childMask; std::vector<int> values = {-10,-10,-10,-10,-10,-10,-10,-10}; int background = 10; EXPECT_EQ(valueMask.countOn(), Index32(0)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(maskCompress.metadata, int8_t(openvdb::io::NO_MASK_AND_MINUS_BG)); EXPECT_EQ(maskCompress.inactiveVal[0], -background); EXPECT_EQ(maskCompress.inactiveVal[1], background); } { // all inactive vals have the same non-background val MaskT valueMask(true); MaskT childMask; std::vector<int> values = {0,1,500,500,4,500,500,7}; int background = 10; valueMask.setOff(2); valueMask.setOff(3); valueMask.setOff(5); valueMask.setOff(6); EXPECT_EQ(valueMask.countOn(), Index32(4)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(int(maskCompress.metadata), int(openvdb::io::NO_MASK_AND_ONE_INACTIVE_VAL)); 
EXPECT_EQ(maskCompress.inactiveVal[0], 500); EXPECT_EQ(maskCompress.inactiveVal[1], background); } { // mask selects between -background and +background MaskT valueMask; MaskT childMask; std::vector<int> values = {0,10,10,-10,4,10,-10,10}; int background = 10; valueMask.setOn(0); valueMask.setOn(4); EXPECT_EQ(valueMask.countOn(), Index32(2)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(int(maskCompress.metadata), int(openvdb::io::MASK_AND_NO_INACTIVE_VALS)); EXPECT_EQ(maskCompress.inactiveVal[0], -background); EXPECT_EQ(maskCompress.inactiveVal[1], background); } { // mask selects between -background and +background MaskT valueMask; MaskT childMask; std::vector<int> values = {0,-10,-10,10,4,-10,10,-10}; int background = 10; valueMask.setOn(0); valueMask.setOn(4); EXPECT_EQ(valueMask.countOn(), Index32(2)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(int(maskCompress.metadata), int(openvdb::io::MASK_AND_NO_INACTIVE_VALS)); EXPECT_EQ(maskCompress.inactiveVal[0], -background); EXPECT_EQ(maskCompress.inactiveVal[1], background); } { // mask selects between backgd and one other inactive val MaskT valueMask; MaskT childMask; std::vector<int> values = {0,500,500,10,4,500,10,500}; int background = 10; valueMask.setOn(0); valueMask.setOn(4); EXPECT_EQ(valueMask.countOn(), Index32(2)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(int(maskCompress.metadata), int(openvdb::io::MASK_AND_ONE_INACTIVE_VAL)); EXPECT_EQ(maskCompress.inactiveVal[0], 500); EXPECT_EQ(maskCompress.inactiveVal[1], background); } { // mask selects between two non-background inactive vals MaskT valueMask; MaskT childMask; std::vector<int> values = {0,500,500,2000,4,500,2000,500}; int background = 10; valueMask.setOn(0); valueMask.setOn(4); EXPECT_EQ(valueMask.countOn(), Index32(2)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(int(maskCompress.metadata), int(openvdb::io::MASK_AND_TWO_INACTIVE_VALS)); EXPECT_EQ(maskCompress.inactiveVal[0], 500); // first unique value EXPECT_EQ(maskCompress.inactiveVal[1], 2000); // second unique value } { // mask selects between two non-background inactive vals MaskT valueMask; MaskT childMask; std::vector<int> values = {0,2000,2000,500,4,2000,500,2000}; int background = 10; valueMask.setOn(0); valueMask.setOn(4); EXPECT_EQ(valueMask.countOn(), Index32(2)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(int(maskCompress.metadata), int(openvdb::io::MASK_AND_TWO_INACTIVE_VALS)); EXPECT_EQ(maskCompress.inactiveVal[0], 2000); // first unique value EXPECT_EQ(maskCompress.inactiveVal[1], 500); // second unique value } { // > 2 inactive vals, so no mask compression at all MaskT valueMask; MaskT childMask; std::vector<int> values = {0,1000,2000,3000,4,2000,500,2000}; int background = 10; valueMask.setOn(0); valueMask.setOn(4); EXPECT_EQ(valueMask.countOn(), Index32(2)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(int(maskCompress.metadata), int(openvdb::io::NO_MASK_AND_ALL_VALS)); EXPECT_EQ(maskCompress.inactiveVal[0], 1000); // first unique value EXPECT_EQ(maskCompress.inactiveVal[1], 2000); // second unique value } { // mask selects between two non-background inactive vals (selective child 
mask) MaskT valueMask; MaskT childMask; std::vector<int> values = {0,1000,2000,3000,4,2000,500,2000}; int background = 0; valueMask.setOn(0); valueMask.setOn(4); EXPECT_EQ(valueMask.countOn(), Index32(2)); childMask.setOn(3); childMask.setOn(6); EXPECT_EQ(childMask.countOn(), Index32(2)); openvdb::io::MaskCompress<ValueT, MaskT> maskCompress( valueMask, childMask, values.data(), background); EXPECT_EQ(int(maskCompress.metadata), int(openvdb::io::MASK_AND_TWO_INACTIVE_VALS)); EXPECT_EQ(maskCompress.inactiveVal[0], 1000); // first unique value EXPECT_EQ(maskCompress.inactiveVal[1], 2000); // secone unique value } } TEST_F(TestNodeMask, testAll4) { TestAll<openvdb::util::NodeMask<4> >(); } TEST_F(TestNodeMask, testAll3) { TestAll<openvdb::util::NodeMask<3> >(); } TEST_F(TestNodeMask, testAll2) { TestAll<openvdb::util::NodeMask<2> >(); } TEST_F(TestNodeMask, testAll1) { TestAll<openvdb::util::NodeMask<1> >(); }
12614
C++
34.435393
94
0.585064
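The tests above exercise the NodeMask bit-mask API. A small standalone sketch using the same calls; the bit positions are arbitrary.

#include <openvdb/util/NodeMasks.h>
#include <cassert>

void nodeMaskSketch()
{
    // NodeMask<3> holds 8^3 = 512 bits, one per voxel of a Log2Dim=3 leaf node
    openvdb::util::NodeMask<3> mask;  // all bits start off
    mask.setOn(7);
    mask.setOn(42);

    assert(mask.countOn() == 2);
    assert(mask.findFirstOn() == 7);

    for (auto iter = mask.beginOn(); iter; ++iter) {
        const openvdb::Index pos = iter.pos();  // visits 7, then 42
        (void)pos;
    }

    mask.toggle();                    // invert every bit
    assert(mask.countOff() == 2);
}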
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestDelayedLoadMetadata.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/openvdb.h> #include <openvdb/io/DelayedLoadMetadata.h> class TestDelayedLoadMetadata : public ::testing::Test { }; TEST_F(TestDelayedLoadMetadata, test) { using namespace openvdb::io; // registration EXPECT_TRUE(!DelayedLoadMetadata::isRegisteredType()); DelayedLoadMetadata::registerType(); EXPECT_TRUE(DelayedLoadMetadata::isRegisteredType()); DelayedLoadMetadata::unregisterType(); EXPECT_TRUE(!DelayedLoadMetadata::isRegisteredType()); openvdb::initialize(); EXPECT_TRUE(DelayedLoadMetadata::isRegisteredType()); // construction DelayedLoadMetadata metadata; EXPECT_TRUE(metadata.empty()); metadata.resizeMask(size_t(2)); EXPECT_TRUE(!metadata.empty()); metadata.setMask(0, DelayedLoadMetadata::MaskType(5)); metadata.setMask(1, DelayedLoadMetadata::MaskType(-3)); EXPECT_EQ(metadata.getMask(0), DelayedLoadMetadata::MaskType(5)); EXPECT_EQ(metadata.getMask(1), DelayedLoadMetadata::MaskType(-3)); metadata.resizeCompressedSize(size_t(3)); metadata.setCompressedSize(0, DelayedLoadMetadata::CompressedSizeType(6)); metadata.setCompressedSize(1, DelayedLoadMetadata::CompressedSizeType(101)); metadata.setCompressedSize(2, DelayedLoadMetadata::CompressedSizeType(-13522)); EXPECT_EQ(metadata.getCompressedSize(0), DelayedLoadMetadata::CompressedSizeType(6)); EXPECT_EQ(metadata.getCompressedSize(1), DelayedLoadMetadata::CompressedSizeType(101)); EXPECT_EQ(metadata.getCompressedSize(2), DelayedLoadMetadata::CompressedSizeType(-13522)); // copy construction DelayedLoadMetadata metadataCopy1(metadata); EXPECT_TRUE(!metadataCopy1.empty()); EXPECT_EQ(metadataCopy1.getMask(0), DelayedLoadMetadata::MaskType(5)); EXPECT_EQ(metadataCopy1.getCompressedSize(2), DelayedLoadMetadata::CompressedSizeType(-13522)); openvdb::Metadata::Ptr baseMetadataCopy2 = metadata.copy(); DelayedLoadMetadata::Ptr metadataCopy2 = openvdb::StaticPtrCast<DelayedLoadMetadata>(baseMetadataCopy2); EXPECT_EQ(metadataCopy2->getMask(0), DelayedLoadMetadata::MaskType(5)); EXPECT_EQ(metadataCopy2->getCompressedSize(2), DelayedLoadMetadata::CompressedSizeType(-13522)); // I/O metadata.clear(); EXPECT_TRUE(metadata.empty()); const size_t headerInitialSize(sizeof(openvdb::Index32)); const size_t headerCountSize(sizeof(openvdb::Index32)); const size_t headerMaskSize(sizeof(openvdb::Index32)); const size_t headerCompressedSize(sizeof(openvdb::Index32)); const size_t headerTotalSize(headerInitialSize + headerCountSize + headerMaskSize + headerCompressedSize); { // empty buffer std::stringstream ss(std::ios_base::out | std::ios_base::in | std::ios_base::binary); metadata.write(ss); EXPECT_EQ(ss.tellp(), std::streampos(headerInitialSize)); DelayedLoadMetadata newMetadata; newMetadata.read(ss); EXPECT_TRUE(newMetadata.empty()); } { // single value, no compressed sizes metadata.clear(); metadata.resizeMask(size_t(1)); metadata.setMask(0, DelayedLoadMetadata::MaskType(5)); std::stringstream ss(std::ios_base::out | std::ios_base::in | std::ios_base::binary); metadata.write(ss); std::streampos expectedPos(headerTotalSize + sizeof(int8_t)); EXPECT_EQ(ss.tellp(), expectedPos); EXPECT_EQ(static_cast<size_t>(expectedPos)-headerInitialSize, size_t(metadata.size())); DelayedLoadMetadata newMetadata; newMetadata.read(ss); EXPECT_TRUE(!newMetadata.empty()); EXPECT_EQ(newMetadata.getMask(0), DelayedLoadMetadata::MaskType(5)); } { // single value, with compressed sizes metadata.clear(); 
metadata.resizeMask(size_t(1)); metadata.setMask(0, DelayedLoadMetadata::MaskType(5)); metadata.resizeCompressedSize(size_t(1)); metadata.setCompressedSize(0, DelayedLoadMetadata::CompressedSizeType(-10322)); std::stringstream ss(std::ios_base::out | std::ios_base::in | std::ios_base::binary); metadata.write(ss); std::streampos expectedPos(headerTotalSize + sizeof(int8_t) + sizeof(int64_t)); EXPECT_EQ(expectedPos, ss.tellp()); EXPECT_EQ(static_cast<size_t>(ss.tellp())-headerInitialSize, size_t(metadata.size())); DelayedLoadMetadata newMetadata; newMetadata.read(ss); EXPECT_TRUE(!newMetadata.empty()); EXPECT_EQ(newMetadata.getMask(0), DelayedLoadMetadata::MaskType(5)); EXPECT_EQ(newMetadata.getCompressedSize(0), DelayedLoadMetadata::CompressedSizeType(-10322)); } { // larger, but compressible buffer metadata.clear(); const size_t size = 1000; const size_t uncompressedBufferSize = (sizeof(int8_t)+sizeof(int64_t))*size; metadata.resizeMask(size); metadata.resizeCompressedSize(size); for (size_t i = 0; i < size; i++) { metadata.setMask(i, DelayedLoadMetadata::MaskType(static_cast<int8_t>((i%32)*2))); metadata.setCompressedSize(i, DelayedLoadMetadata::CompressedSizeType(static_cast<int64_t>((i%64)*200))); } std::stringstream ss(std::ios_base::out | std::ios_base::in | std::ios_base::binary); metadata.write(ss); EXPECT_EQ(static_cast<size_t>(ss.tellp())-headerInitialSize, size_t(metadata.size())); std::streampos uncompressedSize(uncompressedBufferSize + headerTotalSize); #ifdef OPENVDB_USE_BLOSC // expect a compression ratio of more than 10x EXPECT_TRUE(ss.tellp() * 10 < uncompressedSize); #else EXPECT_TRUE(ss.tellp() == uncompressedSize); #endif DelayedLoadMetadata newMetadata; newMetadata.read(ss); EXPECT_EQ(metadata.size(), newMetadata.size()); for (size_t i = 0; i < size; i++) { EXPECT_EQ(metadata.getMask(i), newMetadata.getMask(i)); } } // when read as unknown metadata should be treated as temporary metadata { metadata.clear(); metadata.resizeMask(size_t(1)); metadata.setMask(0, DelayedLoadMetadata::MaskType(5)); std::stringstream ss(std::ios_base::out | std::ios_base::in | std::ios_base::binary); openvdb::MetaMap metamap; metamap.insertMeta("delayload", metadata); EXPECT_EQ(size_t(1), metamap.metaCount()); metamap.writeMeta(ss); { openvdb::MetaMap newMetamap; newMetamap.readMeta(ss); EXPECT_EQ(size_t(1), newMetamap.metaCount()); } { DelayedLoadMetadata::unregisterType(); openvdb::MetaMap newMetamap; newMetamap.readMeta(ss); EXPECT_EQ(size_t(0), newMetamap.metaCount()); } } }
6,991
C++
32.941747
110
0.672865
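The test file above exercises the openvdb::io::DelayedLoadMetadata mask/compressed-size API. The following standalone sketch is not part of the repository; it assumes an installed OpenVDB build and uses only calls that appear in the test, round-tripping a small mask buffer through an in-memory stream:

// Minimal sketch (assumption: OpenVDB headers and library are available).
#include <openvdb/openvdb.h>
#include <openvdb/io/DelayedLoadMetadata.h>
#include <sstream>
#include <iostream>

int main()
{
    openvdb::initialize(); // registers DelayedLoadMetadata, among other types

    openvdb::io::DelayedLoadMetadata meta;
    meta.resizeMask(size_t(2));
    meta.setMask(0, openvdb::io::DelayedLoadMetadata::MaskType(5));
    meta.setMask(1, openvdb::io::DelayedLoadMetadata::MaskType(-3));

    std::stringstream ss(std::ios_base::out | std::ios_base::in | std::ios_base::binary);
    meta.write(ss);

    openvdb::io::DelayedLoadMetadata copy;
    copy.read(ss);
    std::cout << "mask[0] = " << int(copy.getMask(0)) << std::endl; // expect 5
    return 0;
}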
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestDiagnostics.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <limits> #include <openvdb/openvdb.h> #include <openvdb/Exceptions.h> #include <openvdb/math/Math.h> #include <openvdb/math/Stats.h> #include <openvdb/tools/Diagnostics.h> #include <openvdb/tools/Statistics.h> #include <openvdb/tools/LevelSetSphere.h> #include <openvdb/tools/LevelSetUtil.h> class TestDiagnostics: public ::testing::Test { }; //////////////////////////////////////// TEST_F(TestDiagnostics, testCheck) { const float val = 1.0f; const float nan = std::numeric_limits<float>::quiet_NaN(); const float inf1= std::numeric_limits<float>::infinity(); const openvdb::math::Vec3<float> inf2(val, inf1, val); {//test CheckNan openvdb::tools::CheckNan<openvdb::FloatGrid> c; EXPECT_TRUE(!c(val)); EXPECT_TRUE( c(nan)); EXPECT_TRUE( c(nan)); EXPECT_TRUE(!c(inf1)); EXPECT_TRUE(!c(inf2)); } {//test CheckInf openvdb::tools::CheckInf<openvdb::FloatGrid> c; EXPECT_TRUE(!c(val)); EXPECT_TRUE(!c(nan)); EXPECT_TRUE(!c(nan)); EXPECT_TRUE( c(inf1)); EXPECT_TRUE( c(inf2)); } {//test CheckFinite openvdb::tools::CheckFinite<openvdb::FloatGrid> c; EXPECT_TRUE(!c(val)); EXPECT_TRUE( c(nan)); EXPECT_TRUE( c(nan)); EXPECT_TRUE( c(inf1)); EXPECT_TRUE( c(inf2)); } {//test CheckMin openvdb::tools::CheckMin<openvdb::FloatGrid> c(0.0f); EXPECT_TRUE(!c( 0.5f)); EXPECT_TRUE(!c( 0.0f)); EXPECT_TRUE(!c( 1.0f)); EXPECT_TRUE(!c( 1.1f)); EXPECT_TRUE( c(-0.1f)); } {//test CheckMax openvdb::tools::CheckMax<openvdb::FloatGrid> c(0.0f); EXPECT_TRUE( c( 0.5f)); EXPECT_TRUE(!c( 0.0f)); EXPECT_TRUE( c( 1.0f)); EXPECT_TRUE( c( 1.1f)); EXPECT_TRUE(!c(-0.1f)); } {//test CheckRange // first check throw on construction from an invalid range EXPECT_THROW(openvdb::tools::CheckRange<openvdb::FloatGrid> c(1.0f, 0.0f), openvdb::ValueError); openvdb::tools::CheckRange<openvdb::FloatGrid> c(0.0f, 1.0f); EXPECT_TRUE(!c(0.5f)); EXPECT_TRUE(!c(0.0f)); EXPECT_TRUE(!c(1.0f)); EXPECT_TRUE( c(1.1f)); EXPECT_TRUE(c(-0.1f)); } }//testCheck TEST_F(TestDiagnostics, testDiagnose) { using namespace openvdb; const float val = 1.0f; const float nan = std::numeric_limits<float>::quiet_NaN(); const float inf = std::numeric_limits<float>::infinity(); {//empty grid FloatGrid grid; tools::Diagnose<FloatGrid> d(grid); tools::CheckNan<FloatGrid> c; std::string str = d.check(c); //std::cerr << "Empty grid:\n" << str; EXPECT_EQ(std::string(), str); EXPECT_EQ(0, int(d.failureCount())); } {//non-empty grid FloatGrid grid; grid.tree().setValue(Coord(-1,3,6), val); tools::Diagnose<FloatGrid> d(grid); tools::CheckNan<FloatGrid> c; std::string str = d.check(c); //std::cerr << "Non-Empty grid:\n" << str; EXPECT_EQ(std::string(), str); EXPECT_EQ(0, int(d.failureCount())); } {//nan grid FloatGrid grid; grid.tree().setValue(Coord(-1,3,6), nan); tools::Diagnose<FloatGrid> d(grid); tools::CheckNan<FloatGrid> c; std::string str = d.check(c); //std::cerr << "NaN grid:\n" << str; EXPECT_TRUE(!str.empty()); EXPECT_EQ(1, int(d.failureCount())); } {//nan and infinite grid FloatGrid grid; grid.tree().setValue(Coord(-1,3,6), nan); grid.tree().setValue(Coord(10,30,60), inf); tools::Diagnose<FloatGrid> d(grid); tools::CheckFinite<FloatGrid> c; std::string str = d.check(c); //std::cerr << "Not Finite grid:\n" << str; EXPECT_TRUE(!str.empty()); EXPECT_EQ(2, int(d.failureCount())); } {//out-of-range grid FloatGrid grid(10.0f); grid.tree().setValue(Coord(-1,3,6), 1.0f); grid.tree().setValue(Coord(10,30,60), 1.5); 
grid.tree().fill(math::CoordBBox::createCube(math::Coord(0),8), 20.0f, true); tools::Diagnose<FloatGrid> d(grid); tools::CheckRange<FloatGrid> c(0.0f, 1.0f); std::string str = d.check(c); //std::cerr << "out-of-range grid:\n" << str; EXPECT_TRUE(!str.empty()); EXPECT_EQ(3, int(d.failureCount())); } const float radius = 4.3f; const openvdb::Vec3f center(15.8f, 13.2f, 16.7f); const float voxelSize = 0.1f, width = 2.0f, gamma=voxelSize*width; FloatGrid::Ptr gridSphere = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, width); //gridSphere->print(std::cerr, 2); {// Check min/max of active values math::Extrema ex = tools::extrema(gridSphere->cbeginValueOn()); //std::cerr << "Min = " << ex.min() << " max = " << ex.max() << std::endl; EXPECT_TRUE(ex.min() > -voxelSize*width); EXPECT_TRUE(ex.max() < voxelSize*width); } {// Check min/max of all values math::Extrema ex = tools::extrema(gridSphere->cbeginValueAll()); //std::cerr << "Min = " << ex.min() << " max = " << ex.max() << std::endl; EXPECT_TRUE(ex.min() >= -voxelSize*width); EXPECT_TRUE(ex.max() <= voxelSize*width); } {// check range of all values in a sphere w/o mask tools::CheckRange<FloatGrid, true, true, FloatGrid::ValueAllCIter> c(-gamma, gamma); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c); //std::cerr << "Values out of range:\n" << str; EXPECT_TRUE(str.empty()); EXPECT_EQ(0, int(d.valueCount())); EXPECT_EQ(0, int(d.failureCount())); } {// check range of on values in a sphere w/o mask tools::CheckRange<FloatGrid, true, true, FloatGrid::ValueOnCIter> c(-gamma, gamma); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c); //std::cerr << "Values out of range:\n" << str; EXPECT_TRUE(str.empty()); EXPECT_EQ(0, int(d.valueCount())); EXPECT_EQ(0, int(d.failureCount())); } {// check range of off tiles in a sphere w/o mask tools::CheckRange<FloatGrid, true, true, FloatGrid::ValueOffCIter> c(-gamma, gamma); tools::Diagnose<FloatGrid> d(*gridSphere); {// check off tile iterator FloatGrid::ValueOffCIter i(gridSphere->tree()); i.setMaxDepth(FloatGrid::ValueOffCIter::LEAF_DEPTH - 1); for (; i; ++i) EXPECT_TRUE( math::Abs(*i) <= gamma); } std::string str = d.check(c); //std::cerr << "Values out of range:\n" << str; EXPECT_TRUE(str.empty()); EXPECT_EQ(0, int(d.valueCount())); EXPECT_EQ(0, int(d.failureCount())); } {// check range of sphere w/o mask tools::CheckRange<FloatGrid> c(0.0f, gamma); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c); //std::cerr << "Values out of range:\n" << str; EXPECT_TRUE(!str.empty()); EXPECT_EQ(0, int(d.valueCount())); EXPECT_TRUE(d.failureCount() < gridSphere->activeVoxelCount()); } {// check range of sphere w mask tools::CheckRange<FloatGrid> c(0.0f, gamma); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c, true); //std::cerr << "Values out of range:\n" << str; EXPECT_TRUE(!str.empty()); EXPECT_EQ(d.valueCount(), d.valueCount()); EXPECT_TRUE(d.failureCount() < gridSphere->activeVoxelCount()); } {// check min of sphere w/o mask tools::CheckMin<FloatGrid> c(-gamma); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c); //std::cerr << "Min values:\n" << str; EXPECT_EQ(std::string(), str); EXPECT_EQ(0, int(d.valueCount())); EXPECT_EQ(0, int(d.failureCount())); } {// check max of sphere w/o mask tools::CheckMax<FloatGrid> c(gamma); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c); //std::cerr << "MAX values:\n" << str; EXPECT_TRUE(str.empty()); EXPECT_EQ(0, 
int(d.valueCount())); EXPECT_EQ(0, int(d.failureCount())); } {// check norm of gradient of sphere w/o mask tools::CheckEikonal<FloatGrid> c(*gridSphere, 0.97f, 1.03f); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c, false, true, false, false); //std::cerr << "NormGrad:\n" << str; EXPECT_TRUE(str.empty()); EXPECT_EQ(0, int(d.valueCount())); EXPECT_EQ(0, int(d.failureCount())); } {// check norm of gradient of sphere w/o mask tools::CheckNormGrad<FloatGrid> c(*gridSphere, 0.75f, 1.25f); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c, false, true, false, false); //std::cerr << "NormGrad:\n" << str; EXPECT_TRUE(str.empty()); EXPECT_EQ(0, int(d.valueCount())); EXPECT_EQ(0, int(d.failureCount())); } {// check inactive values tools::CheckMagnitude<FloatGrid, FloatGrid::ValueOffCIter> c(gamma); tools::Diagnose<FloatGrid> d(*gridSphere); std::string str = d.check(c); //std::cerr << "Magnitude:\n" << str; EXPECT_TRUE(str.empty()); EXPECT_EQ(0, int(d.valueCount())); EXPECT_EQ(0, int(d.failureCount())); } }// testDiagnose TEST_F(TestDiagnostics, testCheckLevelSet) { using namespace openvdb; const float radius = 4.3f; const Vec3f center(15.8f, 13.2f, 16.7f); const float voxelSize = 0.1f, width = LEVEL_SET_HALF_WIDTH; FloatGrid::Ptr grid = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, width); //tools::CheckLevelSet<FloatGrid> c(*grid); //std::string str = c.check(); std::string str = tools::checkLevelSet(*grid); EXPECT_TRUE(str.empty()); //std::cerr << "\n" << str << std::endl; grid->tree().setValue(Coord(0,0,0), voxelSize*(width+0.5f)); //str = c.check(); str = tools::checkLevelSet(*grid); EXPECT_TRUE(!str.empty()); //std::cerr << "\n" << str << std::endl; //str = c.check(6); str = tools::checkLevelSet(*grid, 6); EXPECT_TRUE(str.empty()); }// testCheckLevelSet TEST_F(TestDiagnostics, testCheckFogVolume) { using namespace openvdb; const float radius = 4.3f; const Vec3f center(15.8f, 13.2f, 16.7f); const float voxelSize = 0.1f, width = LEVEL_SET_HALF_WIDTH; FloatGrid::Ptr grid = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, width); tools::sdfToFogVolume(*grid); //tools::CheckFogVolume<FloatGrid> c(*grid); //std::string str = c.check(); std::string str = tools::checkFogVolume(*grid); EXPECT_TRUE(str.empty()); //std::cerr << "\n" << str << std::endl; grid->tree().setValue(Coord(0,0,0), 1.5f); //str = c.check(); str = tools::checkFogVolume(*grid); EXPECT_TRUE(!str.empty()); //std::cerr << "\n" << str << std::endl; str = tools::checkFogVolume(*grid, 5); //str = c.check(5); EXPECT_TRUE(str.empty()); }// testCheckFogVolume TEST_F(TestDiagnostics, testUniqueInactiveValues) { openvdb::FloatGrid grid; grid.tree().setValueOff(openvdb::Coord(0,0,0), -1); grid.tree().setValueOff(openvdb::Coord(0,0,1), -2); grid.tree().setValueOff(openvdb::Coord(0,1,0), -3); grid.tree().setValue(openvdb::Coord(1,0,0), 1); std::vector<float> values; EXPECT_TRUE(openvdb::tools::uniqueInactiveValues(grid, values, 4)); EXPECT_EQ(4, int(values.size())); EXPECT_TRUE(openvdb::math::isApproxEqual(values[0], -3.0f)); EXPECT_TRUE(openvdb::math::isApproxEqual(values[1], -2.0f)); EXPECT_TRUE(openvdb::math::isApproxEqual(values[2], -1.0f)); EXPECT_TRUE(openvdb::math::isApproxEqual(values[3], 0.0f)); // test with level set sphere const float radius = 4.3f; const openvdb::Vec3f center(15.8f, 13.2f, 16.7f); const float voxelSize = 0.5f, width = 2.0f; openvdb::FloatGrid::Ptr gridSphere = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(radius, center, 
voxelSize, width); EXPECT_TRUE(openvdb::tools::uniqueInactiveValues(*gridSphere.get(), values, 2)); EXPECT_EQ(2, int(values.size())); EXPECT_TRUE(openvdb::math::isApproxEqual(values[0], -voxelSize * width)); EXPECT_TRUE(openvdb::math::isApproxEqual(values[1], voxelSize * width)); // test with fog volume openvdb::tools::sdfToFogVolume(*gridSphere); EXPECT_TRUE(openvdb::tools::uniqueInactiveValues(*gridSphere.get(), values, 1)); EXPECT_EQ(1, int(values.size())); EXPECT_TRUE(openvdb::math::isApproxEqual(values[0], 0.0f)); }
13,017
C++
34.763736
99
0.586464
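The diagnostics test above drives tools::Diagnose with individual check functors and the convenience wrappers tools::checkLevelSet / tools::checkFogVolume. As a minimal sketch (not part of the repository; assumes an OpenVDB build and reuses only APIs shown in the test), the same checks can be run against a freshly built level-set sphere:

#include <openvdb/openvdb.h>
#include <openvdb/tools/Diagnostics.h>
#include <openvdb/tools/LevelSetSphere.h>
#include <iostream>

int main()
{
    openvdb::initialize();
    auto grid = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/4.3f, /*center=*/openvdb::Vec3f(15.8f, 13.2f, 16.7f), /*voxelSize=*/0.1f);

    // An empty string means the grid passed all level-set checks.
    std::cout << "checkLevelSet: '" << openvdb::tools::checkLevelSet(*grid) << "'\n";

    // Count NaN values explicitly with Diagnose + CheckNan.
    openvdb::tools::CheckNan<openvdb::FloatGrid> nanCheck;
    openvdb::tools::Diagnose<openvdb::FloatGrid> diagnose(*grid);
    diagnose.check(nanCheck);
    std::cout << "NaN failures: " << diagnose.failureCount() << std::endl; // expect 0
    return 0;
}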
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestInit.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/openvdb.h> class TestInit: public ::testing::Test { }; TEST_F(TestInit, test) { using namespace openvdb; initialize(); // data types EXPECT_TRUE(DoubleMetadata::isRegisteredType()); EXPECT_TRUE(FloatMetadata::isRegisteredType()); EXPECT_TRUE(Int32Metadata::isRegisteredType()); EXPECT_TRUE(Int64Metadata::isRegisteredType()); EXPECT_TRUE(StringMetadata::isRegisteredType()); EXPECT_TRUE(Vec2IMetadata::isRegisteredType()); EXPECT_TRUE(Vec2SMetadata::isRegisteredType()); EXPECT_TRUE(Vec2DMetadata::isRegisteredType()); EXPECT_TRUE(Vec3IMetadata::isRegisteredType()); EXPECT_TRUE(Vec3SMetadata::isRegisteredType()); EXPECT_TRUE(Vec3DMetadata::isRegisteredType()); // map types EXPECT_TRUE(math::AffineMap::isRegistered()); EXPECT_TRUE(math::UnitaryMap::isRegistered()); EXPECT_TRUE(math::ScaleMap::isRegistered()); EXPECT_TRUE(math::TranslationMap::isRegistered()); EXPECT_TRUE(math::ScaleTranslateMap::isRegistered()); EXPECT_TRUE(math::NonlinearFrustumMap::isRegistered()); // grid types EXPECT_TRUE(BoolGrid::isRegistered()); EXPECT_TRUE(FloatGrid::isRegistered()); EXPECT_TRUE(DoubleGrid::isRegistered()); EXPECT_TRUE(Int32Grid::isRegistered()); EXPECT_TRUE(Int64Grid::isRegistered()); EXPECT_TRUE(StringGrid::isRegistered()); EXPECT_TRUE(Vec3IGrid::isRegistered()); EXPECT_TRUE(Vec3SGrid::isRegistered()); EXPECT_TRUE(Vec3DGrid::isRegistered()); uninitialize(); EXPECT_TRUE(!DoubleMetadata::isRegisteredType()); EXPECT_TRUE(!FloatMetadata::isRegisteredType()); EXPECT_TRUE(!Int32Metadata::isRegisteredType()); EXPECT_TRUE(!Int64Metadata::isRegisteredType()); EXPECT_TRUE(!StringMetadata::isRegisteredType()); EXPECT_TRUE(!Vec2IMetadata::isRegisteredType()); EXPECT_TRUE(!Vec2SMetadata::isRegisteredType()); EXPECT_TRUE(!Vec2DMetadata::isRegisteredType()); EXPECT_TRUE(!Vec3IMetadata::isRegisteredType()); EXPECT_TRUE(!Vec3SMetadata::isRegisteredType()); EXPECT_TRUE(!Vec3DMetadata::isRegisteredType()); EXPECT_TRUE(!math::AffineMap::isRegistered()); EXPECT_TRUE(!math::UnitaryMap::isRegistered()); EXPECT_TRUE(!math::ScaleMap::isRegistered()); EXPECT_TRUE(!math::TranslationMap::isRegistered()); EXPECT_TRUE(!math::ScaleTranslateMap::isRegistered()); EXPECT_TRUE(!math::NonlinearFrustumMap::isRegistered()); EXPECT_TRUE(!BoolGrid::isRegistered()); EXPECT_TRUE(!FloatGrid::isRegistered()); EXPECT_TRUE(!DoubleGrid::isRegistered()); EXPECT_TRUE(!Int32Grid::isRegistered()); EXPECT_TRUE(!Int64Grid::isRegistered()); EXPECT_TRUE(!StringGrid::isRegistered()); EXPECT_TRUE(!Vec3IGrid::isRegistered()); EXPECT_TRUE(!Vec3SGrid::isRegistered()); EXPECT_TRUE(!Vec3DGrid::isRegistered()); } TEST_F(TestInit, testMatGrids) { // small test to ensure matrix grid types compile using Mat3sGrid = openvdb::BoolGrid::ValueConverter<openvdb::Mat3s>::Type; using Mat3dGrid = openvdb::BoolGrid::ValueConverter<openvdb::Mat3d>::Type; using Mat4sGrid = openvdb::BoolGrid::ValueConverter<openvdb::Mat4s>::Type; using Mat4dGrid = openvdb::BoolGrid::ValueConverter<openvdb::Mat4d>::Type; Mat3sGrid a; (void)(a); Mat3dGrid b; (void)(b); Mat4sGrid c; (void)(c); Mat4dGrid d; (void)(d); }
3,508
C++
35.175257
78
0.711231
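The registration behaviour asserted above can be observed from a standalone program. A minimal sketch (not part of the repository; assumes an OpenVDB build):

#include <openvdb/openvdb.h>
#include <iostream>

int main()
{
    openvdb::initialize();
    std::cout << "FloatGrid registered: " << openvdb::FloatGrid::isRegistered() << "\n"; // 1
    openvdb::uninitialize();
    std::cout << "FloatGrid registered: " << openvdb::FloatGrid::isRegistered() << "\n"; // 0
    return 0;
}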
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestClip.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/openvdb.h> #include <openvdb/math/Maps.h> // for math::NonlinearFrustumMap #include <openvdb/tools/Clip.h> // See also TestGrid::testClipping() class TestClip: public ::testing::Test { public: static const openvdb::CoordBBox kCubeBBox, kInnerBBox; TestClip(): mCube{ []() { auto cube = openvdb::FloatGrid{0.0f}; cube.fill(kCubeBBox, /*value=*/5.0f, /*active=*/true); return cube; }()} {} void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::initialize(); } protected: void validate(const openvdb::FloatGrid&); const openvdb::FloatGrid mCube; }; const openvdb::CoordBBox // The volume to be clipped is a 21 x 21 x 21 solid cube. TestClip::kCubeBBox{openvdb::Coord{-10}, openvdb::Coord{10}}, // The clipping mask is a 1 x 1 x 13 segment extending along the Z axis inside the cube. TestClip::kInnerBBox{openvdb::Coord{4, 4, -6}, openvdb::Coord{4, 4, 6}}; //////////////////////////////////////// void TestClip::validate(const openvdb::FloatGrid& clipped) { using namespace openvdb; const CoordBBox bbox = clipped.evalActiveVoxelBoundingBox(); EXPECT_EQ(kInnerBBox.min().x(), bbox.min().x()); EXPECT_EQ(kInnerBBox.min().y(), bbox.min().y()); EXPECT_EQ(kInnerBBox.min().z(), bbox.min().z()); EXPECT_EQ(kInnerBBox.max().x(), bbox.max().x()); EXPECT_EQ(kInnerBBox.max().y(), bbox.max().y()); EXPECT_EQ(kInnerBBox.max().z(), bbox.max().z()); EXPECT_EQ(6 + 6 + 1, int(clipped.activeVoxelCount())); EXPECT_EQ(2, int(clipped.constTree().leafCount())); FloatGrid::ConstAccessor acc = clipped.getConstAccessor(); const float bg = clipped.background(); Coord xyz; int &x = xyz[0], &y = xyz[1], &z = xyz[2]; for (x = kCubeBBox.min().x(); x <= kCubeBBox.max().x(); ++x) { for (y = kCubeBBox.min().y(); y <= kCubeBBox.max().y(); ++y) { for (z = kCubeBBox.min().z(); z <= kCubeBBox.max().z(); ++z) { if (x == 4 && y == 4 && z >= -6 && z <= 6) { EXPECT_EQ(5.f, acc.getValue(Coord(4, 4, z))); } else { EXPECT_EQ(bg, acc.getValue(Coord(x, y, z))); } } } } } //////////////////////////////////////// // Test clipping against a bounding box. TEST_F(TestClip, testBBox) { using namespace openvdb; BBoxd clipBox(Vec3d(4.0, 4.0, -6.0), Vec3d(4.9, 4.9, 6.0)); FloatGrid::Ptr clipped = tools::clip(mCube, clipBox); validate(*clipped); } // Test clipping against a camera frustum. TEST_F(TestClip, testFrustum) { using namespace openvdb; const auto d = double(kCubeBBox.max().z()); const math::NonlinearFrustumMap frustum{ /*position=*/Vec3d{0.0, 0.0, 5.0 * d}, /*direction=*/Vec3d{0.0, 0.0, -1.0}, /*up=*/Vec3d{0.0, d / 2.0, 0.0}, /*aspect=*/1.0, /*near=*/4.0 * d + 1.0, /*depth=*/kCubeBBox.dim().z() - 2.0, /*x_count=*/100, /*z_count=*/100}; const auto frustumIndexBBox = frustum.getBBox(); { auto clipped = tools::clip(mCube, frustum); const auto bbox = clipped->evalActiveVoxelBoundingBox(); const auto cubeDim = kCubeBBox.dim(); EXPECT_EQ(kCubeBBox.min().z() + 1, bbox.min().z()); EXPECT_EQ(kCubeBBox.max().z() - 1, bbox.max().z()); EXPECT_TRUE(int(bbox.volume()) < int(cubeDim.x() * cubeDim.y() * (cubeDim.z() - 2))); // Note: mCube index space corresponds to world space. 
for (auto it = clipped->beginValueOn(); it; ++it) { const auto xyz = frustum.applyInverseMap(it.getCoord().asVec3d()); EXPECT_TRUE(frustumIndexBBox.isInside(xyz)); } } { auto tile = openvdb::FloatGrid{0.0f}; tile.tree().addTile(/*level=*/2, Coord{0}, /*value=*/5.0f, /*active=*/true); auto clipped = tools::clip(tile, frustum); EXPECT_TRUE(!clipped->empty()); for (auto it = clipped->beginValueOn(); it; ++it) { const auto xyz = frustum.applyInverseMap(it.getCoord().asVec3d()); EXPECT_TRUE(frustumIndexBBox.isInside(xyz)); } clipped = tools::clip(tile, frustum, /*keepInterior=*/false); EXPECT_TRUE(!clipped->empty()); for (auto it = clipped->beginValueOn(); it; ++it) { const auto xyz = frustum.applyInverseMap(it.getCoord().asVec3d()); EXPECT_TRUE(!frustumIndexBBox.isInside(xyz)); } } } // Test clipping against a MaskGrid. TEST_F(TestClip, testMaskGrid) { using namespace openvdb; MaskGrid mask(false); mask.fill(kInnerBBox, true, true); FloatGrid::Ptr clipped = tools::clip(mCube, mask); validate(*clipped); } // Test clipping against a boolean mask grid. TEST_F(TestClip, testBoolMask) { using namespace openvdb; BoolGrid mask(false); mask.fill(kInnerBBox, true, true); FloatGrid::Ptr clipped = tools::clip(mCube, mask); validate(*clipped); } // Test clipping against a boolean mask grid with mask inversion. TEST_F(TestClip, testInvertedBoolMask) { using namespace openvdb; // Construct a mask grid that is the "inverse" of the mask used in the other tests. // (This is not a true inverse, since the mask's active voxel bounds are finite.) BoolGrid mask(false); mask.fill(kCubeBBox, true, true); mask.fill(kInnerBBox, false, false); // Clipping against the "inverted" mask with mask inversion enabled // should give the same results as clipping normally against the normal mask. FloatGrid::Ptr clipped = tools::clip(mCube, mask, /*keepInterior=*/false); clipped->pruneGrid(); validate(*clipped); } // Test clipping against a non-boolean mask grid. TEST_F(TestClip, testNonBoolMask) { using namespace openvdb; Int32Grid mask(0); mask.fill(kInnerBBox, -5, true); FloatGrid::Ptr clipped = tools::clip(mCube, mask); validate(*clipped); } // Test clipping against a non-boolean mask grid with mask inversion. TEST_F(TestClip, testInvertedNonBoolMask) { using namespace openvdb; // Construct a mask grid that is the "inverse" of the mask used in the other tests. // (This is not a true inverse, since the mask's active voxel bounds are finite.) Grid<UInt32Tree> mask(0); auto paddedCubeBBox = kCubeBBox; paddedCubeBBox.expand(2); mask.fill(paddedCubeBBox, 99, true); mask.fill(kInnerBBox, 0, false); // Clipping against the "inverted" mask with mask inversion enabled // should give the same results as clipping normally against the normal mask. FloatGrid::Ptr clipped = tools::clip(mCube, mask, /*keepInterior=*/false); clipped->pruneGrid(); validate(*clipped); }
6,893
C++
31.985646
93
0.6112
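The clipping test above builds a solid cube of active voxels and clips it against a bounding box, a mask grid, or a frustum map. A minimal sketch of the bounding-box case (not part of the repository; assumes an OpenVDB build and uses the same grid values as the test):

#include <openvdb/openvdb.h>
#include <openvdb/tools/Clip.h>
#include <iostream>

int main()
{
    openvdb::initialize();

    // A 21 x 21 x 21 cube of active voxels with value 5.
    openvdb::FloatGrid cube(0.0f);
    cube.fill(openvdb::CoordBBox(openvdb::Coord(-10), openvdb::Coord(10)),
              /*value=*/5.0f, /*active=*/true);

    // Keep only the voxels inside a thin world-space box along the Z axis.
    const openvdb::BBoxd clipBox(openvdb::Vec3d(4.0, 4.0, -6.0), openvdb::Vec3d(4.9, 4.9, 6.0));
    openvdb::FloatGrid::Ptr clipped = openvdb::tools::clip(cube, clipBox);

    std::cout << "active voxels after clipping: " << clipped->activeVoxelCount() << std::endl;
    return 0;
}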
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestNodeManager.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <openvdb/Exceptions.h> #include <openvdb/Types.h> #include <openvdb/tree/NodeManager.h> #include <openvdb/tree/LeafManager.h> #include "util.h" // for unittest_util::makeSphere() #include "gtest/gtest.h" class TestNodeManager: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; namespace { template<typename TreeT> struct NodeCountOp { NodeCountOp() : nodeCount(TreeT::DEPTH, 0), totalCount(0) { } NodeCountOp(const NodeCountOp&, tbb::split) : nodeCount(TreeT::DEPTH, 0), totalCount(0) { } void join(const NodeCountOp& other) { for (size_t i = 0; i < nodeCount.size(); ++i) { nodeCount[i] += other.nodeCount[i]; } totalCount += other.totalCount; } // do nothing for the root node void operator()(const typename TreeT::RootNodeType&) { } // count the internal and leaf nodes template<typename NodeT> void operator()(const NodeT&) { ++(nodeCount[NodeT::LEVEL]); ++totalCount; } std::vector<openvdb::Index64> nodeCount; openvdb::Index64 totalCount; };// NodeCountOp }//unnamed namespace TEST_F(TestNodeManager, testAll) { using openvdb::CoordBBox; using openvdb::Coord; using openvdb::Vec3f; using openvdb::Index64; using openvdb::FloatGrid; using openvdb::FloatTree; const Vec3f center(0.35f, 0.35f, 0.35f); const float radius = 0.15f; const int dim = 128, half_width = 5; const float voxel_size = 1.0f/dim; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/half_width*voxel_size); FloatTree& tree = grid->tree(); grid->setTransform(openvdb::math::Transform::createLinearTransform(/*voxel size=*/voxel_size)); unittest_util::makeSphere<FloatGrid>(Coord(dim), center, radius, *grid, unittest_util::SPHERE_SPARSE_NARROW_BAND); EXPECT_EQ(4, int(FloatTree::DEPTH)); EXPECT_EQ(3, int(openvdb::tree::NodeManager<FloatTree>::LEVELS)); std::vector<Index64> nodeCount; for (openvdb::Index i=0; i<FloatTree::DEPTH; ++i) nodeCount.push_back(0); for (FloatTree::NodeCIter it = tree.cbeginNode(); it; ++it) ++(nodeCount[it.getLevel()]); //for (size_t i=0; i<nodeCount.size(); ++i) {//includes the root node // std::cerr << "Level=" << i << " nodes=" << nodeCount[i] << std::endl; //} {// test tree constructor openvdb::tree::NodeManager<FloatTree> manager(tree); //for (openvdb::Index i=0; i<openvdb::tree::NodeManager<FloatTree>::LEVELS; ++i) { // std::cerr << "Level=" << i << " nodes=" << manager.nodeCount(i) << std::endl; //} Index64 totalCount = 0; for (openvdb::Index i=0; i<FloatTree::RootNodeType::LEVEL; ++i) {//exclude root in nodeCount //std::cerr << "Level=" << i << " expected=" << nodeCount[i] // << " cached=" << manager.nodeCount(i) << std::endl; EXPECT_EQ(nodeCount[i], manager.nodeCount(i)); totalCount += nodeCount[i]; } EXPECT_EQ(totalCount, manager.nodeCount()); // test the map reduce functionality NodeCountOp<FloatTree> bottomUpOp; NodeCountOp<FloatTree> topDownOp; manager.reduceBottomUp(bottomUpOp); manager.reduceTopDown(topDownOp); for (openvdb::Index i=0; i<FloatTree::RootNodeType::LEVEL; ++i) {//exclude root in nodeCount EXPECT_EQ(bottomUpOp.nodeCount[i], manager.nodeCount(i)); EXPECT_EQ(topDownOp.nodeCount[i], manager.nodeCount(i)); } EXPECT_EQ(bottomUpOp.totalCount, manager.nodeCount()); EXPECT_EQ(topDownOp.totalCount, manager.nodeCount()); } {// test LeafManager constructor typedef openvdb::tree::LeafManager<FloatTree> LeafManagerT; LeafManagerT manager1(tree); EXPECT_EQ(nodeCount[0], Index64(manager1.leafCount())); 
openvdb::tree::NodeManager<LeafManagerT> manager2(manager1); Index64 totalCount = 0; for (openvdb::Index i=0; i<FloatTree::RootNodeType::LEVEL; ++i) {//exclude root in nodeCount //std::cerr << "Level=" << i << " expected=" << nodeCount[i] // << " cached=" << manager2.nodeCount(i) << std::endl; EXPECT_EQ(nodeCount[i], Index64(manager2.nodeCount(i))); totalCount += nodeCount[i]; } EXPECT_EQ(totalCount, Index64(manager2.nodeCount())); // test the map reduce functionality NodeCountOp<FloatTree> bottomUpOp; NodeCountOp<FloatTree> topDownOp; manager2.reduceBottomUp(bottomUpOp); manager2.reduceTopDown(topDownOp); for (openvdb::Index i=0; i<FloatTree::RootNodeType::LEVEL; ++i) {//exclude root in nodeCount EXPECT_EQ(bottomUpOp.nodeCount[i], manager2.nodeCount(i)); EXPECT_EQ(topDownOp.nodeCount[i], manager2.nodeCount(i)); } EXPECT_EQ(bottomUpOp.totalCount, manager2.nodeCount()); EXPECT_EQ(topDownOp.totalCount, manager2.nodeCount()); } } TEST_F(TestNodeManager, testConst) { using namespace openvdb; const Vec3f center(0.35f, 0.35f, 0.35f); const int dim = 128, half_width = 5; const float voxel_size = 1.0f/dim; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/half_width*voxel_size); const FloatTree& tree = grid->constTree(); tree::NodeManager<const FloatTree> manager(tree); NodeCountOp<const FloatTree> topDownOp; manager.reduceTopDown(topDownOp); std::vector<Index64> nodeCount; for (openvdb::Index i=0; i<FloatTree::DEPTH; ++i) nodeCount.push_back(0); for (FloatTree::NodeCIter it = tree.cbeginNode(); it; ++it) ++(nodeCount[it.getLevel()]); Index64 totalCount = 0; for (openvdb::Index i=0; i<FloatTree::RootNodeType::LEVEL; ++i) {//exclude root in nodeCount EXPECT_EQ(nodeCount[i], manager.nodeCount(i)); totalCount += nodeCount[i]; } EXPECT_EQ(totalCount, manager.nodeCount()); } namespace { template<typename TreeT> struct ExpandOp { using RootT = typename TreeT::RootNodeType; using LeafT = typename TreeT::LeafNodeType; explicit ExpandOp(bool zeroOnly = false) : mZeroOnly(zeroOnly) { } // do nothing for the root node bool operator()(RootT&, size_t = 1) const { return true; } // count the internal and leaf nodes template<typename NodeT> bool operator()(NodeT& node, size_t idx = 1) const { for (auto iter = node.cbeginValueAll(); iter; ++iter) { const openvdb::Coord ijk = iter.getCoord(); if (ijk.x() < 256 && ijk.y() < 256 && ijk.z() < 256) { node.addChild(new typename NodeT::ChildNodeType(iter.getCoord(), NodeT::LEVEL, true)); } } if (mZeroOnly) return idx == 0; return true; } bool operator()(LeafT& leaf, size_t /*idx*/ = 1) const { for (auto iter = leaf.beginValueAll(); iter; ++iter) { iter.setValue(iter.pos()); } return true; } bool mZeroOnly = false; };// ExpandOp template<typename TreeT> struct RootOnlyOp { using RootT = typename TreeT::RootNodeType; RootOnlyOp() = default; RootOnlyOp(const RootOnlyOp&, tbb::split) { } void join(const RootOnlyOp&) { } // do nothing for the root node but return false bool operator()(RootT&, size_t) const { return false; } // throw on internal or leaf nodes template<typename NodeOrLeafT> bool operator()(NodeOrLeafT&, size_t) const { OPENVDB_THROW(openvdb::RuntimeError, "Should not process nodes below root."); } };// RootOnlyOp template<typename TreeT> struct SumOp { using RootT = typename TreeT::RootNodeType; using LeafT = typename TreeT::LeafNodeType; explicit SumOp(bool zeroOnly = false) : mZeroOnly(zeroOnly) { } SumOp(const SumOp& other, tbb::split): totalCount(0), mZeroOnly(other.mZeroOnly) { } void join(const SumOp& other) { totalCount += other.totalCount; } // do 
nothing for the root node bool operator()(const typename TreeT::RootNodeType&, size_t /*idx*/ = 0) { return true; } // count the internal nodes template<typename NodeT> bool operator()(const NodeT& node, size_t idx = 0) { for (auto iter = node.cbeginValueAll(); iter; ++iter) { totalCount += *iter; } if (mZeroOnly) return idx == 0; return true; } // count the leaf nodes bool operator()(const LeafT& leaf, size_t /*idx*/ = 0) { for (auto iter = leaf.cbeginValueAll(); iter; ++iter) { totalCount += *iter; } return true; } openvdb::Index64 totalCount = openvdb::Index64(0); bool mZeroOnly = false; };// SumOp }//unnamed namespace TEST_F(TestNodeManager, testDynamic) { using openvdb::Coord; using openvdb::Index32; using openvdb::Index64; using openvdb::Int32Tree; using RootNodeType = Int32Tree::RootNodeType; using Internal1NodeType = RootNodeType::ChildNodeType; Int32Tree sourceTree(0); auto child = std::make_unique<Internal1NodeType>(Coord(0, 0, 0), /*value=*/1.0f); EXPECT_TRUE(sourceTree.root().addChild(child.release())); EXPECT_EQ(Index32(0), sourceTree.leafCount()); EXPECT_EQ(Index32(2), sourceTree.nonLeafCount()); ExpandOp<Int32Tree> expandOp; { // use NodeManager::foreachTopDown Int32Tree tree(sourceTree); openvdb::tree::NodeManager<Int32Tree> manager(tree); EXPECT_EQ(Index64(1), manager.nodeCount()); manager.foreachTopDown(expandOp); EXPECT_EQ(Index32(0), tree.leafCount()); // first level has been expanded, but node manager cache does not include the new nodes SumOp<Int32Tree> sumOp; manager.reduceBottomUp(sumOp); EXPECT_EQ(Index64(32760), sumOp.totalCount); } { // use DynamicNodeManager::foreachTopDown and filter out nodes below root Int32Tree tree(sourceTree); openvdb::tree::DynamicNodeManager<Int32Tree> manager(tree); RootOnlyOp<Int32Tree> rootOnlyOp; EXPECT_NO_THROW(manager.foreachTopDown(rootOnlyOp)); EXPECT_NO_THROW(manager.reduceTopDown(rootOnlyOp)); } { // use DynamicNodeManager::foreachTopDown Int32Tree tree(sourceTree); openvdb::tree::DynamicNodeManager<Int32Tree> manager(tree); manager.foreachTopDown(expandOp); EXPECT_EQ(Index32(32768), tree.leafCount()); SumOp<Int32Tree> sumOp; manager.reduceTopDown(sumOp); EXPECT_EQ(Index64(4286611448), sumOp.totalCount); SumOp<Int32Tree> zeroSumOp(true); manager.reduceTopDown(zeroSumOp); EXPECT_EQ(Index64(535855096), zeroSumOp.totalCount); } { // use DynamicNodeManager::foreachTopDown but filter nodes with non-zero index Int32Tree tree(sourceTree); openvdb::tree::DynamicNodeManager<Int32Tree> manager(tree); ExpandOp<Int32Tree> zeroExpandOp(true); manager.foreachTopDown(zeroExpandOp); EXPECT_EQ(Index32(32768), tree.leafCount()); SumOp<Int32Tree> sumOp; manager.reduceTopDown(sumOp); EXPECT_EQ(Index64(550535160), sumOp.totalCount); } }
11,429
C++
32.519061
102
0.633039
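The NodeManager test above compares cached per-level node counts against a manual NodeCIter traversal. A minimal sketch of that query (not part of the repository; assumes an OpenVDB build, and the sphere parameters here are arbitrary):

#include <openvdb/openvdb.h>
#include <openvdb/tree/NodeManager.h>
#include <openvdb/tools/LevelSetSphere.h>
#include <iostream>

int main()
{
    openvdb::initialize();
    auto grid = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/20.0f, /*center=*/openvdb::Vec3f(0.0f, 0.0f, 0.0f), /*voxelSize=*/1.0f);

    // Cache all nodes below the root and report how many live on each level.
    openvdb::tree::NodeManager<openvdb::FloatTree> manager(grid->tree());
    for (openvdb::Index level = 0; level < openvdb::FloatTree::RootNodeType::LEVEL; ++level) {
        std::cout << "level " << level << ": " << manager.nodeCount(level) << " nodes\n";
    }
    std::cout << "total (excluding root): " << manager.nodeCount() << std::endl;
    return 0;
}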
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestDivergence.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Types.h> #include <openvdb/openvdb.h> #include <openvdb/tools/GridOperators.h> #define ASSERT_DOUBLES_EXACTLY_EQUAL(expected, actual) \ EXPECT_NEAR((expected), (actual), /*tolerance=*/0.0); namespace { const int GRID_DIM = 10; } class TestDivergence: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; TEST_F(TestDivergence, testDivergenceTool) { using namespace openvdb; VectorGrid::Ptr inGrid = VectorGrid::create(); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(x), float(y), 0.f)); } } } EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); FloatGrid::Ptr divGrid = tools::divergence(*inGrid); EXPECT_EQ(math::Pow3(2*dim), int(divGrid->activeVoxelCount())); FloatGrid::ConstAccessor accessor = divGrid->getConstAccessor(); --dim;//ignore boundary divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0, v[2]); const float d = accessor.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); } } } } TEST_F(TestDivergence, testDivergenceMaskedTool) { using namespace openvdb; VectorGrid::Ptr inGrid = VectorGrid::create(); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(x), float(y), 0.f)); } } } EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); /// maked region openvdb::CoordBBox maskBBox(openvdb::Coord(0), openvdb::Coord(dim)); BoolGrid::Ptr maskGrid = BoolGrid::create(false); maskGrid->fill(maskBBox, true /*value*/, true /*activate*/); FloatGrid::Ptr divGrid = tools::divergence(*inGrid, *maskGrid); EXPECT_EQ(math::Pow3(dim), int(divGrid->activeVoxelCount())); FloatGrid::ConstAccessor accessor = divGrid->getConstAccessor(); --dim;//ignore boundary divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0, v[2]); const float d = accessor.getValue(xyz); if (maskBBox.isInside(xyz)) { ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); } else { ASSERT_DOUBLES_EXACTLY_EQUAL(0, d); } } } } } TEST_F(TestDivergence, testStaggeredDivergence) { // This test is slightly different than the one above for sanity // checking purposes. 
using namespace openvdb; VectorGrid::Ptr inGrid = VectorGrid::create(); inGrid->setGridClass( GRID_STAGGERED ); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(x), float(y), float(z))); } } } EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); FloatGrid::Ptr divGrid = tools::divergence(*inGrid); EXPECT_EQ(math::Pow3(2*dim), int(divGrid->activeVoxelCount())); FloatGrid::ConstAccessor accessor = divGrid->getConstAccessor(); --dim;//ignore boundary divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(z, v[2]); const float d = accessor.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(3, d); } } } } TEST_F(TestDivergence, testISDivergence) { using namespace openvdb; typedef VectorGrid::ConstAccessor Accessor; VectorGrid::Ptr inGrid = VectorGrid::create(); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(x), float(y), 0.f)); } } } Accessor inAccessor = inGrid->getConstAccessor(); EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); --dim;//ignore boundary divergence // test index space divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0, v[2]); float d; d = math::ISDivergence<math::CD_2ND>::result(inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::BD_1ST>::result(inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::FD_1ST>::result(inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); } } } --dim;//ignore boundary divergence // test index space divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0, v[2]); float d; d = math::ISDivergence<math::CD_4TH>::result(inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::FD_2ND>::result(inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::BD_2ND>::result(inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); } } } --dim;//ignore boundary divergence // test index space divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0, v[2]); float d; d = math::ISDivergence<math::CD_6TH>::result(inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::FD_3RD>::result(inAccessor, xyz); EXPECT_NEAR(2, d, /*tolerance=*/0.00001); d = 
math::ISDivergence<math::BD_3RD>::result(inAccessor, xyz); EXPECT_NEAR(2, d, /*tolerance=*/0.00001); } } } } TEST_F(TestDivergence, testISDivergenceStencil) { using namespace openvdb; VectorGrid::Ptr inGrid = VectorGrid::create(); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(x), float(y), 0.f)); } } } EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); math::SevenPointStencil<VectorGrid> sevenpt(*inGrid); math::ThirteenPointStencil<VectorGrid> thirteenpt(*inGrid); math::NineteenPointStencil<VectorGrid> nineteenpt(*inGrid); --dim;//ignore boundary divergence // test index space divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0, v[2]); sevenpt.moveTo(xyz); float d; d = math::ISDivergence<math::CD_2ND>::result(sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::BD_1ST>::result(sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::FD_1ST>::result(sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); } } } --dim;//ignore boundary divergence // test index space divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0, v[2]); thirteenpt.moveTo(xyz); float d; d = math::ISDivergence<math::CD_4TH>::result(thirteenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::FD_2ND>::result(thirteenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::BD_2ND>::result(thirteenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); } } } --dim;//ignore boundary divergence // test index space divergence for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inTree.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(x, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(y, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0, v[2]); nineteenpt.moveTo(xyz); float d; d = math::ISDivergence<math::CD_6TH>::result(nineteenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::ISDivergence<math::FD_3RD>::result(nineteenpt); EXPECT_NEAR(2, d, /*tolerance=*/0.00001); d = math::ISDivergence<math::BD_3RD>::result(nineteenpt); EXPECT_NEAR(2, d, /*tolerance=*/0.00001); } } } } TEST_F(TestDivergence, testWSDivergence) { using namespace openvdb; typedef VectorGrid::ConstAccessor Accessor; { // non-unit voxel size double voxel_size = 0.5; VectorGrid::Ptr inGrid = VectorGrid::create(); inGrid->setTransform(math::Transform::createLinearTransform(voxel_size)); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Vec3d location = inGrid->indexToWorld(Vec3d(x,y,z)); inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(location.x()), float(location.y()), 0.f)); } } } Accessor inAccessor = inGrid->getConstAccessor(); EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); 
--dim;//ignore boundary divergence // test with a map // test with a map math::AffineMap map(voxel_size*math::Mat3d::identity()); math::UniformScaleMap uniform_map(voxel_size); math::UniformScaleTranslateMap uniform_translate_map(voxel_size, Vec3d(0,0,0)); for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { openvdb::Coord xyz(x,y,z); //openvdb::VectorTree::ValueType v = inTree.getValue(xyz); //std::cout << "vec(" << xyz << ")=" << v << std::endl; float d; d = math::Divergence<math::AffineMap, math::CD_2ND>::result( map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::AffineMap, math::BD_1ST>::result( map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::AffineMap, math::FD_1ST>::result( map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleMap, math::CD_2ND>::result( uniform_map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleMap, math::BD_1ST>::result( uniform_map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleMap, math::FD_1ST>::result( uniform_map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleTranslateMap, math::CD_2ND>::result( uniform_translate_map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleTranslateMap, math::BD_1ST>::result( uniform_translate_map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleTranslateMap, math::FD_1ST>::result( uniform_translate_map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); } } } } { // non-uniform scaling and rotation Vec3d voxel_sizes(0.25, 0.45, 0.75); VectorGrid::Ptr inGrid = VectorGrid::create(); math::MapBase::Ptr base_map( new math::ScaleMap(voxel_sizes)); // apply rotation math::MapBase::Ptr rotated_map = base_map->preRotate(1.5, math::X_AXIS); inGrid->setTransform(math::Transform::Ptr(new math::Transform(rotated_map))); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Vec3d location = inGrid->indexToWorld(Vec3d(x,y,z)); inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(location.x()), float(location.y()), 0.f)); } } } Accessor inAccessor = inGrid->getConstAccessor(); EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); --dim;//ignore boundary divergence // test with a map math::AffineMap::ConstPtr map = inGrid->transform().map<math::AffineMap>(); for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { openvdb::Coord xyz(x,y,z); //openvdb::VectorTree::ValueType v = inTree.getValue(xyz); //std::cout << "vec(" << xyz << ")=" << v << std::endl; float d; d = math::Divergence<math::AffineMap, math::CD_2ND>::result( *map, inAccessor, xyz); EXPECT_NEAR(2.0, d, 0.01); d = math::Divergence<math::AffineMap, math::BD_1ST>::result( *map, inAccessor, xyz); EXPECT_NEAR(2.0, d, 0.01); d = math::Divergence<math::AffineMap, math::FD_1ST>::result( *map, inAccessor, xyz); EXPECT_NEAR(2.0, d, 0.01); } } } } } TEST_F(TestDivergence, testWSDivergenceStencil) { using namespace openvdb; { // non-unit voxel size double voxel_size = 0.5; VectorGrid::Ptr inGrid = VectorGrid::create(); inGrid->setTransform(math::Transform::createLinearTransform(voxel_size)); 
VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Vec3d location = inGrid->indexToWorld(Vec3d(x,y,z)); inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(location.x()), float(location.y()), 0.f)); } } } EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); --dim;//ignore boundary divergence // test with a map math::AffineMap map(voxel_size*math::Mat3d::identity()); math::UniformScaleMap uniform_map(voxel_size); math::UniformScaleTranslateMap uniform_translate_map(voxel_size, Vec3d(0,0,0)); math::SevenPointStencil<VectorGrid> sevenpt(*inGrid); math::SecondOrderDenseStencil<VectorGrid> dense_2ndOrder(*inGrid); for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { openvdb::Coord xyz(x,y,z); //openvdb::VectorTree::ValueType v = inTree.getValue(xyz); //std::cout << "vec(" << xyz << ")=" << v << std::endl; float d; sevenpt.moveTo(xyz); dense_2ndOrder.moveTo(xyz); d = math::Divergence<math::AffineMap, math::CD_2ND>::result( map, dense_2ndOrder); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::AffineMap, math::BD_1ST>::result( map, dense_2ndOrder); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::AffineMap, math::FD_1ST>::result( map, dense_2ndOrder); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleMap, math::CD_2ND>::result( uniform_map, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleMap, math::BD_1ST>::result( uniform_map, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleMap, math::FD_1ST>::result( uniform_map, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleTranslateMap, math::CD_2ND>::result( uniform_translate_map, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleTranslateMap, math::BD_1ST>::result( uniform_translate_map, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); d = math::Divergence<math::UniformScaleTranslateMap, math::FD_1ST>::result( uniform_translate_map, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(2, d); } } } } { // non-uniform scaling and rotation Vec3d voxel_sizes(0.25, 0.45, 0.75); VectorGrid::Ptr inGrid = VectorGrid::create(); math::MapBase::Ptr base_map( new math::ScaleMap(voxel_sizes)); // apply rotation math::MapBase::Ptr rotated_map = base_map->preRotate(1.5, math::X_AXIS); inGrid->setTransform(math::Transform::Ptr(new math::Transform(rotated_map))); VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Vec3d location = inGrid->indexToWorld(Vec3d(x,y,z)); inTree.setValue(Coord(x,y,z), VectorTree::ValueType(float(location.x()), float(location.y()), 0.f)); } } } //Accessor inAccessor = inGrid->getConstAccessor(); EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); --dim;//ignore boundary divergence // test with a map math::AffineMap::ConstPtr map = inGrid->transform().map<math::AffineMap>(); math::SecondOrderDenseStencil<VectorGrid> dense_2ndOrder(*inGrid); for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { openvdb::Coord xyz(x,y,z); dense_2ndOrder.moveTo(xyz); float d; d = math::Divergence<math::AffineMap, math::CD_2ND>::result( *map, 
dense_2ndOrder); EXPECT_NEAR(2.0, d, 0.01); d = math::Divergence<math::AffineMap, math::BD_1ST>::result( *map, dense_2ndOrder); EXPECT_NEAR(2.0, d, 0.01); d = math::Divergence<math::AffineMap, math::FD_1ST>::result( *map, dense_2ndOrder); EXPECT_NEAR(2.0, d, 0.01); } } } } }
23,628
C++
35.185299
95
0.511427
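The divergence tests above populate a vector grid with the field v = (x, y, 0), for which the analytic divergence is 2 away from the grid boundary. A minimal sketch reproducing that result with tools::divergence (not part of the repository; assumes an OpenVDB build):

#include <openvdb/openvdb.h>
#include <openvdb/tools/GridOperators.h>
#include <iostream>

int main()
{
    openvdb::initialize();

    openvdb::VectorGrid::Ptr inGrid = openvdb::VectorGrid::create();
    openvdb::VectorTree& tree = inGrid->tree();
    for (int x = -10; x < 10; ++x)
        for (int y = -10; y < 10; ++y)
            for (int z = -10; z < 10; ++z)
                tree.setValue(openvdb::Coord(x, y, z),
                              openvdb::VectorTree::ValueType(float(x), float(y), 0.0f));

    openvdb::FloatGrid::Ptr divGrid = openvdb::tools::divergence(*inGrid);
    std::cout << "div at origin: "
              << divGrid->tree().getValue(openvdb::Coord(0, 0, 0)) << std::endl; // expect 2
    return 0;
}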
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestQuat.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/math/Math.h> #include <openvdb/math/Quat.h> #include <openvdb/math/Mat4.h> using namespace openvdb::math; class TestQuat: public ::testing::Test { }; TEST_F(TestQuat, testConstructor) { { Quat<float> qq(1.23f, 2.34f, 3.45f, 4.56f); EXPECT_TRUE( isExactlyEqual(qq.x(), 1.23f) ); EXPECT_TRUE( isExactlyEqual(qq.y(), 2.34f) ); EXPECT_TRUE( isExactlyEqual(qq.z(), 3.45f) ); EXPECT_TRUE( isExactlyEqual(qq.w(), 4.56f) ); } { float a[] = { 1.23f, 2.34f, 3.45f, 4.56f }; Quat<float> qq(a); EXPECT_TRUE( isExactlyEqual(qq.x(), 1.23f) ); EXPECT_TRUE( isExactlyEqual(qq.y(), 2.34f) ); EXPECT_TRUE( isExactlyEqual(qq.z(), 3.45f) ); EXPECT_TRUE( isExactlyEqual(qq.w(), 4.56f) ); } } TEST_F(TestQuat, testAxisAngle) { float TOL = 1e-6f; Quat<float> q1(1.0f, 2.0f, 3.0f, 4.0f); Quat<float> q2(1.2f, 2.3f, 3.4f, 4.5f); Vec3s v(1, 2, 3); v.normalize(); float a = float(M_PI / 4.f); Quat<float> q(v,a); float b = q.angle(); Vec3s vv = q.axis(); EXPECT_TRUE( isApproxEqual(a, b, TOL) ); EXPECT_TRUE( v.eq(vv, TOL) ); q1.setAxisAngle(v,a); b = q1.angle(); vv = q1.axis(); EXPECT_TRUE( isApproxEqual(a, b, TOL) ); EXPECT_TRUE( v.eq(vv, TOL) ); } TEST_F(TestQuat, testOpPlus) { Quat<float> q1(1.0f, 2.0f, 3.0f, 4.0f); Quat<float> q2(1.2f, 2.3f, 3.4f, 4.5f); Quat<float> q = q1 + q2; float x=q1.x()+q2.x(), y=q1.y()+q2.y(), z=q1.z()+q2.z(), w=q1.w()+q2.w(); EXPECT_TRUE( isExactlyEqual(q.x(), x) ); EXPECT_TRUE( isExactlyEqual(q.y(), y) ); EXPECT_TRUE( isExactlyEqual(q.z(), z) ); EXPECT_TRUE( isExactlyEqual(q.w(), w) ); q = q1; q += q2; EXPECT_TRUE( isExactlyEqual(q.x(), x) ); EXPECT_TRUE( isExactlyEqual(q.y(), y) ); EXPECT_TRUE( isExactlyEqual(q.z(), z) ); EXPECT_TRUE( isExactlyEqual(q.w(), w) ); q.add(q1,q2); EXPECT_TRUE( isExactlyEqual(q.x(), x) ); EXPECT_TRUE( isExactlyEqual(q.y(), y) ); EXPECT_TRUE( isExactlyEqual(q.z(), z) ); EXPECT_TRUE( isExactlyEqual(q.w(), w) ); } TEST_F(TestQuat, testOpMinus) { Quat<float> q1(1.0f, 2.0f, 3.0f, 4.0f); Quat<float> q2(1.2f, 2.3f, 3.4f, 4.5f); Quat<float> q = q1 - q2; float x=q1.x()-q2.x(), y=q1.y()-q2.y(), z=q1.z()-q2.z(), w=q1.w()-q2.w(); EXPECT_TRUE( isExactlyEqual(q.x(), x) ); EXPECT_TRUE( isExactlyEqual(q.y(), y) ); EXPECT_TRUE( isExactlyEqual(q.z(), z) ); EXPECT_TRUE( isExactlyEqual(q.w(), w) ); q = q1; q -= q2; EXPECT_TRUE( isExactlyEqual(q.x(), x) ); EXPECT_TRUE( isExactlyEqual(q.y(), y) ); EXPECT_TRUE( isExactlyEqual(q.z(), z) ); EXPECT_TRUE( isExactlyEqual(q.w(), w) ); q.sub(q1,q2); EXPECT_TRUE( isExactlyEqual(q.x(), x) ); EXPECT_TRUE( isExactlyEqual(q.y(), y) ); EXPECT_TRUE( isExactlyEqual(q.z(), z) ); EXPECT_TRUE( isExactlyEqual(q.w(), w) ); } TEST_F(TestQuat, testOpMultiply) { Quat<float> q1(1.0f, 2.0f, 3.0f, 4.0f); Quat<float> q2(1.2f, 2.3f, 3.4f, 4.5f); Quat<float> q = q1 * 1.5f; EXPECT_TRUE( isExactlyEqual(q.x(), float(1.5f)*q1.x()) ); EXPECT_TRUE( isExactlyEqual(q.y(), float(1.5f)*q1.y()) ); EXPECT_TRUE( isExactlyEqual(q.z(), float(1.5f)*q1.z()) ); EXPECT_TRUE( isExactlyEqual(q.w(), float(1.5f)*q1.w()) ); q = q1; q *= 1.5f; EXPECT_TRUE( isExactlyEqual(q.x(), float(1.5f)*q1.x()) ); EXPECT_TRUE( isExactlyEqual(q.y(), float(1.5f)*q1.y()) ); EXPECT_TRUE( isExactlyEqual(q.z(), float(1.5f)*q1.z()) ); EXPECT_TRUE( isExactlyEqual(q.w(), float(1.5f)*q1.w()) ); q.scale(1.5f, q1); EXPECT_TRUE( isExactlyEqual(q.x(), float(1.5f)*q1.x()) ); EXPECT_TRUE( isExactlyEqual(q.y(), float(1.5f)*q1.y()) ); EXPECT_TRUE( isExactlyEqual(q.z(), 
float(1.5f)*q1.z()) ); EXPECT_TRUE( isExactlyEqual(q.w(), float(1.5f)*q1.w()) ); } TEST_F(TestQuat, testInvert) { float TOL = 1e-6f; Quat<float> q1(1.0f, 2.0f, 3.0f, 4.0f); Quat<float> q2(1.2f, 2.3f, 3.4f, 4.5f); q1 = q2; q2 = q2.inverse(); Quat<float> q = q1*q2; EXPECT_TRUE( q.eq( Quat<float>(0,0,0,1), TOL ) ); q1.normalize(); q2 = q1.conjugate(); q = q1*q2; EXPECT_TRUE( q.eq( Quat<float>(0,0,0,1), TOL ) ); } TEST_F(TestQuat, testEulerAngles) { { double TOL = 1e-7; Mat4d rx, ry, rz; const double angle1 = 20. * M_PI / 180.; const double angle2 = 64. * M_PI / 180.; const double angle3 = 125. *M_PI / 180.; rx.setToRotation(Vec3d(1,0,0), angle1); ry.setToRotation(Vec3d(0,1,0), angle2); rz.setToRotation(Vec3d(0,0,1), angle3); Mat4d r = rx * ry * rz; const Quat<double> rot(r.getMat3()); Vec3d result = rot.eulerAngles(ZYX_ROTATION); rx.setToRotation(Vec3d(1,0,0), result[0]); ry.setToRotation(Vec3d(0,1,0), result[1]); rz.setToRotation(Vec3d(0,0,1), result[2]); Mat4d rtest = rx * ry * rz; EXPECT_TRUE(r.eq(rtest, TOL)); } { double TOL = 1e-7; Mat4d rx, ry, rz; const double angle1 = 20. * M_PI / 180.; const double angle2 = 64. * M_PI / 180.; const double angle3 = 125. *M_PI / 180.; rx.setToRotation(Vec3d(1,0,0), angle1); ry.setToRotation(Vec3d(0,1,0), angle2); rz.setToRotation(Vec3d(0,0,1), angle3); Mat4d r = rz * ry * rx; const Quat<double> rot(r.getMat3()); Vec3d result = rot.eulerAngles(XYZ_ROTATION); rx.setToRotation(Vec3d(1,0,0), result[0]); ry.setToRotation(Vec3d(0,1,0), result[1]); rz.setToRotation(Vec3d(0,0,1), result[2]); Mat4d rtest = rz * ry * rx; EXPECT_TRUE(r.eq(rtest, TOL)); } { double TOL = 1e-7; Mat4d rx, ry, rz; const double angle1 = 20. * M_PI / 180.; const double angle2 = 64. * M_PI / 180.; const double angle3 = 125. *M_PI / 180.; rx.setToRotation(Vec3d(1,0,0), angle1); ry.setToRotation(Vec3d(0,1,0), angle2); rz.setToRotation(Vec3d(0,0,1), angle3); Mat4d r = rz * rx * ry; const Quat<double> rot(r.getMat3()); Vec3d result = rot.eulerAngles(YXZ_ROTATION); rx.setToRotation(Vec3d(1,0,0), result[0]); ry.setToRotation(Vec3d(0,1,0), result[1]); rz.setToRotation(Vec3d(0,0,1), result[2]); Mat4d rtest = rz * rx * ry; EXPECT_TRUE(r.eq(rtest, TOL)); } { const Quat<float> rot(X_AXIS, 1.0); Vec3s result = rot.eulerAngles(XZY_ROTATION); EXPECT_EQ(result, Vec3s(1,0,0)); } }
6,816
C++
25.628906
75
0.553991
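The quaternion test above covers axis-angle construction, arithmetic operators, and inversion. A minimal sketch of the axis-angle round trip and the q * q^-1 identity (not part of the repository; assumes an OpenVDB build and uses only members exercised by the test):

#include <openvdb/math/Math.h>
#include <openvdb/math/Quat.h>
#include <iostream>

int main()
{
    using namespace openvdb::math;

    Vec3s axis(1.0f, 2.0f, 3.0f);
    axis.normalize();
    const float angle = float(M_PI / 4.0);

    // Build a rotation quaternion from the axis and angle, then recover them.
    Quat<float> q(axis, angle);
    std::cout << "angle: " << q.angle() << ", axis: " << q.axis() << std::endl;

    // q * q.inverse() should be approximately the identity quaternion (0, 0, 0, 1).
    Quat<float> r = q * q.inverse();
    std::cout << "q * q^-1 = (" << r.x() << ", " << r.y() << ", "
              << r.z() << ", " << r.w() << ")" << std::endl;
    return 0;
}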
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestPointPartitioner.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/tools/PointPartitioner.h> #include <vector> class TestPointPartitioner: public ::testing::Test { }; //////////////////////////////////////// namespace { struct PointList { typedef openvdb::Vec3s PosType; PointList(const std::vector<PosType>& points) : mPoints(&points) {} size_t size() const { return mPoints->size(); } void getPos(size_t n, PosType& xyz) const { xyz = (*mPoints)[n]; } protected: std::vector<PosType> const * const mPoints; }; // PointList } // namespace //////////////////////////////////////// TEST_F(TestPointPartitioner, testPartitioner) { const size_t pointCount = 10000; const float voxelSize = 0.1f; std::vector<openvdb::Vec3s> points(pointCount, openvdb::Vec3s(0.f)); for (size_t n = 1; n < pointCount; ++n) { points[n].x() = points[n-1].x() + voxelSize; } PointList pointList(points); const openvdb::math::Transform::Ptr transform = openvdb::math::Transform::createLinearTransform(voxelSize); typedef openvdb::tools::UInt32PointPartitioner PointPartitioner; PointPartitioner::Ptr partitioner = PointPartitioner::create(pointList, *transform); EXPECT_TRUE(!partitioner->empty()); // The default interpretation should be cell-centered. EXPECT_TRUE(partitioner->usingCellCenteredTransform()); const size_t expectedPageCount = pointCount / (1u << PointPartitioner::LOG2DIM); EXPECT_EQ(expectedPageCount, partitioner->size()); EXPECT_EQ(openvdb::Coord(0), partitioner->origin(0)); PointPartitioner::IndexIterator it = partitioner->indices(0); EXPECT_TRUE(it.test()); EXPECT_EQ(it.size(), size_t(1 << PointPartitioner::LOG2DIM)); PointPartitioner::IndexIterator itB = partitioner->indices(0); EXPECT_EQ(++it, ++itB); EXPECT_TRUE(it != ++itB); std::vector<PointPartitioner::IndexType> indices; for (it.reset(); it; ++it) { indices.push_back(*it); } EXPECT_EQ(it.size(), indices.size()); size_t idx = 0; for (itB.reset(); itB; ++itB) { EXPECT_EQ(indices[idx++], *itB); } }
2,234
C++
23.560439
84
0.636079
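The partitioner test above relies on a small adapter (PointList) that exposes a PosType, size(), and getPos() over a std::vector of positions. The sketch below shows the same UInt32PointPartitioner workflow as a standalone program; the VectorPointList wrapper, point layout, and printed output are illustrative, and only calls that appear in the test (create(), size(), indices(), and the index-iterator operations) are used.

#include <openvdb/openvdb.h>
#include <openvdb/tools/PointPartitioner.h>
#include <iostream>
#include <vector>

// Adapter with the same interface as the test's PointList:
// a position type plus size() and getPos() over an external array.
struct VectorPointList {
    typedef openvdb::Vec3s PosType;
    explicit VectorPointList(const std::vector<PosType>& points): mPoints(&points) {}
    size_t size() const { return mPoints->size(); }
    void getPos(size_t n, PosType& xyz) const { xyz = (*mPoints)[n]; }
private:
    std::vector<PosType> const * const mPoints;
};

int main()
{
    openvdb::initialize();

    // A line of points spaced one voxel apart along +x.
    const float voxelSize = 0.1f;
    std::vector<openvdb::Vec3s> points;
    for (int i = 0; i < 1000; ++i) {
        points.push_back(openvdb::Vec3s(float(i) * voxelSize, 0.0f, 0.0f));
    }

    VectorPointList list(points);
    const openvdb::math::Transform::Ptr xform =
        openvdb::math::Transform::createLinearTransform(voxelSize);

    typedef openvdb::tools::UInt32PointPartitioner Partitioner;
    Partitioner::Ptr partitioner = Partitioner::create(list, *xform);

    // Each "page" is one leaf-sized bucket of indices into the input array.
    std::cout << "buckets: " << partitioner->size() << "\n";
    size_t count = 0;
    for (Partitioner::IndexIterator it = partitioner->indices(0); it; ++it) {
        ++count;  // *it is the original point index
    }
    std::cout << "points in first bucket: " << count << "\n";
    return 0;
}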
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestLinearInterp.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/openvdb.h> #include <openvdb/tools/Interpolation.h> #include <openvdb/math/Stencils.h> namespace { // Absolute tolerance for floating-point equality comparisons const double TOLERANCE = 1.e-6; } class TestLinearInterp: public ::testing::Test { public: template<typename GridType> void test(); template<typename GridType> void testTree(); template<typename GridType> void testAccessor(); template<typename GridType> void testConstantValues(); template<typename GridType> void testFillValues(); template<typename GridType> void testNegativeIndices(); template<typename GridType> void testStencilsMatch(); }; template<typename GridType> void TestLinearInterp::test() { typename GridType::TreeType TreeType; float fillValue = 256.0f; GridType grid(fillValue); typename GridType::TreeType& tree = grid.tree(); tree.setValue(openvdb::Coord(10, 10, 10), 1.0); tree.setValue(openvdb::Coord(11, 10, 10), 2.0); tree.setValue(openvdb::Coord(11, 11, 10), 2.0); tree.setValue(openvdb::Coord(10, 11, 10), 2.0); tree.setValue(openvdb::Coord( 9, 11, 10), 2.0); tree.setValue(openvdb::Coord( 9, 10, 10), 2.0); tree.setValue(openvdb::Coord( 9, 9, 10), 2.0); tree.setValue(openvdb::Coord(10, 9, 10), 2.0); tree.setValue(openvdb::Coord(11, 9, 10), 2.0); tree.setValue(openvdb::Coord(10, 10, 11), 3.0); tree.setValue(openvdb::Coord(11, 10, 11), 3.0); tree.setValue(openvdb::Coord(11, 11, 11), 3.0); tree.setValue(openvdb::Coord(10, 11, 11), 3.0); tree.setValue(openvdb::Coord( 9, 11, 11), 3.0); tree.setValue(openvdb::Coord( 9, 10, 11), 3.0); tree.setValue(openvdb::Coord( 9, 9, 11), 3.0); tree.setValue(openvdb::Coord(10, 9, 11), 3.0); tree.setValue(openvdb::Coord(11, 9, 11), 3.0); tree.setValue(openvdb::Coord(10, 10, 9), 4.0); tree.setValue(openvdb::Coord(11, 10, 9), 4.0); tree.setValue(openvdb::Coord(11, 11, 9), 4.0); tree.setValue(openvdb::Coord(10, 11, 9), 4.0); tree.setValue(openvdb::Coord( 9, 11, 9), 4.0); tree.setValue(openvdb::Coord( 9, 10, 9), 4.0); tree.setValue(openvdb::Coord( 9, 9, 9), 4.0); tree.setValue(openvdb::Coord(10, 9, 9), 4.0); tree.setValue(openvdb::Coord(11, 9, 9), 4.0); {//using BoxSampler // transform used for worldspace interpolation) openvdb::tools::GridSampler<GridType, openvdb::tools::BoxSampler> interpolator(grid); //openvdb::tools::LinearInterp<GridType> interpolator(*tree); typename GridType::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_NEAR(2.375, val, TOLERANCE); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_NEAR(1.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 10.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 11.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 11.0, 11.0); EXPECT_NEAR(3.0, val, TOLERANCE); val = interpolator.sampleVoxel(9.0, 11.0, 9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(9.0, 10.0, 9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_NEAR(1.1, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_NEAR(2.792, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_NEAR(2.71, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); 
EXPECT_NEAR(2.01, val, TOLERANCE); } {//using Sampler<1> // transform used for worldspace interpolation) openvdb::tools::GridSampler<GridType, openvdb::tools::Sampler<1> > interpolator(grid); //openvdb::tools::LinearInterp<GridType> interpolator(*tree); typename GridType::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_NEAR(2.375, val, TOLERANCE); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_NEAR(1.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 10.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 11.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 11.0, 11.0); EXPECT_NEAR(3.0, val, TOLERANCE); val = interpolator.sampleVoxel(9.0, 11.0, 9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(9.0, 10.0, 9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_NEAR(1.1, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_NEAR(2.792, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_NEAR(2.71, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_NEAR(2.01, val, TOLERANCE); } } TEST_F(TestLinearInterp, testFloat) { test<openvdb::FloatGrid>(); } TEST_F(TestLinearInterp, testDouble) { test<openvdb::DoubleGrid>(); } TEST_F(TestLinearInterp, testVec3S) { using namespace openvdb; Vec3s fillValue = Vec3s(256.0f, 256.0f, 256.0f); Vec3SGrid grid(fillValue); Vec3STree& tree = grid.tree(); tree.setValue(openvdb::Coord(10, 10, 10), Vec3s(1.0, 1.0, 1.0)); tree.setValue(openvdb::Coord(11, 10, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 10, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 10, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(11, 10, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(11, 11, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(10, 11, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( 9, 11, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( 9, 10, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( 9, 9, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(10, 9, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(11, 9, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(10, 10, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(11, 10, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(11, 11, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(10, 11, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( 9, 11, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( 9, 10, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( 9, 9, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(10, 9, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(11, 9, 9), Vec3s(4.0, 4.0, 4.0)); openvdb::tools::GridSampler<Vec3SGrid, openvdb::tools::BoxSampler> interpolator(grid); 
//openvdb::tools::LinearInterp<Vec3STree> interpolator(*tree); Vec3SGrid::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.375f))); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(1.f))); val = interpolator.sampleVoxel(11.0, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(2.f))); val = interpolator.sampleVoxel(11.0, 11.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(2.f))); val = interpolator.sampleVoxel(11.0, 11.0, 11.0); EXPECT_TRUE(val.eq(Vec3s(3.f))); val = interpolator.sampleVoxel(9.0, 11.0, 9.0); EXPECT_TRUE(val.eq(Vec3s(4.f))); val = interpolator.sampleVoxel(9.0, 10.0, 9.0); EXPECT_TRUE(val.eq(Vec3s(4.f))); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(1.1f))); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_TRUE(val.eq(Vec3s(2.792f))); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.41f))); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.41f))); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_TRUE(val.eq(Vec3s(2.71f))); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_TRUE(val.eq(Vec3s(2.01f))); } template<typename GridType> void TestLinearInterp::testTree() { float fillValue = 256.0f; typedef typename GridType::TreeType TreeType; TreeType tree(fillValue); tree.setValue(openvdb::Coord(10, 10, 10), 1.0); tree.setValue(openvdb::Coord(11, 10, 10), 2.0); tree.setValue(openvdb::Coord(11, 11, 10), 2.0); tree.setValue(openvdb::Coord(10, 11, 10), 2.0); tree.setValue(openvdb::Coord( 9, 11, 10), 2.0); tree.setValue(openvdb::Coord( 9, 10, 10), 2.0); tree.setValue(openvdb::Coord( 9, 9, 10), 2.0); tree.setValue(openvdb::Coord(10, 9, 10), 2.0); tree.setValue(openvdb::Coord(11, 9, 10), 2.0); tree.setValue(openvdb::Coord(10, 10, 11), 3.0); tree.setValue(openvdb::Coord(11, 10, 11), 3.0); tree.setValue(openvdb::Coord(11, 11, 11), 3.0); tree.setValue(openvdb::Coord(10, 11, 11), 3.0); tree.setValue(openvdb::Coord( 9, 11, 11), 3.0); tree.setValue(openvdb::Coord( 9, 10, 11), 3.0); tree.setValue(openvdb::Coord( 9, 9, 11), 3.0); tree.setValue(openvdb::Coord(10, 9, 11), 3.0); tree.setValue(openvdb::Coord(11, 9, 11), 3.0); tree.setValue(openvdb::Coord(10, 10, 9), 4.0); tree.setValue(openvdb::Coord(11, 10, 9), 4.0); tree.setValue(openvdb::Coord(11, 11, 9), 4.0); tree.setValue(openvdb::Coord(10, 11, 9), 4.0); tree.setValue(openvdb::Coord( 9, 11, 9), 4.0); tree.setValue(openvdb::Coord( 9, 10, 9), 4.0); tree.setValue(openvdb::Coord( 9, 9, 9), 4.0); tree.setValue(openvdb::Coord(10, 9, 9), 4.0); tree.setValue(openvdb::Coord(11, 9, 9), 4.0); // transform used for worldspace interpolation) openvdb::tools::GridSampler<TreeType, openvdb::tools::BoxSampler> interpolator(tree, openvdb::math::Transform()); typename GridType::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_NEAR(2.375, val, TOLERANCE); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_NEAR(1.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 10.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 11.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 11.0, 11.0); EXPECT_NEAR(3.0, val, TOLERANCE); val = interpolator.sampleVoxel(9.0, 11.0, 9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(9.0, 10.0, 9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_NEAR(1.1, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_NEAR(2.792, val, 
TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_NEAR(2.71, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_NEAR(2.01, val, TOLERANCE); } TEST_F(TestLinearInterp, testTreeFloat) { testTree<openvdb::FloatGrid>(); } TEST_F(TestLinearInterp, testTreeDouble) { testTree<openvdb::DoubleGrid>(); } TEST_F(TestLinearInterp, testTreeVec3S) { using namespace openvdb; Vec3s fillValue = Vec3s(256.0f, 256.0f, 256.0f); Vec3STree tree(fillValue); tree.setValue(openvdb::Coord(10, 10, 10), Vec3s(1.0, 1.0, 1.0)); tree.setValue(openvdb::Coord(11, 10, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 10, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 10, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(11, 10, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(11, 11, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(10, 11, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( 9, 11, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( 9, 10, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( 9, 9, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(10, 9, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(11, 9, 11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(10, 10, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(11, 10, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(11, 11, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(10, 11, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( 9, 11, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( 9, 10, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( 9, 9, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(10, 9, 9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(11, 9, 9), Vec3s(4.0, 4.0, 4.0)); openvdb::tools::GridSampler<Vec3STree, openvdb::tools::BoxSampler> interpolator(tree, openvdb::math::Transform()); //openvdb::tools::LinearInterp<Vec3STree> interpolator(*tree); Vec3SGrid::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.375f))); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(1.f))); val = interpolator.sampleVoxel(11.0, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(2.f))); val = interpolator.sampleVoxel(11.0, 11.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(2.f))); val = interpolator.sampleVoxel(11.0, 11.0, 11.0); EXPECT_TRUE(val.eq(Vec3s(3.f))); val = interpolator.sampleVoxel(9.0, 11.0, 9.0); EXPECT_TRUE(val.eq(Vec3s(4.f))); val = interpolator.sampleVoxel(9.0, 10.0, 9.0); EXPECT_TRUE(val.eq(Vec3s(4.f))); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(1.1f))); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_TRUE(val.eq(Vec3s(2.792f))); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.41f))); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.41f))); val = interpolator.sampleVoxel(10.5, 10.1, 
10.8); EXPECT_TRUE(val.eq(Vec3s(2.71f))); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_TRUE(val.eq(Vec3s(2.01f))); } template<typename GridType> void TestLinearInterp::testAccessor() { float fillValue = 256.0f; GridType grid(fillValue); typedef typename GridType::Accessor AccessorType; AccessorType acc = grid.getAccessor(); acc.setValue(openvdb::Coord(10, 10, 10), 1.0); acc.setValue(openvdb::Coord(11, 10, 10), 2.0); acc.setValue(openvdb::Coord(11, 11, 10), 2.0); acc.setValue(openvdb::Coord(10, 11, 10), 2.0); acc.setValue(openvdb::Coord( 9, 11, 10), 2.0); acc.setValue(openvdb::Coord( 9, 10, 10), 2.0); acc.setValue(openvdb::Coord( 9, 9, 10), 2.0); acc.setValue(openvdb::Coord(10, 9, 10), 2.0); acc.setValue(openvdb::Coord(11, 9, 10), 2.0); acc.setValue(openvdb::Coord(10, 10, 11), 3.0); acc.setValue(openvdb::Coord(11, 10, 11), 3.0); acc.setValue(openvdb::Coord(11, 11, 11), 3.0); acc.setValue(openvdb::Coord(10, 11, 11), 3.0); acc.setValue(openvdb::Coord( 9, 11, 11), 3.0); acc.setValue(openvdb::Coord( 9, 10, 11), 3.0); acc.setValue(openvdb::Coord( 9, 9, 11), 3.0); acc.setValue(openvdb::Coord(10, 9, 11), 3.0); acc.setValue(openvdb::Coord(11, 9, 11), 3.0); acc.setValue(openvdb::Coord(10, 10, 9), 4.0); acc.setValue(openvdb::Coord(11, 10, 9), 4.0); acc.setValue(openvdb::Coord(11, 11, 9), 4.0); acc.setValue(openvdb::Coord(10, 11, 9), 4.0); acc.setValue(openvdb::Coord( 9, 11, 9), 4.0); acc.setValue(openvdb::Coord( 9, 10, 9), 4.0); acc.setValue(openvdb::Coord( 9, 9, 9), 4.0); acc.setValue(openvdb::Coord(10, 9, 9), 4.0); acc.setValue(openvdb::Coord(11, 9, 9), 4.0); // transform used for worldspace interpolation) openvdb::tools::GridSampler<AccessorType, openvdb::tools::BoxSampler> interpolator(acc, grid.transform()); typename GridType::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_NEAR(2.375, val, TOLERANCE); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_NEAR(1.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 10.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 11.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(11.0, 11.0, 11.0); EXPECT_NEAR(3.0, val, TOLERANCE); val = interpolator.sampleVoxel(9.0, 11.0, 9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(9.0, 10.0, 9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_NEAR(1.1, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_NEAR(2.792, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_NEAR(2.71, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_NEAR(2.01, val, TOLERANCE); } TEST_F(TestLinearInterp, testAccessorFloat) { testAccessor<openvdb::FloatGrid>(); } TEST_F(TestLinearInterp, testAccessorDouble) { testAccessor<openvdb::DoubleGrid>(); } TEST_F(TestLinearInterp, testAccessorVec3S) { using namespace openvdb; Vec3s fillValue = Vec3s(256.0f, 256.0f, 256.0f); Vec3SGrid grid(fillValue); typedef Vec3SGrid::Accessor AccessorType; AccessorType acc = grid.getAccessor(); acc.setValue(openvdb::Coord(10, 10, 10), Vec3s(1.0, 1.0, 1.0)); acc.setValue(openvdb::Coord(11, 10, 10), Vec3s(2.0, 2.0, 2.0)); acc.setValue(openvdb::Coord(11, 11, 10), Vec3s(2.0, 2.0, 2.0)); acc.setValue(openvdb::Coord(10, 11, 10), Vec3s(2.0, 2.0, 2.0)); acc.setValue(openvdb::Coord( 
9, 11, 10), Vec3s(2.0, 2.0, 2.0)); acc.setValue(openvdb::Coord( 9, 10, 10), Vec3s(2.0, 2.0, 2.0)); acc.setValue(openvdb::Coord( 9, 9, 10), Vec3s(2.0, 2.0, 2.0)); acc.setValue(openvdb::Coord(10, 9, 10), Vec3s(2.0, 2.0, 2.0)); acc.setValue(openvdb::Coord(11, 9, 10), Vec3s(2.0, 2.0, 2.0)); acc.setValue(openvdb::Coord(10, 10, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord(11, 10, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord(11, 11, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord(10, 11, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord( 9, 11, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord( 9, 10, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord( 9, 9, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord(10, 9, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord(11, 9, 11), Vec3s(3.0, 3.0, 3.0)); acc.setValue(openvdb::Coord(10, 10, 9), Vec3s(4.0, 4.0, 4.0)); acc.setValue(openvdb::Coord(11, 10, 9), Vec3s(4.0, 4.0, 4.0)); acc.setValue(openvdb::Coord(11, 11, 9), Vec3s(4.0, 4.0, 4.0)); acc.setValue(openvdb::Coord(10, 11, 9), Vec3s(4.0, 4.0, 4.0)); acc.setValue(openvdb::Coord( 9, 11, 9), Vec3s(4.0, 4.0, 4.0)); acc.setValue(openvdb::Coord( 9, 10, 9), Vec3s(4.0, 4.0, 4.0)); acc.setValue(openvdb::Coord( 9, 9, 9), Vec3s(4.0, 4.0, 4.0)); acc.setValue(openvdb::Coord(10, 9, 9), Vec3s(4.0, 4.0, 4.0)); acc.setValue(openvdb::Coord(11, 9, 9), Vec3s(4.0, 4.0, 4.0)); openvdb::tools::GridSampler<AccessorType, openvdb::tools::BoxSampler> interpolator(acc, grid.transform()); Vec3SGrid::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.375f))); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(1.0f))); val = interpolator.sampleVoxel(11.0, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(2.0f))); val = interpolator.sampleVoxel(11.0, 11.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(2.0f))); val = interpolator.sampleVoxel(11.0, 11.0, 11.0); EXPECT_TRUE(val.eq(Vec3s(3.0f))); val = interpolator.sampleVoxel(9.0, 11.0, 9.0); EXPECT_TRUE(val.eq(Vec3s(4.0f))); val = interpolator.sampleVoxel(9.0, 10.0, 9.0); EXPECT_TRUE(val.eq(Vec3s(4.0f))); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(1.1f))); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_TRUE(val.eq(Vec3s(2.792f))); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.41f))); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.41f))); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_TRUE(val.eq(Vec3s(2.71f))); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_TRUE(val.eq(Vec3s(2.01f))); } template<typename GridType> void TestLinearInterp::testConstantValues() { typedef typename GridType::TreeType TreeType; float fillValue = 256.0f; GridType grid(fillValue); TreeType& tree = grid.tree(); // Add values to buffer zero. 
tree.setValue(openvdb::Coord(10, 10, 10), 2.0); tree.setValue(openvdb::Coord(11, 10, 10), 2.0); tree.setValue(openvdb::Coord(11, 11, 10), 2.0); tree.setValue(openvdb::Coord(10, 11, 10), 2.0); tree.setValue(openvdb::Coord( 9, 11, 10), 2.0); tree.setValue(openvdb::Coord( 9, 10, 10), 2.0); tree.setValue(openvdb::Coord( 9, 9, 10), 2.0); tree.setValue(openvdb::Coord(10, 9, 10), 2.0); tree.setValue(openvdb::Coord(11, 9, 10), 2.0); tree.setValue(openvdb::Coord(10, 10, 11), 2.0); tree.setValue(openvdb::Coord(11, 10, 11), 2.0); tree.setValue(openvdb::Coord(11, 11, 11), 2.0); tree.setValue(openvdb::Coord(10, 11, 11), 2.0); tree.setValue(openvdb::Coord( 9, 11, 11), 2.0); tree.setValue(openvdb::Coord( 9, 10, 11), 2.0); tree.setValue(openvdb::Coord( 9, 9, 11), 2.0); tree.setValue(openvdb::Coord(10, 9, 11), 2.0); tree.setValue(openvdb::Coord(11, 9, 11), 2.0); tree.setValue(openvdb::Coord(10, 10, 9), 2.0); tree.setValue(openvdb::Coord(11, 10, 9), 2.0); tree.setValue(openvdb::Coord(11, 11, 9), 2.0); tree.setValue(openvdb::Coord(10, 11, 9), 2.0); tree.setValue(openvdb::Coord( 9, 11, 9), 2.0); tree.setValue(openvdb::Coord( 9, 10, 9), 2.0); tree.setValue(openvdb::Coord( 9, 9, 9), 2.0); tree.setValue(openvdb::Coord(10, 9, 9), 2.0); tree.setValue(openvdb::Coord(11, 9, 9), 2.0); openvdb::tools::GridSampler<TreeType, openvdb::tools::BoxSampler> interpolator(grid); //openvdb::tools::LinearInterp<GridType> interpolator(*tree); typename GridType::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_NEAR(2.0, val, TOLERANCE); } TEST_F(TestLinearInterp, testConstantValuesFloat) { testConstantValues<openvdb::FloatGrid>(); } TEST_F(TestLinearInterp, testConstantValuesDouble) { testConstantValues<openvdb::DoubleGrid>(); } TEST_F(TestLinearInterp, testConstantValuesVec3S) { using namespace openvdb; Vec3s fillValue = Vec3s(256.0f, 256.0f, 256.0f); Vec3SGrid grid(fillValue); Vec3STree& tree = grid.tree(); // Add values to buffer zero. 
tree.setValue(openvdb::Coord(10, 10, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 10, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 11, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 10, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 9, 10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 10, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 10, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 11, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 11, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 11, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 10, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 9, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 9, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 9, 11), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 10, 9), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 10, 9), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 11, 9), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 11, 9), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 11, 9), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 10, 9), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( 9, 9, 9), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(10, 9, 9), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(11, 9, 9), Vec3s(2.0, 2.0, 2.0)); openvdb::tools::GridSampler<Vec3STree, openvdb::tools::BoxSampler> interpolator(grid); //openvdb::tools::LinearInterp<Vec3STree> interpolator(*tree); Vec3SGrid::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.0, 2.0, 2.0))); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(2.0, 2.0, 2.0))); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(2.0, 2.0, 2.0))); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_TRUE(val.eq(Vec3s(2.0, 2.0, 2.0))); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.0, 2.0, 2.0))); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_TRUE(val.eq(Vec3s(2.0, 2.0, 2.0))); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_TRUE(val.eq(Vec3s(2.0, 2.0, 2.0))); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_TRUE(val.eq(Vec3s(2.0, 2.0, 2.0))); } template<typename GridType> void TestLinearInterp::testFillValues() { //typedef typename GridType::TreeType TreeType; float fillValue = 256.0f; GridType grid(fillValue); //typename GridType::TreeType& tree = grid.tree(); openvdb::tools::GridSampler<GridType, openvdb::tools::BoxSampler> interpolator(grid); //openvdb::tools::LinearInterp<GridType> interpolator(*tree); typename GridType::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_NEAR(256.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_NEAR(256.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_NEAR(256.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_NEAR(256.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_NEAR(256.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_NEAR(256.0, 
val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_NEAR(256.0, val, TOLERANCE); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_NEAR(256.0, val, TOLERANCE); } TEST_F(TestLinearInterp, testFillValuesFloat) { testFillValues<openvdb::FloatGrid>(); } TEST_F(TestLinearInterp, testFillValuesDouble) { testFillValues<openvdb::DoubleGrid>(); } TEST_F(TestLinearInterp, testFillValuesVec3S) { using namespace openvdb; Vec3s fillValue = Vec3s(256.0f, 256.0f, 256.0f); Vec3SGrid grid(fillValue); //Vec3STree& tree = grid.tree(); openvdb::tools::GridSampler<Vec3SGrid, openvdb::tools::BoxSampler> interpolator(grid); //openvdb::tools::LinearInterp<Vec3STree> interpolator(*tree); Vec3SGrid::ValueType val = interpolator.sampleVoxel(10.5, 10.5, 10.5); EXPECT_TRUE(val.eq(Vec3s(256.0, 256.0, 256.0))); val = interpolator.sampleVoxel(10.0, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(256.0, 256.0, 256.0))); val = interpolator.sampleVoxel(10.1, 10.0, 10.0); EXPECT_TRUE(val.eq(Vec3s(256.0, 256.0, 256.0))); val = interpolator.sampleVoxel(10.8, 10.8, 10.8); EXPECT_TRUE(val.eq(Vec3s(256.0, 256.0, 256.0))); val = interpolator.sampleVoxel(10.1, 10.8, 10.5); EXPECT_TRUE(val.eq(Vec3s(256.0, 256.0, 256.0))); val = interpolator.sampleVoxel(10.8, 10.1, 10.5); EXPECT_TRUE(val.eq(Vec3s(256.0, 256.0, 256.0))); val = interpolator.sampleVoxel(10.5, 10.1, 10.8); EXPECT_TRUE(val.eq(Vec3s(256.0, 256.0, 256.0))); val = interpolator.sampleVoxel(10.5, 10.8, 10.1); EXPECT_TRUE(val.eq(Vec3s(256.0, 256.0, 256.0))); } template<typename GridType> void TestLinearInterp::testNegativeIndices() { typedef typename GridType::TreeType TreeType; float fillValue = 256.0f; GridType grid(fillValue); TreeType& tree = grid.tree(); tree.setValue(openvdb::Coord(-10, -10, -10), 1.0); tree.setValue(openvdb::Coord(-11, -10, -10), 2.0); tree.setValue(openvdb::Coord(-11, -11, -10), 2.0); tree.setValue(openvdb::Coord(-10, -11, -10), 2.0); tree.setValue(openvdb::Coord( -9, -11, -10), 2.0); tree.setValue(openvdb::Coord( -9, -10, -10), 2.0); tree.setValue(openvdb::Coord( -9, -9, -10), 2.0); tree.setValue(openvdb::Coord(-10, -9, -10), 2.0); tree.setValue(openvdb::Coord(-11, -9, -10), 2.0); tree.setValue(openvdb::Coord(-10, -10, -11), 3.0); tree.setValue(openvdb::Coord(-11, -10, -11), 3.0); tree.setValue(openvdb::Coord(-11, -11, -11), 3.0); tree.setValue(openvdb::Coord(-10, -11, -11), 3.0); tree.setValue(openvdb::Coord( -9, -11, -11), 3.0); tree.setValue(openvdb::Coord( -9, -10, -11), 3.0); tree.setValue(openvdb::Coord( -9, -9, -11), 3.0); tree.setValue(openvdb::Coord(-10, -9, -11), 3.0); tree.setValue(openvdb::Coord(-11, -9, -11), 3.0); tree.setValue(openvdb::Coord(-10, -10, -9), 4.0); tree.setValue(openvdb::Coord(-11, -10, -9), 4.0); tree.setValue(openvdb::Coord(-11, -11, -9), 4.0); tree.setValue(openvdb::Coord(-10, -11, -9), 4.0); tree.setValue(openvdb::Coord( -9, -11, -9), 4.0); tree.setValue(openvdb::Coord( -9, -10, -9), 4.0); tree.setValue(openvdb::Coord( -9, -9, -9), 4.0); tree.setValue(openvdb::Coord(-10, -9, -9), 4.0); tree.setValue(openvdb::Coord(-11, -9, -9), 4.0); //openvdb::tools::LinearInterp<GridType> interpolator(*tree); openvdb::tools::GridSampler<TreeType, openvdb::tools::BoxSampler> interpolator(grid); typename GridType::ValueType val = interpolator.sampleVoxel(-10.5, -10.5, -10.5); EXPECT_NEAR(2.375, val, TOLERANCE); val = interpolator.sampleVoxel(-10.0, -10.0, -10.0); EXPECT_NEAR(1.0, val, TOLERANCE); val = interpolator.sampleVoxel(-11.0, -10.0, -10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = 
interpolator.sampleVoxel(-11.0, -11.0, -10.0); EXPECT_NEAR(2.0, val, TOLERANCE); val = interpolator.sampleVoxel(-11.0, -11.0, -11.0); EXPECT_NEAR(3.0, val, TOLERANCE); val = interpolator.sampleVoxel(-9.0, -11.0, -9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(-9.0, -10.0, -9.0); EXPECT_NEAR(4.0, val, TOLERANCE); val = interpolator.sampleVoxel(-10.1, -10.0, -10.0); EXPECT_NEAR(1.1, val, TOLERANCE); val = interpolator.sampleVoxel(-10.8, -10.8, -10.8); EXPECT_NEAR(2.792, val, TOLERANCE); val = interpolator.sampleVoxel(-10.1, -10.8, -10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(-10.8, -10.1, -10.5); EXPECT_NEAR(2.41, val, TOLERANCE); val = interpolator.sampleVoxel(-10.5, -10.1, -10.8); EXPECT_NEAR(2.71, val, TOLERANCE); val = interpolator.sampleVoxel(-10.5, -10.8, -10.1); EXPECT_NEAR(2.01, val, TOLERANCE); } TEST_F(TestLinearInterp, testNegativeIndicesFloat) { testNegativeIndices<openvdb::FloatGrid>(); } TEST_F(TestLinearInterp, testNegativeIndicesDouble) { testNegativeIndices<openvdb::DoubleGrid>(); } TEST_F(TestLinearInterp, testNegativeIndicesVec3S) { using namespace openvdb; Vec3s fillValue = Vec3s(256.0f, 256.0f, 256.0f); Vec3SGrid grid(fillValue); Vec3STree& tree = grid.tree(); tree.setValue(openvdb::Coord(-10, -10, -10), Vec3s(1.0, 1.0, 1.0)); tree.setValue(openvdb::Coord(-11, -10, -10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(-11, -11, -10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(-10, -11, -10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( -9, -11, -10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( -9, -10, -10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord( -9, -9, -10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(-10, -9, -10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(-11, -9, -10), Vec3s(2.0, 2.0, 2.0)); tree.setValue(openvdb::Coord(-10, -10, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(-11, -10, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(-11, -11, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(-10, -11, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( -9, -11, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( -9, -10, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord( -9, -9, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(-10, -9, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(-11, -9, -11), Vec3s(3.0, 3.0, 3.0)); tree.setValue(openvdb::Coord(-10, -10, -9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(-11, -10, -9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(-11, -11, -9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(-10, -11, -9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( -9, -11, -9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( -9, -10, -9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord( -9, -9, -9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(-10, -9, -9), Vec3s(4.0, 4.0, 4.0)); tree.setValue(openvdb::Coord(-11, -9, -9), Vec3s(4.0, 4.0, 4.0)); openvdb::tools::GridSampler<Vec3SGrid, openvdb::tools::BoxSampler> interpolator(grid); //openvdb::tools::LinearInterp<Vec3STree> interpolator(*tree); Vec3SGrid::ValueType val = interpolator.sampleVoxel(-10.5, -10.5, -10.5); EXPECT_TRUE(val.eq(Vec3s(2.375f))); val = interpolator.sampleVoxel(-10.0, -10.0, -10.0); EXPECT_TRUE(val.eq(Vec3s(1.0f))); val = interpolator.sampleVoxel(-11.0, -10.0, -10.0); EXPECT_TRUE(val.eq(Vec3s(2.0f))); val = interpolator.sampleVoxel(-11.0, -11.0, 
-10.0); EXPECT_TRUE(val.eq(Vec3s(2.0f))); val = interpolator.sampleVoxel(-11.0, -11.0, -11.0); EXPECT_TRUE(val.eq(Vec3s(3.0f))); val = interpolator.sampleVoxel(-9.0, -11.0, -9.0); EXPECT_TRUE(val.eq(Vec3s(4.0f))); val = interpolator.sampleVoxel(-9.0, -10.0, -9.0); EXPECT_TRUE(val.eq(Vec3s(4.0f))); val = interpolator.sampleVoxel(-10.1, -10.0, -10.0); EXPECT_TRUE(val.eq(Vec3s(1.1f))); val = interpolator.sampleVoxel(-10.8, -10.8, -10.8); EXPECT_TRUE(val.eq(Vec3s(2.792f))); val = interpolator.sampleVoxel(-10.1, -10.8, -10.5); EXPECT_TRUE(val.eq(Vec3s(2.41f))); val = interpolator.sampleVoxel(-10.8, -10.1, -10.5); EXPECT_TRUE(val.eq(Vec3s(2.41f))); val = interpolator.sampleVoxel(-10.5, -10.1, -10.8); EXPECT_TRUE(val.eq(Vec3s(2.71f))); val = interpolator.sampleVoxel(-10.5, -10.8, -10.1); EXPECT_TRUE(val.eq(Vec3s(2.01f))); } template<typename GridType> void TestLinearInterp::testStencilsMatch() { typedef typename GridType::ValueType ValueType; GridType grid; typename GridType::TreeType& tree = grid.tree(); // using mostly recurring numbers tree.setValue(openvdb::Coord(0, 0, 0), ValueType(1.0/3.0)); tree.setValue(openvdb::Coord(0, 1, 0), ValueType(1.0/11.0)); tree.setValue(openvdb::Coord(0, 0, 1), ValueType(1.0/81.0)); tree.setValue(openvdb::Coord(1, 0, 0), ValueType(1.0/97.0)); tree.setValue(openvdb::Coord(1, 1, 0), ValueType(1.0/61.0)); tree.setValue(openvdb::Coord(0, 1, 1), ValueType(9.0/7.0)); tree.setValue(openvdb::Coord(1, 0, 1), ValueType(9.0/11.0)); tree.setValue(openvdb::Coord(1, 1, 1), ValueType(22.0/7.0)); const openvdb::Vec3f pos(7.0f/12.0f, 1.0f/3.0f, 2.0f/3.0f); {//using BoxSampler and BoxStencil openvdb::tools::GridSampler<GridType, openvdb::tools::BoxSampler> interpolator(grid); openvdb::math::BoxStencil<const GridType> stencil(grid); typename GridType::ValueType val1 = interpolator.sampleVoxel(pos.x(), pos.y(), pos.z()); stencil.moveTo(pos); typename GridType::ValueType val2 = stencil.interpolation(pos); EXPECT_EQ(val1, val2); } } TEST_F(TestLinearInterp, testStencilsMatchFloat) { testStencilsMatch<openvdb::FloatGrid>(); } TEST_F(TestLinearInterp, testStencilsMatchDouble) { testStencilsMatch<openvdb::DoubleGrid>(); }
39,428
C++
37.022179
99
0.622908
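All of the interpolation tests above follow one pattern: populate voxels around (10, 10, 10), wrap the grid (or its tree or accessor) in a GridSampler with BoxSampler, and query sampleVoxel() at fractional index-space coordinates. A minimal standalone sketch of that pattern follows; the grid values, the query point, and the expected result noted in the comment are illustrative.

#include <openvdb/openvdb.h>
#include <openvdb/tools/Interpolation.h>
#include <iostream>

int main()
{
    openvdb::initialize();

    // Sparse grid with a zero background and two active voxels.
    openvdb::FloatGrid grid(/*background=*/0.0f);
    openvdb::FloatGrid::Accessor acc = grid.getAccessor();
    acc.setValue(openvdb::Coord(10, 10, 10), 1.0f);
    acc.setValue(openvdb::Coord(11, 10, 10), 2.0f);

    // BoxSampler blends the eight voxels surrounding the query position.
    openvdb::tools::GridSampler<openvdb::FloatGrid, openvdb::tools::BoxSampler> sampler(grid);

    // Halfway between the two set voxels (fractional offset in x only), so the result is 1.5.
    const float v = sampler.sampleVoxel(10.5, 10.0, 10.0);
    std::cout << "interpolated value: " << v << "\n";
    return 0;
}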
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestLeaf.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/tree/LeafNode.h> #include <openvdb/math/Math.h>// for math::Random01(), math::Pow3() class TestLeaf: public ::testing::Test { public: void testBuffer(); void testGetValue(); }; typedef openvdb::tree::LeafNode<int, 3> LeafType; typedef LeafType::Buffer BufferType; using openvdb::Index; void TestLeaf::testBuffer() { {// access BufferType buf; for (Index i = 0; i < BufferType::size(); ++i) { buf.mData[i] = i; EXPECT_TRUE(buf[i] == buf.mData[i]); } for (Index i = 0; i < BufferType::size(); ++i) { buf[i] = i; EXPECT_EQ(int(i), buf[i]); } } {// swap BufferType buf0, buf1, buf2; int *buf0Data = buf0.mData; int *buf1Data = buf1.mData; for (Index i = 0; i < BufferType::size(); ++i) { buf0[i] = i; buf1[i] = i * 2; } buf0.swap(buf1); EXPECT_TRUE(buf0.mData == buf1Data); EXPECT_TRUE(buf1.mData == buf0Data); buf1.swap(buf0); EXPECT_TRUE(buf0.mData == buf0Data); EXPECT_TRUE(buf1.mData == buf1Data); buf0.swap(buf2); EXPECT_TRUE(buf2.mData == buf0Data); buf2.swap(buf0); EXPECT_TRUE(buf0.mData == buf0Data); } } TEST_F(TestLeaf, testBuffer) { testBuffer(); } void TestLeaf::testGetValue() { LeafType leaf(openvdb::Coord(0, 0, 0)); leaf.mBuffer[0] = 2; leaf.mBuffer[1] = 3; leaf.mBuffer[2] = 4; leaf.mBuffer[65] = 10; EXPECT_EQ(2, leaf.getValue(openvdb::Coord(0, 0, 0))); EXPECT_EQ(3, leaf.getValue(openvdb::Coord(0, 0, 1))); EXPECT_EQ(4, leaf.getValue(openvdb::Coord(0, 0, 2))); EXPECT_EQ(10, leaf.getValue(openvdb::Coord(1, 0, 1))); } TEST_F(TestLeaf, testGetValue) { testGetValue(); } TEST_F(TestLeaf, testSetValue) { LeafType leaf(openvdb::Coord(0, 0, 0), 3); openvdb::Coord xyz(0, 0, 0); leaf.setValueOn(xyz, 10); EXPECT_EQ(10, leaf.getValue(xyz)); xyz.reset(7, 7, 7); leaf.setValueOn(xyz, 7); EXPECT_EQ(7, leaf.getValue(xyz)); leaf.setValueOnly(xyz, 10); EXPECT_EQ(10, leaf.getValue(xyz)); xyz.reset(2, 3, 6); leaf.setValueOn(xyz, 236); EXPECT_EQ(236, leaf.getValue(xyz)); leaf.setValueOff(xyz, 1); EXPECT_EQ(1, leaf.getValue(xyz)); EXPECT_TRUE(!leaf.isValueOn(xyz)); } TEST_F(TestLeaf, testIsValueSet) { LeafType leaf(openvdb::Coord(0, 0, 0)); leaf.setValueOn(openvdb::Coord(1, 5, 7), 10); EXPECT_TRUE(leaf.isValueOn(openvdb::Coord(1, 5, 7))); EXPECT_TRUE(!leaf.isValueOn(openvdb::Coord(0, 5, 7))); EXPECT_TRUE(!leaf.isValueOn(openvdb::Coord(1, 6, 7))); EXPECT_TRUE(!leaf.isValueOn(openvdb::Coord(0, 5, 6))); } TEST_F(TestLeaf, testProbeValue) { LeafType leaf(openvdb::Coord(0, 0, 0)); leaf.setValueOn(openvdb::Coord(1, 6, 5), 10); LeafType::ValueType val; EXPECT_TRUE(leaf.probeValue(openvdb::Coord(1, 6, 5), val)); EXPECT_TRUE(!leaf.probeValue(openvdb::Coord(1, 6, 4), val)); } TEST_F(TestLeaf, testIterators) { LeafType leaf(openvdb::Coord(0, 0, 0), 2); leaf.setValueOn(openvdb::Coord(1, 2, 3), -3); leaf.setValueOn(openvdb::Coord(5, 2, 3), 4); LeafType::ValueType sum = 0; for (LeafType::ValueOnIter iter = leaf.beginValueOn(); iter; ++iter) sum += *iter; EXPECT_EQ((-3 + 4), sum); } TEST_F(TestLeaf, testEquivalence) { LeafType leaf( openvdb::Coord(0, 0, 0), 2); LeafType leaf2(openvdb::Coord(0, 0, 0), 3); EXPECT_TRUE(leaf != leaf2); for(openvdb::Index32 i = 0; i < LeafType::size(); ++i) { leaf.setValueOnly(i, i); leaf2.setValueOnly(i, i); } EXPECT_TRUE(leaf == leaf2); // set some values. 
leaf.setValueOn(openvdb::Coord(0, 0, 0), 1); leaf.setValueOn(openvdb::Coord(0, 1, 0), 1); leaf.setValueOn(openvdb::Coord(1, 1, 0), 1); leaf.setValueOn(openvdb::Coord(1, 1, 2), 1); leaf2.setValueOn(openvdb::Coord(0, 0, 0), 1); leaf2.setValueOn(openvdb::Coord(0, 1, 0), 1); leaf2.setValueOn(openvdb::Coord(1, 1, 0), 1); leaf2.setValueOn(openvdb::Coord(1, 1, 2), 1); EXPECT_TRUE(leaf == leaf2); leaf2.setValueOn(openvdb::Coord(0, 0, 1), 1); EXPECT_TRUE(leaf != leaf2); leaf2.setValueOff(openvdb::Coord(0, 0, 1), 1); EXPECT_TRUE(leaf == leaf2); } TEST_F(TestLeaf, testGetOrigin) { { LeafType leaf(openvdb::Coord(1, 0, 0), 1); EXPECT_EQ(openvdb::Coord(0, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(0, 0, 0), 1); EXPECT_EQ(openvdb::Coord(0, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(8, 0, 0), 1); EXPECT_EQ(openvdb::Coord(8, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(8, 1, 0), 1); EXPECT_EQ(openvdb::Coord(8, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(1024, 1, 3), 1); EXPECT_EQ(openvdb::Coord(128*8, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(1023, 1, 3), 1); EXPECT_EQ(openvdb::Coord(127*8, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(512, 512, 512), 1); EXPECT_EQ(openvdb::Coord(512, 512, 512), leaf.origin()); } { LeafType leaf(openvdb::Coord(2, 52, 515), 1); EXPECT_EQ(openvdb::Coord(0, 48, 512), leaf.origin()); } } TEST_F(TestLeaf, testIteratorGetCoord) { using namespace openvdb; LeafType leaf(openvdb::Coord(8, 8, 0), 2); EXPECT_EQ(Coord(8, 8, 0), leaf.origin()); leaf.setValueOn(Coord(1, 2, 3), -3); leaf.setValueOn(Coord(5, 2, 3), 4); LeafType::ValueOnIter iter = leaf.beginValueOn(); Coord xyz = iter.getCoord(); EXPECT_EQ(Coord(9, 10, 3), xyz); ++iter; xyz = iter.getCoord(); EXPECT_EQ(Coord(13, 10, 3), xyz); } TEST_F(TestLeaf, testNegativeIndexing) { using namespace openvdb; LeafType leaf(openvdb::Coord(-9, -2, -8), 1); EXPECT_EQ(Coord(-16, -8, -8), leaf.origin()); leaf.setValueOn(Coord(1, 2, 3), -3); leaf.setValueOn(Coord(5, 2, 3), 4); EXPECT_EQ(-3, leaf.getValue(Coord(1, 2, 3))); EXPECT_EQ(4, leaf.getValue(Coord(5, 2, 3))); LeafType::ValueOnIter iter = leaf.beginValueOn(); Coord xyz = iter.getCoord(); EXPECT_EQ(Coord(-15, -6, -5), xyz); ++iter; xyz = iter.getCoord(); EXPECT_EQ(Coord(-11, -6, -5), xyz); } TEST_F(TestLeaf, testIsConstant) { using namespace openvdb; const Coord origin(-9, -2, -8); {// check old version (v3.0 and older) with float // Acceptable range: first-value +/- tolerance const float val = 1.0f, tol = 0.01f; tree::LeafNode<float, 3> leaf(origin, val, true); float v = 0.0f; bool stat = false; EXPECT_TRUE(leaf.isConstant(v, stat, tol)); EXPECT_TRUE(stat); EXPECT_EQ(val, v); leaf.setValueOff(0); EXPECT_TRUE(!leaf.isConstant(v, stat, tol)); leaf.setValueOn(0); EXPECT_TRUE(leaf.isConstant(v, stat, tol)); leaf.setValueOn(0, val + 0.99f*tol); EXPECT_TRUE(leaf.isConstant(v, stat, tol)); EXPECT_TRUE(stat); EXPECT_EQ(val + 0.99f*tol, v); leaf.setValueOn(0, val + 1.01f*tol); EXPECT_TRUE(!leaf.isConstant(v, stat, tol)); } {// check old version (v3.0 and older) with double // Acceptable range: first-value +/- tolerance const double val = 1.0, tol = 0.00001; tree::LeafNode<double, 3> leaf(origin, val, true); double v = 0.0; bool stat = false; EXPECT_TRUE(leaf.isConstant(v, stat, tol)); EXPECT_TRUE(stat); EXPECT_EQ(val, v); leaf.setValueOff(0); EXPECT_TRUE(!leaf.isConstant(v, stat, tol)); leaf.setValueOn(0); EXPECT_TRUE(leaf.isConstant(v, stat, tol)); leaf.setValueOn(0, val + 0.99*tol); EXPECT_TRUE(leaf.isConstant(v, stat, 
tol)); EXPECT_TRUE(stat); EXPECT_EQ(val + 0.99*tol, v); leaf.setValueOn(0, val + 1.01*tol); EXPECT_TRUE(!leaf.isConstant(v, stat, tol)); } {// check newer version (v3.2 and newer) with float // Acceptable range: max - min <= tolerance const float val = 1.0, tol = 0.01f; tree::LeafNode<float, 3> leaf(origin, val, true); float vmin = 0.0f, vmax = 0.0f; bool stat = false; EXPECT_TRUE(leaf.isConstant(vmin, vmax, stat, tol)); EXPECT_TRUE(stat); EXPECT_EQ(val, vmin); EXPECT_EQ(val, vmax); leaf.setValueOff(0); EXPECT_TRUE(!leaf.isConstant(vmin, vmax, stat, tol)); leaf.setValueOn(0); EXPECT_TRUE(leaf.isConstant(vmin, vmax, stat, tol)); leaf.setValueOn(0, val + tol); EXPECT_TRUE(leaf.isConstant(vmin, vmax, stat, tol)); EXPECT_EQ(val, vmin); EXPECT_EQ(val + tol, vmax); leaf.setValueOn(0, val + 1.01f*tol); EXPECT_TRUE(!leaf.isConstant(vmin, vmax, stat, tol)); } {// check newer version (v3.2 and newer) with double // Acceptable range: (max- min) <= tolerance const double val = 1.0, tol = 0.000001; tree::LeafNode<double, 3> leaf(origin, val, true); double vmin = 0.0, vmax = 0.0; bool stat = false; EXPECT_TRUE(leaf.isConstant(vmin, vmax, stat, tol)); EXPECT_TRUE(stat); EXPECT_EQ(val, vmin); EXPECT_EQ(val, vmax); leaf.setValueOff(0); EXPECT_TRUE(!leaf.isConstant(vmin, vmax, stat, tol)); leaf.setValueOn(0); EXPECT_TRUE(leaf.isConstant(vmin, vmax, stat, tol)); leaf.setValueOn(0, val + tol); EXPECT_TRUE(leaf.isConstant(vmin, vmax, stat, tol)); EXPECT_EQ(val, vmin); EXPECT_EQ(val + tol, vmax); leaf.setValueOn(0, val + 1.01*tol); EXPECT_TRUE(!leaf.isConstant(vmin, vmax, stat, tol)); } {// check newer version (v3.2 and newer) with float and random values typedef tree::LeafNode<float,3> LeafNodeT; const float val = 1.0, tol = 1.0f; LeafNodeT leaf(origin, val, true); float min = 2.0f, max = -min; math::Random01 r(145);// random values in the range [0,1] for (Index i=0; i<LeafNodeT::NUM_VALUES; ++i) { const float v = float(r()); if (v < min) min = v; if (v > max) max = v; leaf.setValueOnly(i, v); } float vmin = 0.0f, vmax = 0.0f; bool stat = false; EXPECT_TRUE(leaf.isConstant(vmin, vmax, stat, tol)); EXPECT_TRUE(stat); EXPECT_TRUE(math::isApproxEqual(min, vmin)); EXPECT_TRUE(math::isApproxEqual(max, vmax)); } } TEST_F(TestLeaf, testMedian) { using namespace openvdb; const Coord origin(-9, -2, -8); std::vector<float> v{5, 6, 4, 3, 2, 6, 7, 9, 3}; tree::LeafNode<float, 3> leaf(origin, 1.0f, false); float val = 0.0f; EXPECT_EQ(Index(0), leaf.medianOn(val)); EXPECT_EQ(0.0f, val); EXPECT_EQ(leaf.numValues(), leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(0,0,0), v[0]); EXPECT_EQ(Index(1), leaf.medianOn(val)); EXPECT_EQ(v[0], val); EXPECT_EQ(leaf.numValues()-1, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(0,0,1), v[1]); EXPECT_EQ(Index(2), leaf.medianOn(val)); EXPECT_EQ(v[0], val); EXPECT_EQ(leaf.numValues()-2, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(0,2,1), v[2]); EXPECT_EQ(Index(3), leaf.medianOn(val)); EXPECT_EQ(v[0], val); EXPECT_EQ(leaf.numValues()-3, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(1,2,1), v[3]); EXPECT_EQ(Index(4), leaf.medianOn(val)); EXPECT_EQ(v[2], val); EXPECT_EQ(leaf.numValues()-4, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(1,2,3), v[4]); EXPECT_EQ(Index(5), leaf.medianOn(val)); EXPECT_EQ(v[2], val); 
EXPECT_EQ(leaf.numValues()-5, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(2,2,1), v[5]); EXPECT_EQ(Index(6), leaf.medianOn(val)); EXPECT_EQ(v[2], val); EXPECT_EQ(leaf.numValues()-6, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(2,4,1), v[6]); EXPECT_EQ(Index(7), leaf.medianOn(val)); EXPECT_EQ(v[0], val); EXPECT_EQ(leaf.numValues()-7, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(2,6,1), v[7]); EXPECT_EQ(Index(8), leaf.medianOn(val)); EXPECT_EQ(v[0], val); EXPECT_EQ(leaf.numValues()-8, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.setValue(Coord(7,2,1), v[8]); EXPECT_EQ(Index(9), leaf.medianOn(val)); EXPECT_EQ(v[0], val); EXPECT_EQ(leaf.numValues()-9, leaf.medianOff(val)); EXPECT_EQ(1.0f, val); EXPECT_EQ(1.0f, leaf.medianAll()); leaf.fill(2.0f, true); EXPECT_EQ(leaf.numValues(), leaf.medianOn(val)); EXPECT_EQ(2.0f, val); EXPECT_EQ(Index(0), leaf.medianOff(val)); EXPECT_EQ(2.0f, val); EXPECT_EQ(2.0f, leaf.medianAll()); } TEST_F(TestLeaf, testFill) { using namespace openvdb; const Coord origin(-9, -2, -8); const float bg = 0.0f, fg = 1.0f; tree::LeafNode<float, 3> leaf(origin, bg, false); const int bboxDim = 1 + int(leaf.dim() >> 1); auto bbox = CoordBBox::createCube(leaf.origin(), bboxDim); EXPECT_EQ(math::Pow3(bboxDim), int(bbox.volume())); bbox = leaf.getNodeBoundingBox(); leaf.fill(bbox, bg, false); EXPECT_TRUE(leaf.isEmpty()); leaf.fill(bbox, fg, true); EXPECT_TRUE(leaf.isDense()); leaf.fill(bbox, bg, false); EXPECT_TRUE(leaf.isEmpty()); // Fill a region that is larger than the node but that doesn't completely enclose it. bbox.max() = bbox.min() + (bbox.dim() >> 1); bbox.expand(bbox.min() - Coord{10}); leaf.fill(bbox, fg, true); // Verify that fill() correctly clips the fill region to the node. auto clippedBBox = leaf.getNodeBoundingBox(); clippedBBox.intersect(bbox); EXPECT_EQ(int(clippedBBox.volume()), int(leaf.onVoxelCount())); } TEST_F(TestLeaf, testCount) { using namespace openvdb; const Coord origin(-9, -2, -8); tree::LeafNode<float, 3> leaf(origin, 1.0f, false); EXPECT_EQ(Index(3), leaf.log2dim()); EXPECT_EQ(Index(8), leaf.dim()); EXPECT_EQ(Index(512), leaf.size()); EXPECT_EQ(Index(512), leaf.numValues()); EXPECT_EQ(Index(0), leaf.getLevel()); EXPECT_EQ(Index(1), leaf.getChildDim()); EXPECT_EQ(Index(1), leaf.leafCount()); EXPECT_EQ(Index(0), leaf.nonLeafCount()); EXPECT_EQ(Index(0), leaf.childCount()); std::vector<Index> dims; leaf.getNodeLog2Dims(dims); EXPECT_EQ(size_t(1), dims.size()); EXPECT_EQ(Index(3), dims[0]); }
15,258
C++
28.344231
89
0.585463
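The leaf tests above exercise LeafNode<T, Log2Dim> directly, outside any tree: origin snapping, setValueOn(), on-value iteration, and voxel counts. The sketch below strings those calls together in a standalone program; the coordinates and values are illustrative, and the 8x8x8 node size simply mirrors the Log2Dim = 3 used throughout the tests.

#include <openvdb/tree/LeafNode.h>
#include <iostream>

int main()
{
    typedef openvdb::tree::LeafNode<float, 3> LeafT;   // Log2Dim = 3 -> 8x8x8 voxels

    // Any coordinate inside the node's 8^3 block snaps to the same origin.
    LeafT leaf(openvdb::Coord(13, 2, 5), /*value=*/0.0f);
    std::cout << "origin: " << leaf.origin() << "\n";  // (8, 0, 0)

    leaf.setValueOn(openvdb::Coord(13, 2, 5), 1.5f);
    leaf.setValueOn(openvdb::Coord(9, 1, 1), -2.0f);

    // Visit only the voxels that were explicitly activated.
    for (LeafT::ValueOnIter it = leaf.beginValueOn(); it; ++it) {
        std::cout << it.getCoord() << " -> " << *it << "\n";
    }
    std::cout << "active voxels: " << leaf.onVoxelCount() << "\n";
    return 0;
}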
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestQuantizedUnitVec.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/math/QuantizedUnitVec.h> #include <openvdb/math/Math.h> #include <openvdb/math/Vec3.h> #include <sstream> #include <algorithm> #include <cmath> #include <ctime> class TestQuantizedUnitVec: public ::testing::Test { protected: // Generate a random number in the range [0, 1]. double randNumber() { return double(rand()) / (double(RAND_MAX) + 1.0); } }; //////////////////////////////////////// namespace { const uint16_t MASK_XSIGN = 0x8000, // 1000000000000000 MASK_YSIGN = 0x4000, // 0100000000000000 MASK_ZSIGN = 0x2000; // 0010000000000000 } //////////////////////////////////////// TEST_F(TestQuantizedUnitVec, testQuantization) { using namespace openvdb; using namespace openvdb::math; // // Check sign bits // Vec3s unitVec = Vec3s(-1.0, -1.0, -1.0); unitVec.normalize(); uint16_t quantizedVec = QuantizedUnitVec::pack(unitVec); EXPECT_TRUE((quantizedVec & MASK_XSIGN)); EXPECT_TRUE((quantizedVec & MASK_YSIGN)); EXPECT_TRUE((quantizedVec & MASK_ZSIGN)); unitVec[0] = -unitVec[0]; unitVec[2] = -unitVec[2]; quantizedVec = QuantizedUnitVec::pack(unitVec); EXPECT_TRUE(!(quantizedVec & MASK_XSIGN)); EXPECT_TRUE((quantizedVec & MASK_YSIGN)); EXPECT_TRUE(!(quantizedVec & MASK_ZSIGN)); unitVec[1] = -unitVec[1]; quantizedVec = QuantizedUnitVec::pack(unitVec); EXPECT_TRUE(!(quantizedVec & MASK_XSIGN)); EXPECT_TRUE(!(quantizedVec & MASK_YSIGN)); EXPECT_TRUE(!(quantizedVec & MASK_ZSIGN)); QuantizedUnitVec::flipSignBits(quantizedVec); EXPECT_TRUE((quantizedVec & MASK_XSIGN)); EXPECT_TRUE((quantizedVec & MASK_YSIGN)); EXPECT_TRUE((quantizedVec & MASK_ZSIGN)); unitVec[2] = -unitVec[2]; quantizedVec = QuantizedUnitVec::pack(unitVec); QuantizedUnitVec::flipSignBits(quantizedVec); EXPECT_TRUE((quantizedVec & MASK_XSIGN)); EXPECT_TRUE((quantizedVec & MASK_YSIGN)); EXPECT_TRUE(!(quantizedVec & MASK_ZSIGN)); // // Check conversion error // const double tol = 0.05; // component error tolerance const int numNormals = 40000; // init srand(0); const int n = int(std::sqrt(double(numNormals))); const double xScale = (2.0 * M_PI) / double(n); const double yScale = M_PI / double(n); double x, y, theta, phi; Vec3s n0, n1; // generate random normals, by uniformly distributing points on a unit-sphere. // loop over a [0 to n) x [0 to n) grid. for (int a = 0; a < n; ++a) { for (int b = 0; b < n; ++b) { // jitter, move to random pos. inside the current cell x = double(a) + randNumber(); y = double(b) + randNumber(); // remap to a lat/long map theta = y * yScale; // [0 to PI] phi = x * xScale; // [0 to 2PI] // convert to cartesian coordinates on a unit sphere. // spherical coordinate triplet (r=1, theta, phi) n0[0] = float(std::sin(theta)*std::cos(phi)); n0[1] = float(std::sin(theta)*std::sin(phi)); n0[2] = float(std::cos(theta)); EXPECT_NEAR(1.0, n0.length(), 1e-6); n1 = QuantizedUnitVec::unpack(QuantizedUnitVec::pack(n0)); EXPECT_NEAR(1.0, n1.length(), 1e-6); EXPECT_NEAR(n0[0], n1[0], tol); EXPECT_NEAR(n0[1], n1[1], tol); EXPECT_NEAR(n0[2], n1[2], tol); float sumDiff = std::abs(n0[0] - n1[0]) + std::abs(n0[1] - n1[1]) + std::abs(n0[2] - n1[2]); EXPECT_TRUE(sumDiff < (2.0 * tol)); } } }
3,772
C++
26.540146
82
0.587487
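The quantization test above packs unit normals into 16 bits with QuantizedUnitVec::pack() and checks that unpack() reproduces each component within a tolerance of 0.05. A minimal round-trip sketch with an illustrative input vector follows; it reuses only pack() and unpack() as called in the test.

#include <openvdb/math/Math.h>
#include <openvdb/math/QuantizedUnitVec.h>
#include <openvdb/math/Vec3.h>
#include <cstdint>
#include <iostream>

int main()
{
    using namespace openvdb;
    using namespace openvdb::math;

    Vec3s n(0.3f, -0.5f, 0.8f);
    n.normalize();  // pack() expects a unit vector

    const uint16_t packed = QuantizedUnitVec::pack(n);        // 16-bit code
    const Vec3s restored = QuantizedUnitVec::unpack(packed);  // approximate unit vector

    std::cout << "original:   " << n << "\n"
              << "restored:   " << restored << "\n"
              << "error norm: " << (n - restored).length() << "\n";
    return 0;
}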
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestAttributeArray.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/points/AttributeArray.h> #include <openvdb/points/AttributeSet.h> #include <openvdb/Types.h> #include <openvdb/math/Transform.h> #include <openvdb/io/File.h> #ifdef __clang__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-macros" #endif // Boost.Interprocess uses a header-only portion of Boost.DateTime #define BOOST_DATE_TIME_NO_LIB #ifdef __clang__ #pragma GCC diagnostic pop #endif #include <boost/interprocess/file_mapping.hpp> #include <boost/interprocess/mapped_region.hpp> #include <tbb/tick_count.h> #include <tbb/atomic.h> #include <cstdio> // for std::remove() #include <fstream> #include <sstream> #include <iostream> #ifdef _MSC_VER #include <boost/interprocess/detail/os_file_functions.hpp> // open_existing_file(), close_file() // boost::interprocess::detail was renamed to boost::interprocess::ipcdetail in Boost 1.48. // Ensure that both namespaces exist. namespace boost { namespace interprocess { namespace detail {} namespace ipcdetail {} } } #include <windows.h> #else #include <sys/types.h> // for struct stat #include <sys/stat.h> // for stat() #endif /// @brief io::MappedFile has a private constructor, so declare a class that acts as the friend class TestMappedFile { public: static openvdb::io::MappedFile::Ptr create(const std::string& filename) { return openvdb::SharedPtr<openvdb::io::MappedFile>(new openvdb::io::MappedFile(filename)); } }; /// @brief Functionality similar to openvdb::util::CpuTimer except with prefix padding and no decimals. /// /// @code /// ProfileTimer timer("algorithm 1"); /// // code to be timed goes here /// timer.stop(); /// @endcode class ProfileTimer { public: /// @brief Prints message and starts timer. /// /// @note Should normally be followed by a call to stop() ProfileTimer(const std::string& msg) { (void)msg; #ifdef PROFILE // padd string to 50 characters std::string newMsg(msg); if (newMsg.size() < 50) newMsg.insert(newMsg.end(), 50 - newMsg.size(), ' '); std::cerr << newMsg << " ... "; #endif mT0 = tbb::tick_count::now(); } ~ProfileTimer() { this->stop(); } /// Return Time diference in milliseconds since construction or start was called. inline double delta() const { tbb::tick_count::interval_t dt = tbb::tick_count::now() - mT0; return 1000.0*dt.seconds(); } /// @brief Print time in milliseconds since construction or start was called. 
inline void stop() const { #ifdef PROFILE std::stringstream ss; ss << std::setw(6) << ::round(this->delta()); std::cerr << "completed in " << ss.str() << " ms\n"; #endif } private: tbb::tick_count mT0; };// ProfileTimer struct ScopedFile { explicit ScopedFile(const std::string& s): pathname(s) {} ~ScopedFile() { if (!pathname.empty()) std::remove(pathname.c_str()); } const std::string pathname; }; using namespace openvdb; using namespace openvdb::points; class TestAttributeArray: public ::testing::Test { public: void SetUp() override { AttributeArray::clearRegistry(); } void TearDown() override { AttributeArray::clearRegistry(); } void testRegistry(); void testAccessorEval(); void testDelayedLoad(); }; // class TestAttributeArray //////////////////////////////////////// namespace { bool matchingNamePairs(const openvdb::NamePair& lhs, const openvdb::NamePair& rhs) { if (lhs.first != rhs.first) return false; if (lhs.second != rhs.second) return false; return true; } } // namespace //////////////////////////////////////// TEST_F(TestAttributeArray, testFixedPointConversion) { openvdb::math::Transform::Ptr transform(openvdb::math::Transform::createLinearTransform(/*voxelSize=*/0.1)); const float value = 33.5688040469035f; { // convert to fixed-point value const openvdb::Vec3f worldSpaceValue(value); const openvdb::Vec3f indexSpaceValue = transform->worldToIndex(worldSpaceValue); const float voxelSpaceValue = indexSpaceValue.x() - math::Round(indexSpaceValue.x()) + 0.5f; const uint32_t intValue = floatingPointToFixedPoint<uint32_t>(voxelSpaceValue); // convert back to floating-point value const float newVoxelSpaceValue = fixedPointToFloatingPoint<float>(intValue); const openvdb::Vec3f newIndexSpaceValue(newVoxelSpaceValue + math::Round(indexSpaceValue.x()) - 0.5f); const openvdb::Vec3f newWorldSpaceValue = transform->indexToWorld(newIndexSpaceValue); const float newValue = newWorldSpaceValue.x(); EXPECT_NEAR(value, newValue, /*tolerance=*/1e-6); } { // convert to fixed-point value (vector) const openvdb::Vec3f worldSpaceValue(value, value+1, value+2); const openvdb::Vec3f indexSpaceValue = transform->worldToIndex(worldSpaceValue); const float voxelSpaceValueX = indexSpaceValue.x() - math::Round(indexSpaceValue.x()) + 0.5f; const float voxelSpaceValueY = indexSpaceValue.y() - math::Round(indexSpaceValue.y()) + 0.5f; const float voxelSpaceValueZ = indexSpaceValue.z() - math::Round(indexSpaceValue.z()) + 0.5f; const openvdb::Vec3f voxelSpaceValue(voxelSpaceValueX, voxelSpaceValueY, voxelSpaceValueZ); const openvdb::math::Vec3<uint32_t> intValue = floatingPointToFixedPoint<openvdb::math::Vec3<uint32_t>>(voxelSpaceValue); // convert back to floating-point value (vector) const openvdb::Vec3f newVoxelSpaceValue = fixedPointToFloatingPoint<openvdb::Vec3f>(intValue); const float newIndexSpaceValueX = newVoxelSpaceValue.x() + math::Round(indexSpaceValue.x()) - 0.5f; const float newIndexSpaceValueY = newVoxelSpaceValue.y() + math::Round(indexSpaceValue.y()) - 0.5f; const float newIndexSpaceValueZ = newVoxelSpaceValue.z() + math::Round(indexSpaceValue.z()) - 0.5f; const openvdb::Vec3f newIndexSpaceValue(newIndexSpaceValueX, newIndexSpaceValueY, newIndexSpaceValueZ); const openvdb::Vec3f newWorldSpaceValue = transform->indexToWorld(newIndexSpaceValue); EXPECT_NEAR(worldSpaceValue.x(), newWorldSpaceValue.x(), /*tolerance=*/1e-6); EXPECT_NEAR(worldSpaceValue.y(), newWorldSpaceValue.y(), /*tolerance=*/1e-6); EXPECT_NEAR(worldSpaceValue.z(), newWorldSpaceValue.z(), /*tolerance=*/1e-6); } } namespace { // use a 
namespace {

// use a dummy factory as TypedAttributeArray::factory is private
static AttributeArray::Ptr factoryInt(Index n, Index strideOrTotalSize, bool constantStride,
    const Metadata*)
{
    return TypedAttributeArray<int>::create(n, strideOrTotalSize, constantStride);
}

} // namespace


void
TestAttributeArray::testRegistry()
{
    using AttributeF = TypedAttributeArray<float>;
    using AttributeFTrnc = TypedAttributeArray<float, TruncateCodec>;

    AttributeArray::clearRegistry();

    { // cannot create AttributeArray that is not registered
        EXPECT_TRUE(!AttributeArray::isRegistered(AttributeF::attributeType()));
        EXPECT_THROW(AttributeArray::create(AttributeF::attributeType(), Index(5)), LookupError);
    }

    { // throw when attempting to register a float type with an integer factory
        EXPECT_THROW(AttributeArray::registerType(
            AttributeF::attributeType(), factoryInt), KeyError);
    }

    // register the attribute array

    AttributeF::registerType();

    { // can register an AttributeArray with the same value type but different codec
        EXPECT_NO_THROW(AttributeFTrnc::registerType());
        EXPECT_TRUE(AttributeArray::isRegistered(AttributeF::attributeType()));
        EXPECT_TRUE(AttributeArray::isRegistered(AttributeFTrnc::attributeType()));
    }

    { // un-registering
        AttributeArray::unregisterType(AttributeF::attributeType());
        EXPECT_TRUE(!AttributeArray::isRegistered(AttributeF::attributeType()));
        EXPECT_TRUE(AttributeArray::isRegistered(AttributeFTrnc::attributeType()));
    }

    { // clearing registry
        AttributeF::registerType();
        AttributeArray::clearRegistry();
        EXPECT_TRUE(!AttributeArray::isRegistered(AttributeF::attributeType()));
    }
}

TEST_F(TestAttributeArray, testRegistry)
{
    testRegistry();
}

TEST_F(TestAttributeArray, testAttributeArray)
{
    using AttributeArrayF = TypedAttributeArray<float>;
    using AttributeArrayD = TypedAttributeArray<double>;

    {
        AttributeArray::Ptr attr(new AttributeArrayD(50));
        EXPECT_EQ(Index(50), attr->size());
    }

    {
        AttributeArray::Ptr attr(new AttributeArrayD(50));
        EXPECT_EQ(Index(50), attr->size());

        AttributeArrayD& typedAttr = static_cast<AttributeArrayD&>(*attr);

        typedAttr.set(0, 0.5);

        double value = 0.0;
        typedAttr.get(0, value);

        EXPECT_NEAR(double(0.5), value, /*tolerance=*/double(0.0));

        // test unsafe methods for get() and set()

        typedAttr.setUnsafe(0, 1.5);
        typedAttr.getUnsafe(0, value);

        EXPECT_NEAR(double(1.5), value, /*tolerance=*/double(0.0));

        // out-of-range get() and set()
        EXPECT_THROW(typedAttr.set(100, 0.5), openvdb::IndexError);
        EXPECT_THROW(typedAttr.set(100, 1), openvdb::IndexError);
        EXPECT_THROW(typedAttr.get(100, value), openvdb::IndexError);
        EXPECT_THROW(typedAttr.get(100), openvdb::IndexError);
    }

    { // test copy constructor and copy assignment operator
        AttributeArrayD attr1(10);
        AttributeArrayD attr2(5);

        attr1.set(9, 4.6);

        // copy constructor
        AttributeArrayD attr3(attr1);

        EXPECT_EQ(Index(10), attr3.size());
        EXPECT_EQ(4.6, attr3.get(9));

        // copy assignment operator
        attr2 = attr1;

        EXPECT_EQ(Index(10), attr2.size());
        EXPECT_EQ(4.6, attr2.get(9));
    }

#ifdef NDEBUG
    { // test setUnsafe and getUnsafe on uniform arrays
        AttributeArrayD::Ptr attr(new AttributeArrayD(50));
        EXPECT_EQ(Index(50), attr->size());

        attr->collapse(5.0);
        EXPECT_TRUE(attr->isUniform());
        EXPECT_NEAR(attr->getUnsafe(10), 5.0, /*tolerance=*/double(0.0));
        EXPECT_TRUE(attr->isUniform());

        // this is expected behaviour because, for performance reasons, the array is not
        // implicitly expanded
        attr->setUnsafe(10, 15.0);
        EXPECT_TRUE(attr->isUniform());
        EXPECT_NEAR(attr->getUnsafe(5), 15.0, /*tolerance=*/double(0.0));

        attr->expand();
        EXPECT_TRUE(!attr->isUniform());
        attr->setUnsafe(10,
25.0); EXPECT_NEAR(attr->getUnsafe(5), 15.0, /*tolerance=*/double(0.0)); EXPECT_NEAR(attr->getUnsafe(10), 25.0, /*tolerance=*/double(0.0)); } #endif using AttributeArrayC = TypedAttributeArray<double, FixedPointCodec<false>>; { // test hasValueType() AttributeArray::Ptr attrC(new AttributeArrayC(50)); AttributeArray::Ptr attrD(new AttributeArrayD(50)); AttributeArray::Ptr attrF(new AttributeArrayF(50)); EXPECT_TRUE(attrD->hasValueType<double>()); EXPECT_TRUE(attrC->hasValueType<double>()); EXPECT_TRUE(!attrF->hasValueType<double>()); EXPECT_TRUE(!attrD->hasValueType<float>()); EXPECT_TRUE(!attrC->hasValueType<float>()); EXPECT_TRUE(attrF->hasValueType<float>()); } { // lots of type checking #if OPENVDB_ABI_VERSION_NUMBER >= 6 Index size(50); { TypedAttributeArray<bool> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("bool"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(1), attr.valueTypeSize()); EXPECT_EQ(Index(1), attr.storageTypeSize()); EXPECT_TRUE(!attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<int8_t> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("int8"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(1), attr.valueTypeSize()); EXPECT_EQ(Index(1), attr.storageTypeSize()); EXPECT_TRUE(!attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<int16_t> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("int16"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(2), attr.valueTypeSize()); EXPECT_EQ(Index(2), attr.storageTypeSize()); EXPECT_TRUE(!attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<int32_t> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("int32"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(4), attr.valueTypeSize()); EXPECT_EQ(Index(4), attr.storageTypeSize()); EXPECT_TRUE(!attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<int64_t> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("int64"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(8), attr.valueTypeSize()); EXPECT_EQ(Index(8), attr.storageTypeSize()); EXPECT_TRUE(!attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { // half is not registered by default, but for complete-ness TypedAttributeArray<half> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("half"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(2), attr.valueTypeSize()); EXPECT_EQ(Index(2), attr.storageTypeSize()); EXPECT_TRUE(attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); 
EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<float> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("float"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(4), attr.valueTypeSize()); EXPECT_EQ(Index(4), attr.storageTypeSize()); EXPECT_TRUE(attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<double> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("double"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(8), attr.valueTypeSize()); EXPECT_EQ(Index(8), attr.storageTypeSize()); EXPECT_TRUE(attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<math::Vec3<int32_t>> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("vec3i"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(12), attr.valueTypeSize()); EXPECT_EQ(Index(12), attr.storageTypeSize()); EXPECT_TRUE(!attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(attr.valueTypeIsClass()); EXPECT_TRUE(attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<math::Vec3<double>> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("vec3d"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(24), attr.valueTypeSize()); EXPECT_EQ(Index(24), attr.storageTypeSize()); EXPECT_TRUE(attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(attr.valueTypeIsClass()); EXPECT_TRUE(attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<math::Mat3<float>> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("mat3s"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(36), attr.valueTypeSize()); EXPECT_EQ(Index(36), attr.storageTypeSize()); EXPECT_TRUE(attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(attr.valueTypeIsMatrix()); } { TypedAttributeArray<math::Mat4<double>> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("mat4d"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(128), attr.valueTypeSize()); EXPECT_EQ(Index(128), attr.storageTypeSize()); EXPECT_TRUE(attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(attr.valueTypeIsMatrix()); } { TypedAttributeArray<math::Quat<float>> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("quats"), attr.valueType()); EXPECT_EQ(Name("null"), attr.codecType()); EXPECT_EQ(Index(16), attr.valueTypeSize()); EXPECT_EQ(Index(16), attr.storageTypeSize()); EXPECT_TRUE(attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } { TypedAttributeArray<float, TruncateCodec> typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_EQ(Name("float"), attr.valueType()); 
EXPECT_EQ(Name("trnc"), attr.codecType()); EXPECT_EQ(Index(4), attr.valueTypeSize()); EXPECT_EQ(Index(2), attr.storageTypeSize()); EXPECT_TRUE(attr.valueTypeIsFloatingPoint()); EXPECT_TRUE(!attr.valueTypeIsClass()); EXPECT_TRUE(!attr.valueTypeIsVector()); EXPECT_TRUE(!attr.valueTypeIsQuaternion()); EXPECT_TRUE(!attr.valueTypeIsMatrix()); } #endif } { AttributeArray::Ptr attr(new AttributeArrayC(50)); AttributeArrayC& typedAttr = static_cast<AttributeArrayC&>(*attr); typedAttr.set(0, 0.5); double value = 0.0; typedAttr.get(0, value); EXPECT_NEAR(double(0.5), value, /*tolerance=*/double(0.0001)); // test unsafe methods for get() and set() double value2 = 0.0; typedAttr.setUnsafe(0, double(0.2)); typedAttr.getUnsafe(0, value2); EXPECT_NEAR(double(0.2), value2, /*tolerance=*/double(0.0001)); } using AttributeArrayI = TypedAttributeArray<int32_t>; { // Base class API AttributeArray::Ptr attr(new AttributeArrayI(50)); EXPECT_EQ(Index(50), attr->size()); EXPECT_EQ((sizeof(AttributeArrayI) + sizeof(int)), attr->memUsage()); EXPECT_TRUE(attr->isType<AttributeArrayI>()); EXPECT_TRUE(!attr->isType<AttributeArrayD>()); EXPECT_TRUE(*attr == *attr); } { // Typed class API const Index count = 50; const size_t uniformMemUsage = sizeof(AttributeArrayI) + sizeof(int); const size_t expandedMemUsage = sizeof(AttributeArrayI) + count * sizeof(int); AttributeArrayI attr(count); EXPECT_EQ(Index(count), attr.size()); EXPECT_EQ(0, attr.get(0)); EXPECT_EQ(0, attr.get(10)); EXPECT_TRUE(attr.isUniform()); EXPECT_EQ(uniformMemUsage, attr.memUsage()); attr.set(0, 10); EXPECT_TRUE(!attr.isUniform()); EXPECT_EQ(expandedMemUsage, attr.memUsage()); AttributeArrayI attr2(count); attr2.set(0, 10); EXPECT_TRUE(attr == attr2); attr.set(1, 5); EXPECT_TRUE(!attr.compact()); EXPECT_TRUE(!attr.isUniform()); EXPECT_EQ(10, attr.get(0)); EXPECT_EQ(5, attr.get(1)); EXPECT_EQ(0, attr.get(2)); attr.collapse(5); EXPECT_TRUE(attr.isUniform()); EXPECT_EQ(uniformMemUsage, attr.memUsage()); EXPECT_EQ(5, attr.get(0)); EXPECT_EQ(5, attr.get(20)); EXPECT_EQ(5, attr.getUnsafe(20)); attr.expand(/*fill=*/false); EXPECT_TRUE(!attr.isUniform()); EXPECT_EQ(expandedMemUsage, attr.memUsage()); attr.collapse(5); EXPECT_TRUE(attr.isUniform()); attr.expand(); EXPECT_TRUE(!attr.isUniform()); EXPECT_EQ(expandedMemUsage, attr.memUsage()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(5, attr.get(i)); } EXPECT_TRUE(attr.compact()); EXPECT_TRUE(attr.isUniform()); EXPECT_TRUE(attr.compact()); attr.expand(); attr.fill(10); EXPECT_TRUE(!attr.isUniform()); EXPECT_EQ(expandedMemUsage, attr.memUsage()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(10, attr.get(i)); } attr.collapse(7); EXPECT_TRUE(attr.isUniform()); EXPECT_EQ(uniformMemUsage, attr.memUsage()); EXPECT_EQ(7, attr.get(0)); EXPECT_EQ(7, attr.get(20)); attr.fill(5); EXPECT_TRUE(attr.isUniform()); EXPECT_EQ(uniformMemUsage, attr.memUsage()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(5, attr.get(i)); } EXPECT_TRUE(!attr.isTransient()); EXPECT_TRUE(!attr.isHidden()); attr.setTransient(true); EXPECT_TRUE(attr.isTransient()); EXPECT_TRUE(!attr.isHidden()); attr.setHidden(true); EXPECT_TRUE(attr.isTransient()); EXPECT_TRUE(attr.isHidden()); attr.setTransient(false); EXPECT_TRUE(!attr.isTransient()); EXPECT_TRUE(attr.isHidden()); attr.setHidden(false); EXPECT_TRUE(!attr.isTransient()); EXPECT_TRUE(!attr.isHidden()); attr.setHidden(true); { // test copy construction AttributeArrayI attrB(attr); EXPECT_TRUE(matchingNamePairs(attr.type(), attrB.type())); 
EXPECT_EQ(attr.size(), attrB.size()); EXPECT_EQ(attr.memUsage(), attrB.memUsage()); EXPECT_EQ(attr.isUniform(), attrB.isUniform()); EXPECT_EQ(attr.isTransient(), attrB.isTransient()); EXPECT_EQ(attr.isHidden(), attrB.isHidden()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attr.get(i), attrB.get(i)); EXPECT_EQ(attr.get(i), attrB.getUnsafe(i)); EXPECT_EQ(attr.getUnsafe(i), attrB.getUnsafe(i)); } } { // Equality using an unregistered attribute type TypedAttributeArray<half> attr1(50); TypedAttributeArray<half> attr2(50); EXPECT_TRUE(attr1 == attr2); } // attribute array must not be uniform for compression attr.set(1, 7); attr.set(2, 8); attr.set(6, 100); } { // Fixed codec (position range) AttributeArray::Ptr attr1(new AttributeArrayC(50)); AttributeArrayC& fixedPoint = static_cast<AttributeArrayC&>(*attr1); // position range is -0.5 => 0.5 fixedPoint.set(0, -0.6); fixedPoint.set(1, -0.4); fixedPoint.set(2, 0.4); fixedPoint.set(3, 0.6); EXPECT_NEAR(double(-0.5), fixedPoint.get(0), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(-0.4), fixedPoint.get(1), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(0.4), fixedPoint.get(2), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(0.5), fixedPoint.get(3), /*tolerance=*/double(0.0001)); } using UnitFixedPointCodec8 = FixedPointCodec<false, UnitRange>; using AttributeArrayUFxpt8 = TypedAttributeArray<float, UnitFixedPointCodec8>; { // 8-bit fixed codec (unit range) AttributeArray::Ptr attr1(new AttributeArrayUFxpt8(50)); AttributeArrayUFxpt8& fixedPoint = static_cast<AttributeArrayUFxpt8&>(*attr1); // unit range is 0.0 => 1.0 fixedPoint.set(0, -0.2); fixedPoint.set(1, 0.3); fixedPoint.set(2, 0.6); fixedPoint.set(3, 1.1); EXPECT_NEAR(double(0.0), fixedPoint.get(0), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(0.3), fixedPoint.get(1), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(0.6), fixedPoint.get(2), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(1.0), fixedPoint.get(3), /*tolerance=*/double(0.0001)); } using UnitFixedPointCodec16 = FixedPointCodec<false, UnitRange>; using AttributeArrayUFxpt16 = TypedAttributeArray<float, UnitFixedPointCodec16>; { // 16-bit fixed codec (unit range) AttributeArray::Ptr attr1(new AttributeArrayUFxpt16(50)); AttributeArrayUFxpt16& fixedPoint = static_cast<AttributeArrayUFxpt16&>(*attr1); // unit range is 0.0 => 1.0 fixedPoint.set(0, -0.2); fixedPoint.set(1, 0.3); fixedPoint.set(2, 0.6); fixedPoint.set(3, 1.1); EXPECT_NEAR(double(0.0), fixedPoint.get(0), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(0.3), fixedPoint.get(1), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(0.6), fixedPoint.get(2), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(1.0), fixedPoint.get(3), /*tolerance=*/double(0.0001)); } using AttributeArrayU = TypedAttributeArray<openvdb::Vec3f, UnitVecCodec>; { // UnitVec codec test AttributeArray::Ptr attr1(new AttributeArrayU(50)); AttributeArrayU& unitVec = static_cast<AttributeArrayU&>(*attr1); // all vectors must be unit length const openvdb::Vec3f vec1(1.0, 0.0, 0.0); const openvdb::Vec3f vec2(openvdb::Vec3f(1.0, 2.0, 3.0).unit()); const openvdb::Vec3f vec3(openvdb::Vec3f(1.0, 2.0, 300000.0).unit()); unitVec.set(0, vec1); unitVec.set(1, vec2); unitVec.set(2, vec3); EXPECT_NEAR(double(vec1.x()), unitVec.get(0).x(), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(vec1.y()), unitVec.get(0).y(), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(vec1.z()), unitVec.get(0).z(), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(vec2.x()), unitVec.get(1).x(), 
/*tolerance=*/double(0.0001)); EXPECT_NEAR(double(vec2.y()), unitVec.get(1).y(), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(vec2.z()), unitVec.get(1).z(), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(vec3.x()), unitVec.get(2).x(), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(vec3.y()), unitVec.get(2).y(), /*tolerance=*/double(0.0001)); EXPECT_NEAR(double(vec3.z()), unitVec.get(2).z(), /*tolerance=*/double(0.0001)); } { // IO const Index count = 50; AttributeArrayI attrA(count); for (unsigned i = 0; i < unsigned(count); ++i) { attrA.set(i, int(i)); } attrA.setHidden(true); std::ostringstream ostr(std::ios_base::binary); io::setDataCompression(ostr, io::COMPRESS_BLOSC); attrA.write(ostr); AttributeArrayI attrB; std::istringstream istr(ostr.str(), std::ios_base::binary); attrB.read(istr); EXPECT_TRUE(attrA == attrB); AttributeArrayI attrC(count, 3); attrC.setTransient(true); std::ostringstream ostrC(std::ios_base::binary); attrC.write(ostrC); EXPECT_TRUE(ostrC.str().empty()); std::ostringstream ostrD(std::ios_base::binary); attrC.write(ostrD, /*transient=*/true); EXPECT_TRUE(!ostrD.str().empty()); } // Registry AttributeArrayI::registerType(); AttributeArray::Ptr attr = AttributeArray::create( AttributeArrayI::attributeType(), 34); { // Casting AttributeArray::Ptr array = TypedAttributeArray<float>::create(0); EXPECT_NO_THROW(TypedAttributeArray<float>::cast(*array)); EXPECT_THROW(TypedAttributeArray<int>::cast(*array), TypeError); AttributeArray::ConstPtr constArray = array; EXPECT_NO_THROW(TypedAttributeArray<float>::cast(*constArray)); EXPECT_THROW(TypedAttributeArray<int>::cast(*constArray), TypeError); } } struct VectorWrapper { using T = std::vector<std::pair<Index, Index>>; VectorWrapper(const T& _data) : data(_data) { } operator bool() const { return index < data.size(); } VectorWrapper& operator++() { index++; return *this; } Index sourceIndex() const { assert(*this); return data[index].first; } Index targetIndex() const { assert(*this); return data[index].second; } private: const T& data; T::size_type index = 0; }; // struct VectorWrapper TEST_F(TestAttributeArray, testAttributeArrayCopy) { using AttributeArrayD = TypedAttributeArray<double>; Index size(50); // initialize some test data AttributeArrayD sourceTypedAttr(size); AttributeArray& sourceAttr(sourceTypedAttr); EXPECT_EQ(size, sourceAttr.size()); sourceAttr.expand(); for (Index i = 0; i < size; i++) { sourceTypedAttr.set(i, double(i)/2); } // initialize source -> target pairs that reverse the order std::vector<std::pair<Index, Index>> indexPairs; for (Index i = 0; i < size; i++) { indexPairs.push_back(std::make_pair(i, size-i-1)); } // create a new index pair wrapper VectorWrapper wrapper(indexPairs); // build a target attribute array AttributeArrayD targetTypedAttr(size); AttributeArray& targetAttr(targetTypedAttr); for (const auto& pair : indexPairs) { targetTypedAttr.set(pair.second, sourceTypedAttr.get(pair.first)); } #if OPENVDB_ABI_VERSION_NUMBER < 6 { // verify behaviour with slow virtual function (ABI<6) AttributeArrayD typedAttr(size); AttributeArray& attr(typedAttr); for (const auto& pair : indexPairs) { attr.set(pair.second, sourceAttr, pair.first); } EXPECT_TRUE(targetAttr == attr); } #else using AttributeArrayF = TypedAttributeArray<float>; { // use std::vector<std::pair<Index, Index>>::begin() as iterator to AttributeArray::copy() AttributeArrayD typedAttr(size); AttributeArray& attr(typedAttr); attr.copyValues(sourceAttr, wrapper); EXPECT_TRUE(targetAttr == attr); } { // attempt to copy values 
between attribute arrays with different storage sizes AttributeArrayF typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_THROW(attr.copyValues(sourceAttr, wrapper), TypeError); } { // attempt to copy values between integer and float attribute arrays AttributeArrayF typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_THROW(attr.copyValues(sourceAttr, wrapper), TypeError); } { // copy values between attribute arrays with different value types, but the same storage type // target half array TypedAttributeArray<half> targetTypedAttr1(size); AttributeArray& targetAttr1(targetTypedAttr1); for (Index i = 0; i < size; i++) { targetTypedAttr1.set(i, io::RealToHalf<double>::convert(sourceTypedAttr.get(i))); } // truncated float array TypedAttributeArray<float, TruncateCodec> targetTypedAttr2(size); AttributeArray& targetAttr2(targetTypedAttr2); targetAttr2.copyValues(targetAttr1, wrapper); // equality fails as attribute types are not the same EXPECT_TRUE(targetAttr2 != targetAttr); EXPECT_TRUE(targetAttr2.type() != targetAttr.type()); // however testing value equality succeeds for (Index i = 0; i < size; i++) { EXPECT_TRUE(targetTypedAttr2.get(i) == targetTypedAttr.get(i)); } } { // out-of-range checking AttributeArrayD typedAttr(size); AttributeArray& attr(typedAttr); decltype(indexPairs) rangeIndexPairs(indexPairs); rangeIndexPairs[10].first = size+1; VectorWrapper rangeWrapper(rangeIndexPairs); EXPECT_THROW(attr.copyValues(sourceAttr, rangeWrapper), IndexError); rangeIndexPairs[10].first = 0; EXPECT_NO_THROW(attr.copyValues(sourceAttr, rangeWrapper)); rangeIndexPairs[10].second = size+1; EXPECT_THROW(attr.copyValues(sourceAttr, rangeWrapper), IndexError); } { // source attribute array is uniform AttributeArrayD uniformTypedAttr(size); AttributeArray& uniformAttr(uniformTypedAttr); uniformTypedAttr.collapse(5.3); EXPECT_TRUE(uniformAttr.isUniform()); AttributeArrayD typedAttr(size); AttributeArray& attr(typedAttr); EXPECT_TRUE(attr.isUniform()); attr.copyValues(uniformAttr, wrapper); EXPECT_TRUE(attr.isUniform()); attr.copyValues(uniformAttr, wrapper, /*preserveUniformity=*/false); EXPECT_TRUE(!attr.isUniform()); typedAttr.collapse(1.4); EXPECT_TRUE(attr.isUniform()); // resize the vector to be smaller than the size of the array decltype(indexPairs) subsetIndexPairs(indexPairs); subsetIndexPairs.resize(size-1); decltype(wrapper) subsetWrapper(subsetIndexPairs); // now copy the values attempting to preserve uniformity attr.copyValues(uniformAttr, subsetWrapper, /*preserveUniformity=*/true); // verify that the array cannot be kept uniform EXPECT_TRUE(!attr.isUniform()); } { // target attribute array is uniform AttributeArrayD uniformTypedAttr(size); AttributeArray& uniformAttr(uniformTypedAttr); uniformTypedAttr.collapse(5.3); EXPECT_TRUE(uniformAttr.isUniform()); AttributeArrayD typedAttr(size); AttributeArray& attr(typedAttr); typedAttr.set(5, 1.2); typedAttr.set(10, 3.1); EXPECT_TRUE(!attr.isUniform()); std::vector<std::pair<Index, Index>> uniformIndexPairs; uniformIndexPairs.push_back(std::make_pair(10, 0)); uniformIndexPairs.push_back(std::make_pair(5, 0)); VectorWrapper uniformWrapper(uniformIndexPairs); // note that calling copyValues() will implicitly expand the uniform target EXPECT_NO_THROW(uniformAttr.copyValuesUnsafe(attr, uniformWrapper)); EXPECT_TRUE(uniformAttr.isUniform()); EXPECT_TRUE(uniformTypedAttr.get(0) == typedAttr.get(5)); } #endif } void TestAttributeArray::testAccessorEval() { using AttributeF = TypedAttributeArray<float>; struct TestAccessor { static float 
getterError(const AttributeArray* /*array*/, const Index /*n*/) { OPENVDB_THROW(NotImplementedError, ""); } static void setterError [[noreturn]] (AttributeArray* /*array*/, const Index /*n*/, const float& /*value*/) { OPENVDB_THROW(NotImplementedError, ""); } //static float testGetter(const AttributeArray* array, const Index n) { // return AccessorEval<UnknownCodec, float>::get(&getterError, array, n); //} //static void testSetter(AttributeArray* array, const Index n, const float& value) { // AccessorEval<UnknownCodec, float>::set(&setterError, array, n, value); //} }; { // test get and set (NullCodec) AttributeF::Ptr attr = AttributeF::create(10); attr->collapse(5.0f); attr->expand(); AttributeArray& array = *attr; // explicit codec is used here so getter and setter are not called AttributeWriteHandle<float, NullCodec> writeHandle(array); writeHandle.mSetter = TestAccessor::setterError; writeHandle.set(4, 15.0f); AttributeHandle<float, NullCodec> handle(array); const AttributeArray& constArray(array); EXPECT_EQ(&constArray, &handle.array()); handle.mGetter = TestAccessor::getterError; const float result1 = handle.get(4); const float result2 = handle.get(6); EXPECT_EQ(15.0f, result1); EXPECT_EQ(5.0f, result2); } { // test get and set (UnknownCodec) AttributeF::Ptr attr = AttributeF::create(10); attr->collapse(5.0f); attr->expand(); AttributeArray& array = *attr; // unknown codec is used here so getter and setter are called AttributeWriteHandle<float, UnknownCodec> writeHandle(array); EXPECT_EQ(&array, &writeHandle.array()); writeHandle.mSetter = TestAccessor::setterError; EXPECT_THROW(writeHandle.set(4, 15.0f), NotImplementedError); AttributeHandle<float, UnknownCodec> handle(array); handle.mGetter = TestAccessor::getterError; EXPECT_THROW(handle.get(4), NotImplementedError); } } TEST_F(TestAttributeArray, testAccessorEval) { testAccessorEval(); } TEST_F(TestAttributeArray, testAttributeHandle) { using namespace openvdb::math; using AttributeI = TypedAttributeArray<int>; using AttributeFH = TypedAttributeArray<float, TruncateCodec>; using AttributeVec3f = TypedAttributeArray<Vec3f>; using AttributeHandleRWI = AttributeWriteHandle<int>; AttributeI::registerType(); AttributeFH::registerType(); AttributeVec3f::registerType(); // create a Descriptor and AttributeSet using Descriptor = AttributeSet::Descriptor; Descriptor::Ptr descr = Descriptor::create(AttributeVec3f::attributeType()); unsigned count = 500; AttributeSet attrSet(descr, /*arrayLength=*/count); attrSet.appendAttribute("truncate", AttributeFH::attributeType()); attrSet.appendAttribute("int", AttributeI::attributeType()); // check uniform value implementation { AttributeArray* array = attrSet.get(2); AttributeHandleRWI nonExpandingHandle(*array, /*expand=*/false); EXPECT_TRUE(nonExpandingHandle.isUniform()); AttributeHandleRWI handle(*array); EXPECT_TRUE(!handle.isUniform()); EXPECT_EQ(array->size(), handle.size()); EXPECT_EQ(0, handle.get(0)); EXPECT_EQ(0, handle.get(10)); handle.set(0, 10); EXPECT_TRUE(!handle.isUniform()); handle.collapse(5); EXPECT_TRUE(handle.isUniform()); EXPECT_EQ(5, handle.get(0)); EXPECT_EQ(5, handle.get(20)); handle.expand(); EXPECT_TRUE(!handle.isUniform()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(5, handle.get(i)); } EXPECT_TRUE(handle.compact()); EXPECT_TRUE(handle.isUniform()); handle.expand(); handle.fill(10); EXPECT_TRUE(!handle.isUniform()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(10, handle.get(i)); } handle.collapse(7); EXPECT_TRUE(handle.isUniform()); 
EXPECT_EQ(7, handle.get(0)); EXPECT_EQ(7, handle.get(20)); handle.fill(5); EXPECT_TRUE(handle.isUniform()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(5, handle.get(i)); } EXPECT_TRUE(handle.isUniform()); } { AttributeArray* array = attrSet.get(0); AttributeWriteHandle<Vec3f> handle(*array); handle.set(5, Vec3f(10)); EXPECT_EQ(Vec3f(10), handle.get(5)); } { AttributeArray* array = attrSet.get(1); AttributeWriteHandle<float> handle(*array); handle.set(6, float(11)); EXPECT_EQ(float(11), handle.get(6)); { AttributeHandle<float> handleRO(*array); EXPECT_EQ(float(11), handleRO.get(6)); } } // check values have been correctly set without using handles { AttributeVec3f* array = static_cast<AttributeVec3f*>(attrSet.get(0)); EXPECT_TRUE(array); EXPECT_EQ(Vec3f(10), array->get(5)); } { AttributeFH* array = static_cast<AttributeFH*>(attrSet.get(1)); EXPECT_TRUE(array); EXPECT_EQ(float(11), array->get(6)); } } TEST_F(TestAttributeArray, testStrided) { using AttributeArrayI = TypedAttributeArray<int>; using StridedHandle = AttributeHandle<int, /*CodecType=*/UnknownCodec>; using StridedWriteHandle = AttributeWriteHandle<int, /*CodecType=*/UnknownCodec>; { // non-strided array AttributeArrayI::Ptr array = AttributeArrayI::create(/*n=*/2, /*stride=*/1); EXPECT_TRUE(array->hasConstantStride()); EXPECT_EQ(Index(1), array->stride()); EXPECT_EQ(Index(2), array->size()); EXPECT_EQ(Index(2), array->dataSize()); } { // strided array AttributeArrayI::Ptr array = AttributeArrayI::create(/*n=*/2, /*stride=*/3); EXPECT_TRUE(array->hasConstantStride()); EXPECT_EQ(Index(3), array->stride()); EXPECT_EQ(Index(2), array->size()); EXPECT_EQ(Index(6), array->dataSize()); EXPECT_TRUE(array->isUniform()); EXPECT_EQ(0, array->get(0)); EXPECT_EQ(0, array->get(5)); EXPECT_THROW(array->get(6), IndexError); // out-of-range EXPECT_NO_THROW(StridedHandle::create(*array)); EXPECT_NO_THROW(StridedWriteHandle::create(*array)); array->collapse(10); EXPECT_EQ(int(10), array->get(0)); EXPECT_EQ(int(10), array->get(5)); array->expand(); EXPECT_EQ(int(10), array->get(0)); EXPECT_EQ(int(10), array->get(5)); array->collapse(0); EXPECT_EQ(int(0), array->get(0)); EXPECT_EQ(int(0), array->get(5)); StridedWriteHandle writeHandle(*array); writeHandle.set(0, 2, 5); writeHandle.set(1, 1, 10); EXPECT_EQ(Index(3), writeHandle.stride()); EXPECT_EQ(Index(2), writeHandle.size()); // non-interleaved: 0 0 5 0 10 0 EXPECT_EQ(5, array->get(2)); EXPECT_EQ(10, array->get(4)); EXPECT_EQ(5, writeHandle.get(0, 2)); EXPECT_EQ(10, writeHandle.get(1, 1)); StridedHandle handle(*array); EXPECT_TRUE(handle.hasConstantStride()); EXPECT_EQ(5, handle.get(0, 2)); EXPECT_EQ(10, handle.get(1, 1)); EXPECT_EQ(Index(3), handle.stride()); EXPECT_EQ(Index(2), handle.size()); // as of ABI=6, the base memory requirements of an AttributeArray have been lowered #if OPENVDB_ABI_VERSION_NUMBER >= 6 size_t arrayMem = 40; #else size_t arrayMem = 64; #endif EXPECT_EQ(sizeof(int) * /*size*/3 * /*stride*/2 + arrayMem, array->memUsage()); } { // dynamic stride AttributeArrayI::Ptr array = AttributeArrayI::create( /*n=*/2, /*stride=*/7, /*constantStride=*/false); EXPECT_TRUE(!array->hasConstantStride()); // zero indicates dynamic striding EXPECT_EQ(Index(0), array->stride()); EXPECT_EQ(Index(2), array->size()); // the actual array size EXPECT_EQ(Index(7), array->dataSize()); EXPECT_TRUE(array->isUniform()); EXPECT_EQ(0, array->get(0)); EXPECT_EQ(0, array->get(6)); EXPECT_THROW(array->get(7), IndexError); // out-of-range EXPECT_NO_THROW(StridedHandle::create(*array)); 
EXPECT_NO_THROW(StridedWriteHandle::create(*array)); // handle is bound as if a linear array with stride 1 StridedHandle handle(*array); EXPECT_TRUE(!handle.hasConstantStride()); EXPECT_EQ(Index(1), handle.stride()); EXPECT_EQ(array->dataSize(), handle.size()); } { // IO const Index count = 50, total = 100; AttributeArrayI attrA(count, total, /*constantStride=*/false); for (unsigned i = 0; i < unsigned(total); ++i) { attrA.set(i, int(i)); } std::ostringstream ostr(std::ios_base::binary); io::setDataCompression(ostr, io::COMPRESS_BLOSC); attrA.write(ostr); AttributeArrayI attrB; std::istringstream istr(ostr.str(), std::ios_base::binary); attrB.read(istr); EXPECT_TRUE(attrA == attrB); } } void TestAttributeArray::testDelayedLoad() { using AttributeArrayI = TypedAttributeArray<int>; using AttributeArrayF = TypedAttributeArray<float>; AttributeArrayI::registerType(); AttributeArrayF::registerType(); SharedPtr<io::MappedFile> mappedFile; io::StreamMetadata::Ptr streamMetadata(new io::StreamMetadata); std::string tempDir; if (const char* dir = std::getenv("TMPDIR")) tempDir = dir; #ifdef _MSC_VER if (tempDir.empty()) { char tempDirBuffer[MAX_PATH+1]; int tempDirLen = GetTempPath(MAX_PATH+1, tempDirBuffer); EXPECT_TRUE(tempDirLen > 0 && tempDirLen <= MAX_PATH); tempDir = tempDirBuffer; } #else if (tempDir.empty()) tempDir = P_tmpdir; #endif { // IO const Index count = 50; AttributeArrayI attrA(count); for (unsigned i = 0; i < unsigned(count); ++i) { attrA.set(i, int(i)); } AttributeArrayF attrA2(count); std::string filename; // write out attribute array to a temp file { filename = tempDir + "/openvdb_delayed1"; std::ofstream fileout(filename.c_str(), std::ios_base::binary); io::setStreamMetadataPtr(fileout, streamMetadata); io::setDataCompression(fileout, io::COMPRESS_BLOSC); attrA.writeMetadata(fileout, false, /*paged=*/true); compression::PagedOutputStream outputStreamSize(fileout); outputStreamSize.setSizeOnly(true); attrA.writePagedBuffers(outputStreamSize, false); outputStreamSize.flush(); compression::PagedOutputStream outputStream(fileout); outputStream.setSizeOnly(false); attrA.writePagedBuffers(outputStream, false); outputStream.flush(); attrA2.writeMetadata(fileout, false, /*paged=*/true); compression::PagedOutputStream outputStreamSize2(fileout); outputStreamSize2.setSizeOnly(true); attrA2.writePagedBuffers(outputStreamSize2, false); outputStreamSize2.flush(); compression::PagedOutputStream outputStream2(fileout); outputStream2.setSizeOnly(false); attrA2.writePagedBuffers(outputStream2, false); outputStream2.flush(); fileout.close(); } mappedFile = TestMappedFile::create(filename); // read in using delayed load and check manual loading of data { AttributeArrayI attrB; AttributeArrayF attrB2; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(matchingNamePairs(attrA.type(), attrB.type())); EXPECT_EQ(attrA.size(), attrB.size()); EXPECT_EQ(attrA.isUniform(), attrB.isUniform()); EXPECT_EQ(attrA.isTransient(), attrB.isTransient()); EXPECT_EQ(attrA.isHidden(), attrB.isHidden()); AttributeArrayI attrBcopy(attrB); AttributeArrayI attrBequal = attrB; EXPECT_TRUE(attrB.isOutOfCore()); EXPECT_TRUE(attrBcopy.isOutOfCore()); 
EXPECT_TRUE(attrBequal.isOutOfCore()); #if OPENVDB_ABI_VERSION_NUMBER >= 6 EXPECT_TRUE(!static_cast<AttributeArray&>(attrB).isDataLoaded()); EXPECT_TRUE(!static_cast<AttributeArray&>(attrBcopy).isDataLoaded()); EXPECT_TRUE(!static_cast<AttributeArray&>(attrBequal).isDataLoaded()); #endif attrB.loadData(); attrBcopy.loadData(); attrBequal.loadData(); EXPECT_TRUE(!attrB.isOutOfCore()); EXPECT_TRUE(!attrBcopy.isOutOfCore()); EXPECT_TRUE(!attrBequal.isOutOfCore()); #if OPENVDB_ABI_VERSION_NUMBER >= 6 EXPECT_TRUE(static_cast<AttributeArray&>(attrB).isDataLoaded()); EXPECT_TRUE(static_cast<AttributeArray&>(attrBcopy).isDataLoaded()); EXPECT_TRUE(static_cast<AttributeArray&>(attrBequal).isDataLoaded()); #endif EXPECT_EQ(attrA.memUsage(), attrB.memUsage()); EXPECT_EQ(attrA.memUsage(), attrBcopy.memUsage()); EXPECT_EQ(attrA.memUsage(), attrBequal.memUsage()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), attrB.get(i)); EXPECT_EQ(attrA.get(i), attrBcopy.get(i)); EXPECT_EQ(attrA.get(i), attrBequal.get(i)); } attrB2.readMetadata(filein); compression::PagedInputStream inputStream2(filein); inputStream2.setSizeOnly(true); attrB2.readPagedBuffers(inputStream2); inputStream2.setSizeOnly(false); attrB2.readPagedBuffers(inputStream2); EXPECT_TRUE(matchingNamePairs(attrA2.type(), attrB2.type())); EXPECT_EQ(attrA2.size(), attrB2.size()); EXPECT_EQ(attrA2.isUniform(), attrB2.isUniform()); EXPECT_EQ(attrA2.isTransient(), attrB2.isTransient()); EXPECT_EQ(attrA2.isHidden(), attrB2.isHidden()); AttributeArrayF attrB2copy(attrB2); AttributeArrayF attrB2equal = attrB2; EXPECT_TRUE(attrB2.isOutOfCore()); EXPECT_TRUE(attrB2copy.isOutOfCore()); EXPECT_TRUE(attrB2equal.isOutOfCore()); attrB2.loadData(); attrB2copy.loadData(); attrB2equal.loadData(); EXPECT_TRUE(!attrB2.isOutOfCore()); EXPECT_TRUE(!attrB2copy.isOutOfCore()); EXPECT_TRUE(!attrB2equal.isOutOfCore()); EXPECT_EQ(attrA2.memUsage(), attrB2.memUsage()); EXPECT_EQ(attrA2.memUsage(), attrB2copy.memUsage()); EXPECT_EQ(attrA2.memUsage(), attrB2equal.memUsage()); EXPECT_EQ(attrA2.get(0), attrB2.get(0)); EXPECT_EQ(attrA2.get(0), attrB2copy.get(0)); EXPECT_EQ(attrA2.get(0), attrB2equal.get(0)); } // read in using delayed load and check fill() { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); EXPECT_TRUE(!attrB.isUniform()); attrB.fill(5); EXPECT_TRUE(!attrB.isOutOfCore()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(5, attrB.get(i)); } } // read in using delayed load and check streaming (write handle) { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); EXPECT_TRUE(!attrB.isUniform()); attrB.setStreaming(true); { AttributeWriteHandle<int> handle(attrB); EXPECT_TRUE(!attrB.isOutOfCore()); EXPECT_TRUE(!attrB.isUniform()); } 
EXPECT_TRUE(!attrB.isUniform()); } // read in using delayed load and check streaming (read handle) { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); EXPECT_TRUE(!attrB.isUniform()); attrB.setStreaming(true); { AttributeHandle<int> handle(attrB); EXPECT_TRUE(!attrB.isOutOfCore()); EXPECT_TRUE(!attrB.isUniform()); } EXPECT_TRUE(attrB.isUniform()); } // read in using delayed load and check implicit load through get() { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); attrB.get(0); EXPECT_TRUE(!attrB.isOutOfCore()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), attrB.get(i)); } } // read in using delayed load and check implicit load through compress() { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); } // read in using delayed load and check copy and assignment constructors { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); AttributeArrayI attrC(attrB); AttributeArrayI attrD = attrB; EXPECT_TRUE(attrB.isOutOfCore()); EXPECT_TRUE(attrC.isOutOfCore()); EXPECT_TRUE(attrD.isOutOfCore()); attrB.loadData(); attrC.loadData(); attrD.loadData(); EXPECT_TRUE(!attrB.isOutOfCore()); EXPECT_TRUE(!attrC.isOutOfCore()); EXPECT_TRUE(!attrD.isOutOfCore()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), attrB.get(i)); EXPECT_EQ(attrA.get(i), attrC.get(i)); EXPECT_EQ(attrA.get(i), attrD.get(i)); } } // read in using delayed load and check implicit load through AttributeHandle { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); AttributeHandle<int> handle(attrB); EXPECT_TRUE(!attrB.isOutOfCore()); } // 
read in using delayed load and check detaching of file (using collapse()) { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); EXPECT_TRUE(!attrB.isUniform()); attrB.collapse(); EXPECT_TRUE(!attrB.isOutOfCore()); EXPECT_TRUE(attrB.isUniform()); EXPECT_EQ(0, attrB.get(0)); } // read in and write out using delayed load to check writing out-of-core attributes { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); std::string filename2 = tempDir + "/openvdb_delayed5"; std::ofstream fileout2(filename2.c_str(), std::ios_base::binary); io::setStreamMetadataPtr(fileout2, streamMetadata); io::setDataCompression(fileout2, io::COMPRESS_BLOSC); attrB.writeMetadata(fileout2, false, /*paged=*/true); compression::PagedOutputStream outputStreamSize(fileout2); outputStreamSize.setSizeOnly(true); attrB.writePagedBuffers(outputStreamSize, false); outputStreamSize.flush(); compression::PagedOutputStream outputStream(fileout2); outputStream.setSizeOnly(false); attrB.writePagedBuffers(outputStream, false); outputStream.flush(); fileout2.close(); AttributeArrayI attrB2; std::ifstream filein2(filename2.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein2, streamMetadata); io::setMappedFilePtr(filein2, mappedFile); attrB2.readMetadata(filein2); compression::PagedInputStream inputStream2(filein2); inputStream2.setSizeOnly(true); attrB2.readPagedBuffers(inputStream2); inputStream2.setSizeOnly(false); attrB2.readPagedBuffers(inputStream2); EXPECT_TRUE(attrB2.isOutOfCore()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrB.get(i), attrB2.get(i)); } filein2.close(); } // Clean up temp files. 
std::remove(mappedFile->filename().c_str()); std::remove(filename.c_str()); AttributeArrayI attrUniform(count); // write out uniform attribute array to a temp file { filename = tempDir + "/openvdb_delayed2"; std::ofstream fileout(filename.c_str(), std::ios_base::binary); io::setStreamMetadataPtr(fileout, streamMetadata); io::setDataCompression(fileout, io::COMPRESS_BLOSC); attrUniform.writeMetadata(fileout, false, /*paged=*/true); compression::PagedOutputStream outputStreamSize(fileout); outputStreamSize.setSizeOnly(true); attrUniform.writePagedBuffers(outputStreamSize, false); outputStreamSize.flush(); compression::PagedOutputStream outputStream(fileout); outputStream.setSizeOnly(false); attrUniform.writePagedBuffers(outputStream, false); outputStream.flush(); fileout.close(); } mappedFile = TestMappedFile::create(filename); // read in using delayed load and check fill() { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isUniform()); attrB.fill(5); EXPECT_TRUE(attrB.isUniform()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(5, attrB.get(i)); } } AttributeArrayI attrStrided(count, /*stride=*/3); EXPECT_EQ(Index(3), attrStrided.stride()); // Clean up temp files. std::remove(mappedFile->filename().c_str()); std::remove(filename.c_str()); // write out strided attribute array to a temp file { filename = tempDir + "/openvdb_delayed3"; std::ofstream fileout(filename.c_str(), std::ios_base::binary); io::setStreamMetadataPtr(fileout, streamMetadata); io::setDataCompression(fileout, io::COMPRESS_BLOSC); attrStrided.writeMetadata(fileout, false, /*paged=*/true); compression::PagedOutputStream outputStreamSize(fileout); outputStreamSize.setSizeOnly(true); attrStrided.writePagedBuffers(outputStreamSize, false); outputStreamSize.flush(); compression::PagedOutputStream outputStream(fileout); outputStream.setSizeOnly(false); attrStrided.writePagedBuffers(outputStream, false); outputStream.flush(); fileout.close(); } mappedFile = TestMappedFile::create(filename); // read in using delayed load and check fill() { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_EQ(Index(3), attrB.stride()); } // Clean up temp files. 
std::remove(mappedFile->filename().c_str()); std::remove(filename.c_str()); // write out compressed attribute array to a temp file { filename = tempDir + "/openvdb_delayed4"; std::ofstream fileout(filename.c_str(), std::ios_base::binary); io::setStreamMetadataPtr(fileout, streamMetadata); io::setDataCompression(fileout, io::COMPRESS_BLOSC); attrA.writeMetadata(fileout, false, /*paged=*/true); compression::PagedOutputStream outputStreamSize(fileout); outputStreamSize.setSizeOnly(true); attrA.writePagedBuffers(outputStreamSize, false); outputStreamSize.flush(); compression::PagedOutputStream outputStream(fileout); outputStream.setSizeOnly(false); attrA.writePagedBuffers(outputStream, false); outputStream.flush(); fileout.close(); } mappedFile = TestMappedFile::create(filename); // read in using delayed load and check manual loading of data { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); attrB.loadData(); EXPECT_TRUE(!attrB.isOutOfCore()); EXPECT_EQ(attrA.memUsage(), attrB.memUsage()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), attrB.get(i)); } } // read in using delayed load and check partial read state { std::unique_ptr<AttributeArrayI> attrB(new AttributeArrayI); EXPECT_TRUE(!(attrB->flags() & AttributeArray::PARTIALREAD)); std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB->readMetadata(filein); // PARTIALREAD flag should now be set EXPECT_TRUE(attrB->flags() & AttributeArray::PARTIALREAD); // copy-construct and assign AttributeArray AttributeArrayI attrC(*attrB); EXPECT_TRUE(attrC.flags() & AttributeArray::PARTIALREAD); AttributeArrayI attrD = *attrB; EXPECT_TRUE(attrD.flags() & AttributeArray::PARTIALREAD); // verify deleting attrB is safe attrB.reset(); // verify data is not valid EXPECT_TRUE(!attrC.validData()); { // attempting to write a partially-read AttributeArray throws std::string filename = tempDir + "/openvdb_partial1"; ScopedFile f(filename); std::ofstream fileout(filename.c_str(), std::ios_base::binary); io::setStreamMetadataPtr(fileout, streamMetadata); io::setDataCompression(fileout, io::COMPRESS_BLOSC); EXPECT_THROW(attrC.writeMetadata(fileout, false, /*paged=*/true), IoError); } // continue loading with copy-constructed AttributeArray compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrC.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrC.readPagedBuffers(inputStream); EXPECT_TRUE(attrC.isOutOfCore()); attrC.loadData(); EXPECT_TRUE(!attrC.isOutOfCore()); // verify data is now valid EXPECT_TRUE(attrC.validData()); EXPECT_EQ(attrA.memUsage(), attrC.memUsage()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), attrC.get(i)); } } // read in using delayed load and check implicit load through get() { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream 
inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); attrB.get(0); EXPECT_TRUE(!attrB.isOutOfCore()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), attrB.get(i)); } } #ifdef OPENVDB_USE_BLOSC // read in using delayed load and check copy and assignment constructors { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); AttributeArrayI attrC(attrB); AttributeArrayI attrD = attrB; EXPECT_TRUE(attrB.isOutOfCore()); EXPECT_TRUE(attrC.isOutOfCore()); EXPECT_TRUE(attrD.isOutOfCore()); attrB.loadData(); attrC.loadData(); attrD.loadData(); EXPECT_TRUE(!attrB.isOutOfCore()); EXPECT_TRUE(!attrC.isOutOfCore()); EXPECT_TRUE(!attrD.isOutOfCore()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), attrB.get(i)); EXPECT_EQ(attrA.get(i), attrC.get(i)); EXPECT_EQ(attrA.get(i), attrD.get(i)); } } // read in using delayed load and check implicit load through AttributeHandle { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); attrB.readMetadata(filein); compression::PagedInputStream inputStream(filein); inputStream.setSizeOnly(true); attrB.readPagedBuffers(inputStream); inputStream.setSizeOnly(false); attrB.readPagedBuffers(inputStream); EXPECT_TRUE(attrB.isOutOfCore()); AttributeHandle<int> handle(attrB); EXPECT_TRUE(!attrB.isOutOfCore()); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), handle.get(i)); } } #endif // Clean up temp files. 
std::remove(mappedFile->filename().c_str()); std::remove(filename.c_str()); // write out invalid serialization flags as metadata to a temp file { filename = tempDir + "/openvdb_delayed5"; std::ofstream fileout(filename.c_str(), std::ios_base::binary); io::setStreamMetadataPtr(fileout, streamMetadata); io::setDataCompression(fileout, io::COMPRESS_BLOSC); // write out unknown serialization flags to check forwards-compatibility Index64 bytes(0); uint8_t flags(0); uint8_t serializationFlags(Int16(0x10)); Index size(0); fileout.write(reinterpret_cast<const char*>(&bytes), sizeof(Index64)); fileout.write(reinterpret_cast<const char*>(&flags), sizeof(uint8_t)); fileout.write(reinterpret_cast<const char*>(&serializationFlags), sizeof(uint8_t)); fileout.write(reinterpret_cast<const char*>(&size), sizeof(Index)); fileout.close(); } mappedFile = TestMappedFile::create(filename); // read in using delayed load and check metadata fail due to serialization flags { AttributeArrayI attrB; std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); EXPECT_THROW(attrB.readMetadata(filein), openvdb::IoError); } // cleanup temp files std::remove(mappedFile->filename().c_str()); std::remove(filename.c_str()); } } TEST_F(TestAttributeArray, testDelayedLoad) { testDelayedLoad(); } TEST_F(TestAttributeArray, testDefaultValue) { using AttributeArrayF = TypedAttributeArray<float>; using AttributeArrayI = TypedAttributeArray<int>; AttributeArrayI::registerType(); AttributeArrayF::registerType(); TypedMetadata<float> defaultValue(5.4f); Metadata& baseDefaultValue = defaultValue; // default value is same value type AttributeArray::Ptr attr = AttributeArrayF::create(10, 1, true, &baseDefaultValue); EXPECT_TRUE(attr); EXPECT_EQ(5.4f, AttributeArrayF::cast(*attr).get(0)); // default value is different value type, so not used attr = AttributeArrayI::create(10, 1, true, &baseDefaultValue); EXPECT_TRUE(attr); EXPECT_EQ(0, AttributeArrayI::cast(*attr).get(0)); } TEST_F(TestAttributeArray, testQuaternions) { using AttributeQF = TypedAttributeArray<math::Quat<float>>; using AttributeQD = TypedAttributeArray<QuatR>; AttributeQF::registerType(); AttributeQD::registerType(); EXPECT_TRUE(AttributeQF::attributeType().first == "quats"); EXPECT_TRUE(AttributeQD::attributeType().first == "quatd"); AttributeQF test(/*size=*/5); AttributeQD orient(/*size=*/10); { // set some quaternion values AttributeWriteHandle<QuatR> orientHandle(orient); orientHandle.set(4, QuatR(1, 2, 3, 4)); orientHandle.set(7, QuatR::identity()); } { // get some quaternion values AttributeHandle<QuatR> orientHandle(orient); EXPECT_EQ(QuatR::zero(), orientHandle.get(3)); EXPECT_EQ(QuatR(1, 2, 3, 4), orientHandle.get(4)); EXPECT_EQ(QuatR::identity(), orientHandle.get(7)); } { // create a quaternion array with a zero uniform value AttributeQD zero(/*size=*/10, /*stride=*/1, /*constantStride=*/true, QuatR::zero()); EXPECT_EQ(QuatR::zero(), zero.get(5)); } } TEST_F(TestAttributeArray, testMatrices) { typedef TypedAttributeArray<Mat4d> AttributeM; AttributeM::registerType(); EXPECT_TRUE(AttributeM::attributeType().first == "mat4d"); AttributeM matrix(/*size=*/10); Mat4d testMatrix(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); { // set some matrix values AttributeWriteHandle<Mat4d> matrixHandle(matrix); matrixHandle.set(4, testMatrix); matrixHandle.set(7, Mat4d::zero()); } { // get some matrix values AttributeHandle<Mat4d> matrixHandle(matrix); 
EXPECT_EQ(Mat4d::zero(), matrixHandle.get(3)); EXPECT_EQ(testMatrix, matrixHandle.get(4)); EXPECT_EQ(Mat4d::zero(), matrixHandle.get(7)); } { // create a matrix array with a zero uniform value AttributeM zero(/*size=*/10, /*stride=*/1, /*constantStride=*/true, Mat4d::zero()); EXPECT_EQ(Mat4d::zero(), zero.get(5)); } } namespace profile { template <typename AttrT> void expand(const Name& prefix, AttrT& attr) { ProfileTimer timer(prefix + ": expand"); attr.expand(); } template <typename AttrT> void set(const Name& prefix, AttrT& attr) { ProfileTimer timer(prefix + ": set"); const Index size = attr.size(); for (Index i = 0; i < size; i++) { attr.setUnsafe(i, typename AttrT::ValueType(i)); } } template <typename CodecT, typename AttrT> void setH(const Name& prefix, AttrT& attr) { using ValueType = typename AttrT::ValueType; ProfileTimer timer(prefix + ": setHandle"); AttributeWriteHandle<ValueType, CodecT> handle(attr); const Index size = attr.size(); for (Index i = 0; i < size; i++) { handle.set(i, ValueType(i)); } } template <typename AttrT> void sum(const Name& prefix, const AttrT& attr) { ProfileTimer timer(prefix + ": sum"); using ValueType = typename AttrT::ValueType; ValueType sum = 0; const Index size = attr.size(); for (Index i = 0; i < size; i++) { sum += attr.getUnsafe(i); } // prevent compiler optimisations removing computation EXPECT_TRUE(sum!=ValueType()); } template <typename CodecT, typename AttrT> void sumH(const Name& prefix, const AttrT& attr) { ProfileTimer timer(prefix + ": sumHandle"); using ValueType = typename AttrT::ValueType; ValueType sum = 0; AttributeHandle<ValueType, CodecT> handle(attr); for (Index i = 0; i < attr.size(); i++) { sum += handle.get(i); } // prevent compiler optimisations removing computation EXPECT_TRUE(sum!=ValueType()); } } // namespace profile TEST_F(TestAttributeArray, testProfile) { using namespace openvdb::util; using namespace openvdb::math; using AttributeArrayF = TypedAttributeArray<float>; using AttributeArrayF16 = TypedAttributeArray<float, FixedPointCodec<false>>; using AttributeArrayF8 = TypedAttributeArray<float, FixedPointCodec<true>>; /////////////////////////////////////////////////// #ifdef PROFILE const size_t elements(1000 * 1000 * 1000); std::cerr << std::endl; #else const size_t elements(10 * 1000 * 1000); #endif // std::vector { std::vector<float> values; { ProfileTimer timer("Vector<float>: resize"); values.resize(elements); } { ProfileTimer timer("Vector<float>: set"); for (size_t i = 0; i < elements; i++) { values[i] = float(i); } } { ProfileTimer timer("Vector<float>: sum"); float sum = 0; for (size_t i = 0; i < elements; i++) { sum += float(values[i]); } // to prevent optimisation clean up EXPECT_TRUE(sum!=0.0f); } } // AttributeArray { AttributeArrayF attr(elements); profile::expand("AttributeArray<float>", attr); profile::set("AttributeArray<float>", attr); profile::sum("AttributeArray<float>", attr); } { AttributeArrayF16 attr(elements); profile::expand("AttributeArray<float, fp16>", attr); profile::set("AttributeArray<float, fp16>", attr); profile::sum("AttributeArray<float, fp16>", attr); } { AttributeArrayF8 attr(elements); profile::expand("AttributeArray<float, fp8>", attr); profile::set("AttributeArray<float, fp8>", attr); profile::sum("AttributeArray<float, fp8>", attr); } // AttributeHandle (UnknownCodec) { AttributeArrayF attr(elements); profile::expand("AttributeHandle<float>", attr); profile::setH<UnknownCodec>("AttributeHandle<float>", attr); profile::sumH<UnknownCodec>("AttributeHandle<float>", attr); } { 
AttributeArrayF16 attr(elements); profile::expand("AttributeHandle<float, fp16>", attr); profile::setH<UnknownCodec>("AttributeHandle<float, fp16>", attr); profile::sumH<UnknownCodec>("AttributeHandle<float, fp16>", attr); } { AttributeArrayF8 attr(elements); profile::expand("AttributeHandle<float, fp8>", attr); profile::setH<UnknownCodec>("AttributeHandle<float, fp8>", attr); profile::sumH<UnknownCodec>("AttributeHandle<float, fp8>", attr); } // AttributeHandle (explicit codec) { AttributeArrayF attr(elements); profile::expand("AttributeHandle<float>", attr); profile::setH<NullCodec>("AttributeHandle<float, Codec>", attr); profile::sumH<NullCodec>("AttributeHandle<float, Codec>", attr); } { AttributeArrayF16 attr(elements); profile::expand("AttributeHandle<float, fp16>", attr); profile::setH<FixedPointCodec<false>>("AttributeHandle<float, fp16, Codec>", attr); profile::sumH<FixedPointCodec<false>>("AttributeHandle<float, fp16, Codec>", attr); } { AttributeArrayF8 attr(elements); profile::expand("AttributeHandle<float, fp8>", attr); profile::setH<FixedPointCodec<true>>("AttributeHandle<float, fp8, Codec>", attr); profile::sumH<FixedPointCodec<true>>("AttributeHandle<float, fp8, Codec>", attr); } }
83,779
C++
32.891586
129
0.60884
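The tests above exercise OpenVDB's TypedAttributeArray together with its read and write handles. A minimal, self-contained sketch of that pattern follows; it is not part of the test file, the array size and the value 2.5f are illustrative, and the calls mirror those used in the tests.
// A minimal sketch, assuming the OpenVDB Points library is available.
#include <openvdb/openvdb.h>
#include <openvdb/points/AttributeArray.h>

int main()
{
    openvdb::initialize();
    using AttributeArrayF = openvdb::points::TypedAttributeArray<float>;
    AttributeArrayF::registerType();                      // register the storage type by name
    AttributeArrayF attr(/*size=*/10);                    // starts as a uniform (collapsed) array
    {
        openvdb::points::AttributeWriteHandle<float> writeHandle(attr);
        writeHandle.set(4, 2.5f);                         // write through a bound handle
    }
    openvdb::points::AttributeHandle<float> readHandle(attr);
    return readHandle.get(4) == 2.5f ? 0 : 1;             // read the value back
}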
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestMetadata.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/Metadata.h> #include <sstream> class TestMetadata: public ::testing::Test { public: void SetUp() override { openvdb::Metadata::clearRegistry(); } void TearDown() override { openvdb::Metadata::clearRegistry(); } }; TEST_F(TestMetadata, testMetadataRegistry) { using namespace openvdb; Int32Metadata::registerType(); StringMetadata strMetadata; EXPECT_TRUE(!Metadata::isRegisteredType(strMetadata.typeName())); StringMetadata::registerType(); EXPECT_TRUE(Metadata::isRegisteredType(strMetadata.typeName())); EXPECT_TRUE(Metadata::isRegisteredType(Int32Metadata::staticTypeName())); Metadata::Ptr stringMetadata = Metadata::createMetadata(strMetadata.typeName()); EXPECT_TRUE(stringMetadata->typeName() == strMetadata.typeName()); StringMetadata::unregisterType(); EXPECT_THROW(Metadata::createMetadata(strMetadata.typeName()), openvdb::LookupError); } TEST_F(TestMetadata, testMetadataAsBool) { using namespace openvdb; { FloatMetadata meta(0.0); EXPECT_TRUE(!meta.asBool()); meta.setValue(1.0); EXPECT_TRUE(meta.asBool()); meta.setValue(-1.0); EXPECT_TRUE(meta.asBool()); meta.setValue(999.0); EXPECT_TRUE(meta.asBool()); } { Int32Metadata meta(0); EXPECT_TRUE(!meta.asBool()); meta.setValue(1); EXPECT_TRUE(meta.asBool()); meta.setValue(-1); EXPECT_TRUE(meta.asBool()); meta.setValue(999); EXPECT_TRUE(meta.asBool()); } { StringMetadata meta(""); EXPECT_TRUE(!meta.asBool()); meta.setValue("abc"); EXPECT_TRUE(meta.asBool()); } { Vec3IMetadata meta(Vec3i(0)); EXPECT_TRUE(!meta.asBool()); meta.setValue(Vec3i(-1, 0, 1)); EXPECT_TRUE(meta.asBool()); } { Vec3SMetadata meta(Vec3s(0.0)); EXPECT_TRUE(!meta.asBool()); meta.setValue(Vec3s(-1.0, 0.0, 1.0)); EXPECT_TRUE(meta.asBool()); } { Vec4DMetadata meta(Vec4d(0.0)); EXPECT_TRUE(!meta.asBool()); meta.setValue(Vec4d(1.0)); EXPECT_TRUE(meta.asBool()); } } TEST_F(TestMetadata, testCustomMetadata) { using namespace openvdb; const Vec3i expected(1, 2, 3); std::ostringstream ostr(std::ios_base::binary); { Vec3IMetadata::registerType(); Vec3IMetadata meta(expected); // Write Vec3I metadata to a byte string. meta.write(ostr); } // Unregister Vec3I metadata. Metadata::clearRegistry(); { std::istringstream istr(ostr.str(), std::ios_base::binary); UnknownMetadata meta; // Verify that metadata of an unregistered type can be read successfully. EXPECT_NO_THROW(meta.read(istr)); // Verify that the metadata matches the original vector value. EXPECT_EQ(sizeof(Vec3i), size_t(meta.size())); EXPECT_TRUE(meta.value().size() == size_t(meta.size())); EXPECT_EQ(expected, *reinterpret_cast<const Vec3i*>(&meta.value()[0])); ostr.str(""); meta.write(ostr); // Verify that UnknownMetadata can be copied. auto metaPtr = meta.copy(); EXPECT_TRUE(metaPtr.get() != nullptr); EXPECT_TRUE(meta == *metaPtr); // Verify that typed metadata can be copied into UnknownMetadata. meta.copy(Vec3IMetadata(expected)); EXPECT_EQ(sizeof(expected), size_t(meta.size())); const auto* ptr = reinterpret_cast<const uint8_t*>(&expected); EXPECT_TRUE(UnknownMetadata::ByteVec(ptr, ptr + sizeof(expected)) == meta.value()); } Vec3IMetadata::registerType(); { std::istringstream istr(ostr.str(), std::ios_base::binary); Vec3IMetadata meta; meta.read(istr); EXPECT_EQ(expected, meta.value()); } }
4,024
C++
26.380952
91
0.619781
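For reference, a small sketch of the metadata-registry round trip the test exercises: register a typed metadata class, create an instance by type name, and query it through the base interface. FloatMetadata and the value 5.4f are placeholders; the calls mirror the test above.
#include <openvdb/Metadata.h>

int main()
{
    using namespace openvdb;
    FloatMetadata::registerType();                        // make the type creatable by name
    if (!Metadata::isRegisteredType(FloatMetadata::staticTypeName())) return 1;

    // Create an instance through the registry, then set a value via the concrete type.
    Metadata::Ptr meta = Metadata::createMetadata(FloatMetadata::staticTypeName());
    if (auto* f = dynamic_cast<FloatMetadata*>(meta.get())) f->setValue(5.4f);
    return meta->asBool() ? 0 : 1;                        // any non-zero value reports true
}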
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestFindActiveValues.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <cstdio> // for remove() #include <fstream> #include <sstream> #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/Types.h> #include <openvdb/openvdb.h> #include <openvdb/util/CpuTimer.h> #include <openvdb/tools/LevelSetSphere.h> #include <openvdb/tools/FindActiveValues.h> #include "util.h" // for unittest_util::makeSphere() #define ASSERT_DOUBLES_EXACTLY_EQUAL(expected, actual) \ EXPECT_NEAR((expected), (actual), /*tolerance=*/0.0); class TestFindActiveValues: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; TEST_F(TestFindActiveValues, testBasic) { const float background = 5.0f; openvdb::FloatTree tree(background); const openvdb::Coord min(-1,-2,30), max(20,30,55); const openvdb::CoordBBox bbox(min[0], min[1], min[2], max[0], max[1], max[2]); EXPECT_TRUE( openvdb::tools::noActiveValues(tree, bbox)); EXPECT_TRUE(!openvdb::tools::anyActiveValues(tree, bbox)); EXPECT_TRUE(!openvdb::tools::anyActiveVoxels(tree, bbox)); tree.setValue(min.offsetBy(-1), 1.0f); EXPECT_TRUE( openvdb::tools::noActiveValues(tree, bbox)); EXPECT_TRUE(!openvdb::tools::anyActiveValues(tree, bbox)); EXPECT_TRUE(!openvdb::tools::anyActiveVoxels(tree, bbox)); tree.setValue(max.offsetBy( 1), 1.0f); EXPECT_TRUE( openvdb::tools::noActiveValues(tree, bbox)); EXPECT_TRUE(!openvdb::tools::anyActiveValues(tree, bbox)); EXPECT_TRUE(!openvdb::tools::anyActiveVoxels(tree, bbox)); tree.setValue(min, 1.0f); EXPECT_TRUE(openvdb::tools::anyActiveValues(tree, bbox)); EXPECT_TRUE(openvdb::tools::anyActiveVoxels(tree, bbox)); EXPECT_TRUE(!openvdb::tools::anyActiveTiles(tree, bbox)); tree.setValue(max, 1.0f); EXPECT_TRUE(openvdb::tools::anyActiveValues(tree, bbox)); EXPECT_TRUE(openvdb::tools::anyActiveVoxels(tree, bbox)); EXPECT_TRUE(!openvdb::tools::anyActiveTiles(tree, bbox)); auto tiles = openvdb::tools::activeTiles(tree, bbox); EXPECT_TRUE( tiles.size() == 0u ); tree.sparseFill(bbox, 1.0f); EXPECT_TRUE(openvdb::tools::anyActiveValues(tree, bbox)); EXPECT_TRUE(openvdb::tools::anyActiveVoxels(tree, bbox)); EXPECT_TRUE(openvdb::tools::anyActiveTiles( tree, bbox)); tiles = openvdb::tools::activeTiles(tree, bbox); EXPECT_TRUE( tiles.size() != 0u ); for (auto &t : tiles) { EXPECT_TRUE( t.level == 1); EXPECT_TRUE( t.bbox.volume() == openvdb::math::Pow3(uint64_t(8)) ); //std::cerr << "bbox = " << t.bbox << ", level = " << t.level << std::endl; } tree.denseFill(bbox, 1.0f); EXPECT_TRUE(openvdb::tools::anyActiveValues(tree, bbox)); EXPECT_TRUE(openvdb::tools::anyActiveVoxels(tree, bbox)); EXPECT_TRUE(!openvdb::tools::anyActiveTiles(tree, bbox)); tiles = openvdb::tools::activeTiles(tree, bbox); EXPECT_TRUE( tiles.size() == 0u ); } TEST_F(TestFindActiveValues, testSphere1) { const openvdb::Vec3f center(0.5f, 0.5f, 0.5f); const float radius = 0.3f; const int dim = 100, half_width = 3; const float voxel_size = 1.0f/dim; openvdb::FloatGrid::Ptr grid = openvdb::FloatGrid::create(/*background=*/half_width*voxel_size); const openvdb::FloatTree& tree = grid->tree(); grid->setTransform(openvdb::math::Transform::createLinearTransform(/*voxel size=*/voxel_size)); unittest_util::makeSphere<openvdb::FloatGrid>( openvdb::Coord(dim), center, radius, *grid, unittest_util::SPHERE_SPARSE_NARROW_BAND); const int c = int(0.5f/voxel_size); const openvdb::CoordBBox a(openvdb::Coord(c), openvdb::Coord(c+ 8)); EXPECT_TRUE(!tree.isValueOn(openvdb::Coord(c))); 
EXPECT_TRUE(!openvdb::tools::anyActiveValues(tree, a)); const openvdb::Coord d(c + int(radius/voxel_size), c, c); EXPECT_TRUE(tree.isValueOn(d)); const auto b = openvdb::CoordBBox::createCube(d, 4); EXPECT_TRUE(openvdb::tools::anyActiveValues(tree, b)); const openvdb::CoordBBox e(openvdb::Coord(0), openvdb::Coord(dim)); EXPECT_TRUE(openvdb::tools::anyActiveValues(tree, e)); EXPECT_TRUE(!openvdb::tools::anyActiveTiles(tree, e)); auto tiles = openvdb::tools::activeTiles(tree, e); EXPECT_TRUE( tiles.size() == 0u ); } TEST_F(TestFindActiveValues, testSphere2) { const openvdb::Vec3f center(0.0f); const float radius = 0.5f; const int dim = 400, halfWidth = 3; const float voxelSize = 2.0f/dim; auto grid = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(radius, center, voxelSize, halfWidth); openvdb::FloatTree& tree = grid->tree(); {//test center const openvdb::CoordBBox bbox(openvdb::Coord(0), openvdb::Coord(8)); EXPECT_TRUE(!tree.isValueOn(openvdb::Coord(0))); //openvdb::util::CpuTimer timer("\ncenter"); EXPECT_TRUE(!openvdb::tools::anyActiveValues(tree, bbox)); //timer.stop(); } {//test on sphere const openvdb::Coord d(int(radius/voxelSize), 0, 0); EXPECT_TRUE(tree.isValueOn(d)); const auto bbox = openvdb::CoordBBox::createCube(d, 4); //openvdb::util::CpuTimer timer("\non sphere"); EXPECT_TRUE(openvdb::tools::anyActiveValues(tree, bbox)); //timer.stop(); } {//test full domain const openvdb::CoordBBox bbox(openvdb::Coord(-4000), openvdb::Coord(4000)); //openvdb::util::CpuTimer timer("\nfull domain"); EXPECT_TRUE(openvdb::tools::anyActiveValues(tree, bbox)); //timer.stop(); openvdb::tools::FindActiveValues<openvdb::FloatTree> op(tree); EXPECT_TRUE(op.count(bbox) == tree.activeVoxelCount()); } {// find largest inscribed cube in index space containing NO active values openvdb::tools::FindActiveValues<openvdb::FloatTree> op(tree); auto bbox = openvdb::CoordBBox::createCube(openvdb::Coord(0), 1); //openvdb::util::CpuTimer timer("\nInscribed cube (class)"); int count = 0; while(op.noActiveValues(bbox)) { ++count; bbox.expand(1); } //const double t = timer.stop(); //std::cerr << "Inscribed bbox = " << bbox << std::endl; const int n = int(openvdb::math::Sqrt(openvdb::math::Pow2(radius-halfWidth*voxelSize)/3.0f)/voxelSize) + 1; //std::cerr << "n=" << n << std::endl; EXPECT_TRUE( bbox.max() == openvdb::Coord( n)); EXPECT_TRUE( bbox.min() == openvdb::Coord(-n)); //openvdb::util::printTime(std::cerr, t/count, "time per lookup ", "\n", true, 4, 3); } {// find largest inscribed cube in index space containing NO active values auto bbox = openvdb::CoordBBox::createCube(openvdb::Coord(0), 1); //openvdb::util::CpuTimer timer("\nInscribed cube (func)"); int count = 0; while(!openvdb::tools::anyActiveValues(tree, bbox)) { bbox.expand(1); ++count; } //const double t = timer.stop(); //std::cerr << "Inscribed bbox = " << bbox << std::endl; const int n = int(openvdb::math::Sqrt(openvdb::math::Pow2(radius-halfWidth*voxelSize)/3.0f)/voxelSize) + 1; //std::cerr << "n=" << n << std::endl; //openvdb::util::printTime(std::cerr, t/count, "time per lookup ", "\n", true, 4, 3); EXPECT_TRUE( bbox.max() == openvdb::Coord( n)); EXPECT_TRUE( bbox.min() == openvdb::Coord(-n)); } } TEST_F(TestFindActiveValues, testSparseBox) { {//test active tiles in a sparsely filled box const int half_dim = 256; const openvdb::CoordBBox bbox(openvdb::Coord(-half_dim), openvdb::Coord(half_dim-1)); openvdb::FloatTree tree; EXPECT_TRUE(tree.activeTileCount() == 0); EXPECT_TRUE(tree.getValueDepth(openvdb::Coord(0)) == -1);//background value 
openvdb::tools::FindActiveValues<openvdb::FloatTree> op(tree); tree.sparseFill(bbox, 1.0f, true); op.update(tree);//tree was modified so op needs to be updated EXPECT_TRUE(tree.activeTileCount() > 0); EXPECT_TRUE(tree.getValueDepth(openvdb::Coord(0)) == 1);//upper internal tile value for (int i=1; i<half_dim; ++i) { EXPECT_TRUE( op.anyActiveValues(openvdb::CoordBBox::createCube(openvdb::Coord(-half_dim), i))); EXPECT_TRUE(!op.anyActiveVoxels(openvdb::CoordBBox::createCube(openvdb::Coord(-half_dim), i))); } EXPECT_TRUE(op.count(bbox) == bbox.volume()); auto bbox2 = openvdb::CoordBBox::createCube(openvdb::Coord(-half_dim), 1); //double t = 0.0; //openvdb::util::CpuTimer timer; for (bool test = true; test; ) { //timer.restart(); test = op.anyActiveValues(bbox2); //t = std::max(t, timer.restart()); if (test) bbox2.translate(openvdb::Coord(1)); } //std::cerr << "bbox = " << bbox2 << std::endl; //openvdb::util::printTime(std::cout, t, "The slowest sparse test ", "\n", true, 4, 3); EXPECT_TRUE(bbox2 == openvdb::CoordBBox::createCube(openvdb::Coord(half_dim), 1)); EXPECT_TRUE( openvdb::tools::anyActiveTiles(tree, bbox) ); auto tiles = openvdb::tools::activeTiles(tree, bbox); EXPECT_TRUE( tiles.size() == openvdb::math::Pow3(size_t(4)) ); // {-256, -129} -> {-128, -1} -> {0, 127} -> {128, 255} //std::cerr << "bbox " << bbox << " overlaps with " << tiles.size() << " active tiles " << std::endl; openvdb::CoordBBox tmp; for (auto &t : tiles) { EXPECT_TRUE( t.state ); EXPECT_TRUE( t.level == 2);// tiles at level 1 are 8^3, at level 2 they are 128^3, and at level 3 they are 4096^3 EXPECT_TRUE( t.value == 1.0f); EXPECT_TRUE( t.bbox.volume() == openvdb::math::Pow3(openvdb::Index64(128)) ); tmp.expand( t.bbox ); //std::cerr << t.bbox << std::endl; } //std::cerr << tmp << std::endl; EXPECT_TRUE( tmp == bbox );// union of all the active tiles should equal the bbox of the sparseFill operation! 
} }// testSparseBox TEST_F(TestFindActiveValues, testDenseBox) { {//test active voxels in a densely filled box const int half_dim = 256; const openvdb::CoordBBox bbox(openvdb::Coord(-half_dim), openvdb::Coord(half_dim)); openvdb::FloatTree tree; EXPECT_TRUE(tree.activeTileCount() == 0); EXPECT_TRUE(tree.getValueDepth(openvdb::Coord(0)) == -1);//background value tree.denseFill(bbox, 1.0f, true); EXPECT_TRUE(tree.activeTileCount() == 0); openvdb::tools::FindActiveValues<openvdb::FloatTree> op(tree); EXPECT_TRUE(tree.getValueDepth(openvdb::Coord(0)) == 3);// leaf value for (int i=1; i<half_dim; ++i) { EXPECT_TRUE(op.anyActiveValues(openvdb::CoordBBox::createCube(openvdb::Coord(0), i))); EXPECT_TRUE(op.anyActiveVoxels(openvdb::CoordBBox::createCube(openvdb::Coord(0), i))); } EXPECT_TRUE(op.count(bbox) == bbox.volume()); auto bbox2 = openvdb::CoordBBox::createCube(openvdb::Coord(-half_dim), 1); //double t = 0.0; //openvdb::util::CpuTimer timer; for (bool test = true; test; ) { //timer.restart(); test = op.anyActiveValues(bbox2); //t = std::max(t, timer.restart()); if (test) bbox2.translate(openvdb::Coord(1)); } //std::cerr << "bbox = " << bbox2 << std::endl; //openvdb::util::printTime(std::cout, t, "The slowest dense test ", "\n", true, 4, 3); EXPECT_TRUE(bbox2 == openvdb::CoordBBox::createCube(openvdb::Coord(half_dim + 1), 1)); auto tiles = openvdb::tools::activeTiles(tree, bbox); EXPECT_TRUE( tiles.size() == 0u ); } }// testDenseBox TEST_F(TestFindActiveValues, testBenchmarks) { {//benchmark test against active tiles in a sparsely filled box using namespace openvdb; const int half_dim = 512, bbox_size = 6; const CoordBBox bbox(Coord(-half_dim), Coord(half_dim)); FloatTree tree; tree.sparseFill(bbox, 1.0f, true); tools::FindActiveValues<FloatTree> op(tree); //double t = 0.0; //util::CpuTimer timer; for (auto b = CoordBBox::createCube(Coord(-half_dim), bbox_size); true; b.translate(Coord(1))) { //timer.restart(); bool test = op.anyActiveValues(b); //t = std::max(t, timer.restart()); if (!test) break; } //std::cout << "\n*The slowest sparse test " << t << " milliseconds\n"; EXPECT_TRUE(op.count(bbox) == bbox.volume()); } {//benchmark test against active voxels in a densely filled box using namespace openvdb; const int half_dim = 256, bbox_size = 1; const CoordBBox bbox(Coord(-half_dim), Coord(half_dim)); FloatTree tree; tree.denseFill(bbox, 1.0f, true); tools::FindActiveValues<FloatTree> op(tree); //double t = 0.0; //openvdb::util::CpuTimer timer; for (auto b = CoordBBox::createCube(Coord(-half_dim), bbox_size); true; b.translate(Coord(1))) { //timer.restart(); bool test = op.anyActiveValues(b); //t = std::max(t, timer.restart()); if (!test) break; } //std::cout << "*The slowest dense test " << t << " milliseconds\n"; EXPECT_TRUE(op.count(bbox) == bbox.volume()); } {//benchmark test against active voxels in a densely filled box using namespace openvdb; FloatTree tree; tree.denseFill(CoordBBox::createCube(Coord(0), 256), 1.0f, true); tools::FindActiveValues<FloatTree> op(tree); //openvdb::util::CpuTimer timer("new test"); EXPECT_TRUE(op.noActiveValues(CoordBBox::createCube(Coord(256), 1))); //timer.stop(); } }// testBenchmarks
13,869
C++
42.208723
125
0.62016
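A hedged sketch of the two query interfaces covered by the test: the one-shot free functions and the reusable FindActiveValues object. The tree contents and bounding box below are illustrative; the calls mirror the test above.
#include <openvdb/openvdb.h>
#include <openvdb/tools/FindActiveValues.h>
#include <iostream>

int main()
{
    openvdb::initialize();
    openvdb::FloatTree tree(/*background=*/0.0f);
    const openvdb::CoordBBox bbox(openvdb::Coord(0), openvdb::Coord(63));
    tree.sparseFill(bbox, 1.0f, /*active=*/true);         // fill the box with active values

    // Convenience free function: one-shot query against the tree.
    std::cout << openvdb::tools::anyActiveValues(tree, bbox) << std::endl;     // prints 1

    // Reusable query object; call op.update(tree) if the tree is modified later.
    openvdb::tools::FindActiveValues<openvdb::FloatTree> op(tree);
    std::cout << (op.count(bbox) == bbox.volume()) << std::endl;               // prints 1
    return 0;
}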
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestMeshToVolume.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <vector> #include "gtest/gtest.h" #include <openvdb/openvdb.h> #include <openvdb/Exceptions.h> #include <openvdb/tools/MeshToVolume.h> #include <openvdb/util/Util.h> class TestMeshToVolume: public ::testing::Test { }; //////////////////////////////////////// TEST_F(TestMeshToVolume, testUtils) { /// Test nearestCoord openvdb::Vec3d xyz(0.7, 2.2, -2.7); openvdb::Coord ijk = openvdb::util::nearestCoord(xyz); EXPECT_TRUE(ijk[0] == 0 && ijk[1] == 2 && ijk[2] == -3); xyz = openvdb::Vec3d(-22.1, 4.6, 202.34); ijk = openvdb::util::nearestCoord(xyz); EXPECT_TRUE(ijk[0] == -23 && ijk[1] == 4 && ijk[2] == 202); /// Test the coordinate offset table for neghbouring voxels openvdb::Coord sum(0, 0, 0); unsigned int pX = 0, pY = 0, pZ = 0, mX = 0, mY = 0, mZ = 0; for (unsigned int i = 0; i < 26; ++i) { ijk = openvdb::util::COORD_OFFSETS[i]; sum += ijk; if (ijk[0] == 1) ++pX; else if (ijk[0] == -1) ++mX; if (ijk[1] == 1) ++pY; else if (ijk[1] == -1) ++mY; if (ijk[2] == 1) ++pZ; else if (ijk[2] == -1) ++mZ; } EXPECT_TRUE(sum == openvdb::Coord(0, 0, 0)); EXPECT_TRUE( pX == 9); EXPECT_TRUE( pY == 9); EXPECT_TRUE( pZ == 9); EXPECT_TRUE( mX == 9); EXPECT_TRUE( mY == 9); EXPECT_TRUE( mZ == 9); } TEST_F(TestMeshToVolume, testConversion) { using namespace openvdb; std::vector<Vec3s> points; std::vector<Vec4I> quads; // cube vertices points.push_back(Vec3s(2, 2, 2)); // 0 6--------7 points.push_back(Vec3s(5, 2, 2)); // 1 /| /| points.push_back(Vec3s(2, 5, 2)); // 2 2--------3 | points.push_back(Vec3s(5, 5, 2)); // 3 | | | | points.push_back(Vec3s(2, 2, 5)); // 4 | 4------|-5 points.push_back(Vec3s(5, 2, 5)); // 5 |/ |/ points.push_back(Vec3s(2, 5, 5)); // 6 0--------1 points.push_back(Vec3s(5, 5, 5)); // 7 // cube faces quads.push_back(Vec4I(0, 1, 3, 2)); // front quads.push_back(Vec4I(5, 4, 6, 7)); // back quads.push_back(Vec4I(0, 2, 6, 4)); // left quads.push_back(Vec4I(1, 5, 7, 3)); // right quads.push_back(Vec4I(2, 3, 7, 6)); // top quads.push_back(Vec4I(0, 4, 5, 1)); // bottom math::Transform::Ptr xform = math::Transform::createLinearTransform(); tools::QuadAndTriangleDataAdapter<Vec3s, Vec4I> mesh(points, quads); FloatGrid::Ptr grid = tools::meshToVolume<FloatGrid>(mesh, *xform); EXPECT_TRUE(grid.get() != NULL); EXPECT_EQ(int(GRID_LEVEL_SET), int(grid->getGridClass())); EXPECT_EQ(1, int(grid->baseTree().leafCount())); grid = tools::meshToLevelSet<FloatGrid>(*xform, points, quads); EXPECT_TRUE(grid.get() != NULL); EXPECT_EQ(int(GRID_LEVEL_SET), int(grid->getGridClass())); EXPECT_EQ(1, int(grid->baseTree().leafCount())); } TEST_F(TestMeshToVolume, testCreateLevelSetBox) { typedef openvdb::FloatGrid FloatGrid; typedef openvdb::Vec3s Vec3s; typedef openvdb::math::BBox<Vec3s> BBoxs; typedef openvdb::math::Transform Transform; BBoxs bbox(Vec3s(0.0, 0.0, 0.0), Vec3s(1.0, 1.0, 1.0)); Transform::Ptr transform = Transform::createLinearTransform(0.1); FloatGrid::Ptr grid = openvdb::tools::createLevelSetBox<FloatGrid>(bbox, *transform); double gridBackground = grid->background(); double expectedBackground = transform->voxelSize().x() * double(openvdb::LEVEL_SET_HALF_WIDTH); EXPECT_NEAR(expectedBackground, gridBackground, 1e-6); EXPECT_TRUE(grid->tree().leafCount() > 0); // test inside coord value openvdb::Coord ijk = transform->worldToIndexNodeCentered(openvdb::Vec3d(0.5, 0.5, 0.5)); EXPECT_TRUE(grid->tree().getValue(ijk) < 0.0f); // test outside coord value ijk = transform->worldToIndexNodeCentered(openvdb::Vec3d(1.5, 1.5, 
1.5)); EXPECT_TRUE(grid->tree().getValue(ijk) > 0.0f); }
4,063
C++
29.328358
99
0.58159
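The conversion test above builds a level set from a quad mesh; a compact sketch of the same call sequence follows. The cube vertices and faces are copied from the test, while the voxel size and return-code logic are illustrative.
#include <openvdb/openvdb.h>
#include <openvdb/tools/MeshToVolume.h>
#include <vector>

int main()
{
    openvdb::initialize();
    using namespace openvdb;

    // Eight cube vertices and six quad faces.
    std::vector<Vec3s> points{
        Vec3s(2,2,2), Vec3s(5,2,2), Vec3s(2,5,2), Vec3s(5,5,2),
        Vec3s(2,2,5), Vec3s(5,2,5), Vec3s(2,5,5), Vec3s(5,5,5)};
    std::vector<Vec4I> quads{
        Vec4I(0,1,3,2), Vec4I(5,4,6,7), Vec4I(0,2,6,4),
        Vec4I(1,5,7,3), Vec4I(2,3,7,6), Vec4I(0,4,5,1)};

    math::Transform::Ptr xform = math::Transform::createLinearTransform(/*voxelSize=*/1.0);
    FloatGrid::Ptr sdf = tools::meshToLevelSet<FloatGrid>(*xform, points, quads);
    return (sdf && sdf->getGridClass() == GRID_LEVEL_SET) ? 0 : 1;
}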
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestPointSample.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/openvdb.h> #include <openvdb/points/PointAttribute.h> #include <openvdb/points/PointConversion.h> #include <openvdb/points/PointSample.h> #include "util.h" #include <string> #include <vector> using namespace openvdb; class TestPointSample: public ::testing::Test { public: void SetUp() override { initialize(); } void TearDown() override { uninitialize(); } }; // class TestPointSample namespace { /// Utility function to quickly create a very simple grid (with specified value type), set a value /// at its origin and then create and sample to an attribute /// template <typename ValueType> typename points::AttributeHandle<ValueType>::Ptr testAttribute(points::PointDataGrid& points, const std::string& attributeName, const math::Transform::Ptr xform, const ValueType& val) { using TreeT = typename tree::Tree4<ValueType, 5, 4, 3>::Type; using GridT = Grid<TreeT>; typename GridT::Ptr grid = GridT::create(); grid->setTransform(xform); grid->tree().setValue(Coord(0,0,0), val); points::boxSample(points, *grid, attributeName); return(points::AttributeHandle<ValueType>::create( points.tree().cbeginLeaf()->attributeArray(attributeName))); } } // anonymous namespace TEST_F(TestPointSample, testPointSample) { using points::PointDataGrid; using points::NullCodec; const float voxelSize = 0.1f; math::Transform::Ptr transform(math::Transform::createLinearTransform(voxelSize)); { // check that all supported grid types can be sampled. // This check will use very basic grids with a point at a cell-centered positions // create test point grid with a single point std::vector<Vec3f> pointPositions{Vec3f(0.0f, 0.0f, 0.0f)}; PointDataGrid::Ptr points = points::createPointDataGrid<NullCodec, PointDataGrid, Vec3f>( pointPositions, *transform); EXPECT_TRUE(points); // bool points::AttributeHandle<bool>::Ptr boolHandle = testAttribute<bool>(*points, "test_bool", transform, true); EXPECT_TRUE(boolHandle->get(0)); // int16 #if (defined _MSC_VER) || (defined __INTEL_COMPILER) || (defined __clang__) // GCC warns warns of narrowing conversions from int to int16_t, // and GCC 4.8, at least, ignores the -Wconversion suppression pragma. // So for now, skip this test if compiling with GCC. 
points::AttributeHandle<int16_t>::Ptr int16Handle = testAttribute<int16_t>(*points, "test_int16", transform, int16_t(10)); EXPECT_EQ(int16Handle->get(0), int16_t(10)); #endif // int32 points::AttributeHandle<Int32>::Ptr int32Handle = testAttribute<Int32>(*points, "test_Int32", transform, Int32(3)); EXPECT_EQ(Int32(3), int32Handle->get(0)); // int64 points::AttributeHandle<Int64>::Ptr int64Handle = testAttribute<Int64>(*points, "test_Int64", transform, Int64(2)); EXPECT_EQ(Int64(2), int64Handle->get(0)); // double points::AttributeHandle<double>::Ptr doubleHandle = testAttribute<double>(*points, "test_double", transform, 4.0); EXPECT_EQ(4.0, doubleHandle->get(0)); // Vec3i points::AttributeHandle<math::Vec3i>::Ptr vec3iHandle = testAttribute<Vec3i>(*points, "test_vec3i", transform, math::Vec3i(9, 8, 7)); EXPECT_EQ(vec3iHandle->get(0), math::Vec3i(9, 8, 7)); // Vec3f points::AttributeHandle<Vec3f>::Ptr vec3fHandle = testAttribute<Vec3f>(*points, "test_vec3f", transform, Vec3f(111.0f, 222.0f, 333.0f)); EXPECT_EQ(vec3fHandle->get(0), Vec3f(111.0f, 222.0f, 333.0f)); // Vec3d points::AttributeHandle<Vec3d>::Ptr vec3dHandle = testAttribute<Vec3d>(*points, "test_vec3d", transform, Vec3d(1.0, 2.0, 3.0)); EXPECT_TRUE(math::isApproxEqual(Vec3d(1.0, 2.0, 3.0), vec3dHandle->get(0))); } { // empty source grid std::vector<Vec3f> pointPositions{Vec3f(0.0f, 0.0f, 0.0f)}; PointDataGrid::Ptr points = points::createPointDataGrid<NullCodec, PointDataGrid, Vec3f>( pointPositions, *transform); points::appendAttribute<Vec3f>(points->tree(), "test"); VectorGrid::Ptr testGrid = VectorGrid::create(); points::boxSample(*points, *testGrid, "test"); points::AttributeHandle<Vec3f>::Ptr handle = points::AttributeHandle<Vec3f>::create( points->tree().cbeginLeaf()->attributeArray("test")); EXPECT_TRUE(math::isApproxEqual(Vec3f(0.0f, 0.0f, 0.0f), handle->get(0))); } { // empty point grid std::vector<Vec3f> pointPositions; PointDataGrid::Ptr points = points::createPointDataGrid<NullCodec, PointDataGrid, Vec3f>( pointPositions, *transform); EXPECT_TRUE(points); FloatGrid::Ptr testGrid = FloatGrid::create(1.0); points::appendAttribute<float>(points->tree(), "test"); EXPECT_NO_THROW(points::boxSample(*points, *testGrid, "test")); } { // exception if one tries to sample to "P" attribute std::vector<Vec3f> pointPositions{Vec3f(0.0f, 0.0f, 0.0f)}; PointDataGrid::Ptr points = points::createPointDataGrid<NullCodec, PointDataGrid, Vec3f>( pointPositions, *transform); EXPECT_TRUE(points); FloatGrid::Ptr testGrid = FloatGrid::create(1.0); EXPECT_THROW(points::boxSample(*points, *testGrid, "P"), RuntimeError); // name of the grid is used if no attribute is provided testGrid->setName("test_grid"); EXPECT_TRUE(!points->tree().cbeginLeaf()->hasAttribute("test_grid")); points::boxSample(*points, *testGrid); EXPECT_TRUE(points->tree().cbeginLeaf()->hasAttribute("test_grid")); // name fails if the grid is called "P" testGrid->setName("P"); EXPECT_THROW(points::boxSample(*points, *testGrid), RuntimeError); } { // test non-cell centered points with scalar data and matching transform // use various sampling orders std::vector<Vec3f> pointPositions{Vec3f(0.03f, 0.0f, 0.0f), Vec3f(0.11f, 0.03f, 0.0f)}; PointDataGrid::Ptr points = points::createPointDataGrid<NullCodec, PointDataGrid, Vec3f>( pointPositions, *transform); EXPECT_TRUE(points); FloatGrid::Ptr testGrid = FloatGrid::create(); testGrid->setTransform(transform); testGrid->tree().setValue(Coord(-1,0,0), -1.0f); testGrid->tree().setValue(Coord(0,0,0), 1.0f); 
testGrid->tree().setValue(Coord(1,0,0), 2.0f); testGrid->tree().setValue(Coord(2,0,0), 4.0f); testGrid->tree().setValue(Coord(0,1,0), 3.0f); points::appendAttribute<float>(points->tree(), "test"); points::AttributeHandle<float>::Ptr handle = points::AttributeHandle<float>::create( points->tree().cbeginLeaf()->attributeArray("test")); EXPECT_TRUE(handle.get()); FloatGrid::ConstAccessor testGridAccessor = testGrid->getConstAccessor(); // check nearest-neighbour sampling points::pointSample(*points, *testGrid, "test"); float expected = tools::PointSampler::sample(testGridAccessor, Vec3f(0.3f, 0.0f, 0.0f)); EXPECT_NEAR(expected, handle->get(0), 1e-6); expected = tools::PointSampler::sample(testGridAccessor, Vec3f(1.1f, 0.3f, 0.0f)); EXPECT_NEAR(expected, handle->get(1), 1e-6); // check tri-linear sampling points::boxSample(*points, *testGrid, "test"); expected = tools::BoxSampler::sample(testGridAccessor, Vec3f(0.3f, 0.0f, 0.0f)); EXPECT_NEAR(expected, handle->get(0), 1e-6); expected = tools::BoxSampler::sample(testGridAccessor, Vec3f(1.1f, 0.3f, 0.0f)); EXPECT_NEAR(expected, handle->get(1), 1e-6); // check tri-quadratic sampling points::quadraticSample(*points, *testGrid, "test"); expected = tools::QuadraticSampler::sample(testGridAccessor, Vec3f(0.3f, 0.0f, 0.0f)); EXPECT_NEAR(expected, handle->get(0), 1e-6); expected = tools::QuadraticSampler::sample(testGridAccessor, Vec3f(1.1f, 0.3f, 0.0f)); EXPECT_NEAR(expected, handle->get(1), 1e-6); } { // staggered grid and mismatching transforms std::vector<Vec3f> pointPositions{Vec3f(0.03f, 0.0f, 0.0f), Vec3f(0.0f, 0.03f, 0.0f), Vec3f(0.0f, 0.0f, 0.03f),}; PointDataGrid::Ptr points = points::createPointDataGrid<points::NullCodec, PointDataGrid, Vec3f>(pointPositions, *transform); EXPECT_TRUE(points); VectorGrid::Ptr testGrid = VectorGrid::create(); testGrid->setGridClass(GRID_STAGGERED); testGrid->tree().setValue(Coord(0,0,0), Vec3f(1.0f, 2.0f, 3.0f)); testGrid->tree().setValue(Coord(0,1,0), Vec3f(1.5f, 2.5f, 3.5f)); testGrid->tree().setValue(Coord(0,0,1), Vec3f(2.0f, 3.0f, 4.0)); points::appendAttribute<Vec3f>(points->tree(), "test"); points::AttributeHandle<Vec3f>::Ptr handle = points::AttributeHandle<Vec3f>::create( points->tree().cbeginLeaf()->attributeArray("test")); EXPECT_TRUE(handle.get()); Vec3fGrid::ConstAccessor testGridAccessor = testGrid->getConstAccessor(); // nearest-neighbour staggered sampling points::pointSample(*points, *testGrid, "test"); Vec3f expected = tools::StaggeredPointSampler::sample(testGridAccessor, Vec3f(0.03f, 0.0f, 0.0f)); EXPECT_TRUE(math::isApproxEqual(expected, handle->get(0))); expected = tools::StaggeredPointSampler::sample(testGridAccessor, Vec3f(0.0f, 0.03f, 0.0f)); EXPECT_TRUE(math::isApproxEqual(expected, handle->get(1))); // tri-linear staggered sampling points::boxSample(*points, *testGrid, "test"); expected = tools::StaggeredBoxSampler::sample(testGridAccessor, Vec3f(0.03f, 0.0f, 0.0f)); EXPECT_TRUE(math::isApproxEqual(expected, handle->get(0))); expected = tools::StaggeredBoxSampler::sample(testGridAccessor, Vec3f(0.0f, 0.03f, 0.0f)); EXPECT_TRUE(math::isApproxEqual(expected, handle->get(1))); // tri-quadratic staggered sampling points::quadraticSample(*points, *testGrid, "test"); expected = tools::StaggeredQuadraticSampler::sample(testGridAccessor, Vec3f(0.03f, 0.0f, 0.0f)); EXPECT_TRUE(math::isApproxEqual(expected, handle->get(0))); expected = tools::StaggeredQuadraticSampler::sample(testGridAccessor, Vec3f(0.0f, 0.03f, 0.0f)); EXPECT_TRUE(math::isApproxEqual(expected, handle->get(1))); } { // value 
type of grid and attribute type don't match std::vector<Vec3f> pointPositions{Vec3f(0.3f, 0.0f, 0.0f)}; math::Transform::Ptr transform2(math::Transform::createLinearTransform(1.0f)); PointDataGrid::Ptr points = points::createPointDataGrid<NullCodec, PointDataGrid, Vec3f>(pointPositions, *transform2); EXPECT_TRUE(points); FloatGrid::Ptr testFloatGrid = FloatGrid::create(); testFloatGrid->setTransform(transform2); testFloatGrid->tree().setValue(Coord(0,0,0), 1.1f); testFloatGrid->tree().setValue(Coord(1,0,0), 2.8f); testFloatGrid->tree().setValue(Coord(0,1,0), 3.4f); points::appendAttribute<int>(points->tree(), "testint"); points::boxSample(*points, *testFloatGrid, "testint"); points::AttributeHandle<int>::Ptr handle = points::AttributeHandle<int>::create( points->tree().cbeginLeaf()->attributeArray("testint")); EXPECT_TRUE(handle.get()); FloatGrid::ConstAccessor testFloatGridAccessor = testFloatGrid->getConstAccessor(); // check against box sampler values const float sampledValue = tools::BoxSampler::sample(testFloatGridAccessor, Vec3f(0.3f, 0.0f, 0.0f)); const int expected = static_cast<int>(math::Round(sampledValue)); EXPECT_EQ(expected, handle->get(0)); // check mismatching grid type using vector types Vec3fGrid::Ptr testVec3fGrid = Vec3fGrid::create(); testVec3fGrid->setTransform(transform2); testVec3fGrid->tree().setValue(Coord(0,0,0), Vec3f(1.0f, 2.0f, 3.0f)); testVec3fGrid->tree().setValue(Coord(1,0,0), Vec3f(1.5f, 2.5f, 3.5f)); testVec3fGrid->tree().setValue(Coord(0,1,0), Vec3f(2.0f, 3.0f, 4.0f)); points::appendAttribute<Vec3d>(points->tree(), "testvec3d"); points::boxSample(*points, *testVec3fGrid, "testvec3d"); points::AttributeHandle<Vec3d>::Ptr handle2 = points::AttributeHandle<Vec3d>::create( points->tree().cbeginLeaf()->attributeArray("testvec3d")); Vec3fGrid::ConstAccessor testVec3fGridAccessor = testVec3fGrid->getConstAccessor(); const Vec3d expected2 = static_cast<Vec3d>(tools::BoxSampler::sample(testVec3fGridAccessor, Vec3f(0.3f, 0.0f, 0.0f))); EXPECT_TRUE(math::isExactlyEqual(expected2, handle2->get(0))); // check implicit casting of types for sampling using sampleGrid() points::appendAttribute<Vec3d>(points->tree(), "testvec3d2"); points::sampleGrid(/*linear*/1, *points, *testVec3fGrid, "testvec3d2"); points::AttributeHandle<Vec3d>::Ptr handle3 = points::AttributeHandle<Vec3d>::create( points->tree().cbeginLeaf()->attributeArray("testvec3d2")); EXPECT_TRUE(math::isExactlyEqual(expected2, handle3->get(0))); // check explicit casting of types for sampling using sampleGrid() points::sampleGrid<PointDataGrid, Vec3SGrid, Vec3d>( /*linear*/1, *points, *testVec3fGrid, "testvec3d3"); points::AttributeHandle<Vec3d>::Ptr handle4 = points::AttributeHandle<Vec3d>::create( points->tree().cbeginLeaf()->attributeArray("testvec3d3")); EXPECT_TRUE(math::isExactlyEqual(expected2, handle4->get(0))); // check invalid casting of types points::appendAttribute<float>(points->tree(), "testfloat"); try { points::boxSample(*points, *testVec3fGrid, "testfloat"); FAIL() << "expected exception not thrown:" " cannot sample a vec3s grid on to a float attribute"; } catch (std::exception&) { } catch (...) 
{ FAIL() << "expected std::exception or derived"; } // check invalid existing attribute type (Vec4s attribute) points::TypedAttributeArray<Vec4s>::registerType(); points::appendAttribute<Vec4s>(points->tree(), "testv4f"); EXPECT_THROW(points::boxSample(*points, *testVec3fGrid, "testv4f"), TypeError); } { // sample a non-standard grid type (a Vec4<float> grid) using Vec4STree = tree::Tree4<Vec4s, 5, 4, 3>::Type; using Vec4SGrid = Grid<Vec4STree>; Vec4SGrid::registerGrid(); points::TypedAttributeArray<Vec4s>::registerType(); std::vector<Vec3f> pointPositions{Vec3f(0.3f, 0.0f, 0.0f)}; math::Transform::Ptr transform2(math::Transform::createLinearTransform(1.0f)); PointDataGrid::Ptr points = points::createPointDataGrid<NullCodec, PointDataGrid, Vec3f>(pointPositions, *transform2); auto testVec4fGrid = Vec4SGrid::create(); testVec4fGrid->setTransform(transform2); testVec4fGrid->tree().setValue(Coord(0,0,0), Vec4s(1.0f, 2.0f, 3.0f, 4.0f)); testVec4fGrid->tree().setValue(Coord(1,0,0), Vec4s(1.5f, 2.5f, 3.5f, 4.5f)); testVec4fGrid->tree().setValue(Coord(0,1,0), Vec4s(2.0f, 3.0f, 4.0f, 5.0f)); points::boxSample(*points, *testVec4fGrid, "testvec4f"); points::AttributeHandle<Vec4s>::Ptr handle2 = points::AttributeHandle<Vec4s>::create( points->tree().cbeginLeaf()->attributeArray("testvec4f")); Vec4SGrid::ConstAccessor testVec4fGridAccessor = testVec4fGrid->getConstAccessor(); const Vec4s expected2 = static_cast<Vec4s>(tools::BoxSampler::sample(testVec4fGridAccessor, Vec3f(0.3f, 0.0f, 0.0f))); EXPECT_TRUE(math::isExactlyEqual(expected2, handle2->get(0))); } } TEST_F(TestPointSample, testPointSampleWithGroups) { using points::PointDataGrid; std::vector<Vec3f> pointPositions{Vec3f(0.03f, 0.0f, 0.0f), Vec3f(0.0f, 0.03f, 0.0f), Vec3f(0.0f, 0.0f, 0.0f)}; math::Transform::Ptr transform(math::Transform::createLinearTransform(0.1f)); PointDataGrid::Ptr points = points::createPointDataGrid<points::NullCodec, PointDataGrid, Vec3f>(pointPositions, *transform); EXPECT_TRUE(points); DoubleGrid::Ptr testGrid = DoubleGrid::create(); testGrid->setTransform(transform); testGrid->tree().setValue(Coord(0,0,0), 1.0); testGrid->tree().setValue(Coord(1,0,0), 2.0); testGrid->tree().setValue(Coord(0,1,0), 3.0); points::appendGroup(points->tree(), "group1"); auto leaf = points->tree().beginLeaf(); points::GroupWriteHandle group1Handle = leaf->groupWriteHandle("group1"); group1Handle.set(0, true); group1Handle.set(1, false); group1Handle.set(2, true); points::appendAttribute<double>(points->tree(), "test_include"); std::vector<std::string> includeGroups({"group1"}); std::vector<std::string> excludeGroups; points::MultiGroupFilter filter1(includeGroups, excludeGroups, leaf->attributeSet()); points::boxSample(*points, *testGrid, "test_include", filter1); points::AttributeHandle<double>::Ptr handle = points::AttributeHandle<double>::create( points->tree().cbeginLeaf()->attributeArray("test_include")); DoubleGrid::ConstAccessor testGridAccessor = testGrid->getConstAccessor(); double expected = tools::BoxSampler::sample(testGridAccessor, Vec3f(0.3f, 0.0f, 0.0f)); EXPECT_NEAR(expected, handle->get(0), 1e-6); EXPECT_NEAR(0.0, handle->get(1), 1e-6); expected = tools::BoxSampler::sample(testGridAccessor, Vec3f(0.0f, 0.0f, 0.0f)); EXPECT_NEAR(expected, handle->get(2), 1e-6); points::appendAttribute<double>(points->tree(), "test_exclude"); // test with group treated as "exclusion" group points::MultiGroupFilter filter2(excludeGroups, includeGroups, leaf->attributeSet()); points::boxSample(*points, *testGrid, "test_exclude", filter2); 
points::AttributeHandle<double>::Ptr handle2 = points::AttributeHandle<double>::create( points->tree().cbeginLeaf()->attributeArray("test_exclude")); EXPECT_NEAR(0.0, handle2->get(0), 1e-6); EXPECT_NEAR(0.0, handle2->get(2), 1e-6); expected = tools::BoxSampler::sample(testGridAccessor, Vec3f(0.0f, 0.3f, 0.0f)); EXPECT_NEAR(expected, handle2->get(1), 1e-6); }
19,073
C++
34.853383
100
0.641745
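A self-contained sketch of the boxSample() workflow verified above: create a point grid, sample a float grid at each point position, and read the result back through an attribute handle. The grid name "density" and the stored value are placeholders; the API usage mirrors the test.
#include <openvdb/openvdb.h>
#include <openvdb/points/PointConversion.h>
#include <openvdb/points/PointSample.h>
#include <vector>

int main()
{
    openvdb::initialize();
    using namespace openvdb;
    using points::PointDataGrid;

    math::Transform::Ptr xform = math::Transform::createLinearTransform(/*voxelSize=*/0.1);
    std::vector<Vec3f> positions{Vec3f(0.0f, 0.0f, 0.0f)};
    PointDataGrid::Ptr pts =
        points::createPointDataGrid<points::NullCodec, PointDataGrid, Vec3f>(positions, *xform);

    FloatGrid::Ptr density = FloatGrid::create(/*background=*/0.0f);
    density->setTransform(xform);
    density->tree().setValue(Coord(0, 0, 0), 1.0f);
    density->setName("density");                          // attribute is named after the grid

    points::boxSample(*pts, *density);                    // tri-linear sampling at each point

    auto handle = points::AttributeHandle<float>::create(
        pts->tree().cbeginLeaf()->attributeArray("density"));
    return handle->get(0) > 0.0f ? 0 : 1;
}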
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestStringMetadata.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/Metadata.h> class TestStringMetadata : public ::testing::Test { }; TEST_F(TestStringMetadata, test) { using namespace openvdb; Metadata::Ptr m(new StringMetadata("testing")); Metadata::Ptr m2 = m->copy(); EXPECT_TRUE(dynamic_cast<StringMetadata*>(m.get()) != 0); EXPECT_TRUE(dynamic_cast<StringMetadata*>(m2.get()) != 0); EXPECT_TRUE(m->typeName().compare("string") == 0); EXPECT_TRUE(m2->typeName().compare("string") == 0); StringMetadata *s = dynamic_cast<StringMetadata*>(m.get()); EXPECT_TRUE(s->value().compare("testing") == 0); s->value() = "testing2"; EXPECT_TRUE(s->value().compare("testing2") == 0); m2->copy(*s); s = dynamic_cast<StringMetadata*>(m2.get()); EXPECT_TRUE(s->value().compare("testing2") == 0); }
946
C++
25.305555
63
0.647992
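A tiny sketch of the polymorphic copy semantics the test checks: copy() produces an independent clone through the base-class interface, and value() exposes the stored string for mutation. The "hello"/"world" strings are placeholders.
#include <openvdb/Metadata.h>

int main()
{
    openvdb::Metadata::Ptr a(new openvdb::StringMetadata("hello"));
    openvdb::Metadata::Ptr b = a->copy();                            // independent clone
    static_cast<openvdb::StringMetadata&>(*a).value() = "world";     // mutate the original only
    return static_cast<openvdb::StringMetadata&>(*b).value() == "hello" ? 0 : 1;
}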
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestInternalOrigin.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/openvdb.h> #include <set> class TestInternalOrigin: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; TEST_F(TestInternalOrigin, test) { std::set<openvdb::Coord> indices; indices.insert(openvdb::Coord( 0, 0, 0)); indices.insert(openvdb::Coord( 1, 0, 0)); indices.insert(openvdb::Coord( 0,100, 8)); indices.insert(openvdb::Coord(-9, 0, 8)); indices.insert(openvdb::Coord(32, 0, 16)); indices.insert(openvdb::Coord(33, -5, 16)); indices.insert(openvdb::Coord(42,707,-35)); indices.insert(openvdb::Coord(43, 17, 64)); typedef openvdb::tree::Tree4<float,5,4,3>::Type FloatTree4; FloatTree4 tree(0.0f); std::set<openvdb::Coord>::iterator iter=indices.begin(); for (int n = 0; iter != indices.end(); ++n, ++iter) { tree.setValue(*iter, float(1.0 + double(n) * 0.5)); } openvdb::Coord C3, G; typedef FloatTree4::RootNodeType Node0; typedef Node0::ChildNodeType Node1; typedef Node1::ChildNodeType Node2; typedef Node2::LeafNodeType Node3; for (Node0::ChildOnCIter iter0=tree.root().cbeginChildOn(); iter0; ++iter0) {//internal 1 openvdb::Coord C0=iter0->origin(); iter0.getCoord(G); EXPECT_EQ(C0,G); for (Node1::ChildOnCIter iter1=iter0->cbeginChildOn(); iter1; ++iter1) {//internal 2 openvdb::Coord C1=iter1->origin(); iter1.getCoord(G); EXPECT_EQ(C1,G); EXPECT_TRUE(C0 <= C1); EXPECT_TRUE(C1 <= C0 + openvdb::Coord(Node1::DIM,Node1::DIM,Node1::DIM)); for (Node2::ChildOnCIter iter2=iter1->cbeginChildOn(); iter2; ++iter2) {//leafs openvdb::Coord C2=iter2->origin(); iter2.getCoord(G); EXPECT_EQ(C2,G); EXPECT_TRUE(C1 <= C2); EXPECT_TRUE(C2 <= C1 + openvdb::Coord(Node2::DIM,Node2::DIM,Node2::DIM)); for (Node3::ValueOnCIter iter3=iter2->cbeginValueOn(); iter3; ++iter3) {//leaf voxels iter3.getCoord(G); iter = indices.find(G); EXPECT_TRUE(iter != indices.end()); indices.erase(iter); } } } } EXPECT_TRUE(indices.size() == 0); }
2,520
C++
36.073529
101
0.58254
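The test above validates node origins while walking the tree hierarchy; the sketch below shows the same ChildOn iteration pattern on a standalone tree. The voxel coordinate and value are illustrative; the iterator types mirror the test.
#include <openvdb/openvdb.h>
#include <iostream>

int main()
{
    openvdb::initialize();
    using Tree4 = openvdb::tree::Tree4<float, 5, 4, 3>::Type;
    Tree4 tree(/*background=*/0.0f);
    tree.setValue(openvdb::Coord(42, 707, -35), 1.0f);

    using Node1 = Tree4::RootNodeType::ChildNodeType;     // upper internal node
    using Node2 = Node1::ChildNodeType;                   // lower internal node
    for (auto it1 = tree.root().cbeginChildOn(); it1; ++it1) {                  // internal 1
        std::cout << "upper internal origin " << it1->origin() << std::endl;
        for (Node1::ChildOnCIter it2 = it1->cbeginChildOn(); it2; ++it2) {      // internal 2
            for (Node2::ChildOnCIter it3 = it2->cbeginChildOn(); it3; ++it3) {  // leaf nodes
                std::cout << "leaf origin " << it3->origin() << std::endl;
            }
        }
    }
    return 0;
}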
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestGradient.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <openvdb/Types.h> #include <openvdb/openvdb.h> #include <openvdb/tools/GridOperators.h> #include "util.h" // for unittest_util::makeSphere() #include "gtest/gtest.h" #include <sstream> class TestGradient: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; TEST_F(TestGradient, testISGradient) { using namespace openvdb; using AccessorType = FloatGrid::ConstAccessor; FloatGrid::Ptr grid = createGrid<FloatGrid>(/*background=*/5.0); FloatTree& tree = grid->tree(); const openvdb::Coord dim(64,64,64); const openvdb::Vec3f center(35.0f ,30.0f, 40.0f); const float radius=10.0f; unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(tree.activeVoxelCount())); const Coord xyz(10, 20, 30); // Index Space Gradients: random access and stencil version AccessorType inAccessor = grid->getConstAccessor(); Vec3f result; result = math::ISGradient<math::CD_2ND>::result(inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::ISGradient<math::CD_4TH>::result(inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::ISGradient<math::CD_6TH>::result(inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::ISGradient<math::FD_1ST>::result(inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.02); result = math::ISGradient<math::FD_2ND>::result(inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::ISGradient<math::FD_3RD>::result(inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::ISGradient<math::BD_1ST>::result(inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.02); result = math::ISGradient<math::BD_2ND>::result(inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::ISGradient<math::BD_3RD>::result(inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::ISGradient<math::FD_WENO5>::result(inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::ISGradient<math::BD_WENO5>::result(inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } TEST_F(TestGradient, testISGradientStencil) { using namespace openvdb; FloatGrid::Ptr grid = createGrid<FloatGrid>(/*background=*/5.0); FloatTree& tree = grid->tree(); const openvdb::Coord dim(64,64,64); const openvdb::Vec3f center(35.0f ,30.0f, 40.0f); const float radius = 10.0f; unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(tree.activeVoxelCount())); const Coord xyz(10, 20, 30); // Index Space Gradients: stencil version Vec3f result; // this stencil is large enough for all thie different schemes used // in this test math::NineteenPointStencil<FloatGrid> stencil(*grid); stencil.moveTo(xyz); result = math::ISGradient<math::CD_2ND>::result(stencil); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::ISGradient<math::CD_4TH>::result(stencil); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::ISGradient<math::CD_6TH>::result(stencil); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::ISGradient<math::FD_1ST>::result(stencil); 
EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.02); result = math::ISGradient<math::FD_2ND>::result(stencil); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::ISGradient<math::FD_3RD>::result(stencil); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::ISGradient<math::BD_1ST>::result(stencil); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.02); result = math::ISGradient<math::BD_2ND>::result(stencil); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::ISGradient<math::BD_3RD>::result(stencil); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::ISGradient<math::FD_WENO5>::result(stencil); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::ISGradient<math::BD_WENO5>::result(stencil); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } TEST_F(TestGradient, testWSGradient) { using namespace openvdb; using AccessorType = FloatGrid::ConstAccessor; double voxel_size = 0.5; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/5.0); grid->setTransform(math::Transform::createLinearTransform(voxel_size)); EXPECT_TRUE(grid->empty()); const openvdb::Coord dim(32,32,32); const openvdb::Vec3f center(6.0f, 8.0f, 10.0f);//i.e. (12,16,20) in index space const float radius = 10.0f; unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!grid->empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(grid->activeVoxelCount())); const Coord xyz(11, 17, 26); AccessorType inAccessor = grid->getConstAccessor(); // try with a map // Index Space Gradients: stencil version Vec3f result; math::MapBase::Ptr rotated_map; { math::UniformScaleMap map(voxel_size); result = math::Gradient<math::UniformScaleMap, math::CD_2ND>::result( map, inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); rotated_map = map.preRotate(1.5, math::X_AXIS); // verify the new map is an affine map EXPECT_TRUE(rotated_map->type() == math::AffineMap::mapType()); math::AffineMap::Ptr affine_map = StaticPtrCast<math::AffineMap, math::MapBase>(rotated_map); // the gradient should have the same length even after rotation result = math::Gradient<math::AffineMap, math::CD_2ND>::result( *affine_map, inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::Gradient<math::AffineMap, math::CD_4TH>::result( *affine_map, inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } { math::UniformScaleTranslateMap map(voxel_size, Vec3d(0,0,0)); result = math::Gradient<math::UniformScaleTranslateMap, math::CD_2ND>::result( map, inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } { math::ScaleTranslateMap map(Vec3d(voxel_size, voxel_size, voxel_size), Vec3d(0,0,0)); result = math::Gradient<math::ScaleTranslateMap, math::CD_2ND>::result( map, inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } { // this map has no scale, expect result/voxel_spaceing = 1 math::TranslationMap map; result = math::Gradient<math::TranslationMap, math::CD_2ND>::result(map, inAccessor, xyz); EXPECT_NEAR(voxel_size, result.length(), /*tolerance=*/0.01); } { // test the GenericMap Grid interface math::GenericMap generic_map(*grid); result = math::Gradient<math::GenericMap, math::CD_2ND>::result( generic_map, inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } { // test the GenericMap Transform interface math::GenericMap generic_map(grid->transform()); result = math::Gradient<math::GenericMap, 
math::CD_2ND>::result( generic_map, inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } { // test the GenericMap Map interface math::GenericMap generic_map(rotated_map); result = math::Gradient<math::GenericMap, math::CD_2ND>::result( generic_map, inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } { // test a map with non-uniform SCALING AND ROTATION Vec3d voxel_sizes(0.25, 0.45, 0.75); math::MapBase::Ptr base_map( new math::ScaleMap(voxel_sizes)); // apply rotation rotated_map = base_map->preRotate(1.5, math::X_AXIS); grid->setTransform(math::Transform::Ptr(new math::Transform(rotated_map))); // remake the sphere unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); math::AffineMap::Ptr affine_map = StaticPtrCast<math::AffineMap, math::MapBase>(rotated_map); // math::ScaleMap map(voxel_sizes); result = math::Gradient<math::AffineMap, math::CD_2ND>::result( *affine_map, inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } { // test a map with non-uniform SCALING Vec3d voxel_sizes(0.25, 0.45, 0.75); math::MapBase::Ptr base_map( new math::ScaleMap(voxel_sizes)); grid->setTransform(math::Transform::Ptr(new math::Transform(base_map))); // remake the sphere unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); math::ScaleMap::Ptr scale_map = StaticPtrCast<math::ScaleMap, math::MapBase>(base_map); // math::ScaleMap map(voxel_sizes); result = math::Gradient<math::ScaleMap, math::CD_2ND>::result(*scale_map, inAccessor, xyz); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } } TEST_F(TestGradient, testWSGradientStencilFrustum) { using namespace openvdb; // Construct a frustum that matches the one in TestMaps::testFrustum() openvdb::BBoxd bbox(Vec3d(0), Vec3d(100)); math::NonlinearFrustumMap frustum(bbox, 1./6., 5); /// frustum will have depth, far plane - near plane = 5 /// the frustum has width 1 in the front and 6 in the back Vec3d trans(2,2,2); math::NonlinearFrustumMap::Ptr map = StaticPtrCast<math::NonlinearFrustumMap, math::MapBase>( frustum.preScale(Vec3d(10,10,10))->postTranslate(trans)); // Create a grid with this frustum FloatGrid::Ptr grid = FloatGrid::create(/*background=*/0.f); math::Transform::Ptr transform = math::Transform::Ptr( new math::Transform(map)); grid->setTransform(transform); FloatGrid::Accessor acc = grid->getAccessor(); // Totally fill the interior of the frustum with word space distances // from its center. 
math::Vec3d isCenter(.5 * 101, .5 * 101, .5 * 101); math::Vec3d wsCenter = map->applyMap(isCenter); math::Coord ijk; // convert to IntType Vec3i min(bbox.min()); Vec3i max = Vec3i(bbox.max()) + Vec3i(1, 1, 1); for (ijk[0] = min.x(); ijk[0] < max.x(); ++ijk[0]) { for (ijk[1] = min.y(); ijk[1] < max.y(); ++ijk[1]) { for (ijk[2] = min.z(); ijk[2] < max.z(); ++ijk[2]) { const math::Vec3d wsLocation = transform->indexToWorld(ijk); const float dis = float((wsLocation - wsCenter).length()); acc.setValue(ijk, dis); } } } { // test at location 10, 10, 10 in index space math::Coord xyz(10, 10, 10); math::Vec3s result = math::Gradient<math::NonlinearFrustumMap, math::CD_2ND>::result(*map, acc, xyz); // The Gradient should be unit lenght for this case EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); math::Vec3d wsVec = transform->indexToWorld(xyz); math::Vec3d direction = (wsVec - wsCenter); direction.normalize(); // test the actual direction of the gradient EXPECT_TRUE(direction.eq(result, 0.01 /*tolerance*/)); } { // test at location 30, 30, 60 in index space math::Coord xyz(30, 30, 60); math::Vec3s result = math::Gradient<math::NonlinearFrustumMap, math::CD_2ND>::result(*map, acc, xyz); // The Gradient should be unit lenght for this case EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); math::Vec3d wsVec = transform->indexToWorld(xyz); math::Vec3d direction = (wsVec - wsCenter); direction.normalize(); // test the actual direction of the gradient EXPECT_TRUE(direction.eq(result, 0.01 /*tolerance*/)); } } TEST_F(TestGradient, testWSGradientStencil) { using namespace openvdb; double voxel_size = 0.5; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/5.0); grid->setTransform(math::Transform::createLinearTransform(voxel_size)); EXPECT_TRUE(grid->empty()); const openvdb::Coord dim(32,32,32); const openvdb::Vec3f center(6.0f, 8.0f ,10.0f);//i.e. 
(12,16,20) in index space const float radius = 10; unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!grid->empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(grid->activeVoxelCount())); const Coord xyz(11, 17, 26); // try with a map math::SevenPointStencil<FloatGrid> stencil(*grid); stencil.moveTo(xyz); math::SecondOrderDenseStencil<FloatGrid> dense_2ndOrder(*grid); dense_2ndOrder.moveTo(xyz); math::FourthOrderDenseStencil<FloatGrid> dense_4thOrder(*grid); dense_4thOrder.moveTo(xyz); Vec3f result; math::MapBase::Ptr rotated_map; { math::UniformScaleMap map(voxel_size); result = math::Gradient<math::UniformScaleMap, math::CD_2ND>::result( map, stencil); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); rotated_map = map.preRotate(1.5, math::X_AXIS); // verify the new map is an affine map EXPECT_TRUE(rotated_map->type() == math::AffineMap::mapType()); math::AffineMap::Ptr affine_map = StaticPtrCast<math::AffineMap, math::MapBase>(rotated_map); // the gradient should have the same length even after rotation result = math::Gradient<math::AffineMap, math::CD_2ND>::result( *affine_map, dense_2ndOrder); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); result = math::Gradient<math::AffineMap, math::CD_4TH>::result( *affine_map, dense_4thOrder); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } { math::UniformScaleTranslateMap map(voxel_size, Vec3d(0,0,0)); result = math::Gradient<math::UniformScaleTranslateMap, math::CD_2ND>::result(map, stencil); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } { math::ScaleTranslateMap map(Vec3d(voxel_size, voxel_size, voxel_size), Vec3d(0,0,0)); result = math::Gradient<math::ScaleTranslateMap, math::CD_2ND>::result(map, stencil); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } { math::TranslationMap map; result = math::Gradient<math::TranslationMap, math::CD_2ND>::result(map, stencil); // value = 1 because the translation map assumes uniform spacing EXPECT_NEAR(0.5, result.length(), /*tolerance=*/0.01); } { // test the GenericMap Grid interface math::GenericMap generic_map(*grid); result = math::Gradient<math::GenericMap, math::CD_2ND>::result( generic_map, dense_2ndOrder); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } { // test the GenericMap Transform interface math::GenericMap generic_map(grid->transform()); result = math::Gradient<math::GenericMap, math::CD_2ND>::result( generic_map, dense_2ndOrder); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } { // test the GenericMap Map interface math::GenericMap generic_map(rotated_map); result = math::Gradient<math::GenericMap, math::CD_2ND>::result( generic_map, dense_2ndOrder); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } { // test a map with non-uniform SCALING AND ROTATION Vec3d voxel_sizes(0.25, 0.45, 0.75); math::MapBase::Ptr base_map( new math::ScaleMap(voxel_sizes)); // apply rotation rotated_map = base_map->preRotate(1.5, math::X_AXIS); grid->setTransform(math::Transform::Ptr(new math::Transform(rotated_map))); // remake the sphere unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); math::AffineMap::Ptr affine_map = StaticPtrCast<math::AffineMap, math::MapBase>(rotated_map); stencil.moveTo(xyz); result = math::Gradient<math::AffineMap, math::CD_2ND>::result(*affine_map, stencil); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } { // test a map with NON-UNIFORM SCALING Vec3d voxel_sizes(0.5, 1.0, 0.75); math::MapBase::Ptr base_map( new 
math::ScaleMap(voxel_sizes)); grid->setTransform(math::Transform::Ptr(new math::Transform(base_map))); // remake the sphere unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); math::ScaleMap map(voxel_sizes); dense_2ndOrder.moveTo(xyz); result = math::Gradient<math::ScaleMap, math::CD_2ND>::result(map, dense_2ndOrder); EXPECT_NEAR(1.0, result.length(), /*tolerance=*/0.01); } } TEST_F(TestGradient, testWSGradientNormSqr) { using namespace openvdb; using AccessorType = FloatGrid::ConstAccessor; double voxel_size = 0.5; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/5.0); grid->setTransform(math::Transform::createLinearTransform(voxel_size)); EXPECT_TRUE(grid->empty()); const openvdb::Coord dim(32,32,32); const openvdb::Vec3f center(6.0f,8.0f,10.0f);//i.e. (12,16,20) in index space const float radius = 10.0f; unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!grid->empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(grid->activeVoxelCount())); const Coord xyz(11, 17, 26); AccessorType inAccessor = grid->getConstAccessor(); // test gradient in index and world space using the 7-pt stencil math::UniformScaleMap uniform_scale(voxel_size); FloatTree::ValueType normsqrd; normsqrd = math::GradientNormSqrd<math::UniformScaleMap, math::FIRST_BIAS>::result( uniform_scale, inAccessor, xyz); EXPECT_NEAR(1.0, normsqrd, /*tolerance=*/0.07); // test world space using the 13pt stencil normsqrd = math::GradientNormSqrd<math::UniformScaleMap, math::SECOND_BIAS>::result( uniform_scale, inAccessor, xyz); EXPECT_NEAR(1.0, normsqrd, /*tolerance=*/0.05); math::AffineMap affine(voxel_size*math::Mat3d::identity()); normsqrd = math::GradientNormSqrd<math::AffineMap, math::FIRST_BIAS>::result( affine, inAccessor, xyz); EXPECT_NEAR(1.0, normsqrd, /*tolerance=*/0.07); normsqrd = math::GradientNormSqrd<math::UniformScaleMap, math::THIRD_BIAS>::result( uniform_scale, inAccessor, xyz); EXPECT_NEAR(1.0, normsqrd, /*tolerance=*/0.05); } TEST_F(TestGradient, testWSGradientNormSqrStencil) { using namespace openvdb; double voxel_size = 0.5; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/5.0); grid->setTransform(math::Transform::createLinearTransform(voxel_size)); EXPECT_TRUE(grid->empty()); const openvdb::Coord dim(32,32,32); const openvdb::Vec3f center(6.0f, 8.0f, 10.0f);//i.e. 
(12,16,20) in index space const float radius = 10.0f; unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!grid->empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(grid->activeVoxelCount())); const Coord xyz(11, 17, 26); math::SevenPointStencil<FloatGrid> sevenpt(*grid); sevenpt.moveTo(xyz); math::ThirteenPointStencil<FloatGrid> thirteenpt(*grid); thirteenpt.moveTo(xyz); math::SecondOrderDenseStencil<FloatGrid> dense_2ndOrder(*grid); dense_2ndOrder.moveTo(xyz); math::NineteenPointStencil<FloatGrid> nineteenpt(*grid); nineteenpt.moveTo(xyz); // test gradient in index and world space using the 7-pt stencil math::UniformScaleMap uniform_scale(voxel_size); FloatTree::ValueType normsqrd; normsqrd = math::GradientNormSqrd<math::UniformScaleMap, math::FIRST_BIAS>::result( uniform_scale, sevenpt); EXPECT_NEAR(1.0, normsqrd, /*tolerance=*/0.07); // test gradient in index and world space using the 13pt stencil normsqrd = math::GradientNormSqrd<math::UniformScaleMap, math::SECOND_BIAS>::result( uniform_scale, thirteenpt); EXPECT_NEAR(1.0, normsqrd, /*tolerance=*/0.05); math::AffineMap affine(voxel_size*math::Mat3d::identity()); normsqrd = math::GradientNormSqrd<math::AffineMap, math::FIRST_BIAS>::result( affine, dense_2ndOrder); EXPECT_NEAR(1.0, normsqrd, /*tolerance=*/0.07); normsqrd = math::GradientNormSqrd<math::UniformScaleMap, math::THIRD_BIAS>::result( uniform_scale, nineteenpt); EXPECT_NEAR(1.0, normsqrd, /*tolerance=*/0.05); } TEST_F(TestGradient, testGradientTool) { using namespace openvdb; FloatGrid::Ptr grid = createGrid<FloatGrid>(/*background=*/5.0); FloatTree& tree = grid->tree(); const openvdb::Coord dim(64, 64, 64); const openvdb::Vec3f center(35.0f, 30.0f, 40.0f); const float radius = 10.0f; unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(tree.activeVoxelCount())); const Coord xyz(10, 20, 30); Vec3SGrid::Ptr grad = tools::gradient(*grid); EXPECT_EQ(int(tree.activeVoxelCount()), int(grad->activeVoxelCount())); EXPECT_NEAR(1.0, grad->getConstAccessor().getValue(xyz).length(), /*tolerance=*/0.01); } TEST_F(TestGradient, testGradientMaskedTool) { using namespace openvdb; FloatGrid::Ptr grid = createGrid<FloatGrid>(/*background=*/5.0); FloatTree& tree = grid->tree(); const openvdb::Coord dim(64, 64, 64); const openvdb::Vec3f center(35.0f, 30.0f, 40.0f); const float radius = 10.0f; unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(tree.activeVoxelCount())); const openvdb::CoordBBox maskbbox(openvdb::Coord(35, 30, 30), openvdb::Coord(41, 41, 41)); BoolGrid::Ptr maskGrid = BoolGrid::create(false); maskGrid->fill(maskbbox, true/*value*/, true/*activate*/); Vec3SGrid::Ptr grad = tools::gradient(*grid, *maskGrid); {// outside the masked region const Coord xyz(10, 20, 30); EXPECT_TRUE(!maskbbox.isInside(xyz)); EXPECT_NEAR(0.0, grad->getConstAccessor().getValue(xyz).length(), /*tolerance=*/0.01); } {// inside the masked region const Coord xyz(38, 35, 33); EXPECT_TRUE(maskbbox.isInside(xyz)); EXPECT_NEAR(1.0, grad->getConstAccessor().getValue(xyz).length(), /*tolerance=*/0.01); } } TEST_F(TestGradient, testIntersectsIsoValue) { using namespace openvdb; {// test zero crossing in -x FloatGrid grid(/*backgroundValue=*/5.0); FloatTree& tree = grid.tree(); Coord xyz(2,-5,60); tree.setValue(xyz, 1.3f); tree.setValue(xyz.offsetBy(-1,0,0), 
-2.0f); math::SevenPointStencil<FloatGrid> stencil(grid); stencil.moveTo(xyz); EXPECT_TRUE( stencil.intersects( )); EXPECT_TRUE( stencil.intersects( 0.0f)); EXPECT_TRUE( stencil.intersects( 2.0f)); EXPECT_TRUE(!stencil.intersects( 5.5f)); EXPECT_TRUE(!stencil.intersects(-2.5f)); } {// test zero crossing in +x FloatGrid grid(/*backgroundValue=*/5.0); FloatTree& tree = grid.tree(); Coord xyz(2,-5,60); tree.setValue(xyz, 1.3f); tree.setValue(xyz.offsetBy(1,0,0), -2.0f); math::SevenPointStencil<FloatGrid> stencil(grid); stencil.moveTo(xyz); EXPECT_TRUE(stencil.intersects()); } {// test zero crossing in -y FloatGrid grid(/*backgroundValue=*/5.0); FloatTree& tree = grid.tree(); Coord xyz(2,-5,60); tree.setValue(xyz, 1.3f); tree.setValue(xyz.offsetBy(0,-1,0), -2.0f); math::SevenPointStencil<FloatGrid> stencil(grid); stencil.moveTo(xyz); EXPECT_TRUE(stencil.intersects()); } {// test zero crossing in y FloatGrid grid(/*backgroundValue=*/5.0); FloatTree& tree = grid.tree(); Coord xyz(2,-5,60); tree.setValue(xyz, 1.3f); tree.setValue(xyz.offsetBy(0,1,0), -2.0f); math::SevenPointStencil<FloatGrid> stencil(grid); stencil.moveTo(xyz); EXPECT_TRUE(stencil.intersects()); } {// test zero crossing in -z FloatGrid grid(/*backgroundValue=*/5.0); FloatTree& tree = grid.tree(); Coord xyz(2,-5,60); tree.setValue(xyz, 1.3f); tree.setValue(xyz.offsetBy(0,0,-1), -2.0f); math::SevenPointStencil<FloatGrid> stencil(grid); stencil.moveTo(xyz); EXPECT_TRUE(stencil.intersects()); } {// test zero crossing in z FloatGrid grid(/*backgroundValue=*/5.0); FloatTree& tree = grid.tree(); Coord xyz(2,-5,60); tree.setValue(xyz, 1.3f); tree.setValue(xyz.offsetBy(0,0,1), -2.0f); math::SevenPointStencil<FloatGrid> stencil(grid); stencil.moveTo(xyz); EXPECT_TRUE(stencil.intersects()); } {// test zero crossing in -x & z FloatGrid grid(/*backgroundValue=*/5.0); FloatTree& tree = grid.tree(); Coord xyz(2,-5,60); tree.setValue(xyz, 1.3f); tree.setValue(xyz.offsetBy(-1,0,1), -2.0f); math::SevenPointStencil<FloatGrid> stencil(grid); stencil.moveTo(xyz); EXPECT_TRUE(!stencil.intersects()); } {// test zero multiple crossings FloatGrid grid(/*backgroundValue=*/5.0); FloatTree& tree = grid.tree(); Coord xyz(2,-5,60); tree.setValue(xyz, 1.3f); tree.setValue(xyz.offsetBy(-1, 0, 1), -1.0f); tree.setValue(xyz.offsetBy( 0, 0, 1), -2.0f); tree.setValue(xyz.offsetBy( 0, 1, 0), -3.0f); tree.setValue(xyz.offsetBy( 0, 0,-1), -2.0f); math::SevenPointStencil<FloatGrid> stencil(grid); stencil.moveTo(xyz); EXPECT_TRUE(stencil.intersects()); } } TEST_F(TestGradient, testOldStyleStencils) { using namespace openvdb; FloatGrid::Ptr grid = FloatGrid::create(/*backgroundValue=*/5.0); grid->setTransform(math::Transform::createLinearTransform(/*voxel size=*/0.5)); EXPECT_TRUE(grid->empty()); const openvdb::Coord dim(32,32,32); const openvdb::Vec3f center(6.0f,8.0f,10.0f);//i.e. 
(12,16,20) in index space const float radius=10.0f; unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!grid->empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(grid->activeVoxelCount())); const Coord xyz(11, 17, 26); math::GradStencil<FloatGrid> gs(*grid); gs.moveTo(xyz); EXPECT_NEAR(1.0, gs.gradient().length(), /*tolerance=*/0.01); EXPECT_NEAR(1.0, gs.normSqGrad(), /*tolerance=*/0.10); math::WenoStencil<FloatGrid> ws(*grid); ws.moveTo(xyz); EXPECT_NEAR(1.0, ws.gradient().length(), /*tolerance=*/0.01); EXPECT_NEAR(1.0, ws.normSqGrad(), /*tolerance=*/0.01); math::CurvatureStencil<FloatGrid> cs(*grid); cs.moveTo(xyz); EXPECT_NEAR(1.0, cs.gradient().length(), /*tolerance=*/0.01); }
28,124
C++
36.650602
100
0.63092
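Aside: the TestGradient cases above depend on test-only helpers (unittest_util::makeSphere) and fixtures, so they do not run standalone. Below is a minimal, self-contained sketch of the same idea, restricted to the public tools::createLevelSetSphere() and tools::gradient() calls that also appear elsewhere in these files; the radius, voxel size, and sample coordinate are illustrative choices, not values from the tests.

#include <openvdb/openvdb.h>
#include <openvdb/tools/LevelSetSphere.h>
#include <openvdb/tools/GridOperators.h> // tools::gradient()
#include <iostream>

int main()
{
    openvdb::initialize();

    // Narrow-band level-set sphere: radius 10, centered at the origin,
    // voxel size 0.5, half-width 3 voxels.
    openvdb::FloatGrid::Ptr sphere =
        openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
            /*radius=*/10.0f, /*center=*/openvdb::Vec3f(0.0f, 0.0f, 0.0f),
            /*voxelSize=*/0.5f, /*halfWidth=*/3.0f);

    // The gradient of a signed distance field should be close to unit length
    // wherever the narrow band is well resolved.
    openvdb::Vec3SGrid::Ptr grad = openvdb::tools::gradient(*sphere);

    const openvdb::Coord ijk(20, 0, 0); // index (20,0,0) -> world (10,0,0), on the surface
    std::cout << "|grad| = "
              << grad->getConstAccessor().getValue(ijk).length() << std::endl;
    return 0;
}

Near the zero crossing the printed length should be close to 1, which is exactly what the EXPECT_NEAR(1.0, ...) checks above assert for the various map and stencil combinations.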
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestInt32Metadata.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include "gtest/gtest.h"
#include <openvdb/Exceptions.h>
#include <openvdb/Metadata.h>

class TestInt32Metadata : public ::testing::Test
{
};

TEST_F(TestInt32Metadata, test)
{
    using namespace openvdb;

    Metadata::Ptr m(new Int32Metadata(123));
    Metadata::Ptr m2 = m->copy();

    EXPECT_TRUE(dynamic_cast<Int32Metadata*>(m.get()) != 0);
    EXPECT_TRUE(dynamic_cast<Int32Metadata*>(m2.get()) != 0);

    EXPECT_TRUE(m->typeName().compare("int32") == 0);
    EXPECT_TRUE(m2->typeName().compare("int32") == 0);

    Int32Metadata *s = dynamic_cast<Int32Metadata*>(m.get());
    EXPECT_TRUE(s->value() == 123);
    s->value() = 456;
    EXPECT_TRUE(s->value() == 456);

    m2->copy(*s);

    s = dynamic_cast<Int32Metadata*>(m2.get());
    EXPECT_TRUE(s->value() == 456);
}
870
C++
23.194444
61
0.63908
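Aside: TestInt32Metadata exercises the typed Metadata wrappers directly. The sketch below shows how the same wrappers are typically used through a MetaMap; the key names and values ("frame", "artist", 42, "anon") are made up for illustration.

#include <openvdb/openvdb.h>
#include <openvdb/Metadata.h>
#include <iostream>

int main()
{
    openvdb::initialize();

    // A MetaMap stores heterogeneous, typed metadata by name.
    openvdb::MetaMap meta;
    meta.insertMeta("frame", openvdb::Int32Metadata(42));
    meta.insertMeta("artist", openvdb::StringMetadata("anon"));

    // Typed retrieval: the template argument must match the stored value type.
    std::cout << meta.metaValue<int32_t>("frame") << ", "
              << meta.metaValue<std::string>("artist") << std::endl;
    return 0;
}

Grids derive from MetaMap, so the same insertMeta()/metaValue() calls also work directly on a grid, which is how TestGridIO later attaches "author" and "year" metadata to a file.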
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestStreamCompression.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/points/StreamCompression.h> #include <openvdb/io/Compression.h> // io::COMPRESS_BLOSC #ifdef __clang__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-macros" #endif // Boost.Interprocess uses a header-only portion of Boost.DateTime #define BOOST_DATE_TIME_NO_LIB #ifdef __clang__ #pragma GCC diagnostic pop #endif #include <boost/interprocess/file_mapping.hpp> #include <boost/interprocess/mapped_region.hpp> #include <boost/iostreams/device/array.hpp> #include <boost/iostreams/stream.hpp> #include <boost/system/error_code.hpp> #include <boost/uuid/uuid_generators.hpp> #include <boost/uuid/uuid_io.hpp> #include <boost/version.hpp> // for BOOST_VERSION #include <tbb/atomic.h> #ifdef _MSC_VER #include <boost/interprocess/detail/os_file_functions.hpp> // open_existing_file(), close_file() // boost::interprocess::detail was renamed to boost::interprocess::ipcdetail in Boost 1.48. // Ensure that both namespaces exist. namespace boost { namespace interprocess { namespace detail {} namespace ipcdetail {} } } #include <windows.h> #else #include <sys/types.h> // for struct stat #include <sys/stat.h> // for stat() #include <unistd.h> // for unlink() #endif #include <fstream> #include <numeric> // for std::iota() #ifdef OPENVDB_USE_BLOSC #include <blosc.h> // A Blosc optimization introduced in 1.11.0 uses a slightly smaller block size for // HCR codecs (LZ4, ZLIB, ZSTD), which otherwise fails a few regression test cases #if BLOSC_VERSION_MAJOR > 1 || (BLOSC_VERSION_MAJOR == 1 && BLOSC_VERSION_MINOR > 10) #define BLOSC_HCR_BLOCKSIZE_OPTIMIZATION #endif // Blosc 1.14+ writes backwards-compatible data by default. // http://blosc.org/posts/new-forward-compat-policy/ #if BLOSC_VERSION_MAJOR > 1 || (BLOSC_VERSION_MAJOR == 1 && BLOSC_VERSION_MINOR >= 14) #define BLOSC_BACKWARDS_COMPATIBLE #endif #endif /// @brief io::MappedFile has a private constructor, so this unit tests uses a matching proxy class ProxyMappedFile { public: explicit ProxyMappedFile(const std::string& filename) : mImpl(new Impl(filename)) { } private: class Impl { public: Impl(const std::string& filename) : mMap(filename.c_str(), boost::interprocess::read_only) , mRegion(mMap, boost::interprocess::read_only) { mLastWriteTime = 0; const char* regionFilename = mMap.get_name(); #ifdef _MSC_VER using namespace boost::interprocess::detail; using namespace boost::interprocess::ipcdetail; using openvdb::Index64; if (void* fh = open_existing_file(regionFilename, boost::interprocess::read_only)) { FILETIME mtime; if (GetFileTime(fh, nullptr, nullptr, &mtime)) { mLastWriteTime = (Index64(mtime.dwHighDateTime) << 32) | mtime.dwLowDateTime; } close_file(fh); } #else struct stat info; if (0 == ::stat(regionFilename, &info)) { mLastWriteTime = openvdb::Index64(info.st_mtime); } #endif } using Notifier = std::function<void(std::string /*filename*/)>; boost::interprocess::file_mapping mMap; boost::interprocess::mapped_region mRegion; bool mAutoDelete = false; Notifier mNotifier; mutable tbb::atomic<openvdb::Index64> mLastWriteTime; }; // class Impl std::unique_ptr<Impl> mImpl; }; // class ProxyMappedFile using namespace openvdb; using namespace openvdb::compression; class TestStreamCompression: public ::testing::Test { public: void testPagedStreams(); }; // class TestStreamCompression //////////////////////////////////////// TEST_F(TestStreamCompression, testBlosc) { // ensure that the library and unit tests are 
both built with or without Blosc enabled #ifdef OPENVDB_USE_BLOSC EXPECT_TRUE(bloscCanCompress()); #else EXPECT_TRUE(!bloscCanCompress()); #endif const int count = 256; { // valid buffer // compress std::unique_ptr<int[]> uncompressedBuffer(new int[count]); for (int i = 0; i < count; i++) { uncompressedBuffer.get()[i] = i / 2; } size_t uncompressedBytes = count * sizeof(int); size_t compressedBytes; size_t testCompressedBytes = bloscCompressedSize( reinterpret_cast<char*>(uncompressedBuffer.get()), uncompressedBytes); std::unique_ptr<char[]> compressedBuffer = bloscCompress( reinterpret_cast<char*>(uncompressedBuffer.get()), uncompressedBytes, compressedBytes); #ifdef OPENVDB_USE_BLOSC EXPECT_TRUE(compressedBytes < uncompressedBytes); EXPECT_TRUE(compressedBuffer); EXPECT_EQ(testCompressedBytes, compressedBytes); // uncompressedSize EXPECT_EQ(uncompressedBytes, bloscUncompressedSize(compressedBuffer.get())); // decompress std::unique_ptr<char[]> newUncompressedBuffer = bloscDecompress(compressedBuffer.get(), uncompressedBytes); // incorrect number of expected bytes EXPECT_THROW(newUncompressedBuffer = bloscDecompress(compressedBuffer.get(), 1), openvdb::RuntimeError); EXPECT_TRUE(newUncompressedBuffer); #else EXPECT_TRUE(!compressedBuffer); EXPECT_EQ(testCompressedBytes, size_t(0)); // uncompressedSize EXPECT_THROW(bloscUncompressedSize(compressedBuffer.get()), openvdb::RuntimeError); // decompress std::unique_ptr<char[]> newUncompressedBuffer; EXPECT_THROW( newUncompressedBuffer = bloscDecompress(compressedBuffer.get(), uncompressedBytes), openvdb::RuntimeError); EXPECT_TRUE(!newUncompressedBuffer); #endif } { // one value (below minimum bytes) std::unique_ptr<int[]> uncompressedBuffer(new int[1]); uncompressedBuffer.get()[0] = 10; size_t compressedBytes; std::unique_ptr<char[]> compressedBuffer = bloscCompress( reinterpret_cast<char*>(uncompressedBuffer.get()), sizeof(int), compressedBytes); EXPECT_TRUE(!compressedBuffer); EXPECT_EQ(compressedBytes, size_t(0)); } { // padded buffer std::unique_ptr<char[]> largeBuffer(new char[2048]); for (int paddedCount = 1; paddedCount < 256; paddedCount++) { std::unique_ptr<char[]> newTest(new char[paddedCount]); for (int i = 0; i < paddedCount; i++) newTest.get()[i] = char(0); #ifdef OPENVDB_USE_BLOSC size_t compressedBytes; std::unique_ptr<char[]> compressedBuffer = bloscCompress( newTest.get(), paddedCount, compressedBytes); // compress into a large buffer to check for any padding issues size_t compressedSizeBytes; bloscCompress(largeBuffer.get(), compressedSizeBytes, size_t(2048), newTest.get(), paddedCount); // regardless of compression, these numbers should always match EXPECT_EQ(compressedSizeBytes, compressedBytes); // no compression performed due to buffer being too small if (paddedCount <= BLOSC_MINIMUM_BYTES) { EXPECT_TRUE(!compressedBuffer); } else { EXPECT_TRUE(compressedBuffer); EXPECT_TRUE(compressedBytes > 0); EXPECT_TRUE(int(compressedBytes) < paddedCount); std::unique_ptr<char[]> uncompressedBuffer = bloscDecompress( compressedBuffer.get(), paddedCount); EXPECT_TRUE(uncompressedBuffer); for (int i = 0; i < paddedCount; i++) { EXPECT_EQ((uncompressedBuffer.get())[i], newTest[i]); } } #endif } } { // invalid buffer (out of range) // compress std::vector<int> smallBuffer; smallBuffer.reserve(count); for (int i = 0; i < count; i++) smallBuffer[i] = i; size_t invalidBytes = INT_MAX - 1; size_t testCompressedBytes = bloscCompressedSize( reinterpret_cast<char*>(&smallBuffer[0]), invalidBytes); EXPECT_EQ(testCompressedBytes, size_t(0)); 
std::unique_ptr<char[]> buffer = bloscCompress( reinterpret_cast<char*>(&smallBuffer[0]), invalidBytes, testCompressedBytes); EXPECT_TRUE(!buffer); EXPECT_EQ(testCompressedBytes, size_t(0)); // decompress #ifdef OPENVDB_USE_BLOSC std::unique_ptr<char[]> compressedBuffer = bloscCompress( reinterpret_cast<char*>(&smallBuffer[0]), count * sizeof(int), testCompressedBytes); EXPECT_THROW(buffer = bloscDecompress( reinterpret_cast<char*>(compressedBuffer.get()), invalidBytes - 16), openvdb::RuntimeError); EXPECT_TRUE(!buffer); EXPECT_THROW(bloscDecompress( reinterpret_cast<char*>(compressedBuffer.get()), count * sizeof(int) + 1), openvdb::RuntimeError); #endif } { // uncompressible buffer const int uncompressedCount = 32; std::vector<int> values; values.reserve(uncompressedCount); // 128 bytes for (int i = 0; i < uncompressedCount; i++) values.push_back(i*10000); std::random_device rng; std::mt19937 urng(rng()); std::shuffle(values.begin(), values.end(), urng); std::unique_ptr<int[]> uncompressedBuffer(new int[values.size()]); for (size_t i = 0; i < values.size(); i++) uncompressedBuffer.get()[i] = values[i]; size_t uncompressedBytes = values.size() * sizeof(int); size_t compressedBytes; std::unique_ptr<char[]> compressedBuffer = bloscCompress( reinterpret_cast<char*>(uncompressedBuffer.get()), uncompressedBytes, compressedBytes); EXPECT_TRUE(!compressedBuffer); EXPECT_EQ(compressedBytes, size_t(0)); } } void TestStreamCompression::testPagedStreams() { { // one small value std::ostringstream ostr(std::ios_base::binary); PagedOutputStream ostream(ostr); int foo = 5; ostream.write(reinterpret_cast<const char*>(&foo), sizeof(int)); EXPECT_EQ(ostr.tellp(), std::streampos(0)); ostream.flush(); EXPECT_EQ(ostr.tellp(), std::streampos(sizeof(int))); } { // small values up to page threshold std::ostringstream ostr(std::ios_base::binary); PagedOutputStream ostream(ostr); for (int i = 0; i < PageSize; i++) { uint8_t oneByte = 255; ostream.write(reinterpret_cast<const char*>(&oneByte), sizeof(uint8_t)); } EXPECT_EQ(ostr.tellp(), std::streampos(0)); std::vector<uint8_t> values; values.assign(PageSize, uint8_t(255)); size_t compressedSize = compression::bloscCompressedSize( reinterpret_cast<const char*>(&values[0]), PageSize); uint8_t oneMoreByte(255); ostream.write(reinterpret_cast<const char*>(&oneMoreByte), sizeof(char)); if (compressedSize == 0) { EXPECT_EQ(ostr.tellp(), std::streampos(PageSize)); } else { EXPECT_EQ(ostr.tellp(), std::streampos(compressedSize)); } } { // one large block at exactly page threshold std::ostringstream ostr(std::ios_base::binary); PagedOutputStream ostream(ostr); std::vector<uint8_t> values; values.assign(PageSize, uint8_t(255)); ostream.write(reinterpret_cast<const char*>(&values[0]), values.size()); EXPECT_EQ(ostr.tellp(), std::streampos(0)); } { // two large blocks at page threshold + 1 byte std::ostringstream ostr(std::ios_base::binary); PagedOutputStream ostream(ostr); std::vector<uint8_t> values; values.assign(PageSize + 1, uint8_t(255)); ostream.write(reinterpret_cast<const char*>(&values[0]), values.size()); size_t compressedSize = compression::bloscCompressedSize( reinterpret_cast<const char*>(&values[0]), values.size()); #ifndef OPENVDB_USE_BLOSC compressedSize = values.size(); #endif EXPECT_EQ(ostr.tellp(), std::streampos(compressedSize)); ostream.write(reinterpret_cast<const char*>(&values[0]), values.size()); EXPECT_EQ(ostr.tellp(), std::streampos(compressedSize * 2)); uint8_t oneMoreByte(255); ostream.write(reinterpret_cast<const char*>(&oneMoreByte), 
sizeof(uint8_t)); ostream.flush(); EXPECT_EQ(ostr.tellp(), std::streampos(compressedSize * 2 + 1)); } { // one full page std::stringstream ss(std::ios_base::out | std::ios_base::in | std::ios_base::binary); // write PagedOutputStream ostreamSizeOnly(ss); ostreamSizeOnly.setSizeOnly(true); EXPECT_EQ(ss.tellp(), std::streampos(0)); std::vector<uint8_t> values; values.resize(PageSize); std::iota(values.begin(), values.end(), 0); // ascending integer values ostreamSizeOnly.write(reinterpret_cast<const char*>(&values[0]), values.size()); ostreamSizeOnly.flush(); #ifdef OPENVDB_USE_BLOSC // two integers - compressed size and uncompressed size EXPECT_EQ(ss.tellp(), std::streampos(sizeof(int)*2)); #else // one integer - uncompressed size EXPECT_EQ(ss.tellp(), std::streampos(sizeof(int))); #endif PagedOutputStream ostream(ss); ostream.write(reinterpret_cast<const char*>(&values[0]), values.size()); ostream.flush(); #ifdef OPENVDB_USE_BLOSC #ifdef BLOSC_BACKWARDS_COMPATIBLE EXPECT_EQ(ss.tellp(), std::streampos(5400)); #else #ifdef BLOSC_HCR_BLOCKSIZE_OPTIMIZATION EXPECT_EQ(ss.tellp(), std::streampos(4422)); #else EXPECT_EQ(ss.tellp(), std::streampos(4452)); #endif #endif #else EXPECT_EQ(ss.tellp(), std::streampos(PageSize+sizeof(int))); #endif // read EXPECT_EQ(ss.tellg(), std::streampos(0)); PagedInputStream istream(ss); istream.setSizeOnly(true); PageHandle::Ptr handle = istream.createHandle(values.size()); #ifdef OPENVDB_USE_BLOSC // two integers - compressed size and uncompressed size EXPECT_EQ(ss.tellg(), std::streampos(sizeof(int)*2)); #else // one integer - uncompressed size EXPECT_EQ(ss.tellg(), std::streampos(sizeof(int))); #endif istream.read(handle, values.size(), false); #ifdef OPENVDB_USE_BLOSC #ifdef BLOSC_BACKWARDS_COMPATIBLE EXPECT_EQ(ss.tellg(), std::streampos(5400)); #else #ifdef BLOSC_HCR_BLOCKSIZE_OPTIMIZATION EXPECT_EQ(ss.tellg(), std::streampos(4422)); #else EXPECT_EQ(ss.tellg(), std::streampos(4452)); #endif #endif #else EXPECT_EQ(ss.tellg(), std::streampos(PageSize+sizeof(int))); #endif std::unique_ptr<uint8_t[]> newValues(reinterpret_cast<uint8_t*>(handle->read().release())); EXPECT_TRUE(newValues); for (size_t i = 0; i < values.size(); i++) { EXPECT_EQ(values[i], newValues.get()[i]); } } std::string tempDir; if (const char* dir = std::getenv("TMPDIR")) tempDir = dir; #ifdef _MSC_VER if (tempDir.empty()) { char tempDirBuffer[MAX_PATH+1]; int tempDirLen = GetTempPath(MAX_PATH+1, tempDirBuffer); EXPECT_TRUE(tempDirLen > 0 && tempDirLen <= MAX_PATH); tempDir = tempDirBuffer; } #else if (tempDir.empty()) tempDir = P_tmpdir; #endif { std::string filename = tempDir + "/openvdb_page1"; io::StreamMetadata::Ptr streamMetadata(new io::StreamMetadata); { // ascending values up to 10 million written in blocks of PageSize/3 std::ofstream fileout(filename.c_str(), std::ios_base::binary); io::setStreamMetadataPtr(fileout, streamMetadata); io::setDataCompression(fileout, openvdb::io::COMPRESS_BLOSC); std::vector<uint8_t> values; values.resize(10*1000*1000); std::iota(values.begin(), values.end(), 0); // ascending integer values // write page sizes PagedOutputStream ostreamSizeOnly(fileout); ostreamSizeOnly.setSizeOnly(true); EXPECT_EQ(fileout.tellp(), std::streampos(0)); int increment = PageSize/3; for (size_t i = 0; i < values.size(); i += increment) { if (size_t(i+increment) > values.size()) { ostreamSizeOnly.write( reinterpret_cast<const char*>(&values[0]+i), values.size() - i); } else { ostreamSizeOnly.write(reinterpret_cast<const char*>(&values[0]+i), increment); } } 
ostreamSizeOnly.flush(); #ifdef OPENVDB_USE_BLOSC int pages = static_cast<int>(fileout.tellp() / (sizeof(int)*2)); #else int pages = static_cast<int>(fileout.tellp() / (sizeof(int))); #endif EXPECT_EQ(pages, 10); // write PagedOutputStream ostream(fileout); for (size_t i = 0; i < values.size(); i += increment) { if (size_t(i+increment) > values.size()) { ostream.write(reinterpret_cast<const char*>(&values[0]+i), values.size() - i); } else { ostream.write(reinterpret_cast<const char*>(&values[0]+i), increment); } } ostream.flush(); #ifdef OPENVDB_USE_BLOSC #ifdef BLOSC_BACKWARDS_COMPATIBLE EXPECT_EQ(fileout.tellp(), std::streampos(51480)); #else #ifdef BLOSC_HCR_BLOCKSIZE_OPTIMIZATION EXPECT_EQ(fileout.tellp(), std::streampos(42424)); #else EXPECT_EQ(fileout.tellp(), std::streampos(42724)); #endif #endif #else EXPECT_EQ(fileout.tellp(), std::streampos(values.size()+sizeof(int)*pages)); #endif // abuse File being a friend of MappedFile to get around the private constructor ProxyMappedFile* proxy = new ProxyMappedFile(filename); SharedPtr<io::MappedFile> mappedFile(reinterpret_cast<io::MappedFile*>(proxy)); // read std::ifstream filein(filename.c_str(), std::ios_base::in | std::ios_base::binary); io::setStreamMetadataPtr(filein, streamMetadata); io::setMappedFilePtr(filein, mappedFile); EXPECT_EQ(filein.tellg(), std::streampos(0)); PagedInputStream istreamSizeOnly(filein); istreamSizeOnly.setSizeOnly(true); std::vector<PageHandle::Ptr> handles; for (size_t i = 0; i < values.size(); i += increment) { if (size_t(i+increment) > values.size()) { handles.push_back(istreamSizeOnly.createHandle(values.size() - i)); } else { handles.push_back(istreamSizeOnly.createHandle(increment)); } } #ifdef OPENVDB_USE_BLOSC // two integers - compressed size and uncompressed size EXPECT_EQ(filein.tellg(), std::streampos(pages*sizeof(int)*2)); #else // one integer - uncompressed size EXPECT_EQ(filein.tellg(), std::streampos(pages*sizeof(int))); #endif PagedInputStream istream(filein); int pageHandle = 0; for (size_t i = 0; i < values.size(); i += increment) { if (size_t(i+increment) > values.size()) { istream.read(handles[pageHandle++], values.size() - i); } else { istream.read(handles[pageHandle++], increment); } } // first three handles live in the same page Page& page0 = handles[0]->page(); Page& page1 = handles[1]->page(); Page& page2 = handles[2]->page(); Page& page3 = handles[3]->page(); EXPECT_TRUE(page0.isOutOfCore()); EXPECT_TRUE(page1.isOutOfCore()); EXPECT_TRUE(page2.isOutOfCore()); EXPECT_TRUE(page3.isOutOfCore()); handles[0]->read(); // store the Page shared_ptr Page::Ptr page = handles[0]->mPage; // verify use count is four (one plus three handles) EXPECT_EQ(page.use_count(), long(4)); // on reading from the first handle, all pages referenced // in the first three handles are in-core EXPECT_TRUE(!page0.isOutOfCore()); EXPECT_TRUE(!page1.isOutOfCore()); EXPECT_TRUE(!page2.isOutOfCore()); EXPECT_TRUE(page3.isOutOfCore()); handles[1]->read(); EXPECT_TRUE(handles[0]->mPage); handles[2]->read(); handles.erase(handles.begin()); handles.erase(handles.begin()); handles.erase(handles.begin()); // after all three handles have been read, // page should have just one use count (itself) EXPECT_EQ(page.use_count(), long(1)); } std::remove(filename.c_str()); } } TEST_F(TestStreamCompression, testPagedStreams) { testPagedStreams(); }
21,420
C++
31.753823
99
0.606303
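Aside: the Blosc helpers tested above form a simple compress/decompress pair. Below is a minimal round-trip sketch using only the bloscCompress()/bloscDecompress() calls exercised by the test; the sample data is arbitrary, and when OpenVDB is built without Blosc support bloscCompress() returns a null pointer, as the test also verifies.

#include <openvdb/points/StreamCompression.h>
#include <iostream>
#include <memory>
#include <vector>

int main()
{
    using namespace openvdb::compression;

    // Highly compressible sample data (each value repeated twice).
    std::vector<int> values(256);
    for (int i = 0; i < int(values.size()); ++i) values[i] = i / 2;

    const size_t uncompressedBytes = values.size() * sizeof(int);
    size_t compressedBytes = 0;
    std::unique_ptr<char[]> compressed = bloscCompress(
        reinterpret_cast<char*>(values.data()), uncompressedBytes, compressedBytes);

    if (!compressed) { // Blosc disabled, or the buffer was too small to compress
        std::cout << "no compression performed" << std::endl;
        return 0;
    }

    std::cout << uncompressedBytes << " -> " << compressedBytes << " bytes" << std::endl;

    // Decompression needs the expected uncompressed byte count.
    std::unique_ptr<char[]> roundTrip = bloscDecompress(compressed.get(), uncompressedBytes);
    return 0;
}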
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestPointMask.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/openvdb.h> #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointConversion.h> #include <openvdb/points/PointMask.h> #include <algorithm> #include <string> #include <vector> using namespace openvdb; using namespace openvdb::points; class TestPointMask: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; // class TestPointMask TEST_F(TestPointMask, testMask) { std::vector<Vec3s> positions = { {1, 1, 1}, {1, 5, 1}, {2, 1, 1}, {2, 2, 1}, }; const PointAttributeVector<Vec3s> pointList(positions); const float voxelSize = 0.1f; openvdb::math::Transform::Ptr transform( openvdb::math::Transform::createLinearTransform(voxelSize)); tools::PointIndexGrid::Ptr pointIndexGrid = tools::createPointIndexGrid<tools::PointIndexGrid>(pointList, *transform); PointDataGrid::Ptr points = createPointDataGrid<NullCodec, PointDataGrid>(*pointIndexGrid, pointList, *transform); { // simple topology copy auto mask = convertPointsToMask(*points); EXPECT_EQ(points->tree().activeVoxelCount(), Index64(4)); EXPECT_EQ(mask->tree().activeVoxelCount(), Index64(4)); } { // mask grid instead of bool grid auto mask = convertPointsToMask<PointDataGrid, MaskGrid>(*points); EXPECT_EQ(points->tree().activeVoxelCount(), Index64(4)); EXPECT_EQ(mask->tree().activeVoxelCount(), Index64(4)); } { // identical transform auto mask = convertPointsToMask(*points, *transform); EXPECT_EQ(points->tree().activeVoxelCount(), Index64(4)); EXPECT_EQ(mask->tree().activeVoxelCount(), Index64(4)); } // assign point 3 to new group "test" appendGroup(points->tree(), "test"); std::vector<short> groups{0,0,1,0}; setGroup(points->tree(), pointIndexGrid->tree(), groups, "test"); std::vector<std::string> includeGroups{"test"}; std::vector<std::string> excludeGroups; { // convert in turn "test" and not "test" MultiGroupFilter filter(includeGroups, excludeGroups, points->tree().cbeginLeaf()->attributeSet()); auto mask = convertPointsToMask(*points, filter); EXPECT_EQ(points->tree().activeVoxelCount(), Index64(4)); EXPECT_EQ(mask->tree().activeVoxelCount(), Index64(1)); MultiGroupFilter filter2(excludeGroups, includeGroups, points->tree().cbeginLeaf()->attributeSet()); mask = convertPointsToMask(*points, filter2); EXPECT_EQ(mask->tree().activeVoxelCount(), Index64(3)); } { // use a much larger voxel size that splits the points into two regions const float newVoxelSize(4); openvdb::math::Transform::Ptr newTransform( openvdb::math::Transform::createLinearTransform(newVoxelSize)); auto mask = convertPointsToMask(*points, *newTransform); EXPECT_EQ(mask->tree().activeVoxelCount(), Index64(2)); MultiGroupFilter filter(includeGroups, excludeGroups, points->tree().cbeginLeaf()->attributeSet()); mask = convertPointsToMask(*points, *newTransform, filter); EXPECT_EQ(mask->tree().activeVoxelCount(), Index64(1)); MultiGroupFilter filter2(excludeGroups, includeGroups, points->tree().cbeginLeaf()->attributeSet()); mask = convertPointsToMask(*points, *newTransform, filter2); EXPECT_EQ(mask->tree().activeVoxelCount(), Index64(2)); } } struct StaticVoxelDeformer { StaticVoxelDeformer(const Vec3d& position) : mPosition(position) { } template <typename LeafT> void reset(LeafT& /*leaf*/, size_t /*idx*/) { } template <typename IterT> void apply(Vec3d& position, IterT&) const { position = mPosition; } private: Vec3d mPosition; }; template <bool WorldSpace 
= true> struct YOffsetDeformer { YOffsetDeformer(const Vec3d& offset) : mOffset(offset) { } template <typename LeafT> void reset(LeafT& /*leaf*/, size_t /*idx*/) { } template <typename IterT> void apply(Vec3d& position, IterT&) const { position += mOffset; } Vec3d mOffset; }; namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { // configure both voxel deformers to be applied in index-space template<> struct DeformerTraits<StaticVoxelDeformer> { static const bool IndexSpace = true; }; template<> struct DeformerTraits<YOffsetDeformer<false>> { static const bool IndexSpace = true; }; } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb TEST_F(TestPointMask, testMaskDeformer) { // This test validates internal functionality that is used in various applications, such as // building masks and producing count grids. Note that by convention, methods that live // in an "internal" namespace are typically not promoted as part of the public API // and thus do not receive the same level of rigour in avoiding breaking API changes. std::vector<Vec3s> positions = { {1, 1, 1}, {1, 5, 1}, {2, 1, 1}, {2, 2, 1}, }; const PointAttributeVector<Vec3s> pointList(positions); const float voxelSize = 0.1f; openvdb::math::Transform::Ptr transform( openvdb::math::Transform::createLinearTransform(voxelSize)); tools::PointIndexGrid::Ptr pointIndexGrid = tools::createPointIndexGrid<tools::PointIndexGrid>(pointList, *transform); PointDataGrid::Ptr points = createPointDataGrid<NullCodec, PointDataGrid>(*pointIndexGrid, pointList, *transform); // assign point 3 to new group "test" appendGroup(points->tree(), "test"); std::vector<short> groups{0,0,1,0}; setGroup(points->tree(), pointIndexGrid->tree(), groups, "test"); NullFilter nullFilter; { // null deformer NullDeformer deformer; auto mask = point_mask_internal::convertPointsToScalar<MaskGrid>( *points, *transform, nullFilter, deformer); auto mask2 = convertPointsToMask(*points); EXPECT_EQ(points->tree().activeVoxelCount(), Index64(4)); EXPECT_EQ(mask->tree().activeVoxelCount(), Index64(4)); EXPECT_TRUE(mask->tree().hasSameTopology(mask2->tree())); EXPECT_TRUE(mask->tree().hasSameTopology(points->tree())); } { // static voxel deformer // collapse all points into a random voxel at (9, 13, 106) StaticVoxelDeformer deformer(Vec3d(9, 13, 106)); auto mask = point_mask_internal::convertPointsToScalar<MaskGrid>( *points, *transform, nullFilter, deformer); EXPECT_EQ(mask->tree().activeVoxelCount(), Index64(1)); EXPECT_TRUE(!mask->tree().cbeginLeaf()->isValueOn(Coord(9, 13, 105))); EXPECT_TRUE(mask->tree().cbeginLeaf()->isValueOn(Coord(9, 13, 106))); } { // +y offset deformer Vec3d offset(0, 41.7, 0); YOffsetDeformer</*world-space*/false> deformer(offset); auto mask = point_mask_internal::convertPointsToScalar<MaskGrid>( *points, *transform, nullFilter, deformer); // (repeat with deformer configured as world-space) YOffsetDeformer</*world-space*/true> deformerWS(offset * voxelSize); auto maskWS = point_mask_internal::convertPointsToScalar<MaskGrid>( *points, *transform, nullFilter, deformerWS); EXPECT_EQ(mask->tree().activeVoxelCount(), Index64(4)); EXPECT_EQ(maskWS->tree().activeVoxelCount(), Index64(4)); std::vector<Coord> maskVoxels; std::vector<Coord> maskVoxelsWS; std::vector<Coord> pointVoxels; for (auto leaf = mask->tree().cbeginLeaf(); leaf; ++leaf) { for (auto iter = leaf->cbeginValueOn(); iter; ++iter) { maskVoxels.emplace_back(iter.getCoord()); } } for (auto leaf = maskWS->tree().cbeginLeaf(); 
leaf; ++leaf) { for (auto iter = leaf->cbeginValueOn(); iter; ++iter) { maskVoxelsWS.emplace_back(iter.getCoord()); } } for (auto leaf = points->tree().cbeginLeaf(); leaf; ++leaf) { for (auto iter = leaf->cbeginValueOn(); iter; ++iter) { pointVoxels.emplace_back(iter.getCoord()); } } std::sort(maskVoxels.begin(), maskVoxels.end()); std::sort(maskVoxelsWS.begin(), maskVoxelsWS.end()); std::sort(pointVoxels.begin(), pointVoxels.end()); EXPECT_EQ(maskVoxels.size(), size_t(4)); EXPECT_EQ(maskVoxelsWS.size(), size_t(4)); EXPECT_EQ(pointVoxels.size(), size_t(4)); for (int i = 0; i < int(pointVoxels.size()); i++) { Coord newCoord(pointVoxels[i]); newCoord.x() = static_cast<Int32>(newCoord.x() + offset.x()); newCoord.y() = static_cast<Int32>(math::Round(newCoord.y() + offset.y())); newCoord.z() = static_cast<Int32>(newCoord.z() + offset.z()); EXPECT_EQ(maskVoxels[i], newCoord); EXPECT_EQ(maskVoxelsWS[i], newCoord); } // use a different transform to verify deformers and transforms can be used together const float newVoxelSize = 0.02f; openvdb::math::Transform::Ptr newTransform( openvdb::math::Transform::createLinearTransform(newVoxelSize)); auto mask2 = point_mask_internal::convertPointsToScalar<MaskGrid>( *points, *newTransform, nullFilter, deformer); EXPECT_EQ(mask2->tree().activeVoxelCount(), Index64(4)); std::vector<Coord> maskVoxels2; for (auto leaf = mask2->tree().cbeginLeaf(); leaf; ++leaf) { for (auto iter = leaf->cbeginValueOn(); iter; ++iter) { maskVoxels2.emplace_back(iter.getCoord()); } } std::sort(maskVoxels2.begin(), maskVoxels2.end()); for (int i = 0; i < int(maskVoxels.size()); i++) { Coord newCoord(pointVoxels[i]); newCoord.x() = static_cast<Int32>((newCoord.x() + offset.x()) * 5); newCoord.y() = static_cast<Int32>(math::Round((newCoord.y() + offset.y()) * 5)); newCoord.z() = static_cast<Int32>((newCoord.z() + offset.z()) * 5); EXPECT_EQ(maskVoxels2[i], newCoord); } // only use points in group "test" std::vector<std::string> includeGroups{"test"}; std::vector<std::string> excludeGroups; MultiGroupFilter filter(includeGroups, excludeGroups, points->tree().cbeginLeaf()->attributeSet()); auto mask3 = point_mask_internal::convertPointsToScalar<MaskGrid>( *points, *transform, filter, deformer); EXPECT_EQ(mask3->tree().activeVoxelCount(), Index64(1)); for (auto leaf = mask3->tree().cbeginLeaf(); leaf; ++leaf) { for (auto iter = leaf->cbeginValueOn(); iter; ++iter) { Coord newCoord(pointVoxels[2]); newCoord.x() = static_cast<Int32>(newCoord.x() + offset.x()); newCoord.y() = static_cast<Int32>(math::Round(newCoord.y() + offset.y())); newCoord.z() = static_cast<Int32>(newCoord.z() + offset.z()); EXPECT_EQ(iter.getCoord(), newCoord); } } } }
11,910
C++
34.239645
95
0.604114
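Aside: a condensed version of the mask-conversion path driven by TestPointMask, kept to the calls that appear in the test itself. The point positions and voxel size below mirror the test's values but are otherwise arbitrary.

#include <openvdb/openvdb.h>
#include <openvdb/points/PointDataGrid.h>
#include <openvdb/points/PointConversion.h>
#include <openvdb/points/PointMask.h>
#include <iostream>
#include <vector>

int main()
{
    openvdb::initialize();
    using namespace openvdb;
    using namespace openvdb::points;

    const std::vector<Vec3s> positions = {{1, 1, 1}, {1, 5, 1}, {2, 1, 1}, {2, 2, 1}};
    const PointAttributeVector<Vec3s> pointList(positions);

    math::Transform::Ptr transform =
        math::Transform::createLinearTransform(/*voxelSize=*/0.1);

    // Build the point-index acceleration grid, then the PointDataGrid itself.
    tools::PointIndexGrid::Ptr indexGrid =
        tools::createPointIndexGrid<tools::PointIndexGrid>(pointList, *transform);
    PointDataGrid::Ptr points =
        createPointDataGrid<NullCodec, PointDataGrid>(*indexGrid, pointList, *transform);

    // One active mask voxel per occupied point voxel.
    auto mask = convertPointsToMask(*points);
    std::cout << "active voxels: " << mask->tree().activeVoxelCount() << std::endl;
    return 0;
}

Passing a MultiGroupFilter or a different transform to convertPointsToMask(), as the test does, changes which points contribute and at what resolution, but the call structure stays the same.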
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestGridIO.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/openvdb.h> #include <cstdio> // for remove() class TestGridIO: public ::testing::Test { public: typedef openvdb::tree::Tree< openvdb::tree::RootNode< openvdb::tree::InternalNode< openvdb::tree::InternalNode< openvdb::tree::InternalNode< openvdb::tree::LeafNode<float, 2>, 3>, 4>, 5> > > Float5432Tree; typedef openvdb::Grid<Float5432Tree> Float5432Grid; void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } protected: template<typename GridType> void readAllTest(); }; //////////////////////////////////////// template<typename GridType> void TestGridIO::readAllTest() { using namespace openvdb; typedef typename GridType::TreeType TreeType; typedef typename TreeType::Ptr TreePtr; typedef typename TreeType::ValueType ValueT; typedef typename TreeType::NodeCIter NodeCIter; const ValueT zero = zeroVal<ValueT>(); // For each level of the tree, compute a bit mask for use in converting // global coordinates to node origins for nodes at that level. // That is, node_origin = global_coordinates & mask[node_level]. std::vector<Index> mask; TreeType::getNodeLog2Dims(mask); const size_t height = mask.size(); for (size_t i = 0; i < height; ++i) { Index dim = 0; for (size_t j = i; j < height; ++j) dim += mask[j]; mask[i] = ~((1 << dim) - 1); } const Index childDim = 1 + ~(mask[0]); // Choose sample coordinate pairs (coord0, coord1) and (coord0, coord2) // that are guaranteed to lie in different children of the root node // (because they are separated by more than the child node dimension). const Coord coord0(0, 0, 0), coord1(int(1.1 * childDim), 0, 0), coord2(0, int(1.1 * childDim), 0); // Create trees. TreePtr tree1(new TreeType(zero + 1)), tree2(new TreeType(zero + 2)); // Set some values. tree1->setValue(coord0, zero + 5); tree1->setValue(coord1, zero + 6); tree2->setValue(coord0, zero + 10); tree2->setValue(coord2, zero + 11); // Create grids with trees and assign transforms. math::Transform::Ptr trans1(math::Transform::createLinearTransform(0.1)), trans2(math::Transform::createLinearTransform(0.1)); GridBase::Ptr grid1 = createGrid(tree1), grid2 = createGrid(tree2); grid1->setTransform(trans1); grid1->setName("density"); grid2->setTransform(trans2); grid2->setName("temperature"); OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN EXPECT_EQ(ValueT(zero + 5), tree1->getValue(coord0)); EXPECT_EQ(ValueT(zero + 6), tree1->getValue(coord1)); EXPECT_EQ(ValueT(zero + 10), tree2->getValue(coord0)); EXPECT_EQ(ValueT(zero + 11), tree2->getValue(coord2)); OPENVDB_NO_FP_EQUALITY_WARNING_END // count[d] is the number of nodes already visited at depth d. // There should be exactly two nodes at each depth (apart from the root). std::vector<int> count(height, 0); // Verify that tree1 has correct node origins. for (NodeCIter iter = tree1->cbeginNode(); iter; ++iter) { const Index depth = iter.getDepth(); const Coord expected[2] = { coord0 & mask[depth], // origin of the first node at this depth coord1 & mask[depth] // origin of the second node at this depth }; EXPECT_EQ(expected[count[depth]], iter.getCoord()); ++count[depth]; } // Verify that tree2 has correct node origins. 
count.assign(height, 0); // reset node counts for (NodeCIter iter = tree2->cbeginNode(); iter; ++iter) { const Index depth = iter.getDepth(); const Coord expected[2] = { coord0 & mask[depth], coord2 & mask[depth] }; EXPECT_EQ(expected[count[depth]], iter.getCoord()); ++count[depth]; } MetaMap::Ptr meta(new MetaMap); meta->insertMeta("author", StringMetadata("Einstein")); meta->insertMeta("year", Int32Metadata(2009)); GridPtrVecPtr grids(new GridPtrVec); grids->push_back(grid1); grids->push_back(grid2); // Write grids and metadata out to a file. { io::File vdbfile("something.vdb2"); vdbfile.write(*grids, *meta); } meta.reset(); grids.reset(); io::File vdbfile("something.vdb2"); EXPECT_THROW(vdbfile.getGrids(), openvdb::IoError); // file has not been opened // Read the grids back in. vdbfile.open(); EXPECT_TRUE(vdbfile.isOpen()); grids = vdbfile.getGrids(); meta = vdbfile.getMetadata(); // Ensure we have the metadata. EXPECT_TRUE(meta.get() != NULL); EXPECT_EQ(2, int(meta->metaCount())); EXPECT_EQ(std::string("Einstein"), meta->metaValue<std::string>("author")); EXPECT_EQ(2009, meta->metaValue<int32_t>("year")); // Ensure we got both grids. EXPECT_TRUE(grids.get() != NULL); EXPECT_EQ(2, int(grids->size())); grid1.reset(); grid1 = findGridByName(*grids, "density"); EXPECT_TRUE(grid1.get() != NULL); TreePtr density = gridPtrCast<GridType>(grid1)->treePtr(); EXPECT_TRUE(density.get() != NULL); grid2.reset(); grid2 = findGridByName(*grids, "temperature"); EXPECT_TRUE(grid2.get() != NULL); TreePtr temperature = gridPtrCast<GridType>(grid2)->treePtr(); EXPECT_TRUE(temperature.get() != NULL); OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN EXPECT_EQ(ValueT(zero + 5), density->getValue(coord0)); EXPECT_EQ(ValueT(zero + 6), density->getValue(coord1)); EXPECT_EQ(ValueT(zero + 10), temperature->getValue(coord0)); EXPECT_EQ(ValueT(zero + 11), temperature->getValue(coord2)); OPENVDB_NO_FP_EQUALITY_WARNING_END // Check if we got the correct node origins. count.assign(height, 0); for (NodeCIter iter = density->cbeginNode(); iter; ++iter) { const Index depth = iter.getDepth(); const Coord expected[2] = { coord0 & mask[depth], coord1 & mask[depth] }; EXPECT_EQ(expected[count[depth]], iter.getCoord()); ++count[depth]; } count.assign(height, 0); for (NodeCIter iter = temperature->cbeginNode(); iter; ++iter) { const Index depth = iter.getDepth(); const Coord expected[2] = { coord0 & mask[depth], coord2 & mask[depth] }; EXPECT_EQ(expected[count[depth]], iter.getCoord()); ++count[depth]; } vdbfile.close(); ::remove("something.vdb2"); } TEST_F(TestGridIO, testReadAllBool) { readAllTest<openvdb::BoolGrid>(); } TEST_F(TestGridIO, testReadAllFloat) { readAllTest<openvdb::FloatGrid>(); } TEST_F(TestGridIO, testReadAllVec3S) { readAllTest<openvdb::Vec3SGrid>(); } TEST_F(TestGridIO, testReadAllFloat5432) { Float5432Grid::registerGrid(); readAllTest<Float5432Grid>(); }
6,926
C++
34.341837
105
0.639186
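Aside: TestGridIO drives the file round trip through templated trees and metadata. The core write/read cycle it relies on reduces to a few calls, sketched below; the grid contents and the file name "example.vdb" are placeholders.

#include <openvdb/openvdb.h>
#include <iostream>

int main()
{
    openvdb::initialize();

    // A named grid with one set voxel and a simple linear transform.
    openvdb::FloatGrid::Ptr grid = openvdb::FloatGrid::create(/*background=*/0.0f);
    grid->setName("density");
    grid->setTransform(openvdb::math::Transform::createLinearTransform(/*voxelSize=*/0.1));
    grid->tree().setValue(openvdb::Coord(0, 0, 0), 1.0f);

    { // write
        openvdb::GridPtrVec grids;
        grids.push_back(grid);
        openvdb::io::File file("example.vdb");
        file.write(grids);
    }

    { // read back and look the grid up by name
        openvdb::io::File file("example.vdb");
        file.open();
        openvdb::GridPtrVecPtr grids = file.getGrids();
        file.close();

        openvdb::GridBase::Ptr base = openvdb::findGridByName(*grids, "density");
        openvdb::FloatGrid::Ptr density = openvdb::gridPtrCast<openvdb::FloatGrid>(base);
        std::cout << density->tree().getValue(openvdb::Coord(0, 0, 0)) << std::endl;
    }
    return 0;
}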
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestDenseSparseTools.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/openvdb.h> #include <openvdb/tools/Dense.h> #include <openvdb/tools/DenseSparseTools.h> #include <openvdb/Types.h> #include <openvdb/math/Math.h> #include "util.h" class TestDenseSparseTools: public ::testing::Test { public: void SetUp() override; void TearDown() override { delete mDense; } protected: openvdb::tools::Dense<float>* mDense; openvdb::math::Coord mijk; }; void TestDenseSparseTools::SetUp() { namespace vdbmath = openvdb::math; // Domain for the dense grid vdbmath::CoordBBox domain(vdbmath::Coord(-100, -16, 12), vdbmath::Coord( 90, 103, 100)); // Create dense grid, filled with 0.f mDense = new openvdb::tools::Dense<float>(domain, 0.f); // Insert non-zero values mijk[0] = 1; mijk[1] = -2; mijk[2] = 14; } namespace { // Simple Rule for extracting data greater than a determined mMaskValue // and producing a tree that holds type ValueType namespace vdbmath = openvdb::math; class FloatRule { public: // Standard tree type (e.g. BoolTree or FloatTree in openvdb.h) typedef openvdb::FloatTree ResultTreeType; typedef ResultTreeType::LeafNodeType ResultLeafNodeType; typedef float ResultValueType; typedef float DenseValueType; FloatRule(const DenseValueType& value): mMaskValue(value){} template <typename IndexOrCoord> void operator()(const DenseValueType& a, const IndexOrCoord& offset, ResultLeafNodeType* leaf) const { if (a > mMaskValue) { leaf->setValueOn(offset, a); } } private: const DenseValueType mMaskValue; }; class BoolRule { public: // Standard tree type (e.g. BoolTree or FloatTree in openvdb.h) typedef openvdb::BoolTree ResultTreeType; typedef ResultTreeType::LeafNodeType ResultLeafNodeType; typedef bool ResultValueType; typedef float DenseValueType; BoolRule(const DenseValueType& value): mMaskValue(value){} template <typename IndexOrCoord> void operator()(const DenseValueType& a, const IndexOrCoord& offset, ResultLeafNodeType* leaf) const { if (a > mMaskValue) { leaf->setValueOn(offset, true); } } private: const DenseValueType mMaskValue; }; // Square each value struct SqrOp { float operator()(const float& in) const { return in * in; } }; } TEST_F(TestDenseSparseTools, testExtractSparseFloatTree) { namespace vdbmath = openvdb::math; FloatRule rule(0.5f); const float testvalue = 1.f; mDense->setValue(mijk, testvalue); const float background(0.f); openvdb::FloatTree::Ptr result = openvdb::tools::extractSparseTree(*mDense, rule, background); // The result should have only one active value. EXPECT_TRUE(result->activeVoxelCount() == 1); // The result should have only one leaf EXPECT_TRUE(result->leafCount() == 1); // The background EXPECT_NEAR(background, result->background(), 1.e-6); // The stored value EXPECT_NEAR(testvalue, result->getValue(mijk), 1.e-6); } TEST_F(TestDenseSparseTools, testExtractSparseBoolTree) { const float testvalue = 1.f; mDense->setValue(mijk, testvalue); const float cutoff(0.5); openvdb::BoolTree::Ptr result = openvdb::tools::extractSparseTree(*mDense, BoolRule(cutoff), false); // The result should have only one active value. 
EXPECT_TRUE(result->activeVoxelCount() == 1); // The result should have only one leaf EXPECT_TRUE(result->leafCount() == 1); // The background EXPECT_TRUE(result->background() == false); // The stored value EXPECT_TRUE(result->getValue(mijk) == true); } TEST_F(TestDenseSparseTools, testExtractSparseAltDenseLayout) { namespace vdbmath = openvdb::math; FloatRule rule(0.5f); // Create a dense grid with the alternate data layout // but the same domain as mDense openvdb::tools::Dense<float, openvdb::tools::LayoutXYZ> dense(mDense->bbox(), 0.f); const float testvalue = 1.f; dense.setValue(mijk, testvalue); const float background(0.f); openvdb::FloatTree::Ptr result = openvdb::tools::extractSparseTree(dense, rule, background); // The result should have only one active value. EXPECT_TRUE(result->activeVoxelCount() == 1); // The result should have only one leaf EXPECT_TRUE(result->leafCount() == 1); // The background EXPECT_NEAR(background, result->background(), 1.e-6); // The stored value EXPECT_NEAR(testvalue, result->getValue(mijk), 1.e-6); } TEST_F(TestDenseSparseTools, testExtractSparseMaskedTree) { namespace vdbmath = openvdb::math; const float testvalue = 1.f; mDense->setValue(mijk, testvalue); // Create a mask with two values. One in the domain of // interest and one outside. The intersection of the active // state topology of the mask and the domain of interest will define // the topology of the extracted result. openvdb::FloatTree mask(0.f); // turn on a point inside the bouding domain of the dense grid mask.setValue(mijk, 5.f); // turn on a point outside the bounding domain of the dense grid vdbmath::Coord outsidePoint = mDense->bbox().min() - vdbmath::Coord(3, 3, 3); mask.setValue(outsidePoint, 1.f); float background = 10.f; openvdb::FloatTree::Ptr result = openvdb::tools::extractSparseTreeWithMask(*mDense, mask, background); // The result should have only one active value. EXPECT_TRUE(result->activeVoxelCount() == 1); // The result should have only one leaf EXPECT_TRUE(result->leafCount() == 1); // The background EXPECT_NEAR(background, result->background(), 1.e-6); // The stored value EXPECT_NEAR(testvalue, result->getValue(mijk), 1.e-6); } TEST_F(TestDenseSparseTools, testDenseTransform) { namespace vdbmath = openvdb::math; vdbmath::CoordBBox domain(vdbmath::Coord(-4, -6, 10), vdbmath::Coord( 1, 2, 15)); // Create dense grid, filled with value const float value(2.f); const float valueSqr(value*value); openvdb::tools::Dense<float> dense(domain, 0.f); dense.fill(value); SqrOp op; vdbmath::CoordBBox smallBBox(vdbmath::Coord(-5, -5, 11), vdbmath::Coord( 0, 1, 13) ); // Apply the transformation openvdb::tools::transformDense<float, SqrOp>(dense, smallBBox, op, true); vdbmath::Coord ijk; // Test results. 
for (ijk[0] = domain.min().x(); ijk[0] < domain.max().x() + 1; ++ijk[0]) { for (ijk[1] = domain.min().y(); ijk[1] < domain.max().y() + 1; ++ijk[1]) { for (ijk[2] = domain.min().z(); ijk[2] < domain.max().z() + 1; ++ijk[2]) { if (smallBBox.isInside(ijk)) { // the functor was applied here // the value should be base * base EXPECT_NEAR(dense.getValue(ijk), valueSqr, 1.e-6); } else { // the original value EXPECT_NEAR(dense.getValue(ijk), value, 1.e-6); } } } } } TEST_F(TestDenseSparseTools, testOver) { namespace vdbmath = openvdb::math; const vdbmath::CoordBBox domain(vdbmath::Coord(-10, 0, 5), vdbmath::Coord( 10, 5, 10)); const openvdb::Coord ijk = domain.min() + openvdb::Coord(1, 1, 1); // Create dense grid, filled with value const float value(2.f); const float strength(1.f); const float beta(1.f); openvdb::FloatTree src(0.f); src.setValue(ijk, 1.f); openvdb::FloatTree alpha(0.f); alpha.setValue(ijk, 1.f); const float expected = openvdb::tools::ds::OpOver<float>::apply( value, alpha.getValue(ijk), src.getValue(ijk), strength, beta, 1.f); { // testing composite function openvdb::tools::Dense<float> dense(domain, 0.f); dense.fill(value); openvdb::tools::compositeToDense<openvdb::tools::DS_OVER>( dense, src, alpha, beta, strength, true /*threaded*/); // Check for over value EXPECT_NEAR(dense.getValue(ijk), expected, 1.e-6); // Check for original value EXPECT_NEAR(dense.getValue(openvdb::Coord(1,1,1) + ijk), value, 1.e-6); } { // testing sparse explict sparse composite openvdb::tools::Dense<float> dense(domain, 0.f); dense.fill(value); typedef openvdb::tools::ds::CompositeFunctorTranslator<openvdb::tools::DS_OVER, float> CompositeTool; typedef CompositeTool::OpT Method; openvdb::tools::SparseToDenseCompositor<Method, openvdb::FloatTree> sparseToDense(dense, src, alpha, beta, strength); sparseToDense.sparseComposite(true); // Check for over value EXPECT_NEAR(dense.getValue(ijk), expected, 1.e-6); // Check for original value EXPECT_NEAR(dense.getValue(openvdb::Coord(1,1,1) + ijk), value, 1.e-6); } { // testing sparse explict dense composite openvdb::tools::Dense<float> dense(domain, 0.f); dense.fill(value); typedef openvdb::tools::ds::CompositeFunctorTranslator<openvdb::tools::DS_OVER, float> CompositeTool; typedef CompositeTool::OpT Method; openvdb::tools::SparseToDenseCompositor<Method, openvdb::FloatTree> sparseToDense(dense, src, alpha, beta, strength); sparseToDense.denseComposite(true); // Check for over value EXPECT_NEAR(dense.getValue(ijk), expected, 1.e-6); // Check for original value EXPECT_NEAR(dense.getValue(openvdb::Coord(1,1,1) + ijk), value, 1.e-6); } }
10,216
C++
27.699438
96
0.61854
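Aside: the extraction rule pattern above (FloatRule/BoolRule) is the entire interface that tools::extractSparseTree() needs. Below is a compact sketch with a hypothetical thresholding rule; the domain, threshold, and sample value are arbitrary.

#include <openvdb/openvdb.h>
#include <openvdb/tools/Dense.h>
#include <openvdb/tools/DenseSparseTools.h>
#include <iostream>

// Keep only those dense voxels whose value exceeds a threshold.
struct ThresholdRule {
    using ResultTreeType = openvdb::FloatTree;
    using ResultLeafNodeType = ResultTreeType::LeafNodeType;
    using ResultValueType = float;
    using DenseValueType = float;

    explicit ThresholdRule(float t): threshold(t) {}

    template<typename IndexOrCoord>
    void operator()(const DenseValueType& value, const IndexOrCoord& offset,
        ResultLeafNodeType* leaf) const
    {
        if (value > threshold) leaf->setValueOn(offset, value);
    }

    float threshold;
};

int main()
{
    openvdb::initialize();
    using namespace openvdb;

    // A small dense block filled with zeros, plus one interesting sample.
    tools::Dense<float> dense(math::CoordBBox(Coord(0, 0, 0), Coord(63, 63, 63)), /*fill=*/0.0f);
    dense.setValue(Coord(10, 20, 30), 1.0f);

    FloatTree::Ptr sparse =
        tools::extractSparseTree(dense, ThresholdRule(0.5f), /*background=*/0.0f);
    std::cout << "active voxels: " << sparse->activeVoxelCount() << std::endl;
    return 0;
}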
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/main.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include <algorithm> // for std::shuffle()
#include <cmath> // for std::round()
#include <cstdlib> // for EXIT_SUCCESS
#include <cstring> // for strrchr()
#include <exception>
#include <fstream>
#include <iostream>
#include <random>
#include <string>
#include <vector>

#include "gtest/gtest.h"

int
main(int argc, char *argv[])
{
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}
491
C++
20.391303
48
0.692464
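Aside: main.cc only boots GoogleTest; the per-test OpenVDB setup lives in fixtures such as the ones in TestPointMask and TestGridIO above. A stripped-down sketch of that fixture pattern follows; the fixture and test names here are made up.

#include "gtest/gtest.h"
#include <openvdb/openvdb.h>

// Pair openvdb::initialize()/uninitialize() with GoogleTest's SetUp()/TearDown()
// so each test starts from a freshly initialized library state.
class ExampleFixture: public ::testing::Test
{
public:
    void SetUp() override { openvdb::initialize(); }
    void TearDown() override { openvdb::uninitialize(); }
};

TEST_F(ExampleFixture, gridIsInitiallyEmpty)
{
    openvdb::FloatGrid::Ptr grid = openvdb::FloatGrid::create(/*background=*/0.0f);
    EXPECT_TRUE(grid->empty());
}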
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestLevelSetRayIntersector.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file unittest/TestLevelSetRayIntersector.cc /// @author Ken Museth // Uncomment to enable statistics of ray-intersections //#define STATS_TEST #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/openvdb.h> #include <openvdb/math/Ray.h> #include <openvdb/Types.h> #include <openvdb/math/Transform.h> #include <openvdb/tools/LevelSetSphere.h> #include <openvdb/tools/RayIntersector.h> #include <openvdb/tools/RayTracer.h>// for Film #ifdef STATS_TEST //only needed for statistics #include <openvdb/math/Stats.h> #include <openvdb/util/CpuTimer.h> #include <iostream> #endif #define ASSERT_DOUBLES_APPROX_EQUAL(expected, actual) \ EXPECT_NEAR((expected), (actual), /*tolerance=*/1.e-6); class TestLevelSetRayIntersector : public ::testing::Test { }; TEST_F(TestLevelSetRayIntersector, tests) { using namespace openvdb; typedef math::Ray<double> RayT; typedef RayT::Vec3Type Vec3T; {// voxel intersection against a level set sphere const float r = 5.0f; const Vec3f c(20.0f, 0.0f, 0.0f); const float s = 0.5f, w = 2.0f; FloatGrid::Ptr ls = tools::createLevelSetSphere<FloatGrid>(r, c, s, w); tools::LevelSetRayIntersector<FloatGrid> lsri(*ls); const Vec3T dir(1.0, 0.0, 0.0); const Vec3T eye(2.0, 0.0, 0.0); const RayT ray(eye, dir); //std::cerr << ray << std::endl; Vec3T xyz(0); Real time = 0; EXPECT_TRUE(lsri.intersectsWS(ray, xyz, time)); ASSERT_DOUBLES_APPROX_EQUAL(15.0, xyz[0]); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, xyz[1]); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, xyz[2]); ASSERT_DOUBLES_APPROX_EQUAL(13.0, time); double t0=0, t1=0; EXPECT_TRUE(ray.intersects(c, r, t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL(t0, time); //std::cerr << "\nray("<<t0<<")="<<ray(t0)<<std::endl; //std::cerr << "Intersection at xyz = " << xyz << " time = " << time << std::endl; EXPECT_TRUE(ray(t0) == xyz); } {// voxel intersection against a level set sphere const float r = 5.0f; const Vec3f c(20.0f, 0.0f, 0.0f); const float s = 0.5f, w = 2.0f; FloatGrid::Ptr ls = tools::createLevelSetSphere<FloatGrid>(r, c, s, w); tools::LevelSetRayIntersector<FloatGrid> lsri(*ls); const Vec3T dir(1.0,-0.0,-0.0); const Vec3T eye(2.0, 0.0, 0.0); const RayT ray(eye, dir); //std::cerr << ray << std::endl; Vec3T xyz(0); Real time = 0; EXPECT_TRUE(lsri.intersectsWS(ray, xyz, time)); ASSERT_DOUBLES_APPROX_EQUAL(15.0, xyz[0]); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, xyz[1]); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, xyz[2]); ASSERT_DOUBLES_APPROX_EQUAL(13.0, time); double t0=0, t1=0; EXPECT_TRUE(ray.intersects(c, r, t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL(t0, time); //std::cerr << "\nray("<<t0<<")="<<ray(t0)<<std::endl; //std::cerr << "Intersection at xyz = " << xyz << std::endl; EXPECT_TRUE(ray(t0) == xyz); } {// voxel intersection against a level set sphere const float r = 5.0f; const Vec3f c(0.0f, 20.0f, 0.0f); const float s = 1.5f, w = 2.0f; FloatGrid::Ptr ls = tools::createLevelSetSphere<FloatGrid>(r, c, s, w); tools::LevelSetRayIntersector<FloatGrid> lsri(*ls); const Vec3T dir(0.0, 1.0, 0.0); const Vec3T eye(0.0,-2.0, 0.0); RayT ray(eye, dir); Vec3T xyz(0); Real time = 0; EXPECT_TRUE(lsri.intersectsWS(ray, xyz, time)); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, xyz[0]); ASSERT_DOUBLES_APPROX_EQUAL(15.0, xyz[1]); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, xyz[2]); ASSERT_DOUBLES_APPROX_EQUAL(17.0, time); double t0=0, t1=0; EXPECT_TRUE(ray.intersects(c, r, t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL(t0, time); //std::cerr << "\nray("<<t0<<")="<<ray(t0)<<std::endl; //std::cerr << "Intersection at 
xyz = " << xyz << std::endl; ASSERT_DOUBLES_APPROX_EQUAL( 0.0, ray(t0)[0]); ASSERT_DOUBLES_APPROX_EQUAL(15.0, ray(t0)[1]); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, ray(t0)[2]); } {// voxel intersection against a level set sphere const float r = 5.0f; const Vec3f c(0.0f, 20.0f, 0.0f); const float s = 1.5f, w = 2.0f; FloatGrid::Ptr ls = tools::createLevelSetSphere<FloatGrid>(r, c, s, w); tools::LevelSetRayIntersector<FloatGrid> lsri(*ls); const Vec3T dir(-0.0, 1.0,-0.0); const Vec3T eye( 0.0,-2.0, 0.0); RayT ray(eye, dir); Vec3T xyz(0); Real time = 0; EXPECT_TRUE(lsri.intersectsWS(ray, xyz, time)); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, xyz[0]); ASSERT_DOUBLES_APPROX_EQUAL(15.0, xyz[1]); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, xyz[2]); ASSERT_DOUBLES_APPROX_EQUAL(17.0, time); double t0=0, t1=0; EXPECT_TRUE(ray.intersects(c, r, t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL(t0, time); //std::cerr << "\nray("<<t0<<")="<<ray(t0)<<std::endl; //std::cerr << "Intersection at xyz = " << xyz << std::endl; ASSERT_DOUBLES_APPROX_EQUAL( 0.0, ray(t0)[0]); ASSERT_DOUBLES_APPROX_EQUAL(15.0, ray(t0)[1]); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, ray(t0)[2]); } {// voxel intersection against a level set sphere const float r = 5.0f; const Vec3f c(0.0f, 0.0f, 20.0f); const float s = 1.0f, w = 3.0f; FloatGrid::Ptr ls = tools::createLevelSetSphere<FloatGrid>(r, c, s, w); typedef tools::LinearSearchImpl<FloatGrid> SearchImplT; tools::LevelSetRayIntersector<FloatGrid, SearchImplT, -1> lsri(*ls); const Vec3T dir(0.0, 0.0, 1.0); const Vec3T eye(0.0, 0.0, 4.0); RayT ray(eye, dir); Vec3T xyz(0); Real time = 0; EXPECT_TRUE(lsri.intersectsWS(ray, xyz, time)); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, xyz[0]); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, xyz[1]); ASSERT_DOUBLES_APPROX_EQUAL(15.0, xyz[2]); ASSERT_DOUBLES_APPROX_EQUAL(11.0, time); double t0=0, t1=0; EXPECT_TRUE(ray.intersects(c, r, t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL(t0, time); //std::cerr << "\nray("<<t0<<")="<<ray(t0)<<std::endl; //std::cerr << "Intersection at xyz = " << xyz << std::endl; ASSERT_DOUBLES_APPROX_EQUAL( 0.0, ray(t0)[0]); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, ray(t0)[1]); ASSERT_DOUBLES_APPROX_EQUAL(15.0, ray(t0)[2]); } {// voxel intersection against a level set sphere const float r = 5.0f; const Vec3f c(0.0f, 0.0f, 20.0f); const float s = 1.0f, w = 3.0f; FloatGrid::Ptr ls = tools::createLevelSetSphere<FloatGrid>(r, c, s, w); typedef tools::LinearSearchImpl<FloatGrid> SearchImplT; tools::LevelSetRayIntersector<FloatGrid, SearchImplT, -1> lsri(*ls); const Vec3T dir(-0.0,-0.0, 1.0); const Vec3T eye( 0.0, 0.0, 4.0); RayT ray(eye, dir); Vec3T xyz(0); Real time = 0; EXPECT_TRUE(lsri.intersectsWS(ray, xyz, time)); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, xyz[0]); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, xyz[1]); ASSERT_DOUBLES_APPROX_EQUAL(15.0, xyz[2]); ASSERT_DOUBLES_APPROX_EQUAL(11.0, time); double t0=0, t1=0; EXPECT_TRUE(ray.intersects(c, r, t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL(t0, time); //std::cerr << "t0 = " << t0 << " t1 = " << t1 << std::endl; //std::cerr << "\nray("<<t0<<")="<<ray(t0)<<std::endl; //std::cerr << "Intersection at xyz = " << xyz << std::endl; ASSERT_DOUBLES_APPROX_EQUAL( 0.0, ray(t0)[0]); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, ray(t0)[1]); ASSERT_DOUBLES_APPROX_EQUAL(15.0, ray(t0)[2]); } {// voxel intersection against a level set sphere const float r = 5.0f; const Vec3f c(0.0f, 0.0f, 20.0f); const float s = 1.0f, w = 3.0f; FloatGrid::Ptr ls = tools::createLevelSetSphere<FloatGrid>(r, c, s, w); typedef tools::LinearSearchImpl<FloatGrid> SearchImplT; tools::LevelSetRayIntersector<FloatGrid, 
SearchImplT, -1> lsri(*ls); const Vec3T dir(-0.0,-0.0, 1.0); const Vec3T eye( 0.0, 0.0, 4.0); RayT ray(eye, dir, 16.0); Vec3T xyz(0); Real time = 0; EXPECT_TRUE(lsri.intersectsWS(ray, xyz, time)); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, xyz[0]); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, xyz[1]); ASSERT_DOUBLES_APPROX_EQUAL(25.0, xyz[2]); ASSERT_DOUBLES_APPROX_EQUAL(21.0, time); double t0=0, t1=0; EXPECT_TRUE(ray.intersects(c, r, t0, t1)); //std::cerr << "t0 = " << t0 << " t1 = " << t1 << std::endl; //std::cerr << "\nray("<<t0<<")="<<ray(t0)<<std::endl; //std::cerr << "Intersection at xyz = " << xyz << std::endl; ASSERT_DOUBLES_APPROX_EQUAL(t1, time); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, ray(t0)[0]); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, ray(t0)[1]); ASSERT_DOUBLES_APPROX_EQUAL(25.0, ray(t1)[2]); } {// voxel intersection against a level set sphere const float r = 5.0f; const Vec3f c(10.0f, 10.0f, 10.0f); const float s = 1.0f, w = 3.0f; FloatGrid::Ptr ls = tools::createLevelSetSphere<FloatGrid>(r, c, s, w); tools::LevelSetRayIntersector<FloatGrid> lsri(*ls); Vec3T dir(1.0, 1.0, 1.0); dir.normalize(); const Vec3T eye(0.0, 0.0, 0.0); RayT ray(eye, dir); //std::cerr << "ray: " << ray << std::endl; Vec3T xyz(0); Real time = 0; EXPECT_TRUE(lsri.intersectsWS(ray, xyz, time)); //std::cerr << "\nIntersection at xyz = " << xyz << std::endl; //analytical intersection test double t0=0, t1=0; EXPECT_TRUE(ray.intersects(c, r, t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL(t0, time); ASSERT_DOUBLES_APPROX_EQUAL((ray(t0)-c).length()-r, 0); ASSERT_DOUBLES_APPROX_EQUAL((ray(t1)-c).length()-r, 0); //std::cerr << "\nray("<<t0<<")="<<ray(t0)<<std::endl; //std::cerr << "\nray("<<t1<<")="<<ray(t1)<<std::endl; const Vec3T delta = xyz - ray(t0); //std::cerr << "delta = " << delta << std::endl; //std::cerr << "|delta|/dx=" << (delta.length()/ls->voxelSize()[0]) << std::endl; ASSERT_DOUBLES_APPROX_EQUAL(0, delta.length()); } {// test intersections against a high-resolution level set sphere @1024^3 const float r = 5.0f; const Vec3f c(10.0f, 10.0f, 20.0f); const float s = 0.01f, w = 2.0f; double t0=0, t1=0; FloatGrid::Ptr ls = tools::createLevelSetSphere<FloatGrid>(r, c, s, w); typedef tools::LinearSearchImpl<FloatGrid, /*iterations=*/2> SearchImplT; tools::LevelSetRayIntersector<FloatGrid, SearchImplT> lsri(*ls); Vec3T xyz(0); Real time = 0; const size_t width = 1024; const double dx = 20.0/width; const Vec3T dir(0.0, 0.0, 1.0); for (size_t i=0; i<width; ++i) { for (size_t j=0; j<width; ++j) { const Vec3T eye(dx*double(i), dx*double(j), 0.0); const RayT ray(eye, dir); if (lsri.intersectsWS(ray, xyz, time)){ EXPECT_TRUE(ray.intersects(c, r, t0, t1)); EXPECT_NEAR(0, 100*(t0-time)/t0, /*tolerance=*/0.1);//percent double delta = (ray(t0)-xyz).length()/s;//in voxel units EXPECT_TRUE(delta < 0.06); } } } } } #ifdef STATS_TEST TEST_F(TestLevelSetRayIntersector, stats) { using namespace openvdb; typedef math::Ray<double> RayT; typedef RayT::Vec3Type Vec3T; util::CpuTimer timer; {// generate an image, benchmarks and statistics // Generate a high-resolution level set sphere @1024^3 const float r = 5.0f; const Vec3f c(10.0f, 10.0f, 20.0f); const float s = 0.01f, w = 2.0f; double t0=0, t1=0; FloatGrid::Ptr ls = tools::createLevelSetSphere<FloatGrid>(r, c, s, w); typedef tools::LinearSearchImpl<FloatGrid, /*iterations=*/2> SearchImplT; tools::LevelSetRayIntersector<FloatGrid, SearchImplT> lsri(*ls); Vec3T xyz(0); const size_t width = 1024; const double dx = 20.0/width; const Vec3T dir(0.0, 0.0, 1.0); tools::Film film(width, width); math::Stats stats; 
math::Histogram hist(0.0, 0.1, 20); timer.start("\nSerial ray-intersections of sphere"); for (size_t i=0; i<width; ++i) { for (size_t j=0; j<width; ++j) { const Vec3T eye(dx*i, dx*j, 0.0); const RayT ray(eye, dir); if (lsri.intersectsWS(ray, xyz)){ EXPECT_TRUE(ray.intersects(c, r, t0, t1)); double delta = (ray(t0)-xyz).length()/s;//in voxel units stats.add(delta); hist.add(delta); if (delta > 0.01) { film.pixel(i, j) = tools::Film::RGBA(1.0f, 0.0f, 0.0f); } else { film.pixel(i, j) = tools::Film::RGBA(0.0f, 1.0f, 0.0f); } } } } timer.stop(); film.savePPM("sphere_serial"); stats.print("First hit"); hist.print("First hit"); } } #endif // STATS_TEST #undef STATS_TEST
13,748
C++
36.463215
91
0.556663
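The record above is the TestLevelSetRayIntersector unit test. As a quick reference, the query pattern it exercises reduces to a few lines; the sketch below is not part of the corpus file — it only restates calls that already appear in the test (createLevelSetSphere, LevelSetRayIntersector, intersectsWS, math::Ray), while the main() scaffolding, the chosen sphere parameters, and the printed output are assumptions added for illustration.

// Minimal sketch (not part of the test file above): the ray / level-set query
// pattern that TestLevelSetRayIntersector exercises.
#include <openvdb/openvdb.h>
#include <openvdb/math/Ray.h>
#include <openvdb/tools/LevelSetSphere.h>
#include <openvdb/tools/RayIntersector.h>
#include <iostream>

int main()
{
    using namespace openvdb;
    typedef math::Ray<double> RayT;
    typedef RayT::Vec3Type Vec3T;

    openvdb::initialize();

    // Sphere of radius 5 centered at (20,0,0), voxel size 0.5, half-width 2
    // (the same parameters as the first test case above).
    FloatGrid::Ptr ls =
        tools::createLevelSetSphere<FloatGrid>(5.0f, Vec3f(20.0f, 0.0f, 0.0f), 0.5f, 2.0f);

    // Wrap the level set in a ray intersector and shoot a world-space ray.
    tools::LevelSetRayIntersector<FloatGrid> lsri(*ls);
    const RayT ray(/*eye=*/Vec3T(2.0, 0.0, 0.0), /*dir=*/Vec3T(1.0, 0.0, 0.0));

    Vec3T xyz(0);
    Real time = 0;
    if (lsri.intersectsWS(ray, xyz, time)) {
        std::cout << "hit at " << xyz << " (ray time " << time << ")\n";
    }
    return 0;
}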
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestStats.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <openvdb/openvdb.h> #include <openvdb/math/Operators.h> // for ISGradient #include <openvdb/math/Stats.h> #include <openvdb/tools/Statistics.h> #include "gtest/gtest.h" #define ASSERT_DOUBLES_EXACTLY_EQUAL(expected, actual) \ EXPECT_NEAR((expected), (actual), /*tolerance=*/0.0); class TestStats: public ::testing::Test { }; TEST_F(TestStats, testMinMax) { {// test Coord which uses lexicographic less than openvdb::math::MinMax<openvdb::Coord> s(openvdb::Coord::max(), openvdb::Coord::min()); //openvdb::math::MinMax<openvdb::Coord> s;// will not compile since Coord is not a POD type EXPECT_EQ(openvdb::Coord::max(), s.min()); EXPECT_EQ(openvdb::Coord::min(), s.max()); s.add( openvdb::Coord(1,2,3) ); EXPECT_EQ(openvdb::Coord(1,2,3), s.min()); EXPECT_EQ(openvdb::Coord(1,2,3), s.max()); s.add( openvdb::Coord(0,2,3) ); EXPECT_EQ(openvdb::Coord(0,2,3), s.min()); EXPECT_EQ(openvdb::Coord(1,2,3), s.max()); s.add( openvdb::Coord(1,2,4) ); EXPECT_EQ(openvdb::Coord(0,2,3), s.min()); EXPECT_EQ(openvdb::Coord(1,2,4), s.max()); } {// test double openvdb::math::MinMax<double> s; EXPECT_EQ( std::numeric_limits<double>::max(), s.min()); EXPECT_EQ(-std::numeric_limits<double>::max(), s.max()); s.add( 1.0 ); EXPECT_EQ(1.0, s.min()); EXPECT_EQ(1.0, s.max()); s.add( 2.5 ); EXPECT_EQ(1.0, s.min()); EXPECT_EQ(2.5, s.max()); s.add( -0.5 ); EXPECT_EQ(-0.5, s.min()); EXPECT_EQ( 2.5, s.max()); } {// test int openvdb::math::MinMax<int> s; EXPECT_EQ(std::numeric_limits<int>::max(), s.min()); EXPECT_EQ(std::numeric_limits<int>::min(), s.max()); s.add( 1 ); EXPECT_EQ(1, s.min()); EXPECT_EQ(1, s.max()); s.add( 2 ); EXPECT_EQ(1, s.min()); EXPECT_EQ(2, s.max()); s.add( -5 ); EXPECT_EQ(-5, s.min()); EXPECT_EQ( 2, s.max()); } {// test unsigned openvdb::math::MinMax<uint32_t> s; EXPECT_EQ(std::numeric_limits<uint32_t>::max(), s.min()); EXPECT_EQ(uint32_t(0), s.max()); s.add( 1 ); EXPECT_EQ(uint32_t(1), s.min()); EXPECT_EQ(uint32_t(1), s.max()); s.add( 2 ); EXPECT_EQ(uint32_t(1), s.min()); EXPECT_EQ(uint32_t(2), s.max()); s.add( 0 ); EXPECT_EQ( uint32_t(0), s.min()); EXPECT_EQ( uint32_t(2), s.max()); } } TEST_F(TestStats, testExtrema) { {// trivial test openvdb::math::Extrema s; s.add(0); s.add(1); EXPECT_EQ(2, int(s.size())); EXPECT_NEAR(0.0, s.min(), 0.000001); EXPECT_NEAR(1.0, s.max(), 0.000001); EXPECT_NEAR(1.0, s.range(), 0.000001); //s.print("test"); } {// non-trivial test openvdb::math::Extrema s; const int data[5]={600, 470, 170, 430, 300}; for (int i=0; i<5; ++i) s.add(data[i]); EXPECT_EQ(5, int(s.size())); EXPECT_NEAR(data[2], s.min(), 0.000001); EXPECT_NEAR(data[0], s.max(), 0.000001); EXPECT_NEAR(data[0]-data[2], s.range(), 0.000001); //s.print("test"); } {// non-trivial test of Extrema::add(Extrema) openvdb::math::Extrema s, t; const int data[5]={600, 470, 170, 430, 300}; for (int i=0; i<3; ++i) s.add(data[i]); for (int i=3; i<5; ++i) t.add(data[i]); s.add(t); EXPECT_EQ(5, int(s.size())); EXPECT_NEAR(data[2], s.min(), 0.000001); EXPECT_NEAR(data[0], s.max(), 0.000001); EXPECT_NEAR(data[0]-data[2], s.range(), 0.000001); //s.print("test"); } {// Trivial test of Extrema::add(value, n) openvdb::math::Extrema s; const double val = 3.45; const uint64_t n = 57; s.add(val, 57); EXPECT_EQ(n, s.size()); EXPECT_NEAR(val, s.min(), 0.000001); EXPECT_NEAR(val, s.max(), 0.000001); EXPECT_NEAR(0.0, s.range(), 0.000001); } {// Test 1 of Extrema::add(value), Extrema::add(value, n) and Extrema::add(Extrema) openvdb::math::Extrema s, t; const 
double val1 = 1.0, val2 = 3.0; const uint64_t n1 = 1, n2 =1; s.add(val1, n1); EXPECT_EQ(uint64_t(n1), s.size()); EXPECT_NEAR(val1, s.min(), 0.000001); EXPECT_NEAR(val1, s.max(), 0.000001); for (uint64_t i=0; i<n2; ++i) t.add(val2); s.add(t); EXPECT_EQ(uint64_t(n2), t.size()); EXPECT_NEAR(val2, t.min(), 0.000001); EXPECT_NEAR(val2, t.max(), 0.000001); EXPECT_EQ(uint64_t(n1+n2), s.size()); EXPECT_NEAR(val1, s.min(), 0.000001); EXPECT_NEAR(val2, s.max(), 0.000001); } {// Non-trivial test of Extrema::add(value, n) openvdb::math::Extrema s; s.add(3.45, 6); s.add(1.39, 2); s.add(2.56, 13); s.add(0.03); openvdb::math::Extrema t; for (int i=0; i< 6; ++i) t.add(3.45); for (int i=0; i< 2; ++i) t.add(1.39); for (int i=0; i<13; ++i) t.add(2.56); t.add(0.03); EXPECT_EQ(s.size(), t.size()); EXPECT_NEAR(s.min(), t.min(), 0.000001); EXPECT_NEAR(s.max(), t.max(), 0.000001); } } TEST_F(TestStats, testStats) { {// trivial test openvdb::math::Stats s; s.add(0); s.add(1); EXPECT_EQ(2, int(s.size())); EXPECT_NEAR(0.0, s.min(), 0.000001); EXPECT_NEAR(1.0, s.max(), 0.000001); EXPECT_NEAR(0.5, s.mean(), 0.000001); EXPECT_NEAR(0.25, s.variance(), 0.000001); EXPECT_NEAR(0.5, s.stdDev(), 0.000001); //s.print("test"); } {// non-trivial test openvdb::math::Stats s; const int data[5]={600, 470, 170, 430, 300}; for (int i=0; i<5; ++i) s.add(data[i]); double sum = 0.0; for (int i=0; i<5; ++i) sum += data[i]; const double mean = sum/5.0; sum = 0.0; for (int i=0; i<5; ++i) sum += (data[i]-mean)*(data[i]-mean); const double var = sum/5.0; EXPECT_EQ(5, int(s.size())); EXPECT_NEAR(data[2], s.min(), 0.000001); EXPECT_NEAR(data[0], s.max(), 0.000001); EXPECT_NEAR(mean, s.mean(), 0.000001); EXPECT_NEAR(var, s.variance(), 0.000001); EXPECT_NEAR(sqrt(var), s.stdDev(), 0.000001); //s.print("test"); } {// non-trivial test of Stats::add(Stats) openvdb::math::Stats s, t; const int data[5]={600, 470, 170, 430, 300}; for (int i=0; i<3; ++i) s.add(data[i]); for (int i=3; i<5; ++i) t.add(data[i]); s.add(t); double sum = 0.0; for (int i=0; i<5; ++i) sum += data[i]; const double mean = sum/5.0; sum = 0.0; for (int i=0; i<5; ++i) sum += (data[i]-mean)*(data[i]-mean); const double var = sum/5.0; EXPECT_EQ(5, int(s.size())); EXPECT_NEAR(data[2], s.min(), 0.000001); EXPECT_NEAR(data[0], s.max(), 0.000001); EXPECT_NEAR(mean, s.mean(), 0.000001); EXPECT_NEAR(var, s.variance(), 0.000001); EXPECT_NEAR(sqrt(var), s.stdDev(), 0.000001); //s.print("test"); } {// Trivial test of Stats::add(value, n) openvdb::math::Stats s; const double val = 3.45; const uint64_t n = 57; s.add(val, 57); EXPECT_EQ(n, s.size()); EXPECT_NEAR(val, s.min(), 0.000001); EXPECT_NEAR(val, s.max(), 0.000001); EXPECT_NEAR(val, s.mean(), 0.000001); EXPECT_NEAR(0.0, s.variance(), 0.000001); EXPECT_NEAR(0.0, s.stdDev(), 0.000001); } {// Test 1 of Stats::add(value), Stats::add(value, n) and Stats::add(Stats) openvdb::math::Stats s, t; const double val1 = 1.0, val2 = 3.0, sum = val1 + val2; const uint64_t n1 = 1, n2 =1; s.add(val1, n1); EXPECT_EQ(uint64_t(n1), s.size()); EXPECT_NEAR(val1, s.min(), 0.000001); EXPECT_NEAR(val1, s.max(), 0.000001); EXPECT_NEAR(val1, s.mean(), 0.000001); EXPECT_NEAR(0.0, s.variance(), 0.000001); EXPECT_NEAR(0.0, s.stdDev(), 0.000001); for (uint64_t i=0; i<n2; ++i) t.add(val2); s.add(t); EXPECT_EQ(uint64_t(n2), t.size()); EXPECT_NEAR(val2, t.min(), 0.000001); EXPECT_NEAR(val2, t.max(), 0.000001); EXPECT_NEAR(val2, t.mean(), 0.000001); EXPECT_NEAR(0.0, t.variance(), 0.000001); EXPECT_NEAR(0.0, t.stdDev(), 0.000001); EXPECT_EQ(uint64_t(n1+n2), s.size()); 
EXPECT_NEAR(val1, s.min(), 0.000001); EXPECT_NEAR(val2, s.max(), 0.000001); const double mean = sum/double(n1+n2); EXPECT_NEAR(mean, s.mean(), 0.000001); double var = 0.0; for (uint64_t i=0; i<n1; ++i) var += openvdb::math::Pow2(val1-mean); for (uint64_t i=0; i<n2; ++i) var += openvdb::math::Pow2(val2-mean); var /= double(n1+n2); EXPECT_NEAR(var, s.variance(), 0.000001); } {// Test 2 of Stats::add(value), Stats::add(value, n) and Stats::add(Stats) openvdb::math::Stats s, t; const double val1 = 1.0, val2 = 3.0, sum = val1 + val2; const uint64_t n1 = 1, n2 =1; for (uint64_t i=0; i<n1; ++i) s.add(val1); EXPECT_EQ(uint64_t(n1), s.size()); EXPECT_NEAR(val1, s.min(), 0.000001); EXPECT_NEAR(val1, s.max(), 0.000001); EXPECT_NEAR(val1, s.mean(), 0.000001); EXPECT_NEAR(0.0, s.variance(), 0.000001); EXPECT_NEAR(0.0, s.stdDev(), 0.000001); t.add(val2, n2); EXPECT_EQ(uint64_t(n2), t.size()); EXPECT_NEAR(val2, t.min(), 0.000001); EXPECT_NEAR(val2, t.max(), 0.000001); EXPECT_NEAR(val2, t.mean(), 0.000001); EXPECT_NEAR(0.0, t.variance(), 0.000001); EXPECT_NEAR(0.0, t.stdDev(), 0.000001); s.add(t); EXPECT_EQ(uint64_t(n1+n2), s.size()); EXPECT_NEAR(val1, s.min(), 0.000001); EXPECT_NEAR(val2, s.max(), 0.000001); const double mean = sum/double(n1+n2); EXPECT_NEAR(mean, s.mean(), 0.000001); double var = 0.0; for (uint64_t i=0; i<n1; ++i) var += openvdb::math::Pow2(val1-mean); for (uint64_t i=0; i<n2; ++i) var += openvdb::math::Pow2(val2-mean); var /= double(n1+n2); EXPECT_NEAR(var, s.variance(), 0.000001); } {// Non-trivial test of Stats::add(value, n) and Stats::add(Stats) openvdb::math::Stats s; s.add(3.45, 6); s.add(1.39, 2); s.add(2.56, 13); s.add(0.03); openvdb::math::Stats t; for (int i=0; i< 6; ++i) t.add(3.45); for (int i=0; i< 2; ++i) t.add(1.39); for (int i=0; i<13; ++i) t.add(2.56); t.add(0.03); EXPECT_EQ(s.size(), t.size()); EXPECT_NEAR(s.min(), t.min(), 0.000001); EXPECT_NEAR(s.max(), t.max(), 0.000001); EXPECT_NEAR(s.mean(),t.mean(), 0.000001); EXPECT_NEAR(s.variance(), t.variance(), 0.000001); } {// Non-trivial test of Stats::add(value, n) openvdb::math::Stats s; s.add(3.45, 6); s.add(1.39, 2); s.add(2.56, 13); s.add(0.03); openvdb::math::Stats t; for (int i=0; i< 6; ++i) t.add(3.45); for (int i=0; i< 2; ++i) t.add(1.39); for (int i=0; i<13; ++i) t.add(2.56); t.add(0.03); EXPECT_EQ(s.size(), t.size()); EXPECT_NEAR(s.min(), t.min(), 0.000001); EXPECT_NEAR(s.max(), t.max(), 0.000001); EXPECT_NEAR(s.mean(),t.mean(), 0.000001); EXPECT_NEAR(s.variance(), t.variance(), 0.000001); } //std::cerr << "\nCompleted TestStats::testStats!\n" << std::endl; } TEST_F(TestStats, testHistogram) { {// Histogram test openvdb::math::Stats s; const int data[5]={600, 470, 170, 430, 300}; for (int i=0; i<5; ++i) s.add(data[i]); openvdb::math::Histogram h(s, 10); for (int i=0; i<5; ++i) EXPECT_TRUE(h.add(data[i])); int bin[10]={0}; for (int i=0; i<5; ++i) { for (int j=0; j<10; ++j) if (data[i] >= h.min(j) && data[i] < h.max(j)) bin[j]++; } for (int i=0; i<5; ++i) EXPECT_EQ(bin[i],int(h.count(i))); //h.print("test"); } {//Test print of Histogram openvdb::math::Stats s; const int N=500000; for (int i=0; i<N; ++i) s.add(N/2-i); //s.print("print-test"); openvdb::math::Histogram h(s, 25); for (int i=0; i<N; ++i) EXPECT_TRUE(h.add(N/2-i)); //h.print("print-test"); } } namespace { struct GradOp { typedef openvdb::FloatGrid GridT; GridT::ConstAccessor acc; GradOp(const GridT& grid): acc(grid.getConstAccessor()) {} template <typename StatsT> void operator()(const GridT::ValueOnCIter& it, StatsT& stats) const { typedef 
openvdb::math::ISGradient<openvdb::math::FD_1ST> GradT; if (it.isVoxelValue()) { stats.add(GradT::result(acc, it.getCoord()).length()); } else { openvdb::CoordBBox bbox = it.getBoundingBox(); openvdb::Coord xyz; int &x = xyz[0], &y = xyz[1], &z = xyz[2]; for (x = bbox.min()[0]; x <= bbox.max()[0]; ++x) { for (y = bbox.min()[1]; y <= bbox.max()[1]; ++y) { for (z = bbox.min()[2]; z <= bbox.max()[2]; ++z) { stats.add(GradT::result(acc, xyz).length()); } } } } } }; } // unnamed namespace TEST_F(TestStats, testGridExtrema) { using namespace openvdb; const int DIM = 109; { const float background = 0.0; FloatGrid grid(background); { // Compute active value statistics for a grid with a single active voxel. grid.tree().setValue(Coord(0), /*value=*/42.0); math::Extrema ex = tools::extrema(grid.cbeginValueOn()); EXPECT_NEAR(42.0, ex.min(), /*tolerance=*/0.0); EXPECT_NEAR(42.0, ex.max(), /*tolerance=*/0.0); // Compute inactive value statistics for a grid with only background voxels. grid.tree().setValueOff(Coord(0), background); ex = tools::extrema(grid.cbeginValueOff()); EXPECT_NEAR(background, ex.min(), /*tolerance=*/0.0); EXPECT_NEAR(background, ex.max(), /*tolerance=*/0.0); } // Compute active value statistics for a grid with two active voxel populations // of the same size but two different values. grid.fill(CoordBBox::createCube(Coord(0), DIM), /*value=*/1.0); grid.fill(CoordBBox::createCube(Coord(-300), DIM), /*value=*/-3.0); EXPECT_EQ(Index64(2 * DIM * DIM * DIM), grid.activeVoxelCount()); for (int threaded = 0; threaded <= 1; ++threaded) { math::Extrema ex = tools::extrema(grid.cbeginValueOn(), threaded); EXPECT_NEAR(double(-3.0), ex.min(), /*tolerance=*/0.0); EXPECT_NEAR(double(1.0), ex.max(), /*tolerance=*/0.0); } // Compute active value statistics for just the positive values. for (int threaded = 0; threaded <= 1; ++threaded) { struct Local { static void addIfPositive(const FloatGrid::ValueOnCIter& it, math::Extrema& ex) { const float f = *it; if (f > 0.0) { if (it.isVoxelValue()) ex.add(f); else ex.add(f, it.getVoxelCount()); } } }; math::Extrema ex = tools::extrema(grid.cbeginValueOn(), &Local::addIfPositive, threaded); EXPECT_NEAR(double(1.0), ex.min(), /*tolerance=*/0.0); EXPECT_NEAR(double(1.0), ex.max(), /*tolerance=*/0.0); } // Compute active value statistics for the first-order gradient. for (int threaded = 0; threaded <= 1; ++threaded) { // First, using a custom ValueOp... math::Extrema ex = tools::extrema(grid.cbeginValueOn(), GradOp(grid), threaded); EXPECT_NEAR(double(0.0), ex.min(), /*tolerance=*/0.0); EXPECT_NEAR( double(9.0 + 9.0 + 9.0), ex.max() * ex.max(), /*tol=*/1.0e-3); // max gradient is (dx, dy, dz) = (-3 - 0, -3 - 0, -3 - 0) // ...then using tools::opStatistics(). typedef math::ISOpMagnitude<math::ISGradient<math::FD_1ST> > MathOp; ex = tools::opExtrema(grid.cbeginValueOn(), MathOp(), threaded); EXPECT_NEAR(double(0.0), ex.min(), /*tolerance=*/0.0); EXPECT_NEAR( double(9.0 + 9.0 + 9.0), ex.max() * ex.max(), /*tolerance=*/1.0e-3); // max gradient is (dx, dy, dz) = (-3 - 0, -3 - 0, -3 - 0) } } { const Vec3s background(0.0); Vec3SGrid grid(background); // Compute active vector magnitude statistics for a vector-valued grid // with two active voxel populations of the same size but two different values. 
grid.fill(CoordBBox::createCube(Coord(0), DIM), Vec3s(3.0, 0.0, 4.0)); // length = 5 grid.fill(CoordBBox::createCube(Coord(-300), DIM), Vec3s(1.0, 2.0, 2.0)); // length = 3 EXPECT_EQ(Index64(2 * DIM * DIM * DIM), grid.activeVoxelCount()); for (int threaded = 0; threaded <= 1; ++threaded) { math::Extrema ex = tools::extrema(grid.cbeginValueOn(), threaded); EXPECT_NEAR(double(3.0), ex.min(), /*tolerance=*/0.0); EXPECT_NEAR(double(5.0), ex.max(), /*tolerance=*/0.0); } } } TEST_F(TestStats, testGridStats) { using namespace openvdb; const int DIM = 109; { const float background = 0.0; FloatGrid grid(background); { // Compute active value statistics for a grid with a single active voxel. grid.tree().setValue(Coord(0), /*value=*/42.0); math::Stats stats = tools::statistics(grid.cbeginValueOn()); EXPECT_NEAR(42.0, stats.min(), /*tolerance=*/0.0); EXPECT_NEAR(42.0, stats.max(), /*tolerance=*/0.0); EXPECT_NEAR(42.0, stats.mean(), /*tolerance=*/1.0e-8); EXPECT_NEAR(0.0, stats.variance(), /*tolerance=*/1.0e-8); // Compute inactive value statistics for a grid with only background voxels. grid.tree().setValueOff(Coord(0), background); stats = tools::statistics(grid.cbeginValueOff()); EXPECT_NEAR(background, stats.min(), /*tolerance=*/0.0); EXPECT_NEAR(background, stats.max(), /*tolerance=*/0.0); EXPECT_NEAR(background, stats.mean(), /*tolerance=*/1.0e-8); EXPECT_NEAR(0.0, stats.variance(), /*tolerance=*/1.0e-8); } // Compute active value statistics for a grid with two active voxel populations // of the same size but two different values. grid.fill(CoordBBox::createCube(Coord(0), DIM), /*value=*/1.0); grid.fill(CoordBBox::createCube(Coord(-300), DIM), /*value=*/-3.0); EXPECT_EQ(Index64(2 * DIM * DIM * DIM), grid.activeVoxelCount()); for (int threaded = 0; threaded <= 1; ++threaded) { math::Stats stats = tools::statistics(grid.cbeginValueOn(), threaded); EXPECT_NEAR(double(-3.0), stats.min(), /*tolerance=*/0.0); EXPECT_NEAR(double(1.0), stats.max(), /*tolerance=*/0.0); EXPECT_NEAR(double(-1.0), stats.mean(), /*tolerance=*/1.0e-8); EXPECT_NEAR(double(4.0), stats.variance(), /*tolerance=*/1.0e-8); } // Compute active value statistics for just the positive values. for (int threaded = 0; threaded <= 1; ++threaded) { struct Local { static void addIfPositive(const FloatGrid::ValueOnCIter& it, math::Stats& stats) { const float f = *it; if (f > 0.0) { if (it.isVoxelValue()) stats.add(f); else stats.add(f, it.getVoxelCount()); } } }; math::Stats stats = tools::statistics(grid.cbeginValueOn(), &Local::addIfPositive, threaded); EXPECT_NEAR(double(1.0), stats.min(), /*tolerance=*/0.0); EXPECT_NEAR(double(1.0), stats.max(), /*tolerance=*/0.0); EXPECT_NEAR(double(1.0), stats.mean(), /*tolerance=*/1.0e-8); EXPECT_NEAR(double(0.0), stats.variance(), /*tolerance=*/1.0e-8); } // Compute active value statistics for the first-order gradient. for (int threaded = 0; threaded <= 1; ++threaded) { // First, using a custom ValueOp... math::Stats stats = tools::statistics(grid.cbeginValueOn(), GradOp(grid), threaded); EXPECT_NEAR(double(0.0), stats.min(), /*tolerance=*/0.0); EXPECT_NEAR( double(9.0 + 9.0 + 9.0), stats.max() * stats.max(), /*tol=*/1.0e-3); // max gradient is (dx, dy, dz) = (-3 - 0, -3 - 0, -3 - 0) // ...then using tools::opStatistics(). 
typedef math::ISOpMagnitude<math::ISGradient<math::FD_1ST> > MathOp; stats = tools::opStatistics(grid.cbeginValueOn(), MathOp(), threaded); EXPECT_NEAR(double(0.0), stats.min(), /*tolerance=*/0.0); EXPECT_NEAR( double(9.0 + 9.0 + 9.0), stats.max() * stats.max(), /*tolerance=*/1.0e-3); // max gradient is (dx, dy, dz) = (-3 - 0, -3 - 0, -3 - 0) } } { const Vec3s background(0.0); Vec3SGrid grid(background); // Compute active vector magnitude statistics for a vector-valued grid // with two active voxel populations of the same size but two different values. grid.fill(CoordBBox::createCube(Coord(0), DIM), Vec3s(3.0, 0.0, 4.0)); // length = 5 grid.fill(CoordBBox::createCube(Coord(-300), DIM), Vec3s(1.0, 2.0, 2.0)); // length = 3 EXPECT_EQ(Index64(2 * DIM * DIM * DIM), grid.activeVoxelCount()); for (int threaded = 0; threaded <= 1; ++threaded) { math::Stats stats = tools::statistics(grid.cbeginValueOn(), threaded); EXPECT_NEAR(double(3.0), stats.min(), /*tolerance=*/0.0); EXPECT_NEAR(double(5.0), stats.max(), /*tolerance=*/0.0); EXPECT_NEAR(double(4.0), stats.mean(), /*tolerance=*/1.0e-8); EXPECT_NEAR(double(1.0), stats.variance(), /*tolerance=*/1.0e-8); } } } namespace { template<typename OpT, typename GridT> inline void doTestGridOperatorStats(const GridT& grid, const OpT& op) { openvdb::math::Stats serialStats = openvdb::tools::opStatistics(grid.cbeginValueOn(), op, /*threaded=*/false); openvdb::math::Stats parallelStats = openvdb::tools::opStatistics(grid.cbeginValueOn(), op, /*threaded=*/true); // Verify that the results from threaded and serial runs are equivalent. EXPECT_EQ(serialStats.size(), parallelStats.size()); ASSERT_DOUBLES_EXACTLY_EQUAL(serialStats.min(), parallelStats.min()); ASSERT_DOUBLES_EXACTLY_EQUAL(serialStats.max(), parallelStats.max()); EXPECT_NEAR(serialStats.mean(), parallelStats.mean(), /*tolerance=*/1.0e-6); EXPECT_NEAR(serialStats.variance(), parallelStats.variance(), 1.0e-6); } } TEST_F(TestStats, testGridOperatorStats) { using namespace openvdb; typedef math::UniformScaleMap MapType; MapType map; const int DIM = 109; { // Test operations on a scalar grid. const float background = 0.0; FloatGrid grid(background); grid.fill(CoordBBox::createCube(Coord(0), DIM), /*value=*/1.0); grid.fill(CoordBBox::createCube(Coord(-300), DIM), /*value=*/-3.0); { // Magnitude of gradient computed via first-order differencing typedef math::MapAdapter<MapType, math::OpMagnitude<math::Gradient<MapType, math::FD_1ST>, MapType>, double> OpT; doTestGridOperatorStats(grid, OpT(map)); } { // Magnitude of index-space gradient computed via first-order differencing typedef math::ISOpMagnitude<math::ISGradient<math::FD_1ST> > OpT; doTestGridOperatorStats(grid, OpT()); } { // Laplacian of index-space gradient computed via second-order central differencing typedef math::ISLaplacian<math::CD_SECOND> OpT; doTestGridOperatorStats(grid, OpT()); } } { // Test operations on a vector grid. 
const Vec3s background(0.0); Vec3SGrid grid(background); grid.fill(CoordBBox::createCube(Coord(0), DIM), Vec3s(3.0, 0.0, 4.0)); // length = 5 grid.fill(CoordBBox::createCube(Coord(-300), DIM), Vec3s(1.0, 2.0, 2.0)); // length = 3 { // Divergence computed via first-order differencing typedef math::MapAdapter<MapType, math::Divergence<MapType, math::FD_1ST>, double> OpT; doTestGridOperatorStats(grid, OpT(map)); } { // Magnitude of curl computed via first-order differencing typedef math::MapAdapter<MapType, math::OpMagnitude<math::Curl<MapType, math::FD_1ST>, MapType>, double> OpT; doTestGridOperatorStats(grid, OpT(map)); } { // Magnitude of index-space curl computed via first-order differencing typedef math::ISOpMagnitude<math::ISCurl<math::FD_1ST> > OpT; doTestGridOperatorStats(grid, OpT()); } } } TEST_F(TestStats, testGridHistogram) { using namespace openvdb; const int DIM = 109; { const float background = 0.0; FloatGrid grid(background); { const double value = 42.0; // Compute a histogram of the active values of a grid with a single active voxel. grid.tree().setValue(Coord(0), value); math::Histogram hist = tools::histogram(grid.cbeginValueOn(), /*min=*/0.0, /*max=*/100.0); for (int i = 0, N = int(hist.numBins()); i < N; ++i) { uint64_t expected = ((hist.min(i) <= value && value <= hist.max(i)) ? 1 : 0); EXPECT_EQ(expected, hist.count(i)); } } // Compute a histogram of the active values of a grid with two // active voxel populations of the same size but two different values. grid.fill(CoordBBox::createCube(Coord(0), DIM), /*value=*/1.0); grid.fill(CoordBBox::createCube(Coord(-300), DIM), /*value=*/3.0); EXPECT_EQ(uint64_t(2 * DIM * DIM * DIM), grid.activeVoxelCount()); for (int threaded = 0; threaded <= 1; ++threaded) { math::Histogram hist = tools::histogram(grid.cbeginValueOn(), /*min=*/0.0, /*max=*/10.0, /*numBins=*/9, threaded); EXPECT_EQ(Index64(2 * DIM * DIM * DIM), hist.size()); for (int i = 0, N = int(hist.numBins()); i < N; ++i) { if (i == 0 || i == 2) { EXPECT_EQ(uint64_t(DIM * DIM * DIM), hist.count(i)); } else { EXPECT_EQ(uint64_t(0), hist.count(i)); } } } } { const Vec3s background(0.0); Vec3SGrid grid(background); // Compute a histogram of vector magnitudes of the active values of a // vector-valued grid with two active voxel populations of the same size // but two different values. grid.fill(CoordBBox::createCube(Coord(0), DIM), Vec3s(3.0, 0.0, 4.0)); // length = 5 grid.fill(CoordBBox::createCube(Coord(-300), DIM), Vec3s(1.0, 2.0, 2.0)); // length = 3 EXPECT_EQ(Index64(2 * DIM * DIM * DIM), grid.activeVoxelCount()); for (int threaded = 0; threaded <= 1; ++threaded) { math::Histogram hist = tools::histogram(grid.cbeginValueOn(), /*min=*/0.0, /*max=*/10.0, /*numBins=*/9, threaded); EXPECT_EQ(Index64(2 * DIM * DIM * DIM), hist.size()); for (int i = 0, N = int(hist.numBins()); i < N; ++i) { if (i == 2 || i == 4) { EXPECT_EQ(uint64_t(DIM * DIM * DIM), hist.count(i)); } else { EXPECT_EQ(uint64_t(0), hist.count(i)); } } } } }
28,774
C++
38.526099
99
0.527977
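The record above is the TestStats unit test. The accumulation pattern it verifies can be summarized in a short standalone program; the sketch below is not part of the corpus file — it reuses only the classes and member functions exercised in the test (math::Stats::add, size, min, max, mean, stdDev, and the math::Histogram constructor seeded from a Stats object), with the main() scaffolding and the sample values added as assumptions.

// Minimal sketch (not part of the test file above): accumulating statistics
// and a histogram with the openvdb::math classes the tests exercise.
#include <openvdb/math/Stats.h>
#include <iostream>

int main()
{
    openvdb::math::Stats stats;
    const double data[5] = {600, 470, 170, 430, 300};
    for (double d : data) stats.add(d);   // add(value)
    stats.add(42.0, /*n=*/10);            // add(value, count), as tested above

    std::cout << "n=" << stats.size()
              << " min=" << stats.min()
              << " max=" << stats.max()
              << " mean=" << stats.mean()
              << " stdDev=" << stats.stdDev() << "\n";

    // Bin the samples into a histogram whose range is seeded from the stats.
    openvdb::math::Histogram hist(stats, /*numBins=*/10);
    for (double d : data) hist.add(d);
    for (size_t i = 0; i < hist.numBins(); ++i) {
        std::cout << "[" << hist.min(i) << "," << hist.max(i) << "): "
                  << hist.count(i) << "\n";
    }
    return 0;
}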
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestCoord.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <unordered_map> #include "gtest/gtest.h" #include <openvdb/Types.h> #include <sstream> #include <tbb/tbb_stddef.h> // for tbb::split class TestCoord: public ::testing::Test { }; TEST_F(TestCoord, testCoord) { using openvdb::Coord; for (int i=0; i<3; ++i) { EXPECT_EQ(Coord::min()[i], std::numeric_limits<Coord::Int32>::min()); EXPECT_EQ(Coord::max()[i], std::numeric_limits<Coord::Int32>::max()); } Coord xyz(-1, 2, 4); Coord xyz2 = -xyz; EXPECT_EQ(Coord(1, -2, -4), xyz2); EXPECT_EQ(Coord(1, 2, 4), openvdb::math::Abs(xyz)); xyz2 = -xyz2; EXPECT_EQ(xyz, xyz2); xyz.setX(-xyz.x()); EXPECT_EQ(Coord(1, 2, 4), xyz); xyz2 = xyz >> 1; EXPECT_EQ(Coord(0, 1, 2), xyz2); xyz2 |= 1; EXPECT_EQ(Coord(1, 1, 3), xyz2); EXPECT_TRUE(xyz2 != xyz); EXPECT_TRUE(xyz2 < xyz); EXPECT_TRUE(xyz2 <= xyz); Coord xyz3(xyz2); xyz2 -= xyz3; EXPECT_EQ(Coord(), xyz2); xyz2.reset(0, 4, 4); xyz2.offset(-1); EXPECT_EQ(Coord(-1, 3, 3), xyz2); // xyz = (1, 2, 4), xyz2 = (-1, 3, 3) EXPECT_EQ(Coord(-1, 2, 3), Coord::minComponent(xyz, xyz2)); EXPECT_EQ(Coord(1, 3, 4), Coord::maxComponent(xyz, xyz2)); } TEST_F(TestCoord, testConversion) { using openvdb::Coord; openvdb::Vec3I iv(1, 2, 4); Coord xyz(iv); EXPECT_EQ(Coord(1, 2, 4), xyz); EXPECT_EQ(iv, xyz.asVec3I()); EXPECT_EQ(openvdb::Vec3i(1, 2, 4), xyz.asVec3i()); iv = (xyz + iv) + xyz; EXPECT_EQ(openvdb::Vec3I(3, 6, 12), iv); iv = iv - xyz; EXPECT_EQ(openvdb::Vec3I(2, 4, 8), iv); openvdb::Vec3s fv = xyz.asVec3s(); EXPECT_TRUE(openvdb::math::isExactlyEqual(openvdb::Vec3s(1, 2, 4), fv)); } TEST_F(TestCoord, testIO) { using openvdb::Coord; Coord xyz(-1, 2, 4), xyz2; std::ostringstream os(std::ios_base::binary); EXPECT_NO_THROW(xyz.write(os)); std::istringstream is(os.str(), std::ios_base::binary); EXPECT_NO_THROW(xyz2.read(is)); EXPECT_EQ(xyz, xyz2); os.str(""); os << xyz; EXPECT_EQ(std::string("[-1, 2, 4]"), os.str()); } TEST_F(TestCoord, testCoordBBox) { {// Empty constructor openvdb::CoordBBox b; EXPECT_EQ(openvdb::Coord::max(), b.min()); EXPECT_EQ(openvdb::Coord::min(), b.max()); EXPECT_TRUE(b.empty()); } {// Construct bbox from min and max const openvdb::Coord min(-1,-2,30), max(20,30,55); openvdb::CoordBBox b(min, max); EXPECT_EQ(min, b.min()); EXPECT_EQ(max, b.max()); } {// Construct bbox from components of min and max const openvdb::Coord min(-1,-2,30), max(20,30,55); openvdb::CoordBBox b(min[0], min[1], min[2], max[0], max[1], max[2]); EXPECT_EQ(min, b.min()); EXPECT_EQ(max, b.max()); } {// tbb::split constructor const openvdb::Coord min(-1,-2,30), max(20,30,55); openvdb::CoordBBox a(min, max), b(a, tbb::split()); EXPECT_EQ(min, b.min()); EXPECT_EQ(openvdb::Coord(20, 14, 55), b.max()); EXPECT_EQ(openvdb::Coord(-1, 15, 30), a.min()); EXPECT_EQ(max, a.max()); } {// createCube const openvdb::Coord min(0,8,16); const openvdb::CoordBBox b = openvdb::CoordBBox::createCube(min, 8); EXPECT_EQ(min, b.min()); EXPECT_EQ(min + openvdb::Coord(8-1), b.max()); } {// inf const openvdb::CoordBBox b = openvdb::CoordBBox::inf(); EXPECT_EQ(openvdb::Coord::min(), b.min()); EXPECT_EQ(openvdb::Coord::max(), b.max()); } {// empty, dim, hasVolume and volume const openvdb::Coord c(1,2,3); const openvdb::CoordBBox b0(c, c), b1(c, c.offsetBy(0,-1,0)), b2; EXPECT_TRUE( b0.hasVolume() && !b0.empty()); EXPECT_TRUE(!b1.hasVolume() && b1.empty()); EXPECT_TRUE(!b2.hasVolume() && b2.empty()); EXPECT_EQ(openvdb::Coord(1), b0.dim()); EXPECT_EQ(openvdb::Coord(0), b1.dim()); EXPECT_EQ(openvdb::Coord(0), b2.dim()); 
EXPECT_EQ(uint64_t(1), b0.volume()); EXPECT_EQ(uint64_t(0), b1.volume()); EXPECT_EQ(uint64_t(0), b2.volume()); } {// volume and split constructor const openvdb::Coord min(-1,-2,30), max(20,30,55); const openvdb::CoordBBox bbox(min,max); openvdb::CoordBBox a(bbox), b(a, tbb::split()); EXPECT_EQ(bbox.volume(), a.volume() + b.volume()); openvdb::CoordBBox c(b, tbb::split()); EXPECT_EQ(bbox.volume(), a.volume() + b.volume() + c.volume()); } {// getCenter const openvdb::Coord min(1,2,3), max(6,10,15); const openvdb::CoordBBox b(min, max); EXPECT_EQ(openvdb::Vec3d(3.5, 6.0, 9.0), b.getCenter()); } {// moveMin const openvdb::Coord min(1,2,3), max(6,10,15); openvdb::CoordBBox b(min, max); const openvdb::Coord dim = b.dim(); b.moveMin(openvdb::Coord(0)); EXPECT_EQ(dim, b.dim()); EXPECT_EQ(openvdb::Coord(0), b.min()); EXPECT_EQ(max-min, b.max()); } {// moveMax const openvdb::Coord min(1,2,3), max(6,10,15); openvdb::CoordBBox b(min, max); const openvdb::Coord dim = b.dim(); b.moveMax(openvdb::Coord(0)); EXPECT_EQ(dim, b.dim()); EXPECT_EQ(openvdb::Coord(0), b.max()); EXPECT_EQ(min-max, b.min()); } {// a volume that overflows Int32. using Int32 = openvdb::Int32; Int32 maxInt32 = std::numeric_limits<Int32>::max(); const openvdb::Coord min(Int32(0), Int32(0), Int32(0)); const openvdb::Coord max(maxInt32-Int32(2), Int32(2), Int32(2)); const openvdb::CoordBBox b(min, max); uint64_t volume = UINT64_C(19327352814); EXPECT_EQ(volume, b.volume()); } {// minExtent and maxExtent const openvdb::Coord min(1,2,3); { const openvdb::Coord max = min + openvdb::Coord(1,2,3); const openvdb::CoordBBox b(min, max); EXPECT_EQ(int(b.minExtent()), 0); EXPECT_EQ(int(b.maxExtent()), 2); } { const openvdb::Coord max = min + openvdb::Coord(1,3,2); const openvdb::CoordBBox b(min, max); EXPECT_EQ(int(b.minExtent()), 0); EXPECT_EQ(int(b.maxExtent()), 1); } { const openvdb::Coord max = min + openvdb::Coord(2,1,3); const openvdb::CoordBBox b(min, max); EXPECT_EQ(int(b.minExtent()), 1); EXPECT_EQ(int(b.maxExtent()), 2); } { const openvdb::Coord max = min + openvdb::Coord(2,3,1); const openvdb::CoordBBox b(min, max); EXPECT_EQ(int(b.minExtent()), 2); EXPECT_EQ(int(b.maxExtent()), 1); } { const openvdb::Coord max = min + openvdb::Coord(3,1,2); const openvdb::CoordBBox b(min, max); EXPECT_EQ(int(b.minExtent()), 1); EXPECT_EQ(int(b.maxExtent()), 0); } { const openvdb::Coord max = min + openvdb::Coord(3,2,1); const openvdb::CoordBBox b(min, max); EXPECT_EQ(int(b.minExtent()), 2); EXPECT_EQ(int(b.maxExtent()), 0); } } {//reset openvdb::CoordBBox b; EXPECT_EQ(openvdb::Coord::max(), b.min()); EXPECT_EQ(openvdb::Coord::min(), b.max()); EXPECT_TRUE(b.empty()); const openvdb::Coord min(-1,-2,30), max(20,30,55); b.reset(min, max); EXPECT_EQ(min, b.min()); EXPECT_EQ(max, b.max()); EXPECT_TRUE(!b.empty()); b.reset(); EXPECT_EQ(openvdb::Coord::max(), b.min()); EXPECT_EQ(openvdb::Coord::min(), b.max()); EXPECT_TRUE(b.empty()); } {// ZYX Iterator 1 const openvdb::Coord min(-1,-2,3), max(2,3,5); const openvdb::CoordBBox b(min, max); const size_t count = b.volume(); size_t n = 0; openvdb::CoordBBox::ZYXIterator ijk(b); for (int i=min[0]; i<=max[0]; ++i) { for (int j=min[1]; j<=max[1]; ++j) { for (int k=min[2]; k<=max[2]; ++k, ++ijk, ++n) { EXPECT_TRUE(ijk); EXPECT_EQ(openvdb::Coord(i,j,k), *ijk); } } } EXPECT_EQ(count, n); EXPECT_TRUE(!ijk); ++ijk; EXPECT_TRUE(!ijk); } {// ZYX Iterator 2 const openvdb::Coord min(-1,-2,3), max(2,3,5); const openvdb::CoordBBox b(min, max); const size_t count = b.volume(); size_t n = 0; openvdb::Coord::ValueType unused = 
0; for (const auto& ijk: b) { unused += ijk[0]; EXPECT_TRUE(++n <= count); } EXPECT_EQ(count, n); } {// XYZ Iterator 1 const openvdb::Coord min(-1,-2,3), max(2,3,5); const openvdb::CoordBBox b(min, max); const size_t count = b.volume(); size_t n = 0; openvdb::CoordBBox::XYZIterator ijk(b); for (int k=min[2]; k<=max[2]; ++k) { for (int j=min[1]; j<=max[1]; ++j) { for (int i=min[0]; i<=max[0]; ++i, ++ijk, ++n) { EXPECT_TRUE( ijk ); EXPECT_EQ( openvdb::Coord(i,j,k), *ijk ); } } } EXPECT_EQ(count, n); EXPECT_TRUE( !ijk ); ++ijk; EXPECT_TRUE( !ijk ); } {// XYZ Iterator 2 const openvdb::Coord min(-1,-2,3), max(2,3,5); const openvdb::CoordBBox b(min, max); const size_t count = b.volume(); size_t n = 0; for (auto ijk = b.beginXYZ(); ijk; ++ijk) { EXPECT_TRUE( ++n <= count ); } EXPECT_EQ(count, n); } {// bit-wise operations const openvdb::Coord min(-1,-2,3), max(2,3,5); const openvdb::CoordBBox b(min, max); EXPECT_EQ(openvdb::CoordBBox(min>>1,max>>1), b>>size_t(1)); EXPECT_EQ(openvdb::CoordBBox(min>>3,max>>3), b>>size_t(3)); EXPECT_EQ(openvdb::CoordBBox(min<<1,max<<1), b<<size_t(1)); EXPECT_EQ(openvdb::CoordBBox(min&1,max&1), b&1); EXPECT_EQ(openvdb::CoordBBox(min|1,max|1), b|1); } {// test getCornerPoints const openvdb::CoordBBox bbox(1, 2, 3, 4, 5, 6); openvdb::Coord a[10]; bbox.getCornerPoints(a); //for (int i=0; i<8; ++i) { // std::cerr << "#"<<i<<" = ("<<a[i][0]<<","<<a[i][1]<<","<<a[i][2]<<")\n"; //} EXPECT_EQ( a[0], openvdb::Coord(1, 2, 3) ); EXPECT_EQ( a[1], openvdb::Coord(1, 2, 6) ); EXPECT_EQ( a[2], openvdb::Coord(1, 5, 3) ); EXPECT_EQ( a[3], openvdb::Coord(1, 5, 6) ); EXPECT_EQ( a[4], openvdb::Coord(4, 2, 3) ); EXPECT_EQ( a[5], openvdb::Coord(4, 2, 6) ); EXPECT_EQ( a[6], openvdb::Coord(4, 5, 3) ); EXPECT_EQ( a[7], openvdb::Coord(4, 5, 6) ); for (int i=1; i<8; ++i) EXPECT_TRUE( a[i-1] < a[i] ); } } TEST_F(TestCoord, testCoordHash) { {//test Coord::hash function openvdb::Coord a(-1, 34, 67), b(-2, 34, 67); EXPECT_TRUE(a.hash<>() != b.hash<>()); EXPECT_TRUE(a.hash<10>() != b.hash<10>()); EXPECT_TRUE(a.hash<5>() != b.hash<5>()); } {//test std::hash function std::hash<openvdb::Coord> h; openvdb::Coord a(-1, 34, 67), b(-2, 34, 67); EXPECT_TRUE(h(a) != h(b)); } {//test hash map (= unordered_map) using KeyT = openvdb::Coord; using ValueT = size_t; using HashT = std::hash<openvdb::Coord>; std::unordered_map<KeyT, ValueT, HashT> h; const openvdb::Coord min(-10,-20,30), max(20,30,50); const openvdb::CoordBBox bbox(min, max); size_t n = 0; for (const auto& ijk: bbox) h[ijk] = n++; EXPECT_EQ(h.size(), n); n = 0; for (const auto& ijk: bbox) EXPECT_EQ(h[ijk], n++); EXPECT_TRUE(h.load_factor() <= 1.0f);// no hask key collisions! } }
12,080
C++
31.130319
86
0.51457
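The record above is the TestCoord unit test. Its testCoordHash case validates that openvdb::Coord works as an unordered_map key; the sketch below is not part of the corpus file and only restates that pattern (std::hash<Coord>, range-for over a CoordBBox, CoordBBox::volume), with the main() scaffolding and the chosen bounding box added as assumptions.

// Minimal sketch (not part of the test file above): using openvdb::Coord as a
// hash-map key and iterating a CoordBBox, mirroring testCoordHash.
#include <openvdb/Types.h>
#include <unordered_map>
#include <iostream>

int main()
{
    using openvdb::Coord;
    using openvdb::CoordBBox;

    std::unordered_map<Coord, int, std::hash<Coord>> values;

    const CoordBBox bbox(Coord(-2, -2, -2), Coord(2, 2, 2)); // both bounds inclusive
    int n = 0;
    for (const auto& ijk : bbox) values[ijk] = n++;          // ZYX iteration, as in the test

    std::cout << "stored " << values.size() << " of " << bbox.volume() << " coords\n";
    std::cout << "value at origin = " << values[Coord(0)] << "\n";
    return 0;
}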
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestFloatMetadata.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include "gtest/gtest.h"
#include <openvdb/Exceptions.h>
#include <openvdb/Metadata.h>

class TestFloatMetadata : public ::testing::Test
{
};

TEST_F(TestFloatMetadata, test)
{
    using namespace openvdb;

    Metadata::Ptr m(new FloatMetadata(1.0));
    Metadata::Ptr m2 = m->copy();

    EXPECT_TRUE(dynamic_cast<FloatMetadata*>(m.get()) != 0);
    EXPECT_TRUE(dynamic_cast<FloatMetadata*>(m2.get()) != 0);

    EXPECT_TRUE(m->typeName().compare("float") == 0);
    EXPECT_TRUE(m2->typeName().compare("float") == 0);

    FloatMetadata *s = dynamic_cast<FloatMetadata*>(m.get());
    //EXPECT_TRUE(s->value() == 1.0);
    EXPECT_NEAR(1.0f, s->value(), 0);
    s->value() = 2.0;
    //EXPECT_TRUE(s->value() == 2.0);
    EXPECT_NEAR(2.0f, s->value(), 0);

    m2->copy(*s);
    s = dynamic_cast<FloatMetadata*>(m2.get());
    //EXPECT_TRUE(s->value() == 2.0);
    EXPECT_NEAR(2.0f, s->value(), 0);
}
983
C++
24.894736
61
0.621567
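The record above is the TestFloatMetadata unit test. The pattern it covers — constructing typed metadata through the base Metadata::Ptr and downcasting to read or write the value — is shown below as a sketch; it is not part of the corpus file, and the main() scaffolding plus the sample values are assumptions.

// Minimal sketch (not part of the test file above): the typed-metadata pattern
// TestFloatMetadata exercises.
#include <openvdb/Metadata.h>
#include <iostream>

int main()
{
    using namespace openvdb;

    Metadata::Ptr m(new FloatMetadata(1.0f));
    std::cout << "type: " << m->typeName() << "\n";   // prints "float"

    if (FloatMetadata* fm = dynamic_cast<FloatMetadata*>(m.get())) {
        fm->value() = 2.5f;                           // value() returns a mutable reference
        std::cout << "value: " << fm->value() << "\n";
    }

    Metadata::Ptr m2 = m->copy();                     // copy preserves type and value
    return 0;
}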
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestGridTransformer.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/openvdb.h> #include <openvdb/math/BBox.h> #include <openvdb/math/Math.h> #include <openvdb/tree/Tree.h> #include <openvdb/tools/GridTransformer.h> #include <openvdb/tools/Prune.h> #define ASSERT_DOUBLES_EXACTLY_EQUAL(expected, actual) \ EXPECT_NEAR((expected), (actual), /*tolerance=*/0.0); class TestGridTransformer: public ::testing::Test { protected: template<typename GridType, typename Sampler> void transformGrid(); }; //////////////////////////////////////// template<typename GridType, typename Sampler> void TestGridTransformer::transformGrid() { using openvdb::Coord; using openvdb::CoordBBox; using openvdb::Vec3R; typedef typename GridType::ValueType ValueT; const int radius = Sampler::radius(); const openvdb::Vec3R zeroVec(0, 0, 0), oneVec(1, 1, 1); const ValueT zero = openvdb::zeroVal<ValueT>(), one = zero + 1, two = one + 1, background = one; const bool transformTiles = true; // Create a sparse test grid comprising the eight corners of a 20 x 20 x 20 cube. typename GridType::Ptr inGrid = GridType::create(background); typename GridType::Accessor inAcc = inGrid->getAccessor(); inAcc.setValue(Coord( 0, 0, 0), /*value=*/zero); inAcc.setValue(Coord(20, 0, 0), zero); inAcc.setValue(Coord( 0, 20, 0), zero); inAcc.setValue(Coord( 0, 0, 20), zero); inAcc.setValue(Coord(20, 0, 20), zero); inAcc.setValue(Coord( 0, 20, 20), zero); inAcc.setValue(Coord(20, 20, 0), zero); inAcc.setValue(Coord(20, 20, 20), zero); EXPECT_EQ(openvdb::Index64(8), inGrid->activeVoxelCount()); // For various combinations of scaling, rotation and translation... for (int i = 0; i < 8; ++i) { const openvdb::Vec3R scale = i & 1 ? openvdb::Vec3R(10, 4, 7.5) : oneVec, rotate = (i & 2 ? openvdb::Vec3R(30, 230, -190) : zeroVec) * (M_PI / 180), translate = i & 4 ? openvdb::Vec3R(-5, 0, 10) : zeroVec, pivot = i & 8 ? openvdb::Vec3R(0.5, 4, -3.3) : zeroVec; openvdb::tools::GridTransformer transformer(pivot, scale, rotate, translate); transformer.setTransformTiles(transformTiles); // Add a tile (either active or inactive) in the interior of the cube. const bool tileIsActive = (i % 2); inGrid->fill(CoordBBox(Coord(8), Coord(15)), two, tileIsActive); if (tileIsActive) { EXPECT_EQ(openvdb::Index64(512 + 8), inGrid->activeVoxelCount()); } else { EXPECT_EQ(openvdb::Index64(8), inGrid->activeVoxelCount()); } // Verify that a voxel outside the cube has the background value. EXPECT_TRUE(openvdb::math::isExactlyEqual(inAcc.getValue(Coord(21, 0, 0)), background)); EXPECT_EQ(false, inAcc.isValueOn(Coord(21, 0, 0))); // Verify that a voxel inside the cube has value two. EXPECT_TRUE(openvdb::math::isExactlyEqual(inAcc.getValue(Coord(12)), two)); EXPECT_EQ(tileIsActive, inAcc.isValueOn(Coord(12))); // Verify that the bounding box of all active values is 20 x 20 x 20. CoordBBox activeVoxelBBox = inGrid->evalActiveVoxelBoundingBox(); EXPECT_TRUE(!activeVoxelBBox.empty()); const Coord imin = activeVoxelBBox.min(), imax = activeVoxelBBox.max(); EXPECT_EQ(Coord(0), imin); EXPECT_EQ(Coord(20), imax); // Transform the corners of the input grid's bounding box // and compute the enclosing bounding box in the output grid. const openvdb::Mat4R xform = transformer.getTransform(); const Vec3R inRMin(imin.x(), imin.y(), imin.z()), inRMax(imax.x(), imax.y(), imax.z()); Vec3R outRMin, outRMax; outRMin = outRMax = inRMin * xform; for (int j = 0; j < 8; ++j) { Vec3R corner( j & 1 ? inRMax.x() : inRMin.x(), j & 2 ? 
inRMax.y() : inRMin.y(), j & 4 ? inRMax.z() : inRMin.z()); outRMin = openvdb::math::minComponent(outRMin, corner * xform); outRMax = openvdb::math::maxComponent(outRMax, corner * xform); } CoordBBox bbox( Coord(openvdb::tools::local_util::floorVec3(outRMin) - radius), Coord(openvdb::tools::local_util::ceilVec3(outRMax) + radius)); // Transform the test grid. typename GridType::Ptr outGrid = GridType::create(background); transformer.transformGrid<Sampler>(*inGrid, *outGrid); openvdb::tools::prune(outGrid->tree()); // Verify that the bounding box of the transformed grid // matches the transformed bounding box of the original grid. activeVoxelBBox = outGrid->evalActiveVoxelBoundingBox(); EXPECT_TRUE(!activeVoxelBBox.empty()); const openvdb::Vec3i omin = activeVoxelBBox.min().asVec3i(), omax = activeVoxelBBox.max().asVec3i(); const int bboxTolerance = 1; // allow for rounding #if 0 if (!omin.eq(bbox.min().asVec3i(), bboxTolerance) || !omax.eq(bbox.max().asVec3i(), bboxTolerance)) { std::cerr << "\nS = " << scale << ", R = " << rotate << ", T = " << translate << ", P = " << pivot << "\n" << xform.transpose() << "\n" << "computed bbox = " << bbox << "\nactual bbox = " << omin << " -> " << omax << "\n"; } #endif EXPECT_TRUE(omin.eq(bbox.min().asVec3i(), bboxTolerance)); EXPECT_TRUE(omax.eq(bbox.max().asVec3i(), bboxTolerance)); // Verify that (a voxel in) the interior of the cube was // transformed correctly. const Coord center = Coord::round(Vec3R(12) * xform); const typename GridType::TreeType& outTree = outGrid->tree(); EXPECT_TRUE(openvdb::math::isExactlyEqual(transformTiles ? two : background, outTree.getValue(center))); if (transformTiles && tileIsActive) EXPECT_TRUE(outTree.isValueOn(center)); else EXPECT_TRUE(!outTree.isValueOn(center)); } } TEST_F(TestGridTransformer, testTransformBoolPoint) { transformGrid<openvdb::BoolGrid, openvdb::tools::PointSampler>(); } TEST_F(TestGridTransformer, TransformFloatPoint) { transformGrid<openvdb::FloatGrid, openvdb::tools::PointSampler>(); } TEST_F(TestGridTransformer, TransformFloatBox) { transformGrid<openvdb::FloatGrid, openvdb::tools::BoxSampler>(); } TEST_F(TestGridTransformer, TransformFloatQuadratic) { transformGrid<openvdb::FloatGrid, openvdb::tools::QuadraticSampler>(); } TEST_F(TestGridTransformer, TransformDoubleBox) { transformGrid<openvdb::DoubleGrid, openvdb::tools::BoxSampler>(); } TEST_F(TestGridTransformer, TransformInt32Box) { transformGrid<openvdb::Int32Grid, openvdb::tools::BoxSampler>(); } TEST_F(TestGridTransformer, TransformInt64Box) { transformGrid<openvdb::Int64Grid, openvdb::tools::BoxSampler>(); } TEST_F(TestGridTransformer, TransformVec3SPoint) { transformGrid<openvdb::VectorGrid, openvdb::tools::PointSampler>(); } TEST_F(TestGridTransformer, TransformVec3DBox) { transformGrid<openvdb::Vec3DGrid, openvdb::tools::BoxSampler>(); } //////////////////////////////////////// TEST_F(TestGridTransformer, testResampleToMatch) { using namespace openvdb; // Create an input grid with an identity transform. FloatGrid inGrid; // Populate it with a 20 x 20 x 20 cube. inGrid.fill(CoordBBox(Coord(5), Coord(24)), /*value=*/1.0); EXPECT_EQ(8000, int(inGrid.activeVoxelCount())); EXPECT_TRUE(inGrid.tree().activeTileCount() > 0); {//test identity transform FloatGrid outGrid; EXPECT_TRUE(outGrid.transform() == inGrid.transform()); // Resample the input grid into the output grid using point sampling. 
tools::resampleToMatch<tools::PointSampler>(inGrid, outGrid); EXPECT_EQ(int(inGrid.activeVoxelCount()), int(outGrid.activeVoxelCount())); for (openvdb::FloatTree::ValueOnCIter iter = inGrid.tree().cbeginValueOn(); iter; ++iter) { ASSERT_DOUBLES_EXACTLY_EQUAL(*iter,outGrid.tree().getValue(iter.getCoord())); } // The output grid's transform should not have changed. EXPECT_TRUE(outGrid.transform() == inGrid.transform()); } {//test nontrivial transform // Create an output grid with a different transform. math::Transform::Ptr xform = math::Transform::createLinearTransform(); xform->preScale(Vec3d(0.5, 0.5, 1.0)); FloatGrid outGrid; outGrid.setTransform(xform); EXPECT_TRUE(outGrid.transform() != inGrid.transform()); // Resample the input grid into the output grid using point sampling. tools::resampleToMatch<tools::PointSampler>(inGrid, outGrid); // The output grid's transform should not have changed. EXPECT_EQ(*xform, outGrid.transform()); // The output grid should have double the resolution of the input grid // in x and y and the same resolution in z. EXPECT_EQ(32000, int(outGrid.activeVoxelCount())); EXPECT_EQ(Coord(40, 40, 20), outGrid.evalActiveVoxelDim()); EXPECT_EQ(CoordBBox(Coord(9, 9, 5), Coord(48, 48, 24)), outGrid.evalActiveVoxelBoundingBox()); for (auto it = outGrid.tree().cbeginValueOn(); it; ++it) { EXPECT_NEAR(1.0, *it, 1.0e-6); } } } //////////////////////////////////////// TEST_F(TestGridTransformer, testDecomposition) { using namespace openvdb; using tools::local_util::decompose; { Vec3d s, r, t; auto m = Mat4d::identity(); EXPECT_TRUE(decompose(m, s, r, t)); m(1, 3) = 1.0; // add a perspective component // Verify that decomposition fails for perspective transforms. EXPECT_TRUE(!decompose(m, s, r, t)); } const auto rad = [](double deg) { return deg * M_PI / 180.0; }; const Vec3d ix(1, 0, 0), iy(0, 1, 0), iz(0, 0, 1); const auto translation = { Vec3d(0), Vec3d(100, 0, -100), Vec3d(-50, 100, 250) }; const auto scale = { 1.0, 0.25, -0.25, -1.0, 10.0, -10.0 }; const auto angle = { rad(0.0), rad(45.0), rad(90.0), rad(180.0), rad(225.0), rad(270.0), rad(315.0), rad(360.0) }; for (const auto& t: translation) { for (const double sx: scale) { for (const double sy: scale) { for (const double sz: scale) { const Vec3d s(sx, sy, sz); for (const double rx: angle) { for (const double ry: angle) { for (const double rz: angle) { Mat4d m = math::rotation<Mat4d>(iz, rz) * math::rotation<Mat4d>(iy, ry) * math::rotation<Mat4d>(ix, rx) * math::scale<Mat4d>(s); m.setTranslation(t); Vec3d outS(0), outR(0), outT(0); if (decompose(m, outS, outR, outT)) { // If decomposition succeeds, verify that it produces // the same matrix. (Most decompositions fail to find // a unique solution, though.) Mat4d outM = math::rotation<Mat4d>(iz, outR.z()) * math::rotation<Mat4d>(iy, outR.y()) * math::rotation<Mat4d>(ix, outR.x()) * math::scale<Mat4d>(outS); outM.setTranslation(outT); EXPECT_TRUE(outM.eq(m)); } tools::GridTransformer transformer(m); const bool transformUnchanged = transformer.getTransform().eq(m); EXPECT_TRUE(transformUnchanged); } } } } } } } }
12,385
C++
41.417808
99
0.575373
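The record above is the TestGridTransformer unit test. Its testResampleToMatch case checks that a grid can be resampled into a target transform; the sketch below is not part of the corpus file and only restates that call sequence (Transform::createLinearTransform, preScale, setTransform, tools::resampleToMatch with a PointSampler), with the main() scaffolding and the chosen fill region added as assumptions.

// Minimal sketch (not part of the test file above): resampling a grid into a
// target transform, as verified by testResampleToMatch.
#include <openvdb/openvdb.h>
#include <openvdb/tools/GridTransformer.h>
#include <iostream>

int main()
{
    using namespace openvdb;

    // Source grid with an identity transform: a 20^3 cube of ones.
    FloatGrid inGrid;
    inGrid.fill(CoordBBox(Coord(5), Coord(24)), /*value=*/1.0f);

    // Target grid with half the voxel spacing in x and y.
    math::Transform::Ptr xform = math::Transform::createLinearTransform();
    xform->preScale(Vec3d(0.5, 0.5, 1.0));
    FloatGrid outGrid;
    outGrid.setTransform(xform);

    // Resample so that outGrid covers the same world-space volume as inGrid.
    tools::resampleToMatch<tools::PointSampler>(inGrid, outGrid);
    std::cout << "active voxels: " << outGrid.activeVoxelCount() << "\n";
    return 0;
}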
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestInt64Metadata.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include "gtest/gtest.h"
#include <openvdb/Exceptions.h>
#include <openvdb/Metadata.h>

class TestInt64Metadata : public ::testing::Test
{
};

TEST_F(TestInt64Metadata, test)
{
    using namespace openvdb;

    Metadata::Ptr m(new Int64Metadata(123));
    Metadata::Ptr m2 = m->copy();

    EXPECT_TRUE(dynamic_cast<Int64Metadata*>(m.get()) != 0);
    EXPECT_TRUE(dynamic_cast<Int64Metadata*>(m2.get()) != 0);

    EXPECT_TRUE(m->typeName().compare("int64") == 0);
    EXPECT_TRUE(m2->typeName().compare("int64") == 0);

    Int64Metadata *s = dynamic_cast<Int64Metadata*>(m.get());
    EXPECT_TRUE(s->value() == 123);
    s->value() = 456;
    EXPECT_TRUE(s->value() == 456);

    m2->copy(*s);
    s = dynamic_cast<Int64Metadata*>(m2.get());
    EXPECT_TRUE(s->value() == 456);
}
870
C++
23.194444
61
0.63908
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestMultiResGrid.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/openvdb.h> #include <openvdb/tools/MultiResGrid.h> #include <openvdb/tools/LevelSetSphere.h> #include <openvdb/tools/Diagnostics.h> #include <cstdio> // for remove() class TestMultiResGrid : public ::testing::Test { public: // Use to test logic in openvdb::tools::MultiResGrid struct CoordMask { static int Mask(int i, int j, int k) { return (i & 1) | ((j & 1) << 1) | ((k & 1) << 2); } CoordMask() : mask(0) {} CoordMask(const openvdb::Coord &c ) : mask( Mask(c[0],c[1],c[2]) ) {} inline void setCoord(int i, int j, int k) { mask = Mask(i,j,k); } inline void setCoord(const openvdb::Coord &c) { mask = Mask(c[0],c[1],c[2]); } inline bool allEven() const { return mask == 0; } inline bool xOdd() const { return mask == 1; } inline bool yOdd() const { return mask == 2; } inline bool zOdd() const { return mask == 4; } inline bool xyOdd() const { return mask == 3; } inline bool xzOdd() const { return mask == 5; } inline bool yzOdd() const { return mask == 6; } inline bool allOdd() const { return mask == 7; } int mask; };// CoordMask }; // Uncomment to test on models from our web-site //#define TestMultiResGrid_DATA_PATH "/home/kmu/src/openvdb/data/" //#define TestMultiResGrid_DATA_PATH "/usr/pic1/Data/OpenVDB/LevelSetModels/" TEST_F(TestMultiResGrid, testTwosComplement) { // test bit-operations that assume 2's complement representation of negative integers EXPECT_EQ( 1, 13 & 1 );// odd EXPECT_EQ( 1,-13 & 1 );// odd EXPECT_EQ( 0, 12 & 1 );// even EXPECT_EQ( 0,-12 & 1 );// even EXPECT_EQ( 0, 0 & 1 );// even for (int i=-50; i<=50; ++i) { if ( (i % 2) == 0 ) {//i.e. even number EXPECT_EQ( 0, i & 1); EXPECT_EQ( i, (i >> 1) << 1 ); } else {//i.e. odd number EXPECT_EQ( 1, i & 1); EXPECT_TRUE( i != (i >> 1) << 1 ); } } } TEST_F(TestMultiResGrid, testCoordMask) { using namespace openvdb; CoordMask mask; mask.setCoord(-4, 2, 18); EXPECT_TRUE(mask.allEven()); mask.setCoord(1, 2, -6); EXPECT_TRUE(mask.xOdd()); mask.setCoord(4, -3, -6); EXPECT_TRUE(mask.yOdd()); mask.setCoord(-8, 2, -7); EXPECT_TRUE(mask.zOdd()); mask.setCoord(1, -3, 2); EXPECT_TRUE(mask.xyOdd()); mask.setCoord(1, 2, -7); EXPECT_TRUE(mask.xzOdd()); mask.setCoord(-10, 3, -3); EXPECT_TRUE(mask.yzOdd()); mask.setCoord(1, 3,-3); EXPECT_TRUE(mask.allOdd()); } TEST_F(TestMultiResGrid, testManualTopology) { // Perform tests when the sparsity (or topology) of the multiple grids is defined manually using namespace openvdb; typedef tools::MultiResGrid<DoubleTree> MultiResGridT; const double background = -1.0; const size_t levels = 4; MultiResGridT::Ptr mrg(new MultiResGridT( levels, background)); EXPECT_TRUE(mrg != nullptr); EXPECT_EQ(levels , mrg->numLevels()); EXPECT_EQ(size_t(0), mrg->finestLevel()); EXPECT_EQ(levels-1, mrg->coarsestLevel()); // Define grid domain so they exactly overlap! 
const int w = 16;//half-width of dense patch on the finest grid level const CoordBBox bbox( Coord(-w), Coord(w) );// both inclusive // First check all trees against the background value for (size_t level = 0; level < mrg->numLevels(); ++level) { for (CoordBBox::Iterator<true> iter(bbox>>level); iter; ++iter) { EXPECT_NEAR(background, mrg->tree(level).getValue(*iter), /*tolerance=*/0.0); } } // Fill all trees according to a power of two refinement pattern for (size_t level = 0; level < mrg->numLevels(); ++level) { mrg->tree(level).fill( bbox>>level, double(level)); mrg->tree(level).voxelizeActiveTiles();// avoid active tiles // Check values for (CoordBBox::Iterator<true> iter(bbox>>level); iter; ++iter) { EXPECT_NEAR(double(level), mrg->tree(level).getValue(*iter), /*tolerance=*/0.0); } //mrg->tree( level ).print(std::cerr, 2); // Check bounding box of active voxels CoordBBox bbox_actual;// Expected Tree dimensions: 33^3 -> 17^3 -> 9^3 ->5^3 mrg->tree( level ).evalActiveVoxelBoundingBox( bbox_actual ); EXPECT_EQ( bbox >> level, bbox_actual ); } //pick a grid point that is shared between all the grids const Coord ijk(0); // Value at ijk equals the level EXPECT_NEAR(2.0, mrg->tree(2).getValue(ijk>>2), /*tolerance=*/ 0.001); EXPECT_NEAR(2.0, mrg->sampleValue<0>(ijk, size_t(2)), /*tolerance=*/ 0.001); EXPECT_NEAR(2.0, mrg->sampleValue<1>(ijk, size_t(2)), /*tolerance=*/ 0.001); EXPECT_NEAR(2.0, mrg->sampleValue<2>(ijk, size_t(2)), /*tolerance=*/ 0.001); EXPECT_NEAR(2.0, mrg->sampleValue<1>(ijk, 2.0f), /*tolerance=*/ 0.001); EXPECT_NEAR(2.0, mrg->sampleValue<1>(ijk, float(2)), /*tolerance=*/ 0.001); // Value at ijk at floating point level EXPECT_NEAR(2.25, mrg->sampleValue<1>(ijk, 2.25f), /*tolerance=*/ 0.001); // Value at a floating-point position close to ijk and a floating point level EXPECT_NEAR(2.25, mrg->sampleValue<1>(Vec3R(0.124), 2.25f), /*tolerance=*/ 0.001); // prolongate at a given point at top level EXPECT_NEAR(1.0, mrg->prolongateVoxel(ijk, 0), /*tolerance=*/ 0.0); // First check the coarsest level (3) for (CoordBBox::Iterator<true> iter(bbox>>size_t(3)); iter; ++iter) { EXPECT_NEAR(3.0, mrg->tree(3).getValue(*iter), /*tolerance=*/0.0); } // Prolongate from level 3 -> level 2 and check values mrg->prolongateActiveVoxels(2); for (CoordBBox::Iterator<true> iter(bbox>>size_t(2)); iter; ++iter) { EXPECT_NEAR(3.0, mrg->tree(2).getValue(*iter), /*tolerance=*/0.0); } // Prolongate from level 2 -> level 1 and check values mrg->prolongateActiveVoxels(1); for (CoordBBox::Iterator<true> iter(bbox>>size_t(1)); iter; ++iter) { EXPECT_NEAR(3.0, mrg->tree(1).getValue(*iter), /*tolerance=*/0.0); } // Prolongate from level 1 -> level 0 and check values mrg->prolongateActiveVoxels(0); for (CoordBBox::Iterator<true> iter(bbox); iter; ++iter) { EXPECT_NEAR(3.0, mrg->tree(0).getValue(*iter), /*tolerance=*/0.0); } // Redefine values at the finest level and check values mrg->finestTree().fill( bbox, 5.0 ); mrg->finestTree().voxelizeActiveTiles();// avoid active tiles for (CoordBBox::Iterator<true> iter(bbox); iter; ++iter) { EXPECT_NEAR(5.0, mrg->tree(0).getValue(*iter), /*tolerance=*/0.0); } // USE RESTRICTION BY INJECTION since it doesn't have boundary issues // // Restrict from level 0 -> level 1 and check values // mrg->restrictActiveVoxels(1); // for (CoordBBox::Iterator<true> iter((bbox>>1UL).expandBy(-1)); iter; ++iter) { // EXPECT_NEAR(5.0, mrg->tree(1).getValue(*iter), /*tolerance=*/0.0); // } // // Restrict from level 1 -> level 2 and check values // mrg->restrictActiveVoxels(2); // for 
(CoordBBox::Iterator<true> iter(bbox>>2UL); iter; ++iter) { // EXPECT_NEAR(5.0, mrg->tree(2).getValue(*iter), /*tolerance=*/0.0); // } // // Restrict from level 2 -> level 3 and check values // mrg->restrictActiveVoxels(3); // for (CoordBBox::Iterator<true> iter(bbox>>3UL); iter; ++iter) { // EXPECT_NEAR(5.0, mrg->tree(3).getValue(*iter), /*tolerance=*/0.0); // } } TEST_F(TestMultiResGrid, testIO) { using namespace openvdb; const float radius = 1.0f; const Vec3f center(0.0f, 0.0f, 0.0f); const float voxelSize = 0.01f; openvdb::FloatGrid::Ptr ls = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(radius, center, voxelSize); ls->setName("LevelSetSphere"); typedef tools::MultiResGrid<FloatTree> MultiResGridT; const size_t levels = 4; // Generate LOD sequence MultiResGridT mrg( levels, *ls, /* reduction by injection */ false ); //mrg.print( std::cout, 3 ); EXPECT_EQ(levels , mrg.numLevels()); EXPECT_EQ(size_t(0), mrg.finestLevel()); EXPECT_EQ(levels-1, mrg.coarsestLevel()); // Check inside and outside values for ( size_t level = 1; level < mrg.numLevels(); ++level) { const float inside = mrg.sampleValue<1>( Coord(0,0,0), 0UL, level ); EXPECT_NEAR( -ls->background(), inside,/*tolerance=*/ 0.001 ); const float outside = mrg.sampleValue<1>( Coord( int(1.1*radius/voxelSize) ), 0UL, level ); EXPECT_NEAR( ls->background(), outside,/*tolerance=*/ 0.001 ); } const std::string filename( "sphere.vdb" ); // Write grids io::File outputFile( filename ); outputFile.write( *mrg.grids() ); outputFile.close(); // Read grids openvdb::initialize(); openvdb::io::File file( filename ); file.open(); GridPtrVecPtr grids = file.getGrids(); EXPECT_EQ( levels, grids->size() ); //std::cerr << "\nsize = " << grids->size() << std::endl; for ( size_t i=0; i<grids->size(); ++i ) { FloatGrid::Ptr grid = gridPtrCast<FloatGrid>(grids->at(i)); EXPECT_EQ( grid->activeVoxelCount(), mrg.tree(i).activeVoxelCount() ); //grid->print(std::cerr, 3); } file.close(); ::remove(filename.c_str()); } TEST_F(TestMultiResGrid, testModels) { using namespace openvdb; #ifdef TestMultiResGrid_DATA_PATH initialize();//required whenever I/O of OpenVDB files is performed! 
const std::string path(TestMultiResGrid_DATA_PATH); std::vector<std::string> filenames; filenames.push_back("armadillo.vdb"); filenames.push_back("buddha.vdb"); filenames.push_back("bunny.vdb"); filenames.push_back("crawler.vdb"); filenames.push_back("dragon.vdb"); filenames.push_back("iss.vdb"); filenames.push_back("utahteapot.vdb"); util::CpuTimer timer; for ( size_t i=0; i<filenames.size(); ++i) { std::cerr << "\n=====================>\"" << filenames[i] << "\" =====================" << std::endl; std::cerr << "Reading \"" << filenames[i] << "\" ..."; io::File file( path + filenames[i] ); file.open(false);//disable delayed loading FloatGrid::Ptr model = gridPtrCast<FloatGrid>(file.getGrids()->at(0)); std::cerr << " done\nProcessing \"" << filenames[i] << "\" ..."; timer.start("\nMultiResGrid processing"); tools::MultiResGrid<FloatTree> mrg( 6, model ); timer.stop(); std::cerr << "\n High-res level set " << tools::checkLevelSet(*mrg.grid(0)) << "\n"; std::cerr << " done\nWriting \"" << filenames[i] << "\" ..."; io::File outputFile( "/tmp/" + filenames[i] );// renamed from "file" to avoid redefining the reader in the same scope outputFile.write( *mrg.grids() ); outputFile.close(); std::cerr << " done\n" << std::endl; // {// in-betweening // timer.start("\nIn-betweening"); // FloatGrid::Ptr model3 = mrg.createGrid( 1.9999f ); // timer.stop(); // // // std::cerr << "\n" << tools::checkLevelSet(*model3) << "\n"; // // // GridPtrVecPtr grids2( new GridPtrVec ); // grids2->push_back( model3 ); // io::File file2( "/tmp/inbetween_" + filenames[i] ); // file2.write( *grids2 ); // file2.close(); // } // {// prolongate // timer.start("\nProlongate"); // mrg.prolongateActiveVoxels(1); // FloatGrid::Ptr model31= mrg.grid(1); // timer.stop(); // GridPtrVecPtr grids2( new GridPtrVec ); // grids2->push_back( model31 ); // io::File file2( "/tmp/prolongate_" + filenames[i] ); // file2.write( *grids2 ); // file2.close(); // } //::remove(filenames[i].c_str()); } #endif }
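// --- Illustrative sketch (not part of the original test suite) ---
// A minimal example of the MultiResGrid API exercised above: build an LOD
// pyramid from a level set sphere and sample it at a fractional level.
// The radius, voxel size, level count, and sample point below are arbitrary
// illustration values, not taken from the tests.
inline void multiResGridUsageSketch()
{
    using namespace openvdb;
    FloatGrid::Ptr sphere = tools::createLevelSetSphere<FloatGrid>(
        /*radius=*/1.0f, /*center=*/Vec3f(0.0f, 0.0f, 0.0f), /*voxelSize=*/0.05f);
    // Four levels: level 0 is the input resolution, level 3 is the coarsest.
    tools::MultiResGrid<FloatTree> mrg(/*levels=*/4, *sphere, /*useInjection=*/false);
    // Order-1 sampling at an in-between level (2.5) blends levels 2 and 3.
    const float v = mrg.sampleValue<1>(Coord(0, 0, 20), 2.5f);
    (void)v;
    // The whole pyramid can be written out together via mrg.grids(), as in testIO above.
}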
12,063
C++
36.465838
99
0.585592
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestMeanCurvature.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <openvdb/Types.h> #include <openvdb/openvdb.h> #include <openvdb/math/Stencils.h> #include <openvdb/math/Operators.h> #include <openvdb/tools/GridOperators.h> #include "util.h" // for unittest_util::makeSphere() #include "gtest/gtest.h" #include <openvdb/tools/LevelSetSphere.h> class TestMeanCurvature: public ::testing::Test { void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; TEST_F(TestMeanCurvature, testISMeanCurvature) { using namespace openvdb; typedef FloatGrid::ConstAccessor AccessorType; FloatGrid::Ptr grid = createGrid<FloatGrid>(/*background=*/5.0); FloatTree& tree = grid->tree(); AccessorType inAccessor = grid->getConstAccessor(); AccessorType::ValueType alpha, beta, meancurv, normGrad; Coord xyz(35,30,30); // First test an empty grid EXPECT_TRUE(tree.empty()); typedef math::ISMeanCurvature<math::CD_SECOND, math::CD_2ND> SecondOrder; EXPECT_TRUE(!SecondOrder::result(inAccessor, xyz, alpha, beta)); typedef math::ISMeanCurvature<math::CD_FOURTH, math::CD_4TH> FourthOrder; EXPECT_TRUE(!FourthOrder::result(inAccessor, xyz, alpha, beta)); typedef math::ISMeanCurvature<math::CD_SIXTH, math::CD_6TH> SixthOrder; EXPECT_TRUE(!SixthOrder::result(inAccessor, xyz, alpha, beta)); // Next test a level set sphere const openvdb::Coord dim(64,64,64); const openvdb::Vec3f center(35.0f ,30.0f, 40.0f); const float radius=0.0f; unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); SecondOrder::result(inAccessor, xyz, alpha, beta); meancurv = alpha/(2*math::Pow3(beta) ); normGrad = alpha/(2*math::Pow2(beta) ); EXPECT_NEAR(1.0/10.0, meancurv, 0.001); EXPECT_NEAR(1.0/10.0, normGrad, 0.001); FourthOrder::result(inAccessor, xyz, alpha, beta); meancurv = alpha/(2*math::Pow3(beta) ); normGrad = alpha/(2*math::Pow2(beta) ); EXPECT_NEAR(1.0/10.0, meancurv, 0.001); EXPECT_NEAR(1.0/10.0, normGrad, 0.001); SixthOrder::result(inAccessor, xyz, alpha, beta); meancurv = alpha/(2*math::Pow3(beta) ); normGrad = alpha/(2*math::Pow2(beta) ); EXPECT_NEAR(1.0/10.0, meancurv, 0.001); EXPECT_NEAR(1.0/10.0, normGrad, 0.001); xyz.reset(35,10,40); SecondOrder::result(inAccessor, xyz, alpha, beta); meancurv = alpha/(2*math::Pow3(beta) ); normGrad = alpha/(2*math::Pow2(beta) ); EXPECT_NEAR(1.0/20.0, meancurv, 0.001); EXPECT_NEAR(1.0/20.0, normGrad, 0.001); } TEST_F(TestMeanCurvature, testISMeanCurvatureStencil) { using namespace openvdb; typedef FloatGrid::ConstAccessor AccessorType; FloatGrid::Ptr grid = createGrid<FloatGrid>(/*background=*/5.0); FloatTree& tree = grid->tree(); math::SecondOrderDenseStencil<FloatGrid> dense_2nd(*grid); math::FourthOrderDenseStencil<FloatGrid> dense_4th(*grid); math::SixthOrderDenseStencil<FloatGrid> dense_6th(*grid); AccessorType::ValueType alpha, beta; Coord xyz(35,30,30); dense_2nd.moveTo(xyz); dense_4th.moveTo(xyz); dense_6th.moveTo(xyz); // First test on an empty grid EXPECT_TRUE(tree.empty()); typedef math::ISMeanCurvature<math::CD_SECOND, math::CD_2ND> SecondOrder; EXPECT_TRUE(!SecondOrder::result(dense_2nd, alpha, beta)); typedef math::ISMeanCurvature<math::CD_FOURTH, math::CD_4TH> FourthOrder; EXPECT_TRUE(!FourthOrder::result(dense_4th, alpha, beta)); typedef math::ISMeanCurvature<math::CD_SIXTH, math::CD_6TH> SixthOrder; EXPECT_TRUE(!SixthOrder::result(dense_6th, alpha, beta)); // Next test on a level set sphere const openvdb::Coord dim(64,64,64); const openvdb::Vec3f 
center(35.0f ,30.0f, 40.0f); const float radius=0.0f; unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); dense_2nd.moveTo(xyz); dense_4th.moveTo(xyz); dense_6th.moveTo(xyz); EXPECT_TRUE(!tree.empty()); EXPECT_TRUE(SecondOrder::result(dense_2nd, alpha, beta)); AccessorType::ValueType meancurv = alpha/(2*math::Pow3(beta) ); AccessorType::ValueType normGrad = alpha/(2*math::Pow2(beta) ); EXPECT_NEAR(1.0/10.0, meancurv, 0.001); EXPECT_NEAR(1.0/10.0, normGrad, 0.001); EXPECT_TRUE(FourthOrder::result(dense_4th, alpha, beta)); meancurv = alpha/(2*math::Pow3(beta) ); normGrad = alpha/(2*math::Pow2(beta) ); EXPECT_NEAR(1.0/10.0, meancurv, 0.001); EXPECT_NEAR(1.0/10.0, normGrad, 0.001); EXPECT_TRUE(SixthOrder::result(dense_6th, alpha, beta)); meancurv = alpha/(2*math::Pow3(beta) ); normGrad = alpha/(2*math::Pow2(beta) ); EXPECT_NEAR(1.0/10.0, meancurv, 0.001); EXPECT_NEAR(1.0/10.0, normGrad, 0.001); xyz.reset(35,10,40); dense_2nd.moveTo(xyz); EXPECT_TRUE(SecondOrder::result(dense_2nd, alpha, beta)); meancurv = alpha/(2*math::Pow3(beta) ); normGrad = alpha/(2*math::Pow2(beta) ); EXPECT_NEAR(1.0/20.0, meancurv, 0.001); EXPECT_NEAR(1.0/20.0, normGrad, 0.001); } TEST_F(TestMeanCurvature, testWSMeanCurvature) { using namespace openvdb; using math::AffineMap; using math::TranslationMap; using math::UniformScaleMap; typedef FloatGrid::ConstAccessor AccessorType; {// Empty grid test FloatGrid::Ptr grid = createGrid<FloatGrid>(/*background=*/5.0); FloatTree& tree = grid->tree(); AccessorType inAccessor = grid->getConstAccessor(); Coord xyz(35,30,30); EXPECT_TRUE(tree.empty()); AccessorType::ValueType meancurv; AccessorType::ValueType normGrad; AffineMap affine; meancurv = math::MeanCurvature<AffineMap, math::CD_SECOND, math::CD_2ND>::result( affine, inAccessor, xyz); normGrad = math::MeanCurvature<AffineMap, math::CD_SECOND, math::CD_2ND>::normGrad( affine, inAccessor, xyz); EXPECT_NEAR(0.0, meancurv, 0.0); EXPECT_NEAR(0.0, normGrad, 0.0); meancurv = math::MeanCurvature<AffineMap, math::CD_FOURTH, math::CD_4TH>::result( affine, inAccessor, xyz); normGrad = math::MeanCurvature<AffineMap, math::CD_FOURTH, math::CD_4TH>::normGrad( affine, inAccessor, xyz); EXPECT_NEAR(0.0, meancurv, 0.0); EXPECT_NEAR(0.0, normGrad, 0.0); UniformScaleMap uniform; meancurv = math::MeanCurvature<UniformScaleMap, math::CD_SIXTH, math::CD_6TH>::result( uniform, inAccessor, xyz); normGrad = math::MeanCurvature<UniformScaleMap, math::CD_SIXTH, math::CD_6TH>::normGrad( uniform, inAccessor, xyz); EXPECT_NEAR(0.0, meancurv, 0.0); EXPECT_NEAR(0.0, normGrad, 0.0); xyz.reset(35,10,40); TranslationMap trans; meancurv = math::MeanCurvature<TranslationMap, math::CD_SIXTH, math::CD_6TH>::result( trans, inAccessor, xyz); normGrad = math::MeanCurvature<TranslationMap, math::CD_SIXTH, math::CD_6TH>::normGrad( trans, inAccessor, xyz); EXPECT_NEAR(0.0, meancurv, 0.0); EXPECT_NEAR(0.0, normGrad, 0.0); } { // unit size voxel test FloatGrid::Ptr grid = createGrid<FloatGrid>(/*background=*/5.0); FloatTree& tree = grid->tree(); const openvdb::Coord dim(64,64,64); const openvdb::Vec3f center(35.0f ,30.0f, 40.0f); const float radius=0.0f; unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); Coord xyz(35,30,30); AccessorType inAccessor = grid->getConstAccessor(); AccessorType::ValueType meancurv; AccessorType::ValueType normGrad; AffineMap affine; meancurv = math::MeanCurvature<AffineMap, math::CD_SECOND, math::CD_2ND>::result( affine, 
inAccessor, xyz); normGrad = math::MeanCurvature<AffineMap, math::CD_SECOND, math::CD_2ND>::normGrad( affine, inAccessor, xyz); EXPECT_NEAR(1.0/10.0, meancurv, 0.001); EXPECT_NEAR(1.0/10.0, normGrad, 0.001); meancurv = math::MeanCurvature<AffineMap, math::CD_FOURTH, math::CD_4TH>::result( affine, inAccessor, xyz); normGrad = math::MeanCurvature<AffineMap, math::CD_FOURTH, math::CD_4TH>::normGrad( affine, inAccessor, xyz); EXPECT_NEAR(1.0/10.0, meancurv, 0.001); EXPECT_NEAR(1.0/10.0, normGrad, 0.001); UniformScaleMap uniform; meancurv = math::MeanCurvature<UniformScaleMap, math::CD_SIXTH, math::CD_6TH>::result( uniform, inAccessor, xyz); normGrad = math::MeanCurvature<UniformScaleMap, math::CD_SIXTH, math::CD_6TH>::normGrad( uniform, inAccessor, xyz); EXPECT_NEAR(1.0/10.0, meancurv, 0.001); EXPECT_NEAR(1.0/10.0, normGrad, 0.001); xyz.reset(35,10,40); TranslationMap trans; meancurv = math::MeanCurvature<TranslationMap, math::CD_SIXTH, math::CD_6TH>::result( trans, inAccessor, xyz); normGrad = math::MeanCurvature<TranslationMap, math::CD_SIXTH, math::CD_6TH>::normGrad( trans, inAccessor, xyz); EXPECT_NEAR(1.0/20.0, meancurv, 0.001); EXPECT_NEAR(1.0/20.0, normGrad, 0.001); } { // non-unit sized voxel double voxel_size = 0.5; FloatGrid::Ptr grid = FloatGrid::create(/*backgroundValue=*/5.0); grid->setTransform(math::Transform::createLinearTransform(voxel_size)); EXPECT_TRUE(grid->empty()); const openvdb::Coord dim(32,32,32); const openvdb::Vec3f center(6.0f, 8.0f, 10.0f);//i.e. (12,16,20) in index space const float radius=10.0f; unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); AccessorType inAccessor = grid->getConstAccessor(); AccessorType::ValueType meancurv; AccessorType::ValueType normGrad; Coord xyz(20,16,20); AffineMap affine(voxel_size*math::Mat3d::identity()); meancurv = math::MeanCurvature<AffineMap, math::CD_SECOND, math::CD_2ND>::result( affine, inAccessor, xyz); normGrad = math::MeanCurvature<AffineMap, math::CD_SECOND, math::CD_2ND>::normGrad( affine, inAccessor, xyz); EXPECT_NEAR(1.0/4.0, meancurv, 0.001); EXPECT_NEAR(1.0/4.0, normGrad, 0.001); meancurv = math::MeanCurvature<AffineMap, math::CD_FOURTH, math::CD_4TH>::result( affine, inAccessor, xyz); normGrad = math::MeanCurvature<AffineMap, math::CD_FOURTH, math::CD_4TH>::normGrad( affine, inAccessor, xyz); EXPECT_NEAR(1.0/4.0, meancurv, 0.001); EXPECT_NEAR(1.0/4.0, normGrad, 0.001); UniformScaleMap uniform(voxel_size); meancurv = math::MeanCurvature<UniformScaleMap, math::CD_SIXTH, math::CD_6TH>::result( uniform, inAccessor, xyz); normGrad = math::MeanCurvature<UniformScaleMap, math::CD_SIXTH, math::CD_6TH>::normGrad( uniform, inAccessor, xyz); EXPECT_NEAR(1.0/4.0, meancurv, 0.001); EXPECT_NEAR(1.0/4.0, normGrad, 0.001); } { // NON-UNIFORM SCALING AND ROTATION Vec3d voxel_sizes(0.25, 0.45, 0.75); FloatGrid::Ptr grid = FloatGrid::create(); math::MapBase::Ptr base_map( new math::ScaleMap(voxel_sizes)); // apply rotation math::MapBase::Ptr rotated_map = base_map->preRotate(1.5, math::X_AXIS); grid->setTransform(math::Transform::Ptr(new math::Transform(rotated_map))); EXPECT_TRUE(grid->empty()); const openvdb::Coord dim(32,32,32); const openvdb::Vec3f center(6.0f, 8.0f, 10.0f);//i.e. 
(12,16,20) in index space const float radius=10.0f; unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); AccessorType inAccessor = grid->getConstAccessor(); AccessorType::ValueType meancurv; AccessorType::ValueType normGrad; Coord xyz(20,16,20); Vec3d location = grid->indexToWorld(xyz); double dist = (center - location).length(); AffineMap::ConstPtr affine = grid->transform().map<AffineMap>(); meancurv = math::MeanCurvature<AffineMap, math::CD_SECOND, math::CD_2ND>::result( *affine, inAccessor, xyz); normGrad = math::MeanCurvature<AffineMap, math::CD_SECOND, math::CD_2ND>::normGrad( *affine, inAccessor, xyz); EXPECT_NEAR(1.0/dist, meancurv, 0.001); EXPECT_NEAR(1.0/dist, normGrad, 0.001); meancurv = math::MeanCurvature<AffineMap, math::CD_FOURTH, math::CD_4TH>::result( *affine, inAccessor, xyz); normGrad = math::MeanCurvature<AffineMap, math::CD_FOURTH, math::CD_4TH>::normGrad( *affine, inAccessor, xyz); EXPECT_NEAR(1.0/dist, meancurv, 0.001); EXPECT_NEAR(1.0/dist, normGrad, 0.001); } } TEST_F(TestMeanCurvature, testWSMeanCurvatureStencil) { using namespace openvdb; using math::AffineMap; using math::TranslationMap; using math::UniformScaleMap; typedef FloatGrid::ConstAccessor AccessorType; {// empty grid test FloatGrid::Ptr grid = createGrid<FloatGrid>(/*background=*/5.0); FloatTree& tree = grid->tree(); EXPECT_TRUE(tree.empty()); Coord xyz(35,30,30); math::SecondOrderDenseStencil<FloatGrid> dense_2nd(*grid); math::FourthOrderDenseStencil<FloatGrid> dense_4th(*grid); math::SixthOrderDenseStencil<FloatGrid> dense_6th(*grid); dense_2nd.moveTo(xyz); dense_4th.moveTo(xyz); dense_6th.moveTo(xyz); AccessorType::ValueType meancurv; AccessorType::ValueType normGrad; AffineMap affine; meancurv = math::MeanCurvature<AffineMap, math::CD_SECOND, math::CD_2ND>::result( affine, dense_2nd); normGrad = math::MeanCurvature<AffineMap, math::CD_SECOND, math::CD_2ND>::normGrad( affine, dense_2nd); EXPECT_NEAR(0.0, meancurv, 0.0); EXPECT_NEAR(0.0, normGrad, 0.00); meancurv = math::MeanCurvature<AffineMap, math::CD_FOURTH, math::CD_4TH>::result( affine, dense_4th); normGrad = math::MeanCurvature<AffineMap, math::CD_FOURTH, math::CD_4TH>::normGrad( affine, dense_4th); EXPECT_NEAR(0.0, meancurv, 0.00); EXPECT_NEAR(0.0, normGrad, 0.00); UniformScaleMap uniform; meancurv = math::MeanCurvature<UniformScaleMap, math::CD_SIXTH, math::CD_6TH>::result( uniform, dense_6th); normGrad = math::MeanCurvature<UniformScaleMap, math::CD_SIXTH, math::CD_6TH>::normGrad( uniform, dense_6th); EXPECT_NEAR(0.0, meancurv, 0.0); EXPECT_NEAR(0.0, normGrad, 0.0); xyz.reset(35,10,40); dense_6th.moveTo(xyz); TranslationMap trans; meancurv = math::MeanCurvature<TranslationMap, math::CD_SIXTH, math::CD_6TH>::result( trans, dense_6th); normGrad = math::MeanCurvature<TranslationMap, math::CD_SIXTH, math::CD_6TH>::normGrad( trans, dense_6th); EXPECT_NEAR(0.0, meancurv, 0.0); EXPECT_NEAR(0.0, normGrad, 0.0); } { // unit-sized voxels FloatGrid::Ptr grid = createGrid<FloatGrid>(/*background=*/5.0); FloatTree& tree = grid->tree(); const openvdb::Coord dim(64,64,64); const openvdb::Vec3f center(35.0f, 30.0f, 40.0f);//i.e. 
(35,30,40) in index space const float radius=0.0f; unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); Coord xyz(35,30,30); math::SecondOrderDenseStencil<FloatGrid> dense_2nd(*grid); math::FourthOrderDenseStencil<FloatGrid> dense_4th(*grid); math::SixthOrderDenseStencil<FloatGrid> dense_6th(*grid); dense_2nd.moveTo(xyz); dense_4th.moveTo(xyz); dense_6th.moveTo(xyz); AccessorType::ValueType meancurv; AccessorType::ValueType normGrad; AffineMap affine; meancurv = math::MeanCurvature<AffineMap, math::CD_SECOND, math::CD_2ND>::result( affine, dense_2nd); normGrad = math::MeanCurvature<AffineMap, math::CD_SECOND, math::CD_2ND>::normGrad( affine, dense_2nd); EXPECT_NEAR(1.0/10.0, meancurv, 0.001); EXPECT_NEAR(1.0/10.0, normGrad, 0.001); meancurv = math::MeanCurvature<AffineMap, math::CD_FOURTH, math::CD_4TH>::result( affine, dense_4th); normGrad = math::MeanCurvature<AffineMap, math::CD_FOURTH, math::CD_4TH>::normGrad( affine, dense_4th); EXPECT_NEAR(1.0/10.0, meancurv, 0.001); EXPECT_NEAR(1.0/10.0, normGrad, 0.001); UniformScaleMap uniform; meancurv = math::MeanCurvature<UniformScaleMap, math::CD_SIXTH, math::CD_6TH>::result( uniform, dense_6th); normGrad = math::MeanCurvature<UniformScaleMap, math::CD_SIXTH, math::CD_6TH>::normGrad( uniform, dense_6th); EXPECT_NEAR(1.0/10.0, meancurv, 0.001); EXPECT_NEAR(1.0/10.0, normGrad, 0.001); xyz.reset(35,10,40); dense_6th.moveTo(xyz); TranslationMap trans; meancurv = math::MeanCurvature<TranslationMap, math::CD_SIXTH, math::CD_6TH>::result( trans, dense_6th); normGrad = math::MeanCurvature<TranslationMap, math::CD_SIXTH, math::CD_6TH>::normGrad( trans, dense_6th); EXPECT_NEAR(1.0/20.0, meancurv, 0.001); EXPECT_NEAR(1.0/20.0, normGrad, 0.001); } { // non-unit sized voxel double voxel_size = 0.5; FloatGrid::Ptr grid = FloatGrid::create(/*backgroundValue=*/5.0); grid->setTransform(math::Transform::createLinearTransform(voxel_size)); EXPECT_TRUE(grid->empty()); const openvdb::Coord dim(32,32,32); const openvdb::Vec3f center(6.0f, 8.0f, 10.0f);//i.e. 
(12,16,20) in index space const float radius=10.0f; unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); AccessorType::ValueType meancurv; AccessorType::ValueType normGrad; Coord xyz(20,16,20); math::SecondOrderDenseStencil<FloatGrid> dense_2nd(*grid); math::FourthOrderDenseStencil<FloatGrid> dense_4th(*grid); math::SixthOrderDenseStencil<FloatGrid> dense_6th(*grid); dense_2nd.moveTo(xyz); dense_4th.moveTo(xyz); dense_6th.moveTo(xyz); AffineMap affine(voxel_size*math::Mat3d::identity()); meancurv = math::MeanCurvature<AffineMap, math::CD_SECOND, math::CD_2ND>::result( affine, dense_2nd); normGrad = math::MeanCurvature<AffineMap, math::CD_SECOND, math::CD_2ND>::normGrad( affine, dense_2nd); EXPECT_NEAR(1.0/4.0, meancurv, 0.001); EXPECT_NEAR(1.0/4.0, normGrad, 0.001); meancurv = math::MeanCurvature<AffineMap, math::CD_FOURTH, math::CD_4TH>::result( affine, dense_4th); normGrad = math::MeanCurvature<AffineMap, math::CD_FOURTH, math::CD_4TH>::normGrad( affine, dense_4th); EXPECT_NEAR(1.0/4.0, meancurv, 0.001); EXPECT_NEAR(1.0/4.0, normGrad, 0.001); UniformScaleMap uniform(voxel_size); meancurv = math::MeanCurvature<UniformScaleMap, math::CD_SIXTH, math::CD_6TH>::result( uniform, dense_6th); normGrad = math::MeanCurvature<UniformScaleMap, math::CD_SIXTH, math::CD_6TH>::normGrad( uniform, dense_6th); EXPECT_NEAR(1.0/4.0, meancurv, 0.001); EXPECT_NEAR(1.0/4.0, normGrad, 0.001); } { // NON-UNIFORM SCALING AND ROTATION Vec3d voxel_sizes(0.25, 0.45, 0.75); FloatGrid::Ptr grid = FloatGrid::create(); math::MapBase::Ptr base_map( new math::ScaleMap(voxel_sizes)); // apply rotation math::MapBase::Ptr rotated_map = base_map->preRotate(1.5, math::X_AXIS); grid->setTransform(math::Transform::Ptr(new math::Transform(rotated_map))); EXPECT_TRUE(grid->empty()); const openvdb::Coord dim(32,32,32); const openvdb::Vec3f center(6.0f, 8.0f, 10.0f);//i.e. (12,16,20) in index space const float radius=10.0f; unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); AccessorType::ValueType meancurv; AccessorType::ValueType normGrad; Coord xyz(20,16,20); math::SecondOrderDenseStencil<FloatGrid> dense_2nd(*grid); math::FourthOrderDenseStencil<FloatGrid> dense_4th(*grid); dense_2nd.moveTo(xyz); dense_4th.moveTo(xyz); Vec3d location = grid->indexToWorld(xyz); double dist = (center - location).length(); AffineMap::ConstPtr affine = grid->transform().map<AffineMap>(); meancurv = math::MeanCurvature<AffineMap, math::CD_SECOND, math::CD_2ND>::result( *affine, dense_2nd); normGrad = math::MeanCurvature<AffineMap, math::CD_SECOND, math::CD_2ND>::normGrad( *affine, dense_2nd); EXPECT_NEAR(1.0/dist, meancurv, 0.001); EXPECT_NEAR(1.0/dist, normGrad, 0.001); meancurv = math::MeanCurvature<AffineMap, math::CD_FOURTH, math::CD_4TH>::result( *affine, dense_4th); normGrad = math::MeanCurvature<AffineMap, math::CD_FOURTH, math::CD_4TH>::normGrad( *affine, dense_4th); EXPECT_NEAR(1.0/dist, meancurv, 0.001); EXPECT_NEAR(1.0/dist, normGrad, 0.001); } } TEST_F(TestMeanCurvature, testMeanCurvatureTool) { using namespace openvdb; FloatGrid::Ptr grid = createGrid<FloatGrid>(/*background=*/5.0); FloatTree& tree = grid->tree(); const openvdb::Coord dim(64,64,64); const openvdb::Vec3f center(35.0f, 30.0f, 40.0f);//i.e. 
(35,30,40) in index space const float radius=0.0f; unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); FloatGrid::Ptr curv = tools::meanCurvature(*grid); FloatGrid::ConstAccessor accessor = curv->getConstAccessor(); Coord xyz(35,30,30); EXPECT_NEAR(1.0/10.0, accessor.getValue(xyz), 0.001); xyz.reset(35,10,40); EXPECT_NEAR(1.0/20.0, accessor.getValue(xyz), 0.001); } TEST_F(TestMeanCurvature, testMeanCurvatureMaskedTool) { using namespace openvdb; FloatGrid::Ptr grid = createGrid<FloatGrid>(/*background=*/5.0); FloatTree& tree = grid->tree(); const openvdb::Coord dim(64,64,64); const openvdb::Vec3f center(35.0f, 30.0f, 40.0f);//i.e. (35,30,40) in index space const float radius=0.0f; unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); const openvdb::CoordBBox maskbbox(openvdb::Coord(35, 30, 30), openvdb::Coord(41, 41, 41)); BoolGrid::Ptr maskGrid = BoolGrid::create(false); maskGrid->fill(maskbbox, true/*value*/, true/*activate*/); FloatGrid::Ptr curv = tools::meanCurvature(*grid, *maskGrid); FloatGrid::ConstAccessor accessor = curv->getConstAccessor(); // test inside Coord xyz(35,30,30); EXPECT_TRUE(maskbbox.isInside(xyz)); EXPECT_NEAR(1.0/10.0, accessor.getValue(xyz), 0.001); // test outside xyz.reset(35,10,40); EXPECT_TRUE(!maskbbox.isInside(xyz)); EXPECT_NEAR(0.0, accessor.getValue(xyz), 0.001); } TEST_F(TestMeanCurvature, testCurvatureStencil) { using namespace openvdb; {// test of level set to sphere at (6,8,10) with R=10 and dx=0.5 FloatGrid::Ptr grid = FloatGrid::create(/*backgroundValue=*/5.0); grid->setTransform(math::Transform::createLinearTransform(/*voxel size=*/0.5)); EXPECT_TRUE(grid->empty()); math::CurvatureStencil<FloatGrid> cs(*grid); Coord xyz(20,16,20);//i.e. 8 voxel or 4 world units away from the center cs.moveTo(xyz); // First test on an empty grid EXPECT_NEAR(0.0, cs.meanCurvature(), 0.0); EXPECT_NEAR(0.0, cs.meanCurvatureNormGrad(), 0.0); // Next test on a level set sphere const openvdb::Coord dim(32,32,32); const openvdb::Vec3f center(6.0f, 8.0f, 10.0f);//i.e. (12,16,20) in index space const float radius=10.0f; unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!grid->empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(grid->activeVoxelCount())); cs.moveTo(xyz); EXPECT_NEAR(1.0/4.0, cs.meanCurvature(), 0.01);// 1/distance from center EXPECT_NEAR(1.0/4.0, cs.meanCurvatureNormGrad(), 0.01);// 1/distance from center EXPECT_NEAR(1.0/16.0, cs.gaussianCurvature(), 0.01);// 1/distance^2 from center EXPECT_NEAR(1.0/16.0, cs.gaussianCurvatureNormGrad(), 0.01);// 1/distance^2 from center float mean, gaussian; cs.curvatures(mean, gaussian); EXPECT_NEAR(1.0/4.0, mean, 0.01);// 1/distance from center EXPECT_NEAR(1.0/16.0, gaussian, 0.01);// 1/distance^2 from center auto principalCurvatures = cs.principalCurvatures(); EXPECT_NEAR(1.0/4.0, principalCurvatures.first, 0.01);// 1/distance from center EXPECT_NEAR(1.0/4.0, principalCurvatures.second, 0.01);// 1/distance from center xyz.reset(12,16,10);//i.e. 
10 voxel or 5 world units away from the center cs.moveTo(xyz); EXPECT_NEAR(1.0/5.0, cs.meanCurvature(), 0.01);// 1/distance from center EXPECT_NEAR( 1.0/5.0, cs.meanCurvatureNormGrad(), 0.01);// 1/distance from center EXPECT_NEAR(1.0/25.0, cs.gaussianCurvature(), 0.01);// 1/distance^2 from center EXPECT_NEAR( 1.0/25.0, cs.gaussianCurvatureNormGrad(), 0.01);// 1/distance^2 from center principalCurvatures = cs.principalCurvatures(); EXPECT_NEAR(1.0/5.0, principalCurvatures.first, 0.01);// 1/distance from center EXPECT_NEAR(1.0/5.0, principalCurvatures.second, 0.01);// 1/distance from center EXPECT_NEAR( 1.0/5.0, principalCurvatures.first, 0.01);// 1/distance from center EXPECT_NEAR( 1.0/5.0, principalCurvatures.second, 0.01);// 1/distance from center cs.curvaturesNormGrad(mean, gaussian); EXPECT_NEAR(1.0/5.0, mean, 0.01);// 1/distance from center EXPECT_NEAR(1.0/25.0, gaussian, 0.01);// 1/distance^2 from center } {// test sparse level set sphere const double percentage = 0.1/100.0;//i.e. 0.1% const int dim = 256; // sparse level set sphere Vec3f C(0.35f, 0.35f, 0.35f); Real r = 0.15, voxelSize = 1.0/(dim-1); FloatGrid::Ptr sphere = tools::createLevelSetSphere<FloatGrid>(float(r), C, float(voxelSize)); math::CurvatureStencil<FloatGrid> cs(*sphere); const Coord ijk = Coord::round(sphere->worldToIndex(Vec3d(0.35, 0.35, 0.35 + 0.15))); const double radius = (sphere->indexToWorld(ijk)-Vec3d(0.35)).length(); //std::cerr << "\rRadius = " << radius << std::endl; //std::cerr << "Index coord =" << ijk << std::endl; cs.moveTo(ijk); //std::cerr << "Mean curvature = " << cs.meanCurvature() << ", 1/r=" << 1.0/radius << std::endl; //std::cerr << "Gaussian curvature = " << cs.gaussianCurvature() << ", 1/(r*r)=" << 1.0/(radius*radius) << std::endl; EXPECT_NEAR(1.0/radius, cs.meanCurvature(), percentage*1.0/radius); EXPECT_NEAR(1.0/(radius*radius), cs.gaussianCurvature(), percentage*1.0/(radius*radius)); float mean, gauss; cs.curvatures(mean, gauss); //std::cerr << "Mean curvature = " << mean << ", 1/r=" << 1.0/radius << std::endl; //std::cerr << "Gaussian curvature = " << gauss << ", 1/(r*r)=" << 1.0/(radius*radius) << std::endl; EXPECT_NEAR(1.0/radius, mean, percentage*1.0/radius); EXPECT_NEAR(1.0/(radius*radius), gauss, percentage*1.0/(radius*radius)); } } TEST_F(TestMeanCurvature, testIntersection) { using namespace openvdb; const Coord ijk(1,4,-9); FloatGrid grid(0.0f); auto acc = grid.getAccessor(); math::GradStencil<FloatGrid> stencil(grid); acc.setValue(ijk,-1.0f); int cases = 0; for (int mx=0; mx<2; ++mx) { acc.setValue(ijk.offsetBy(-1,0,0), mx ? 1.0f : -1.0f); for (int px=0; px<2; ++px) { acc.setValue(ijk.offsetBy(1,0,0), px ? 1.0f : -1.0f); for (int my=0; my<2; ++my) { acc.setValue(ijk.offsetBy(0,-1,0), my ? 1.0f : -1.0f); for (int py=0; py<2; ++py) { acc.setValue(ijk.offsetBy(0,1,0), py ? 1.0f : -1.0f); for (int mz=0; mz<2; ++mz) { acc.setValue(ijk.offsetBy(0,0,-1), mz ? 1.0f : -1.0f); for (int pz=0; pz<2; ++pz) { acc.setValue(ijk.offsetBy(0,0,1), pz ? 
1.0f : -1.0f); ++cases; EXPECT_EQ(7, int(grid.activeVoxelCount())); stencil.moveTo(ijk); const size_t count = mx + px + my + py + mz + pz;// number of intersections EXPECT_TRUE(stencil.intersects() == (count > 0)); auto mask = stencil.intersectionMask(); EXPECT_TRUE(mask.none() == (count == 0)); EXPECT_TRUE(mask.any() == (count > 0)); EXPECT_EQ(count, mask.count()); EXPECT_TRUE(mask.test(0) == mx); EXPECT_TRUE(mask.test(1) == px); EXPECT_TRUE(mask.test(2) == my); EXPECT_TRUE(mask.test(3) == py); EXPECT_TRUE(mask.test(4) == mz); EXPECT_TRUE(mask.test(5) == pz); }//pz }//mz }//py }//my }//px }//mx EXPECT_EQ(64, cases);// = 2^6 }//testIntersection
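// --- Illustrative sketch (not part of the original test suite) ---
// Shows the two ways the tests above evaluate curvature on a narrow-band level
// set sphere: the grid-level tools::meanCurvature() tool and the per-voxel
// math::CurvatureStencil. The radius, center, voxel size, and sample point are
// arbitrary illustration values.
inline void curvatureUsageSketch()
{
    using namespace openvdb;
    FloatGrid::Ptr sphere = tools::createLevelSetSphere<FloatGrid>(
        /*radius=*/0.15f, /*center=*/Vec3f(0.35f, 0.35f, 0.35f), /*voxelSize=*/0.01f);

    // Grid-level tool: produces a FloatGrid of mean-curvature values.
    FloatGrid::Ptr curv = tools::meanCurvature(*sphere);
    (void)curv;

    // Per-voxel stencil: mean and Gaussian curvature at one index coordinate
    // on the sphere's surface, expected to be close to 1/r and 1/r^2.
    math::CurvatureStencil<FloatGrid> cs(*sphere);
    cs.moveTo(Coord::round(sphere->worldToIndex(Vec3d(0.35, 0.35, 0.5))));
    float mean = 0.0f, gauss = 0.0f;
    cs.curvatures(mean, gauss);
    (void)mean; (void)gauss;
}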
30,059
C++
37.587933
123
0.620879
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestGridBbox.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/openvdb.h> #include <openvdb/tree/Tree.h> #include <openvdb/tree/LeafNode.h> #include <openvdb/Types.h> #include <openvdb/Exceptions.h> class TestGridBbox: public ::testing::Test { }; //////////////////////////////////////// TEST_F(TestGridBbox, testLeafBbox) { openvdb::FloatTree tree(/*fillValue=*/256.0f); openvdb::CoordBBox bbox; EXPECT_TRUE(!tree.evalLeafBoundingBox(bbox)); // Add values to buffer zero. tree.setValue(openvdb::Coord( 0, 9, 9), 2.0); tree.setValue(openvdb::Coord(100, 35, 800), 2.5); // Coordinates in CoordBBox are inclusive! EXPECT_TRUE(tree.evalLeafBoundingBox(bbox)); EXPECT_EQ(openvdb::Coord(0, 8, 8), bbox.min()); EXPECT_EQ(openvdb::Coord(104-1, 40-1, 808-1), bbox.max()); // Test negative coordinates. tree.setValue(openvdb::Coord(-100, -35, -800), 2.5); EXPECT_TRUE(tree.evalLeafBoundingBox(bbox)); EXPECT_EQ(openvdb::Coord(-104, -40, -800), bbox.min()); EXPECT_EQ(openvdb::Coord(104-1, 40-1, 808-1), bbox.max()); // Clear the tree without trimming. tree.setValueOff(openvdb::Coord( 0, 9, 9)); tree.setValueOff(openvdb::Coord(100, 35, 800)); tree.setValueOff(openvdb::Coord(-100, -35, -800)); EXPECT_TRUE(!tree.evalLeafBoundingBox(bbox)); } TEST_F(TestGridBbox, testGridBbox) { openvdb::FloatTree tree(/*fillValue=*/256.0f); openvdb::CoordBBox bbox; EXPECT_TRUE(!tree.evalActiveVoxelBoundingBox(bbox)); // Add values to buffer zero. tree.setValue(openvdb::Coord( 1, 0, 0), 1.5); tree.setValue(openvdb::Coord( 0, 12, 8), 2.0); tree.setValue(openvdb::Coord( 1, 35, 800), 2.5); tree.setValue(openvdb::Coord(100, 0, 16), 3.0); tree.setValue(openvdb::Coord( 1, 0, 16), 3.5); // Coordinates in CoordBBox are inclusive! EXPECT_TRUE(tree.evalActiveVoxelBoundingBox(bbox)); EXPECT_EQ(openvdb::Coord( 0, 0, 0), bbox.min()); EXPECT_EQ(openvdb::Coord(100, 35, 800), bbox.max()); // Test negative coordinates. tree.setValue(openvdb::Coord(-100, -35, -800), 2.5); EXPECT_TRUE(tree.evalActiveVoxelBoundingBox(bbox)); EXPECT_EQ(openvdb::Coord(-100, -35, -800), bbox.min()); EXPECT_EQ(openvdb::Coord(100, 35, 800), bbox.max()); // Clear the tree without trimming. tree.setValueOff(openvdb::Coord( 1, 0, 0)); tree.setValueOff(openvdb::Coord( 0, 12, 8)); tree.setValueOff(openvdb::Coord( 1, 35, 800)); tree.setValueOff(openvdb::Coord(100, 0, 16)); tree.setValueOff(openvdb::Coord( 1, 0, 16)); tree.setValueOff(openvdb::Coord(-100, -35, -800)); EXPECT_TRUE(!tree.evalActiveVoxelBoundingBox(bbox)); }
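// --- Illustrative sketch (not part of the original tests) ---
// The distinction exercised above: evalLeafBoundingBox() reports the bounds of
// allocated 8^3 leaf nodes (snapped to leaf boundaries), while
// evalActiveVoxelBoundingBox() reports the tight bounds of the active voxels
// themselves. The coordinate and value below are arbitrary.
inline void bboxUsageSketch()
{
    openvdb::FloatTree tree(/*background=*/0.0f);
    tree.setValue(openvdb::Coord(3, 3, 3), 1.0f);

    openvdb::CoordBBox leafBBox, voxelBBox;
    tree.evalLeafBoundingBox(leafBBox);         // (0,0,0) -> (7,7,7): the whole leaf node
    tree.evalActiveVoxelBoundingBox(voxelBBox); // (3,3,3) -> (3,3,3): just the active voxel
}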
2,799
C++
31.183908
62
0.6388
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestGrid.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <openvdb/Exceptions.h> #include <openvdb/openvdb.h> #include <openvdb/Types.h> #include <openvdb/util/Name.h> #include <openvdb/math/Transform.h> #include <openvdb/Grid.h> #include <openvdb/tree/Tree.h> #include <openvdb/util/CpuTimer.h> #include "gtest/gtest.h" #include <iostream> #include <memory> // for std::make_unique #define ASSERT_DOUBLES_EXACTLY_EQUAL(expected, actual) \ EXPECT_NEAR((expected), (actual), /*tolerance=*/0.0); class TestGrid: public ::testing::Test { }; //////////////////////////////////////// class ProxyTree: public openvdb::TreeBase { public: using ValueType = int; using BuildType = int; using LeafNodeType = void; using ValueAllCIter = void; using ValueAllIter = void; using ValueOffCIter = void; using ValueOffIter = void; using ValueOnCIter = void; using ValueOnIter = void; using TreeBasePtr = openvdb::TreeBase::Ptr; using Ptr = openvdb::SharedPtr<ProxyTree>; using ConstPtr = openvdb::SharedPtr<const ProxyTree>; static const openvdb::Index DEPTH; static const ValueType backg; ProxyTree() {} ProxyTree(const ValueType&) {} ProxyTree(const ProxyTree&) = default; ~ProxyTree() override = default; static const openvdb::Name& treeType() { static const openvdb::Name s("proxy"); return s; } const openvdb::Name& type() const override { return treeType(); } openvdb::Name valueType() const override { return "proxy"; } const ValueType& background() const { return backg; } TreeBasePtr copy() const override { return TreeBasePtr(new ProxyTree(*this)); } void readTopology(std::istream& is, bool = false) override { is.seekg(0, std::ios::beg); } void writeTopology(std::ostream& os, bool = false) const override { os.seekp(0); } void readBuffers(std::istream& is, const openvdb::CoordBBox&, bool /*saveFloatAsHalf*/=false) override { is.seekg(0); } void readNonresidentBuffers() const override {} void readBuffers(std::istream& is, bool /*saveFloatAsHalf*/=false) override { is.seekg(0); } void writeBuffers(std::ostream& os, bool /*saveFloatAsHalf*/=false) const override { os.seekp(0, std::ios::beg); } bool empty() const { return true; } void clear() {} void prune(const ValueType& = 0) {} void clip(const openvdb::CoordBBox&) {} void clipUnallocatedNodes() override {} openvdb::Index32 unallocatedLeafCount() const override { return 0; } void getIndexRange(openvdb::CoordBBox&) const override {} bool evalLeafBoundingBox(openvdb::CoordBBox& bbox) const override { bbox.min() = bbox.max() = openvdb::Coord(0, 0, 0); return false; } bool evalActiveVoxelBoundingBox(openvdb::CoordBBox& bbox) const override { bbox.min() = bbox.max() = openvdb::Coord(0, 0, 0); return false; } bool evalActiveVoxelDim(openvdb::Coord& dim) const override { dim = openvdb::Coord(0, 0, 0); return false; } bool evalLeafDim(openvdb::Coord& dim) const override { dim = openvdb::Coord(0, 0, 0); return false; } openvdb::Index treeDepth() const override { return 0; } openvdb::Index leafCount() const override { return 0; } #if OPENVDB_ABI_VERSION_NUMBER >= 7 std::vector<openvdb::Index32> nodeCount() const override { return std::vector<openvdb::Index32>(DEPTH, 0); } #endif openvdb::Index nonLeafCount() const override { return 0; } openvdb::Index64 activeVoxelCount() const override { return 0UL; } openvdb::Index64 inactiveVoxelCount() const override { return 0UL; } openvdb::Index64 activeLeafVoxelCount() const override { return 0UL; } openvdb::Index64 inactiveLeafVoxelCount() const override { return 0UL; } openvdb::Index64 activeTileCount() 
const override { return 0UL; } }; const openvdb::Index ProxyTree::DEPTH = 0; const ProxyTree::ValueType ProxyTree::backg = 0; using ProxyGrid = openvdb::Grid<ProxyTree>; //////////////////////////////////////// TEST_F(TestGrid, testGridRegistry) { using namespace openvdb::tree; using TreeType = Tree<RootNode<InternalNode<LeafNode<float, 3>, 2> > >; using GridType = openvdb::Grid<TreeType>; openvdb::GridBase::clearRegistry(); EXPECT_TRUE(!GridType::isRegistered()); GridType::registerGrid(); EXPECT_TRUE(GridType::isRegistered()); EXPECT_THROW(GridType::registerGrid(), openvdb::KeyError); GridType::unregisterGrid(); EXPECT_TRUE(!GridType::isRegistered()); EXPECT_NO_THROW(GridType::unregisterGrid()); EXPECT_TRUE(!GridType::isRegistered()); EXPECT_NO_THROW(GridType::registerGrid()); EXPECT_TRUE(GridType::isRegistered()); openvdb::GridBase::clearRegistry(); } TEST_F(TestGrid, testConstPtr) { using namespace openvdb; GridBase::ConstPtr constgrid = ProxyGrid::create(); EXPECT_EQ(Name("proxy"), constgrid->type()); } TEST_F(TestGrid, testGetGrid) { using namespace openvdb; GridBase::Ptr grid = FloatGrid::create(/*bg=*/0.0); GridBase::ConstPtr constGrid = grid; EXPECT_TRUE(grid->baseTreePtr()); EXPECT_TRUE(!gridPtrCast<DoubleGrid>(grid)); EXPECT_TRUE(!gridPtrCast<DoubleGrid>(grid)); EXPECT_TRUE(gridConstPtrCast<FloatGrid>(constGrid)); EXPECT_TRUE(!gridConstPtrCast<DoubleGrid>(constGrid)); } TEST_F(TestGrid, testIsType) { using namespace openvdb; GridBase::Ptr grid = FloatGrid::create(); EXPECT_TRUE(grid->isType<FloatGrid>()); EXPECT_TRUE(!grid->isType<DoubleGrid>()); } TEST_F(TestGrid, testIsTreeUnique) { using namespace openvdb; FloatGrid::Ptr grid = FloatGrid::create(); EXPECT_TRUE(grid->isTreeUnique()); // a shallow copy shares the same tree FloatGrid::Ptr grid2 = grid->copy(); EXPECT_TRUE(!grid->isTreeUnique()); EXPECT_TRUE(!grid2->isTreeUnique()); // cleanup the shallow copy grid2.reset(); EXPECT_TRUE(grid->isTreeUnique()); // copy with new tree GridBase::Ptr grid3 = grid->copyGridWithNewTree(); EXPECT_TRUE(grid->isTreeUnique()); #if OPENVDB_ABI_VERSION_NUMBER >= 8 // shallow copy using GridBase GridBase::Ptr grid4 = grid->copyGrid(); EXPECT_TRUE(!grid4->isTreeUnique()); // copy with new tree using GridBase GridBase::Ptr grid5 = grid->copyGridWithNewTree(); EXPECT_TRUE(grid5->isTreeUnique()); #endif } TEST_F(TestGrid, testTransform) { ProxyGrid grid; // Verify that the grid has a valid default transform. EXPECT_TRUE(grid.transformPtr()); // Verify that a null transform pointer is not allowed. EXPECT_THROW(grid.setTransform(openvdb::math::Transform::Ptr()), openvdb::ValueError); grid.setTransform(openvdb::math::Transform::createLinearTransform()); EXPECT_TRUE(grid.transformPtr()); // Verify that calling Transform-related Grid methods (Grid::voxelSize(), etc.) // is the same as calling those methods on the Transform. 
EXPECT_TRUE(grid.transform().voxelSize().eq(grid.voxelSize())); EXPECT_TRUE(grid.transform().voxelSize(openvdb::Vec3d(0.1, 0.2, 0.3)).eq( grid.voxelSize(openvdb::Vec3d(0.1, 0.2, 0.3)))); EXPECT_TRUE(grid.transform().indexToWorld(openvdb::Vec3d(0.1, 0.2, 0.3)).eq( grid.indexToWorld(openvdb::Vec3d(0.1, 0.2, 0.3)))); EXPECT_TRUE(grid.transform().indexToWorld(openvdb::Coord(1, 2, 3)).eq( grid.indexToWorld(openvdb::Coord(1, 2, 3)))); EXPECT_TRUE(grid.transform().worldToIndex(openvdb::Vec3d(0.1, 0.2, 0.3)).eq( grid.worldToIndex(openvdb::Vec3d(0.1, 0.2, 0.3)))); } TEST_F(TestGrid, testCopyGrid) { using namespace openvdb; // set up a grid const float fillValue1=5.0f; FloatGrid::Ptr grid1 = createGrid<FloatGrid>(/*bg=*/fillValue1); FloatTree& tree1 = grid1->tree(); tree1.setValue(Coord(-10,40,845), 3.456f); tree1.setValue(Coord(1,-50,-8), 1.0f); // create a new grid, copying the first grid GridBase::Ptr grid2 = grid1->deepCopy(); // cast down to the concrete type to query values FloatTree& tree2 = gridPtrCast<FloatGrid>(grid2)->tree(); // compare topology EXPECT_TRUE(tree1.hasSameTopology(tree2)); EXPECT_TRUE(tree2.hasSameTopology(tree1)); // trees should be equal ASSERT_DOUBLES_EXACTLY_EQUAL(fillValue1, tree2.getValue(Coord(1,2,3))); ASSERT_DOUBLES_EXACTLY_EQUAL(3.456f, tree2.getValue(Coord(-10,40,845))); ASSERT_DOUBLES_EXACTLY_EQUAL(1.0f, tree2.getValue(Coord(1,-50,-8))); // change 1 value in tree2 Coord changeCoord(1, -500, -8); tree2.setValue(changeCoord, 1.0f); // topology should no longer match EXPECT_TRUE(!tree1.hasSameTopology(tree2)); EXPECT_TRUE(!tree2.hasSameTopology(tree1)); // query changed value and make sure it's different between trees ASSERT_DOUBLES_EXACTLY_EQUAL(fillValue1, tree1.getValue(changeCoord)); ASSERT_DOUBLES_EXACTLY_EQUAL(1.0f, tree2.getValue(changeCoord)); #if OPENVDB_ABI_VERSION_NUMBER >= 7 // shallow-copy a const grid but supply a new transform and meta map EXPECT_EQ(1.0, grid1->transform().voxelSize().x()); EXPECT_EQ(size_t(0), grid1->metaCount()); EXPECT_EQ(Index(2), grid1->tree().leafCount()); math::Transform::Ptr xform(math::Transform::createLinearTransform(/*voxelSize=*/0.25)); MetaMap meta; meta.insertMeta("test", Int32Metadata(4)); FloatGrid::ConstPtr constGrid1 = ConstPtrCast<const FloatGrid>(grid1); GridBase::ConstPtr grid3 = constGrid1->copyGridReplacingMetadataAndTransform(meta, xform); const FloatTree& tree3 = gridConstPtrCast<FloatGrid>(grid3)->tree(); EXPECT_EQ(0.25, grid3->transform().voxelSize().x()); EXPECT_EQ(size_t(1), grid3->metaCount()); EXPECT_EQ(Index(2), tree3.leafCount()); EXPECT_EQ(long(3), constGrid1->constTreePtr().use_count()); #endif } TEST_F(TestGrid, testValueConversion) { using namespace openvdb; const Coord c0(-10, 40, 845), c1(1, -50, -8), c2(1, 2, 3); const float fval0 = 3.25f, fval1 = 1.0f, fbkgd = 5.0f; // Create a FloatGrid. FloatGrid fgrid(fbkgd); FloatTree& ftree = fgrid.tree(); ftree.setValue(c0, fval0); ftree.setValue(c1, fval1); // Copy the FloatGrid to a DoubleGrid. DoubleGrid dgrid(fgrid); DoubleTree& dtree = dgrid.tree(); // Compare topology. EXPECT_TRUE(dtree.hasSameTopology(ftree)); EXPECT_TRUE(ftree.hasSameTopology(dtree)); // Compare values. ASSERT_DOUBLES_EXACTLY_EQUAL(double(fbkgd), dtree.getValue(c2)); ASSERT_DOUBLES_EXACTLY_EQUAL(double(fval0), dtree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(double(fval1), dtree.getValue(c1)); // Copy the FloatGrid to a BoolGrid. BoolGrid bgrid(fgrid); BoolTree& btree = bgrid.tree(); // Compare topology. 
EXPECT_TRUE(btree.hasSameTopology(ftree)); EXPECT_TRUE(ftree.hasSameTopology(btree)); // Compare values. EXPECT_EQ(bool(fbkgd), btree.getValue(c2)); EXPECT_EQ(bool(fval0), btree.getValue(c0)); EXPECT_EQ(bool(fval1), btree.getValue(c1)); // Copy the FloatGrid to a Vec3SGrid. Vec3SGrid vgrid(fgrid); Vec3STree& vtree = vgrid.tree(); // Compare topology. EXPECT_TRUE(vtree.hasSameTopology(ftree)); EXPECT_TRUE(ftree.hasSameTopology(vtree)); // Compare values. EXPECT_EQ(Vec3s(fbkgd), vtree.getValue(c2)); EXPECT_EQ(Vec3s(fval0), vtree.getValue(c0)); EXPECT_EQ(Vec3s(fval1), vtree.getValue(c1)); // Verify that a Vec3SGrid can't be copied to an Int32Grid // (because an Int32 can't be constructed from a Vec3S). EXPECT_THROW(Int32Grid igrid2(vgrid), openvdb::TypeError); // Verify that a grid can't be converted to another type with a different // tree configuration. using DTree23 = tree::Tree3<double, 2, 3>::Type; using DGrid23 = Grid<DTree23>; EXPECT_THROW(DGrid23 d23grid(fgrid), openvdb::TypeError); } //////////////////////////////////////// template<typename GridT> void validateClippedGrid(const GridT& clipped, const typename GridT::ValueType& fg) { using namespace openvdb; using ValueT = typename GridT::ValueType; const CoordBBox bbox = clipped.evalActiveVoxelBoundingBox(); EXPECT_EQ(4, bbox.min().x()); EXPECT_EQ(4, bbox.min().y()); EXPECT_EQ(-6, bbox.min().z()); EXPECT_EQ(4, bbox.max().x()); EXPECT_EQ(4, bbox.max().y()); EXPECT_EQ(6, bbox.max().z()); EXPECT_EQ(6 + 6 + 1, int(clipped.activeVoxelCount())); EXPECT_EQ(2, int(clipped.constTree().leafCount())); typename GridT::ConstAccessor acc = clipped.getConstAccessor(); const ValueT bg = clipped.background(); Coord xyz; int &x = xyz[0], &y = xyz[1], &z = xyz[2]; for (x = -10; x <= 10; ++x) { for (y = -10; y <= 10; ++y) { for (z = -10; z <= 10; ++z) { if (x == 4 && y == 4 && z >= -6 && z <= 6) { EXPECT_EQ(fg, acc.getValue(Coord(4, 4, z))); } else { EXPECT_EQ(bg, acc.getValue(Coord(x, y, z))); } } } } } // See also TestTools::testClipping() TEST_F(TestGrid, testClipping) { using namespace openvdb; const BBoxd clipBox(Vec3d(4.0, 4.0, -6.0), Vec3d(4.9, 4.9, 6.0)); { const float fg = 5.f; FloatGrid cube(0.f); cube.fill(CoordBBox(Coord(-10), Coord(10)), /*value=*/fg, /*active=*/true); cube.clipGrid(clipBox); validateClippedGrid(cube, fg); } { const bool fg = true; BoolGrid cube(false); cube.fill(CoordBBox(Coord(-10), Coord(10)), /*value=*/fg, /*active=*/true); cube.clipGrid(clipBox); validateClippedGrid(cube, fg); } { const Vec3s fg(1.f, -2.f, 3.f); Vec3SGrid cube(Vec3s(0.f)); cube.fill(CoordBBox(Coord(-10), Coord(10)), /*value=*/fg, /*active=*/true); cube.clipGrid(clipBox); validateClippedGrid(cube, fg); } /* {// Benchmark multi-threaded copy construction openvdb::util::CpuTimer timer; openvdb::initialize(); openvdb::io::File file("/usr/pic1/Data/OpenVDB/LevelSetModels/crawler.vdb"); file.open(); openvdb::GridBase::Ptr baseGrid = file.readGrid("ls_crawler"); file.close(); openvdb::FloatGrid::Ptr grid = openvdb::gridPtrCast<openvdb::FloatGrid>(baseGrid); //grid->tree().print(); timer.start("\nCopy construction"); openvdb::FloatTree fTree(grid->tree()); timer.stop(); timer.start("\nBoolean topology copy construction"); openvdb::BoolTree bTree(grid->tree(), false, openvdb::TopologyCopy()); timer.stop(); timer.start("\nBoolean topology union"); bTree.topologyUnion(fTree); timer.stop(); //bTree.print(); } */ } //////////////////////////////////////// namespace { struct GridOp { bool isConst = false; template<typename GridT> void operator()(const GridT&) { 
isConst = true; } template<typename GridT> void operator()(GridT&) { isConst = false; } }; } // anonymous namespace TEST_F(TestGrid, testApply) { using namespace openvdb; const GridBase::Ptr boolGrid = BoolGrid::create(), floatGrid = FloatGrid::create(), doubleGrid = DoubleGrid::create(), intGrid = Int32Grid::create(); const GridBase::ConstPtr boolCGrid = BoolGrid::create(), floatCGrid = FloatGrid::create(), doubleCGrid = DoubleGrid::create(), intCGrid = Int32Grid::create(); { using AllowedGridTypes = TypeList<>; // Verify that the functor is not applied to any of the grids. GridOp op; EXPECT_TRUE(!boolGrid->apply<AllowedGridTypes>(op)); EXPECT_TRUE(!boolCGrid->apply<AllowedGridTypes>(op)); EXPECT_TRUE(!floatGrid->apply<AllowedGridTypes>(op)); EXPECT_TRUE(!floatCGrid->apply<AllowedGridTypes>(op)); EXPECT_TRUE(!doubleGrid->apply<AllowedGridTypes>(op)); EXPECT_TRUE(!doubleCGrid->apply<AllowedGridTypes>(op)); EXPECT_TRUE(!intGrid->apply<AllowedGridTypes>(op)); EXPECT_TRUE(!intCGrid->apply<AllowedGridTypes>(op)); } { using AllowedGridTypes = TypeList<FloatGrid, FloatGrid, DoubleGrid>; // Verify that the functor is applied only to grids of the allowed types // and that their constness is respected. GridOp op; EXPECT_TRUE(!boolGrid->apply<AllowedGridTypes>(op)); EXPECT_TRUE(!intGrid->apply<AllowedGridTypes>(op)); EXPECT_TRUE(floatGrid->apply<AllowedGridTypes>(op)); EXPECT_TRUE(!op.isConst); EXPECT_TRUE(doubleGrid->apply<AllowedGridTypes>(op)); EXPECT_TRUE(!op.isConst); EXPECT_TRUE(!boolCGrid->apply<AllowedGridTypes>(op)); EXPECT_TRUE(!intCGrid->apply<AllowedGridTypes>(op)); EXPECT_TRUE(floatCGrid->apply<AllowedGridTypes>(op)); EXPECT_TRUE(op.isConst); EXPECT_TRUE(doubleCGrid->apply<AllowedGridTypes>(op)); EXPECT_TRUE(op.isConst); } { using AllowedGridTypes = TypeList<FloatGrid, DoubleGrid>; // Verify that rvalue functors are supported. int n = 0; EXPECT_TRUE( !boolGrid->apply<AllowedGridTypes>([&n](GridBase&) { ++n; })); EXPECT_TRUE( !intGrid->apply<AllowedGridTypes>([&n](GridBase&) { ++n; })); EXPECT_TRUE( floatGrid->apply<AllowedGridTypes>([&n](GridBase&) { ++n; })); EXPECT_TRUE( doubleGrid->apply<AllowedGridTypes>([&n](GridBase&) { ++n; })); EXPECT_TRUE( !boolCGrid->apply<AllowedGridTypes>([&n](const GridBase&) { ++n; })); EXPECT_TRUE( !intCGrid->apply<AllowedGridTypes>([&n](const GridBase&) { ++n; })); EXPECT_TRUE( floatCGrid->apply<AllowedGridTypes>([&n](const GridBase&) { ++n; })); EXPECT_TRUE(doubleCGrid->apply<AllowedGridTypes>([&n](const GridBase&) { ++n; })); EXPECT_EQ(4, n); } }
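// --- Illustrative sketch (not part of the original test suite) ---
// Summarizes the copy semantics exercised above: Grid::copy() is a shallow
// copy that shares the tree, while Grid::deepCopy() duplicates it. The
// coordinates and values below are arbitrary illustration choices.
inline void gridCopySketch()
{
    using namespace openvdb;
    FloatGrid::Ptr a = FloatGrid::create(/*bg=*/0.0f);
    a->tree().setValue(Coord(1, 2, 3), 1.0f);

    FloatGrid::Ptr shallow = a->copy();   // shares the tree
    // a->isTreeUnique() and shallow->isTreeUnique() are both false here.

    GridBase::Ptr deep = a->deepCopy();   // independent tree
    gridPtrCast<FloatGrid>(deep)->tree().setValue(Coord(1, 2, 3), 2.0f);
    // a and shallow still read 1.0f at (1,2,3); deep reads 2.0f.
}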
17,880
C++
33.320537
96
0.647315
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestCurl.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Types.h> #include <openvdb/openvdb.h> #include <openvdb/tools/GridOperators.h> #define ASSERT_DOUBLES_EXACTLY_EQUAL(expected, actual) \ EXPECT_NEAR((expected), (actual), /*tolerance=*/1e-6); namespace { const int GRID_DIM = 10; } class TestCurl: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; TEST_F(TestCurl, testCurlTool) { using namespace openvdb; VectorGrid::Ptr inGrid = VectorGrid::create(); const VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); VectorGrid::Accessor inAccessor = inGrid->getAccessor(); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { inAccessor.setValue(Coord(x,y,z), VectorTree::ValueType(float(y), float(-x), 0.f)); } } } EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); VectorGrid::Ptr curl_grid = tools::curl(*inGrid); EXPECT_EQ(math::Pow3(2*dim), int(curl_grid->activeVoxelCount())); VectorGrid::ConstAccessor curlAccessor = curl_grid->getConstAccessor(); --dim;//ignore boundary curl vectors for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inAccessor.getValue(xyz); //std::cout << "vec(" << xyz << ")=" << v << std::endl; ASSERT_DOUBLES_EXACTLY_EQUAL( y,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(-x,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[2]); v = curlAccessor.getValue(xyz); //std::cout << "curl(" << xyz << ")=" << v << std::endl; ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); } } } } TEST_F(TestCurl, testCurlMaskedTool) { using namespace openvdb; VectorGrid::Ptr inGrid = VectorGrid::create(); const VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); VectorGrid::Accessor inAccessor = inGrid->getAccessor(); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { inAccessor.setValue(Coord(x,y,z), VectorTree::ValueType(float(y), float(-x), 0.f)); } } } EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); openvdb::CoordBBox maskBBox(openvdb::Coord(0), openvdb::Coord(dim)); BoolGrid::Ptr maskGrid = BoolGrid::create(false); maskGrid->fill(maskBBox, true /*value*/, true /*activate*/); openvdb::CoordBBox testBBox(openvdb::Coord(-dim+1), openvdb::Coord(dim)); BoolGrid::Ptr testGrid = BoolGrid::create(false); testGrid->fill(testBBox, true, true); testGrid->topologyIntersection(*maskGrid); VectorGrid::Ptr curl_grid = tools::curl(*inGrid, *maskGrid); EXPECT_EQ(math::Pow3(dim), int(curl_grid->activeVoxelCount())); VectorGrid::ConstAccessor curlAccessor = curl_grid->getConstAccessor(); --dim;//ignore boundary curl vectors for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inAccessor.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( y,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(-x,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[2]); v = curlAccessor.getValue(xyz); if (maskBBox.isInside(xyz)) { ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); } else { // get the background value outside masked region ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); 
ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[2]); } } } } } TEST_F(TestCurl, testISCurl) { using namespace openvdb; VectorGrid::Ptr inGrid = VectorGrid::create(); const VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); VectorGrid::Accessor inAccessor = inGrid->getAccessor(); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { inAccessor.setValue(Coord(x,y,z), VectorTree::ValueType(float(y), float(-x), 0.f)); } } } EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); VectorGrid::Ptr curl_grid = tools::curl(*inGrid); EXPECT_EQ(math::Pow3(2*dim), int(curl_grid->activeVoxelCount())); --dim;//ignore boundary curl vectors // test unit space operators VectorGrid::ConstAccessor inConstAccessor = inGrid->getConstAccessor(); for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inAccessor.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( y,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(-x,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[2]); v = math::ISCurl<math::CD_2ND>::result(inConstAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::ISCurl<math::FD_1ST>::result(inConstAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::ISCurl<math::BD_1ST>::result(inConstAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); } } } --dim;//ignore boundary curl vectors for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inAccessor.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( y,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(-x,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[2]); v = math::ISCurl<math::CD_4TH>::result(inConstAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::ISCurl<math::FD_2ND>::result(inConstAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::ISCurl<math::BD_2ND>::result(inConstAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); } } } --dim;//ignore boundary curl vectors for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inAccessor.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( y, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(-x, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0, v[2]); v = math::ISCurl<math::CD_6TH>::result(inConstAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( 0, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0, v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2, v[2]); v = math::ISCurl<math::FD_3RD>::result(inConstAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( 0, v[0]); EXPECT_NEAR( 0, v[1], /*tolerance=*/0.00001); EXPECT_NEAR(-2, v[2], /*tolerance=*/0.00001); v = math::ISCurl<math::BD_3RD>::result(inConstAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( 0, v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0, v[1]); EXPECT_NEAR(-2, v[2], /*tolerance=*/0.00001); } } } } TEST_F(TestCurl, testISCurlStencil) { using namespace openvdb; VectorGrid::Ptr inGrid 
= VectorGrid::create(); const VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); VectorGrid::Accessor inAccessor = inGrid->getAccessor(); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { inAccessor.setValue(Coord(x,y,z), VectorTree::ValueType(float(y), float(-x), 0.f)); } } } EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); VectorGrid::Ptr curl_grid = tools::curl(*inGrid); EXPECT_EQ(math::Pow3(2*dim), int(curl_grid->activeVoxelCount())); math::SevenPointStencil<VectorGrid> sevenpt(*inGrid); math::ThirteenPointStencil<VectorGrid> thirteenpt(*inGrid); math::NineteenPointStencil<VectorGrid> nineteenpt(*inGrid); // test unit space operators --dim;//ignore boundary curl vectors for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); sevenpt.moveTo(xyz); VectorTree::ValueType v = inAccessor.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( y,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(-x,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[2]); v = math::ISCurl<math::CD_2ND>::result(sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::ISCurl<math::FD_1ST>::result(sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::ISCurl<math::BD_1ST>::result(sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); } } } --dim;//ignore boundary curl vectors for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); thirteenpt.moveTo(xyz); VectorTree::ValueType v = inAccessor.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( y,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(-x,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[2]); v = math::ISCurl<math::CD_4TH>::result(thirteenpt); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::ISCurl<math::FD_2ND>::result(thirteenpt); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::ISCurl<math::BD_2ND>::result(thirteenpt); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); } } } --dim;//ignore boundary curl vectors for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); nineteenpt.moveTo(xyz); VectorTree::ValueType v = inAccessor.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( y,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(-x,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[2]); v = math::ISCurl<math::CD_6TH>::result(nineteenpt); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::ISCurl<math::FD_3RD>::result(nineteenpt); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); EXPECT_NEAR(-2,v[2], /*tolerance=*/0.00001); v = math::ISCurl<math::BD_3RD>::result(nineteenpt); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); EXPECT_NEAR(-2,v[2], /*tolerance=*/0.00001); } } } } TEST_F(TestCurl, testWSCurl) { using namespace openvdb; VectorGrid::Ptr inGrid = VectorGrid::create(); const VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); VectorGrid::Accessor 
inAccessor = inGrid->getAccessor(); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { inAccessor.setValue(Coord(x,y,z), VectorTree::ValueType(float(y), float(-x), 0.f)); } } } EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); VectorGrid::Ptr curl_grid = tools::curl(*inGrid); EXPECT_EQ(math::Pow3(2*dim), int(curl_grid->activeVoxelCount())); // test with a map math::AffineMap map; math::UniformScaleMap uniform_map; // test unit space operators --dim;//ignore boundary curl vectors for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); VectorTree::ValueType v = inAccessor.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( y,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(-x,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[2]); v = math::Curl<math::AffineMap, math::CD_2ND>::result(map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::Curl<math::AffineMap, math::FD_1ST>::result(map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::Curl<math::AffineMap, math::BD_1ST>::result(map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::Curl<math::UniformScaleMap, math::CD_2ND>::result( uniform_map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::Curl<math::UniformScaleMap, math::FD_1ST>::result( uniform_map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::Curl<math::UniformScaleMap, math::BD_1ST>::result( uniform_map, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); } } } } TEST_F(TestCurl, testWSCurlStencil) { using namespace openvdb; VectorGrid::Ptr inGrid = VectorGrid::create(); const VectorTree& inTree = inGrid->tree(); EXPECT_TRUE(inTree.empty()); VectorGrid::Accessor inAccessor = inGrid->getAccessor(); int dim = GRID_DIM; for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { inAccessor.setValue(Coord(x,y,z), VectorTree::ValueType(float(y), float(-x), 0.f)); } } } EXPECT_TRUE(!inTree.empty()); EXPECT_EQ(math::Pow3(2*dim), int(inTree.activeVoxelCount())); VectorGrid::Ptr curl_grid = tools::curl(*inGrid); EXPECT_EQ(math::Pow3(2*dim), int(curl_grid->activeVoxelCount())); // test with a map math::AffineMap map; math::UniformScaleMap uniform_map; math::SevenPointStencil<VectorGrid> sevenpt(*inGrid); math::SecondOrderDenseStencil<VectorGrid> dense_2ndOrder(*inGrid); // test unit space operators --dim;//ignore boundary curl vectors for (int x = -dim; x<dim; ++x) { for (int y = -dim; y<dim; ++y) { for (int z = -dim; z<dim; ++z) { Coord xyz(x,y,z); sevenpt.moveTo(xyz); dense_2ndOrder.moveTo(xyz); VectorTree::ValueType v = inAccessor.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL( y,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(-x,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[2]); v = math::Curl<math::AffineMap, math::CD_2ND>::result(map, dense_2ndOrder); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = 
math::Curl<math::AffineMap, math::FD_1ST>::result(map, dense_2ndOrder); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::Curl<math::AffineMap, math::BD_1ST>::result(map, dense_2ndOrder); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::Curl<math::UniformScaleMap, math::CD_2ND>::result(uniform_map, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::Curl<math::UniformScaleMap, math::FD_1ST>::result(uniform_map, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); v = math::Curl<math::UniformScaleMap, math::BD_1ST>::result(uniform_map, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[0]); ASSERT_DOUBLES_EXACTLY_EQUAL( 0,v[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(-2,v[2]); } } } }
19,834
C++
35.936685
98
0.528739
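// Hedged usage sketch (assumes a standard OpenVDB build with
// <openvdb/tools/GridOperators.h>; grid and variable names are illustrative, not
// part of the file above): TestCurl builds the rotational field v = (y, -x, 0)
// and checks that tools::curl() returns approximately (0, 0, -2) away from the
// boundary. The same pattern in standalone form:
#include <openvdb/openvdb.h>
#include <openvdb/tools/GridOperators.h>
#include <iostream>

int main()
{
    openvdb::initialize();
    openvdb::VectorGrid::Ptr velocity = openvdb::VectorGrid::create();
    openvdb::VectorGrid::Accessor acc = velocity->getAccessor();
    const int dim = 10;
    for (int x = -dim; x < dim; ++x) {
        for (int y = -dim; y < dim; ++y) {
            for (int z = -dim; z < dim; ++z) {
                // Rigid rotation about the z axis: v = (y, -x, 0).
                acc.setValue(openvdb::Coord(x, y, z),
                    openvdb::VectorGrid::ValueType(float(y), float(-x), 0.0f));
            }
        }
    }
    // Curl via second-order central differences; interior voxels hold ~(0, 0, -2).
    openvdb::VectorGrid::Ptr curlGrid = openvdb::tools::curl(*velocity);
    std::cout << curlGrid->tree().getValue(openvdb::Coord(0, 0, 0)) << std::endl;
    return 0;
}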
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestDense.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 //#define BENCHMARK_TEST #include <openvdb/openvdb.h> #include "gtest/gtest.h" #include <openvdb/tools/LevelSetSphere.h> #include <openvdb/tools/Dense.h> #include <openvdb/Exceptions.h> #include <sstream> #ifdef BENCHMARK_TEST #include <openvdb/util/CpuTimer.h> #endif class TestDense: public ::testing::Test { public: template <openvdb::tools::MemoryLayout Layout> void testCopy(); template <openvdb::tools::MemoryLayout Layout> void testCopyBool(); template <openvdb::tools::MemoryLayout Layout> void testCopyFromDenseWithOffset(); template <openvdb::tools::MemoryLayout Layout> void testDense2Sparse(); template <openvdb::tools::MemoryLayout Layout> void testDense2Sparse2(); template <openvdb::tools::MemoryLayout Layout> void testInvalidBBox(); template <openvdb::tools::MemoryLayout Layout> void testDense2Sparse2Dense(); }; TEST_F(TestDense, testDenseZYX) { const openvdb::CoordBBox bbox(openvdb::Coord(-40,-5, 6), openvdb::Coord(-11, 7,22)); openvdb::tools::Dense<float> dense(bbox);//LayoutZYX is the default // Check Desne::origin() EXPECT_TRUE(openvdb::Coord(-40,-5, 6) == dense.origin()); // Check coordToOffset and offsetToCoord size_t offset = 0; for (openvdb::Coord P(bbox.min()); P[0] <= bbox.max()[0]; ++P[0]) { for (P[1] = bbox.min()[1]; P[1] <= bbox.max()[1]; ++P[1]) { for (P[2] = bbox.min()[2]; P[2] <= bbox.max()[2]; ++P[2]) { //std::cerr << "offset = " << offset << " P = " << P << std::endl; EXPECT_EQ(offset, dense.coordToOffset(P)); EXPECT_EQ(P - dense.origin(), dense.offsetToLocalCoord(offset)); EXPECT_EQ(P, dense.offsetToCoord(offset)); ++offset; } } } // Check Dense::valueCount const int size = static_cast<int>(dense.valueCount()); EXPECT_EQ(30*13*17, size); // Check Dense::fill(float) and Dense::getValue(size_t) const float v = 0.234f; dense.fill(v); for (int i=0; i<size; ++i) { EXPECT_NEAR(v, dense.getValue(i),/*tolerance=*/0.0001); } // Check Dense::data() and Dense::getValue(Coord, float) float* a = dense.data(); int s = size; while(s--) EXPECT_NEAR(v, *a++, /*tolerance=*/0.0001); for (openvdb::Coord P(bbox.min()); P[0] <= bbox.max()[0]; ++P[0]) { for (P[1] = bbox.min()[1]; P[1] <= bbox.max()[1]; ++P[1]) { for (P[2] = bbox.min()[2]; P[2] <= bbox.max()[2]; ++P[2]) { EXPECT_NEAR(v, dense.getValue(P), /*tolerance=*/0.0001); } } } // Check Dense::setValue(Coord, float) const openvdb::Coord C(-30, 3,12); const float v1 = 3.45f; dense.setValue(C, v1); for (openvdb::Coord P(bbox.min()); P[0] <= bbox.max()[0]; ++P[0]) { for (P[1] = bbox.min()[1]; P[1] <= bbox.max()[1]; ++P[1]) { for (P[2] = bbox.min()[2]; P[2] <= bbox.max()[2]; ++P[2]) { EXPECT_NEAR(P==C ? v1 : v, dense.getValue(P), /*tolerance=*/0.0001); } } } // Check Dense::setValue(size_t, size_t, size_t, float) dense.setValue(C, v); const openvdb::Coord L(1,2,3), C1 = bbox.min() + L; dense.setValue(L[0], L[1], L[2], v1); for (openvdb::Coord P(bbox.min()); P[0] <= bbox.max()[0]; ++P[0]) { for (P[1] = bbox.min()[1]; P[1] <= bbox.max()[1]; ++P[1]) { for (P[2] = bbox.min()[2]; P[2] <= bbox.max()[2]; ++P[2]) { EXPECT_NEAR(P==C1 ? 
            v1 : v, dense.getValue(P), /*tolerance=*/0.0001);
            }
        }
    }
}

TEST_F(TestDense, testDenseXYZ)
{
    const openvdb::CoordBBox bbox(openvdb::Coord(-40,-5, 6), openvdb::Coord(-11, 7,22));
    openvdb::tools::Dense<float, openvdb::tools::LayoutXYZ> dense(bbox);

    // Check Dense::origin()
    EXPECT_TRUE(openvdb::Coord(-40,-5, 6) == dense.origin());

    // Check coordToOffset and offsetToCoord
    size_t offset = 0;
    for (openvdb::Coord P(bbox.min()); P[2] <= bbox.max()[2]; ++P[2]) {
        for (P[1] = bbox.min()[1]; P[1] <= bbox.max()[1]; ++P[1]) {
            for (P[0] = bbox.min()[0]; P[0] <= bbox.max()[0]; ++P[0]) {
                //std::cerr << "offset = " << offset << " P = " << P << std::endl;
                EXPECT_EQ(offset, dense.coordToOffset(P));
                EXPECT_EQ(P - dense.origin(), dense.offsetToLocalCoord(offset));
                EXPECT_EQ(P, dense.offsetToCoord(offset));
                ++offset;
            }
        }
    }

    // Check Dense::valueCount
    const int size = static_cast<int>(dense.valueCount());
    EXPECT_EQ(30*13*17, size);

    // Check Dense::fill(float) and Dense::getValue(size_t)
    const float v = 0.234f;
    dense.fill(v);
    for (int i=0; i<size; ++i) {
        EXPECT_NEAR(v, dense.getValue(i),/*tolerance=*/0.0001);
    }

    // Check Dense::data() and Dense::getValue(Coord, float)
    float* a = dense.data();
    int s = size;
    while(s--) EXPECT_NEAR(v, *a++, /*tolerance=*/0.0001);
    for (openvdb::Coord P(bbox.min()); P[2] <= bbox.max()[2]; ++P[2]) {
        for (P[1] = bbox.min()[1]; P[1] <= bbox.max()[1]; ++P[1]) {
            for (P[0] = bbox.min()[0]; P[0] <= bbox.max()[0]; ++P[0]) {
                EXPECT_NEAR(v, dense.getValue(P), /*tolerance=*/0.0001);
            }
        }
    }

    // Check Dense::setValue(Coord, float)
    const openvdb::Coord C(-30, 3,12);
    const float v1 = 3.45f;
    dense.setValue(C, v1);
    for (openvdb::Coord P(bbox.min()); P[2] <= bbox.max()[2]; ++P[2]) {
        for (P[1] = bbox.min()[1]; P[1] <= bbox.max()[1]; ++P[1]) {
            for (P[0] = bbox.min()[0]; P[0] <= bbox.max()[0]; ++P[0]) {
                EXPECT_NEAR(P==C ? v1 : v, dense.getValue(P), /*tolerance=*/0.0001);
            }
        }
    }

    // Check Dense::setValue(size_t, size_t, size_t, float)
    dense.setValue(C, v);
    const openvdb::Coord L(1,2,3), C1 = bbox.min() + L;
    dense.setValue(L[0], L[1], L[2], v1);
    for (openvdb::Coord P(bbox.min()); P[2] <= bbox.max()[2]; ++P[2]) {
        for (P[1] = bbox.min()[1]; P[1] <= bbox.max()[1]; ++P[1]) {
            for (P[0] = bbox.min()[0]; P[0] <= bbox.max()[0]; ++P[0]) {
                EXPECT_NEAR(P==C1 ?
v1 : v, dense.getValue(P), /*tolerance=*/0.0001); } } } } // The check is so slow that we're going to multi-thread it :) template <typename TreeT, typename DenseT = openvdb::tools::Dense<typename TreeT::ValueType, openvdb::tools::LayoutZYX> > class CheckDense { public: typedef typename TreeT::ValueType ValueT; CheckDense() : mTree(NULL), mDense(NULL) { EXPECT_TRUE(DenseT::memoryLayout() == openvdb::tools::LayoutZYX || DenseT::memoryLayout() == openvdb::tools::LayoutXYZ ); } void check(const TreeT& tree, const DenseT& dense) { mTree = &tree; mDense = &dense; tbb::parallel_for(dense.bbox(), *this); } void operator()(const openvdb::CoordBBox& bbox) const { openvdb::tree::ValueAccessor<const TreeT> acc(*mTree); if (DenseT::memoryLayout() == openvdb::tools::LayoutZYX) {//resolved at compiletime for (openvdb::Coord P(bbox.min()); P[0] <= bbox.max()[0]; ++P[0]) { for (P[1] = bbox.min()[1]; P[1] <= bbox.max()[1]; ++P[1]) { for (P[2] = bbox.min()[2]; P[2] <= bbox.max()[2]; ++P[2]) { EXPECT_NEAR(acc.getValue(P), mDense->getValue(P), /*tolerance=*/0.0001); } } } } else { for (openvdb::Coord P(bbox.min()); P[2] <= bbox.max()[2]; ++P[2]) { for (P[1] = bbox.min()[1]; P[1] <= bbox.max()[1]; ++P[1]) { for (P[0] = bbox.min()[0]; P[0] <= bbox.max()[0]; ++P[0]) { EXPECT_NEAR(acc.getValue(P), mDense->getValue(P), /*tolerance=*/0.0001); } } } } } private: const TreeT* mTree; const DenseT* mDense; };// CheckDense template <openvdb::tools::MemoryLayout Layout> void TestDense::testCopy() { using namespace openvdb; //std::cerr << "\nTesting testCopy with " // << (Layout == tools::LayoutXYZ ? "XYZ" : "ZYX") << " memory layout" // << std::endl; typedef tools::Dense<float, Layout> DenseT; CheckDense<FloatTree, DenseT> checkDense; const float radius = 10.0f, tolerance = 0.00001f; const Vec3f center(0.0f); // decrease the voxelSize to test larger grids #ifdef BENCHMARK_TEST const float voxelSize = 0.05f, width = 5.0f; #else const float voxelSize = 0.5f, width = 5.0f; #endif // Create a VDB containing a level set of a sphere FloatGrid::Ptr grid = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, width); FloatTree& tree0 = grid->tree(); // Create an empty dense grid DenseT dense(grid->evalActiveVoxelBoundingBox()); #ifdef BENCHMARK_TEST std::cerr << "\nBBox = " << grid->evalActiveVoxelBoundingBox() << std::endl; #endif {//check Dense::fill dense.fill(voxelSize); #ifndef BENCHMARK_TEST checkDense.check(FloatTree(voxelSize), dense); #endif } {// parallel convert to dense #ifdef BENCHMARK_TEST util::CpuTimer ts; ts.start("CopyToDense"); #endif tools::copyToDense(*grid, dense); #ifdef BENCHMARK_TEST ts.stop(); #else checkDense.check(tree0, dense); #endif } {// Parallel create from dense #ifdef BENCHMARK_TEST util::CpuTimer ts; ts.start("CopyFromDense"); #endif FloatTree tree1(tree0.background()); tools::copyFromDense(dense, tree1, tolerance); #ifdef BENCHMARK_TEST ts.stop(); #else checkDense.check(tree1, dense); #endif } } template <openvdb::tools::MemoryLayout Layout> void TestDense::testCopyBool() { using namespace openvdb; //std::cerr << "\nTesting testCopyBool with " // << (Layout == tools::LayoutXYZ ? "XYZ" : "ZYX") << " memory layout" // << std::endl; const Coord bmin(-1), bmax(8); const CoordBBox bbox(bmin, bmax); BoolGrid::Ptr grid = createGrid<BoolGrid>(false); BoolGrid::ConstAccessor acc = grid->getConstAccessor(); typedef openvdb::tools::Dense<bool, Layout> DenseT; DenseT dense(bbox); dense.fill(false); // Start with sparse and dense grids both filled with false. 
Coord xyz; int &x = xyz[0], &y = xyz[1], &z = xyz[2]; for (x = bmin.x(); x <= bmax.x(); ++x) { for (y = bmin.y(); y <= bmax.y(); ++y) { for (z = bmin.z(); z <= bmax.z(); ++z) { EXPECT_EQ(false, dense.getValue(xyz)); EXPECT_EQ(false, acc.getValue(xyz)); } } } // Fill the dense grid with true. dense.fill(true); // Copy the contents of the dense grid to the sparse grid. tools::copyFromDense(dense, *grid, /*tolerance=*/false); // Verify that both sparse and dense grids are now filled with true. for (x = bmin.x(); x <= bmax.x(); ++x) { for (y = bmin.y(); y <= bmax.y(); ++y) { for (z = bmin.z(); z <= bmax.z(); ++z) { EXPECT_EQ(true, dense.getValue(xyz)); EXPECT_EQ(true, acc.getValue(xyz)); } } } // Fill the dense grid with false. dense.fill(false); // Copy the contents (= true) of the sparse grid to the dense grid. tools::copyToDense(*grid, dense); // Verify that the dense grid is now filled with true. for (x = bmin.x(); x <= bmax.x(); ++x) { for (y = bmin.y(); y <= bmax.y(); ++y) { for (z = bmin.z(); z <= bmax.z(); ++z) { EXPECT_EQ(true, dense.getValue(xyz)); } } } } // Test copying from a dense grid to a sparse grid with various bounding boxes. template <openvdb::tools::MemoryLayout Layout> void TestDense::testCopyFromDenseWithOffset() { using namespace openvdb; //std::cerr << "\nTesting testCopyFromDenseWithOffset with " // << (Layout == tools::LayoutXYZ ? "XYZ" : "ZYX") << " memory layout" // << std::endl; typedef openvdb::tools::Dense<float, Layout> DenseT; const int DIM = 20, COUNT = DIM * DIM * DIM; const float FOREGROUND = 99.0f, BACKGROUND = 5000.0f; const int OFFSET[] = { 1, -1, 1001, -1001 }; for (int offsetIdx = 0; offsetIdx < 4; ++offsetIdx) { const int offset = OFFSET[offsetIdx]; const CoordBBox bbox = CoordBBox::createCube(Coord(offset), DIM); DenseT dense(bbox, FOREGROUND); EXPECT_EQ(bbox, dense.bbox()); FloatGrid grid(BACKGROUND); tools::copyFromDense(dense, grid, /*tolerance=*/0.0); const CoordBBox gridBBox = grid.evalActiveVoxelBoundingBox(); EXPECT_EQ(bbox, gridBBox); EXPECT_EQ(COUNT, int(grid.activeVoxelCount())); FloatGrid::ConstAccessor acc = grid.getConstAccessor(); for (int i = gridBBox.min()[0], ie = gridBBox.max()[0]; i < ie; ++i) { for (int j = gridBBox.min()[1], je = gridBBox.max()[1]; j < je; ++j) { for (int k = gridBBox.min()[2], ke = gridBBox.max()[2]; k < ke; ++k) { const Coord ijk(i, j, k); EXPECT_NEAR( FOREGROUND, acc.getValue(ijk), /*tolerance=*/0.0); EXPECT_TRUE(acc.isValueOn(ijk)); } } } } } template <openvdb::tools::MemoryLayout Layout> void TestDense::testDense2Sparse() { // The following test revealed a bug in v2.0.0b2 using namespace openvdb; //std::cerr << "\nTesting testDense2Sparse with " // << (Layout == tools::LayoutXYZ ? "XYZ" : "ZYX") << " memory layout" // << std::endl; typedef tools::Dense<float, Layout> DenseT; // Test Domain Resolution Int32 sizeX = 8, sizeY = 8, sizeZ = 9; // Define a dense grid DenseT dense(Coord(sizeX, sizeY, sizeZ)); const CoordBBox bboxD = dense.bbox(); // std::cerr << "\nDense bbox" << bboxD << std::endl; // Verify that the CoordBBox is truely used as [inclusive, inclusive] EXPECT_TRUE(int(dense.valueCount()) == int(sizeX * sizeY * sizeZ)); // Fill the dense grid with constant value 1. 
dense.fill(1.0f); // Create two empty float grids FloatGrid::Ptr gridS = FloatGrid::create(0.0f /*background*/); FloatGrid::Ptr gridP = FloatGrid::create(0.0f /*background*/); // Convert in serial and parallel modes tools::copyFromDense(dense, *gridS, /*tolerance*/0.0f, /*serial = */ true); tools::copyFromDense(dense, *gridP, /*tolerance*/0.0f, /*serial = */ false); float minS, maxS; float minP, maxP; gridS->evalMinMax(minS, maxS); gridP->evalMinMax(minP, maxP); const float tolerance = 0.0001f; EXPECT_NEAR(minS, minP, tolerance); EXPECT_NEAR(maxS, maxP, tolerance); EXPECT_EQ(gridP->activeVoxelCount(), Index64(sizeX * sizeY * sizeZ)); const FloatTree& treeS = gridS->tree(); const FloatTree& treeP = gridP->tree(); // Values in Test Domain are correct for (Coord ijk(bboxD.min()); ijk[0] <= bboxD.max()[0]; ++ijk[0]) { for (ijk[1] = bboxD.min()[1]; ijk[1] <= bboxD.max()[1]; ++ijk[1]) { for (ijk[2] = bboxD.min()[2]; ijk[2] <= bboxD.max()[2]; ++ijk[2]) { const float expected = bboxD.isInside(ijk) ? 1.f : 0.f; EXPECT_NEAR(expected, 1.f, tolerance); const float& vS = treeS.getValue(ijk); const float& vP = treeP.getValue(ijk); EXPECT_NEAR(expected, vS, tolerance); EXPECT_NEAR(expected, vP, tolerance); } } } CoordBBox bboxP = gridP->evalActiveVoxelBoundingBox(); const Index64 voxelCountP = gridP->activeVoxelCount(); //std::cerr << "\nParallel: bbox=" << bboxP << " voxels=" << voxelCountP << std::endl; EXPECT_TRUE( bboxP == bboxD ); EXPECT_EQ( dense.valueCount(), voxelCountP); CoordBBox bboxS = gridS->evalActiveVoxelBoundingBox(); const Index64 voxelCountS = gridS->activeVoxelCount(); //std::cerr << "\nSerial: bbox=" << bboxS << " voxels=" << voxelCountS << std::endl; EXPECT_TRUE( bboxS == bboxD ); EXPECT_EQ( dense.valueCount(), voxelCountS); // Topology EXPECT_TRUE( bboxS.isInside(bboxS) ); EXPECT_TRUE( bboxP.isInside(bboxP) ); EXPECT_TRUE( bboxS.isInside(bboxP) ); EXPECT_TRUE( bboxP.isInside(bboxS) ); /// Check that the two grids agree for (Coord ijk(bboxS.min()); ijk[0] <= bboxS.max()[0]; ++ijk[0]) { for (ijk[1] = bboxS.min()[1]; ijk[1] <= bboxS.max()[1]; ++ijk[1]) { for (ijk[2] = bboxS.min()[2]; ijk[2] <= bboxS.max()[2]; ++ijk[2]) { const float& vS = treeS.getValue(ijk); const float& vP = treeP.getValue(ijk); EXPECT_NEAR(vS, vP, tolerance); // the value we should get based on the original domain const float expected = bboxD.isInside(ijk) ? 1.f : 0.f; EXPECT_NEAR(expected, vP, tolerance); EXPECT_NEAR(expected, vS, tolerance); } } } // Verify the tree topology matches. EXPECT_EQ(gridP->activeVoxelCount(), gridS->activeVoxelCount()); EXPECT_TRUE(gridP->evalActiveVoxelBoundingBox() == gridS->evalActiveVoxelBoundingBox()); EXPECT_TRUE(treeP.hasSameTopology(treeS) ); } template <openvdb::tools::MemoryLayout Layout> void TestDense::testDense2Sparse2() { // The following tests copying a dense grid into a VDB tree with // existing values outside the bbox of the dense grid. using namespace openvdb; //std::cerr << "\nTesting testDense2Sparse2 with " // << (Layout == tools::LayoutXYZ ? 
"XYZ" : "ZYX") << " memory layout" // << std::endl; typedef tools::Dense<float, Layout> DenseT; // Test Domain Resolution const int sizeX = 8, sizeY = 8, sizeZ = 9; const Coord magicVoxel(sizeX, sizeY, sizeZ); // Define a dense grid DenseT dense(Coord(sizeX, sizeY, sizeZ)); const CoordBBox bboxD = dense.bbox(); //std::cerr << "\nDense bbox" << bboxD << std::endl; // Verify that the CoordBBox is truely used as [inclusive, inclusive] EXPECT_EQ(sizeX * sizeY * sizeZ, static_cast<int>(dense.valueCount())); // Fill the dense grid with constant value 1. dense.fill(1.0f); // Create two empty float grids FloatGrid::Ptr gridS = FloatGrid::create(0.0f /*background*/); FloatGrid::Ptr gridP = FloatGrid::create(0.0f /*background*/); gridS->tree().setValue(magicVoxel, 5.0f); gridP->tree().setValue(magicVoxel, 5.0f); // Convert in serial and parallel modes tools::copyFromDense(dense, *gridS, /*tolerance*/0.0f, /*serial = */ true); tools::copyFromDense(dense, *gridP, /*tolerance*/0.0f, /*serial = */ false); float minS, maxS; float minP, maxP; gridS->evalMinMax(minS, maxS); gridP->evalMinMax(minP, maxP); const float tolerance = 0.0001f; EXPECT_NEAR(1.0f, minP, tolerance); EXPECT_NEAR(1.0f, minS, tolerance); EXPECT_NEAR(5.0f, maxP, tolerance); EXPECT_NEAR(5.0f, maxS, tolerance); EXPECT_EQ(gridP->activeVoxelCount(), Index64(1 + sizeX * sizeY * sizeZ)); const FloatTree& treeS = gridS->tree(); const FloatTree& treeP = gridP->tree(); // Values in Test Domain are correct for (Coord ijk(bboxD.min()); ijk[0] <= bboxD.max()[0]; ++ijk[0]) { for (ijk[1] = bboxD.min()[1]; ijk[1] <= bboxD.max()[1]; ++ijk[1]) { for (ijk[2] = bboxD.min()[2]; ijk[2] <= bboxD.max()[2]; ++ijk[2]) { const float expected = bboxD.isInside(ijk) ? 1.0f : 0.0f; EXPECT_NEAR(expected, 1.0f, tolerance); const float& vS = treeS.getValue(ijk); const float& vP = treeP.getValue(ijk); EXPECT_NEAR(expected, vS, tolerance); EXPECT_NEAR(expected, vP, tolerance); } } } CoordBBox bboxP = gridP->evalActiveVoxelBoundingBox(); const Index64 voxelCountP = gridP->activeVoxelCount(); //std::cerr << "\nParallel: bbox=" << bboxP << " voxels=" << voxelCountP << std::endl; EXPECT_TRUE( bboxP != bboxD ); EXPECT_TRUE( bboxP == CoordBBox(Coord(0,0,0), magicVoxel) ); EXPECT_EQ( dense.valueCount()+1, voxelCountP); CoordBBox bboxS = gridS->evalActiveVoxelBoundingBox(); const Index64 voxelCountS = gridS->activeVoxelCount(); //std::cerr << "\nSerial: bbox=" << bboxS << " voxels=" << voxelCountS << std::endl; EXPECT_TRUE( bboxS != bboxD ); EXPECT_TRUE( bboxS == CoordBBox(Coord(0,0,0), magicVoxel) ); EXPECT_EQ( dense.valueCount()+1, voxelCountS); // Topology EXPECT_TRUE( bboxS.isInside(bboxS) ); EXPECT_TRUE( bboxP.isInside(bboxP) ); EXPECT_TRUE( bboxS.isInside(bboxP) ); EXPECT_TRUE( bboxP.isInside(bboxS) ); /// Check that the two grids agree for (Coord ijk(bboxS.min()); ijk[0] <= bboxS.max()[0]; ++ijk[0]) { for (ijk[1] = bboxS.min()[1]; ijk[1] <= bboxS.max()[1]; ++ijk[1]) { for (ijk[2] = bboxS.min()[2]; ijk[2] <= bboxS.max()[2]; ++ijk[2]) { const float& vS = treeS.getValue(ijk); const float& vP = treeP.getValue(ijk); EXPECT_NEAR(vS, vP, tolerance); // the value we should get based on the original domain const float expected = bboxD.isInside(ijk) ? 1.0f : ijk == magicVoxel ? 5.0f : 0.0f; EXPECT_NEAR(expected, vP, tolerance); EXPECT_NEAR(expected, vS, tolerance); } } } // Verify the tree topology matches. 
EXPECT_EQ(gridP->activeVoxelCount(), gridS->activeVoxelCount()); EXPECT_TRUE(gridP->evalActiveVoxelBoundingBox() == gridS->evalActiveVoxelBoundingBox()); EXPECT_TRUE(treeP.hasSameTopology(treeS) ); } template <openvdb::tools::MemoryLayout Layout> void TestDense::testInvalidBBox() { using namespace openvdb; //std::cerr << "\nTesting testInvalidBBox with " // << (Layout == tools::LayoutXYZ ? "XYZ" : "ZYX") << " memory layout" // << std::endl; typedef tools::Dense<float, Layout> DenseT; const CoordBBox badBBox(Coord(1, 1, 1), Coord(-1, 2, 2)); EXPECT_TRUE(badBBox.empty()); EXPECT_THROW(DenseT dense(badBBox), ValueError); } template <openvdb::tools::MemoryLayout Layout> void TestDense::testDense2Sparse2Dense() { using namespace openvdb; //std::cerr << "\nTesting testDense2Sparse2Dense with " // << (Layout == tools::LayoutXYZ ? "XYZ" : "ZYX") << " memory layout" // << std::endl; typedef tools::Dense<float, Layout> DenseT; const CoordBBox bboxBig(Coord(-12, 7, -32), Coord(12, 14, -15)); const CoordBBox bboxSmall(Coord(-10, 8, -31), Coord(10, 12, -20)); // A larger bbox CoordBBox bboxBigger = bboxBig; bboxBigger.expand(Coord(10)); // Small is in big EXPECT_TRUE(bboxBig.isInside(bboxSmall)); // Big is in Bigger EXPECT_TRUE(bboxBigger.isInside(bboxBig)); // Construct a small dense grid DenseT denseSmall(bboxSmall, 0.f); { // insert non-const values const int n = static_cast<int>(denseSmall.valueCount()); float* d = denseSmall.data(); for (int i = 0; i < n; ++i) { d[i] = static_cast<float>(i); } } // Construct large dense grid DenseT denseBig(bboxBig, 0.f); { // insert non-const values const int n = static_cast<int>(denseBig.valueCount()); float* d = denseBig.data(); for (int i = 0; i < n; ++i) { d[i] = static_cast<float>(i); } } // Make a sparse grid to copy this data into FloatGrid::Ptr grid = FloatGrid::create(3.3f /*background*/); tools::copyFromDense(denseBig, *grid, /*tolerance*/0.0f, /*serial = */ true); tools::copyFromDense(denseSmall, *grid, /*tolerance*/0.0f, /*serial = */ false); const FloatTree& tree = grid->tree(); // EXPECT_EQ(bboxBig.volume(), grid->activeVoxelCount()); // iterate over the Bigger for (Coord ijk(bboxBigger.min()); ijk[0] <= bboxBigger.max()[0]; ++ijk[0]) { for (ijk[1] = bboxBigger.min()[1]; ijk[1] <= bboxBigger.max()[1]; ++ijk[1]) { for (ijk[2] = bboxBigger.min()[2]; ijk[2] <= bboxBigger.max()[2]; ++ijk[2]) { float expected = 3.3f; if (bboxSmall.isInside(ijk)) { expected = denseSmall.getValue(ijk); } else if (bboxBig.isInside(ijk)) { expected = denseBig.getValue(ijk); } const float& value = tree.getValue(ijk); EXPECT_NEAR(expected, value, 0.0001); } } } // Convert to Dense in small bbox { DenseT denseSmall2(bboxSmall); tools::copyToDense(*grid, denseSmall2, true /* serial */); // iterate over the Bigger for (Coord ijk(bboxSmall.min()); ijk[0] <= bboxSmall.max()[0]; ++ijk[0]) { for (ijk[1] = bboxSmall.min()[1]; ijk[1] <= bboxSmall.max()[1]; ++ijk[1]) { for (ijk[2] = bboxSmall.min()[2]; ijk[2] <= bboxSmall.max()[2]; ++ijk[2]) { const float& expected = denseSmall.getValue(ijk); const float& value = denseSmall2.getValue(ijk); EXPECT_NEAR(expected, value, 0.0001); } } } } // Convert to Dense in large bbox { DenseT denseBig2(bboxBig); tools::copyToDense(*grid, denseBig2, false /* serial */); // iterate over the Bigger for (Coord ijk(bboxBig.min()); ijk[0] <= bboxBig.max()[0]; ++ijk[0]) { for (ijk[1] = bboxBig.min()[1]; ijk[1] <= bboxBig.max()[1]; ++ijk[1]) { for (ijk[2] = bboxBig.min()[2]; ijk[2] <= bboxBig.max()[2]; ++ijk[2]) { float expected = -1.f; // should never be this 
if (bboxSmall.isInside(ijk)) { expected = denseSmall.getValue(ijk); } else if (bboxBig.isInside(ijk)) { expected = denseBig.getValue(ijk); } const float& value = denseBig2.getValue(ijk); EXPECT_NEAR(expected, value, 0.0001); } } } } } TEST_F(TestDense, testCopyZYX) { this->testCopy<openvdb::tools::LayoutZYX>(); } TEST_F(TestDense, testCopyXYZ) { this->testCopy<openvdb::tools::LayoutXYZ>(); } TEST_F(TestDense, testCopyBoolZYX) { this->testCopyBool<openvdb::tools::LayoutZYX>(); } TEST_F(TestDense, testCopyBoolXYZ) { this->testCopyBool<openvdb::tools::LayoutXYZ>(); } TEST_F(TestDense, testCopyFromDenseWithOffsetZYX) { this->testCopyFromDenseWithOffset<openvdb::tools::LayoutZYX>(); } TEST_F(TestDense, testCopyFromDenseWithOffsetXYZ) { this->testCopyFromDenseWithOffset<openvdb::tools::LayoutXYZ>(); } TEST_F(TestDense, testDense2SparseZYX) { this->testDense2Sparse<openvdb::tools::LayoutZYX>(); } TEST_F(TestDense, testDense2SparseXYZ) { this->testDense2Sparse<openvdb::tools::LayoutXYZ>(); } TEST_F(TestDense, testDense2Sparse2ZYX) { this->testDense2Sparse2<openvdb::tools::LayoutZYX>(); } TEST_F(TestDense, testDense2Sparse2XYZ) { this->testDense2Sparse2<openvdb::tools::LayoutXYZ>(); } TEST_F(TestDense, testInvalidBBoxZYX) { this->testInvalidBBox<openvdb::tools::LayoutZYX>(); } TEST_F(TestDense, testInvalidBBoxXYZ) { this->testInvalidBBox<openvdb::tools::LayoutXYZ>(); } TEST_F(TestDense, testDense2Sparse2DenseZYX) { this->testDense2Sparse2Dense<openvdb::tools::LayoutZYX>(); } TEST_F(TestDense, testDense2Sparse2DenseXYZ) { this->testDense2Sparse2Dense<openvdb::tools::LayoutXYZ>(); } #undef BENCHMARK_TEST
28,310
C++
34.566583
117
0.562734
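// Hedged usage sketch (assumes a standard OpenVDB build and <openvdb/tools/Dense.h>;
// names such as `dense` and `grid` are illustrative, not part of the file above):
// TestDense round-trips values between a sparse FloatGrid and a dense, contiguous
// buffer. The core calls look like this:
#include <openvdb/openvdb.h>
#include <openvdb/tools/Dense.h>
#include <iostream>

int main()
{
    openvdb::initialize();
    // A dense buffer over an inclusive index-space bounding box (LayoutZYX by default).
    const openvdb::CoordBBox bbox(openvdb::Coord(0), openvdb::Coord(7));
    openvdb::tools::Dense<float> dense(bbox);
    dense.fill(1.0f);

    // Sparse <- dense: values within `tolerance` of the background stay inactive.
    openvdb::FloatGrid::Ptr grid = openvdb::FloatGrid::create(/*background=*/0.0f);
    openvdb::tools::copyFromDense(dense, *grid, /*tolerance=*/0.0f);

    // Dense <- sparse: overwrite the buffer from the grid (and its background).
    openvdb::tools::copyToDense(*grid, dense);

    std::cout << grid->activeVoxelCount() << " active voxels" << std::endl;
    return 0;
}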
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestMetadataIO.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Metadata.h> #include <openvdb/Types.h> #include <iostream> #include <sstream> class TestMetadataIO: public ::testing::Test { public: template <typename T> void test(); template <typename T> void testMultiple(); }; namespace { template<typename T> struct Value { static T create(int i) { return T(i); } }; template<> struct Value<std::string> { static std::string create(int i) { return "test" + std::to_string(i); } }; template<typename T> struct Value<openvdb::math::Vec2<T>> { using ValueType = openvdb::math::Vec2<T>; static ValueType create(int i) { return ValueType(i, i+1); } }; template<typename T> struct Value<openvdb::math::Vec3<T>> { using ValueType = openvdb::math::Vec3<T>; static ValueType create(int i) { return ValueType(i, i+1, i+2); } }; template<typename T> struct Value<openvdb::math::Vec4<T>> { using ValueType = openvdb::math::Vec4<T>; static ValueType create(int i) { return ValueType(i, i+1, i+2, i+3); } }; } template <typename T> void TestMetadataIO::test() { using namespace openvdb; const T val = Value<T>::create(1); TypedMetadata<T> m(val); std::ostringstream ostr(std::ios_base::binary); m.write(ostr); std::istringstream istr(ostr.str(), std::ios_base::binary); TypedMetadata<T> tm; tm.read(istr); OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN EXPECT_EQ(val, tm.value()); OPENVDB_NO_FP_EQUALITY_WARNING_END } template <typename T> void TestMetadataIO::testMultiple() { using namespace openvdb; const T val1 = Value<T>::create(1), val2 = Value<T>::create(2); TypedMetadata<T> m1(val1); TypedMetadata<T> m2(val2); std::ostringstream ostr(std::ios_base::binary); m1.write(ostr); m2.write(ostr); std::istringstream istr(ostr.str(), std::ios_base::binary); TypedMetadata<T> tm1, tm2; tm1.read(istr); tm2.read(istr); OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN EXPECT_EQ(val1, tm1.value()); EXPECT_EQ(val2, tm2.value()); OPENVDB_NO_FP_EQUALITY_WARNING_END } TEST_F(TestMetadataIO, testInt) { test<int>(); } TEST_F(TestMetadataIO, testMultipleInt) { testMultiple<int>(); } TEST_F(TestMetadataIO, testInt64) { test<int64_t>(); } TEST_F(TestMetadataIO, testMultipleInt64) { testMultiple<int64_t>(); } TEST_F(TestMetadataIO, testFloat) { test<float>(); } TEST_F(TestMetadataIO, testMultipleFloat) { testMultiple<float>(); } TEST_F(TestMetadataIO, testDouble) { test<double>(); } TEST_F(TestMetadataIO, testMultipleDouble) { testMultiple<double>(); } TEST_F(TestMetadataIO, testString) { test<std::string>(); } TEST_F(TestMetadataIO, testMultipleString) { testMultiple<std::string>(); } TEST_F(TestMetadataIO, testVec3R) { test<openvdb::Vec3R>(); } TEST_F(TestMetadataIO, testMultipleVec3R) { testMultiple<openvdb::Vec3R>(); } TEST_F(TestMetadataIO, testVec2i) { test<openvdb::Vec2i>(); } TEST_F(TestMetadataIO, testMultipleVec2i) { testMultiple<openvdb::Vec2i>(); } TEST_F(TestMetadataIO, testVec4d) { test<openvdb::Vec4d>(); } TEST_F(TestMetadataIO, testMultipleVec4d) { testMultiple<openvdb::Vec4d>(); }
3,223
C++
25.644628
78
0.683835
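// Hedged usage sketch (assumes a standard OpenVDB build; stream and variable names
// are illustrative, not part of the file above): TestMetadataIO serializes
// TypedMetadata<T> to a binary stream and reads it back, which in isolation is:
#include <openvdb/openvdb.h>
#include <iostream>
#include <sstream>

int main()
{
    openvdb::TypedMetadata<float> out(3.5f);
    std::ostringstream ostr(std::ios_base::binary);
    out.write(ostr);                       // serialize the value

    std::istringstream istr(ostr.str(), std::ios_base::binary);
    openvdb::TypedMetadata<float> in;
    in.read(istr);                         // deserialize into a fresh object

    std::cout << in.value() << std::endl;  // prints 3.5
    return 0;
}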
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestCpt.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <sstream> #include "gtest/gtest.h" #include <openvdb/Types.h> #include <openvdb/openvdb.h> #include <openvdb/tools/GridOperators.h> #include <openvdb/math/Stencils.h> // for old GradientStencil #include "util.h" // for unittest_util::makeSphere() #define ASSERT_DOUBLES_EXACTLY_EQUAL(expected, actual) \ EXPECT_NEAR((expected), (actual), /*tolerance=*/0.0); class TestCpt: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; TEST_F(TestCpt, testCpt) { using namespace openvdb; typedef FloatGrid::ConstAccessor AccessorType; { // unit voxel size tests FloatGrid::Ptr grid = FloatGrid::create(/*background=*/5.0); const FloatTree& tree = grid->tree(); EXPECT_TRUE(tree.empty()); const Coord dim(64,64,64); const Vec3f center(35.0, 30.0f, 40.0f); const float radius=0;//point at {35,30,40} unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(tree.activeVoxelCount())); AccessorType inAccessor = grid->getConstAccessor(); // this uses the gradient. Only test for a few maps, since the gradient is // tested elsewhere Coord xyz(35,30,30); math::TranslationMap translate; // Note the CPT::result is in continuous index space Vec3f P = math::CPT<math::TranslationMap, math::CD_2ND>::result(translate, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(center[0],P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[1],P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[2],P[2]); // CPT_RANGE::result is in the range of the map // CPT_RANGE::result = map.applyMap(CPT::result()) // for our tests, the map is an identity so in this special case // the two versions of the Cpt should exactly agree P = math::CPT_RANGE<math::TranslationMap, math::CD_2ND>::result(translate, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(center[0],P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[1],P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[2],P[2]); xyz.reset(35,30,35); P = math::CPT<math::TranslationMap, math::CD_2ND>::result(translate, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(center[0],P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[1],P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[2],P[2]); P = math::CPT_RANGE<math::TranslationMap, math::CD_2ND>::result(translate, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(center[0],P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[1],P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[2],P[2]); } { // NON-UNIT VOXEL SIZE double voxel_size = 0.5; FloatGrid::Ptr grid = FloatGrid::create(/*backgroundValue=*/5.0); grid->setTransform(math::Transform::createLinearTransform(voxel_size)); EXPECT_TRUE(grid->empty()); AccessorType inAccessor = grid->getConstAccessor(); const openvdb::Coord dim(32,32,32); const openvdb::Vec3f center(6.0f, 8.0f, 10.0f);//i.e. (12,16,20) in index space const float radius=10;//i.e. (16,8,10) and (6,8,0) are on the sphere unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!grid->empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(grid->activeVoxelCount())); Coord xyz(20,16,20);//i.e. 
(10,8,10) in world space or 6 world units inside the sphere math::AffineMap affine(voxel_size*math::Mat3d::identity()); Vec3f P = math::CPT<math::AffineMap, math::CD_2ND>::result(affine, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(32,P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(16,P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(20,P[2]); P = math::CPT_RANGE<math::AffineMap, math::CD_2ND>::result(affine, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(16,P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(8,P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(10,P[2]); xyz.reset(12,16,10); P = math::CPT<math::AffineMap, math::CD_2ND>::result(affine, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(12,P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(16,P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0,P[2]); P = math::CPT_RANGE<math::AffineMap, math::CD_2ND>::result(affine, inAccessor, xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(6,P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(8,P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0,P[2]); } { // NON-UNIFORM SCALING Vec3d voxel_sizes(0.5, 1, 0.5); math::MapBase::Ptr base_map( new math::ScaleMap(voxel_sizes)); FloatGrid::Ptr grid = FloatGrid::create(/*backgroundValue=*/5.0); grid->setTransform(math::Transform::Ptr(new math::Transform(base_map))); EXPECT_TRUE(grid->empty()); AccessorType inAccessor = grid->getConstAccessor(); const openvdb::Coord dim(32,32,32); const openvdb::Vec3f center(6.0f, 8.0f, 10.0f);//i.e. (12,16,20) in index space const float radius=10;//i.e. (16,8,10) and (6,8,0) are on the sphere unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!grid->empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(grid->activeVoxelCount())); Coord ijk = grid->transform().worldToIndexNodeCentered(Vec3d(10,8,10)); //Coord xyz(20,16,20);//i.e. (10,8,10) in world space or 6 world units inside the sphere math::ScaleMap scale(voxel_sizes); Vec3f P; P = math::CPT<math::ScaleMap, math::CD_2ND>::result(scale, inAccessor, ijk); ASSERT_DOUBLES_EXACTLY_EQUAL(32,P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(8,P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(20,P[2]); // world space result P = math::CPT_RANGE<math::ScaleMap, math::CD_2ND>::result(scale, inAccessor, ijk); EXPECT_NEAR(16,P[0], 0.02 ); EXPECT_NEAR(8, P[1], 0.02); EXPECT_NEAR(10,P[2], 0.02); //xyz.reset(12,16,10); ijk = grid->transform().worldToIndexNodeCentered(Vec3d(6,8,5)); P = math::CPT<math::ScaleMap, math::CD_2ND>::result(scale, inAccessor, ijk); ASSERT_DOUBLES_EXACTLY_EQUAL(12,P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(8,P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0,P[2]); P = math::CPT_RANGE<math::ScaleMap, math::CD_2ND>::result(scale, inAccessor, ijk); EXPECT_NEAR(6,P[0], 0.02); EXPECT_NEAR(8,P[1], 0.02); EXPECT_NEAR(0,P[2], 0.02); } } TEST_F(TestCpt, testCptStencil) { using namespace openvdb; { // UNIT VOXEL TEST FloatGrid::Ptr grid = FloatGrid::create(/*background=*/5.0); const FloatTree& tree = grid->tree(); EXPECT_TRUE(tree.empty()); const openvdb::Coord dim(64,64,64); const openvdb::Vec3f center(35.0f ,30.0f, 40.0f); const float radius=0.0f; unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(tree.activeVoxelCount())); // this uses the gradient. 
Only test for a few maps, since the gradient is // tested elsewhere math::SevenPointStencil<FloatGrid> sevenpt(*grid); math::SecondOrderDenseStencil<FloatGrid> dense_2nd(*grid); Coord xyz(35,30,30); EXPECT_TRUE(tree.isValueOn(xyz)); sevenpt.moveTo(xyz); dense_2nd.moveTo(xyz); math::TranslationMap translate; // Note the CPT::result is in continuous index space Vec3f P = math::CPT<math::TranslationMap, math::CD_2ND>::result(translate, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(center[0],P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[1],P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[2],P[2]); // CPT_RANGE::result_stencil is in the range of the map // CPT_RANGE::result_stencil = map.applyMap(CPT::result_stencil()) // for our tests, the map is an identity so in this special case // the two versions of the Cpt should exactly agree P = math::CPT_RANGE<math::TranslationMap, math::CD_2ND>::result(translate, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(center[0],P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[1],P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[2],P[2]); xyz.reset(35,30,35); sevenpt.moveTo(xyz); dense_2nd.moveTo(xyz); EXPECT_TRUE(tree.isValueOn(xyz)); P = math::CPT<math::TranslationMap, math::CD_2ND>::result(translate, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(center[0],P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[1],P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[2],P[2]); P = math::CPT_RANGE<math::TranslationMap, math::CD_2ND>::result(translate, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(center[0],P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[1],P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[2],P[2]); xyz.reset(35,30,30); sevenpt.moveTo(xyz); dense_2nd.moveTo(xyz); math::AffineMap affine; P = math::CPT<math::AffineMap, math::CD_2ND>::result(affine, dense_2nd); ASSERT_DOUBLES_EXACTLY_EQUAL(center[0],P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[1],P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[2],P[2]); P = math::CPT_RANGE<math::AffineMap, math::CD_2ND>::result(affine, dense_2nd); ASSERT_DOUBLES_EXACTLY_EQUAL(center[0],P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[1],P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[2],P[2]); xyz.reset(35,30,35); sevenpt.moveTo(xyz); dense_2nd.moveTo(xyz); EXPECT_TRUE(tree.isValueOn(xyz)); P = math::CPT<math::AffineMap, math::CD_2ND>::result(affine, dense_2nd); ASSERT_DOUBLES_EXACTLY_EQUAL(center[0],P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[1],P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[2],P[2]); EXPECT_TRUE(tree.isValueOn(xyz)); P = math::CPT_RANGE<math::AffineMap, math::CD_2ND>::result(affine, dense_2nd); ASSERT_DOUBLES_EXACTLY_EQUAL(center[0],P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[1],P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[2],P[2]); } { // NON-UNIT VOXEL SIZE double voxel_size = 0.5; FloatGrid::Ptr grid = FloatGrid::create(/*backgroundValue=*/5.0); grid->setTransform(math::Transform::createLinearTransform(voxel_size)); EXPECT_TRUE(grid->empty()); const openvdb::Coord dim(32,32,32); const openvdb::Vec3f center(6.0f, 8.0f, 10.0f);//i.e. (12,16,20) in index space const float radius=10;//i.e. (16,8,10) and (6,8,0) are on the sphere unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!grid->empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(grid->activeVoxelCount())); math::SecondOrderDenseStencil<FloatGrid> dense_2nd(*grid); Coord xyz(20,16,20);//i.e. 
(10,8,10) in world space or 6 world units inside the sphere math::AffineMap affine(voxel_size*math::Mat3d::identity()); dense_2nd.moveTo(xyz); Vec3f P = math::CPT<math::AffineMap, math::CD_2ND>::result(affine, dense_2nd); ASSERT_DOUBLES_EXACTLY_EQUAL(32,P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(16,P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(20,P[2]); P = math::CPT_RANGE<math::AffineMap, math::CD_2ND>::result(affine, dense_2nd); ASSERT_DOUBLES_EXACTLY_EQUAL(16,P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(8,P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(10,P[2]); xyz.reset(12,16,10); dense_2nd.moveTo(xyz); P = math::CPT<math::AffineMap, math::CD_2ND>::result(affine, dense_2nd); ASSERT_DOUBLES_EXACTLY_EQUAL(12,P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(16,P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0,P[2]); P = math::CPT_RANGE<math::AffineMap, math::CD_2ND>::result(affine, dense_2nd); ASSERT_DOUBLES_EXACTLY_EQUAL(6,P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(8,P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0,P[2]); } { // NON-UNIFORM SCALING Vec3d voxel_sizes(0.5, 1, 0.5); math::MapBase::Ptr base_map( new math::ScaleMap(voxel_sizes)); FloatGrid::Ptr grid = FloatGrid::create(/*backgroundValue=*/5.0); grid->setTransform(math::Transform::Ptr(new math::Transform(base_map))); EXPECT_TRUE(grid->empty()); const openvdb::Coord dim(32,32,32); const openvdb::Vec3f center(6.0f, 8.0f, 10.0f);//i.e. (12,16,20) in index space const float radius=10;//i.e. (16,8,10) and (6,8,0) are on the sphere unittest_util::makeSphere<FloatGrid>( dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!grid->empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(grid->activeVoxelCount())); Coord ijk = grid->transform().worldToIndexNodeCentered(Vec3d(10,8,10)); math::SevenPointStencil<FloatGrid> sevenpt(*grid); sevenpt.moveTo(ijk); //Coord xyz(20,16,20);//i.e. 
(10,8,10) in world space or 6 world units inside the sphere math::ScaleMap scale(voxel_sizes); Vec3f P; P = math::CPT<math::ScaleMap, math::CD_2ND>::result(scale, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(32,P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(8,P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(20,P[2]); // world space result P = math::CPT_RANGE<math::ScaleMap, math::CD_2ND>::result(scale, sevenpt); EXPECT_NEAR(16,P[0], 0.02 ); EXPECT_NEAR(8, P[1], 0.02); EXPECT_NEAR(10,P[2], 0.02); //xyz.reset(12,16,10); ijk = grid->transform().worldToIndexNodeCentered(Vec3d(6,8,5)); sevenpt.moveTo(ijk); P = math::CPT<math::ScaleMap, math::CD_2ND>::result(scale, sevenpt); ASSERT_DOUBLES_EXACTLY_EQUAL(12,P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(8,P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0,P[2]); P = math::CPT_RANGE<math::ScaleMap, math::CD_2ND>::result(scale, sevenpt); EXPECT_NEAR(6,P[0], 0.02); EXPECT_NEAR(8,P[1], 0.02); EXPECT_NEAR(0,P[2], 0.02); } } TEST_F(TestCpt, testCptTool) { using namespace openvdb; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/5.0); const FloatTree& tree = grid->tree(); EXPECT_TRUE(tree.empty()); const openvdb::Coord dim(64,64,64); const openvdb::Vec3f center(35.0f, 30.0f, 40.0f); const float radius=0;//point at {35,30,40} unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(tree.activeVoxelCount())); // run the tool typedef openvdb::tools::Cpt<FloatGrid> FloatCpt; FloatCpt cpt(*grid); FloatCpt::OutGridType::Ptr cptGrid = cpt.process(true/*threaded*/, false/*use world transform*/); FloatCpt::OutGridType::ConstAccessor cptAccessor = cptGrid->getConstAccessor(); Coord xyz(35,30,30); EXPECT_TRUE(tree.isValueOn(xyz)); Vec3f P = cptAccessor.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(center[0],P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[1],P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[2],P[2]); xyz.reset(35,30,35); EXPECT_TRUE(tree.isValueOn(xyz)); P = cptAccessor.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(center[0],P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[1],P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[2],P[2]); } TEST_F(TestCpt, testCptMaskedTool) { using namespace openvdb; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/5.0); const FloatTree& tree = grid->tree(); EXPECT_TRUE(tree.empty()); const openvdb::Coord dim(64,64,64); const openvdb::Vec3f center(35.0f, 30.0f, 40.0f); const float radius=0;//point at {35,30,40} unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(tree.activeVoxelCount())); const openvdb::CoordBBox maskbbox(openvdb::Coord(35, 30, 30), openvdb::Coord(41, 41, 41)); BoolGrid::Ptr maskGrid = BoolGrid::create(false); maskGrid->fill(maskbbox, true/*value*/, true/*activate*/); // run the tool //typedef openvdb::tools::Cpt<FloatGrid> FloatCpt;//fails because MaskT defaults to MaskGrid typedef openvdb::tools::Cpt<FloatGrid, BoolGrid> FloatCpt; FloatCpt cpt(*grid, *maskGrid); FloatCpt::OutGridType::Ptr cptGrid = cpt.process(true/*threaded*/, false/*use world transform*/); FloatCpt::OutGridType::ConstAccessor cptAccessor = cptGrid->getConstAccessor(); // inside the masked region Coord xyz(35,30,30); EXPECT_TRUE(tree.isValueOn(xyz)); Vec3f P = cptAccessor.getValue(xyz); ASSERT_DOUBLES_EXACTLY_EQUAL(center[0], P[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[1], P[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(center[2], P[2]); // outside the masked region xyz.reset(42,42,42); 
    EXPECT_TRUE(!cptAccessor.isValueOn(xyz));
}

TEST_F(TestCpt, testOldStyleStencils)
{
    using namespace openvdb;

    {// test of level set to sphere at (6,8,10) with R=10 and dx=0.5
        FloatGrid::Ptr grid = FloatGrid::create(/*backgroundValue=*/5.0);
        grid->setTransform(math::Transform::createLinearTransform(/*voxel size=*/0.5));
        EXPECT_TRUE(grid->empty());

        const openvdb::Coord dim(32,32,32);
        const openvdb::Vec3f center(6.0f,8.0f,10.0f);//i.e. (12,16,20) in index space
        const float radius=10;//i.e. (16,8,10) and (6,8,0) are on the sphere
        unittest_util::makeSphere<FloatGrid>(
            dim, center, radius, *grid, unittest_util::SPHERE_DENSE);

        EXPECT_TRUE(!grid->empty());
        EXPECT_EQ(dim[0]*dim[1]*dim[2], int(grid->activeVoxelCount()));

        math::GradStencil<FloatGrid> gs(*grid);
        Coord xyz(20,16,20);//i.e. (10,8,10) in world space or 6 world units inside the sphere
        gs.moveTo(xyz);
        float dist = gs.getValue();//signed closest distance to sphere in world coordinates
        Vec3f P = gs.cpt();//closest point to sphere in index space
        ASSERT_DOUBLES_EXACTLY_EQUAL(dist,-6);
        ASSERT_DOUBLES_EXACTLY_EQUAL(32,P[0]);
        ASSERT_DOUBLES_EXACTLY_EQUAL(16,P[1]);
        ASSERT_DOUBLES_EXACTLY_EQUAL(20,P[2]);

        xyz.reset(12,16,10);//i.e. (6,8,5) in world space or 15 world units inside the sphere
        gs.moveTo(xyz);
        dist = gs.getValue();//signed closest distance to sphere in world coordinates
        P = gs.cpt();//closest point to sphere in index space
        ASSERT_DOUBLES_EXACTLY_EQUAL(-5,dist);
        ASSERT_DOUBLES_EXACTLY_EQUAL(12,P[0]);
        ASSERT_DOUBLES_EXACTLY_EQUAL(16,P[1]);
        ASSERT_DOUBLES_EXACTLY_EQUAL( 0,P[2]);
    }
}
19,365
C++
36.603883
100
0.623393
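// Hedged usage sketch (assumes <openvdb/tools/GridOperators.h> and
// <openvdb/tools/LevelSetSphere.h>; the input level set and names are illustrative,
// not part of the file above): TestCpt drives the closest-point transform through
// tools::Cpt<FloatGrid>, whose output grid stores, per active voxel, the closest
// point on the zero crossing of the input signed distance field.
#include <openvdb/openvdb.h>
#include <openvdb/tools/GridOperators.h>
#include <openvdb/tools/LevelSetSphere.h>
#include <iostream>

int main()
{
    openvdb::initialize();
    // A narrow-band level set of a sphere to act as the input signed distance field.
    openvdb::FloatGrid::Ptr sdf = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/10.0f, /*center=*/openvdb::Vec3f(0.0f), /*voxelSize=*/0.5f);

    using FloatCpt = openvdb::tools::Cpt<openvdb::FloatGrid>;
    FloatCpt cpt(*sdf);
    // threaded = true, use world transform = false => closest points in index space.
    FloatCpt::OutGridType::Ptr cptGrid = cpt.process(true, false);

    std::cout << cptGrid->activeVoxelCount() << " closest-point samples" << std::endl;
    return 0;
}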
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestNodeIterator.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/tree/Tree.h> class TestNodeIterator: public ::testing::Test { }; namespace { typedef openvdb::tree::Tree4<float, 3, 2, 3>::Type Tree323f; } //////////////////////////////////////// TEST_F(TestNodeIterator, testEmpty) { Tree323f tree(/*fillValue=*/256.0f); { Tree323f::NodeCIter iter(tree); EXPECT_TRUE(!iter.next()); } { tree.setValue(openvdb::Coord(8, 16, 24), 10.f); Tree323f::NodeIter iter(tree); // non-const EXPECT_TRUE(iter); // Try modifying the tree through a non-const iterator. Tree323f::RootNodeType* root = NULL; iter.getNode(root); EXPECT_TRUE(root != NULL); root->clear(); // Verify that the tree is now empty. iter = Tree323f::NodeIter(tree); EXPECT_TRUE(iter); EXPECT_TRUE(!iter.next()); } } TEST_F(TestNodeIterator, testSinglePositive) { { Tree323f tree(/*fillValue=*/256.0f); tree.setValue(openvdb::Coord(8, 16, 24), 10.f); Tree323f::NodeCIter iter(tree); EXPECT_TRUE(Tree323f::LeafNodeType::DIM == 8); EXPECT_TRUE(iter); EXPECT_EQ(0U, iter.getDepth()); EXPECT_EQ(tree.treeDepth(), 1 + iter.getLevel()); openvdb::CoordBBox range, bbox; tree.getIndexRange(range); iter.getBoundingBox(bbox); EXPECT_EQ(bbox.min(), range.min()); EXPECT_EQ(bbox.max(), range.max()); // Descend to the depth-1 internal node with bounding box // (0, 0, 0) -> (255, 255, 255) containing voxel (8, 16, 24). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(1U, iter.getDepth()); iter.getBoundingBox(bbox); EXPECT_EQ(openvdb::Coord(0), bbox.min()); EXPECT_EQ(openvdb::Coord((1 << (3 + 2 + 3)) - 1), bbox.max()); // Descend to the depth-2 internal node with bounding box // (0, 0, 0) -> (31, 31, 31) containing voxel (8, 16, 24). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(2U, iter.getDepth()); iter.getBoundingBox(bbox); EXPECT_EQ(openvdb::Coord(0), bbox.min()); EXPECT_EQ(openvdb::Coord((1 << (2 + 3)) - 1), bbox.max()); // Descend to the leaf node with bounding box (8, 16, 24) -> (15, 23, 31) // containing voxel (8, 16, 24). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(0U, iter.getLevel()); iter.getBoundingBox(bbox); range.min().reset(8, 16, 24); range.max() = range.min().offsetBy((1 << 3) - 1); // add leaf node size EXPECT_EQ(range.min(), bbox.min()); EXPECT_EQ(range.max(), bbox.max()); iter.next(); EXPECT_TRUE(!iter); } { Tree323f tree(/*fillValue=*/256.0f); tree.setValue(openvdb::Coord(129), 10.f); Tree323f::NodeCIter iter(tree); EXPECT_TRUE(Tree323f::LeafNodeType::DIM == 8); EXPECT_TRUE(iter); EXPECT_EQ(0U, iter.getDepth()); EXPECT_EQ(tree.treeDepth(), 1 + iter.getLevel()); openvdb::CoordBBox range, bbox; tree.getIndexRange(range); iter.getBoundingBox(bbox); EXPECT_EQ(bbox.min(), range.min()); EXPECT_EQ(bbox.max(), range.max()); // Descend to the depth-1 internal node with bounding box // (0, 0, 0) -> (255, 255, 255) containing voxel (129, 129, 129). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(1U, iter.getDepth()); iter.getBoundingBox(bbox); EXPECT_EQ(openvdb::Coord(0), bbox.min()); EXPECT_EQ(openvdb::Coord((1 << (3 + 2 + 3)) - 1), bbox.max()); // Descend to the depth-2 internal node with bounding box // (128, 128, 128) -> (159, 159, 159) containing voxel (129, 129, 129). // (128 is the nearest multiple of 32 less than 129.) 
iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(2U, iter.getDepth()); iter.getBoundingBox(bbox); range.min().reset(128, 128, 128); EXPECT_EQ(range.min(), bbox.min()); EXPECT_EQ(range.min().offsetBy((1 << (2 + 3)) - 1), bbox.max()); // Descend to the leaf node with bounding box // (128, 128, 128) -> (135, 135, 135) containing voxel (129, 129, 129). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(0U, iter.getLevel()); iter.getBoundingBox(bbox); range.max() = range.min().offsetBy((1 << 3) - 1); // add leaf node size EXPECT_EQ(range.min(), bbox.min()); EXPECT_EQ(range.max(), bbox.max()); iter.next(); EXPECT_TRUE(!iter); } } TEST_F(TestNodeIterator, testSingleNegative) { Tree323f tree(/*fillValue=*/256.0f); tree.setValue(openvdb::Coord(-1), 10.f); Tree323f::NodeCIter iter(tree); EXPECT_TRUE(Tree323f::LeafNodeType::DIM == 8); EXPECT_TRUE(iter); EXPECT_EQ(0U, iter.getDepth()); EXPECT_EQ(tree.treeDepth(), 1 + iter.getLevel()); openvdb::CoordBBox range, bbox; tree.getIndexRange(range); iter.getBoundingBox(bbox); EXPECT_EQ(bbox.min(), range.min()); EXPECT_EQ(bbox.max(), range.max()); // Descend to the depth-1 internal node with bounding box // (-256, -256, -256) -> (-1, -1, -1) containing voxel (-1, -1, -1). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(1U, iter.getDepth()); iter.getBoundingBox(bbox); EXPECT_EQ(openvdb::Coord(-(1 << (3 + 2 + 3))), bbox.min()); EXPECT_EQ(openvdb::Coord(-1), bbox.max()); // Descend to the depth-2 internal node with bounding box // (-32, -32, -32) -> (-1, -1, -1) containing voxel (-1, -1, -1). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(2U, iter.getDepth()); iter.getBoundingBox(bbox); EXPECT_EQ(openvdb::Coord(-(1 << (2 + 3))), bbox.min()); EXPECT_EQ(openvdb::Coord(-1), bbox.max()); // Descend to the leaf node with bounding box (-8, -8, -8) -> (-1, -1, -1) // containing voxel (-1, -1, -1). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(0U, iter.getLevel()); iter.getBoundingBox(bbox); range.max().reset(-1, -1, -1); range.min() = range.max().offsetBy(-((1 << 3) - 1)); // add leaf node size EXPECT_EQ(range.min(), bbox.min()); EXPECT_EQ(range.max(), bbox.max()); iter.next(); EXPECT_TRUE(!iter); } TEST_F(TestNodeIterator, testMultipleBlocks) { Tree323f tree(/*fillValue=*/256.0f); tree.setValue(openvdb::Coord(-1), 10.f); tree.setValue(openvdb::Coord(129), 10.f); Tree323f::NodeCIter iter(tree); EXPECT_TRUE(Tree323f::LeafNodeType::DIM == 8); EXPECT_TRUE(iter); EXPECT_EQ(0U, iter.getDepth()); EXPECT_EQ(tree.treeDepth(), 1 + iter.getLevel()); // Descend to the depth-1 internal node with bounding box // (-256, -256, -256) -> (-1, -1, -1) containing voxel (-1, -1, -1). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(1U, iter.getDepth()); // Descend to the depth-2 internal node with bounding box // (-32, -32, -32) -> (-1, -1, -1) containing voxel (-1, -1, -1). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(2U, iter.getDepth()); // Descend to the leaf node with bounding box (-8, -8, -8) -> (-1, -1, -1) // containing voxel (-1, -1, -1). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(0U, iter.getLevel()); openvdb::Coord expectedMin, expectedMax(-1, -1, -1); expectedMin = expectedMax.offsetBy(-((1 << 3) - 1)); // add leaf node size openvdb::CoordBBox bbox; iter.getBoundingBox(bbox); EXPECT_EQ(expectedMin, bbox.min()); EXPECT_EQ(expectedMax, bbox.max()); // Ascend to the depth-1 internal node with bounding box (0, 0, 0) -> (255, 255, 255) // containing voxel (129, 129, 129). 
iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(1U, iter.getDepth()); // Descend to the depth-2 internal node with bounding box // (128, 128, 128) -> (159, 159, 159) containing voxel (129, 129, 129). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(2U, iter.getDepth()); // Descend to the leaf node with bounding box (128, 128, 128) -> (135, 135, 135) // containing voxel (129, 129, 129). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(0U, iter.getLevel()); expectedMin.reset(128, 128, 128); expectedMax = expectedMin.offsetBy((1 << 3) - 1); // add leaf node size iter.getBoundingBox(bbox); EXPECT_EQ(expectedMin, bbox.min()); EXPECT_EQ(expectedMax, bbox.max()); iter.next(); EXPECT_TRUE(!iter); } TEST_F(TestNodeIterator, testDepthBounds) { Tree323f tree(/*fillValue=*/256.0f); tree.setValue(openvdb::Coord(-1), 10.f); tree.setValue(openvdb::Coord(129), 10.f); { // Iterate over internal nodes only. Tree323f::NodeCIter iter(tree); iter.setMaxDepth(2); iter.setMinDepth(1); // Begin at the depth-1 internal node with bounding box // (-256, -256, -256) -> (-1, -1, -1) containing voxel (-1, -1, -1). EXPECT_TRUE(iter); EXPECT_EQ(1U, iter.getDepth()); // Descend to the depth-2 internal node with bounding box // (-32, -32, -32) -> (-1, -1, -1) containing voxel (-1, -1, -1). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(2U, iter.getDepth()); // Skipping the leaf node, ascend to the depth-1 internal node with bounding box // (0, 0, 0) -> (255, 255, 255) containing voxel (129, 129, 129). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(1U, iter.getDepth()); // Descend to the depth-2 internal node with bounding box // (128, 128, 128) -> (159, 159, 159) containing voxel (129, 129, 129). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(2U, iter.getDepth()); // Verify that no internal nodes remain unvisited. iter.next(); EXPECT_TRUE(!iter); } { // Iterate over depth-1 internal nodes only. Tree323f::NodeCIter iter(tree); iter.setMaxDepth(1); iter.setMinDepth(1); // Begin at the depth-1 internal node with bounding box // (-256, -256, -256) -> (-1, -1, -1) containing voxel (-1, -1, -1). EXPECT_TRUE(iter); EXPECT_EQ(1U, iter.getDepth()); // Skip to the depth-1 internal node with bounding box // (0, 0, 0) -> (255, 255, 255) containing voxel (129, 129, 129). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(1U, iter.getDepth()); // Verify that no depth-1 nodes remain unvisited. iter.next(); EXPECT_TRUE(!iter); } { // Iterate over leaf nodes only. Tree323f::NodeCIter iter = tree.cbeginNode(); iter.setMaxDepth(3); iter.setMinDepth(3); // Begin at the leaf node with bounding box (-8, -8, -8) -> (-1, -1, -1) // containing voxel (-1, -1, -1). EXPECT_TRUE(iter); EXPECT_EQ(0U, iter.getLevel()); // Skip to the leaf node with bounding box (128, 128, 128) -> (135, 135, 135) // containing voxel (129, 129, 129). iter.next(); EXPECT_TRUE(iter); EXPECT_EQ(0U, iter.getLevel()); // Verify that no leaf nodes remain unvisited. iter.next(); EXPECT_TRUE(!iter); } }
11,314
C++
30.783708
89
0.566555
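The TestNodeIterator cases above all follow the same pattern: walk the tree's node hierarchy with a NodeCIter, optionally clamped to a depth range, and compare each node's bounding box against expected coordinates. A minimal standalone sketch of that pattern follows; it assumes a default openvdb::FloatTree (leaf nodes at depth 3) and uses example coordinates, so it is illustrative rather than a copy of the test.

// Hedged sketch: depth-bounded node iteration on a default openvdb::FloatTree.
// The coordinates and the depth-3 leaf assumption are example choices.
#include <openvdb/openvdb.h>
#include <iostream>

int main()
{
    openvdb::initialize();
    openvdb::FloatTree tree(/*background=*/0.0f);
    tree.setValue(openvdb::Coord(-1, -1, -1), 1.0f);
    tree.setValue(openvdb::Coord(129, 129, 129), 1.0f);

    // Visit leaf nodes only; for the default FloatTree configuration
    // the leaf nodes sit at depth 3.
    openvdb::FloatTree::NodeCIter iter = tree.cbeginNode();
    iter.setMaxDepth(3);
    iter.setMinDepth(3);

    openvdb::CoordBBox bbox;
    for ( ; iter; iter.next()) {
        iter.getBoundingBox(bbox);
        std::cout << "leaf at depth " << iter.getDepth() << " covers " << bbox << std::endl;
    }
    return 0;
}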
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestExceptions.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include "gtest/gtest.h"
#include <openvdb/Exceptions.h>


class TestExceptions : public ::testing::Test
{
protected:
    template<typename ExceptionT> void testException();
};


template<typename ExceptionT> struct ExceptionTraits
{ static std::string name() { return ""; } };
template<> struct ExceptionTraits<openvdb::ArithmeticError>
{ static std::string name() { return "ArithmeticError"; } };
template<> struct ExceptionTraits<openvdb::IndexError>
{ static std::string name() { return "IndexError"; } };
template<> struct ExceptionTraits<openvdb::IoError>
{ static std::string name() { return "IoError"; } };
template<> struct ExceptionTraits<openvdb::KeyError>
{ static std::string name() { return "KeyError"; } };
template<> struct ExceptionTraits<openvdb::LookupError>
{ static std::string name() { return "LookupError"; } };
template<> struct ExceptionTraits<openvdb::NotImplementedError>
{ static std::string name() { return "NotImplementedError"; } };
template<> struct ExceptionTraits<openvdb::ReferenceError>
{ static std::string name() { return "ReferenceError"; } };
template<> struct ExceptionTraits<openvdb::RuntimeError>
{ static std::string name() { return "RuntimeError"; } };
template<> struct ExceptionTraits<openvdb::TypeError>
{ static std::string name() { return "TypeError"; } };
template<> struct ExceptionTraits<openvdb::ValueError>
{ static std::string name() { return "ValueError"; } };


template<typename ExceptionT>
void
TestExceptions::testException()
{
    std::string ErrorMsg("Error message");

    EXPECT_THROW(OPENVDB_THROW(ExceptionT, ErrorMsg), ExceptionT);

    try {
        OPENVDB_THROW(ExceptionT, ErrorMsg);
    } catch (openvdb::Exception& e) {
        const std::string expectedMsg = ExceptionTraits<ExceptionT>::name() + ": " + ErrorMsg;
        EXPECT_EQ(expectedMsg, std::string(e.what()));
    }
}


TEST_F(TestExceptions, testArithmeticError) { testException<openvdb::ArithmeticError>(); }
TEST_F(TestExceptions, testIndexError) { testException<openvdb::IndexError>(); }
TEST_F(TestExceptions, testIoError) { testException<openvdb::IoError>(); }
TEST_F(TestExceptions, testKeyError) { testException<openvdb::KeyError>(); }
TEST_F(TestExceptions, testLookupError) { testException<openvdb::LookupError>(); }
TEST_F(TestExceptions, testNotImplementedError) { testException<openvdb::NotImplementedError>(); }
TEST_F(TestExceptions, testReferenceError) { testException<openvdb::ReferenceError>(); }
TEST_F(TestExceptions, testRuntimeError) { testException<openvdb::RuntimeError>(); }
TEST_F(TestExceptions, testTypeError) { testException<openvdb::TypeError>(); }
TEST_F(TestExceptions, testValueError) { testException<openvdb::ValueError>(); }
2,779
C++
41.76923
98
0.739834
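TestExceptions relies on the fact that OPENVDB_THROW prepends the exception's class name to the message returned by what(). A hedged sketch of that behaviour, with an arbitrary example message, follows.

// Hedged sketch: throwing and catching an OpenVDB exception.
// The error text is an arbitrary example, not taken from the test.
#include <openvdb/Exceptions.h>
#include <iostream>

int main()
{
    try {
        OPENVDB_THROW(openvdb::ValueError, "background must be positive");
    } catch (const openvdb::Exception& e) {
        // Prints "ValueError: background must be positive".
        std::cout << e.what() << std::endl;
    }
    return 0;
}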
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestMetaMap.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/util/logging.h> #include <openvdb/Metadata.h> #include <openvdb/MetaMap.h> class TestMetaMap: public ::testing::Test { }; TEST_F(TestMetaMap, testInsert) { using namespace openvdb; MetaMap meta; meta.insertMeta("meta1", StringMetadata("testing")); meta.insertMeta("meta2", Int32Metadata(20)); meta.insertMeta("meta3", FloatMetadata(2.0)); MetaMap::MetaIterator iter = meta.beginMeta(); int i = 1; for( ; iter != meta.endMeta(); ++iter, ++i) { if(i == 1) { EXPECT_TRUE(iter->first.compare("meta1") == 0); std::string val = meta.metaValue<std::string>("meta1"); EXPECT_TRUE(val == "testing"); } else if(i == 2) { EXPECT_TRUE(iter->first.compare("meta2") == 0); int32_t val = meta.metaValue<int32_t>("meta2"); EXPECT_TRUE(val == 20); } else if(i == 3) { EXPECT_TRUE(iter->first.compare("meta3") == 0); float val = meta.metaValue<float>("meta3"); //EXPECT_TRUE(val == 2.0); EXPECT_NEAR(2.0f,val,0); } } } TEST_F(TestMetaMap, testRemove) { using namespace openvdb; MetaMap meta; meta.insertMeta("meta1", StringMetadata("testing")); meta.insertMeta("meta2", Int32Metadata(20)); meta.insertMeta("meta3", FloatMetadata(2.0)); meta.removeMeta("meta2"); MetaMap::MetaIterator iter = meta.beginMeta(); int i = 1; for( ; iter != meta.endMeta(); ++iter, ++i) { if(i == 1) { EXPECT_TRUE(iter->first.compare("meta1") == 0); std::string val = meta.metaValue<std::string>("meta1"); EXPECT_TRUE(val == "testing"); } else if(i == 2) { EXPECT_TRUE(iter->first.compare("meta3") == 0); float val = meta.metaValue<float>("meta3"); //EXPECT_TRUE(val == 2.0); EXPECT_NEAR(2.0f,val,0); } } meta.removeMeta("meta1"); iter = meta.beginMeta(); for( ; iter != meta.endMeta(); ++iter, ++i) { EXPECT_TRUE(iter->first.compare("meta3") == 0); float val = meta.metaValue<float>("meta3"); //EXPECT_TRUE(val == 2.0); EXPECT_NEAR(2.0f,val,0); } meta.removeMeta("meta3"); EXPECT_EQ(0, int(meta.metaCount())); } TEST_F(TestMetaMap, testGetMetadata) { using namespace openvdb; MetaMap meta; meta.insertMeta("meta1", StringMetadata("testing")); meta.insertMeta("meta2", Int32Metadata(20)); meta.insertMeta("meta3", DoubleMetadata(2.0)); Metadata::Ptr metadata = meta["meta2"]; EXPECT_TRUE(metadata); EXPECT_TRUE(metadata->typeName().compare("int32") == 0); DoubleMetadata::Ptr dm = meta.getMetadata<DoubleMetadata>("meta3"); //EXPECT_TRUE(dm->value() == 2.0); EXPECT_NEAR(2.0,dm->value(),0); const DoubleMetadata::Ptr cdm = meta.getMetadata<DoubleMetadata>("meta3"); //EXPECT_TRUE(dm->value() == 2.0); EXPECT_NEAR(2.0,cdm->value(),0); EXPECT_TRUE(!meta.getMetadata<StringMetadata>("meta2")); EXPECT_THROW(meta.metaValue<int32_t>("meta3"), openvdb::TypeError); EXPECT_THROW(meta.metaValue<double>("meta5"), openvdb::LookupError); } TEST_F(TestMetaMap, testIO) { using namespace openvdb; logging::LevelScope suppressLogging{logging::Level::Fatal}; Metadata::clearRegistry(); // Write some metadata using unregistered types. MetaMap meta; meta.insertMeta("meta1", StringMetadata("testing")); meta.insertMeta("meta2", Int32Metadata(20)); meta.insertMeta("meta3", DoubleMetadata(2.0)); std::ostringstream ostr(std::ios_base::binary); meta.writeMeta(ostr); // Verify that reading metadata of unregistered types is possible, // though the values cannot be retrieved. 
MetaMap meta2; std::istringstream istr(ostr.str(), std::ios_base::binary); EXPECT_NO_THROW(meta2.readMeta(istr)); EXPECT_EQ(3, int(meta2.metaCount())); // Verify that writing metadata of unknown type (i.e., UnknownMetadata) is possible. std::ostringstream ostrUnknown(std::ios_base::binary); meta2.writeMeta(ostrUnknown); // Register just one of the three types, then reread and verify that // the value of the registered type can be retrieved. Int32Metadata::registerType(); istr.seekg(0, std::ios_base::beg); EXPECT_NO_THROW(meta2.readMeta(istr)); EXPECT_EQ(3, int(meta2.metaCount())); EXPECT_EQ(meta.metaValue<int>("meta2"), meta2.metaValue<int>("meta2")); // Register the remaining types. StringMetadata::registerType(); DoubleMetadata::registerType(); { // Now seek to beginning and read again. istr.seekg(0, std::ios_base::beg); meta2.clearMetadata(); EXPECT_NO_THROW(meta2.readMeta(istr)); EXPECT_EQ(meta.metaCount(), meta2.metaCount()); std::string val = meta.metaValue<std::string>("meta1"); std::string val2 = meta2.metaValue<std::string>("meta1"); EXPECT_EQ(0, val.compare(val2)); int intval = meta.metaValue<int>("meta2"); int intval2 = meta2.metaValue<int>("meta2"); EXPECT_EQ(intval, intval2); double dval = meta.metaValue<double>("meta3"); double dval2 = meta2.metaValue<double>("meta3"); EXPECT_NEAR(dval, dval2,0); } { // Verify that metadata that was written as UnknownMetadata can // be read as typed metadata once the underlying types are registered. std::istringstream istrUnknown(ostrUnknown.str(), std::ios_base::binary); meta2.clearMetadata(); EXPECT_NO_THROW(meta2.readMeta(istrUnknown)); EXPECT_EQ(meta.metaCount(), meta2.metaCount()); EXPECT_EQ( meta.metaValue<std::string>("meta1"), meta2.metaValue<std::string>("meta1")); EXPECT_EQ(meta.metaValue<int>("meta2"), meta2.metaValue<int>("meta2")); EXPECT_NEAR( meta.metaValue<double>("meta3"), meta2.metaValue<double>("meta3"), 0.0); } // Clear the registry once the test is done. Metadata::clearRegistry(); } TEST_F(TestMetaMap, testEmptyIO) { using namespace openvdb; MetaMap meta; // Write out an empty metadata std::ostringstream ostr(std::ios_base::binary); // Read in the metadata; MetaMap meta2; std::istringstream istr(ostr.str(), std::ios_base::binary); EXPECT_NO_THROW(meta2.readMeta(istr)); EXPECT_TRUE(meta2.metaCount() == 0); } TEST_F(TestMetaMap, testCopyConstructor) { using namespace openvdb; MetaMap meta; meta.insertMeta("meta1", StringMetadata("testing")); meta.insertMeta("meta2", Int32Metadata(20)); meta.insertMeta("meta3", FloatMetadata(2.0)); // copy constructor MetaMap meta2(meta); EXPECT_TRUE(meta.metaCount() == meta2.metaCount()); std::string str = meta.metaValue<std::string>("meta1"); std::string str2 = meta2.metaValue<std::string>("meta1"); EXPECT_TRUE(str == str2); EXPECT_TRUE(meta.metaValue<int32_t>("meta2") == meta2.metaValue<int32_t>("meta2")); EXPECT_NEAR(meta.metaValue<float>("meta3"), meta2.metaValue<float>("meta3"),0); //EXPECT_TRUE(meta.metaValue<float>("meta3") == // meta2.metaValue<float>("meta3")); } TEST_F(TestMetaMap, testCopyConstructorEmpty) { using namespace openvdb; MetaMap meta; MetaMap meta2(meta); EXPECT_TRUE(meta.metaCount() == 0); EXPECT_TRUE(meta2.metaCount() == meta.metaCount()); } TEST_F(TestMetaMap, testAssignment) { using namespace openvdb; // Populate a map with data. MetaMap meta; meta.insertMeta("meta1", StringMetadata("testing")); meta.insertMeta("meta2", Int32Metadata(20)); meta.insertMeta("meta3", FloatMetadata(2.0)); // Create an empty map. 
MetaMap meta2; EXPECT_EQ(0, int(meta2.metaCount())); // Copy the first map to the second. meta2 = meta; EXPECT_EQ(meta.metaCount(), meta2.metaCount()); // Verify that the contents of the two maps are the same. EXPECT_EQ( meta.metaValue<std::string>("meta1"), meta2.metaValue<std::string>("meta1")); EXPECT_EQ(meta.metaValue<int32_t>("meta2"), meta2.metaValue<int32_t>("meta2")); EXPECT_NEAR( meta.metaValue<float>("meta3"), meta2.metaValue<float>("meta3"), /*tolerance=*/0); // Verify that changing one map doesn't affect the other. meta.insertMeta("meta1", StringMetadata("changed")); std::string str = meta.metaValue<std::string>("meta1"); EXPECT_EQ(std::string("testing"), meta2.metaValue<std::string>("meta1")); } TEST_F(TestMetaMap, testEquality) { using namespace openvdb; // Populate a map with data. MetaMap meta; meta.insertMeta("meta1", StringMetadata("testing")); meta.insertMeta("meta2", Int32Metadata(20)); meta.insertMeta("meta3", FloatMetadata(3.14159f)); // Create an empty map. MetaMap meta2; // Verify that the two maps differ. EXPECT_TRUE(meta != meta2); EXPECT_TRUE(meta2 != meta); // Copy the first map to the second. meta2 = meta; // Verify that the two maps are equivalent. EXPECT_TRUE(meta == meta2); EXPECT_TRUE(meta2 == meta); // Modify the first map. meta.removeMeta("meta1"); meta.insertMeta("abc", DoubleMetadata(2.0)); // Verify that the two maps differ. EXPECT_TRUE(meta != meta2); EXPECT_TRUE(meta2 != meta); // Modify the second map and verify that the two maps differ. meta2 = meta; meta2.insertMeta("meta2", Int32Metadata(42)); EXPECT_TRUE(meta != meta2); EXPECT_TRUE(meta2 != meta); meta2 = meta; meta2.insertMeta("meta3", FloatMetadata(2.0001f)); EXPECT_TRUE(meta != meta2); EXPECT_TRUE(meta2 != meta); meta2 = meta; meta2.insertMeta("abc", DoubleMetadata(2.0001)); EXPECT_TRUE(meta != meta2); EXPECT_TRUE(meta2 != meta); }
10,073
C++
29.343373
90
0.625137
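TestMetaMap::testIO deliberately clears the metadata registry to exercise the UnknownMetadata fallback; in ordinary use the built-in metadata types are registered by openvdb::initialize(), and a write/read round trip is straightforward. A hedged sketch of that common case follows; the key names and values are example choices.

// Hedged sketch: MetaMap serialization round trip with registered types.
#include <openvdb/openvdb.h>
#include <sstream>
#include <cassert>

int main()
{
    openvdb::initialize(); // registers the built-in metadata types

    openvdb::MetaMap meta;
    meta.insertMeta("name", openvdb::StringMetadata("example"));
    meta.insertMeta("count", openvdb::Int32Metadata(20));

    std::ostringstream ostr(std::ios_base::binary);
    meta.writeMeta(ostr);

    openvdb::MetaMap copy;
    std::istringstream istr(ostr.str(), std::ios_base::binary);
    copy.readMeta(istr);

    assert(copy.metaValue<std::string>("name") == "example");
    assert(copy.metaValue<int32_t>("count") == 20);
    return 0;
}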
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestName.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include "gtest/gtest.h"
#include <openvdb/Exceptions.h>
#include <openvdb/util/Name.h>


class TestName : public ::testing::Test
{
};


TEST_F(TestName, test)
{
    using namespace openvdb;

    Name name;
    Name name2("something");
    Name name3 = std::string("something2");

    name = "something";

    EXPECT_TRUE(name == name2);
    EXPECT_TRUE(name != name3);
    EXPECT_TRUE(name != Name("testing"));
    EXPECT_TRUE(name == Name("something"));
}

TEST_F(TestName, testIO)
{
    using namespace openvdb;

    Name name("some name that i made up");

    std::ostringstream ostr(std::ios_base::binary);
    openvdb::writeString(ostr, name);

    name = "some other name";

    EXPECT_TRUE(name == Name("some other name"));

    std::istringstream istr(ostr.str(), std::ios_base::binary);
    name = openvdb::readString(istr);

    EXPECT_TRUE(name == Name("some name that i made up"));
}

TEST_F(TestName, testMultipleIO)
{
    using namespace openvdb;

    Name name("some name that i made up");
    Name name2("something else");

    std::ostringstream ostr(std::ios_base::binary);
    openvdb::writeString(ostr, name);
    openvdb::writeString(ostr, name2);

    std::istringstream istr(ostr.str(), std::ios_base::binary);
    Name n = openvdb::readString(istr), n2 = openvdb::readString(istr);

    EXPECT_TRUE(name == n);
    EXPECT_TRUE(name2 == n2);
}
1,456
C++
20.42647
71
0.651786
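TestName's I/O cases reduce to openvdb::writeString()/readString() round trips; a hedged sketch with an example string follows.

// Hedged sketch: Name (std::string) serialization round trip.
#include <openvdb/util/Name.h>
#include <sstream>
#include <cassert>

int main()
{
    const openvdb::Name name("density");

    std::ostringstream ostr(std::ios_base::binary);
    openvdb::writeString(ostr, name);

    std::istringstream istr(ostr.str(), std::ios_base::binary);
    assert(openvdb::readString(istr) == name);
    return 0;
}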
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestFastSweeping.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file TestFastSweeping.cc /// /// @author Ken Museth //#define BENCHMARK_FAST_SWEEPING //#define TIMING_FAST_SWEEPING #include <sstream> #include "gtest/gtest.h" #include <openvdb/Types.h> #include <openvdb/openvdb.h> #include <openvdb/tools/ChangeBackground.h> #include <openvdb/tools/Diagnostics.h> #include <openvdb/tools/FastSweeping.h> #include <openvdb/tools/LevelSetSphere.h> #include <openvdb/tools/LevelSetTracker.h> #include <openvdb/tools/LevelSetRebuild.h> #include <openvdb/tools/LevelSetPlatonic.h> #include <openvdb/tools/LevelSetUtil.h> #ifdef TIMING_FAST_SWEEPING #include <openvdb/util/CpuTimer.h> #endif // Uncomment to test on models from our web-site //#define TestFastSweeping_DATA_PATH "/Users/ken/dev/data/vdb/" //#define TestFastSweeping_DATA_PATH "/home/kmu/dev/data/vdb/" //#define TestFastSweeping_DATA_PATH "/usr/pic1/Data/OpenVDB/LevelSetModels/" class TestFastSweeping: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } void writeFile(const std::string &name, openvdb::FloatGrid::Ptr grid) { openvdb::io::File file(name); file.setCompression(openvdb::io::COMPRESS_NONE); openvdb::GridPtrVec grids; grids.push_back(grid); file.write(grids); } };// TestFastSweeping TEST_F(TestFastSweeping, dilateSignedDistance) { using namespace openvdb; // Define parameters for the level set sphere to be re-normalized const float radius = 200.0f; const Vec3f center(0.0f, 0.0f, 0.0f); const float voxelSize = 1.0f;//half width const int width = 3, new_width = 50;//half width FloatGrid::Ptr grid = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, float(width)); const size_t oldVoxelCount = grid->activeVoxelCount(); tools::FastSweeping<FloatGrid> fs; EXPECT_EQ(size_t(0), fs.sweepingVoxelCount()); EXPECT_EQ(size_t(0), fs.boundaryVoxelCount()); fs.initDilate(*grid, new_width - width); EXPECT_TRUE(fs.sweepingVoxelCount() > 0); EXPECT_TRUE(fs.boundaryVoxelCount() > 0); fs.sweep(); EXPECT_TRUE(fs.sweepingVoxelCount() > 0); EXPECT_TRUE(fs.boundaryVoxelCount() > 0); auto grid2 = fs.sdfGrid(); fs.clear(); EXPECT_EQ(size_t(0), fs.sweepingVoxelCount()); EXPECT_EQ(size_t(0), fs.boundaryVoxelCount()); const Index64 sweepingVoxelCount = grid2->activeVoxelCount(); EXPECT_TRUE(sweepingVoxelCount > oldVoxelCount); {// Check that the norm of the gradient for all active voxels is close to unity tools::Diagnose<FloatGrid> diagnose(*grid2); tools::CheckNormGrad<FloatGrid> test(*grid2, 0.99f, 1.01f); const std::string message = diagnose.check(test, false,// don't generate a mask grid true,// check active voxels false,// ignore active tiles since a level set has none false);// no need to check the background value EXPECT_TRUE(message.empty()); EXPECT_EQ(Index64(0), diagnose.failureCount()); //std::cout << "\nOutput 1: " << message << std::endl; } {// Make sure all active voxels fail the following test tools::Diagnose<FloatGrid> diagnose(*grid2); tools::CheckNormGrad<FloatGrid> test(*grid2, std::numeric_limits<float>::min(), 0.99f); const std::string message = diagnose.check(test, false,// don't generate a mask grid true,// check active voxels false,// ignore active tiles since a level set has none false);// no need to check the background value EXPECT_TRUE(!message.empty()); EXPECT_EQ(sweepingVoxelCount, diagnose.failureCount()); //std::cout << "\nOutput 2: " << message << std::endl; } {// Make sure all active voxels fail the 
following test tools::Diagnose<FloatGrid> diagnose(*grid2); tools::CheckNormGrad<FloatGrid> test(*grid2, 1.01f, std::numeric_limits<float>::max()); const std::string message = diagnose.check(test, false,// don't generate a mask grid true,// check active voxels false,// ignore active tiles since a level set has none false);// no need to check the background value EXPECT_TRUE(!message.empty()); EXPECT_EQ(sweepingVoxelCount, diagnose.failureCount()); //std::cout << "\nOutput 3: " << message << std::endl; } }// dilateSignedDistance TEST_F(TestFastSweeping, testMaskSdf) { using namespace openvdb; // Define parameterS FOR the level set sphere to be re-normalized const float radius = 200.0f; const Vec3f center(0.0f, 0.0f, 0.0f); const float voxelSize = 1.0f, width = 3.0f;//half width const float new_width = 50; {// Use box as a mask //std::cerr << "\nUse box as a mask" << std::endl; FloatGrid::Ptr grid = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, width); CoordBBox bbox(Coord(150,-50,-50), Coord(250,50,50)); MaskGrid mask; mask.sparseFill(bbox, true); //this->writeFile("/tmp/box_mask_input.vdb", grid); #ifdef TIMING_FAST_SWEEPING util::CpuTimer timer("\nParallel sparse fast sweeping with a box mask"); #endif grid = tools::maskSdf(*grid, mask); //tools::FastSweeping<FloatGrid> fs; //fs.initMask(*grid, mask); //fs.sweep(); //std::cerr << "voxel count = " << fs.sweepingVoxelCount() << std::endl; //std::cerr << "boundary count = " << fs.boundaryVoxelCount() << std::endl; //EXPECT_TRUE(fs.sweepingVoxelCount() > 0); #ifdef TIMING_FAST_SWEEPING timer.stop(); #endif //writeFile("/tmp/box_mask_output.vdb", grid); {// Check that the norm of the gradient for all active voxels is close to unity tools::Diagnose<FloatGrid> diagnose(*grid); tools::CheckNormGrad<FloatGrid> test(*grid, 0.99f, 1.01f); const std::string message = diagnose.check(test, false,// don't generate a mask grid true,// check active voxels false,// ignore active tiles since a level set has none false);// no need to check the background value //std::cerr << message << std::endl; const double percent = 100.0*double(diagnose.failureCount())/double(grid->activeVoxelCount()); //std::cerr << "Failures = " << percent << "%" << std::endl; //std::cerr << "Failed: " << diagnose.failureCount() << std::endl; //std::cerr << "Total : " << grid->activeVoxelCount() << std::endl; EXPECT_TRUE(percent < 0.01); //EXPECT_TRUE(message.empty()); //EXPECT_EQ(size_t(0), diagnose.failureCount()); } } {// Use sphere as a mask //std::cerr << "\nUse sphere as a mask" << std::endl; FloatGrid::Ptr grid = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, width); FloatGrid::Ptr mask = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, new_width); //this->writeFile("/tmp/sphere_mask_input.vdb", grid); #ifdef TIMING_FAST_SWEEPING util::CpuTimer timer("\nParallel sparse fast sweeping with a sphere mask"); #endif grid = tools::maskSdf(*grid, *mask); //tools::FastSweeping<FloatGrid> fs; //fs.initMask(*grid, *mask); //fs.sweep(); #ifdef TIMING_FAST_SWEEPING timer.stop(); #endif //std::cerr << "voxel count = " << fs.sweepingVoxelCount() << std::endl; //std::cerr << "boundary count = " << fs.boundaryVoxelCount() << std::endl; //EXPECT_TRUE(fs.sweepingVoxelCount() > 0); //this->writeFile("/tmp/sphere_mask_output.vdb", grid); {// Check that the norm of the gradient for all active voxels is close to unity tools::Diagnose<FloatGrid> diagnose(*grid); tools::CheckNormGrad<FloatGrid> test(*grid, 0.99f, 1.01f); const std::string message 
= diagnose.check(test, false,// don't generate a mask grid true,// check active voxels false,// ignore active tiles since a level set has none false);// no need to check the background value //std::cerr << message << std::endl; const double percent = 100.0*double(diagnose.failureCount())/double(grid->activeVoxelCount()); //std::cerr << "Failures = " << percent << "%" << std::endl; //std::cerr << "Failed: " << diagnose.failureCount() << std::endl; //std::cerr << "Total : " << grid->activeVoxelCount() << std::endl; //EXPECT_TRUE(message.empty()); //EXPECT_EQ(size_t(0), diagnose.failureCount()); EXPECT_TRUE(percent < 0.01); //std::cout << "\nOutput 1: " << message << std::endl; } } {// Use dodecahedron as a mask //std::cerr << "\nUse dodecahedron as a mask" << std::endl; FloatGrid::Ptr grid = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, width); FloatGrid::Ptr mask = tools::createLevelSetDodecahedron<FloatGrid>(50, Vec3f(radius, 0.0f, 0.0f), voxelSize, 10); //this->writeFile("/tmp/dodecahedron_mask_input.vdb", grid); #ifdef TIMING_FAST_SWEEPING util::CpuTimer timer("\nParallel sparse fast sweeping with a dodecahedron mask"); #endif grid = tools::maskSdf(*grid, *mask); //tools::FastSweeping<FloatGrid> fs; //fs.initMask(*grid, *mask); //std::cerr << "voxel count = " << fs.sweepingVoxelCount() << std::endl; //std::cerr << "boundary count = " << fs.boundaryVoxelCount() << std::endl; //EXPECT_TRUE(fs.sweepingVoxelCount() > 0); //fs.sweep(); #ifdef TIMING_FAST_SWEEPING timer.stop(); #endif //this->writeFile("/tmp/dodecahedron_mask_output.vdb", grid); {// Check that the norm of the gradient for all active voxels is close to unity tools::Diagnose<FloatGrid> diagnose(*grid); tools::CheckNormGrad<FloatGrid> test(*grid, 0.99f, 1.01f); const std::string message = diagnose.check(test, false,// don't generate a mask grid true,// check active voxels false,// ignore active tiles since a level set has none false);// no need to check the background value //std::cerr << message << std::endl; const double percent = 100.0*double(diagnose.failureCount())/double(grid->activeVoxelCount()); //std::cerr << "Failures = " << percent << "%" << std::endl; //std::cerr << "Failed: " << diagnose.failureCount() << std::endl; //std::cerr << "Total : " << grid->activeVoxelCount() << std::endl; //EXPECT_TRUE(message.empty()); //EXPECT_EQ(size_t(0), diagnose.failureCount()); EXPECT_TRUE(percent < 0.01); //std::cout << "\nOutput 1: " << message << std::endl; } } #ifdef TestFastSweeping_DATA_PATH {// Use bunny as a mask //std::cerr << "\nUse bunny as a mask" << std::endl; FloatGrid::Ptr grid = tools::createLevelSetSphere<FloatGrid>(10.0f, Vec3f(-10,0,0), 0.05f, width); openvdb::initialize();//required whenever I/O of OpenVDB files is performed! 
const std::string path(TestFastSweeping_DATA_PATH); io::File file( path + "bunny.vdb" ); file.open(false);//disable delayed loading FloatGrid::Ptr mask = openvdb::gridPtrCast<openvdb::FloatGrid>(file.getGrids()->at(0)); //this->writeFile("/tmp/bunny_mask_input.vdb", grid); tools::FastSweeping<FloatGrid> fs; #ifdef TIMING_FAST_SWEEPING util::CpuTimer timer("\nParallel sparse fast sweeping with a bunny mask"); #endif fs.initMask(*grid, *mask); //std::cerr << "voxel count = " << fs.sweepingVoxelCount() << std::endl; //std::cerr << "boundary count = " << fs.boundaryVoxelCount() << std::endl; fs.sweep(); auto grid2 = fs.sdfGrid(); #ifdef TIMING_FAST_SWEEPING timer.stop(); #endif //this->writeFile("/tmp/bunny_mask_output.vdb", grid2); {// Check that the norm of the gradient for all active voxels is close to unity tools::Diagnose<FloatGrid> diagnose(*grid2); tools::CheckNormGrad<FloatGrid> test(*grid2, 0.99f, 1.01f); const std::string message = diagnose.check(test, false,// don't generate a mask grid true,// check active voxels false,// ignore active tiles since a level set has none false);// no need to check the background value //std::cerr << message << std::endl; const double percent = 100.0*double(diagnose.failureCount())/double(grid2->activeVoxelCount()); //std::cerr << "Failures = " << percent << "%" << std::endl; //std::cerr << "Failed: " << diagnose.failureCount() << std::endl; //std::cerr << "Total : " << grid2->activeVoxelCount() << std::endl; //EXPECT_TRUE(message.empty()); //EXPECT_EQ(size_t(0), diagnose.failureCount()); EXPECT_TRUE(percent < 4.5);// crossing characteristics! //std::cout << "\nOutput 1: " << message << std::endl; } } #endif }// testMaskSdf TEST_F(TestFastSweeping, testSdfToFogVolume) { using namespace openvdb; // Define parameterS FOR the level set sphere to be re-normalized const float radius = 200.0f; const Vec3f center(0.0f, 0.0f, 0.0f); const float voxelSize = 1.0f, width = 3.0f;//half width FloatGrid::Ptr grid = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, float(width)); tools::sdfToFogVolume(*grid); const Index64 sweepingVoxelCount = grid->activeVoxelCount(); //this->writeFile("/tmp/fog_input.vdb", grid); tools::FastSweeping<FloatGrid> fs; #ifdef TIMING_FAST_SWEEPING util::CpuTimer timer("\nParallel sparse fast sweeping with a fog volume"); #endif fs.initSdf(*grid, /*isoValue*/0.5f,/*isInputSdf*/false); EXPECT_TRUE(fs.sweepingVoxelCount() > 0); //std::cerr << "voxel count = " << fs.sweepingVoxelCount() << std::endl; //std::cerr << "boundary count = " << fs.boundaryVoxelCount() << std::endl; fs.sweep(); auto grid2 = fs.sdfGrid(); #ifdef TIMING_FAST_SWEEPING timer.stop(); #endif EXPECT_EQ(sweepingVoxelCount, grid->activeVoxelCount()); //this->writeFile("/tmp/ls_output.vdb", grid2); {// Check that the norm of the gradient for all active voxels is close to unity tools::Diagnose<FloatGrid> diagnose(*grid2); tools::CheckNormGrad<FloatGrid> test(*grid2, 0.99f, 1.01f); const std::string message = diagnose.check(test, false,// don't generate a mask grid true,// check active voxels false,// ignore active tiles since a level set has none false);// no need to check the background value //std::cerr << message << std::endl; const double percent = 100.0*double(diagnose.failureCount())/double(grid2->activeVoxelCount()); //std::cerr << "Failures = " << percent << "%" << std::endl; //std::cerr << "Failure count = " << diagnose.failureCount() << std::endl; //std::cerr << "Total active voxel count = " << grid2->activeVoxelCount() << std::endl; 
EXPECT_TRUE(percent < 3.0); } }// testSdfToFogVolume #ifdef BENCHMARK_FAST_SWEEPING TEST_F(TestFastSweeping, testBenchmarks) { using namespace openvdb; // Define parameterS FOR the level set sphere to be re-normalized const float radius = 200.0f; const Vec3f center(0.0f, 0.0f, 0.0f); const float voxelSize = 1.0f, width = 3.0f;//half width const float new_width = 50; {// Use rebuildLevelSet (limited to closed and symmetric narrow-band level sets) FloatGrid::Ptr grid = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, width); #ifdef TIMING_FAST_SWEEPING util::CpuTimer timer("\nRebuild level set"); #endif FloatGrid::Ptr ls = tools::levelSetRebuild(*grid, 0.0f, new_width); #ifdef TIMING_FAST_SWEEPING timer.stop(); #endif std::cout << "Diagnostics:\n" << tools::checkLevelSet(*ls, 9) << std::endl; //this->writeFile("/tmp/rebuild_sdf.vdb", ls); } {// Use LevelSetTracker::normalize() FloatGrid::Ptr grid = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, width); tools::dilateActiveValues(grid->tree(), int(new_width-width), tools::NN_FACE, tools::IGNORE_TILES); tools::changeLevelSetBackground(grid->tree(), new_width); std::cout << "Diagnostics:\n" << tools::checkLevelSet(*grid, 9) << std::endl; //std::cerr << "Number of active tiles = " << grid->tree().activeTileCount() << std::endl; //grid->print(std::cout, 3); tools::LevelSetTracker<FloatGrid> track(*grid); track.setNormCount(int(new_width/0.3f));//CFL is 1/3 for RK1 track.setSpatialScheme(math::FIRST_BIAS); track.setTemporalScheme(math::TVD_RK1); #ifdef TIMING_FAST_SWEEPING util::CpuTimer timer("\nConventional re-normalization"); #endif track.normalize(); #ifdef TIMING_FAST_SWEEPING timer.stop(); #endif std::cout << "Diagnostics:\n" << tools::checkLevelSet(*grid, 9) << std::endl; //this->writeFile("/tmp/old_sdf.vdb", grid); } {// Use new sparse and parallel fast sweeping FloatGrid::Ptr grid = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, width); //this->writeFile("/tmp/original_sdf.vdb", grid); #ifdef TIMING_FAST_SWEEPING util::CpuTimer timer("\nParallel sparse fast sweeping"); #endif auto grid2 = tools::dilateSdf(*grid, int(new_width - width), tools::NN_FACE_EDGE); //tools::FastSweeping<FloatGrid> fs(*grid); //EXPECT_TRUE(fs.sweepingVoxelCount() > 0); //tbb::task_scheduler_init init(4);//thread count //fs.sweep(); #ifdef TIMING_FAST_SWEEPING timer.stop(); #endif //std::cout << "Diagnostics:\n" << tools::checkLevelSet(*grid, 9) << std::endl; //this->writeFile("/tmp/new_sdf.vdb", grid2); } } #endif TEST_F(TestFastSweeping, testIntersection) { using namespace openvdb; const Coord ijk(1,4,-9); FloatGrid grid(0.0f); auto acc = grid.getAccessor(); math::GradStencil<FloatGrid> stencil(grid); acc.setValue(ijk,-1.0f); int cases = 0; for (int mx=0; mx<2; ++mx) { acc.setValue(ijk.offsetBy(-1,0,0), mx ? 1.0f : -1.0f); for (int px=0; px<2; ++px) { acc.setValue(ijk.offsetBy(1,0,0), px ? 1.0f : -1.0f); for (int my=0; my<2; ++my) { acc.setValue(ijk.offsetBy(0,-1,0), my ? 1.0f : -1.0f); for (int py=0; py<2; ++py) { acc.setValue(ijk.offsetBy(0,1,0), py ? 1.0f : -1.0f); for (int mz=0; mz<2; ++mz) { acc.setValue(ijk.offsetBy(0,0,-1), mz ? 1.0f : -1.0f); for (int pz=0; pz<2; ++pz) { acc.setValue(ijk.offsetBy(0,0,1), pz ? 
1.0f : -1.0f); ++cases; EXPECT_EQ(Index64(7), grid.activeVoxelCount()); stencil.moveTo(ijk); const size_t count = mx + px + my + py + mz + pz;// number of intersections EXPECT_TRUE(stencil.intersects() == (count > 0)); auto mask = stencil.intersectionMask(); EXPECT_TRUE(mask.none() == (count == 0)); EXPECT_TRUE(mask.any() == (count > 0)); EXPECT_EQ(count, mask.count()); EXPECT_TRUE(mask.test(0) == mx); EXPECT_TRUE(mask.test(1) == px); EXPECT_TRUE(mask.test(2) == my); EXPECT_TRUE(mask.test(3) == py); EXPECT_TRUE(mask.test(4) == mz); EXPECT_TRUE(mask.test(5) == pz); }//pz }//mz }//py }//my }//px }//mx EXPECT_EQ(64, cases);// = 2^6 }//testIntersection TEST_F(TestFastSweeping, fogToSdfAndExt) { using namespace openvdb; const float isoValue = 0.5f; const float radius = 100.0f; const float background = 0.0f; const float tolerance = 0.00001f; const Vec3f center(0.0f, 0.0f, 0.0f); const float voxelSize = 1.0f, width = 3.0f;//half width FloatGrid::Ptr grid = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, float(width)); tools::sdfToFogVolume(*grid); EXPECT_TRUE(grid); const float fog[] = {grid->tree().getValue( Coord(102, 0, 0) ), grid->tree().getValue( Coord(101, 0, 0) ), grid->tree().getValue( Coord(100, 0, 0) ), grid->tree().getValue( Coord( 99, 0, 0) ), grid->tree().getValue( Coord( 98, 0, 0) )}; //for (auto v : fog) std::cerr << v << std::endl; EXPECT_TRUE( math::isApproxEqual(fog[0], 0.0f, tolerance) ); EXPECT_TRUE( math::isApproxEqual(fog[1], 0.0f, tolerance) ); EXPECT_TRUE( math::isApproxEqual(fog[2], 0.0f, tolerance) ); EXPECT_TRUE( math::isApproxEqual(fog[3], 1.0f/3.0f, tolerance) ); EXPECT_TRUE( math::isApproxEqual(fog[4], 2.0f/3.0f, tolerance) ); //this->writeFile("/tmp/sphere1_fog_in.vdb", grid); auto op = [radius](const Vec3R &xyz) {return math::Sin(2*3.14*(xyz[0]+xyz[1]+xyz[2])/radius);}; auto grids = tools::fogToSdfAndExt(*grid, op, background, isoValue); const auto sdf1 = grids.first->tree().getValue( Coord(100, 0, 0) ); const auto sdf2 = grids.first->tree().getValue( Coord( 99, 0, 0) ); const auto sdf3 = grids.first->tree().getValue( Coord( 98, 0, 0) ); //std::cerr << "\nsdf1 = " << sdf1 << ", sdf2 = " << sdf2 << ", sdf3 = " << sdf3 << std::endl; EXPECT_TRUE( sdf1 > sdf2 ); EXPECT_TRUE( math::isApproxEqual( sdf2, 0.5f, tolerance) ); EXPECT_TRUE( math::isApproxEqual( sdf3,-0.5f, tolerance) ); const auto ext1 = grids.second->tree().getValue( Coord(100, 0, 0) ); const auto ext2 = grids.second->tree().getValue( Coord( 99, 0, 0) ); const auto ext3 = grids.second->tree().getValue( Coord( 98, 0, 0) ); //std::cerr << "\next1 = " << ext1 << ", ext2 = " << ext2 << ", ext3 = " << ext3 << std::endl; EXPECT_TRUE( math::isApproxEqual(ext1, background, tolerance) ); EXPECT_TRUE( math::isApproxEqual(ext2, ext3, tolerance) ); //this->writeFile("/tmp/sphere1_sdf_out.vdb", grids.first); //this->writeFile("/tmp/sphere1_ext_out.vdb", grids.second); }// fogToSdfAndExt TEST_F(TestFastSweeping, sdfToSdfAndExt) { using namespace openvdb; const float isoValue = 0.0f; const float radius = 100.0f; const float background = 1.234f; const float tolerance = 0.00001f; const Vec3f center(0.0f, 0.0f, 0.0f); const float voxelSize = 1.0f, width = 3.0f;//half width FloatGrid::Ptr lsGrid = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, width); //std::cerr << "\nls(100,0,0) = " << lsGrid->tree().getValue( Coord(100, 0, 0) ) << std::endl; EXPECT_TRUE( math::isApproxEqual(lsGrid->tree().getValue( Coord(100, 0, 0) ), 0.0f, tolerance) ); auto op = [radius](const Vec3R &xyz) {return 
math::Sin(2*3.14*xyz[0]/radius);}; auto grids = tools::sdfToSdfAndExt(*lsGrid, op, background, isoValue); EXPECT_TRUE(grids.first); EXPECT_TRUE(grids.second); //std::cerr << "\nsdf = " << grids.first->tree().getValue( Coord(100, 0, 0) ) << std::endl; EXPECT_TRUE( math::isApproxEqual(grids.first->tree().getValue( Coord(100, 0, 0) ), 0.0f, tolerance) ); //std::cerr << "\nBackground = " << grids.second->background() << std::endl; //std::cerr << "\nBackground = " << grids.second->tree().getValue( Coord(10000) ) << std::endl; EXPECT_TRUE( math::isApproxEqual(grids.second->background(), background, tolerance) ); const auto sdf1 = grids.first->tree().getValue( Coord(100, 0, 0) ); const auto sdf2 = grids.first->tree().getValue( Coord(102, 0, 0) ); const auto sdf3 = grids.first->tree().getValue( Coord(102, 1, 1) ); //std::cerr << "\nsdf1 = " << sdf1 << ", sdf2 = " << sdf2 << ", sdf3 = " << sdf3 << std::endl; EXPECT_TRUE( math::isApproxEqual( sdf1, 0.0f, tolerance) ); EXPECT_TRUE( math::isApproxEqual( sdf2, 2.0f, tolerance) ); EXPECT_TRUE( sdf3 > 2.0f ); const auto ext1 = grids.second->tree().getValue( Coord(100, 0, 0) ); const auto ext2 = grids.second->tree().getValue( Coord(102, 0, 0) ); const auto ext3 = grids.second->tree().getValue( Coord(102, 1, 0) ); //std::cerr << "\next1 = " << ext1 << ", ext2 = " << ext2 << ", ext3 = " << ext3 << std::endl; EXPECT_TRUE( math::isApproxEqual(float(op(Vec3R(100, 0, 0))), ext1, tolerance) ); EXPECT_TRUE( math::isApproxEqual(ext1, ext2, tolerance) ); EXPECT_TRUE(!math::isApproxEqual(ext1, ext3, tolerance) ); //writeFile("/tmp/sphere2_sdf_out.vdb", grids.first); //writeFile("/tmp/sphere2_ext_out.vdb", grids.second); }// sdfToSdfAndExt TEST_F(TestFastSweeping, sdfToSdfAndExt_velocity) { using namespace openvdb; const float isoValue = 0.0f; const float radius = 100.0f; const Vec3f background(-1.0f, 2.0f, 1.234f); const float tolerance = 0.00001f; const Vec3f center(0.0f, 0.0f, 0.0f); const float voxelSize = 1.0f, width = 3.0f;//half width FloatGrid::Ptr lsGrid = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, width); //std::cerr << "\nls(100,0,0) = " << lsGrid->tree().getValue( Coord(100, 0, 0) ) << std::endl; EXPECT_TRUE( math::isApproxEqual(lsGrid->tree().getValue( Coord(100, 0, 0) ), 0.0f, tolerance) ); //tools::sdfToFogVolume(*grid); //writeFile("/tmp/sphere1_fog_in.vdb", grid); //tools::fogToSdf(*grid, isoValue); // Vector valued extension field, e.g. 
a velocity field auto op = [radius](const Vec3R &xyz) { return Vec3f(float(xyz[0]), float(-xyz[1]), float(math::Sin(2*3.14*xyz[2]/radius))); }; auto grids = tools::sdfToSdfAndExt(*lsGrid, op, background, isoValue); EXPECT_TRUE(grids.first); EXPECT_TRUE(grids.second); //std::cerr << "\nBackground = " << grids.second->background() << std::endl; //std::cerr << "\nBackground = " << grids.second->tree().getValue( Coord(10000) ) << std::endl; EXPECT_TRUE( math::isApproxZero((grids.second->background()-background).length(), tolerance) ); //std::cerr << "\nsdf = " << grids.first->tree().getValue( Coord(100, 0, 0) ) << std::endl; EXPECT_TRUE( math::isApproxEqual(grids.first->tree().getValue( Coord(100, 0, 0) ), 0.0f, tolerance) ); const auto sdf1 = grids.first->tree().getValue( Coord(100, 0, 0) ); const auto sdf2 = grids.first->tree().getValue( Coord(102, 0, 0) ); const auto sdf3 = grids.first->tree().getValue( Coord(102, 1, 1) ); //std::cerr << "\nsdf1 = " << sdf1 << ", sdf2 = " << sdf2 << ", sdf3 = " << sdf3 << std::endl; EXPECT_TRUE( math::isApproxEqual( sdf1, 0.0f, tolerance) ); EXPECT_TRUE( math::isApproxEqual( sdf2, 2.0f, tolerance) ); EXPECT_TRUE( sdf3 > 2.0f ); const auto ext1 = grids.second->tree().getValue( Coord(100, 0, 0) ); const auto ext2 = grids.second->tree().getValue( Coord(102, 0, 0) ); const auto ext3 = grids.second->tree().getValue( Coord(102, 1, 0) ); //std::cerr << "\next1 = " << ext1 << ", ext2 = " << ext2 << ", ext3 = " << ext3 << std::endl; EXPECT_TRUE( math::isApproxZero((op(Vec3R(100, 0, 0)) - ext1).length(), tolerance) ); EXPECT_TRUE( math::isApproxZero((ext1 - ext2).length(), tolerance) ); EXPECT_TRUE(!math::isApproxZero((ext1 - ext3).length(), tolerance) ); //writeFile("/tmp/sphere2_sdf_out.vdb", grids.first); //writeFile("/tmp/sphere2_ext_out.vdb", grids.second); }// sdfToSdfAndExt_velocity #ifdef TestFastSweeping_DATA_PATH TEST_F(TestFastSweeping, velocityExtensionOfFogBunny) { using namespace openvdb; openvdb::initialize();//required whenever I/O of OpenVDB files is performed! 
const std::string path(TestFastSweeping_DATA_PATH); io::File file( path + "bunny.vdb" ); file.open(false);//disable delayed loading auto grid = openvdb::gridPtrCast<openvdb::FloatGrid>(file.getGrids()->at(0)); tools::sdfToFogVolume(*grid); writeFile("/tmp/bunny1_fog_in.vdb", grid); auto bbox = grid->evalActiveVoxelBoundingBox(); const double xSize = bbox.dim()[0]*grid->voxelSize()[0]; std::cerr << "\ndim=" << bbox.dim() << ", voxelSize="<< grid->voxelSize()[0] << ", xSize=" << xSize << std::endl; auto op = [xSize](const Vec3R &xyz) { return math::Sin(2*3.14*xyz[0]/xSize); }; auto grids = tools::fogToSdfAndExt(*grid, op, 0.0f, 0.5f); std::cerr << "before writing" << std::endl; writeFile("/tmp/bunny1_sdf_out.vdb", grids.first); writeFile("/tmp/bunny1_ext_out.vdb", grids.second); std::cerr << "after writing" << std::endl; }//velocityExtensionOfFogBunnyevalActiveVoxelBoundingBox TEST_F(TestFastSweeping, velocityExtensionOfSdfBunny) { using namespace openvdb; const std::string path(TestFastSweeping_DATA_PATH); io::File file( path + "bunny.vdb" ); file.open(false);//disable delayed loading auto grid = openvdb::gridPtrCast<openvdb::FloatGrid>(file.getGrids()->at(0)); writeFile("/tmp/bunny2_sdf_in.vdb", grid); auto bbox = grid->evalActiveVoxelBoundingBox(); const double xSize = bbox.dim()[0]*grid->voxelSize()[0]; std::cerr << "\ndim=" << bbox.dim() << ", voxelSize="<< grid->voxelSize()[0] << ", xSize=" << xSize << std::endl; auto op = [xSize](const Vec3R &xyz) { return math::Sin(2*3.14*xyz[0]/xSize); }; auto grids = tools::sdfToSdfAndExt(*grid, op, 0.0f); std::cerr << "before writing" << std::endl; writeFile("/tmp/bunny2_sdf_out.vdb", grids.first); writeFile("/tmp/bunny2_ext_out.vdb", grids.second); std::cerr << "after writing" << std::endl; }//velocityExtensionOfFogBunnyevalActiveVoxelBoundingBox #endif
31,124
C++
47.70892
111
0.59414
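The dilateSignedDistance case above captures the core FastSweeping workflow: initialize the solver from an existing narrow-band SDF, sweep, and fetch the widened grid. A hedged sketch of that workflow follows; the sphere parameters and dilation count are example values, not those used by the test.

// Hedged sketch: widening a narrow-band level set with tools::FastSweeping.
#include <openvdb/openvdb.h>
#include <openvdb/tools/FastSweeping.h>
#include <openvdb/tools/LevelSetSphere.h>
#include <iostream>

int main()
{
    openvdb::initialize();

    // Narrow-band sphere: radius 50, voxel size 1, half-width 3 voxels.
    openvdb::FloatGrid::Ptr sphere =
        openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
            /*radius=*/50.0f, /*center=*/openvdb::Vec3f(0.0f, 0.0f, 0.0f),
            /*voxelSize=*/1.0f, /*halfWidth=*/3.0f);

    // Re-solve the eikonal equation in a band that is 10 voxels wider.
    openvdb::tools::FastSweeping<openvdb::FloatGrid> fs;
    fs.initDilate(*sphere, /*dilation=*/10);
    fs.sweep();
    openvdb::FloatGrid::Ptr wide = fs.sdfGrid();

    std::cout << "active voxels: " << wide->activeVoxelCount() << std::endl;
    return 0;
}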