Dataset column schema (column name: dtype, observed range):
file_path: string, lengths 21 to 202
content: string, lengths 12 to 1.02M
size: int64, 12 to 1.02M
lang: string, 9 classes
avg_line_length: float64, 3.33 to 100
max_line_length: int64, 10 to 993
alphanum_fraction: float64, 0.27 to 0.93
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestConjGradient.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include "gtest/gtest.h"

#include <openvdb/openvdb.h>
#include <openvdb/version.h>
#include <openvdb/math/ConjGradient.h>


class TestConjGradient: public ::testing::Test
{
};


////////////////////////////////////////


TEST_F(TestConjGradient, testJacobi)
{
    using namespace openvdb;

    typedef math::pcg::SparseStencilMatrix<double, 7> MatrixType;

    const math::pcg::SizeType rows = 5;

    MatrixType A(rows);
    A.setValue(0, 0, 24.0);
    A.setValue(0, 2, 6.0);
    A.setValue(1, 1, 8.0);
    A.setValue(1, 2, 2.0);
    A.setValue(2, 0, 6.0);
    A.setValue(2, 1, 2.0);
    A.setValue(2, 2, 8.0);
    A.setValue(2, 3, -6.0);
    A.setValue(2, 4, 2.0);
    A.setValue(3, 2, -6.0);
    A.setValue(3, 3, 24.0);
    A.setValue(4, 2, 2.0);
    A.setValue(4, 4, 8.0);

    EXPECT_TRUE(A.isFinite());

    MatrixType::VectorType
        x(rows, 0.0),
        b(rows, 1.0),
        expected(rows);

    expected[0] = 0.0104167;
    expected[1] = 0.09375;
    expected[2] = 0.125;
    expected[3] = 0.0729167;
    expected[4] = 0.09375;

    math::pcg::JacobiPreconditioner<MatrixType> precond(A);

    // Solve A * x = b for x.
    math::pcg::State result = math::pcg::solve(
        A, b, x, precond, math::pcg::terminationDefaults<double>());

    EXPECT_TRUE(result.success);
    EXPECT_TRUE(result.iterations <= 20);
    EXPECT_TRUE(x.eq(expected, 1.0e-5));
}


TEST_F(TestConjGradient, testIncompleteCholesky)
{
    using namespace openvdb;

    typedef math::pcg::SparseStencilMatrix<double, 7> MatrixType;
    typedef math::pcg::IncompleteCholeskyPreconditioner<MatrixType> CholeskyPrecond;

    const math::pcg::SizeType rows = 5;

    MatrixType A(5);
    A.setValue(0, 0, 24.0);
    A.setValue(0, 2, 6.0);
    A.setValue(1, 1, 8.0);
    A.setValue(1, 2, 2.0);
    A.setValue(2, 0, 6.0);
    A.setValue(2, 1, 2.0);
    A.setValue(2, 2, 8.0);
    A.setValue(2, 3, -6.0);
    A.setValue(2, 4, 2.0);
    A.setValue(3, 2, -6.0);
    A.setValue(3, 3, 24.0);
    A.setValue(4, 2, 2.0);
    A.setValue(4, 4, 8.0);

    EXPECT_TRUE(A.isFinite());

    CholeskyPrecond precond(A);
    {
        const CholeskyPrecond::TriangularMatrix lower = precond.lowerMatrix();

        CholeskyPrecond::TriangularMatrix expected(5);
        expected.setValue(0, 0, 4.89898);
        expected.setValue(1, 1, 2.82843);
        expected.setValue(2, 0, 1.22474);
        expected.setValue(2, 1, 0.707107);
        expected.setValue(2, 2, 2.44949);
        expected.setValue(3, 2, -2.44949);
        expected.setValue(3, 3, 4.24264);
        expected.setValue(4, 2, 0.816497);
        expected.setValue(4, 4, 2.70801);

#if 0
        std::cout << "Expected:\n";
        for (int i = 0; i < 5; ++i) {
            std::cout << " " << expected.getConstRow(i).str() << std::endl;
        }
        std::cout << "Actual:\n";
        for (int i = 0; i < 5; ++i) {
            std::cout << " " << lower.getConstRow(i).str() << std::endl;
        }
#endif

        EXPECT_TRUE(lower.eq(expected, 1.0e-5));
    }
    {
        const CholeskyPrecond::TriangularMatrix upper = precond.upperMatrix();

        CholeskyPrecond::TriangularMatrix expected(5);
        {
            expected.setValue(0, 0, 4.89898);
            expected.setValue(0, 2, 1.22474);
            expected.setValue(1, 1, 2.82843);
            expected.setValue(1, 2, 0.707107);
            expected.setValue(2, 2, 2.44949);
            expected.setValue(2, 3, -2.44949);
            expected.setValue(2, 4, 0.816497);
            expected.setValue(3, 3, 4.24264);
            expected.setValue(4, 4, 2.70801);
        }

#if 0
        std::cout << "Expected:\n";
        for (int i = 0; i < 5; ++i) {
            std::cout << " " << expected.getConstRow(i).str() << std::endl;
        }
        std::cout << "Actual:\n";
        for (int i = 0; i < 5; ++i) {
            std::cout << " " << upper.getConstRow(i).str() << std::endl;
        }
#endif

        EXPECT_TRUE(upper.eq(expected, 1.0e-5));
    }

    MatrixType::VectorType
        x(rows, 0.0),
        b(rows, 1.0),
        expected(rows);

    expected[0] = 0.0104167;
    expected[1] = 0.09375;
    expected[2] = 0.125;
    expected[3] = 0.0729167;
    expected[4] = 0.09375;

    // Solve A * x = b for x.
    math::pcg::State result = math::pcg::solve(
        A, b, x, precond, math::pcg::terminationDefaults<double>());

    EXPECT_TRUE(result.success);
    EXPECT_TRUE(result.iterations <= 20);
    EXPECT_TRUE(x.eq(expected, 1.0e-5));
}


TEST_F(TestConjGradient, testVectorDotProduct)
{
    using namespace openvdb;

    typedef math::pcg::Vector<double> VectorType;

    // Test small vector - runs in series
    {
        const size_t length = 1000;
        VectorType aVec(length, 2.0);
        VectorType bVec(length, 3.0);

        VectorType::ValueType result = aVec.dot(bVec);

        EXPECT_NEAR(result, 6.0 * length, 1.0e-7);
    }
    // Test long vector - runs in parallel
    {
        const size_t length = 10034502;
        VectorType aVec(length, 2.0);
        VectorType bVec(length, 3.0);

        VectorType::ValueType result = aVec.dot(bVec);

        EXPECT_NEAR(result, 6.0 * length, 1.0e-7);
    }
}
size: 5,279 | lang: C++ | avg_line_length: 25.80203 | max_line_length: 84 | alphanum_fraction: 0.557303
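Usage note (not part of the dataset record above): a minimal standalone sketch of the preconditioned conjugate-gradient API that TestConjGradient.cc exercises. It assembles the same 5x5 SparseStencilMatrix and solves A * x = b with a Jacobi preconditioner; only calls that appear in the test are used, while the main() wrapper and the printed summary are illustrative assumptions.

// Hedged sketch: solve A * x = b with OpenVDB's preconditioned conjugate gradient,
// using only the openvdb::math::pcg calls exercised in TestConjGradient.cc.
#include <openvdb/openvdb.h>
#include <openvdb/math/ConjGradient.h>
#include <iostream>

int main()
{
    using namespace openvdb;
    typedef math::pcg::SparseStencilMatrix<double, 7> MatrixType;

    const math::pcg::SizeType rows = 5;
    MatrixType A(rows);                    // symmetric, positive-definite test matrix
    A.setValue(0, 0, 24.0);  A.setValue(0, 2,  6.0);
    A.setValue(1, 1,  8.0);  A.setValue(1, 2,  2.0);
    A.setValue(2, 0,  6.0);  A.setValue(2, 1,  2.0);
    A.setValue(2, 2,  8.0);  A.setValue(2, 3, -6.0);  A.setValue(2, 4, 2.0);
    A.setValue(3, 2, -6.0);  A.setValue(3, 3, 24.0);
    A.setValue(4, 2,  2.0);  A.setValue(4, 4,  8.0);

    MatrixType::VectorType x(rows, 0.0), b(rows, 1.0);

    math::pcg::JacobiPreconditioner<MatrixType> precond(A);

    // Solve A * x = b for x, using the library's default termination criteria.
    math::pcg::State result = math::pcg::solve(
        A, b, x, precond, math::pcg::terminationDefaults<double>());

    std::cout << "converged: " << result.success
              << " after " << result.iterations << " iterations\n";
    return result.success ? 0 : 1;
}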
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestIndexIterator.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/points/IndexIterator.h> #include <openvdb/Types.h> #include <openvdb/tree/LeafNode.h> #include <sstream> #include <iostream> #include <tbb/tick_count.h> #include <iomanip>//for setprecision using namespace openvdb; using namespace openvdb::points; class TestIndexIterator: public ::testing::Test { }; // class TestIndexIterator //////////////////////////////////////// /// @brief Functionality similar to openvdb::util::CpuTimer except with prefix padding and no decimals. /// /// @code /// ProfileTimer timer("algorithm 1"); /// // code to be timed goes here /// timer.stop(); /// @endcode class ProfileTimer { public: /// @brief Prints message and starts timer. /// /// @note Should normally be followed by a call to stop() ProfileTimer(const std::string& msg) { (void)msg; #ifdef PROFILE // padd string to 50 characters std::string newMsg(msg); if (newMsg.size() < 50) newMsg.insert(newMsg.end(), 50 - newMsg.size(), ' '); std::cerr << newMsg << " ... "; #endif mT0 = tbb::tick_count::now(); } ~ProfileTimer() { this->stop(); } /// Return Time diference in milliseconds since construction or start was called. inline double delta() const { tbb::tick_count::interval_t dt = tbb::tick_count::now() - mT0; return 1000.0*dt.seconds(); } /// @brief Print time in milliseconds since construction or start was called. inline void stop() const { #ifdef PROFILE std::stringstream ss; ss << std::setw(6) << ::round(this->delta()); std::cerr << "completed in " << ss.str() << " ms\n"; #endif } private: tbb::tick_count mT0; };// ProfileTimer //////////////////////////////////////// TEST_F(TestIndexIterator, testNullFilter) { NullFilter filter; EXPECT_TRUE(filter.initialized()); EXPECT_TRUE(filter.state() == index::ALL); int a; EXPECT_TRUE(filter.valid(a)); } TEST_F(TestIndexIterator, testValueIndexIterator) { using namespace openvdb::tree; using LeafNode = LeafNode<unsigned, 1>; using ValueOnIter = LeafNode::ValueOnIter; const int size = LeafNode::SIZE; { // one per voxel offset, all active LeafNode leafNode; for (int i = 0; i < size; i++) { leafNode.setValueOn(i, i+1); } ValueOnIter valueIter = leafNode.beginValueOn(); IndexIter<ValueOnIter, NullFilter>::ValueIndexIter iter(valueIter); EXPECT_TRUE(iter); EXPECT_EQ(iterCount(iter), Index64(size)); // check assignment operator auto iter2 = iter; EXPECT_EQ(iterCount(iter2), Index64(size)); ++iter; // check coord value Coord xyz; iter.getCoord(xyz); EXPECT_EQ(xyz, openvdb::Coord(0, 0, 1)); EXPECT_EQ(iter.getCoord(), openvdb::Coord(0, 0, 1)); // check iterators retrieval EXPECT_EQ(iter.valueIter().getCoord(), openvdb::Coord(0, 0, 1)); EXPECT_EQ(iter.end(), Index32(2)); ++iter; // check coord value iter.getCoord(xyz); EXPECT_EQ(xyz, openvdb::Coord(0, 1, 0)); EXPECT_EQ(iter.getCoord(), openvdb::Coord(0, 1, 0)); // check iterators retrieval EXPECT_EQ(iter.valueIter().getCoord(), openvdb::Coord(0, 1, 0)); EXPECT_EQ(iter.end(), Index32(3)); } { // one per even voxel offsets, only these active LeafNode leafNode; int offset = 0; for (int i = 0; i < size; i++) { if ((i % 2) == 0) { leafNode.setValueOn(i, ++offset); } else { leafNode.setValueOff(i, offset); } } { ValueOnIter valueIter = leafNode.beginValueOn(); IndexIter<ValueOnIter, NullFilter>::ValueIndexIter iter(valueIter); EXPECT_TRUE(iter); EXPECT_EQ(iterCount(iter), Index64(size/2)); } } { // one per odd voxel offsets, all active LeafNode leafNode; int offset = 0; for (int i = 0; i < size; i++) { if ((i 
% 2) == 1) { leafNode.setValueOn(i, offset++); } else { leafNode.setValueOn(i, offset); } } { ValueOnIter valueIter = leafNode.beginValueOn(); IndexIter<ValueOnIter, NullFilter>::ValueIndexIter iter(valueIter); EXPECT_TRUE(iter); EXPECT_EQ(iterCount(iter), Index64(3)); } } { // one per even voxel offsets, all active LeafNode leafNode; int offset = 0; for (int i = 0; i < size; i++) { if ((i % 2) == 0) { leafNode.setValueOn(i, offset++); } else { leafNode.setValueOn(i, offset); } } { ValueOnIter valueIter = leafNode.beginValueOn(); IndexIter<ValueOnIter, NullFilter>::ValueIndexIter iter(valueIter); EXPECT_TRUE(iter); EXPECT_EQ(iterCount(iter), Index64(size/2)); } } { // one per voxel offset, none active LeafNode leafNode; for (int i = 0; i < size; i++) { leafNode.setValueOff(i, i); } ValueOnIter valueIter = leafNode.beginValueOn(); IndexIter<ValueOnIter, NullFilter>::ValueIndexIter iter(valueIter); EXPECT_TRUE(!iter); EXPECT_EQ(iterCount(iter), Index64(0)); } } struct EvenIndexFilter { static bool initialized() { return true; } static bool all() { return false; } static bool none() { return false; } template <typename IterT> bool valid(const IterT& iter) const { return ((*iter) % 2) == 0; } }; struct OddIndexFilter { static bool initialized() { return true; } static bool all() { return false; } static bool none() { return false; } OddIndexFilter() : mFilter() { } template <typename IterT> bool valid(const IterT& iter) const { return !mFilter.valid(iter); } private: EvenIndexFilter mFilter; }; struct ConstantIter { ConstantIter(const int _value) : value(_value) { } int operator*() const { return value; } const int value; }; TEST_F(TestIndexIterator, testFilterIndexIterator) { { // index iterator with even filter EvenIndexFilter filter; ValueVoxelCIter indexIter(0, 5); IndexIter<ValueVoxelCIter, EvenIndexFilter> iter(indexIter, filter); EXPECT_TRUE(iter); EXPECT_EQ(*iter, Index32(0)); EXPECT_TRUE(iter.next()); EXPECT_EQ(*iter, Index32(2)); EXPECT_TRUE(iter.next()); EXPECT_EQ(*iter, Index32(4)); EXPECT_TRUE(!iter.next()); EXPECT_EQ(iter.end(), Index32(5)); EXPECT_EQ(filter.valid(ConstantIter(1)), iter.filter().valid(ConstantIter(1))); EXPECT_EQ(filter.valid(ConstantIter(2)), iter.filter().valid(ConstantIter(2))); } { // index iterator with odd filter OddIndexFilter filter; ValueVoxelCIter indexIter(0, 5); IndexIter<ValueVoxelCIter, OddIndexFilter> iter(indexIter, filter); EXPECT_EQ(*iter, Index32(1)); EXPECT_TRUE(iter.next()); EXPECT_EQ(*iter, Index32(3)); EXPECT_TRUE(!iter.next()); } } TEST_F(TestIndexIterator, testProfile) { using namespace openvdb::util; using namespace openvdb::math; using namespace openvdb::tree; #ifdef PROFILE const int elements(1000 * 1000 * 1000); std::cerr << std::endl; #else const int elements(10 * 1000 * 1000); #endif { // for loop ProfileTimer timer("ForLoop: sum"); volatile int sum = 0; for (int i = 0; i < elements; i++) { sum += i; } EXPECT_TRUE(sum); } { // index iterator ProfileTimer timer("IndexIter: sum"); volatile int sum = 0; ValueVoxelCIter iter(0, elements); for (; iter; ++iter) { sum += *iter; } EXPECT_TRUE(sum); } using LeafNode = LeafNode<unsigned, 3>; LeafNode leafNode; const int size = LeafNode::SIZE; for (int i = 0; i < size - 1; i++) { leafNode.setValueOn(i, (elements / size) * i); } leafNode.setValueOn(size - 1, elements); { // manual value iteration ProfileTimer timer("ValueIteratorManual: sum"); volatile int sum = 0; auto indexIter(leafNode.cbeginValueOn()); int offset = 0; for (; indexIter; ++indexIter) { int start = offset > 0 ? 
leafNode.getValue(offset - 1) : 0; int end = leafNode.getValue(offset); for (int i = start; i < end; i++) { sum += i; } offset++; } EXPECT_TRUE(sum); } { // value on iterator (all on) ProfileTimer timer("ValueIndexIter: sum"); volatile int sum = 0; auto indexIter(leafNode.cbeginValueAll()); IndexIter<LeafNode::ValueAllCIter, NullFilter>::ValueIndexIter iter(indexIter); for (; iter; ++iter) { sum += *iter; } EXPECT_TRUE(sum); } }
size: 9,371 | lang: C++ | avg_line_length: 23.793651 | max_line_length: 103 | alphanum_fraction: 0.561413
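Usage note (not part of the dataset record above): a minimal sketch of the filtered index-iteration pattern exercised by TestIndexIterator.cc. The filter mirrors the interface of the test's EvenIndexFilter (initialized/all/none/valid), and ValueVoxelCIter, IndexIter, and the iteration idiom are taken from the test; the filter name, index range, and main() wrapper are illustrative assumptions.

// Hedged sketch: iterate point indices through a user-defined filter,
// mirroring the EvenIndexFilter pattern in TestIndexIterator.cc.
#include <openvdb/points/IndexIterator.h>
#include <openvdb/Types.h>
#include <iostream>

// Keeps only indices divisible by three (illustrative filter).
struct EveryThirdIndexFilter
{
    static bool initialized() { return true; }
    static bool all() { return false; }
    static bool none() { return false; }
    template <typename IterT>
    bool valid(const IterT& iter) const { return ((*iter) % 3) == 0; }
};

int main()
{
    using namespace openvdb::points;

    // Indices [0, 9) within a single voxel, as in testFilterIndexIterator.
    ValueVoxelCIter indexIter(0, 9);
    EveryThirdIndexFilter filter;
    IndexIter<ValueVoxelCIter, EveryThirdIndexFilter> iter(indexIter, filter);

    for (; iter; ++iter) {
        std::cout << *iter << '\n';   // prints 0, 3, 6
    }
    return 0;
}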
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestVolumeRayIntersector.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file unittest/TestVolumeRayIntersector.cc /// @author Ken Museth #include <openvdb/openvdb.h> #include <openvdb/math/Ray.h> #include <openvdb/Types.h> #include <openvdb/math/Transform.h> #include <openvdb/tools/LevelSetSphere.h> #include <openvdb/tools/RayIntersector.h> #include "gtest/gtest.h" #include <cassert> #include <deque> #include <iostream> #include <vector> #define ASSERT_DOUBLES_APPROX_EQUAL(expected, actual) \ EXPECT_NEAR((expected), (actual), /*tolerance=*/1.e-6); class TestVolumeRayIntersector : public ::testing::Test { }; TEST_F(TestVolumeRayIntersector, testAll) { using namespace openvdb; typedef math::Ray<double> RayT; typedef RayT::Vec3Type Vec3T; {//one single leaf node FloatGrid grid(0.0f); grid.tree().setValue(Coord(0,0,0), 1.0f); grid.tree().setValue(Coord(7,7,7), 1.0f); const Vec3T dir( 1.0, 0.0, 0.0); const Vec3T eye(-1.0, 0.0, 0.0); const RayT ray(eye, dir);//ray in index space tools::VolumeRayIntersector<FloatGrid> inter(grid); EXPECT_TRUE(inter.setIndexRay(ray)); double t0=0, t1=0; EXPECT_TRUE(inter.march(t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL( 1.0, t0); ASSERT_DOUBLES_APPROX_EQUAL( 9.0, t1); EXPECT_TRUE(!inter.march(t0, t1)); } {//same as above but with dilation FloatGrid grid(0.0f); grid.tree().setValue(Coord(0,0,0), 1.0f); grid.tree().setValue(Coord(7,7,7), 1.0f); const Vec3T dir( 1.0, 0.0, 0.0); const Vec3T eye(-1.0, 0.0, 0.0); const RayT ray(eye, dir);//ray in index space tools::VolumeRayIntersector<FloatGrid> inter(grid, 1); EXPECT_TRUE(inter.setIndexRay(ray)); double t0=0, t1=0; EXPECT_TRUE(inter.march(t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL( 0.0, t0); ASSERT_DOUBLES_APPROX_EQUAL(17.0, t1); EXPECT_TRUE(!inter.march(t0, t1)); } {//one single leaf node FloatGrid grid(0.0f); grid.tree().setValue(Coord(1,1,1), 1.0f); grid.tree().setValue(Coord(7,3,3), 1.0f); const Vec3T dir( 1.0, 0.0, 0.0); const Vec3T eye(-1.0, 0.0, 0.0); const RayT ray(eye, dir);//ray in index space tools::VolumeRayIntersector<FloatGrid> inter(grid); EXPECT_TRUE(inter.setIndexRay(ray)); double t0=0, t1=0; EXPECT_TRUE(inter.march(t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL( 1.0, t0); ASSERT_DOUBLES_APPROX_EQUAL( 9.0, t1); EXPECT_TRUE(!inter.march(t0, t1)); } {//same as above but with dilation FloatGrid grid(0.0f); grid.tree().setValue(Coord(1,1,1), 1.0f); grid.tree().setValue(Coord(7,3,3), 1.0f); const Vec3T dir( 1.0, 0.0, 0.0); const Vec3T eye(-1.0, 0.0, 0.0); const RayT ray(eye, dir);//ray in index space tools::VolumeRayIntersector<FloatGrid> inter(grid, 1); EXPECT_TRUE(inter.setIndexRay(ray)); double t0=0, t1=0; EXPECT_TRUE(inter.march(t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL( 1.0, t0); ASSERT_DOUBLES_APPROX_EQUAL(17.0, t1); EXPECT_TRUE(!inter.march(t0, t1)); } {//two adjacent leaf nodes FloatGrid grid(0.0f); grid.tree().setValue(Coord(0,0,0), 1.0f); grid.tree().setValue(Coord(8,0,0), 1.0f); grid.tree().setValue(Coord(15,7,7), 1.0f); const Vec3T dir( 1.0, 0.0, 0.0); const Vec3T eye(-1.0, 0.0, 0.0); const RayT ray(eye, dir);//ray in index space tools::VolumeRayIntersector<FloatGrid> inter(grid); EXPECT_TRUE(inter.setIndexRay(ray)); double t0=0, t1=0; EXPECT_TRUE(inter.march(t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL( 1.0, t0); ASSERT_DOUBLES_APPROX_EQUAL(17.0, t1); EXPECT_TRUE(!inter.march(t0, t1)); } {//two adjacent leafs followed by a gab and leaf FloatGrid grid(0.0f); grid.tree().setValue(Coord(0*8,0,0), 1.0f); grid.tree().setValue(Coord(1*8,0,0), 1.0f); grid.tree().setValue(Coord(3*8,0,0), 1.0f); 
grid.tree().setValue(Coord(3*8+7,7,7), 1.0f); const Vec3T dir( 1.0, 0.0, 0.0); const Vec3T eye(-1.0, 0.0, 0.0); const RayT ray(eye, dir);//ray in index space tools::VolumeRayIntersector<FloatGrid> inter(grid); EXPECT_TRUE(inter.setIndexRay(ray)); double t0=0, t1=0; EXPECT_TRUE(inter.march(t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL( 1.0, t0); ASSERT_DOUBLES_APPROX_EQUAL(17.0, t1); EXPECT_TRUE(inter.march(t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL(25.0, t0); ASSERT_DOUBLES_APPROX_EQUAL(33.0, t1); EXPECT_TRUE(!inter.march(t0, t1)); } {//two adjacent leafs followed by a gab, a leaf and an active tile FloatGrid grid(0.0f); grid.tree().setValue(Coord(0*8,0,0), 1.0f); grid.tree().setValue(Coord(1*8,0,0), 1.0f); grid.tree().setValue(Coord(3*8,0,0), 1.0f); grid.fill(CoordBBox(Coord(4*8,0,0), Coord(4*8+7,7,7)), 2.0f, true); const Vec3T dir( 1.0, 0.0, 0.0); const Vec3T eye(-1.0, 0.0, 0.0); const RayT ray(eye, dir);//ray in index space tools::VolumeRayIntersector<FloatGrid> inter(grid); EXPECT_TRUE(inter.setIndexRay(ray)); double t0=0, t1=0; EXPECT_TRUE(inter.march(t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL( 1.0, t0); ASSERT_DOUBLES_APPROX_EQUAL(17.0, t1); EXPECT_TRUE(inter.march(t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL(25.0, t0); ASSERT_DOUBLES_APPROX_EQUAL(41.0, t1); EXPECT_TRUE(!inter.march(t0, t1)); } {//two adjacent leafs followed by a gab, a leaf and an active tile FloatGrid grid(0.0f); grid.tree().setValue(Coord(0*8,0,0), 1.0f); grid.tree().setValue(Coord(1*8,0,0), 1.0f); grid.tree().setValue(Coord(3*8,0,0), 1.0f); grid.fill(CoordBBox(Coord(4*8,0,0), Coord(4*8+7,7,7)), 2.0f, true); const Vec3T dir( 1.0, 0.0, 0.0); const Vec3T eye(-1.0, 0.0, 0.0); const RayT ray(eye, dir);//ray in index space tools::VolumeRayIntersector<FloatGrid> inter(grid); EXPECT_TRUE(inter.setIndexRay(ray)); std::vector<RayT::TimeSpan> list; inter.hits(list); EXPECT_TRUE(list.size() == 2); ASSERT_DOUBLES_APPROX_EQUAL( 1.0, list[0].t0); ASSERT_DOUBLES_APPROX_EQUAL(17.0, list[0].t1); ASSERT_DOUBLES_APPROX_EQUAL(25.0, list[1].t0); ASSERT_DOUBLES_APPROX_EQUAL(41.0, list[1].t1); } {//same as above but now with std::deque instead of std::vector FloatGrid grid(0.0f); grid.tree().setValue(Coord(0*8,0,0), 1.0f); grid.tree().setValue(Coord(1*8,0,0), 1.0f); grid.tree().setValue(Coord(3*8,0,0), 1.0f); grid.fill(CoordBBox(Coord(4*8,0,0), Coord(4*8+7,7,7)), 2.0f, true); const Vec3T dir( 1.0, 0.0, 0.0); const Vec3T eye(-1.0, 0.0, 0.0); const RayT ray(eye, dir);//ray in index space tools::VolumeRayIntersector<FloatGrid> inter(grid); EXPECT_TRUE(inter.setIndexRay(ray)); std::deque<RayT::TimeSpan> list; inter.hits(list); EXPECT_TRUE(list.size() == 2); ASSERT_DOUBLES_APPROX_EQUAL( 1.0, list[0].t0); ASSERT_DOUBLES_APPROX_EQUAL(17.0, list[0].t1); ASSERT_DOUBLES_APPROX_EQUAL(25.0, list[1].t0); ASSERT_DOUBLES_APPROX_EQUAL(41.0, list[1].t1); } {// Test submitted by "Jan" @ GitHub FloatGrid grid(0.0f); grid.tree().setValue(Coord(0*8,0,0), 1.0f); grid.tree().setValue(Coord(1*8,0,0), 1.0f); grid.tree().setValue(Coord(3*8,0,0), 1.0f); tools::VolumeRayIntersector<FloatGrid> inter(grid); const Vec3T dir(-1.0, 0.0, 0.0); const Vec3T eye(50.0, 0.0, 0.0); const RayT ray(eye, dir); EXPECT_TRUE(inter.setIndexRay(ray)); double t0=0, t1=0; EXPECT_TRUE(inter.march(t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL(18.0, t0); ASSERT_DOUBLES_APPROX_EQUAL(26.0, t1); EXPECT_TRUE(inter.march(t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL(34.0, t0); ASSERT_DOUBLES_APPROX_EQUAL(50.0, t1); EXPECT_TRUE(!inter.march(t0, t1)); } {// Test submitted by "Trevor" @ GitHub FloatGrid::Ptr grid = 
createGrid<FloatGrid>(0.0f); grid->tree().setValue(Coord(0,0,0), 1.0f); tools::dilateVoxels(grid->tree()); tools::VolumeRayIntersector<FloatGrid> inter(*grid); //std::cerr << "BBox = " << inter.bbox() << std::endl; const Vec3T eye(-0.25, -0.25, 10.0); const Vec3T dir( 0.00, 0.00, -1.0); const RayT ray(eye, dir); EXPECT_TRUE(inter.setIndexRay(ray));// hits bbox double t0=0, t1=0; EXPECT_TRUE(!inter.march(t0, t1));// misses leafs } {// Test submitted by "Trevor" @ GitHub FloatGrid::Ptr grid = createGrid<FloatGrid>(0.0f); grid->tree().setValue(Coord(0,0,0), 1.0f); tools::dilateVoxels(grid->tree()); tools::VolumeRayIntersector<FloatGrid> inter(*grid); //GridPtrVec grids; //grids.push_back(grid); //io::File vdbfile("trevor_v1.vdb"); //vdbfile.write(grids); //std::cerr << "BBox = " << inter.bbox() << std::endl; const Vec3T eye(0.75, 0.75, 10.0); const Vec3T dir( 0.00, 0.00, -1.0); const RayT ray(eye, dir); EXPECT_TRUE(inter.setIndexRay(ray));// hits bbox double t0=0, t1=0; EXPECT_TRUE(inter.march(t0, t1));// misses leafs //std::cerr << "t0=" << t0 << " t1=" << t1 << std::endl; } {// Test derived from the test submitted by "Trevor" @ GitHub FloatGrid grid(0.0f); grid.fill(math::CoordBBox(Coord(-1,-1,-1),Coord(1,1,1)), 1.0f); tools::VolumeRayIntersector<FloatGrid> inter(grid); //std::cerr << "BBox = " << inter.bbox() << std::endl; const Vec3T eye(-0.25, -0.25, 10.0); const Vec3T dir( 0.00, 0.00, -1.0); const RayT ray(eye, dir); EXPECT_TRUE(inter.setIndexRay(ray));// hits bbox double t0=0, t1=0; EXPECT_TRUE(inter.march(t0, t1));// hits leafs //std::cerr << "t0=" << t0 << " t1=" << t1 << std::endl; } }
size: 10,396 | lang: C++ | avg_line_length: 34.363945 | max_line_length: 75 | alphanum_fraction: 0.578011
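Usage note (not part of the dataset record above): a minimal sketch of marching a ray through a grid's active voxels with tools::VolumeRayIntersector, as exercised by TestVolumeRayIntersector.cc. setIndexRay() and march() are the calls used in the test; the grid contents, the loop, and the main() wrapper are illustrative assumptions.

// Hedged sketch: march an index-space ray through the active voxels of a grid,
// using the tools::VolumeRayIntersector calls from TestVolumeRayIntersector.cc.
#include <openvdb/openvdb.h>
#include <openvdb/math/Ray.h>
#include <openvdb/tools/RayIntersector.h>
#include <iostream>

int main()
{
    using namespace openvdb;
    typedef math::Ray<double> RayT;
    typedef RayT::Vec3Type Vec3T;

    openvdb::initialize();

    // One active leaf node spanning voxels (0,0,0) through (7,7,7).
    FloatGrid grid(0.0f);
    grid.tree().setValue(Coord(0, 0, 0), 1.0f);
    grid.tree().setValue(Coord(7, 7, 7), 1.0f);

    // Ray in index space, pointing down the +x axis.
    const Vec3T eye(-1.0, 0.0, 0.0), dir(1.0, 0.0, 0.0);
    const RayT ray(eye, dir);

    tools::VolumeRayIntersector<FloatGrid> inter(grid);
    if (inter.setIndexRay(ray)) {                 // ray hits the grid's bounding box
        double t0 = 0, t1 = 0;
        while (inter.march(t0, t1)) {             // each span of active voxels hit
            std::cout << "active span: [" << t0 << ", " << t1 << "]\n";
        }
    }
    return 0;
}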
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestAttributeGroup.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/points/AttributeArray.h> #include <openvdb/points/AttributeGroup.h> #include <openvdb/points/IndexIterator.h> #include <openvdb/points/IndexFilter.h> #include <openvdb/openvdb.h> #include <iostream> #include <sstream> using namespace openvdb; using namespace openvdb::points; class TestAttributeGroup: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; // class TestAttributeGroup //////////////////////////////////////// namespace { bool matchingNamePairs(const openvdb::NamePair& lhs, const openvdb::NamePair& rhs) { if (lhs.first != rhs.first) return false; if (lhs.second != rhs.second) return false; return true; } } // namespace //////////////////////////////////////// TEST_F(TestAttributeGroup, testAttributeGroup) { { // Typed class API const size_t count = 50; GroupAttributeArray attr(count); EXPECT_TRUE(!attr.isTransient()); EXPECT_TRUE(!attr.isHidden()); EXPECT_TRUE(isGroup(attr)); attr.setTransient(true); EXPECT_TRUE(attr.isTransient()); EXPECT_TRUE(!attr.isHidden()); EXPECT_TRUE(isGroup(attr)); attr.setHidden(true); EXPECT_TRUE(attr.isTransient()); EXPECT_TRUE(attr.isHidden()); EXPECT_TRUE(isGroup(attr)); attr.setTransient(false); EXPECT_TRUE(!attr.isTransient()); EXPECT_TRUE(attr.isHidden()); EXPECT_TRUE(isGroup(attr)); GroupAttributeArray attrB(attr); EXPECT_TRUE(matchingNamePairs(attr.type(), attrB.type())); EXPECT_EQ(attr.size(), attrB.size()); EXPECT_EQ(attr.memUsage(), attrB.memUsage()); EXPECT_EQ(attr.isUniform(), attrB.isUniform()); EXPECT_EQ(attr.isTransient(), attrB.isTransient()); EXPECT_EQ(attr.isHidden(), attrB.isHidden()); EXPECT_EQ(isGroup(attr), isGroup(attrB)); #if OPENVDB_ABI_VERSION_NUMBER >= 6 AttributeArray& baseAttr(attr); EXPECT_EQ(Name(typeNameAsString<GroupType>()), baseAttr.valueType()); EXPECT_EQ(Name("grp"), baseAttr.codecType()); EXPECT_EQ(Index(1), baseAttr.valueTypeSize()); EXPECT_EQ(Index(1), baseAttr.storageTypeSize()); EXPECT_TRUE(!baseAttr.valueTypeIsFloatingPoint()); #endif } { // casting TypedAttributeArray<float> floatAttr(4); AttributeArray& floatArray = floatAttr; const AttributeArray& constFloatArray = floatAttr; EXPECT_THROW(GroupAttributeArray::cast(floatArray), TypeError); EXPECT_THROW(GroupAttributeArray::cast(constFloatArray), TypeError); GroupAttributeArray groupAttr(4); AttributeArray& groupArray = groupAttr; const AttributeArray& constGroupArray = groupAttr; EXPECT_NO_THROW(GroupAttributeArray::cast(groupArray)); EXPECT_NO_THROW(GroupAttributeArray::cast(constGroupArray)); } { // IO const size_t count = 50; GroupAttributeArray attrA(count); for (unsigned i = 0; i < unsigned(count); ++i) { attrA.set(i, int(i)); } attrA.setHidden(true); std::ostringstream ostr(std::ios_base::binary); attrA.write(ostr); GroupAttributeArray attrB; std::istringstream istr(ostr.str(), std::ios_base::binary); attrB.read(istr); EXPECT_TRUE(matchingNamePairs(attrA.type(), attrB.type())); EXPECT_EQ(attrA.size(), attrB.size()); EXPECT_EQ(attrA.memUsage(), attrB.memUsage()); EXPECT_EQ(attrA.isUniform(), attrB.isUniform()); EXPECT_EQ(attrA.isTransient(), attrB.isTransient()); EXPECT_EQ(attrA.isHidden(), attrB.isHidden()); EXPECT_EQ(isGroup(attrA), isGroup(attrB)); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), attrB.get(i)); } } } TEST_F(TestAttributeGroup, testAttributeGroupHandle) { GroupAttributeArray attr(4); GroupHandle 
handle(attr, 3); EXPECT_EQ(handle.size(), Index(4)); EXPECT_EQ(handle.size(), attr.size()); // construct bitmasks const GroupType bitmask3 = GroupType(1) << 3; const GroupType bitmask6 = GroupType(1) << 6; const GroupType bitmask36 = GroupType(1) << 3 | GroupType(1) << 6; // enable attribute 1,2,3 for group permutations of 3 and 6 attr.set(0, 0); attr.set(1, bitmask3); attr.set(2, bitmask6); attr.set(3, bitmask36); EXPECT_TRUE(attr.get(2) != bitmask36); EXPECT_EQ(attr.get(3), bitmask36); { // group 3 valid for attributes 1 and 3 (using specific offset) GroupHandle handle3(attr, 3); EXPECT_TRUE(!handle3.get(0)); EXPECT_TRUE(handle3.get(1)); EXPECT_TRUE(!handle3.get(2)); EXPECT_TRUE(handle3.get(3)); } { // test group 3 valid for attributes 1 and 3 (unsafe access) GroupHandle handle3(attr, 3); EXPECT_TRUE(!handle3.getUnsafe(0)); EXPECT_TRUE(handle3.getUnsafe(1)); EXPECT_TRUE(!handle3.getUnsafe(2)); EXPECT_TRUE(handle3.getUnsafe(3)); } { // group 6 valid for attributes 2 and 3 (using specific offset) GroupHandle handle6(attr, 6); EXPECT_TRUE(!handle6.get(0)); EXPECT_TRUE(!handle6.get(1)); EXPECT_TRUE(handle6.get(2)); EXPECT_TRUE(handle6.get(3)); } { // groups 3 and 6 only valid for attribute 3 (using bitmask) GroupHandle handle36(attr, bitmask36, GroupHandle::BitMask()); EXPECT_TRUE(!handle36.get(0)); EXPECT_TRUE(!handle36.get(1)); EXPECT_TRUE(!handle36.get(2)); EXPECT_TRUE(handle36.get(3)); } // clear the array attr.fill(0); EXPECT_EQ(attr.get(1), GroupType(0)); // write handles GroupWriteHandle writeHandle3(attr, 3); GroupWriteHandle writeHandle6(attr, 6); // test collapse EXPECT_EQ(writeHandle3.get(1), false); EXPECT_EQ(writeHandle6.get(1), false); EXPECT_TRUE(writeHandle6.compact()); EXPECT_TRUE(writeHandle6.isUniform()); attr.expand(); EXPECT_TRUE(!writeHandle6.isUniform()); EXPECT_TRUE(writeHandle3.collapse(true)); EXPECT_TRUE(attr.isUniform()); EXPECT_TRUE(writeHandle3.isUniform()); EXPECT_TRUE(writeHandle6.isUniform()); EXPECT_EQ(writeHandle3.get(1), true); EXPECT_EQ(writeHandle6.get(1), false); EXPECT_TRUE(writeHandle3.collapse(false)); EXPECT_TRUE(writeHandle3.isUniform()); EXPECT_EQ(writeHandle3.get(1), false); attr.fill(0); writeHandle3.set(1, true); EXPECT_TRUE(!attr.isUniform()); EXPECT_TRUE(!writeHandle3.isUniform()); EXPECT_TRUE(!writeHandle6.isUniform()); EXPECT_TRUE(!writeHandle3.collapse(true)); EXPECT_TRUE(!attr.isUniform()); EXPECT_TRUE(!writeHandle3.isUniform()); EXPECT_TRUE(!writeHandle6.isUniform()); EXPECT_EQ(writeHandle3.get(1), true); EXPECT_EQ(writeHandle6.get(1), false); writeHandle6.set(2, true); EXPECT_TRUE(!writeHandle3.collapse(false)); EXPECT_TRUE(!writeHandle3.isUniform()); attr.fill(0); writeHandle3.set(1, true); writeHandle6.set(2, true); writeHandle3.setUnsafe(3, true); writeHandle6.setUnsafe(3, true); { // group 3 valid for attributes 1 and 3 (using specific offset) GroupHandle handle3(attr, 3); EXPECT_TRUE(!handle3.get(0)); EXPECT_TRUE(handle3.get(1)); EXPECT_TRUE(!handle3.get(2)); EXPECT_TRUE(handle3.get(3)); EXPECT_TRUE(!writeHandle3.get(0)); EXPECT_TRUE(writeHandle3.get(1)); EXPECT_TRUE(!writeHandle3.get(2)); EXPECT_TRUE(writeHandle3.get(3)); } { // group 6 valid for attributes 2 and 3 (using specific offset) GroupHandle handle6(attr, 6); EXPECT_TRUE(!handle6.get(0)); EXPECT_TRUE(!handle6.get(1)); EXPECT_TRUE(handle6.get(2)); EXPECT_TRUE(handle6.get(3)); EXPECT_TRUE(!writeHandle6.get(0)); EXPECT_TRUE(!writeHandle6.get(1)); EXPECT_TRUE(writeHandle6.get(2)); EXPECT_TRUE(writeHandle6.get(3)); } writeHandle3.set(3, false); { // group 3 valid for 
attributes 1 and 3 (using specific offset) GroupHandle handle3(attr, 3); EXPECT_TRUE(!handle3.get(0)); EXPECT_TRUE(handle3.get(1)); EXPECT_TRUE(!handle3.get(2)); EXPECT_TRUE(!handle3.get(3)); EXPECT_TRUE(!writeHandle3.get(0)); EXPECT_TRUE(writeHandle3.get(1)); EXPECT_TRUE(!writeHandle3.get(2)); EXPECT_TRUE(!writeHandle3.get(3)); } { // group 6 valid for attributes 2 and 3 (using specific offset) GroupHandle handle6(attr, 6); EXPECT_TRUE(!handle6.get(0)); EXPECT_TRUE(!handle6.get(1)); EXPECT_TRUE(handle6.get(2)); EXPECT_TRUE(handle6.get(3)); EXPECT_TRUE(!writeHandle6.get(0)); EXPECT_TRUE(!writeHandle6.get(1)); EXPECT_TRUE(writeHandle6.get(2)); EXPECT_TRUE(writeHandle6.get(3)); } } class GroupNotFilter { public: explicit GroupNotFilter(const AttributeSet::Descriptor::GroupIndex& index) : mFilter(index) { } inline bool initialized() const { return mFilter.initialized(); } template <typename LeafT> void reset(const LeafT& leaf) { mFilter.reset(leaf); } template <typename IterT> bool valid(const IterT& iter) const { return !mFilter.valid(iter); } private: GroupFilter mFilter; }; // class GroupNotFilter struct HandleWrapper { HandleWrapper(const GroupHandle& handle) : mHandle(handle) { } GroupHandle groupHandle(const AttributeSet::Descriptor::GroupIndex& /*index*/) const { return mHandle; } private: const GroupHandle mHandle; }; // struct HandleWrapper TEST_F(TestAttributeGroup, testAttributeGroupFilter) { using GroupIndex = AttributeSet::Descriptor::GroupIndex; GroupIndex zeroIndex; typedef IndexIter<ValueVoxelCIter, GroupFilter> IndexGroupAllIter; GroupAttributeArray attrGroup(4); const Index32 size = attrGroup.size(); { // group values all zero ValueVoxelCIter indexIter(0, size); GroupFilter filter(zeroIndex); EXPECT_TRUE(filter.state() == index::PARTIAL); filter.reset(HandleWrapper(GroupHandle(attrGroup, 0))); IndexGroupAllIter iter(indexIter, filter); EXPECT_TRUE(!iter); } // enable attributes 0 and 2 for groups 3 and 6 const GroupType bitmask = GroupType(1) << 3 | GroupType(1) << 6; attrGroup.set(0, bitmask); attrGroup.set(2, bitmask); // index iterator only valid in groups 3 and 6 { ValueVoxelCIter indexIter(0, size); GroupFilter filter(zeroIndex); filter.reset(HandleWrapper(GroupHandle(attrGroup, 0))); EXPECT_TRUE(!IndexGroupAllIter(indexIter, filter)); filter.reset(HandleWrapper(GroupHandle(attrGroup, 1))); EXPECT_TRUE(!IndexGroupAllIter(indexIter, filter)); filter.reset(HandleWrapper(GroupHandle(attrGroup, 2))); EXPECT_TRUE(!IndexGroupAllIter(indexIter, filter)); filter.reset(HandleWrapper(GroupHandle(attrGroup, 3))); EXPECT_TRUE(IndexGroupAllIter(indexIter, filter)); filter.reset(HandleWrapper(GroupHandle(attrGroup, 4))); EXPECT_TRUE(!IndexGroupAllIter(indexIter, filter)); filter.reset(HandleWrapper(GroupHandle(attrGroup, 5))); EXPECT_TRUE(!IndexGroupAllIter(indexIter, filter)); filter.reset(HandleWrapper(GroupHandle(attrGroup, 6))); EXPECT_TRUE(IndexGroupAllIter(indexIter, filter)); filter.reset(HandleWrapper(GroupHandle(attrGroup, 7))); EXPECT_TRUE(!IndexGroupAllIter(indexIter, filter)); } attrGroup.set(1, bitmask); attrGroup.set(3, bitmask); using IndexNotGroupAllIter = IndexIter<ValueVoxelCIter, GroupNotFilter>; // index iterator only not valid in groups 3 and 6 { ValueVoxelCIter indexIter(0, size); GroupNotFilter filter(zeroIndex); filter.reset(HandleWrapper(GroupHandle(attrGroup, 0))); EXPECT_TRUE(IndexNotGroupAllIter(indexIter, filter)); filter.reset(HandleWrapper(GroupHandle(attrGroup, 1))); EXPECT_TRUE(IndexNotGroupAllIter(indexIter, filter)); 
filter.reset(HandleWrapper(GroupHandle(attrGroup, 2))); EXPECT_TRUE(IndexNotGroupAllIter(indexIter, filter)); filter.reset(HandleWrapper(GroupHandle(attrGroup, 3))); EXPECT_TRUE(!IndexNotGroupAllIter(indexIter, filter)); filter.reset(HandleWrapper(GroupHandle(attrGroup, 4))); EXPECT_TRUE(IndexNotGroupAllIter(indexIter, filter)); filter.reset(HandleWrapper(GroupHandle(attrGroup, 5))); EXPECT_TRUE(IndexNotGroupAllIter(indexIter, filter)); filter.reset(HandleWrapper(GroupHandle(attrGroup, 6))); EXPECT_TRUE(!IndexNotGroupAllIter(indexIter, filter)); filter.reset(HandleWrapper(GroupHandle(attrGroup, 7))); EXPECT_TRUE(IndexNotGroupAllIter(indexIter, filter)); } // clear group membership for attributes 1 and 3 attrGroup.set(1, GroupType(0)); attrGroup.set(3, GroupType(0)); { // index in group next ValueVoxelCIter indexIter(0, size); GroupFilter filter(zeroIndex); filter.reset(HandleWrapper(GroupHandle(attrGroup, 3))); IndexGroupAllIter iter(indexIter, filter); EXPECT_TRUE(iter); EXPECT_EQ(*iter, Index32(0)); EXPECT_TRUE(iter.next()); EXPECT_EQ(*iter, Index32(2)); EXPECT_TRUE(!iter.next()); } { // index in group prefix ++ ValueVoxelCIter indexIter(0, size); GroupFilter filter(zeroIndex); filter.reset(HandleWrapper(GroupHandle(attrGroup, 3))); IndexGroupAllIter iter(indexIter, filter); EXPECT_TRUE(iter); EXPECT_EQ(*iter, Index32(0)); IndexGroupAllIter old = ++iter; EXPECT_EQ(*old, Index32(2)); EXPECT_EQ(*iter, Index32(2)); EXPECT_TRUE(!iter.next()); } { // index in group postfix ++/-- ValueVoxelCIter indexIter(0, size); GroupFilter filter(zeroIndex); filter.reset(HandleWrapper(GroupHandle(attrGroup, 3))); IndexGroupAllIter iter(indexIter, filter); EXPECT_TRUE(iter); EXPECT_EQ(*iter, Index32(0)); IndexGroupAllIter old = iter++; EXPECT_EQ(*old, Index32(0)); EXPECT_EQ(*iter, Index32(2)); EXPECT_TRUE(!iter.next()); } { // index not in group next ValueVoxelCIter indexIter(0, size); GroupNotFilter filter(zeroIndex); filter.reset(HandleWrapper(GroupHandle(attrGroup, 3))); IndexNotGroupAllIter iter(indexIter, filter); EXPECT_TRUE(iter); EXPECT_EQ(*iter, Index32(1)); EXPECT_TRUE(iter.next()); EXPECT_EQ(*iter, Index32(3)); EXPECT_TRUE(!iter.next()); } { // index not in group prefix ++ ValueVoxelCIter indexIter(0, size); GroupNotFilter filter(zeroIndex); filter.reset(HandleWrapper(GroupHandle(attrGroup, 3))); IndexNotGroupAllIter iter(indexIter, filter); EXPECT_TRUE(iter); EXPECT_EQ(*iter, Index32(1)); IndexNotGroupAllIter old = ++iter; EXPECT_EQ(*old, Index32(3)); EXPECT_EQ(*iter, Index32(3)); EXPECT_TRUE(!iter.next()); } { // index not in group postfix ++ ValueVoxelCIter indexIter(0, size); GroupNotFilter filter(zeroIndex); filter.reset(HandleWrapper(GroupHandle(attrGroup, 3))); IndexNotGroupAllIter iter(indexIter, filter); EXPECT_TRUE(iter); EXPECT_EQ(*iter, Index32(1)); IndexNotGroupAllIter old = iter++; EXPECT_EQ(*old, Index32(1)); EXPECT_EQ(*iter, Index32(3)); EXPECT_TRUE(!iter.next()); } }
size: 16,037 | lang: C++ | avg_line_length: 28.427523 | max_line_length: 90 | alphanum_fraction: 0.63553
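Usage note (not part of the dataset record above): a minimal sketch of writing and reading per-point group membership with GroupWriteHandle and GroupHandle, as exercised by TestAttributeGroup.cc. The handle constructors and set()/get() calls are taken from the test; the group bit offset, point count, and main() wrapper are illustrative assumptions.

// Hedged sketch: tag points with group-membership bits via GroupWriteHandle,
// then read them back with GroupHandle, as in TestAttributeGroup.cc.
#include <openvdb/openvdb.h>
#include <openvdb/points/AttributeArray.h>
#include <openvdb/points/AttributeGroup.h>
#include <iostream>

int main()
{
    using namespace openvdb::points;

    openvdb::initialize();

    // One group byte per point; four points.
    GroupAttributeArray attr(4);

    // Bit offset 3 within the group byte identifies one particular group.
    GroupWriteHandle writeHandle(attr, 3);
    writeHandle.set(1, true);   // point 1 joins the group
    writeHandle.set(3, true);   // point 3 joins the group

    GroupHandle handle(attr, 3);
    for (openvdb::Index i = 0; i < handle.size(); ++i) {
        std::cout << "point " << i
                  << (handle.get(i) ? " is in" : " is not in") << " the group\n";
    }

    openvdb::uninitialize();
    return 0;
}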
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestTypes.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/openvdb.h> #include <openvdb/Types.h> #include <functional> // for std::ref() #include <string> using namespace openvdb; class TestTypes: public ::testing::Test { }; namespace { struct Dummy {}; } // Work-around for a macro expansion bug in debug mode that presents as an // undefined reference linking error. This happens in cases where template // instantiation of a type trait is prevented from occurring as the template // instantiation is deemed to be in an unreachable code block. // The work-around is to wrap the EXPECT_TRUE macro in another which holds // the expected value in a temporary. #define EXPECT_TRUE_TEMP(expected) \ { bool result = expected; EXPECT_TRUE(result); } TEST_F(TestTypes, testVecTraits) { { // VecTraits - IsVec // standard types (Vec3s, etc) EXPECT_TRUE_TEMP(VecTraits<Vec3s>::IsVec); EXPECT_TRUE_TEMP(VecTraits<Vec3d>::IsVec); EXPECT_TRUE_TEMP(VecTraits<Vec3i>::IsVec); EXPECT_TRUE_TEMP(VecTraits<Vec2i>::IsVec); EXPECT_TRUE_TEMP(VecTraits<Vec2s>::IsVec); EXPECT_TRUE_TEMP(VecTraits<Vec2d>::IsVec); EXPECT_TRUE_TEMP(VecTraits<Vec4i>::IsVec); EXPECT_TRUE_TEMP(VecTraits<Vec4s>::IsVec); EXPECT_TRUE_TEMP(VecTraits<Vec4d>::IsVec); // some less common types (Vec3U16, etc) EXPECT_TRUE_TEMP(VecTraits<Vec2R>::IsVec); EXPECT_TRUE_TEMP(VecTraits<Vec3U16>::IsVec); EXPECT_TRUE_TEMP(VecTraits<Vec4H>::IsVec); // some non-vector types EXPECT_TRUE_TEMP(!VecTraits<int>::IsVec); EXPECT_TRUE_TEMP(!VecTraits<double>::IsVec); EXPECT_TRUE_TEMP(!VecTraits<bool>::IsVec); EXPECT_TRUE_TEMP(!VecTraits<Quats>::IsVec); EXPECT_TRUE_TEMP(!VecTraits<Mat4d>::IsVec); EXPECT_TRUE_TEMP(!VecTraits<ValueMask>::IsVec); EXPECT_TRUE_TEMP(!VecTraits<Dummy>::IsVec); EXPECT_TRUE_TEMP(!VecTraits<Byte>::IsVec); } { // VecTraits - Size // standard types (Vec3s, etc) EXPECT_TRUE(VecTraits<Vec3s>::Size == 3); EXPECT_TRUE(VecTraits<Vec3d>::Size == 3); EXPECT_TRUE(VecTraits<Vec3i>::Size == 3); EXPECT_TRUE(VecTraits<Vec2i>::Size == 2); EXPECT_TRUE(VecTraits<Vec2s>::Size == 2); EXPECT_TRUE(VecTraits<Vec2d>::Size == 2); EXPECT_TRUE(VecTraits<Vec4i>::Size == 4); EXPECT_TRUE(VecTraits<Vec4s>::Size == 4); EXPECT_TRUE(VecTraits<Vec4d>::Size == 4); // some less common types (Vec3U16, etc) EXPECT_TRUE(VecTraits<Vec2R>::Size == 2); EXPECT_TRUE(VecTraits<Vec3U16>::Size == 3); EXPECT_TRUE(VecTraits<Vec4H>::Size == 4); // some non-vector types EXPECT_TRUE(VecTraits<int>::Size == 1); EXPECT_TRUE(VecTraits<double>::Size == 1); EXPECT_TRUE(VecTraits<bool>::Size == 1); EXPECT_TRUE(VecTraits<Quats>::Size == 1); EXPECT_TRUE(VecTraits<Mat4d>::Size == 1); EXPECT_TRUE(VecTraits<ValueMask>::Size == 1); EXPECT_TRUE(VecTraits<Dummy>::Size == 1); EXPECT_TRUE(VecTraits<Byte>::Size == 1); } { // VecTraits - ElementType // standard types (Vec3s, etc) EXPECT_TRUE(bool(std::is_same<VecTraits<Vec3s>::ElementType, float>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<Vec3d>::ElementType, double>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<Vec3i>::ElementType, int>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<Vec2i>::ElementType, int>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<Vec2s>::ElementType, float>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<Vec2d>::ElementType, double>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<Vec4i>::ElementType, int>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<Vec4s>::ElementType, float>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<Vec4d>::ElementType, 
double>::value)); // some less common types (Vec3U16, etc) EXPECT_TRUE(bool(std::is_same<VecTraits<Vec2R>::ElementType, double>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<Vec3U16>::ElementType, uint16_t>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<Vec4H>::ElementType, half>::value)); // some non-vector types EXPECT_TRUE(bool(std::is_same<VecTraits<int>::ElementType, int>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<double>::ElementType, double>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<bool>::ElementType, bool>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<Quats>::ElementType, Quats>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<Mat4d>::ElementType, Mat4d>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<ValueMask>::ElementType, ValueMask>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<Dummy>::ElementType, Dummy>::value)); EXPECT_TRUE(bool(std::is_same<VecTraits<Byte>::ElementType, Byte>::value)); } } TEST_F(TestTypes, testQuatTraits) { { // QuatTraits - IsQuat // standard types (Quats, etc) EXPECT_TRUE_TEMP(QuatTraits<Quats>::IsQuat); EXPECT_TRUE_TEMP(QuatTraits<Quatd>::IsQuat); // some non-quaternion types EXPECT_TRUE_TEMP(!QuatTraits<Vec3s>::IsQuat); EXPECT_TRUE_TEMP(!QuatTraits<Vec4d>::IsQuat); EXPECT_TRUE_TEMP(!QuatTraits<Vec2i>::IsQuat); EXPECT_TRUE_TEMP(!QuatTraits<Vec3U16>::IsQuat); EXPECT_TRUE_TEMP(!QuatTraits<int>::IsQuat); EXPECT_TRUE_TEMP(!QuatTraits<double>::IsQuat); EXPECT_TRUE_TEMP(!QuatTraits<bool>::IsQuat); EXPECT_TRUE_TEMP(!QuatTraits<Mat4s>::IsQuat); EXPECT_TRUE_TEMP(!QuatTraits<ValueMask>::IsQuat); EXPECT_TRUE_TEMP(!QuatTraits<Dummy>::IsQuat); EXPECT_TRUE_TEMP(!QuatTraits<Byte>::IsQuat); } { // QuatTraits - Size // standard types (Quats, etc) EXPECT_TRUE(QuatTraits<Quats>::Size == 4); EXPECT_TRUE(QuatTraits<Quatd>::Size == 4); // some non-quaternion types EXPECT_TRUE(QuatTraits<Vec3s>::Size == 1); EXPECT_TRUE(QuatTraits<Vec4d>::Size == 1); EXPECT_TRUE(QuatTraits<Vec2i>::Size == 1); EXPECT_TRUE(QuatTraits<Vec3U16>::Size == 1); EXPECT_TRUE(QuatTraits<int>::Size == 1); EXPECT_TRUE(QuatTraits<double>::Size == 1); EXPECT_TRUE(QuatTraits<bool>::Size == 1); EXPECT_TRUE(QuatTraits<Mat4s>::Size == 1); EXPECT_TRUE(QuatTraits<ValueMask>::Size == 1); EXPECT_TRUE(QuatTraits<Dummy>::Size == 1); EXPECT_TRUE(QuatTraits<Byte>::Size == 1); } { // QuatTraits - ElementType // standard types (Quats, etc) EXPECT_TRUE(bool(std::is_same<QuatTraits<Quats>::ElementType, float>::value)); EXPECT_TRUE(bool(std::is_same<QuatTraits<Quatd>::ElementType, double>::value)); // some non-matrix types EXPECT_TRUE(bool(std::is_same<QuatTraits<Vec3s>::ElementType, Vec3s>::value)); EXPECT_TRUE(bool(std::is_same<QuatTraits<Vec4d>::ElementType, Vec4d>::value)); EXPECT_TRUE(bool(std::is_same<QuatTraits<Vec2i>::ElementType, Vec2i>::value)); EXPECT_TRUE(bool(std::is_same<QuatTraits<Vec3U16>::ElementType, Vec3U16>::value)); EXPECT_TRUE(bool(std::is_same<QuatTraits<int>::ElementType, int>::value)); EXPECT_TRUE(bool(std::is_same<QuatTraits<double>::ElementType, double>::value)); EXPECT_TRUE(bool(std::is_same<QuatTraits<bool>::ElementType, bool>::value)); EXPECT_TRUE(bool(std::is_same<QuatTraits<Mat4s>::ElementType, Mat4s>::value)); EXPECT_TRUE(bool(std::is_same<QuatTraits<ValueMask>::ElementType, ValueMask>::value)); EXPECT_TRUE(bool(std::is_same<QuatTraits<Dummy>::ElementType, Dummy>::value)); EXPECT_TRUE(bool(std::is_same<QuatTraits<Byte>::ElementType, Byte>::value)); } } TEST_F(TestTypes, testMatTraits) { { // MatTraits - IsMat // standard types (Mat4d, etc) 
EXPECT_TRUE_TEMP(MatTraits<Mat3s>::IsMat); EXPECT_TRUE_TEMP(MatTraits<Mat3d>::IsMat); EXPECT_TRUE_TEMP(MatTraits<Mat4s>::IsMat); EXPECT_TRUE_TEMP(MatTraits<Mat4d>::IsMat); // some non-matrix types EXPECT_TRUE_TEMP(!MatTraits<Vec3s>::IsMat); EXPECT_TRUE_TEMP(!MatTraits<Vec4d>::IsMat); EXPECT_TRUE_TEMP(!MatTraits<Vec2i>::IsMat); EXPECT_TRUE_TEMP(!MatTraits<Vec3U16>::IsMat); EXPECT_TRUE_TEMP(!MatTraits<int>::IsMat); EXPECT_TRUE_TEMP(!MatTraits<double>::IsMat); EXPECT_TRUE_TEMP(!MatTraits<bool>::IsMat); EXPECT_TRUE_TEMP(!MatTraits<Quats>::IsMat); EXPECT_TRUE_TEMP(!MatTraits<ValueMask>::IsMat); EXPECT_TRUE_TEMP(!MatTraits<Dummy>::IsMat); EXPECT_TRUE_TEMP(!MatTraits<Byte>::IsMat); } { // MatTraits - Size // standard types (Mat4d, etc) EXPECT_TRUE(MatTraits<Mat3s>::Size == 3); EXPECT_TRUE(MatTraits<Mat3d>::Size == 3); EXPECT_TRUE(MatTraits<Mat4s>::Size == 4); EXPECT_TRUE(MatTraits<Mat4d>::Size == 4); // some non-matrix types EXPECT_TRUE(MatTraits<Vec3s>::Size == 1); EXPECT_TRUE(MatTraits<Vec4d>::Size == 1); EXPECT_TRUE(MatTraits<Vec2i>::Size == 1); EXPECT_TRUE(MatTraits<Vec3U16>::Size == 1); EXPECT_TRUE(MatTraits<int>::Size == 1); EXPECT_TRUE(MatTraits<double>::Size == 1); EXPECT_TRUE(MatTraits<bool>::Size == 1); EXPECT_TRUE(MatTraits<Quats>::Size == 1); EXPECT_TRUE(MatTraits<ValueMask>::Size == 1); EXPECT_TRUE(MatTraits<Dummy>::Size == 1); EXPECT_TRUE(MatTraits<Byte>::Size == 1); } { // MatTraits - ElementType // standard types (Mat4d, etc) EXPECT_TRUE(bool(std::is_same<MatTraits<Mat3s>::ElementType, float>::value)); EXPECT_TRUE(bool(std::is_same<MatTraits<Mat3d>::ElementType, double>::value)); EXPECT_TRUE(bool(std::is_same<MatTraits<Mat4s>::ElementType, float>::value)); EXPECT_TRUE(bool(std::is_same<MatTraits<Mat4d>::ElementType, double>::value)); // some non-matrix types EXPECT_TRUE(bool(std::is_same<MatTraits<Vec3s>::ElementType, Vec3s>::value)); EXPECT_TRUE(bool(std::is_same<MatTraits<Vec4d>::ElementType, Vec4d>::value)); EXPECT_TRUE(bool(std::is_same<MatTraits<Vec2i>::ElementType, Vec2i>::value)); EXPECT_TRUE(bool(std::is_same<MatTraits<Vec3U16>::ElementType, Vec3U16>::value)); EXPECT_TRUE(bool(std::is_same<MatTraits<int>::ElementType, int>::value)); EXPECT_TRUE(bool(std::is_same<MatTraits<double>::ElementType, double>::value)); EXPECT_TRUE(bool(std::is_same<MatTraits<bool>::ElementType, bool>::value)); EXPECT_TRUE(bool(std::is_same<MatTraits<Quats>::ElementType, Quats>::value)); EXPECT_TRUE(bool(std::is_same<MatTraits<ValueMask>::ElementType, ValueMask>::value)); EXPECT_TRUE(bool(std::is_same<MatTraits<Dummy>::ElementType, Dummy>::value)); EXPECT_TRUE(bool(std::is_same<MatTraits<Byte>::ElementType, Byte>::value)); } } TEST_F(TestTypes, testValueTraits) { { // ValueTraits - IsVec, IsQuat, IsMat, IsScalar // vector types EXPECT_TRUE_TEMP(ValueTraits<Vec3s>::IsVec); EXPECT_TRUE_TEMP(!ValueTraits<Vec3s>::IsQuat); EXPECT_TRUE_TEMP(!ValueTraits<Vec3s>::IsMat); EXPECT_TRUE_TEMP(!ValueTraits<Vec3s>::IsScalar); EXPECT_TRUE_TEMP(ValueTraits<Vec4d>::IsVec); EXPECT_TRUE_TEMP(!ValueTraits<Vec4d>::IsQuat); EXPECT_TRUE_TEMP(!ValueTraits<Vec4d>::IsMat); EXPECT_TRUE_TEMP(!ValueTraits<Vec4d>::IsScalar); EXPECT_TRUE_TEMP(ValueTraits<Vec3U16>::IsVec); EXPECT_TRUE_TEMP(!ValueTraits<Vec3U16>::IsQuat); EXPECT_TRUE_TEMP(!ValueTraits<Vec3U16>::IsMat); EXPECT_TRUE_TEMP(!ValueTraits<Vec3U16>::IsScalar); // quaternion types EXPECT_TRUE_TEMP(!ValueTraits<Quats>::IsVec); EXPECT_TRUE_TEMP(ValueTraits<Quats>::IsQuat); EXPECT_TRUE_TEMP(!ValueTraits<Quats>::IsMat); EXPECT_TRUE_TEMP(!ValueTraits<Quats>::IsScalar); 
EXPECT_TRUE_TEMP(!ValueTraits<Quatd>::IsVec); EXPECT_TRUE_TEMP(ValueTraits<Quatd>::IsQuat); EXPECT_TRUE_TEMP(!ValueTraits<Quatd>::IsMat); EXPECT_TRUE_TEMP(!ValueTraits<Quatd>::IsScalar); // matrix types EXPECT_TRUE_TEMP(!ValueTraits<Mat3s>::IsVec); EXPECT_TRUE_TEMP(!ValueTraits<Mat3s>::IsQuat); EXPECT_TRUE_TEMP(ValueTraits<Mat3s>::IsMat); EXPECT_TRUE_TEMP(!ValueTraits<Mat3s>::IsScalar); EXPECT_TRUE_TEMP(!ValueTraits<Mat4d>::IsVec); EXPECT_TRUE_TEMP(!ValueTraits<Mat4d>::IsQuat); EXPECT_TRUE_TEMP(ValueTraits<Mat4d>::IsMat); EXPECT_TRUE_TEMP(!ValueTraits<Mat4d>::IsScalar); // scalar types EXPECT_TRUE_TEMP(!ValueTraits<double>::IsVec); EXPECT_TRUE_TEMP(!ValueTraits<double>::IsQuat); EXPECT_TRUE_TEMP(!ValueTraits<double>::IsMat); EXPECT_TRUE_TEMP(ValueTraits<double>::IsScalar); EXPECT_TRUE_TEMP(!ValueTraits<bool>::IsVec); EXPECT_TRUE_TEMP(!ValueTraits<bool>::IsQuat); EXPECT_TRUE_TEMP(!ValueTraits<bool>::IsMat); EXPECT_TRUE_TEMP(ValueTraits<bool>::IsScalar); EXPECT_TRUE_TEMP(!ValueTraits<ValueMask>::IsVec); EXPECT_TRUE_TEMP(!ValueTraits<ValueMask>::IsQuat); EXPECT_TRUE_TEMP(!ValueTraits<ValueMask>::IsMat); EXPECT_TRUE_TEMP(ValueTraits<ValueMask>::IsScalar); EXPECT_TRUE_TEMP(!ValueTraits<Dummy>::IsVec); EXPECT_TRUE_TEMP(!ValueTraits<Dummy>::IsQuat); EXPECT_TRUE_TEMP(!ValueTraits<Dummy>::IsMat); EXPECT_TRUE_TEMP(ValueTraits<Dummy>::IsScalar); } { // ValueTraits - Size // vector types EXPECT_TRUE(ValueTraits<Vec3s>::Size == 3); EXPECT_TRUE(ValueTraits<Vec4d>::Size == 4); EXPECT_TRUE(ValueTraits<Vec3U16>::Size == 3); // quaternion types EXPECT_TRUE(ValueTraits<Quats>::Size == 4); EXPECT_TRUE(ValueTraits<Quatd>::Size == 4); // matrix types EXPECT_TRUE(ValueTraits<Mat3s>::Size == 3); EXPECT_TRUE(ValueTraits<Mat4d>::Size == 4); // scalar types EXPECT_TRUE(ValueTraits<double>::Size == 1); EXPECT_TRUE(ValueTraits<bool>::Size == 1); EXPECT_TRUE(ValueTraits<ValueMask>::Size == 1); EXPECT_TRUE(ValueTraits<Dummy>::Size == 1); } { // ValueTraits - Elements // vector types EXPECT_TRUE(ValueTraits<Vec3s>::Elements == 3); EXPECT_TRUE(ValueTraits<Vec4d>::Elements == 4); EXPECT_TRUE(ValueTraits<Vec3U16>::Elements == 3); // quaternion types EXPECT_TRUE(ValueTraits<Quats>::Elements == 4); EXPECT_TRUE(ValueTraits<Quatd>::Elements == 4); // matrix types EXPECT_TRUE(ValueTraits<Mat3s>::Elements == 3*3); EXPECT_TRUE(ValueTraits<Mat4d>::Elements == 4*4); // scalar types EXPECT_TRUE(ValueTraits<double>::Elements == 1); EXPECT_TRUE(ValueTraits<bool>::Elements == 1); EXPECT_TRUE(ValueTraits<ValueMask>::Elements == 1); EXPECT_TRUE(ValueTraits<Dummy>::Elements == 1); } { // ValueTraits - ElementType // vector types EXPECT_TRUE(bool(std::is_same<ValueTraits<Vec3s>::ElementType, float>::value)); EXPECT_TRUE(bool(std::is_same<ValueTraits<Vec4d>::ElementType, double>::value)); EXPECT_TRUE(bool(std::is_same<ValueTraits<Vec3U16>::ElementType, uint16_t>::value)); // quaternion types EXPECT_TRUE(bool(std::is_same<ValueTraits<Quats>::ElementType, float>::value)); EXPECT_TRUE(bool(std::is_same<ValueTraits<Quatd>::ElementType, double>::value)); // matrix types EXPECT_TRUE(bool(std::is_same<ValueTraits<Mat3s>::ElementType, float>::value)); EXPECT_TRUE(bool(std::is_same<ValueTraits<Mat4d>::ElementType, double>::value)); // scalar types EXPECT_TRUE(bool(std::is_same<ValueTraits<double>::ElementType, double>::value)); EXPECT_TRUE(bool(std::is_same<ValueTraits<bool>::ElementType, bool>::value)); EXPECT_TRUE(bool(std::is_same<ValueTraits<ValueMask>::ElementType, ValueMask>::value)); 
EXPECT_TRUE(bool(std::is_same<ValueTraits<Dummy>::ElementType, Dummy>::value)); } } //////////////////////////////////////// namespace { template<typename T> char typeCode() { return '.'; } template<> char typeCode<bool>() { return 'b'; } template<> char typeCode<char>() { return 'c'; } template<> char typeCode<double>() { return 'd'; } template<> char typeCode<float>() { return 'f'; } template<> char typeCode<int>() { return 'i'; } template<> char typeCode<long>() { return 'l'; } struct TypeCodeOp { std::string codes; template<typename T> void operator()(const T&) { codes.push_back(typeCode<T>()); } }; template<typename TSet> inline std::string typeSetAsString() { TypeCodeOp op; TSet::foreach(std::ref(op)); return op.codes; } } // anonymous namespace TEST_F(TestTypes, testTypeList) { using T0 = TypeList<>; EXPECT_EQ(std::string(), typeSetAsString<T0>()); using T1 = TypeList<int>; EXPECT_EQ(std::string("i"), typeSetAsString<T1>()); using T2 = TypeList<float>; EXPECT_EQ(std::string("f"), typeSetAsString<T2>()); using T3 = TypeList<bool, double>; EXPECT_EQ(std::string("bd"), typeSetAsString<T3>()); using T4 = T1::Append<T2>; EXPECT_EQ(std::string("if"), typeSetAsString<T4>()); EXPECT_EQ(std::string("fi"), typeSetAsString<T2::Append<T1>>()); using T5 = T3::Append<T4>; EXPECT_EQ(std::string("bdif"), typeSetAsString<T5>()); using T6 = T5::Append<T5>; EXPECT_EQ(std::string("bdifbdif"), typeSetAsString<T6>()); using T7 = T5::Append<char, long>; EXPECT_EQ(std::string("bdifcl"), typeSetAsString<T7>()); using T8 = T5::Append<char>::Append<long>; EXPECT_EQ(std::string("bdifcl"), typeSetAsString<T8>()); using T9 = T8::Remove<TypeList<>>; EXPECT_EQ(std::string("bdifcl"), typeSetAsString<T9>()); using T10 = T8::Remove<std::string>; EXPECT_EQ(std::string("bdifcl"), typeSetAsString<T10>()); using T11 = T8::Remove<char>::Remove<int>; EXPECT_EQ(std::string("bdfl"), typeSetAsString<T11>()); using T12 = T8::Remove<char, int>; EXPECT_EQ(std::string("bdfl"), typeSetAsString<T12>()); using T13 = T8::Remove<TypeList<char, int>>; EXPECT_EQ(std::string("bdfl"), typeSetAsString<T13>()); /// Compile time tests of TypeList /// @note static_assert with no message requires C++17 using IntTypes = TypeList<Int16, Int32, Int64>; using EmptyList = TypeList<>; // Size static_assert(IntTypes::Size == 3, ""); static_assert(EmptyList::Size == 0, ""); // Contains static_assert(IntTypes::Contains<Int16>, ""); static_assert(IntTypes::Contains<Int32>, ""); static_assert(IntTypes::Contains<Int64>, ""); static_assert(!IntTypes::Contains<float>, ""); // Index static_assert(IntTypes::Index<Int16> == 0, ""); static_assert(IntTypes::Index<Int32> == 1, ""); static_assert(IntTypes::Index<Int64> == 2, ""); static_assert(IntTypes::Index<float> == -1, ""); // Get static_assert(std::is_same<IntTypes::Get<0>, Int16>::value, ""); static_assert(std::is_same<IntTypes::Get<1>, Int32>::value, ""); static_assert(std::is_same<IntTypes::Get<2>, Int64>::value, ""); static_assert(std::is_same<IntTypes::Get<3>, typelist_internal::NullType>::value, ""); static_assert(!std::is_same<IntTypes::Get<3>, void>::value, ""); // Unique static_assert(std::is_same<IntTypes::Unique<>, IntTypes>::value, ""); static_assert(std::is_same<EmptyList::Unique<>, EmptyList>::value, ""); // Front/Back static_assert(std::is_same<IntTypes::Front, Int16>::value, ""); static_assert(std::is_same<IntTypes::Back, Int64>::value, ""); // PopFront/PopBack static_assert(std::is_same<IntTypes::PopFront, TypeList<Int32, Int64>>::value, ""); static_assert(std::is_same<IntTypes::PopBack, 
TypeList<Int16, Int32>>::value, ""); // RemoveByIndex static_assert(std::is_same<IntTypes::RemoveByIndex<0,0>, IntTypes::PopFront>::value, ""); static_assert(std::is_same<IntTypes::RemoveByIndex<2,2>, IntTypes::PopBack>::value, ""); static_assert(std::is_same<IntTypes::RemoveByIndex<0,2>, EmptyList>::value, ""); static_assert(std::is_same<IntTypes::RemoveByIndex<1,2>, TypeList<Int16>>::value, ""); static_assert(std::is_same<IntTypes::RemoveByIndex<1,1>, TypeList<Int16, Int64>>::value, ""); static_assert(std::is_same<IntTypes::RemoveByIndex<0,1>, TypeList<Int64>>::value, ""); static_assert(std::is_same<IntTypes::RemoveByIndex<0,10>, EmptyList>::value, ""); // invalid indices do nothing static_assert(std::is_same<IntTypes::RemoveByIndex<2,1>, IntTypes>::value, ""); static_assert(std::is_same<IntTypes::RemoveByIndex<3,3>, IntTypes>::value, ""); // // Test methods on an empty list static_assert(!EmptyList::Contains<Int16>, ""); static_assert(EmptyList::Index<Int16> == -1, ""); static_assert(std::is_same<EmptyList::Get<0>, typelist_internal::NullType>::value, ""); static_assert(std::is_same<EmptyList::Front, typelist_internal::NullType>::value, ""); static_assert(std::is_same<EmptyList::Back, typelist_internal::NullType>::value, ""); static_assert(std::is_same<EmptyList::PopFront, EmptyList>::value, ""); static_assert(std::is_same<EmptyList::PopBack, EmptyList>::value, ""); static_assert(std::is_same<EmptyList::RemoveByIndex<0,0>, EmptyList>::value, ""); // // Test some methods on lists with duplicate types using DuplicateIntTypes = TypeList<Int32, Int16, Int64, Int16>; using DuplicateRealTypes = TypeList<float, float, float, float>; static_assert(DuplicateIntTypes::Size == 4, ""); static_assert(DuplicateRealTypes::Size == 4, ""); static_assert(DuplicateIntTypes::Index<Int16> == 1, ""); static_assert(std::is_same<DuplicateIntTypes::Unique<>, TypeList<Int32, Int16, Int64>>::value, ""); static_assert(std::is_same<DuplicateRealTypes::Unique<>, TypeList<float>>::value, ""); // // Tests on VDB grid node chains - reverse node chains from leaf->root using Tree4Float = openvdb::tree::Tree4<float, 5, 4, 3>::Type; // usually the same as FloatTree using NodeChainT = Tree4Float::RootNodeType::NodeChainType; // Expected types using LeafT = openvdb::tree::LeafNode<float, 3>; using IternalT1 = openvdb::tree::InternalNode<LeafT, 4>; using IternalT2 = openvdb::tree::InternalNode<IternalT1, 5>; using RootT = openvdb::tree::RootNode<IternalT2>; static_assert(std::is_same<NodeChainT::Get<0>, LeafT>::value, ""); static_assert(std::is_same<NodeChainT::Get<1>, IternalT1>::value, ""); static_assert(std::is_same<NodeChainT::Get<2>, IternalT2>::value, ""); static_assert(std::is_same<NodeChainT::Get<3>, RootT>::value, ""); static_assert(std::is_same<NodeChainT::Get<4>, typelist_internal::NullType>::value, ""); }
size: 23,229 | lang: C++ | avg_line_length: 41.236364 | max_line_length: 103 | alphanum_fraction: 0.639244
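Usage note (not part of the dataset record above): a minimal sketch of the compile-time queries covered by TestTypes.cc, restated as static_asserts in a standalone translation unit. The specific types checked are taken from the test; the assertion messages and the empty main() are illustrative assumptions.

// Hedged sketch: compile-time queries on OpenVDB value types and TypeLists,
// mirroring the trait checks in TestTypes.cc.
#include <openvdb/openvdb.h>
#include <openvdb/Types.h>
#include <type_traits>

using namespace openvdb;

// VecTraits reports whether a type is a vector, its arity, and its element type.
static_assert(VecTraits<Vec3s>::IsVec, "Vec3s is a vector type");
static_assert(VecTraits<Vec3s>::Size == 3, "Vec3s has three components");
static_assert(std::is_same<VecTraits<Vec3s>::ElementType, float>::value,
              "Vec3s stores floats");

// TypeList supports membership, indexing, and element queries at compile time.
using IntTypes = TypeList<Int16, Int32, Int64>;
static_assert(IntTypes::Contains<Int32>, "Int32 is in the list");
static_assert(IntTypes::Index<Int64> == 2, "Int64 is the third element");
static_assert(std::is_same<IntTypes::Get<0>, Int16>::value, "first element is Int16");

int main() { return 0; }  // everything above is checked by the compiler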
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestVolumeToSpheres.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/openvdb.h> #include <openvdb/tools/LevelSetSphere.h> // for createLevelSetSphere #include <openvdb/tools/LevelSetUtil.h> // for sdfToFogVolume #include <openvdb/tools/VolumeToSpheres.h> // for fillWithSpheres #include <cmath> #include <iostream> #include <limits> #include <vector> class TestVolumeToSpheres: public ::testing::Test { }; //////////////////////////////////////// TEST_F(TestVolumeToSpheres, testFromLevelSet) { const float radius = 20.0f, voxelSize = 1.0f, halfWidth = 3.0f; const openvdb::Vec3f center(15.0f, 13.0f, 16.0f); openvdb::FloatGrid::ConstPtr grid = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>( radius, center, voxelSize, halfWidth); const bool overlapping = false; const int instanceCount = 10000; const float isovalue = 0.0f, minRadius = 5.0f, maxRadius = std::numeric_limits<float>::max(); const openvdb::Vec2i sphereCount(1, 100); { std::vector<openvdb::Vec4s> spheres; openvdb::tools::fillWithSpheres(*grid, spheres, sphereCount, overlapping, minRadius, maxRadius, isovalue, instanceCount); EXPECT_EQ(1, int(spheres.size())); //for (size_t i=0; i< spheres.size(); ++i) { // std::cout << "\nSphere #" << i << ": " << spheres[i] << std::endl; //} const auto tolerance = 2.0 * voxelSize; EXPECT_NEAR(center[0], spheres[0][0], tolerance); EXPECT_NEAR(center[1], spheres[0][1], tolerance); EXPECT_NEAR(center[2], spheres[0][2], tolerance); EXPECT_NEAR(radius, spheres[0][3], tolerance); } { // Verify that an isovalue outside the narrow band still produces a valid sphere. std::vector<openvdb::Vec4s> spheres; openvdb::tools::fillWithSpheres(*grid, spheres, sphereCount, overlapping, minRadius, maxRadius, 1.5f * halfWidth, instanceCount); EXPECT_EQ(1, int(spheres.size())); } { // Verify that an isovalue inside the narrow band produces no spheres. std::vector<openvdb::Vec4s> spheres; openvdb::tools::fillWithSpheres(*grid, spheres, sphereCount, overlapping, minRadius, maxRadius, -1.5f * halfWidth, instanceCount); EXPECT_EQ(0, int(spheres.size())); } } TEST_F(TestVolumeToSpheres, testFromFog) { const float radius = 20.0f, voxelSize = 1.0f, halfWidth = 3.0f; const openvdb::Vec3f center(15.0f, 13.0f, 16.0f); auto grid = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>( radius, center, voxelSize, halfWidth); openvdb::tools::sdfToFogVolume(*grid); const bool overlapping = false; const int instanceCount = 10000; const float isovalue = 0.01f, minRadius = 5.0f, maxRadius = std::numeric_limits<float>::max(); const openvdb::Vec2i sphereCount(1, 100); { std::vector<openvdb::Vec4s> spheres; openvdb::tools::fillWithSpheres(*grid, spheres, sphereCount, overlapping, minRadius, maxRadius, isovalue, instanceCount); //for (size_t i=0; i< spheres.size(); ++i) { // std::cout << "\nSphere #" << i << ": " << spheres[i] << std::endl; //} EXPECT_EQ(1, int(spheres.size())); const auto tolerance = 2.0 * voxelSize; EXPECT_NEAR(center[0], spheres[0][0], tolerance); EXPECT_NEAR(center[1], spheres[0][1], tolerance); EXPECT_NEAR(center[2], spheres[0][2], tolerance); EXPECT_NEAR(radius, spheres[0][3], tolerance); } { // Verify that an isovalue outside the narrow band still produces valid spheres. 
std::vector<openvdb::Vec4s> spheres; openvdb::tools::fillWithSpheres(*grid, spheres, sphereCount, overlapping, minRadius, maxRadius, 10.0f, instanceCount); EXPECT_TRUE(!spheres.empty()); } } TEST_F(TestVolumeToSpheres, testMinimumSphereCount) { using namespace openvdb; { auto grid = tools::createLevelSetSphere<FloatGrid>(/*radius=*/5.0f, /*center=*/Vec3f(15.0f, 13.0f, 16.0f), /*voxelSize=*/1.0f, /*halfWidth=*/3.0f); // Verify that the requested minimum number of spheres is generated, for various minima. const int maxSphereCount = 100; for (int minSphereCount = 1; minSphereCount < 20; minSphereCount += 5) { std::vector<Vec4s> spheres; tools::fillWithSpheres(*grid, spheres, Vec2i(minSphereCount, maxSphereCount), /*overlapping=*/true, /*minRadius=*/2.0f); // Given the relatively large minimum radius, the actual sphere count // should be no larger than the requested mimimum count. EXPECT_EQ(minSphereCount, int(spheres.size())); //EXPECT_TRUE(int(spheres.size()) >= minSphereCount); EXPECT_TRUE(int(spheres.size()) <= maxSphereCount); } } { // One step in the sphere packing algorithm is to erode the active voxel mask // of the input grid. Previously, for very small grids this sometimes resulted in // an empty mask and therefore no spheres. Verify that that no longer happens // (as long as the minimum sphere count is nonzero). FloatGrid grid; CoordBBox bbox(Coord(1), Coord(2)); grid.fill(bbox, 1.0f); const float minRadius = 1.0f; const Vec2i sphereCount(5, 100); std::vector<Vec4s> spheres; tools::fillWithSpheres(grid, spheres, sphereCount, /*overlapping=*/true, minRadius); EXPECT_TRUE(int(spheres.size()) >= sphereCount[0]); } } TEST_F(TestVolumeToSpheres, testClosestSurfacePoint) { using namespace openvdb; const float voxelSize = 1.0f; const Vec3f center{0.0f}; // ensure multiple internal nodes for (const float radius: { 8.0f, 50.0f }) { // Construct a spherical level set. const auto sphere = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize); EXPECT_TRUE(sphere); // Construct the corners of a cube that exactly encloses the sphere. const std::vector<Vec3R> corners{ { -radius, -radius, -radius }, { -radius, -radius, radius }, { -radius, radius, -radius }, { -radius, radius, radius }, { radius, -radius, -radius }, { radius, -radius, radius }, { radius, radius, -radius }, { radius, radius, radius }, }; // Compute the distance from a corner of the cube to the surface of the sphere. const auto distToSurface = Vec3d{radius}.length() - radius; auto csp = tools::ClosestSurfacePoint<FloatGrid>::create(*sphere); EXPECT_TRUE(csp); // Move each corner point to the closest surface point. auto points = corners; std::vector<float> distances; bool ok = csp->searchAndReplace(points, distances); EXPECT_TRUE(ok); EXPECT_EQ(8, int(points.size())); EXPECT_EQ(8, int(distances.size())); for (auto d: distances) { EXPECT_TRUE((std::abs(d - distToSurface) / distToSurface) < 0.01); // rel err < 1% } for (int i = 0; i < 8; ++i) { const auto intersection = corners[i] + distToSurface * (center - corners[i]).unit(); EXPECT_TRUE(points[i].eq(intersection, /*tolerance=*/0.1)); } // Place a point inside the sphere. points.clear(); distances.clear(); points.emplace_back(1, 0, 0); ok = csp->searchAndReplace(points, distances); EXPECT_TRUE(ok); EXPECT_EQ(1, int(points.size())); EXPECT_EQ(1, int(distances.size())); EXPECT_TRUE((std::abs(radius - 1 - distances[0]) / (radius - 1)) < 0.01); EXPECT_TRUE(points[0].eq(Vec3R{radius, 0, 0}, /*tolerance=*/0.5)); ///< @todo off by half a voxel in y and z } }
8,092
C++
34.809734
97
0.604424
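A minimal usage sketch, not taken from the file above, that mirrors the fillWithSpheres call signature exercised there (grid, output vector, sphere-count range, overlapping flag, radius range, isovalue, instance count); the numeric values are illustrative only.

// Sketch: pack a level-set sphere with spheres, following the call pattern in the test above.
#include <openvdb/openvdb.h>
#include <openvdb/tools/LevelSetSphere.h>
#include <openvdb/tools/VolumeToSpheres.h>
#include <iostream>
#include <limits>
#include <vector>

int main()
{
    openvdb::initialize();

    // Narrow-band level set of a sphere: radius 20, voxel size 1, half-width 3.
    auto grid = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        20.0f, openvdb::Vec3f(15.0f, 13.0f, 16.0f), 1.0f, 3.0f);

    // Each output entry is (x, y, z, radius) in world space.
    std::vector<openvdb::Vec4s> spheres;
    openvdb::tools::fillWithSpheres(*grid, spheres,
        /*sphereCount=*/openvdb::Vec2i(1, 100),
        /*overlapping=*/false,
        /*minRadius=*/5.0f,
        /*maxRadius=*/std::numeric_limits<float>::max(),
        /*isovalue=*/0.0f,
        /*instanceCount=*/10000);

    for (const auto& s: spheres) std::cout << s << std::endl;
    return 0;
}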
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestGridDescriptor.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/io/GridDescriptor.h> #include <openvdb/openvdb.h> class TestGridDescriptor: public ::testing::Test { }; TEST_F(TestGridDescriptor, testIO) { using namespace openvdb::io; using namespace openvdb; typedef FloatGrid GridType; GridDescriptor gd(GridDescriptor::addSuffix("temperature", 2), GridType::gridType()); gd.setInstanceParentName("temperature_32bit"); gd.setGridPos(123); gd.setBlockPos(234); gd.setEndPos(567); // write out the gd. std::ostringstream ostr(std::ios_base::binary); gd.writeHeader(ostr); gd.writeStreamPos(ostr); // Read in the gd. std::istringstream istr(ostr.str(), std::ios_base::binary); // Since the input is only a fragment of a VDB file (in particular, // it doesn't have a header), set the file format version number explicitly. io::setCurrentVersion(istr); GridDescriptor gd2; EXPECT_THROW(gd2.read(istr), openvdb::LookupError); // Register the grid. GridBase::clearRegistry(); GridType::registerGrid(); // seek back and read again. istr.seekg(0, std::ios_base::beg); GridBase::Ptr grid; EXPECT_NO_THROW(grid = gd2.read(istr)); EXPECT_EQ(gd.gridName(), gd2.gridName()); EXPECT_EQ(gd.uniqueName(), gd2.uniqueName()); EXPECT_EQ(gd.gridType(), gd2.gridType()); EXPECT_EQ(gd.instanceParentName(), gd2.instanceParentName()); EXPECT_TRUE(grid.get() != NULL); EXPECT_EQ(GridType::gridType(), grid->type()); EXPECT_EQ(gd.getGridPos(), gd2.getGridPos()); EXPECT_EQ(gd.getBlockPos(), gd2.getBlockPos()); EXPECT_EQ(gd.getEndPos(), gd2.getEndPos()); // Clear the registry when we are done. GridBase::clearRegistry(); } TEST_F(TestGridDescriptor, testCopy) { using namespace openvdb::io; using namespace openvdb; typedef FloatGrid GridType; GridDescriptor gd("temperature", GridType::gridType()); gd.setInstanceParentName("temperature_32bit"); gd.setGridPos(123); gd.setBlockPos(234); gd.setEndPos(567); GridDescriptor gd2; // do the copy gd2 = gd; EXPECT_EQ(gd.gridName(), gd2.gridName()); EXPECT_EQ(gd.uniqueName(), gd2.uniqueName()); EXPECT_EQ(gd.gridType(), gd2.gridType()); EXPECT_EQ(gd.instanceParentName(), gd2.instanceParentName()); EXPECT_EQ(gd.getGridPos(), gd2.getGridPos()); EXPECT_EQ(gd.getBlockPos(), gd2.getBlockPos()); EXPECT_EQ(gd.getEndPos(), gd2.getEndPos()); } TEST_F(TestGridDescriptor, testName) { using openvdb::Name; using openvdb::io::GridDescriptor; const std::string typ = openvdb::FloatGrid::gridType(); Name name("test"); GridDescriptor gd(name, typ); // Verify that the grid name and the unique name are equivalent // when the unique name has no suffix. EXPECT_EQ(name, gd.gridName()); EXPECT_EQ(name, gd.uniqueName()); EXPECT_EQ(name, GridDescriptor::nameAsString(name)); EXPECT_EQ(name, GridDescriptor::stripSuffix(name)); // Add a suffix. name = GridDescriptor::addSuffix("test", 2); gd = GridDescriptor(name, typ); // Verify that the grid name and the unique name differ // when the unique name has a suffix. 
EXPECT_EQ(name, gd.uniqueName()); EXPECT_TRUE(gd.gridName() != gd.uniqueName()); EXPECT_EQ(GridDescriptor::stripSuffix(name), gd.gridName()); EXPECT_EQ(Name("test[2]"), GridDescriptor::nameAsString(name)); // As above, but with a longer suffix name = GridDescriptor::addSuffix("test", 13); gd = GridDescriptor(name, typ); EXPECT_EQ(name, gd.uniqueName()); EXPECT_TRUE(gd.gridName() != gd.uniqueName()); EXPECT_EQ(GridDescriptor::stripSuffix(name), gd.gridName()); EXPECT_EQ(Name("test[13]"), GridDescriptor::nameAsString(name)); // Multiple suffixes aren't supported, but verify that // they behave reasonably, at least. name = GridDescriptor::addSuffix(name, 4); gd = GridDescriptor(name, typ); EXPECT_EQ(name, gd.uniqueName()); EXPECT_TRUE(gd.gridName() != gd.uniqueName()); EXPECT_EQ(GridDescriptor::stripSuffix(name), gd.gridName()); }
4,251
C++
28.324138
89
0.673489
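A short sketch of the unique-name helpers the test above relies on; it uses only the static GridDescriptor functions called there (addSuffix, nameAsString) and the FloatGrid type string, with an illustrative grid name.

// Sketch: how GridDescriptor disambiguates duplicate grid names, per the test above.
#include <openvdb/openvdb.h>
#include <openvdb/io/GridDescriptor.h>
#include <iostream>

int main()
{
    openvdb::initialize();
    using openvdb::io::GridDescriptor;

    // addSuffix() appends an internal separator plus an index so that two grids
    // named "temperature" can coexist in one file.
    const openvdb::Name unique = GridDescriptor::addSuffix("temperature", 2);
    GridDescriptor gd(unique, openvdb::FloatGrid::gridType());

    std::cout << "grid name:   " << gd.gridName() << "\n"                                  // "temperature"
              << "unique name: " << GridDescriptor::nameAsString(gd.uniqueName()) << "\n"; // "temperature[2]"
    return 0;
}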
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestMaps.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <openvdb/Exceptions.h> #include <openvdb/math/Maps.h> #include <openvdb/util/MapsUtil.h> #include "gtest/gtest.h" class TestMaps: public ::testing::Test { }; // Work-around for a macro expansion bug in debug mode that presents as an // undefined reference linking error. This happens in cases where template // instantiation of a type trait is prevented from occurring as the template // instantiation is deemed to be in an unreachable code block. // The work-around is to wrap the EXPECT_TRUE macro in another which holds // the expected value in a temporary. #define EXPECT_TRUE_TEMP(expected) \ { bool result = expected; EXPECT_TRUE(result); } TEST_F(TestMaps, testApproxInverse) { using namespace openvdb::math; Mat4d singular = Mat4d::identity(); singular[1][1] = 0.f; { Mat4d singularInv = approxInverse(singular); EXPECT_TRUE( singular == singularInv ); } { Mat4d rot = Mat4d::identity(); rot.setToRotation(X_AXIS, M_PI/4.); Mat4d rotInv = rot.inverse(); Mat4d mat = rotInv * singular * rot; Mat4d singularInv = approxInverse(mat); // this matrix is equal to its own singular inverse EXPECT_TRUE( mat.eq(singularInv) ); } { Mat4d m = Mat4d::identity(); m[0][1] = 1; // should give true inverse, since this matrix has det=1 Mat4d minv = approxInverse(m); Mat4d prod = m * minv; EXPECT_TRUE( prod.eq( Mat4d::identity() ) ); } { Mat4d m = Mat4d::identity(); m[0][1] = 1; m[1][1] = 0; // should give true inverse, since this matrix has det=1 Mat4d minv = approxInverse(m); Mat4d expected = Mat4d::zero(); expected[3][3] = 1; EXPECT_TRUE( minv.eq(expected ) ); } } TEST_F(TestMaps, testUniformScale) { using namespace openvdb::math; AffineMap map; EXPECT_TRUE(map.hasUniformScale()); // Apply uniform scale: should still have square voxels map.accumPreScale(Vec3d(2, 2, 2)); EXPECT_TRUE(map.hasUniformScale()); // Apply a rotation, should still have squaure voxels. 
map.accumPostRotation(X_AXIS, 2.5); EXPECT_TRUE(map.hasUniformScale()); // non uniform scaling will stretch the voxels map.accumPostScale(Vec3d(1, 3, 1) ); EXPECT_TRUE(!map.hasUniformScale()); } TEST_F(TestMaps, testTranslation) { using namespace openvdb::math; double TOL = 1e-7; TranslationMap::Ptr translation(new TranslationMap(Vec3d(1,1,1))); EXPECT_TRUE_TEMP(is_linear<TranslationMap>::value); TranslationMap another_translation(Vec3d(1,1,1)); EXPECT_TRUE(another_translation == *translation); TranslationMap::Ptr translate_by_two(new TranslationMap(Vec3d(2,2,2))); EXPECT_TRUE(*translate_by_two != *translation); EXPECT_NEAR(translate_by_two->determinant(), 1, TOL); EXPECT_TRUE(translate_by_two->hasUniformScale()); /// apply the map forward Vec3d unit(1,0,0); Vec3d result = translate_by_two->applyMap(unit); EXPECT_NEAR(result(0), 3, TOL); EXPECT_NEAR(result(1), 2, TOL); EXPECT_NEAR(result(2), 2, TOL); /// invert the map result = translate_by_two->applyInverseMap(result); EXPECT_NEAR(result(0), 1, TOL); EXPECT_NEAR(result(1), 0, TOL); EXPECT_NEAR(result(2), 0, TOL); /// Inverse Jacobian Transpose result = translate_by_two->applyIJT(result); EXPECT_NEAR(result(0), 1, TOL); EXPECT_NEAR(result(1), 0, TOL); EXPECT_NEAR(result(2), 0, TOL); /// Jacobian Transpose result = translate_by_two->applyJT(translate_by_two->applyIJT(unit)); EXPECT_NEAR(result(0), unit(0), TOL); EXPECT_NEAR(result(1), unit(1), TOL); EXPECT_NEAR(result(2), unit(2), TOL); MapBase::Ptr inverse = translation->inverseMap(); EXPECT_TRUE(inverse->type() == TranslationMap::mapType()); // apply the map forward and the inverse map back result = inverse->applyMap(translation->applyMap(unit)); EXPECT_NEAR(result(0), 1, TOL); EXPECT_NEAR(result(1), 0, TOL); EXPECT_NEAR(result(2), 0, TOL); } TEST_F(TestMaps, testScaleDefault) { using namespace openvdb::math; double TOL = 1e-7; // testing default constructor // should be the identity ScaleMap::Ptr scale(new ScaleMap()); Vec3d unit(1, 1, 1); Vec3d result = scale->applyMap(unit); EXPECT_NEAR(unit(0), result(0), TOL); EXPECT_NEAR(unit(1), result(1), TOL); EXPECT_NEAR(unit(2), result(2), TOL); result = scale->applyInverseMap(unit); EXPECT_NEAR(unit(0), result(0), TOL); EXPECT_NEAR(unit(1), result(1), TOL); EXPECT_NEAR(unit(2), result(2), TOL); MapBase::Ptr inverse = scale->inverseMap(); EXPECT_TRUE(inverse->type() == ScaleMap::mapType()); // apply the map forward and the inverse map back result = inverse->applyMap(scale->applyMap(unit)); EXPECT_NEAR(result(0), unit(0), TOL); EXPECT_NEAR(result(1), unit(1), TOL); EXPECT_NEAR(result(2), unit(2), TOL); } TEST_F(TestMaps, testRotation) { using namespace openvdb::math; double TOL = 1e-7; double pi = 4.*atan(1.); UnitaryMap::Ptr rotation(new UnitaryMap(Vec3d(1,0,0), pi/2)); EXPECT_TRUE_TEMP(is_linear<UnitaryMap>::value); UnitaryMap another_rotation(Vec3d(1,0,0), pi/2.); EXPECT_TRUE(another_rotation == *rotation); UnitaryMap::Ptr rotation_two(new UnitaryMap(Vec3d(1,0,0), pi/4.)); EXPECT_TRUE(*rotation_two != *rotation); EXPECT_NEAR(rotation->determinant(), 1, TOL); EXPECT_TRUE(rotation_two->hasUniformScale()); /// apply the map forward Vec3d unit(0,1,0); Vec3d result = rotation->applyMap(unit); EXPECT_NEAR(0, result(0), TOL); EXPECT_NEAR(0, result(1), TOL); EXPECT_NEAR(1, result(2), TOL); /// invert the map result = rotation->applyInverseMap(result); EXPECT_NEAR(0, result(0), TOL); EXPECT_NEAR(1, result(1), TOL); EXPECT_NEAR(0, result(2), TOL); /// Inverse Jacobian Transpose result = rotation_two->applyIJT(result); // rotate backwards EXPECT_NEAR(0, 
result(0), TOL); EXPECT_NEAR(sqrt(2.)/2, result(1), TOL); EXPECT_NEAR(sqrt(2.)/2, result(2), TOL); /// Jacobian Transpose result = rotation_two->applyJT(rotation_two->applyIJT(unit)); EXPECT_NEAR(result(0), unit(0), TOL); EXPECT_NEAR(result(1), unit(1), TOL); EXPECT_NEAR(result(2), unit(2), TOL); // Test inverse map MapBase::Ptr inverse = rotation->inverseMap(); EXPECT_TRUE(inverse->type() == UnitaryMap::mapType()); // apply the map forward and the inverse map back result = inverse->applyMap(rotation->applyMap(unit)); EXPECT_NEAR(result(0), unit(0), TOL); EXPECT_NEAR(result(1), unit(1), TOL); EXPECT_NEAR(result(2), unit(2), TOL); } TEST_F(TestMaps, testScaleTranslate) { using namespace openvdb::math; double TOL = 1e-7; EXPECT_TRUE_TEMP(is_linear<ScaleTranslateMap>::value); TranslationMap::Ptr translation(new TranslationMap(Vec3d(1,1,1))); ScaleMap::Ptr scale(new ScaleMap(Vec3d(1,2,3))); ScaleTranslateMap::Ptr scaleAndTranslate( new ScaleTranslateMap(*scale, *translation)); TranslationMap translate_by_two(Vec3d(2,2,2)); ScaleTranslateMap another_scaleAndTranslate(*scale, translate_by_two); EXPECT_TRUE(another_scaleAndTranslate != *scaleAndTranslate); EXPECT_TRUE(!scaleAndTranslate->hasUniformScale()); //EXPECT_NEAR(scaleAndTranslate->determinant(), 6, TOL); /// apply the map forward Vec3d unit(1,0,0); Vec3d result = scaleAndTranslate->applyMap(unit); EXPECT_NEAR(2, result(0), TOL); EXPECT_NEAR(1, result(1), TOL); EXPECT_NEAR(1, result(2), TOL); /// invert the map result = scaleAndTranslate->applyInverseMap(result); EXPECT_NEAR(1, result(0), TOL); EXPECT_NEAR(0, result(1), TOL); EXPECT_NEAR(0, result(2), TOL); /// Inverse Jacobian Transpose result = Vec3d(0,2,0); result = scaleAndTranslate->applyIJT(result ); EXPECT_NEAR(0, result(0), TOL); EXPECT_NEAR(1, result(1), TOL); EXPECT_NEAR(0, result(2), TOL); /// Jacobian Transpose result = scaleAndTranslate->applyJT(scaleAndTranslate->applyIJT(unit)); EXPECT_NEAR(result(0), unit(0), TOL); EXPECT_NEAR(result(1), unit(1), TOL); EXPECT_NEAR(result(2), unit(2), TOL); // Test inverse map MapBase::Ptr inverse = scaleAndTranslate->inverseMap(); EXPECT_TRUE(inverse->type() == ScaleTranslateMap::mapType()); // apply the map forward and the inverse map back result = inverse->applyMap(scaleAndTranslate->applyMap(unit)); EXPECT_NEAR(result(0), unit(0), TOL); EXPECT_NEAR(result(1), unit(1), TOL); EXPECT_NEAR(result(2), unit(2), TOL); } TEST_F(TestMaps, testUniformScaleTranslate) { using namespace openvdb::math; double TOL = 1e-7; EXPECT_TRUE_TEMP(is_linear<UniformScaleMap>::value); EXPECT_TRUE_TEMP(is_linear<UniformScaleTranslateMap>::value); TranslationMap::Ptr translation(new TranslationMap(Vec3d(1,1,1))); UniformScaleMap::Ptr scale(new UniformScaleMap(2)); UniformScaleTranslateMap::Ptr scaleAndTranslate( new UniformScaleTranslateMap(*scale, *translation)); TranslationMap translate_by_two(Vec3d(2,2,2)); UniformScaleTranslateMap another_scaleAndTranslate(*scale, translate_by_two); EXPECT_TRUE(another_scaleAndTranslate != *scaleAndTranslate); EXPECT_TRUE(scaleAndTranslate->hasUniformScale()); //EXPECT_NEAR(scaleAndTranslate->determinant(), 6, TOL); /// apply the map forward Vec3d unit(1,0,0); Vec3d result = scaleAndTranslate->applyMap(unit); EXPECT_NEAR(3, result(0), TOL); EXPECT_NEAR(1, result(1), TOL); EXPECT_NEAR(1, result(2), TOL); /// invert the map result = scaleAndTranslate->applyInverseMap(result); EXPECT_NEAR(1, result(0), TOL); EXPECT_NEAR(0, result(1), TOL); EXPECT_NEAR(0, result(2), TOL); /// Inverse Jacobian Transpose result = Vec3d(0,2,0); 
result = scaleAndTranslate->applyIJT(result ); EXPECT_NEAR(0, result(0), TOL); EXPECT_NEAR(1, result(1), TOL); EXPECT_NEAR(0, result(2), TOL); /// Jacobian Transpose result = scaleAndTranslate->applyJT(scaleAndTranslate->applyIJT(unit)); EXPECT_NEAR(result(0), unit(0), TOL); EXPECT_NEAR(result(1), unit(1), TOL); EXPECT_NEAR(result(2), unit(2), TOL); // Test inverse map MapBase::Ptr inverse = scaleAndTranslate->inverseMap(); EXPECT_TRUE(inverse->type() == UniformScaleTranslateMap::mapType()); // apply the map forward and the inverse map back result = inverse->applyMap(scaleAndTranslate->applyMap(unit)); EXPECT_NEAR(result(0), unit(0), TOL); EXPECT_NEAR(result(1), unit(1), TOL); EXPECT_NEAR(result(2), unit(2), TOL); } TEST_F(TestMaps, testDecomposition) { using namespace openvdb::math; //double TOL = 1e-7; EXPECT_TRUE_TEMP(is_linear<UnitaryMap>::value); EXPECT_TRUE_TEMP(is_linear<SymmetricMap>::value); EXPECT_TRUE_TEMP(is_linear<PolarDecomposedMap>::value); EXPECT_TRUE_TEMP(is_linear<FullyDecomposedMap>::value); Mat4d matrix(Mat4d::identity()); Vec3d input_translation(0,0,1); matrix.setTranslation(input_translation); matrix(0,0) = 1.8930039; matrix(1,0) = -0.120080537; matrix(2,0) = -0.497615212; matrix(0,1) = -0.120080537; matrix(1,1) = 2.643265436; matrix(2,1) = 0.6176957495; matrix(0,2) = -0.497615212; matrix(1,2) = 0.6176957495; matrix(2,2) = 1.4637305884; FullyDecomposedMap::Ptr decomp = createFullyDecomposedMap(matrix); /// the singular values const Vec3<double>& singular_values = decomp->firstMap().firstMap().secondMap().getScale(); /// expected values Vec3d expected_values(2, 3, 1); EXPECT_TRUE( isApproxEqual(singular_values, expected_values) ); const Vec3<double>& the_translation = decomp->secondMap().secondMap().getTranslation(); EXPECT_TRUE( isApproxEqual(the_translation, input_translation)); } TEST_F(TestMaps, testFrustum) { using namespace openvdb::math; openvdb::BBoxd bbox(Vec3d(0), Vec3d(100)); NonlinearFrustumMap frustum(bbox, 1./6., 5); /// frustum will have depth, far plane - near plane = 5 /// the frustum has width 1 in the front and 6 in the back Vec3d trans(2,2,2); NonlinearFrustumMap::Ptr map = openvdb::StaticPtrCast<NonlinearFrustumMap, MapBase>( frustum.preScale(Vec3d(10,10,10))->postTranslate(trans)); EXPECT_TRUE(!map->hasUniformScale()); Vec3d result; result = map->voxelSize(); EXPECT_TRUE( isApproxEqual(result.x(), 0.1)); EXPECT_TRUE( isApproxEqual(result.y(), 0.1)); EXPECT_TRUE( isApproxEqual(result.z(), 0.5, 0.0001)); //--------- Front face Vec3d corner(0,0,0); result = map->applyMap(corner); EXPECT_TRUE(isApproxEqual(result, Vec3d(-5, -5, 0) + trans)); corner = Vec3d(100,0,0); result = map->applyMap(corner); EXPECT_TRUE( isApproxEqual(result, Vec3d(5, -5, 0) + trans)); corner = Vec3d(0,100,0); result = map->applyMap(corner); EXPECT_TRUE( isApproxEqual(result, Vec3d(-5, 5, 0) + trans)); corner = Vec3d(100,100,0); result = map->applyMap(corner); EXPECT_TRUE( isApproxEqual(result, Vec3d(5, 5, 0) + trans)); //--------- Back face corner = Vec3d(0,0,100); result = map->applyMap(corner); EXPECT_TRUE( isApproxEqual(result, Vec3d(-30, -30, 50) + trans)); // 10*(5/2 + 1/2) = 30 corner = Vec3d(100,0,100); result = map->applyMap(corner); EXPECT_TRUE( isApproxEqual(result, Vec3d(30, -30, 50) + trans)); corner = Vec3d(0,100,100); result = map->applyMap(corner); EXPECT_TRUE( isApproxEqual(result, Vec3d(-30, 30, 50) + trans)); corner = Vec3d(100,100,100); result = map->applyMap(corner); EXPECT_TRUE( isApproxEqual(result, Vec3d(30, 30, 50) + trans)); // invert a single corner 
result = map->applyInverseMap(Vec3d(30,30,50) + trans); EXPECT_TRUE( isApproxEqual(result, Vec3d(100, 100, 100))); EXPECT_TRUE(map->hasSimpleAffine()); /// create a frustum from from camera type information // the location of the camera Vec3d position(100,10,1); // the direction the camera is pointing Vec3d direction(0,1,1); direction.normalize(); // the up-direction for the camera Vec3d up(10,3,-3); // distance from camera to near-plane measured in the direction 'direction' double z_near = 100.; // depth of frustum to far-plane to near-plane double depth = 500.; //aspect ratio of frustum: width/height double aspect = 2; // voxel count in frustum. the y_count = x_count / aspect Coord::ValueType x_count = 500; Coord::ValueType z_count = 5000; NonlinearFrustumMap frustumMap_from_camera( position, direction, up, aspect, z_near, depth, x_count, z_count); Vec3d center; // find the center of the near plane and make sure it is in the correct place center = Vec3d(0,0,0); center += frustumMap_from_camera.applyMap(Vec3d(0,0,0)); center += frustumMap_from_camera.applyMap(Vec3d(500,0,0)); center += frustumMap_from_camera.applyMap(Vec3d(0,250,0)); center += frustumMap_from_camera.applyMap(Vec3d(500,250,0)); center = center /4.; EXPECT_TRUE( isApproxEqual(center, position + z_near * direction)); // find the center of the far plane and make sure it is in the correct place center = Vec3d(0,0,0); center += frustumMap_from_camera.applyMap(Vec3d( 0, 0,5000)); center += frustumMap_from_camera.applyMap(Vec3d(500, 0,5000)); center += frustumMap_from_camera.applyMap(Vec3d( 0,250,5000)); center += frustumMap_from_camera.applyMap(Vec3d(500,250,5000)); center = center /4.; EXPECT_TRUE( isApproxEqual(center, position + (z_near+depth) * direction)); // check that the frustum has the correct heigh on the near plane Vec3d corner1 = frustumMap_from_camera.applyMap(Vec3d(0,0,0)); Vec3d corner2 = frustumMap_from_camera.applyMap(Vec3d(0,250,0)); Vec3d side = corner2-corner1; EXPECT_TRUE( isApproxEqual( side.length(), 2 * up.length())); // check that the frustum is correctly oriented w.r.t up side.normalize(); EXPECT_TRUE( isApproxEqual( side * (up.length()), up)); // check that the linear map inside the frustum is a simple affine map (i.e. 
has no shear) EXPECT_TRUE(frustumMap_from_camera.hasSimpleAffine()); } TEST_F(TestMaps, testCalcBoundingBox) { using namespace openvdb::math; openvdb::BBoxd world_bbox(Vec3d(0,0,0), Vec3d(1,1,1)); openvdb::BBoxd voxel_bbox; openvdb::BBoxd expected; { AffineMap affine; affine.accumPreScale(Vec3d(2,2,2)); openvdb::util::calculateBounds<AffineMap>(affine, world_bbox, voxel_bbox); expected = openvdb::BBoxd(Vec3d(0,0,0), Vec3d(0.5, 0.5, 0.5)); EXPECT_TRUE(isApproxEqual(voxel_bbox.min(), expected.min())); EXPECT_TRUE(isApproxEqual(voxel_bbox.max(), expected.max())); affine.accumPostTranslation(Vec3d(1,1,1)); openvdb::util::calculateBounds<AffineMap>(affine, world_bbox, voxel_bbox); expected = openvdb::BBoxd(Vec3d(-0.5,-0.5,-0.5), Vec3d(0, 0, 0)); EXPECT_TRUE(isApproxEqual(voxel_bbox.min(), expected.min())); EXPECT_TRUE(isApproxEqual(voxel_bbox.max(), expected.max())); } { AffineMap affine; affine.accumPreScale(Vec3d(2,2,2)); affine.accumPostTranslation(Vec3d(1,1,1)); // test a sphere: Vec3d center(0,0,0); double radius = 10; openvdb::util::calculateBounds<AffineMap>(affine, center, radius, voxel_bbox); expected = openvdb::BBoxd(Vec3d(-5.5,-5.5,-5.5), Vec3d(4.5, 4.5, 4.5)); EXPECT_TRUE(isApproxEqual(voxel_bbox.min(), expected.min())); EXPECT_TRUE(isApproxEqual(voxel_bbox.max(), expected.max())); } { AffineMap affine; affine.accumPreScale(Vec3d(2,2,2)); double pi = 4.*atan(1.); affine.accumPreRotation(X_AXIS, pi/4.); Vec3d center(0,0,0); double radius = 10; openvdb::util::calculateBounds<AffineMap>(affine, center, radius, voxel_bbox); expected = openvdb::BBoxd(Vec3d(-5,-5,-5), Vec3d(5, 5, 5)); EXPECT_TRUE(isApproxEqual(voxel_bbox.min(), expected.min())); EXPECT_TRUE(isApproxEqual(voxel_bbox.max(), expected.max())); } { AffineMap affine; affine.accumPreScale(Vec3d(2,1,1)); double pi = 4.*atan(1.); affine.accumPreRotation(X_AXIS, pi/4.); Vec3d center(0,0,0); double radius = 10; openvdb::util::calculateBounds<AffineMap>(affine, center, radius, voxel_bbox); expected = openvdb::BBoxd(Vec3d(-5,-10,-10), Vec3d(5, 10, 10)); EXPECT_TRUE(isApproxEqual(voxel_bbox.min(), expected.min())); EXPECT_TRUE(isApproxEqual(voxel_bbox.max(), expected.max())); } { AffineMap affine; affine.accumPreScale(Vec3d(2,1,1)); double pi = 4.*atan(1.); affine.accumPreRotation(X_AXIS, pi/4.); affine.accumPostTranslation(Vec3d(1,1,1)); Vec3d center(1,1,1); double radius = 10; openvdb::util::calculateBounds<AffineMap>(affine, center, radius, voxel_bbox); expected = openvdb::BBoxd(Vec3d(-5,-10,-10), Vec3d(5, 10, 10)); EXPECT_TRUE(isApproxEqual(voxel_bbox.min(), expected.min())); EXPECT_TRUE(isApproxEqual(voxel_bbox.max(), expected.max())); } { openvdb::BBoxd bbox(Vec3d(0), Vec3d(100)); NonlinearFrustumMap frustum(bbox, 2, 5); NonlinearFrustumMap::Ptr map = openvdb::StaticPtrCast<NonlinearFrustumMap, MapBase>( frustum.preScale(Vec3d(2,2,2))); Vec3d center(20,20,10); double radius(1); openvdb::util::calculateBounds<NonlinearFrustumMap>(*map, center, radius, voxel_bbox); } } TEST_F(TestMaps, testJacobians) { using namespace openvdb::math; const double TOL = 1e-7; { AffineMap affine; const int n = 10; const double dtheta = M_PI / n; const Vec3d test(1,2,3); const Vec3d origin(0,0,0); for (int i = 0; i < n; ++i) { double theta = i * dtheta; affine.accumPostRotation(X_AXIS, theta); Vec3d result = affine.applyJacobian(test); Vec3d expected = affine.applyMap(test) - affine.applyMap(origin); EXPECT_NEAR(result(0), expected(0), TOL); EXPECT_NEAR(result(1), expected(1), TOL); EXPECT_NEAR(result(2), expected(2), TOL); Vec3d tmp = 
affine.applyInverseJacobian(result); EXPECT_NEAR(tmp(0), test(0), TOL); EXPECT_NEAR(tmp(1), test(1), TOL); EXPECT_NEAR(tmp(2), test(2), TOL); } } { UniformScaleMap scale(3); const Vec3d test(1,2,3); const Vec3d origin(0,0,0); Vec3d result = scale.applyJacobian(test); Vec3d expected = scale.applyMap(test) - scale.applyMap(origin); EXPECT_NEAR(result(0), expected(0), TOL); EXPECT_NEAR(result(1), expected(1), TOL); EXPECT_NEAR(result(2), expected(2), TOL); Vec3d tmp = scale.applyInverseJacobian(result); EXPECT_NEAR(tmp(0), test(0), TOL); EXPECT_NEAR(tmp(1), test(1), TOL); EXPECT_NEAR(tmp(2), test(2), TOL); } { ScaleMap scale(Vec3d(1,2,3)); const Vec3d test(1,2,3); const Vec3d origin(0,0,0); Vec3d result = scale.applyJacobian(test); Vec3d expected = scale.applyMap(test) - scale.applyMap(origin); EXPECT_NEAR(result(0), expected(0), TOL); EXPECT_NEAR(result(1), expected(1), TOL); EXPECT_NEAR(result(2), expected(2), TOL); Vec3d tmp = scale.applyInverseJacobian(result); EXPECT_NEAR(tmp(0), test(0), TOL); EXPECT_NEAR(tmp(1), test(1), TOL); EXPECT_NEAR(tmp(2), test(2), TOL); } { TranslationMap map(Vec3d(1,2,3)); const Vec3d test(1,2,3); const Vec3d origin(0,0,0); Vec3d result = map.applyJacobian(test); Vec3d expected = map.applyMap(test) - map.applyMap(origin); EXPECT_NEAR(result(0), expected(0), TOL); EXPECT_NEAR(result(1), expected(1), TOL); EXPECT_NEAR(result(2), expected(2), TOL); Vec3d tmp = map.applyInverseJacobian(result); EXPECT_NEAR(tmp(0), test(0), TOL); EXPECT_NEAR(tmp(1), test(1), TOL); EXPECT_NEAR(tmp(2), test(2), TOL); } { ScaleTranslateMap map(Vec3d(1,2,3), Vec3d(3,5,4)); const Vec3d test(1,2,3); const Vec3d origin(0,0,0); Vec3d result = map.applyJacobian(test); Vec3d expected = map.applyMap(test) - map.applyMap(origin); EXPECT_NEAR(result(0), expected(0), TOL); EXPECT_NEAR(result(1), expected(1), TOL); EXPECT_NEAR(result(2), expected(2), TOL); Vec3d tmp = map.applyInverseJacobian(result); EXPECT_NEAR(tmp(0), test(0), TOL); EXPECT_NEAR(tmp(1), test(1), TOL); EXPECT_NEAR(tmp(2), test(2), TOL); } { openvdb::BBoxd bbox(Vec3d(0), Vec3d(100)); NonlinearFrustumMap frustum(bbox, 1./6., 5); /// frustum will have depth, far plane - near plane = 5 /// the frustum has width 1 in the front and 6 in the back Vec3d trans(2,2,2); NonlinearFrustumMap::Ptr map = openvdb::StaticPtrCast<NonlinearFrustumMap, MapBase>( frustum.preScale(Vec3d(10,10,10))->postTranslate(trans)); const Vec3d test(1,2,3); const Vec3d origin(0, 0, 0); // these two drop down to just the linear part Vec3d lresult = map->applyJacobian(test); Vec3d ltmp = map->applyInverseJacobian(lresult); EXPECT_NEAR(ltmp(0), test(0), TOL); EXPECT_NEAR(ltmp(1), test(1), TOL); EXPECT_NEAR(ltmp(2), test(2), TOL); Vec3d isloc(4,5,6); // these two drop down to just the linear part Vec3d result = map->applyJacobian(test, isloc); Vec3d tmp = map->applyInverseJacobian(result, isloc); EXPECT_NEAR(tmp(0), test(0), TOL); EXPECT_NEAR(tmp(1), test(1), TOL); EXPECT_NEAR(tmp(2), test(2), TOL); } }
24,510
C++
30.79118
95
0.631171
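A brief sketch of the forward/inverse map pattern repeated throughout the test above, built from the same ScaleMap, TranslationMap, and ScaleTranslateMap constructors it uses; the sample point is illustrative.

// Sketch: apply a composed map forward, then invert it, as in testScaleTranslate above.
#include <openvdb/openvdb.h>
#include <openvdb/math/Maps.h>
#include <iostream>

int main()
{
    using namespace openvdb::math;

    // Scale by (1,2,3), then translate by (1,1,1).
    const ScaleMap scale(openvdb::Vec3d(1, 2, 3));
    const TranslationMap translate(openvdb::Vec3d(1, 1, 1));
    const ScaleTranslateMap map(scale, translate);

    const openvdb::Vec3d p(1, 0, 0);
    const openvdb::Vec3d world = map.applyMap(p);            // (2, 1, 1)
    const openvdb::Vec3d back  = map.applyInverseMap(world); // round-trips to (1, 0, 0)

    std::cout << p << " -> " << world << " -> " << back << std::endl;
    return 0;
}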
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestLeafMask.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <set> #include "gtest/gtest.h" #include <openvdb/openvdb.h> #include <openvdb/Types.h> #include <openvdb/tools/Filter.h> #include <openvdb/tree/LeafNode.h> #include <openvdb/util/logging.h> #include "util.h" // for unittest_util::makeSphere() class TestLeafMask: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; typedef openvdb::tree::LeafNode<openvdb::ValueMask, 3> LeafType; //////////////////////////////////////// TEST_F(TestLeafMask, testGetValue) { { LeafType leaf1(openvdb::Coord(0, 0, 0)); openvdb::tree::LeafNode<bool, 3> leaf2(openvdb::Coord(0, 0, 0)); EXPECT_TRUE( leaf1.memUsage() < leaf2.memUsage() ); //std::cerr << "\nLeafNode<ActiveState, 3> uses " << leaf1.memUsage() << " bytes" << std::endl; //std::cerr << "LeafNode<bool, 3> uses " << leaf2.memUsage() << " bytes" << std::endl; } { LeafType leaf(openvdb::Coord(0, 0, 0), false); for (openvdb::Index n = 0; n < leaf.numValues(); ++n) { EXPECT_EQ(false, leaf.getValue(leaf.offsetToLocalCoord(n))); } } { LeafType leaf(openvdb::Coord(0, 0, 0), true); for (openvdb::Index n = 0; n < leaf.numValues(); ++n) { EXPECT_EQ(true, leaf.getValue(leaf.offsetToLocalCoord(n))); } } {// test Buffer::data() LeafType leaf(openvdb::Coord(0, 0, 0), false); leaf.fill(true); LeafType::Buffer::WordType* w = leaf.buffer().data(); for (openvdb::Index n = 0; n < LeafType::Buffer::WORD_COUNT; ++n) { EXPECT_EQ(~LeafType::Buffer::WordType(0), w[n]); } } {// test const Buffer::data() LeafType leaf(openvdb::Coord(0, 0, 0), false); leaf.fill(true); const LeafType& cleaf = leaf; const LeafType::Buffer::WordType* w = cleaf.buffer().data(); for (openvdb::Index n = 0; n < LeafType::Buffer::WORD_COUNT; ++n) { EXPECT_EQ(~LeafType::Buffer::WordType(0), w[n]); } } } TEST_F(TestLeafMask, testSetValue) { LeafType leaf(openvdb::Coord(0, 0, 0), false); openvdb::Coord xyz(0, 0, 0); EXPECT_TRUE(!leaf.isValueOn(xyz)); leaf.setValueOn(xyz); EXPECT_TRUE(leaf.isValueOn(xyz)); xyz.reset(7, 7, 7); EXPECT_TRUE(!leaf.isValueOn(xyz)); leaf.setValueOn(xyz); EXPECT_TRUE(leaf.isValueOn(xyz)); leaf.setValueOn(xyz, true); EXPECT_TRUE(leaf.isValueOn(xyz)); leaf.setValueOn(xyz, false); // value and state are the same! 
EXPECT_TRUE(!leaf.isValueOn(xyz)); leaf.setValueOff(xyz); EXPECT_TRUE(!leaf.isValueOn(xyz)); xyz.reset(2, 3, 6); leaf.setValueOn(xyz); EXPECT_TRUE(leaf.isValueOn(xyz)); leaf.setValueOff(xyz); EXPECT_TRUE(!leaf.isValueOn(xyz)); } TEST_F(TestLeafMask, testProbeValue) { LeafType leaf(openvdb::Coord(0, 0, 0)); leaf.setValueOn(openvdb::Coord(1, 6, 5)); bool val; EXPECT_TRUE(leaf.probeValue(openvdb::Coord(1, 6, 5), val)); EXPECT_TRUE(!leaf.probeValue(openvdb::Coord(1, 6, 4), val)); } TEST_F(TestLeafMask, testIterators) { LeafType leaf(openvdb::Coord(0, 0, 0)); leaf.setValueOn(openvdb::Coord(1, 2, 3)); leaf.setValueOn(openvdb::Coord(5, 2, 3)); openvdb::Coord sum; for (LeafType::ValueOnIter iter = leaf.beginValueOn(); iter; ++iter) { sum += iter.getCoord(); } EXPECT_EQ(openvdb::Coord(1 + 5, 2 + 2, 3 + 3), sum); openvdb::Index count = 0; for (LeafType::ValueOffIter iter = leaf.beginValueOff(); iter; ++iter, ++count); EXPECT_EQ(leaf.numValues() - 2, count); count = 0; for (LeafType::ValueAllIter iter = leaf.beginValueAll(); iter; ++iter, ++count); EXPECT_EQ(leaf.numValues(), count); count = 0; for (LeafType::ChildOnIter iter = leaf.beginChildOn(); iter; ++iter, ++count); EXPECT_EQ(openvdb::Index(0), count); count = 0; for (LeafType::ChildOffIter iter = leaf.beginChildOff(); iter; ++iter, ++count); EXPECT_EQ(openvdb::Index(0), count); count = 0; for (LeafType::ChildAllIter iter = leaf.beginChildAll(); iter; ++iter, ++count); EXPECT_EQ(leaf.numValues(), count); } TEST_F(TestLeafMask, testIteratorGetCoord) { using namespace openvdb; LeafType leaf(openvdb::Coord(8, 8, 0)); EXPECT_EQ(Coord(8, 8, 0), leaf.origin()); leaf.setValueOn(Coord(1, 2, 3), -3); leaf.setValueOn(Coord(5, 2, 3), 4); LeafType::ValueOnIter iter = leaf.beginValueOn(); Coord xyz = iter.getCoord(); EXPECT_EQ(Coord(9, 10, 3), xyz); ++iter; xyz = iter.getCoord(); EXPECT_EQ(Coord(13, 10, 3), xyz); } TEST_F(TestLeafMask, testEquivalence) { using openvdb::CoordBBox; using openvdb::Coord; { LeafType leaf(Coord(0, 0, 0), false); // false and inactive LeafType leaf2(Coord(0, 0, 0), true); // true and inactive EXPECT_TRUE(leaf != leaf2); leaf.fill(CoordBBox(Coord(0), Coord(LeafType::DIM - 1)), true, false); EXPECT_TRUE(leaf == leaf2); // true and inactive leaf.setValuesOn(); // true and active leaf2.fill(CoordBBox(Coord(0), Coord(LeafType::DIM - 1)), false); // false and active EXPECT_TRUE(leaf != leaf2); leaf.negate(); // false and active EXPECT_TRUE(leaf == leaf2); // Set some values. 
leaf.setValueOn(Coord(0, 0, 0), true); leaf.setValueOn(Coord(0, 1, 0), true); leaf.setValueOn(Coord(1, 1, 0), true); leaf.setValueOn(Coord(1, 1, 2), true); leaf2.setValueOn(Coord(0, 0, 0), true); leaf2.setValueOn(Coord(0, 1, 0), true); leaf2.setValueOn(Coord(1, 1, 0), true); leaf2.setValueOn(Coord(1, 1, 2), true); EXPECT_TRUE(leaf == leaf2); leaf2.setValueOn(Coord(0, 0, 1), true); EXPECT_TRUE(leaf != leaf2); leaf2.setValueOff(Coord(0, 0, 1), false); EXPECT_TRUE(leaf == leaf2);//values and states coinside leaf2.setValueOn(Coord(0, 0, 1)); EXPECT_TRUE(leaf != leaf2);//values and states coinside } {// test LeafNode<bool>::operator==() LeafType leaf1(Coord(0 , 0, 0), true); // true and inactive LeafType leaf2(Coord(1 , 0, 0), true); // true and inactive LeafType leaf3(Coord(LeafType::DIM, 0, 0), true); // true and inactive LeafType leaf4(Coord(0 , 0, 0), true, true);//true and active EXPECT_TRUE(leaf1 == leaf2); EXPECT_TRUE(leaf1 != leaf3); EXPECT_TRUE(leaf2 != leaf3); EXPECT_TRUE(leaf1 == leaf4); EXPECT_TRUE(leaf2 == leaf4); EXPECT_TRUE(leaf3 != leaf4); } } TEST_F(TestLeafMask, testGetOrigin) { { LeafType leaf(openvdb::Coord(1, 0, 0), 1); EXPECT_EQ(openvdb::Coord(0, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(0, 0, 0), 1); EXPECT_EQ(openvdb::Coord(0, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(8, 0, 0), 1); EXPECT_EQ(openvdb::Coord(8, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(8, 1, 0), 1); EXPECT_EQ(openvdb::Coord(8, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(1024, 1, 3), 1); EXPECT_EQ(openvdb::Coord(128*8, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(1023, 1, 3), 1); EXPECT_EQ(openvdb::Coord(127*8, 0, 0), leaf.origin()); } { LeafType leaf(openvdb::Coord(512, 512, 512), 1); EXPECT_EQ(openvdb::Coord(512, 512, 512), leaf.origin()); } { LeafType leaf(openvdb::Coord(2, 52, 515), 1); EXPECT_EQ(openvdb::Coord(0, 48, 512), leaf.origin()); } } TEST_F(TestLeafMask, testNegativeIndexing) { using namespace openvdb; LeafType leaf(openvdb::Coord(-9, -2, -8)); EXPECT_EQ(Coord(-16, -8, -8), leaf.origin()); leaf.setValueOn(Coord(1, 2, 3)); leaf.setValueOn(Coord(5, 2, 3)); EXPECT_TRUE(leaf.isValueOn(Coord(1, 2, 3))); EXPECT_TRUE(leaf.isValueOn(Coord(5, 2, 3))); LeafType::ValueOnIter iter = leaf.beginValueOn(); Coord xyz = iter.getCoord(); EXPECT_EQ(Coord(-15, -6, -5), xyz); ++iter; xyz = iter.getCoord(); EXPECT_EQ(Coord(-11, -6, -5), xyz); } TEST_F(TestLeafMask, testIO) { LeafType leaf(openvdb::Coord(1, 3, 5)); const openvdb::Coord origin = leaf.origin(); leaf.setValueOn(openvdb::Coord(0, 1, 0)); leaf.setValueOn(openvdb::Coord(1, 0, 0)); std::ostringstream ostr(std::ios_base::binary); leaf.writeBuffers(ostr); leaf.setValueOff(openvdb::Coord(0, 1, 0)); leaf.setValueOn(openvdb::Coord(0, 1, 1)); std::istringstream istr(ostr.str(), std::ios_base::binary); // Since the input stream doesn't include a VDB header with file format version info, // tag the input stream explicitly with the current version number. 
openvdb::io::setCurrentVersion(istr); leaf.readBuffers(istr); EXPECT_EQ(origin, leaf.origin()); EXPECT_TRUE(leaf.isValueOn(openvdb::Coord(0, 1, 0))); EXPECT_TRUE(leaf.isValueOn(openvdb::Coord(1, 0, 0))); EXPECT_TRUE(leaf.onVoxelCount() == 2); } TEST_F(TestLeafMask, testTopologyCopy) { using openvdb::Coord; // LeafNode<float, Log2Dim> having the same Log2Dim as LeafType typedef LeafType::ValueConverter<float>::Type FloatLeafType; FloatLeafType fleaf(Coord(10, 20, 30), -1.0); std::set<Coord> coords; for (openvdb::Index n = 0; n < fleaf.numValues(); n += 10) { Coord xyz = fleaf.offsetToGlobalCoord(n); fleaf.setValueOn(xyz, float(n)); coords.insert(xyz); } LeafType leaf(fleaf, openvdb::TopologyCopy()); EXPECT_EQ(fleaf.onVoxelCount(), leaf.onVoxelCount()); EXPECT_TRUE(leaf.hasSameTopology(&fleaf)); for (LeafType::ValueOnIter iter = leaf.beginValueOn(); iter; ++iter) { coords.erase(iter.getCoord()); } EXPECT_TRUE(coords.empty()); } TEST_F(TestLeafMask, testMerge) { LeafType leaf(openvdb::Coord(0, 0, 0)); for (openvdb::Index n = 0; n < leaf.numValues(); n += 10) { leaf.setValueOn(n); } EXPECT_TRUE(!leaf.isValueMaskOn()); EXPECT_TRUE(!leaf.isValueMaskOff()); bool val = false, active = false; EXPECT_TRUE(!leaf.isConstant(val, active)); LeafType leaf2(leaf); leaf2.getValueMask().toggle(); EXPECT_TRUE(!leaf2.isValueMaskOn()); EXPECT_TRUE(!leaf2.isValueMaskOff()); val = active = false; EXPECT_TRUE(!leaf2.isConstant(val, active)); leaf.merge<openvdb::MERGE_ACTIVE_STATES>(leaf2); EXPECT_TRUE(leaf.isValueMaskOn()); EXPECT_TRUE(!leaf.isValueMaskOff()); val = active = false; EXPECT_TRUE(leaf.isConstant(val, active)); EXPECT_TRUE(active); } TEST_F(TestLeafMask, testCombine) { struct Local { static void op(openvdb::CombineArgs<bool>& args) { args.setResult(args.aIsActive() ^ args.bIsActive());// state = value } }; LeafType leaf(openvdb::Coord(0, 0, 0)); for (openvdb::Index n = 0; n < leaf.numValues(); n += 10) leaf.setValueOn(n); EXPECT_TRUE(!leaf.isValueMaskOn()); EXPECT_TRUE(!leaf.isValueMaskOff()); const LeafType::NodeMaskType savedMask = leaf.getValueMask(); OPENVDB_LOG_DEBUG_RUNTIME(leaf.str()); LeafType leaf2(leaf); for (openvdb::Index n = 0; n < leaf.numValues(); n += 4) leaf2.setValueOn(n); EXPECT_TRUE(!leaf2.isValueMaskOn()); EXPECT_TRUE(!leaf2.isValueMaskOff()); OPENVDB_LOG_DEBUG_RUNTIME(leaf2.str()); leaf.combine(leaf2, Local::op); OPENVDB_LOG_DEBUG_RUNTIME(leaf.str()); EXPECT_TRUE(leaf.getValueMask() == (savedMask ^ leaf2.getValueMask())); } TEST_F(TestLeafMask, testTopologyTree) { using namespace openvdb; #if 0 FloatGrid::Ptr inGrid; FloatTree::Ptr inTree; { //io::File vdbFile("/work/rd/fx_tools/vdb_unittest/TestGridCombine::testCsg/large1.vdb2"); io::File vdbFile("/hosts/whitestar/usr/pic1/VDB/bunny_0256.vdb2"); vdbFile.open(); inGrid = gridPtrCast<FloatGrid>(vdbFile.readGrid("LevelSet")); EXPECT_TRUE(inGrid.get() != NULL); inTree = inGrid->treePtr(); EXPECT_TRUE(inTree.get() != NULL); } #else FloatGrid::Ptr inGrid = FloatGrid::create(); EXPECT_TRUE(inGrid.get() != NULL); FloatTree& inTree = inGrid->tree(); inGrid->setName("LevelSet"); unittest_util::makeSphere<FloatGrid>(Coord(128),//dim Vec3f(0, 0, 0),//center 5,//radius *inGrid, unittest_util::SPHERE_DENSE); #endif const Index64 floatTreeMem = inTree.memUsage(), floatTreeLeafCount = inTree.leafCount(), floatTreeVoxelCount = inTree.activeVoxelCount(); TreeBase::Ptr outTree(new TopologyTree(inTree, false, true, TopologyCopy())); EXPECT_TRUE(outTree.get() != NULL); TopologyGrid::Ptr outGrid = TopologyGrid::create(*inGrid); // copy transform and 
metadata outGrid->setTree(outTree); outGrid->setName("Boolean"); const Index64 boolTreeMem = outTree->memUsage(), boolTreeLeafCount = outTree->leafCount(), boolTreeVoxelCount = outTree->activeVoxelCount(); #if 0 GridPtrVec grids; grids.push_back(inGrid); grids.push_back(outGrid); io::File vdbFile("bool_tree.vdb2"); vdbFile.write(grids); vdbFile.close(); #endif EXPECT_EQ(floatTreeLeafCount, boolTreeLeafCount); EXPECT_EQ(floatTreeVoxelCount, boolTreeVoxelCount); //std::cerr << "\nboolTree mem=" << boolTreeMem << " bytes" << std::endl; //std::cerr << "floatTree mem=" << floatTreeMem << " bytes" << std::endl; // Considering only voxel buffer memory usage, the BoolTree would be expected // to use (2 mask bits/voxel / ((32 value bits + 1 mask bit)/voxel)) = ~1/16 // as much memory as the FloatTree. Considering total memory usage, verify that // the BoolTree is no more than 1/10 the size of the FloatTree. EXPECT_TRUE(boolTreeMem * 10 <= floatTreeMem); } TEST_F(TestLeafMask, testMedian) { using namespace openvdb; LeafType leaf(openvdb::Coord(0, 0, 0), /*background=*/false); bool state = false; EXPECT_EQ(Index(0), leaf.medianOn(state)); EXPECT_TRUE(state == true); EXPECT_EQ(leaf.numValues(), leaf.medianOff(state)); EXPECT_TRUE(state == false); EXPECT_TRUE(!leaf.medianAll()); leaf.setValue(Coord(0,0,0), true); EXPECT_EQ(Index(1), leaf.medianOn(state)); EXPECT_TRUE(state == true); EXPECT_EQ(leaf.numValues()-1, leaf.medianOff(state)); EXPECT_TRUE(state == false); EXPECT_TRUE(!leaf.medianAll()); leaf.setValue(Coord(0,0,1), true); EXPECT_EQ(Index(2), leaf.medianOn(state)); EXPECT_TRUE(state == true); EXPECT_EQ(leaf.numValues()-2, leaf.medianOff(state)); EXPECT_TRUE(state == false); EXPECT_TRUE(!leaf.medianAll()); leaf.setValue(Coord(5,0,1), true); EXPECT_EQ(Index(3), leaf.medianOn(state)); EXPECT_TRUE(state == true); EXPECT_EQ(leaf.numValues()-3, leaf.medianOff(state)); EXPECT_TRUE(state == false); EXPECT_TRUE(!leaf.medianAll()); leaf.fill(false, false); EXPECT_EQ(Index(0), leaf.medianOn(state)); EXPECT_TRUE(state == true); EXPECT_EQ(leaf.numValues(), leaf.medianOff(state)); EXPECT_TRUE(state == false); EXPECT_TRUE(!leaf.medianAll()); for (Index i=0; i<leaf.numValues()/2; ++i) { leaf.setValueOn(i, true); EXPECT_TRUE(!leaf.medianAll()); EXPECT_EQ(Index(i+1), leaf.medianOn(state)); EXPECT_TRUE(state == true); EXPECT_EQ(leaf.numValues()-i-1, leaf.medianOff(state)); EXPECT_TRUE(state == false); } for (Index i=leaf.numValues()/2; i<leaf.numValues(); ++i) { leaf.setValueOn(i, true); EXPECT_TRUE(leaf.medianAll()); EXPECT_EQ(Index(i+1), leaf.medianOn(state)); EXPECT_TRUE(state == true); EXPECT_EQ(leaf.numValues()-i-1, leaf.medianOff(state)); EXPECT_TRUE(state == false); } } // void // TestLeafMask::testFilter() // { // using namespace openvdb; // BoolGrid::Ptr grid = BoolGrid::create(); // EXPECT_TRUE(grid.get() != NULL); // BoolTree::Ptr tree = grid->treePtr(); // EXPECT_TRUE(tree.get() != NULL); // grid->setName("filtered"); // unittest_util::makeSphere<BoolGrid>(Coord(32),// dim // Vec3f(0, 0, 0),// center // 10,// radius // *grid, unittest_util::SPHERE_DENSE); // BoolTree::Ptr copyOfTree(new BoolTree(*tree)); // BoolGrid::Ptr copyOfGrid = BoolGrid::create(copyOfTree); // copyOfGrid->setName("original"); // tools::Filter<BoolGrid> filter(*grid); // filter.offset(1); // #if 0 // GridPtrVec grids; // grids.push_back(copyOfGrid); // grids.push_back(grid); // io::File vdbFile("TestLeafMask::testFilter.vdb2"); // vdbFile.write(grids); // vdbFile.close(); // #endif // // Verify that offsetting all active voxels by 
1 (true) has no effect, // // since the active voxels were all true to begin with. // EXPECT_TRUE(tree->hasSameTopology(*copyOfTree)); // }
17,401
C++
29.8
103
0.604333
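A small sketch of the value/state coupling that testSetValue above verifies for mask leaf nodes: in LeafNode<ValueMask, 3> the stored value and the active state are the same bit. Only calls that appear in the test are used; the coordinates are illustrative.

// Sketch: setting a "false" value on a mask leaf also deactivates the voxel.
#include <openvdb/openvdb.h>
#include <openvdb/tree/LeafNode.h>
#include <iostream>

int main()
{
    using MaskLeaf = openvdb::tree::LeafNode<openvdb::ValueMask, 3>; // 8x8x8 voxels

    MaskLeaf leaf(openvdb::Coord(0, 0, 0), /*value=*/false);
    const openvdb::Coord xyz(7, 7, 7);

    leaf.setValueOn(xyz);                     // switches the bit on
    std::cout << leaf.isValueOn(xyz) << "\n"; // 1
    leaf.setValueOn(xyz, false);              // value and state share one bit, so this turns it off
    std::cout << leaf.isValueOn(xyz) << "\n"; // 0
    return 0;
}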
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestUtil.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <tbb/task_scheduler_init.h> #include <tbb/enumerable_thread_specific.h> #include <tbb/parallel_for.h> #include <tbb/blocked_range.h> #include <openvdb/Exceptions.h> #include <openvdb/util/CpuTimer.h> #include <openvdb/util/PagedArray.h> #include <openvdb/util/Formats.h> #include <chrono> #include <iostream> //#define BENCHMARK_PAGED_ARRAY // For benchmark comparisons #ifdef BENCHMARK_PAGED_ARRAY #include <deque> // for std::deque #include <vector> // for std::vector #include <tbb/tbb.h> // for tbb::concurrent_vector #endif class TestUtil: public ::testing::Test { public: using RangeT = tbb::blocked_range<size_t>; // Multi-threading ArrayT::ValueBuffer::push_back template<typename ArrayT> struct BufferPushBack { BufferPushBack(ArrayT& array) : mBuffer(array) {} void parallel(size_t size) { tbb::parallel_for(RangeT(size_t(0), size, 256*mBuffer.pageSize()), *this); } void serial(size_t size) { (*this)(RangeT(size_t(0), size)); } void operator()(const RangeT& r) const { for (size_t i=r.begin(), n=r.end(); i!=n; ++i) mBuffer.push_back(i); } mutable typename ArrayT::ValueBuffer mBuffer;//local instance }; // Thread Local Storage version of BufferPushBack template<typename ArrayT> struct TLS_BufferPushBack { using PoolT = tbb::enumerable_thread_specific<typename ArrayT::ValueBuffer>; TLS_BufferPushBack(ArrayT &array) : mArray(&array), mPool(nullptr) {} void parallel(size_t size) { typename ArrayT::ValueBuffer exemplar(*mArray);//dummy used for initialization mPool = new PoolT(exemplar);//thread local storage pool of ValueBuffers tbb::parallel_for(RangeT(size_t(0), size, 256*mArray->pageSize()), *this); for (auto i=mPool->begin(); i!=mPool->end(); ++i) i->flush(); delete mPool; } void operator()(const RangeT& r) const { typename PoolT::reference buffer = mPool->local(); for (size_t i=r.begin(), n=r.end(); i!=n; ++i) buffer.push_back(i); } ArrayT *mArray; PoolT *mPool; }; }; TEST_F(TestUtil, testFormats) { {// TODO: add unit tests for printBytes } {// TODO: add a unit tests for printNumber } {// test long format printTime const int width = 4, precision = 1, verbose = 1; const int days = 1; const int hours = 3; const int minutes = 59; const int seconds = 12; const double milliseconds = 347.6; const double mseconds = milliseconds + (seconds + (minutes + (hours + days*24)*60)*60)*1000.0; std::ostringstream ostr1, ostr2; EXPECT_EQ(4, openvdb::util::printTime(ostr2, mseconds, "Completed in ", "", width, precision, verbose )); ostr1 << std::setprecision(precision) << std::setiosflags(std::ios::fixed); ostr1 << "Completed in " << days << " day, " << hours << " hours, " << minutes << " minutes, " << seconds << " seconds and " << std::setw(width) << milliseconds << " milliseconds (" << mseconds << "ms)"; //std::cerr << ostr2.str() << std::endl; EXPECT_EQ(ostr1.str(), ostr2.str()); } {// test compact format printTime const int width = 4, precision = 1, verbose = 0; const int days = 1; const int hours = 3; const int minutes = 59; const int seconds = 12; const double milliseconds = 347.6; const double mseconds = milliseconds + (seconds + (minutes + (hours + days*24)*60)*60)*1000.0; std::ostringstream ostr1, ostr2; EXPECT_EQ(4, openvdb::util::printTime(ostr2, mseconds, "Completed in ", "", width, precision, verbose )); ostr1 << std::setprecision(precision) << std::setiosflags(std::ios::fixed); ostr1 << "Completed in " << days << "d " << hours << "h " << minutes << "m " << std::setw(width) << 
(seconds + milliseconds/1000.0) << "s"; //std::cerr << ostr2.str() << std::endl; EXPECT_EQ(ostr1.str(), ostr2.str()); } } TEST_F(TestUtil, testCpuTimer) { // std::this_thread::sleep_for() only guarantees that the time slept is no less // than the requested time, which can be inaccurate, particularly on Windows, // so use this more accurate, but non-asynchronous implementation for unit testing auto sleep_for = [&](int ms) -> void { auto start = std::chrono::steady_clock::now(); while (true) { auto duration = std::chrono::duration_cast<std::chrono::milliseconds>( std::chrono::steady_clock::now() - start); if (duration.count() > ms) return; } }; const int expected = 159, tolerance = 20;//milliseconds { openvdb::util::CpuTimer timer; sleep_for(expected); const int actual1 = static_cast<int>(timer.milliseconds()); EXPECT_NEAR(expected, actual1, tolerance); sleep_for(expected); const int actual2 = static_cast<int>(timer.milliseconds()); EXPECT_NEAR(2*expected, actual2, tolerance); } { openvdb::util::CpuTimer timer; sleep_for(expected); auto t1 = timer.restart(); sleep_for(expected); sleep_for(expected); auto t2 = timer.restart(); EXPECT_NEAR(2*t1, t2, tolerance); } } TEST_F(TestUtil, testPagedArray) { #ifdef BENCHMARK_PAGED_ARRAY const size_t problemSize = 2560000; openvdb::util::CpuTimer timer; std::cerr << "\nProblem size for benchmark: " << problemSize << std::endl; #else const size_t problemSize = 256000; #endif {//serial PagedArray::push_back (check return value) openvdb::util::PagedArray<int> d; EXPECT_TRUE(d.isEmpty()); EXPECT_EQ(size_t(0), d.size()); EXPECT_EQ(size_t(10), d.log2PageSize()); EXPECT_EQ(size_t(1)<<d.log2PageSize(), d.pageSize()); EXPECT_EQ(size_t(0), d.pageCount()); EXPECT_EQ(size_t(0), d.capacity()); EXPECT_EQ(size_t(0), d.push_back_unsafe(10)); EXPECT_EQ(10, d[0]); EXPECT_TRUE(!d.isEmpty()); EXPECT_EQ(size_t(1), d.size()); EXPECT_EQ(size_t(1), d.pageCount()); EXPECT_EQ(d.pageSize(), d.capacity()); EXPECT_EQ(size_t(1), d.push_back_unsafe(1)); EXPECT_EQ(size_t(2), d.size()); EXPECT_EQ(size_t(1), d.pageCount()); EXPECT_EQ(d.pageSize(), d.capacity()); for (size_t i=2; i<d.pageSize(); ++i) EXPECT_EQ(i, d.push_back_unsafe(int(i))); EXPECT_EQ(d.pageSize(), d.size()); EXPECT_EQ(size_t(1), d.pageCount()); EXPECT_EQ(d.pageSize(), d.capacity()); for (int i=2, n=int(d.size()); i<n; ++i) EXPECT_EQ(i, d[i]); EXPECT_EQ(d.pageSize(), d.push_back_unsafe(1)); EXPECT_EQ(d.pageSize()+1, d.size()); EXPECT_EQ(size_t(2), d.pageCount()); EXPECT_EQ(2*d.pageSize(), d.capacity()); } {//serial PagedArray::push_back_unsafe #ifdef BENCHMARK_PAGED_ARRAY timer.start("2: Serial PagedArray::push_back_unsafe with default page size"); #endif openvdb::util::PagedArray<size_t> d; for (size_t i=0; i<problemSize; ++i) d.push_back_unsafe(i); #ifdef BENCHMARK_PAGED_ARRAY timer.stop(); #endif EXPECT_EQ(problemSize, d.size()); for (size_t i=0; i<problemSize; ++i) EXPECT_EQ(i, d[i]); } #ifdef BENCHMARK_PAGED_ARRAY {//benchmark against a std::vector timer.start("5: Serial std::vector::push_back"); std::vector<size_t> v; for (size_t i=0; i<problemSize; ++i) v.push_back(i); timer.stop(); EXPECT_EQ(problemSize, v.size()); for (size_t i=0; i<problemSize; ++i) EXPECT_EQ(i, v[i]); } {//benchmark against a std::deque timer.start("6: Serial std::deque::push_back"); std::deque<size_t> d; for (size_t i=0; i<problemSize; ++i) d.push_back(i); timer.stop(); EXPECT_EQ(problemSize, d.size()); for (size_t i=0; i<problemSize; ++i) EXPECT_EQ(i, d[i]); EXPECT_EQ(problemSize, d.size()); std::deque<int> d2; EXPECT_EQ(size_t(0), 
d2.size()); d2.resize(1234); EXPECT_EQ(size_t(1234), d2.size()); } {//benchmark against a tbb::concurrent_vector::push_back timer.start("7: Serial tbb::concurrent_vector::push_back"); tbb::concurrent_vector<size_t> v; for (size_t i=0; i<problemSize; ++i) v.push_back(i); timer.stop(); EXPECT_EQ(problemSize, v.size()); for (size_t i=0; i<problemSize; ++i) EXPECT_EQ(i, v[i]); v.clear(); timer.start("8: Parallel tbb::concurrent_vector::push_back"); using ArrayT = openvdb::util::PagedArray<size_t>; tbb::parallel_for(tbb::blocked_range<size_t>(0, problemSize, ArrayT::pageSize()), [&v](const tbb::blocked_range<size_t> &range){ for (size_t i=range.begin(); i!=range.end(); ++i) v.push_back(i);}); timer.stop(); tbb::parallel_sort(v.begin(), v.end()); for (size_t i=0; i<problemSize; ++i) EXPECT_EQ(i, v[i]); } #endif {//serial PagedArray::ValueBuffer::push_back using ArrayT = openvdb::util::PagedArray<size_t, 3UL>; ArrayT d; EXPECT_EQ(size_t(0), d.size()); d.resize(problemSize); EXPECT_EQ(problemSize, d.size()); EXPECT_EQ(size_t(1)<<d.log2PageSize(), d.pageSize()); // pageCount - 1 = max index >> log2PageSize EXPECT_EQ((problemSize-1)>>d.log2PageSize(), d.pageCount()-1); EXPECT_EQ(d.pageCount()*d.pageSize(), d.capacity()); d.clear(); EXPECT_EQ(size_t(0), d.size()); #ifdef BENCHMARK_PAGED_ARRAY timer.start("9: Serial PagedArray::ValueBuffer::push_back"); #endif BufferPushBack<ArrayT> tmp(d); tmp.serial(problemSize); #ifdef BENCHMARK_PAGED_ARRAY timer.stop(); #endif EXPECT_EQ(problemSize, d.size()); for (size_t i=0; i<problemSize; ++i) EXPECT_EQ(i, d[i]); size_t unsorted = 0; for (size_t i=0, n=d.size(); i<n; ++i) unsorted += i != d[i]; EXPECT_EQ(size_t(0), unsorted); #ifdef BENCHMARK_PAGED_ARRAY timer.start("parallel sort"); #endif d.sort(); #ifdef BENCHMARK_PAGED_ARRAY timer.stop(); #endif for (size_t i=0, n=d.size(); i<n; ++i) EXPECT_EQ(i, d[i]); EXPECT_EQ(problemSize, d.size()); EXPECT_EQ(size_t(1)<<d.log2PageSize(), d.pageSize()); EXPECT_EQ((d.size()-1)>>d.log2PageSize(), d.pageCount()-1); EXPECT_EQ(d.pageCount()*d.pageSize(), d.capacity()); } {//parallel PagedArray::ValueBuffer::push_back using ArrayT = openvdb::util::PagedArray<size_t>; ArrayT d; #ifdef BENCHMARK_PAGED_ARRAY timer.start("10: Parallel PagedArray::ValueBuffer::push_back"); #endif BufferPushBack<ArrayT> tmp(d); tmp.parallel(problemSize); #ifdef BENCHMARK_PAGED_ARRAY timer.stop(); #endif EXPECT_EQ(problemSize, d.size()); EXPECT_EQ(size_t(1)<<d.log2PageSize(), d.pageSize()); EXPECT_EQ((d.size()-1)>>d.log2PageSize(), d.pageCount()-1); EXPECT_EQ(d.pageCount()*d.pageSize(), d.capacity()); // Test sorting #ifdef BENCHMARK_PAGED_ARRAY timer.start("parallel sort"); #endif d.sort(); #ifdef BENCHMARK_PAGED_ARRAY timer.stop(); #endif for (size_t i=0; i<d.size(); ++i) EXPECT_EQ(i, d[i]); #ifdef BENCHMARK_PAGED_ARRAY timer.start("parallel inverse sort"); #endif d.invSort(); #ifdef BENCHMARK_PAGED_ARRAY timer.stop(); #endif for (size_t i=0, n=d.size()-1; i<=n; ++i) EXPECT_EQ(n-i, d[i]); EXPECT_EQ(problemSize, d.push_back_unsafe(1)); EXPECT_EQ(problemSize+1, d.size()); EXPECT_EQ(size_t(1)<<d.log2PageSize(), d.pageSize()); // pageCount - 1 = max index >> log2PageSize EXPECT_EQ(size_t(1)+(problemSize>>d.log2PageSize()), d.pageCount()); EXPECT_EQ(d.pageCount()*d.pageSize(), d.capacity()); // test PagedArray::fill const size_t v = 13; d.fill(v); for (size_t i=0, n=d.capacity(); i<n; ++i) EXPECT_EQ(v, d[i]); } {//test PagedArray::ValueBuffer::flush using ArrayT = openvdb::util::PagedArray<size_t>; ArrayT d; EXPECT_EQ(size_t(0), d.size()); { 
//ArrayT::ValueBuffer vc(d); auto vc = d.getBuffer(); vc.push_back(1); vc.push_back(2); EXPECT_EQ(size_t(0), d.size()); vc.flush(); EXPECT_EQ(size_t(2), d.size()); EXPECT_EQ(size_t(1), d[0]); EXPECT_EQ(size_t(2), d[1]); } EXPECT_EQ(size_t(2), d.size()); EXPECT_EQ(size_t(1), d[0]); EXPECT_EQ(size_t(2), d[1]); } {//thread-local-storage PagedArray::ValueBuffer::push_back followed by parallel sort using ArrayT = openvdb::util::PagedArray<size_t>; ArrayT d; #ifdef BENCHMARK_PAGED_ARRAY timer.start("11: Parallel TLS PagedArray::ValueBuffer::push_back"); #endif {// for some reason this: TLS_BufferPushBack<ArrayT> tmp(d); tmp.parallel(problemSize); }// is faster than: //ArrayT::ValueBuffer exemplar(d);//dummy used for initialization ///tbb::enumerable_thread_specific<ArrayT::ValueBuffer> pool(exemplar);//thread local storage pool of ValueBuffers //tbb::parallel_for(tbb::blocked_range<size_t>(0, problemSize, d.pageSize()), // [&pool](const tbb::blocked_range<size_t> &range){ // ArrayT::ValueBuffer &buffer = pool.local(); // for (size_t i=range.begin(); i!=range.end(); ++i) buffer.push_back(i);}); //for (auto i=pool.begin(); i!=pool.end(); ++i) i->flush(); #ifdef BENCHMARK_PAGED_ARRAY timer.stop(); #endif //std::cerr << "Number of threads for TLS = " << (buffer.end()-buffer.begin()) << std::endl; //d.print(); EXPECT_EQ(problemSize, d.size()); EXPECT_EQ(size_t(1)<<d.log2PageSize(), d.pageSize()); EXPECT_EQ((d.size()-1)>>d.log2PageSize(), d.pageCount()-1); EXPECT_EQ(d.pageCount()*d.pageSize(), d.capacity()); // Not guaranteed to pass //size_t unsorted = 0; //for (size_t i=0, n=d.size(); i<n; ++i) unsorted += i != d[i]; //EXPECT_TRUE( unsorted > 0 ); #ifdef BENCHMARK_PAGED_ARRAY timer.start("parallel sort"); #endif d.sort(); #ifdef BENCHMARK_PAGED_ARRAY timer.stop(); #endif for (size_t i=0, n=d.size(); i<n; ++i) EXPECT_EQ(i, d[i]); } {//parallel PagedArray::merge followed by parallel sort using ArrayT = openvdb::util::PagedArray<size_t>; ArrayT d, d2; tbb::parallel_for(tbb::blocked_range<size_t>(0, problemSize, d.pageSize()), [&d](const tbb::blocked_range<size_t> &range){ ArrayT::ValueBuffer buffer(d); for (size_t i=range.begin(); i!=range.end(); ++i) buffer.push_back(i);}); EXPECT_EQ(problemSize, d.size()); EXPECT_EQ(size_t(1)<<d.log2PageSize(), d.pageSize()); EXPECT_EQ((d.size()-1)>>d.log2PageSize(), d.pageCount()-1); EXPECT_EQ(d.pageCount()*d.pageSize(), d.capacity()); EXPECT_TRUE(!d.isPartiallyFull()); d.push_back_unsafe(problemSize); EXPECT_TRUE(d.isPartiallyFull()); tbb::parallel_for(tbb::blocked_range<size_t>(problemSize+1, 2*problemSize+1, d2.pageSize()), [&d2](const tbb::blocked_range<size_t> &range){ ArrayT::ValueBuffer buffer(d2); for (size_t i=range.begin(); i!=range.end(); ++i) buffer.push_back(i);}); //for (size_t i=d.size(), n=i+problemSize; i<n; ++i) d2.push_back(i); EXPECT_TRUE(!d2.isPartiallyFull()); EXPECT_EQ(problemSize, d2.size()); EXPECT_EQ(size_t(1)<<d2.log2PageSize(), d2.pageSize()); EXPECT_EQ((d2.size()-1)>>d2.log2PageSize(), d2.pageCount()-1); EXPECT_EQ(d2.pageCount()*d2.pageSize(), d2.capacity()); //d.print(); //d2.print(); #ifdef BENCHMARK_PAGED_ARRAY timer.start("parallel PagedArray::merge"); #endif d.merge(d2); #ifdef BENCHMARK_PAGED_ARRAY timer.stop(); #endif EXPECT_TRUE(d.isPartiallyFull()); //d.print(); //d2.print(); EXPECT_EQ(2*problemSize+1, d.size()); EXPECT_EQ((d.size()-1)>>d.log2PageSize(), d.pageCount()-1); EXPECT_EQ(size_t(0), d2.size()); EXPECT_EQ(size_t(0), d2.pageCount()); #ifdef BENCHMARK_PAGED_ARRAY timer.start("parallel sort of merged array"); #endif 
d.sort(); #ifdef BENCHMARK_PAGED_ARRAY timer.stop(); #endif for (size_t i=0, n=d.size(); i<n; ++i) EXPECT_EQ(i, d[i]); } {//examples in doxygen {// 1 openvdb::util::PagedArray<int> array; for (int i=0; i<100000; ++i) array.push_back_unsafe(i); for (int i=0; i<100000; ++i) EXPECT_EQ(i, array[i]); } {//2A openvdb::util::PagedArray<int> array; openvdb::util::PagedArray<int>::ValueBuffer buffer(array); for (int i=0; i<100000; ++i) buffer.push_back(i); buffer.flush(); for (int i=0; i<100000; ++i) EXPECT_EQ(i, array[i]); } {//2B openvdb::util::PagedArray<int> array; {//local scope of a single thread openvdb::util::PagedArray<int>::ValueBuffer buffer(array); for (int i=0; i<100000; ++i) buffer.push_back(i); } for (int i=0; i<100000; ++i) EXPECT_EQ(i, array[i]); } {//3A openvdb::util::PagedArray<int> array; array.resize(100000); for (int i=0; i<100000; ++i) array[i] = i; for (int i=0; i<100000; ++i) EXPECT_EQ(i, array[i]); } {//3B using ArrayT = openvdb::util::PagedArray<int>; ArrayT array; array.resize(100000); for (ArrayT::Iterator i=array.begin(); i!=array.end(); ++i) *i = int(i.pos()); for (int i=0; i<100000; ++i) EXPECT_EQ(i, array[i]); } } }
18,437
C++
36.705521
122
0.568856
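The TestPagedArray file above exercises openvdb::util::PagedArray through its ValueBuffer, push_back_unsafe, sort and merge paths, and its "examples in doxygen" block shows the intended usage. A minimal standalone sketch of the buffered push_back pattern, assuming OpenVDB is installed and linked (the element count of 100000 is arbitrary):

#include <openvdb/util/PagedArray.h>

int main()
{
    openvdb::util::PagedArray<int> array;
    {
        // A ValueBuffer batches insertions for one thread and flushes them
        // into the array when it goes out of scope (or on flush()).
        openvdb::util::PagedArray<int>::ValueBuffer buffer(array);
        for (int i = 99999; i >= 0; --i) buffer.push_back(i);
    }
    array.sort(); // parallel sort across all pages
    return (array.size() == 100000 && array[0] == 0) ? 0 : 1;
}

For concurrent insertion, the tests above give each thread its own ValueBuffer (or a thread-local one) and flush them all before reading, since only the buffered path is safe to use from multiple threads.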
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestTransform.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/math/Transform.h> #include <sstream> class TestTransform: public ::testing::Test { public: void SetUp() override; void TearDown() override; }; //////////////////////////////////////// void TestTransform::SetUp() { openvdb::math::MapRegistry::clear(); openvdb::math::AffineMap::registerMap(); openvdb::math::ScaleMap::registerMap(); openvdb::math::UniformScaleMap::registerMap(); openvdb::math::TranslationMap::registerMap(); openvdb::math::ScaleTranslateMap::registerMap(); openvdb::math::UniformScaleTranslateMap::registerMap(); } void TestTransform::TearDown() { openvdb::math::MapRegistry::clear(); } ////openvdb:://////////////////////////////////// TEST_F(TestTransform, testLinearTransform) { using namespace openvdb; double TOL = 1e-7; // Test: Scaling math::Transform::Ptr t = math::Transform::createLinearTransform(0.5); Vec3R voxelSize = t->voxelSize(); EXPECT_NEAR(0.5, voxelSize[0], TOL); EXPECT_NEAR(0.5, voxelSize[1], TOL); EXPECT_NEAR(0.5, voxelSize[2], TOL); EXPECT_TRUE(t->hasUniformScale()); // world to index space Vec3R xyz(-1.0, 2.0, 4.0); xyz = t->worldToIndex(xyz); EXPECT_NEAR(-2.0, xyz[0], TOL); EXPECT_NEAR( 4.0, xyz[1], TOL); EXPECT_NEAR( 8.0, xyz[2], TOL); xyz = Vec3R(-0.7, 2.4, 4.7); // cell centered conversion Coord ijk = t->worldToIndexCellCentered(xyz); EXPECT_EQ(Coord(-1, 5, 9), ijk); // node centrered conversion ijk = t->worldToIndexNodeCentered(xyz); EXPECT_EQ(Coord(-2, 4, 9), ijk); // index to world space ijk = Coord(4, 2, -8); xyz = t->indexToWorld(ijk); EXPECT_NEAR( 2.0, xyz[0], TOL); EXPECT_NEAR( 1.0, xyz[1], TOL); EXPECT_NEAR(-4.0, xyz[2], TOL); // I/O test { std::stringstream ss(std::stringstream::in | std::stringstream::out | std::stringstream::binary); t->write(ss); t = math::Transform::createLinearTransform(); // Since we wrote only a fragment of a VDB file (in particular, we didn't // write the header), set the file format version number explicitly. 
io::setCurrentVersion(ss); t->read(ss); } // check map type EXPECT_EQ(math::UniformScaleMap::mapType(), t->baseMap()->type()); voxelSize = t->voxelSize(); EXPECT_NEAR(0.5, voxelSize[0], TOL); EXPECT_NEAR(0.5, voxelSize[1], TOL); EXPECT_NEAR(0.5, voxelSize[2], TOL); ////////// // Test: Scale, translation & rotation t = math::Transform::createLinearTransform(2.0); // rotate, 180 deg, (produces a diagonal matrix that can be simplified into a scale map) // with diagonal -2, 2, -2 const double PI = std::atan(1.0)*4; t->preRotate(PI, math::Y_AXIS); // this is just a rotation so it will have uniform scale EXPECT_TRUE(t->hasUniformScale()); EXPECT_EQ(math::ScaleMap::mapType(), t->baseMap()->type()); voxelSize = t->voxelSize(); xyz = t->worldToIndex(Vec3R(-2.0, -2.0, -2.0)); EXPECT_NEAR(2.0, voxelSize[0], TOL); EXPECT_NEAR(2.0, voxelSize[1], TOL); EXPECT_NEAR(2.0, voxelSize[2], TOL); EXPECT_NEAR( 1.0, xyz[0], TOL); EXPECT_NEAR(-1.0, xyz[1], TOL); EXPECT_NEAR( 1.0, xyz[2], TOL); // translate t->postTranslate(Vec3d(1.0, 0.0, 1.0)); EXPECT_EQ(math::ScaleTranslateMap::mapType(), t->baseMap()->type()); voxelSize = t->voxelSize(); xyz = t->worldToIndex(Vec3R(-2.0, -2.0, -2.0)); EXPECT_NEAR(2.0, voxelSize[0], TOL); EXPECT_NEAR(2.0, voxelSize[1], TOL); EXPECT_NEAR(2.0, voxelSize[2], TOL); EXPECT_NEAR( 1.5, xyz[0], TOL); EXPECT_NEAR(-1.0, xyz[1], TOL); EXPECT_NEAR( 1.5, xyz[2], TOL); // I/O test { std::stringstream ss(std::stringstream::in | std::stringstream::out | std::stringstream::binary); t->write(ss); t = math::Transform::createLinearTransform(); // Since we wrote only a fragment of a VDB file (in particular, we didn't // write the header), set the file format version number explicitly. io::setCurrentVersion(ss); t->read(ss); } // check map type EXPECT_EQ(math::ScaleTranslateMap::mapType(), t->baseMap()->type()); voxelSize = t->voxelSize(); EXPECT_NEAR(2.0, voxelSize[0], TOL); EXPECT_NEAR(2.0, voxelSize[1], TOL); EXPECT_NEAR(2.0, voxelSize[2], TOL); xyz = t->worldToIndex(Vec3R(-2.0, -2.0, -2.0)); EXPECT_NEAR( 1.5, xyz[0], TOL); EXPECT_NEAR(-1.0, xyz[1], TOL); EXPECT_NEAR( 1.5, xyz[2], TOL); // new transform t = math::Transform::createLinearTransform(1.0); // rotate 90 deg t->preRotate( std::atan(1.0) * 2 , math::Y_AXIS); // check map type EXPECT_EQ(math::AffineMap::mapType(), t->baseMap()->type()); xyz = t->worldToIndex(Vec3R(1.0, 1.0, 1.0)); EXPECT_NEAR(-1.0, xyz[0], TOL); EXPECT_NEAR( 1.0, xyz[1], TOL); EXPECT_NEAR( 1.0, xyz[2], TOL); // I/O test { std::stringstream ss(std::stringstream::in | std::stringstream::out | std::stringstream::binary); t->write(ss); t = math::Transform::createLinearTransform(); EXPECT_EQ(math::UniformScaleMap::mapType(), t->baseMap()->type()); xyz = t->worldToIndex(Vec3R(1.0, 1.0, 1.0)); EXPECT_NEAR(1.0, xyz[0], TOL); EXPECT_NEAR(1.0, xyz[1], TOL); EXPECT_NEAR(1.0, xyz[2], TOL); // Since we wrote only a fragment of a VDB file (in particular, we didn't // write the header), set the file format version number explicitly. 
io::setCurrentVersion(ss); t->read(ss); } // check map type EXPECT_EQ(math::AffineMap::mapType(), t->baseMap()->type()); xyz = t->worldToIndex(Vec3R(1.0, 1.0, 1.0)); EXPECT_NEAR(-1.0, xyz[0], TOL); EXPECT_NEAR( 1.0, xyz[1], TOL); EXPECT_NEAR( 1.0, xyz[2], TOL); } //////////////////////////////////////// TEST_F(TestTransform, testTransformEquality) { using namespace openvdb; // maps created in different ways may be equivalent math::Transform::Ptr t1 = math::Transform::createLinearTransform(0.5); math::Mat4d mat = math::Mat4d::identity(); mat.preScale(math::Vec3d(0.5, 0.5, 0.5)); math::Transform::Ptr t2 = math::Transform::createLinearTransform(mat); EXPECT_TRUE( *t1 == *t2); // test that the auto-convert to the simplest form worked EXPECT_TRUE( t1->mapType() == t2->mapType()); mat.preScale(math::Vec3d(1., 1., .4)); math::Transform::Ptr t3 = math::Transform::createLinearTransform(mat); EXPECT_TRUE( *t1 != *t3); // test equality between different but equivalent maps math::UniformScaleTranslateMap::Ptr ustmap( new math::UniformScaleTranslateMap(1.0, math::Vec3d(0,0,0))); math::Transform::Ptr t4( new math::Transform( ustmap) ); EXPECT_TRUE( t4->baseMap()->isType<math::UniformScaleMap>() ); math::Transform::Ptr t5( new math::Transform); // constructs with a scale map EXPECT_TRUE( t5->baseMap()->isType<math::ScaleMap>() ); EXPECT_TRUE( *t5 == *t4); EXPECT_TRUE( t5->mapType() != t4->mapType() ); // test inequatlity of two maps of the same type math::UniformScaleTranslateMap::Ptr ustmap2( new math::UniformScaleTranslateMap(1.0, math::Vec3d(1,0,0))); math::Transform::Ptr t6( new math::Transform( ustmap2) ); EXPECT_TRUE( t6->baseMap()->isType<math::UniformScaleTranslateMap>() ); EXPECT_TRUE( *t6 != *t4); // test comparison of linear to nonlinear map openvdb::BBoxd bbox(math::Vec3d(0), math::Vec3d(100)); math::Transform::Ptr frustum = math::Transform::createFrustumTransform(bbox, 0.25, 10); EXPECT_TRUE( *frustum != *t1 ); } //////////////////////////////////////// TEST_F(TestTransform, testBackwardCompatibility) { using namespace openvdb; double TOL = 1e-7; // Register maps math::MapRegistry::clear(); math::AffineMap::registerMap(); math::ScaleMap::registerMap(); math::TranslationMap::registerMap(); math::ScaleTranslateMap::registerMap(); std::stringstream ss(std::stringstream::in | std::stringstream::out | std::stringstream::binary); ////////// // Construct and write out an old transform that gets converted // into a ScaleMap on read. // First write the old transform type name writeString(ss, Name("LinearTransform")); // Second write the old transform's base class membes. Coord tmpMin(0), tmpMax(1); ss.write(reinterpret_cast<char*>(&tmpMin), sizeof(Coord::ValueType) * 3); ss.write(reinterpret_cast<char*>(&tmpMax), sizeof(Coord::ValueType) * 3); // Last write out the old linear transform's members math::Mat4d tmpLocalToWorld = math::Mat4d::identity(), tmpWorldToLocal = math::Mat4d::identity(), tmpVoxelToLocal = math::Mat4d::identity(), tmpLocalToVoxel = math::Mat4d::identity(); tmpVoxelToLocal.preScale(math::Vec3d(0.5, 0.5, 0.5)); tmpLocalToWorld.write(ss); tmpWorldToLocal.write(ss); tmpVoxelToLocal.write(ss); tmpLocalToVoxel.write(ss); // Read in the old transform and converting it to the new map based implementation. 
math::Transform::Ptr t = math::Transform::createLinearTransform(1.0); t->read(ss); // check map type EXPECT_EQ(math::UniformScaleMap::mapType(), t->baseMap()->type()); Vec3d voxelSize = t->voxelSize(); EXPECT_NEAR(0.5, voxelSize[0], TOL); EXPECT_NEAR(0.5, voxelSize[1], TOL); EXPECT_NEAR(0.5, voxelSize[2], TOL); Vec3d xyz = t->worldToIndex(Vec3d(-1.0, 2.0, 4.0)); EXPECT_NEAR(-2.0, xyz[0], TOL); EXPECT_NEAR( 4.0, xyz[1], TOL); EXPECT_NEAR( 8.0, xyz[2], TOL); ////////// // Construct and write out an old transform that gets converted // into a ScaleTranslateMap on read. ss.clear(); writeString(ss, Name("LinearTransform")); ss.write(reinterpret_cast<char*>(&tmpMin), sizeof(Coord::ValueType) * 3); ss.write(reinterpret_cast<char*>(&tmpMax), sizeof(Coord::ValueType) * 3); tmpLocalToWorld = math::Mat4d::identity(), tmpWorldToLocal = math::Mat4d::identity(), tmpVoxelToLocal = math::Mat4d::identity(), tmpLocalToVoxel = math::Mat4d::identity(); tmpVoxelToLocal.preScale(math::Vec3d(2.0, 2.0, 2.0)); tmpLocalToWorld.setTranslation(math::Vec3d(1.0, 0.0, 1.0)); tmpLocalToWorld.write(ss); tmpWorldToLocal.write(ss); tmpVoxelToLocal.write(ss); tmpLocalToVoxel.write(ss); // Read in the old transform and converting it to the new map based implementation. t = math::Transform::createLinearTransform(); // rest transform t->read(ss); EXPECT_EQ(math::UniformScaleTranslateMap::mapType(), t->baseMap()->type()); voxelSize = t->voxelSize(); EXPECT_NEAR(2.0, voxelSize[0], TOL); EXPECT_NEAR(2.0, voxelSize[1], TOL); EXPECT_NEAR(2.0, voxelSize[2], TOL); xyz = t->worldToIndex(Vec3d(1.0, 1.0, 1.0)); EXPECT_NEAR(0.0, xyz[0], TOL); EXPECT_NEAR(0.5, xyz[1], TOL); EXPECT_NEAR(0.0, xyz[2], TOL); ////////// // Construct and write out an old transform that gets converted // into a AffineMap on read. ss.clear(); writeString(ss, Name("LinearTransform")); ss.write(reinterpret_cast<char*>(&tmpMin), sizeof(Coord::ValueType) * 3); ss.write(reinterpret_cast<char*>(&tmpMax), sizeof(Coord::ValueType) * 3); tmpLocalToWorld = math::Mat4d::identity(), tmpWorldToLocal = math::Mat4d::identity(), tmpVoxelToLocal = math::Mat4d::identity(), tmpLocalToVoxel = math::Mat4d::identity(); tmpVoxelToLocal.preScale(math::Vec3d(1.0, 1.0, 1.0)); tmpLocalToWorld.preRotate( math::Y_AXIS, std::atan(1.0) * 2); tmpLocalToWorld.write(ss); tmpWorldToLocal.write(ss); tmpVoxelToLocal.write(ss); tmpLocalToVoxel.write(ss); // Read in the old transform and converting it to the new map based implementation. 
t = math::Transform::createLinearTransform(); // rest transform t->read(ss); EXPECT_EQ(math::AffineMap::mapType(), t->baseMap()->type()); voxelSize = t->voxelSize(); EXPECT_NEAR(1.0, voxelSize[0], TOL); EXPECT_NEAR(1.0, voxelSize[1], TOL); EXPECT_NEAR(1.0, voxelSize[2], TOL); xyz = t->worldToIndex(Vec3d(1.0, 1.0, 1.0)); EXPECT_NEAR(-1.0, xyz[0], TOL); EXPECT_NEAR( 1.0, xyz[1], TOL); EXPECT_NEAR( 1.0, xyz[2], TOL); } TEST_F(TestTransform, testIsIdentity) { using namespace openvdb; math::Transform::Ptr t = math::Transform::createLinearTransform(1.0); EXPECT_TRUE(t->isIdentity()); t->preScale(Vec3d(2,2,2)); EXPECT_TRUE(!t->isIdentity()); t->preScale(Vec3d(0.5,0.5,0.5)); EXPECT_TRUE(t->isIdentity()); BBoxd bbox(math::Vec3d(-5,-5,0), Vec3d(5,5,10)); math::Transform::Ptr f = math::Transform::createFrustumTransform(bbox, /*taper*/ 1, /*depth*/ 1, /*voxel size*/ 1); f->preScale(Vec3d(10,10,10)); EXPECT_TRUE(f->isIdentity()); // rotate by PI/2 f->postRotate(std::atan(1.0)*2, math::Y_AXIS); EXPECT_TRUE(!f->isIdentity()); f->postRotate(std::atan(1.0)*6, math::Y_AXIS); EXPECT_TRUE(f->isIdentity()); } TEST_F(TestTransform, testBoundingBoxes) { using namespace openvdb; { math::Transform::ConstPtr t = math::Transform::createLinearTransform(0.5); const BBoxd bbox(Vec3d(-8.0), Vec3d(16.0)); BBoxd xBBox = t->indexToWorld(bbox); EXPECT_EQ(Vec3d(-4.0), xBBox.min()); EXPECT_EQ(Vec3d(8.0), xBBox.max()); xBBox = t->worldToIndex(xBBox); EXPECT_EQ(bbox.min(), xBBox.min()); EXPECT_EQ(bbox.max(), xBBox.max()); } { const double PI = std::atan(1.0) * 4.0, SQRT2 = std::sqrt(2.0); math::Transform::Ptr t = math::Transform::createLinearTransform(1.0); t->preRotate(PI / 4.0, math::Z_AXIS); const BBoxd bbox(Vec3d(-10.0), Vec3d(10.0)); BBoxd xBBox = t->indexToWorld(bbox); // expand in x and y by sqrt(2) EXPECT_TRUE(Vec3d(-10.0 * SQRT2, -10.0 * SQRT2, -10.0).eq(xBBox.min())); EXPECT_TRUE(Vec3d(10.0 * SQRT2, 10.0 * SQRT2, 10.0).eq(xBBox.max())); xBBox = t->worldToIndex(xBBox); // expand again in x and y by sqrt(2) EXPECT_TRUE(Vec3d(-20.0, -20.0, -10.0).eq(xBBox.min())); EXPECT_TRUE(Vec3d(20.0, 20.0, 10.0).eq(xBBox.max())); } /// @todo frustum transform } //////////////////////////////////////// /// @todo Test the new frustum transform. /* TEST_F(TestTransform, testNonlinearTransform) { using namespace openvdb; double TOL = 1e-7; } */
14,983
C++
28.151751
92
0.602283
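A minimal sketch, assuming an installed and initialized OpenVDB build, of the linear-transform round trip that the scaling test above verifies: with a uniform voxel size of 0.5, world-space (-1, 2, 4) maps to index-space (-2, 4, 8) and back.

#include <openvdb/openvdb.h>
#include <openvdb/math/Transform.h>

int main()
{
    openvdb::initialize();

    // Uniform-scale transform: index-to-world scale factor (voxel size) of 0.5.
    openvdb::math::Transform::Ptr xform =
        openvdb::math::Transform::createLinearTransform(/*voxelSize=*/0.5);

    // worldToIndex divides by the voxel size; indexToWorld multiplies by it.
    const openvdb::Vec3d ijk = xform->worldToIndex(openvdb::Vec3d(-1.0, 2.0, 4.0));
    const openvdb::Vec3d xyz = xform->indexToWorld(ijk);

    const bool ok = ijk.eq(openvdb::Vec3d(-2.0, 4.0, 8.0))
                 && xyz.eq(openvdb::Vec3d(-1.0, 2.0, 4.0));
    return ok ? 0 : 1;
}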
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestMat4Metadata.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/Metadata.h> class TestMat4Metadata : public ::testing::Test { }; TEST_F(TestMat4Metadata, testMat4s) { using namespace openvdb; Metadata::Ptr m(new Mat4SMetadata(openvdb::math::Mat4s(1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f))); Metadata::Ptr m3 = m->copy(); EXPECT_TRUE(dynamic_cast<Mat4SMetadata*>( m.get()) != 0); EXPECT_TRUE(dynamic_cast<Mat4SMetadata*>(m3.get()) != 0); EXPECT_TRUE( m->typeName().compare("mat4s") == 0); EXPECT_TRUE(m3->typeName().compare("mat4s") == 0); Mat4SMetadata *s = dynamic_cast<Mat4SMetadata*>(m.get()); EXPECT_TRUE(s->value() == openvdb::math::Mat4s(1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f)); s->value() = openvdb::math::Mat4s(3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f); EXPECT_TRUE(s->value() == openvdb::math::Mat4s(3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f)); m3->copy(*s); s = dynamic_cast<Mat4SMetadata*>(m3.get()); EXPECT_TRUE(s->value() == openvdb::math::Mat4s(3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f)); } TEST_F(TestMat4Metadata, testMat4d) { using namespace openvdb; Metadata::Ptr m(new Mat4DMetadata(openvdb::math::Mat4d(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0))); Metadata::Ptr m3 = m->copy(); EXPECT_TRUE(dynamic_cast<Mat4DMetadata*>( m.get()) != 0); EXPECT_TRUE(dynamic_cast<Mat4DMetadata*>(m3.get()) != 0); EXPECT_TRUE( m->typeName().compare("mat4d") == 0); EXPECT_TRUE(m3->typeName().compare("mat4d") == 0); Mat4DMetadata *s = dynamic_cast<Mat4DMetadata*>(m.get()); EXPECT_TRUE(s->value() == openvdb::math::Mat4d(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)); s->value() = openvdb::math::Mat4d(3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0); EXPECT_TRUE(s->value() == openvdb::math::Mat4d(3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0)); m3->copy(*s); s = dynamic_cast<Mat4DMetadata*>(m3.get()); EXPECT_TRUE(s->value() == openvdb::math::Mat4d(3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0)); }
4,125
C++
45.35955
85
0.358303
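A minimal sketch, assuming OpenVDB is available, of the Mat4DMetadata pattern the matrix-metadata tests above cover: construct typed metadata from a 4x4 matrix, copy it polymorphically, and read the value back through a cast.

#include <openvdb/openvdb.h>
#include <openvdb/Metadata.h>

int main()
{
    openvdb::initialize();

    // 4x4 double-precision identity matrix wrapped in typed metadata.
    const openvdb::math::Mat4d mat = openvdb::math::Mat4d::identity();
    openvdb::Metadata::Ptr meta(new openvdb::Mat4DMetadata(mat));

    // copy() returns a new Metadata::Ptr of the same dynamic type.
    openvdb::Metadata::Ptr copy = meta->copy();
    auto* typed = dynamic_cast<openvdb::Mat4DMetadata*>(copy.get());

    const bool ok = typed != nullptr
                 && typed->typeName() == "mat4d"
                 && typed->value() == mat;
    return ok ? 0 : 1;
}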
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestPointDataLeaf.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/points/PointDataGrid.h> #include <openvdb/openvdb.h> #include <openvdb/io/io.h> #include <cmath> #include <ios> #include <limits> #include <memory> #include <sstream> #include <vector> using namespace openvdb; using namespace openvdb::points; class TestPointDataLeaf: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; // class TestPointDataLeaf using LeafType = PointDataTree::LeafNodeType; using ValueType = LeafType::ValueType; using BufferType = LeafType::Buffer; namespace { bool matchingNamePairs(const openvdb::NamePair& lhs, const openvdb::NamePair& rhs) { if (lhs.first != rhs.first) return false; if (lhs.second != rhs.second) return false; return true; } bool zeroLeafValues(const LeafType* leafNode) { for (openvdb::Index i = 0; i < LeafType::SIZE; i++) { if (leafNode->buffer().getValue(i) != LeafType::ValueType(0)) return false; } return true; } bool noAttributeData(const LeafType* leafNode) { const AttributeSet& attributeSet = leafNode->attributeSet(); return attributeSet.size() == 0 && attributeSet.descriptor().size() == 0; } bool monotonicOffsets(const LeafType& leafNode) { int previous = -1; for (auto iter = leafNode.cbeginValueOn(); iter; ++iter) { if (previous > int(*iter)) return false; previous = int(*iter); } return true; } // (borrowed from PointIndexGrid unit test) class PointList { public: using PosType = openvdb::Vec3R; using value_type = openvdb::Vec3R; PointList(const std::vector<openvdb::Vec3R>& points) : mPoints(&points) { } size_t size() const { return mPoints->size(); } void getPos(size_t n, openvdb::Vec3R& xyz) const { xyz = (*mPoints)[n]; } protected: std::vector<openvdb::Vec3R> const * const mPoints; }; // PointList // Generate random points by uniformly distributing points // on a unit-sphere. // (borrowed from PointIndexGrid unit test) std::vector<openvdb::Vec3R> genPoints(const int numPoints) { // init openvdb::math::Random01 randNumber(0); const int n = int(std::sqrt(double(numPoints))); const double xScale = (2.0 * M_PI) / double(n); const double yScale = M_PI / double(n); double x, y, theta, phi; std::vector<openvdb::Vec3R> points; points.reserve(n*n); // loop over a [0 to n) x [0 to n) grid. for (int a = 0; a < n; ++a) { for (int b = 0; b < n; ++b) { // jitter, move to random pos. inside the current cell x = double(a) + randNumber(); y = double(b) + randNumber(); // remap to a lat/long map theta = y * yScale; // [0 to PI] phi = x * xScale; // [0 to 2PI] // convert to cartesian coordinates on a unit sphere. 
// spherical coordinate triplet (r=1, theta, phi) points.emplace_back( std::sin(theta)*std::cos(phi), std::sin(theta)*std::sin(phi), std::cos(theta) ); } } return points; } } // namespace TEST_F(TestPointDataLeaf, testEmptyLeaf) { // empty leaf construction { LeafType* leafNode = new LeafType(); EXPECT_TRUE(leafNode); EXPECT_TRUE(leafNode->isEmpty()); EXPECT_TRUE(!leafNode->buffer().empty()); EXPECT_TRUE(zeroLeafValues(leafNode)); EXPECT_TRUE(noAttributeData(leafNode)); EXPECT_TRUE(leafNode->origin() == openvdb::Coord(0, 0, 0)); delete leafNode; } // empty leaf with non-zero origin construction { openvdb::Coord coord(20, 30, 40); LeafType* leafNode = new LeafType(coord); EXPECT_TRUE(leafNode); EXPECT_TRUE(leafNode->isEmpty()); EXPECT_TRUE(!leafNode->buffer().empty()); EXPECT_TRUE(zeroLeafValues(leafNode)); EXPECT_TRUE(noAttributeData(leafNode)); EXPECT_TRUE(leafNode->origin() == openvdb::Coord(16, 24, 40)); delete leafNode; } } TEST_F(TestPointDataLeaf, testOffsets) { // offsets for one point per voxel (active = true) { LeafType* leafNode = new LeafType(); for (openvdb::Index i = 0; i < LeafType::SIZE; i++) { leafNode->setOffsetOn(i, i); } EXPECT_TRUE(leafNode->getValue(10) == 10); EXPECT_TRUE(leafNode->isDense()); delete leafNode; } // offsets for one point per voxel (active = false) { LeafType* leafNode = new LeafType(); for (openvdb::Index i = 0; i < LeafType::SIZE; i++) { leafNode->setOffsetOnly(i, i); } EXPECT_TRUE(leafNode->getValue(10) == 10); EXPECT_TRUE(leafNode->isEmpty()); delete leafNode; } // test bulk offset replacement without activity mask update { LeafType* leafNode = new LeafType(); for (openvdb::Index i = 0; i < LeafType::SIZE; ++i) { leafNode->setOffsetOn(i, 10); } std::vector<LeafType::ValueType> newOffsets(LeafType::SIZE); leafNode->setOffsets(newOffsets, /*updateValueMask*/false); const LeafType::NodeMaskType& valueMask = leafNode->getValueMask(); for (openvdb::Index i = 0; i < LeafType::SIZE; ++i ) { EXPECT_TRUE(valueMask.isOn(i)); } delete leafNode; } // test bulk offset replacement with activity mask update { LeafType* leafNode = new LeafType(); for (openvdb::Index i = 0; i < LeafType::SIZE; ++i) { leafNode->setOffsetOn(i, 10); } std::vector<LeafType::ValueType> newOffsets(LeafType::SIZE); leafNode->setOffsets(newOffsets, /*updateValueMask*/true); const LeafType::NodeMaskType& valueMask = leafNode->getValueMask(); for (openvdb::Index i = 0; i < LeafType::SIZE; ++i ) { EXPECT_TRUE(valueMask.isOff(i)); } delete leafNode; } // ensure bulk offset replacement fails when vector size doesn't equal number of voxels { LeafType* leafNode = new LeafType(); std::vector<LeafType::ValueType> newOffsets; EXPECT_THROW(leafNode->setOffsets(newOffsets), openvdb::ValueError); delete leafNode; } // test offset validation { using AttributeVec3s = TypedAttributeArray<Vec3s>; using AttributeS = TypedAttributeArray<float>; using Descriptor = AttributeSet::Descriptor; // empty Descriptor should throw on leaf node initialize auto emptyDescriptor = std::make_shared<Descriptor>(); LeafType* emptyLeafNode = new LeafType(); EXPECT_THROW(emptyLeafNode->initializeAttributes(emptyDescriptor, 5), openvdb::IndexError); // create a non-empty Descriptor Descriptor::Ptr descriptor = Descriptor::create(AttributeVec3s::attributeType()); // ensure validateOffsets succeeds for monotonically increasing offsets that fully // utilise the underlying attribute arrays { const size_t numAttributes = 1; LeafType* leafNode = new LeafType(); leafNode->initializeAttributes(descriptor, numAttributes); descriptor = 
descriptor->duplicateAppend("density", AttributeS::attributeType()); leafNode->appendAttribute(leafNode->attributeSet().descriptor(), descriptor, descriptor->find("density")); std::vector<LeafType::ValueType> offsets(LeafType::SIZE); offsets.back() = numAttributes; leafNode->setOffsets(offsets); EXPECT_NO_THROW(leafNode->validateOffsets()); delete leafNode; } // ensure validateOffsets detects non-monotonic offset values { LeafType* leafNode = new LeafType(); std::vector<LeafType::ValueType> offsets(LeafType::SIZE); *offsets.begin() = 1; leafNode->setOffsets(offsets); EXPECT_THROW(leafNode->validateOffsets(), openvdb::ValueError); delete leafNode; } // ensure validateOffsets detects inconsistent attribute array sizes { descriptor = Descriptor::create(AttributeVec3s::attributeType()); const size_t numAttributes = 1; LeafType* leafNode = new LeafType(); leafNode->initializeAttributes(descriptor, numAttributes); descriptor = descriptor->duplicateAppend("density", AttributeS::attributeType()); leafNode->appendAttribute(leafNode->attributeSet().descriptor(), descriptor, descriptor->find("density")); AttributeSet* newSet = new AttributeSet(leafNode->attributeSet(), numAttributes); newSet->replace("density", AttributeS::create(numAttributes+1)); leafNode->replaceAttributeSet(newSet); std::vector<LeafType::ValueType> offsets(LeafType::SIZE); offsets.back() = numAttributes; leafNode->setOffsets(offsets); EXPECT_THROW(leafNode->validateOffsets(), openvdb::ValueError); delete leafNode; } // ensure validateOffsets detects unused attributes (e.g. final voxel offset not // equal to size of attribute arrays) { descriptor = Descriptor::create(AttributeVec3s::attributeType()); const size_t numAttributes = 1; LeafType* leafNode = new LeafType(); leafNode->initializeAttributes(descriptor, numAttributes); descriptor = descriptor->duplicateAppend("density", AttributeS::attributeType()); leafNode->appendAttribute(leafNode->attributeSet().descriptor(), descriptor, descriptor->find("density")); std::vector<LeafType::ValueType> offsets(LeafType::SIZE); offsets.back() = numAttributes - 1; leafNode->setOffsets(offsets); EXPECT_THROW(leafNode->validateOffsets(), openvdb::ValueError); delete leafNode; } // ensure validateOffsets detects out-of-bounds offset values { descriptor = Descriptor::create(AttributeVec3s::attributeType()); const size_t numAttributes = 1; LeafType* leafNode = new LeafType(); leafNode->initializeAttributes(descriptor, numAttributes); descriptor = descriptor->duplicateAppend("density", AttributeS::attributeType()); leafNode->appendAttribute(leafNode->attributeSet().descriptor(), descriptor, descriptor->find("density")); std::vector<LeafType::ValueType> offsets(LeafType::SIZE); offsets.back() = numAttributes + 1; leafNode->setOffsets(offsets); EXPECT_THROW(leafNode->validateOffsets(), openvdb::ValueError); delete leafNode; } } } TEST_F(TestPointDataLeaf, testSetValue) { // the following tests are not run when in debug mode due to assertions firing #ifdef NDEBUG LeafType leaf(openvdb::Coord(0, 0, 0)); openvdb::Coord xyz(0, 0, 0); openvdb::Index index(LeafType::coordToOffset(xyz)); // ensure all non-modifiable operations are no-ops leaf.setValueOnly(xyz, 10); leaf.setValueOnly(index, 10); leaf.setValueOff(xyz, 10); leaf.setValueOff(index, 10); leaf.setValueOn(xyz, 10); leaf.setValueOn(index, 10); struct Local { static inline void op(unsigned int& n) { n = 10; } }; leaf.modifyValue(xyz, Local::op); leaf.modifyValue(index, Local::op); leaf.modifyValueAndActiveState(xyz, Local::op); EXPECT_EQ(0, 
int(leaf.getValue(xyz))); #endif } TEST_F(TestPointDataLeaf, testMonotonicity) { LeafType leaf(openvdb::Coord(0, 0, 0)); // assign aggregate values and activate all non-even coordinate sums unsigned sum = 0; for (unsigned int i = 0; i < LeafType::DIM; i++) { for (unsigned int j = 0; j < LeafType::DIM; j++) { for (unsigned int k = 0; k < LeafType::DIM; k++) { if (((i + j + k) % 2) == 0) continue; leaf.setOffsetOn(LeafType::coordToOffset(openvdb::Coord(i, j, k)), sum++); } } } EXPECT_TRUE(monotonicOffsets(leaf)); // manually change a value and ensure offsets become non-monotonic leaf.setOffsetOn(500, 4); EXPECT_TRUE(!monotonicOffsets(leaf)); } TEST_F(TestPointDataLeaf, testAttributes) { using AttributeVec3s = TypedAttributeArray<Vec3s>; using AttributeI = TypedAttributeArray<int32_t>; // create a descriptor using Descriptor = AttributeSet::Descriptor; Descriptor::Ptr descrA = Descriptor::create(AttributeVec3s::attributeType()); // create a leaf and initialize attributes using this descriptor LeafType leaf(openvdb::Coord(0, 0, 0)); EXPECT_EQ(leaf.attributeSet().size(), size_t(0)); leaf.initializeAttributes(descrA, /*arrayLength=*/100); TypedMetadata<int> defaultValue(7); Metadata& baseDefaultValue = defaultValue; descrA = descrA->duplicateAppend("id", AttributeI::attributeType()); leaf.appendAttribute(leaf.attributeSet().descriptor(), descrA, descrA->find("id"), Index(1), true, &baseDefaultValue); // note that the default value has not been added to the replacement descriptor, // however the default value of the attribute is as expected EXPECT_EQ(0, leaf.attributeSet().descriptor().getDefaultValue<int>("id")); EXPECT_EQ(7, AttributeI::cast(*leaf.attributeSet().getConst("id")).get(0)); EXPECT_EQ(leaf.attributeSet().size(), size_t(2)); { const AttributeArray* array = leaf.attributeSet().get(/*pos=*/0); EXPECT_EQ(array->size(), Index(100)); } // manually set a voxel leaf.setOffsetOn(LeafType::SIZE - 1, 10); EXPECT_TRUE(!zeroLeafValues(&leaf)); // neither dense nor empty EXPECT_TRUE(!leaf.isDense()); EXPECT_TRUE(!leaf.isEmpty()); // clear the attributes and check voxel values are zero but value mask is not touched leaf.clearAttributes(/*updateValueMask=*/ false); EXPECT_TRUE(!leaf.isDense()); EXPECT_TRUE(!leaf.isEmpty()); EXPECT_EQ(leaf.attributeSet().size(), size_t(2)); EXPECT_TRUE(zeroLeafValues(&leaf)); // call clearAttributes again, updating the value mask and check it is now inactive leaf.clearAttributes(); EXPECT_TRUE(leaf.isEmpty()); // ensure arrays are uniform const AttributeArray* array0 = leaf.attributeSet().get(/*pos=*/0); const AttributeArray* array1 = leaf.attributeSet().get(/*pos=*/1); EXPECT_EQ(array0->size(), Index(1)); EXPECT_EQ(array1->size(), Index(1)); // test leaf returns expected result for hasAttribute() EXPECT_TRUE(leaf.hasAttribute(/*pos*/0)); EXPECT_TRUE(leaf.hasAttribute("P")); EXPECT_TRUE(leaf.hasAttribute(/*pos*/1)); EXPECT_TRUE(leaf.hasAttribute("id")); EXPECT_TRUE(!leaf.hasAttribute(/*pos*/2)); EXPECT_TRUE(!leaf.hasAttribute("test")); // test underlying attributeArray can be accessed by name and index, // and that their types are as expected. 
const LeafType* constLeaf = &leaf; EXPECT_TRUE(matchingNamePairs(leaf.attributeArray(/*pos*/0).type(), AttributeVec3s::attributeType())); EXPECT_TRUE(matchingNamePairs(leaf.attributeArray("P").type(), AttributeVec3s::attributeType())); EXPECT_TRUE(matchingNamePairs(leaf.attributeArray(/*pos*/1).type(), AttributeI::attributeType())); EXPECT_TRUE(matchingNamePairs(leaf.attributeArray("id").type(), AttributeI::attributeType())); EXPECT_TRUE(matchingNamePairs(constLeaf->attributeArray(/*pos*/0).type(), AttributeVec3s::attributeType())); EXPECT_TRUE(matchingNamePairs(constLeaf->attributeArray("P").type(), AttributeVec3s::attributeType())); EXPECT_TRUE(matchingNamePairs(constLeaf->attributeArray(/*pos*/1).type(), AttributeI::attributeType())); EXPECT_TRUE(matchingNamePairs(constLeaf->attributeArray("id").type(), AttributeI::attributeType())); // check invalid pos or name throws EXPECT_THROW(leaf.attributeArray(/*pos=*/3), openvdb::LookupError); EXPECT_THROW(leaf.attributeArray("not_there"), openvdb::LookupError); EXPECT_THROW(constLeaf->attributeArray(/*pos=*/3), openvdb::LookupError); EXPECT_THROW(constLeaf->attributeArray("not_there"), openvdb::LookupError); // test leaf can be successfully cast to TypedAttributeArray and check types EXPECT_TRUE(matchingNamePairs(leaf.attributeArray(/*pos=*/0).type(), AttributeVec3s::attributeType())); EXPECT_TRUE(matchingNamePairs(leaf.attributeArray("P").type(), AttributeVec3s::attributeType())); EXPECT_TRUE(matchingNamePairs(leaf.attributeArray(/*pos=*/1).type(), AttributeI::attributeType())); EXPECT_TRUE(matchingNamePairs(leaf.attributeArray("id").type(), AttributeI::attributeType())); EXPECT_TRUE(matchingNamePairs(constLeaf->attributeArray(/*pos=*/0).type(), AttributeVec3s::attributeType())); EXPECT_TRUE(matchingNamePairs(constLeaf->attributeArray("P").type(), AttributeVec3s::attributeType())); EXPECT_TRUE(matchingNamePairs(constLeaf->attributeArray(/*pos=*/1).type(), AttributeI::attributeType())); EXPECT_TRUE(matchingNamePairs(constLeaf->attributeArray("id").type(), AttributeI::attributeType())); // check invalid pos or name throws EXPECT_THROW(leaf.attributeArray(/*pos=*/2), openvdb::LookupError); EXPECT_THROW(leaf.attributeArray("test"), openvdb::LookupError); EXPECT_THROW(constLeaf->attributeArray(/*pos=*/2), openvdb::LookupError); EXPECT_THROW(constLeaf->attributeArray("test"), openvdb::LookupError); // check memory usage = attribute set + base leaf // leaf.initializeAttributes(descrA, /*arrayLength=*/100); const LeafType::BaseLeaf& baseLeaf = static_cast<LeafType::BaseLeaf&>(leaf); const Index64 memUsage = baseLeaf.memUsage() + leaf.attributeSet().memUsage(); EXPECT_EQ(memUsage, leaf.memUsage()); } TEST_F(TestPointDataLeaf, testSteal) { using AttributeVec3s = TypedAttributeArray<Vec3s>; using Descriptor = AttributeSet::Descriptor; // create a descriptor Descriptor::Ptr descrA = Descriptor::create(AttributeVec3s::attributeType()); // create a leaf and initialize attributes using this descriptor LeafType leaf(openvdb::Coord(0, 0, 0)); EXPECT_EQ(leaf.attributeSet().size(), size_t(0)); leaf.initializeAttributes(descrA, /*arrayLength=*/100); EXPECT_EQ(leaf.attributeSet().size(), size_t(1)); // steal the attribute set AttributeSet::UniquePtr attributeSet = leaf.stealAttributeSet(); EXPECT_TRUE(attributeSet); EXPECT_EQ(attributeSet->size(), size_t(1)); // ensure a new attribute set has been inserted in it's place EXPECT_EQ(leaf.attributeSet().size(), size_t(0)); } TEST_F(TestPointDataLeaf, testTopologyCopy) { // test topology copy from a float Leaf { using 
FloatLeaf = openvdb::FloatTree::LeafNodeType; // create a float leaf and activate some values FloatLeaf floatLeaf(openvdb::Coord(0, 0, 0)); floatLeaf.setValueOn(1); floatLeaf.setValueOn(4); floatLeaf.setValueOn(7); floatLeaf.setValueOn(8); EXPECT_EQ(floatLeaf.onVoxelCount(), Index64(4)); // validate construction of a PointDataLeaf using a TopologyCopy LeafType leaf(floatLeaf, 0, openvdb::TopologyCopy()); EXPECT_EQ(leaf.onVoxelCount(), Index64(4)); LeafType leaf2(openvdb::Coord(8, 8, 8)); leaf2.setValueOn(1); leaf2.setValueOn(4); leaf2.setValueOn(7); EXPECT_TRUE(!leaf.hasSameTopology(&leaf2)); leaf2.setValueOn(8); EXPECT_TRUE(leaf.hasSameTopology(&leaf2)); // validate construction of a PointDataLeaf using an Off-On TopologyCopy LeafType leaf3(floatLeaf, 1, 2, openvdb::TopologyCopy()); EXPECT_EQ(leaf3.onVoxelCount(), Index64(4)); } // test topology copy from a PointIndexLeaf { // generate points // (borrowed from PointIndexGrid unit test) const float voxelSize = 0.01f; const openvdb::math::Transform::Ptr transform = openvdb::math::Transform::createLinearTransform(voxelSize); std::vector<openvdb::Vec3R> points = genPoints(40000); PointList pointList(points); // construct point index grid using PointIndexGrid = openvdb::tools::PointIndexGrid; PointIndexGrid::Ptr pointGridPtr = openvdb::tools::createPointIndexGrid<PointIndexGrid>(pointList, *transform); auto iter = pointGridPtr->tree().cbeginLeaf(); EXPECT_TRUE(iter); // check that the active voxel counts match for all leaves for ( ; iter; ++iter) { LeafType leaf(*iter); EXPECT_EQ(iter->onVoxelCount(), leaf.onVoxelCount()); } } } TEST_F(TestPointDataLeaf, testEquivalence) { using AttributeVec3s = TypedAttributeArray<openvdb::Vec3s>; using AttributeF = TypedAttributeArray<float>; using AttributeI = TypedAttributeArray<int32_t>; // create a descriptor using Descriptor = AttributeSet::Descriptor; Descriptor::Ptr descrA = Descriptor::create(AttributeVec3s::attributeType()); // create a leaf and initialize attributes using this descriptor LeafType leaf(openvdb::Coord(0, 0, 0)); leaf.initializeAttributes(descrA, /*arrayLength=*/100); descrA = descrA->duplicateAppend("density", AttributeF::attributeType()); leaf.appendAttribute(leaf.attributeSet().descriptor(), descrA, descrA->find("density")); descrA = descrA->duplicateAppend("id", AttributeI::attributeType()); leaf.appendAttribute(leaf.attributeSet().descriptor(), descrA, descrA->find("id")); // manually activate some voxels leaf.setValueOn(1); leaf.setValueOn(4); leaf.setValueOn(7); // manually change some values in the density array TypedAttributeArray<float>& attr = TypedAttributeArray<float>::cast(leaf.attributeArray("density")); attr.set(0, 5.0f); attr.set(50, 2.0f); attr.set(51, 8.1f); // check deep copy construction (topology and attributes) { LeafType leaf2(leaf); EXPECT_EQ(leaf.onVoxelCount(), leaf2.onVoxelCount()); EXPECT_TRUE(leaf.hasSameTopology(&leaf2)); EXPECT_EQ(leaf.attributeSet().size(), leaf2.attributeSet().size()); EXPECT_EQ(leaf.attributeSet().get(0)->size(), leaf2.attributeSet().get(0)->size()); } // check equivalence { LeafType leaf2(leaf); EXPECT_TRUE(leaf == leaf2); leaf2.setOrigin(openvdb::Coord(0, 8, 0)); EXPECT_TRUE(leaf != leaf2); } { LeafType leaf2(leaf); EXPECT_TRUE(leaf == leaf2); leaf2.setValueOn(10); EXPECT_TRUE(leaf != leaf2); } } TEST_F(TestPointDataLeaf, testIterators) { using AttributeVec3s = TypedAttributeArray<openvdb::Vec3s>; using AttributeF = TypedAttributeArray<float>; // create a descriptor using Descriptor = AttributeSet::Descriptor; Descriptor::Ptr 
descrA = Descriptor::create(AttributeVec3s::attributeType()); // create a leaf and initialize attributes using this descriptor const size_t size = LeafType::NUM_VOXELS; LeafType leaf(openvdb::Coord(0, 0, 0)); leaf.initializeAttributes(descrA, /*arrayLength=*/size/2); descrA = descrA->duplicateAppend("density", AttributeF::attributeType()); leaf.appendAttribute(leaf.attributeSet().descriptor(), descrA, descrA->find("density")); { // uniform monotonic offsets, only even active int offset = 0; for (Index i = 0; i < size; i++) { if ((i % 2) == 0) { leaf.setOffsetOn(i, ++offset); } else { leaf.setOffsetOnly(i, ++offset); leaf.setValueOff(i); } } } { // test index on LeafType::IndexOnIter iterOn(leaf.beginIndexOn()); EXPECT_EQ(iterCount(iterOn), Index64(size/2)); for (int i = 0; iterOn; ++iterOn, i += 2) { EXPECT_EQ(*iterOn, Index32(i)); } } { // test index off LeafType::IndexOffIter iterOff(leaf.beginIndexOff()); EXPECT_EQ(iterCount(iterOff), Index64(size/2)); for (int i = 1; iterOff; ++iterOff, i += 2) { EXPECT_EQ(*iterOff, Index32(i)); } } { // test index all LeafType::IndexAllIter iterAll(leaf.beginIndexAll()); EXPECT_EQ(iterCount(iterAll), Index64(size)); for (int i = 0; iterAll; ++iterAll, ++i) { EXPECT_EQ(*iterAll, Index32(i)); } } } TEST_F(TestPointDataLeaf, testReadWriteCompression) { using namespace openvdb; util::NodeMask<3> valueMask; util::NodeMask<3> childMask; io::StreamMetadata::Ptr nullMetadata; io::StreamMetadata::Ptr streamMetadata(new io::StreamMetadata); { // simple read/write test std::stringstream ss; Index count = 8*8*8; std::unique_ptr<PointDataIndex32[]> srcBuf(new PointDataIndex32[count]); for (Index i = 0; i < count; i++) srcBuf[i] = i; { io::writeCompressedValues(ss, srcBuf.get(), count, valueMask, childMask, false); std::unique_ptr<PointDataIndex32[]> destBuf(new PointDataIndex32[count]); io::readCompressedValues(ss, destBuf.get(), count, valueMask, false); for (Index i = 0; i < count; i++) { EXPECT_EQ(srcBuf.get()[i], destBuf.get()[i]); } } const char* charBuffer = reinterpret_cast<const char*>(srcBuf.get()); size_t referenceBytes = compression::bloscCompressedSize(charBuffer, count*sizeof(PointDataIndex32)); { ss.str(""); io::setStreamMetadataPtr(ss, streamMetadata); io::writeCompressedValuesSize(ss, srcBuf.get(), count); io::writeCompressedValues(ss, srcBuf.get(), count, valueMask, childMask, false); int magic = 1924674; ss.write(reinterpret_cast<const char*>(&magic), sizeof(int)); std::unique_ptr<PointDataIndex32[]> destBuf(new PointDataIndex32[count]); uint16_t size; ss.read(reinterpret_cast<char*>(&size), sizeof(uint16_t)); if (size == std::numeric_limits<uint16_t>::max()) size = 0; EXPECT_EQ(size_t(size), referenceBytes); io::readCompressedValues(ss, destBuf.get(), count, valueMask, false); int magic2; ss.read(reinterpret_cast<char*>(&magic2), sizeof(int)); EXPECT_EQ(magic, magic2); for (Index i = 0; i < count; i++) { EXPECT_EQ(srcBuf.get()[i], destBuf.get()[i]); } io::setStreamMetadataPtr(ss, nullMetadata); } { // repeat but using nullptr for destination to force seek behaviour ss.str(""); io::setStreamMetadataPtr(ss, streamMetadata); io::writeCompressedValuesSize(ss, srcBuf.get(), count); io::writeCompressedValues(ss, srcBuf.get(), count, valueMask, childMask, false); int magic = 3829250; ss.write(reinterpret_cast<const char*>(&magic), sizeof(int)); uint16_t size; ss.read(reinterpret_cast<char*>(&size), sizeof(uint16_t)); uint16_t actualSize(size); if (size == std::numeric_limits<uint16_t>::max()) actualSize = 0; EXPECT_EQ(size_t(actualSize), 
referenceBytes); streamMetadata->setPass(size); PointDataIndex32* forceSeek = nullptr; io::readCompressedValues(ss, forceSeek, count, valueMask, false); int magic2; ss.read(reinterpret_cast<char*>(&magic2), sizeof(int)); EXPECT_EQ(magic, magic2); io::setStreamMetadataPtr(ss, nullMetadata); } #ifndef OPENVDB_USE_BLOSC { // write to indicate Blosc compression std::stringstream ssInvalid; uint16_t bytes16(100); // clamp to 16-bit unsigned integer ssInvalid.write(reinterpret_cast<const char*>(&bytes16), sizeof(uint16_t)); std::unique_ptr<PointDataIndex32[]> destBuf(new PointDataIndex32[count]); EXPECT_THROW(io::readCompressedValues(ssInvalid, destBuf.get(), count, valueMask, false), RuntimeError); } #endif #ifdef OPENVDB_USE_BLOSC { // mis-matching destination bytes cause decompression failures std::unique_ptr<PointDataIndex32[]> destBuf(new PointDataIndex32[count]); ss.str(""); io::writeCompressedValues(ss, srcBuf.get(), count, valueMask, childMask, false); EXPECT_THROW(io::readCompressedValues(ss, destBuf.get(), count+1, valueMask, false), RuntimeError); ss.str(""); io::writeCompressedValues(ss, srcBuf.get(), count, valueMask, childMask, false); EXPECT_THROW(io::readCompressedValues(ss, destBuf.get(), 1, valueMask, false), RuntimeError); } #endif { // seek ss.str(""); io::writeCompressedValues(ss, srcBuf.get(), count, valueMask, childMask, false); int test(10772832); ss.write(reinterpret_cast<const char*>(&test), sizeof(int)); PointDataIndex32* buf = nullptr; io::readCompressedValues(ss, buf, count, valueMask, false); int test2; ss.read(reinterpret_cast<char*>(&test2), sizeof(int)); EXPECT_EQ(test, test2); } } { // two values for non-compressible example std::stringstream ss; Index count = 2; std::unique_ptr<PointDataIndex32[]> srcBuf(new PointDataIndex32[count]); for (Index i = 0; i < count; i++) srcBuf[i] = i; io::writeCompressedValues(ss, srcBuf.get(), count, valueMask, childMask, false); std::unique_ptr<PointDataIndex32[]> destBuf(new PointDataIndex32[count]); io::readCompressedValues(ss, destBuf.get(), count, valueMask, false); for (Index i = 0; i < count; i++) { EXPECT_EQ(srcBuf.get()[i], destBuf.get()[i]); } } { // throw at limit of 16-bit std::stringstream ss; PointDataIndex32* buf = nullptr; Index count = std::numeric_limits<uint16_t>::max(); EXPECT_THROW(io::writeCompressedValues(ss, buf, count, valueMask, childMask, false), IoError); EXPECT_THROW(io::readCompressedValues(ss, buf, count, valueMask, false), IoError); } } TEST_F(TestPointDataLeaf, testIO) { using AttributeVec3s = TypedAttributeArray<openvdb::Vec3s>; using AttributeF = TypedAttributeArray<float>; // create a descriptor using Descriptor = AttributeSet::Descriptor; Descriptor::Ptr descrA = Descriptor::create(AttributeVec3s::attributeType()); // create a leaf and initialize attributes using this descriptor const size_t size = LeafType::NUM_VOXELS; LeafType leaf(openvdb::Coord(0, 0, 0)); leaf.initializeAttributes(descrA, /*arrayLength=*/size/2); descrA = descrA->duplicateAppend("density", AttributeF::attributeType()); leaf.appendAttribute(leaf.attributeSet().descriptor(), descrA, descrA->find("density")); // manually activate some voxels leaf.setOffsetOn(1, 10); leaf.setOffsetOn(4, 20); leaf.setOffsetOn(7, 5); // manually change some values in the density array TypedAttributeArray<float>& attr = TypedAttributeArray<float>::cast(leaf.attributeArray("density")); attr.set(0, 5.0f); attr.set(50, 2.0f); attr.set(51, 8.1f); // read and write topology to disk { LeafType leaf2(openvdb::Coord(0, 0, 0)); std::ostringstream 
ostr(std::ios_base::binary); leaf.writeTopology(ostr); std::istringstream istr(ostr.str(), std::ios_base::binary); leaf2.readTopology(istr); // check topology matches EXPECT_EQ(leaf.onVoxelCount(), leaf2.onVoxelCount()); EXPECT_TRUE(leaf2.isValueOn(4)); EXPECT_TRUE(!leaf2.isValueOn(5)); // check only topology (values and attributes still empty) EXPECT_EQ(leaf2.getValue(4), ValueType(0)); EXPECT_EQ(leaf2.attributeSet().size(), size_t(0)); } // read and write buffers to disk { LeafType leaf2(openvdb::Coord(0, 0, 0)); io::StreamMetadata::Ptr streamMetadata(new io::StreamMetadata); std::ostringstream ostr(std::ios_base::binary); io::setStreamMetadataPtr(ostr, streamMetadata); io::setDataCompression(ostr, io::COMPRESS_BLOSC); leaf.writeTopology(ostr); for (Index b = 0; b < leaf.buffers(); b++) { uint32_t pass = (uint32_t(leaf.buffers()) << 16) | uint32_t(b); streamMetadata->setPass(pass); leaf.writeBuffers(ostr); } { // error checking streamMetadata->setPass(1000); leaf.writeBuffers(ostr); io::StreamMetadata::Ptr meta; io::setStreamMetadataPtr(ostr, meta); EXPECT_THROW(leaf.writeBuffers(ostr), openvdb::IoError); } std::istringstream istr(ostr.str(), std::ios_base::binary); io::setStreamMetadataPtr(istr, streamMetadata); io::setDataCompression(istr, io::COMPRESS_BLOSC); // Since the input stream doesn't include a VDB header with file format version info, // tag the input stream explicitly with the current version number. io::setCurrentVersion(istr); leaf2.readTopology(istr); for (Index b = 0; b < leaf.buffers(); b++) { uint32_t pass = (uint32_t(leaf.buffers()) << 16) | uint32_t(b); streamMetadata->setPass(pass); leaf2.readBuffers(istr); } // check topology matches EXPECT_EQ(leaf.onVoxelCount(), leaf2.onVoxelCount()); EXPECT_TRUE(leaf2.isValueOn(4)); EXPECT_TRUE(!leaf2.isValueOn(5)); // check only topology (values and attributes still empty) EXPECT_EQ(leaf2.getValue(4), ValueType(20)); EXPECT_EQ(leaf2.attributeSet().size(), size_t(2)); } { // test multi-buffer IO // create a new grid with a single origin leaf PointDataGrid::Ptr grid = PointDataGrid::create(); grid->setName("points"); grid->tree().addLeaf(new LeafType(leaf)); openvdb::GridCPtrVec grids; grids.push_back(grid); // write to file { io::File file("leaf.vdb"); file.write(grids); file.close(); } { // read grids from file (using delayed loading) PointDataGrid::Ptr gridFromDisk; { io::File file("leaf.vdb"); file.open(); openvdb::GridBase::Ptr baseGrid = file.readGrid("points"); file.close(); gridFromDisk = openvdb::gridPtrCast<PointDataGrid>(baseGrid); } LeafType* leafFromDisk = gridFromDisk->tree().probeLeaf(openvdb::Coord(0, 0, 0)); EXPECT_TRUE(leafFromDisk); EXPECT_TRUE(leaf == *leafFromDisk); } { // read grids from file and pre-fetch PointDataGrid::Ptr gridFromDisk; { io::File file("leaf.vdb"); file.open(); openvdb::GridBase::Ptr baseGrid = file.readGrid("points"); file.close(); gridFromDisk = openvdb::gridPtrCast<PointDataGrid>(baseGrid); } LeafType* leafFromDisk = gridFromDisk->tree().probeLeaf(openvdb::Coord(0, 0, 0)); EXPECT_TRUE(leafFromDisk); const AttributeVec3s& position( AttributeVec3s::cast(leafFromDisk->constAttributeArray("P"))); const AttributeF& density( AttributeF::cast(leafFromDisk->constAttributeArray("density"))); EXPECT_TRUE(leafFromDisk->buffer().isOutOfCore()); #if OPENVDB_USE_BLOSC EXPECT_TRUE(position.isOutOfCore()); EXPECT_TRUE(density.isOutOfCore()); #else // delayed-loading is only available on attribute arrays when using Blosc EXPECT_TRUE(!position.isOutOfCore()); EXPECT_TRUE(!density.isOutOfCore()); #endif 
// prefetch voxel data only prefetch(gridFromDisk->tree(), /*position=*/false, /*attributes=*/false); // ensure out-of-core data is now in-core after pre-fetching EXPECT_TRUE(!leafFromDisk->buffer().isOutOfCore()); #if OPENVDB_USE_BLOSC EXPECT_TRUE(position.isOutOfCore()); EXPECT_TRUE(density.isOutOfCore()); #else EXPECT_TRUE(!position.isOutOfCore()); EXPECT_TRUE(!density.isOutOfCore()); #endif { // re-open io::File file("leaf.vdb"); file.open(); openvdb::GridBase::Ptr baseGrid = file.readGrid("points"); file.close(); gridFromDisk = openvdb::gridPtrCast<PointDataGrid>(baseGrid); } leafFromDisk = gridFromDisk->tree().probeLeaf(openvdb::Coord(0, 0, 0)); EXPECT_TRUE(leafFromDisk); const AttributeVec3s& position2( AttributeVec3s::cast(leafFromDisk->constAttributeArray("P"))); const AttributeF& density2( AttributeF::cast(leafFromDisk->constAttributeArray("density"))); // prefetch voxel and position attribute data prefetch(gridFromDisk->tree(), /*position=*/true, /*attribute=*/false); // ensure out-of-core voxel and position data is now in-core after pre-fetching EXPECT_TRUE(!leafFromDisk->buffer().isOutOfCore()); EXPECT_TRUE(!position2.isOutOfCore()); #if OPENVDB_USE_BLOSC EXPECT_TRUE(density2.isOutOfCore()); #else EXPECT_TRUE(!density2.isOutOfCore()); #endif { // re-open io::File file("leaf.vdb"); file.open(); openvdb::GridBase::Ptr baseGrid = file.readGrid("points"); file.close(); gridFromDisk = openvdb::gridPtrCast<PointDataGrid>(baseGrid); } leafFromDisk = gridFromDisk->tree().probeLeaf(openvdb::Coord(0, 0, 0)); EXPECT_TRUE(leafFromDisk); const AttributeVec3s& position3( AttributeVec3s::cast(leafFromDisk->constAttributeArray("P"))); const AttributeF& density3( AttributeF::cast(leafFromDisk->constAttributeArray("density"))); // prefetch all data prefetch(gridFromDisk->tree()); // ensure out-of-core voxel and position data is now in-core after pre-fetching EXPECT_TRUE(!leafFromDisk->buffer().isOutOfCore()); EXPECT_TRUE(!position3.isOutOfCore()); EXPECT_TRUE(!density3.isOutOfCore()); } remove("leaf.vdb"); } { // test multi-buffer IO with varying attribute storage per-leaf // create a new grid with three leaf nodes PointDataGrid::Ptr grid = PointDataGrid::create(); grid->setName("points"); Descriptor::Ptr descrB = Descriptor::create(AttributeVec3s::attributeType()); // create leaf nodes and initialize attributes using this descriptor LeafType leaf0(openvdb::Coord(0, 0, 0)); LeafType leaf1(openvdb::Coord(0, 8, 0)); LeafType leaf2(openvdb::Coord(0, 0, 8)); leaf0.initializeAttributes(descrB, /*arrayLength=*/2); leaf1.initializeAttributes(descrB, /*arrayLength=*/2); leaf2.initializeAttributes(descrB, /*arrayLength=*/2); descrB = descrB->duplicateAppend("density", AttributeF::attributeType()); size_t index = descrB->find("density"); // append density attribute to leaf 0 and leaf 2 (not leaf 1) leaf0.appendAttribute(leaf0.attributeSet().descriptor(), descrB, index); leaf2.appendAttribute(leaf2.attributeSet().descriptor(), descrB, index); // manually change some values in the density array for leaf 0 and leaf 2 TypedAttributeArray<float>& attr0 = TypedAttributeArray<float>::cast(leaf0.attributeArray("density")); attr0.set(0, 2.0f); attr0.set(1, 2.0f); attr0.compact(); // compact only the attribute array in the second leaf TypedAttributeArray<float>& attr2 = TypedAttributeArray<float>::cast(leaf2.attributeArray("density")); attr2.set(0, 5.0f); attr2.set(1, 5.0f); attr2.compact(); EXPECT_TRUE(attr0.isUniform()); EXPECT_TRUE(attr2.isUniform()); grid->tree().addLeaf(new LeafType(leaf0)); 
grid->tree().addLeaf(new LeafType(leaf1)); grid->tree().addLeaf(new LeafType(leaf2)); openvdb::GridCPtrVec grids; grids.push_back(grid); { // write to file io::File file("leaf.vdb"); file.write(grids); file.close(); } { // read grids from file (using delayed loading) PointDataGrid::Ptr gridFromDisk; { io::File file("leaf.vdb"); file.open(); openvdb::GridBase::Ptr baseGrid = file.readGrid("points"); file.close(); gridFromDisk = openvdb::gridPtrCast<PointDataGrid>(baseGrid); } LeafType* leafFromDisk = gridFromDisk->tree().probeLeaf(openvdb::Coord(0, 0, 0)); EXPECT_TRUE(leafFromDisk); EXPECT_TRUE(leaf0 == *leafFromDisk); leafFromDisk = gridFromDisk->tree().probeLeaf(openvdb::Coord(0, 8, 0)); EXPECT_TRUE(leafFromDisk); EXPECT_TRUE(leaf1 == *leafFromDisk); leafFromDisk = gridFromDisk->tree().probeLeaf(openvdb::Coord(0, 0, 8)); EXPECT_TRUE(leafFromDisk); EXPECT_TRUE(leaf2 == *leafFromDisk); } remove("leaf.vdb"); } } TEST_F(TestPointDataLeaf, testSwap) { using AttributeVec3s = TypedAttributeArray<openvdb::Vec3s>; using AttributeF = TypedAttributeArray<float>; using AttributeI = TypedAttributeArray<int>; // create a descriptor using Descriptor = AttributeSet::Descriptor; Descriptor::Ptr descrA = Descriptor::create(AttributeVec3s::attributeType()); // create a leaf and initialize attributes using this descriptor const Index initialArrayLength = 100; LeafType leaf(openvdb::Coord(0, 0, 0)); leaf.initializeAttributes(descrA, /*arrayLength=*/initialArrayLength); descrA = descrA->duplicateAppend("density", AttributeF::attributeType()); leaf.appendAttribute(leaf.attributeSet().descriptor(), descrA, descrA->find("density")); descrA = descrA->duplicateAppend("id", AttributeI::attributeType()); leaf.appendAttribute(leaf.attributeSet().descriptor(), descrA, descrA->find("id")); // swap out the underlying attribute set with a new attribute set with a matching // descriptor EXPECT_EQ(initialArrayLength, leaf.attributeSet().get("density")->size()); EXPECT_EQ(initialArrayLength, leaf.attributeSet().get("id")->size()); descrA = Descriptor::create(AttributeVec3s::attributeType()); const Index newArrayLength = initialArrayLength / 2; AttributeSet* newAttributeSet(new AttributeSet(descrA, /*arrayLength*/newArrayLength)); newAttributeSet->appendAttribute("density", AttributeF::attributeType()); newAttributeSet->appendAttribute("id", AttributeI::attributeType()); leaf.replaceAttributeSet(newAttributeSet); EXPECT_EQ(newArrayLength, leaf.attributeSet().get("density")->size()); EXPECT_EQ(newArrayLength, leaf.attributeSet().get("id")->size()); // ensure we refuse to swap when the attribute set is null EXPECT_THROW(leaf.replaceAttributeSet(nullptr), openvdb::ValueError); // ensure we refuse to swap when the descriptors do not match, // unless we explicitly allow a mismatch. 
Descriptor::Ptr descrB = Descriptor::create(AttributeVec3s::attributeType()); AttributeSet* attributeSet = new AttributeSet(descrB, newArrayLength); attributeSet->appendAttribute("extra", AttributeF::attributeType()); EXPECT_THROW(leaf.replaceAttributeSet(attributeSet), openvdb::ValueError); leaf.replaceAttributeSet(attributeSet, true); EXPECT_EQ(const_cast<AttributeSet*>(&leaf.attributeSet()), attributeSet); } TEST_F(TestPointDataLeaf, testCopyOnWrite) { using AttributeVec3s = TypedAttributeArray<openvdb::Vec3s>; using AttributeF = TypedAttributeArray<float>; // create a descriptor using Descriptor = AttributeSet::Descriptor; Descriptor::Ptr descrA = Descriptor::create(AttributeVec3s::attributeType()); // create a leaf and initialize attributes using this descriptor const Index initialArrayLength = 100; LeafType leaf(openvdb::Coord(0, 0, 0)); leaf.initializeAttributes(descrA, /*arrayLength=*/initialArrayLength); descrA = descrA->duplicateAppend("density", AttributeF::attributeType()); leaf.appendAttribute(leaf.attributeSet().descriptor(), descrA, descrA->find("density")); const AttributeSet& attributeSet = leaf.attributeSet(); EXPECT_EQ(attributeSet.size(), size_t(2)); // ensure attribute arrays are shared between leaf nodes until write const LeafType leafCopy(leaf); const AttributeSet& attributeSetCopy = leafCopy.attributeSet(); EXPECT_TRUE(attributeSet.isShared(/*pos=*/1)); EXPECT_TRUE(attributeSetCopy.isShared(/*pos=*/1)); // test that from a const leaf, accesses to the attribute arrays do not // make then unique const AttributeArray* constArray = attributeSetCopy.getConst(/*pos=*/1); EXPECT_TRUE(constArray); EXPECT_TRUE(attributeSet.isShared(/*pos=*/1)); EXPECT_TRUE(attributeSetCopy.isShared(/*pos=*/1)); constArray = attributeSetCopy.get(/*pos=*/1); EXPECT_TRUE(attributeSet.isShared(/*pos=*/1)); EXPECT_TRUE(attributeSetCopy.isShared(/*pos=*/1)); constArray = &(leafCopy.attributeArray(/*pos=*/1)); EXPECT_TRUE(attributeSet.isShared(/*pos=*/1)); EXPECT_TRUE(attributeSetCopy.isShared(/*pos=*/1)); constArray = &(leafCopy.attributeArray("density")); EXPECT_TRUE(attributeSet.isShared(/*pos=*/1)); EXPECT_TRUE(attributeSetCopy.isShared(/*pos=*/1)); // test makeUnique is called from non const getters AttributeArray* attributeArray = &(leaf.attributeArray(/*pos=*/1)); EXPECT_TRUE(attributeArray); EXPECT_TRUE(!attributeSet.isShared(/*pos=*/1)); EXPECT_TRUE(!attributeSetCopy.isShared(/*pos=*/1)); } TEST_F(TestPointDataLeaf, testCopyDescriptor) { using AttributeVec3s = TypedAttributeArray<Vec3s>; using AttributeS = TypedAttributeArray<float>; using LeafNode = PointDataTree::LeafNodeType; PointDataTree tree; LeafNode* leaf = tree.touchLeaf(openvdb::Coord(0, 0, 0)); LeafNode* leaf2 = tree.touchLeaf(openvdb::Coord(0, 8, 0)); // create a descriptor using Descriptor = AttributeSet::Descriptor; Descriptor::Inserter names; names.add("density", AttributeS::attributeType()); Descriptor::Ptr descrA = Descriptor::create(AttributeVec3s::attributeType()); // initialize attributes using this descriptor leaf->initializeAttributes(descrA, /*arrayLength=*/100); leaf2->initializeAttributes(descrA, /*arrayLength=*/50); // copy the PointDataTree and ensure that descriptors are shared PointDataTree tree2(tree); EXPECT_EQ(tree2.leafCount(), openvdb::Index32(2)); descrA->setGroup("test", size_t(1)); PointDataTree::LeafCIter iter2 = tree2.cbeginLeaf(); EXPECT_TRUE(iter2->attributeSet().descriptor().hasGroup("test")); ++iter2; EXPECT_TRUE(iter2->attributeSet().descriptor().hasGroup("test")); // call makeDescriptorUnique 
and ensure that descriptors are no longer shared Descriptor::Ptr newDescriptor = makeDescriptorUnique(tree2); EXPECT_TRUE(newDescriptor); descrA->setGroup("test2", size_t(2)); iter2 = tree2.cbeginLeaf(); EXPECT_TRUE(!iter2->attributeSet().descriptor().hasGroup("test2")); ++iter2; EXPECT_TRUE(!iter2->attributeSet().descriptor().hasGroup("test2")); }
49,117
C++
30.812176
93
0.625221
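The TestPointDataLeaf.cc listing above ends by checking that copied trees share attribute descriptors until makeDescriptorUnique() is called. Below is a minimal standalone sketch of that behaviour; the attribute name "density", the two point positions, and the printed 1/0 expectations are illustrative assumptions rather than guarantees of this sketch.

// Hedged sketch (not part of the test suite above): descriptor sharing across
// a tree copy, and makeDescriptorUnique() breaking that sharing.
// The attribute name "density" and the point positions are illustrative.
#include <openvdb/openvdb.h>
#include <openvdb/points/PointDataGrid.h>
#include <openvdb/points/PointConversion.h>
#include <openvdb/points/PointAttribute.h>
#include <iostream>
#include <vector>

int main()
{
    openvdb::initialize();
    using namespace openvdb;
    using namespace openvdb::points;

    // Two points far enough apart to land in two different leaf nodes.
    std::vector<Vec3s> positions{{1.0f, 1.0f, 1.0f}, {50.0f, 1.0f, 1.0f}};
    math::Transform::Ptr xform = math::Transform::createLinearTransform(/*voxelSize=*/1.0);
    PointDataGrid::Ptr grid =
        createPointDataGrid<NullCodec, PointDataGrid>(positions, *xform);

    // Appending an attribute leaves all leaves sharing one descriptor.
    appendAttribute<float>(grid->tree(), "density");

    // Copying the tree shares descriptors between original and copy ...
    PointDataTree copy(grid->tree());
    const auto* original = &grid->tree().cbeginLeaf()->attributeSet().descriptor();
    const auto* shared   = &copy.cbeginLeaf()->attributeSet().descriptor();
    std::cout << "shared before makeDescriptorUnique: "
              << (original == shared) << std::endl;   // expected: 1

    // ... until makeDescriptorUnique() gives the copy its own descriptor.
    makeDescriptorUnique(copy);
    const auto* unique = &copy.cbeginLeaf()->attributeSet().descriptor();
    std::cout << "shared after makeDescriptorUnique:  "
              << (original == unique) << std::endl;   // expected: 0
    return 0;
}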
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestPointAttribute.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/points/AttributeArrayString.h> #include <openvdb/points/PointAttribute.h> #include <openvdb/points/PointConversion.h> #include <vector> using namespace openvdb; using namespace openvdb::points; class TestPointAttribute: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; // class TestPointAttribute //////////////////////////////////////// TEST_F(TestPointAttribute, testAppendDrop) { using AttributeI = TypedAttributeArray<int>; std::vector<Vec3s> positions{{1, 1, 1}, {1, 10, 1}, {10, 1, 1}, {10, 10, 1}}; const float voxelSize(1.0); math::Transform::Ptr transform(math::Transform::createLinearTransform(voxelSize)); PointDataGrid::Ptr grid = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); PointDataTree& tree = grid->tree(); // check one leaf per point EXPECT_EQ(tree.leafCount(), Index32(4)); // retrieve first and last leaf attribute sets auto leafIter = tree.cbeginLeaf(); const AttributeSet& attributeSet = leafIter->attributeSet(); ++leafIter; ++leafIter; ++leafIter; const AttributeSet& attributeSet4 = leafIter->attributeSet(); // check just one attribute exists (position) EXPECT_EQ(attributeSet.descriptor().size(), size_t(1)); { // append an attribute, different initial values and collapse appendAttribute<int>(tree, "id"); EXPECT_TRUE(tree.beginLeaf()->hasAttribute("id")); AttributeArray& array = tree.beginLeaf()->attributeArray("id"); EXPECT_TRUE(array.isUniform()); EXPECT_EQ(AttributeI::cast(array).get(0), zeroVal<AttributeI::ValueType>()); dropAttribute(tree, "id"); appendAttribute<int>(tree, "id", 10, /*stride*/1); EXPECT_TRUE(tree.beginLeaf()->hasAttribute("id")); AttributeArray& array2 = tree.beginLeaf()->attributeArray("id"); EXPECT_TRUE(array2.isUniform()); EXPECT_EQ(AttributeI::cast(array2).get(0), AttributeI::ValueType(10)); array2.expand(); EXPECT_TRUE(!array2.isUniform()); collapseAttribute<int>(tree, "id", 50); AttributeArray& array3 = tree.beginLeaf()->attributeArray("id"); EXPECT_TRUE(array3.isUniform()); EXPECT_EQ(AttributeI::cast(array3).get(0), AttributeI::ValueType(50)); dropAttribute(tree, "id"); appendAttribute<Name>(tree, "name", "test"); AttributeArray& array4 = tree.beginLeaf()->attributeArray("name"); EXPECT_TRUE(array4.isUniform()); StringAttributeHandle handle(array4, attributeSet.descriptor().getMetadata()); EXPECT_EQ(handle.get(0), Name("test")); dropAttribute(tree, "name"); } { // append a strided attribute appendAttribute<int>(tree, "id", 0, /*stride=*/1); AttributeArray& array = tree.beginLeaf()->attributeArray("id"); EXPECT_EQ(array.stride(), Index(1)); dropAttribute(tree, "id"); appendAttribute<int>(tree, "id", 0, /*stride=*/10); EXPECT_TRUE(tree.beginLeaf()->hasAttribute("id")); AttributeArray& array2 = tree.beginLeaf()->attributeArray("id"); EXPECT_EQ(array2.stride(), Index(10)); dropAttribute(tree, "id"); } { // append an attribute, check descriptors are as expected, default value test TypedMetadata<int> meta(10); appendAttribute<int>(tree, "id", /*uniformValue*/0, /*stride=*/1, /*constantStride=*/true, /*defaultValue*/&meta, /*hidden=*/false, /*transient=*/false); EXPECT_EQ(attributeSet.descriptor().size(), size_t(2)); EXPECT_TRUE(attributeSet.descriptor() == attributeSet4.descriptor()); EXPECT_TRUE(&attributeSet.descriptor() == &attributeSet4.descriptor()); 
EXPECT_TRUE(attributeSet.descriptor().getMetadata()["default:id"]); AttributeArray& array = tree.beginLeaf()->attributeArray("id"); EXPECT_TRUE(array.isUniform()); AttributeHandle<int> handle(array); EXPECT_EQ(0, handle.get(0)); } { // append three attributes, check ordering is consistent with insertion appendAttribute<float>(tree, "test3"); appendAttribute<float>(tree, "test1"); appendAttribute<float>(tree, "test2"); EXPECT_EQ(attributeSet.descriptor().size(), size_t(5)); EXPECT_EQ(attributeSet.descriptor().find("P"), size_t(0)); EXPECT_EQ(attributeSet.descriptor().find("id"), size_t(1)); EXPECT_EQ(attributeSet.descriptor().find("test3"), size_t(2)); EXPECT_EQ(attributeSet.descriptor().find("test1"), size_t(3)); EXPECT_EQ(attributeSet.descriptor().find("test2"), size_t(4)); } { // drop an attribute by index, check ordering remains consistent std::vector<size_t> indices{2}; dropAttributes(tree, indices); EXPECT_EQ(attributeSet.descriptor().size(), size_t(4)); EXPECT_EQ(attributeSet.descriptor().find("P"), size_t(0)); EXPECT_EQ(attributeSet.descriptor().find("id"), size_t(1)); EXPECT_EQ(attributeSet.descriptor().find("test1"), size_t(2)); EXPECT_EQ(attributeSet.descriptor().find("test2"), size_t(3)); } { // drop attributes by index, check ordering remains consistent std::vector<size_t> indices{1, 3}; dropAttributes(tree, indices); EXPECT_EQ(attributeSet.descriptor().size(), size_t(2)); EXPECT_EQ(attributeSet.descriptor().find("P"), size_t(0)); EXPECT_EQ(attributeSet.descriptor().find("test1"), size_t(1)); } { // drop last non-position attribute std::vector<size_t> indices{1}; dropAttributes(tree, indices); EXPECT_EQ(attributeSet.descriptor().size(), size_t(1)); } { // attempt (and fail) to drop position std::vector<size_t> indices{0}; EXPECT_THROW(dropAttributes(tree, indices), openvdb::KeyError); EXPECT_EQ(attributeSet.descriptor().size(), size_t(1)); EXPECT_TRUE(attributeSet.descriptor().find("P") != AttributeSet::INVALID_POS); } { // add back previous attributes appendAttribute<int>(tree, "id"); appendAttribute<float>(tree, "test3"); appendAttribute<float>(tree, "test1"); appendAttribute<float>(tree, "test2"); EXPECT_EQ(attributeSet.descriptor().size(), size_t(5)); } { // attempt (and fail) to drop non-existing attribute std::vector<Name> names{"test1000"}; EXPECT_THROW(dropAttributes(tree, names), openvdb::KeyError); EXPECT_EQ(attributeSet.descriptor().size(), size_t(5)); } { // drop by name std::vector<Name> names{"test1", "test2"}; dropAttributes(tree, names); EXPECT_EQ(attributeSet.descriptor().size(), size_t(3)); EXPECT_TRUE(attributeSet.descriptor() == attributeSet4.descriptor()); EXPECT_TRUE(&attributeSet.descriptor() == &attributeSet4.descriptor()); EXPECT_EQ(attributeSet.descriptor().find("P"), size_t(0)); EXPECT_EQ(attributeSet.descriptor().find("id"), size_t(1)); EXPECT_EQ(attributeSet.descriptor().find("test3"), size_t(2)); } { // attempt (and fail) to drop position std::vector<Name> names{"P"}; EXPECT_THROW(dropAttributes(tree, names), openvdb::KeyError); EXPECT_EQ(attributeSet.descriptor().size(), size_t(3)); EXPECT_TRUE(attributeSet.descriptor().find("P") != AttributeSet::INVALID_POS); } { // drop one attribute by name dropAttribute(tree, "test3"); EXPECT_EQ(attributeSet.descriptor().size(), size_t(2)); EXPECT_EQ(attributeSet.descriptor().find("P"), size_t(0)); EXPECT_EQ(attributeSet.descriptor().find("id"), size_t(1)); } { // drop one attribute by id dropAttribute(tree, 1); EXPECT_EQ(attributeSet.descriptor().size(), size_t(1)); 
EXPECT_EQ(attributeSet.descriptor().find("P"), size_t(0)); } { // attempt to add an attribute with a name that already exists appendAttribute<float>(tree, "test3"); EXPECT_THROW(appendAttribute<float>(tree, "test3"), openvdb::KeyError); EXPECT_EQ(attributeSet.descriptor().size(), size_t(2)); } { // attempt to add an attribute with an unregistered type (Vec2R) EXPECT_THROW(appendAttribute<Vec2R>(tree, "unregistered"), openvdb::KeyError); } { // append attributes marked as hidden, transient, group and string appendAttribute<float>(tree, "testHidden", 0, /*stride=*/1, /*constantStride=*/true, nullptr, true, false); appendAttribute<float>(tree, "testTransient", 0, /*stride=*/1, /*constantStride=*/true, nullptr, false, true); appendAttribute<Name>(tree, "testString", "", /*stride=*/1, /*constantStride=*/true, nullptr, false, false); const AttributeArray& arrayHidden = leafIter->attributeArray("testHidden"); const AttributeArray& arrayTransient = leafIter->attributeArray("testTransient"); const AttributeArray& arrayString = leafIter->attributeArray("testString"); EXPECT_TRUE(arrayHidden.isHidden()); EXPECT_TRUE(!arrayTransient.isHidden()); EXPECT_TRUE(!arrayHidden.isTransient()); EXPECT_TRUE(arrayTransient.isTransient()); EXPECT_TRUE(!arrayString.isTransient()); EXPECT_TRUE(!isGroup(arrayHidden)); EXPECT_TRUE(!isGroup(arrayTransient)); EXPECT_TRUE(!isGroup(arrayString)); EXPECT_TRUE(!isString(arrayHidden)); EXPECT_TRUE(!isString(arrayTransient)); EXPECT_TRUE(isString(arrayString)); } { // collapsing non-existing attribute throws exception EXPECT_THROW(collapseAttribute<int>(tree, "unknown", 0), openvdb::KeyError); EXPECT_THROW(collapseAttribute<Name>(tree, "unknown", "unknown"), openvdb::KeyError); } } TEST_F(TestPointAttribute, testRename) { std::vector<Vec3s> positions{{1, 1, 1}, {1, 10, 1}, {10, 1, 1}, {10, 10, 1}}; const float voxelSize(1.0); math::Transform::Ptr transform(math::Transform::createLinearTransform(voxelSize)); PointDataGrid::Ptr grid = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); PointDataTree& tree = grid->tree(); // check one leaf per point EXPECT_EQ(tree.leafCount(), Index32(4)); const openvdb::TypedMetadata<float> defaultValue(5.0f); appendAttribute<float>(tree, "test1", 0, /*stride=*/1, /*constantStride=*/true, &defaultValue); appendAttribute<int>(tree, "id"); appendAttribute<float>(tree, "test2"); // retrieve first and last leaf attribute sets auto leafIter = tree.cbeginLeaf(); const AttributeSet& attributeSet = leafIter->attributeSet(); ++leafIter; const AttributeSet& attributeSet4 = leafIter->attributeSet(); { // rename one attribute renameAttribute(tree, "test1", "test1renamed"); EXPECT_EQ(attributeSet.descriptor().size(), size_t(4)); EXPECT_TRUE(attributeSet.descriptor().find("test1") == AttributeSet::INVALID_POS); EXPECT_TRUE(attributeSet.descriptor().find("test1renamed") != AttributeSet::INVALID_POS); EXPECT_EQ(attributeSet4.descriptor().size(), size_t(4)); EXPECT_TRUE(attributeSet4.descriptor().find("test1") == AttributeSet::INVALID_POS); EXPECT_TRUE(attributeSet4.descriptor().find("test1renamed") != AttributeSet::INVALID_POS); renameAttribute(tree, "test1renamed", "test1"); } { // rename non-existing, matching and existing attributes EXPECT_THROW(renameAttribute(tree, "nonexist", "newname"), openvdb::KeyError); EXPECT_THROW(renameAttribute(tree, "test1", "test1"), openvdb::KeyError); EXPECT_THROW(renameAttribute(tree, "test2", "test1"), openvdb::KeyError); } { // rename multiple attributes std::vector<Name> oldNames{"test1", "test2"}; 
std::vector<Name> newNames{"test1renamed"}; EXPECT_THROW(renameAttributes(tree, oldNames, newNames), openvdb::ValueError); newNames.push_back("test2renamed"); renameAttributes(tree, oldNames, newNames); renameAttribute(tree, "test1renamed", "test1"); renameAttribute(tree, "test2renamed", "test2"); } { // rename an attribute with a default value EXPECT_TRUE(attributeSet.descriptor().hasDefaultValue("test1")); renameAttribute(tree, "test1", "test1renamed"); EXPECT_TRUE(attributeSet.descriptor().hasDefaultValue("test1renamed")); } } TEST_F(TestPointAttribute, testBloscCompress) { std::vector<Vec3s> positions; for (float i = 1.f; i < 6.f; i += 0.1f) { positions.emplace_back(1, i, 1); positions.emplace_back(1, 1, i); positions.emplace_back(10, i, 1); positions.emplace_back(10, 1, i); } const float voxelSize(1.0); math::Transform::Ptr transform(math::Transform::createLinearTransform(voxelSize)); PointDataGrid::Ptr grid = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); PointDataTree& tree = grid->tree(); // check two leaves EXPECT_EQ(tree.leafCount(), Index32(2)); // retrieve first and last leaf attribute sets auto leafIter = tree.beginLeaf(); auto leafIter2 = ++tree.beginLeaf(); { // append an attribute, check descriptors are as expected appendAttribute<int>(tree, "compact"); appendAttribute<int>(tree, "id"); appendAttribute<int>(tree, "id2"); } using AttributeHandleRWI = AttributeWriteHandle<int>; { // set some id values (leaf 1) AttributeHandleRWI handleCompact(leafIter->attributeArray("compact")); AttributeHandleRWI handleId(leafIter->attributeArray("id")); AttributeHandleRWI handleId2(leafIter->attributeArray("id2")); const int size = leafIter->attributeArray("id").size(); EXPECT_EQ(size, 102); for (int i = 0; i < size; i++) { handleCompact.set(i, 5); handleId.set(i, i); handleId2.set(i, i); } } { // set some id values (leaf 2) AttributeHandleRWI handleCompact(leafIter2->attributeArray("compact")); AttributeHandleRWI handleId(leafIter2->attributeArray("id")); AttributeHandleRWI handleId2(leafIter2->attributeArray("id2")); const int size = leafIter2->attributeArray("id").size(); EXPECT_EQ(size, 102); for (int i = 0; i < size; i++) { handleCompact.set(i, 10); handleId.set(i, i); handleId2.set(i, i); } } compactAttributes(tree); EXPECT_TRUE(leafIter->attributeArray("compact").isUniform()); EXPECT_TRUE(leafIter2->attributeArray("compact").isUniform()); }
15,072
C++
34.382629
99
0.633758
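As a companion to the append/drop coverage in TestPointAttribute.cc above, here is a minimal sketch of the usual attribute lifecycle on a PointDataGrid: append a uniform attribute, write per-point values, collapse back to a uniform value, then drop it. The four point positions and the written values are illustrative assumptions.

// Hedged sketch of the append -> write -> collapse -> drop lifecycle that the
// test above exercises. Point positions and written values are illustrative.
#include <openvdb/openvdb.h>
#include <openvdb/points/PointAttribute.h>
#include <openvdb/points/PointConversion.h>
#include <vector>

int main()
{
    openvdb::initialize();
    using namespace openvdb;
    using namespace openvdb::points;

    std::vector<Vec3s> positions{{1, 1, 1}, {1, 10, 1}, {10, 1, 1}, {10, 10, 1}};
    math::Transform::Ptr xform = math::Transform::createLinearTransform(1.0);
    PointDataGrid::Ptr grid =
        createPointDataGrid<NullCodec, PointDataGrid>(positions, *xform);
    PointDataTree& tree = grid->tree();

    // Append a uniform int attribute; storage stays collapsed until written.
    appendAttribute<int>(tree, "id", /*uniformValue=*/0);

    // Write per-point values, which expands the array in each leaf.
    int next = 0;
    for (auto leaf = tree.beginLeaf(); leaf; ++leaf) {
        AttributeWriteHandle<int> handle(leaf->attributeArray("id"));
        for (auto index = leaf->beginIndexOn(); index; ++index) {
            handle.set(*index, next++);
        }
    }

    // Collapse back to a single uniform value, then remove the attribute.
    collapseAttribute<int>(tree, "id", /*uniformValue=*/-1);
    dropAttribute(tree, "id");
    return 0;
}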
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestDoubleMetadata.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include "gtest/gtest.h"
#include <openvdb/Exceptions.h>
#include <openvdb/Metadata.h>


class TestDoubleMetadata : public ::testing::Test
{
};


TEST_F(TestDoubleMetadata, test)
{
    using namespace openvdb;

    Metadata::Ptr m(new DoubleMetadata(1.23));
    Metadata::Ptr m2 = m->copy();

    EXPECT_TRUE(dynamic_cast<DoubleMetadata*>(m.get()) != 0);
    EXPECT_TRUE(dynamic_cast<DoubleMetadata*>(m2.get()) != 0);

    EXPECT_TRUE(m->typeName().compare("double") == 0);
    EXPECT_TRUE(m2->typeName().compare("double") == 0);

    DoubleMetadata *s = dynamic_cast<DoubleMetadata*>(m.get());
    //EXPECT_TRUE(s->value() == 1.23);
    EXPECT_NEAR(1.23,s->value(),0);
    s->value() = 4.56;
    //EXPECT_TRUE(s->value() == 4.56);
    EXPECT_NEAR(4.56,s->value(),0);

    m2->copy(*s);

    s = dynamic_cast<DoubleMetadata*>(m2.get());
    //EXPECT_TRUE(s->value() == 4.56);
    EXPECT_NEAR(4.56,s->value(),0);
}

998
C++
25.289473
63
0.627255
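TestDoubleMetadata.cc above exercises DoubleMetadata through raw Metadata::Ptr handles; in practice the type is usually attached to a grid's metadata map. A small hedged sketch follows, assuming an illustrative key name "radius".

// Hedged sketch: DoubleMetadata attached to a grid's MetaMap.
// The metadata key "radius" is an illustrative assumption.
#include <openvdb/openvdb.h>
#include <iostream>

int main()
{
    openvdb::initialize();

    openvdb::FloatGrid::Ptr grid = openvdb::FloatGrid::create(/*background=*/0.0f);
    grid->insertMeta("radius", openvdb::DoubleMetadata(1.5));

    // Typed read-back; throws openvdb::TypeError if the stored type differs.
    const double radius = grid->metaValue<double>("radius");
    std::cout << "radius = " << radius << std::endl;

    // Untyped access mirrors the dynamic_cast pattern used in the test above.
    if (openvdb::Metadata::Ptr meta = (*grid)["radius"]) {
        std::cout << meta->typeName() << " = " << meta->str() << std::endl;
    }
    return 0;
}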
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestPotentialFlow.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file unittest/TestPotentialFlow.cc #include "gtest/gtest.h" #include <openvdb/openvdb.h> #include <openvdb/tools/LevelSetSphere.h> #include <openvdb/tools/PotentialFlow.h> class TestPotentialFlow: public ::testing::Test { }; TEST_F(TestPotentialFlow, testMask) { using namespace openvdb; const float radius = 1.5f; const Vec3f center(0.0f, 0.0f, 0.0f); const float voxelSize = 0.25f; const float halfWidth = 3.0f; FloatGrid::Ptr sphere = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, halfWidth); const int dilation = 5; MaskGrid::Ptr mask = tools::createPotentialFlowMask(*sphere, dilation); MaskGrid::Ptr defaultMask = tools::createPotentialFlowMask(*sphere); EXPECT_TRUE(*mask == *defaultMask); auto acc = mask->getAccessor(); // the isosurface of this sphere is at y = 6 // this mask forms a band dilated outwards from the isosurface by 5 voxels EXPECT_TRUE(!acc.isValueOn(Coord(0, 5, 0))); EXPECT_TRUE(acc.isValueOn(Coord(0, 6, 0))); EXPECT_TRUE(acc.isValueOn(Coord(0, 10, 0))); EXPECT_TRUE(!acc.isValueOn(Coord(0, 11, 0))); { // error on non-uniform voxel size FloatGrid::Ptr nonUniformSphere = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, halfWidth); math::Transform::Ptr nonUniformTransform(new math::Transform( math::MapBase::Ptr(new math::ScaleMap(Vec3d(0.1, 0.2, 0.3))))); nonUniformSphere->setTransform(nonUniformTransform); EXPECT_THROW(tools::createPotentialFlowMask(*nonUniformSphere, dilation), openvdb::ValueError); } // this is the minimum mask of one voxel either side of the isosurface mask = tools::createPotentialFlowMask(*sphere, 2); acc = mask->getAccessor(); EXPECT_TRUE(!acc.isValueOn(Coord(0, 5, 0))); EXPECT_TRUE(acc.isValueOn(Coord(0, 6, 0))); EXPECT_TRUE(acc.isValueOn(Coord(0, 7, 0))); EXPECT_TRUE(!acc.isValueOn(Coord(0, 8, 0))); // these should all produce the same masks as the dilation value is clamped MaskGrid::Ptr negativeMask = tools::createPotentialFlowMask(*sphere, -1); MaskGrid::Ptr zeroMask = tools::createPotentialFlowMask(*sphere, 0); MaskGrid::Ptr oneMask = tools::createPotentialFlowMask(*sphere, 1); EXPECT_TRUE(*negativeMask == *mask); EXPECT_TRUE(*zeroMask == *mask); EXPECT_TRUE(*oneMask == *mask); } TEST_F(TestPotentialFlow, testNeumannVelocities) { using namespace openvdb; const float radius = 1.5f; const Vec3f center(0.0f, 0.0f, 0.0f); const float voxelSize = 0.25f; const float halfWidth = 3.0f; FloatGrid::Ptr sphere = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, halfWidth); MaskGrid::Ptr domain = tools::createPotentialFlowMask(*sphere); { // test identical potential from a wind velocity supplied through grid or background value Vec3d windVelocityValue(0, 0, 10); Vec3dTree::Ptr windTree(new Vec3dTree(sphere->tree(), zeroVal<Vec3d>(), TopologyCopy())); dilateVoxels(*windTree, 2, tools::NN_FACE_EDGE_VERTEX); windTree->voxelizeActiveTiles(); for (auto leaf = windTree->beginLeaf(); leaf; ++leaf) { for (auto iter = leaf->beginValueOn(); iter; ++iter) { iter.setValue(windVelocityValue); } } Vec3dGrid::Ptr windGrid(Vec3dGrid::create(windTree)); windGrid->setTransform(sphere->transform().copy()); auto windPotentialFromGrid = tools::createPotentialFlowNeumannVelocities( *sphere, *domain, windGrid, Vec3d(0)); EXPECT_EQ(windPotentialFromGrid->transform(), sphere->transform()); auto windPotentialFromBackground = tools::createPotentialFlowNeumannVelocities( *sphere, *domain, Vec3dGrid::Ptr(), windVelocityValue); auto accessor = 
windPotentialFromGrid->getConstAccessor(); auto accessor2 = windPotentialFromBackground->getConstAccessor(); EXPECT_EQ(windPotentialFromGrid->activeVoxelCount(), windPotentialFromBackground->activeVoxelCount()); for (auto leaf = windPotentialFromGrid->tree().cbeginLeaf(); leaf; ++leaf) { for (auto iter = leaf->cbeginValueOn(); iter; ++iter) { EXPECT_EQ(accessor.isValueOn(iter.getCoord()), accessor2.isValueOn(iter.getCoord())); EXPECT_EQ(accessor.getValue(iter.getCoord()), accessor2.getValue(iter.getCoord())); } } // test potential from a wind velocity supplied through grid background value Vec3dTree::Ptr emptyWindTree( new Vec3dTree(sphere->tree(), windVelocityValue, TopologyCopy())); Vec3dGrid::Ptr emptyWindGrid(Vec3dGrid::create(emptyWindTree)); emptyWindGrid->setTransform(sphere->transform().copy()); auto windPotentialFromGridBackground = tools::createPotentialFlowNeumannVelocities( *sphere, *domain, emptyWindGrid, Vec3d(0)); EXPECT_EQ(windPotentialFromGridBackground->transform(), sphere->transform()); accessor = windPotentialFromGridBackground->getConstAccessor(); accessor2 = windPotentialFromBackground->getConstAccessor(); EXPECT_EQ(windPotentialFromGridBackground->activeVoxelCount(), windPotentialFromBackground->activeVoxelCount()); for (auto leaf = windPotentialFromGridBackground->tree().cbeginLeaf(); leaf; ++leaf) { for (auto iter = leaf->cbeginValueOn(); iter; ++iter) { EXPECT_EQ(accessor.isValueOn(iter.getCoord()), accessor2.isValueOn(iter.getCoord())); EXPECT_EQ(accessor.getValue(iter.getCoord()), accessor2.getValue(iter.getCoord())); } } // test potential values are double when applying wind velocity // through grid and background values auto windPotentialFromBoth = tools::createPotentialFlowNeumannVelocities( *sphere, *domain, windGrid, windVelocityValue); tools::prune(windPotentialFromBoth->tree(), Vec3d(1e-3)); tools::prune(windPotentialFromBackground->tree(), Vec3d(1e-3)); accessor = windPotentialFromBoth->getConstAccessor(); accessor2 = windPotentialFromBackground->getConstAccessor(); for (auto leaf = windPotentialFromBoth->tree().cbeginLeaf(); leaf; ++leaf) { for (auto iter = leaf->cbeginValueOn(); iter; ++iter) { EXPECT_EQ(accessor.isValueOn(iter.getCoord()), accessor2.isValueOn(iter.getCoord())); EXPECT_EQ(accessor.getValue(iter.getCoord()), accessor2.getValue(iter.getCoord()) * 2); } } EXPECT_TRUE(*windPotentialFromBoth == *windPotentialFromBackground); } Vec3dGrid::Ptr zeroVelocity = Vec3dGrid::create(Vec3d(0)); { // error if grid is not a levelset FloatGrid::Ptr nonLevelSetSphere = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, halfWidth); nonLevelSetSphere->setGridClass(GRID_FOG_VOLUME); EXPECT_THROW(tools::createPotentialFlowNeumannVelocities( *nonLevelSetSphere, *domain, zeroVelocity, Vec3d(5)), openvdb::TypeError); } { // accept double level set grid DoubleGrid::Ptr doubleSphere = tools::createLevelSetSphere<DoubleGrid>(radius, center, voxelSize, halfWidth); EXPECT_NO_THROW(tools::createPotentialFlowNeumannVelocities( *doubleSphere, *domain, zeroVelocity, Vec3d(5))); } { // zero boundary velocities and background velocity Vec3d zeroVelocityValue(zeroVal<Vec3d>()); auto neumannVelocities = tools::createPotentialFlowNeumannVelocities( *sphere, *domain, zeroVelocity, zeroVelocityValue); EXPECT_EQ(neumannVelocities->activeVoxelCount(), Index64(0)); } } TEST_F(TestPotentialFlow, testUniformStream) { // this unit test checks the scalar potential and velocity flow field // for a uniform stream which consists of a 100x100x100 cube of // neumann voxels 
with constant velocity (0, 0, 1) using namespace openvdb; auto transform = math::Transform::createLinearTransform(1.0); auto mask = MaskGrid::create(false); mask->setTransform(transform); auto maskAccessor = mask->getAccessor(); auto neumann = Vec3dGrid::create(Vec3d(0)); auto neumannAccessor = neumann->getAccessor(); for (int i = -50; i < 50; i++) { for (int j = -50; j < 50; j++) { for (int k = -50; k < 50; k++) { Coord ijk(i, j, k); maskAccessor.setValueOn(ijk, true); neumannAccessor.setValueOn(ijk, Vec3d(0, 0, 1)); } } } openvdb::math::pcg::State state = math::pcg::terminationDefaults<float>(); state.iterations = 2000; state.absoluteError = 1e-8; auto potential = tools::computeScalarPotential(*mask, *neumann, state); // check convergence EXPECT_TRUE(state.success); EXPECT_TRUE(state.iterations > 0 && state.iterations < 1000); EXPECT_TRUE(state.absoluteError < 1e-6); EXPECT_EQ(potential->activeVoxelCount(), mask->activeVoxelCount()); // for uniform flow along the z-axis, the scalar potential should be equal to the z co-ordinate for (auto leaf = potential->tree().cbeginLeaf(); leaf; ++leaf) { for (auto iter = leaf->cbeginValueOn(); iter; ++iter) { const double staggeredZ = iter.getCoord().z() + 0.5; EXPECT_TRUE(math::isApproxEqual(iter.getValue(), staggeredZ, /*tolerance*/0.1)); } } auto flow = tools::computePotentialFlow(*potential, *neumann); EXPECT_EQ(flow->activeVoxelCount(), mask->activeVoxelCount()); // flow velocity should be equal to the input velocity (0, 0, 1) for (auto leaf = flow->tree().cbeginLeaf(); leaf; ++leaf) { for (auto iter = leaf->cbeginValueOn(); iter; ++iter) { EXPECT_TRUE(math::isApproxEqual(iter.getValue().x(), 0.0, /*tolerance*/1e-6)); EXPECT_TRUE(math::isApproxEqual(iter.getValue().y(), 0.0, /*tolerance*/1e-6)); EXPECT_TRUE(math::isApproxEqual(iter.getValue().z(), 1.0, /*tolerance*/1e-6)); } } } TEST_F(TestPotentialFlow, testFlowAroundSphere) { using namespace openvdb; const float radius = 1.5f; const Vec3f center(0.0f, 0.0f, 0.0f); const float voxelSize = 0.25f; const float halfWidth = 3.0f; const int dilation = 50; FloatGrid::Ptr sphere = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize, halfWidth); MaskGrid::Ptr domain = tools::createPotentialFlowMask(*sphere, dilation); { // compute potential flow for a global wind velocity around a sphere Vec3f windVelocity(0, 0, 1); Vec3fGrid::Ptr neumann = tools::createPotentialFlowNeumannVelocities(*sphere, *domain, Vec3fGrid::Ptr(), windVelocity); openvdb::math::pcg::State state = math::pcg::terminationDefaults<float>(); state.iterations = 2000; state.absoluteError = 1e-8; FloatGrid::Ptr potential = tools::computeScalarPotential(*domain, *neumann, state); // compute a laplacian of the potential within the domain (excluding neumann voxels) // and ensure it evaluates to zero auto mask = BoolGrid::create(/*background=*/false); mask->setTransform(potential->transform().copy()); mask->topologyUnion(*potential); auto dilatedSphereMask = tools::interiorMask(*sphere); tools::dilateActiveValues(dilatedSphereMask->tree(), 1); mask->topologyDifference(*dilatedSphereMask); FloatGrid::Ptr laplacian = tools::laplacian(*potential, *mask); for (auto leaf = laplacian->tree().cbeginLeaf(); leaf; ++leaf) { for (auto iter = leaf->cbeginValueOn(); iter; ++iter) { EXPECT_TRUE(math::isApproxEqual(iter.getValue(), 0.0f, /*tolerance*/1e-3f)); } } Vec3fGrid::Ptr flowVel = tools::computePotentialFlow(*potential, *neumann); // compute the divergence of the flow velocity within the domain // (excluding neumann voxels and exterior 
voxels) // and ensure it evaluates to zero tools::erodeVoxels(mask->tree(), 2, tools::NN_FACE); FloatGrid::Ptr divergence = tools::divergence(*flowVel, *mask); for (auto leaf = divergence->tree().cbeginLeaf(); leaf; ++leaf) { for (auto iter = leaf->cbeginValueOn(); iter; ++iter) { EXPECT_TRUE(math::isApproxEqual(iter.getValue(), 0.0f, /*tolerance*/0.1f)); } } // check the background velocity has been applied correctly Vec3fGrid::Ptr flowVelBackground = tools::computePotentialFlow(*potential, *neumann, windVelocity); EXPECT_EQ(flowVelBackground->activeVoxelCount(), flowVelBackground->activeVoxelCount()); auto maskAccessor = mask->getConstAccessor(); auto accessor = flowVel->getConstAccessor(); auto accessor2 = flowVelBackground->getConstAccessor(); for (auto leaf = flowVelBackground->tree().cbeginLeaf(); leaf; ++leaf) { for (auto iter = leaf->cbeginValueOn(); iter; ++iter) { // ignore values near the neumann boundary if (!maskAccessor.isValueOn(iter.getCoord())) continue; const Vec3f value1 = accessor.getValue(iter.getCoord()); const Vec3f value2 = accessor2.getValue(iter.getCoord()) + windVelocity; EXPECT_TRUE(math::isApproxEqual(value1.x(), value2.x(), /*tolerance=*/1e-3f)); EXPECT_TRUE(math::isApproxEqual(value1.y(), value2.y(), /*tolerance=*/1e-3f)); EXPECT_TRUE(math::isApproxEqual(value1.z(), value2.z(), /*tolerance=*/1e-3f)); } } } { // check double-precision solve DoubleGrid::Ptr sphereDouble = tools::createLevelSetSphere<DoubleGrid>(radius, center, voxelSize, halfWidth); Vec3d windVelocity(0, 0, 1); Vec3dGrid::Ptr neumann = tools::createPotentialFlowNeumannVelocities(*sphereDouble, *domain, Vec3dGrid::Ptr(), windVelocity); openvdb::math::pcg::State state = math::pcg::terminationDefaults<float>(); state.iterations = 2000; state.absoluteError = 1e-8; DoubleGrid::Ptr potential = tools::computeScalarPotential(*domain, *neumann, state); EXPECT_TRUE(potential); // compute a laplacian of the potential within the domain (excluding neumann voxels) // and ensure it evaluates to zero auto mask = BoolGrid::create(/*background=*/false); mask->setTransform(potential->transform().copy()); mask->topologyUnion(*potential); auto dilatedSphereMask = tools::interiorMask(*sphereDouble); tools::dilateActiveValues(dilatedSphereMask->tree(), 1); mask->topologyDifference(*dilatedSphereMask); DoubleGrid::Ptr laplacian = tools::laplacian(*potential, *mask); for (auto leaf = laplacian->tree().cbeginLeaf(); leaf; ++leaf) { for (auto iter = leaf->cbeginValueOn(); iter; ++iter) { EXPECT_TRUE(math::isApproxEqual(iter.getValue(), 0.0, /*tolerance*/1e-5)); } } Vec3dGrid::Ptr flowVel = tools::computePotentialFlow(*potential, *neumann); EXPECT_TRUE(flowVel); } }
15,675
C++
36.956416
99
0.643955
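A condensed sketch of the potential-flow pipeline that TestPotentialFlow.cc walks through: mask, Neumann velocities, scalar potential, flow field. The sphere parameters and wind direction mirror the test; the dilation of 20 voxels is an arbitrary illustrative choice.

// Hedged sketch of the potential-flow pipeline exercised above.
// dilation = 20 is illustrative; other parameters mirror the test.
#include <openvdb/openvdb.h>
#include <openvdb/tools/LevelSetSphere.h>
#include <openvdb/tools/PotentialFlow.h>
#include <iostream>

int main()
{
    openvdb::initialize();
    using namespace openvdb;

    FloatGrid::Ptr sphere = tools::createLevelSetSphere<FloatGrid>(
        /*radius=*/1.5f, /*center=*/Vec3f(0.0f, 0.0f, 0.0f),
        /*voxelSize=*/0.25f, /*halfWidth=*/3.0f);

    // Band of voxels around the surface in which the potential is solved.
    MaskGrid::Ptr domain = tools::createPotentialFlowMask(*sphere, /*dilation=*/20);

    // Neumann boundary velocities from a constant background wind.
    const Vec3f wind(0.0f, 0.0f, 1.0f);
    Vec3fGrid::Ptr neumann = tools::createPotentialFlowNeumannVelocities(
        *sphere, *domain, /*boundaryVelocity=*/Vec3fGrid::Ptr(), wind);

    // Solve for the scalar potential with conjugate gradient.
    math::pcg::State state = math::pcg::terminationDefaults<float>();
    state.iterations = 2000;
    state.absoluteError = 1e-8;
    FloatGrid::Ptr potential = tools::computeScalarPotential(*domain, *neumann, state);

    // Derive the flow velocity field, optionally adding a background velocity.
    Vec3fGrid::Ptr velocity = tools::computePotentialFlow(*potential, *neumann, wind);
    std::cout << "flow voxels: " << velocity->activeVoxelCount() << std::endl;
    return state.success ? 0 : 1;
}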
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestTreeVisitor.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file TestTreeVisitor.h /// /// @author Peter Cucka #include "gtest/gtest.h" #include <openvdb/openvdb.h> #include <openvdb/tree/Tree.h> #include <map> #include <set> #include <sstream> #include <type_traits> class TestTreeVisitor: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } void testVisitTreeBool() { visitTree<openvdb::BoolTree>(); } void testVisitTreeInt32() { visitTree<openvdb::Int32Tree>(); } void testVisitTreeFloat() { visitTree<openvdb::FloatTree>(); } void testVisitTreeVec2I() { visitTree<openvdb::Vec2ITree>(); } void testVisitTreeVec3S() { visitTree<openvdb::VectorTree>(); } void testVisit2Trees(); protected: template<typename TreeT> TreeT createTestTree() const; template<typename TreeT> void visitTree(); }; //////////////////////////////////////// template<typename TreeT> TreeT TestTreeVisitor::createTestTree() const { using ValueT = typename TreeT::ValueType; const ValueT zero = openvdb::zeroVal<ValueT>(), one = zero + 1; // Create a sparse test tree comprising the eight corners of // a 200 x 200 x 200 cube. TreeT tree(/*background=*/one); tree.setValue(openvdb::Coord( 0, 0, 0), /*value=*/zero); tree.setValue(openvdb::Coord(200, 0, 0), zero); tree.setValue(openvdb::Coord( 0, 200, 0), zero); tree.setValue(openvdb::Coord( 0, 0, 200), zero); tree.setValue(openvdb::Coord(200, 0, 200), zero); tree.setValue(openvdb::Coord( 0, 200, 200), zero); tree.setValue(openvdb::Coord(200, 200, 0), zero); tree.setValue(openvdb::Coord(200, 200, 200), zero); // Verify that the bounding box of all On values is 200 x 200 x 200. openvdb::CoordBBox bbox; EXPECT_TRUE(tree.evalActiveVoxelBoundingBox(bbox)); EXPECT_TRUE(bbox.min() == openvdb::Coord(0, 0, 0)); EXPECT_TRUE(bbox.max() == openvdb::Coord(200, 200, 200)); return tree; } //////////////////////////////////////// namespace { /// Single-tree visitor that accumulates node counts class Visitor { public: using NodeMap = std::map<openvdb::Index, std::set<const void*> >; Visitor(): mSkipLeafNodes(false) { reset(); } void reset() { mSkipLeafNodes = false; mNodes.clear(); mNonConstIterUseCount = mConstIterUseCount = 0; } void setSkipLeafNodes(bool b) { mSkipLeafNodes = b; } template<typename IterT> bool operator()(IterT& iter) { incrementIterUseCount(std::is_const<typename IterT::NodeType>::value); EXPECT_TRUE(iter.getParentNode() != nullptr); if (mSkipLeafNodes && iter.parent().getLevel() == 1) return true; using ValueT = typename IterT::NonConstValueType; using ChildT = typename IterT::ChildNodeType; ValueT value; if (const ChildT* child = iter.probeChild(value)) { insertChild<ChildT>(child); } return false; } openvdb::Index leafCount() const { NodeMap::const_iterator it = mNodes.find(0); return openvdb::Index((it != mNodes.end()) ? 
it->second.size() : 0); } openvdb::Index nonLeafCount() const { openvdb::Index count = 1; // root node for (NodeMap::const_iterator i = mNodes.begin(), e = mNodes.end(); i != e; ++i) { if (i->first != 0) count = openvdb::Index(count + i->second.size()); } return count; } bool usedOnlyConstIterators() const { return (mConstIterUseCount > 0 && mNonConstIterUseCount == 0); } bool usedOnlyNonConstIterators() const { return (mConstIterUseCount == 0 && mNonConstIterUseCount > 0); } private: template<typename ChildT> void insertChild(const ChildT* child) { if (child != nullptr) { const openvdb::Index level = child->getLevel(); if (!mSkipLeafNodes || level > 0) { mNodes[level].insert(child); } } } void incrementIterUseCount(bool isConst) { if (isConst) ++mConstIterUseCount; else ++mNonConstIterUseCount; } bool mSkipLeafNodes; NodeMap mNodes; int mNonConstIterUseCount, mConstIterUseCount; }; /// Specialization for LeafNode iterators, whose ChildNodeType is void /// (therefore can't call child->getLevel()) template<> inline void Visitor::insertChild<void>(const void*) {} } // unnamed namespace template<typename TreeT> void TestTreeVisitor::visitTree() { OPENVDB_NO_DEPRECATION_WARNING_BEGIN TreeT tree = createTestTree<TreeT>(); { // Traverse the tree, accumulating node counts. Visitor visitor; const_cast<const TreeT&>(tree).visit(visitor); EXPECT_TRUE(visitor.usedOnlyConstIterators()); EXPECT_EQ(tree.leafCount(), visitor.leafCount()); EXPECT_EQ(tree.nonLeafCount(), visitor.nonLeafCount()); } { // Traverse the tree, accumulating node counts as above, // but using non-const iterators. Visitor visitor; tree.visit(visitor); EXPECT_TRUE(visitor.usedOnlyNonConstIterators()); EXPECT_EQ(tree.leafCount(), visitor.leafCount()); EXPECT_EQ(tree.nonLeafCount(), visitor.nonLeafCount()); } { // Traverse the tree, accumulating counts of non-leaf nodes only. 
Visitor visitor; visitor.setSkipLeafNodes(true); const_cast<const TreeT&>(tree).visit(visitor); EXPECT_TRUE(visitor.usedOnlyConstIterators()); EXPECT_EQ(0U, visitor.leafCount()); // leaf nodes were skipped EXPECT_EQ(tree.nonLeafCount(), visitor.nonLeafCount()); } OPENVDB_NO_DEPRECATION_WARNING_END } //////////////////////////////////////// namespace { /// Two-tree visitor that accumulates node counts class Visitor2 { public: using NodeMap = std::map<openvdb::Index, std::set<const void*> >; Visitor2() { reset(); } void reset() { mSkipALeafNodes = mSkipBLeafNodes = false; mANodeCount.clear(); mBNodeCount.clear(); } void setSkipALeafNodes(bool b) { mSkipALeafNodes = b; } void setSkipBLeafNodes(bool b) { mSkipBLeafNodes = b; } openvdb::Index aLeafCount() const { return leafCount(/*useA=*/true); } openvdb::Index bLeafCount() const { return leafCount(/*useA=*/false); } openvdb::Index aNonLeafCount() const { return nonLeafCount(/*useA=*/true); } openvdb::Index bNonLeafCount() const { return nonLeafCount(/*useA=*/false); } template<typename AIterT, typename BIterT> int operator()(AIterT& aIter, BIterT& bIter) { EXPECT_TRUE(aIter.getParentNode() != nullptr); EXPECT_TRUE(bIter.getParentNode() != nullptr); typename AIterT::NodeType& aNode = aIter.parent(); typename BIterT::NodeType& bNode = bIter.parent(); const openvdb::Index aLevel = aNode.getLevel(), bLevel = bNode.getLevel(); mANodeCount[aLevel].insert(&aNode); mBNodeCount[bLevel].insert(&bNode); int skipBranch = 0; if (aLevel == 1 && mSkipALeafNodes) skipBranch = (skipBranch | 1); if (bLevel == 1 && mSkipBLeafNodes) skipBranch = (skipBranch | 2); return skipBranch; } private: openvdb::Index leafCount(bool useA) const { const NodeMap& theMap = (useA ? mANodeCount : mBNodeCount); NodeMap::const_iterator it = theMap.find(0); if (it != theMap.end()) return openvdb::Index(it->second.size()); return 0; } openvdb::Index nonLeafCount(bool useA) const { openvdb::Index count = 0; const NodeMap& theMap = (useA ? mANodeCount : mBNodeCount); for (NodeMap::const_iterator i = theMap.begin(), e = theMap.end(); i != e; ++i) { if (i->first != 0) count = openvdb::Index(count + i->second.size()); } return count; } bool mSkipALeafNodes, mSkipBLeafNodes; NodeMap mANodeCount, mBNodeCount; }; } // unnamed namespace TEST_F(TestTreeVisitor, testVisitTreeBool) { visitTree<openvdb::BoolTree>(); } TEST_F(TestTreeVisitor, testVisitTreeInt32) { visitTree<openvdb::Int32Tree>(); } TEST_F(TestTreeVisitor, testVisitTreeFloat) { visitTree<openvdb::FloatTree>(); } TEST_F(TestTreeVisitor, testVisitTreeVec2I) { visitTree<openvdb::Vec2ITree>(); } TEST_F(TestTreeVisitor, testVisitTreeVec3S) { visitTree<openvdb::VectorTree>(); } TEST_F(TestTreeVisitor, testVisit2Trees) { OPENVDB_NO_DEPRECATION_WARNING_BEGIN using TreeT = openvdb::FloatTree; using Tree2T = openvdb::VectorTree; using ValueT = TreeT::ValueType; // Create a test tree. TreeT tree = createTestTree<TreeT>(); // Create another test tree of a different type but with the same topology. Tree2T tree2 = createTestTree<Tree2T>(); // Traverse both trees. Visitor2 visitor; tree.visit2(tree2, visitor); //EXPECT_TRUE(visitor.usedOnlyConstIterators()); EXPECT_EQ(tree.leafCount(), visitor.aLeafCount()); EXPECT_EQ(tree2.leafCount(), visitor.bLeafCount()); EXPECT_EQ(tree.nonLeafCount(), visitor.aNonLeafCount()); EXPECT_EQ(tree2.nonLeafCount(), visitor.bNonLeafCount()); visitor.reset(); // Change the topology of the first tree. tree.setValue(openvdb::Coord(-200, -200, -200), openvdb::zeroVal<ValueT>()); // Traverse both trees. 
tree.visit2(tree2, visitor); EXPECT_EQ(tree.leafCount(), visitor.aLeafCount()); EXPECT_EQ(tree2.leafCount(), visitor.bLeafCount()); EXPECT_EQ(tree.nonLeafCount(), visitor.aNonLeafCount()); EXPECT_EQ(tree2.nonLeafCount(), visitor.bNonLeafCount()); visitor.reset(); // Traverse the two trees in the opposite order. tree2.visit2(tree, visitor); EXPECT_EQ(tree2.leafCount(), visitor.aLeafCount()); EXPECT_EQ(tree.leafCount(), visitor.bLeafCount()); EXPECT_EQ(tree2.nonLeafCount(), visitor.aNonLeafCount()); EXPECT_EQ(tree.nonLeafCount(), visitor.bNonLeafCount()); // Repeat, skipping leaf nodes of tree2. visitor.reset(); visitor.setSkipALeafNodes(true); tree2.visit2(tree, visitor); EXPECT_EQ(0U, visitor.aLeafCount()); EXPECT_EQ(tree.leafCount(), visitor.bLeafCount()); EXPECT_EQ(tree2.nonLeafCount(), visitor.aNonLeafCount()); EXPECT_EQ(tree.nonLeafCount(), visitor.bNonLeafCount()); // Repeat, skipping leaf nodes of tree. visitor.reset(); visitor.setSkipBLeafNodes(true); tree2.visit2(tree, visitor); EXPECT_EQ(tree2.leafCount(), visitor.aLeafCount()); EXPECT_EQ(0U, visitor.bLeafCount()); EXPECT_EQ(tree2.nonLeafCount(), visitor.aNonLeafCount()); EXPECT_EQ(tree.nonLeafCount(), visitor.bNonLeafCount()); OPENVDB_NO_DEPRECATION_WARNING_END }
10,829
C++
30.300578
89
0.644011
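TestTreeVisitor.cc drives the deprecated Tree::visit()/visit2() entry points. For comparison, here is a hedged sketch that gathers similar per-level node counts with the tree's node iterator instead; it is an illustration written against the iterator API, not a drop-in replacement for the visitor tests above.

// Hedged sketch (assumption: Tree::cbeginNode() node iterator, not the
// deprecated visit() path used by the test above) that tallies nodes per level.
#include <openvdb/openvdb.h>
#include <map>
#include <iostream>

int main()
{
    openvdb::initialize();

    openvdb::FloatTree tree(/*background=*/1.0f);
    tree.setValue(openvdb::Coord(0, 0, 0), 0.0f);
    tree.setValue(openvdb::Coord(200, 200, 200), 0.0f);

    // Count nodes per level (0 = leaf nodes, increasing towards the root).
    std::map<openvdb::Index, size_t> counts;
    for (auto iter = tree.cbeginNode(); iter; ++iter) {
        ++counts[iter.getLevel()];
    }

    for (const auto& levelAndCount : counts) {
        std::cout << "level " << levelAndCount.first
                  << ": " << levelAndCount.second << " node(s)" << std::endl;
    }
    std::cout << "leafCount()    = " << tree.leafCount() << std::endl;
    std::cout << "nonLeafCount() = " << tree.nonLeafCount() << std::endl;
    return 0;
}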
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestPointCount.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/points/PointDataGrid.h> #include <openvdb/openvdb.h> #include <openvdb/points/PointGroup.h> #include <openvdb/points/PointCount.h> #include <openvdb/points/PointConversion.h> #include <cmath> #include <cstdio> // for std::remove() #include <cstdlib> // for std::getenv() #include <string> #include <vector> #ifdef _MSC_VER #include <windows.h> #endif using namespace openvdb; using namespace openvdb::points; class TestPointCount: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; // class TestPointCount using LeafType = PointDataTree::LeafNodeType; using ValueType = LeafType::ValueType; struct NotZeroFilter { NotZeroFilter() = default; static bool initialized() { return true; } template <typename LeafT> void reset(const LeafT&) { } template <typename IterT> bool valid(const IterT& iter) const { return *iter != 0; } }; TEST_F(TestPointCount, testCount) { // create a tree and check there are no points PointDataGrid::Ptr grid = createGrid<PointDataGrid>(); PointDataTree& tree = grid->tree(); EXPECT_EQ(pointCount(tree), Index64(0)); // add a new leaf to a tree and re-test LeafType* leafPtr = tree.touchLeaf(openvdb::Coord(0, 0, 0)); LeafType& leaf(*leafPtr); EXPECT_EQ(pointCount(tree), Index64(0)); // now manually set some offsets leaf.setOffsetOn(0, 4); leaf.setOffsetOn(1, 7); ValueVoxelCIter voxelIter = leaf.beginValueVoxel(openvdb::Coord(0, 0, 0)); IndexIter<ValueVoxelCIter, NullFilter> testIter(voxelIter, NullFilter()); leaf.beginIndexVoxel(openvdb::Coord(0, 0, 0)); EXPECT_EQ(int(*leaf.beginIndexVoxel(openvdb::Coord(0, 0, 0))), 0); EXPECT_EQ(int(leaf.beginIndexVoxel(openvdb::Coord(0, 0, 0)).end()), 4); EXPECT_EQ(int(*leaf.beginIndexVoxel(openvdb::Coord(0, 0, 1))), 4); EXPECT_EQ(int(leaf.beginIndexVoxel(openvdb::Coord(0, 0, 1)).end()), 7); // test filtered, index voxel iterator EXPECT_EQ(int(*leaf.beginIndexVoxel(openvdb::Coord(0, 0, 0), NotZeroFilter())), 1); EXPECT_EQ(int(leaf.beginIndexVoxel(openvdb::Coord(0, 0, 0), NotZeroFilter()).end()), 4); { LeafType::IndexVoxelIter iter = leaf.beginIndexVoxel(openvdb::Coord(0, 0, 0)); EXPECT_EQ(int(*iter), 0); EXPECT_EQ(int(iter.end()), 4); LeafType::IndexVoxelIter iter2 = leaf.beginIndexVoxel(openvdb::Coord(0, 0, 1)); EXPECT_EQ(int(*iter2), 4); EXPECT_EQ(int(iter2.end()), 7); EXPECT_EQ(iterCount(iter2), Index64(7 - 4)); // check pointCount ignores active/inactive state leaf.setValueOff(1); LeafType::IndexVoxelIter iter3 = leaf.beginIndexVoxel(openvdb::Coord(0, 0, 1)); EXPECT_EQ(iterCount(iter3), Index64(7 - 4)); leaf.setValueOn(1); } // one point per voxel for (unsigned int i = 0; i < LeafType::SIZE; i++) { leaf.setOffsetOn(i, i); } EXPECT_EQ(leaf.pointCount(), Index64(LeafType::SIZE - 1)); EXPECT_EQ(leaf.onPointCount(), Index64(LeafType::SIZE - 1)); EXPECT_EQ(leaf.offPointCount(), Index64(0)); EXPECT_EQ(pointCount(tree), Index64(LeafType::SIZE - 1)); EXPECT_EQ(pointCount(tree, ActiveFilter()), Index64(LeafType::SIZE - 1)); EXPECT_EQ(pointCount(tree, InactiveFilter()), Index64(0)); // manually de-activate two voxels leaf.setValueOff(100); leaf.setValueOff(101); EXPECT_EQ(leaf.pointCount(), Index64(LeafType::SIZE - 1)); EXPECT_EQ(leaf.onPointCount(), Index64(LeafType::SIZE - 3)); EXPECT_EQ(leaf.offPointCount(), Index64(2)); EXPECT_EQ(pointCount(tree), Index64(LeafType::SIZE - 1)); EXPECT_EQ(pointCount(tree, ActiveFilter()), 
Index64(LeafType::SIZE - 3)); EXPECT_EQ(pointCount(tree, InactiveFilter()), Index64(2)); // one point per every other voxel and de-activate empty voxels unsigned sum = 0; for (unsigned int i = 0; i < LeafType::SIZE; i++) { leaf.setOffsetOn(i, sum); if (i % 2 == 0) sum++; } leaf.updateValueMask(); EXPECT_EQ(leaf.pointCount(), Index64(LeafType::SIZE / 2)); EXPECT_EQ(leaf.onPointCount(), Index64(LeafType::SIZE / 2)); EXPECT_EQ(leaf.offPointCount(), Index64(0)); EXPECT_EQ(pointCount(tree), Index64(LeafType::SIZE / 2)); EXPECT_EQ(pointCount(tree, ActiveFilter()), Index64(LeafType::SIZE / 2)); EXPECT_EQ(pointCount(tree, InactiveFilter()), Index64(0)); // add a new non-empty leaf and check totalPointCount is correct LeafType* leaf2Ptr = tree.touchLeaf(openvdb::Coord(0, 0, 8)); LeafType& leaf2(*leaf2Ptr); // on adding, tree now obtains ownership and is reponsible for deletion for (unsigned int i = 0; i < LeafType::SIZE; i++) { leaf2.setOffsetOn(i, i); } EXPECT_EQ(pointCount(tree), Index64(LeafType::SIZE / 2 + LeafType::SIZE - 1)); EXPECT_EQ(pointCount(tree, ActiveFilter()), Index64(LeafType::SIZE / 2 + LeafType::SIZE - 1)); EXPECT_EQ(pointCount(tree, InactiveFilter()), Index64(0)); } TEST_F(TestPointCount, testGroup) { using namespace openvdb::math; using Descriptor = AttributeSet::Descriptor; // four points in the same leaf std::vector<Vec3s> positions{{1, 1, 1}, {1, 2, 1}, {2, 1, 1}, {2, 2, 1}}; const float voxelSize(1.0); math::Transform::Ptr transform(math::Transform::createLinearTransform(voxelSize)); PointDataGrid::Ptr grid = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); PointDataTree& tree = grid->tree(); // setup temp directory std::string tempDir; if (const char* dir = std::getenv("TMPDIR")) tempDir = dir; #ifdef _MSC_VER if (tempDir.empty()) { char tempDirBuffer[MAX_PATH+1]; int tempDirLen = GetTempPath(MAX_PATH+1, tempDirBuffer); EXPECT_TRUE(tempDirLen > 0 && tempDirLen <= MAX_PATH); tempDir = tempDirBuffer; } #else if (tempDir.empty()) tempDir = P_tmpdir; #endif std::string filename; // check one leaf EXPECT_EQ(tree.leafCount(), Index32(1)); // retrieve first and last leaf attribute sets PointDataTree::LeafIter leafIter = tree.beginLeaf(); const AttributeSet& firstAttributeSet = leafIter->attributeSet(); // ensure zero groups EXPECT_EQ(firstAttributeSet.descriptor().groupMap().size(), size_t(0)); {// add an empty group appendGroup(tree, "test"); EXPECT_EQ(firstAttributeSet.descriptor().groupMap().size(), size_t(1)); EXPECT_EQ(pointCount(tree), Index64(4)); EXPECT_EQ(pointCount(tree, ActiveFilter()), Index64(4)); EXPECT_EQ(pointCount(tree, InactiveFilter()), Index64(0)); EXPECT_EQ(leafIter->pointCount(), Index64(4)); EXPECT_EQ(leafIter->onPointCount(), Index64(4)); EXPECT_EQ(leafIter->offPointCount(), Index64(0)); // no points found when filtered by the empty group EXPECT_EQ(pointCount(tree, GroupFilter("test", firstAttributeSet)), Index64(0)); EXPECT_EQ(leafIter->groupPointCount("test"), Index64(0)); } { // assign two points to the group, test offsets and point counts const Descriptor::GroupIndex index = firstAttributeSet.groupIndex("test"); EXPECT_TRUE(index.first != AttributeSet::INVALID_POS); EXPECT_TRUE(index.first < firstAttributeSet.size()); AttributeArray& array = leafIter->attributeArray(index.first); EXPECT_TRUE(isGroup(array)); GroupAttributeArray& groupArray = GroupAttributeArray::cast(array); groupArray.set(0, GroupType(1) << index.second); groupArray.set(3, GroupType(1) << index.second); // only two out of four points should be found when group 
filtered GroupFilter firstGroupFilter("test", firstAttributeSet); EXPECT_EQ(pointCount(tree, GroupFilter("test", firstAttributeSet)), Index64(2)); EXPECT_EQ(leafIter->groupPointCount("test"), Index64(2)); { EXPECT_EQ(pointCount(tree, BinaryFilter<GroupFilter, ActiveFilter>( firstGroupFilter, ActiveFilter())), Index64(2)); EXPECT_EQ(pointCount(tree, BinaryFilter<GroupFilter, InactiveFilter>( firstGroupFilter, InactiveFilter())), Index64(0)); } EXPECT_NO_THROW(leafIter->validateOffsets()); // manually modify offsets so one of the points is marked as inactive std::vector<ValueType> offsets, modifiedOffsets; offsets.resize(PointDataTree::LeafNodeType::SIZE); modifiedOffsets.resize(PointDataTree::LeafNodeType::SIZE); for (Index n = 0; n < PointDataTree::LeafNodeType::NUM_VALUES; n++) { const unsigned offset = leafIter->getValue(n); offsets[n] = offset; modifiedOffsets[n] = offset > 0 ? offset - 1 : offset; } leafIter->setOffsets(modifiedOffsets); // confirm that validation fails EXPECT_THROW(leafIter->validateOffsets(), openvdb::ValueError); // replace offsets with original offsets but leave value mask leafIter->setOffsets(offsets, /*updateValueMask=*/ false); // confirm that validation now succeeds EXPECT_NO_THROW(leafIter->validateOffsets()); // ensure active / inactive point counts are correct EXPECT_EQ(pointCount(tree, GroupFilter("test", firstAttributeSet)), Index64(2)); EXPECT_EQ(leafIter->groupPointCount("test"), Index64(2)); EXPECT_EQ(pointCount(tree, BinaryFilter<GroupFilter, ActiveFilter>( firstGroupFilter, ActiveFilter())), Index64(1)); EXPECT_EQ(pointCount(tree, BinaryFilter<GroupFilter, InactiveFilter>( firstGroupFilter, InactiveFilter())), Index64(1)); EXPECT_EQ(pointCount(tree), Index64(4)); EXPECT_EQ(pointCount(tree, ActiveFilter()), Index64(3)); EXPECT_EQ(pointCount(tree, InactiveFilter()), Index64(1)); // write out grid to a temp file { filename = tempDir + "/openvdb_test_point_load"; io::File fileOut(filename); GridCPtrVec grids{grid}; fileOut.write(grids); } // test point count of a delay-loaded grid { io::File fileIn(filename); fileIn.open(); GridPtrVecPtr grids = fileIn.getGrids(); fileIn.close(); EXPECT_EQ(grids->size(), size_t(1)); PointDataGrid::Ptr inputGrid = GridBase::grid<PointDataGrid>((*grids)[0]); EXPECT_TRUE(inputGrid); PointDataTree& inputTree = inputGrid->tree(); const auto& attributeSet = inputTree.cbeginLeaf()->attributeSet(); GroupFilter groupFilter("test", attributeSet); bool inCoreOnly = true; EXPECT_EQ(pointCount(inputTree, NullFilter(), inCoreOnly), Index64(0)); EXPECT_EQ(pointCount(inputTree, ActiveFilter(), inCoreOnly), Index64(0)); EXPECT_EQ(pointCount(inputTree, InactiveFilter(), inCoreOnly), Index64(0)); EXPECT_EQ(pointCount(inputTree, groupFilter, inCoreOnly), Index64(0)); EXPECT_EQ(pointCount(inputTree, BinaryFilter<GroupFilter, ActiveFilter>( groupFilter, ActiveFilter()), inCoreOnly), Index64(0)); EXPECT_EQ(pointCount(inputTree, BinaryFilter<GroupFilter, InactiveFilter>( groupFilter, InactiveFilter()), inCoreOnly), Index64(0)); inCoreOnly = false; EXPECT_EQ(pointCount(inputTree, NullFilter(), inCoreOnly), Index64(4)); EXPECT_EQ(pointCount(inputTree, ActiveFilter(), inCoreOnly), Index64(3)); EXPECT_EQ(pointCount(inputTree, InactiveFilter(), inCoreOnly), Index64(1)); EXPECT_EQ(pointCount(inputTree, groupFilter, inCoreOnly), Index64(2)); EXPECT_EQ(pointCount(inputTree, BinaryFilter<GroupFilter, ActiveFilter>( groupFilter, ActiveFilter()), inCoreOnly), Index64(1)); EXPECT_EQ(pointCount(inputTree, BinaryFilter<GroupFilter, InactiveFilter>( 
groupFilter, InactiveFilter()), inCoreOnly), Index64(1)); } // update the value mask and confirm point counts once again leafIter->updateValueMask(); EXPECT_NO_THROW(leafIter->validateOffsets()); auto& attributeSet = tree.cbeginLeaf()->attributeSet(); EXPECT_EQ(pointCount(tree, GroupFilter("test", attributeSet)), Index64(2)); EXPECT_EQ(leafIter->groupPointCount("test"), Index64(2)); EXPECT_EQ(pointCount(tree, BinaryFilter<GroupFilter, ActiveFilter>( firstGroupFilter, ActiveFilter())), Index64(2)); EXPECT_EQ(pointCount(tree, BinaryFilter<GroupFilter, InactiveFilter>( firstGroupFilter, InactiveFilter())), Index64(0)); EXPECT_EQ(pointCount(tree), Index64(4)); EXPECT_EQ(pointCount(tree, ActiveFilter()), Index64(4)); EXPECT_EQ(pointCount(tree, InactiveFilter()), Index64(0)); } // create a tree with multiple leaves positions.emplace_back(20, 1, 1); positions.emplace_back(1, 20, 1); positions.emplace_back(1, 1, 20); grid = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); PointDataTree& tree2 = grid->tree(); EXPECT_EQ(tree2.leafCount(), Index32(4)); leafIter = tree2.beginLeaf(); appendGroup(tree2, "test"); { // assign two points to the group const auto& attributeSet = leafIter->attributeSet(); const Descriptor::GroupIndex index = attributeSet.groupIndex("test"); EXPECT_TRUE(index.first != AttributeSet::INVALID_POS); EXPECT_TRUE(index.first < attributeSet.size()); AttributeArray& array = leafIter->attributeArray(index.first); EXPECT_TRUE(isGroup(array)); GroupAttributeArray& groupArray = GroupAttributeArray::cast(array); groupArray.set(0, GroupType(1) << index.second); groupArray.set(3, GroupType(1) << index.second); EXPECT_EQ(pointCount(tree2, GroupFilter("test", attributeSet)), Index64(2)); EXPECT_EQ(leafIter->groupPointCount("test"), Index64(2)); EXPECT_EQ(pointCount(tree2), Index64(7)); } ++leafIter; EXPECT_TRUE(leafIter); { // assign another point to the group in a different leaf const auto& attributeSet = leafIter->attributeSet(); const Descriptor::GroupIndex index = attributeSet.groupIndex("test"); EXPECT_TRUE(index.first != AttributeSet::INVALID_POS); EXPECT_TRUE(index.first < leafIter->attributeSet().size()); AttributeArray& array = leafIter->attributeArray(index.first); EXPECT_TRUE(isGroup(array)); GroupAttributeArray& groupArray = GroupAttributeArray::cast(array); groupArray.set(0, GroupType(1) << index.second); EXPECT_EQ(pointCount(tree2, GroupFilter("test", attributeSet)), Index64(3)); EXPECT_EQ(leafIter->groupPointCount("test"), Index64(1)); EXPECT_EQ(pointCount(tree2), Index64(7)); } } TEST_F(TestPointCount, testOffsets) { using namespace openvdb::math; const float voxelSize(1.0); math::Transform::Ptr transform(math::Transform::createLinearTransform(voxelSize)); // five points across four leafs std::vector<Vec3s> positions{{1, 1, 1}, {1, 101, 1}, {2, 101, 1}, {101, 1, 1}, {101, 101, 1}}; PointDataGrid::Ptr grid = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); PointDataTree& tree = grid->tree(); { // all point offsets std::vector<Index64> offsets; Index64 total = pointOffsets(offsets, tree); EXPECT_EQ(offsets.size(), size_t(4)); EXPECT_EQ(offsets[0], Index64(1)); EXPECT_EQ(offsets[1], Index64(3)); EXPECT_EQ(offsets[2], Index64(4)); EXPECT_EQ(offsets[3], Index64(5)); EXPECT_EQ(total, Index64(5)); } { // all point offsets when using a non-existant exclude group std::vector<Index64> offsets; std::vector<Name> includeGroups; std::vector<Name> excludeGroups{"empty"}; MultiGroupFilter filter(includeGroups, excludeGroups, 
tree.cbeginLeaf()->attributeSet()); Index64 total = pointOffsets(offsets, tree, filter); EXPECT_EQ(offsets.size(), size_t(4)); EXPECT_EQ(offsets[0], Index64(1)); EXPECT_EQ(offsets[1], Index64(3)); EXPECT_EQ(offsets[2], Index64(4)); EXPECT_EQ(offsets[3], Index64(5)); EXPECT_EQ(total, Index64(5)); } appendGroup(tree, "test"); // add one point to the group from the leaf that contains two points PointDataTree::LeafIter iter = ++tree.beginLeaf(); GroupWriteHandle groupHandle = iter->groupWriteHandle("test"); groupHandle.set(0, true); { // include this group std::vector<Index64> offsets; std::vector<Name> includeGroups{"test"}; std::vector<Name> excludeGroups; MultiGroupFilter filter(includeGroups, excludeGroups, tree.cbeginLeaf()->attributeSet()); Index64 total = pointOffsets(offsets, tree, filter); EXPECT_EQ(offsets.size(), size_t(4)); EXPECT_EQ(offsets[0], Index64(0)); EXPECT_EQ(offsets[1], Index64(1)); EXPECT_EQ(offsets[2], Index64(1)); EXPECT_EQ(offsets[3], Index64(1)); EXPECT_EQ(total, Index64(1)); } { // exclude this group std::vector<Index64> offsets; std::vector<Name> includeGroups; std::vector<Name> excludeGroups{"test"}; MultiGroupFilter filter(includeGroups, excludeGroups, tree.cbeginLeaf()->attributeSet()); Index64 total = pointOffsets(offsets, tree, filter); EXPECT_EQ(offsets.size(), size_t(4)); EXPECT_EQ(offsets[0], Index64(1)); EXPECT_EQ(offsets[1], Index64(2)); EXPECT_EQ(offsets[2], Index64(3)); EXPECT_EQ(offsets[3], Index64(4)); EXPECT_EQ(total, Index64(4)); } // setup temp directory std::string tempDir; if (const char* dir = std::getenv("TMPDIR")) tempDir = dir; #ifdef _MSC_VER if (tempDir.empty()) { char tempDirBuffer[MAX_PATH+1]; int tempDirLen = GetTempPath(MAX_PATH+1, tempDirBuffer); EXPECT_TRUE(tempDirLen > 0 && tempDirLen <= MAX_PATH); tempDir = tempDirBuffer; } #else if (tempDir.empty()) tempDir = P_tmpdir; #endif std::string filename; // write out grid to a temp file { filename = tempDir + "/openvdb_test_point_load"; io::File fileOut(filename); GridCPtrVec grids{grid}; fileOut.write(grids); } // test point offsets for a delay-loaded grid { io::File fileIn(filename); fileIn.open(); GridPtrVecPtr grids = fileIn.getGrids(); fileIn.close(); EXPECT_EQ(grids->size(), size_t(1)); PointDataGrid::Ptr inputGrid = GridBase::grid<PointDataGrid>((*grids)[0]); EXPECT_TRUE(inputGrid); PointDataTree& inputTree = inputGrid->tree(); std::vector<Index64> offsets; std::vector<Name> includeGroups; std::vector<Name> excludeGroups; MultiGroupFilter filter(includeGroups, excludeGroups, inputTree.cbeginLeaf()->attributeSet()); Index64 total = pointOffsets(offsets, inputTree, filter, /*inCoreOnly=*/true); EXPECT_EQ(offsets.size(), size_t(4)); EXPECT_EQ(offsets[0], Index64(0)); EXPECT_EQ(offsets[1], Index64(0)); EXPECT_EQ(offsets[2], Index64(0)); EXPECT_EQ(offsets[3], Index64(0)); EXPECT_EQ(total, Index64(0)); offsets.clear(); total = pointOffsets(offsets, inputTree, filter, /*inCoreOnly=*/false); EXPECT_EQ(offsets.size(), size_t(4)); EXPECT_EQ(offsets[0], Index64(1)); EXPECT_EQ(offsets[1], Index64(3)); EXPECT_EQ(offsets[2], Index64(4)); EXPECT_EQ(offsets[3], Index64(5)); EXPECT_EQ(total, Index64(5)); } std::remove(filename.c_str()); } namespace { // sum all voxel values template<typename GridT> inline Index64 voxelSum(const GridT& grid) { Index64 total = 0; for (auto iter = grid.cbeginValueOn(); iter; ++iter) { total += static_cast<Index64>(*iter); } return total; } // Generate random points by uniformly distributing points on a unit-sphere. 
inline void genPoints(std::vector<Vec3R>& positions, const int numPoints, const double scale) { // init math::Random01 randNumber(0); const int n = int(std::sqrt(double(numPoints))); const double xScale = (2.0 * M_PI) / double(n); const double yScale = M_PI / double(n); double x, y, theta, phi; Vec3R pos; positions.reserve(n*n); // loop over a [0 to n) x [0 to n) grid. for (int a = 0; a < n; ++a) { for (int b = 0; b < n; ++b) { // jitter, move to random pos. inside the current cell x = double(a) + randNumber(); y = double(b) + randNumber(); // remap to a lat/long map theta = y * yScale; // [0 to PI] phi = x * xScale; // [0 to 2PI] // convert to cartesian coordinates on a unit sphere. // spherical coordinate triplet (r=1, theta, phi) pos[0] = static_cast<float>(std::sin(theta)*std::cos(phi)*scale); pos[1] = static_cast<float>(std::sin(theta)*std::sin(phi)*scale); pos[2] = static_cast<float>(std::cos(theta)*scale); positions.push_back(pos); } } } } // namespace TEST_F(TestPointCount, testCountGrid) { using namespace openvdb::math; { // five points std::vector<Vec3s> positions{ {1, 1, 1}, {1, 101, 1}, {2, 101, 1}, {101, 1, 1}, {101, 101, 1}}; { // in five voxels math::Transform::Ptr transform(math::Transform::createLinearTransform(1.0f)); PointDataGrid::Ptr points = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); // generate a count grid with the same transform Int32Grid::Ptr count = pointCountGrid(*points); EXPECT_EQ(count->activeVoxelCount(), points->activeVoxelCount()); EXPECT_EQ(count->evalActiveVoxelBoundingBox(), points->evalActiveVoxelBoundingBox()); EXPECT_EQ(voxelSum(*count), pointCount(points->tree())); } { // in four voxels math::Transform::Ptr transform(math::Transform::createLinearTransform(10.0f)); PointDataGrid::Ptr points = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); // generate a count grid with the same transform Int32Grid::Ptr count = pointCountGrid(*points); EXPECT_EQ(count->activeVoxelCount(), points->activeVoxelCount()); EXPECT_EQ(count->evalActiveVoxelBoundingBox(), points->evalActiveVoxelBoundingBox()); EXPECT_EQ(voxelSum(*count), pointCount(points->tree())); } { // in one voxel math::Transform::Ptr transform(math::Transform::createLinearTransform(1000.0f)); PointDataGrid::Ptr points = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); // generate a count grid with the same transform Int32Grid::Ptr count = pointCountGrid(*points); EXPECT_EQ(count->activeVoxelCount(), points->activeVoxelCount()); EXPECT_EQ(count->evalActiveVoxelBoundingBox(), points->evalActiveVoxelBoundingBox()); EXPECT_EQ(voxelSum(*count), pointCount(points->tree())); } { // in four voxels, Int64 grid math::Transform::Ptr transform(math::Transform::createLinearTransform(10.0f)); PointDataGrid::Ptr points = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); // generate a count grid with the same transform Int64Grid::Ptr count = pointCountGrid<PointDataGrid, Int64Grid>(*points); EXPECT_EQ(count->activeVoxelCount(), points->activeVoxelCount()); EXPECT_EQ(count->evalActiveVoxelBoundingBox(), points->evalActiveVoxelBoundingBox()); EXPECT_EQ(voxelSum(*count), pointCount(points->tree())); } { // in four voxels, float grid math::Transform::Ptr transform(math::Transform::createLinearTransform(10.0f)); PointDataGrid::Ptr points = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); // generate a count grid with the same transform FloatGrid::Ptr count = pointCountGrid<PointDataGrid, FloatGrid>(*points); 
EXPECT_EQ(count->activeVoxelCount(), points->activeVoxelCount()); EXPECT_EQ(count->evalActiveVoxelBoundingBox(), points->evalActiveVoxelBoundingBox()); EXPECT_EQ(voxelSum(*count), pointCount(points->tree())); } { // in four voxels math::Transform::Ptr transform(math::Transform::createLinearTransform(10.0f)); const PointAttributeVector<Vec3s> pointList(positions); tools::PointIndexGrid::Ptr pointIndexGrid = tools::createPointIndexGrid<tools::PointIndexGrid>(pointList, *transform); PointDataGrid::Ptr points = createPointDataGrid<NullCodec, PointDataGrid>(*pointIndexGrid, pointList, *transform); auto& tree = points->tree(); // assign point 3 to new group "test" appendGroup(tree, "test"); std::vector<short> groups{0,0,1,0,0}; setGroup(tree, pointIndexGrid->tree(), groups, "test"); std::vector<std::string> includeGroups{"test"}; std::vector<std::string> excludeGroups; // generate a count grid with the same transform MultiGroupFilter filter(includeGroups, excludeGroups, tree.cbeginLeaf()->attributeSet()); Int32Grid::Ptr count = pointCountGrid(*points, filter); EXPECT_EQ(count->activeVoxelCount(), Index64(1)); EXPECT_EQ(voxelSum(*count), Index64(1)); MultiGroupFilter filter2(excludeGroups, includeGroups, tree.cbeginLeaf()->attributeSet()); count = pointCountGrid(*points, filter2); EXPECT_EQ(count->activeVoxelCount(), Index64(4)); EXPECT_EQ(voxelSum(*count), Index64(4)); } } { // 40,000 points on a unit sphere std::vector<Vec3R> positions; const size_t total = 40000; genPoints(positions, total, /*scale=*/100.0); EXPECT_EQ(positions.size(), total); math::Transform::Ptr transform1(math::Transform::createLinearTransform(1.0f)); math::Transform::Ptr transform5(math::Transform::createLinearTransform(5.0f)); PointDataGrid::Ptr points1 = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform1); PointDataGrid::Ptr points5 = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform5); EXPECT_TRUE(points1->activeVoxelCount() != points5->activeVoxelCount()); EXPECT_TRUE(points1->evalActiveVoxelBoundingBox() != points5->evalActiveVoxelBoundingBox()); EXPECT_EQ(pointCount(points1->tree()), pointCount(points5->tree())); { // generate count grids with the same transform Int32Grid::Ptr count1 = pointCountGrid(*points1); EXPECT_EQ(count1->activeVoxelCount(), points1->activeVoxelCount()); EXPECT_EQ(count1->evalActiveVoxelBoundingBox(), points1->evalActiveVoxelBoundingBox()); EXPECT_EQ(voxelSum(*count1), pointCount(points1->tree())); Int32Grid::Ptr count5 = pointCountGrid(*points5); EXPECT_EQ(count5->activeVoxelCount(), points5->activeVoxelCount()); EXPECT_EQ(count5->evalActiveVoxelBoundingBox(), points5->evalActiveVoxelBoundingBox()); EXPECT_EQ(voxelSum(*count5), pointCount(points5->tree())); } { // generate count grids with differing transforms Int32Grid::Ptr count1 = pointCountGrid(*points5, *transform1); EXPECT_EQ(count1->activeVoxelCount(), points1->activeVoxelCount()); EXPECT_EQ(count1->evalActiveVoxelBoundingBox(), points1->evalActiveVoxelBoundingBox()); EXPECT_EQ(voxelSum(*count1), pointCount(points5->tree())); Int32Grid::Ptr count5 = pointCountGrid(*points1, *transform5); EXPECT_EQ(count5->activeVoxelCount(), points5->activeVoxelCount()); EXPECT_EQ(count5->evalActiveVoxelBoundingBox(), points5->evalActiveVoxelBoundingBox()); EXPECT_EQ(voxelSum(*count5), pointCount(points1->tree())); } } }
29,059
C++
34.054282
109
0.631095
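Illustrative sketch, not taken from the test sources above: the pointOffsets checks rely on an inclusive prefix sum over per-leaf point counts, so offsets[i] is the number of filtered points in leaves 0..i and the final entry equals the returned total. The per-leaf counts below (1, 2, 1, 1) are inferred from the unfiltered expectations in TestPointCount and are assumptions for illustration only.

// Standalone sketch of the cumulative-offset convention checked above.
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

int main()
{
    // Assumed per-leaf counts matching the unfiltered case: 1, 2, 1, 1.
    const std::vector<std::uint64_t> pointsPerLeaf{1, 2, 1, 1};

    // Inclusive prefix sum: offsets[i] = points in leaves [0, i].
    std::vector<std::uint64_t> offsets(pointsPerLeaf.size());
    std::partial_sum(pointsPerLeaf.begin(), pointsPerLeaf.end(), offsets.begin());

    for (std::uint64_t o : offsets) std::cout << o << ' ';  // 1 3 4 5
    std::cout << "\ntotal = " << offsets.back() << '\n';    // total = 5
    return 0;
}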
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestStream.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <openvdb/Exceptions.h> #include <openvdb/io/Stream.h> #include <openvdb/Metadata.h> #include <openvdb/math/Maps.h> #include <openvdb/math/Transform.h> #include <openvdb/version.h> #include <openvdb/openvdb.h> #include "gtest/gtest.h" #include <cstdio> // for remove() #include <fstream> #define ASSERT_DOUBLES_EXACTLY_EQUAL(a, b) \ EXPECT_NEAR((a), (b), /*tolerance=*/0.0); class TestStream: public ::testing::Test { public: void SetUp() override; void TearDown() override; void testFileReadFromStream(); protected: static openvdb::GridPtrVecPtr createTestGrids(openvdb::MetaMap::Ptr&); static void verifyTestGrids(openvdb::GridPtrVecPtr, openvdb::MetaMap::Ptr); }; //////////////////////////////////////// void TestStream::SetUp() { openvdb::uninitialize(); openvdb::Int32Grid::registerGrid(); openvdb::FloatGrid::registerGrid(); openvdb::StringMetadata::registerType(); openvdb::Int32Metadata::registerType(); openvdb::Int64Metadata::registerType(); openvdb::Vec3IMetadata::registerType(); openvdb::io::DelayedLoadMetadata::registerType(); // Register maps openvdb::math::MapRegistry::clear(); openvdb::math::AffineMap::registerMap(); openvdb::math::ScaleMap::registerMap(); openvdb::math::UniformScaleMap::registerMap(); openvdb::math::TranslationMap::registerMap(); openvdb::math::ScaleTranslateMap::registerMap(); openvdb::math::UniformScaleTranslateMap::registerMap(); openvdb::math::NonlinearFrustumMap::registerMap(); } void TestStream::TearDown() { openvdb::uninitialize(); } //////////////////////////////////////// openvdb::GridPtrVecPtr TestStream::createTestGrids(openvdb::MetaMap::Ptr& metadata) { using namespace openvdb; // Create trees Int32Tree::Ptr tree1(new Int32Tree(1)); FloatTree::Ptr tree2(new FloatTree(2.0)); // Set some values tree1->setValue(Coord(0, 0, 0), 5); tree1->setValue(Coord(100, 0, 0), 6); tree2->setValue(Coord(0, 0, 0), 10); tree2->setValue(Coord(0, 100, 0), 11); // Create grids GridBase::Ptr grid1 = createGrid(tree1), grid2 = createGrid(tree1), // instance of grid1 grid3 = createGrid(tree2); grid1->setName("density"); grid2->setName("density_copy"); grid3->setName("temperature"); // Create transforms math::Transform::Ptr trans1 = math::Transform::createLinearTransform(0.1); math::Transform::Ptr trans2 = math::Transform::createLinearTransform(0.1); grid1->setTransform(trans1); grid2->setTransform(trans2); grid3->setTransform(trans2); metadata.reset(new MetaMap); metadata->insertMeta("author", StringMetadata("Einstein")); metadata->insertMeta("year", Int32Metadata(2009)); GridPtrVecPtr grids(new GridPtrVec); grids->push_back(grid1); grids->push_back(grid2); grids->push_back(grid3); return grids; } void TestStream::verifyTestGrids(openvdb::GridPtrVecPtr grids, openvdb::MetaMap::Ptr meta) { using namespace openvdb; EXPECT_TRUE(grids.get() != nullptr); EXPECT_TRUE(meta.get() != nullptr); // Verify the metadata. EXPECT_EQ(2, int(meta->metaCount())); EXPECT_EQ(std::string("Einstein"), meta->metaValue<std::string>("author")); EXPECT_EQ(2009, meta->metaValue<int32_t>("year")); // Verify the grids. 
EXPECT_EQ(3, int(grids->size())); GridBase::Ptr grid = findGridByName(*grids, "density"); EXPECT_TRUE(grid.get() != nullptr); Int32Tree::Ptr density = gridPtrCast<Int32Grid>(grid)->treePtr(); EXPECT_TRUE(density.get() != nullptr); grid.reset(); grid = findGridByName(*grids, "density_copy"); EXPECT_TRUE(grid.get() != nullptr); EXPECT_TRUE(gridPtrCast<Int32Grid>(grid)->treePtr().get() != nullptr); // Verify that "density_copy" is an instance of (i.e., shares a tree with) "density". EXPECT_EQ(density, gridPtrCast<Int32Grid>(grid)->treePtr()); grid.reset(); grid = findGridByName(*grids, "temperature"); EXPECT_TRUE(grid.get() != nullptr); FloatTree::Ptr temperature = gridPtrCast<FloatGrid>(grid)->treePtr(); EXPECT_TRUE(temperature.get() != nullptr); ASSERT_DOUBLES_EXACTLY_EQUAL(5, density->getValue(Coord(0, 0, 0))); ASSERT_DOUBLES_EXACTLY_EQUAL(6, density->getValue(Coord(100, 0, 0))); ASSERT_DOUBLES_EXACTLY_EQUAL(10, temperature->getValue(Coord(0, 0, 0))); ASSERT_DOUBLES_EXACTLY_EQUAL(11, temperature->getValue(Coord(0, 100, 0))); } //////////////////////////////////////// TEST_F(TestStream, testWrite) { using namespace openvdb; // Create test grids and stream them to a string. MetaMap::Ptr meta; GridPtrVecPtr grids = createTestGrids(meta); std::ostringstream ostr(std::ios_base::binary); io::Stream(ostr).write(*grids, *meta); //std::ofstream file("debug.vdb2", std::ios_base::binary); //file << ostr.str(); // Stream the grids back in. std::istringstream is(ostr.str(), std::ios_base::binary); io::Stream strm(is); meta = strm.getMetadata(); grids = strm.getGrids(); verifyTestGrids(grids, meta); } TEST_F(TestStream, testRead) { using namespace openvdb; // Create test grids and write them to a file. MetaMap::Ptr meta; GridPtrVecPtr grids = createTestGrids(meta); const char* filename = "something.vdb2"; io::File(filename).write(*grids, *meta); SharedPtr<const char> scopedFile(filename, ::remove); // Stream the grids back in. std::ifstream is(filename, std::ios_base::binary); io::Stream strm(is); meta = strm.getMetadata(); grids = strm.getGrids(); verifyTestGrids(grids, meta); } /// Stream grids to a file using io::Stream, then read the file back using io::File. void TestStream::testFileReadFromStream() { using namespace openvdb; MetaMap::Ptr meta; GridPtrVecPtr grids; // Create test grids and stream them to a file (and then close the file). const char* filename = "something.vdb2"; SharedPtr<const char> scopedFile(filename, ::remove); { std::ofstream os(filename, std::ios_base::binary); grids = createTestGrids(meta); io::Stream(os).write(*grids, *meta); } // Read the grids back in. io::File file(filename); EXPECT_TRUE(file.inputHasGridOffsets()); EXPECT_THROW(file.getGrids(), IoError); file.open(); meta = file.getMetadata(); grids = file.getGrids(); EXPECT_TRUE(!file.inputHasGridOffsets()); EXPECT_TRUE(meta.get() != nullptr); EXPECT_TRUE(grids.get() != nullptr); EXPECT_TRUE(!grids->empty()); verifyTestGrids(grids, meta); } TEST_F(TestStream, testFileReadFromStream) { testFileReadFromStream(); }
6,795
C++
27.554622
89
0.659161
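A condensed usage sketch of the in-memory round trip exercised in TestStream above, restricted to calls that appear in that test; grid construction, type registration, and error handling are omitted, and the function name is an assumption.

// Sketch only: stream grids plus metadata to a binary string buffer and back.
#include <openvdb/openvdb.h>
#include <openvdb/io/Stream.h>
#include <sstream>

void roundTripGrids(const openvdb::GridPtrVec& grids, const openvdb::MetaMap& meta)
{
    // Serialize to an in-memory binary stream.
    std::ostringstream ostr(std::ios_base::binary);
    openvdb::io::Stream(ostr).write(grids, meta);

    // Read the same bytes back; the returned grids and metadata are copies.
    std::istringstream istr(ostr.str(), std::ios_base::binary);
    openvdb::io::Stream strm(istr);
    openvdb::MetaMap::Ptr metaIn = strm.getMetadata();
    openvdb::GridPtrVecPtr gridsIn = strm.getGrids();
    (void)metaIn;
    (void)gridsIn;
}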
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestTreeGetSetValues.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/Types.h> #include <openvdb/tree/Tree.h> #include <openvdb/tools/ValueTransformer.h> // for tools::setValueOnMin() et al. #include <openvdb/tools/Prune.h> #define ASSERT_DOUBLES_EXACTLY_EQUAL(expected, actual) \ EXPECT_NEAR((expected), (actual), /*tolerance=*/0.0); class TestTreeGetSetValues: public ::testing::Test { }; namespace { typedef openvdb::tree::Tree4<float, 3, 2, 3>::Type Tree323f; // 8^3 x 4^3 x 8^3 } TEST_F(TestTreeGetSetValues, testGetBackground) { const float background = 256.0f; Tree323f tree(background); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.background()); } TEST_F(TestTreeGetSetValues, testGetValues) { Tree323f tree(/*background=*/256.0f); tree.setValue(openvdb::Coord(0, 0, 0), 1.0); tree.setValue(openvdb::Coord(1, 0, 0), 1.5); tree.setValue(openvdb::Coord(0, 0, 8), 2.0); tree.setValue(openvdb::Coord(1, 0, 8), 2.5); tree.setValue(openvdb::Coord(0, 0, 16), 3.0); tree.setValue(openvdb::Coord(1, 0, 16), 3.5); tree.setValue(openvdb::Coord(0, 0, 24), 4.0); tree.setValue(openvdb::Coord(1, 0, 24), 4.5); ASSERT_DOUBLES_EXACTLY_EQUAL(1.0, tree.getValue(openvdb::Coord(0, 0, 0))); ASSERT_DOUBLES_EXACTLY_EQUAL(1.5, tree.getValue(openvdb::Coord(1, 0, 0))); ASSERT_DOUBLES_EXACTLY_EQUAL(2.0, tree.getValue(openvdb::Coord(0, 0, 8))); ASSERT_DOUBLES_EXACTLY_EQUAL(2.5, tree.getValue(openvdb::Coord(1, 0, 8))); ASSERT_DOUBLES_EXACTLY_EQUAL(3.0, tree.getValue(openvdb::Coord(0, 0, 16))); ASSERT_DOUBLES_EXACTLY_EQUAL(3.5, tree.getValue(openvdb::Coord(1, 0, 16))); ASSERT_DOUBLES_EXACTLY_EQUAL(4.0, tree.getValue(openvdb::Coord(0, 0, 24))); ASSERT_DOUBLES_EXACTLY_EQUAL(4.5, tree.getValue(openvdb::Coord(1, 0, 24))); } TEST_F(TestTreeGetSetValues, testSetValues) { using namespace openvdb; const float background = 256.0; Tree323f tree(background); for (int activeTile = 0; activeTile < 2; ++activeTile) { if (activeTile) tree.fill(CoordBBox(Coord(0), Coord(31)), background, /*active=*/true); tree.setValue(openvdb::Coord(0, 0, 0), 1.0); tree.setValue(openvdb::Coord(1, 0, 0), 1.5); tree.setValue(openvdb::Coord(0, 0, 8), 2.0); tree.setValue(openvdb::Coord(1, 0, 8), 2.5); tree.setValue(openvdb::Coord(0, 0, 16), 3.0); tree.setValue(openvdb::Coord(1, 0, 16), 3.5); tree.setValue(openvdb::Coord(0, 0, 24), 4.0); tree.setValue(openvdb::Coord(1, 0, 24), 4.5); const int expectedActiveCount = (!activeTile ? 8 : 32 * 32 * 32); EXPECT_EQ(expectedActiveCount, int(tree.activeVoxelCount())); float val = 1.f; for (Tree323f::LeafCIter iter = tree.cbeginLeaf(); iter; ++iter) { ASSERT_DOUBLES_EXACTLY_EQUAL(val, iter->getValue(openvdb::Coord(0, 0, 0))); ASSERT_DOUBLES_EXACTLY_EQUAL(val+0.5, iter->getValue(openvdb::Coord(1, 0, 0))); val = val + 1.f; } } } TEST_F(TestTreeGetSetValues, testUnsetValues) { using namespace openvdb; const float background = 256.0; Tree323f tree(background); for (int activeTile = 0; activeTile < 2; ++activeTile) { if (activeTile) tree.fill(CoordBBox(Coord(0), Coord(31)), background, /*active=*/true); Coord setCoords[8] = { Coord(0, 0, 0), Coord(1, 0, 0), Coord(0, 0, 8), Coord(1, 0, 8), Coord(0, 0, 16), Coord(1, 0, 16), Coord(0, 0, 24), Coord(1, 0, 24) }; for (int i = 0; i < 8; ++i) { tree.setValue(setCoords[i], 1.0); } const int expectedActiveCount = (!activeTile ? 8 : 32 * 32 * 32); EXPECT_EQ(expectedActiveCount, int(tree.activeVoxelCount())); // Unset some voxels. 
for (int i = 0; i < 8; i += 2) { tree.setValueOff(setCoords[i]); } EXPECT_EQ(expectedActiveCount - 4, int(tree.activeVoxelCount())); // Unset some voxels, but change their values. for (int i = 0; i < 8; i += 2) { tree.setValueOff(setCoords[i], background); } EXPECT_EQ(expectedActiveCount - 4, int(tree.activeVoxelCount())); for (int i = 0; i < 8; i += 2) { ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(setCoords[i])); } } } TEST_F(TestTreeGetSetValues, testFill) { using openvdb::CoordBBox; using openvdb::Coord; const float background = 256.0; Tree323f tree(background); // Fill from (-2,-2,-2) to (2,2,2) with active value 2. tree.fill(CoordBBox(Coord(-2), Coord(2)), 2.0); Coord xyz, xyzMin = Coord::max(), xyzMax = Coord::min(); for (Tree323f::ValueOnCIter iter = tree.cbeginValueOn(); iter; ++iter) { xyz = iter.getCoord(); xyzMin = std::min(xyzMin, xyz); xyzMax = std::max(xyz, xyzMax); ASSERT_DOUBLES_EXACTLY_EQUAL(2.0, *iter); } EXPECT_EQ(openvdb::Index64(5*5*5), tree.activeVoxelCount()); EXPECT_EQ(Coord(-2), xyzMin); EXPECT_EQ(Coord( 2), xyzMax); // Fill from (1,1,1) to (3,3,3) with active value 3. tree.fill(CoordBBox(Coord(1), Coord(3)), 3.0); xyzMin = Coord::max(); xyzMax = Coord::min(); for (Tree323f::ValueOnCIter iter = tree.cbeginValueOn(); iter; ++iter) { xyz = iter.getCoord(); xyzMin = std::min(xyzMin, xyz); xyzMax = std::max(xyz, xyzMax); const float expectedValue = (xyz[0] >= 1 && xyz[1] >= 1 && xyz[2] >= 1 && xyz[0] <= 3 && xyz[1] <= 3 && xyz[2] <= 3) ? 3.0 : 2.0; ASSERT_DOUBLES_EXACTLY_EQUAL(expectedValue, *iter); } openvdb::Index64 expectedCount = 5*5*5 // (-2,-2,-2) to (2,2,2) + 3*3*3 // (1,1,1) to (3,3,3) - 2*2*2; // (1,1,1) to (2,2,2) overlap EXPECT_EQ(expectedCount, tree.activeVoxelCount()); EXPECT_EQ(Coord(-2), xyzMin); EXPECT_EQ(Coord( 3), xyzMax); // Fill from (10,10,10) to (20,20,20) with active value 10. tree.fill(CoordBBox(Coord(10), Coord(20)), 10.0); xyzMin = Coord::max(); xyzMax = Coord::min(); for (Tree323f::ValueOnCIter iter = tree.cbeginValueOn(); iter; ++iter) { xyz = iter.getCoord(); xyzMin = std::min(xyzMin, xyz); xyzMax = std::max(xyz, xyzMax); float expectedValue = 2.0; if (xyz[0] >= 1 && xyz[1] >= 1 && xyz[2] >= 1 && xyz[0] <= 3 && xyz[1] <= 3 && xyz[2] <= 3) { expectedValue = 3.0; } else if (xyz[0] >= 10 && xyz[1] >= 10 && xyz[2] >= 10 && xyz[0] <= 20 && xyz[1] <= 20 && xyz[2] <= 20) { expectedValue = 10.0; } ASSERT_DOUBLES_EXACTLY_EQUAL(expectedValue, *iter); } expectedCount = 5*5*5 // (-2,-2,-2) to (2,2,2) + 3*3*3 // (1,1,1) to (3,3,3) - 2*2*2 // (1,1,1) to (2,2,2) overlap + 11*11*11; // (10,10,10) to (20,20,20) EXPECT_EQ(expectedCount, tree.activeVoxelCount()); EXPECT_EQ(Coord(-2), xyzMin); EXPECT_EQ(Coord(20), xyzMax); // "Undo" previous fill from (10,10,10) to (20,20,20). tree.fill(CoordBBox(Coord(10), Coord(20)), background, /*active=*/false); xyzMin = Coord::max(); xyzMax = Coord::min(); for (Tree323f::ValueOnCIter iter = tree.cbeginValueOn(); iter; ++iter) { xyz = iter.getCoord(); xyzMin = std::min(xyzMin, xyz); xyzMax = std::max(xyz, xyzMax); const float expectedValue = (xyz[0] >= 1 && xyz[1] >= 1 && xyz[2] >= 1 && xyz[0] <= 3 && xyz[1] <= 3 && xyz[2] <= 3) ? 3.0 : 2.0; ASSERT_DOUBLES_EXACTLY_EQUAL(expectedValue, *iter); } expectedCount = 5*5*5 // (-2,-2,-2) to (2,2,2) + 3*3*3 // (1,1,1) to (3,3,3) - 2*2*2; // (1,1,1) to (2,2,2) overlap EXPECT_EQ(expectedCount, tree.activeVoxelCount()); EXPECT_EQ(Coord(-2), xyzMin); EXPECT_EQ(Coord( 3), xyzMax); // The following tests assume a [3,2,3] tree configuration. 
tree.clear(); EXPECT_EQ(openvdb::Index32(0), tree.leafCount()); EXPECT_EQ(openvdb::Index32(1), tree.nonLeafCount()); // root node // Partially fill a single leaf node. tree.fill(CoordBBox(Coord(8), Coord(14)), 0.0); EXPECT_EQ(openvdb::Index32(1), tree.leafCount()); EXPECT_EQ(openvdb::Index32(3), tree.nonLeafCount()); // Completely fill the leaf node, replacing it with a tile. tree.fill(CoordBBox(Coord(8), Coord(15)), 0.0); EXPECT_EQ(openvdb::Index32(0), tree.leafCount()); EXPECT_EQ(openvdb::Index32(3), tree.nonLeafCount()); { const int activeVoxelCount = int(tree.activeVoxelCount()); // Fill a single voxel of the tile with a different (active) value. tree.fill(CoordBBox(Coord(10), Coord(10)), 1.0); EXPECT_EQ(openvdb::Index32(1), tree.leafCount()); EXPECT_EQ(openvdb::Index32(3), tree.nonLeafCount()); EXPECT_EQ(activeVoxelCount, int(tree.activeVoxelCount())); // Fill the voxel with an inactive value. tree.fill(CoordBBox(Coord(10), Coord(10)), 1.0, /*active=*/false); EXPECT_EQ(openvdb::Index32(1), tree.leafCount()); EXPECT_EQ(openvdb::Index32(3), tree.nonLeafCount()); EXPECT_EQ(activeVoxelCount - 1, int(tree.activeVoxelCount())); // Completely fill the leaf node, replacing it with a tile again. tree.fill(CoordBBox(Coord(8), Coord(15)), 0.0); EXPECT_EQ(openvdb::Index32(0), tree.leafCount()); EXPECT_EQ(openvdb::Index32(3), tree.nonLeafCount()); } // Expand by one voxel, creating seven neighboring leaf nodes. tree.fill(CoordBBox(Coord(8), Coord(16)), 0.0); EXPECT_EQ(openvdb::Index32(7), tree.leafCount()); EXPECT_EQ(openvdb::Index32(3), tree.nonLeafCount()); // Completely fill the internal node containing the tile, replacing it with // a tile at the next level of the tree. tree.fill(CoordBBox(Coord(0), Coord(31)), 0.0); EXPECT_EQ(openvdb::Index32(0), tree.leafCount()); EXPECT_EQ(openvdb::Index32(2), tree.nonLeafCount()); // Expand by one voxel, creating a layer of leaf nodes on three faces. tree.fill(CoordBBox(Coord(0), Coord(32)), 0.0); EXPECT_EQ(openvdb::Index32(5*5 + 4*5 + 4*4), tree.leafCount()); EXPECT_EQ(openvdb::Index32(2 + 7), tree.nonLeafCount()); // +7 internal nodes // Completely fill the second-level internal node, replacing it with a root-level tile. tree.fill(CoordBBox(Coord(0), Coord(255)), 0.0); EXPECT_EQ(openvdb::Index32(0), tree.leafCount()); EXPECT_EQ(openvdb::Index32(1), tree.nonLeafCount()); // Repeat, filling with an inactive value. tree.clear(); EXPECT_EQ(openvdb::Index32(0), tree.leafCount()); EXPECT_EQ(openvdb::Index32(1), tree.nonLeafCount()); // root node // Partially fill a single leaf node. tree.fill(CoordBBox(Coord(8), Coord(14)), 0.0, /*active=*/false); EXPECT_EQ(openvdb::Index32(1), tree.leafCount()); EXPECT_EQ(openvdb::Index32(3), tree.nonLeafCount()); // Completely fill the leaf node, replacing it with a tile. tree.fill(CoordBBox(Coord(8), Coord(15)), 0.0, /*active=*/false); EXPECT_EQ(openvdb::Index32(0), tree.leafCount()); EXPECT_EQ(openvdb::Index32(3), tree.nonLeafCount()); // Expand by one voxel, creating seven neighboring leaf nodes. tree.fill(CoordBBox(Coord(8), Coord(16)), 0.0, /*active=*/false); EXPECT_EQ(openvdb::Index32(7), tree.leafCount()); EXPECT_EQ(openvdb::Index32(3), tree.nonLeafCount()); // Completely fill the internal node containing the tile, replacing it with // a tile at the next level of the tree. tree.fill(CoordBBox(Coord(0), Coord(31)), 0.0, /*active=*/false); EXPECT_EQ(openvdb::Index32(0), tree.leafCount()); EXPECT_EQ(openvdb::Index32(2), tree.nonLeafCount()); // Expand by one voxel, creating a layer of leaf nodes on three faces. 
tree.fill(CoordBBox(Coord(0), Coord(32)), 0.0, /*active=*/false); EXPECT_EQ(openvdb::Index32(5*5 + 4*5 + 4*4), tree.leafCount()); EXPECT_EQ(openvdb::Index32(2 + 7), tree.nonLeafCount()); // +7 internal nodes // Completely fill the second-level internal node, replacing it with a root-level tile. tree.fill(CoordBBox(Coord(0), Coord(255)), 0.0, /*active=*/false); EXPECT_EQ(openvdb::Index32(0), tree.leafCount()); EXPECT_EQ(openvdb::Index32(1), tree.nonLeafCount()); tree.clear(); EXPECT_EQ(openvdb::Index32(0), tree.leafCount()); EXPECT_EQ(openvdb::Index32(1), tree.nonLeafCount()); // root node EXPECT_TRUE(tree.empty()); // Partially fill a region with inactive background values. tree.fill(CoordBBox(Coord(27), Coord(254)), background, /*active=*/false); // Confirm that after pruning, the tree is empty. openvdb::tools::prune(tree); EXPECT_EQ(openvdb::Index32(0), tree.leafCount()); EXPECT_EQ(openvdb::Index32(1), tree.nonLeafCount()); // root node EXPECT_TRUE(tree.empty()); } // Verify that setting voxels inside active tiles works correctly. // In particular, it should preserve the active states of surrounding voxels. TEST_F(TestTreeGetSetValues, testSetActiveStates) { using namespace openvdb; const float background = 256.0; Tree323f tree(background); const Coord xyz(10); const float val = 42.0; const int expectedActiveCount = 32 * 32 * 32; #define RESET_TREE() \ tree.fill(CoordBBox(Coord(0), Coord(31)), background, /*active=*/true) // create an active tile RESET_TREE(); EXPECT_EQ(expectedActiveCount, int(tree.activeVoxelCount())); tree.setValueOff(xyz); EXPECT_EQ(expectedActiveCount - 1, int(tree.activeVoxelCount())); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(xyz)); RESET_TREE(); tree.setValueOn(xyz); EXPECT_EQ(expectedActiveCount, int(tree.activeVoxelCount())); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(xyz)); RESET_TREE(); tree.setValueOff(xyz, val); EXPECT_EQ(expectedActiveCount - 1, int(tree.activeVoxelCount())); ASSERT_DOUBLES_EXACTLY_EQUAL(val, tree.getValue(xyz)); RESET_TREE(); tree.setActiveState(xyz, true); EXPECT_EQ(expectedActiveCount, int(tree.activeVoxelCount())); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(xyz)); RESET_TREE(); tree.setActiveState(xyz, false); EXPECT_EQ(expectedActiveCount - 1, int(tree.activeVoxelCount())); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(xyz)); RESET_TREE(); tree.setValueOn(xyz, val); EXPECT_EQ(expectedActiveCount, int(tree.activeVoxelCount())); ASSERT_DOUBLES_EXACTLY_EQUAL(val, tree.getValue(xyz)); RESET_TREE(); tools::setValueOnMin(tree, xyz, val); EXPECT_EQ(expectedActiveCount, int(tree.activeVoxelCount())); ASSERT_DOUBLES_EXACTLY_EQUAL(std::min(val, background), tree.getValue(xyz)); RESET_TREE(); tools::setValueOnMax(tree, xyz, val); EXPECT_EQ(expectedActiveCount, int(tree.activeVoxelCount())); ASSERT_DOUBLES_EXACTLY_EQUAL(std::max(val, background), tree.getValue(xyz)); RESET_TREE(); tools::setValueOnSum(tree, xyz, val); EXPECT_EQ(expectedActiveCount, int(tree.activeVoxelCount())); ASSERT_DOUBLES_EXACTLY_EQUAL(val + background, tree.getValue(xyz)); #undef RESET_TREE } TEST_F(TestTreeGetSetValues, testHasActiveTiles) { Tree323f tree(/*background=*/256.0f); EXPECT_TRUE(!tree.hasActiveTiles()); // Fill from (-2,-2,-2) to (2,2,2) with active value 2. tree.fill(openvdb::CoordBBox(openvdb::Coord(-2), openvdb::Coord(2)), 2.0f); EXPECT_TRUE(!tree.hasActiveTiles()); // Fill from (-200,-200,-200) to (-4,-4,-4) with active value 3. 
tree.fill(openvdb::CoordBBox(openvdb::Coord(-200), openvdb::Coord(-4)), 3.0f); EXPECT_TRUE(tree.hasActiveTiles()); }
15,907
C++
37.61165
99
0.620796
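A small worked sketch of the inclusion-exclusion arithmetic behind the expectedCount values in the testFill case above; the helper name is an assumption, and boxes use inclusive extents as CoordBBox does.

// Sketch: expected active-voxel counts for the overlapping fills in testFill.
#include <cstdint>
#include <iostream>

// Number of voxels in the inclusive cube [lo, hi]^3.
constexpr std::uint64_t cubeVoxels(int lo, int hi)
{
    return static_cast<std::uint64_t>(hi - lo + 1)
         * static_cast<std::uint64_t>(hi - lo + 1)
         * static_cast<std::uint64_t>(hi - lo + 1);
}

int main()
{
    // fill((-2..2)^3) then fill((1..3)^3): union = 5^3 + 3^3 - 2^3.
    const std::uint64_t twoFills = cubeVoxels(-2, 2) + cubeVoxels(1, 3) - cubeVoxels(1, 2);
    // The disjoint fill((10..20)^3) adds 11^3 more voxels.
    const std::uint64_t threeFills = twoFills + cubeVoxels(10, 20);

    std::cout << twoFills << '\n';   // 144
    std::cout << threeFills << '\n'; // 1475
    return 0;
}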
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestLeafIO.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/tree/LeafNode.h> #include <openvdb/Types.h> #include <cctype> // for toupper() #include <iostream> #include <sstream> template<typename T> class TestLeafIO { public: static void testBuffer(); }; template<typename T> void TestLeafIO<T>::testBuffer() { openvdb::tree::LeafNode<T, 3> leaf(openvdb::Coord(0, 0, 0)); leaf.setValueOn(openvdb::Coord(0, 1, 0), T(1)); leaf.setValueOn(openvdb::Coord(1, 0, 0), T(1)); std::ostringstream ostr(std::ios_base::binary); leaf.writeBuffers(ostr); leaf.setValueOn(openvdb::Coord(0, 1, 0), T(0)); leaf.setValueOn(openvdb::Coord(0, 1, 1), T(1)); std::istringstream istr(ostr.str(), std::ios_base::binary); // Since the input stream doesn't include a VDB header with file format version info, // tag the input stream explicitly with the current version number. openvdb::io::setCurrentVersion(istr); leaf.readBuffers(istr); EXPECT_NEAR(T(1), leaf.getValue(openvdb::Coord(0, 1, 0)), /*tolerance=*/0); EXPECT_NEAR(T(1), leaf.getValue(openvdb::Coord(1, 0, 0)), /*tolerance=*/0); EXPECT_TRUE(leaf.onVoxelCount() == 2); } class TestLeafIOTest: public ::testing::Test { }; TEST_F(TestLeafIOTest, testBufferInt) { TestLeafIO<int>::testBuffer(); } TEST_F(TestLeafIOTest, testBufferFloat) { TestLeafIO<float>::testBuffer(); } TEST_F(TestLeafIOTest, testBufferDouble) { TestLeafIO<double>::testBuffer(); } TEST_F(TestLeafIOTest, testBufferBool) { TestLeafIO<bool>::testBuffer(); } TEST_F(TestLeafIOTest, testBufferByte) { TestLeafIO<openvdb::Byte>::testBuffer(); } TEST_F(TestLeafIOTest, testBufferString) { openvdb::tree::LeafNode<std::string, 3> leaf(openvdb::Coord(0, 0, 0), std::string()); leaf.setValueOn(openvdb::Coord(0, 1, 0), std::string("test")); leaf.setValueOn(openvdb::Coord(1, 0, 0), std::string("test")); std::ostringstream ostr(std::ios_base::binary); leaf.writeBuffers(ostr); leaf.setValueOn(openvdb::Coord(0, 1, 0), std::string("douche")); leaf.setValueOn(openvdb::Coord(0, 1, 1), std::string("douche")); std::istringstream istr(ostr.str(), std::ios_base::binary); // Since the input stream doesn't include a VDB header with file format version info, // tag the input stream explicitly with the current version number. openvdb::io::setCurrentVersion(istr); leaf.readBuffers(istr); EXPECT_EQ(std::string("test"), leaf.getValue(openvdb::Coord(0, 1, 0))); EXPECT_EQ(std::string("test"), leaf.getValue(openvdb::Coord(1, 0, 0))); EXPECT_TRUE(leaf.onVoxelCount() == 2); } TEST_F(TestLeafIOTest, testBufferVec3R) { openvdb::tree::LeafNode<openvdb::Vec3R, 3> leaf(openvdb::Coord(0, 0, 0)); leaf.setValueOn(openvdb::Coord(0, 1, 0), openvdb::Vec3R(1, 1, 1)); leaf.setValueOn(openvdb::Coord(1, 0, 0), openvdb::Vec3R(1, 1, 1)); std::ostringstream ostr(std::ios_base::binary); leaf.writeBuffers(ostr); leaf.setValueOn(openvdb::Coord(0, 1, 0), openvdb::Vec3R(0, 0, 0)); leaf.setValueOn(openvdb::Coord(0, 1, 1), openvdb::Vec3R(1, 1, 1)); std::istringstream istr(ostr.str(), std::ios_base::binary); // Since the input stream doesn't include a VDB header with file format version info, // tag the input stream explicitly with the current version number. openvdb::io::setCurrentVersion(istr); leaf.readBuffers(istr); EXPECT_TRUE(leaf.getValue(openvdb::Coord(0, 1, 0)) == openvdb::Vec3R(1, 1, 1)); EXPECT_TRUE(leaf.getValue(openvdb::Coord(1, 0, 0)) == openvdb::Vec3R(1, 1, 1)); EXPECT_TRUE(leaf.onVoxelCount() == 2); }
3,726
C++
30.584746
89
0.674987
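A minimal sketch of the LeafNode buffer round trip used throughout TestLeafIO above, using only calls that appear in that file; the float value type, the coordinates, and the function name are arbitrary assumptions.

// Sketch: write a LeafNode's buffers to a string and read them back.
#include <openvdb/openvdb.h>
#include <openvdb/tree/LeafNode.h>
#include <sstream>

void roundTripLeafBuffers()
{
    openvdb::tree::LeafNode<float, 3> leaf(openvdb::Coord(0, 0, 0));
    leaf.setValueOn(openvdb::Coord(0, 1, 0), 1.0f);

    std::ostringstream ostr(std::ios_base::binary);
    leaf.writeBuffers(ostr);

    std::istringstream istr(ostr.str(), std::ios_base::binary);
    // The raw buffer stream has no VDB header, so tag it with the current
    // file-format version before reading, as the tests above do.
    openvdb::io::setCurrentVersion(istr);
    leaf.readBuffers(istr);
}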
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestMerge.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/openvdb.h> #include <openvdb/tools/Merge.h> using namespace openvdb; class TestMerge: public ::testing::Test { }; namespace { auto getTileCount = [](const auto& node) -> Index { Index sum = 0; for (auto iter = node.cbeginValueAll(); iter; ++iter) sum++; return sum; }; auto getActiveTileCount = [](const auto& node) -> Index { Index sum = 0; for (auto iter = node.cbeginValueOn(); iter; ++iter) sum++; return sum; }; auto getInactiveTileCount = [](const auto& node) -> Index { Index sum = 0; for (auto iter = node.cbeginValueOff(); iter; ++iter) sum++; return sum; }; auto getInsideTileCount = [](const auto& node) -> Index { using ValueT = typename std::remove_reference<decltype(node)>::type::ValueType; Index sum = 0; for (auto iter = node.cbeginValueAll(); iter; ++iter) { if (iter.getValue() < zeroVal<ValueT>()) sum++; } return sum; }; auto getOutsideTileCount = [](const auto& node) -> Index { using ValueT = typename std::remove_reference<decltype(node)>::type::ValueType; Index sum = 0; for (auto iter = node.cbeginValueAll(); iter; ++iter) { if (iter.getValue() > zeroVal<ValueT>()) sum++; } return sum; }; auto getChildCount = [](const auto& node) -> Index { return node.childCount(); }; auto hasOnlyInactiveNegativeBackgroundTiles = [](const auto& node) -> bool { if (getActiveTileCount(node) > Index(0)) return false; for (auto iter = node.cbeginValueAll(); iter; ++iter) { if (iter.getValue() != -node.background()) return false; } return true; }; } // namespace TEST_F(TestMerge, testTreeToMerge) { using RootChildNode = FloatTree::RootNodeType::ChildNodeType; using LeafNode = FloatTree::LeafNodeType; { // non-const tree FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().touchLeaf(Coord(8)); EXPECT_EQ(Index(1), grid->tree().leafCount()); tools::TreeToMerge<FloatTree> treeToMerge{grid->tree(), Steal()}; EXPECT_EQ(&grid->constTree().root(), treeToMerge.rootPtr()); // probe root child const RootChildNode* nodePtr = treeToMerge.probeConstNode<RootChildNode>(Coord(8)); EXPECT_TRUE(nodePtr); EXPECT_EQ(grid->constTree().probeConstNode<RootChildNode>(Coord(8)), nodePtr); // probe leaf node const LeafNode* leafNode = treeToMerge.probeConstNode<LeafNode>(Coord(8)); EXPECT_TRUE(leafNode); EXPECT_EQ(grid->constTree().probeConstLeaf(Coord(8)), leafNode); EXPECT_EQ(Index(1), grid->tree().leafCount()); EXPECT_EQ(Index(1), grid->tree().root().childCount()); // steal leaf node std::unique_ptr<LeafNode> leafNodePtr = treeToMerge.stealOrDeepCopyNode<LeafNode>(Coord(8)); EXPECT_TRUE(leafNodePtr); EXPECT_EQ(Index(0), grid->tree().leafCount()); EXPECT_EQ(leafNodePtr->origin(), Coord(8)); EXPECT_EQ(Index(1), grid->tree().root().childCount()); // steal root child grid->tree().touchLeaf(Coord(8)); std::unique_ptr<RootChildNode> node2Ptr = treeToMerge.stealOrDeepCopyNode<RootChildNode>(Coord(8)); EXPECT_TRUE(node2Ptr); EXPECT_EQ(Index(0), grid->tree().root().childCount()); // attempt to add leaf node tile (set value) grid->tree().touchLeaf(Coord(8)); EXPECT_EQ(Index64(0), grid->tree().activeTileCount()); treeToMerge.addTile<LeafNode>(Coord(8), 1.6f, true); // value has not been set EXPECT_EQ(3.0f, grid->tree().probeConstLeaf(Coord(8))->getFirstValue()); // add root child tile treeToMerge.addTile<RootChildNode>(Coord(8), 1.7f, true); EXPECT_EQ(Index64(1), grid->tree().activeTileCount()); // tile in node that does not exist grid->tree().clear(); treeToMerge.addTile<RootChildNode>(Coord(0), 
1.8f, true); EXPECT_EQ(Index64(0), grid->tree().activeTileCount()); } { // const tree FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().touchLeaf(Coord(8)); EXPECT_EQ(Index(1), grid->tree().leafCount()); tools::TreeToMerge<FloatTree> treeToMerge{grid->constTree(), DeepCopy(), /*initialize=*/false}; EXPECT_TRUE(!treeToMerge.hasMask()); treeToMerge.initializeMask(); EXPECT_TRUE(treeToMerge.hasMask()); EXPECT_EQ(&grid->constTree().root(), treeToMerge.rootPtr()); // probe root child const RootChildNode* nodePtr = treeToMerge.probeConstNode<RootChildNode>(Coord(8)); EXPECT_TRUE(nodePtr); EXPECT_EQ(grid->constTree().probeConstNode<RootChildNode>(Coord(8)), nodePtr); // probe leaf node const LeafNode* leafNode = treeToMerge.probeConstNode<LeafNode>(Coord(8)); EXPECT_TRUE(leafNode); EXPECT_EQ(grid->constTree().probeConstLeaf(Coord(8)), leafNode); EXPECT_EQ(Index(1), grid->tree().leafCount()); EXPECT_EQ(Index(1), grid->tree().root().childCount()); { // deep copy leaf node tools::TreeToMerge<FloatTree> treeToMerge2{grid->constTree(), DeepCopy()}; std::unique_ptr<LeafNode> leafNodePtr = treeToMerge2.stealOrDeepCopyNode<LeafNode>(Coord(8)); EXPECT_TRUE(leafNodePtr); EXPECT_EQ(Index(1), grid->tree().leafCount()); // leaf has not been stolen EXPECT_EQ(leafNodePtr->origin(), Coord(8)); EXPECT_EQ(Index(1), grid->tree().root().childCount()); } { // deep copy root child tools::TreeToMerge<FloatTree> treeToMerge2{grid->constTree(), DeepCopy()}; grid->tree().touchLeaf(Coord(8)); std::unique_ptr<RootChildNode> node2Ptr = treeToMerge2.stealOrDeepCopyNode<RootChildNode>(Coord(8)); EXPECT_TRUE(node2Ptr); EXPECT_EQ(Index(1), grid->tree().root().childCount()); } { // add root child tile tools::TreeToMerge<FloatTree> treeToMerge2{grid->constTree(), DeepCopy()}; EXPECT_TRUE(treeToMerge2.probeConstNode<RootChildNode>(Coord(8))); treeToMerge2.addTile<RootChildNode>(Coord(8), 1.7f, true); EXPECT_TRUE(!treeToMerge2.probeConstNode<RootChildNode>(Coord(8))); // tile has been added to mask EXPECT_EQ(Index64(0), grid->tree().activeTileCount()); } // tile in node that does not exist grid->tree().clear(); treeToMerge.addTile<RootChildNode>(Coord(0), 1.8f, true); EXPECT_EQ(Index64(0), grid->tree().activeTileCount()); } { // non-const tree shared pointer { // shared pointer constructor FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().touchLeaf(Coord(8)); tools::TreeToMerge<FloatTree> treeToMerge(grid->treePtr(), Steal()); // verify tree shared ownership EXPECT_TRUE(treeToMerge.treeToSteal()); EXPECT_TRUE(!treeToMerge.treeToDeepCopy()); EXPECT_TRUE(treeToMerge.rootPtr()); EXPECT_TRUE(treeToMerge.probeConstNode<FloatTree::LeafNodeType>(Coord(8))); } // empty tree FloatTree tree; tools::TreeToMerge<FloatTree> treeToMerge(tree, DeepCopy()); EXPECT_TRUE(!treeToMerge.treeToSteal()); EXPECT_TRUE(treeToMerge.treeToDeepCopy()); EXPECT_TRUE(treeToMerge.rootPtr()); EXPECT_TRUE(!treeToMerge.probeConstNode<FloatTree::LeafNodeType>(Coord(8))); { FloatTree::Ptr emptyPtr; EXPECT_THROW(treeToMerge.reset(emptyPtr, Steal()), RuntimeError); FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().touchLeaf(Coord(8)); EXPECT_EQ(Index(1), grid->tree().leafCount()); treeToMerge.reset(grid->treePtr(), Steal()); } // verify tree shared ownership EXPECT_TRUE(treeToMerge.treeToSteal()); EXPECT_TRUE(!treeToMerge.treeToDeepCopy()); EXPECT_TRUE(treeToMerge.rootPtr()); EXPECT_TRUE(treeToMerge.probeConstNode<FloatTree::LeafNodeType>(Coord(8))); // verify tree pointers are updated on reset() const FloatTree tree2; 
tools::TreeToMerge<FloatTree> treeToMerge2(tree2, DeepCopy()); treeToMerge2.initializeMask(); // no-op EXPECT_TRUE(!treeToMerge2.treeToSteal()); EXPECT_TRUE(treeToMerge2.treeToDeepCopy()); EXPECT_EQ(Index(0), treeToMerge2.treeToDeepCopy()->leafCount()); FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().touchLeaf(Coord(8)); treeToMerge2.reset(grid->treePtr(), Steal()); EXPECT_TRUE(treeToMerge2.treeToSteal()); EXPECT_TRUE(!treeToMerge2.treeToDeepCopy()); EXPECT_EQ(Index(1), treeToMerge2.treeToSteal()->leafCount()); } } TEST_F(TestMerge, testCsgUnion) { using RootChildType = FloatTree::RootNodeType::ChildNodeType; using LeafParentType = RootChildType::ChildNodeType; using LeafT = FloatTree::LeafNodeType; { // construction FloatTree tree1; FloatTree tree2; const FloatTree tree3; { // one non-const tree (steal) tools::CsgUnionOp<FloatTree> mergeOp(tree1, Steal()); EXPECT_EQ(size_t(1), mergeOp.size()); } { // one non-const tree (deep-copy) tools::CsgUnionOp<FloatTree> mergeOp(tree1, DeepCopy()); EXPECT_EQ(size_t(1), mergeOp.size()); } { // one const tree (deep-copy) tools::CsgUnionOp<FloatTree> mergeOp(tree2, DeepCopy()); EXPECT_EQ(size_t(1), mergeOp.size()); } { // vector of tree pointers std::vector<FloatTree*> trees{&tree1, &tree2}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); EXPECT_EQ(size_t(2), mergeOp.size()); } { // deque of tree pointers std::deque<FloatTree*> trees{&tree1, &tree2}; tools::CsgUnionOp<FloatTree> mergeOp(trees, DeepCopy()); EXPECT_EQ(size_t(2), mergeOp.size()); } { // vector of TreesToMerge (to mix const and non-const trees) std::vector<tools::TreeToMerge<FloatTree>> trees; trees.emplace_back(tree1, Steal()); trees.emplace_back(tree3, DeepCopy()); // const tree trees.emplace_back(tree2, Steal()); tools::CsgUnionOp<FloatTree> mergeOp(trees); EXPECT_EQ(size_t(3), mergeOp.size()); } { // implicit copy constructor std::vector<FloatTree*> trees{&tree1, &tree2}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tools::CsgUnionOp<FloatTree> mergeOp2(mergeOp); EXPECT_EQ(size_t(2), mergeOp2.size()); } { // implicit assignment operator std::vector<FloatTree*> trees{&tree1, &tree2}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tools::CsgUnionOp<FloatTree> mergeOp2 = mergeOp; EXPECT_EQ(size_t(2), mergeOp2.size()); } } ///////////////////////////////////////////////////////////////////////// { // empty merge trees FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); std::vector<FloatTree*> trees; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); EXPECT_EQ(size_t(0), mergeOp.size()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), root.getTableSize()); } ///////////////////////////////////////////////////////////////////////// { // test one tile or one child { // test one background tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test one background tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); 
grid2->tree().root().addTile(Coord(0, 0, 0), grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test one background tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), -grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(1), getTileCount(root)); EXPECT_EQ(-grid->background(), grid->cbeginValueAll().getValue()); } { // test one background tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addTile(Coord(0, 0, 0), -grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(1), getTileCount(root)); EXPECT_EQ(-grid->background(), grid->cbeginValueAll().getValue()); } { // test one child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid->background(), false)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(1), getChildCount(root)); EXPECT_EQ(grid->background(), root.cbeginChildOn()->getFirstValue()); } { // test one child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), -grid->background(), false)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(1), getChildCount(root)); EXPECT_EQ(-grid->background(), root.cbeginChildOn()->getFirstValue()); } { // test one child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), 1.0, false)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(1), getChildCount(root)); EXPECT_EQ(1.0, root.cbeginChildOn()->getFirstValue()); } { // test one child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), 1.0, true)); FloatGrid::Ptr grid2 = 
createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(1), getChildCount(root)); EXPECT_EQ(1.0, root.cbeginChildOn()->getFirstValue()); } { // test one child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid2->background(), false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(1), getChildCount(root)); EXPECT_EQ(grid->background(), root.cbeginChildOn()->getFirstValue()); } { // test one child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), -grid2->background(), false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(1), getChildCount(root)); EXPECT_EQ(-grid->background(), root.cbeginChildOn()->getFirstValue()); } { // test one child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), 1.0, false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(1), getChildCount(root)); EXPECT_EQ(1.0, root.cbeginChildOn()->getFirstValue()); } } ///////////////////////////////////////////////////////////////////////// { // test two tiles { // test outside background tiles FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); grid2->tree().root().addTile(Coord(0, 0, 0), grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test inside vs outside background tiles FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), -grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); grid2->tree().root().addTile(Coord(0, 0, 0), grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_TRUE(hasOnlyInactiveNegativeBackgroundTiles(root)); EXPECT_EQ(Index(1), getInactiveTileCount(root)); 
EXPECT_EQ(-grid->background(), *root.cbeginValueOff()); } { // test inside vs outside background tiles FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); grid2->tree().root().addTile(Coord(0, 0, 0), -grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_TRUE(hasOnlyInactiveNegativeBackgroundTiles(root)); EXPECT_EQ(Index(1), getInactiveTileCount(root)); EXPECT_EQ(-grid->background(), *root.cbeginValueOff()); } { // test inside background tiles FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), -grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); grid2->tree().root().addTile(Coord(0, 0, 0), -grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_TRUE(hasOnlyInactiveNegativeBackgroundTiles(root)); EXPECT_EQ(Index(1), getInactiveTileCount(root)); EXPECT_EQ(-grid->background(), *root.cbeginValueOff()); } { // test outside background tiles (different background values) FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addTile(Coord(0, 0, 0), grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test inside vs outside background tiles (different background values) FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), -grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addTile(Coord(0, 0, 0), grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_TRUE(hasOnlyInactiveNegativeBackgroundTiles(root)); EXPECT_EQ(Index(1), getInactiveTileCount(root)); EXPECT_EQ(-grid->background(), *root.cbeginValueOff()); } { // test inside vs outside background tiles (different background values) FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addTile(Coord(0, 0, 0), -grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_TRUE(hasOnlyInactiveNegativeBackgroundTiles(root)); EXPECT_EQ(Index(1), 
getInactiveTileCount(root)); EXPECT_EQ(-grid->background(), *root.cbeginValueOff()); } { // test inside background tiles (different background values) FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), -grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addTile(Coord(0, 0, 0), -grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_TRUE(hasOnlyInactiveNegativeBackgroundTiles(root)); EXPECT_EQ(Index(1), getInactiveTileCount(root)); EXPECT_EQ(-grid->background(), *root.cbeginValueOff()); } } ///////////////////////////////////////////////////////////////////////// { // test one tile, one child { // test background tiles vs child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), -grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid2->background(), false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(1), getInactiveTileCount(root)); EXPECT_EQ(-grid->background(), *root.cbeginValueOff()); } { // test background tiles vs child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid2->background(), false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(0), getTileCount(root)); EXPECT_EQ(Index(1), getChildCount(root)); EXPECT_EQ(grid->background(), root.cbeginChildOn()->getFirstValue()); } { // test background tiles vs child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), -grid2->background(), false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(0), getTileCount(root)); EXPECT_EQ(Index(1), getChildCount(root)); EXPECT_EQ(-grid->background(), root.cbeginChildOn()->getFirstValue()); } { // test background tiles vs child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid->background(), false)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addTile(Coord(0, 0, 0), -grid2->background(), false); std::vector<FloatTree*> 
trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(1), getInactiveTileCount(root)); EXPECT_EQ(-grid->background(), *root.cbeginValueOff()); } } ///////////////////////////////////////////////////////////////////////// { // test two children { // test two child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid->background(), false)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid2->background(), true)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(0), getTileCount(root)); EXPECT_EQ(grid->background(), root.cbeginChildOn()->getFirstValue()); EXPECT_EQ(false, root.cbeginChildOn()->isValueOn(0)); } { // test two child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid->background(), true)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid2->background(), false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(0), getTileCount(root)); EXPECT_EQ(grid->background(), root.cbeginChildOn()->getFirstValue()); EXPECT_EQ(true, root.cbeginChildOn()->isValueOn(0)); } { // test two child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), -grid->background(), false)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid2->background(), true)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(0), getTileCount(root)); EXPECT_EQ(-grid->background(), root.cbeginChildOn()->getFirstValue()); EXPECT_EQ(false, root.cbeginChildOn()->isValueOn(0)); } { // test two child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid->background(), true)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), -grid2->background(), false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(0), getTileCount(root)); EXPECT_EQ(-grid->background(), root.cbeginChildOn()->getFirstValue()); EXPECT_EQ(false, root.cbeginChildOn()->isValueOn(0)); } } 
///////////////////////////////////////////////////////////////////////// { // test multiple root node elements { // merge a child node into a grid with an existing child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addChild(new RootChildType(Coord(0, 0, 0), 1.0f, false)); root.addTile(Coord(8192, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), grid2->background(), false); root2.addChild(new RootChildType(Coord(8192, 0, 0), 2.0f, false)); tools::CsgUnionOp<FloatTree> mergeOp{grid2->tree(), Steal()}; tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(2), getChildCount(root)); EXPECT_TRUE(root.cbeginChildOn()->cbeginValueAll()); EXPECT_EQ(1.0f, root.cbeginChildOn()->getFirstValue()); EXPECT_EQ(2.0f, (++root.cbeginChildOn())->getFirstValue()); } { // merge a child node into a grid with an existing child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addTile(Coord(0, 0, 0), grid->background(), false); root.addChild(new RootChildType(Coord(8192, 0, 0), 2.0f, false)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); auto& root2 = grid2->tree().root(); root2.addChild(new RootChildType(Coord(0, 0, 0), 1.0f, false)); root2.addTile(Coord(8192, 0, 0), grid2->background(), false); tools::CsgUnionOp<FloatTree> mergeOp{grid2->tree(), Steal()}; tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(2), getChildCount(root)); EXPECT_TRUE(root.cbeginChildOn()->cbeginValueAll()); EXPECT_EQ(1.0f, root.cbeginChildOn()->getFirstValue()); EXPECT_EQ(2.0f, (++root.cbeginChildOn())->getFirstValue()); } { // merge background tiles and child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addChild(new RootChildType(Coord(0, 0, 0), 1.0f, false)); root.addTile(Coord(8192, 0, 0), -grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), -grid2->background(), false); root2.addChild(new RootChildType(Coord(8192, 0, 0), 2.0f, false)); tools::CsgUnionOp<FloatTree> mergeOp{grid2->tree(), Steal()}; tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(2), getTileCount(root)); EXPECT_EQ(-grid->background(), *root.cbeginValueOff()); EXPECT_EQ(-grid->background(), *(++root.cbeginValueOff())); } } ///////////////////////////////////////////////////////////////////////// { // test merging internal node children { // merge two internal nodes into a grid with an inside tile and an outside tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); auto rootChild = std::make_unique<RootChildType>(Coord(0, 0, 0), -123.0f, false); rootChild->addTile(0, grid->background(), false); root.addChild(rootChild.release()); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); auto& root2 = grid2->tree().root(); auto rootChild2 = std::make_unique<RootChildType>(Coord(0, 0, 0), 55.0f, false); rootChild2->addChild(new LeafParentType(Coord(0, 0, 0), 29.0f, false)); rootChild2->addChild(new LeafParentType(Coord(0, 0, 128), 31.0f, 
                false));
            rootChild2->addTile(2, -grid->background(), false);
            root2.addChild(rootChild2.release());
            tools::CsgUnionOp<FloatTree> mergeOp{grid2->tree(), Steal()};
            tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree());
            nodeManager.foreachTopDown(mergeOp);
            EXPECT_EQ(Index(1), getChildCount(*root.cbeginChildOn()));
            EXPECT_EQ(Index(0), getOutsideTileCount(*root.cbeginChildOn()));
            EXPECT_TRUE(root.cbeginChildOn()->isChildMaskOn(0));
            EXPECT_TRUE(!root.cbeginChildOn()->isChildMaskOn(1));
            EXPECT_EQ(29.0f, root.cbeginChildOn()->cbeginChildOn()->getFirstValue());
            EXPECT_EQ(-123.0f, root.cbeginChildOn()->cbeginValueAll().getValue());
        }
        { // merge two internal nodes into a grid with an inside tile and an outside tile
            FloatGrid::Ptr grid = createLevelSet<FloatGrid>();
            auto& root = grid->tree().root();
            auto rootChild = std::make_unique<RootChildType>(Coord(0, 0, 0), 123.0f, false);
            root.addChild(rootChild.release());
            FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5);
            auto& root2 = grid2->tree().root();
            auto rootChild2 = std::make_unique<RootChildType>(Coord(0, 0, 0), -55.0f, false);
            rootChild2->addChild(new LeafParentType(Coord(0, 0, 0), 29.0f, false));
            rootChild2->addChild(new LeafParentType(Coord(0, 0, 128), 31.0f, false));
            rootChild2->addTile(2, -140.0f, false);
            rootChild2->addTile(3, grid2->background(), false);
            root2.addChild(rootChild2.release());
            tools::CsgUnionOp<FloatTree> mergeOp{grid2->tree(), Steal()};
            tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree());
            nodeManager.foreachTopDown(mergeOp);
            EXPECT_EQ(Index(2), getChildCount(*root.cbeginChildOn()));
            EXPECT_EQ(Index(1), getOutsideTileCount(*root.cbeginChildOn()));
            EXPECT_TRUE(root.cbeginChildOn()->isChildMaskOn(0));
            EXPECT_TRUE(root.cbeginChildOn()->isChildMaskOn(1));
            EXPECT_TRUE(!root.cbeginChildOn()->isChildMaskOn(2));
            EXPECT_TRUE(!root.cbeginChildOn()->isChildMaskOn(3));
            EXPECT_EQ(29.0f, root.cbeginChildOn()->cbeginChildOn()->getFirstValue());
            EXPECT_EQ(-grid->background(), root.cbeginChildOn()->cbeginValueAll().getItem(2));
            EXPECT_EQ(123.0f, root.cbeginChildOn()->cbeginValueAll().getItem(3));
        }
    }

    /////////////////////////////////////////////////////////////////////////

    { // test merging leaf nodes
        { // merge a leaf node into an empty grid
            FloatGrid::Ptr grid = createLevelSet<FloatGrid>();
            FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5);
            grid2->tree().touchLeaf(Coord(0, 0, 0));
            tools::CsgUnionOp<FloatTree> mergeOp{grid2->tree(), Steal()};
            tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree());
            nodeManager.foreachTopDown(mergeOp);
            EXPECT_EQ(Index32(1), grid->tree().leafCount());
        }
        { // merge a leaf node into a grid with an inside tile
            FloatGrid::Ptr grid = createLevelSet<FloatGrid>();
            grid->tree().root().addTile(Coord(0, 0, 0), -10.0f, false);
            FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5);
            grid2->tree().touchLeaf(Coord(0, 0, 0));
            tools::CsgUnionOp<FloatTree> mergeOp{grid2->tree(), Steal()};
            tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree());
            nodeManager.foreachTopDown(mergeOp);
            const auto& root = grid->tree().root();
            EXPECT_EQ(Index(1), getInactiveTileCount(root));
            EXPECT_EQ(-grid->background(), *root.cbeginValueOff());
        }
        { // merge an inside tile into a grid with a leaf node
            FloatGrid::Ptr grid = createLevelSet<FloatGrid>();
            grid->tree().touchLeaf(Coord(0, 0, 0));
            FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5);
            grid2->tree().root().addTile(Coord(0, 0, 0), -10.0f, false);
            tools::CsgUnionOp<FloatTree> mergeOp{grid2->tree(), Steal()};
            tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree());
            nodeManager.foreachTopDown(mergeOp);
            const auto& root = grid->tree().root();
            EXPECT_EQ(Index(1), getInactiveTileCount(root));
            EXPECT_EQ(-grid->background(), *root.cbeginValueOff());
        }
        { // merge a leaf node into a grid with an internal node background tile
            FloatGrid::Ptr grid = createLevelSet<FloatGrid>();
            auto rootChild = std::make_unique<RootChildType>(Coord(0, 0, 0), grid->background(), false);
            grid->tree().root().addChild(rootChild.release());
            FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5);
            auto* leaf = grid2->tree().touchLeaf(Coord(0, 0, 0));
            leaf->setValueOnly(11, grid2->background());
            leaf->setValueOnly(12, -grid2->background());
            tools::CsgUnionOp<FloatTree> mergeOp{grid2->tree(), Steal()};
            tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree());
            nodeManager.foreachTopDown(mergeOp);
            EXPECT_EQ(Index32(1), grid->tree().leafCount());
            EXPECT_EQ(Index32(0), grid2->tree().leafCount());
            // test background values are remapped
            const auto* testLeaf = grid->tree().probeConstLeaf(Coord(0, 0, 0));
            EXPECT_EQ(grid->background(), testLeaf->getValue(11));
            EXPECT_EQ(-grid->background(), testLeaf->getValue(12));
        }
        { // merge a leaf node into a grid with a partially constructed leaf node
            FloatGrid::Ptr grid = createLevelSet<FloatGrid>();
            FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>();
            grid->tree().addLeaf(new LeafT(PartialCreate(), Coord(0, 0, 0)));
            auto* leaf = grid2->tree().touchLeaf(Coord(0, 0, 0));
            leaf->setValueOnly(10, -2.3f);
            tools::CsgUnionOp<FloatTree> mergeOp{grid2->tree(), Steal()};
            tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree());
            nodeManager.foreachTopDown(mergeOp);
            const auto* testLeaf = grid->tree().probeConstLeaf(Coord(0, 0, 0));
            EXPECT_EQ(-2.3f, testLeaf->getValue(10));
        }
        { // merge three leaf nodes from different grids
            FloatGrid::Ptr grid = createLevelSet<FloatGrid>();
            FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5);
            FloatGrid::Ptr grid3 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/7);
            auto* leaf = grid->tree().touchLeaf(Coord(0, 0, 0));
            auto* leaf2 = grid2->tree().touchLeaf(Coord(0, 0, 0));
            auto* leaf3 = grid3->tree().touchLeaf(Coord(0, 0, 0));
            // active state from the voxel with the minimum value preserved
            leaf->setValueOnly(5, 4.0f);
            leaf2->setValueOnly(5, 2.0f);
            leaf2->setValueOn(5);
            leaf3->setValueOnly(5, 3.0f);
            leaf->setValueOnly(7, 2.0f);
            leaf->setValueOn(7);
            leaf2->setValueOnly(7, 3.0f);
            leaf3->setValueOnly(7, 4.0f);
            leaf->setValueOnly(9, 4.0f);
            leaf->setValueOn(9);
            leaf2->setValueOnly(9, 3.0f);
            leaf3->setValueOnly(9, 2.0f);
            std::vector<FloatTree*> trees{&grid2->tree(), &grid3->tree()};
            tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal());
            tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree());
            nodeManager.foreachTopDown(mergeOp);
            const auto* testLeaf = grid->tree().probeConstLeaf(Coord(0, 0, 0));
            EXPECT_EQ(2.0f, testLeaf->getValue(5));
            EXPECT_TRUE(testLeaf->isValueOn(5));
            EXPECT_EQ(2.0f, testLeaf->getValue(7));
            EXPECT_TRUE(testLeaf->isValueOn(7));
            EXPECT_EQ(2.0f, testLeaf->getValue(9));
            EXPECT_TRUE(!testLeaf->isValueOn(9));
        }
        { // merge a leaf node into an empty grid from a const grid
            FloatGrid::Ptr grid = createLevelSet<FloatGrid>();
            grid->tree().root().addTile(Coord(0, 0, 0), 1.0f, false);
            FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>();
            grid2->tree().touchLeaf(Coord(0, 0, 0));
            EXPECT_EQ(Index32(0), grid->tree().leafCount());
            EXPECT_EQ(Index32(1), grid2->tree().leafCount());
            // merge from a const tree
            std::vector<tools::TreeToMerge<FloatTree>> treesToMerge;
            treesToMerge.emplace_back(grid2->constTree(), DeepCopy());
            tools::CsgUnionOp<FloatTree> mergeOp(treesToMerge);
            tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree());
            nodeManager.foreachTopDown(mergeOp);
            EXPECT_EQ(Index32(1), grid->tree().leafCount());
            // leaf has been deep copied not stolen
            EXPECT_EQ(Index32(1), grid2->tree().leafCount());
        }
    }

    /////////////////////////////////////////////////////////////////////////

    { // merge multiple grids
        { // merge two background root tiles from two different grids
            FloatGrid::Ptr grid = createLevelSet<FloatGrid>();
            auto& root = grid->tree().root();
            FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5);
            auto& root2 = grid2->tree().root();
            root2.addTile(Coord(0, 0, 0), /*background=*/grid2->background(), false);
            root2.addTile(Coord(8192, 0, 0), /*background=*/-grid2->background(), false);
            FloatGrid::Ptr grid3 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/7);
            auto& root3 = grid3->tree().root();
            root3.addTile(Coord(0, 0, 0), /*background=*/-grid3->background(), false);
            root3.addTile(Coord(8192, 0, 0), /*background=*/grid3->background(), false);
            std::vector<FloatTree*> trees{&grid2->tree(), &grid3->tree()};
            tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal());
            tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree());
            nodeManager.foreachTopDown(mergeOp);
            EXPECT_EQ(Index(2), getTileCount(root));
            EXPECT_EQ(-grid->background(), root.cbeginValueAll().getValue());
            EXPECT_EQ(-grid->background(), (++root.cbeginValueAll()).getValue());
        }
        { // merge two inside root tiles from two different grids
            FloatGrid::Ptr grid = createLevelSet<FloatGrid>();
            auto& root = grid->tree().root();
            FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5);
            auto& root2 = grid2->tree().root();
            root2.addTile(Coord(0, 0, 0), /*background=*/-10.0f, false);
            root2.addTile(Coord(8192, 0, 0), /*background=*/grid2->background(), false);
            FloatGrid::Ptr grid3 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/7);
            auto& root3 = grid3->tree().root();
            root3.addTile(Coord(0, 0, 0), /*background=*/grid3->background(), false);
            root3.addTile(Coord(8192, 0, 0), /*background=*/-11.0f, false);
            std::vector<FloatTree*> trees{&grid2->tree(), &grid3->tree()};
            tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal());
            tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree());
            nodeManager.foreachTopDown(mergeOp);
            EXPECT_EQ(Index(2), getTileCount(root));
            EXPECT_EQ(-grid->background(), root.cbeginValueAll().getValue());
            EXPECT_EQ(-grid->background(), (++root.cbeginValueAll()).getValue());
        }
        { // merge two active, inside root tiles from two different grids
            FloatGrid::Ptr grid = createLevelSet<FloatGrid>();
            auto& root = grid->tree().root();
            root.addTile(Coord(0, 0, 0), /*background=*/grid->background(), false);
            root.addTile(Coord(8192, 0, 0), /*background=*/grid->background(), false);
            FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5);
            auto& root2 = grid2->tree().root();
            root2.addTile(Coord(0, 0, 0), /*background=*/-10.0f, true);
            root2.addTile(Coord(8192, 0, 0), /*background=*/grid2->background(), false);
            FloatGrid::Ptr grid3 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/7);
            auto& root3 = grid3->tree().root();
root3.addTile(Coord(0, 0, 0), /*background=*/grid3->background(), false); root3.addTile(Coord(8192, 0, 0), /*background=*/-11.0f, true); std::vector<FloatTree*> trees{&grid2->tree(), &grid3->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(2), getTileCount(root)); EXPECT_EQ(-grid->background(), root.cbeginValueAll().getValue()); EXPECT_EQ(-grid->background(), (++root.cbeginValueAll()).getValue()); } { // merge three root tiles, one of which is a background tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addTile(Coord(0, 0, 0), grid->background(), true); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), -grid2->background(), true); FloatGrid::Ptr grid3 = createLevelSet<FloatGrid>(); auto& root3 = grid3->tree().root(); root3.addTile(Coord(0, 0, 0), -grid3->background(), false); std::vector<FloatTree*> trees{&grid2->tree(), &grid3->tree()}; tools::CsgUnionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(1), getTileCount(root)); EXPECT_EQ(-grid->background(), root.cbeginValueOn().getValue()); } } } TEST_F(TestMerge, testCsgIntersection) { using RootChildType = FloatTree::RootNodeType::ChildNodeType; using LeafParentType = RootChildType::ChildNodeType; using LeafT = FloatTree::LeafNodeType; { // construction FloatTree tree1; FloatTree tree2; const FloatTree tree3; { // one non-const tree (steal) tools::CsgIntersectionOp<FloatTree> mergeOp(tree1, Steal()); EXPECT_EQ(size_t(1), mergeOp.size()); } { // one non-const tree (deep-copy) tools::CsgIntersectionOp<FloatTree> mergeOp(tree1, DeepCopy()); EXPECT_EQ(size_t(1), mergeOp.size()); } { // one const tree (deep-copy) tools::CsgIntersectionOp<FloatTree> mergeOp(tree2, DeepCopy()); EXPECT_EQ(size_t(1), mergeOp.size()); } { // vector of tree pointers std::vector<FloatTree*> trees{&tree1, &tree2}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); EXPECT_EQ(size_t(2), mergeOp.size()); } { // deque of tree pointers std::deque<FloatTree*> trees{&tree1, &tree2}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); EXPECT_EQ(size_t(2), mergeOp.size()); } { // vector of TreesToMerge (to mix const and non-const trees) std::vector<tools::TreeToMerge<FloatTree>> trees; trees.emplace_back(tree1, Steal()); trees.emplace_back(tree3, DeepCopy()); // const tree trees.emplace_back(tree2, Steal()); tools::CsgIntersectionOp<FloatTree> mergeOp(trees); EXPECT_EQ(size_t(3), mergeOp.size()); } { // implicit copy constructor std::vector<FloatTree*> trees{&tree1, &tree2}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tools::CsgIntersectionOp<FloatTree> mergeOp2(mergeOp); EXPECT_EQ(size_t(2), mergeOp2.size()); } { // implicit assignment operator std::vector<FloatTree*> trees{&tree1, &tree2}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tools::CsgIntersectionOp<FloatTree> mergeOp2 = mergeOp; EXPECT_EQ(size_t(2), mergeOp2.size()); } } ///////////////////////////////////////////////////////////////////////// { // empty merge trees FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); std::vector<FloatTree*> trees; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); EXPECT_EQ(size_t(0), mergeOp.size()); tree::DynamicNodeManager<FloatTree, 3> 
nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), root.getTableSize()); } ///////////////////////////////////////////////////////////////////////// { // test one tile or one child { // test one background tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test one background tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addTile(Coord(0, 0, 0), grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test one background tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), -grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test one background tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addTile(Coord(0, 0, 0), -grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test one child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid->background(), false)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test one child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), -grid->background(), false)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test one child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), 1.0, false)); FloatGrid::Ptr grid2 = 
createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test one child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), 1.0, true)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test one child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid2->background(), false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test one child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), -grid2->background(), false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test one child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), 1.0, false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } } ///////////////////////////////////////////////////////////////////////// { // test two tiles { // test outside background tiles FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); grid2->tree().root().addTile(Coord(0, 0, 0), grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test inside vs outside background tiles FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), -grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); grid2->tree().root().addTile(Coord(0, 0, 0), grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); 
nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test inside vs outside background tiles FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); grid2->tree().root().addTile(Coord(0, 0, 0), -grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test inside background tiles FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), -grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); grid2->tree().root().addTile(Coord(0, 0, 0), -grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_TRUE(hasOnlyInactiveNegativeBackgroundTiles(root)); EXPECT_EQ(Index(1), getInactiveTileCount(root)); EXPECT_EQ(-grid->background(), *root.cbeginValueOff()); } { // test outside background tiles (different background values) FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addTile(Coord(0, 0, 0), grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test inside vs outside background tiles (different background values) FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), -grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addTile(Coord(0, 0, 0), grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test inside vs outside background tiles (different background values) FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addTile(Coord(0, 0, 0), -grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test inside background tiles (different background values) FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), -grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); 
grid2->tree().root().addTile(Coord(0, 0, 0), -grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_TRUE(hasOnlyInactiveNegativeBackgroundTiles(root)); EXPECT_EQ(Index(1), getInactiveTileCount(root)); EXPECT_EQ(-grid->background(), *root.cbeginValueOff()); } } ///////////////////////////////////////////////////////////////////////// { // test one tile, one child { // test background tiles vs child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid2->background(), false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test background tiles vs child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), -grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid2->background(), false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(0), getTileCount(root)); EXPECT_EQ(grid->background(), root.cbeginChildOn()->getFirstValue()); } { // test background tiles vs child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), -grid2->background(), false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // test background tiles vs child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), -grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), -grid2->background(), false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(0), getTileCount(root)); EXPECT_EQ(-grid->background(), root.cbeginChildOn()->getFirstValue()); } { // test background tiles vs child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid->background(), false)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); 
grid2->tree().root().addTile(Coord(0, 0, 0), grid2->background(), false); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } } ///////////////////////////////////////////////////////////////////////// { // test two children { // test two child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid->background(), false)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid2->background(), true)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(0), getTileCount(root)); EXPECT_EQ(grid->background(), root.cbeginChildOn()->getFirstValue()); EXPECT_EQ(false, root.cbeginChildOn()->isValueOn(0)); } { // test two child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid->background(), true)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid2->background(), false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(0), getTileCount(root)); EXPECT_EQ(grid->background(), root.cbeginChildOn()->getFirstValue()); EXPECT_EQ(true, root.cbeginChildOn()->isValueOn(0)); } { // test two child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), -grid->background(), false)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid2->background(), true)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(0), getTileCount(root)); EXPECT_EQ(grid->background(), root.cbeginChildOn()->getFirstValue()); EXPECT_EQ(true, root.cbeginChildOn()->isValueOn(0)); } { // test two child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addChild(new RootChildType(Coord(0, 0, 0), grid->background(), true)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addChild(new RootChildType(Coord(0, 0, 0), -grid2->background(), false)); std::vector<FloatTree*> trees{&grid2->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto& root = grid->tree().root(); EXPECT_EQ(Index(0), getTileCount(root)); EXPECT_EQ(grid->background(), root.cbeginChildOn()->getFirstValue()); EXPECT_EQ(true, root.cbeginChildOn()->isValueOn(0)); } } 
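    // Note on the intersection expectations in this test: level-set CSG intersection
    // keeps the maximum value at each location, so outside/background values dominate
    // and any branch left entirely outside is pruned away, which is why many of these
    // cases expect an empty root table.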
///////////////////////////////////////////////////////////////////////// { // test multiple root node elements { // merge a child node into a grid with an existing child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addChild(new RootChildType(Coord(0, 0, 0), 1.0f, false)); root.addTile(Coord(8192, 0, 0), -grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), -grid2->background(), false); root2.addChild(new RootChildType(Coord(8192, 0, 0), 2.0f, false)); tools::CsgIntersectionOp<FloatTree> mergeOp{grid2->tree(), Steal()}; tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(2), getChildCount(root)); EXPECT_TRUE(root.cbeginChildOn()->cbeginValueAll()); EXPECT_EQ(1.0f, root.cbeginChildOn()->getFirstValue()); EXPECT_EQ(2.0f, (++root.cbeginChildOn())->getFirstValue()); } { // merge a child node into a grid with an existing child node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addTile(Coord(0, 0, 0), -grid->background(), false); root.addChild(new RootChildType(Coord(8192, 0, 0), 2.0f, false)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); auto& root2 = grid2->tree().root(); root2.addChild(new RootChildType(Coord(0, 0, 0), 1.0f, false)); root2.addTile(Coord(8192, 0, 0), -grid2->background(), false); tools::CsgIntersectionOp<FloatTree> mergeOp{grid2->tree(), Steal()}; tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(2), getChildCount(root)); EXPECT_TRUE(root.cbeginChildOn()->cbeginValueAll()); EXPECT_EQ(1.0f, root.cbeginChildOn()->getFirstValue()); EXPECT_EQ(2.0f, (++root.cbeginChildOn())->getFirstValue()); } { // merge background tiles and child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addChild(new RootChildType(Coord(0, 0, 0), 1.0f, false)); root.addTile(Coord(8192, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), grid2->background(), false); root2.addChild(new RootChildType(Coord(8192, 0, 0), 2.0f, false)); tools::CsgIntersectionOp<FloatTree> mergeOp{grid2->tree(), Steal()}; tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), getTileCount(root)); } } ///////////////////////////////////////////////////////////////////////// { // test merging internal node children { // merge two internal nodes into a grid with an inside tile and an outside tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); auto rootChild = std::make_unique<RootChildType>(Coord(0, 0, 0), 123.0f, false); rootChild->addTile(0, -grid->background(), false); root.addChild(rootChild.release()); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); auto& root2 = grid2->tree().root(); auto rootChild2 = std::make_unique<RootChildType>(Coord(0, 0, 0), 55.0f, false); rootChild2->addChild(new LeafParentType(Coord(0, 0, 0), 29.0f, false)); rootChild2->addChild(new LeafParentType(Coord(0, 0, 128), 31.0f, false)); rootChild2->addTile(2, -grid->background(), false); 
root2.addChild(rootChild2.release()); tools::CsgIntersectionOp<FloatTree> mergeOp{grid2->tree(), Steal()}; tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(1), getChildCount(*root.cbeginChildOn())); EXPECT_EQ(Index(0), getInsideTileCount(*root.cbeginChildOn())); EXPECT_TRUE(root.cbeginChildOn()->isChildMaskOn(0)); EXPECT_TRUE(!root.cbeginChildOn()->isChildMaskOn(1)); EXPECT_EQ(29.0f, root.cbeginChildOn()->cbeginChildOn()->getFirstValue()); EXPECT_EQ(123.0f, root.cbeginChildOn()->cbeginValueAll().getValue()); } { // merge two internal nodes into a grid with an inside tile and an outside tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); auto rootChild = std::make_unique<RootChildType>(Coord(0, 0, 0), -123.0f, false); root.addChild(rootChild.release()); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); auto& root2 = grid2->tree().root(); auto rootChild2 = std::make_unique<RootChildType>(Coord(0, 0, 0), 55.0f, false); rootChild2->addChild(new LeafParentType(Coord(0, 0, 0), 29.0f, false)); rootChild2->addChild(new LeafParentType(Coord(0, 0, 128), 31.0f, false)); rootChild2->addTile(2, 140.0f, false); rootChild2->addTile(3, -grid2->background(), false); root2.addChild(rootChild2.release()); tools::CsgIntersectionOp<FloatTree> mergeOp{grid2->tree(), Steal()}; tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(2), getChildCount(*root.cbeginChildOn())); EXPECT_EQ(Index(1), getInsideTileCount(*root.cbeginChildOn())); EXPECT_TRUE(root.cbeginChildOn()->isChildMaskOn(0)); EXPECT_TRUE(root.cbeginChildOn()->isChildMaskOn(1)); EXPECT_TRUE(!root.cbeginChildOn()->isChildMaskOn(2)); EXPECT_TRUE(!root.cbeginChildOn()->isChildMaskOn(3)); EXPECT_EQ(29.0f, root.cbeginChildOn()->cbeginChildOn()->getFirstValue()); EXPECT_EQ(grid->background(), root.cbeginChildOn()->cbeginValueAll().getItem(2)); EXPECT_EQ(-123.0f, root.cbeginChildOn()->cbeginValueAll().getItem(3)); } } ///////////////////////////////////////////////////////////////////////// { // test merging leaf nodes { // merge a leaf node into an empty grid FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().touchLeaf(Coord(0, 0, 0)); tools::CsgIntersectionOp<FloatTree> mergeOp{grid2->tree(), Steal()}; tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index32(0), grid->tree().leafCount()); } { // merge a leaf node into a grid with a background tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().touchLeaf(Coord(0, 0, 0)); tools::CsgIntersectionOp<FloatTree> mergeOp{grid2->tree(), Steal()}; tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index32(0), grid->tree().leafCount()); } { // merge a leaf node into a grid with an outside tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), 10.0f, false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().touchLeaf(Coord(0, 0, 0)); tools::CsgIntersectionOp<FloatTree> mergeOp{grid2->tree(), Steal()}; 
tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // merge a leaf node into a grid with an outside tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().touchLeaf(Coord(0, 0, 0)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); grid2->tree().root().addTile(Coord(0, 0, 0), 10.0f, false); tools::CsgIntersectionOp<FloatTree> mergeOp{grid2->tree(), Steal()}; tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } { // merge a leaf node into a grid with an internal node inside tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto rootChild = std::make_unique<RootChildType>(Coord(0, 0, 0), -grid->background(), false); grid->tree().root().addChild(rootChild.release()); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); auto* leaf = grid2->tree().touchLeaf(Coord(0, 0, 0)); leaf->setValueOnly(11, grid2->background()); leaf->setValueOnly(12, -grid2->background()); tools::CsgIntersectionOp<FloatTree> mergeOp{grid2->tree(), Steal()}; tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index32(1), grid->tree().leafCount()); EXPECT_EQ(Index32(0), grid2->tree().leafCount()); // test background values are remapped const auto* testLeaf = grid->tree().probeConstLeaf(Coord(0, 0, 0)); EXPECT_EQ(grid->background(), testLeaf->getValue(11)); EXPECT_EQ(-grid->background(), testLeaf->getValue(12)); } { // merge a leaf node into a grid with a partially constructed leaf node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), -grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); grid->tree().addLeaf(new LeafT(PartialCreate(), Coord(0, 0, 0))); auto* leaf = grid2->tree().touchLeaf(Coord(0, 0, 0)); leaf->setValueOnly(10, 6.4f); tools::CsgIntersectionOp<FloatTree> mergeOp{grid2->tree(), Steal()}; tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto* testLeaf = grid->tree().probeConstLeaf(Coord(0, 0, 0)); EXPECT_EQ(6.4f, testLeaf->getValue(10)); } { // merge three leaf nodes from different grids FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); FloatGrid::Ptr grid3 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/7); auto* leaf = grid->tree().touchLeaf(Coord(0, 0, 0)); auto* leaf2 = grid2->tree().touchLeaf(Coord(0, 0, 0)); auto* leaf3 = grid3->tree().touchLeaf(Coord(0, 0, 0)); // active state from the voxel with the maximum value preserved leaf->setValueOnly(5, 4.0f); leaf2->setValueOnly(5, 2.0f); leaf2->setValueOn(5); leaf3->setValueOnly(5, 3.0f); leaf->setValueOnly(7, 2.0f); leaf->setValueOn(7); leaf2->setValueOnly(7, 3.0f); leaf3->setValueOnly(7, 4.0f); leaf->setValueOnly(9, 4.0f); leaf->setValueOn(9); leaf2->setValueOnly(9, 3.0f); leaf3->setValueOnly(9, 2.0f); std::vector<FloatTree*> trees{&grid2->tree(), &grid3->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto* testLeaf = grid->tree().probeConstLeaf(Coord(0, 0, 0)); EXPECT_EQ(4.0f, testLeaf->getValue(5)); 
EXPECT_TRUE(!testLeaf->isValueOn(5)); EXPECT_EQ(4.0f, testLeaf->getValue(7)); EXPECT_TRUE(!testLeaf->isValueOn(7)); EXPECT_EQ(4.0f, testLeaf->getValue(9)); EXPECT_TRUE(testLeaf->isValueOn(9)); } { // merge a leaf node into an empty grid from a const grid FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().root().addTile(Coord(0, 0, 0), -1.0f, false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); grid2->tree().touchLeaf(Coord(0, 0, 0)); EXPECT_EQ(Index32(0), grid->tree().leafCount()); EXPECT_EQ(Index32(1), grid2->tree().leafCount()); // merge from a const tree std::vector<tools::TreeToMerge<FloatTree>> treesToMerge; treesToMerge.emplace_back(grid2->constTree(), DeepCopy()); tools::CsgIntersectionOp<FloatTree> mergeOp(treesToMerge); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index32(1), grid->tree().leafCount()); // leaf has been deep copied not stolen EXPECT_EQ(Index32(1), grid2->tree().leafCount()); } { // merge three leaf nodes from four grids FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid3 = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid4 = createLevelSet<FloatGrid>(); auto* leaf = grid->tree().touchLeaf(Coord(0, 0, 0)); auto* leaf2 = grid2->tree().touchLeaf(Coord(0, 0, 0)); auto* leaf3 = grid3->tree().touchLeaf(Coord(0, 0, 0)); // active state from the voxel with the maximum value preserved leaf->setValueOnly(5, 4.0f); leaf2->setValueOnly(5, 2.0f); leaf2->setValueOn(5); leaf3->setValueOnly(5, 3.0f); leaf->setValueOnly(7, 2.0f); leaf->setValueOn(7); leaf2->setValueOnly(7, 3.0f); leaf3->setValueOnly(7, 4.0f); leaf->setValueOnly(9, 4.0f); leaf->setValueOn(9); leaf2->setValueOnly(9, 3.0f); leaf3->setValueOnly(9, 2.0f); std::vector<FloatTree*> trees{&grid2->tree(), &grid3->tree(), &grid4->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), grid->tree().root().getTableSize()); } } ///////////////////////////////////////////////////////////////////////// { // merge multiple grids { // merge two background root tiles from two different grids FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addTile(Coord(0, 0, 0), /*background=*/-grid->background(), false); root.addTile(Coord(8192, 0, 0), /*background=*/-grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), /*background=*/grid2->background(), false); root2.addTile(Coord(8192, 0, 0), /*background=*/-grid2->background(), false); FloatGrid::Ptr grid3 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/7); auto& root3 = grid3->tree().root(); root3.addTile(Coord(0, 0, 0), /*background=*/-grid3->background(), false); root3.addTile(Coord(8192, 0, 0), /*background=*/grid3->background(), false); std::vector<FloatTree*> trees{&grid2->tree(), &grid3->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), root.getTableSize()); } { // merge two outside root tiles from two different grids FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addTile(Coord(0, 0, 0), /*background=*/-grid->background(), 
false); root.addTile(Coord(8192, 0, 0), /*background=*/-grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), /*background=*/10.0f, false); root2.addTile(Coord(8192, 0, 0), /*background=*/-grid2->background(), false); FloatGrid::Ptr grid3 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/7); auto& root3 = grid3->tree().root(); root3.addTile(Coord(0, 0, 0), /*background=*/-grid3->background(), false); root3.addTile(Coord(8192, 0, 0), /*background=*/11.0f, false); std::vector<FloatTree*> trees{&grid2->tree(), &grid3->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), root.getTableSize()); } { // merge two active, outside root tiles from two different grids FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addTile(Coord(0, 0, 0), /*background=*/-grid->background(), false); root.addTile(Coord(8192, 0, 0), /*background=*/-grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/5); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), /*background=*/10.0f, true); root2.addTile(Coord(8192, 0, 0), /*background=*/-grid2->background(), false); FloatGrid::Ptr grid3 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*narrowBandWidth=*/7); auto& root3 = grid3->tree().root(); root3.addTile(Coord(0, 0, 0), /*background=*/-grid3->background(), false); root3.addTile(Coord(8192, 0, 0), /*background=*/11.0f, true); std::vector<FloatTree*> trees{&grid2->tree(), &grid3->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(2), getTileCount(root)); EXPECT_EQ(grid->background(), root.cbeginValueAll().getValue()); EXPECT_EQ(grid->background(), (++root.cbeginValueAll()).getValue()); } { // merge three root tiles, one of which is a background tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addTile(Coord(0, 0, 0), -grid->background(), true); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), grid2->background(), true); FloatGrid::Ptr grid3 = createLevelSet<FloatGrid>(); auto& root3 = grid3->tree().root(); root3.addTile(Coord(0, 0, 0), grid3->background(), false); std::vector<FloatTree*> trees{&grid2->tree(), &grid3->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(1), root.getTableSize()); EXPECT_EQ(Index(1), getTileCount(root)); } { // merge three root tiles, one of which is a background tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addTile(Coord(0, 0, 0), -grid->background(), true); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), grid2->background(), false); FloatGrid::Ptr grid3 = createLevelSet<FloatGrid>(); auto& root3 = grid3->tree().root(); root3.addTile(Coord(0, 0, 0), grid3->background(), true); std::vector<FloatTree*> trees{&grid2->tree(), &grid3->tree()}; tools::CsgIntersectionOp<FloatTree> mergeOp(trees, Steal()); 
tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), root.getTableSize()); } } } TEST_F(TestMerge, testCsgDifference) { using RootChildType = FloatTree::RootNodeType::ChildNodeType; using LeafParentType = RootChildType::ChildNodeType; using LeafT = FloatTree::LeafNodeType; { // construction FloatTree tree1; const FloatTree tree2; { // one non-const tree (steal) tools::CsgDifferenceOp<FloatTree> mergeOp(tree1, Steal()); EXPECT_EQ(size_t(1), mergeOp.size()); } { // one non-const tree (deep-copy) tools::CsgDifferenceOp<FloatTree> mergeOp(tree1, DeepCopy()); EXPECT_EQ(size_t(1), mergeOp.size()); } { // one const tree (deep-copy) tools::CsgDifferenceOp<FloatTree> mergeOp(tree2, DeepCopy()); EXPECT_EQ(size_t(1), mergeOp.size()); } { // one non-const tree wrapped in TreeToMerge tools::TreeToMerge<FloatTree> tree3(tree1, Steal()); tools::CsgDifferenceOp<FloatTree> mergeOp(tree3); EXPECT_EQ(size_t(1), mergeOp.size()); } { // one const tree wrapped in TreeToMerge tools::TreeToMerge<FloatTree> tree4(tree2, DeepCopy()); tools::CsgDifferenceOp<FloatTree> mergeOp(tree4); EXPECT_EQ(size_t(1), mergeOp.size()); } { // implicit copy constructor tools::CsgDifferenceOp<FloatTree> mergeOp(tree2, DeepCopy()); EXPECT_EQ(size_t(1), mergeOp.size()); tools::CsgDifferenceOp<FloatTree> mergeOp2(mergeOp); EXPECT_EQ(size_t(1), mergeOp2.size()); } { // implicit assignment operator tools::CsgDifferenceOp<FloatTree> mergeOp(tree2, DeepCopy()); EXPECT_EQ(size_t(1), mergeOp.size()); tools::CsgDifferenceOp<FloatTree> mergeOp2 = mergeOp; EXPECT_EQ(size_t(1), mergeOp2.size()); } } { // merge two different outside root tiles from one grid into an empty grid (noop) FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), grid->background(), false); root2.addTile(Coord(8192, 0, 0), grid->background(), true); EXPECT_EQ(Index(2), root2.getTableSize()); EXPECT_EQ(Index(2), getTileCount(root2)); EXPECT_EQ(Index(1), getActiveTileCount(root2)); EXPECT_EQ(Index(1), getInactiveTileCount(root2)); // test container constructor here tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), root.getTableSize()); } { // merge an outside root tile to a grid which already has this tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addTile(Coord(0, 0, 0), grid->background(), false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), grid->background(), true); tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), root.getTableSize()); } { // merge an outside root tile to a grid which already has this tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addTile(Coord(0, 0, 0), grid->background(), true); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), grid->background(), false); tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); 
        EXPECT_EQ(Index(1), root.getTableSize());
        EXPECT_EQ(Index(1), getTileCount(root));
        // tile in merge grid should not replace existing tile - tile should remain active
        EXPECT_EQ(Index(1), getActiveTileCount(root));
        EXPECT_EQ(Index(0), getInactiveTileCount(root));
        EXPECT_EQ(Index(0), getInsideTileCount(root));
        EXPECT_EQ(Index(1), getOutsideTileCount(root));
    }
    { // merge an outside root tile to a grid which has an inside tile (noop)
        FloatGrid::Ptr grid = createLevelSet<FloatGrid>();
        auto& root = grid->tree().root();
        root.addTile(Coord(0, 0, 0), -grid->background(), false);
        FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>();
        auto& root2 = grid2->tree().root();
        root2.addTile(Coord(0, 0, 0), 123.0f, true);
        EXPECT_EQ(Index(1), getInsideTileCount(root));
        EXPECT_EQ(Index(0), getOutsideTileCount(root));
        tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal());
        tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree());
        nodeManager.foreachTopDown(mergeOp);
        EXPECT_EQ(Index(1), root.getTableSize());
        EXPECT_EQ(Index(1), getTileCount(root));
        EXPECT_EQ(Index(0), getActiveTileCount(root));
        EXPECT_EQ(Index(1), getInactiveTileCount(root));
        EXPECT_EQ(Index(1), getInsideTileCount(root));
        EXPECT_EQ(Index(0), getOutsideTileCount(root));
    }
    { // merge an outside root tile to a grid which has a child (noop)
        FloatGrid::Ptr grid = createLevelSet<FloatGrid>();
        auto& root = grid->tree().root();
        root.addChild(new RootChildType(Coord(0, 0, 0), 1.0f, false));
        FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>();
        auto& root2 = grid2->tree().root();
        root2.addTile(Coord(0, 0, 0), 123.0f, true);
        EXPECT_EQ(Index(1), getChildCount(root));
        tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal());
        tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree());
        nodeManager.foreachTopDown(mergeOp);
        EXPECT_EQ(Index(1), root.getTableSize());
        EXPECT_EQ(Index(0), getTileCount(root));
        EXPECT_EQ(Index(1), getChildCount(root));
    }
    { // merge a child to a grid which has an outside root tile (noop)
        FloatGrid::Ptr grid = createLevelSet<FloatGrid>();
        auto& root = grid->tree().root();
        root.addTile(Coord(0, 0, 0), 123.0f, true);
        FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>();
        auto& root2 = grid2->tree().root();
        root2.addChild(new RootChildType(Coord(0, 0, 0), 1.0f, false));
        EXPECT_EQ(Index(0), getInsideTileCount(root));
        EXPECT_EQ(Index(1), getOutsideTileCount(root));
        tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal());
        tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree());
        nodeManager.foreachTopDown(mergeOp);
        EXPECT_EQ(Index(1), root.getTableSize());
        EXPECT_EQ(Index(1), getTileCount(root));
        EXPECT_EQ(Index(0), getChildCount(root));
        EXPECT_EQ(Index(1), getActiveTileCount(root));
        EXPECT_EQ(Index(0), getInactiveTileCount(root));
        EXPECT_EQ(Index(0), getInsideTileCount(root));
        EXPECT_EQ(Index(1), getOutsideTileCount(root));
    }
    { // merge an inside root tile to a grid which has an outside tile (noop)
        FloatGrid::Ptr grid = createLevelSet<FloatGrid>();
        auto& root = grid->tree().root();
        root.addTile(Coord(0, 0, 0), grid->background(), true);
        FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>();
        auto& root2 = grid2->tree().root();
        root2.addTile(Coord(0, 0, 0), -123.0f, true);
        EXPECT_EQ(Index(0), getInsideTileCount(root));
        EXPECT_EQ(Index(1), getOutsideTileCount(root));
        tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal());
        tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree());
        nodeManager.foreachTopDown(mergeOp);
        EXPECT_EQ(Index(1), root.getTableSize());
        EXPECT_EQ(Index(1),
getTileCount(root)); EXPECT_EQ(Index(1), getActiveTileCount(root)); EXPECT_EQ(Index(0), getInactiveTileCount(root)); EXPECT_EQ(Index(0), getInsideTileCount(root)); EXPECT_EQ(Index(1), getOutsideTileCount(root)); } { // merge two grids with outside tiles, active state should be carried across FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addTile(Coord(0, 0, 0), 0.1f, false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), 0.2f, true); tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(1), root.getTableSize()); EXPECT_EQ(Index(1), getTileCount(root)); // outside tile should now be inactive EXPECT_EQ(Index(0), getActiveTileCount(root)); EXPECT_EQ(Index(1), getInactiveTileCount(root)); EXPECT_EQ(Index(0), getInsideTileCount(root)); EXPECT_EQ(Index(1), getOutsideTileCount(root)); EXPECT_EQ(0.1f, root.cbeginValueAll().getValue()); } { // merge two grids with outside tiles, active state should be carried across FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addTile(Coord(0, 0, 0), -0.1f, true); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), -0.2f, false); tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(0), root.getTableSize()); } { // merge an inside root tile to a grid which has a child, inside tile has precedence FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addChild(new RootChildType(Coord(0, 0, 0), 1.0f, false)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), -123.0f, true); EXPECT_EQ(Index(1), getChildCount(root)); tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(1), root.getTableSize()); EXPECT_EQ(Index(1), getTileCount(root)); EXPECT_EQ(Index(0), getChildCount(root)); EXPECT_EQ(Index(1), getActiveTileCount(root)); EXPECT_EQ(Index(0), getInactiveTileCount(root)); EXPECT_EQ(Index(0), getInsideTileCount(root)); EXPECT_EQ(Index(1), getOutsideTileCount(root)); EXPECT_EQ(grid->background(), root.cbeginValueAll().getValue()); } { // merge a child to a grid which has an inside root tile, child should be stolen FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addTile(Coord(0, 0, 0), -123.0f, true); // use a different background value FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(/*voxelSize=*/1.0, /*halfWidth=*/5); auto& root2 = grid2->tree().root(); auto childPtr = std::make_unique<RootChildType>(Coord(0, 0, 0), 5.0f, false); childPtr->addTile(Index(1), 1.3f, true); root2.addChild(childPtr.release()); EXPECT_EQ(Index(1), getInsideTileCount(root)); EXPECT_EQ(Index(0), getOutsideTileCount(root)); tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(1), root.getTableSize()); EXPECT_EQ(Index(0), getTileCount(root)); EXPECT_EQ(Index(1), getChildCount(root)); EXPECT_EQ(Index(0), 
getChildCount(root2)); EXPECT_TRUE(!root.cbeginChildOn()->isValueOn(Index(0))); EXPECT_TRUE(root.cbeginChildOn()->isValueOn(Index(1))); auto iter = root.cbeginChildOn()->cbeginValueAll(); EXPECT_EQ(-3.0f, iter.getValue()); ++iter; EXPECT_EQ(-1.3f, iter.getValue()); } { // merge two child nodes into a grid with two inside tiles FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addTile(Coord(0, 0, 0), -2.0f, false); root.addTile(Coord(8192, 0, 0), -4.0f, false); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); auto& root2 = grid2->tree().root(); root2.addChild(new RootChildType(Coord(0, 0, 0), 1.0f, false)); root2.addChild(new RootChildType(Coord(8192, 0, 0), -123.0f, true)); EXPECT_EQ(Index(2), root2.getTableSize()); EXPECT_EQ(Index(0), getTileCount(root2)); EXPECT_EQ(Index(2), getChildCount(root2)); tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(2), root.getTableSize()); EXPECT_EQ(Index(0), getTileCount(root)); EXPECT_EQ(Index(2), getChildCount(root)); } { // merge an inside tile and an outside tile into a grid with two child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addChild(new RootChildType(Coord(0, 0, 0), 123.0f, false)); root.addChild(new RootChildType(Coord(8192, 0, 0), 1.9f, false)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), 15.0f, true); // should not replace child root2.addTile(Coord(8192, 0, 0), -25.0f, true); // should replace child tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(1), getChildCount(root)); EXPECT_EQ(Index(1), getTileCount(root)); EXPECT_EQ(123.0f, root.cbeginChildOn()->getFirstValue()); EXPECT_TRUE(root.cbeginChildAll().isChildNode()); EXPECT_TRUE(!(++root.cbeginChildAll()).isChildNode()); EXPECT_EQ(grid->background(), root.cbeginValueOn().getValue()); } { // merge an inside tile and an outside tile into a grid with two child nodes FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); root.addChild(new RootChildType(Coord(0, 0, 0), 123.0f, false)); root.addChild(new RootChildType(Coord(8192, 0, 0), 1.9f, false)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); auto& root2 = grid2->tree().root(); root2.addTile(Coord(0, 0, 0), 15.0f, false); // should not replace child root2.addTile(Coord(8192, 0, 0), -25.0f, false); // should replace child EXPECT_EQ(Index(2), getChildCount(root)); EXPECT_EQ(Index(2), getTileCount(root2)); tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(1), getChildCount(root)); EXPECT_EQ(Index(0), getTileCount(root)); EXPECT_EQ(123.0f, root.cbeginChildOn()->getFirstValue()); } { // merge two internal nodes into a grid with an inside tile and an outside tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); auto& root = grid->tree().root(); auto rootChild = std::make_unique<RootChildType>(Coord(0, 0, 0), 123.0f, false); rootChild->addTile(0, -14.0f, false); rootChild->addTile(1, 15.0f, false); rootChild->addTile(2, -13.0f, false); root.addChild(rootChild.release()); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); auto& root2 = 
grid2->tree().root(); auto rootChild2 = std::make_unique<RootChildType>(Coord(0, 0, 0), 55.0f, false); rootChild2->addChild(new LeafParentType(Coord(0, 0, 0), 29.0f, false)); rootChild2->addChild(new LeafParentType(Coord(0, 0, 128), 31.0f, false)); rootChild2->addTile(2, -17.0f, true); rootChild2->addTile(9, 19.0f, true); root2.addChild(rootChild2.release()); EXPECT_EQ(Index(2), getInsideTileCount(*root.cbeginChildOn())); EXPECT_EQ(Index(0), getActiveTileCount(*root.cbeginChildOn())); EXPECT_EQ(Index(2), getChildCount(*root2.cbeginChildOn())); EXPECT_EQ(Index(1), getInsideTileCount(*root2.cbeginChildOn())); EXPECT_EQ(Index(2), getActiveTileCount(*root2.cbeginChildOn())); tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index(1), getChildCount(*root.cbeginChildOn())); EXPECT_EQ(Index(0), getInsideTileCount(*root.cbeginChildOn())); EXPECT_EQ(Index(1), getActiveTileCount(*root.cbeginChildOn())); EXPECT_TRUE(root.cbeginChildOn()->isChildMaskOn(0)); EXPECT_TRUE(!root.cbeginChildOn()->isChildMaskOn(1)); EXPECT_EQ(-29.0f, root.cbeginChildOn()->cbeginChildOn()->getFirstValue()); auto iter = root.cbeginChildOn()->cbeginValueAll(); EXPECT_EQ(15.0f, iter.getValue()); ++iter; EXPECT_EQ(3.0f, iter.getValue()); EXPECT_EQ(Index(1), getChildCount(*root2.cbeginChildOn())); } { // merge a leaf node into a grid with an inside tile FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().addTile(1, Coord(0, 0, 0), -1.3f, true); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); grid2->tree().touchLeaf(Coord(0, 0, 0)); EXPECT_EQ(Index32(0), grid->tree().leafCount()); EXPECT_EQ(Index32(1), grid2->tree().leafCount()); tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index32(1), grid->tree().leafCount()); EXPECT_EQ(Index32(0), grid2->tree().leafCount()); } { // merge two leaf nodes into a grid FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().touchLeaf(Coord(0, 0, 0)); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); grid2->tree().touchLeaf(Coord(0, 0, 0)); EXPECT_EQ(Index32(1), grid->tree().leafCount()); EXPECT_EQ(Index32(1), grid2->tree().leafCount()); tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto* leaf = grid->tree().probeConstLeaf(Coord(0, 0, 0)); EXPECT_TRUE(leaf); } { // merge a leaf node into a grid with a partially constructed leaf node FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); grid->tree().addLeaf(new LeafT(PartialCreate(), Coord(0, 0, 0))); auto* leaf = grid2->tree().touchLeaf(Coord(0, 0, 0)); leaf->setValueOnly(10, 6.4f); tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto* testLeaf = grid->tree().probeConstLeaf(Coord(0, 0, 0)); EXPECT_EQ(3.0f, testLeaf->getValue(10)); } { // merge two leaf nodes from different grids FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); auto* leaf = grid->tree().touchLeaf(Coord(0, 0, 0)); auto* leaf2 = grid2->tree().touchLeaf(Coord(0, 0, 0)); // active state from the voxel with the maximum value preserved 
leaf->setValueOnly(5, 98.0f); leaf2->setValueOnly(5, 2.0f); leaf2->setValueOn(5); leaf->setValueOnly(7, 2.0f); leaf->setValueOn(7); leaf2->setValueOnly(7, 100.0f); leaf->setValueOnly(9, 4.0f); leaf->setValueOn(9); leaf2->setValueOnly(9, -100.0f); tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->tree(), Steal()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); const auto* testLeaf = grid->tree().probeConstLeaf(Coord(0, 0, 0)); EXPECT_EQ(98.0f, testLeaf->getValue(5)); EXPECT_TRUE(!testLeaf->isValueOn(5)); EXPECT_EQ(2.0f, testLeaf->getValue(7)); EXPECT_TRUE(testLeaf->isValueOn(7)); EXPECT_EQ(100.0f, testLeaf->getValue(9)); EXPECT_TRUE(!testLeaf->isValueOn(9)); } { // merge a leaf node into a grid with an inside tile from a const tree FloatGrid::Ptr grid = createLevelSet<FloatGrid>(); grid->tree().addTile(1, Coord(0, 0, 0), -1.3f, true); FloatGrid::Ptr grid2 = createLevelSet<FloatGrid>(); grid2->tree().touchLeaf(Coord(0, 0, 0)); EXPECT_EQ(Index32(0), grid->tree().leafCount()); EXPECT_EQ(Index32(1), grid2->tree().leafCount()); tools::CsgDifferenceOp<FloatTree> mergeOp(grid2->constTree(), DeepCopy()); tree::DynamicNodeManager<FloatTree, 3> nodeManager(grid->tree()); nodeManager.foreachTopDown(mergeOp); EXPECT_EQ(Index32(1), grid->tree().leafCount()); EXPECT_EQ(Index32(1), grid2->tree().leafCount()); } }
121,505
C++
46.278599
112
0.591572
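The record above ends the CSG-difference unit tests, which drive openvdb::tools::CsgDifferenceOp through a tree::DynamicNodeManager. As a reading aid, here is a minimal hypothetical sketch (not part of the repository) of that same merge pattern applied to two level-set spheres; the sphere parameters and grid names are assumptions, not values taken from the tests.

// Standalone sketch: CSG difference via CsgDifferenceOp + DynamicNodeManager,
// mirroring the pattern exercised by the tests above.
#include <openvdb/openvdb.h>
#include <openvdb/tools/LevelSetSphere.h>
#include <openvdb/tools/Merge.h>          // CsgDifferenceOp
#include <openvdb/tree/NodeManager.h>     // DynamicNodeManager

int main()
{
    openvdb::initialize();

    // Two level-set spheres sharing a transform; gridB is subtracted from gridA.
    openvdb::FloatGrid::Ptr gridA = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/10.0f, /*center=*/openvdb::Vec3f(0.0f, 0.0f, 0.0f), /*voxelSize=*/0.5f);
    openvdb::FloatGrid::Ptr gridB = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/10.0f, /*center=*/openvdb::Vec3f(5.0f, 0.0f, 0.0f), /*voxelSize=*/0.5f);

    // Steal() lets the operator move nodes out of gridB's tree instead of copying them,
    // which is the same construction used throughout the tests above.
    openvdb::tools::CsgDifferenceOp<openvdb::FloatTree> op(gridB->tree(), openvdb::Steal());
    openvdb::tree::DynamicNodeManager<openvdb::FloatTree, 3> nodeManager(gridA->tree());
    nodeManager.foreachTopDown(op);

    // gridA now holds the difference A - B; gridB's tree has been consumed.
    return 0;
}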
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestValueAccessor.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <tbb/task.h> #include <openvdb/openvdb.h> #include <openvdb/tools/Prune.h> #include <type_traits> #define ASSERT_DOUBLES_EXACTLY_EQUAL(expected, actual) \ EXPECT_NEAR((expected), (actual), /*tolerance=*/0.0); using ValueType = float; using Tree2Type = openvdb::tree::Tree< openvdb::tree::RootNode< openvdb::tree::LeafNode<ValueType, 3> > >; using Tree3Type = openvdb::tree::Tree< openvdb::tree::RootNode< openvdb::tree::InternalNode< openvdb::tree::LeafNode<ValueType, 3>, 4> > >; using Tree4Type = openvdb::tree::Tree4<ValueType, 5, 4, 3>::Type; using Tree5Type = openvdb::tree::Tree< openvdb::tree::RootNode< openvdb::tree::InternalNode< openvdb::tree::InternalNode< openvdb::tree::InternalNode< openvdb::tree::LeafNode<ValueType, 3>, 4>, 5>, 5> > >; using TreeType = Tree4Type; using namespace openvdb::tree; class TestValueAccessor: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } // Test odd combinations of trees and ValueAccessors // cache node level 0 and 1 void testTree3Accessor2() { accessorTest<ValueAccessor<Tree3Type, true, 2> >(); accessorTest<ValueAccessor<Tree3Type, false, 2> >(); } void testTree3ConstAccessor2() { constAccessorTest<ValueAccessor<const Tree3Type, true, 2> >(); constAccessorTest<ValueAccessor<const Tree3Type, false, 2> >(); } void testTree4Accessor2() { accessorTest<ValueAccessor<Tree4Type, true, 2> >(); accessorTest<ValueAccessor<Tree4Type, false, 2> >(); } void testTree4ConstAccessor2() { constAccessorTest<ValueAccessor<const Tree4Type, true, 2> >(); constAccessorTest<ValueAccessor<const Tree4Type, false, 2> >(); } void testTree5Accessor2() { accessorTest<ValueAccessor<Tree5Type, true, 2> >(); accessorTest<ValueAccessor<Tree5Type, false, 2> >(); } void testTree5ConstAccessor2() { constAccessorTest<ValueAccessor<const Tree5Type, true, 2> >(); constAccessorTest<ValueAccessor<const Tree5Type, false, 2> >(); } // only cache leaf level void testTree4Accessor1() { accessorTest<ValueAccessor<Tree5Type, true, 1> >(); accessorTest<ValueAccessor<Tree5Type, false, 1> >(); } void testTree4ConstAccessor1() { constAccessorTest<ValueAccessor<const Tree5Type, true, 1> >(); constAccessorTest<ValueAccessor<const Tree5Type, false, 1> >(); } // disable node caching void testTree4Accessor0() { accessorTest<ValueAccessor<Tree5Type, true, 0> >(); accessorTest<ValueAccessor<Tree5Type, false, 0> >(); } void testTree4ConstAccessor0() { constAccessorTest<ValueAccessor<const Tree5Type, true, 0> >(); constAccessorTest<ValueAccessor<const Tree5Type, false, 0> >(); } //cache node level 2 void testTree4Accessor12() { accessorTest<ValueAccessor1<Tree4Type, true, 2> >(); accessorTest<ValueAccessor1<Tree4Type, false, 2> >(); } //cache node level 1 and 3 void testTree5Accessor213() { accessorTest<ValueAccessor2<Tree5Type, true, 1,3> >(); accessorTest<ValueAccessor2<Tree5Type, false, 1,3> >(); } protected: template<typename AccessorT> void accessorTest(); template<typename AccessorT> void constAccessorTest(); }; //////////////////////////////////////// namespace { struct Plus { float addend; Plus(float f): addend(f) {} inline void operator()(float& f) const { f += addend; } inline void operator()(float& f, bool& b) const { f += addend; b = false; } }; } template<typename AccessorT> void TestValueAccessor::accessorTest() { using TreeType = typename AccessorT::TreeType; const int leafDepth = 
int(TreeType::DEPTH) - 1; // subtract one because getValueDepth() returns 0 for values at the root const ValueType background = 5.0f, value = -9.345f; const openvdb::Coord c0(5, 10, 20), c1(500000, 200000, 300000); { TreeType tree(background); EXPECT_TRUE(!tree.isValueOn(c0)); EXPECT_TRUE(!tree.isValueOn(c1)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(c1)); tree.setValue(c0, value); EXPECT_TRUE(tree.isValueOn(c0)); EXPECT_TRUE(!tree.isValueOn(c1)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(c1)); } { TreeType tree(background); AccessorT acc(tree); ValueType v; EXPECT_TRUE(!tree.isValueOn(c0)); EXPECT_TRUE(!tree.isValueOn(c1)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(c1)); EXPECT_TRUE(!acc.isCached(c0)); EXPECT_TRUE(!acc.isCached(c1)); EXPECT_TRUE(!acc.probeValue(c0,v)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, v); EXPECT_TRUE(!acc.probeValue(c1,v)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, v); EXPECT_EQ(-1, acc.getValueDepth(c0)); EXPECT_EQ(-1, acc.getValueDepth(c1)); EXPECT_TRUE(!acc.isVoxel(c0)); EXPECT_TRUE(!acc.isVoxel(c1)); acc.setValue(c0, value); EXPECT_TRUE(tree.isValueOn(c0)); EXPECT_TRUE(!tree.isValueOn(c1)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(c1)); EXPECT_TRUE(acc.probeValue(c0,v)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, v); EXPECT_TRUE(!acc.probeValue(c1,v)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, v); EXPECT_EQ(leafDepth, acc.getValueDepth(c0)); // leaf-level voxel value EXPECT_EQ(-1, acc.getValueDepth(c1)); // background value EXPECT_EQ(leafDepth, acc.getValueDepth(openvdb::Coord(7, 10, 20))); const int depth = leafDepth == 1 ? 
-1 : leafDepth - 1; EXPECT_EQ(depth, acc.getValueDepth(openvdb::Coord(8, 10, 20))); EXPECT_TRUE( acc.isVoxel(c0)); // leaf-level voxel value EXPECT_TRUE(!acc.isVoxel(c1)); EXPECT_TRUE( acc.isVoxel(openvdb::Coord(7, 10, 20))); EXPECT_TRUE(!acc.isVoxel(openvdb::Coord(8, 10, 20))); ASSERT_DOUBLES_EXACTLY_EQUAL(background, acc.getValue(c1)); EXPECT_TRUE(!acc.isCached(c1)); // uncached background value EXPECT_TRUE(!acc.isValueOn(c1)); // inactive background value ASSERT_DOUBLES_EXACTLY_EQUAL(value, acc.getValue(c0)); EXPECT_TRUE( (acc.numCacheLevels()>0) == acc.isCached(c0)); // active, leaf-level voxel value EXPECT_TRUE(acc.isValueOn(c0)); acc.setValue(c1, value); EXPECT_TRUE(acc.isValueOn(c1)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, tree.getValue(c1)); EXPECT_TRUE((acc.numCacheLevels()>0) == acc.isCached(c1)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, acc.getValue(c1)); EXPECT_TRUE(!acc.isCached(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, acc.getValue(c0)); EXPECT_TRUE((acc.numCacheLevels()>0) == acc.isCached(c0)); EXPECT_EQ(leafDepth, acc.getValueDepth(c0)); EXPECT_EQ(leafDepth, acc.getValueDepth(c1)); EXPECT_TRUE(acc.isVoxel(c0)); EXPECT_TRUE(acc.isVoxel(c1)); tree.setValueOff(c1); ASSERT_DOUBLES_EXACTLY_EQUAL(value, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, tree.getValue(c1)); EXPECT_TRUE(!acc.isCached(c0)); EXPECT_TRUE((acc.numCacheLevels()>0) == acc.isCached(c1)); EXPECT_TRUE( acc.isValueOn(c0)); EXPECT_TRUE(!acc.isValueOn(c1)); acc.setValueOn(c1); EXPECT_TRUE(!acc.isCached(c0)); EXPECT_TRUE((acc.numCacheLevels()>0) == acc.isCached(c1)); EXPECT_TRUE( acc.isValueOn(c0)); EXPECT_TRUE( acc.isValueOn(c1)); acc.modifyValueAndActiveState(c1, Plus(-value)); // subtract value & mark inactive EXPECT_TRUE(!acc.isValueOn(c1)); acc.modifyValue(c1, Plus(-value)); // subtract value again & mark active EXPECT_TRUE(acc.isValueOn(c1)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(-value, tree.getValue(c1)); EXPECT_TRUE((acc.numCacheLevels()>0) == acc.isCached(c1)); ASSERT_DOUBLES_EXACTLY_EQUAL(-value, acc.getValue(c1)); EXPECT_TRUE(!acc.isCached(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, acc.getValue(c0)); EXPECT_TRUE((acc.numCacheLevels()>0) == acc.isCached(c0)); EXPECT_EQ(leafDepth, acc.getValueDepth(c0)); EXPECT_EQ(leafDepth, acc.getValueDepth(c1)); EXPECT_TRUE(acc.isVoxel(c0)); EXPECT_TRUE(acc.isVoxel(c1)); acc.setValueOnly(c1, 3*value); EXPECT_TRUE(acc.isValueOn(c1)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(3*value, tree.getValue(c1)); EXPECT_TRUE((acc.numCacheLevels()>0) == acc.isCached(c1)); ASSERT_DOUBLES_EXACTLY_EQUAL(3*value, acc.getValue(c1)); EXPECT_TRUE(!acc.isCached(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, acc.getValue(c0)); EXPECT_TRUE((acc.numCacheLevels()>0) == acc.isCached(c0)); EXPECT_EQ(leafDepth, acc.getValueDepth(c0)); EXPECT_EQ(leafDepth, acc.getValueDepth(c1)); EXPECT_TRUE(acc.isVoxel(c0)); EXPECT_TRUE(acc.isVoxel(c1)); acc.clear(); EXPECT_TRUE(!acc.isCached(c0)); EXPECT_TRUE(!acc.isCached(c1)); } } template<typename AccessorT> void TestValueAccessor::constAccessorTest() { using TreeType = typename std::remove_const<typename AccessorT::TreeType>::type; const int leafDepth = int(TreeType::DEPTH) - 1; // subtract one because getValueDepth() returns 0 for values at the root const ValueType background = 5.0f, value = -9.345f; const openvdb::Coord c0(5, 10, 20), c1(500000, 200000, 300000); ValueType v; TreeType tree(background); AccessorT 
acc(tree); EXPECT_TRUE(!tree.isValueOn(c0)); EXPECT_TRUE(!tree.isValueOn(c1)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(c1)); EXPECT_TRUE(!acc.isCached(c0)); EXPECT_TRUE(!acc.isCached(c1)); EXPECT_TRUE(!acc.probeValue(c0,v)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, v); EXPECT_TRUE(!acc.probeValue(c1,v)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, v); EXPECT_EQ(-1, acc.getValueDepth(c0)); EXPECT_EQ(-1, acc.getValueDepth(c1)); EXPECT_TRUE(!acc.isVoxel(c0)); EXPECT_TRUE(!acc.isVoxel(c1)); tree.setValue(c0, value); EXPECT_TRUE(tree.isValueOn(c0)); EXPECT_TRUE(!tree.isValueOn(c1)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, acc.getValue(c1)); EXPECT_TRUE(!acc.isCached(c1)); EXPECT_TRUE(!acc.isCached(c0)); EXPECT_TRUE(acc.isValueOn(c0)); EXPECT_TRUE(!acc.isValueOn(c1)); EXPECT_TRUE(acc.probeValue(c0,v)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, v); EXPECT_TRUE(!acc.probeValue(c1,v)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, v); EXPECT_EQ(leafDepth, acc.getValueDepth(c0)); EXPECT_EQ(-1, acc.getValueDepth(c1)); EXPECT_TRUE( acc.isVoxel(c0)); EXPECT_TRUE(!acc.isVoxel(c1)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, acc.getValue(c0)); EXPECT_TRUE((acc.numCacheLevels()>0) == acc.isCached(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, acc.getValue(c1)); EXPECT_TRUE((acc.numCacheLevels()>0) == acc.isCached(c0)); EXPECT_TRUE(!acc.isCached(c1)); EXPECT_TRUE(acc.isValueOn(c0)); EXPECT_TRUE(!acc.isValueOn(c1)); tree.setValue(c1, value); ASSERT_DOUBLES_EXACTLY_EQUAL(value, acc.getValue(c1)); EXPECT_TRUE(!acc.isCached(c0)); EXPECT_TRUE((acc.numCacheLevels()>0) == acc.isCached(c1)); EXPECT_TRUE(acc.isValueOn(c0)); EXPECT_TRUE(acc.isValueOn(c1)); EXPECT_EQ(leafDepth, acc.getValueDepth(c0)); EXPECT_EQ(leafDepth, acc.getValueDepth(c1)); EXPECT_TRUE(acc.isVoxel(c0)); EXPECT_TRUE(acc.isVoxel(c1)); // The next two lines should not compile, because the acc references a const tree: //acc.setValue(c1, value); //acc.setValueOff(c1); acc.clear(); EXPECT_TRUE(!acc.isCached(c0)); EXPECT_TRUE(!acc.isCached(c1)); } // cache all node levels TEST_F(TestValueAccessor, testTree2Accessor) { accessorTest<ValueAccessor<Tree2Type> >(); } TEST_F(TestValueAccessor, testTree2AccessorRW) { accessorTest<ValueAccessorRW<Tree2Type> >(); } TEST_F(TestValueAccessor, testTree2ConstAccessor) { constAccessorTest<ValueAccessor<const Tree2Type> >(); } TEST_F(TestValueAccessor, testTree2ConstAccessorRW) { constAccessorTest<ValueAccessorRW<const Tree2Type> >(); } // cache all node levels TEST_F(TestValueAccessor, testTree3Accessor) { accessorTest<ValueAccessor<Tree3Type> >(); } TEST_F(TestValueAccessor, testTree3AccessorRW) { accessorTest<ValueAccessorRW<Tree3Type> >(); } TEST_F(TestValueAccessor, testTree3ConstAccessor) { constAccessorTest<ValueAccessor<const Tree3Type> >(); } TEST_F(TestValueAccessor, testTree3ConstAccessorRW) { constAccessorTest<ValueAccessorRW<const Tree3Type> >(); } // cache all node levels TEST_F(TestValueAccessor, testTree4Accessor) { accessorTest<ValueAccessor<Tree4Type> >(); } TEST_F(TestValueAccessor, testTree4AccessorRW) { accessorTest<ValueAccessorRW<Tree4Type> >(); } TEST_F(TestValueAccessor, testTree4ConstAccessor) { constAccessorTest<ValueAccessor<const Tree4Type> >(); } TEST_F(TestValueAccessor, testTree4ConstAccessorRW) { constAccessorTest<ValueAccessorRW<const Tree4Type> >(); } // cache all node levels TEST_F(TestValueAccessor, testTree5Accessor) { accessorTest<ValueAccessor<Tree5Type> >(); } TEST_F(TestValueAccessor, testTree5AccessorRW) { 
accessorTest<ValueAccessorRW<Tree5Type> >(); } TEST_F(TestValueAccessor, testTree5ConstAccessor) { constAccessorTest<ValueAccessor<const Tree5Type> >(); } TEST_F(TestValueAccessor, testTree5ConstAccessorRW) { constAccessorTest<ValueAccessorRW<const Tree5Type> >(); } TEST_F(TestValueAccessor, testMultithreadedAccessor) { #define MAX_COORD 5000 using AccessorT = openvdb::tree::ValueAccessorRW<Tree4Type>; // Substituting the following alias typically results in assertion failures: //using AccessorT = openvdb::tree::ValueAccessor<Tree4Type>; // Task to perform multiple reads through a shared accessor struct ReadTask: public tbb::task { AccessorT& acc; ReadTask(AccessorT& c): acc(c) {} tbb::task* execute() { for (int i = -MAX_COORD; i < MAX_COORD; ++i) { ASSERT_DOUBLES_EXACTLY_EQUAL(double(i), acc.getValue(openvdb::Coord(i))); } return nullptr; } }; // Task to perform multiple writes through a shared accessor struct WriteTask: public tbb::task { AccessorT& acc; WriteTask(AccessorT& c): acc(c) {} tbb::task* execute() { for (int i = -MAX_COORD; i < MAX_COORD; ++i) { float f = acc.getValue(openvdb::Coord(i)); ASSERT_DOUBLES_EXACTLY_EQUAL(float(i), f); acc.setValue(openvdb::Coord(i), float(i)); ASSERT_DOUBLES_EXACTLY_EQUAL(float(i), acc.getValue(openvdb::Coord(i))); } return nullptr; } }; // Parent task to spawn multiple parallel read and write tasks struct RootTask: public tbb::task { AccessorT& acc; RootTask(AccessorT& c): acc(c) {} tbb::task* execute() { ReadTask* r[3]; WriteTask* w[3]; for (int i = 0; i < 3; ++i) { r[i] = new(allocate_child()) ReadTask(acc); w[i] = new(allocate_child()) WriteTask(acc); } set_ref_count(6 /*children*/ + 1 /*wait*/); for (int i = 0; i < 3; ++i) { spawn(*r[i]); spawn(*w[i]); } wait_for_all(); return nullptr; } }; Tree4Type tree(/*background=*/0.5); AccessorT acc(tree); // Populate the tree. for (int i = -MAX_COORD; i < MAX_COORD; ++i) { acc.setValue(openvdb::Coord(i), float(i)); } // Run multiple read and write tasks in parallel. RootTask& root = *new(tbb::task::allocate_root()) RootTask(acc); tbb::task::spawn_root_and_wait(root); #undef MAX_COORD } TEST_F(TestValueAccessor, testAccessorRegistration) { using openvdb::Index; const float background = 5.0f, value = -9.345f; const openvdb::Coord c0(5, 10, 20); openvdb::FloatTree::Ptr tree(new openvdb::FloatTree(background)); openvdb::tree::ValueAccessor<openvdb::FloatTree> acc(*tree); // Set a single leaf voxel via the accessor and verify that // the cache is populated. acc.setValue(c0, value); EXPECT_EQ(Index(1), tree->leafCount()); EXPECT_EQ(tree->root().getLevel(), tree->nonLeafCount()); EXPECT_TRUE(acc.getNode<openvdb::FloatTree::LeafNodeType>() != nullptr); // Reset the voxel to the background value and verify that no nodes // have been deleted and that the cache is still populated. tree->setValueOff(c0, background); EXPECT_EQ(Index(1), tree->leafCount()); EXPECT_EQ(tree->root().getLevel(), tree->nonLeafCount()); EXPECT_TRUE(acc.getNode<openvdb::FloatTree::LeafNodeType>() != nullptr); // Prune the tree and verify that only the root node remains and that // the cache has been cleared. openvdb::tools::prune(*tree); //tree->prune(); EXPECT_EQ(Index(0), tree->leafCount()); EXPECT_EQ(Index(1), tree->nonLeafCount()); // root node only EXPECT_TRUE(acc.getNode<openvdb::FloatTree::LeafNodeType>() == nullptr); // Set the leaf voxel again and verify that the cache is repopulated. 
acc.setValue(c0, value); EXPECT_EQ(Index(1), tree->leafCount()); EXPECT_EQ(tree->root().getLevel(), tree->nonLeafCount()); EXPECT_TRUE(acc.getNode<openvdb::FloatTree::LeafNodeType>() != nullptr); // Delete the tree and verify that the cache has been cleared. tree.reset(); EXPECT_TRUE(acc.getTree() == nullptr); EXPECT_TRUE(acc.getNode<openvdb::FloatTree::RootNodeType>() == nullptr); EXPECT_TRUE(acc.getNode<openvdb::FloatTree::LeafNodeType>() == nullptr); } TEST_F(TestValueAccessor, testGetNode) { using LeafT = Tree4Type::LeafNodeType; const ValueType background = 5.0f, value = -9.345f; const openvdb::Coord c0(5, 10, 20); Tree4Type tree(background); tree.setValue(c0, value); { openvdb::tree::ValueAccessor<Tree4Type> acc(tree); // Prime the cache. acc.getValue(c0); // Verify that the cache contains a leaf node. LeafT* node = acc.getNode<LeafT>(); EXPECT_TRUE(node != nullptr); // Erase the leaf node from the cache and verify that it is gone. acc.eraseNode<LeafT>(); node = acc.getNode<LeafT>(); EXPECT_TRUE(node == nullptr); } { // As above, but with a const tree. openvdb::tree::ValueAccessor<const Tree4Type> acc(tree); acc.getValue(c0); const LeafT* node = acc.getNode<const LeafT>(); EXPECT_TRUE(node != nullptr); acc.eraseNode<LeafT>(); node = acc.getNode<const LeafT>(); EXPECT_TRUE(node == nullptr); } }
19,779
C++
37.038461
111
0.645988
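The TestValueAccessor record above checks caching behaviour across many tree depths and accessor configurations. For orientation, a minimal hypothetical sketch (not from the repository) of ordinary accessor usage follows; the coordinates and values are arbitrary.

// Standalone sketch: reads and writes through a tree::ValueAccessor.
#include <openvdb/openvdb.h>
#include <iostream>

int main()
{
    openvdb::initialize();

    // A float tree with background 0 and an accessor that caches the nodes
    // visited by each lookup, accelerating spatially coherent access.
    openvdb::FloatTree tree(/*background=*/0.0f);
    openvdb::tree::ValueAccessor<openvdb::FloatTree> acc(tree);

    const openvdb::Coord ijk(5, 10, 20);
    acc.setValue(ijk, 1.5f);                 // write through the accessor
    const float v = acc.getValue(ijk);       // read hits the cached leaf node
    const bool active = acc.isValueOn(ijk);

    // A const accessor over the same tree permits reads only.
    openvdb::tree::ValueAccessor<const openvdb::FloatTree> constAcc(tree);

    std::cout << "value " << v << ", active " << active
              << ", via const accessor " << constAcc.getValue(ijk) << std::endl;
    return 0;
}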
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestPointDelete.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/points/PointGroup.h> #include <openvdb/points/PointCount.h> #include <openvdb/points/PointConversion.h> #include <openvdb/points/PointDelete.h> #include <string> #include <vector> #ifdef _MSC_VER #include <windows.h> #endif using namespace openvdb::points; class TestPointDelete: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; // class TestPointDelete //////////////////////////////////////// TEST_F(TestPointDelete, testDeleteFromGroups) { using openvdb::math::Vec3s; using openvdb::tools::PointIndexGrid; using openvdb::Index64; const float voxelSize(1.0); openvdb::math::Transform::Ptr transform(openvdb::math::Transform::createLinearTransform(voxelSize)); const std::vector<Vec3s> positions6Points = { {1, 1, 1}, {1, 2, 1}, {2, 1, 1}, {2, 2, 1}, {100, 100, 100}, {100, 101, 100} }; const PointAttributeVector<Vec3s> pointList6Points(positions6Points); { // delete from a tree with 2 leaves, checking that group membership is updated as // expected PointIndexGrid::Ptr pointIndexGrid = openvdb::tools::createPointIndexGrid<PointIndexGrid>(pointList6Points, *transform); PointDataGrid::Ptr grid = createPointDataGrid<NullCodec, PointDataGrid>(*pointIndexGrid, pointList6Points, *transform); PointDataTree& tree = grid->tree(); // first test will delete 3 groups, with the third one empty. appendGroup(tree, "test1"); appendGroup(tree, "test2"); appendGroup(tree, "test3"); appendGroup(tree, "test4"); EXPECT_EQ(pointCount(tree), Index64(6)); std::vector<short> membership1{1, 0, 0, 0, 0, 1}; setGroup(tree, pointIndexGrid->tree(), membership1, "test1"); std::vector<short> membership2{0, 0, 1, 1, 0, 1}; setGroup(tree, pointIndexGrid->tree(), membership2, "test2"); std::vector<std::string> groupsToDelete{"test1", "test2", "test3"}; deleteFromGroups(tree, groupsToDelete); // 4 points should have been deleted, so only 2 remain EXPECT_EQ(pointCount(tree), Index64(2)); // check that first three groups are deleted but the last is not const PointDataTree::LeafCIter leafIterAfterDeletion = tree.cbeginLeaf(); AttributeSet attributeSetAfterDeletion = leafIterAfterDeletion->attributeSet(); AttributeSet::Descriptor& descriptor = attributeSetAfterDeletion.descriptor(); EXPECT_TRUE(!descriptor.hasGroup("test1")); EXPECT_TRUE(!descriptor.hasGroup("test2")); EXPECT_TRUE(!descriptor.hasGroup("test3")); EXPECT_TRUE(descriptor.hasGroup("test4")); } { // check deletion from a single leaf tree and that attribute values are preserved // correctly after deletion std::vector<Vec3s> positions4Points = { {1, 1, 1}, {1, 2, 1}, {2, 1, 1}, {2, 2, 1}, }; const PointAttributeVector<Vec3s> pointList4Points(positions4Points); PointIndexGrid::Ptr pointIndexGrid = openvdb::tools::createPointIndexGrid<PointIndexGrid>(pointList4Points, *transform); PointDataGrid::Ptr grid = createPointDataGrid<NullCodec, PointDataGrid>(*pointIndexGrid, pointList4Points, *transform); PointDataTree& tree = grid->tree(); appendGroup(tree, "test"); appendAttribute(tree, "testAttribute", TypedAttributeArray<int32_t>::attributeType()); EXPECT_TRUE(tree.beginLeaf()); const PointDataTree::LeafIter leafIter = tree.beginLeaf(); AttributeWriteHandle<int> testAttributeWriteHandle(leafIter->attributeArray("testAttribute")); for(int i = 0; i < 4; i++) { testAttributeWriteHandle.set(i,i+1); } std::vector<short> membership{0, 1, 1, 0}; setGroup(tree, 
pointIndexGrid->tree(), membership, "test"); deleteFromGroup(tree, "test"); EXPECT_EQ(pointCount(tree), Index64(2)); const PointDataTree::LeafCIter leafIterAfterDeletion = tree.cbeginLeaf(); const AttributeSet attributeSetAfterDeletion = leafIterAfterDeletion->attributeSet(); const AttributeSet::Descriptor& descriptor = attributeSetAfterDeletion.descriptor(); EXPECT_TRUE(descriptor.find("testAttribute") != AttributeSet::INVALID_POS); AttributeHandle<int> testAttributeHandle(*attributeSetAfterDeletion.get("testAttribute")); EXPECT_EQ(1, testAttributeHandle.get(0)); EXPECT_EQ(4, testAttributeHandle.get(1)); } { // test the invert flag using data similar to that used in the first test PointIndexGrid::Ptr pointIndexGrid = openvdb::tools::createPointIndexGrid<PointIndexGrid>(pointList6Points, *transform); PointDataGrid::Ptr grid = createPointDataGrid<NullCodec, PointDataGrid>(*pointIndexGrid, pointList6Points, *transform); PointDataTree& tree = grid->tree(); appendGroup(tree, "test1"); appendGroup(tree, "test2"); appendGroup(tree, "test3"); appendGroup(tree, "test4"); EXPECT_EQ(pointCount(tree), Index64(6)); std::vector<short> membership1{1, 0, 1, 1, 0, 1}; setGroup(tree, pointIndexGrid->tree(), membership1, "test1"); std::vector<short> membership2{0, 0, 1, 1, 0, 1}; setGroup(tree, pointIndexGrid->tree(), membership2, "test2"); std::vector<std::string> groupsToDelete{"test1", "test3"}; deleteFromGroups(tree, groupsToDelete, /*invert=*/ true); const PointDataTree::LeafCIter leafIterAfterDeletion = tree.cbeginLeaf(); const AttributeSet attributeSetAfterDeletion = leafIterAfterDeletion->attributeSet(); const AttributeSet::Descriptor& descriptor = attributeSetAfterDeletion.descriptor(); // no groups should be dropped when invert = true EXPECT_EQ(static_cast<size_t>(descriptor.groupMap().size()), static_cast<size_t>(4)); // 4 points should remain since test1 and test3 have 4 members between then EXPECT_EQ(static_cast<size_t>(pointCount(tree)), static_cast<size_t>(4)); } { // similar to first test, but don't drop groups PointIndexGrid::Ptr pointIndexGrid = openvdb::tools::createPointIndexGrid<PointIndexGrid>(pointList6Points, *transform); PointDataGrid::Ptr grid = createPointDataGrid<NullCodec, PointDataGrid>(*pointIndexGrid, pointList6Points, *transform); PointDataTree& tree = grid->tree(); // first test will delete 3 groups, with the third one empty. appendGroup(tree, "test1"); appendGroup(tree, "test2"); appendGroup(tree, "test3"); appendGroup(tree, "test4"); std::vector<short> membership1{1, 0, 0, 0, 0, 1}; setGroup(tree, pointIndexGrid->tree(), membership1, "test1"); std::vector<short> membership2{0, 0, 1, 1, 0, 1}; setGroup(tree, pointIndexGrid->tree(), membership2, "test2"); std::vector<std::string> groupsToDelete{"test1", "test2", "test3"}; deleteFromGroups(tree, groupsToDelete, /*invert=*/ false, /*drop=*/ false); // 4 points should have been deleted, so only 2 remain EXPECT_EQ(pointCount(tree), Index64(2)); // check that first three groups are deleted but the last is not const PointDataTree::LeafCIter leafIterAfterDeletion = tree.cbeginLeaf(); AttributeSet attributeSetAfterDeletion = leafIterAfterDeletion->attributeSet(); AttributeSet::Descriptor& descriptor = attributeSetAfterDeletion.descriptor(); // all group should still be present EXPECT_TRUE(descriptor.hasGroup("test1")); EXPECT_TRUE(descriptor.hasGroup("test2")); EXPECT_TRUE(descriptor.hasGroup("test3")); EXPECT_TRUE(descriptor.hasGroup("test4")); } }
8,763
C++
35.365145
104
0.608239
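TestPointDelete.cc above verifies group-based point deletion. The following is a small hypothetical sketch (not part of the repository) of the same API on a toy point set; the positions, voxel size and group name are assumptions.

// Standalone sketch: delete points by group membership with points::deleteFromGroup.
#include <openvdb/openvdb.h>
#include <openvdb/points/PointConversion.h>
#include <openvdb/points/PointGroup.h>
#include <openvdb/points/PointDelete.h>
#include <openvdb/points/PointCount.h>
#include <iostream>
#include <vector>

int main()
{
    openvdb::initialize();
    using namespace openvdb::points;

    const std::vector<openvdb::Vec3s> positions{{1, 1, 1}, {1, 2, 1}, {10, 10, 1}};
    openvdb::math::Transform::Ptr transform =
        openvdb::math::Transform::createLinearTransform(/*voxelSize=*/1.0);

    PointDataGrid::Ptr grid =
        createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform);
    PointDataTree& tree = grid->tree();

    appendGroup(tree, "cull");        // add an (initially empty) group
    setGroup(tree, "cull", true);     // place every point in the group

    deleteFromGroup(tree, "cull");    // remove the group's members (group dropped by default)

    std::cout << pointCount(tree) << " points remain\n";  // expected: 0
    return 0;
}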
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestIndexFilter.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/points/IndexIterator.h> #include <openvdb/points/IndexFilter.h> #include <openvdb/points/PointAttribute.h> #include <openvdb/points/PointConversion.h> #include <openvdb/points/PointGroup.h> #include <openvdb/points/PointCount.h> #include <sstream> #include <iostream> #include <utility> using namespace openvdb; using namespace openvdb::points; class TestIndexFilter: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } void testRandomLeafFilterImpl(); }; // class TestIndexFilter //////////////////////////////////////// struct OriginLeaf { OriginLeaf(const openvdb::Coord& _leafOrigin, const size_t _size = size_t(0)): leafOrigin(_leafOrigin), size(_size) { } openvdb::Coord origin() const { return leafOrigin; } size_t pointCount() const { return size; } const openvdb::Coord leafOrigin; const size_t size; }; struct SimpleIter { SimpleIter() : i(0) { } int operator*() const { return i; } void operator++() { i++; } openvdb::Coord getCoord() const { return coord; } int i; openvdb::Coord coord; }; template <bool LessThan> class ThresholdFilter { public: ThresholdFilter(const int threshold) : mThreshold(threshold) { } bool isPositiveInteger() const { return mThreshold > 0; } bool isMax() const { return mThreshold == std::numeric_limits<int>::max(); } static bool initialized() { return true; } inline index::State state() const { if (LessThan) { if (isMax()) return index::ALL; else if (!isPositiveInteger()) return index::NONE; } else { if (isMax()) return index::NONE; else if (!isPositiveInteger()) return index::ALL; } return index::PARTIAL; } template <typename LeafT> static index::State state(const LeafT&) { return index::PARTIAL; } template <typename LeafT> void reset(const LeafT&) { } template <typename IterT> bool valid(const IterT& iter) const { return LessThan ? *iter < mThreshold : *iter > mThreshold; } private: const int mThreshold; }; // class ThresholdFilter /// @brief Generates the signed distance to a sphere located at @a center /// and with a specified @a radius (both in world coordinates). Only voxels /// in the domain [0,0,0] -> @a dim are considered. Also note that the /// level set is either dense, dense narrow-band or sparse narrow-band. /// /// @note This method is VERY SLOW and should only be used for debugging purposes! /// However it works for any transform and even with open level sets. /// A faster approch for closed narrow band generation is to only set voxels /// sparsely and then use grid::signedFloodFill to define the sign /// of the background values and tiles! 
This is implemented in openvdb/tools/LevelSetSphere.h template<class GridType> inline void makeSphere(const openvdb::Coord& dim, const openvdb::Vec3f& center, float radius, GridType& grid) { using ValueT = typename GridType::ValueType; const ValueT zero = openvdb::zeroVal<ValueT>(); typename GridType::Accessor acc = grid.getAccessor(); openvdb::Coord xyz; for (xyz[0]=0; xyz[0]<dim[0]; ++xyz[0]) { for (xyz[1]=0; xyz[1]<dim[1]; ++xyz[1]) { for (xyz[2]=0; xyz[2]<dim[2]; ++xyz[2]) { const openvdb::Vec3R p = grid.transform().indexToWorld(xyz); const float dist = float((p-center).length() - radius); ValueT val = ValueT(zero + dist); acc.setValue(xyz, val); } } } } template <typename LeafT> bool multiGroupMatches( const LeafT& leaf, const Index32 size, const std::vector<Name>& include, const std::vector<Name>& exclude, const std::vector<int>& indices) { using IndexGroupIter = IndexIter<ValueVoxelCIter, MultiGroupFilter>; ValueVoxelCIter indexIter(0, size); MultiGroupFilter filter(include, exclude, leaf.attributeSet()); filter.reset(leaf); IndexGroupIter iter(indexIter, filter); for (unsigned i = 0; i < indices.size(); ++i, ++iter) { if (!iter) return false; if (*iter != Index32(indices[i])) return false; } return !iter; } TEST_F(TestIndexFilter, testActiveFilter) { // create a point grid, three points are stored in two leafs PointDataGrid::Ptr points; std::vector<Vec3s> positions{{1, 1, 1}, {1, 2, 1}, {10.1f, 10, 1}}; const double voxelSize(1.0); math::Transform::Ptr transform(math::Transform::createLinearTransform(voxelSize)); points = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); // check there are two leafs EXPECT_EQ(Index32(2), points->tree().leafCount()); ActiveFilter activeFilter; InactiveFilter inActiveFilter; EXPECT_EQ(index::PARTIAL, activeFilter.state()); EXPECT_EQ(index::PARTIAL, inActiveFilter.state()); { // test default active / inactive values auto leafIter = points->tree().cbeginLeaf(); EXPECT_EQ(index::PARTIAL, activeFilter.state(*leafIter)); EXPECT_EQ(index::PARTIAL, inActiveFilter.state(*leafIter)); auto indexIter = leafIter->beginIndexAll(); activeFilter.reset(*leafIter); inActiveFilter.reset(*leafIter); EXPECT_TRUE(activeFilter.valid(indexIter)); EXPECT_TRUE(!inActiveFilter.valid(indexIter)); ++indexIter; EXPECT_TRUE(activeFilter.valid(indexIter)); EXPECT_TRUE(!inActiveFilter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!indexIter); ++leafIter; indexIter = leafIter->beginIndexAll(); activeFilter.reset(*leafIter); inActiveFilter.reset(*leafIter); EXPECT_TRUE(activeFilter.valid(indexIter)); EXPECT_TRUE(!inActiveFilter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!indexIter); } auto firstLeaf = points->tree().beginLeaf(); { // set all voxels to be inactive in the first leaf firstLeaf->getValueMask().set(false); auto leafIter = points->tree().cbeginLeaf(); EXPECT_EQ(index::NONE, activeFilter.state(*leafIter)); EXPECT_EQ(index::ALL, inActiveFilter.state(*leafIter)); auto indexIter = leafIter->beginIndexAll(); activeFilter.reset(*leafIter); inActiveFilter.reset(*leafIter); EXPECT_TRUE(!activeFilter.valid(indexIter)); EXPECT_TRUE(inActiveFilter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!activeFilter.valid(indexIter)); EXPECT_TRUE(inActiveFilter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!indexIter); ++leafIter; indexIter = leafIter->beginIndexAll(); activeFilter.reset(*leafIter); inActiveFilter.reset(*leafIter); EXPECT_TRUE(activeFilter.valid(indexIter)); EXPECT_TRUE(!inActiveFilter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!indexIter); } { // set all 
voxels to be active in the first leaf firstLeaf->getValueMask().set(true); auto leafIter = points->tree().cbeginLeaf(); EXPECT_EQ(index::ALL, activeFilter.state(*leafIter)); EXPECT_EQ(index::NONE, inActiveFilter.state(*leafIter)); auto indexIter = leafIter->beginIndexAll(); activeFilter.reset(*leafIter); inActiveFilter.reset(*leafIter); EXPECT_TRUE(activeFilter.valid(indexIter)); EXPECT_TRUE(!inActiveFilter.valid(indexIter)); ++indexIter; EXPECT_TRUE(activeFilter.valid(indexIter)); EXPECT_TRUE(!inActiveFilter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!indexIter); ++leafIter; indexIter = leafIter->beginIndexAll(); activeFilter.reset(*leafIter); inActiveFilter.reset(*leafIter); EXPECT_TRUE(activeFilter.valid(indexIter)); EXPECT_TRUE(!inActiveFilter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!indexIter); } } TEST_F(TestIndexFilter, testMultiGroupFilter) { using LeafNode = PointDataTree::LeafNodeType; using AttributeVec3f = TypedAttributeArray<Vec3f>; PointDataTree tree; LeafNode* leaf = tree.touchLeaf(openvdb::Coord(0, 0, 0)); using Descriptor = AttributeSet::Descriptor; Descriptor::Ptr descriptor = Descriptor::create(AttributeVec3f::attributeType()); const Index size = 5; leaf->initializeAttributes(descriptor, size); appendGroup(tree, "even"); appendGroup(tree, "odd"); appendGroup(tree, "all"); appendGroup(tree, "first"); { // construction, copy construction std::vector<Name> includeGroups; std::vector<Name> excludeGroups; MultiGroupFilter filter(includeGroups, excludeGroups, leaf->attributeSet()); EXPECT_TRUE(!filter.initialized()); MultiGroupFilter filter2 = filter; EXPECT_TRUE(!filter2.initialized()); filter.reset(*leaf); EXPECT_TRUE(filter.initialized()); MultiGroupFilter filter3 = filter; EXPECT_TRUE(filter3.initialized()); } // group population { // even GroupWriteHandle groupHandle = leaf->groupWriteHandle("even"); groupHandle.set(0, true); groupHandle.set(2, true); groupHandle.set(4, true); } { // odd GroupWriteHandle groupHandle = leaf->groupWriteHandle("odd"); groupHandle.set(1, true); groupHandle.set(3, true); } setGroup(tree, "all", true); { // first GroupWriteHandle groupHandle = leaf->groupWriteHandle("first"); groupHandle.set(0, true); } { // test state() std::vector<Name> include; std::vector<Name> exclude; MultiGroupFilter filter(include, exclude, leaf->attributeSet()); EXPECT_EQ(filter.state(), index::ALL); include.push_back("all"); MultiGroupFilter filter2(include, exclude, leaf->attributeSet()); EXPECT_EQ(filter2.state(), index::PARTIAL); } // test multi group iteration { // all (implicit, no include or exclude) std::vector<Name> include; std::vector<Name> exclude; std::vector<int> indices{0, 1, 2, 3, 4}; EXPECT_TRUE(multiGroupMatches(*leaf, size, include, exclude, indices)); } { // all include std::vector<Name> include{"all"}; std::vector<Name> exclude; std::vector<int> indices{0, 1, 2, 3, 4}; EXPECT_TRUE(multiGroupMatches(*leaf, size, include, exclude, indices)); } { // all exclude std::vector<Name> include; std::vector<Name> exclude{"all"}; std::vector<int> indices; EXPECT_TRUE(multiGroupMatches(*leaf, size, include, exclude, indices)); } { // all include and exclude std::vector<Name> include{"all"}; std::vector<Name> exclude{"all"}; std::vector<int> indices; EXPECT_TRUE(multiGroupMatches(*leaf, size, include, exclude, indices)); } { // even include std::vector<Name> include{"even"}; std::vector<Name> exclude; std::vector<int> indices{0, 2, 4}; EXPECT_TRUE(multiGroupMatches(*leaf, size, include, exclude, indices)); } { // odd include std::vector<Name> 
include{"odd"}; std::vector<Name> exclude; std::vector<int> indices{1, 3}; EXPECT_TRUE(multiGroupMatches(*leaf, size, include, exclude, indices)); } { // odd include and exclude std::vector<Name> include{"odd"}; std::vector<Name> exclude{"odd"}; std::vector<int> indices; EXPECT_TRUE(multiGroupMatches(*leaf, size, include, exclude, indices)); } { // odd and first include std::vector<Name> include{"odd", "first"}; std::vector<Name> exclude; std::vector<int> indices{0, 1, 3}; EXPECT_TRUE(multiGroupMatches(*leaf, size, include, exclude, indices)); } { // even include, first exclude std::vector<Name> include{"even"}; std::vector<Name> exclude{"first"}; std::vector<int> indices{2, 4}; EXPECT_TRUE(multiGroupMatches(*leaf, size, include, exclude, indices)); } { // all include, first and odd exclude std::vector<Name> include{"all"}; std::vector<Name> exclude{"first", "odd"}; std::vector<int> indices{2, 4}; EXPECT_TRUE(multiGroupMatches(*leaf, size, include, exclude, indices)); } { // odd and first include, even exclude std::vector<Name> include{"odd", "first"}; std::vector<Name> exclude{"even"}; std::vector<int> indices{1, 3}; EXPECT_TRUE(multiGroupMatches(*leaf, size, include, exclude, indices)); } } void TestIndexFilter::testRandomLeafFilterImpl() { { // generateRandomSubset std::vector<int> values = index_filter_internal::generateRandomSubset<std::mt19937, int>( /*seed*/unsigned(0), 1, 20); EXPECT_EQ(values.size(), size_t(1)); // different seed std::vector<int> values2 = index_filter_internal::generateRandomSubset<std::mt19937, int>( /*seed*/unsigned(1), 1, 20); EXPECT_EQ(values2.size(), size_t(1)); EXPECT_TRUE(values[0] != values2[0]); // different integer type std::vector<long> values3 = index_filter_internal::generateRandomSubset<std::mt19937, long>( /*seed*/unsigned(0), 1, 20); EXPECT_EQ(values3.size(), size_t(1)); EXPECT_TRUE(values[0] == values3[0]); // different random number generator values = index_filter_internal::generateRandomSubset<std::mt19937_64, int>( /*seed*/unsigned(1), 1, 20); EXPECT_EQ(values.size(), size_t(1)); EXPECT_TRUE(values[0] != values2[0]); // no values values = index_filter_internal::generateRandomSubset<std::mt19937, int>( /*seed*/unsigned(0), 0, 20); EXPECT_EQ(values.size(), size_t(0)); // all values values = index_filter_internal::generateRandomSubset<std::mt19937, int>( /*seed*/unsigned(0), 1000, 1000); EXPECT_EQ(values.size(), size_t(1000)); // ensure all numbers are represented std::sort(values.begin(), values.end()); for (int i = 0; i < 1000; i++) { EXPECT_EQ(values[i], i); } } { // RandomLeafFilter using RandFilter = RandomLeafFilter<PointDataTree, std::mt19937>; PointDataTree tree; RandFilter filter(tree, 0); EXPECT_TRUE(filter.state() == index::PARTIAL); filter.mLeafMap[Coord(0, 0, 0)] = std::make_pair(0, 10); filter.mLeafMap[Coord(0, 0, 8)] = std::make_pair(1, 1); filter.mLeafMap[Coord(0, 8, 0)] = std::make_pair(2, 50); { // construction, copy construction EXPECT_TRUE(filter.initialized()); RandFilter filter2 = filter; EXPECT_TRUE(filter2.initialized()); filter.reset(OriginLeaf(Coord(0, 0, 0), 10)); EXPECT_TRUE(filter.initialized()); RandFilter filter3 = filter; EXPECT_TRUE(filter3.initialized()); } { // all 10 values filter.reset(OriginLeaf(Coord(0, 0, 0), 10)); std::vector<int> values; for (SimpleIter iter; *iter < 100; ++iter) { if (filter.valid(iter)) values.push_back(*iter); } EXPECT_EQ(values.size(), size_t(10)); for (int i = 0; i < 10; i++) { EXPECT_EQ(values[i], i); } } { // 50 of 100 filter.reset(OriginLeaf(Coord(0, 8, 0), 100)); std::vector<int> 
values; for (SimpleIter iter; *iter < 100; ++iter) { if (filter.valid(iter)) values.push_back(*iter); } EXPECT_EQ(values.size(), size_t(50)); // ensure no duplicates std::sort(values.begin(), values.end()); auto it = std::adjacent_find(values.begin(), values.end()); EXPECT_TRUE(it == values.end()); } } } TEST_F(TestIndexFilter, testRandomLeafFilter) { testRandomLeafFilterImpl(); } inline void setId(PointDataTree& tree, const size_t index, const std::vector<int>& ids) { int offset = 0; for (auto leafIter = tree.beginLeaf(); leafIter; ++leafIter) { auto id = AttributeWriteHandle<int>::create(leafIter->attributeArray(index)); for (auto iter = leafIter->beginIndexAll(); iter; ++iter) { if (offset >= int(ids.size())) throw std::runtime_error("Out of range"); id->set(*iter, ids[offset++]); } } } TEST_F(TestIndexFilter, testAttributeHashFilter) { std::vector<Vec3s> positions{{1, 1, 1}, {2, 2, 2}, {11, 11, 11}, {12, 12, 12}}; const float voxelSize(1.0); math::Transform::Ptr transform(math::Transform::createLinearTransform(voxelSize)); PointDataGrid::Ptr grid = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); PointDataTree& tree = grid->tree(); // four points, two leafs EXPECT_EQ(tree.leafCount(), Index32(2)); appendAttribute<int>(tree, "id"); const size_t index = tree.cbeginLeaf()->attributeSet().descriptor().find("id"); // ascending integers, block one std::vector<int> ids{1, 2, 3, 4}; setId(tree, index, ids); using HashFilter = AttributeHashFilter<std::mt19937, int>; { // construction, copy construction HashFilter filter(index, 0.0f); EXPECT_TRUE(filter.state() == index::PARTIAL); EXPECT_TRUE(!filter.initialized()); HashFilter filter2 = filter; EXPECT_TRUE(!filter2.initialized()); filter.reset(*tree.cbeginLeaf()); EXPECT_TRUE(filter.initialized()); HashFilter filter3 = filter; EXPECT_TRUE(filter3.initialized()); } { // zero percent HashFilter filter(index, 0.0f); auto leafIter = tree.cbeginLeaf(); auto indexIter = leafIter->beginIndexAll(); filter.reset(*leafIter); EXPECT_TRUE(!filter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!filter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!indexIter); ++leafIter; indexIter = leafIter->beginIndexAll(); filter.reset(*leafIter); EXPECT_TRUE(!filter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!filter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!indexIter); } { // one hundred percent HashFilter filter(index, 100.0f); auto leafIter = tree.cbeginLeaf(); auto indexIter = leafIter->beginIndexAll(); filter.reset(*leafIter); EXPECT_TRUE(filter.valid(indexIter)); ++indexIter; EXPECT_TRUE(filter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!indexIter); ++leafIter; indexIter = leafIter->beginIndexAll(); filter.reset(*leafIter); EXPECT_TRUE(filter.valid(indexIter)); ++indexIter; EXPECT_TRUE(filter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!indexIter); } { // fifty percent HashFilter filter(index, 50.0f); auto leafIter = tree.cbeginLeaf(); auto indexIter = leafIter->beginIndexAll(); filter.reset(*leafIter); EXPECT_TRUE(!filter.valid(indexIter)); ++indexIter; EXPECT_TRUE(filter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!indexIter); ++leafIter; indexIter = leafIter->beginIndexAll(); filter.reset(*leafIter); EXPECT_TRUE(filter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!filter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!indexIter); } { // fifty percent, new seed HashFilter filter(index, 50.0f, /*seed=*/100); auto leafIter = tree.cbeginLeaf(); auto indexIter = leafIter->beginIndexAll(); filter.reset(*leafIter); EXPECT_TRUE(!filter.valid(indexIter)); 
++indexIter; EXPECT_TRUE(filter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!indexIter); ++leafIter; indexIter = leafIter->beginIndexAll(); filter.reset(*leafIter); EXPECT_TRUE(filter.valid(indexIter)); ++indexIter; EXPECT_TRUE(filter.valid(indexIter)); ++indexIter; EXPECT_TRUE(!indexIter); } } TEST_F(TestIndexFilter, testLevelSetFilter) { // create a point grid PointDataGrid::Ptr points; { std::vector<Vec3s> positions{{1, 1, 1}, {1, 2, 1}, {10.1f, 10, 1}}; const double voxelSize(1.0); math::Transform::Ptr transform(math::Transform::createLinearTransform(voxelSize)); points = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); } // create a sphere levelset FloatGrid::Ptr sphere; { double voxelSize = 0.5; sphere = FloatGrid::create(/*backgroundValue=*/5.0); sphere->setTransform(math::Transform::createLinearTransform(voxelSize)); const openvdb::Coord dim(10, 10, 10); const openvdb::Vec3f center(0.0f, 0.0f, 0.0f); const float radius = 2; makeSphere<FloatGrid>(dim, center, radius, *sphere); } using LSFilter = LevelSetFilter<FloatGrid>; { // construction, copy construction LSFilter filter(*sphere, points->transform(), -4.0f, 4.0f); EXPECT_TRUE(filter.state() == index::PARTIAL); EXPECT_TRUE(!filter.initialized()); LSFilter filter2 = filter; EXPECT_TRUE(!filter2.initialized()); filter.reset(* points->tree().cbeginLeaf()); EXPECT_TRUE(filter.initialized()); LSFilter filter3 = filter; EXPECT_TRUE(filter3.initialized()); } { // capture both points near origin LSFilter filter(*sphere, points->transform(), -4.0f, 4.0f); auto leafIter = points->tree().cbeginLeaf(); auto iter = leafIter->beginIndexOn(); filter.reset(*leafIter); EXPECT_TRUE(filter.valid(iter)); ++iter; EXPECT_TRUE(filter.valid(iter)); ++iter; EXPECT_TRUE(!iter); ++leafIter; iter = leafIter->beginIndexOn(); filter.reset(*leafIter); EXPECT_TRUE(iter); EXPECT_TRUE(!filter.valid(iter)); ++iter; EXPECT_TRUE(!iter); } { // capture just the inner-most point LSFilter filter(*sphere, points->transform(), -0.3f, -0.25f); auto leafIter = points->tree().cbeginLeaf(); auto iter = leafIter->beginIndexOn(); filter.reset(*leafIter); EXPECT_TRUE(filter.valid(iter)); ++iter; EXPECT_TRUE(!filter.valid(iter)); ++iter; EXPECT_TRUE(!iter); ++leafIter; iter = leafIter->beginIndexOn(); filter.reset(*leafIter); EXPECT_TRUE(iter); EXPECT_TRUE(!filter.valid(iter)); ++iter; EXPECT_TRUE(!iter); } { // capture everything but the second point (min > max) LSFilter filter(*sphere, points->transform(), -0.25f, -0.3f); auto leafIter = points->tree().cbeginLeaf(); auto iter = leafIter->beginIndexOn(); filter.reset(*leafIter); EXPECT_TRUE(!filter.valid(iter)); ++iter; EXPECT_TRUE(filter.valid(iter)); ++iter; EXPECT_TRUE(!iter); ++leafIter; iter = leafIter->beginIndexOn(); filter.reset(*leafIter); EXPECT_TRUE(iter); EXPECT_TRUE(filter.valid(iter)); ++iter; EXPECT_TRUE(!iter); } { std::vector<Vec3s> positions{{1, 1, 1}, {1, 2, 1}, {10.1f, 10, 1}}; const double voxelSize(0.25); math::Transform::Ptr transform(math::Transform::createLinearTransform(voxelSize)); points = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); } { double voxelSize = 1.0; sphere = FloatGrid::create(/*backgroundValue=*/5.0); sphere->setTransform(math::Transform::createLinearTransform(voxelSize)); const openvdb::Coord dim(40, 40, 40); const openvdb::Vec3f center(10.0f, 10.0f, 0.1f); const float radius = 0.2f; makeSphere<FloatGrid>(dim, center, radius, *sphere); } { // capture only the last point using a different transform and a new sphere LSFilter filter(*sphere, 
points->transform(), 0.5f, 1.0f); auto leafIter = points->tree().cbeginLeaf(); auto iter = leafIter->beginIndexOn(); filter.reset(*leafIter); EXPECT_TRUE(!filter.valid(iter)); ++iter; EXPECT_TRUE(!iter); ++leafIter; iter = leafIter->beginIndexOn(); filter.reset(*leafIter); EXPECT_TRUE(!filter.valid(iter)); ++iter; EXPECT_TRUE(!iter); ++leafIter; iter = leafIter->beginIndexOn(); filter.reset(*leafIter); EXPECT_TRUE(iter); EXPECT_TRUE(filter.valid(iter)); ++iter; EXPECT_TRUE(!iter); } } TEST_F(TestIndexFilter, testBBoxFilter) { std::vector<Vec3s> positions{{1, 1, 1}, {1, 2, 1}, {10.1f, 10, 1}}; const float voxelSize(0.5); math::Transform::Ptr transform(math::Transform::createLinearTransform(voxelSize)); PointDataGrid::Ptr grid = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); PointDataTree& tree = grid->tree(); // check one leaf per point EXPECT_EQ(tree.leafCount(), Index32(2)); // build some bounding box filters to test BBoxFilter filter1(*transform, BBoxd({0.5, 0.5, 0.5}, {1.5, 1.5, 1.5})); BBoxFilter filter2(*transform, BBoxd({0.5, 0.5, 0.5}, {1.5, 2.01, 1.5})); BBoxFilter filter3(*transform, BBoxd({0.5, 0.5, 0.5}, {11, 11, 1.5})); BBoxFilter filter4(*transform, BBoxd({-10, 0, 0}, {11, 1.2, 1.2})); { // construction, copy construction EXPECT_TRUE(!filter1.initialized()); BBoxFilter filter5 = filter1; EXPECT_TRUE(!filter5.initialized()); filter1.reset(*tree.cbeginLeaf()); EXPECT_TRUE(filter1.initialized()); BBoxFilter filter6 = filter1; EXPECT_TRUE(filter6.initialized()); } // leaf 1 auto leafIter = tree.cbeginLeaf(); { auto iter(leafIter->beginIndexOn()); // point 1 filter1.reset(*leafIter); EXPECT_TRUE(filter1.valid(iter)); filter2.reset(*leafIter); EXPECT_TRUE(filter2.valid(iter)); filter3.reset(*leafIter); EXPECT_TRUE(filter3.valid(iter)); filter4.reset(*leafIter); EXPECT_TRUE(filter4.valid(iter)); ++iter; // point 2 filter1.reset(*leafIter); EXPECT_TRUE(!filter1.valid(iter)); filter2.reset(*leafIter); EXPECT_TRUE(filter2.valid(iter)); filter3.reset(*leafIter); EXPECT_TRUE(filter3.valid(iter)); filter4.reset(*leafIter); EXPECT_TRUE(!filter4.valid(iter)); ++iter; EXPECT_TRUE(!iter); } ++leafIter; // leaf 2 { auto iter(leafIter->beginIndexOn()); // point 3 filter1.reset(*leafIter); EXPECT_TRUE(!filter1.valid(iter)); filter2.reset(*leafIter); EXPECT_TRUE(!filter2.valid(iter)); filter3.reset(*leafIter); EXPECT_TRUE(filter3.valid(iter)); filter4.reset(*leafIter); EXPECT_TRUE(!filter4.valid(iter)); ++iter; EXPECT_TRUE(!iter); } } struct NeedsInitializeFilter { inline bool initialized() const { return mInitialized; } static index::State state() { return index::PARTIAL; } template <typename LeafT> inline index::State state(const LeafT&) { return index::PARTIAL; } template <typename LeafT> void reset(const LeafT&) { mInitialized = true; } private: bool mInitialized = false; }; TEST_F(TestIndexFilter, testBinaryFilter) { const int intMax = std::numeric_limits<int>::max(); { // construction, copy construction using InitializeBinaryFilter = BinaryFilter<NeedsInitializeFilter, NeedsInitializeFilter, /*And=*/true>; NeedsInitializeFilter needs1; NeedsInitializeFilter needs2; InitializeBinaryFilter filter(needs1, needs2); EXPECT_TRUE(filter.state() == index::PARTIAL); EXPECT_TRUE(!filter.initialized()); InitializeBinaryFilter filter2 = filter; EXPECT_TRUE(!filter2.initialized()); filter.reset(OriginLeaf(Coord(0, 0, 0))); EXPECT_TRUE(filter.initialized()); InitializeBinaryFilter filter3 = filter; EXPECT_TRUE(filter3.initialized()); } using LessThanFilter = ThresholdFilter<true>; 
using GreaterThanFilter = ThresholdFilter<false>; { // less than LessThanFilter zeroFilter(0); // all invalid EXPECT_TRUE(zeroFilter.state() == index::NONE); LessThanFilter maxFilter(intMax); // all valid EXPECT_TRUE(maxFilter.state() == index::ALL); LessThanFilter filter(5); filter.reset(OriginLeaf(Coord(0, 0, 0))); std::vector<int> values; for (SimpleIter iter; *iter < 100; ++iter) { if (filter.valid(iter)) values.push_back(*iter); } EXPECT_EQ(values.size(), size_t(5)); for (int i = 0; i < 5; i++) { EXPECT_EQ(values[i], i); } } { // greater than GreaterThanFilter zeroFilter(0); // all valid EXPECT_TRUE(zeroFilter.state() == index::ALL); GreaterThanFilter maxFilter(intMax); // all invalid EXPECT_TRUE(maxFilter.state() == index::NONE); GreaterThanFilter filter(94); filter.reset(OriginLeaf(Coord(0, 0, 0))); std::vector<int> values; for (SimpleIter iter; *iter < 100; ++iter) { if (filter.valid(iter)) values.push_back(*iter); } EXPECT_EQ(values.size(), size_t(5)); int offset = 0; for (int i = 95; i < 100; i++) { EXPECT_EQ(values[offset++], i); } } { // binary and using RangeFilter = BinaryFilter<LessThanFilter, GreaterThanFilter, /*And=*/true>; RangeFilter zeroFilter(LessThanFilter(0), GreaterThanFilter(10)); // all invalid EXPECT_TRUE(zeroFilter.state() == index::NONE); RangeFilter maxFilter(LessThanFilter(intMax), GreaterThanFilter(0)); // all valid EXPECT_TRUE(maxFilter.state() == index::ALL); RangeFilter filter(LessThanFilter(55), GreaterThanFilter(45)); EXPECT_TRUE(filter.state() == index::PARTIAL); filter.reset(OriginLeaf(Coord(0, 0, 0))); std::vector<int> values; for (SimpleIter iter; *iter < 100; ++iter) { if (filter.valid(iter)) values.push_back(*iter); } EXPECT_EQ(values.size(), size_t(9)); int offset = 0; for (int i = 46; i < 55; i++) { EXPECT_EQ(values[offset++], i); } } { // binary or using HeadTailFilter = BinaryFilter<LessThanFilter, GreaterThanFilter, /*And=*/false>; HeadTailFilter zeroFilter(LessThanFilter(0), GreaterThanFilter(10)); // some valid EXPECT_TRUE(zeroFilter.state() == index::PARTIAL); HeadTailFilter maxFilter(LessThanFilter(intMax), GreaterThanFilter(0)); // all valid EXPECT_TRUE(maxFilter.state() == index::ALL); HeadTailFilter filter(LessThanFilter(5), GreaterThanFilter(95)); filter.reset(OriginLeaf(Coord(0, 0, 0))); std::vector<int> values; for (SimpleIter iter; *iter < 100; ++iter) { if (filter.valid(iter)) values.push_back(*iter); } EXPECT_EQ(values.size(), size_t(9)); int offset = 0; for (int i = 0; i < 5; i++) { EXPECT_EQ(values[offset++], i); } for (int i = 96; i < 100; i++) { EXPECT_EQ(values[offset++], i); } } }
32,311
C++
29.425612
112
0.600291
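The TestIndexFilter row above exercises the point index filters (BBoxFilter, LevelSetFilter, ThresholdFilter, BinaryFilter) through a common protocol: bind the filter to a PointDataGrid leaf with reset(), then query each point index with valid(). The sketch below distills that pattern into a standalone program; the createPointDataGrid/BBoxFilter calls and the box extents mirror the test itself, while the main() wrapper and the header choices are illustrative assumptions rather than part of the original file.

#include <openvdb/openvdb.h>
#include <openvdb/points/PointDataGrid.h>
#include <openvdb/points/PointConversion.h>  // createPointDataGrid (assumed header)
#include <openvdb/points/IndexFilter.h>      // BBoxFilter (assumed header)
#include <vector>

int main()
{
    openvdb::initialize();
    using namespace openvdb;
    using namespace openvdb::points;

    // Three points; at this voxel size the first two land in the leaf near the origin.
    std::vector<Vec3s> positions{{1, 1, 1}, {1, 2, 1}, {10.1f, 10, 1}};
    math::Transform::Ptr xform(math::Transform::createLinearTransform(/*voxelSize=*/0.5));
    PointDataGrid::Ptr grid = createPointDataGrid<NullCodec, PointDataGrid>(positions, *xform);

    // Count the points whose world-space positions fall inside a bounding box.
    BBoxFilter filter(*xform, BBoxd({0.5, 0.5, 0.5}, {1.5, 2.01, 1.5}));
    size_t count = 0;
    for (auto leaf = grid->tree().cbeginLeaf(); leaf; ++leaf) {
        filter.reset(*leaf);                    // bind the filter to this leaf
        for (auto iter = leaf->beginIndexOn(); iter; ++iter) {
            if (filter.valid(iter)) ++count;    // per-index membership test
        }
    }
    return int(count); // the test's filter2 accepts 2 of these 3 points
}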
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestPointsToMask.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <openvdb/openvdb.h> #include <openvdb/math/Math.h> // for math::Random01 #include <openvdb/tools/PointsToMask.h> #include <openvdb/util/CpuTimer.h> #include "gtest/gtest.h" #include <vector> #include <algorithm> #include <cmath> #include "util.h" // for genPoints struct TestPointsToMask: public ::testing::Test { }; //////////////////////////////////////// namespace { class PointList { public: PointList(const std::vector<openvdb::Vec3R>& points) : mPoints(&points) {} size_t size() const { return mPoints->size(); } void getPos(size_t n, openvdb::Vec3R& xyz) const { xyz = (*mPoints)[n]; } protected: std::vector<openvdb::Vec3R> const * const mPoints; }; // PointList } // namespace //////////////////////////////////////// TEST_F(TestPointsToMask, testPointsToMask) { {// BoolGrid // generate one point std::vector<openvdb::Vec3R> points; points.push_back( openvdb::Vec3R(-19.999, 4.50001, 6.71) ); //points.push_back( openvdb::Vec3R( 20,-4.5,-5.2) ); PointList pointList(points); // construct an empty mask grid openvdb::BoolGrid grid( false ); const float voxelSize = 0.1f; grid.setTransform( openvdb::math::Transform::createLinearTransform(voxelSize) ); EXPECT_TRUE( grid.empty() ); // generate mask from points openvdb::tools::PointsToMask<openvdb::BoolGrid> mask( grid ); mask.addPoints( pointList ); EXPECT_TRUE(!grid.empty() ); EXPECT_EQ( 1, int(grid.activeVoxelCount()) ); openvdb::BoolGrid::ValueOnCIter iter = grid.cbeginValueOn(); //std::cerr << "Coord = " << iter.getCoord() << std::endl; const openvdb::Coord p(-200, 45, 67); EXPECT_TRUE( iter.getCoord() == p ); EXPECT_TRUE(grid.tree().isValueOn( p ) ); } {// MaskGrid // generate one point std::vector<openvdb::Vec3R> points; points.push_back( openvdb::Vec3R(-19.999, 4.50001, 6.71) ); //points.push_back( openvdb::Vec3R( 20,-4.5,-5.2) ); PointList pointList(points); // construct an empty mask grid openvdb::MaskGrid grid( false ); const float voxelSize = 0.1f; grid.setTransform( openvdb::math::Transform::createLinearTransform(voxelSize) ); EXPECT_TRUE( grid.empty() ); // generate mask from points openvdb::tools::PointsToMask<> mask( grid ); mask.addPoints( pointList ); EXPECT_TRUE(!grid.empty() ); EXPECT_EQ( 1, int(grid.activeVoxelCount()) ); openvdb::TopologyGrid::ValueOnCIter iter = grid.cbeginValueOn(); //std::cerr << "Coord = " << iter.getCoord() << std::endl; const openvdb::Coord p(-200, 45, 67); EXPECT_TRUE( iter.getCoord() == p ); EXPECT_TRUE(grid.tree().isValueOn( p ) ); } // generate shared transformation openvdb::Index64 voxelCount = 0; const float voxelSize = 0.001f; const openvdb::math::Transform::Ptr xform = openvdb::math::Transform::createLinearTransform(voxelSize); // generate lots of points std::vector<openvdb::Vec3R> points; unittest_util::genPoints(15000000, points); PointList pointList(points); //openvdb::util::CpuTimer timer; {// serial BoolGrid // construct an empty mask grid openvdb::BoolGrid grid( false ); grid.setTransform( xform ); EXPECT_TRUE( grid.empty() ); // generate mask from points openvdb::tools::PointsToMask<openvdb::BoolGrid> mask( grid ); //timer.start("\nSerial BoolGrid"); mask.addPoints( pointList, 0 ); //timer.stop(); EXPECT_TRUE(!grid.empty() ); //grid.print(std::cerr, 3); voxelCount = grid.activeVoxelCount(); } {// parallel BoolGrid // construct an empty mask grid openvdb::BoolGrid grid( false ); grid.setTransform( xform ); EXPECT_TRUE( grid.empty() ); // generate mask from points 
openvdb::tools::PointsToMask<openvdb::BoolGrid> mask( grid ); //timer.start("\nParallel BoolGrid"); mask.addPoints( pointList ); //timer.stop(); EXPECT_TRUE(!grid.empty() ); //grid.print(std::cerr, 3); EXPECT_EQ( voxelCount, grid.activeVoxelCount() ); } {// parallel MaskGrid // construct an empty mask grid openvdb::MaskGrid grid( false ); grid.setTransform( xform ); EXPECT_TRUE( grid.empty() ); // generate mask from points openvdb::tools::PointsToMask<> mask( grid ); //timer.start("\nParallel MaskGrid"); mask.addPoints( pointList ); //timer.stop(); EXPECT_TRUE(!grid.empty() ); //grid.print(std::cerr, 3); EXPECT_EQ( voxelCount, grid.activeVoxelCount() ); } {// parallel create TopologyGrid //timer.start("\nParallel Create MaskGrid"); openvdb::MaskGrid::Ptr grid = openvdb::tools::createPointMask(pointList, *xform); //timer.stop(); EXPECT_TRUE(!grid->empty() ); //grid->print(std::cerr, 3); EXPECT_EQ( voxelCount, grid->activeVoxelCount() ); } }
5,265
C++
30.722891
89
0.596581
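The TestPointsToMask row above drives tools::PointsToMask and tools::createPointMask with any point container that exposes size() and getPos(). A minimal standalone sketch of the one-call form follows; the VecPoints adapter name, the sample coordinates, the voxel size and the main() wrapper are illustrative assumptions, while the createPointMask call and the adapter interface come straight from the test.

#include <openvdb/openvdb.h>
#include <openvdb/tools/PointsToMask.h>
#include <vector>

// Minimal point-list adapter: the tool only needs size() and getPos().
struct VecPoints {
    const std::vector<openvdb::Vec3R>* pts;
    size_t size() const { return pts->size(); }
    void getPos(size_t n, openvdb::Vec3R& xyz) const { xyz = (*pts)[n]; }
};

int main()
{
    openvdb::initialize();
    std::vector<openvdb::Vec3R> points{{-19.999, 4.50001, 6.71}, {20.0, -4.5, -5.2}};
    VecPoints list{&points};

    // One-call form: build a MaskGrid whose active voxels cover the points.
    auto xform = openvdb::math::Transform::createLinearTransform(/*voxelSize=*/0.1);
    openvdb::MaskGrid::Ptr mask = openvdb::tools::createPointMask(list, *xform);
    return int(mask->activeVoxelCount()); // expect 2 for these well-separated points
}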
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestLeafManager.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <openvdb/Types.h> #include <openvdb/tree/LeafManager.h> #include <openvdb/util/CpuTimer.h> #include "util.h" // for unittest_util::makeSphere() #include "gtest/gtest.h" class TestLeafManager: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; TEST_F(TestLeafManager, testBasics) { using openvdb::CoordBBox; using openvdb::Coord; using openvdb::Vec3f; using openvdb::FloatGrid; using openvdb::FloatTree; const Vec3f center(0.35f, 0.35f, 0.35f); const float radius = 0.15f; const int dim = 128, half_width = 5; const float voxel_size = 1.0f/dim; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/half_width*voxel_size); FloatTree& tree = grid->tree(); grid->setTransform(openvdb::math::Transform::createLinearTransform(/*voxel size=*/voxel_size)); unittest_util::makeSphere<FloatGrid>( Coord(dim), center, radius, *grid, unittest_util::SPHERE_SPARSE_NARROW_BAND); const size_t leafCount = tree.leafCount(); //grid->print(std::cout, 3); {// test with no aux buffers openvdb::tree::LeafManager<FloatTree> r(tree); EXPECT_EQ(leafCount, r.leafCount()); EXPECT_EQ(size_t(0), r.auxBufferCount()); EXPECT_EQ(size_t(0), r.auxBuffersPerLeaf()); size_t n = 0; for (FloatTree::LeafCIter iter=tree.cbeginLeaf(); iter; ++iter, ++n) { EXPECT_TRUE(r.leaf(n) == *iter); EXPECT_TRUE(r.getBuffer(n,0) == iter->buffer()); } EXPECT_EQ(r.leafCount(), n); EXPECT_TRUE(!r.swapBuffer(0,0)); r.rebuildAuxBuffers(2); EXPECT_EQ(leafCount, r.leafCount()); EXPECT_EQ(size_t(2), r.auxBuffersPerLeaf()); EXPECT_EQ(size_t(2*leafCount),r.auxBufferCount()); for (n=0; n<leafCount; ++n) { EXPECT_TRUE(r.getBuffer(n,0) == r.getBuffer(n,1)); EXPECT_TRUE(r.getBuffer(n,1) == r.getBuffer(n,2)); EXPECT_TRUE(r.getBuffer(n,0) == r.getBuffer(n,2)); } } {// test with 2 aux buffers openvdb::tree::LeafManager<FloatTree> r(tree, 2); EXPECT_EQ(leafCount, r.leafCount()); EXPECT_EQ(size_t(2), r.auxBuffersPerLeaf()); EXPECT_EQ(size_t(2*leafCount),r.auxBufferCount()); size_t n = 0; for (FloatTree::LeafCIter iter=tree.cbeginLeaf(); iter; ++iter, ++n) { EXPECT_TRUE(r.leaf(n) == *iter); EXPECT_TRUE(r.getBuffer(n,0) == iter->buffer()); EXPECT_TRUE(r.getBuffer(n,0) == r.getBuffer(n,1)); EXPECT_TRUE(r.getBuffer(n,1) == r.getBuffer(n,2)); EXPECT_TRUE(r.getBuffer(n,0) == r.getBuffer(n,2)); } EXPECT_EQ(r.leafCount(), n); for (n=0; n<leafCount; ++n) r.leaf(n).buffer().setValue(4,2.4f); for (n=0; n<leafCount; ++n) { EXPECT_TRUE(r.getBuffer(n,0) != r.getBuffer(n,1)); EXPECT_TRUE(r.getBuffer(n,1) == r.getBuffer(n,2)); EXPECT_TRUE(r.getBuffer(n,0) != r.getBuffer(n,2)); } r.syncAllBuffers(); for (n=0; n<leafCount; ++n) { EXPECT_TRUE(r.getBuffer(n,0) == r.getBuffer(n,1)); EXPECT_TRUE(r.getBuffer(n,1) == r.getBuffer(n,2)); EXPECT_TRUE(r.getBuffer(n,0) == r.getBuffer(n,2)); } for (n=0; n<leafCount; ++n) r.getBuffer(n,1).setValue(4,5.4f); for (n=0; n<leafCount; ++n) { EXPECT_TRUE(r.getBuffer(n,0) != r.getBuffer(n,1)); EXPECT_TRUE(r.getBuffer(n,1) != r.getBuffer(n,2)); EXPECT_TRUE(r.getBuffer(n,0) == r.getBuffer(n,2)); } EXPECT_TRUE(r.swapLeafBuffer(1)); for (n=0; n<leafCount; ++n) { EXPECT_TRUE(r.getBuffer(n,0) != r.getBuffer(n,1)); EXPECT_TRUE(r.getBuffer(n,1) == r.getBuffer(n,2)); EXPECT_TRUE(r.getBuffer(n,0) != r.getBuffer(n,2)); } r.syncAuxBuffer(1); for (n=0; n<leafCount; ++n) { EXPECT_TRUE(r.getBuffer(n,0) == r.getBuffer(n,1)); EXPECT_TRUE(r.getBuffer(n,1) != r.getBuffer(n,2)); 
EXPECT_TRUE(r.getBuffer(n,0) != r.getBuffer(n,2)); } r.syncAuxBuffer(2); for (n=0; n<leafCount; ++n) { EXPECT_TRUE(r.getBuffer(n,0) == r.getBuffer(n,1)); EXPECT_TRUE(r.getBuffer(n,1) == r.getBuffer(n,2)); } } {// test with const tree (buffers are not swappable) openvdb::tree::LeafManager<const FloatTree> r(tree); for (size_t numAuxBuffers = 0; numAuxBuffers <= 2; numAuxBuffers += 2) { r.rebuildAuxBuffers(numAuxBuffers); EXPECT_EQ(leafCount, r.leafCount()); EXPECT_EQ(int(numAuxBuffers * leafCount), int(r.auxBufferCount())); EXPECT_EQ(numAuxBuffers, r.auxBuffersPerLeaf()); size_t n = 0; for (FloatTree::LeafCIter iter = tree.cbeginLeaf(); iter; ++iter, ++n) { EXPECT_TRUE(r.leaf(n) == *iter); // Verify that each aux buffer was initialized with a copy of the leaf buffer. for (size_t bufIdx = 0; bufIdx < numAuxBuffers; ++bufIdx) { EXPECT_TRUE(r.getBuffer(n, bufIdx) == iter->buffer()); } } EXPECT_EQ(r.leafCount(), n); for (size_t i = 0; i < numAuxBuffers; ++i) { for (size_t j = 0; j < numAuxBuffers; ++j) { // Verify that swapping buffers with themselves and swapping // leaf buffers with aux buffers have no effect. const bool canSwap = (i != j && i != 0 && j != 0); EXPECT_EQ(canSwap, r.swapBuffer(i, j)); } } } } } TEST_F(TestLeafManager, testActiveLeafVoxelCount) { using namespace openvdb; for (const Int32 dim: { 87, 1023, 1024, 2023 }) { const CoordBBox denseBBox{Coord{0}, Coord{dim - 1}}; const auto size = denseBBox.volume(); // Create a large dense tree for testing but use a MaskTree to // minimize the memory overhead MaskTree tree{false}; tree.denseFill(denseBBox, true, true); // Add some tiles, which should not contribute to the leaf voxel count. tree.addTile(/*level=*/2, Coord{10000}, true, true); tree.addTile(/*level=*/1, Coord{-10000}, true, true); tree.addTile(/*level=*/1, Coord{20000}, false, false); tree::LeafManager<MaskTree> mgr(tree); // On a dual CPU Intel(R) Xeon(R) E5-2697 v3 @ 2.60GHz // the speedup of LeafManager::activeLeafVoxelCount over // Tree::activeLeafVoxelCount is ~15x (assuming a LeafManager already exists) //openvdb::util::CpuTimer t("\nTree::activeVoxelCount"); const auto treeActiveVoxels = tree.activeVoxelCount(); //t.restart("\nTree::activeLeafVoxelCount"); const auto treeActiveLeafVoxels = tree.activeLeafVoxelCount(); //t.restart("\nLeafManager::activeLeafVoxelCount"); const auto mgrActiveLeafVoxels = mgr.activeLeafVoxelCount();//multi-threaded //t.stop(); //std::cerr << "Old1 = " << treeActiveVoxels << " old2 = " << treeActiveLeafVoxels // << " New = " << mgrActiveLeafVoxels << std::endl; EXPECT_TRUE(size < treeActiveVoxels); EXPECT_EQ(size, treeActiveLeafVoxels); EXPECT_EQ(size, mgrActiveLeafVoxels); } } namespace { struct ForeachOp { ForeachOp(float v) : mV(v) {} template <typename T> void operator()(T &leaf, size_t) const { for (typename T::ValueOnIter iter = leaf.beginValueOn(); iter; ++iter) { if ( *iter > mV) iter.setValue( 2.0f ); } } const float mV; };// ForeachOp struct ReduceOp { ReduceOp(float v) : mV(v), mN(0) {} ReduceOp(const ReduceOp &other) : mV(other.mV), mN(other.mN) {} ReduceOp(const ReduceOp &other, tbb::split) : mV(other.mV), mN(0) {} template <typename T> void operator()(T &leaf, size_t) { for (typename T::ValueOnIter iter = leaf.beginValueOn(); iter; ++iter) { if ( *iter > mV) ++mN; } } void join(const ReduceOp &other) {mN += other.mN;} const float mV; openvdb::Index mN; };// ReduceOp }//unnamed namespace TEST_F(TestLeafManager, testForeach) { using namespace openvdb; FloatTree tree( 0.0f ); const int dim = 
int(FloatTree::LeafNodeType::dim()); const CoordBBox bbox1(Coord(0),Coord(dim-1)); const CoordBBox bbox2(Coord(dim),Coord(2*dim-1)); tree.fill( bbox1, -1.0f); tree.fill( bbox2, 1.0f); tree.voxelizeActiveTiles(); for (CoordBBox::Iterator<true> iter(bbox1); iter; ++iter) { EXPECT_EQ( -1.0f, tree.getValue(*iter)); } for (CoordBBox::Iterator<true> iter(bbox2); iter; ++iter) { EXPECT_EQ( 1.0f, tree.getValue(*iter)); } tree::LeafManager<FloatTree> r(tree); EXPECT_EQ(size_t(2), r.leafCount()); EXPECT_EQ(size_t(0), r.auxBufferCount()); EXPECT_EQ(size_t(0), r.auxBuffersPerLeaf()); ForeachOp op(0.0f); r.foreach(op); EXPECT_EQ(size_t(2), r.leafCount()); EXPECT_EQ(size_t(0), r.auxBufferCount()); EXPECT_EQ(size_t(0), r.auxBuffersPerLeaf()); for (CoordBBox::Iterator<true> iter(bbox1); iter; ++iter) { EXPECT_EQ( -1.0f, tree.getValue(*iter)); } for (CoordBBox::Iterator<true> iter(bbox2); iter; ++iter) { EXPECT_EQ( 2.0f, tree.getValue(*iter)); } } TEST_F(TestLeafManager, testReduce) { using namespace openvdb; FloatTree tree( 0.0f ); const int dim = int(FloatTree::LeafNodeType::dim()); const CoordBBox bbox1(Coord(0),Coord(dim-1)); const CoordBBox bbox2(Coord(dim),Coord(2*dim-1)); tree.fill( bbox1, -1.0f); tree.fill( bbox2, 1.0f); tree.voxelizeActiveTiles(); for (CoordBBox::Iterator<true> iter(bbox1); iter; ++iter) { EXPECT_EQ( -1.0f, tree.getValue(*iter)); } for (CoordBBox::Iterator<true> iter(bbox2); iter; ++iter) { EXPECT_EQ( 1.0f, tree.getValue(*iter)); } tree::LeafManager<FloatTree> r(tree); EXPECT_EQ(size_t(2), r.leafCount()); EXPECT_EQ(size_t(0), r.auxBufferCount()); EXPECT_EQ(size_t(0), r.auxBuffersPerLeaf()); ReduceOp op(0.0f); r.reduce(op); EXPECT_EQ(FloatTree::LeafNodeType::numValues(), op.mN); EXPECT_EQ(size_t(2), r.leafCount()); EXPECT_EQ(size_t(0), r.auxBufferCount()); EXPECT_EQ(size_t(0), r.auxBuffersPerLeaf()); Index n = 0; for (CoordBBox::Iterator<true> iter(bbox1); iter; ++iter) { ++n; EXPECT_EQ( -1.0f, tree.getValue(*iter)); } EXPECT_EQ(FloatTree::LeafNodeType::numValues(), n); n = 0; for (CoordBBox::Iterator<true> iter(bbox2); iter; ++iter) { ++n; EXPECT_EQ( 1.0f, tree.getValue(*iter)); } EXPECT_EQ(FloatTree::LeafNodeType::numValues(), n); }
11,020
C++
34.899023
99
0.581125
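The TestLeafManager row above covers aux-buffer management plus the foreach/reduce entry points, which apply a user operator to every leaf node, threaded by default. The sketch below shows the foreach pattern with a lambda in place of the test's functor structs; the fill extents, the doubling operation and the main() wrapper are illustrative assumptions, while the LeafManager construction, foreach call and leaf iteration match the APIs used in the test.

#include <openvdb/openvdb.h>
#include <openvdb/tree/LeafManager.h>

int main()
{
    openvdb::initialize();
    openvdb::FloatTree tree(/*background=*/0.0f);
    tree.fill(openvdb::CoordBBox(openvdb::Coord(0), openvdb::Coord(63)), 1.0f);
    tree.voxelizeActiveTiles(); // ensure the filled region is represented by leaf nodes

    openvdb::tree::LeafManager<openvdb::FloatTree> leafs(tree);
    // Threaded over leaf nodes; each call receives one leaf and its linear index.
    leafs.foreach([](openvdb::FloatTree::LeafNodeType& leaf, size_t /*idx*/) {
        for (auto iter = leaf.beginValueOn(); iter; ++iter) {
            iter.setValue(*iter * 2.0f); // double every active voxel value
        }
    });
    return 0;
}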
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestVec2Metadata.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/Metadata.h> class TestVec2Metadata : public ::testing::Test { }; TEST_F(TestVec2Metadata, testVec2i) { using namespace openvdb; Metadata::Ptr m(new Vec2IMetadata(openvdb::Vec2i(1, 1))); Metadata::Ptr m2 = m->copy(); EXPECT_TRUE(dynamic_cast<Vec2IMetadata*>(m.get()) != 0); EXPECT_TRUE(dynamic_cast<Vec2IMetadata*>(m2.get()) != 0); EXPECT_TRUE(m->typeName().compare("vec2i") == 0); EXPECT_TRUE(m2->typeName().compare("vec2i") == 0); Vec2IMetadata *s = dynamic_cast<Vec2IMetadata*>(m.get()); EXPECT_TRUE(s->value() == openvdb::Vec2i(1, 1)); s->value() = openvdb::Vec2i(2, 2); EXPECT_TRUE(s->value() == openvdb::Vec2i(2, 2)); m2->copy(*s); s = dynamic_cast<Vec2IMetadata*>(m2.get()); EXPECT_TRUE(s->value() == openvdb::Vec2i(2, 2)); } TEST_F(TestVec2Metadata, testVec2s) { using namespace openvdb; Metadata::Ptr m(new Vec2SMetadata(openvdb::Vec2s(1, 1))); Metadata::Ptr m2 = m->copy(); EXPECT_TRUE(dynamic_cast<Vec2SMetadata*>(m.get()) != 0); EXPECT_TRUE(dynamic_cast<Vec2SMetadata*>(m2.get()) != 0); EXPECT_TRUE(m->typeName().compare("vec2s") == 0); EXPECT_TRUE(m2->typeName().compare("vec2s") == 0); Vec2SMetadata *s = dynamic_cast<Vec2SMetadata*>(m.get()); EXPECT_TRUE(s->value() == openvdb::Vec2s(1, 1)); s->value() = openvdb::Vec2s(2, 2); EXPECT_TRUE(s->value() == openvdb::Vec2s(2, 2)); m2->copy(*s); s = dynamic_cast<Vec2SMetadata*>(m2.get()); EXPECT_TRUE(s->value() == openvdb::Vec2s(2, 2)); } TEST_F(TestVec2Metadata, testVec2d) { using namespace openvdb; Metadata::Ptr m(new Vec2DMetadata(openvdb::Vec2d(1, 1))); Metadata::Ptr m2 = m->copy(); EXPECT_TRUE(dynamic_cast<Vec2DMetadata*>(m.get()) != 0); EXPECT_TRUE(dynamic_cast<Vec2DMetadata*>(m2.get()) != 0); EXPECT_TRUE(m->typeName().compare("vec2d") == 0); EXPECT_TRUE(m2->typeName().compare("vec2d") == 0); Vec2DMetadata *s = dynamic_cast<Vec2DMetadata*>(m.get()); EXPECT_TRUE(s->value() == openvdb::Vec2d(1, 1)); s->value() = openvdb::Vec2d(2, 2); EXPECT_TRUE(s->value() == openvdb::Vec2d(2, 2)); m2->copy(*s); s = dynamic_cast<Vec2DMetadata*>(m2.get()); EXPECT_TRUE(s->value() == openvdb::Vec2d(2, 2)); }
2,418
C++
27.797619
61
0.621175
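The TestVec2Metadata row above checks the copy, typeName and value behaviour of the Vec2 metadata wrappers in isolation. In practice such metadata usually travels on a grid's MetaMap, as in the short sketch below; the "uv_scale" key, the sample values and the main() wrapper are illustrative assumptions, and the insertMeta/metaValue calls are the generic MetaMap interface rather than anything specific to this test file.

#include <openvdb/openvdb.h>
#include <openvdb/Metadata.h>

int main()
{
    openvdb::initialize(); // registers the built-in metadata types, including vec2s
    openvdb::FloatGrid::Ptr grid = openvdb::FloatGrid::create(0.0f);

    // Store a 2D value on the grid and read it back through the MetaMap interface.
    grid->insertMeta("uv_scale", openvdb::Vec2SMetadata(openvdb::Vec2s(2.0f, 4.0f)));
    const openvdb::Vec2s uv = grid->metaValue<openvdb::Vec2s>("uv_scale");
    return uv == openvdb::Vec2s(2.0f, 4.0f) ? 0 : 1;
}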
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestTree.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <cstdio> // for remove() #include <fstream> #include <sstream> #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/Types.h> #include <openvdb/math/Transform.h> #include <openvdb/tools/ValueTransformer.h> // for tools::setValueOnMin(), et al. #include <openvdb/tree/LeafNode.h> #include <openvdb/io/Compression.h> // for io::RealToHalf #include <openvdb/math/Math.h> // for Abs() #include <openvdb/openvdb.h> #include <openvdb/util/CpuTimer.h> #include <openvdb/tools/LevelSetSphere.h> #include <openvdb/tools/Prune.h> #include <openvdb/tools/ChangeBackground.h> #include <openvdb/tools/SignedFloodFill.h> #include "util.h" // for unittest_util::makeSphere() #define ASSERT_DOUBLES_EXACTLY_EQUAL(expected, actual) \ EXPECT_NEAR((expected), (actual), /*tolerance=*/0.0); using ValueType = float; using LeafNodeType = openvdb::tree::LeafNode<ValueType,3>; using InternalNodeType1 = openvdb::tree::InternalNode<LeafNodeType,4>; using InternalNodeType2 = openvdb::tree::InternalNode<InternalNodeType1,5>; using RootNodeType = openvdb::tree::RootNode<InternalNodeType2>; class TestTree: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } protected: template<typename TreeType> void testWriteHalf(); template<typename TreeType> void doTestMerge(openvdb::MergePolicy); }; TEST_F(TestTree, testChangeBackground) { const int dim = 128; const openvdb::Vec3f center(0.35f, 0.35f, 0.35f); const float radius = 0.15f, voxelSize = 1.0f / (dim-1), halfWidth = 4, gamma = halfWidth * voxelSize; using GridT = openvdb::FloatGrid; const openvdb::Coord inside(int(center[0]*dim), int(center[1]*dim), int(center[2]*dim)); const openvdb::Coord outside(dim); {//changeBackground GridT::Ptr grid = openvdb::tools::createLevelSetSphere<GridT>( radius, center, voxelSize, halfWidth); openvdb::FloatTree& tree = grid->tree(); EXPECT_TRUE(grid->tree().isValueOff(outside)); ASSERT_DOUBLES_EXACTLY_EQUAL( gamma, tree.getValue(outside)); EXPECT_TRUE(tree.isValueOff(inside)); ASSERT_DOUBLES_EXACTLY_EQUAL(-gamma, tree.getValue(inside)); const float background = gamma*3.43f; openvdb::tools::changeBackground(tree, background); EXPECT_TRUE(grid->tree().isValueOff(outside)); ASSERT_DOUBLES_EXACTLY_EQUAL( background, tree.getValue(outside)); EXPECT_TRUE(tree.isValueOff(inside)); ASSERT_DOUBLES_EXACTLY_EQUAL(-background, tree.getValue(inside)); } {//changeLevelSetBackground GridT::Ptr grid = openvdb::tools::createLevelSetSphere<GridT>( radius, center, voxelSize, halfWidth); openvdb::FloatTree& tree = grid->tree(); EXPECT_TRUE(grid->tree().isValueOff(outside)); ASSERT_DOUBLES_EXACTLY_EQUAL( gamma, tree.getValue(outside)); EXPECT_TRUE(tree.isValueOff(inside)); ASSERT_DOUBLES_EXACTLY_EQUAL(-gamma, tree.getValue(inside)); const float v1 = gamma*3.43f, v2 = -gamma*6.457f; openvdb::tools::changeAsymmetricLevelSetBackground(tree, v1, v2); EXPECT_TRUE(grid->tree().isValueOff(outside)); ASSERT_DOUBLES_EXACTLY_EQUAL( v1, tree.getValue(outside)); EXPECT_TRUE(tree.isValueOff(inside)); ASSERT_DOUBLES_EXACTLY_EQUAL( v2, tree.getValue(inside)); } } TEST_F(TestTree, testHalf) { testWriteHalf<openvdb::FloatTree>(); testWriteHalf<openvdb::DoubleTree>(); testWriteHalf<openvdb::Vec2STree>(); testWriteHalf<openvdb::Vec2DTree>(); testWriteHalf<openvdb::Vec3STree>(); testWriteHalf<openvdb::Vec3DTree>(); // Verify that non-floating-point grids are saved correctly. 
testWriteHalf<openvdb::BoolTree>(); testWriteHalf<openvdb::Int32Tree>(); testWriteHalf<openvdb::Int64Tree>(); } template<class TreeType> void TestTree::testWriteHalf() { using GridType = openvdb::Grid<TreeType>; using ValueT = typename TreeType::ValueType; ValueT background(5); GridType grid(background); unittest_util::makeSphere<GridType>(openvdb::Coord(64, 64, 64), openvdb::Vec3f(35, 30, 40), /*radius=*/10, grid, /*dx=*/1.0f, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!grid.tree().empty()); // Write grid blocks in both float and half formats. std::ostringstream outFull(std::ios_base::binary); grid.setSaveFloatAsHalf(false); grid.writeBuffers(outFull); outFull.flush(); const size_t fullBytes = outFull.str().size(); if (fullBytes == 0) FAIL() << "wrote empty full float buffers"; std::ostringstream outHalf(std::ios_base::binary); grid.setSaveFloatAsHalf(true); grid.writeBuffers(outHalf); outHalf.flush(); const size_t halfBytes = outHalf.str().size(); if (halfBytes == 0) FAIL() << "wrote empty half float buffers"; if (openvdb::io::RealToHalf<ValueT>::isReal) { // Verify that the half float file is "significantly smaller" than the full float file. if (halfBytes >= size_t(0.75 * double(fullBytes))) { FAIL() << "half float buffers not significantly smaller than full float (" << halfBytes << " vs. " << fullBytes << " bytes)"; } } else { // For non-real data types, "half float" and "full float" file sizes should be the same. if (halfBytes != fullBytes) { FAIL() << "full float and half float file sizes differ for data of type " + std::string(openvdb::typeNameAsString<ValueT>()); } } // Read back the half float data (converting back to full float in the process), // then write it out again in half float format. Verify that the resulting file // is identical to the original half float file. { openvdb::Grid<TreeType> gridCopy(grid); gridCopy.setSaveFloatAsHalf(true); std::istringstream is(outHalf.str(), std::ios_base::binary); // Since the input stream doesn't include a VDB header with file format version info, // tag the input stream explicitly with the current version number. 
openvdb::io::setCurrentVersion(is); gridCopy.readBuffers(is); std::ostringstream outDiff(std::ios_base::binary); gridCopy.writeBuffers(outDiff); outDiff.flush(); if (outHalf.str() != outDiff.str()) { FAIL() << "half-from-full and half-from-half buffers differ"; } } } TEST_F(TestTree, testValues) { ValueType background=5.0f; { const openvdb::Coord c0(5,10,20), c1(50000,20000,30000); RootNodeType root_node(background); const float v0=0.234f, v1=4.5678f; EXPECT_TRUE(root_node.empty()); ASSERT_DOUBLES_EXACTLY_EQUAL(root_node.getValue(c0), background); ASSERT_DOUBLES_EXACTLY_EQUAL(root_node.getValue(c1), background); root_node.setValueOn(c0, v0); root_node.setValueOn(c1, v1); ASSERT_DOUBLES_EXACTLY_EQUAL(v0,root_node.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(v1,root_node.getValue(c1)); int count=0; for (int i =0; i<256; ++i) { for (int j=0; j<256; ++j) { for (int k=0; k<256; ++k) { if (root_node.getValue(openvdb::Coord(i,j,k))<1.0f) ++count; } } } EXPECT_TRUE(count == 1); } { const openvdb::Coord min(-30,-25,-60), max(60,80,100); const openvdb::Coord c0(-5,-10,-20), c1(50,20,90), c2(59,67,89); const float v0=0.234f, v1=4.5678f, v2=-5.673f; RootNodeType root_node(background); EXPECT_TRUE(root_node.empty()); ASSERT_DOUBLES_EXACTLY_EQUAL(background,root_node.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(background,root_node.getValue(c1)); ASSERT_DOUBLES_EXACTLY_EQUAL(background,root_node.getValue(c2)); root_node.setValueOn(c0, v0); root_node.setValueOn(c1, v1); root_node.setValueOn(c2, v2); ASSERT_DOUBLES_EXACTLY_EQUAL(v0,root_node.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(v1,root_node.getValue(c1)); ASSERT_DOUBLES_EXACTLY_EQUAL(v2,root_node.getValue(c2)); int count=0; for (int i =min[0]; i<max[0]; ++i) { for (int j=min[1]; j<max[1]; ++j) { for (int k=min[2]; k<max[2]; ++k) { if (root_node.getValue(openvdb::Coord(i,j,k))<1.0f) ++count; } } } EXPECT_TRUE(count == 2); } } TEST_F(TestTree, testSetValue) { const float background = 5.0f; openvdb::FloatTree tree(background); const openvdb::Coord c0( 5, 10, 20), c1(-5,-10,-20); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(c1)); EXPECT_EQ(-1, tree.getValueDepth(c0)); EXPECT_EQ(-1, tree.getValueDepth(c1)); EXPECT_TRUE(tree.isValueOff(c0)); EXPECT_TRUE(tree.isValueOff(c1)); tree.setValue(c0, 10.0); ASSERT_DOUBLES_EXACTLY_EQUAL(10.0, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(c1)); EXPECT_EQ( 3, tree.getValueDepth(c0)); EXPECT_EQ(-1, tree.getValueDepth(c1)); EXPECT_EQ( 3, tree.getValueDepth(openvdb::Coord(7, 10, 20))); EXPECT_EQ( 2, tree.getValueDepth(openvdb::Coord(8, 10, 20))); EXPECT_TRUE(tree.isValueOn(c0)); EXPECT_TRUE(tree.isValueOff(c1)); tree.setValue(c1, 20.0); ASSERT_DOUBLES_EXACTLY_EQUAL(10.0, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(20.0, tree.getValue(c1)); EXPECT_EQ( 3, tree.getValueDepth(c0)); EXPECT_EQ( 3, tree.getValueDepth(c1)); EXPECT_TRUE(tree.isValueOn(c0)); EXPECT_TRUE(tree.isValueOn(c1)); struct Local { static inline void minOp(float& f, bool& b) { f = std::min(f, 15.f); b = true; } static inline void maxOp(float& f, bool& b) { f = std::max(f, 12.f); b = true; } static inline void sumOp(float& f, bool& b) { f += /*background=*/5.f; b = true; } }; openvdb::tools::setValueOnMin(tree, c0, 15.0); tree.modifyValueAndActiveState(c1, Local::minOp); ASSERT_DOUBLES_EXACTLY_EQUAL(10.0, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(15.0, tree.getValue(c1)); openvdb::tools::setValueOnMax(tree, c0, 12.0); 
tree.modifyValueAndActiveState(c1, Local::maxOp); ASSERT_DOUBLES_EXACTLY_EQUAL(12.0, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(15.0, tree.getValue(c1)); EXPECT_EQ(2, int(tree.activeVoxelCount())); float minVal = -999.0, maxVal = -999.0; tree.evalMinMax(minVal, maxVal); ASSERT_DOUBLES_EXACTLY_EQUAL(12.0, minVal); ASSERT_DOUBLES_EXACTLY_EQUAL(15.0, maxVal); tree.setValueOff(c0, background); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(15.0, tree.getValue(c1)); EXPECT_EQ(1, int(tree.activeVoxelCount())); openvdb::tools::setValueOnSum(tree, c0, background); tree.modifyValueAndActiveState(c1, Local::sumOp); ASSERT_DOUBLES_EXACTLY_EQUAL(2*background, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(15.0+background, tree.getValue(c1)); EXPECT_EQ(2, int(tree.activeVoxelCount())); // Test the extremes of the coordinate range ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(openvdb::Coord::min())); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(openvdb::Coord::max())); //std::cerr << "min=" << openvdb::Coord::min() << " max= " << openvdb::Coord::max() << "\n"; tree.setValue(openvdb::Coord::min(), 1.0f); tree.setValue(openvdb::Coord::max(), 2.0f); ASSERT_DOUBLES_EXACTLY_EQUAL(1.0f, tree.getValue(openvdb::Coord::min())); ASSERT_DOUBLES_EXACTLY_EQUAL(2.0f, tree.getValue(openvdb::Coord::max())); } TEST_F(TestTree, testSetValueOnly) { const float background = 5.0f; openvdb::FloatTree tree(background); const openvdb::Coord c0( 5, 10, 20), c1(-5,-10,-20); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(c1)); EXPECT_EQ(-1, tree.getValueDepth(c0)); EXPECT_EQ(-1, tree.getValueDepth(c1)); EXPECT_TRUE(tree.isValueOff(c0)); EXPECT_TRUE(tree.isValueOff(c1)); tree.setValueOnly(c0, 10.0); ASSERT_DOUBLES_EXACTLY_EQUAL(10.0, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree.getValue(c1)); EXPECT_EQ( 3, tree.getValueDepth(c0)); EXPECT_EQ(-1, tree.getValueDepth(c1)); EXPECT_EQ( 3, tree.getValueDepth(openvdb::Coord(7, 10, 20))); EXPECT_EQ( 2, tree.getValueDepth(openvdb::Coord(8, 10, 20))); EXPECT_TRUE(tree.isValueOff(c0)); EXPECT_TRUE(tree.isValueOff(c1)); tree.setValueOnly(c1, 20.0); ASSERT_DOUBLES_EXACTLY_EQUAL(10.0, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(20.0, tree.getValue(c1)); EXPECT_EQ( 3, tree.getValueDepth(c0)); EXPECT_EQ( 3, tree.getValueDepth(c1)); EXPECT_TRUE(tree.isValueOff(c0)); EXPECT_TRUE(tree.isValueOff(c1)); tree.setValue(c0, 30.0); ASSERT_DOUBLES_EXACTLY_EQUAL(30.0, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(20.0, tree.getValue(c1)); EXPECT_EQ( 3, tree.getValueDepth(c0)); EXPECT_EQ( 3, tree.getValueDepth(c1)); EXPECT_TRUE(tree.isValueOn(c0)); EXPECT_TRUE(tree.isValueOff(c1)); tree.setValueOnly(c0, 40.0); ASSERT_DOUBLES_EXACTLY_EQUAL(40.0, tree.getValue(c0)); ASSERT_DOUBLES_EXACTLY_EQUAL(20.0, tree.getValue(c1)); EXPECT_EQ( 3, tree.getValueDepth(c0)); EXPECT_EQ( 3, tree.getValueDepth(c1)); EXPECT_TRUE(tree.isValueOn(c0)); EXPECT_TRUE(tree.isValueOff(c1)); EXPECT_EQ(1, int(tree.activeVoxelCount())); } namespace { // Simple float wrapper with required interface to be used as ValueType in tree::LeafNode // Throws on copy-construction to ensure that all modifications are done in-place. 
struct FloatThrowOnCopy { float value = 0.0f; using T = FloatThrowOnCopy; FloatThrowOnCopy() = default; explicit FloatThrowOnCopy(float _value): value(_value) { } FloatThrowOnCopy(const FloatThrowOnCopy&) { throw openvdb::RuntimeError("No Copy"); } FloatThrowOnCopy& operator=(const FloatThrowOnCopy&) = default; T operator+(const float rhs) const { return T(value + rhs); } T operator-() const { return T(-value); } bool operator<(const T& other) const { return value < other.value; } bool operator>(const T& other) const { return value > other.value; } bool operator==(const T& other) const { return value == other.value; } friend std::ostream& operator<<(std::ostream &stream, const T& other) { stream << other.value; return stream; } }; } // namespace namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace math { OPENVDB_EXACT_IS_APPROX_EQUAL(FloatThrowOnCopy) } // namespace math template<> inline std::string TypedMetadata<FloatThrowOnCopy>::str() const { return ""; } template <> inline std::string TypedMetadata<FloatThrowOnCopy>::typeName() const { return ""; } } // namespace OPENVDB_VERSION_NAME } // namespace openvdb TEST_F(TestTree, testSetValueInPlace) { using FloatThrowOnCopyTree = openvdb::tree::Tree4<FloatThrowOnCopy, 5, 4, 3>::Type; using FloatThrowOnCopyGrid = openvdb::Grid<FloatThrowOnCopyTree>; FloatThrowOnCopyGrid::registerGrid(); FloatThrowOnCopyTree tree; const openvdb::Coord c0(5, 10, 20), c1(-5,-10,-20); // tile values can legitimately be copied to assess whether a change in value // requires the tile to be voxelized, so activate and voxelize active tiles first tree.setActiveState(c0, true); tree.setActiveState(c1, true); tree.voxelizeActiveTiles(/*threaded=*/true); EXPECT_NO_THROW(tree.modifyValue(c0, [](FloatThrowOnCopy& lhs) { lhs.value = 1.4f; } )); EXPECT_NO_THROW(tree.modifyValueAndActiveState(c1, [](FloatThrowOnCopy& lhs, bool& b) { lhs.value = 2.7f; b = false; } )); EXPECT_NEAR(1.4f, tree.getValue(c0).value, 1.0e-7); EXPECT_NEAR(2.7f, tree.getValue(c1).value, 1.0e-7); EXPECT_TRUE(tree.isValueOn(c0)); EXPECT_TRUE(!tree.isValueOn(c1)); // use slower de-allocation to ensure that no value copying occurs tree.root().clear(); } namespace { /// Helper function to test openvdb::tree::Tree::evalMinMax() for various tree types template<typename TreeT> void evalMinMaxTest() { using ValueT = typename TreeT::ValueType; struct Local { static bool isEqual(const ValueT& a, const ValueT& b) { using namespace openvdb; // for operator>() return !(math::Abs(a - b) > zeroVal<ValueT>()); } }; const ValueT zero = openvdb::zeroVal<ValueT>(), minusTwo = zero + (-2), plusTwo = zero + 2, five = zero + 5; TreeT tree(/*background=*/five); // No set voxels (defaults to min = max = zero) ValueT minVal = five, maxVal = five; tree.evalMinMax(minVal, maxVal); EXPECT_TRUE(Local::isEqual(minVal, zero)); EXPECT_TRUE(Local::isEqual(maxVal, zero)); // Only one set voxel tree.setValue(openvdb::Coord(0, 0, 0), minusTwo); minVal = maxVal = five; tree.evalMinMax(minVal, maxVal); EXPECT_TRUE(Local::isEqual(minVal, minusTwo)); EXPECT_TRUE(Local::isEqual(maxVal, minusTwo)); // Multiple set voxels, single value tree.setValue(openvdb::Coord(10, 10, 10), minusTwo); minVal = maxVal = five; tree.evalMinMax(minVal, maxVal); EXPECT_TRUE(Local::isEqual(minVal, minusTwo)); EXPECT_TRUE(Local::isEqual(maxVal, minusTwo)); // Multiple set voxels, multiple values tree.setValue(openvdb::Coord(10, 10, 10), plusTwo); tree.setValue(openvdb::Coord(-10, -10, -10), zero); minVal = maxVal = five; 
tree.evalMinMax(minVal, maxVal); EXPECT_TRUE(Local::isEqual(minVal, minusTwo)); EXPECT_TRUE(Local::isEqual(maxVal, plusTwo)); } /// Specialization for boolean trees template<> void evalMinMaxTest<openvdb::BoolTree>() { openvdb::BoolTree tree(/*background=*/false); // No set voxels (defaults to min = max = zero) bool minVal = true, maxVal = false; tree.evalMinMax(minVal, maxVal); EXPECT_EQ(false, minVal); EXPECT_EQ(false, maxVal); // Only one set voxel tree.setValue(openvdb::Coord(0, 0, 0), true); minVal = maxVal = false; tree.evalMinMax(minVal, maxVal); EXPECT_EQ(true, minVal); EXPECT_EQ(true, maxVal); // Multiple set voxels, single value tree.setValue(openvdb::Coord(-10, -10, -10), true); minVal = maxVal = false; tree.evalMinMax(minVal, maxVal); EXPECT_EQ(true, minVal); EXPECT_EQ(true, maxVal); // Multiple set voxels, multiple values tree.setValue(openvdb::Coord(10, 10, 10), false); minVal = true; maxVal = false; tree.evalMinMax(minVal, maxVal); EXPECT_EQ(false, minVal); EXPECT_EQ(true, maxVal); } /// Specialization for string trees template<> void evalMinMaxTest<openvdb::StringTree>() { const std::string echidna("echidna"), loris("loris"), pangolin("pangolin"); openvdb::StringTree tree(/*background=*/loris); // No set voxels (defaults to min = max = zero) std::string minVal, maxVal; tree.evalMinMax(minVal, maxVal); EXPECT_EQ(std::string(), minVal); EXPECT_EQ(std::string(), maxVal); // Only one set voxel tree.setValue(openvdb::Coord(0, 0, 0), pangolin); minVal.clear(); maxVal.clear(); tree.evalMinMax(minVal, maxVal); EXPECT_EQ(pangolin, minVal); EXPECT_EQ(pangolin, maxVal); // Multiple set voxels, single value tree.setValue(openvdb::Coord(-10, -10, -10), pangolin); minVal.clear(); maxVal.clear(); tree.evalMinMax(minVal, maxVal); EXPECT_EQ(pangolin, minVal); EXPECT_EQ(pangolin, maxVal); // Multiple set voxels, multiple values tree.setValue(openvdb::Coord(10, 10, 10), echidna); minVal.clear(); maxVal.clear(); tree.evalMinMax(minVal, maxVal); EXPECT_EQ(echidna, minVal); EXPECT_EQ(pangolin, maxVal); } /// Specialization for Coord trees template<> void evalMinMaxTest<openvdb::Coord>() { using CoordTree = openvdb::tree::Tree4<openvdb::Coord,5,4,3>::Type; const openvdb::Coord backg(5,4,-6), a(5,4,-7), b(5,5,-6); CoordTree tree(backg); // No set voxels (defaults to min = max = zero) openvdb::Coord minVal=openvdb::Coord::max(), maxVal=openvdb::Coord::min(); tree.evalMinMax(minVal, maxVal); EXPECT_EQ(openvdb::Coord(0), minVal); EXPECT_EQ(openvdb::Coord(0), maxVal); // Only one set voxel tree.setValue(openvdb::Coord(0, 0, 0), a); minVal=openvdb::Coord::max(); maxVal=openvdb::Coord::min(); tree.evalMinMax(minVal, maxVal); EXPECT_EQ(a, minVal); EXPECT_EQ(a, maxVal); // Multiple set voxels tree.setValue(openvdb::Coord(-10, -10, -10), b); minVal=openvdb::Coord::max(); maxVal=openvdb::Coord::min(); tree.evalMinMax(minVal, maxVal); EXPECT_EQ(a, minVal); EXPECT_EQ(b, maxVal); } } // unnamed namespace TEST_F(TestTree, testEvalMinMax) { evalMinMaxTest<openvdb::BoolTree>(); evalMinMaxTest<openvdb::FloatTree>(); evalMinMaxTest<openvdb::Int32Tree>(); evalMinMaxTest<openvdb::Vec3STree>(); evalMinMaxTest<openvdb::Vec2ITree>(); evalMinMaxTest<openvdb::StringTree>(); evalMinMaxTest<openvdb::Coord>(); } TEST_F(TestTree, testResize) { ValueType background=5.0f; //use this when resize is implemented RootNodeType root_node(background); EXPECT_TRUE(root_node.getLevel()==3); ASSERT_DOUBLES_EXACTLY_EQUAL(background, root_node.getValue(openvdb::Coord(5,10,20))); //fprintf(stdout,"Root grid dim=(%i,%i,%i)\n", // 
root_node.getGridDim(0), root_node.getGridDim(1), root_node.getGridDim(2)); root_node.setValueOn(openvdb::Coord(5,10,20),0.234f); ASSERT_DOUBLES_EXACTLY_EQUAL(root_node.getValue(openvdb::Coord(5,10,20)) , 0.234f); root_node.setValueOn(openvdb::Coord(500,200,300),4.5678f); ASSERT_DOUBLES_EXACTLY_EQUAL(root_node.getValue(openvdb::Coord(500,200,300)) , 4.5678f); { ValueType sum=0.0f; for (RootNodeType::ChildOnIter root_iter = root_node.beginChildOn(); root_iter.test(); ++root_iter) { for (InternalNodeType2::ChildOnIter internal_iter2 = root_iter->beginChildOn(); internal_iter2.test(); ++internal_iter2) { for (InternalNodeType1::ChildOnIter internal_iter1 = internal_iter2->beginChildOn(); internal_iter1.test(); ++internal_iter1) { for (LeafNodeType::ValueOnIter block_iter = internal_iter1->beginValueOn(); block_iter.test(); ++block_iter) { sum += *block_iter; } } } } ASSERT_DOUBLES_EXACTLY_EQUAL(sum, (0.234f + 4.5678f)); } EXPECT_TRUE(root_node.getLevel()==3); ASSERT_DOUBLES_EXACTLY_EQUAL(background, root_node.getValue(openvdb::Coord(5,11,20))); { ValueType sum=0.0f; for (RootNodeType::ChildOnIter root_iter = root_node.beginChildOn(); root_iter.test(); ++root_iter) { for (InternalNodeType2::ChildOnIter internal_iter2 = root_iter->beginChildOn(); internal_iter2.test(); ++internal_iter2) { for (InternalNodeType1::ChildOnIter internal_iter1 = internal_iter2->beginChildOn(); internal_iter1.test(); ++internal_iter1) { for (LeafNodeType::ValueOnIter block_iter = internal_iter1->beginValueOn(); block_iter.test(); ++block_iter) { sum += *block_iter; } } } } ASSERT_DOUBLES_EXACTLY_EQUAL(sum, (0.234f + 4.5678f)); } } TEST_F(TestTree, testHasSameTopology) { // Test using trees of the same type. { const float background1=5.0f; openvdb::FloatTree tree1(background1); const float background2=6.0f; openvdb::FloatTree tree2(background2); EXPECT_TRUE(tree1.hasSameTopology(tree2)); EXPECT_TRUE(tree2.hasSameTopology(tree1)); tree1.setValue(openvdb::Coord(-10,40,845),3.456f); EXPECT_TRUE(!tree1.hasSameTopology(tree2)); EXPECT_TRUE(!tree2.hasSameTopology(tree1)); tree2.setValue(openvdb::Coord(-10,40,845),-3.456f); EXPECT_TRUE(tree1.hasSameTopology(tree2)); EXPECT_TRUE(tree2.hasSameTopology(tree1)); tree1.setValue(openvdb::Coord(1,-500,-8), 1.0f); EXPECT_TRUE(!tree1.hasSameTopology(tree2)); EXPECT_TRUE(!tree2.hasSameTopology(tree1)); tree2.setValue(openvdb::Coord(1,-500,-8),1.0f); EXPECT_TRUE(tree1.hasSameTopology(tree2)); EXPECT_TRUE(tree2.hasSameTopology(tree1)); } // Test using trees of different types. { const float background1=5.0f; openvdb::FloatTree tree1(background1); const openvdb::Vec3f background2(1.0f,3.4f,6.0f); openvdb::Vec3fTree tree2(background2); EXPECT_TRUE(tree1.hasSameTopology(tree2)); EXPECT_TRUE(tree2.hasSameTopology(tree1)); tree1.setValue(openvdb::Coord(-10,40,845),3.456f); EXPECT_TRUE(!tree1.hasSameTopology(tree2)); EXPECT_TRUE(!tree2.hasSameTopology(tree1)); tree2.setValue(openvdb::Coord(-10,40,845),openvdb::Vec3f(1.0f,2.0f,-3.0f)); EXPECT_TRUE(tree1.hasSameTopology(tree2)); EXPECT_TRUE(tree2.hasSameTopology(tree1)); tree1.setValue(openvdb::Coord(1,-500,-8), 1.0f); EXPECT_TRUE(!tree1.hasSameTopology(tree2)); EXPECT_TRUE(!tree2.hasSameTopology(tree1)); tree2.setValue(openvdb::Coord(1,-500,-8),openvdb::Vec3f(1.0f,2.0f,-3.0f)); EXPECT_TRUE(tree1.hasSameTopology(tree2)); EXPECT_TRUE(tree2.hasSameTopology(tree1)); } } TEST_F(TestTree, testTopologyCopy) { // Test using trees of the same type. 
{ const float background1=5.0f; openvdb::FloatTree tree1(background1); tree1.setValue(openvdb::Coord(-10,40,845),3.456f); tree1.setValue(openvdb::Coord(1,-50,-8), 1.0f); const float background2=6.0f, setValue2=3.0f; openvdb::FloatTree tree2(tree1,background2,setValue2,openvdb::TopologyCopy()); EXPECT_TRUE(tree1.hasSameTopology(tree2)); EXPECT_TRUE(tree2.hasSameTopology(tree1)); ASSERT_DOUBLES_EXACTLY_EQUAL(background2, tree2.getValue(openvdb::Coord(1,2,3))); ASSERT_DOUBLES_EXACTLY_EQUAL(setValue2, tree2.getValue(openvdb::Coord(-10,40,845))); ASSERT_DOUBLES_EXACTLY_EQUAL(setValue2, tree2.getValue(openvdb::Coord(1,-50,-8))); tree1.setValue(openvdb::Coord(1,-500,-8), 1.0f); EXPECT_TRUE(!tree1.hasSameTopology(tree2)); EXPECT_TRUE(!tree2.hasSameTopology(tree1)); tree2.setValue(openvdb::Coord(1,-500,-8),1.0f); EXPECT_TRUE(tree1.hasSameTopology(tree2)); EXPECT_TRUE(tree2.hasSameTopology(tree1)); } // Test using trees of different types. { const openvdb::Vec3f background1(1.0f,3.4f,6.0f); openvdb::Vec3fTree tree1(background1); tree1.setValue(openvdb::Coord(-10,40,845),openvdb::Vec3f(3.456f,-2.3f,5.6f)); tree1.setValue(openvdb::Coord(1,-50,-8), openvdb::Vec3f(1.0f,3.0f,4.5f)); const float background2=6.0f, setValue2=3.0f; openvdb::FloatTree tree2(tree1,background2,setValue2,openvdb::TopologyCopy()); EXPECT_TRUE(tree1.hasSameTopology(tree2)); EXPECT_TRUE(tree2.hasSameTopology(tree1)); ASSERT_DOUBLES_EXACTLY_EQUAL(background2, tree2.getValue(openvdb::Coord(1,2,3))); ASSERT_DOUBLES_EXACTLY_EQUAL(setValue2, tree2.getValue(openvdb::Coord(-10,40,845))); ASSERT_DOUBLES_EXACTLY_EQUAL(setValue2, tree2.getValue(openvdb::Coord(1,-50,-8))); tree1.setValue(openvdb::Coord(1,-500,-8), openvdb::Vec3f(1.0f,0.0f,-3.0f)); EXPECT_TRUE(!tree1.hasSameTopology(tree2)); EXPECT_TRUE(!tree2.hasSameTopology(tree1)); tree2.setValue(openvdb::Coord(1,-500,-8), 1.0f); EXPECT_TRUE(tree1.hasSameTopology(tree2)); EXPECT_TRUE(tree2.hasSameTopology(tree1)); } } TEST_F(TestTree, testIterators) { ValueType background=5.0f; RootNodeType root_node(background); root_node.setValueOn(openvdb::Coord(5,10,20),0.234f); root_node.setValueOn(openvdb::Coord(50000,20000,30000),4.5678f); { ValueType sum=0.0f; for (RootNodeType::ChildOnIter root_iter = root_node.beginChildOn(); root_iter.test(); ++root_iter) { for (InternalNodeType2::ChildOnIter internal_iter2 = root_iter->beginChildOn(); internal_iter2.test(); ++internal_iter2) { for (InternalNodeType1::ChildOnIter internal_iter1 = internal_iter2->beginChildOn(); internal_iter1.test(); ++internal_iter1) { for (LeafNodeType::ValueOnIter block_iter = internal_iter1->beginValueOn(); block_iter.test(); ++block_iter) { sum += *block_iter; } } } } ASSERT_DOUBLES_EXACTLY_EQUAL((0.234f + 4.5678f), sum); } { // As above, but using dense iterators. 
ValueType sum = 0.0f, val = 0.0f; for (RootNodeType::ChildAllIter rootIter = root_node.beginChildAll(); rootIter.test(); ++rootIter) { if (!rootIter.isChildNode()) continue; for (InternalNodeType2::ChildAllIter internalIter2 = rootIter.probeChild(val)->beginChildAll(); internalIter2; ++internalIter2) { if (!internalIter2.isChildNode()) continue; for (InternalNodeType1::ChildAllIter internalIter1 = internalIter2.probeChild(val)->beginChildAll(); internalIter1; ++internalIter1) { if (!internalIter1.isChildNode()) continue; for (LeafNodeType::ValueOnIter leafIter = internalIter1.probeChild(val)->beginValueOn(); leafIter; ++leafIter) { sum += *leafIter; } } } } ASSERT_DOUBLES_EXACTLY_EQUAL((0.234f + 4.5678f), sum); } { ValueType v_sum=0.0f; openvdb::Coord xyz0, xyz1, xyz2, xyz3, xyzSum(0, 0, 0); for (RootNodeType::ChildOnIter root_iter = root_node.beginChildOn(); root_iter.test(); ++root_iter) { root_iter.getCoord(xyz3); for (InternalNodeType2::ChildOnIter internal_iter2 = root_iter->beginChildOn(); internal_iter2.test(); ++internal_iter2) { internal_iter2.getCoord(xyz2); xyz2 = xyz2 - internal_iter2.parent().origin(); for (InternalNodeType1::ChildOnIter internal_iter1 = internal_iter2->beginChildOn(); internal_iter1.test(); ++internal_iter1) { internal_iter1.getCoord(xyz1); xyz1 = xyz1 - internal_iter1.parent().origin(); for (LeafNodeType::ValueOnIter block_iter = internal_iter1->beginValueOn(); block_iter.test(); ++block_iter) { block_iter.getCoord(xyz0); xyz0 = xyz0 - block_iter.parent().origin(); v_sum += *block_iter; xyzSum = xyzSum + xyz0 + xyz1 + xyz2 + xyz3; } } } } ASSERT_DOUBLES_EXACTLY_EQUAL((0.234f + 4.5678f), v_sum); EXPECT_EQ(openvdb::Coord(5 + 50000, 10 + 20000, 20 + 30000), xyzSum); } } TEST_F(TestTree, testIO) { const char* filename = "testIO.dbg"; openvdb::SharedPtr<const char> scopedFile(filename, ::remove); { ValueType background=5.0f; RootNodeType root_node(background); root_node.setValueOn(openvdb::Coord(5,10,20),0.234f); root_node.setValueOn(openvdb::Coord(50000,20000,30000),4.5678f); std::ofstream os(filename, std::ios_base::binary); root_node.writeTopology(os); root_node.writeBuffers(os); EXPECT_TRUE(!os.fail()); } { ValueType background=2.0f; RootNodeType root_node(background); ASSERT_DOUBLES_EXACTLY_EQUAL(background, root_node.getValue(openvdb::Coord(5,10,20))); { std::ifstream is(filename, std::ios_base::binary); // Since the test file doesn't include a VDB header with file format version info, // tag the input stream explicitly with the current version number. 
openvdb::io::setCurrentVersion(is); root_node.readTopology(is); root_node.readBuffers(is); EXPECT_TRUE(!is.fail()); } ASSERT_DOUBLES_EXACTLY_EQUAL(0.234f, root_node.getValue(openvdb::Coord(5,10,20))); ASSERT_DOUBLES_EXACTLY_EQUAL(5.0f, root_node.getValue(openvdb::Coord(5,11,20))); ValueType sum=0.0f; for (RootNodeType::ChildOnIter root_iter = root_node.beginChildOn(); root_iter.test(); ++root_iter) { for (InternalNodeType2::ChildOnIter internal_iter2 = root_iter->beginChildOn(); internal_iter2.test(); ++internal_iter2) { for (InternalNodeType1::ChildOnIter internal_iter1 = internal_iter2->beginChildOn(); internal_iter1.test(); ++internal_iter1) { for (LeafNodeType::ValueOnIter block_iter = internal_iter1->beginValueOn(); block_iter.test(); ++block_iter) { sum += *block_iter; } } } } ASSERT_DOUBLES_EXACTLY_EQUAL(sum, (0.234f + 4.5678f)); } } TEST_F(TestTree, testNegativeIndexing) { ValueType background=5.0f; openvdb::FloatTree tree(background); EXPECT_TRUE(tree.empty()); ASSERT_DOUBLES_EXACTLY_EQUAL(tree.getValue(openvdb::Coord(5,-10,20)), background); ASSERT_DOUBLES_EXACTLY_EQUAL(tree.getValue(openvdb::Coord(-5000,2000,3000)), background); tree.setValue(openvdb::Coord( 5, 10, 20),0.0f); tree.setValue(openvdb::Coord(-5, 10, 20),0.1f); tree.setValue(openvdb::Coord( 5,-10, 20),0.2f); tree.setValue(openvdb::Coord( 5, 10,-20),0.3f); tree.setValue(openvdb::Coord(-5,-10, 20),0.4f); tree.setValue(openvdb::Coord(-5, 10,-20),0.5f); tree.setValue(openvdb::Coord( 5,-10,-20),0.6f); tree.setValue(openvdb::Coord(-5,-10,-20),0.7f); tree.setValue(openvdb::Coord(-5000, 2000,-3000),4.5678f); tree.setValue(openvdb::Coord( 5000,-2000,-3000),4.5678f); tree.setValue(openvdb::Coord(-5000,-2000, 3000),4.5678f); ASSERT_DOUBLES_EXACTLY_EQUAL(0.0f, tree.getValue(openvdb::Coord( 5, 10, 20))); ASSERT_DOUBLES_EXACTLY_EQUAL(0.1f, tree.getValue(openvdb::Coord(-5, 10, 20))); ASSERT_DOUBLES_EXACTLY_EQUAL(0.2f, tree.getValue(openvdb::Coord( 5,-10, 20))); ASSERT_DOUBLES_EXACTLY_EQUAL(0.3f, tree.getValue(openvdb::Coord( 5, 10,-20))); ASSERT_DOUBLES_EXACTLY_EQUAL(0.4f, tree.getValue(openvdb::Coord(-5,-10, 20))); ASSERT_DOUBLES_EXACTLY_EQUAL(0.5f, tree.getValue(openvdb::Coord(-5, 10,-20))); ASSERT_DOUBLES_EXACTLY_EQUAL(0.6f, tree.getValue(openvdb::Coord( 5,-10,-20))); ASSERT_DOUBLES_EXACTLY_EQUAL(0.7f, tree.getValue(openvdb::Coord(-5,-10,-20))); ASSERT_DOUBLES_EXACTLY_EQUAL(4.5678f, tree.getValue(openvdb::Coord(-5000, 2000,-3000))); ASSERT_DOUBLES_EXACTLY_EQUAL(4.5678f, tree.getValue(openvdb::Coord( 5000,-2000,-3000))); ASSERT_DOUBLES_EXACTLY_EQUAL(4.5678f, tree.getValue(openvdb::Coord(-5000,-2000, 3000))); int count=0; for (int i =-25; i<25; ++i) { for (int j=-25; j<25; ++j) { for (int k=-25; k<25; ++k) { if (tree.getValue(openvdb::Coord(i,j,k))<1.0f) { //fprintf(stderr,"(%i,%i,%i)=%f\n",i,j,k,tree.getValue(openvdb::Coord(i,j,k))); ++count; } } } } EXPECT_TRUE(count == 8); int count2 = 0; openvdb::Coord xyz; for (openvdb::FloatTree::ValueOnCIter iter = tree.cbeginValueOn(); iter; ++iter) { ++count2; xyz = iter.getCoord(); //std::cerr << xyz << " = " << *iter << "\n"; } EXPECT_TRUE(count2 == 11); EXPECT_TRUE(tree.activeVoxelCount() == 11); { count2 = 0; for (openvdb::FloatTree::ValueOnCIter iter = tree.cbeginValueOn(); iter; ++iter) { ++count2; xyz = iter.getCoord(); //std::cerr << xyz << " = " << *iter << "\n"; } EXPECT_TRUE(count2 == 11); EXPECT_TRUE(tree.activeVoxelCount() == 11); } } TEST_F(TestTree, testDeepCopy) { // set up a tree const float fillValue1=5.0f; openvdb::FloatTree tree1(fillValue1); 
tree1.setValue(openvdb::Coord(-10,40,845), 3.456f); tree1.setValue(openvdb::Coord(1,-50,-8), 1.0f); // make a deep copy of the tree openvdb::TreeBase::Ptr newTree = tree1.copy(); // cast down to the concrete type to query values openvdb::FloatTree *pTree2 = dynamic_cast<openvdb::FloatTree *>(newTree.get()); // compare topology EXPECT_TRUE(tree1.hasSameTopology(*pTree2)); EXPECT_TRUE(pTree2->hasSameTopology(tree1)); // trees should be equal ASSERT_DOUBLES_EXACTLY_EQUAL(fillValue1, pTree2->getValue(openvdb::Coord(1,2,3))); ASSERT_DOUBLES_EXACTLY_EQUAL(3.456f, pTree2->getValue(openvdb::Coord(-10,40,845))); ASSERT_DOUBLES_EXACTLY_EQUAL(1.0f, pTree2->getValue(openvdb::Coord(1,-50,-8))); // change 1 value in tree2 openvdb::Coord changeCoord(1, -500, -8); pTree2->setValue(changeCoord, 1.0f); // topology should no longer match EXPECT_TRUE(!tree1.hasSameTopology(*pTree2)); EXPECT_TRUE(!pTree2->hasSameTopology(tree1)); // query changed value and make sure it's different between trees ASSERT_DOUBLES_EXACTLY_EQUAL(fillValue1, tree1.getValue(changeCoord)); ASSERT_DOUBLES_EXACTLY_EQUAL(1.0f, pTree2->getValue(changeCoord)); } TEST_F(TestTree, testMerge) { ValueType background=5.0f; openvdb::FloatTree tree0(background), tree1(background), tree2(background); EXPECT_TRUE(tree2.empty()); tree0.setValue(openvdb::Coord( 5, 10, 20),0.0f); tree0.setValue(openvdb::Coord(-5, 10, 20),0.1f); tree0.setValue(openvdb::Coord( 5,-10, 20),0.2f); tree0.setValue(openvdb::Coord( 5, 10,-20),0.3f); tree1.setValue(openvdb::Coord( 5, 10, 20),0.0f); tree1.setValue(openvdb::Coord(-5, 10, 20),0.1f); tree1.setValue(openvdb::Coord( 5,-10, 20),0.2f); tree1.setValue(openvdb::Coord( 5, 10,-20),0.3f); tree0.setValue(openvdb::Coord(-5,-10, 20),0.4f); tree0.setValue(openvdb::Coord(-5, 10,-20),0.5f); tree0.setValue(openvdb::Coord( 5,-10,-20),0.6f); tree0.setValue(openvdb::Coord(-5,-10,-20),0.7f); tree0.setValue(openvdb::Coord(-5000, 2000,-3000),4.5678f); tree0.setValue(openvdb::Coord( 5000,-2000,-3000),4.5678f); tree0.setValue(openvdb::Coord(-5000,-2000, 3000),4.5678f); tree2.setValue(openvdb::Coord(-5,-10, 20),0.4f); tree2.setValue(openvdb::Coord(-5, 10,-20),0.5f); tree2.setValue(openvdb::Coord( 5,-10,-20),0.6f); tree2.setValue(openvdb::Coord(-5,-10,-20),0.7f); tree2.setValue(openvdb::Coord(-5000, 2000,-3000),4.5678f); tree2.setValue(openvdb::Coord( 5000,-2000,-3000),4.5678f); tree2.setValue(openvdb::Coord(-5000,-2000, 3000),4.5678f); EXPECT_TRUE(tree0.leafCount()!=tree1.leafCount()); EXPECT_TRUE(tree0.leafCount()!=tree2.leafCount()); EXPECT_TRUE(!tree2.empty()); tree1.merge(tree2, openvdb::MERGE_ACTIVE_STATES); EXPECT_TRUE(tree2.empty()); EXPECT_TRUE(tree0.leafCount()==tree1.leafCount()); EXPECT_TRUE(tree0.nonLeafCount()==tree1.nonLeafCount()); EXPECT_TRUE(tree0.activeLeafVoxelCount()==tree1.activeLeafVoxelCount()); EXPECT_TRUE(tree0.inactiveLeafVoxelCount()==tree1.inactiveLeafVoxelCount()); EXPECT_TRUE(tree0.activeVoxelCount()==tree1.activeVoxelCount()); EXPECT_TRUE(tree0.inactiveVoxelCount()==tree1.inactiveVoxelCount()); for (openvdb::FloatTree::ValueOnCIter iter0 = tree0.cbeginValueOn(); iter0; ++iter0) { ASSERT_DOUBLES_EXACTLY_EQUAL(*iter0,tree1.getValue(iter0.getCoord())); } // Test active tile support. 
{ using namespace openvdb; FloatTree treeA(/*background*/0.0), treeB(/*background*/0.0); treeA.fill(CoordBBox(Coord(16,16,16), Coord(31,31,31)), /*value*/1.0); treeB.fill(CoordBBox(Coord(0,0,0), Coord(15,15,15)), /*value*/1.0); EXPECT_EQ(4096, int(treeA.activeVoxelCount())); EXPECT_EQ(4096, int(treeB.activeVoxelCount())); treeA.merge(treeB, MERGE_ACTIVE_STATES); EXPECT_EQ(8192, int(treeA.activeVoxelCount())); EXPECT_EQ(0, int(treeB.activeVoxelCount())); } doTestMerge<openvdb::FloatTree>(openvdb::MERGE_NODES); doTestMerge<openvdb::FloatTree>(openvdb::MERGE_ACTIVE_STATES); doTestMerge<openvdb::FloatTree>(openvdb::MERGE_ACTIVE_STATES_AND_NODES); doTestMerge<openvdb::BoolTree>(openvdb::MERGE_NODES); doTestMerge<openvdb::BoolTree>(openvdb::MERGE_ACTIVE_STATES); doTestMerge<openvdb::BoolTree>(openvdb::MERGE_ACTIVE_STATES_AND_NODES); } template<typename TreeType> void TestTree::doTestMerge(openvdb::MergePolicy policy) { using namespace openvdb; TreeType treeA, treeB; using RootT = typename TreeType::RootNodeType; using LeafT = typename TreeType::LeafNodeType; const typename TreeType::ValueType val(1); const int depth = static_cast<int>(treeA.treeDepth()), leafDim = static_cast<int>(LeafT::dim()), leafSize = static_cast<int>(LeafT::size()); // Coords that are in a different top-level branch than (0, 0, 0) const Coord pos(static_cast<int>(RootT::getChildDim())); treeA.setValueOff(pos, val); treeA.setValueOff(-pos, val); treeB.setValueOff(Coord(0), val); treeB.fill(CoordBBox(pos, pos.offsetBy(leafDim - 1)), val, /*active=*/true); treeB.setValueOn(-pos, val); // treeA treeB . // . // R R . // / \ /|\ . // I I I I I . // / \ / | \ . // I I I I I . // / \ / | on x SIZE . // L L L L . // off off on off . EXPECT_EQ(0, int(treeA.activeVoxelCount())); EXPECT_EQ(leafSize + 1, int(treeB.activeVoxelCount())); EXPECT_EQ(2, int(treeA.leafCount())); EXPECT_EQ(2, int(treeB.leafCount())); EXPECT_EQ(2*(depth-2)+1, int(treeA.nonLeafCount())); // 2 branches (II+II+R) EXPECT_EQ(3*(depth-2)+1, int(treeB.nonLeafCount())); // 3 branches (II+II+II+R) treeA.merge(treeB, policy); // MERGE_NODES MERGE_ACTIVE_STATES MERGE_ACTIVE_STATES_AND_NODES . // . // R R R . // /|\ /|\ /|\ . // I I I I I I I I I . // / | \ / | \ / | \ . // I I I I I I I I I . // / | \ / | on x SIZE / | \ . // L L L L L L L L . // off off off on off on off on x SIZE . 
switch (policy) { case MERGE_NODES: EXPECT_EQ(0, int(treeA.activeVoxelCount())); EXPECT_EQ(2 + 1, int(treeA.leafCount())); // 1 leaf node stolen from B EXPECT_EQ(3*(depth-2)+1, int(treeA.nonLeafCount())); // 3 branches (II+II+II+R) break; case MERGE_ACTIVE_STATES: EXPECT_EQ(2, int(treeA.leafCount())); // 1 leaf stolen, 1 replaced with tile EXPECT_EQ(3*(depth-2)+1, int(treeA.nonLeafCount())); // 3 branches (II+II+II+R) EXPECT_EQ(leafSize + 1, int(treeA.activeVoxelCount())); break; case MERGE_ACTIVE_STATES_AND_NODES: EXPECT_EQ(2 + 1, int(treeA.leafCount())); // 1 leaf node stolen from B EXPECT_EQ(3*(depth-2)+1, int(treeA.nonLeafCount())); // 3 branches (II+II+II+R) EXPECT_EQ(leafSize + 1, int(treeA.activeVoxelCount())); break; } EXPECT_TRUE(treeB.empty()); } TEST_F(TestTree, testVoxelizeActiveTiles) { using openvdb::CoordBBox; using openvdb::Coord; // Use a small custom tree so we don't run out of memory when // tiles are converted to dense leafs :) using MyTree = openvdb::tree::Tree4<float,2, 2, 2>::Type; float background=5.0f; const Coord xyz[] = {Coord(-1,-2,-3),Coord( 1, 2, 3)}; //check two leaf nodes and two tiles at each level 1, 2 and 3 const int tile_size[4]={0, 1<<2, 1<<(2*2), 1<<(3*2)}; // serial version for (int level=0; level<=3; ++level) { MyTree tree(background); EXPECT_EQ(-1,tree.getValueDepth(xyz[0])); EXPECT_EQ(-1,tree.getValueDepth(xyz[1])); if (level==0) { tree.setValue(xyz[0], 1.0f); tree.setValue(xyz[1], 1.0f); } else { const int n = tile_size[level]; tree.fill(CoordBBox::createCube(Coord(-n,-n,-n), n), 1.0f, true); tree.fill(CoordBBox::createCube(Coord( 0, 0, 0), n), 1.0f, true); } EXPECT_EQ(3-level,tree.getValueDepth(xyz[0])); EXPECT_EQ(3-level,tree.getValueDepth(xyz[1])); tree.voxelizeActiveTiles(false); EXPECT_EQ(3 ,tree.getValueDepth(xyz[0])); EXPECT_EQ(3 ,tree.getValueDepth(xyz[1])); } // multi-threaded version for (int level=0; level<=3; ++level) { MyTree tree(background); EXPECT_EQ(-1,tree.getValueDepth(xyz[0])); EXPECT_EQ(-1,tree.getValueDepth(xyz[1])); if (level==0) { tree.setValue(xyz[0], 1.0f); tree.setValue(xyz[1], 1.0f); } else { const int n = tile_size[level]; tree.fill(CoordBBox::createCube(Coord(-n,-n,-n), n), 1.0f, true); tree.fill(CoordBBox::createCube(Coord( 0, 0, 0), n), 1.0f, true); } EXPECT_EQ(3-level,tree.getValueDepth(xyz[0])); EXPECT_EQ(3-level,tree.getValueDepth(xyz[1])); tree.voxelizeActiveTiles(true); EXPECT_EQ(3 ,tree.getValueDepth(xyz[0])); EXPECT_EQ(3 ,tree.getValueDepth(xyz[1])); } #if 0 const CoordBBox bbox(openvdb::Coord(-30,-50,-30), openvdb::Coord(530,610,623)); {// benchmark serial MyTree tree(background); tree.sparseFill( bbox, 1.0f, /*state*/true); openvdb::util::CpuTimer timer("\nserial voxelizeActiveTiles"); tree.voxelizeActiveTiles(/*threaded*/false); timer.stop(); } {// benchmark parallel MyTree tree(background); tree.sparseFill( bbox, 1.0f, /*state*/true); openvdb::util::CpuTimer timer("\nparallel voxelizeActiveTiles"); tree.voxelizeActiveTiles(/*threaded*/true); timer.stop(); } #endif } TEST_F(TestTree, testTopologyUnion) { {//super simple test with only two active values const ValueType background=0.0f; openvdb::FloatTree tree0(background), tree1(background); tree0.setValue(openvdb::Coord( 500, 300, 200), 1.0f); tree1.setValue(openvdb::Coord( 8, 11, 11), 2.0f); openvdb::FloatTree tree2(tree1); tree1.topologyUnion(tree0); for (openvdb::FloatTree::ValueOnCIter iter = tree0.cbeginValueOn(); iter; ++iter) { EXPECT_TRUE(tree1.isValueOn(iter.getCoord())); } for (openvdb::FloatTree::ValueOnCIter iter = tree2.cbeginValueOn(); iter; 
++iter) { EXPECT_TRUE(tree1.isValueOn(iter.getCoord())); } for (openvdb::FloatTree::ValueOnCIter iter = tree1.cbeginValueOn(); iter; ++iter) { ASSERT_DOUBLES_EXACTLY_EQUAL(*iter,tree2.getValue(iter.getCoord())); } } {// test using setValue ValueType background=5.0f; openvdb::FloatTree tree0(background), tree1(background), tree2(background); EXPECT_TRUE(tree2.empty()); // tree0 = tree1.topologyUnion(tree2) tree0.setValue(openvdb::Coord( 5, 10, 20),0.0f); tree0.setValue(openvdb::Coord(-5, 10, 20),0.1f); tree0.setValue(openvdb::Coord( 5,-10, 20),0.2f); tree0.setValue(openvdb::Coord( 5, 10,-20),0.3f); tree1.setValue(openvdb::Coord( 5, 10, 20),0.0f); tree1.setValue(openvdb::Coord(-5, 10, 20),0.1f); tree1.setValue(openvdb::Coord( 5,-10, 20),0.2f); tree1.setValue(openvdb::Coord( 5, 10,-20),0.3f); tree0.setValue(openvdb::Coord(-5,-10, 20),background); tree0.setValue(openvdb::Coord(-5, 10,-20),background); tree0.setValue(openvdb::Coord( 5,-10,-20),background); tree0.setValue(openvdb::Coord(-5,-10,-20),background); tree0.setValue(openvdb::Coord(-5000, 2000,-3000),background); tree0.setValue(openvdb::Coord( 5000,-2000,-3000),background); tree0.setValue(openvdb::Coord(-5000,-2000, 3000),background); tree2.setValue(openvdb::Coord(-5,-10, 20),0.4f); tree2.setValue(openvdb::Coord(-5, 10,-20),0.5f); tree2.setValue(openvdb::Coord( 5,-10,-20),0.6f); tree2.setValue(openvdb::Coord(-5,-10,-20),0.7f); tree2.setValue(openvdb::Coord(-5000, 2000,-3000),4.5678f); tree2.setValue(openvdb::Coord( 5000,-2000,-3000),4.5678f); tree2.setValue(openvdb::Coord(-5000,-2000, 3000),4.5678f); // tree3 has the same topology as tree2 but a different value type const openvdb::Vec3f background2(1.0f,3.4f,6.0f), vec_val(3.1f,5.3f,-9.5f); openvdb::Vec3fTree tree3(background2); for (openvdb::FloatTree::ValueOnCIter iter2 = tree2.cbeginValueOn(); iter2; ++iter2) { tree3.setValue(iter2.getCoord(), vec_val); } EXPECT_TRUE(tree0.leafCount()!=tree1.leafCount()); EXPECT_TRUE(tree0.leafCount()!=tree2.leafCount()); EXPECT_TRUE(tree0.leafCount()!=tree3.leafCount()); EXPECT_TRUE(!tree2.empty()); EXPECT_TRUE(!tree3.empty()); openvdb::FloatTree tree1_copy(tree1); //tree1.topologyUnion(tree2);//should make tree1 = tree0 tree1.topologyUnion(tree3);//should make tree1 = tree0 EXPECT_TRUE(tree0.leafCount()==tree1.leafCount()); EXPECT_TRUE(tree0.nonLeafCount()==tree1.nonLeafCount()); EXPECT_TRUE(tree0.activeLeafVoxelCount()==tree1.activeLeafVoxelCount()); EXPECT_TRUE(tree0.inactiveLeafVoxelCount()==tree1.inactiveLeafVoxelCount()); EXPECT_TRUE(tree0.activeVoxelCount()==tree1.activeVoxelCount()); EXPECT_TRUE(tree0.inactiveVoxelCount()==tree1.inactiveVoxelCount()); EXPECT_TRUE(tree1.hasSameTopology(tree0)); EXPECT_TRUE(tree0.hasSameTopology(tree1)); for (openvdb::FloatTree::ValueOnCIter iter2 = tree2.cbeginValueOn(); iter2; ++iter2) { EXPECT_TRUE(tree1.isValueOn(iter2.getCoord())); } for (openvdb::FloatTree::ValueOnCIter iter1 = tree1.cbeginValueOn(); iter1; ++iter1) { EXPECT_TRUE(tree0.isValueOn(iter1.getCoord())); } for (openvdb::FloatTree::ValueOnCIter iter0 = tree0.cbeginValueOn(); iter0; ++iter0) { EXPECT_TRUE(tree1.isValueOn(iter0.getCoord())); ASSERT_DOUBLES_EXACTLY_EQUAL(*iter0,tree1.getValue(iter0.getCoord())); } for (openvdb::FloatTree::ValueOnCIter iter = tree1_copy.cbeginValueOn(); iter; ++iter) { EXPECT_TRUE(tree1.isValueOn(iter.getCoord())); ASSERT_DOUBLES_EXACTLY_EQUAL(*iter,tree1.getValue(iter.getCoord())); } for (openvdb::FloatTree::ValueOnCIter iter = tree1.cbeginValueOn(); iter; ++iter) { const openvdb::Coord p = iter.getCoord(); 
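            // Sanity check (restating the assertion just below): any voxel that is
            // active after the union should have been active either in the union
            // argument (tree3) or in the original copy of this tree (tree1_copy).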
EXPECT_TRUE(tree3.isValueOn(p) || tree1_copy.isValueOn(p)); } } { ValueType background=5.0f; openvdb::FloatTree tree0(background), tree1(background), tree2(background); EXPECT_TRUE(tree2.empty()); // tree0 = tree1.topologyUnion(tree2) tree0.setValue(openvdb::Coord( 5, 10, 20),0.0f); tree0.setValue(openvdb::Coord(-5, 10, 20),0.1f); tree0.setValue(openvdb::Coord( 5,-10, 20),0.2f); tree0.setValue(openvdb::Coord( 5, 10,-20),0.3f); tree1.setValue(openvdb::Coord( 5, 10, 20),0.0f); tree1.setValue(openvdb::Coord(-5, 10, 20),0.1f); tree1.setValue(openvdb::Coord( 5,-10, 20),0.2f); tree1.setValue(openvdb::Coord( 5, 10,-20),0.3f); tree0.setValue(openvdb::Coord(-5,-10, 20),background); tree0.setValue(openvdb::Coord(-5, 10,-20),background); tree0.setValue(openvdb::Coord( 5,-10,-20),background); tree0.setValue(openvdb::Coord(-5,-10,-20),background); tree0.setValue(openvdb::Coord(-5000, 2000,-3000),background); tree0.setValue(openvdb::Coord( 5000,-2000,-3000),background); tree0.setValue(openvdb::Coord(-5000,-2000, 3000),background); tree2.setValue(openvdb::Coord(-5,-10, 20),0.4f); tree2.setValue(openvdb::Coord(-5, 10,-20),0.5f); tree2.setValue(openvdb::Coord( 5,-10,-20),0.6f); tree2.setValue(openvdb::Coord(-5,-10,-20),0.7f); tree2.setValue(openvdb::Coord(-5000, 2000,-3000),4.5678f); tree2.setValue(openvdb::Coord( 5000,-2000,-3000),4.5678f); tree2.setValue(openvdb::Coord(-5000,-2000, 3000),4.5678f); // tree3 has the same topology as tree2 but a different value type const openvdb::Vec3f background2(1.0f,3.4f,6.0f), vec_val(3.1f,5.3f,-9.5f); openvdb::Vec3fTree tree3(background2); for (openvdb::FloatTree::ValueOnCIter iter2 = tree2.cbeginValueOn(); iter2; ++iter2) { tree3.setValue(iter2.getCoord(), vec_val); } openvdb::FloatTree tree4(tree1);//tree4 = tree1 openvdb::FloatTree tree5(tree1);//tree5 = tree1 tree1.topologyUnion(tree3);//should make tree1 = tree0 EXPECT_TRUE(tree1.hasSameTopology(tree0)); for (openvdb::Vec3fTree::ValueOnCIter iter3 = tree3.cbeginValueOn(); iter3; ++iter3) { tree4.setValueOn(iter3.getCoord()); const openvdb::Coord p = iter3.getCoord(); ASSERT_DOUBLES_EXACTLY_EQUAL(tree1.getValue(p),tree5.getValue(p)); ASSERT_DOUBLES_EXACTLY_EQUAL(tree4.getValue(p),tree5.getValue(p)); } EXPECT_TRUE(tree4.hasSameTopology(tree0)); for (openvdb::FloatTree::ValueOnCIter iter4 = tree4.cbeginValueOn(); iter4; ++iter4) { const openvdb::Coord p = iter4.getCoord(); ASSERT_DOUBLES_EXACTLY_EQUAL(tree0.getValue(p),tree5.getValue(p)); ASSERT_DOUBLES_EXACTLY_EQUAL(tree1.getValue(p),tree5.getValue(p)); ASSERT_DOUBLES_EXACTLY_EQUAL(tree4.getValue(p),tree5.getValue(p)); } for (openvdb::FloatTree::ValueOnCIter iter = tree1.cbeginValueOn(); iter; ++iter) { const openvdb::Coord p = iter.getCoord(); EXPECT_TRUE(tree3.isValueOn(p) || tree4.isValueOn(p)); } } {// test overlapping spheres const float background=5.0f, R0=10.0f, R1=5.6f; const openvdb::Vec3f C0(35.0f, 30.0f, 40.0f), C1(22.3f, 30.5f, 31.0f); const openvdb::Coord dim(32, 32, 32); openvdb::FloatGrid grid0(background); openvdb::FloatGrid grid1(background); unittest_util::makeSphere<openvdb::FloatGrid>(dim, C0, R0, grid0, 1.0f, unittest_util::SPHERE_SPARSE_NARROW_BAND); unittest_util::makeSphere<openvdb::FloatGrid>(dim, C1, R1, grid1, 1.0f, unittest_util::SPHERE_SPARSE_NARROW_BAND); openvdb::FloatTree& tree0 = grid0.tree(); openvdb::FloatTree& tree1 = grid1.tree(); openvdb::FloatTree tree0_copy(tree0); tree0.topologyUnion(tree1); const openvdb::Index64 n0 = tree0_copy.activeVoxelCount(); const openvdb::Index64 n = tree0.activeVoxelCount(); const openvdb::Index64 
n1 = tree1.activeVoxelCount(); //fprintf(stderr,"Union of spheres: n=%i, n0=%i n1=%i n0+n1=%i\n",n,n0,n1, n0+n1); EXPECT_TRUE( n > n0 ); EXPECT_TRUE( n > n1 ); EXPECT_TRUE( n < n0 + n1 ); for (openvdb::FloatTree::ValueOnCIter iter = tree1.cbeginValueOn(); iter; ++iter) { const openvdb::Coord p = iter.getCoord(); EXPECT_TRUE(tree0.isValueOn(p)); ASSERT_DOUBLES_EXACTLY_EQUAL(tree0.getValue(p), tree0_copy.getValue(p)); } for (openvdb::FloatTree::ValueOnCIter iter = tree0_copy.cbeginValueOn(); iter; ++iter) { const openvdb::Coord p = iter.getCoord(); EXPECT_TRUE(tree0.isValueOn(p)); ASSERT_DOUBLES_EXACTLY_EQUAL(tree0.getValue(p), *iter); } } {// test union of a leaf and a tile if (openvdb::FloatTree::DEPTH > 2) { const int leafLevel = openvdb::FloatTree::DEPTH - 1; const int tileLevel = leafLevel - 1; const openvdb::Coord xyz(0); openvdb::FloatTree tree0; tree0.addTile(tileLevel, xyz, /*value=*/0, /*activeState=*/true); EXPECT_TRUE(tree0.isValueOn(xyz)); openvdb::FloatTree tree1; tree1.touchLeaf(xyz)->setValuesOn(); EXPECT_TRUE(tree1.isValueOn(xyz)); tree0.topologyUnion(tree1); EXPECT_TRUE(tree0.isValueOn(xyz)); EXPECT_EQ(tree0.getValueDepth(xyz), leafLevel); } } }// testTopologyUnion TEST_F(TestTree, testTopologyIntersection) { {//no overlapping voxels const ValueType background=0.0f; openvdb::FloatTree tree0(background), tree1(background); tree0.setValue(openvdb::Coord( 500, 300, 200), 1.0f); tree1.setValue(openvdb::Coord( 8, 11, 11), 2.0f); EXPECT_EQ(openvdb::Index64(1), tree0.activeVoxelCount()); EXPECT_EQ(openvdb::Index64(1), tree1.activeVoxelCount()); tree1.topologyIntersection(tree0); EXPECT_EQ(tree1.activeVoxelCount(), openvdb::Index64(0)); EXPECT_TRUE(!tree1.empty()); openvdb::tools::pruneInactive(tree1); EXPECT_TRUE(tree1.empty()); } {//two overlapping voxels const ValueType background=0.0f; openvdb::FloatTree tree0(background), tree1(background); tree0.setValue(openvdb::Coord( 500, 300, 200), 1.0f); tree1.setValue(openvdb::Coord( 8, 11, 11), 2.0f); tree1.setValue(openvdb::Coord( 500, 300, 200), 1.0f); EXPECT_EQ( openvdb::Index64(1), tree0.activeVoxelCount() ); EXPECT_EQ( openvdb::Index64(2), tree1.activeVoxelCount() ); tree1.topologyIntersection(tree0); EXPECT_EQ( openvdb::Index64(1), tree1.activeVoxelCount() ); EXPECT_TRUE(!tree1.empty()); openvdb::tools::pruneInactive(tree1); EXPECT_TRUE(!tree1.empty()); } {//4 overlapping voxels const ValueType background=0.0f; openvdb::FloatTree tree0(background), tree1(background); tree0.setValue(openvdb::Coord( 500, 300, 200), 1.0f); tree0.setValue(openvdb::Coord( 400, 30, 20), 2.0f); tree0.setValue(openvdb::Coord( 8, 11, 11), 3.0f); EXPECT_EQ(openvdb::Index64(3), tree0.activeVoxelCount()); EXPECT_EQ(openvdb::Index32(3), tree0.leafCount() ); tree1.setValue(openvdb::Coord( 500, 301, 200), 4.0f); tree1.setValue(openvdb::Coord( 400, 30, 20), 5.0f); tree1.setValue(openvdb::Coord( 8, 11, 11), 6.0f); EXPECT_EQ(openvdb::Index64(3), tree1.activeVoxelCount()); EXPECT_EQ(openvdb::Index32(3), tree1.leafCount() ); tree1.topologyIntersection(tree0); EXPECT_EQ( openvdb::Index32(3), tree1.leafCount() ); EXPECT_EQ( openvdb::Index64(2), tree1.activeVoxelCount() ); EXPECT_TRUE(!tree1.empty()); openvdb::tools::pruneInactive(tree1); EXPECT_TRUE(!tree1.empty()); EXPECT_EQ( openvdb::Index32(2), tree1.leafCount() ); EXPECT_EQ( openvdb::Index64(2), tree1.activeVoxelCount() ); } {//passive tile const ValueType background=0.0f; const openvdb::Index64 dim = openvdb::FloatTree::RootNodeType::ChildNodeType::DIM; openvdb::FloatTree tree0(background), tree1(background); 
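        // Note: the fill below creates an *inactive* (passive) tile in tree0, so tree0
        // contributes no active topology at all; intersecting tree1 with it is expected
        // to leave tree1 with no active values, and pruning then empties it completely.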
tree0.fill(openvdb::CoordBBox(openvdb::Coord(0),openvdb::Coord(dim-1)),2.0f, false); EXPECT_EQ(openvdb::Index64(0), tree0.activeVoxelCount()); EXPECT_EQ(openvdb::Index32(0), tree0.leafCount() ); tree1.setValue(openvdb::Coord( 500, 301, 200), 4.0f); tree1.setValue(openvdb::Coord( 400, 30, 20), 5.0f); tree1.setValue(openvdb::Coord( dim, 11, 11), 6.0f); EXPECT_EQ(openvdb::Index32(3), tree1.leafCount() ); EXPECT_EQ(openvdb::Index64(3), tree1.activeVoxelCount()); tree1.topologyIntersection(tree0); EXPECT_EQ( openvdb::Index32(0), tree1.leafCount() ); EXPECT_EQ( openvdb::Index64(0), tree1.activeVoxelCount() ); EXPECT_TRUE(tree1.empty()); } {//active tile const ValueType background=0.0f; const openvdb::Index64 dim = openvdb::FloatTree::RootNodeType::ChildNodeType::DIM; openvdb::FloatTree tree0(background), tree1(background); tree1.fill(openvdb::CoordBBox(openvdb::Coord(0),openvdb::Coord(dim-1)),2.0f, true); EXPECT_EQ(dim*dim*dim, tree1.activeVoxelCount()); EXPECT_EQ(openvdb::Index32(0), tree1.leafCount() ); tree0.setValue(openvdb::Coord( 500, 301, 200), 4.0f); tree0.setValue(openvdb::Coord( 400, 30, 20), 5.0f); tree0.setValue(openvdb::Coord( dim, 11, 11), 6.0f); EXPECT_EQ(openvdb::Index64(3), tree0.activeVoxelCount()); EXPECT_EQ(openvdb::Index32(3), tree0.leafCount() ); tree1.topologyIntersection(tree0); EXPECT_EQ( openvdb::Index32(2), tree1.leafCount() ); EXPECT_EQ( openvdb::Index64(2), tree1.activeVoxelCount() ); EXPECT_TRUE(!tree1.empty()); openvdb::tools::pruneInactive(tree1); EXPECT_TRUE(!tree1.empty()); } {// use tree with different voxel type ValueType background=5.0f; openvdb::FloatTree tree0(background), tree1(background), tree2(background); EXPECT_TRUE(tree2.empty()); // tree0 = tree1.topologyIntersection(tree2) tree0.setValue(openvdb::Coord( 5, 10, 20),0.0f); tree0.setValue(openvdb::Coord(-5, 10,-20),0.1f); tree0.setValue(openvdb::Coord( 5,-10,-20),0.2f); tree0.setValue(openvdb::Coord(-5,-10,-20),0.3f); tree1.setValue(openvdb::Coord( 5, 10, 20),0.0f); tree1.setValue(openvdb::Coord(-5, 10,-20),0.1f); tree1.setValue(openvdb::Coord( 5,-10,-20),0.2f); tree1.setValue(openvdb::Coord(-5,-10,-20),0.3f); tree2.setValue(openvdb::Coord( 5, 10, 20),0.4f); tree2.setValue(openvdb::Coord(-5, 10,-20),0.5f); tree2.setValue(openvdb::Coord( 5,-10,-20),0.6f); tree2.setValue(openvdb::Coord(-5,-10,-20),0.7f); tree2.setValue(openvdb::Coord(-5000, 2000,-3000),4.5678f); tree2.setValue(openvdb::Coord( 5000,-2000,-3000),4.5678f); tree2.setValue(openvdb::Coord(-5000,-2000, 3000),4.5678f); openvdb::FloatTree tree1_copy(tree1); // tree3 has the same topology as tree2 but a different value type const openvdb::Vec3f background2(1.0f,3.4f,6.0f), vec_val(3.1f,5.3f,-9.5f); openvdb::Vec3fTree tree3(background2); for (openvdb::FloatTree::ValueOnCIter iter = tree2.cbeginValueOn(); iter; ++iter) { tree3.setValue(iter.getCoord(), vec_val); } EXPECT_EQ(openvdb::Index32(4), tree0.leafCount()); EXPECT_EQ(openvdb::Index32(4), tree1.leafCount()); EXPECT_EQ(openvdb::Index32(7), tree2.leafCount()); EXPECT_EQ(openvdb::Index32(7), tree3.leafCount()); //tree1.topologyInterection(tree2);//should make tree1 = tree0 tree1.topologyIntersection(tree3);//should make tree1 = tree0 EXPECT_TRUE(tree0.leafCount()==tree1.leafCount()); EXPECT_TRUE(tree0.nonLeafCount()==tree1.nonLeafCount()); EXPECT_TRUE(tree0.activeLeafVoxelCount()==tree1.activeLeafVoxelCount()); EXPECT_TRUE(tree0.inactiveLeafVoxelCount()==tree1.inactiveLeafVoxelCount()); EXPECT_TRUE(tree0.activeVoxelCount()==tree1.activeVoxelCount()); 
EXPECT_TRUE(tree0.inactiveVoxelCount()==tree1.inactiveVoxelCount()); EXPECT_TRUE(tree1.hasSameTopology(tree0)); EXPECT_TRUE(tree0.hasSameTopology(tree1)); for (openvdb::FloatTree::ValueOnCIter iter = tree0.cbeginValueOn(); iter; ++iter) { const openvdb::Coord p = iter.getCoord(); EXPECT_TRUE(tree1.isValueOn(p)); EXPECT_TRUE(tree2.isValueOn(p)); EXPECT_TRUE(tree3.isValueOn(p)); EXPECT_TRUE(tree1_copy.isValueOn(p)); ASSERT_DOUBLES_EXACTLY_EQUAL(*iter,tree1.getValue(p)); } for (openvdb::FloatTree::ValueOnCIter iter = tree1_copy.cbeginValueOn(); iter; ++iter) { EXPECT_TRUE(tree1.isValueOn(iter.getCoord())); ASSERT_DOUBLES_EXACTLY_EQUAL(*iter,tree1.getValue(iter.getCoord())); } for (openvdb::FloatTree::ValueOnCIter iter = tree1.cbeginValueOn(); iter; ++iter) { const openvdb::Coord p = iter.getCoord(); EXPECT_TRUE(tree0.isValueOn(p)); EXPECT_TRUE(tree2.isValueOn(p)); EXPECT_TRUE(tree3.isValueOn(p)); EXPECT_TRUE(tree1_copy.isValueOn(p)); ASSERT_DOUBLES_EXACTLY_EQUAL(*iter,tree0.getValue(p)); } } {// test overlapping spheres const float background=5.0f, R0=10.0f, R1=5.6f; const openvdb::Vec3f C0(35.0f, 30.0f, 40.0f), C1(22.3f, 30.5f, 31.0f); const openvdb::Coord dim(32, 32, 32); openvdb::FloatGrid grid0(background); openvdb::FloatGrid grid1(background); unittest_util::makeSphere<openvdb::FloatGrid>(dim, C0, R0, grid0, 1.0f, unittest_util::SPHERE_SPARSE_NARROW_BAND); unittest_util::makeSphere<openvdb::FloatGrid>(dim, C1, R1, grid1, 1.0f, unittest_util::SPHERE_SPARSE_NARROW_BAND); openvdb::FloatTree& tree0 = grid0.tree(); openvdb::FloatTree& tree1 = grid1.tree(); openvdb::FloatTree tree0_copy(tree0); tree0.topologyIntersection(tree1); const openvdb::Index64 n0 = tree0_copy.activeVoxelCount(); const openvdb::Index64 n = tree0.activeVoxelCount(); const openvdb::Index64 n1 = tree1.activeVoxelCount(); //fprintf(stderr,"Intersection of spheres: n=%i, n0=%i n1=%i n0+n1=%i\n",n,n0,n1, n0+n1); EXPECT_TRUE( n < n0 ); EXPECT_TRUE( n < n1 ); for (openvdb::FloatTree::ValueOnCIter iter = tree0.cbeginValueOn(); iter; ++iter) { const openvdb::Coord p = iter.getCoord(); EXPECT_TRUE(tree1.isValueOn(p)); EXPECT_TRUE(tree0_copy.isValueOn(p)); ASSERT_DOUBLES_EXACTLY_EQUAL(*iter, tree0_copy.getValue(p)); } } {// Test based on boolean grids openvdb::CoordBBox bigRegion(openvdb::Coord(-9), openvdb::Coord(10)); openvdb::CoordBBox smallRegion(openvdb::Coord( 1), openvdb::Coord(10)); openvdb::BoolGrid::Ptr gridBig = openvdb::BoolGrid::create(false); gridBig->fill(bigRegion, true/*value*/, true /*make active*/); EXPECT_EQ(8, int(gridBig->tree().activeTileCount())); EXPECT_EQ((20 * 20 * 20), int(gridBig->activeVoxelCount())); openvdb::BoolGrid::Ptr gridSmall = openvdb::BoolGrid::create(false); gridSmall->fill(smallRegion, true/*value*/, true /*make active*/); EXPECT_EQ(0, int(gridSmall->tree().activeTileCount())); EXPECT_EQ((10 * 10 * 10), int(gridSmall->activeVoxelCount())); // change the topology of gridBig by intersecting with gridSmall gridBig->topologyIntersection(*gridSmall); // Should be unchanged EXPECT_EQ(0, int(gridSmall->tree().activeTileCount())); EXPECT_EQ((10 * 10 * 10), int(gridSmall->activeVoxelCount())); // In this case the interesection should be exactly "small" EXPECT_EQ(0, int(gridBig->tree().activeTileCount())); EXPECT_EQ((10 * 10 * 10), int(gridBig->activeVoxelCount())); } }// testTopologyIntersection TEST_F(TestTree, testTopologyDifference) { {//no overlapping voxels const ValueType background=0.0f; openvdb::FloatTree tree0(background), tree1(background); tree0.setValue(openvdb::Coord( 500, 300, 200), 
1.0f); tree1.setValue(openvdb::Coord( 8, 11, 11), 2.0f); EXPECT_EQ(openvdb::Index64(1), tree0.activeVoxelCount()); EXPECT_EQ(openvdb::Index64(1), tree1.activeVoxelCount()); tree1.topologyDifference(tree0); EXPECT_EQ(tree1.activeVoxelCount(), openvdb::Index64(1)); EXPECT_TRUE(!tree1.empty()); openvdb::tools::pruneInactive(tree1); EXPECT_TRUE(!tree1.empty()); } {//two overlapping voxels const ValueType background=0.0f; openvdb::FloatTree tree0(background), tree1(background); tree0.setValue(openvdb::Coord( 500, 300, 200), 1.0f); tree1.setValue(openvdb::Coord( 8, 11, 11), 2.0f); tree1.setValue(openvdb::Coord( 500, 300, 200), 1.0f); EXPECT_EQ( openvdb::Index64(1), tree0.activeVoxelCount() ); EXPECT_EQ( openvdb::Index64(2), tree1.activeVoxelCount() ); EXPECT_TRUE( tree0.isValueOn(openvdb::Coord( 500, 300, 200))); EXPECT_TRUE( tree1.isValueOn(openvdb::Coord( 500, 300, 200))); EXPECT_TRUE( tree1.isValueOn(openvdb::Coord( 8, 11, 11))); tree1.topologyDifference(tree0); EXPECT_EQ( openvdb::Index64(1), tree1.activeVoxelCount() ); EXPECT_TRUE( tree0.isValueOn(openvdb::Coord( 500, 300, 200))); EXPECT_TRUE(!tree1.isValueOn(openvdb::Coord( 500, 300, 200))); EXPECT_TRUE( tree1.isValueOn(openvdb::Coord( 8, 11, 11))); EXPECT_TRUE(!tree1.empty()); openvdb::tools::pruneInactive(tree1); EXPECT_TRUE(!tree1.empty()); } {//4 overlapping voxels const ValueType background=0.0f; openvdb::FloatTree tree0(background), tree1(background); tree0.setValue(openvdb::Coord( 500, 300, 200), 1.0f); tree0.setValue(openvdb::Coord( 400, 30, 20), 2.0f); tree0.setValue(openvdb::Coord( 8, 11, 11), 3.0f); EXPECT_EQ(openvdb::Index64(3), tree0.activeVoxelCount()); EXPECT_EQ(openvdb::Index32(3), tree0.leafCount() ); tree1.setValue(openvdb::Coord( 500, 301, 200), 4.0f); tree1.setValue(openvdb::Coord( 400, 30, 20), 5.0f); tree1.setValue(openvdb::Coord( 8, 11, 11), 6.0f); EXPECT_EQ(openvdb::Index64(3), tree1.activeVoxelCount()); EXPECT_EQ(openvdb::Index32(3), tree1.leafCount() ); tree1.topologyDifference(tree0); EXPECT_EQ( openvdb::Index32(3), tree1.leafCount() ); EXPECT_EQ( openvdb::Index64(1), tree1.activeVoxelCount() ); EXPECT_TRUE(!tree1.empty()); openvdb::tools::pruneInactive(tree1); EXPECT_TRUE(!tree1.empty()); EXPECT_EQ( openvdb::Index32(1), tree1.leafCount() ); EXPECT_EQ( openvdb::Index64(1), tree1.activeVoxelCount() ); } {//passive tile const ValueType background=0.0f; const openvdb::Index64 dim = openvdb::FloatTree::RootNodeType::ChildNodeType::DIM; openvdb::FloatTree tree0(background), tree1(background); tree0.fill(openvdb::CoordBBox(openvdb::Coord(0),openvdb::Coord(dim-1)),2.0f, false); EXPECT_EQ(openvdb::Index64(0), tree0.activeVoxelCount()); EXPECT_TRUE(!tree0.hasActiveTiles()); EXPECT_EQ(openvdb::Index64(0), tree0.root().onTileCount()); EXPECT_EQ(openvdb::Index32(0), tree0.leafCount() ); tree1.setValue(openvdb::Coord( 500, 301, 200), 4.0f); tree1.setValue(openvdb::Coord( 400, 30, 20), 5.0f); tree1.setValue(openvdb::Coord( dim, 11, 11), 6.0f); EXPECT_EQ(openvdb::Index64(3), tree1.activeVoxelCount()); EXPECT_TRUE(!tree1.hasActiveTiles()); EXPECT_EQ(openvdb::Index32(3), tree1.leafCount() ); tree1.topologyDifference(tree0); EXPECT_EQ( openvdb::Index32(3), tree1.leafCount() ); EXPECT_EQ( openvdb::Index64(3), tree1.activeVoxelCount() ); EXPECT_TRUE(!tree1.empty()); openvdb::tools::pruneInactive(tree1); EXPECT_EQ( openvdb::Index32(3), tree1.leafCount() ); EXPECT_EQ( openvdb::Index64(3), tree1.activeVoxelCount() ); EXPECT_TRUE(!tree1.empty()); } {//active tile const ValueType background=0.0f; const openvdb::Index64 dim = 
openvdb::FloatTree::RootNodeType::ChildNodeType::DIM; openvdb::FloatTree tree0(background), tree1(background); tree1.fill(openvdb::CoordBBox(openvdb::Coord(0),openvdb::Coord(dim-1)),2.0f, true); EXPECT_EQ(dim*dim*dim, tree1.activeVoxelCount()); EXPECT_TRUE(tree1.hasActiveTiles()); EXPECT_EQ(openvdb::Index64(1), tree1.root().onTileCount()); EXPECT_EQ(openvdb::Index32(0), tree0.leafCount() ); tree0.setValue(openvdb::Coord( 500, 301, 200), 4.0f); tree0.setValue(openvdb::Coord( 400, 30, 20), 5.0f); tree0.setValue(openvdb::Coord( int(dim), 11, 11), 6.0f); EXPECT_TRUE(!tree0.hasActiveTiles()); EXPECT_EQ(openvdb::Index64(3), tree0.activeVoxelCount()); EXPECT_EQ(openvdb::Index32(3), tree0.leafCount() ); EXPECT_TRUE( tree0.isValueOn(openvdb::Coord( int(dim), 11, 11))); EXPECT_TRUE(!tree1.isValueOn(openvdb::Coord( int(dim), 11, 11))); tree1.topologyDifference(tree0); EXPECT_TRUE(tree1.root().onTileCount() > 1); EXPECT_EQ( dim*dim*dim - 2, tree1.activeVoxelCount() ); EXPECT_TRUE(!tree1.empty()); openvdb::tools::pruneInactive(tree1); EXPECT_EQ( dim*dim*dim - 2, tree1.activeVoxelCount() ); EXPECT_TRUE(!tree1.empty()); } {//active tile const ValueType background=0.0f; const openvdb::Index64 dim = openvdb::FloatTree::RootNodeType::ChildNodeType::DIM; openvdb::FloatTree tree0(background), tree1(background); tree1.fill(openvdb::CoordBBox(openvdb::Coord(0),openvdb::Coord(dim-1)),2.0f, true); EXPECT_EQ(dim*dim*dim, tree1.activeVoxelCount()); EXPECT_TRUE(tree1.hasActiveTiles()); EXPECT_EQ(openvdb::Index64(1), tree1.root().onTileCount()); EXPECT_EQ(openvdb::Index32(0), tree0.leafCount() ); tree0.setValue(openvdb::Coord( 500, 301, 200), 4.0f); tree0.setValue(openvdb::Coord( 400, 30, 20), 5.0f); tree0.setValue(openvdb::Coord( dim, 11, 11), 6.0f); EXPECT_TRUE(!tree0.hasActiveTiles()); EXPECT_EQ(openvdb::Index64(3), tree0.activeVoxelCount()); EXPECT_EQ(openvdb::Index32(3), tree0.leafCount() ); tree0.topologyDifference(tree1); EXPECT_EQ( openvdb::Index32(1), tree0.leafCount() ); EXPECT_EQ( openvdb::Index64(1), tree0.activeVoxelCount() ); EXPECT_TRUE(!tree0.empty()); openvdb::tools::pruneInactive(tree0); EXPECT_EQ( openvdb::Index32(1), tree0.leafCount() ); EXPECT_EQ( openvdb::Index64(1), tree0.activeVoxelCount() ); EXPECT_TRUE(!tree1.empty()); } {// use tree with different voxel type ValueType background=5.0f; openvdb::FloatTree tree0(background), tree1(background), tree2(background); EXPECT_TRUE(tree2.empty()); // tree0 = tree1.topologyIntersection(tree2) tree0.setValue(openvdb::Coord( 5, 10, 20),0.0f); tree0.setValue(openvdb::Coord(-5, 10,-20),0.1f); tree0.setValue(openvdb::Coord( 5,-10,-20),0.2f); tree0.setValue(openvdb::Coord(-5,-10,-20),0.3f); tree1.setValue(openvdb::Coord( 5, 10, 20),0.0f); tree1.setValue(openvdb::Coord(-5, 10,-20),0.1f); tree1.setValue(openvdb::Coord( 5,-10,-20),0.2f); tree1.setValue(openvdb::Coord(-5,-10,-20),0.3f); tree2.setValue(openvdb::Coord( 5, 10, 20),0.4f); tree2.setValue(openvdb::Coord(-5, 10,-20),0.5f); tree2.setValue(openvdb::Coord( 5,-10,-20),0.6f); tree2.setValue(openvdb::Coord(-5,-10,-20),0.7f); tree2.setValue(openvdb::Coord(-5000, 2000,-3000),4.5678f); tree2.setValue(openvdb::Coord( 5000,-2000,-3000),4.5678f); tree2.setValue(openvdb::Coord(-5000,-2000, 3000),4.5678f); openvdb::FloatTree tree1_copy(tree1); // tree3 has the same topology as tree2 but a different value type const openvdb::Vec3f background2(1.0f,3.4f,6.0f), vec_val(3.1f,5.3f,-9.5f); openvdb::Vec3fTree tree3(background2); for (openvdb::FloatTree::ValueOnCIter iter = tree2.cbeginValueOn(); iter; ++iter) { 
tree3.setValue(iter.getCoord(), vec_val); } EXPECT_EQ(openvdb::Index32(4), tree0.leafCount()); EXPECT_EQ(openvdb::Index32(4), tree1.leafCount()); EXPECT_EQ(openvdb::Index32(7), tree2.leafCount()); EXPECT_EQ(openvdb::Index32(7), tree3.leafCount()); //tree1.topologyInterection(tree2);//should make tree1 = tree0 tree1.topologyIntersection(tree3);//should make tree1 = tree0 EXPECT_TRUE(tree0.leafCount()==tree1.leafCount()); EXPECT_TRUE(tree0.nonLeafCount()==tree1.nonLeafCount()); EXPECT_TRUE(tree0.activeLeafVoxelCount()==tree1.activeLeafVoxelCount()); EXPECT_TRUE(tree0.inactiveLeafVoxelCount()==tree1.inactiveLeafVoxelCount()); EXPECT_TRUE(tree0.activeVoxelCount()==tree1.activeVoxelCount()); EXPECT_TRUE(tree0.inactiveVoxelCount()==tree1.inactiveVoxelCount()); EXPECT_TRUE(tree1.hasSameTopology(tree0)); EXPECT_TRUE(tree0.hasSameTopology(tree1)); for (openvdb::FloatTree::ValueOnCIter iter = tree0.cbeginValueOn(); iter; ++iter) { const openvdb::Coord p = iter.getCoord(); EXPECT_TRUE(tree1.isValueOn(p)); EXPECT_TRUE(tree2.isValueOn(p)); EXPECT_TRUE(tree3.isValueOn(p)); EXPECT_TRUE(tree1_copy.isValueOn(p)); ASSERT_DOUBLES_EXACTLY_EQUAL(*iter,tree1.getValue(p)); } for (openvdb::FloatTree::ValueOnCIter iter = tree1_copy.cbeginValueOn(); iter; ++iter) { EXPECT_TRUE(tree1.isValueOn(iter.getCoord())); ASSERT_DOUBLES_EXACTLY_EQUAL(*iter,tree1.getValue(iter.getCoord())); } for (openvdb::FloatTree::ValueOnCIter iter = tree1.cbeginValueOn(); iter; ++iter) { const openvdb::Coord p = iter.getCoord(); EXPECT_TRUE(tree0.isValueOn(p)); EXPECT_TRUE(tree2.isValueOn(p)); EXPECT_TRUE(tree3.isValueOn(p)); EXPECT_TRUE(tree1_copy.isValueOn(p)); ASSERT_DOUBLES_EXACTLY_EQUAL(*iter,tree0.getValue(p)); } } {// test overlapping spheres const float background=5.0f, R0=10.0f, R1=5.6f; const openvdb::Vec3f C0(35.0f, 30.0f, 40.0f), C1(22.3f, 30.5f, 31.0f); const openvdb::Coord dim(32, 32, 32); openvdb::FloatGrid grid0(background); openvdb::FloatGrid grid1(background); unittest_util::makeSphere<openvdb::FloatGrid>(dim, C0, R0, grid0, 1.0f, unittest_util::SPHERE_SPARSE_NARROW_BAND); unittest_util::makeSphere<openvdb::FloatGrid>(dim, C1, R1, grid1, 1.0f, unittest_util::SPHERE_SPARSE_NARROW_BAND); openvdb::FloatTree& tree0 = grid0.tree(); openvdb::FloatTree& tree1 = grid1.tree(); openvdb::FloatTree tree0_copy(tree0); tree0.topologyDifference(tree1); const openvdb::Index64 n0 = tree0_copy.activeVoxelCount(); const openvdb::Index64 n = tree0.activeVoxelCount(); EXPECT_TRUE( n < n0 ); for (openvdb::FloatTree::ValueOnCIter iter = tree0.cbeginValueOn(); iter; ++iter) { const openvdb::Coord p = iter.getCoord(); EXPECT_TRUE(tree1.isValueOff(p)); EXPECT_TRUE(tree0_copy.isValueOn(p)); ASSERT_DOUBLES_EXACTLY_EQUAL(*iter, tree0_copy.getValue(p)); } } } // testTopologyDifference //////////////////////////////////////// TEST_F(TestTree, testFill) { // Use a custom tree configuration to ensure we flood-fill at all levels! 
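    // Rough distinction exercised below (as the assertions suggest): sparseFill()
    // may represent the filled region with active tiles, whereas denseFill()
    // voxelizes the filled region down to leaf level within the given bbox.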
using LeafT = openvdb::tree::LeafNode<float,2>;//4^3 using InternalT = openvdb::tree::InternalNode<LeafT,2>;//4^3 using RootT = openvdb::tree::RootNode<InternalT>;// child nodes are 16^3 using TreeT = openvdb::tree::Tree<RootT>; const float outside = 2.0f, inside = -outside; const openvdb::CoordBBox bbox{openvdb::Coord{-3, -50, 30}, openvdb::Coord{13, 11, 323}}, otherBBox{openvdb::Coord{400, 401, 402}, openvdb::Coord{600}}; {// sparse fill openvdb::Grid<TreeT>::Ptr grid = openvdb::Grid<TreeT>::create(outside); TreeT& tree = grid->tree(); EXPECT_TRUE(!tree.hasActiveTiles()); EXPECT_EQ(openvdb::Index64(0), tree.activeVoxelCount()); for (openvdb::CoordBBox::Iterator<true> ijk(bbox); ijk; ++ijk) { ASSERT_DOUBLES_EXACTLY_EQUAL(outside, tree.getValue(*ijk)); } tree.sparseFill(bbox, inside, /*active=*/true); EXPECT_TRUE(tree.hasActiveTiles()); EXPECT_EQ(openvdb::Index64(bbox.volume()), tree.activeVoxelCount()); for (openvdb::CoordBBox::Iterator<true> ijk(bbox); ijk; ++ijk) { ASSERT_DOUBLES_EXACTLY_EQUAL(inside, tree.getValue(*ijk)); } } {// dense fill openvdb::Grid<TreeT>::Ptr grid = openvdb::Grid<TreeT>::create(outside); TreeT& tree = grid->tree(); EXPECT_TRUE(!tree.hasActiveTiles()); EXPECT_EQ(openvdb::Index64(0), tree.activeVoxelCount()); for (openvdb::CoordBBox::Iterator<true> ijk(bbox); ijk; ++ijk) { ASSERT_DOUBLES_EXACTLY_EQUAL(outside, tree.getValue(*ijk)); } // Add some active tiles. tree.sparseFill(otherBBox, inside, /*active=*/true); EXPECT_TRUE(tree.hasActiveTiles()); EXPECT_EQ(otherBBox.volume(), tree.activeVoxelCount()); tree.denseFill(bbox, inside, /*active=*/true); // In OpenVDB 4.0.0 and earlier, denseFill() densified active tiles // throughout the tree. Verify that it no longer does that. EXPECT_TRUE(tree.hasActiveTiles()); // i.e., otherBBox EXPECT_EQ(bbox.volume() + otherBBox.volume(), tree.activeVoxelCount()); for (openvdb::CoordBBox::Iterator<true> ijk(bbox); ijk; ++ijk) { ASSERT_DOUBLES_EXACTLY_EQUAL(inside, tree.getValue(*ijk)); } tree.clear(); EXPECT_TRUE(!tree.hasActiveTiles()); tree.sparseFill(otherBBox, inside, /*active=*/true); EXPECT_TRUE(tree.hasActiveTiles()); tree.denseFill(bbox, inside, /*active=*/false); EXPECT_TRUE(tree.hasActiveTiles()); // i.e., otherBBox EXPECT_EQ(otherBBox.volume(), tree.activeVoxelCount()); // In OpenVDB 4.0.0 and earlier, denseFill() filled sparsely if given // an inactive fill value. Verify that it now fills densely. const int leafDepth = int(tree.treeDepth()) - 1; for (openvdb::CoordBBox::Iterator<true> ijk(bbox); ijk; ++ijk) { EXPECT_EQ(leafDepth, tree.getValueDepth(*ijk)); ASSERT_DOUBLES_EXACTLY_EQUAL(inside, tree.getValue(*ijk)); } } }// testFill TEST_F(TestTree, testSignedFloodFill) { // Use a custom tree configuration to ensure we flood-fill at all levels! 
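    // With 4^3-voxel leaf nodes and a single 4^3-child internal level, the root's
    // children span only 16^3 voxels, so the 48^3 narrow-band sphere built below
    // touches nodes at every level and signedFloodFill() has to propagate the
    // inside/outside sign through leaf values, internal tiles and root tiles alike.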
using LeafT = openvdb::tree::LeafNode<float,2>;//4^3 using InternalT = openvdb::tree::InternalNode<LeafT,2>;//4^3 using RootT = openvdb::tree::RootNode<InternalT>;// child nodes are 16^3 using TreeT = openvdb::tree::Tree<RootT>; const float outside = 2.0f, inside = -outside, radius = 20.0f; {//first test flood filling of a leaf node const LeafT::ValueType fill0=5, fill1=-fill0; openvdb::tools::SignedFloodFillOp<TreeT> sff(fill0, fill1); int D = LeafT::dim(), C=D/2; openvdb::Coord origin(0,0,0), left(0,0,C-1), right(0,0,C); LeafT leaf(origin,fill0); for (int i=0; i<D; ++i) { left[0]=right[0]=i; for (int j=0; j<D; ++j) { left[1]=right[1]=j; leaf.setValueOn(left,fill0); leaf.setValueOn(right,fill1); } } const openvdb::Coord first(0,0,0), last(D-1,D-1,D-1); EXPECT_TRUE(!leaf.isValueOn(first)); EXPECT_TRUE(!leaf.isValueOn(last)); EXPECT_EQ(fill0, leaf.getValue(first)); EXPECT_EQ(fill0, leaf.getValue(last)); sff(leaf); EXPECT_TRUE(!leaf.isValueOn(first)); EXPECT_TRUE(!leaf.isValueOn(last)); EXPECT_EQ(fill0, leaf.getValue(first)); EXPECT_EQ(fill1, leaf.getValue(last)); } openvdb::Grid<TreeT>::Ptr grid = openvdb::Grid<TreeT>::create(outside); TreeT& tree = grid->tree(); const RootT& root = tree.root(); const openvdb::Coord dim(3*16, 3*16, 3*16); const openvdb::Coord C(16+8,16+8,16+8); EXPECT_TRUE(!tree.isValueOn(C)); EXPECT_TRUE(root.getTableSize()==0); //make narrow band of sphere without setting sign for the background values! openvdb::Grid<TreeT>::Accessor acc = grid->getAccessor(); const openvdb::Vec3f center(static_cast<float>(C[0]), static_cast<float>(C[1]), static_cast<float>(C[2])); openvdb::Coord xyz; for (xyz[0]=0; xyz[0]<dim[0]; ++xyz[0]) { for (xyz[1]=0; xyz[1]<dim[1]; ++xyz[1]) { for (xyz[2]=0; xyz[2]<dim[2]; ++xyz[2]) { const openvdb::Vec3R p = grid->transform().indexToWorld(xyz); const float dist = float((p-center).length() - radius); if (fabs(dist) > outside) continue; acc.setValue(xyz, dist); } } } // Check narrow band with incorrect background const size_t size_before = root.getTableSize(); EXPECT_TRUE(size_before>0); EXPECT_TRUE(!tree.isValueOn(C)); ASSERT_DOUBLES_EXACTLY_EQUAL(outside,tree.getValue(C)); for (xyz[0]=0; xyz[0]<dim[0]; ++xyz[0]) { for (xyz[1]=0; xyz[1]<dim[1]; ++xyz[1]) { for (xyz[2]=0; xyz[2]<dim[2]; ++xyz[2]) { const openvdb::Vec3R p = grid->transform().indexToWorld(xyz); const float dist = float((p-center).length() - radius); const float val = acc.getValue(xyz); if (dist < inside) { ASSERT_DOUBLES_EXACTLY_EQUAL( val, outside); } else if (dist>outside) { ASSERT_DOUBLES_EXACTLY_EQUAL( val, outside); } else { ASSERT_DOUBLES_EXACTLY_EQUAL( val, dist ); } } } } EXPECT_TRUE(tree.getValueDepth(C) == -1);//i.e. 
background value openvdb::tools::signedFloodFill(tree); EXPECT_TRUE(tree.getValueDepth(C) == 0);//added inside tile to root // Check narrow band with correct background for (xyz[0]=0; xyz[0]<dim[0]; ++xyz[0]) { for (xyz[1]=0; xyz[1]<dim[1]; ++xyz[1]) { for (xyz[2]=0; xyz[2]<dim[2]; ++xyz[2]) { const openvdb::Vec3R p = grid->transform().indexToWorld(xyz); const float dist = float((p-center).length() - radius); const float val = acc.getValue(xyz); if (dist < inside) { ASSERT_DOUBLES_EXACTLY_EQUAL( val, inside); } else if (dist>outside) { ASSERT_DOUBLES_EXACTLY_EQUAL( val, outside); } else { ASSERT_DOUBLES_EXACTLY_EQUAL( val, dist ); } } } } EXPECT_TRUE(root.getTableSize()>size_before);//added inside root tiles EXPECT_TRUE(!tree.isValueOn(C)); ASSERT_DOUBLES_EXACTLY_EQUAL(inside,tree.getValue(C)); } TEST_F(TestTree, testPruneInactive) { using openvdb::Coord; using openvdb::Index32; using openvdb::Index64; const float background = 5.0; openvdb::FloatTree tree(background); // Verify that the newly-constructed tree is empty and that pruning it has no effect. EXPECT_TRUE(tree.empty()); openvdb::tools::prune(tree); EXPECT_TRUE(tree.empty()); openvdb::tools::pruneInactive(tree); EXPECT_TRUE(tree.empty()); // Set some active values. tree.setValue(Coord(-5, 10, 20), 0.1f); tree.setValue(Coord(-5,-10, 20), 0.4f); tree.setValue(Coord(-5, 10,-20), 0.5f); tree.setValue(Coord(-5,-10,-20), 0.7f); tree.setValue(Coord( 5, 10, 20), 0.0f); tree.setValue(Coord( 5,-10, 20), 0.2f); tree.setValue(Coord( 5,-10,-20), 0.6f); tree.setValue(Coord( 5, 10,-20), 0.3f); // Verify that the tree has the expected numbers of active voxels and leaf nodes. EXPECT_EQ(Index64(8), tree.activeVoxelCount()); EXPECT_EQ(Index32(8), tree.leafCount()); // Verify that prune() has no effect, since the values are all different. openvdb::tools::prune(tree); EXPECT_EQ(Index64(8), tree.activeVoxelCount()); EXPECT_EQ(Index32(8), tree.leafCount()); // Verify that pruneInactive() has no effect, since the values are active. openvdb::tools::pruneInactive(tree); EXPECT_EQ(Index64(8), tree.activeVoxelCount()); EXPECT_EQ(Index32(8), tree.leafCount()); // Make some of the active values inactive, without changing their values. tree.setValueOff(Coord(-5, 10, 20)); tree.setValueOff(Coord(-5,-10, 20)); tree.setValueOff(Coord(-5, 10,-20)); tree.setValueOff(Coord(-5,-10,-20)); EXPECT_EQ(Index64(4), tree.activeVoxelCount()); EXPECT_EQ(Index32(8), tree.leafCount()); // Verify that prune() has no effect, since the values are still different. openvdb::tools::prune(tree); EXPECT_EQ(Index64(4), tree.activeVoxelCount()); EXPECT_EQ(Index32(8), tree.leafCount()); // Verify that pruneInactive() prunes the nodes containing only inactive voxels. openvdb::tools::pruneInactive(tree); EXPECT_EQ(Index64(4), tree.activeVoxelCount()); EXPECT_EQ(Index32(4), tree.leafCount()); // Make all of the active values inactive, without changing their values. tree.setValueOff(Coord( 5, 10, 20)); tree.setValueOff(Coord( 5,-10, 20)); tree.setValueOff(Coord( 5,-10,-20)); tree.setValueOff(Coord( 5, 10,-20)); EXPECT_EQ(Index64(0), tree.activeVoxelCount()); EXPECT_EQ(Index32(4), tree.leafCount()); // Verify that prune() has no effect, since the values are still different. openvdb::tools::prune(tree); EXPECT_EQ(Index64(0), tree.activeVoxelCount()); EXPECT_EQ(Index32(4), tree.leafCount()); // Verify that pruneInactive() prunes all of the remaining leaf nodes. 
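    // For reference (and consistent with the checks above): tools::prune() collapses
    // nodes whose values are all equal and share the same active state, while
    // tools::pruneInactive() only removes nodes containing nothing but inactive
    // background values, which is why prune() was a no-op throughout this test.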
    openvdb::tools::pruneInactive(tree);
    EXPECT_TRUE(tree.empty());
}

TEST_F(TestTree, testPruneLevelSet)
{
    const float background=10.0f, R=5.6f;
    const openvdb::Vec3f C(12.3f, 15.5f, 10.0f);
    const openvdb::Coord dim(32, 32, 32);

    openvdb::FloatGrid grid(background);
    unittest_util::makeSphere<openvdb::FloatGrid>(dim, C, R, grid, 1.0f,
        unittest_util::SPHERE_SPARSE_NARROW_BAND);
    openvdb::FloatTree& tree = grid.tree();

    openvdb::Index64 count = 0;
    openvdb::Coord xyz;
    for (xyz[0]=0; xyz[0]<dim[0]; ++xyz[0]) {
        for (xyz[1]=0; xyz[1]<dim[1]; ++xyz[1]) {
            for (xyz[2]=0; xyz[2]<dim[2]; ++xyz[2]) {
                if (fabs(tree.getValue(xyz))<background) ++count;
            }
        }
    }

    const openvdb::Index32 leafCount = tree.leafCount();
    EXPECT_EQ(tree.activeVoxelCount(), count);
    EXPECT_EQ(tree.activeLeafVoxelCount(), count);

    openvdb::Index64 removed = 0;
    const float new_width = background - 9.0f;

    // This version is fast since it only visits voxels and avoids
    // random access to set the voxels off.
    using VoxelOnIter = openvdb::FloatTree::LeafNodeType::ValueOnIter;
    for (openvdb::FloatTree::LeafIter lIter = tree.beginLeaf(); lIter; ++lIter) {
        for (VoxelOnIter vIter = lIter->beginValueOn(); vIter; ++vIter) {
            if (fabs(*vIter)<new_width) continue;
            lIter->setValueOff(vIter.pos(), *vIter > 0.0f ? background : -background);
            ++removed;
        }
    }
    // The following version is slower since it employs
    // FloatTree::ValueOnIter that visits both tiles and voxels and
    // also uses random access to set the voxels off.
    /*
    for (openvdb::FloatTree::ValueOnIter i = tree.beginValueOn(); i; ++i) {
        if (fabs(*i)<new_width) continue;
        tree.setValueOff(i.getCoord(), *i > 0.0f ? background : -background);
        ++removed2;
    }
    */

    EXPECT_EQ(leafCount, tree.leafCount());
    //std::cerr << "Leaf count=" << tree.leafCount() << std::endl;
    EXPECT_EQ(tree.activeVoxelCount(), count-removed);
    EXPECT_EQ(tree.activeLeafVoxelCount(), count-removed);

    openvdb::tools::pruneLevelSet(tree);

    EXPECT_TRUE(tree.leafCount() < leafCount);
    //std::cerr << "Leaf count=" << tree.leafCount() << std::endl;
    EXPECT_EQ(tree.activeVoxelCount(), count-removed);
    EXPECT_EQ(tree.activeLeafVoxelCount(), count-removed);

    openvdb::FloatTree::ValueOnCIter i = tree.cbeginValueOn();
    for (; i; ++i) EXPECT_TRUE( *i < new_width);

    for (xyz[0]=0; xyz[0]<dim[0]; ++xyz[0]) {
        for (xyz[1]=0; xyz[1]<dim[1]; ++xyz[1]) {
            for (xyz[2]=0; xyz[2]<dim[2]; ++xyz[2]) {
                const float val = tree.getValue(xyz);
                if (fabs(val)<new_width)
                    EXPECT_TRUE(tree.isValueOn(xyz));
                else if (val < 0.0f) {
                    EXPECT_TRUE(tree.isValueOff(xyz));
                    ASSERT_DOUBLES_EXACTLY_EQUAL( -background, val );
                } else {
                    EXPECT_TRUE(tree.isValueOff(xyz));
                    ASSERT_DOUBLES_EXACTLY_EQUAL( background, val );
                }
            }
        }
    }
}

TEST_F(TestTree, testTouchLeaf)
{
    const float background=10.0f;
    const openvdb::Coord xyz(-20,30,10);
    {// test tree
        openvdb::FloatTree::Ptr tree(new openvdb::FloatTree(background));
        EXPECT_EQ(-1, tree->getValueDepth(xyz));
        EXPECT_EQ( 0, int(tree->leafCount()));
        EXPECT_TRUE(tree->touchLeaf(xyz) != nullptr);
        EXPECT_EQ( 3, tree->getValueDepth(xyz));
        EXPECT_EQ( 1, int(tree->leafCount()));
        EXPECT_TRUE(!tree->isValueOn(xyz));
        ASSERT_DOUBLES_EXACTLY_EQUAL(background, tree->getValue(xyz));
    }
    {// test accessor
        openvdb::FloatTree::Ptr tree(new openvdb::FloatTree(background));
        openvdb::tree::ValueAccessor<openvdb::FloatTree> acc(*tree);
        EXPECT_EQ(-1, acc.getValueDepth(xyz));
        EXPECT_EQ( 0, int(tree->leafCount()));
        EXPECT_TRUE(acc.touchLeaf(xyz) != nullptr);
        EXPECT_EQ( 3, tree->getValueDepth(xyz));
        EXPECT_EQ( 1, int(tree->leafCount()));
        EXPECT_TRUE(!acc.isValueOn(xyz));
ASSERT_DOUBLES_EXACTLY_EQUAL(background, acc.getValue(xyz)); } } TEST_F(TestTree, testProbeLeaf) { const float background=10.0f, value = 2.0f; const openvdb::Coord xyz(-20,30,10); {// test Tree::probeLeaf openvdb::FloatTree::Ptr tree(new openvdb::FloatTree(background)); EXPECT_EQ(-1, tree->getValueDepth(xyz)); EXPECT_EQ( 0, int(tree->leafCount())); EXPECT_TRUE(tree->probeLeaf(xyz) == nullptr); EXPECT_EQ(-1, tree->getValueDepth(xyz)); EXPECT_EQ( 0, int(tree->leafCount())); tree->setValue(xyz, value); EXPECT_EQ( 3, tree->getValueDepth(xyz)); EXPECT_EQ( 1, int(tree->leafCount())); EXPECT_TRUE(tree->probeLeaf(xyz) != nullptr); EXPECT_EQ( 3, tree->getValueDepth(xyz)); EXPECT_EQ( 1, int(tree->leafCount())); EXPECT_TRUE(tree->isValueOn(xyz)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, tree->getValue(xyz)); } {// test Tree::probeConstLeaf const openvdb::FloatTree tree1(background); EXPECT_EQ(-1, tree1.getValueDepth(xyz)); EXPECT_EQ( 0, int(tree1.leafCount())); EXPECT_TRUE(tree1.probeConstLeaf(xyz) == nullptr); EXPECT_EQ(-1, tree1.getValueDepth(xyz)); EXPECT_EQ( 0, int(tree1.leafCount())); openvdb::FloatTree tmp(tree1); tmp.setValue(xyz, value); const openvdb::FloatTree tree2(tmp); EXPECT_EQ( 3, tree2.getValueDepth(xyz)); EXPECT_EQ( 1, int(tree2.leafCount())); EXPECT_TRUE(tree2.probeConstLeaf(xyz) != nullptr); EXPECT_EQ( 3, tree2.getValueDepth(xyz)); EXPECT_EQ( 1, int(tree2.leafCount())); EXPECT_TRUE(tree2.isValueOn(xyz)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, tree2.getValue(xyz)); } {// test ValueAccessor::probeLeaf openvdb::FloatTree::Ptr tree(new openvdb::FloatTree(background)); openvdb::tree::ValueAccessor<openvdb::FloatTree> acc(*tree); EXPECT_EQ(-1, acc.getValueDepth(xyz)); EXPECT_EQ( 0, int(tree->leafCount())); EXPECT_TRUE(acc.probeLeaf(xyz) == nullptr); EXPECT_EQ(-1, acc.getValueDepth(xyz)); EXPECT_EQ( 0, int(tree->leafCount())); acc.setValue(xyz, value); EXPECT_EQ( 3, acc.getValueDepth(xyz)); EXPECT_EQ( 1, int(tree->leafCount())); EXPECT_TRUE(acc.probeLeaf(xyz) != nullptr); EXPECT_EQ( 3, acc.getValueDepth(xyz)); EXPECT_EQ( 1, int(tree->leafCount())); EXPECT_TRUE(acc.isValueOn(xyz)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, acc.getValue(xyz)); } {// test ValueAccessor::probeConstLeaf const openvdb::FloatTree tree1(background); openvdb::tree::ValueAccessor<const openvdb::FloatTree> acc1(tree1); EXPECT_EQ(-1, acc1.getValueDepth(xyz)); EXPECT_EQ( 0, int(tree1.leafCount())); EXPECT_TRUE(acc1.probeConstLeaf(xyz) == nullptr); EXPECT_EQ(-1, acc1.getValueDepth(xyz)); EXPECT_EQ( 0, int(tree1.leafCount())); openvdb::FloatTree tmp(tree1); tmp.setValue(xyz, value); const openvdb::FloatTree tree2(tmp); openvdb::tree::ValueAccessor<const openvdb::FloatTree> acc2(tree2); EXPECT_EQ( 3, acc2.getValueDepth(xyz)); EXPECT_EQ( 1, int(tree2.leafCount())); EXPECT_TRUE(acc2.probeConstLeaf(xyz) != nullptr); EXPECT_EQ( 3, acc2.getValueDepth(xyz)); EXPECT_EQ( 1, int(tree2.leafCount())); EXPECT_TRUE(acc2.isValueOn(xyz)); ASSERT_DOUBLES_EXACTLY_EQUAL(value, acc2.getValue(xyz)); } } TEST_F(TestTree, testAddLeaf) { using namespace openvdb; using LeafT = FloatTree::LeafNodeType; const Coord ijk(100); FloatGrid grid; FloatTree& tree = grid.tree(); tree.setValue(ijk, 5.0); const LeafT* oldLeaf = tree.probeLeaf(ijk); EXPECT_TRUE(oldLeaf != nullptr); ASSERT_DOUBLES_EXACTLY_EQUAL(5.0, oldLeaf->getValue(ijk)); LeafT* newLeaf = new LeafT; newLeaf->setOrigin(oldLeaf->origin()); newLeaf->fill(3.0); tree.addLeaf(newLeaf); EXPECT_EQ(newLeaf, tree.probeLeaf(ijk)); ASSERT_DOUBLES_EXACTLY_EQUAL(3.0, tree.getValue(ijk)); } TEST_F(TestTree, 
testAddTile) { using namespace openvdb; const Coord ijk(100); FloatGrid grid; FloatTree& tree = grid.tree(); tree.setValue(ijk, 5.0); EXPECT_TRUE(tree.probeLeaf(ijk) != nullptr); const Index lvl = FloatTree::DEPTH >> 1; OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (lvl > 0) tree.addTile(lvl,ijk, 3.0, /*active=*/true); else tree.addTile(1,ijk, 3.0, /*active=*/true); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END EXPECT_TRUE(tree.probeLeaf(ijk) == nullptr); ASSERT_DOUBLES_EXACTLY_EQUAL(3.0, tree.getValue(ijk)); } struct BBoxOp { std::vector<openvdb::CoordBBox> bbox; std::vector<openvdb::Index> level; // This method is required by Tree::visitActiveBBox // Since it will return false if LEVEL==0 it will never descent to // the active voxels. In other words the smallest BBoxes // correspond to LeafNodes or active tiles at LEVEL=1 template<openvdb::Index LEVEL> inline bool descent() { return LEVEL>0; } // This method is required by Tree::visitActiveBBox template<openvdb::Index LEVEL> inline void operator()(const openvdb::CoordBBox &_bbox) { bbox.push_back(_bbox); level.push_back(LEVEL); } }; TEST_F(TestTree, testProcessBBox) { OPENVDB_NO_DEPRECATION_WARNING_BEGIN using openvdb::Coord; using openvdb::CoordBBox; //check two leaf nodes and two tiles at each level 1, 2 and 3 const int size[4]={1<<3, 1<<3, 1<<(3+4), 1<<(3+4+5)}; for (int level=0; level<=3; ++level) { openvdb::FloatTree tree; const int n = size[level]; const CoordBBox bbox[]={CoordBBox::createCube(Coord(-n,-n,-n), n), CoordBBox::createCube(Coord( 0, 0, 0), n)}; if (level==0) { tree.setValue(Coord(-1,-2,-3), 1.0f); tree.setValue(Coord( 1, 2, 3), 1.0f); } else { tree.fill(bbox[0], 1.0f, true); tree.fill(bbox[1], 1.0f, true); } BBoxOp op; tree.visitActiveBBox(op); EXPECT_EQ(2, int(op.bbox.size())); for (int i=0; i<2; ++i) { //std::cerr <<"\nLevel="<<level<<" op.bbox["<<i<<"]="<<op.bbox[i] // <<" op.level["<<i<<"]= "<<op.level[i]<<std::endl; EXPECT_EQ(level,int(op.level[i])); EXPECT_TRUE(op.bbox[i] == bbox[i]); } } OPENVDB_NO_DEPRECATION_WARNING_END } TEST_F(TestTree, testGetNodes) { //openvdb::util::CpuTimer timer; using openvdb::CoordBBox; using openvdb::Coord; using openvdb::Vec3f; using openvdb::FloatGrid; using openvdb::FloatTree; const Vec3f center(0.35f, 0.35f, 0.35f); const float radius = 0.15f; const int dim = 128, half_width = 5; const float voxel_size = 1.0f/dim; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/half_width*voxel_size); FloatTree& tree = grid->tree(); grid->setTransform(openvdb::math::Transform::createLinearTransform(/*voxel size=*/voxel_size)); unittest_util::makeSphere<FloatGrid>( Coord(dim), center, radius, *grid, unittest_util::SPHERE_SPARSE_NARROW_BAND); const size_t leafCount = tree.leafCount(); const size_t voxelCount = tree.activeVoxelCount(); {//testing Tree::getNodes() with std::vector<T*> std::vector<openvdb::FloatTree::LeafNodeType*> array; EXPECT_EQ(size_t(0), array.size()); //timer.start("\nstd::vector<T*> and Tree::getNodes()"); tree.getNodes(array); //timer.stop(); EXPECT_EQ(leafCount, array.size()); EXPECT_EQ(leafCount, size_t(tree.leafCount())); size_t sum = 0; for (size_t i=0; i<array.size(); ++i) sum += array[i]->onVoxelCount(); EXPECT_EQ(voxelCount, sum); } {//testing Tree::getNodes() with std::vector<const T*> std::vector<const openvdb::FloatTree::LeafNodeType*> array; EXPECT_EQ(size_t(0), array.size()); //timer.start("\nstd::vector<const T*> and Tree::getNodes()"); tree.getNodes(array); //timer.stop(); EXPECT_EQ(leafCount, array.size()); EXPECT_EQ(leafCount, size_t(tree.leafCount())); size_t 
sum = 0; for (size_t i=0; i<array.size(); ++i) sum += array[i]->onVoxelCount(); EXPECT_EQ(voxelCount, sum); } {//testing Tree::getNodes() const with std::vector<const T*> std::vector<const openvdb::FloatTree::LeafNodeType*> array; EXPECT_EQ(size_t(0), array.size()); //timer.start("\nstd::vector<const T*> and Tree::getNodes() const"); const FloatTree& tmp = tree; tmp.getNodes(array); //timer.stop(); EXPECT_EQ(leafCount, array.size()); EXPECT_EQ(leafCount, size_t(tree.leafCount())); size_t sum = 0; for (size_t i=0; i<array.size(); ++i) sum += array[i]->onVoxelCount(); EXPECT_EQ(voxelCount, sum); } {//testing Tree::getNodes() with std::vector<T*> and std::vector::reserve std::vector<openvdb::FloatTree::LeafNodeType*> array; EXPECT_EQ(size_t(0), array.size()); //timer.start("\nstd::vector<T*>, std::vector::reserve and Tree::getNodes"); array.reserve(tree.leafCount()); tree.getNodes(array); //timer.stop(); EXPECT_EQ(leafCount, array.size()); EXPECT_EQ(leafCount, size_t(tree.leafCount())); size_t sum = 0; for (size_t i=0; i<array.size(); ++i) sum += array[i]->onVoxelCount(); EXPECT_EQ(voxelCount, sum); } {//testing Tree::getNodes() with std::deque<T*> std::deque<const openvdb::FloatTree::LeafNodeType*> array; EXPECT_EQ(size_t(0), array.size()); //timer.start("\nstd::deque<T*> and Tree::getNodes"); tree.getNodes(array); //timer.stop(); EXPECT_EQ(leafCount, array.size()); EXPECT_EQ(leafCount, size_t(tree.leafCount())); size_t sum = 0; for (size_t i=0; i<array.size(); ++i) sum += array[i]->onVoxelCount(); EXPECT_EQ(voxelCount, sum); } {//testing Tree::getNodes() with std::deque<T*> std::deque<const openvdb::FloatTree::RootNodeType::ChildNodeType*> array; EXPECT_EQ(size_t(0), array.size()); //timer.start("\nstd::deque<T*> and Tree::getNodes"); tree.getNodes(array); //timer.stop(); EXPECT_EQ(size_t(1), array.size()); EXPECT_EQ(leafCount, size_t(tree.leafCount())); } {//testing Tree::getNodes() with std::deque<T*> std::deque<const openvdb::FloatTree::RootNodeType::ChildNodeType::ChildNodeType*> array; EXPECT_EQ(size_t(0), array.size()); //timer.start("\nstd::deque<T*> and Tree::getNodes"); tree.getNodes(array); //timer.stop(); EXPECT_EQ(size_t(1), array.size()); EXPECT_EQ(leafCount, size_t(tree.leafCount())); } /* {//testing Tree::getNodes() with std::deque<T*> where T is not part of the tree configuration using NodeT = openvdb::tree::LeafNode<float, 5>; std::deque<const NodeT*> array; tree.getNodes(array);//should NOT compile since NodeT is not part of the FloatTree configuration } {//testing Tree::getNodes() const with std::deque<T*> where T is not part of the tree configuration using NodeT = openvdb::tree::LeafNode<float, 5>; std::deque<const NodeT*> array; const FloatTree& tmp = tree; tmp.getNodes(array);//should NOT compile since NodeT is not part of the FloatTree configuration } */ }// testGetNodes TEST_F(TestTree, testStealNodes) { //openvdb::util::CpuTimer timer; using openvdb::CoordBBox; using openvdb::Coord; using openvdb::Vec3f; using openvdb::FloatGrid; using openvdb::FloatTree; const Vec3f center(0.35f, 0.35f, 0.35f); const float radius = 0.15f; const int dim = 128, half_width = 5; const float voxel_size = 1.0f/dim; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/half_width*voxel_size); const FloatTree& tree = grid->tree(); grid->setTransform(openvdb::math::Transform::createLinearTransform(/*voxel size=*/voxel_size)); unittest_util::makeSphere<FloatGrid>( Coord(dim), center, radius, *grid, unittest_util::SPHERE_SPARSE_NARROW_BAND); const size_t leafCount = tree.leafCount(); const 
size_t voxelCount = tree.activeVoxelCount(); {//testing Tree::stealNodes() with std::vector<T*> FloatTree tree2 = tree; std::vector<openvdb::FloatTree::LeafNodeType*> array; EXPECT_EQ(size_t(0), array.size()); //timer.start("\nstd::vector<T*> and Tree::stealNodes()"); tree2.stealNodes(array); //timer.stop(); EXPECT_EQ(leafCount, array.size()); EXPECT_EQ(size_t(0), size_t(tree2.leafCount())); size_t sum = 0; for (size_t i=0; i<array.size(); ++i) sum += array[i]->onVoxelCount(); EXPECT_EQ(voxelCount, sum); } {//testing Tree::stealNodes() with std::vector<const T*> FloatTree tree2 = tree; std::vector<const openvdb::FloatTree::LeafNodeType*> array; EXPECT_EQ(size_t(0), array.size()); //timer.start("\nstd::vector<const T*> and Tree::stealNodes()"); tree2.stealNodes(array); //timer.stop(); EXPECT_EQ(leafCount, array.size()); EXPECT_EQ(size_t(0), size_t(tree2.leafCount())); size_t sum = 0; for (size_t i=0; i<array.size(); ++i) sum += array[i]->onVoxelCount(); EXPECT_EQ(voxelCount, sum); } {//testing Tree::stealNodes() const with std::vector<const T*> FloatTree tree2 = tree; std::vector<const openvdb::FloatTree::LeafNodeType*> array; EXPECT_EQ(size_t(0), array.size()); //timer.start("\nstd::vector<const T*> and Tree::stealNodes() const"); tree2.stealNodes(array); //timer.stop(); EXPECT_EQ(leafCount, array.size()); EXPECT_EQ(size_t(0), size_t(tree2.leafCount())); size_t sum = 0; for (size_t i=0; i<array.size(); ++i) sum += array[i]->onVoxelCount(); EXPECT_EQ(voxelCount, sum); } {//testing Tree::stealNodes() with std::vector<T*> and std::vector::reserve FloatTree tree2 = tree; std::vector<openvdb::FloatTree::LeafNodeType*> array; EXPECT_EQ(size_t(0), array.size()); //timer.start("\nstd::vector<T*>, std::vector::reserve and Tree::stealNodes"); array.reserve(tree2.leafCount()); tree2.stealNodes(array, 0.0f, false); //timer.stop(); EXPECT_EQ(leafCount, array.size()); EXPECT_EQ(size_t(0), size_t(tree2.leafCount())); size_t sum = 0; for (size_t i=0; i<array.size(); ++i) sum += array[i]->onVoxelCount(); EXPECT_EQ(voxelCount, sum); } {//testing Tree::getNodes() with std::deque<T*> FloatTree tree2 = tree; std::deque<const openvdb::FloatTree::LeafNodeType*> array; EXPECT_EQ(size_t(0), array.size()); //timer.start("\nstd::deque<T*> and Tree::stealNodes"); tree2.stealNodes(array); //timer.stop(); EXPECT_EQ(leafCount, array.size()); EXPECT_EQ(size_t(0), size_t(tree2.leafCount())); size_t sum = 0; for (size_t i=0; i<array.size(); ++i) sum += array[i]->onVoxelCount(); EXPECT_EQ(voxelCount, sum); } {//testing Tree::getNodes() with std::deque<T*> FloatTree tree2 = tree; std::deque<const openvdb::FloatTree::RootNodeType::ChildNodeType*> array; EXPECT_EQ(size_t(0), array.size()); //timer.start("\nstd::deque<T*> and Tree::stealNodes"); tree2.stealNodes(array, 0.0f, true); //timer.stop(); EXPECT_EQ(size_t(1), array.size()); EXPECT_EQ(size_t(0), size_t(tree2.leafCount())); } {//testing Tree::getNodes() with std::deque<T*> FloatTree tree2 = tree; std::deque<const openvdb::FloatTree::RootNodeType::ChildNodeType::ChildNodeType*> array; EXPECT_EQ(size_t(0), array.size()); //timer.start("\nstd::deque<T*> and Tree::stealNodes"); tree2.stealNodes(array); //timer.stop(); EXPECT_EQ(size_t(1), array.size()); EXPECT_EQ(size_t(0), size_t(tree2.leafCount())); } /* {//testing Tree::stealNodes() with std::deque<T*> where T is not part of the tree configuration FloatTree tree2 = tree; using NodeT = openvdb::tree::LeafNode<float, 5>; std::deque<const NodeT*> array; //should NOT compile since NodeT is not part of the FloatTree 
configuration tree2.stealNodes(array, 0.0f, true); } */ }// testStealNodes TEST_F(TestTree, testStealNode) { using openvdb::Index; using openvdb::FloatTree; const float background=0.0f, value = 5.6f, epsilon=0.000001f; const openvdb::Coord xyz(-23,42,70); {// stal a LeafNode using NodeT = FloatTree::LeafNodeType; EXPECT_EQ(Index(0), NodeT::getLevel()); FloatTree tree(background); EXPECT_EQ(Index(0), tree.leafCount()); EXPECT_TRUE(!tree.isValueOn(xyz)); EXPECT_NEAR(background, tree.getValue(xyz), epsilon); EXPECT_TRUE(tree.root().stealNode<NodeT>(xyz, value, false) == nullptr); tree.setValue(xyz, value); EXPECT_EQ(Index(1), tree.leafCount()); EXPECT_TRUE(tree.isValueOn(xyz)); EXPECT_NEAR(value, tree.getValue(xyz), epsilon); NodeT* node = tree.root().stealNode<NodeT>(xyz, background, false); EXPECT_TRUE(node != nullptr); EXPECT_EQ(Index(0), tree.leafCount()); EXPECT_TRUE(!tree.isValueOn(xyz)); EXPECT_NEAR(background, tree.getValue(xyz), epsilon); EXPECT_TRUE(tree.root().stealNode<NodeT>(xyz, value, false) == nullptr); EXPECT_NEAR(value, node->getValue(xyz), epsilon); EXPECT_TRUE(node->isValueOn(xyz)); delete node; } {// steal a bottom InternalNode using NodeT = FloatTree::RootNodeType::ChildNodeType::ChildNodeType; EXPECT_EQ(Index(1), NodeT::getLevel()); FloatTree tree(background); EXPECT_EQ(Index(0), tree.leafCount()); EXPECT_TRUE(!tree.isValueOn(xyz)); EXPECT_NEAR(background, tree.getValue(xyz), epsilon); EXPECT_TRUE(tree.root().stealNode<NodeT>(xyz, value, false) == nullptr); tree.setValue(xyz, value); EXPECT_EQ(Index(1), tree.leafCount()); EXPECT_TRUE(tree.isValueOn(xyz)); EXPECT_NEAR(value, tree.getValue(xyz), epsilon); NodeT* node = tree.root().stealNode<NodeT>(xyz, background, false); EXPECT_TRUE(node != nullptr); EXPECT_EQ(Index(0), tree.leafCount()); EXPECT_TRUE(!tree.isValueOn(xyz)); EXPECT_NEAR(background, tree.getValue(xyz), epsilon); EXPECT_TRUE(tree.root().stealNode<NodeT>(xyz, value, false) == nullptr); EXPECT_NEAR(value, node->getValue(xyz), epsilon); EXPECT_TRUE(node->isValueOn(xyz)); delete node; } {// steal a top InternalNode using NodeT = FloatTree::RootNodeType::ChildNodeType; EXPECT_EQ(Index(2), NodeT::getLevel()); FloatTree tree(background); EXPECT_EQ(Index(0), tree.leafCount()); EXPECT_TRUE(!tree.isValueOn(xyz)); EXPECT_NEAR(background, tree.getValue(xyz), epsilon); EXPECT_TRUE(tree.root().stealNode<NodeT>(xyz, value, false) == nullptr); tree.setValue(xyz, value); EXPECT_EQ(Index(1), tree.leafCount()); EXPECT_TRUE(tree.isValueOn(xyz)); EXPECT_NEAR(value, tree.getValue(xyz), epsilon); NodeT* node = tree.root().stealNode<NodeT>(xyz, background, false); EXPECT_TRUE(node != nullptr); EXPECT_EQ(Index(0), tree.leafCount()); EXPECT_TRUE(!tree.isValueOn(xyz)); EXPECT_NEAR(background, tree.getValue(xyz), epsilon); EXPECT_TRUE(tree.root().stealNode<NodeT>(xyz, value, false) == nullptr); EXPECT_NEAR(value, node->getValue(xyz), epsilon); EXPECT_TRUE(node->isValueOn(xyz)); delete node; } } #if OPENVDB_ABI_VERSION_NUMBER >= 7 TEST_F(TestTree, testNodeCount) { //openvdb::util::CpuTimer timer;// use for benchmark test const openvdb::Vec3f center(0.0f, 0.0f, 0.0f); const float radius = 1.0f; //const int dim = 4096, halfWidth = 3;// use for benchmark test const int dim = 512, halfWidth = 3;// use for unit test //timer.start("\nGenerate level set sphere");// use for benchmark test auto grid = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(radius, center, radius/dim, halfWidth); //timer.stop();// use for benchmark test auto& tree = grid->tree(); std::vector<openvdb::Index> 
    tree.getNodeLog2Dims(dims);
    std::vector<openvdb::Index32> nodeCount1(dims.size());
    //timer.start("Old technique");// use for benchmark test
    for (auto it = tree.cbeginNode(); it; ++it) ++(nodeCount1[dims.size()-1-it.getDepth()]);
    //timer.restart("New technique");// use for benchmark test
    const auto nodeCount2 = tree.nodeCount();
    //timer.stop();// use for benchmark test
    EXPECT_EQ(nodeCount1.size(), nodeCount2.size());
    //for (size_t i=0; i<nodeCount2.size(); ++i) std::cerr << "nodeCount1("<<i<<") OLD/NEW: " << nodeCount1[i] << "/" << nodeCount2[i] << std::endl;
    EXPECT_EQ(1U, nodeCount2.back());// one root node
    EXPECT_EQ(tree.leafCount(), nodeCount2.front());// leaf nodes
    for (size_t i=0; i<nodeCount2.size(); ++i) EXPECT_EQ( nodeCount1[i], nodeCount2[i]);
}
#endif

TEST_F(TestTree, testRootNode)
{
    using ChildType = RootNodeType::ChildNodeType;
    const openvdb::Coord c0(0,0,0), c1(49152, 16384, 28672);

    { // test inserting child nodes directly and indirectly
        RootNodeType root(0.0f);
        EXPECT_TRUE(root.empty());
        EXPECT_EQ(openvdb::Index32(0), root.childCount());

        // populate the tree by inserting the two leaf nodes containing c0 and c1
        root.touchLeaf(c0);
        root.touchLeaf(c1);
        EXPECT_EQ(openvdb::Index(2), root.getTableSize());
        EXPECT_EQ(openvdb::Index32(2), root.childCount());
        EXPECT_TRUE(!root.hasActiveTiles());

        { // verify c0 and c1 are the root node coordinates
            auto rootIter = root.cbeginChildOn();
            EXPECT_EQ(c0, rootIter.getCoord());
            ++rootIter;
            EXPECT_EQ(c1, rootIter.getCoord());
        }

        // copy the root node
        RootNodeType rootCopy(root);

        // steal the root node children leaving the root node empty again
        std::vector<ChildType*> children;
        root.stealNodes(children);
        EXPECT_TRUE(root.empty());

        // insert the root node children directly
        for (ChildType* child : children) {
            root.addChild(child);
        }
        EXPECT_EQ(openvdb::Index(2), root.getTableSize());
        EXPECT_EQ(openvdb::Index32(2), root.childCount());

        { // verify the coordinates of the root node children
            auto rootIter = root.cbeginChildOn();
            EXPECT_EQ(c0, rootIter.getCoord());
            ++rootIter;
            EXPECT_EQ(c1, rootIter.getCoord());
        }
    }
    { // test inserting tiles and replacing them with child nodes
        RootNodeType root(0.0f);
        EXPECT_TRUE(root.empty());

        // no-op
        root.addChild(nullptr);

        // populate the root node by inserting tiles
        root.addTile(c0, /*value=*/1.0f, /*state=*/true);
        root.addTile(c1, /*value=*/2.0f, /*state=*/true);
        EXPECT_EQ(openvdb::Index(2), root.getTableSize());
        EXPECT_EQ(openvdb::Index32(0), root.childCount());
        EXPECT_TRUE(root.hasActiveTiles());
        ASSERT_DOUBLES_EXACTLY_EQUAL(1.0f, root.getValue(c0));
        ASSERT_DOUBLES_EXACTLY_EQUAL(2.0f, root.getValue(c1));

        // insert child nodes with the same coordinates
        root.addChild(new ChildType(c0, 3.0f));
        root.addChild(new ChildType(c1, 4.0f));

        // insert a new child at c0
        root.addChild(new ChildType(c0, 5.0f));

        // verify active tiles have been replaced by child nodes
        EXPECT_EQ(openvdb::Index(2), root.getTableSize());
        EXPECT_EQ(openvdb::Index32(2), root.childCount());
        EXPECT_TRUE(!root.hasActiveTiles());

        { // verify the coordinates of the root node children
            auto rootIter = root.cbeginChildOn();
            EXPECT_EQ(c0, rootIter.getCoord());
            ASSERT_DOUBLES_EXACTLY_EQUAL(5.0f, root.getValue(c0));
            ++rootIter;
            EXPECT_EQ(c1, rootIter.getCoord());
        }
    }
}

TEST_F(TestTree, testInternalNode)
{
    const openvdb::Coord c0(1000, 1000, 1000);
    const openvdb::Coord c1(896, 896, 896);

    using InternalNodeType = InternalNodeType1;
    using ChildType = LeafNodeType;

    { // test inserting child nodes directly and indirectly
        openvdb::Coord c2 = c1.offsetBy(8,0,0);
        openvdb::Coord c3 = c1.offsetBy(16,16,16);

        InternalNodeType internalNode(c1, 0.0f);
        internalNode.touchLeaf(c2);
        internalNode.touchLeaf(c3);

        EXPECT_EQ(openvdb::Index(2), internalNode.leafCount());
        EXPECT_EQ(openvdb::Index32(2), internalNode.childCount());
        EXPECT_TRUE(!internalNode.hasActiveTiles());

        { // verify c2 and c3 are the internal node's child coordinates
            auto childIter = internalNode.cbeginChildOn();
            EXPECT_EQ(c2, childIter.getCoord());
            ++childIter;
            EXPECT_EQ(c3, childIter.getCoord());
        }

        // copy the internal node
        InternalNodeType internalNodeCopy(internalNode);

        // steal the internal node children leaving it empty again
        std::vector<ChildType*> children;
        internalNode.stealNodes(children, 0.0f, false);
        EXPECT_EQ(openvdb::Index(0), internalNode.leafCount());
        EXPECT_EQ(openvdb::Index32(0), internalNode.childCount());

        // insert the internal node children directly
        for (ChildType* child : children) {
            internalNode.addChild(child);
        }
        EXPECT_EQ(openvdb::Index(2), internalNode.leafCount());
        EXPECT_EQ(openvdb::Index32(2), internalNode.childCount());

        { // verify the coordinates of the internal node children
            auto childIter = internalNode.cbeginChildOn();
            EXPECT_EQ(c2, childIter.getCoord());
            ++childIter;
            EXPECT_EQ(c3, childIter.getCoord());
        }
    }
    { // test inserting a tile and replacing with a child node
        InternalNodeType internalNode(c1, 0.0f);
        EXPECT_TRUE(!internalNode.hasActiveTiles());
        EXPECT_EQ(openvdb::Index(0), internalNode.leafCount());
        EXPECT_EQ(openvdb::Index32(0), internalNode.childCount());

        // add a tile
        internalNode.addTile(openvdb::Index(0), /*value=*/1.0f, /*state=*/true);
        EXPECT_TRUE(internalNode.hasActiveTiles());
        EXPECT_EQ(openvdb::Index(0), internalNode.leafCount());
        EXPECT_EQ(openvdb::Index32(0), internalNode.childCount());

        // replace the tile with a child node
        EXPECT_TRUE(internalNode.addChild(new ChildType(c1, 2.0f)));
        EXPECT_TRUE(!internalNode.hasActiveTiles());
        EXPECT_EQ(openvdb::Index(1), internalNode.leafCount());
        EXPECT_EQ(openvdb::Index32(1), internalNode.childCount());
        EXPECT_EQ(c1, internalNode.cbeginChildOn().getCoord());
        ASSERT_DOUBLES_EXACTLY_EQUAL(2.0f, internalNode.cbeginChildOn()->getValue(0));

        // replace the child node with another child node
        EXPECT_TRUE(internalNode.addChild(new ChildType(c1, 3.0f)));
        ASSERT_DOUBLES_EXACTLY_EQUAL(3.0f, internalNode.cbeginChildOn()->getValue(0));
    }
    { // test inserting child nodes that do and do not belong to the internal node
        InternalNodeType internalNode(c1, 0.0f);

        // succeed if child belongs to this internal node
        EXPECT_TRUE(internalNode.addChild(new ChildType(c0.offsetBy(8,0,0))));
        EXPECT_TRUE(internalNode.probeLeaf(c0.offsetBy(8,0,0)));
        openvdb::Index index1 = internalNode.coordToOffset(c0);
        openvdb::Index index2 = internalNode.coordToOffset(c0.offsetBy(8,0,0));
        EXPECT_TRUE(!internalNode.isChildMaskOn(index1));
        EXPECT_TRUE(internalNode.isChildMaskOn(index2));

        // fail otherwise
        EXPECT_TRUE(!internalNode.addChild(new ChildType(c0.offsetBy(8000,0,0))));
    }
}
125,148
C++
39.606424
148
0.612211
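For readers unfamiliar with the API exercised in the tests above, here is a minimal, self-contained sketch of the Tree::stealNodes() pattern they benchmark. The coordinates and values are illustrative only, and the sketch assumes the caller takes ownership of the stolen nodes (as the singular stealNode() test above does with its explicit delete).

// Sketch: stealNodes() moves the requested node type into the caller's
// container and removes those nodes from the tree.
#include <openvdb/openvdb.h>
#include <vector>

int main()
{
    openvdb::initialize();

    openvdb::FloatTree tree(/*background=*/0.0f);
    tree.setValue(openvdb::Coord(1, 2, 3), 1.0f);
    tree.setValue(openvdb::Coord(100, 0, 0), 2.0f);

    std::vector<openvdb::FloatTree::LeafNodeType*> leafs;
    leafs.reserve(tree.leafCount());   // optional: avoids reallocation
    tree.stealNodes(leafs);            // tree.leafCount() is now zero

    size_t onVoxels = 0;
    for (auto* leaf : leafs) {
        onVoxels += leaf->onVoxelCount();
        delete leaf;                   // assumption: caller owns stolen nodes
    }
    return onVoxels == 2 ? 0 : 1;
}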
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestLaplacian.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <openvdb/Types.h> #include <openvdb/openvdb.h> #include <openvdb/tools/GridOperators.h> #include "util.h" // for unittest_util::makeSphere() #include "gtest/gtest.h" #include <sstream> class TestLaplacian: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; TEST_F(TestLaplacian, testISLaplacian) { using namespace openvdb; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/5.0); FloatTree& tree = grid->tree(); EXPECT_TRUE(tree.empty()); const Coord dim(64,64,64); const Coord c(35,30,40); const openvdb::Vec3f center(static_cast<float>(c[0]), static_cast<float>(c[1]), static_cast<float>(c[2])); const float radius=0.0f;//point at {35,30,40} unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(tree.activeVoxelCount())); Coord xyz(35,10,40); // Index Space Laplacian random access FloatGrid::ConstAccessor inAccessor = grid->getConstAccessor(); FloatGrid::ValueType result; result = math::ISLaplacian<math::CD_SECOND>::result(inAccessor, xyz); EXPECT_NEAR(2.0/20.0, result, /*tolerance=*/0.01); result = math::ISLaplacian<math::CD_FOURTH>::result(inAccessor, xyz); EXPECT_NEAR(2.0/20.0, result, /*tolerance=*/0.01); result = math::ISLaplacian<math::CD_SIXTH>::result(inAccessor, xyz); EXPECT_NEAR(2.0/20.0, result, /*tolerance=*/0.01); } TEST_F(TestLaplacian, testISLaplacianStencil) { using namespace openvdb; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/5.0); FloatTree& tree = grid->tree(); EXPECT_TRUE(tree.empty()); const Coord dim(64,64,64); const Coord c(35,30,40); const openvdb::Vec3f center(static_cast<float>(c[0]), static_cast<float>(c[1]), static_cast<float>(c[2])); const float radius=0;//point at {35,30,40} unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(tree.activeVoxelCount())); Coord xyz(35,10,40); // Index Space Laplacian stencil access FloatGrid::ValueType result; math::SevenPointStencil<FloatGrid> sevenpt(*grid); sevenpt.moveTo(xyz); result = math::ISLaplacian<math::CD_SECOND>::result(sevenpt); EXPECT_NEAR(2.0/20.0, result, /*tolerance=*/0.01); math::ThirteenPointStencil<FloatGrid> thirteenpt(*grid); thirteenpt.moveTo(xyz); result = math::ISLaplacian<math::CD_FOURTH>::result(thirteenpt); EXPECT_NEAR(2.0/20.0, result, /*tolerance=*/0.01); math::NineteenPointStencil<FloatGrid> nineteenpt(*grid); nineteenpt.moveTo(xyz); result = math::ISLaplacian<math::CD_SIXTH>::result(nineteenpt); EXPECT_NEAR(2.0/20.0, result, /*tolerance=*/0.01); } TEST_F(TestLaplacian, testWSLaplacian) { using namespace openvdb; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/5.0); FloatTree& tree = grid->tree(); EXPECT_TRUE(tree.empty()); const Coord dim(64,64,64); const Coord c(35,30,40); const openvdb::Vec3f center(static_cast<float>(c[0]), static_cast<float>(c[1]), static_cast<float>(c[2])); const float radius=0.0f;//point at {35,30,40} unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(tree.activeVoxelCount())); Coord xyz(35,10,40); FloatGrid::ValueType result; FloatGrid::ConstAccessor inAccessor = grid->getConstAccessor(); // try with a map math::UniformScaleMap map; math::MapBase::Ptr rotated_map = 
map.preRotate(1.5, math::X_AXIS); // verify the new map is an affine map EXPECT_TRUE(rotated_map->type() == math::AffineMap::mapType()); math::AffineMap::Ptr affine_map = StaticPtrCast<math::AffineMap, math::MapBase>(rotated_map); // the laplacian is invariant to rotation result = math::Laplacian<math::AffineMap, math::CD_SECOND>::result( *affine_map, inAccessor, xyz); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); result = math::Laplacian<math::AffineMap, math::CD_FOURTH>::result( *affine_map, inAccessor, xyz); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); result = math::Laplacian<math::AffineMap, math::CD_SIXTH>::result( *affine_map, inAccessor, xyz); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); // test uniform map math::UniformScaleMap uniform; result = math::Laplacian<math::UniformScaleMap, math::CD_SECOND>::result( uniform, inAccessor, xyz); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); result = math::Laplacian<math::UniformScaleMap, math::CD_FOURTH>::result( uniform, inAccessor, xyz); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); result = math::Laplacian<math::UniformScaleMap, math::CD_SIXTH>::result( uniform, inAccessor, xyz); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); // test the GenericMap Grid interface { math::GenericMap generic_map(*grid); result = math::Laplacian<math::GenericMap, math::CD_SECOND>::result( generic_map, inAccessor, xyz); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); result = math::Laplacian<math::GenericMap, math::CD_FOURTH>::result( generic_map, inAccessor, xyz); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); } { // test the GenericMap Transform interface math::GenericMap generic_map(grid->transform()); result = math::Laplacian<math::GenericMap, math::CD_SECOND>::result( generic_map, inAccessor, xyz); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); } { // test the GenericMap Map interface math::GenericMap generic_map(rotated_map); result = math::Laplacian<math::GenericMap, math::CD_SECOND>::result( generic_map, inAccessor, xyz); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); } } TEST_F(TestLaplacian, testWSLaplacianFrustum) { using namespace openvdb; // Create a Frustum Map: openvdb::BBoxd bbox(Vec3d(0), Vec3d(100)); math::NonlinearFrustumMap frustum(bbox, 1./6., 5); /// frustum will have depth, far plane - near plane = 5 /// the frustum has width 1 in the front and 6 in the back math::Vec3d trans(2,2,2); math::NonlinearFrustumMap::Ptr map = StaticPtrCast<math::NonlinearFrustumMap, math::MapBase>( frustum.preScale(Vec3d(10,10,10))->postTranslate(trans)); EXPECT_TRUE(!map->hasUniformScale()); math::Vec3d result; result = map->voxelSize(); EXPECT_TRUE( math::isApproxEqual(result.x(), 0.1)); EXPECT_TRUE( math::isApproxEqual(result.y(), 0.1)); EXPECT_TRUE( math::isApproxEqual(result.z(), 0.5, 0.0001)); // Create a tree FloatGrid::Ptr grid = FloatGrid::create(/*background=*/0.0); FloatTree& tree = grid->tree(); EXPECT_TRUE(tree.empty()); // Load cos(x)sin(y)cos(z) Coord ijk(10,10,10); for (Int32& i=ijk.x(); i < 20; ++i) { for (Int32& j=ijk.y(); j < 20; ++j) { for (Int32& k=ijk.z(); k < 20; ++k) { // world space image of the ijk coord const Vec3d ws = map->applyMap(ijk.asVec3d()); const float value = float(cos(ws.x() ) * sin( ws.y()) * cos(ws.z())); tree.setValue(ijk, value); } } } const Coord testloc(16,16,16); float test_result = math::Laplacian<math::NonlinearFrustumMap, math::CD_SECOND>::result( *map, tree, testloc); float expected_result = -3.f * tree.getValue(testloc); // The exact solution of Laplacian( 
cos(x)sin(y)cos(z) ) = -3 cos(x) sin(y) cos(z) EXPECT_TRUE( math::isApproxEqual(test_result, expected_result, /*tolerance=*/0.02f) ); } TEST_F(TestLaplacian, testWSLaplacianStencil) { using namespace openvdb; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/5.0); FloatTree& tree = grid->tree(); EXPECT_TRUE(tree.empty()); const Coord dim(64,64,64); const Coord c(35,30,40); const openvdb::Vec3f center(static_cast<float>(c[0]), static_cast<float>(c[1]), static_cast<float>(c[2])); const float radius=0.0f;//point at {35,30,40} unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(tree.activeVoxelCount())); Coord xyz(35,10,40); FloatGrid::ValueType result; // try with a map math::UniformScaleMap map; math::MapBase::Ptr rotated_map = map.preRotate(1.5, math::X_AXIS); // verify the new map is an affine map EXPECT_TRUE(rotated_map->type() == math::AffineMap::mapType()); math::AffineMap::Ptr affine_map = StaticPtrCast<math::AffineMap, math::MapBase>(rotated_map); // the laplacian is invariant to rotation math::SevenPointStencil<FloatGrid> sevenpt(*grid); math::ThirteenPointStencil<FloatGrid> thirteenpt(*grid); math::NineteenPointStencil<FloatGrid> nineteenpt(*grid); math::SecondOrderDenseStencil<FloatGrid> dense_2nd(*grid); math::FourthOrderDenseStencil<FloatGrid> dense_4th(*grid); math::SixthOrderDenseStencil<FloatGrid> dense_6th(*grid); sevenpt.moveTo(xyz); thirteenpt.moveTo(xyz); nineteenpt.moveTo(xyz); dense_2nd.moveTo(xyz); dense_4th.moveTo(xyz); dense_6th.moveTo(xyz); result = math::Laplacian<math::AffineMap, math::CD_SECOND>::result(*affine_map, dense_2nd); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); result = math::Laplacian<math::AffineMap, math::CD_FOURTH>::result(*affine_map, dense_4th); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); result = math::Laplacian<math::AffineMap, math::CD_SIXTH>::result(*affine_map, dense_6th); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); // test uniform map math::UniformScaleMap uniform; result = math::Laplacian<math::UniformScaleMap, math::CD_SECOND>::result(uniform, sevenpt); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); result = math::Laplacian<math::UniformScaleMap, math::CD_FOURTH>::result(uniform, thirteenpt); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); result = math::Laplacian<math::UniformScaleMap, math::CD_SIXTH>::result(uniform, nineteenpt); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); // test the GenericMap Grid interface { math::GenericMap generic_map(*grid); result = math::Laplacian<math::GenericMap, math::CD_SECOND>::result(generic_map, dense_2nd); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); result = math::Laplacian<math::GenericMap, math::CD_FOURTH>::result(generic_map, dense_4th); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); } { // test the GenericMap Transform interface math::GenericMap generic_map(grid->transform()); result = math::Laplacian<math::GenericMap, math::CD_SECOND>::result(generic_map, dense_2nd); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); } { // test the GenericMap Map interface math::GenericMap generic_map(rotated_map); result = math::Laplacian<math::GenericMap, math::CD_SECOND>::result(generic_map, dense_2nd); EXPECT_NEAR(2.0/20., result, /*tolerance=*/0.01); } } TEST_F(TestLaplacian, testOldStyleStencils) { using namespace openvdb; FloatGrid::Ptr grid = FloatGrid::create(/*backgroundValue=*/5.0); grid->setTransform(math::Transform::createLinearTransform(/*voxel 
size=*/0.5)); EXPECT_TRUE(grid->empty()); const Coord dim(32, 32, 32); const Coord c(35,30,40); const openvdb::Vec3f center(6.0f, 8.0f, 10.0f);//i.e. (12,16,20) in index space const float radius=10.0f; unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!grid->empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(grid->activeVoxelCount())); math::GradStencil<FloatGrid> gs(*grid); math::WenoStencil<FloatGrid> ws(*grid); math::CurvatureStencil<FloatGrid> cs(*grid); Coord xyz(20,16,20);//i.e. 8 voxel or 4 world units away from the center gs.moveTo(xyz); EXPECT_NEAR(2.0/4.0, gs.laplacian(), 0.01);// 2/distance from center ws.moveTo(xyz); EXPECT_NEAR(2.0/4.0, ws.laplacian(), 0.01);// 2/distance from center cs.moveTo(xyz); EXPECT_NEAR(2.0/4.0, cs.laplacian(), 0.01);// 2/distance from center xyz.reset(12,16,10);//i.e. 10 voxel or 5 world units away from the center gs.moveTo(xyz); EXPECT_NEAR(2.0/5.0, gs.laplacian(), 0.01);// 2/distance from center ws.moveTo(xyz); EXPECT_NEAR(2.0/5.0, ws.laplacian(), 0.01);// 2/distance from center cs.moveTo(xyz); EXPECT_NEAR(2.0/5.0, cs.laplacian(), 0.01);// 2/distance from center } TEST_F(TestLaplacian, testLaplacianTool) { using namespace openvdb; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/5.0); FloatTree& tree = grid->tree(); EXPECT_TRUE(tree.empty()); const Coord dim(64, 64, 64); const openvdb::Vec3f center(35.0f, 30.0f, 40.0f); const float radius=0.0f; unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(tree.activeVoxelCount())); FloatGrid::Ptr lap = tools::laplacian(*grid); EXPECT_EQ(int(tree.activeVoxelCount()), int(lap->activeVoxelCount())); Coord xyz(35,30,30); EXPECT_NEAR( 2.0/10.0, lap->getConstAccessor().getValue(xyz), 0.01);// 2/distance from center xyz.reset(35,10,40); EXPECT_NEAR( 2.0/20.0, lap->getConstAccessor().getValue(xyz),0.01);// 2/distance from center } TEST_F(TestLaplacian, testLaplacianMaskedTool) { using namespace openvdb; FloatGrid::Ptr grid = FloatGrid::create(/*background=*/5.0); FloatTree& tree = grid->tree(); EXPECT_TRUE(tree.empty()); const Coord dim(64, 64, 64); const openvdb::Vec3f center(35.0f, 30.0f, 40.0f); const float radius=0.0f; unittest_util::makeSphere<FloatGrid>(dim, center, radius, *grid, unittest_util::SPHERE_DENSE); EXPECT_TRUE(!tree.empty()); EXPECT_EQ(dim[0]*dim[1]*dim[2], int(tree.activeVoxelCount())); const openvdb::CoordBBox maskbbox(openvdb::Coord(35, 30, 30), openvdb::Coord(41, 41, 41)); BoolGrid::Ptr maskGrid = BoolGrid::create(false); maskGrid->fill(maskbbox, true/*value*/, true/*activate*/); FloatGrid::Ptr lap = tools::laplacian(*grid, *maskGrid); {// outside the masked region Coord xyz(34,30,30); EXPECT_TRUE(!maskbbox.isInside(xyz)); EXPECT_NEAR( 0, lap->getConstAccessor().getValue(xyz), 0.01);// 2/distance from center xyz.reset(35,10,40); EXPECT_NEAR( 0, lap->getConstAccessor().getValue(xyz),0.01);// 2/distance from center } {// inside the masked region Coord xyz(35,30,30); EXPECT_TRUE(maskbbox.isInside(xyz)); EXPECT_NEAR( 2.0/10.0, lap->getConstAccessor().getValue(xyz), 0.01);// 2/distance from center } }
15,460
C++
34.706697
100
0.644761
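The 2/r values asserted throughout the Laplacian tests above follow from the analytic identity that, in three dimensions, the Laplacian of the distance field r = |x - c| is 2/r. A minimal sketch of the tools::laplacian() entry point used in testLaplacianTool; the input grid is assumed to hold such a distance field (for example, one built with unittest_util::makeSphere() and radius 0):

#include <openvdb/openvdb.h>
#include <openvdb/tools/GridOperators.h>

// Returns a new grid holding the scalar Laplacian of the input grid,
// mapped to world space through the grid's transform.
openvdb::FloatGrid::Ptr laplacianOf(const openvdb::FloatGrid& distance)
{
    return openvdb::tools::laplacian(distance);
}

Sampling the result at a voxel r world units from the field's centre should therefore read approximately 2/r, which is exactly what the EXPECT_NEAR checks above encode.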
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestPointConversion.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointAttribute.h> #include <openvdb/points/PointConversion.h> #include <openvdb/points/PointCount.h> #include <openvdb/points/PointGroup.h> #ifdef _MSC_VER #include <windows.h> #endif using namespace openvdb; using namespace openvdb::points; class TestPointConversion: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; // class TestPointConversion // Simple Attribute Wrapper template <typename T> struct AttributeWrapper { using ValueType = T; using PosType = T; using value_type = T; struct Handle { Handle(AttributeWrapper<T>& attribute) : mBuffer(attribute.mAttribute) , mStride(attribute.mStride) { } template <typename ValueType> void set(size_t n, openvdb::Index m, const ValueType& value) { mBuffer[n * mStride + m] = static_cast<T>(value); } template <typename ValueType> void set(size_t n, openvdb::Index m, const openvdb::math::Vec3<ValueType>& value) { mBuffer[n * mStride + m] = static_cast<T>(value); } private: std::vector<T>& mBuffer; Index mStride; }; // struct Handle explicit AttributeWrapper(const Index stride) : mStride(stride) { } void expand() { } void compact() { } void resize(const size_t n) { mAttribute.resize(n); } size_t size() const { return mAttribute.size(); } std::vector<T>& buffer() { return mAttribute; } template <typename ValueT> void get(ValueT& value, size_t n, openvdb::Index m = 0) const { value = mAttribute[n * mStride + m]; } template <typename ValueT> void getPos(size_t n, ValueT& value) const { this->get<ValueT>(value, n); } private: std::vector<T> mAttribute; Index mStride; }; // struct AttributeWrapper struct GroupWrapper { GroupWrapper() = default; void setOffsetOn(openvdb::Index index) { mGroup[index] = short(1); } void finalize() { } void resize(const size_t n) { mGroup.resize(n, short(0)); } size_t size() const { return mGroup.size(); } std::vector<short>& buffer() { return mGroup; } private: std::vector<short> mGroup; }; // struct GroupWrapper struct PointData { int id; Vec3f position; Vec3i xyz; float uniform; openvdb::Name string; short group; bool operator<(const PointData& other) const { return id < other.id; } }; // PointData // Generate random points by uniformly distributing points // on a unit-sphere. inline void genPoints(const int numPoints, const double scale, const bool stride, AttributeWrapper<Vec3f>& position, AttributeWrapper<int>& xyz, AttributeWrapper<int>& id, AttributeWrapper<float>& uniform, AttributeWrapper<openvdb::Name>& string, GroupWrapper& group) { // init openvdb::math::Random01 randNumber(0); const int n = int(std::sqrt(double(numPoints))); const double xScale = (2.0 * M_PI) / double(n); const double yScale = M_PI / double(n); double x, y, theta, phi; openvdb::Vec3f pos; position.resize(n*n); xyz.resize(stride ? n*n*3 : 1); id.resize(n*n); uniform.resize(n*n); string.resize(n*n); group.resize(n*n); AttributeWrapper<Vec3f>::Handle positionHandle(position); AttributeWrapper<int>::Handle xyzHandle(xyz); AttributeWrapper<int>::Handle idHandle(id); AttributeWrapper<float>::Handle uniformHandle(uniform); AttributeWrapper<openvdb::Name>::Handle stringHandle(string); int i = 0; // loop over a [0 to n) x [0 to n) grid. for (int a = 0; a < n; ++a) { for (int b = 0; b < n; ++b) { // jitter, move to random pos. 
inside the current cell x = double(a) + randNumber(); y = double(b) + randNumber(); // remap to a lat/long map theta = y * yScale; // [0 to PI] phi = x * xScale; // [0 to 2PI] // convert to cartesian coordinates on a unit sphere. // spherical coordinate triplet (r=1, theta, phi) pos[0] = static_cast<float>(std::sin(theta)*std::cos(phi)*scale); pos[1] = static_cast<float>(std::sin(theta)*std::sin(phi)*scale); pos[2] = static_cast<float>(std::cos(theta)*scale); positionHandle.set(i, /*stride*/0, pos); idHandle.set(i, /*stride*/0, i); uniformHandle.set(i, /*stride*/0, 100.0f); if (stride) { xyzHandle.set(i, 0, i); xyzHandle.set(i, 1, i*i); xyzHandle.set(i, 2, i*i*i); } // add points with even id to the group if ((i % 2) == 0) { group.setOffsetOn(i); stringHandle.set(i, /*stride*/0, "testA"); } else { stringHandle.set(i, /*stride*/0, "testB"); } i++; } } } //////////////////////////////////////// TEST_F(TestPointConversion, testPointConversion) { // generate points const size_t count(1000000); AttributeWrapper<Vec3f> position(1); AttributeWrapper<int> xyz(1); AttributeWrapper<int> id(1); AttributeWrapper<float> uniform(1); AttributeWrapper<openvdb::Name> string(1); GroupWrapper group; genPoints(count, /*scale=*/ 100.0, /*stride=*/false, position, xyz, id, uniform, string, group); EXPECT_EQ(position.size(), count); EXPECT_EQ(id.size(), count); EXPECT_EQ(uniform.size(), count); EXPECT_EQ(string.size(), count); EXPECT_EQ(group.size(), count); // convert point positions into a Point Data Grid const float voxelSize = 1.0f; openvdb::math::Transform::Ptr transform(openvdb::math::Transform::createLinearTransform(voxelSize)); tools::PointIndexGrid::Ptr pointIndexGrid = tools::createPointIndexGrid<tools::PointIndexGrid>(position, *transform); PointDataGrid::Ptr pointDataGrid = createPointDataGrid<NullCodec, PointDataGrid>(*pointIndexGrid, position, *transform); tools::PointIndexTree& indexTree = pointIndexGrid->tree(); PointDataTree& tree = pointDataGrid->tree(); // add id and populate appendAttribute<int>(tree, "id"); populateAttribute<PointDataTree, tools::PointIndexTree, AttributeWrapper<int>>(tree, indexTree, "id", id); // add uniform and populate appendAttribute<float>(tree, "uniform"); populateAttribute<PointDataTree, tools::PointIndexTree, AttributeWrapper<float>>(tree, indexTree, "uniform", uniform); // add string and populate appendAttribute<Name>(tree, "string"); // reset the descriptors PointDataTree::LeafIter leafIter = tree.beginLeaf(); const AttributeSet::Descriptor& descriptor = leafIter->attributeSet().descriptor(); auto newDescriptor = std::make_shared<AttributeSet::Descriptor>(descriptor); for (; leafIter; ++leafIter) { leafIter->resetDescriptor(newDescriptor); } populateAttribute<PointDataTree, tools::PointIndexTree, AttributeWrapper<openvdb::Name>>( tree, indexTree, "string", string); // add group and set membership appendGroup(tree, "test"); setGroup(tree, indexTree, group.buffer(), "test"); EXPECT_EQ(indexTree.leafCount(), tree.leafCount()); // read/write grid to a temp file std::string tempDir; if (const char* dir = std::getenv("TMPDIR")) tempDir = dir; #ifdef _MSC_VER if (tempDir.empty()) { char tempDirBuffer[MAX_PATH+1]; int tempDirLen = GetTempPath(MAX_PATH+1, tempDirBuffer); EXPECT_TRUE(tempDirLen > 0 && tempDirLen <= MAX_PATH); tempDir = tempDirBuffer; } #else if (tempDir.empty()) tempDir = P_tmpdir; #endif std::string filename = tempDir + "/openvdb_test_point_conversion"; io::File fileOut(filename); GridCPtrVec grids; grids.push_back(pointDataGrid); fileOut.write(grids); 
fileOut.close(); io::File fileIn(filename); fileIn.open(); GridPtrVecPtr readGrids = fileIn.getGrids(); fileIn.close(); EXPECT_EQ(readGrids->size(), size_t(1)); pointDataGrid = GridBase::grid<PointDataGrid>((*readGrids)[0]); PointDataTree& inputTree = pointDataGrid->tree(); // create accessor and iterator for Point Data Tree PointDataTree::LeafCIter leafCIter = inputTree.cbeginLeaf(); EXPECT_EQ(5, int(leafCIter->attributeSet().size())); EXPECT_TRUE(leafCIter->attributeSet().find("id") != AttributeSet::INVALID_POS); EXPECT_TRUE(leafCIter->attributeSet().find("uniform") != AttributeSet::INVALID_POS); EXPECT_TRUE(leafCIter->attributeSet().find("P") != AttributeSet::INVALID_POS); EXPECT_TRUE(leafCIter->attributeSet().find("string") != AttributeSet::INVALID_POS); const auto idIndex = static_cast<Index>(leafCIter->attributeSet().find("id")); const auto uniformIndex = static_cast<Index>(leafCIter->attributeSet().find("uniform")); const auto stringIndex = static_cast<Index>(leafCIter->attributeSet().find("string")); const AttributeSet::Descriptor::GroupIndex groupIndex = leafCIter->attributeSet().groupIndex("test"); // convert back into linear point attribute data AttributeWrapper<Vec3f> outputPosition(1); AttributeWrapper<int> outputId(1); AttributeWrapper<float> outputUniform(1); AttributeWrapper<openvdb::Name> outputString(1); GroupWrapper outputGroup; // test offset the whole point block by an arbitrary amount Index64 startOffset = 10; outputPosition.resize(startOffset + position.size()); outputId.resize(startOffset + id.size()); outputUniform.resize(startOffset + uniform.size()); outputString.resize(startOffset + string.size()); outputGroup.resize(startOffset + group.size()); std::vector<Name> includeGroups; std::vector<Name> excludeGroups; std::vector<Index64> offsets; MultiGroupFilter filter(includeGroups, excludeGroups, inputTree.cbeginLeaf()->attributeSet()); pointOffsets(offsets, inputTree, filter); convertPointDataGridPosition(outputPosition, *pointDataGrid, offsets, startOffset, filter); convertPointDataGridAttribute(outputId, inputTree, offsets, startOffset, idIndex, 1, filter); convertPointDataGridAttribute(outputUniform, inputTree, offsets, startOffset, uniformIndex, 1, filter); convertPointDataGridAttribute(outputString, inputTree, offsets, startOffset, stringIndex, 1, filter); convertPointDataGridGroup(outputGroup, inputTree, offsets, startOffset, groupIndex, filter); // pack and sort the new buffers based on id std::vector<PointData> pointData(count); for (unsigned int i = 0; i < count; i++) { pointData[i].id = outputId.buffer()[startOffset + i]; pointData[i].position = outputPosition.buffer()[startOffset + i]; pointData[i].uniform = outputUniform.buffer()[startOffset + i]; pointData[i].string = outputString.buffer()[startOffset + i]; pointData[i].group = outputGroup.buffer()[startOffset + i]; } std::sort(pointData.begin(), pointData.end()); // compare old and new buffers for (unsigned int i = 0; i < count; i++) { EXPECT_EQ(id.buffer()[i], pointData[i].id); EXPECT_EQ(group.buffer()[i], pointData[i].group); EXPECT_EQ(uniform.buffer()[i], pointData[i].uniform); EXPECT_EQ(string.buffer()[i], pointData[i].string); EXPECT_NEAR(position.buffer()[i].x(), pointData[i].position.x(), /*tolerance=*/1e-6); EXPECT_NEAR(position.buffer()[i].y(), pointData[i].position.y(), /*tolerance=*/1e-6); EXPECT_NEAR(position.buffer()[i].z(), pointData[i].position.z(), /*tolerance=*/1e-6); } // convert based on even group const size_t halfCount = count / 2; outputPosition.resize(startOffset + halfCount); 
outputId.resize(startOffset + halfCount); outputUniform.resize(startOffset + halfCount); outputString.resize(startOffset + halfCount); outputGroup.resize(startOffset + halfCount); includeGroups.push_back("test"); offsets.clear(); MultiGroupFilter filter2(includeGroups, excludeGroups, inputTree.cbeginLeaf()->attributeSet()); pointOffsets(offsets, inputTree, filter2); convertPointDataGridPosition(outputPosition, *pointDataGrid, offsets, startOffset, filter2); convertPointDataGridAttribute(outputId, inputTree, offsets, startOffset, idIndex, /*stride*/1, filter2); convertPointDataGridAttribute(outputUniform, inputTree, offsets, startOffset, uniformIndex, /*stride*/1, filter2); convertPointDataGridAttribute(outputString, inputTree, offsets, startOffset, stringIndex, /*stride*/1, filter2); convertPointDataGridGroup(outputGroup, inputTree, offsets, startOffset, groupIndex, filter2); EXPECT_EQ(size_t(outputPosition.size() - startOffset), size_t(halfCount)); EXPECT_EQ(size_t(outputId.size() - startOffset), size_t(halfCount)); EXPECT_EQ(size_t(outputUniform.size() - startOffset), size_t(halfCount)); EXPECT_EQ(size_t(outputString.size() - startOffset), size_t(halfCount)); EXPECT_EQ(size_t(outputGroup.size() - startOffset), size_t(halfCount)); pointData.clear(); for (unsigned int i = 0; i < halfCount; i++) { PointData data; data.id = outputId.buffer()[startOffset + i]; data.position = outputPosition.buffer()[startOffset + i]; data.uniform = outputUniform.buffer()[startOffset + i]; data.string = outputString.buffer()[startOffset + i]; data.group = outputGroup.buffer()[startOffset + i]; pointData.push_back(data); } std::sort(pointData.begin(), pointData.end()); // compare old and new buffers for (unsigned int i = 0; i < halfCount; i++) { EXPECT_EQ(id.buffer()[i*2], pointData[i].id); EXPECT_EQ(group.buffer()[i*2], pointData[i].group); EXPECT_EQ(uniform.buffer()[i*2], pointData[i].uniform); EXPECT_EQ(string.buffer()[i*2], pointData[i].string); EXPECT_NEAR(position.buffer()[i*2].x(), pointData[i].position.x(), /*tolerance=*/1e-6); EXPECT_NEAR(position.buffer()[i*2].y(), pointData[i].position.y(), /*tolerance=*/1e-6); EXPECT_NEAR(position.buffer()[i*2].z(), pointData[i].position.z(), /*tolerance=*/1e-6); } std::remove(filename.c_str()); } //////////////////////////////////////// TEST_F(TestPointConversion, testPointConversionNans) { // generate points const size_t count(25); AttributeWrapper<Vec3f> position(1); AttributeWrapper<int> xyz(1); AttributeWrapper<int> id(1); AttributeWrapper<float> uniform(1); AttributeWrapper<openvdb::Name> string(1); GroupWrapper group; genPoints(count, /*scale=*/ 1.0, /*stride=*/false, position, xyz, id, uniform, string, group); // set point numbers 0, 10, 20 and 24 to a nan position const std::vector<int> nanIndices = { 0, 10, 20, 24 }; AttributeWrapper<Vec3f>::Handle positionHandle(position); const Vec3f nanPos(std::nan("0")); EXPECT_TRUE(nanPos.isNan()); for (const int& idx : nanIndices) { positionHandle.set(idx, /*stride*/0, nanPos); } EXPECT_EQ(count, position.size()); EXPECT_EQ(count, id.size()); EXPECT_EQ(count, uniform.size()); EXPECT_EQ(count, string.size()); EXPECT_EQ(count, group.size()); // convert point positions into a Point Data Grid openvdb::math::Transform::Ptr transform = openvdb::math::Transform::createLinearTransform(/*voxelsize*/1.0f); tools::PointIndexGrid::Ptr pointIndexGrid = tools::createPointIndexGrid<tools::PointIndexGrid>(position, *transform); PointDataGrid::Ptr pointDataGrid = createPointDataGrid<NullCodec, PointDataGrid>(*pointIndexGrid, 
position, *transform); tools::PointIndexTree& indexTree = pointIndexGrid->tree(); PointDataTree& tree = pointDataGrid->tree(); // set expected point count to the total minus the number of nan positions const size_t expected = count - nanIndices.size(); EXPECT_EQ(expected, static_cast<size_t>(pointCount(tree))); // add id and populate appendAttribute<int>(tree, "id"); populateAttribute<PointDataTree, tools::PointIndexTree, AttributeWrapper<int>>(tree, indexTree, "id", id); // add uniform and populate appendAttribute<float>(tree, "uniform"); populateAttribute<PointDataTree, tools::PointIndexTree, AttributeWrapper<float>>(tree, indexTree, "uniform", uniform); // add string and populate appendAttribute<Name>(tree, "string"); populateAttribute<PointDataTree, tools::PointIndexTree, AttributeWrapper<openvdb::Name>>( tree, indexTree, "string", string); // add group and set membership appendGroup(tree, "test"); setGroup(tree, indexTree, group.buffer(), "test"); // create accessor and iterator for Point Data Tree const auto leafCIter = tree.cbeginLeaf(); EXPECT_EQ(5, int(leafCIter->attributeSet().size())); EXPECT_TRUE(leafCIter->attributeSet().find("id") != AttributeSet::INVALID_POS); EXPECT_TRUE(leafCIter->attributeSet().find("uniform") != AttributeSet::INVALID_POS); EXPECT_TRUE(leafCIter->attributeSet().find("P") != AttributeSet::INVALID_POS); EXPECT_TRUE(leafCIter->attributeSet().find("string") != AttributeSet::INVALID_POS); const auto idIndex = static_cast<Index>(leafCIter->attributeSet().find("id")); const auto uniformIndex = static_cast<Index>(leafCIter->attributeSet().find("uniform")); const auto stringIndex = static_cast<Index>(leafCIter->attributeSet().find("string")); const AttributeSet::Descriptor::GroupIndex groupIndex = leafCIter->attributeSet().groupIndex("test"); // convert back into linear point attribute data AttributeWrapper<Vec3f> outputPosition(1); AttributeWrapper<int> outputId(1); AttributeWrapper<float> outputUniform(1); AttributeWrapper<openvdb::Name> outputString(1); GroupWrapper outputGroup; outputPosition.resize(position.size()); outputId.resize(id.size()); outputUniform.resize(uniform.size()); outputString.resize(string.size()); outputGroup.resize(group.size()); std::vector<Index64> offsets; pointOffsets(offsets, tree); convertPointDataGridPosition(outputPosition, *pointDataGrid, offsets, 0); convertPointDataGridAttribute(outputId, tree, offsets, 0, idIndex, 1); convertPointDataGridAttribute(outputUniform, tree, offsets, 0, uniformIndex, 1); convertPointDataGridAttribute(outputString, tree, offsets, 0, stringIndex, 1); convertPointDataGridGroup(outputGroup, tree, offsets, 0, groupIndex); // pack and sort the new buffers based on id std::vector<PointData> pointData(expected); for (unsigned int i = 0; i < expected; i++) { pointData[i].id = outputId.buffer()[i]; pointData[i].position = outputPosition.buffer()[i]; pointData[i].uniform = outputUniform.buffer()[i]; pointData[i].string = outputString.buffer()[i]; pointData[i].group = outputGroup.buffer()[i]; } std::sort(pointData.begin(), pointData.end()); // compare old and new buffers, taking into account the nan position // which should not have been converted for (unsigned int i = 0; i < expected; ++i) { size_t iOffset = i; for (const int& idx : nanIndices) { if (int(iOffset) >= idx) iOffset += 1; } EXPECT_EQ(id.buffer()[iOffset], pointData[i].id); EXPECT_EQ(group.buffer()[iOffset], pointData[i].group); EXPECT_EQ(uniform.buffer()[iOffset], pointData[i].uniform); EXPECT_EQ(string.buffer()[iOffset], pointData[i].string); 
EXPECT_NEAR(position.buffer()[iOffset].x(), pointData[i].position.x(), /*tolerance=*/1e-6); EXPECT_NEAR(position.buffer()[iOffset].y(), pointData[i].position.y(), /*tolerance=*/1e-6); EXPECT_NEAR(position.buffer()[iOffset].z(), pointData[i].position.z(), /*tolerance=*/1e-6); } } //////////////////////////////////////// TEST_F(TestPointConversion, testStride) { // generate points const size_t count(40000); AttributeWrapper<Vec3f> position(1); AttributeWrapper<int> xyz(3); AttributeWrapper<int> id(1); AttributeWrapper<float> uniform(1); AttributeWrapper<openvdb::Name> string(1); GroupWrapper group; genPoints(count, /*scale=*/ 100.0, /*stride=*/true, position, xyz, id, uniform, string, group); EXPECT_EQ(position.size(), count); EXPECT_EQ(xyz.size(), count*3); EXPECT_EQ(id.size(), count); // convert point positions into a Point Data Grid const float voxelSize = 1.0f; openvdb::math::Transform::Ptr transform(openvdb::math::Transform::createLinearTransform(voxelSize)); tools::PointIndexGrid::Ptr pointIndexGrid = tools::createPointIndexGrid<tools::PointIndexGrid>(position, *transform); PointDataGrid::Ptr pointDataGrid = createPointDataGrid<NullCodec, PointDataGrid>(*pointIndexGrid, position, *transform); tools::PointIndexTree& indexTree = pointIndexGrid->tree(); PointDataTree& tree = pointDataGrid->tree(); // add id and populate appendAttribute<int>(tree, "id"); populateAttribute<PointDataTree, tools::PointIndexTree, AttributeWrapper<int>>(tree, indexTree, "id", id); // add xyz and populate appendAttribute<int>(tree, "xyz", 0, /*stride=*/3); populateAttribute<PointDataTree, tools::PointIndexTree, AttributeWrapper<int>>(tree, indexTree, "xyz", xyz, /*stride=*/3); // create accessor and iterator for Point Data Tree PointDataTree::LeafCIter leafCIter = tree.cbeginLeaf(); EXPECT_EQ(3, int(leafCIter->attributeSet().size())); EXPECT_TRUE(leafCIter->attributeSet().find("id") != AttributeSet::INVALID_POS); EXPECT_TRUE(leafCIter->attributeSet().find("P") != AttributeSet::INVALID_POS); EXPECT_TRUE(leafCIter->attributeSet().find("xyz") != AttributeSet::INVALID_POS); const auto idIndex = static_cast<Index>(leafCIter->attributeSet().find("id")); const auto xyzIndex = static_cast<Index>(leafCIter->attributeSet().find("xyz")); // convert back into linear point attribute data AttributeWrapper<Vec3f> outputPosition(1); AttributeWrapper<int> outputXyz(3); AttributeWrapper<int> outputId(1); // test offset the whole point block by an arbitrary amount Index64 startOffset = 10; outputPosition.resize(startOffset + position.size()); outputXyz.resize((startOffset + id.size())*3); outputId.resize(startOffset + id.size()); std::vector<Index64> offsets; pointOffsets(offsets, tree); convertPointDataGridPosition(outputPosition, *pointDataGrid, offsets, startOffset); convertPointDataGridAttribute(outputId, tree, offsets, startOffset, idIndex); convertPointDataGridAttribute(outputXyz, tree, offsets, startOffset, xyzIndex, /*stride=*/3); // pack and sort the new buffers based on id std::vector<PointData> pointData; pointData.resize(count); for (unsigned int i = 0; i < count; i++) { pointData[i].id = outputId.buffer()[startOffset + i]; pointData[i].position = outputPosition.buffer()[startOffset + i]; for (unsigned int j = 0; j < 3; j++) { pointData[i].xyz[j] = outputXyz.buffer()[startOffset * 3 + i * 3 + j]; } } std::sort(pointData.begin(), pointData.end()); // compare old and new buffers for (unsigned int i = 0; i < count; i++) { EXPECT_EQ(id.buffer()[i], pointData[i].id); EXPECT_NEAR(position.buffer()[i].x(), 
pointData[i].position.x(), /*tolerance=*/1e-6); EXPECT_NEAR(position.buffer()[i].y(), pointData[i].position.y(), /*tolerance=*/1e-6); EXPECT_NEAR(position.buffer()[i].z(), pointData[i].position.z(), /*tolerance=*/1e-6); EXPECT_EQ(Vec3i(xyz.buffer()[i*3], xyz.buffer()[i*3+1], xyz.buffer()[i*3+2]), pointData[i].xyz); } } //////////////////////////////////////// TEST_F(TestPointConversion, testComputeVoxelSize) { struct Local { static PointDataGrid::Ptr genPointsGrid(const float voxelSize, const AttributeWrapper<Vec3f>& positions) { math::Transform::Ptr transform(math::Transform::createLinearTransform(voxelSize)); tools::PointIndexGrid::Ptr pointIndexGrid = tools::createPointIndexGrid<tools::PointIndexGrid>(positions, *transform); return createPointDataGrid<NullCodec, PointDataGrid>(*pointIndexGrid, positions, *transform); } }; // minimum and maximum voxel sizes const auto minimumVoxelSize = static_cast<float>(math::Pow(double(3e-15), 1.0/3.0)); const auto maximumVoxelSize = static_cast<float>(math::Pow(double(std::numeric_limits<float>::max()), 1.0/3.0)); AttributeWrapper<Vec3f> position(/*stride*/1); AttributeWrapper<Vec3d> positionD(/*stride*/1); // test with no positions { const float voxelSize = computeVoxelSize(position, /*points per voxel*/8); EXPECT_EQ(voxelSize, 0.1f); } // test with one point { position.resize(1); AttributeWrapper<Vec3f>::Handle positionHandle(position); positionHandle.set(0, 0, Vec3f(0.0f)); const float voxelSize = computeVoxelSize(position, /*points per voxel*/8); EXPECT_EQ(voxelSize, 0.1f); } // test with n points, where n > 1 && n <= num points per voxel { position.resize(7); AttributeWrapper<Vec3f>::Handle positionHandle(position); positionHandle.set(0, 0, Vec3f(-8.6f, 0.0f,-23.8f)); positionHandle.set(1, 0, Vec3f( 8.6f, 7.8f, 23.8f)); for (size_t i = 2; i < 7; ++ i) positionHandle.set(i, 0, Vec3f(0.0f)); float voxelSize = computeVoxelSize(position, /*points per voxel*/8); EXPECT_NEAR(18.5528f, voxelSize, /*tolerance=*/1e-4); voxelSize = computeVoxelSize(position, /*points per voxel*/1); EXPECT_NEAR(5.51306f, voxelSize, /*tolerance=*/1e-4); // test decimal place accuracy voxelSize = computeVoxelSize(position, /*points per voxel*/1, math::Mat4d::identity(), 10); EXPECT_NEAR(5.5130610466f, voxelSize, /*tolerance=*/1e-9); voxelSize = computeVoxelSize(position, /*points per voxel*/1, math::Mat4d::identity(), 1); EXPECT_EQ(5.5f, voxelSize); voxelSize = computeVoxelSize(position, /*points per voxel*/1, math::Mat4d::identity(), 0); EXPECT_EQ(6.0f, voxelSize); } // test coplanar points (Y=0) { position.resize(5); AttributeWrapper<Vec3f>::Handle positionHandle(position); positionHandle.set(0, 0, Vec3f(0.0f, 0.0f, 10.0f)); positionHandle.set(1, 0, Vec3f(0.0f, 0.0f, -10.0f)); positionHandle.set(2, 0, Vec3f(20.0f, 0.0f, -10.0f)); positionHandle.set(3, 0, Vec3f(20.0f, 0.0f, 10.0f)); positionHandle.set(4, 0, Vec3f(10.0f, 0.0f, 0.0f)); float voxelSize = computeVoxelSize(position, /*points per voxel*/5); EXPECT_NEAR(20.0f, voxelSize, /*tolerance=*/1e-4); voxelSize = computeVoxelSize(position, /*points per voxel*/1); EXPECT_NEAR(11.696f, voxelSize, /*tolerance=*/1e-4); } // test collinear points (X=0, Y=0) { position.resize(5); AttributeWrapper<Vec3f>::Handle positionHandle(position); positionHandle.set(0, 0, Vec3f(0.0f, 0.0f, 10.0f)); positionHandle.set(1, 0, Vec3f(0.0f, 0.0f, -10.0f)); positionHandle.set(2, 0, Vec3f(0.0f, 0.0f, -10.0f)); positionHandle.set(3, 0, Vec3f(0.0f, 0.0f, 10.0f)); positionHandle.set(4, 0, Vec3f(0.0f, 0.0f, 0.0f)); float voxelSize = 
computeVoxelSize(position, /*points per voxel*/5); EXPECT_NEAR(20.0f, voxelSize, /*tolerance=*/1e-4); voxelSize = computeVoxelSize(position, /*points per voxel*/1); EXPECT_NEAR(8.32034f, voxelSize, /*tolerance=*/1e-4); } // test min limit collinear points (X=0, Y=0, Z=+/-float min) { position.resize(2); AttributeWrapper<Vec3f>::Handle positionHandle(position); positionHandle.set(0, 0, Vec3f(0.0f, 0.0f, -std::numeric_limits<float>::min())); positionHandle.set(1, 0, Vec3f(0.0f, 0.0f, std::numeric_limits<float>::min())); float voxelSize = computeVoxelSize(position, /*points per voxel*/2); EXPECT_NEAR(minimumVoxelSize, voxelSize, /*tolerance=*/1e-4); voxelSize = computeVoxelSize(position, /*points per voxel*/1); EXPECT_NEAR(minimumVoxelSize, voxelSize, /*tolerance=*/1e-4); } // test max limit collinear points (X=+/-float max, Y=0, Z=0) { position.resize(2); AttributeWrapper<Vec3f>::Handle positionHandle(position); positionHandle.set(0, 0, Vec3f(-std::numeric_limits<float>::max(), 0.0f, 0.0f)); positionHandle.set(1, 0, Vec3f(std::numeric_limits<float>::max(), 0.0f, 0.0f)); float voxelSize = computeVoxelSize(position, /*points per voxel*/2); EXPECT_NEAR(maximumVoxelSize, voxelSize, /*tolerance=*/1e-4); voxelSize = computeVoxelSize(position, /*points per voxel*/1); EXPECT_NEAR(maximumVoxelSize, voxelSize, /*tolerance=*/1e-4); } // max pointsPerVoxel { position.resize(2); AttributeWrapper<Vec3f>::Handle positionHandle(position); positionHandle.set(0, 0, Vec3f(0)); positionHandle.set(1, 0, Vec3f(1)); float voxelSize = computeVoxelSize(position, /*points per voxel*/std::numeric_limits<uint32_t>::max()); EXPECT_EQ(voxelSize, 1.0f); } // limits test { positionD.resize(2); AttributeWrapper<Vec3d>::Handle positionHandleD(positionD); positionHandleD.set(0, 0, Vec3d(0)); positionHandleD.set(1, 0, Vec3d(std::numeric_limits<double>::max())); float voxelSize = computeVoxelSize(positionD, /*points per voxel*/2); EXPECT_EQ(voxelSize, maximumVoxelSize); } { const float smallest(std::numeric_limits<float>::min()); position.resize(4); AttributeWrapper<Vec3f>::Handle positionHandle(position); positionHandle.set(0, 0, Vec3f(0.0f)); positionHandle.set(1, 0, Vec3f(smallest)); positionHandle.set(2, 0, Vec3f(smallest, 0.0f, 0.0f)); positionHandle.set(3, 0, Vec3f(smallest, 0.0f, smallest)); float voxelSize = computeVoxelSize(position, /*points per voxel*/4); EXPECT_EQ(voxelSize, minimumVoxelSize); voxelSize = computeVoxelSize(position, /*points per voxel*/1); EXPECT_NEAR(minimumVoxelSize, voxelSize, /*tolerance=*/1e-4); PointDataGrid::Ptr grid = Local::genPointsGrid(voxelSize, position); EXPECT_EQ(grid->activeVoxelCount(), Index64(1)); } // the smallest possible vector extent that can exist from an input set // without being clamped to the minimum voxel size // is Tolerance<Real>::value() + std::numeric_limits<Real>::min() { position.resize(2); AttributeWrapper<Vec3f>::Handle positionHandle(position); positionHandle.set(0, 0, Vec3f(0.0f)); positionHandle.set(1, 0, Vec3f(math::Tolerance<Real>::value() + std::numeric_limits<Real>::min())); float voxelSize = computeVoxelSize(position, /*points per voxel*/1); EXPECT_EQ(voxelSize, minimumVoxelSize); } // in-between smallest extent and ScaleMap determinant test { position.resize(2); AttributeWrapper<Vec3f>::Handle positionHandle(position); positionHandle.set(0, 0, Vec3f(0.0f)); positionHandle.set(1, 0, Vec3f(math::Tolerance<Real>::value()*1e8 + std::numeric_limits<Real>::min())); float voxelSize = computeVoxelSize(position, /*points per voxel*/1); EXPECT_EQ(voxelSize, 
float(math::Pow(double(3e-15), 1.0/3.0))); } { const float smallValue(1e-5f); position.resize(300000); AttributeWrapper<Vec3f>::Handle positionHandle(position); for (size_t i = 0; i < 100000; ++ i) { positionHandle.set(i, 0, Vec3f(smallValue*float(i), 0, 0)); positionHandle.set(i+100000, 0, Vec3f(0, smallValue*float(i), 0)); positionHandle.set(i+200000, 0, Vec3f(0, 0, smallValue*float(i))); } float voxelSize = computeVoxelSize(position, /*points per voxel*/10); EXPECT_NEAR(0.00012f, voxelSize, /*tolerance=*/1e-4); voxelSize = computeVoxelSize(position, /*points per voxel*/1); EXPECT_NEAR(2e-5, voxelSize, /*tolerance=*/1e-6); PointDataGrid::Ptr grid = Local::genPointsGrid(voxelSize, position); EXPECT_EQ(grid->activeVoxelCount(), Index64(150001)); // check zero decimal place still returns valid result voxelSize = computeVoxelSize(position, /*points per voxel*/1, math::Mat4d::identity(), 0); EXPECT_NEAR(2e-5, voxelSize, /*tolerance=*/1e-6); } // random position generation within two bounds of equal size. // This test distributes 1000 points within a 1x1x1 box centered at (0,0,0) // and another 1000 points within a separate 1x1x1 box centered at (20,20,20). // Points are randomly positioned however can be defined as having a stochastic // distribution. Tests that sparsity between these data sets causes no issues // and that computeVoxelSize produces accurate results { position.resize(2000); AttributeWrapper<Vec3f>::Handle positionHandle(position); openvdb::math::Random01 randNumber(0); // positions between -0.5 and 0.5 for (size_t i = 0; i < 1000; ++ i) { const Vec3f pos(randNumber() - 0.5f); positionHandle.set(i, 0, pos); } // positions between 19.5 and 20.5 for (size_t i = 1000; i < 2000; ++ i) { const Vec3f pos(randNumber() - 0.5f + 20.0f); positionHandle.set(i, 0, pos); } float voxelSize = computeVoxelSize(position, /*points per voxel*/1); EXPECT_NEAR(0.00052f, voxelSize, /*tolerance=*/1e-4); PointDataGrid::Ptr grid = Local::genPointsGrid(voxelSize, position); const auto pointsPerVoxel = static_cast<Index64>( math::Round(2000.0f / static_cast<float>(grid->activeVoxelCount()))); EXPECT_EQ(pointsPerVoxel, Index64(1)); } // random position generation within three bounds of varying size. // This test distributes 1000 points within a 1x1x1 box centered at (0.5,0.5,0,5) // another 1000 points within a separate 10x10x10 box centered at (15,15,15) and // a final 1000 points within a separate 50x50x50 box centered at (75,75,75) // Points are randomly positioned however can be defined as having a stochastic // distribution. 
Tests that sparsity between these data sets causes no issues as // well as computeVoxelSize producing a good average result { position.resize(3000); AttributeWrapper<Vec3f>::Handle positionHandle(position); openvdb::math::Random01 randNumber(0); // positions between 0 and 1 for (size_t i = 0; i < 1000; ++ i) { const Vec3f pos(randNumber()); positionHandle.set(i, 0, pos); } // positions between 10 and 20 for (size_t i = 1000; i < 2000; ++ i) { const Vec3f pos((randNumber() * 10.0f) + 10.0f); positionHandle.set(i, 0, pos); } // positions between 50 and 100 for (size_t i = 2000; i < 3000; ++ i) { const Vec3f pos((randNumber() * 50.0f) + 50.0f); positionHandle.set(i, 0, pos); } float voxelSize = computeVoxelSize(position, /*points per voxel*/10); EXPECT_NEAR(0.24758f, voxelSize, /*tolerance=*/1e-3); PointDataGrid::Ptr grid = Local::genPointsGrid(voxelSize, position); auto pointsPerVoxel = static_cast<Index64>( math::Round(3000.0f/ static_cast<float>(grid->activeVoxelCount()))); EXPECT_TRUE(math::isApproxEqual(pointsPerVoxel, Index64(10), Index64(2))); voxelSize = computeVoxelSize(position, /*points per voxel*/1); EXPECT_NEAR(0.00231f, voxelSize, /*tolerance=*/1e-4); grid = Local::genPointsGrid(voxelSize, position); pointsPerVoxel = static_cast<Index64>( math::Round(3000.0f/ static_cast<float>(grid->activeVoxelCount()))); EXPECT_EQ(pointsPerVoxel, Index64(1)); } // Generate a sphere // NOTE: The sphere does NOT provide uniform distribution const size_t count(40000); position.resize(0); AttributeWrapper<int> xyz(1); AttributeWrapper<int> id(1); AttributeWrapper<float> uniform(1); AttributeWrapper<openvdb::Name> string(1); GroupWrapper group; genPoints(count, /*scale=*/ 100.0, /*stride=*/false, position, xyz, id, uniform, string, group); EXPECT_EQ(position.size(), count); EXPECT_EQ(id.size(), count); EXPECT_EQ(uniform.size(), count); EXPECT_EQ(string.size(), count); EXPECT_EQ(group.size(), count); // test a distributed point set around a sphere { const float voxelSize = computeVoxelSize(position, /*points per voxel*/2); EXPECT_NEAR(2.6275f, voxelSize, /*tolerance=*/0.01); PointDataGrid::Ptr grid = Local::genPointsGrid(voxelSize, position); const Index64 pointsPerVoxel = count / grid->activeVoxelCount(); EXPECT_EQ(pointsPerVoxel, Index64(2)); } // test with given target transforms { // test that a different scale doesn't change the result openvdb::math::Transform::Ptr transform1(openvdb::math::Transform::createLinearTransform(0.33)); openvdb::math::Transform::Ptr transform2(openvdb::math::Transform::createLinearTransform(0.87)); math::UniformScaleMap::ConstPtr scaleMap1 = transform1->constMap<math::UniformScaleMap>(); math::UniformScaleMap::ConstPtr scaleMap2 = transform2->constMap<math::UniformScaleMap>(); EXPECT_TRUE(scaleMap1.get()); EXPECT_TRUE(scaleMap2.get()); math::AffineMap::ConstPtr affineMap1 = scaleMap1->getAffineMap(); math::AffineMap::ConstPtr affineMap2 = scaleMap2->getAffineMap(); float voxelSize1 = computeVoxelSize(position, /*points per voxel*/2, affineMap1->getMat4()); float voxelSize2 = computeVoxelSize(position, /*points per voxel*/2, affineMap2->getMat4()); EXPECT_EQ(voxelSize1, voxelSize2); // test that applying a rotation roughly calculates to the same result for this example // NOTE: distribution is not uniform // Rotate by 45 degrees in X, Y, Z transform1->postRotate(M_PI / 4.0, math::X_AXIS); transform1->postRotate(M_PI / 4.0, math::Y_AXIS); transform1->postRotate(M_PI / 4.0, math::Z_AXIS); affineMap1 = transform1->constMap<math::AffineMap>(); 
EXPECT_TRUE(affineMap1.get()); float voxelSize3 = computeVoxelSize(position, /*points per voxel*/2, affineMap1->getMat4()); EXPECT_NEAR(voxelSize1, voxelSize3, 0.1); // test that applying a translation roughly calculates to the same result for this example transform1->postTranslate(Vec3d(-5.0f, 3.3f, 20.1f)); affineMap1 = transform1->constMap<math::AffineMap>(); EXPECT_TRUE(affineMap1.get()); float voxelSize4 = computeVoxelSize(position, /*points per voxel*/2, affineMap1->getMat4()); EXPECT_NEAR(voxelSize1, voxelSize4, 0.1); } } TEST_F(TestPointConversion, testPrecision) { const double tolerance = math::Tolerance<float>::value(); { // test values far from origin const double voxelSize = 0.5; const float halfVoxelSize = 0.25f; auto transform = math::Transform::createLinearTransform(voxelSize); float onBorder = 1000.0f + halfVoxelSize; // can be represented exactly in floating-point float beforeBorder = std::nextafterf(onBorder, /*to=*/0.0f); float afterBorder = std::nextafterf(onBorder, /*to=*/2000.0f); const Vec3f positionBefore(beforeBorder, afterBorder, onBorder); std::vector<Vec3f> points{positionBefore}; PointAttributeVector<Vec3f> wrapper(points); auto pointIndexGrid = tools::createPointIndexGrid<tools::PointIndexGrid>( wrapper, *transform); Vec3f positionAfterNull; Vec3f positionAfterFixed16; { // null codec auto points = createPointDataGrid<NullCodec, PointDataGrid>( *pointIndexGrid, wrapper, *transform); auto leafIter = points->tree().cbeginLeaf(); auto indexIter = leafIter->beginIndexOn(); auto handle = AttributeHandle<Vec3f>(leafIter->constAttributeArray("P")); const auto& ijk = indexIter.getCoord(); EXPECT_EQ(ijk.x(), 2000); EXPECT_EQ(ijk.y(), 2001); EXPECT_EQ(ijk.z(), 2001); // on border value is stored in the higher voxel const Vec3f positionVoxelSpace = handle.get(*indexIter); // voxel-space range: -0.5f >= value > 0.5f EXPECT_TRUE(positionVoxelSpace.x() > 0.49f && positionVoxelSpace.x() < 0.5f); EXPECT_TRUE(positionVoxelSpace.y() > -0.5f && positionVoxelSpace.y() < -0.49f); EXPECT_TRUE(positionVoxelSpace.z() == -0.5f); // on border value is stored at -0.5f positionAfterNull = Vec3f(transform->indexToWorld(positionVoxelSpace + ijk.asVec3d())); EXPECT_NEAR(positionAfterNull.x(), positionBefore.x(), tolerance); EXPECT_NEAR(positionAfterNull.y(), positionBefore.y(), tolerance); EXPECT_NEAR(positionAfterNull.z(), positionBefore.z(), tolerance); } { // fixed 16-bit codec auto points = createPointDataGrid<FixedPointCodec<false>, PointDataGrid>( *pointIndexGrid, wrapper, *transform); auto leafIter = points->tree().cbeginLeaf(); auto indexIter = leafIter->beginIndexOn(); auto handle = AttributeHandle<Vec3f>(leafIter->constAttributeArray("P")); const auto& ijk = indexIter.getCoord(); EXPECT_EQ(ijk.x(), 2000); EXPECT_EQ(ijk.y(), 2001); EXPECT_EQ(ijk.z(), 2001); // on border value is stored in the higher voxel const Vec3f positionVoxelSpace = handle.get(*indexIter); // voxel-space range: -0.5f >= value > 0.5f EXPECT_TRUE(positionVoxelSpace.x() > 0.49f && positionVoxelSpace.x() < 0.5f); EXPECT_TRUE(positionVoxelSpace.y() > -0.5f && positionVoxelSpace.y() < -0.49f); EXPECT_TRUE(positionVoxelSpace.z() == -0.5f); // on border value is stored at -0.5f positionAfterFixed16 = Vec3f(transform->indexToWorld( positionVoxelSpace + ijk.asVec3d())); EXPECT_NEAR(positionAfterFixed16.x(), positionBefore.x(), tolerance); EXPECT_NEAR(positionAfterFixed16.y(), positionBefore.y(), tolerance); EXPECT_NEAR(positionAfterFixed16.z(), positionBefore.z(), tolerance); } // at this precision null codec == 
fixed-point 16-bit codec EXPECT_EQ(positionAfterNull.x(), positionAfterFixed16.x()); EXPECT_EQ(positionAfterNull.y(), positionAfterFixed16.y()); EXPECT_EQ(positionAfterNull.z(), positionAfterFixed16.z()); } { // test values near to origin const double voxelSize = 0.5; const float halfVoxelSize = 0.25f; auto transform = math::Transform::createLinearTransform(voxelSize); float onBorder = 0.0f+halfVoxelSize; float beforeBorder = std::nextafterf(onBorder, /*to=*/0.0f); float afterBorder = std::nextafterf(onBorder, /*to=*/2000.0f); const Vec3f positionBefore(beforeBorder, afterBorder, onBorder); std::vector<Vec3f> points{positionBefore}; PointAttributeVector<Vec3f> wrapper(points); auto pointIndexGrid = tools::createPointIndexGrid<tools::PointIndexGrid>( wrapper, *transform); Vec3f positionAfterNull; Vec3f positionAfterFixed16; { // null codec auto points = createPointDataGrid<NullCodec, PointDataGrid>( *pointIndexGrid, wrapper, *transform); auto leafIter = points->tree().cbeginLeaf(); auto indexIter = leafIter->beginIndexOn(); auto handle = AttributeHandle<Vec3f>(leafIter->constAttributeArray("P")); const auto& ijk = indexIter.getCoord(); EXPECT_EQ(ijk.x(), 0); EXPECT_EQ(ijk.y(), 1); EXPECT_EQ(ijk.z(), 1); // on border value is stored in the higher voxel const Vec3f positionVoxelSpace = handle.get(*indexIter); // voxel-space range: -0.5f >= value > 0.5f EXPECT_TRUE(positionVoxelSpace.x() > 0.49f && positionVoxelSpace.x() < 0.5f); EXPECT_TRUE(positionVoxelSpace.y() > -0.5f && positionVoxelSpace.y() < -0.49f); EXPECT_TRUE(positionVoxelSpace.z() == -0.5f); // on border value is stored at -0.5f positionAfterNull = Vec3f(transform->indexToWorld(positionVoxelSpace + ijk.asVec3d())); EXPECT_NEAR(positionAfterNull.x(), positionBefore.x(), tolerance); EXPECT_NEAR(positionAfterNull.y(), positionBefore.y(), tolerance); EXPECT_NEAR(positionAfterNull.z(), positionBefore.z(), tolerance); } { // fixed 16-bit codec - at this precision, this codec results in lossy compression auto points = createPointDataGrid<FixedPointCodec<false>, PointDataGrid>( *pointIndexGrid, wrapper, *transform); auto leafIter = points->tree().cbeginLeaf(); auto indexIter = leafIter->beginIndexOn(); auto handle = AttributeHandle<Vec3f>(leafIter->constAttributeArray("P")); const auto& ijk = indexIter.getCoord(); EXPECT_EQ(ijk.x(), 0); EXPECT_EQ(ijk.y(), 1); EXPECT_EQ(ijk.z(), 1); // on border value is stored in the higher voxel const Vec3f positionVoxelSpace = handle.get(*indexIter); // voxel-space range: -0.5f >= value > 0.5f EXPECT_TRUE(positionVoxelSpace.x() == 0.5f); // before border is clamped to 0.5f EXPECT_TRUE(positionVoxelSpace.y() == -0.5f); // after border is clamped to -0.5f EXPECT_TRUE(positionVoxelSpace.z() == -0.5f); // on border is stored at -0.5f positionAfterFixed16 = Vec3f(transform->indexToWorld( positionVoxelSpace + ijk.asVec3d())); // reduce tolerance to handle lack of precision EXPECT_NEAR(positionAfterFixed16.x(), positionBefore.x(), 1e-6); EXPECT_NEAR(positionAfterFixed16.y(), positionBefore.y(), 1e-6); EXPECT_NEAR(positionAfterFixed16.z(), positionBefore.z(), tolerance); } // only z matches precisely due to lossy compression EXPECT_TRUE(positionAfterNull.x() != positionAfterFixed16.x()); EXPECT_TRUE(positionAfterNull.y() != positionAfterFixed16.y()); EXPECT_EQ(positionAfterNull.z(), positionAfterFixed16.z()); } }
47,392
C++
36.673291
130
0.644898
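The TestPointConversion record above exercises openvdb::points::computeVoxelSize together with createPointIndexGrid and createPointDataGrid. The following standalone sketch shows that workflow outside the test harness; the four positions and the target of two points per voxel are illustrative values only, not taken from the tests.

#include <openvdb/openvdb.h>
#include <openvdb/points/PointConversion.h>
#include <openvdb/points/PointCount.h>
#include <openvdb/tools/PointIndexGrid.h>
#include <iostream>
#include <vector>

int main()
{
    openvdb::initialize();
    using namespace openvdb;

    // Arbitrary world-space positions standing in for a real point cloud.
    std::vector<Vec3f> positions = {
        {0.0f, 0.0f, 0.0f}, {0.1f, 0.2f, 0.3f}, {1.5f, 2.0f, 0.5f}, {5.0f, 5.0f, 5.0f}};
    points::PointAttributeVector<Vec3f> wrapper(positions);

    // Suggest a uniform voxel size that yields roughly two points per voxel.
    const float voxelSize = points::computeVoxelSize(wrapper, /*pointsPerVoxel=*/2);

    // Build the point index grid and convert it into a PointDataGrid.
    auto transform = math::Transform::createLinearTransform(voxelSize);
    auto pointIndexGrid =
        tools::createPointIndexGrid<tools::PointIndexGrid>(wrapper, *transform);
    auto grid = points::createPointDataGrid<points::NullCodec, points::PointDataGrid>(
        *pointIndexGrid, wrapper, *transform);

    std::cout << "voxel size " << voxelSize << ", "
              << points::pointCount(grid->tree()) << " points" << std::endl;
    return 0;
}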
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestPrePostAPI.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/math/Mat4.h> #include <openvdb/math/Maps.h> #include <openvdb/math/Transform.h> #include <openvdb/util/MapsUtil.h> class TestPrePostAPI: public ::testing::Test { }; TEST_F(TestPrePostAPI, testMat4) { using namespace openvdb::math; double TOL = 1e-7; Mat4d m = Mat4d::identity(); Mat4d minv = Mat4d::identity(); // create matrix with pre-API // Translate Shear Rotate Translate Scale matrix m.preScale(Vec3d(1, 2, 3)); m.preTranslate(Vec3d(2, 3, 4)); m.preRotate(X_AXIS, 20); m.preShear(X_AXIS, Y_AXIS, 2); m.preTranslate(Vec3d(2, 2, 2)); // create inverse using the post-API minv.postScale(Vec3d(1.f, 1.f/2.f, 1.f/3.f)); minv.postTranslate(-Vec3d(2, 3, 4)); minv.postRotate(X_AXIS,-20); minv.postShear(X_AXIS, Y_AXIS, -2); minv.postTranslate(-Vec3d(2, 2, 2)); Mat4d mtest = minv * m; // verify that the results is an identity EXPECT_NEAR(mtest[0][0], 1, TOL); EXPECT_NEAR(mtest[1][1], 1, TOL); EXPECT_NEAR(mtest[2][2], 1, TOL); EXPECT_NEAR(mtest[0][1], 0, TOL); EXPECT_NEAR(mtest[0][2], 0, TOL); EXPECT_NEAR(mtest[0][3], 0, TOL); EXPECT_NEAR(mtest[1][0], 0, TOL); EXPECT_NEAR(mtest[1][2], 0, TOL); EXPECT_NEAR(mtest[1][3], 0, TOL); EXPECT_NEAR(mtest[2][0], 0, TOL); EXPECT_NEAR(mtest[2][1], 0, TOL); EXPECT_NEAR(mtest[2][3], 0, TOL); EXPECT_NEAR(mtest[3][0], 0, TOL); EXPECT_NEAR(mtest[3][1], 0, TOL); EXPECT_NEAR(mtest[3][2], 0, TOL); EXPECT_NEAR(mtest[3][3], 1, TOL); } TEST_F(TestPrePostAPI, testMat4Rotate) { using namespace openvdb::math; double TOL = 1e-7; Mat4d rx, ry, rz; const double angle1 = 20. * M_PI / 180.; const double angle2 = 64. * M_PI / 180.; const double angle3 = 125. *M_PI / 180.; rx.setToRotation(Vec3d(1,0,0), angle1); ry.setToRotation(Vec3d(0,1,0), angle2); rz.setToRotation(Vec3d(0,0,1), angle3); Mat4d shear = Mat4d::identity(); shear.setToShear(X_AXIS, Z_AXIS, 2.0); shear.preShear(Y_AXIS, X_AXIS, 3.0); shear.preTranslate(Vec3d(2,4,1)); const Mat4d preResult = rz*ry*rx*shear; Mat4d mpre = shear; mpre.preRotate(X_AXIS, angle1); mpre.preRotate(Y_AXIS, angle2); mpre.preRotate(Z_AXIS, angle3); EXPECT_TRUE( mpre.eq(preResult, TOL) ); const Mat4d postResult = shear*rx*ry*rz; Mat4d mpost = shear; mpost.postRotate(X_AXIS, angle1); mpost.postRotate(Y_AXIS, angle2); mpost.postRotate(Z_AXIS, angle3); EXPECT_TRUE( mpost.eq(postResult, TOL) ); EXPECT_TRUE( !mpost.eq(mpre, TOL)); } TEST_F(TestPrePostAPI, testMat4Scale) { using namespace openvdb::math; double TOL = 1e-7; Mat4d mpre, mpost; double* pre = mpre.asPointer(); double* post = mpost.asPointer(); for (int i = 0; i < 16; ++i) { pre[i] = double(i); post[i] = double(i); } Mat4d scale = Mat4d::identity(); scale.setToScale(Vec3d(2, 3, 5.5)); Mat4d preResult = scale * mpre; Mat4d postResult = mpost * scale; mpre.preScale(Vec3d(2, 3, 5.5)); mpost.postScale(Vec3d(2, 3, 5.5)); EXPECT_TRUE( mpre.eq(preResult, TOL) ); EXPECT_TRUE( mpost.eq(postResult, TOL) ); } TEST_F(TestPrePostAPI, testMat4Shear) { using namespace openvdb::math; double TOL = 1e-7; Mat4d mpre, mpost; double* pre = mpre.asPointer(); double* post = mpost.asPointer(); for (int i = 0; i < 16; ++i) { pre[i] = double(i); post[i] = double(i); } Mat4d shear = Mat4d::identity(); shear.setToShear(X_AXIS, Z_AXIS, 13.); Mat4d preResult = shear * mpre; Mat4d postResult = mpost * shear; mpre.preShear(X_AXIS, Z_AXIS, 13.); mpost.postShear(X_AXIS, Z_AXIS, 13.); EXPECT_TRUE( mpre.eq(preResult, TOL) ); EXPECT_TRUE( mpost.eq(postResult, TOL) ); } 
TEST_F(TestPrePostAPI, testMaps) { using namespace openvdb::math; double TOL = 1e-7; { // pre translate UniformScaleMap usm; UniformScaleTranslateMap ustm; ScaleMap sm; ScaleTranslateMap stm; AffineMap am; const Vec3d trans(1,2,3); Mat4d correct = Mat4d::identity(); correct.preTranslate(trans); { MapBase::Ptr base = usm.preTranslate(trans); Mat4d result = (base->getAffineMap())->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = ustm.preTranslate(trans)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = sm.preTranslate(trans)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = stm.preTranslate(trans)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = am.preTranslate(trans)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } } { // post translate UniformScaleMap usm; UniformScaleTranslateMap ustm; ScaleMap sm; ScaleTranslateMap stm; AffineMap am; const Vec3d trans(1,2,3); Mat4d correct = Mat4d::identity(); correct.postTranslate(trans); { const Mat4d result = usm.postTranslate(trans)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = ustm.postTranslate(trans)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = sm.postTranslate(trans)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = stm.postTranslate(trans)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = am.postTranslate(trans)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } } { // pre scale UniformScaleMap usm; UniformScaleTranslateMap ustm; ScaleMap sm; ScaleTranslateMap stm; AffineMap am; const Vec3d scale(1,2,3); Mat4d correct = Mat4d::identity(); correct.preScale(scale); { const Mat4d result = usm.preScale(scale)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = ustm.preScale(scale)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = sm.preScale(scale)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = stm.preScale(scale)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = am.preScale(scale)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } } { // post scale UniformScaleMap usm; UniformScaleTranslateMap ustm; ScaleMap sm; ScaleTranslateMap stm; AffineMap am; const Vec3d scale(1,2,3); Mat4d correct = Mat4d::identity(); correct.postScale(scale); { const Mat4d result = usm.postScale(scale)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = ustm.postScale(scale)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = sm.postScale(scale)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = stm.postScale(scale)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = am.postScale(scale)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } } { // pre shear UniformScaleMap usm; UniformScaleTranslateMap ustm; ScaleMap sm; ScaleTranslateMap stm; AffineMap am; Mat4d correct = Mat4d::identity(); correct.preShear(X_AXIS, Z_AXIS, 13.); { const Mat4d result = 
usm.preShear(13., X_AXIS, Z_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = ustm.preShear(13., X_AXIS, Z_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = sm.preShear(13., X_AXIS, Z_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = stm.preShear(13., X_AXIS, Z_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = am.preShear(13., X_AXIS, Z_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } } { // post shear UniformScaleMap usm; UniformScaleTranslateMap ustm; ScaleMap sm; ScaleTranslateMap stm; AffineMap am; Mat4d correct = Mat4d::identity(); correct.postShear(X_AXIS, Z_AXIS, 13.); { const Mat4d result = usm.postShear(13., X_AXIS, Z_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = ustm.postShear(13., X_AXIS, Z_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = sm.postShear(13., X_AXIS, Z_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = stm.postShear(13., X_AXIS, Z_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = am.postShear(13., X_AXIS, Z_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } } { // pre rotate const double angle1 = 20. * M_PI / 180.; UniformScaleMap usm; UniformScaleTranslateMap ustm; ScaleMap sm; ScaleTranslateMap stm; AffineMap am; Mat4d correct = Mat4d::identity(); correct.preRotate(X_AXIS, angle1); { const Mat4d result = usm.preRotate(angle1, X_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = ustm.preRotate(angle1, X_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = sm.preRotate(angle1, X_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = stm.preRotate(angle1, X_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = am.preRotate(angle1, X_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } } { // post rotate const double angle1 = 20. 
* M_PI / 180.; UniformScaleMap usm; UniformScaleTranslateMap ustm; ScaleMap sm; ScaleTranslateMap stm; AffineMap am; Mat4d correct = Mat4d::identity(); correct.postRotate(X_AXIS, angle1); { const Mat4d result = usm.postRotate(angle1, X_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = ustm.postRotate(angle1, X_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = sm.postRotate(angle1, X_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = stm.postRotate(angle1, X_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } { const Mat4d result = am.postRotate(angle1, X_AXIS)->getAffineMap()->getConstMat4(); EXPECT_TRUE( correct.eq(result, TOL)); } } } TEST_F(TestPrePostAPI, testLinearTransform) { using namespace openvdb::math; double TOL = 1e-7; { Transform::Ptr t = Transform::createLinearTransform(1.f); Transform::Ptr tinv = Transform::createLinearTransform(1.f); // create matrix with pre-API // Translate Shear Rotate Translate Scale matrix t->preScale(Vec3d(1, 2, 3)); t->preTranslate(Vec3d(2, 3, 4)); t->preRotate(20); t->preShear(2, X_AXIS, Y_AXIS); t->preTranslate(Vec3d(2, 2, 2)); // create inverse using the post-API tinv->postScale(Vec3d(1.f, 1.f/2.f, 1.f/3.f)); tinv->postTranslate(-Vec3d(2, 3, 4)); tinv->postRotate(-20); tinv->postShear(-2, X_AXIS, Y_AXIS); tinv->postTranslate(-Vec3d(2, 2, 2)); // test this by verifying that equvilent interal matrix // represenations are inverses Mat4d m = t->baseMap()->getAffineMap()->getMat4(); Mat4d minv = tinv->baseMap()->getAffineMap()->getMat4(); Mat4d mtest = minv * m; // verify that the results is an identity EXPECT_NEAR(mtest[0][0], 1, TOL); EXPECT_NEAR(mtest[1][1], 1, TOL); EXPECT_NEAR(mtest[2][2], 1, TOL); EXPECT_NEAR(mtest[0][1], 0, TOL); EXPECT_NEAR(mtest[0][2], 0, TOL); EXPECT_NEAR(mtest[0][3], 0, TOL); EXPECT_NEAR(mtest[1][0], 0, TOL); EXPECT_NEAR(mtest[1][2], 0, TOL); EXPECT_NEAR(mtest[1][3], 0, TOL); EXPECT_NEAR(mtest[2][0], 0, TOL); EXPECT_NEAR(mtest[2][1], 0, TOL); EXPECT_NEAR(mtest[2][3], 0, TOL); EXPECT_NEAR(mtest[3][0], 0, TOL); EXPECT_NEAR(mtest[3][1], 0, TOL); EXPECT_NEAR(mtest[3][2], 0, TOL); EXPECT_NEAR(mtest[3][3], 1, TOL); } { Transform::Ptr t = Transform::createLinearTransform(1.f); Mat4d m = Mat4d::identity(); // create matrix with pre-API // Translate Shear Rotate Translate Scale matrix m.preScale(Vec3d(1, 2, 3)); m.preTranslate(Vec3d(2, 3, 4)); m.preRotate(X_AXIS, 20); m.preShear(X_AXIS, Y_AXIS, 2); m.preTranslate(Vec3d(2, 2, 2)); t->preScale(Vec3d(1,2,3)); t->preMult(m); t->postMult(m); Mat4d minv = Mat4d::identity(); // create inverse using the post-API minv.postScale(Vec3d(1.f, 1.f/2.f, 1.f/3.f)); minv.postTranslate(-Vec3d(2, 3, 4)); minv.postRotate(X_AXIS,-20); minv.postShear(X_AXIS, Y_AXIS, -2); minv.postTranslate(-Vec3d(2, 2, 2)); t->preMult(minv); t->postMult(minv); Mat4d mtest = t->baseMap()->getAffineMap()->getMat4(); // verify that the results is the scale EXPECT_NEAR(mtest[0][0], 1, TOL); EXPECT_NEAR(mtest[1][1], 2, TOL); EXPECT_NEAR(mtest[2][2], 3, 1e-6); EXPECT_NEAR(mtest[0][1], 0, TOL); EXPECT_NEAR(mtest[0][2], 0, TOL); EXPECT_NEAR(mtest[0][3], 0, TOL); EXPECT_NEAR(mtest[1][0], 0, TOL); EXPECT_NEAR(mtest[1][2], 0, TOL); EXPECT_NEAR(mtest[1][3], 0, TOL); EXPECT_NEAR(mtest[2][0], 0, TOL); EXPECT_NEAR(mtest[2][1], 0, TOL); EXPECT_NEAR(mtest[2][3], 0, TOL); EXPECT_NEAR(mtest[3][0], 0, 1e-6); EXPECT_NEAR(mtest[3][1], 0, 1e-6); 
EXPECT_NEAR(mtest[3][2], 0, TOL); EXPECT_NEAR(mtest[3][3], 1, TOL); } } TEST_F(TestPrePostAPI, testFrustumTransform) { using namespace openvdb::math; using BBoxd = BBox<Vec3d>; double TOL = 1e-7; { BBoxd bbox(Vec3d(-5,-5,0), Vec3d(5,5,10)); Transform::Ptr t = Transform::createFrustumTransform( bbox, /* taper*/ 1, /*depth*/10, /* voxel size */1.f); Transform::Ptr tinv = Transform::createFrustumTransform( bbox, /* taper*/ 1, /*depth*/10, /* voxel size */1.f); // create matrix with pre-API // Translate Shear Rotate Translate Scale matrix t->preScale(Vec3d(1, 2, 3)); t->preTranslate(Vec3d(2, 3, 4)); t->preRotate(20); t->preShear(2, X_AXIS, Y_AXIS); t->preTranslate(Vec3d(2, 2, 2)); // create inverse using the post-API tinv->postScale(Vec3d(1.f, 1.f/2.f, 1.f/3.f)); tinv->postTranslate(-Vec3d(2, 3, 4)); tinv->postRotate(-20); tinv->postShear(-2, X_AXIS, Y_AXIS); tinv->postTranslate(-Vec3d(2, 2, 2)); // test this by verifying that equvilent interal matrix // represenations are inverses NonlinearFrustumMap::Ptr frustum = openvdb::StaticPtrCast<NonlinearFrustumMap, MapBase>(t->baseMap()); NonlinearFrustumMap::Ptr frustuminv = openvdb::StaticPtrCast<NonlinearFrustumMap, MapBase>(tinv->baseMap()); Mat4d m = frustum->secondMap().getMat4(); Mat4d minv = frustuminv->secondMap().getMat4(); Mat4d mtest = minv * m; // verify that the results is an identity EXPECT_NEAR(mtest[0][0], 1, TOL); EXPECT_NEAR(mtest[1][1], 1, TOL); EXPECT_NEAR(mtest[2][2], 1, TOL); EXPECT_NEAR(mtest[0][1], 0, TOL); EXPECT_NEAR(mtest[0][2], 0, TOL); EXPECT_NEAR(mtest[0][3], 0, TOL); EXPECT_NEAR(mtest[1][0], 0, TOL); EXPECT_NEAR(mtest[1][2], 0, TOL); EXPECT_NEAR(mtest[1][3], 0, TOL); EXPECT_NEAR(mtest[2][0], 0, TOL); EXPECT_NEAR(mtest[2][1], 0, TOL); EXPECT_NEAR(mtest[2][3], 0, TOL); EXPECT_NEAR(mtest[3][0], 0, TOL); EXPECT_NEAR(mtest[3][1], 0, TOL); EXPECT_NEAR(mtest[3][2], 0, TOL); EXPECT_NEAR(mtest[3][3], 1, TOL); } { BBoxd bbox(Vec3d(-5,-5,0), Vec3d(5,5,10)); Transform::Ptr t = Transform::createFrustumTransform( bbox, /* taper*/ 1, /*depth*/10, /* voxel size */1.f); Mat4d m = Mat4d::identity(); // create matrix with pre-API // Translate Shear Rotate Translate Scale matrix m.preScale(Vec3d(1, 2, 3)); m.preTranslate(Vec3d(2, 3, 4)); m.preRotate(X_AXIS, 20); m.preShear(X_AXIS, Y_AXIS, 2); m.preTranslate(Vec3d(2, 2, 2)); t->preScale(Vec3d(1,2,3)); t->preMult(m); t->postMult(m); Mat4d minv = Mat4d::identity(); // create inverse using the post-API minv.postScale(Vec3d(1.f, 1.f/2.f, 1.f/3.f)); minv.postTranslate(-Vec3d(2, 3, 4)); minv.postRotate(X_AXIS,-20); minv.postShear(X_AXIS, Y_AXIS, -2); minv.postTranslate(-Vec3d(2, 2, 2)); t->preMult(minv); t->postMult(minv); NonlinearFrustumMap::Ptr frustum = openvdb::StaticPtrCast<NonlinearFrustumMap, MapBase>(t->baseMap()); Mat4d mtest = frustum->secondMap().getMat4(); // verify that the results is the scale EXPECT_NEAR(mtest[0][0], 1, TOL); EXPECT_NEAR(mtest[1][1], 2, TOL); EXPECT_NEAR(mtest[2][2], 3, 1e-6); EXPECT_NEAR(mtest[0][1], 0, TOL); EXPECT_NEAR(mtest[0][2], 0, TOL); EXPECT_NEAR(mtest[0][3], 0, TOL); EXPECT_NEAR(mtest[1][0], 0, TOL); EXPECT_NEAR(mtest[1][2], 0, TOL); EXPECT_NEAR(mtest[1][3], 0, TOL); EXPECT_NEAR(mtest[2][0], 0, TOL); EXPECT_NEAR(mtest[2][1], 0, TOL); EXPECT_NEAR(mtest[2][3], 0, TOL); EXPECT_NEAR(mtest[3][0], 0, 1e-6); EXPECT_NEAR(mtest[3][1], 0, 1e-6); EXPECT_NEAR(mtest[3][2], 0, TOL); EXPECT_NEAR(mtest[3][3], 1, TOL); } }
20,479
C++
30.171994
100
0.559744
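TestPrePostAPI verifies that operations composed with the pre-API can be undone by composing their inverses with the post-API in the same listed order (testMat4Scale shows that preScale left-multiplies while postScale right-multiplies). A minimal sketch of that identity for a scale and a translation is given below; the numeric values are arbitrary.

#include <openvdb/math/Mat4.h>
#include <openvdb/math/Vec3.h>
#include <iostream>

int main()
{
    using namespace openvdb::math;

    // Pre-API: each call left-multiplies, so the matrix becomes T * S.
    Mat4d m = Mat4d::identity();
    m.preScale(Vec3d(2.0, 2.0, 2.0));
    m.preTranslate(Vec3d(1.0, 0.0, 0.0));

    // Post-API with the inverse operations in the same listed order builds
    // S^-1 * T^-1, which is the inverse of the matrix above.
    Mat4d minv = Mat4d::identity();
    minv.postScale(Vec3d(0.5, 0.5, 0.5));
    minv.postTranslate(Vec3d(-1.0, 0.0, 0.0));

    std::cout << std::boolalpha
              << "identity: " << (minv * m).eq(Mat4d::identity(), 1e-7) << std::endl;
    return 0;
}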
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestAttributeArrayString.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/points/AttributeArrayString.h> #include <openvdb/util/CpuTimer.h> #include <openvdb/openvdb.h> #include <iostream> using namespace openvdb; using namespace openvdb::points; class TestAttributeArrayString: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; // class TestAttributeArrayString //////////////////////////////////////// namespace { bool matchingNamePairs(const openvdb::NamePair& lhs, const openvdb::NamePair& rhs) { if (lhs.first != rhs.first) return false; if (lhs.second != rhs.second) return false; return true; } } // namespace //////////////////////////////////////// TEST_F(TestAttributeArrayString, testStringMetaCache) { { // cache with manual insertion StringMetaCache cache; EXPECT_TRUE(cache.empty()); EXPECT_EQ(size_t(0), cache.size()); cache.insert("test", 1); EXPECT_TRUE(!cache.empty()); EXPECT_EQ(size_t(1), cache.size()); auto it = cache.map().find("test"); EXPECT_TRUE(it != cache.map().end()); } { // cache with metadata insertion and reset MetaMap metadata; StringMetaInserter inserter(metadata); inserter.insert("test1"); inserter.insert("test2"); StringMetaCache cache(metadata); EXPECT_TRUE(!cache.empty()); EXPECT_EQ(size_t(2), cache.size()); auto it = cache.map().find("test1"); EXPECT_TRUE(it != cache.map().end()); EXPECT_EQ(Name("test1"), it->first); EXPECT_EQ(Index(1), it->second); it = cache.map().find("test2"); EXPECT_TRUE(it != cache.map().end()); EXPECT_EQ(Name("test2"), it->first); EXPECT_EQ(Index(2), it->second); MetaMap metadata2; StringMetaInserter inserter2(metadata2); inserter2.insert("test3"); cache.reset(metadata2); EXPECT_EQ(size_t(1), cache.size()); it = cache.map().find("test3"); EXPECT_TRUE(it != cache.map().end()); } } TEST_F(TestAttributeArrayString, testStringMetaInserter) { MetaMap metadata; StringMetaInserter inserter(metadata); { // insert one value Index index = inserter.insert("test"); EXPECT_EQ(metadata.metaCount(), size_t(1)); EXPECT_EQ(Index(1), index); EXPECT_TRUE(inserter.hasIndex(1)); EXPECT_TRUE(inserter.hasKey("test")); StringMetadata::Ptr meta = metadata.getMetadata<StringMetadata>("string:0"); EXPECT_TRUE(meta); EXPECT_EQ(meta->value(), openvdb::Name("test")); } { // insert another value Index index = inserter.insert("test2"); EXPECT_EQ(metadata.metaCount(), size_t(2)); EXPECT_EQ(Index(2), index); EXPECT_TRUE(inserter.hasIndex(1)); EXPECT_TRUE(inserter.hasKey("test")); EXPECT_TRUE(inserter.hasIndex(2)); EXPECT_TRUE(inserter.hasKey("test2")); StringMetadata::Ptr meta = metadata.getMetadata<StringMetadata>("string:0"); EXPECT_TRUE(meta); EXPECT_EQ(meta->value(), openvdb::Name("test")); meta = metadata.getMetadata<StringMetadata>("string:1"); EXPECT_TRUE(meta); EXPECT_EQ(meta->value(), openvdb::Name("test2")); } // remove a value and reset the cache metadata.removeMeta("string:1"); inserter.resetCache(); { // re-insert value Index index = inserter.insert("test3"); EXPECT_EQ(metadata.metaCount(), size_t(2)); EXPECT_EQ(Index(2), index); StringMetadata::Ptr meta = metadata.getMetadata<StringMetadata>("string:0"); EXPECT_TRUE(meta); EXPECT_EQ(meta->value(), openvdb::Name("test")); meta = metadata.getMetadata<StringMetadata>("string:1"); EXPECT_TRUE(meta); EXPECT_EQ(meta->value(), openvdb::Name("test3")); } { // insert and remove to create a gap Index index = inserter.insert("test4"); EXPECT_EQ(metadata.metaCount(), size_t(3)); 
EXPECT_EQ(Index(3), index); metadata.removeMeta("string:1"); inserter.resetCache(); EXPECT_EQ(metadata.metaCount(), size_t(2)); StringMetadata::Ptr meta = metadata.getMetadata<StringMetadata>("string:0"); EXPECT_TRUE(meta); EXPECT_EQ(meta->value(), openvdb::Name("test")); meta = metadata.getMetadata<StringMetadata>("string:2"); EXPECT_TRUE(meta); EXPECT_EQ(meta->value(), openvdb::Name("test4")); } { // insert to fill gap Index index = inserter.insert("test10"); EXPECT_EQ(metadata.metaCount(), size_t(3)); EXPECT_EQ(Index(2), index); StringMetadata::Ptr meta = metadata.getMetadata<StringMetadata>("string:0"); EXPECT_TRUE(meta); EXPECT_EQ(meta->value(), openvdb::Name("test")); meta = metadata.getMetadata<StringMetadata>("string:1"); EXPECT_TRUE(meta); EXPECT_EQ(meta->value(), openvdb::Name("test10")); meta = metadata.getMetadata<StringMetadata>("string:2"); EXPECT_TRUE(meta); EXPECT_EQ(meta->value(), openvdb::Name("test4")); } { // insert existing value EXPECT_EQ(metadata.metaCount(), size_t(3)); Index index = inserter.insert("test10"); EXPECT_EQ(metadata.metaCount(), size_t(3)); EXPECT_EQ(Index(2), index); } metadata.removeMeta("string:0"); metadata.removeMeta("string:2"); inserter.resetCache(); { // insert other value and string metadata metadata.insertMeta("int:1", Int32Metadata(5)); metadata.insertMeta("irrelevant", StringMetadata("irrelevant")); inserter.resetCache(); EXPECT_EQ(metadata.metaCount(), size_t(3)); Index index = inserter.insert("test15"); EXPECT_EQ(metadata.metaCount(), size_t(4)); EXPECT_EQ(Index(1), index); StringMetadata::Ptr meta = metadata.getMetadata<StringMetadata>("string:0"); EXPECT_TRUE(meta); EXPECT_EQ(meta->value(), openvdb::Name("test15")); meta = metadata.getMetadata<StringMetadata>("string:1"); EXPECT_TRUE(meta); EXPECT_EQ(meta->value(), openvdb::Name("test10")); } { // insert using a hint Index index = inserter.insert("test1000", 1000); EXPECT_EQ(metadata.metaCount(), size_t(5)); EXPECT_EQ(Index(1000), index); StringMetadata::Ptr meta = metadata.getMetadata<StringMetadata>("string:999"); EXPECT_TRUE(meta); EXPECT_EQ(meta->value(), openvdb::Name("test1000")); } { // insert using same hint (fail to use hint this time) Index index = inserter.insert("test1001", 1000); EXPECT_EQ(metadata.metaCount(), size_t(6)); EXPECT_EQ(Index(3), index); StringMetadata::Ptr meta = metadata.getMetadata<StringMetadata>("string:2"); EXPECT_TRUE(meta); EXPECT_EQ(meta->value(), openvdb::Name("test1001")); } { // insert using next adjacent hint Index index = inserter.insert("test1002", 1001); EXPECT_EQ(metadata.metaCount(), size_t(7)); EXPECT_EQ(Index(1001), index); StringMetadata::Ptr meta = metadata.getMetadata<StringMetadata>("string:1000"); EXPECT_TRUE(meta); EXPECT_EQ(meta->value(), openvdb::Name("test1002")); } { // insert using previous adjacent hint Index index = inserter.insert("test999", 999); EXPECT_EQ(metadata.metaCount(), size_t(8)); EXPECT_EQ(Index(999), index); StringMetadata::Ptr meta = metadata.getMetadata<StringMetadata>("string:998"); EXPECT_TRUE(meta); EXPECT_EQ(meta->value(), openvdb::Name("test999")); } } TEST_F(TestAttributeArrayString, testStringAttribute) { { // Typed class API const Index count = 50; StringAttributeArray attr(count); EXPECT_TRUE(!attr.isTransient()); EXPECT_TRUE(!attr.isHidden()); EXPECT_TRUE(isString(attr)); attr.setTransient(true); EXPECT_TRUE(attr.isTransient()); EXPECT_TRUE(!attr.isHidden()); EXPECT_TRUE(isString(attr)); attr.setHidden(true); EXPECT_TRUE(attr.isTransient()); EXPECT_TRUE(attr.isHidden()); EXPECT_TRUE(isString(attr)); 
attr.setTransient(false); EXPECT_TRUE(!attr.isTransient()); EXPECT_TRUE(attr.isHidden()); EXPECT_TRUE(isString(attr)); StringAttributeArray attrB(attr); EXPECT_TRUE(matchingNamePairs(attr.type(), attrB.type())); EXPECT_EQ(attr.size(), attrB.size()); EXPECT_EQ(attr.memUsage(), attrB.memUsage()); EXPECT_EQ(attr.isUniform(), attrB.isUniform()); EXPECT_EQ(attr.isTransient(), attrB.isTransient()); EXPECT_EQ(attr.isHidden(), attrB.isHidden()); EXPECT_EQ(isString(attr), isString(attrB)); #if OPENVDB_ABI_VERSION_NUMBER >= 6 AttributeArray& baseAttr(attr); EXPECT_EQ(Name(typeNameAsString<Index>()), baseAttr.valueType()); EXPECT_EQ(Name("str"), baseAttr.codecType()); EXPECT_EQ(Index(4), baseAttr.valueTypeSize()); EXPECT_EQ(Index(4), baseAttr.storageTypeSize()); EXPECT_TRUE(!baseAttr.valueTypeIsFloatingPoint()); #endif } { // IO const Index count = 50; StringAttributeArray attrA(count); for (unsigned i = 0; i < unsigned(count); ++i) { attrA.set(i, int(i)); } attrA.setHidden(true); std::ostringstream ostr(std::ios_base::binary); attrA.write(ostr); StringAttributeArray attrB; std::istringstream istr(ostr.str(), std::ios_base::binary); attrB.read(istr); EXPECT_TRUE(matchingNamePairs(attrA.type(), attrB.type())); EXPECT_EQ(attrA.size(), attrB.size()); EXPECT_EQ(attrA.memUsage(), attrB.memUsage()); EXPECT_EQ(attrA.isUniform(), attrB.isUniform()); EXPECT_EQ(attrA.isTransient(), attrB.isTransient()); EXPECT_EQ(attrA.isHidden(), attrB.isHidden()); EXPECT_EQ(isString(attrA), isString(attrB)); for (unsigned i = 0; i < unsigned(count); ++i) { EXPECT_EQ(attrA.get(i), attrB.get(i)); } } } TEST_F(TestAttributeArrayString, testStringAttributeHandle) { MetaMap metadata; StringAttributeArray attr(4); StringAttributeHandle handle(attr, metadata); EXPECT_EQ(handle.size(), Index(4)); EXPECT_EQ(handle.size(), attr.size()); EXPECT_EQ(Index(1), handle.stride()); EXPECT_TRUE(handle.hasConstantStride()); { // index 0 should always be an empty string Name value = handle.get(0); EXPECT_EQ(value, Name("")); } // set first element to 101 EXPECT_TRUE(handle.isUniform()); attr.set(2, 102); EXPECT_TRUE(!handle.isUniform()); { // index 101 does not exist as metadata is empty EXPECT_EQ(handle.get(0), Name("")); EXPECT_THROW(handle.get(2), LookupError); } { // add an element to the metadata for 101 metadata.insertMeta("string:101", StringMetadata("test101")); EXPECT_EQ(handle.get(0), Name("")); EXPECT_NO_THROW(handle.get(2)); EXPECT_EQ(handle.get(2), Name("test101")); Name name; handle.get(name, 2); EXPECT_EQ(name, Name("test101")); } { // add a second element to the metadata metadata.insertMeta("string:102", StringMetadata("test102")); EXPECT_EQ(handle.get(0), Name("")); EXPECT_NO_THROW(handle.get(2)); EXPECT_EQ(handle.get(2), Name("test101")); Name name; handle.get(name, 2); EXPECT_EQ(name, Name("test101")); } { // set two more values in the array attr.set(0, 103); attr.set(1, 103); EXPECT_EQ(handle.get(0), Name("test102")); EXPECT_EQ(handle.get(1), Name("test102")); EXPECT_EQ(handle.get(2), Name("test101")); EXPECT_EQ(handle.get(3), Name("")); } { // change a value attr.set(1, 102); EXPECT_EQ(handle.get(0), Name("test102")); EXPECT_EQ(handle.get(1), Name("test101")); EXPECT_EQ(handle.get(2), Name("test101")); EXPECT_EQ(handle.get(3), Name("")); } { // cannot use a StringAttributeHandle with a non-string attribute TypedAttributeArray<float> invalidAttr(50); EXPECT_THROW(StringAttributeHandle(invalidAttr, metadata), TypeError); } // Test stride and hasConstantStride methods for string handles { StringAttributeArray attr(3, 2, true); 
StringAttributeHandle handle(attr, metadata); EXPECT_EQ(Index(3), handle.size()); EXPECT_EQ(handle.size(), attr.size()); EXPECT_EQ(Index(2), handle.stride()); EXPECT_TRUE(handle.hasConstantStride()); } { StringAttributeArray attr(4, 10, false); StringAttributeHandle handle(attr, metadata); EXPECT_EQ(Index(10), handle.size()); EXPECT_EQ(Index(4), attr.size()); EXPECT_EQ(Index(1), handle.stride()); EXPECT_TRUE(!handle.hasConstantStride()); } } TEST_F(TestAttributeArrayString, testStringAttributeWriteHandle) { MetaMap metadata; StringAttributeArray attr(4); StringAttributeWriteHandle handle(attr, metadata); { // add some values to metadata metadata.insertMeta("string:45", StringMetadata("testA")); metadata.insertMeta("string:90", StringMetadata("testB")); metadata.insertMeta("string:1000", StringMetadata("testC")); } { // no string values set EXPECT_EQ(handle.get(0), Name("")); EXPECT_EQ(handle.get(1), Name("")); EXPECT_EQ(handle.get(2), Name("")); EXPECT_EQ(handle.get(3), Name("")); } { // cache not reset since metadata changed EXPECT_THROW(handle.set(1, "testB"), LookupError); } { // empty string always has index 0 EXPECT_TRUE(handle.contains("")); } { // cache won't contain metadata until it has been reset EXPECT_TRUE(!handle.contains("testA")); EXPECT_TRUE(!handle.contains("testB")); EXPECT_TRUE(!handle.contains("testC")); } handle.resetCache(); { // empty string always has index 0 regardless of cache reset EXPECT_TRUE(handle.contains("")); } { // cache now reset EXPECT_TRUE(handle.contains("testA")); EXPECT_TRUE(handle.contains("testB")); EXPECT_TRUE(handle.contains("testC")); EXPECT_NO_THROW(handle.set(1, "testB")); EXPECT_EQ(handle.get(0), Name("")); EXPECT_EQ(handle.get(1), Name("testB")); EXPECT_EQ(handle.get(2), Name("")); EXPECT_EQ(handle.get(3), Name("")); } { // add another value handle.set(2, "testC"); EXPECT_EQ(handle.get(0), Name("")); EXPECT_EQ(handle.get(1), Name("testB")); EXPECT_EQ(handle.get(2), Name("testC")); EXPECT_EQ(handle.get(3), Name("")); } handle.resetCache(); { // compact tests EXPECT_TRUE(!handle.compact()); handle.set(0, "testA"); handle.set(1, "testA"); handle.set(2, "testA"); handle.set(3, "testA"); EXPECT_TRUE(handle.compact()); EXPECT_TRUE(handle.isUniform()); } { // expand tests EXPECT_TRUE(handle.isUniform()); handle.expand(); EXPECT_TRUE(!handle.isUniform()); EXPECT_EQ(handle.get(0), Name("testA")); EXPECT_EQ(handle.get(1), Name("testA")); EXPECT_EQ(handle.get(2), Name("testA")); EXPECT_EQ(handle.get(3), Name("testA")); } { // fill tests EXPECT_TRUE(!handle.isUniform()); handle.set(3, "testB"); handle.fill("testC"); EXPECT_TRUE(!handle.isUniform()); EXPECT_EQ(handle.get(0), Name("testC")); EXPECT_EQ(handle.get(1), Name("testC")); EXPECT_EQ(handle.get(2), Name("testC")); EXPECT_EQ(handle.get(3), Name("testC")); } { // collapse tests handle.set(2, "testB"); handle.collapse("testA"); EXPECT_TRUE(handle.isUniform()); EXPECT_EQ(handle.get(0), Name("testA")); handle.expand(); handle.set(2, "testB"); EXPECT_TRUE(!handle.isUniform()); handle.collapse(); EXPECT_EQ(handle.get(0), Name("")); } { // empty string tests handle.collapse(""); EXPECT_EQ(handle.get(0), Name("")); } } TEST_F(TestAttributeArrayString, testProfile) { #ifdef PROFILE struct Timer : public openvdb::util::CpuTimer {}; const size_t elements = 1000000; #else struct Timer { void start(const std::string&) {} void stop() {} }; const size_t elements = 10000; #endif MetaMap metadata; StringMetaInserter inserter(metadata); Timer timer; timer.start("StringMetaInserter initialise"); for (size_t i = 0; i 
< elements; ++i) { inserter.insert("test_string_" + std::to_string(i)); } timer.stop(); for (size_t i = 0; i < elements/2; ++i) { metadata.removeMeta("test_string_" + std::to_string(i*2)); } timer.start("StringMetaInserter resetCache()"); inserter.resetCache(); timer.stop(); timer.start("StringMetaInserter insert duplicates"); for (size_t i = 0; i < elements; ++i) { inserter.insert("test_string_" + std::to_string(i)); } timer.stop(); openvdb::points::StringAttributeArray attr(elements); for (size_t i = 0; i < elements; ++i) { attr.set(Index(i), Index(i)); } timer.start("StringAttributeWriteHandle construction"); openvdb::points::StringAttributeWriteHandle handle(attr, metadata); timer.stop(); timer.start("StringAttributeWriteHandle contains()"); // half the calls will miss caches volatile bool result = false; for (size_t i = 0; i < elements/2; ++i) { result |= handle.contains("test_string_" + std::to_string(i*4)); } timer.stop(); }
18,155
C++
29.26
87
0.595594
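TestAttributeArrayString covers string attributes, whose values are stored once in a MetaMap while the array itself holds indices. Below is a small usage sketch assuming the same API surface as the tests (StringMetaInserter, StringAttributeArray, StringAttributeWriteHandle); the names "red" and "green" and the array size are made up, and the explicit resetCache() call is defensive.

#include <openvdb/openvdb.h>
#include <openvdb/points/AttributeArrayString.h>
#include <iostream>

int main()
{
    openvdb::initialize();
    using namespace openvdb::points;

    // Register the string values in the metadata; the array stores indices.
    openvdb::MetaMap metadata;
    StringMetaInserter inserter(metadata);
    inserter.insert("red");
    inserter.insert("green");

    StringAttributeArray attr(/*size=*/4);
    StringAttributeWriteHandle handle(attr, metadata);
    handle.resetCache();   // make sure the name-to-index cache matches the metadata
    handle.set(0, "red");
    handle.set(2, "green");

    // Unset entries map to index 0, which is always the empty string.
    for (int i = 0; i < 4; ++i) {
        std::cout << i << ": \"" << handle.get(i) << "\"" << std::endl;
    }
    return 0;
}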
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestTopologyToLevelSet.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#include "gtest/gtest.h"

#include <openvdb/tools/TopologyToLevelSet.h>


class TopologyToLevelSet: public ::testing::Test
{
};


TEST_F(TopologyToLevelSet, testConversion)
{
    typedef openvdb::tree::Tree4<bool, 5, 4, 3>::Type   Tree543b;
    typedef openvdb::Grid<Tree543b>                      BoolGrid;

    typedef openvdb::tree::Tree4<float, 5, 4, 3>::Type   Tree543f;
    typedef openvdb::Grid<Tree543f>                      FloatGrid;

    /////

    const float voxelSize = 0.1f;
    const openvdb::math::Transform::Ptr transform =
        openvdb::math::Transform::createLinearTransform(voxelSize);

    BoolGrid maskGrid(false);
    maskGrid.setTransform(transform);

    // Define active region
    maskGrid.fill(openvdb::CoordBBox(openvdb::Coord(0), openvdb::Coord(7)), true);
    maskGrid.tree().voxelizeActiveTiles();

    FloatGrid::Ptr sdfGrid = openvdb::tools::topologyToLevelSet(maskGrid);

    EXPECT_TRUE(sdfGrid.get() != NULL);
    EXPECT_TRUE(!sdfGrid->empty());
    EXPECT_EQ(int(openvdb::GRID_LEVEL_SET), int(sdfGrid->getGridClass()));

    // test inside coord value
    EXPECT_TRUE(sdfGrid->tree().getValue(openvdb::Coord(3,3,3)) < 0.0f);

    // test outside coord value
    EXPECT_TRUE(sdfGrid->tree().getValue(openvdb::Coord(10,10,10)) > 0.0f);
}
1,364
C++
28.673912
82
0.668622
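TestTopologyToLevelSet converts the active topology of a boolean mask grid into a signed distance field. The sketch below mirrors the test but uses the stock openvdb::BoolGrid typedef instead of the test's hand-rolled Tree4 aliases; the voxel size, box extents and sample coordinates are arbitrary.

#include <openvdb/openvdb.h>
#include <openvdb/tools/TopologyToLevelSet.h>
#include <iostream>

int main()
{
    openvdb::initialize();

    // Mark a small box of voxels active in a bool grid.
    openvdb::BoolGrid::Ptr mask = openvdb::BoolGrid::create(false);
    mask->setTransform(openvdb::math::Transform::createLinearTransform(/*voxelSize=*/0.1));
    mask->fill(openvdb::CoordBBox(openvdb::Coord(0), openvdb::Coord(7)), true);
    mask->tree().voxelizeActiveTiles();

    // Convert the active topology into a signed distance field.
    openvdb::FloatGrid::Ptr sdf = openvdb::tools::topologyToLevelSet(*mask);

    std::cout << "inside value:  " << sdf->tree().getValue(openvdb::Coord(3, 3, 3)) << "\n"
              << "outside value: " << sdf->tree().getValue(openvdb::Coord(20, 20, 20)) << "\n";
    return 0;
}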
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestRay.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/openvdb.h> #include <openvdb/math/Ray.h> #include <openvdb/math/DDA.h> #include <openvdb/math/BBox.h> #include <openvdb/Types.h> #include <openvdb/math/Transform.h> #include <openvdb/tools/LevelSetSphere.h> #define ASSERT_DOUBLES_EXACTLY_EQUAL(expected, actual) \ EXPECT_NEAR((expected), (actual), /*tolerance=*/0.0); #define ASSERT_DOUBLES_APPROX_EQUAL(expected, actual) \ EXPECT_NEAR((expected), (actual), /*tolerance=*/1.e-6); class TestRay : public ::testing::Test { }; // the Ray class makes use of infinity=1/0 so we test for it TEST_F(TestRay, testInfinity) { // This code generates compiler warnings which is why it's not // enabled by default. /* const double one=1, zero = 0, infinity = one / zero; EXPECT_NEAR( infinity , infinity,0);//not a NAN EXPECT_NEAR( infinity , infinity+1,0);//not a NAN EXPECT_NEAR( infinity , infinity*10,0);//not a NAN EXPECT_TRUE( zero < infinity); EXPECT_TRUE( zero > -infinity); EXPECT_NEAR( zero , one/infinity,0); EXPECT_NEAR( zero , -one/infinity,0); EXPECT_NEAR( infinity , one/zero,0); EXPECT_NEAR(-infinity , -one/zero,0); std::cerr << "inf: " << infinity << "\n"; std::cerr << "1 / inf: " << one / infinity << "\n"; std::cerr << "1 / (-inf): " << one / (-infinity) << "\n"; std::cerr << " inf * 0: " << infinity * 0 << "\n"; std::cerr << "-inf * 0: " << (-infinity) * 0 << "\n"; std::cerr << "(inf): " << (bool)(infinity) << "\n"; std::cerr << "inf == inf: " << (infinity == infinity) << "\n"; std::cerr << "inf > 0: " << (infinity > 0) << "\n"; std::cerr << "-inf > 0: " << ((-infinity) > 0) << "\n"; */ } TEST_F(TestRay, testRay) { using namespace openvdb; typedef double RealT; typedef math::Ray<RealT> RayT; typedef RayT::Vec3T Vec3T; typedef math::BBox<Vec3T> BBoxT; {//default constructor RayT ray; EXPECT_TRUE(ray.eye() == Vec3T(0,0,0)); EXPECT_TRUE(ray.dir() == Vec3T(1,0,0)); ASSERT_DOUBLES_APPROX_EQUAL( math::Delta<RealT>::value(), ray.t0()); ASSERT_DOUBLES_APPROX_EQUAL( std::numeric_limits<RealT>::max(), ray.t1()); } {// simple construction Vec3T eye(1.5,1.5,1.5), dir(1.5,1.5,1.5); dir.normalize(); RealT t0=0.1, t1=12589.0; RayT ray(eye, dir, t0, t1); EXPECT_TRUE(ray.eye()==eye); EXPECT_TRUE(ray.dir()==dir); ASSERT_DOUBLES_APPROX_EQUAL( t0, ray.t0()); ASSERT_DOUBLES_APPROX_EQUAL( t1, ray.t1()); } {// test transformation math::Transform::Ptr xform = math::Transform::createLinearTransform(); xform->preRotate(M_PI, math::Y_AXIS ); xform->postTranslate(math::Vec3d(1, 2, 3)); xform->preScale(Vec3R(0.1, 0.2, 0.4)); Vec3T eye(9,1,1), dir(1,2,0); dir.normalize(); RealT t0=0.1, t1=12589.0; RayT ray0(eye, dir, t0, t1); EXPECT_TRUE( ray0.test(t0)); EXPECT_TRUE( ray0.test(t1)); EXPECT_TRUE( ray0.test(0.5*(t0+t1))); EXPECT_TRUE(!ray0.test(t0-1)); EXPECT_TRUE(!ray0.test(t1+1)); //std::cerr << "Ray0: " << ray0 << std::endl; RayT ray1 = ray0.applyMap( *(xform->baseMap()) ); //std::cerr << "Ray1: " << ray1 << std::endl; RayT ray2 = ray1.applyInverseMap( *(xform->baseMap()) ); //std::cerr << "Ray2: " << ray2 << std::endl; ASSERT_DOUBLES_APPROX_EQUAL( eye[0], ray2.eye()[0]); ASSERT_DOUBLES_APPROX_EQUAL( eye[1], ray2.eye()[1]); ASSERT_DOUBLES_APPROX_EQUAL( eye[2], ray2.eye()[2]); ASSERT_DOUBLES_APPROX_EQUAL( dir[0], ray2.dir()[0]); ASSERT_DOUBLES_APPROX_EQUAL( dir[1], ray2.dir()[1]); ASSERT_DOUBLES_APPROX_EQUAL( dir[2], ray2.dir()[2]); ASSERT_DOUBLES_APPROX_EQUAL( dir[0], 1.0/ray2.invDir()[0]); ASSERT_DOUBLES_APPROX_EQUAL( 
dir[1], 1.0/ray2.invDir()[1]); ASSERT_DOUBLES_APPROX_EQUAL( dir[2], 1.0/ray2.invDir()[2]); ASSERT_DOUBLES_APPROX_EQUAL( t0, ray2.t0()); ASSERT_DOUBLES_APPROX_EQUAL( t1, ray2.t1()); } {// test transformation // This is the index to world transform math::Transform::Ptr xform = math::Transform::createLinearTransform(); xform->postRotate(M_PI, math::Y_AXIS ); xform->postTranslate(math::Vec3d(1, 2, 3)); xform->postScale(Vec3R(0.1, 0.1, 0.1));//voxel size // Define a ray in world space Vec3T eye(9,1,1), dir(1,2,0); dir.normalize(); RealT t0=0.1, t1=12589.0; RayT ray0(eye, dir, t0, t1); //std::cerr << "\nWorld Ray0: " << ray0 << std::endl; EXPECT_TRUE( ray0.test(t0)); EXPECT_TRUE( ray0.test(t1)); EXPECT_TRUE( ray0.test(0.5*(t0+t1))); EXPECT_TRUE(!ray0.test(t0-1)); EXPECT_TRUE(!ray0.test(t1+1)); Vec3T xyz0[3] = {ray0.start(), ray0.mid(), ray0.end()}; // Transform the ray to index space RayT ray1 = ray0.applyInverseMap( *(xform->baseMap()) ); //std::cerr << "\nIndex Ray1: " << ray1 << std::endl; Vec3T xyz1[3] = {ray1.start(), ray1.mid(), ray1.end()}; for (int i=0; i<3; ++i) { Vec3T pos = xform->baseMap()->applyMap(xyz1[i]); //std::cerr << "world0 ="<<xyz0[i] << " transformed index="<< pos << std::endl; for (int j=0; j<3; ++j) ASSERT_DOUBLES_APPROX_EQUAL(xyz0[i][j], pos[j]); } // Transform the ray back to world space RayT ray2 = ray1.applyMap( *(xform->baseMap()) ); //std::cerr << "\nWorld Ray2: " << ray2 << std::endl; ASSERT_DOUBLES_APPROX_EQUAL( eye[0], ray2.eye()[0]); ASSERT_DOUBLES_APPROX_EQUAL( eye[1], ray2.eye()[1]); ASSERT_DOUBLES_APPROX_EQUAL( eye[2], ray2.eye()[2]); ASSERT_DOUBLES_APPROX_EQUAL( dir[0], ray2.dir()[0]); ASSERT_DOUBLES_APPROX_EQUAL( dir[1], ray2.dir()[1]); ASSERT_DOUBLES_APPROX_EQUAL( dir[2], ray2.dir()[2]); ASSERT_DOUBLES_APPROX_EQUAL( dir[0], 1.0/ray2.invDir()[0]); ASSERT_DOUBLES_APPROX_EQUAL( dir[1], 1.0/ray2.invDir()[1]); ASSERT_DOUBLES_APPROX_EQUAL( dir[2], 1.0/ray2.invDir()[2]); ASSERT_DOUBLES_APPROX_EQUAL( t0, ray2.t0()); ASSERT_DOUBLES_APPROX_EQUAL( t1, ray2.t1()); Vec3T xyz2[3] = {ray0.start(), ray0.mid(), ray0.end()}; for (int i=0; i<3; ++i) { //std::cerr << "world0 ="<<xyz0[i] << " world2 ="<< xyz2[i] << std::endl; for (int j=0; j<3; ++j) ASSERT_DOUBLES_APPROX_EQUAL(xyz0[i][j], xyz2[i][j]); } } {// test bbox intersection const Vec3T eye( 2.0, 1.0, 1.0), dir(-1.0, 2.0, 3.0); RayT ray(eye, dir); RealT t0=0, t1=0; // intersects the two faces of the box perpendicular to the y-axis! 
EXPECT_TRUE(ray.intersects(CoordBBox(Coord(0, 2, 2), Coord(2, 4, 6)), t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL( 0.5, t0); ASSERT_DOUBLES_APPROX_EQUAL( 1.5, t1); ASSERT_DOUBLES_APPROX_EQUAL( ray(0.5)[1], 2);//lower y component of intersection ASSERT_DOUBLES_APPROX_EQUAL( ray(1.5)[1], 4);//higher y component of intersection // intersects the lower edge anlong the z-axis of the box EXPECT_TRUE(ray.intersects(BBoxT(Vec3T(1.5, 2.0, 2.0), Vec3T(4.5, 4.0, 6.0)), t0, t1)); ASSERT_DOUBLES_APPROX_EQUAL( 0.5, t0); ASSERT_DOUBLES_APPROX_EQUAL( 0.5, t1); ASSERT_DOUBLES_APPROX_EQUAL( ray(0.5)[0], 1.5);//lower y component of intersection ASSERT_DOUBLES_APPROX_EQUAL( ray(0.5)[1], 2.0);//higher y component of intersection // no intersections EXPECT_TRUE(!ray.intersects(CoordBBox(Coord(4, 2, 2), Coord(6, 4, 6)))); } {// test sphere intersection const Vec3T dir(-1.0, 2.0, 3.0); const Vec3T eye( 2.0, 1.0, 1.0); RayT ray(eye, dir); RealT t0=0, t1=0; // intersects twice - second intersection exits sphere in lower y-z-plane Vec3T center(2.0,3.0,4.0); RealT radius = 1.0f; EXPECT_TRUE(ray.intersects(center, radius, t0, t1)); EXPECT_TRUE(t0 < t1); ASSERT_DOUBLES_APPROX_EQUAL( 1.0, t1); ASSERT_DOUBLES_APPROX_EQUAL(ray(t1)[1], center[1]); ASSERT_DOUBLES_APPROX_EQUAL(ray(t1)[2], center[2]); ASSERT_DOUBLES_APPROX_EQUAL((ray(t0)-center).length()-radius, 0); ASSERT_DOUBLES_APPROX_EQUAL((ray(t1)-center).length()-radius, 0); // no intersection center = Vec3T(3.0,3.0,4.0); radius = 1.0f; EXPECT_TRUE(!ray.intersects(center, radius, t0, t1)); } {// test bbox clip const Vec3T dir(-1.0, 2.0, 3.0); const Vec3T eye( 2.0, 1.0, 1.0); RealT t0=0.1, t1=12589.0; RayT ray(eye, dir, t0, t1); // intersects the two faces of the box perpendicular to the y-axis! EXPECT_TRUE(ray.clip(CoordBBox(Coord(0, 2, 2), Coord(2, 4, 6)))); ASSERT_DOUBLES_APPROX_EQUAL( 0.5, ray.t0()); ASSERT_DOUBLES_APPROX_EQUAL( 1.5, ray.t1()); ASSERT_DOUBLES_APPROX_EQUAL( ray(0.5)[1], 2);//lower y component of intersection ASSERT_DOUBLES_APPROX_EQUAL( ray(1.5)[1], 4);//higher y component of intersection ray.reset(eye, dir, t0, t1); // intersects the lower edge anlong the z-axis of the box EXPECT_TRUE(ray.clip(BBoxT(Vec3T(1.5, 2.0, 2.0), Vec3T(4.5, 4.0, 6.0)))); ASSERT_DOUBLES_APPROX_EQUAL( 0.5, ray.t0()); ASSERT_DOUBLES_APPROX_EQUAL( 0.5, ray.t1()); ASSERT_DOUBLES_APPROX_EQUAL( ray(0.5)[0], 1.5);//lower y component of intersection ASSERT_DOUBLES_APPROX_EQUAL( ray(0.5)[1], 2.0);//higher y component of intersection ray.reset(eye, dir, t0, t1); // no intersections EXPECT_TRUE(!ray.clip(CoordBBox(Coord(4, 2, 2), Coord(6, 4, 6)))); ASSERT_DOUBLES_APPROX_EQUAL( t0, ray.t0()); ASSERT_DOUBLES_APPROX_EQUAL( t1, ray.t1()); } {// test plane intersection const Vec3T dir(-1.0, 0.0, 0.0); const Vec3T eye( 0.5, 4.7,-9.8); RealT t0=1.0, t1=12589.0; RayT ray(eye, dir, t0, t1); Real t = 0.0; EXPECT_TRUE(!ray.intersects(Vec3T( 1.0, 0.0, 0.0), 4.0, t)); EXPECT_TRUE(!ray.intersects(Vec3T(-1.0, 0.0, 0.0),-4.0, t)); EXPECT_TRUE( ray.intersects(Vec3T( 1.0, 0.0, 0.0),-4.0, t)); ASSERT_DOUBLES_APPROX_EQUAL(4.5, t); EXPECT_TRUE( ray.intersects(Vec3T(-1.0, 0.0, 0.0), 4.0, t)); ASSERT_DOUBLES_APPROX_EQUAL(4.5, t); EXPECT_TRUE(!ray.intersects(Vec3T( 1.0, 0.0, 0.0),-0.4, t)); } {// test plane intersection const Vec3T dir( 0.0, 1.0, 0.0); const Vec3T eye( 4.7, 0.5,-9.8); RealT t0=1.0, t1=12589.0; RayT ray(eye, dir, t0, t1); Real t = 0.0; EXPECT_TRUE(!ray.intersects(Vec3T( 0.0,-1.0, 0.0), 4.0, t)); EXPECT_TRUE(!ray.intersects(Vec3T( 0.0, 1.0, 0.0),-4.0, t)); EXPECT_TRUE( ray.intersects(Vec3T( 0.0, 1.0, 
0.0), 4.0, t)); ASSERT_DOUBLES_APPROX_EQUAL(3.5, t); EXPECT_TRUE( ray.intersects(Vec3T( 0.0,-1.0, 0.0),-4.0, t)); ASSERT_DOUBLES_APPROX_EQUAL(3.5, t); EXPECT_TRUE(!ray.intersects(Vec3T( 1.0, 0.0, 0.0), 0.4, t)); } } TEST_F(TestRay, testTimeSpan) { using namespace openvdb; typedef double RealT; typedef math::Ray<RealT>::TimeSpan TimeSpanT; TimeSpanT t(2.0, 5.0); ASSERT_DOUBLES_EXACTLY_EQUAL(2.0, t.t0); ASSERT_DOUBLES_EXACTLY_EQUAL(5.0, t.t1); ASSERT_DOUBLES_APPROX_EQUAL(3.5, t.mid()); EXPECT_TRUE(t.valid()); t.set(-1, -1); EXPECT_TRUE(!t.valid()); t.scale(5); ASSERT_DOUBLES_EXACTLY_EQUAL(-5.0, t.t0); ASSERT_DOUBLES_EXACTLY_EQUAL(-5.0, t.t1); ASSERT_DOUBLES_APPROX_EQUAL(-5.0, t.mid()); } TEST_F(TestRay, testDDA) { using namespace openvdb; typedef math::Ray<double> RayType; { typedef math::DDA<RayType,3+4+5> DDAType; const RayType::Vec3T dir( 1.0, 0.0, 0.0); const RayType::Vec3T eye(-1.0, 0.0, 0.0); const RayType ray(eye, dir); //std::cerr << ray << std::endl; DDAType dda(ray); ASSERT_DOUBLES_APPROX_EQUAL(math::Delta<double>::value(), dda.time()); ASSERT_DOUBLES_APPROX_EQUAL(1.0, dda.next()); //dda.print(); dda.step(); ASSERT_DOUBLES_APPROX_EQUAL(1.0, dda.time()); ASSERT_DOUBLES_APPROX_EQUAL(4096+1.0, dda.next()); //dda.print(); } {// Check for the notorious +-0 issue! typedef math::DDA<RayType,3> DDAType; //std::cerr << "\nPositive zero ray" << std::endl; const RayType::Vec3T dir1(1.0, 0.0, 0.0); const RayType::Vec3T eye1(2.0, 0.0, 0.0); const RayType ray1(eye1, dir1); //std::cerr << ray1 << std::endl; DDAType dda1(ray1); //dda1.print(); dda1.step(); //dda1.print(); //std::cerr << "\nNegative zero ray" << std::endl; const RayType::Vec3T dir2(1.0,-0.0,-0.0); const RayType::Vec3T eye2(2.0, 0.0, 0.0); const RayType ray2(eye2, dir2); //std::cerr << ray2 << std::endl; DDAType dda2(ray2); //dda2.print(); dda2.step(); //dda2.print(); //std::cerr << "\nNegative epsilon ray" << std::endl; const RayType::Vec3T dir3(1.0,-1e-9,-1e-9); const RayType::Vec3T eye3(2.0, 0.0, 0.0); const RayType ray3(eye3, dir3); //std::cerr << ray3 << std::endl; DDAType dda3(ray3); //dda3.print(); dda3.step(); //dda3.print(); //std::cerr << "\nPositive epsilon ray" << std::endl; const RayType::Vec3T dir4(1.0,-1e-9,-1e-9); const RayType::Vec3T eye4(2.0, 0.0, 0.0); const RayType ray4(eye3, dir4); //std::cerr << ray4 << std::endl; DDAType dda4(ray4); //dda4.print(); dda4.step(); //dda4.print(); ASSERT_DOUBLES_APPROX_EQUAL(dda1.time(), dda2.time()); ASSERT_DOUBLES_APPROX_EQUAL(dda2.time(), dda3.time()); ASSERT_DOUBLES_APPROX_EQUAL(dda3.time(), dda4.time()); ASSERT_DOUBLES_APPROX_EQUAL(dda1.next(), dda2.next()); ASSERT_DOUBLES_APPROX_EQUAL(dda2.next(), dda3.next()); ASSERT_DOUBLES_APPROX_EQUAL(dda3.next(), dda4.next()); } {// test voxel traversal along both directions of each axis typedef math::DDA<RayType> DDAType; const RayType::Vec3T eye( 0, 0, 0); for (int s = -1; s<=1; s+=2) { for (int a = 0; a<3; ++a) { const int d[3]={s*(a==0), s*(a==1), s*(a==2)}; const RayType::Vec3T dir(d[0], d[1], d[2]); RayType ray(eye, dir); DDAType dda(ray); //std::cerr << "\nray: "<<ray<<std::endl; //dda.print(); for (int i=1; i<=10; ++i) { //std::cerr << "i="<<i<<" voxel="<<dda.voxel()<<" time="<<dda.time()<<std::endl; //EXPECT_TRUE(dda.voxel()==Coord(i*d[0], i*d[1], i*d[2])); EXPECT_TRUE(dda.step()); ASSERT_DOUBLES_APPROX_EQUAL(i,dda.time()); } } } } {// test Node traversal along both directions of each axis typedef math::DDA<RayType,3> DDAType; const RayType::Vec3T eye(0, 0, 0); for (int s = -1; s<=1; s+=2) { for (int a = 0; a<3; ++a) { const 
int d[3]={s*(a==0), s*(a==1), s*(a==2)}; const RayType::Vec3T dir(d[0], d[1], d[2]); RayType ray(eye, dir); DDAType dda(ray); //std::cerr << "\nray: "<<ray<<std::endl; for (int i=1; i<=10; ++i) { //std::cerr << "i="<<i<<" voxel="<<dda.voxel()<<" time="<<dda.time()<<std::endl; //EXPECT_TRUE(dda.voxel()==Coord(8*i*d[0],8*i*d[1],8*i*d[2])); EXPECT_TRUE(dda.step()); ASSERT_DOUBLES_APPROX_EQUAL(8*i,dda.time()); } } } } {// test accelerated Node traversal along both directions of each axis typedef math::DDA<RayType,3> DDAType; const RayType::Vec3T eye(0, 0, 0); for (int s = -1; s<=1; s+=2) { for (int a = 0; a<3; ++a) { const int d[3]={s*(a==0), s*(a==1), s*(a==2)}; const RayType::Vec3T dir(2*d[0], 2*d[1], 2*d[2]); RayType ray(eye, dir); DDAType dda(ray); //ASSERT_DOUBLES_APPROX_EQUAL(0.0, dda.time()); //EXPECT_TRUE(dda.voxel()==Coord(0,0,0)); double next=0; //std::cerr << "\nray: "<<ray<<std::endl; for (int i=1; i<=10; ++i) { //std::cerr << "i="<<i<<" voxel="<<dda.voxel()<<" time="<<dda.time()<<std::endl; //EXPECT_TRUE(dda.voxel()==Coord(8*i*d[0],8*i*d[1],8*i*d[2])); EXPECT_TRUE(dda.step()); ASSERT_DOUBLES_APPROX_EQUAL(4*i, dda.time()); if (i>1) { ASSERT_DOUBLES_APPROX_EQUAL(dda.time(), next); } next = dda.next(); } } } } }
17,486
C++
37.688053
100
0.532941
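TestRay exercises ray clipping against bounding boxes and voxel traversal with math::DDA. The sketch below walks a ray through an index-space box, assuming the same clip and step semantics the tests rely on; the eye position, direction and box bounds are arbitrary.

#include <openvdb/openvdb.h>
#include <openvdb/math/Ray.h>
#include <openvdb/math/DDA.h>
#include <iostream>

int main()
{
    using RayT = openvdb::math::Ray<double>;
    using Vec3T = RayT::Vec3T;

    // A ray starting outside a 10^3 index-space box, pointing along +x.
    RayT ray(/*eye=*/Vec3T(-5.0, 5.5, 5.5), /*dir=*/Vec3T(1.0, 0.0, 0.0));

    // Clip the ray's [t0, t1] range to the box; clip() returns false on a miss.
    if (ray.clip(openvdb::CoordBBox(openvdb::Coord(0), openvdb::Coord(9)))) {
        std::cout << "enters at t=" << ray.t0() << ", leaves at t=" << ray.t1() << "\n";

        // Step through the voxels that the clipped ray touches.
        openvdb::math::DDA<RayT> dda(ray);
        do {
            std::cout << "voxel " << dda.voxel() << " at t=" << dda.time() << "\n";
        } while (dda.step());
    }
    return 0;
}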
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestParticleAtlas.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/tools/ParticleAtlas.h> #include <openvdb/math/Math.h> #include <vector> #include <algorithm> #include <cmath> #include "util.h" // for genPoints struct TestParticleAtlas: public ::testing::Test { }; //////////////////////////////////////// namespace { class ParticleList { public: typedef openvdb::Vec3R PosType; typedef PosType::value_type ScalarType; ParticleList(const std::vector<PosType>& points, const std::vector<ScalarType>& radius) : mPoints(&points) , mRadius(&radius) { } // Return the number of points in the array size_t size() const { return mPoints->size(); } // Return the world-space position for the nth particle. void getPos(size_t n, PosType& xyz) const { xyz = (*mPoints)[n]; } // Return the world-space radius for the nth particle. void getRadius(size_t n, ScalarType& radius) const { radius = (*mRadius)[n]; } protected: std::vector<PosType> const * const mPoints; std::vector<ScalarType> const * const mRadius; }; // ParticleList template<typename T> bool hasDuplicates(const std::vector<T>& items) { std::vector<T> vec(items); std::sort(vec.begin(), vec.end()); size_t duplicates = 0; for (size_t n = 1, N = vec.size(); n < N; ++n) { if (vec[n] == vec[n-1]) ++duplicates; } return duplicates != 0; } } // namespace //////////////////////////////////////// TEST_F(TestParticleAtlas, testParticleAtlas) { // generate points const size_t numParticle = 40000; const double minVoxelSize = 0.01; std::vector<openvdb::Vec3R> points; unittest_util::genPoints(numParticle, points); std::vector<double> radius; for (size_t n = 0, N = points.size() / 2; n < N; ++n) { radius.push_back(minVoxelSize); } for (size_t n = points.size() / 2, N = points.size(); n < N; ++n) { radius.push_back(minVoxelSize * 2.0); } ParticleList particles(points, radius); // construct data structure typedef openvdb::tools::ParticleAtlas<> ParticleAtlas; ParticleAtlas atlas; EXPECT_TRUE(atlas.empty()); EXPECT_TRUE(atlas.levels() == 0); atlas.construct(particles, minVoxelSize); EXPECT_TRUE(!atlas.empty()); EXPECT_TRUE(atlas.levels() == 2); EXPECT_TRUE( openvdb::math::isApproxEqual(atlas.minRadius(0), minVoxelSize)); EXPECT_TRUE( openvdb::math::isApproxEqual(atlas.minRadius(1), minVoxelSize * 2.0)); typedef openvdb::tools::ParticleAtlas<>::Iterator ParticleAtlasIterator; ParticleAtlasIterator it(atlas); EXPECT_TRUE(atlas.levels() == 2); std::vector<uint32_t> indices; indices.reserve(numParticle); it.updateFromLevel(0); EXPECT_TRUE(it); EXPECT_EQ(it.size(), numParticle - (points.size() / 2)); for (; it; ++it) { indices.push_back(*it); } it.updateFromLevel(1); EXPECT_TRUE(it); EXPECT_EQ(it.size(), (points.size() / 2)); for (; it; ++it) { indices.push_back(*it); } EXPECT_EQ(numParticle, indices.size()); EXPECT_TRUE(!hasDuplicates(indices)); openvdb::Vec3R center = points[0]; double searchRadius = minVoxelSize * 10.0; it.worldSpaceSearchAndUpdate(center, searchRadius, particles); EXPECT_TRUE(it); indices.clear(); for (; it; ++it) { indices.push_back(*it); } EXPECT_EQ(it.size(), indices.size()); EXPECT_TRUE(!hasDuplicates(indices)); openvdb::BBoxd bbox; for (size_t n = 0, N = points.size() / 2; n < N; ++n) { bbox.expand(points[n]); } it.worldSpaceSearchAndUpdate(bbox, particles); EXPECT_TRUE(it); indices.clear(); for (; it; ++it) { indices.push_back(*it); } EXPECT_EQ(it.size(), indices.size()); EXPECT_TRUE(!hasDuplicates(indices)); }
3,997
C++
20.728261
78
0.603703
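TestParticleAtlas builds a multi-level acceleration structure over particles with varying radii. The sketch below assumes, as the test does, that any class exposing size(), getPos() and getRadius() can serve as the particle list and that an Iterator can be driven directly by worldSpaceSearchAndUpdate; the three particles and the search sphere are made-up values.

#include <openvdb/openvdb.h>
#include <openvdb/tools/ParticleAtlas.h>
#include <iostream>
#include <vector>

// Minimal particle adaptor: the atlas only needs size(), getPos() and getRadius().
struct Particles {
    using PosType = openvdb::Vec3R;
    using ScalarType = PosType::value_type;

    std::vector<PosType> positions;
    std::vector<ScalarType> radii;

    size_t size() const { return positions.size(); }
    void getPos(size_t n, PosType& xyz) const { xyz = positions[n]; }
    void getRadius(size_t n, ScalarType& r) const { r = radii[n]; }
};

int main()
{
    openvdb::initialize();

    Particles particles;
    particles.positions = { {0.0, 0.0, 0.0}, {0.05, 0.0, 0.0}, {1.0, 1.0, 1.0} };
    particles.radii     = { 0.01, 0.01, 0.02 };

    openvdb::tools::ParticleAtlas<> atlas;
    atlas.construct(particles, /*minVoxelSize=*/0.01);

    // Range query: indices of particles that intersect the world-space search sphere.
    openvdb::tools::ParticleAtlas<>::Iterator it(atlas);
    it.worldSpaceSearchAndUpdate(openvdb::Vec3R(0.0, 0.0, 0.0), /*radius=*/0.1, particles);
    for (; it; ++it) {
        std::cout << "particle index " << *it << "\n";
    }
    return 0;
}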
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestPointAdvect.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include "util.h" #include <openvdb/points/PointAttribute.h> #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointConversion.h> #include <openvdb/points/PointScatter.h> #include <openvdb/points/PointAdvect.h> #include <openvdb/tools/LevelSetSphere.h> #include <openvdb/tools/Composite.h> // csgDifference #include <openvdb/tools/MeshToVolume.h> // createLevelSetBox #include <openvdb/openvdb.h> #include <openvdb/Types.h> #include <string> #include <vector> using namespace openvdb; using namespace openvdb::points; class TestPointAdvect: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; // class TestPointAdvect //////////////////////////////////////// TEST_F(TestPointAdvect, testAdvect) { // generate four points const float voxelSize = 1.0f; std::vector<Vec3s> positions = { {5, 2, 3}, {2, 4, 1}, {50, 5, 1}, {3, 20, 1}, }; const PointAttributeVector<Vec3s> pointList(positions); math::Transform::Ptr pointTransform(math::Transform::createLinearTransform(voxelSize)); auto pointIndexGrid = tools::createPointIndexGrid<tools::PointIndexGrid>( pointList, *pointTransform); auto points = createPointDataGrid<NullCodec, PointDataGrid>( *pointIndexGrid, pointList, *pointTransform); std::vector<int> id; id.push_back(0); id.push_back(1); id.push_back(2); id.push_back(3); auto idAttributeType = TypedAttributeArray<int>::attributeType(); appendAttribute(points->tree(), "id", idAttributeType); // create a wrapper around the id vector PointAttributeVector<int> idWrapper(id); populateAttribute<PointDataTree, tools::PointIndexTree, PointAttributeVector<int>>( points->tree(), pointIndexGrid->tree(), "id", idWrapper); // create "test" group which only contains third point appendGroup(points->tree(), "test"); std::vector<short> groups(positions.size(), 0); groups[2] = 1; setGroup(points->tree(), pointIndexGrid->tree(), groups, "test"); // create "test2" group which contains second and third point appendGroup(points->tree(), "test2"); groups[1] = 1; setGroup(points->tree(), pointIndexGrid->tree(), groups, "test2"); const Vec3s tolerance(1e-3f); // advect by velocity using all integration orders for (Index integrationOrder = 0; integrationOrder < 5; integrationOrder++) { Vec3s velocityBackground(1.0, 2.0, 3.0); double timeStep = 1.0; int steps = 1; auto velocity = Vec3SGrid::create(velocityBackground); // grid with background value only auto pointsToAdvect = points->deepCopy(); const auto& transform = pointsToAdvect->transform(); advectPoints(*pointsToAdvect, *velocity, integrationOrder, timeStep, steps); for (auto leaf = pointsToAdvect->tree().beginLeaf(); leaf; ++leaf) { AttributeHandle<Vec3s> positionHandle(leaf->constAttributeArray("P")); AttributeHandle<int> idHandle(leaf->constAttributeArray("id")); for (auto iter = leaf->beginIndexOn(); iter; ++iter) { int theId = idHandle.get(*iter); Vec3s position = transform.indexToWorld( positionHandle.get(*iter) + iter.getCoord().asVec3d()); Vec3s expectedPosition(positions[theId]); if (integrationOrder > 0) expectedPosition += velocityBackground; EXPECT_TRUE(math::isApproxEqual(position, expectedPosition, tolerance)); } } } // invalid advection scheme auto zeroVelocityGrid = Vec3SGrid::create(Vec3s(0)); EXPECT_THROW(advectPoints(*points, *zeroVelocityGrid, 5, 1.0, 1), ValueError); { // advect varying dt and steps Vec3s velocityBackground(1.0, 2.0, 3.0); Index 
integrationOrder = 4; double timeStep = 0.1; int steps = 100; auto velocity = Vec3SGrid::create(velocityBackground); // grid with background value only auto pointsToAdvect = points->deepCopy(); const auto& transform = pointsToAdvect->transform(); advectPoints(*pointsToAdvect, *velocity, integrationOrder, timeStep, steps); for (auto leaf = pointsToAdvect->tree().beginLeaf(); leaf; ++leaf) { AttributeHandle<Vec3s> positionHandle(leaf->constAttributeArray("P")); AttributeHandle<int> idHandle(leaf->constAttributeArray("id")); for (auto iter = leaf->beginIndexOn(); iter; ++iter) { int theId = idHandle.get(*iter); Vec3s position = transform.indexToWorld( positionHandle.get(*iter) + iter.getCoord().asVec3d()); Vec3s expectedPosition(positions[theId] + velocityBackground * 10.0f); EXPECT_TRUE(math::isApproxEqual(position, expectedPosition, tolerance)); } } } { // perform filtered advection Vec3s velocityBackground(1.0, 2.0, 3.0); Index integrationOrder = 4; double timeStep = 1.0; int steps = 1; auto velocity = Vec3SGrid::create(velocityBackground); // grid with background value only std::vector<std::string> advectIncludeGroups; std::vector<std::string> advectExcludeGroups; std::vector<std::string> includeGroups; std::vector<std::string> excludeGroups; { // only advect points in "test" group advectIncludeGroups.push_back("test"); auto leaf = points->tree().cbeginLeaf(); MultiGroupFilter advectFilter( advectIncludeGroups, advectExcludeGroups, leaf->attributeSet()); MultiGroupFilter filter(includeGroups, excludeGroups, leaf->attributeSet()); auto pointsToAdvect = points->deepCopy(); const auto& transform = pointsToAdvect->transform(); advectPoints(*pointsToAdvect, *velocity, integrationOrder, timeStep, steps, advectFilter, filter); EXPECT_EQ(Index64(4), pointCount(pointsToAdvect->tree())); for (auto leafIter = pointsToAdvect->tree().beginLeaf(); leafIter; ++leafIter) { AttributeHandle<Vec3s> positionHandle(leafIter->constAttributeArray("P")); AttributeHandle<int> idHandle(leafIter->constAttributeArray("id")); for (auto iter = leafIter->beginIndexOn(); iter; ++iter) { int theId = idHandle.get(*iter); Vec3s position = transform.indexToWorld( positionHandle.get(*iter) + iter.getCoord().asVec3d()); Vec3s expectedPosition(positions[theId]); if (theId == 2) expectedPosition += velocityBackground; EXPECT_TRUE(math::isApproxEqual(position, expectedPosition, tolerance)); } } advectIncludeGroups.clear(); } { // only keep points in "test" group includeGroups.push_back("test"); auto leaf = points->tree().cbeginLeaf(); MultiGroupFilter advectFilter( advectIncludeGroups, advectExcludeGroups, leaf->attributeSet()); MultiGroupFilter filter(includeGroups, excludeGroups, leaf->attributeSet()); auto pointsToAdvect = points->deepCopy(); const auto& transform = pointsToAdvect->transform(); advectPoints(*pointsToAdvect, *velocity, integrationOrder, timeStep, steps, advectFilter, filter); EXPECT_EQ(Index64(1), pointCount(pointsToAdvect->tree())); for (auto leafIter = pointsToAdvect->tree().beginLeaf(); leafIter; ++leafIter) { AttributeHandle<Vec3s> positionHandle(leafIter->constAttributeArray("P")); AttributeHandle<int> idHandle(leafIter->constAttributeArray("id")); for (auto iter = leafIter->beginIndexOn(); iter; ++iter) { int theId = idHandle.get(*iter); Vec3s position = transform.indexToWorld( positionHandle.get(*iter) + iter.getCoord().asVec3d()); Vec3s expectedPosition(positions[theId]); expectedPosition += velocityBackground; EXPECT_TRUE(math::isApproxEqual(position, expectedPosition, tolerance)); } } 
includeGroups.clear(); } { // only advect points in "test2" group, delete points in "test" group advectIncludeGroups.push_back("test2"); excludeGroups.push_back("test"); auto leaf = points->tree().cbeginLeaf(); MultiGroupFilter advectFilter( advectIncludeGroups, advectExcludeGroups, leaf->attributeSet()); MultiGroupFilter filter(includeGroups, excludeGroups, leaf->attributeSet()); auto pointsToAdvect = points->deepCopy(); const auto& transform = pointsToAdvect->transform(); advectPoints(*pointsToAdvect, *velocity, integrationOrder, timeStep, steps, advectFilter, filter); EXPECT_EQ(Index64(3), pointCount(pointsToAdvect->tree())); for (auto leafIter = pointsToAdvect->tree().beginLeaf(); leafIter; ++leafIter) { AttributeHandle<Vec3s> positionHandle(leafIter->constAttributeArray("P")); AttributeHandle<int> idHandle(leafIter->constAttributeArray("id")); for (auto iter = leafIter->beginIndexOn(); iter; ++iter) { int theId = idHandle.get(*iter); Vec3s position = transform.indexToWorld( positionHandle.get(*iter) + iter.getCoord().asVec3d()); Vec3s expectedPosition(positions[theId]); if (theId == 1) expectedPosition += velocityBackground; EXPECT_TRUE(math::isApproxEqual(position, expectedPosition, tolerance)); } } advectIncludeGroups.clear(); excludeGroups.clear(); } { // advect all points, caching disabled auto pointsToAdvect = points->deepCopy(); const auto& transform = pointsToAdvect->transform(); auto leaf = points->tree().cbeginLeaf(); MultiGroupFilter advectFilter( advectIncludeGroups, advectExcludeGroups, leaf->attributeSet()); MultiGroupFilter filter(includeGroups, excludeGroups, leaf->attributeSet()); advectPoints(*pointsToAdvect, *velocity, integrationOrder, timeStep, steps, advectFilter, filter, false); EXPECT_EQ(Index64(4), pointCount(pointsToAdvect->tree())); for (auto leafIter = pointsToAdvect->tree().beginLeaf(); leafIter; ++leafIter) { AttributeHandle<Vec3s> positionHandle(leafIter->constAttributeArray("P")); AttributeHandle<int> idHandle(leafIter->constAttributeArray("id")); for (auto iter = leafIter->beginIndexOn(); iter; ++iter) { int theId = idHandle.get(*iter); Vec3s position = transform.indexToWorld( positionHandle.get(*iter) + iter.getCoord().asVec3d()); Vec3s expectedPosition(positions[theId]); expectedPosition += velocityBackground; EXPECT_TRUE(math::isApproxEqual(position, expectedPosition, tolerance)); } } } { // only advect points in "test2" group, delete points in "test" group, caching disabled advectIncludeGroups.push_back("test2"); excludeGroups.push_back("test"); auto leaf = points->tree().cbeginLeaf(); MultiGroupFilter advectFilter( advectIncludeGroups, advectExcludeGroups, leaf->attributeSet()); MultiGroupFilter filter(includeGroups, excludeGroups, leaf->attributeSet()); auto pointsToAdvect = points->deepCopy(); const auto& transform = pointsToAdvect->transform(); advectPoints(*pointsToAdvect, *velocity, integrationOrder, timeStep, steps, advectFilter, filter, false); EXPECT_EQ(Index64(3), pointCount(pointsToAdvect->tree())); for (auto leafIter = pointsToAdvect->tree().beginLeaf(); leafIter; ++leafIter) { AttributeHandle<Vec3s> positionHandle(leafIter->constAttributeArray("P")); AttributeHandle<int> idHandle(leafIter->constAttributeArray("id")); for (auto iter = leafIter->beginIndexOn(); iter; ++iter) { int theId = idHandle.get(*iter); Vec3s position = transform.indexToWorld( positionHandle.get(*iter) + iter.getCoord().asVec3d()); Vec3s expectedPosition(positions[theId]); if (theId == 1) expectedPosition += velocityBackground; 
EXPECT_TRUE(math::isApproxEqual(position, expectedPosition, tolerance)); } } advectIncludeGroups.clear(); excludeGroups.clear(); } } } TEST_F(TestPointAdvect, testZalesaksDisk) { // advect a notched sphere known as Zalesak's disk in a rotational velocity field // build the level set sphere Vec3s center(0, 0, 0); float radius = 10; float voxelSize = 0.2f; auto zalesak = tools::createLevelSetSphere<FloatGrid>(radius, center, voxelSize); // create box for notch using width and depth relative to radius const math::Transform::Ptr xform = math::Transform::createLinearTransform(voxelSize); Vec3f min(center); Vec3f max(center); float notchWidth = 0.5f; float notchDepth = 1.5f; min.x() -= (radius * notchWidth) / 2; min.y() -= (radius * (notchDepth - 1)); min.z() -= radius * 1.1f; max.x() += (radius * notchWidth) / 2; max.y() += radius * 1.1f; max.z() += radius * 1.1f; math::BBox<Vec3f> bbox(min, max); auto notch = tools::createLevelSetBox<FloatGrid>(bbox, *xform); // subtract notch from the sphere tools::csgDifference(*zalesak, *notch); // scatter points inside the sphere auto points = points::denseUniformPointScatter(*zalesak, /*pointsPerVoxel=*/8); // append an integer "id" attribute auto idAttributeType = TypedAttributeArray<int>::attributeType(); appendAttribute(points->tree(), "id", idAttributeType); // populate it in serial based on iteration order int id = 0; for (auto leaf = points->tree().beginLeaf(); leaf; ++leaf) { AttributeWriteHandle<int> handle(leaf->attributeArray("id")); for (auto iter = leaf->beginIndexOn(); iter; ++iter) { handle.set(*iter, id++); } } // copy grid into new grid for advecting auto pointsToAdvect = points->deepCopy(); // populate a velocity grid that rotates in X auto velocity = Vec3SGrid::create(Vec3s(0)); velocity->setTransform(xform); CoordBBox activeBbox(zalesak->evalActiveVoxelBoundingBox()); activeBbox.expand(5); velocity->denseFill(activeBbox, Vec3s(0)); for (auto leaf = velocity->tree().beginLeaf(); leaf; ++leaf) { for (auto iter = leaf->beginValueOn(); iter; ++iter) { Vec3s position = xform->indexToWorld(iter.getCoord().asVec3d()); Vec3s vel = (position.cross(Vec3s(0, 0, 1)) * 2.0f * M_PI) / 10.0f; iter.setValue(vel); } } // extract original positions const Index count = Index(pointCount(points->constTree())); std::vector<Vec3f> preAdvectPositions(count, Vec3f(0)); for (auto leaf = points->constTree().cbeginLeaf(); leaf; ++leaf) { AttributeHandle<int> idHandle(leaf->constAttributeArray("id")); AttributeHandle<Vec3f> posHandle(leaf->constAttributeArray("P")); for (auto iter = leaf->beginIndexOn(); iter; ++iter) { Vec3f position = posHandle.get(*iter) + iter.getCoord().asVec3d(); preAdvectPositions[idHandle.get(*iter)] = Vec3f(xform->indexToWorld(position)); } } // advect points a half revolution points::advectPoints(*pointsToAdvect, *velocity, Index(4), 1.0, 5); // extract new positions std::vector<Vec3f> postAdvectPositions(count, Vec3f(0)); for (auto leaf = pointsToAdvect->constTree().cbeginLeaf(); leaf; ++leaf) { AttributeHandle<int> idHandle(leaf->constAttributeArray("id")); AttributeHandle<Vec3f> posHandle(leaf->constAttributeArray("P")); for (auto iter = leaf->beginIndexOn(); iter; ++iter) { Vec3f position = posHandle.get(*iter) + iter.getCoord().asVec3d(); postAdvectPositions[idHandle.get(*iter)] = Vec3f(xform->indexToWorld(position)); } } for (Index i = 0; i < count; i++) { EXPECT_TRUE(!math::isApproxEqual( preAdvectPositions[i], postAdvectPositions[i], Vec3f(0.1))); } // advect points another half revolution points::advectPoints(*pointsToAdvect, 
*velocity, Index(4), 1.0, 5); for (auto leaf = pointsToAdvect->constTree().cbeginLeaf(); leaf; ++leaf) { AttributeHandle<int> idHandle(leaf->constAttributeArray("id")); AttributeHandle<Vec3f> posHandle(leaf->constAttributeArray("P")); for (auto iter = leaf->beginIndexOn(); iter; ++iter) { Vec3f position = posHandle.get(*iter) + iter.getCoord().asVec3d(); postAdvectPositions[idHandle.get(*iter)] = Vec3f(xform->indexToWorld(position)); } } for (Index i = 0; i < count; i++) { EXPECT_TRUE(math::isApproxEqual( preAdvectPositions[i], postAdvectPositions[i], Vec3f(0.1))); } }
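// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the test file above): the tests exercise
// points::advectPoints() with various integration orders, filters and velocity
// fields; the snippet below distills the basic call sequence into a standalone
// program. The sphere radius, voxel size, points-per-voxel count, velocity
// value and step counts are arbitrary example choices, not values taken from
// the tests.
#include <openvdb/openvdb.h>
#include <openvdb/points/PointScatter.h>
#include <openvdb/points/PointAdvect.h>
#include <openvdb/tools/LevelSetSphere.h>

int main()
{
    openvdb::initialize();

    // Build a level-set sphere and scatter points into its active voxels,
    // mirroring the setup used in testZalesaksDisk().
    auto sphere = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/10.0f, /*center=*/openvdb::Vec3f(0.0f), /*voxelSize=*/0.5f);
    auto points = openvdb::points::denseUniformPointScatter(*sphere, /*pointsPerVoxel=*/4.0f);

    // A constant velocity field: only the background value is used.
    auto velocity = openvdb::Vec3SGrid::create(openvdb::Vec3s(1.0f, 0.0f, 0.0f));

    // Advect with fourth-order Runge-Kutta integration: ten substeps of dt = 0.1.
    openvdb::points::advectPoints(*points, *velocity,
        /*integrationOrder=*/openvdb::Index(4), /*timeStep=*/0.1, /*steps=*/10);

    openvdb::uninitialize();
    return 0;
}
// ----------------------------------------------------------------------------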
18,121
C++
39.181818
97
0.612494
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/util.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

#ifndef OPENVDB_UNITTEST_UTIL_HAS_BEEN_INCLUDED
#define OPENVDB_UNITTEST_UTIL_HAS_BEEN_INCLUDED

#include <openvdb/openvdb.h>
#include <openvdb/math/Math.h> // for math::Random01
#include <openvdb/tools/Prune.h>// for pruneLevelSet
#include <sstream>

namespace unittest_util {

enum SphereMode { SPHERE_DENSE, SPHERE_DENSE_NARROW_BAND, SPHERE_SPARSE_NARROW_BAND };

/// @brief Generates the signed distance to a sphere located at @a center
/// and with a specified @a radius (both in world coordinates). Only voxels
/// in the domain [0,0,0] -> @a dim are considered. Also note that the
/// level set is either dense, dense narrow-band or sparse narrow-band.
///
/// @note This method is VERY SLOW and should only be used for debugging purposes!
/// However it works for any transform and even with open level sets.
/// A faster approach for closed narrow band generation is to only set voxels
/// sparsely and then use grid::signedFloodFill to define the sign
/// of the background values and tiles! This is implemented in openvdb/tools/LevelSetSphere.h
template<class GridType>
inline void
makeSphere(const openvdb::Coord& dim, const openvdb::Vec3f& center, float radius,
           GridType& grid, SphereMode mode)
{
    typedef typename GridType::ValueType ValueT;
    const ValueT
        zero = openvdb::zeroVal<ValueT>(),
        outside = grid.background(),
        inside = -outside;

    typename GridType::Accessor acc = grid.getAccessor();
    openvdb::Coord xyz;
    for (xyz[0]=0; xyz[0]<dim[0]; ++xyz[0]) {
        for (xyz[1]=0; xyz[1]<dim[1]; ++xyz[1]) {
            for (xyz[2]=0; xyz[2]<dim[2]; ++xyz[2]) {
                const openvdb::Vec3R p = grid.transform().indexToWorld(xyz);
                const float dist = float((p-center).length() - radius);
                OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN
                ValueT val = ValueT(zero + dist);
                OPENVDB_NO_TYPE_CONVERSION_WARNING_END
                switch (mode) {
                case SPHERE_DENSE:
                    acc.setValue(xyz, val);
                    break;
                case SPHERE_DENSE_NARROW_BAND:
                    acc.setValue(xyz, val < inside ? inside : outside < val ? outside : val);
                    break;
                case SPHERE_SPARSE_NARROW_BAND:
                    if (val < inside)
                        acc.setValueOff(xyz, inside);
                    else if (outside < val)
                        acc.setValueOff(xyz, outside);
                    else
                        acc.setValue(xyz, val);
                }
            }
        }
    }
    //if (mode == SPHERE_SPARSE_NARROW_BAND) grid.tree().prune();
    if (mode == SPHERE_SPARSE_NARROW_BAND) openvdb::tools::pruneLevelSet(grid.tree());
}

// Template specialization for boolean trees (mostly a dummy implementation)
template<>
inline void
makeSphere<openvdb::BoolGrid>(const openvdb::Coord& dim, const openvdb::Vec3f& center,
                              float radius, openvdb::BoolGrid& grid, SphereMode)
{
    openvdb::BoolGrid::Accessor acc = grid.getAccessor();
    openvdb::Coord xyz;
    for (xyz[0]=0; xyz[0]<dim[0]; ++xyz[0]) {
        for (xyz[1]=0; xyz[1]<dim[1]; ++xyz[1]) {
            for (xyz[2]=0; xyz[2]<dim[2]; ++xyz[2]) {
                const openvdb::Vec3R p = grid.transform().indexToWorld(xyz);
                const float dist = static_cast<float>((p-center).length() - radius);
                if (dist <= 0) acc.setValue(xyz, true);
            }
        }
    }
}

// This method will soon be replaced by the one above!!!!!
template<class GridType>
inline void
makeSphere(const openvdb::Coord& dim, const openvdb::Vec3f& center, float radius,
           GridType &grid, float dx, SphereMode mode)
{
    grid.setTransform(openvdb::math::Transform::createLinearTransform(/*voxel size=*/dx));
    makeSphere<GridType>(dim, center, radius, grid, mode);
}

// Generate random points by uniformly distributing points
// on a unit-sphere.
inline void genPoints(const int numPoints, std::vector<openvdb::Vec3R>& points)
{
    // init
    openvdb::math::Random01 randNumber(0);
    const int n = int(std::sqrt(double(numPoints)));
    const double xScale = (2.0 * M_PI) / double(n);
    const double yScale = M_PI / double(n);

    double x, y, theta, phi;
    openvdb::Vec3R pos;

    points.reserve(n*n);

    // loop over a [0 to n) x [0 to n) grid.
    for (int a = 0; a < n; ++a) {
        for (int b = 0; b < n; ++b) {

            // jitter, move to random pos. inside the current cell
            x = double(a) + randNumber();
            y = double(b) + randNumber();

            // remap to a lat/long map
            theta = y * yScale; // [0 to PI]
            phi   = x * xScale; // [0 to 2PI]

            // convert to cartesian coordinates on a unit sphere.
            // spherical coordinate triplet (r=1, theta, phi)
            pos[0] = std::sin(theta)*std::cos(phi);
            pos[1] = std::sin(theta)*std::sin(phi);
            pos[2] = std::cos(theta);

            points.push_back(pos);
        }
    }
}

// @todo makePlane

} // namespace unittest_util

#endif // OPENVDB_UNITTEST_UTIL_HAS_BEEN_INCLUDED
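// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the header above): rasterizing a sparse
// narrow-band level-set sphere with unittest_util::makeSphere(). It assumes
// the header is reachable as "util.h"; the background value, voxel size,
// domain, center and radius below are arbitrary example values.
#include <openvdb/openvdb.h>
#include "util.h"

int main()
{
    openvdb::initialize();

    // The grid's background value defines the narrow-band half-width
    // (in world units) used by SPHERE_SPARSE_NARROW_BAND.
    openvdb::FloatGrid::Ptr grid = openvdb::FloatGrid::create(/*background=*/2.0f);
    grid->setTransform(openvdb::math::Transform::createLinearTransform(/*voxelSize=*/0.5));

    // Evaluate signed distances for every voxel in the index-space domain
    // [0,0,0] -> dim; center and radius are given in world space.
    unittest_util::makeSphere<openvdb::FloatGrid>(
        /*dim=*/openvdb::Coord(64),
        /*center=*/openvdb::Vec3f(16.0f, 16.0f, 16.0f),
        /*radius=*/10.0f,
        *grid,
        unittest_util::SPHERE_SPARSE_NARROW_BAND);

    openvdb::uninitialize();
    return 0;
}
// ----------------------------------------------------------------------------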
5,260
C++
36.312056
93
0.595817
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestFile.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <openvdb/Exceptions.h> #include <openvdb/io/File.h> #include <openvdb/io/io.h> #include <openvdb/io/Queue.h> #include <openvdb/io/Stream.h> #include <openvdb/Metadata.h> #include <openvdb/math/Transform.h> #include <openvdb/tools/LevelSetUtil.h> // for tools::sdfToFogVolume() #include <openvdb/util/logging.h> #include <openvdb/version.h> #include <openvdb/openvdb.h> #include "util.h" // for unittest_util::makeSphere() #include "gtest/gtest.h" #include <tbb/tbb_thread.h> // for tbb::this_tbb_thread::sleep() #include <algorithm> // for std::sort() #include <cstdio> // for remove() and rename() #include <fstream> #include <functional> // for std::bind() #include <iostream> #include <map> #include <memory> #include <set> #include <sstream> #include <string> #include <vector> #include <sys/types.h> // for stat() #include <sys/stat.h> #ifndef _WIN32 #include <unistd.h> #endif #ifdef OPENVDB_USE_BLOSC #include <blosc.h> #include <cstring> // for memset() #endif class TestFile: public ::testing::Test { public: void SetUp() override {} void TearDown() override { openvdb::uninitialize(); } void testHeader(); void testWriteGrid(); void testWriteMultipleGrids(); void testReadGridDescriptors(); void testEmptyGridIO(); void testOpen(); void testDelayedLoadMetadata(); void testNonVdbOpen(); }; //////////////////////////////////////// void TestFile::testHeader() { using namespace openvdb::io; File file("something.vdb2"); std::ostringstream ostr(std::ios_base::binary), ostr2(std::ios_base::binary); file.writeHeader(ostr2, /*seekable=*/true); std::string uuidStr = file.getUniqueTag(); file.writeHeader(ostr, /*seekable=*/true); // Verify that a file gets a new UUID each time it is written. EXPECT_TRUE(!file.isIdentical(uuidStr)); uuidStr = file.getUniqueTag(); std::istringstream istr(ostr.str(), std::ios_base::binary); bool unique=true; EXPECT_NO_THROW(unique=file.readHeader(istr)); EXPECT_TRUE(!unique);//reading same file again uint32_t version = openvdb::OPENVDB_FILE_VERSION; EXPECT_EQ(version, file.fileVersion()); EXPECT_EQ(openvdb::OPENVDB_LIBRARY_MAJOR_VERSION, file.libraryVersion().first); EXPECT_EQ(openvdb::OPENVDB_LIBRARY_MINOR_VERSION, file.libraryVersion().second); EXPECT_EQ(uuidStr, file.getUniqueTag()); //std::cerr << "\nuuid=" << uuidStr << std::endl; EXPECT_TRUE(file.isIdentical(uuidStr)); remove("something.vdb2"); } TEST_F(TestFile, testHeader) { testHeader(); } void TestFile::testWriteGrid() { using namespace openvdb; using namespace openvdb::io; using TreeType = Int32Tree; using GridType = Grid<TreeType>; logging::LevelScope suppressLogging{logging::Level::Fatal}; File file("something.vdb2"); std::ostringstream ostr(std::ios_base::binary); // Create a grid with transform. math::Transform::Ptr trans = math::Transform::createLinearTransform(0.1); GridType::Ptr grid = createGrid<GridType>(/*bg=*/1); TreeType& tree = grid->tree(); grid->setTransform(trans); tree.setValue(Coord(10, 1, 2), 10); tree.setValue(Coord(0, 0, 0), 5); // Add some metadata. 
Metadata::clearRegistry(); StringMetadata::registerType(); const std::string meta0Val, meta1Val("Hello, world."); Metadata::Ptr stringMetadata = Metadata::createMetadata(typeNameAsString<std::string>()); EXPECT_TRUE(stringMetadata); if (stringMetadata) { grid->insertMeta("meta0", *stringMetadata); grid->metaValue<std::string>("meta0") = meta0Val; grid->insertMeta("meta1", *stringMetadata); grid->metaValue<std::string>("meta1") = meta1Val; } // Create the grid descriptor out of this grid. GridDescriptor gd(Name("temperature"), grid->type()); // Write out the grid. file.writeGrid(gd, grid, ostr, /*seekable=*/true); EXPECT_TRUE(gd.getGridPos() != 0); EXPECT_TRUE(gd.getBlockPos() != 0); EXPECT_TRUE(gd.getEndPos() != 0); // Read in the grid descriptor. GridDescriptor gd2; std::istringstream istr(ostr.str(), std::ios_base::binary); // Since the input is only a fragment of a VDB file (in particular, // it doesn't have a header), set the file format version number explicitly. io::setCurrentVersion(istr); GridBase::Ptr gd2_grid; EXPECT_THROW(gd2.read(istr), openvdb::LookupError); // Register the grid and the transform and the blocks. GridBase::clearRegistry(); GridType::registerGrid(); // Register transform maps math::MapRegistry::clear(); math::AffineMap::registerMap(); math::ScaleMap::registerMap(); math::UniformScaleMap::registerMap(); math::TranslationMap::registerMap(); math::ScaleTranslateMap::registerMap(); math::UniformScaleTranslateMap::registerMap(); math::NonlinearFrustumMap::registerMap(); istr.seekg(0, std::ios_base::beg); EXPECT_NO_THROW(gd2_grid = gd2.read(istr)); EXPECT_EQ(gd.gridName(), gd2.gridName()); EXPECT_EQ(GridType::gridType(), gd2_grid->type()); EXPECT_EQ(gd.getGridPos(), gd2.getGridPos()); EXPECT_EQ(gd.getBlockPos(), gd2.getBlockPos()); EXPECT_EQ(gd.getEndPos(), gd2.getEndPos()); // Position the stream to beginning of the grid storage and read the grid. gd2.seekToGrid(istr); Archive::readGridCompression(istr); gd2_grid->readMeta(istr); gd2_grid->readTransform(istr); gd2_grid->readTopology(istr); // Remove delay load metadata if it exists. if ((*gd2_grid)["file_delayed_load"]) { gd2_grid->removeMeta("file_delayed_load"); } // Ensure that we have the same metadata. EXPECT_EQ(grid->metaCount(), gd2_grid->metaCount()); EXPECT_TRUE((*gd2_grid)["meta0"]); EXPECT_TRUE((*gd2_grid)["meta1"]); EXPECT_EQ(meta0Val, gd2_grid->metaValue<std::string>("meta0")); EXPECT_EQ(meta1Val, gd2_grid->metaValue<std::string>("meta1")); // Ensure that we have the same topology and transform. EXPECT_EQ( grid->baseTree().leafCount(), gd2_grid->baseTree().leafCount()); EXPECT_EQ( grid->baseTree().nonLeafCount(), gd2_grid->baseTree().nonLeafCount()); EXPECT_EQ( grid->baseTree().treeDepth(), gd2_grid->baseTree().treeDepth()); //EXPECT_EQ(0.1, gd2_grid->getTransform()->getVoxelSizeX()); //EXPECT_EQ(0.1, gd2_grid->getTransform()->getVoxelSizeY()); //EXPECT_EQ(0.1, gd2_grid->getTransform()->getVoxelSizeZ()); // Read in the data blocks. gd2.seekToBlocks(istr); gd2_grid->readBuffers(istr); TreeType::Ptr tree2 = DynamicPtrCast<TreeType>(gd2_grid->baseTreePtr()); EXPECT_TRUE(tree2.get() != nullptr); EXPECT_EQ(10, tree2->getValue(Coord(10, 1, 2))); EXPECT_EQ(5, tree2->getValue(Coord(0, 0, 0))); EXPECT_EQ(1, tree2->getValue(Coord(1000, 1000, 16000))); // Clear registries. 
GridBase::clearRegistry(); Metadata::clearRegistry(); math::MapRegistry::clear(); remove("something.vdb2"); } TEST_F(TestFile, testWriteGrid) { testWriteGrid(); } void TestFile::testWriteMultipleGrids() { using namespace openvdb; using namespace openvdb::io; using TreeType = Int32Tree; using GridType = Grid<TreeType>; logging::LevelScope suppressLogging{logging::Level::Fatal}; File file("something.vdb2"); std::ostringstream ostr(std::ios_base::binary); // Create a grid with transform. GridType::Ptr grid = createGrid<GridType>(/*bg=*/1); TreeType& tree = grid->tree(); tree.setValue(Coord(10, 1, 2), 10); tree.setValue(Coord(0, 0, 0), 5); math::Transform::Ptr trans = math::Transform::createLinearTransform(0.1); grid->setTransform(trans); GridType::Ptr grid2 = createGrid<GridType>(/*bg=*/2); TreeType& tree2 = grid2->tree(); tree2.setValue(Coord(0, 0, 0), 10); tree2.setValue(Coord(1000, 1000, 1000), 50); math::Transform::Ptr trans2 = math::Transform::createLinearTransform(0.2); grid2->setTransform(trans2); // Create the grid descriptor out of this grid. GridDescriptor gd(Name("temperature"), grid->type()); GridDescriptor gd2(Name("density"), grid2->type()); // Write out the grids. file.writeGrid(gd, grid, ostr, /*seekable=*/true); file.writeGrid(gd2, grid2, ostr, /*seekable=*/true); EXPECT_TRUE(gd.getGridPos() != 0); EXPECT_TRUE(gd.getBlockPos() != 0); EXPECT_TRUE(gd.getEndPos() != 0); EXPECT_TRUE(gd2.getGridPos() != 0); EXPECT_TRUE(gd2.getBlockPos() != 0); EXPECT_TRUE(gd2.getEndPos() != 0); // register the grid GridBase::clearRegistry(); GridType::registerGrid(); // register maps math::MapRegistry::clear(); math::AffineMap::registerMap(); math::ScaleMap::registerMap(); math::UniformScaleMap::registerMap(); math::TranslationMap::registerMap(); math::ScaleTranslateMap::registerMap(); math::UniformScaleTranslateMap::registerMap(); math::NonlinearFrustumMap::registerMap(); // Read in the first grid descriptor. GridDescriptor gd_in; std::istringstream istr(ostr.str(), std::ios_base::binary); io::setCurrentVersion(istr); GridBase::Ptr gd_in_grid; EXPECT_NO_THROW(gd_in_grid = gd_in.read(istr)); // Ensure read in the right values. EXPECT_EQ(gd.gridName(), gd_in.gridName()); EXPECT_EQ(GridType::gridType(), gd_in_grid->type()); EXPECT_EQ(gd.getGridPos(), gd_in.getGridPos()); EXPECT_EQ(gd.getBlockPos(), gd_in.getBlockPos()); EXPECT_EQ(gd.getEndPos(), gd_in.getEndPos()); // Position the stream to beginning of the grid storage and read the grid. gd_in.seekToGrid(istr); Archive::readGridCompression(istr); gd_in_grid->readMeta(istr); gd_in_grid->readTransform(istr); gd_in_grid->readTopology(istr); // Ensure that we have the same topology and transform. EXPECT_EQ( grid->baseTree().leafCount(), gd_in_grid->baseTree().leafCount()); EXPECT_EQ( grid->baseTree().nonLeafCount(), gd_in_grid->baseTree().nonLeafCount()); EXPECT_EQ( grid->baseTree().treeDepth(), gd_in_grid->baseTree().treeDepth()); // EXPECT_EQ(0.1, gd_in_grid->getTransform()->getVoxelSizeX()); // EXPECT_EQ(0.1, gd_in_grid->getTransform()->getVoxelSizeY()); // EXPECT_EQ(0.1, gd_in_grid->getTransform()->getVoxelSizeZ()); // Read in the data blocks. 
gd_in.seekToBlocks(istr); gd_in_grid->readBuffers(istr); TreeType::Ptr grid_in = DynamicPtrCast<TreeType>(gd_in_grid->baseTreePtr()); EXPECT_TRUE(grid_in.get() != nullptr); EXPECT_EQ(10, grid_in->getValue(Coord(10, 1, 2))); EXPECT_EQ(5, grid_in->getValue(Coord(0, 0, 0))); EXPECT_EQ(1, grid_in->getValue(Coord(1000, 1000, 16000))); ///////////////////////////////////////////////////////////////// // Now read in the second grid descriptor. Make use of hte end offset. /////////////////////////////////////////////////////////////// gd_in.seekToEnd(istr); GridDescriptor gd2_in; GridBase::Ptr gd2_in_grid; EXPECT_NO_THROW(gd2_in_grid = gd2_in.read(istr)); // Ensure that we read in the right values. EXPECT_EQ(gd2.gridName(), gd2_in.gridName()); EXPECT_EQ(TreeType::treeType(), gd2_in_grid->type()); EXPECT_EQ(gd2.getGridPos(), gd2_in.getGridPos()); EXPECT_EQ(gd2.getBlockPos(), gd2_in.getBlockPos()); EXPECT_EQ(gd2.getEndPos(), gd2_in.getEndPos()); // Position the stream to beginning of the grid storage and read the grid. gd2_in.seekToGrid(istr); Archive::readGridCompression(istr); gd2_in_grid->readMeta(istr); gd2_in_grid->readTransform(istr); gd2_in_grid->readTopology(istr); // Ensure that we have the same topology and transform. EXPECT_EQ( grid2->baseTree().leafCount(), gd2_in_grid->baseTree().leafCount()); EXPECT_EQ( grid2->baseTree().nonLeafCount(), gd2_in_grid->baseTree().nonLeafCount()); EXPECT_EQ( grid2->baseTree().treeDepth(), gd2_in_grid->baseTree().treeDepth()); // EXPECT_EQ(0.2, gd2_in_grid->getTransform()->getVoxelSizeX()); // EXPECT_EQ(0.2, gd2_in_grid->getTransform()->getVoxelSizeY()); // EXPECT_EQ(0.2, gd2_in_grid->getTransform()->getVoxelSizeZ()); // Read in the data blocks. gd2_in.seekToBlocks(istr); gd2_in_grid->readBuffers(istr); TreeType::Ptr grid2_in = DynamicPtrCast<TreeType>(gd2_in_grid->baseTreePtr()); EXPECT_TRUE(grid2_in.get() != nullptr); EXPECT_EQ(50, grid2_in->getValue(Coord(1000, 1000, 1000))); EXPECT_EQ(10, grid2_in->getValue(Coord(0, 0, 0))); EXPECT_EQ(2, grid2_in->getValue(Coord(100000, 100000, 16000))); // Clear registries. GridBase::clearRegistry(); math::MapRegistry::clear(); remove("something.vdb2"); } TEST_F(TestFile, testWriteMultipleGrids) { testWriteMultipleGrids(); } TEST_F(TestFile, testWriteFloatAsHalf) { using namespace openvdb; using namespace openvdb::io; using TreeType = Vec3STree; using GridType = Grid<TreeType>; // Register all grid types. initialize(); // Ensure that the registry is cleared on exit. struct Local { static void uninitialize(char*) { openvdb::uninitialize(); } }; SharedPtr<char> onExit(nullptr, Local::uninitialize); // Create two test grids. GridType::Ptr grid1 = createGrid<GridType>(/*bg=*/Vec3s(1, 1, 1)); TreeType& tree1 = grid1->tree(); EXPECT_TRUE(grid1.get() != nullptr); grid1->setTransform(math::Transform::createLinearTransform(0.1)); grid1->setName("grid1"); GridType::Ptr grid2 = createGrid<GridType>(/*bg=*/Vec3s(2, 2, 2)); EXPECT_TRUE(grid2.get() != nullptr); TreeType& tree2 = grid2->tree(); grid2->setTransform(math::Transform::createLinearTransform(0.2)); // Flag this grid for 16-bit float output. grid2->setSaveFloatAsHalf(true); grid2->setName("grid2"); for (int x = 0; x < 40; ++x) { for (int y = 0; y < 40; ++y) { for (int z = 0; z < 40; ++z) { tree1.setValue(Coord(x, y, z), Vec3s(float(x), float(y), float(z))); tree2.setValue(Coord(x, y, z), Vec3s(float(x), float(y), float(z))); } } } GridPtrVec grids; grids.push_back(grid1); grids.push_back(grid2); const char* filename = "something.vdb2"; { // Write both grids to a file. 
File vdbFile(filename); vdbFile.write(grids); } { // Verify that both grids can be read back successfully from the file. File vdbFile(filename); vdbFile.open(); GridBase::Ptr bgrid1 = vdbFile.readGrid("grid1"), bgrid2 = vdbFile.readGrid("grid2"); vdbFile.close(); EXPECT_TRUE(bgrid1.get() != nullptr); EXPECT_TRUE(bgrid1->isType<GridType>()); EXPECT_TRUE(bgrid2.get() != nullptr); EXPECT_TRUE(bgrid2->isType<GridType>()); const TreeType& btree1 = StaticPtrCast<GridType>(bgrid1)->tree(); EXPECT_EQ(Vec3s(10, 10, 10), btree1.getValue(Coord(10, 10, 10))); const TreeType& btree2 = StaticPtrCast<GridType>(bgrid2)->tree(); EXPECT_EQ(Vec3s(10, 10, 10), btree2.getValue(Coord(10, 10, 10))); } } TEST_F(TestFile, testWriteInstancedGrids) { using namespace openvdb; // Register data types. openvdb::initialize(); // Remove something.vdb2 when done. We must declare this here before the // other grid smart_ptr's because we re-use them in the test several times. // We will not be able to remove something.vdb2 on Windows if the pointers // are still referencing data opened by the "file" variable. const char* filename = "something.vdb2"; SharedPtr<const char> scopedFile(filename, ::remove); // Create grids. Int32Tree::Ptr tree1(new Int32Tree(1)); FloatTree::Ptr tree2(new FloatTree(2.0)); GridBase::Ptr grid1 = createGrid(tree1), grid2 = createGrid(tree1), // instance of grid1 grid3 = createGrid(tree2), grid4 = createGrid(tree2); // instance of grid3 grid1->setName("density"); grid2->setName("density_copy"); // Leave grid3 and grid4 unnamed. // Create transforms. math::Transform::Ptr trans1 = math::Transform::createLinearTransform(0.1); math::Transform::Ptr trans2 = math::Transform::createLinearTransform(0.1); grid1->setTransform(trans1); grid2->setTransform(trans2); grid3->setTransform(trans2); grid4->setTransform(trans1); // Set some values. tree1->setValue(Coord(0, 0, 0), 5); tree1->setValue(Coord(100, 0, 0), 6); tree2->setValue(Coord(0, 0, 0), 10); tree2->setValue(Coord(0, 100, 0), 11); MetaMap::Ptr meta(new MetaMap); meta->insertMeta("author", StringMetadata("Einstein")); meta->insertMeta("year", Int32Metadata(2009)); GridPtrVecPtr grids(new GridPtrVec); grids->push_back(grid1); grids->push_back(grid2); grids->push_back(grid3); grids->push_back(grid4); // Write the grids to a file and then close the file. { io::File vdbFile(filename); vdbFile.write(*grids, *meta); } meta.reset(); // Read the grids back in. io::File file(filename); file.open(); grids = file.getGrids(); meta = file.getMetadata(); // Verify the metadata. EXPECT_TRUE(meta.get() != nullptr); EXPECT_EQ(2, int(meta->metaCount())); EXPECT_EQ(std::string("Einstein"), meta->metaValue<std::string>("author")); EXPECT_EQ(2009, meta->metaValue<int32_t>("year")); // Verify the grids. EXPECT_TRUE(grids.get() != nullptr); EXPECT_EQ(4, int(grids->size())); GridBase::Ptr grid = findGridByName(*grids, "density"); EXPECT_TRUE(grid.get() != nullptr); Int32Tree::Ptr density = gridPtrCast<Int32Grid>(grid)->treePtr(); EXPECT_TRUE(density.get() != nullptr); grid.reset(); grid = findGridByName(*grids, "density_copy"); EXPECT_TRUE(grid.get() != nullptr); EXPECT_TRUE(gridPtrCast<Int32Grid>(grid)->treePtr().get() != nullptr); // Verify that "density_copy" is an instance of (i.e., shares a tree with) "density". 
EXPECT_EQ(density, gridPtrCast<Int32Grid>(grid)->treePtr()); grid.reset(); grid = findGridByName(*grids, ""); EXPECT_TRUE(grid.get() != nullptr); FloatTree::Ptr temperature = gridPtrCast<FloatGrid>(grid)->treePtr(); EXPECT_TRUE(temperature.get() != nullptr); grid.reset(); for (GridPtrVec::reverse_iterator it = grids->rbegin(); !grid && it != grids->rend(); ++it) { // Search for the second unnamed grid starting from the end of the list. if ((*it)->getName() == "") grid = *it; } EXPECT_TRUE(grid.get() != nullptr); EXPECT_TRUE(gridPtrCast<FloatGrid>(grid)->treePtr().get() != nullptr); // Verify that the second unnamed grid is an instance of the first. EXPECT_EQ(temperature, gridPtrCast<FloatGrid>(grid)->treePtr()); EXPECT_NEAR(5, density->getValue(Coord(0, 0, 0)), /*tolerance=*/0); EXPECT_NEAR(6, density->getValue(Coord(100, 0, 0)), /*tolerance=*/0); EXPECT_NEAR(10, temperature->getValue(Coord(0, 0, 0)), /*tolerance=*/0); EXPECT_NEAR(11, temperature->getValue(Coord(0, 100, 0)), /*tolerance=*/0); // Reread with instancing disabled. file.close(); file.setInstancingEnabled(false); file.open(); grids = file.getGrids(); EXPECT_EQ(4, int(grids->size())); grid = findGridByName(*grids, "density"); EXPECT_TRUE(grid.get() != nullptr); density = gridPtrCast<Int32Grid>(grid)->treePtr(); EXPECT_TRUE(density.get() != nullptr); grid = findGridByName(*grids, "density_copy"); EXPECT_TRUE(grid.get() != nullptr); EXPECT_TRUE(gridPtrCast<Int32Grid>(grid)->treePtr().get() != nullptr); // Verify that "density_copy" is *not* an instance of "density". EXPECT_TRUE(gridPtrCast<Int32Grid>(grid)->treePtr() != density); // Verify that the two unnamed grids are not instances of each other. grid = findGridByName(*grids, ""); EXPECT_TRUE(grid.get() != nullptr); temperature = gridPtrCast<FloatGrid>(grid)->treePtr(); EXPECT_TRUE(temperature.get() != nullptr); grid.reset(); for (GridPtrVec::reverse_iterator it = grids->rbegin(); !grid && it != grids->rend(); ++it) { // Search for the second unnamed grid starting from the end of the list. if ((*it)->getName() == "") grid = *it; } EXPECT_TRUE(grid.get() != nullptr); EXPECT_TRUE(gridPtrCast<FloatGrid>(grid)->treePtr().get() != nullptr); EXPECT_TRUE(gridPtrCast<FloatGrid>(grid)->treePtr() != temperature); // Rewrite with instancing disabled, then reread with instancing enabled. file.close(); { /// @todo (FX-7063) For now, write to a new file, then, when there's /// no longer a need for delayed load from the old file, replace it /// with the new file. const char* tempFilename = "somethingelse.vdb"; SharedPtr<const char> scopedTempFile(tempFilename, ::remove); io::File vdbFile(tempFilename); vdbFile.setInstancingEnabled(false); vdbFile.write(*grids, *meta); grids.reset(); // Note: Windows requires that the destination not exist, before we can rename to it. std::remove(filename); std::rename(tempFilename, filename); } file.setInstancingEnabled(true); file.open(); grids = file.getGrids(); EXPECT_EQ(4, int(grids->size())); // Verify that "density_copy" is not an instance of "density". 
grid = findGridByName(*grids, "density"); EXPECT_TRUE(grid.get() != nullptr); density = gridPtrCast<Int32Grid>(grid)->treePtr(); EXPECT_TRUE(density.get() != nullptr); EXPECT_TRUE(density->unallocatedLeafCount() > 0); EXPECT_EQ(density->leafCount(), density->unallocatedLeafCount()); grid = findGridByName(*grids, "density_copy"); EXPECT_TRUE(grid.get() != nullptr); EXPECT_TRUE(gridPtrCast<Int32Grid>(grid)->treePtr().get() != nullptr); EXPECT_TRUE(gridPtrCast<Int32Grid>(grid)->treePtr() != density); // Verify that the two unnamed grids are not instances of each other. grid = findGridByName(*grids, ""); EXPECT_TRUE(grid.get() != nullptr); temperature = gridPtrCast<FloatGrid>(grid)->treePtr(); EXPECT_TRUE(temperature.get() != nullptr); grid.reset(); for (GridPtrVec::reverse_iterator it = grids->rbegin(); !grid && it != grids->rend(); ++it) { // Search for the second unnamed grid starting from the end of the list. if ((*it)->getName() == "") grid = *it; } EXPECT_TRUE(grid.get() != nullptr); EXPECT_TRUE(gridPtrCast<FloatGrid>(grid)->treePtr().get() != nullptr); EXPECT_TRUE(gridPtrCast<FloatGrid>(grid)->treePtr() != temperature); } void TestFile::testReadGridDescriptors() { using namespace openvdb; using namespace openvdb::io; using GridType = Int32Grid; using TreeType = GridType::TreeType; File file("something.vdb2"); std::ostringstream ostr(std::ios_base::binary); // Create a grid with transform. GridType::Ptr grid = createGrid<GridType>(1); TreeType& tree = grid->tree(); tree.setValue(Coord(10, 1, 2), 10); tree.setValue(Coord(0, 0, 0), 5); math::Transform::Ptr trans = math::Transform::createLinearTransform(0.1); grid->setTransform(trans); // Create another grid with transform. GridType::Ptr grid2 = createGrid<GridType>(2); TreeType& tree2 = grid2->tree(); tree2.setValue(Coord(0, 0, 0), 10); tree2.setValue(Coord(1000, 1000, 1000), 50); math::Transform::Ptr trans2 = math::Transform::createLinearTransform(0.2); grid2->setTransform(trans2); // Create the grid descriptor out of this grid. GridDescriptor gd(Name("temperature"), grid->type()); GridDescriptor gd2(Name("density"), grid2->type()); // Write out the number of grids. int32_t gridCount = 2; ostr.write(reinterpret_cast<char*>(&gridCount), sizeof(int32_t)); // Write out the grids. file.writeGrid(gd, grid, ostr, /*seekable=*/true); file.writeGrid(gd2, grid2, ostr, /*seekable=*/true); // Register the grid and the transform and the blocks. GridBase::clearRegistry(); GridType::registerGrid(); // register maps math::MapRegistry::clear(); math::AffineMap::registerMap(); math::ScaleMap::registerMap(); math::UniformScaleMap::registerMap(); math::TranslationMap::registerMap(); math::ScaleTranslateMap::registerMap(); math::UniformScaleTranslateMap::registerMap(); math::NonlinearFrustumMap::registerMap(); // Read in the grid descriptors. File file2("something.vdb2"); std::istringstream istr(ostr.str(), std::ios_base::binary); io::setCurrentVersion(istr); file2.readGridDescriptors(istr); // Compare with the initial grid descriptors. 
File::NameMapCIter it = file2.findDescriptor("temperature"); EXPECT_TRUE(it != file2.gridDescriptors().end()); GridDescriptor file2gd = it->second; EXPECT_EQ(gd.gridName(), file2gd.gridName()); EXPECT_EQ(gd.getGridPos(), file2gd.getGridPos()); EXPECT_EQ(gd.getBlockPos(), file2gd.getBlockPos()); EXPECT_EQ(gd.getEndPos(), file2gd.getEndPos()); it = file2.findDescriptor("density"); EXPECT_TRUE(it != file2.gridDescriptors().end()); file2gd = it->second; EXPECT_EQ(gd2.gridName(), file2gd.gridName()); EXPECT_EQ(gd2.getGridPos(), file2gd.getGridPos()); EXPECT_EQ(gd2.getBlockPos(), file2gd.getBlockPos()); EXPECT_EQ(gd2.getEndPos(), file2gd.getEndPos()); // Clear registries. GridBase::clearRegistry(); math::MapRegistry::clear(); remove("something.vdb2"); } TEST_F(TestFile, testReadGridDescriptors) { testReadGridDescriptors(); } TEST_F(TestFile, testGridNaming) { using namespace openvdb; using namespace openvdb::io; using TreeType = Int32Tree; // Register data types. openvdb::initialize(); logging::LevelScope suppressLogging{logging::Level::Fatal}; // Create several grids that share a single tree. TreeType::Ptr tree(new TreeType(1)); tree->setValue(Coord(10, 1, 2), 10); tree->setValue(Coord(0, 0, 0), 5); GridBase::Ptr grid1 = openvdb::createGrid(tree), grid2 = openvdb::createGrid(tree), grid3 = openvdb::createGrid(tree); std::vector<GridBase::Ptr> gridVec; gridVec.push_back(grid1); gridVec.push_back(grid2); gridVec.push_back(grid3); // Give all grids the same name, but also some metadata to distinguish them. for (int n = 0; n <= 2; ++n) { gridVec[n]->setName("grid"); gridVec[n]->insertMeta("index", Int32Metadata(n)); } const char* filename = "testGridNaming.vdb2"; SharedPtr<const char> scopedFile(filename, ::remove); // Test first with grid instancing disabled, then with instancing enabled. for (int instancing = 0; instancing <= 1; ++instancing) { { // Write the grids out to a file. File file(filename); file.setInstancingEnabled(instancing); file.write(gridVec); } // Open the file for reading. File file(filename); file.setInstancingEnabled(instancing); file.open(); int n = 0; for (File::NameIterator i = file.beginName(), e = file.endName(); i != e; ++i, ++n) { EXPECT_TRUE(file.hasGrid(i.gridName())); } // Verify that the file contains three grids. EXPECT_EQ(3, n); // Read each grid. for (n = -1; n <= 2; ++n) { openvdb::Name name("grid"); // On the first iteration, read the grid named "grid", then read "grid[0]" // (which is synonymous with "grid"), then "grid[1]", then "grid[2]". if (n >= 0) { name = GridDescriptor::nameAsString(GridDescriptor::addSuffix(name, n)); } EXPECT_TRUE(file.hasGrid(name)); // Read the current grid. GridBase::ConstPtr grid = file.readGrid(name); EXPECT_TRUE(grid.get() != nullptr); // Verify that the grid is named "grid". EXPECT_EQ(openvdb::Name("grid"), grid->getName()); EXPECT_EQ((n < 0 ? 0 : n), grid->metaValue<openvdb::Int32>("index")); } // Read all three grids at once. GridPtrVecPtr allGrids = file.getGrids(); EXPECT_TRUE(allGrids.get() != nullptr); EXPECT_EQ(3, int(allGrids->size())); GridBase::ConstPtr firstGrid; std::vector<int> indices; for (GridPtrVecCIter i = allGrids->begin(), e = allGrids->end(); i != e; ++i) { GridBase::ConstPtr grid = *i; EXPECT_TRUE(grid.get() != nullptr); indices.push_back(grid->metaValue<openvdb::Int32>("index")); // If instancing is enabled, verify that all grids share the same tree. 
if (instancing) { if (!firstGrid) firstGrid = grid; EXPECT_EQ(firstGrid->baseTreePtr(), grid->baseTreePtr()); } } // Verify that three distinct grids were read, // by examining their "index" metadata. EXPECT_EQ(3, int(indices.size())); std::sort(indices.begin(), indices.end()); EXPECT_EQ(0, indices[0]); EXPECT_EQ(1, indices[1]); EXPECT_EQ(2, indices[2]); } { // Try writing and then reading a grid with a weird name // that might conflict with the grid name indexing scheme. const openvdb::Name weirdName("grid[4]"); gridVec[0]->setName(weirdName); { File file(filename); file.write(gridVec); } File file(filename); file.open(); // Verify that the grid can be read and that its index is 0. GridBase::ConstPtr grid = file.readGrid(weirdName); EXPECT_TRUE(grid.get() != nullptr); EXPECT_EQ(weirdName, grid->getName()); EXPECT_EQ(0, grid->metaValue<openvdb::Int32>("index")); // Verify that the other grids can still be read successfully. grid = file.readGrid("grid[0]"); EXPECT_TRUE(grid.get() != nullptr); EXPECT_EQ(openvdb::Name("grid"), grid->getName()); // Because there are now only two grids named "grid", the one with // index 1 is now "grid[0]". EXPECT_EQ(1, grid->metaValue<openvdb::Int32>("index")); grid = file.readGrid("grid[1]"); EXPECT_TRUE(grid.get() != nullptr); EXPECT_EQ(openvdb::Name("grid"), grid->getName()); // Because there are now only two grids named "grid", the one with // index 2 is now "grid[1]". EXPECT_EQ(2, grid->metaValue<openvdb::Int32>("index")); // Verify that there is no longer a third grid named "grid". EXPECT_THROW(file.readGrid("grid[2]"), openvdb::KeyError); } } TEST_F(TestFile, testEmptyFile) { using namespace openvdb; using namespace openvdb::io; const char* filename = "testEmptyFile.vdb2"; SharedPtr<const char> scopedFile(filename, ::remove); { File file(filename); file.write(GridPtrVec(), MetaMap()); } File file(filename); file.open(); GridPtrVecPtr grids = file.getGrids(); MetaMap::Ptr meta = file.getMetadata(); EXPECT_TRUE(grids.get() != nullptr); EXPECT_TRUE(grids->empty()); EXPECT_TRUE(meta.get() != nullptr); EXPECT_EQ(0, int(meta->metaCount())); } void TestFile::testEmptyGridIO() { using namespace openvdb; using namespace openvdb::io; using GridType = Int32Grid; logging::LevelScope suppressLogging{logging::Level::Fatal}; const char* filename = "something.vdb2"; SharedPtr<const char> scopedFile(filename, ::remove); File file(filename); std::ostringstream ostr(std::ios_base::binary); // Create a grid with transform. GridType::Ptr grid = createGrid<GridType>(/*bg=*/1); math::Transform::Ptr trans = math::Transform::createLinearTransform(0.1); grid->setTransform(trans); // Create another grid with transform. math::Transform::Ptr trans2 = math::Transform::createLinearTransform(0.2); GridType::Ptr grid2 = createGrid<GridType>(/*bg=*/2); grid2->setTransform(trans2); // Create the grid descriptor out of this grid. GridDescriptor gd(Name("temperature"), grid->type()); GridDescriptor gd2(Name("density"), grid2->type()); // Write out the number of grids. int32_t gridCount = 2; ostr.write(reinterpret_cast<char*>(&gridCount), sizeof(int32_t)); // Write out the grids. file.writeGrid(gd, grid, ostr, /*seekable=*/true); file.writeGrid(gd2, grid2, ostr, /*seekable=*/true); // Ensure that the block offset and the end offsets are equivalent. EXPECT_EQ(0, int(grid->baseTree().leafCount())); EXPECT_EQ(0, int(grid2->baseTree().leafCount())); EXPECT_EQ(gd.getEndPos(), gd.getBlockPos()); EXPECT_EQ(gd2.getEndPos(), gd2.getBlockPos()); // Register the grid and the transform and the blocks. 
GridBase::clearRegistry(); GridType::registerGrid(); // register maps math::MapRegistry::clear(); math::AffineMap::registerMap(); math::ScaleMap::registerMap(); math::UniformScaleMap::registerMap(); math::TranslationMap::registerMap(); math::ScaleTranslateMap::registerMap(); math::UniformScaleTranslateMap::registerMap(); math::NonlinearFrustumMap::registerMap(); // Read in the grid descriptors. File file2(filename); std::istringstream istr(ostr.str(), std::ios_base::binary); io::setCurrentVersion(istr); file2.readGridDescriptors(istr); // Compare with the initial grid descriptors. File::NameMapCIter it = file2.findDescriptor("temperature"); EXPECT_TRUE(it != file2.gridDescriptors().end()); GridDescriptor file2gd = it->second; file2gd.seekToGrid(istr); GridBase::Ptr gd_grid = GridBase::createGrid(file2gd.gridType()); Archive::readGridCompression(istr); gd_grid->readMeta(istr); gd_grid->readTransform(istr); gd_grid->readTopology(istr); EXPECT_EQ(gd.gridName(), file2gd.gridName()); EXPECT_TRUE(gd_grid.get() != nullptr); EXPECT_EQ(0, int(gd_grid->baseTree().leafCount())); //EXPECT_EQ(8, int(gd_grid->baseTree().nonLeafCount())); EXPECT_EQ(4, int(gd_grid->baseTree().treeDepth())); EXPECT_EQ(gd.getGridPos(), file2gd.getGridPos()); EXPECT_EQ(gd.getBlockPos(), file2gd.getBlockPos()); EXPECT_EQ(gd.getEndPos(), file2gd.getEndPos()); it = file2.findDescriptor("density"); EXPECT_TRUE(it != file2.gridDescriptors().end()); file2gd = it->second; file2gd.seekToGrid(istr); gd_grid = GridBase::createGrid(file2gd.gridType()); Archive::readGridCompression(istr); gd_grid->readMeta(istr); gd_grid->readTransform(istr); gd_grid->readTopology(istr); EXPECT_EQ(gd2.gridName(), file2gd.gridName()); EXPECT_TRUE(gd_grid.get() != nullptr); EXPECT_EQ(0, int(gd_grid->baseTree().leafCount())); //EXPECT_EQ(8, int(gd_grid->nonLeafCount())); EXPECT_EQ(4, int(gd_grid->baseTree().treeDepth())); EXPECT_EQ(gd2.getGridPos(), file2gd.getGridPos()); EXPECT_EQ(gd2.getBlockPos(), file2gd.getBlockPos()); EXPECT_EQ(gd2.getEndPos(), file2gd.getEndPos()); // Clear registries. GridBase::clearRegistry(); math::MapRegistry::clear(); } TEST_F(TestFile, testEmptyGridIO) { testEmptyGridIO(); } void TestFile::testOpen() { using namespace openvdb; using FloatGrid = openvdb::FloatGrid; using IntGrid = openvdb::Int32Grid; using FloatTree = FloatGrid::TreeType; using IntTree = Int32Grid::TreeType; // Create a VDB to write. // Create grids IntGrid::Ptr grid = createGrid<IntGrid>(/*bg=*/1); IntTree& tree = grid->tree(); grid->setName("density"); FloatGrid::Ptr grid2 = createGrid<FloatGrid>(/*bg=*/2.0); FloatTree& tree2 = grid2->tree(); grid2->setName("temperature"); // Create transforms math::Transform::Ptr trans = math::Transform::createLinearTransform(0.1); math::Transform::Ptr trans2 = math::Transform::createLinearTransform(0.1); grid->setTransform(trans); grid2->setTransform(trans2); // Set some values tree.setValue(Coord(0, 0, 0), 5); tree.setValue(Coord(100, 0, 0), 6); tree2.setValue(Coord(0, 0, 0), 10); tree2.setValue(Coord(0, 100, 0), 11); MetaMap meta; meta.insertMeta("author", StringMetadata("Einstein")); meta.insertMeta("year", Int32Metadata(2009)); GridPtrVec grids; grids.push_back(grid); grids.push_back(grid2); EXPECT_TRUE(findGridByName(grids, "density") == grid); EXPECT_TRUE(findGridByName(grids, "temperature") == grid2); EXPECT_TRUE(meta.metaValue<std::string>("author") == "Einstein"); EXPECT_EQ(2009, meta.metaValue<int32_t>("year")); // Register grid and transform. 
GridBase::clearRegistry(); IntGrid::registerGrid(); FloatGrid::registerGrid(); Metadata::clearRegistry(); StringMetadata::registerType(); Int32Metadata::registerType(); // register maps math::MapRegistry::clear(); math::AffineMap::registerMap(); math::ScaleMap::registerMap(); math::UniformScaleMap::registerMap(); math::TranslationMap::registerMap(); math::ScaleTranslateMap::registerMap(); math::UniformScaleTranslateMap::registerMap(); math::NonlinearFrustumMap::registerMap(); // Write the vdb out to a file. io::File vdbfile("something.vdb2"); vdbfile.write(grids, meta); // Now we can read in the file. EXPECT_TRUE(!vdbfile.open());//opening the same file // Can't open same file multiple times without closing. EXPECT_THROW(vdbfile.open(), openvdb::IoError); vdbfile.close(); EXPECT_TRUE(!vdbfile.open());//opening the same file EXPECT_TRUE(vdbfile.isOpen()); uint32_t version = OPENVDB_FILE_VERSION; EXPECT_EQ(version, vdbfile.fileVersion()); EXPECT_EQ(version, io::getFormatVersion(vdbfile.inputStream())); EXPECT_EQ(OPENVDB_LIBRARY_MAJOR_VERSION, vdbfile.libraryVersion().first); EXPECT_EQ(OPENVDB_LIBRARY_MINOR_VERSION, vdbfile.libraryVersion().second); EXPECT_EQ(OPENVDB_LIBRARY_MAJOR_VERSION, io::getLibraryVersion(vdbfile.inputStream()).first); EXPECT_EQ(OPENVDB_LIBRARY_MINOR_VERSION, io::getLibraryVersion(vdbfile.inputStream()).second); // Ensure that we read in the vdb metadata. EXPECT_TRUE(vdbfile.getMetadata()); EXPECT_TRUE(vdbfile.getMetadata()->metaValue<std::string>("author") == "Einstein"); EXPECT_EQ(2009, vdbfile.getMetadata()->metaValue<int32_t>("year")); // Ensure we got the grid descriptors. EXPECT_EQ(1, int(vdbfile.gridDescriptors().count("density"))); EXPECT_EQ(1, int(vdbfile.gridDescriptors().count("temperature"))); io::File::NameMapCIter it = vdbfile.findDescriptor("density"); EXPECT_TRUE(it != vdbfile.gridDescriptors().end()); io::GridDescriptor gd = it->second; EXPECT_EQ(IntTree::treeType(), gd.gridType()); it = vdbfile.findDescriptor("temperature"); EXPECT_TRUE(it != vdbfile.gridDescriptors().end()); gd = it->second; EXPECT_EQ(FloatTree::treeType(), gd.gridType()); // Ensure we throw an error if there is no file. io::File vdbfile2("somethingelses.vdb2"); EXPECT_THROW(vdbfile2.open(), openvdb::IoError); EXPECT_THROW(vdbfile2.inputStream(), openvdb::IoError); // Clear registries. GridBase::clearRegistry(); Metadata::clearRegistry(); math::MapRegistry::clear(); // Test closing the file. vdbfile.close(); EXPECT_TRUE(vdbfile.isOpen() == false); EXPECT_TRUE(vdbfile.fileMetadata().get() == nullptr); EXPECT_EQ(0, int(vdbfile.gridDescriptors().size())); EXPECT_THROW(vdbfile.inputStream(), openvdb::IoError); remove("something.vdb2"); } TEST_F(TestFile, testOpen) { testOpen(); } void TestFile::testNonVdbOpen() { std::ofstream file("dummy.vdb2", std::ios_base::binary); int64_t something = 1; file.write(reinterpret_cast<char*>(&something), sizeof(int64_t)); file.close(); openvdb::io::File vdbfile("dummy.vdb2"); EXPECT_THROW(vdbfile.open(), openvdb::IoError); EXPECT_THROW(vdbfile.inputStream(), openvdb::IoError); remove("dummy.vdb2"); } TEST_F(TestFile, testNonVdbOpen) { testNonVdbOpen(); } TEST_F(TestFile, testGetMetadata) { using namespace openvdb; GridPtrVec grids; MetaMap meta; meta.insertMeta("author", StringMetadata("Einstein")); meta.insertMeta("year", Int32Metadata(2009)); // Adjust registry before writing. Metadata::clearRegistry(); StringMetadata::registerType(); Int32Metadata::registerType(); // Write the vdb out to a file. 
io::File vdbfile("something.vdb2"); vdbfile.write(grids, meta); // Check if reading without opening the file EXPECT_THROW(vdbfile.getMetadata(), openvdb::IoError); vdbfile.open(); MetaMap::Ptr meta2 = vdbfile.getMetadata(); EXPECT_EQ(2, int(meta2->metaCount())); EXPECT_TRUE(meta2->metaValue<std::string>("author") == "Einstein"); EXPECT_EQ(2009, meta2->metaValue<int32_t>("year")); // Clear registry. Metadata::clearRegistry(); remove("something.vdb2"); } TEST_F(TestFile, testReadAll) { using namespace openvdb; using FloatGrid = openvdb::FloatGrid; using IntGrid = openvdb::Int32Grid; using FloatTree = FloatGrid::TreeType; using IntTree = Int32Grid::TreeType; // Create a vdb to write. // Create grids IntGrid::Ptr grid1 = createGrid<IntGrid>(/*bg=*/1); IntTree& tree = grid1->tree(); grid1->setName("density"); FloatGrid::Ptr grid2 = createGrid<FloatGrid>(/*bg=*/2.0); FloatTree& tree2 = grid2->tree(); grid2->setName("temperature"); // Create transforms math::Transform::Ptr trans = math::Transform::createLinearTransform(0.1); math::Transform::Ptr trans2 = math::Transform::createLinearTransform(0.1); grid1->setTransform(trans); grid2->setTransform(trans2); // Set some values tree.setValue(Coord(0, 0, 0), 5); tree.setValue(Coord(100, 0, 0), 6); tree2.setValue(Coord(0, 0, 0), 10); tree2.setValue(Coord(0, 100, 0), 11); MetaMap meta; meta.insertMeta("author", StringMetadata("Einstein")); meta.insertMeta("year", Int32Metadata(2009)); GridPtrVec grids; grids.push_back(grid1); grids.push_back(grid2); // Register grid and transform. openvdb::initialize(); // Write the vdb out to a file. io::File vdbfile("something.vdb2"); vdbfile.write(grids, meta); io::File vdbfile2("something.vdb2"); EXPECT_THROW(vdbfile2.getGrids(), openvdb::IoError); vdbfile2.open(); EXPECT_TRUE(vdbfile2.isOpen()); GridPtrVecPtr grids2 = vdbfile2.getGrids(); MetaMap::Ptr meta2 = vdbfile2.getMetadata(); // Ensure we have the metadata. EXPECT_EQ(2, int(meta2->metaCount())); EXPECT_TRUE(meta2->metaValue<std::string>("author") == "Einstein"); EXPECT_EQ(2009, meta2->metaValue<int32_t>("year")); // Ensure we got the grids. EXPECT_EQ(2, int(grids2->size())); GridBase::Ptr grid; grid.reset(); grid = findGridByName(*grids2, "density"); EXPECT_TRUE(grid.get() != nullptr); IntTree::Ptr density = gridPtrCast<IntGrid>(grid)->treePtr(); EXPECT_TRUE(density.get() != nullptr); grid.reset(); grid = findGridByName(*grids2, "temperature"); EXPECT_TRUE(grid.get() != nullptr); FloatTree::Ptr temperature = gridPtrCast<FloatGrid>(grid)->treePtr(); EXPECT_TRUE(temperature.get() != nullptr); EXPECT_NEAR(5, density->getValue(Coord(0, 0, 0)), /*tolerance=*/0); EXPECT_NEAR(6, density->getValue(Coord(100, 0, 0)), /*tolerance=*/0); EXPECT_NEAR(10, temperature->getValue(Coord(0, 0, 0)), /*tolerance=*/0); EXPECT_NEAR(11, temperature->getValue(Coord(0, 100, 0)), /*tolerance=*/0); // Clear registries. GridBase::clearRegistry(); Metadata::clearRegistry(); math::MapRegistry::clear(); vdbfile2.close(); remove("something.vdb2"); } TEST_F(TestFile, testWriteOpenFile) { using namespace openvdb; MetaMap::Ptr meta(new MetaMap); meta->insertMeta("author", StringMetadata("Einstein")); meta->insertMeta("year", Int32Metadata(2009)); // Register metadata Metadata::clearRegistry(); StringMetadata::registerType(); Int32Metadata::registerType(); // Write the metadata out to a file. 
io::File vdbfile("something.vdb2"); vdbfile.write(GridPtrVec(), *meta); io::File vdbfile2("something.vdb2"); EXPECT_THROW(vdbfile2.getGrids(), openvdb::IoError); vdbfile2.open(); EXPECT_TRUE(vdbfile2.isOpen()); GridPtrVecPtr grids = vdbfile2.getGrids(); meta = vdbfile2.getMetadata(); // Ensure we have the metadata. EXPECT_TRUE(meta.get() != nullptr); EXPECT_EQ(2, int(meta->metaCount())); EXPECT_TRUE(meta->metaValue<std::string>("author") == "Einstein"); EXPECT_EQ(2009, meta->metaValue<int32_t>("year")); // Ensure we got the grids. EXPECT_TRUE(grids.get() != nullptr); EXPECT_EQ(0, int(grids->size())); // Cannot write an open file. EXPECT_THROW(vdbfile2.write(*grids), openvdb::IoError); vdbfile2.close(); EXPECT_NO_THROW(vdbfile2.write(*grids)); // Clear registries. Metadata::clearRegistry(); remove("something.vdb2"); } TEST_F(TestFile, testReadGridMetadata) { using namespace openvdb; openvdb::initialize(); const char* filename = "testReadGridMetadata.vdb2"; SharedPtr<const char> scopedFile(filename, ::remove); // Create grids Int32Grid::Ptr igrid = createGrid<Int32Grid>(/*bg=*/1); FloatGrid::Ptr fgrid = createGrid<FloatGrid>(/*bg=*/2.0); // Add metadata. igrid->setName("igrid"); igrid->insertMeta("author", StringMetadata("Einstein")); igrid->insertMeta("year", Int32Metadata(2012)); fgrid->setName("fgrid"); fgrid->insertMeta("author", StringMetadata("Einstein")); fgrid->insertMeta("year", Int32Metadata(2012)); // Add transforms. math::Transform::Ptr trans = math::Transform::createLinearTransform(0.1); igrid->setTransform(trans); fgrid->setTransform(trans); // Set some values. igrid->tree().setValue(Coord(0, 0, 0), 5); igrid->tree().setValue(Coord(100, 0, 0), 6); fgrid->tree().setValue(Coord(0, 0, 0), 10); fgrid->tree().setValue(Coord(0, 100, 0), 11); GridPtrVec srcGrids; srcGrids.push_back(igrid); srcGrids.push_back(fgrid); std::map<std::string, GridBase::Ptr> srcGridMap; srcGridMap[igrid->getName()] = igrid; srcGridMap[fgrid->getName()] = fgrid; enum { OUTPUT_TO_FILE = 0, OUTPUT_TO_STREAM = 1 }; for (int outputMethod = OUTPUT_TO_FILE; outputMethod <= OUTPUT_TO_STREAM; ++outputMethod) { if (outputMethod == OUTPUT_TO_FILE) { // Write the grids to a file. io::File vdbfile(filename); vdbfile.write(srcGrids); } else { // Stream the grids to a file (i.e., without file offsets). std::ofstream ostrm(filename, std::ios_base::binary); io::Stream(ostrm).write(srcGrids); } // Read just the grid-level metadata from the file. io::File vdbfile(filename); // Verify that reading from an unopened file generates an exception. EXPECT_THROW(vdbfile.readGridMetadata("igrid"), openvdb::IoError); EXPECT_THROW(vdbfile.readGridMetadata("noname"), openvdb::IoError); EXPECT_THROW(vdbfile.readAllGridMetadata(), openvdb::IoError); vdbfile.open(); EXPECT_TRUE(vdbfile.isOpen()); // Verify that reading a nonexistent grid generates an exception. EXPECT_THROW(vdbfile.readGridMetadata("noname"), openvdb::KeyError); // Read all grids and store them in a list. GridPtrVecPtr gridMetadata = vdbfile.readAllGridMetadata(); EXPECT_TRUE(gridMetadata.get() != nullptr); EXPECT_EQ(2, int(gridMetadata->size())); // Read individual grids and append them to the list. 
GridBase::Ptr grid = vdbfile.readGridMetadata("igrid"); EXPECT_TRUE(grid.get() != nullptr); EXPECT_EQ(std::string("igrid"), grid->getName()); gridMetadata->push_back(grid); grid = vdbfile.readGridMetadata("fgrid"); EXPECT_TRUE(grid.get() != nullptr); EXPECT_EQ(std::string("fgrid"), grid->getName()); gridMetadata->push_back(grid); // Verify that the grids' metadata and transforms match the original grids'. for (size_t i = 0, N = gridMetadata->size(); i < N; ++i) { grid = (*gridMetadata)[i]; EXPECT_TRUE(grid.get() != nullptr); EXPECT_TRUE(grid->getName() == "igrid" || grid->getName() == "fgrid"); EXPECT_TRUE(grid->baseTreePtr().get() != nullptr); // Since we didn't read the grid's topology, the tree should be empty. EXPECT_EQ(0, int(grid->constBaseTreePtr()->leafCount())); EXPECT_EQ(0, int(grid->constBaseTreePtr()->activeVoxelCount())); // Retrieve the source grid of the same name. GridBase::ConstPtr srcGrid = srcGridMap[grid->getName()]; // Compare grid types and transforms. EXPECT_EQ(srcGrid->type(), grid->type()); EXPECT_EQ(srcGrid->transform(), grid->transform()); // Compare metadata, ignoring fields that were added when the file was written. MetaMap::Ptr statsMetadata = grid->getStatsMetadata(), otherMetadata = grid->copyMeta(); // shallow copy EXPECT_TRUE(statsMetadata->metaCount() != 0); statsMetadata->insertMeta(GridBase::META_FILE_COMPRESSION, StringMetadata("")); for (MetaMap::ConstMetaIterator it = grid->beginMeta(), end = grid->endMeta(); it != end; ++it) { // Keep all fields that exist in the source grid. if ((*srcGrid)[it->first]) continue; // Remove any remaining grid statistics fields. if ((*statsMetadata)[it->first]) { otherMetadata->removeMeta(it->first); } // Remove delay load metadata if it exists. if ((*otherMetadata)["file_delayed_load"]) { otherMetadata->removeMeta("file_delayed_load"); } } EXPECT_EQ(srcGrid->str(), otherMetadata->str()); const CoordBBox srcBBox = srcGrid->evalActiveVoxelBoundingBox(); EXPECT_EQ(srcBBox.min().asVec3i(), grid->metaValue<Vec3i>("file_bbox_min")); EXPECT_EQ(srcBBox.max().asVec3i(), grid->metaValue<Vec3i>("file_bbox_max")); EXPECT_EQ(srcGrid->activeVoxelCount(), Index64(grid->metaValue<Int64>("file_voxel_count"))); EXPECT_EQ(srcGrid->memUsage(), Index64(grid->metaValue<Int64>("file_mem_bytes"))); } } } TEST_F(TestFile, testReadGrid) { using namespace openvdb; using FloatGrid = openvdb::FloatGrid; using IntGrid = openvdb::Int32Grid; using FloatTree = FloatGrid::TreeType; using IntTree = Int32Grid::TreeType; // Create a vdb to write. // Create grids IntGrid::Ptr grid = createGrid<IntGrid>(/*bg=*/1); IntTree& tree = grid->tree(); grid->setName("density"); FloatGrid::Ptr grid2 = createGrid<FloatGrid>(/*bg=*/2.0); FloatTree& tree2 = grid2->tree(); grid2->setName("temperature"); // Create transforms math::Transform::Ptr trans = math::Transform::createLinearTransform(0.1); math::Transform::Ptr trans2 = math::Transform::createLinearTransform(0.1); grid->setTransform(trans); grid2->setTransform(trans2); // Set some values tree.setValue(Coord(0, 0, 0), 5); tree.setValue(Coord(100, 0, 0), 6); tree2.setValue(Coord(0, 0, 0), 10); tree2.setValue(Coord(0, 100, 0), 11); MetaMap meta; meta.insertMeta("author", StringMetadata("Einstein")); meta.insertMeta("year", Int32Metadata(2009)); GridPtrVec grids; grids.push_back(grid); grids.push_back(grid2); // Register grid and transform. openvdb::initialize(); // Write the vdb out to a file. 
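    // (Added sanity check, performed just before the write announced above.)
    // These assertions only restate values that were set earlier in this test.
    EXPECT_EQ(Name("density"), grid->getName());
    EXPECT_EQ(Name("temperature"), grid2->getName());
    EXPECT_EQ(5, tree.getValue(Coord(0, 0, 0)));
    EXPECT_NEAR(10.0, tree2.getValue(Coord(0, 0, 0)), /*tolerance=*/0.0);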
io::File vdbfile("something.vdb2"); vdbfile.write(grids, meta); io::File vdbfile2("something.vdb2"); vdbfile2.open(); EXPECT_TRUE(vdbfile2.isOpen()); // Get Temperature GridBase::Ptr temperature = vdbfile2.readGrid("temperature"); EXPECT_TRUE(temperature.get() != nullptr); FloatTree::Ptr typedTemperature = gridPtrCast<FloatGrid>(temperature)->treePtr(); EXPECT_TRUE(typedTemperature.get() != nullptr); EXPECT_NEAR(10, typedTemperature->getValue(Coord(0, 0, 0)), 0); EXPECT_NEAR(11, typedTemperature->getValue(Coord(0, 100, 0)), 0); // Get Density GridBase::Ptr density = vdbfile2.readGrid("density"); EXPECT_TRUE(density.get() != nullptr); IntTree::Ptr typedDensity = gridPtrCast<IntGrid>(density)->treePtr(); EXPECT_TRUE(typedDensity.get() != nullptr); EXPECT_NEAR(5,typedDensity->getValue(Coord(0, 0, 0)), /*tolerance=*/0); EXPECT_NEAR(6,typedDensity->getValue(Coord(100, 0, 0)), /*tolerance=*/0); // Clear registries. GridBase::clearRegistry(); Metadata::clearRegistry(); math::MapRegistry::clear(); vdbfile2.close(); remove("something.vdb2"); } //////////////////////////////////////// template<typename GridT> void validateClippedGrid(const GridT& clipped, const typename GridT::ValueType& fg) { using namespace openvdb; using ValueT = typename GridT::ValueType; const CoordBBox bbox = clipped.evalActiveVoxelBoundingBox(); EXPECT_EQ(4, bbox.min().x()); EXPECT_EQ(4, bbox.min().y()); EXPECT_EQ(-6, bbox.min().z()); EXPECT_EQ(4, bbox.max().x()); EXPECT_EQ(4, bbox.max().y()); EXPECT_EQ(6, bbox.max().z()); EXPECT_EQ(6 + 6 + 1, int(clipped.activeVoxelCount())); EXPECT_EQ(2, int(clipped.constTree().leafCount())); typename GridT::ConstAccessor acc = clipped.getConstAccessor(); const ValueT bg = clipped.background(); Coord xyz; int &x = xyz[0], &y = xyz[1], &z = xyz[2]; for (x = -10; x <= 10; ++x) { for (y = -10; y <= 10; ++y) { for (z = -10; z <= 10; ++z) { if (x == 4 && y == 4 && z >= -6 && z <= 6) { EXPECT_EQ(fg, acc.getValue(Coord(4, 4, z))); } else { EXPECT_EQ(bg, acc.getValue(Coord(x, y, z))); } } } } } // See also TestGrid::testClipping() TEST_F(TestFile, testReadClippedGrid) { using namespace openvdb; // Register types. openvdb::initialize(); // World-space clipping region const BBoxd clipBox(Vec3d(4.0, 4.0, -6.0), Vec3d(4.9, 4.9, 6.0)); // Create grids of several types and fill a cubic region of each with a foreground value. const bool bfg = true; BoolGrid::Ptr bgrid = BoolGrid::create(/*bg=*/zeroVal<bool>()); bgrid->setName("bgrid"); bgrid->fill(CoordBBox(Coord(-10), Coord(10)), /*value=*/bfg, /*active=*/true); const float ffg = 5.f; FloatGrid::Ptr fgrid = FloatGrid::create(/*bg=*/zeroVal<float>()); fgrid->setName("fgrid"); fgrid->fill(CoordBBox(Coord(-10), Coord(10)), /*value=*/ffg, /*active=*/true); const Vec3s vfg(1.f, -2.f, 3.f); Vec3SGrid::Ptr vgrid = Vec3SGrid::create(/*bg=*/zeroVal<Vec3s>()); vgrid->setName("vgrid"); vgrid->fill(CoordBBox(Coord(-10), Coord(10)), /*value=*/vfg, /*active=*/true); GridPtrVec srcGrids; srcGrids.push_back(bgrid); srcGrids.push_back(fgrid); srcGrids.push_back(vgrid); const char* filename = "testReadClippedGrid.vdb"; SharedPtr<const char> scopedFile(filename, ::remove); enum { OUTPUT_TO_FILE = 0, OUTPUT_TO_STREAM = 1 }; for (int outputMethod = OUTPUT_TO_FILE; outputMethod <= OUTPUT_TO_STREAM; ++outputMethod) { if (outputMethod == OUTPUT_TO_FILE) { // Write the grids to a file. io::File vdbfile(filename); vdbfile.write(srcGrids); } else { // Stream the grids to a file (i.e., without file offsets). 
std::ofstream ostrm(filename, std::ios_base::binary); io::Stream(ostrm).write(srcGrids); } // Open the file for reading. io::File vdbfile(filename); vdbfile.open(); GridBase::Ptr grid; // Read and clip each grid. EXPECT_NO_THROW(grid = vdbfile.readGrid("bgrid", clipBox)); EXPECT_TRUE(grid.get() != nullptr); EXPECT_NO_THROW(bgrid = gridPtrCast<BoolGrid>(grid)); validateClippedGrid(*bgrid, bfg); EXPECT_NO_THROW(grid = vdbfile.readGrid("fgrid", clipBox)); EXPECT_TRUE(grid.get() != nullptr); EXPECT_NO_THROW(fgrid = gridPtrCast<FloatGrid>(grid)); validateClippedGrid(*fgrid, ffg); EXPECT_NO_THROW(grid = vdbfile.readGrid("vgrid", clipBox)); EXPECT_TRUE(grid.get() != nullptr); EXPECT_NO_THROW(vgrid = gridPtrCast<Vec3SGrid>(grid)); validateClippedGrid(*vgrid, vfg); } } //////////////////////////////////////// namespace { template<typename T, openvdb::Index Log2Dim> struct MultiPassLeafNode; // forward declaration // Dummy value type using MultiPassValue = openvdb::PointIndex<openvdb::Index32, 1000>; // Tree configured to match the default OpenVDB configuration using MultiPassTree = openvdb::tree::Tree< openvdb::tree::RootNode< openvdb::tree::InternalNode< openvdb::tree::InternalNode< MultiPassLeafNode<MultiPassValue, 3>, 4>, 5>>>; using MultiPassGrid = openvdb::Grid<MultiPassTree>; template<typename T, openvdb::Index Log2Dim> struct MultiPassLeafNode: public openvdb::tree::LeafNode<T, Log2Dim>, openvdb::io::MultiPass { // The following had to be copied from the LeafNode class // to make the derived class compatible with the tree structure. using LeafNodeType = MultiPassLeafNode; using Ptr = openvdb::SharedPtr<MultiPassLeafNode>; using BaseLeaf = openvdb::tree::LeafNode<T, Log2Dim>; using NodeMaskType = openvdb::util::NodeMask<Log2Dim>; using ValueType = T; using ValueOnCIter = typename BaseLeaf::template ValueIter<typename NodeMaskType::OnIterator, const MultiPassLeafNode, const ValueType, typename BaseLeaf::ValueOn>; using ChildOnIter = typename BaseLeaf::template ChildIter<typename NodeMaskType::OnIterator, MultiPassLeafNode, typename BaseLeaf::ChildOn>; using ChildOnCIter = typename BaseLeaf::template ChildIter< typename NodeMaskType::OnIterator, const MultiPassLeafNode, typename BaseLeaf::ChildOn>; MultiPassLeafNode(const openvdb::Coord& coords, const T& value, bool active = false) : BaseLeaf(coords, value, active) {} MultiPassLeafNode(openvdb::PartialCreate, const openvdb::Coord& coords, const T& value, bool active = false): BaseLeaf(openvdb::PartialCreate(), coords, value, active) {} MultiPassLeafNode(const MultiPassLeafNode& rhs): BaseLeaf(rhs) {} ValueOnCIter cbeginValueOn() const { return ValueOnCIter(this->getValueMask().beginOn(),this); } ChildOnCIter cbeginChildOn() const { return ChildOnCIter(this->getValueMask().endOn(), this); } ChildOnIter beginChildOn() { return ChildOnIter(this->getValueMask().endOn(), this); } // Methods in use for reading and writing multiple buffers void readBuffers(std::istream& is, const openvdb::CoordBBox&, bool fromHalf = false) { this->readBuffers(is, fromHalf); } void readBuffers(std::istream& is, bool /*fromHalf*/ = false) { const openvdb::io::StreamMetadata::Ptr meta = openvdb::io::getStreamMetadataPtr(is); if (!meta) { OPENVDB_THROW(openvdb::IoError, "Cannot write out a MultiBufferLeaf without StreamMetadata."); } // clamp pass to 16-bit integer const uint32_t pass(static_cast<uint16_t>(meta->pass())); // Read in the stored pass number. 
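        // (Added explanatory note.)  The multi-pass protocol works as follows: the
        // archive sets the current pass index on the stream's StreamMetadata, visits
        // every leaf once per pass, and each leaf reads back exactly what it wrote
        // for that pass, here a uint32_t pass number, plus the leaf origin at pass 0.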
uint32_t readPass; is.read(reinterpret_cast<char*>(&readPass), sizeof(uint32_t)); EXPECT_EQ(pass, readPass); // Record the pass number. mReadPasses.push_back(readPass); if (pass == 0) { // Read in the node's origin. openvdb::Coord origin; is.read(reinterpret_cast<char*>(&origin), sizeof(openvdb::Coord)); EXPECT_EQ(origin, this->origin()); } } void writeBuffers(std::ostream& os, bool /*toHalf*/ = false) const { const openvdb::io::StreamMetadata::Ptr meta = openvdb::io::getStreamMetadataPtr(os); if (!meta) { OPENVDB_THROW(openvdb::IoError, "Cannot read in a MultiBufferLeaf without StreamMetadata."); } // clamp pass to 16-bit integer const uint32_t pass(static_cast<uint16_t>(meta->pass())); // Leaf traversal analysis deduces the number of passes to perform for this leaf // then updates the leaf traversal value to ensure all passes will be written. if (meta->countingPasses()) { if (mNumPasses > pass) meta->setPass(mNumPasses); return; } // Record the pass number. EXPECT_TRUE(mWritePassesPtr); const_cast<std::vector<int>&>(*mWritePassesPtr).push_back(pass); // Write out the pass number. os.write(reinterpret_cast<const char*>(&pass), sizeof(uint32_t)); if (pass == 0) { // Write out the node's origin and the pass number. const auto origin = this->origin(); os.write(reinterpret_cast<const char*>(&origin), sizeof(openvdb::Coord)); } } uint32_t mNumPasses = 0; // Pointer to external vector in which to record passes as they are written std::vector<int>* mWritePassesPtr = nullptr; // Vector in which to record passes as they are read // (this needs to be internal, because leaf nodes are constructed as a grid is read) std::vector<int> mReadPasses; }; // struct MultiPassLeafNode } // anonymous namespace TEST_F(TestFile, testMultiPassIO) { using namespace openvdb; openvdb::initialize(); MultiPassGrid::registerGrid(); // Create a multi-buffer grid. const MultiPassGrid::Ptr grid = openvdb::createGrid<MultiPassGrid>(); grid->setName("test"); grid->setTransform(math::Transform::createLinearTransform(1.0)); MultiPassGrid::TreeType& tree = grid->tree(); tree.setValue(Coord(0, 0, 0), 5); tree.setValue(Coord(0, 10, 0), 5); EXPECT_EQ(2, int(tree.leafCount())); const GridPtrVec grids{grid}; // Vector in which to record pass numbers (to ensure blocked ordering) std::vector<int> writePasses; { // Specify the required number of I/O passes for each leaf node. MultiPassGrid::TreeType::LeafIter leafIter = tree.beginLeaf(); leafIter->mNumPasses = 3; leafIter->mWritePassesPtr = &writePasses; ++leafIter; leafIter->mNumPasses = 2; leafIter->mWritePassesPtr = &writePasses; } const char* filename = "testMultiPassIO.vdb"; SharedPtr<const char> scopedFile(filename, ::remove); { // Verify that passes are written to a file in the correct order. io::File(filename).write(grids); EXPECT_EQ(6, int(writePasses.size())); EXPECT_EQ(0, writePasses[0]); // leaf 0 EXPECT_EQ(0, writePasses[1]); // leaf 1 EXPECT_EQ(1, writePasses[2]); // leaf 0 EXPECT_EQ(1, writePasses[3]); // leaf 1 EXPECT_EQ(2, writePasses[4]); // leaf 0 EXPECT_EQ(2, writePasses[5]); // leaf 1 } { // Verify that passes are read in the correct order. 
io::File file(filename); file.open(); const auto newGrid = GridBase::grid<MultiPassGrid>(file.readGrid("test")); auto leafIter = newGrid->tree().beginLeaf(); EXPECT_EQ(3, int(leafIter->mReadPasses.size())); EXPECT_EQ(0, leafIter->mReadPasses[0]); EXPECT_EQ(1, leafIter->mReadPasses[1]); EXPECT_EQ(2, leafIter->mReadPasses[2]); ++leafIter; EXPECT_EQ(3, int(leafIter->mReadPasses.size())); EXPECT_EQ(0, leafIter->mReadPasses[0]); EXPECT_EQ(1, leafIter->mReadPasses[1]); EXPECT_EQ(2, leafIter->mReadPasses[2]); } { // Verify that when using multi-pass and bbox clipping that each leaf node // is still being read before being clipped io::File file(filename); file.open(); const auto newGrid = GridBase::grid<MultiPassGrid>( file.readGrid("test", BBoxd(Vec3d(0), Vec3d(1)))); EXPECT_EQ(Index32(1), newGrid->tree().leafCount()); auto leafIter = newGrid->tree().beginLeaf(); EXPECT_EQ(3, int(leafIter->mReadPasses.size())); EXPECT_EQ(0, leafIter->mReadPasses[0]); EXPECT_EQ(1, leafIter->mReadPasses[1]); EXPECT_EQ(2, leafIter->mReadPasses[2]); ++leafIter; EXPECT_TRUE(!leafIter); // second leaf node has now been clipped } // Clear the pass data. writePasses.clear(); { // Verify that passes are written to and read from a non-seekable stream // in the correct order. std::ostringstream ostr(std::ios_base::binary); io::Stream(ostr).write(grids); EXPECT_EQ(6, int(writePasses.size())); EXPECT_EQ(0, writePasses[0]); // leaf 0 EXPECT_EQ(0, writePasses[1]); // leaf 1 EXPECT_EQ(1, writePasses[2]); // leaf 0 EXPECT_EQ(1, writePasses[3]); // leaf 1 EXPECT_EQ(2, writePasses[4]); // leaf 0 EXPECT_EQ(2, writePasses[5]); // leaf 1 std::istringstream is(ostr.str(), std::ios_base::binary); io::Stream strm(is); const auto streamedGrids = strm.getGrids(); EXPECT_EQ(1, int(streamedGrids->size())); const auto newGrid = gridPtrCast<MultiPassGrid>(*streamedGrids->begin()); EXPECT_TRUE(bool(newGrid)); auto leafIter = newGrid->tree().beginLeaf(); EXPECT_EQ(3, int(leafIter->mReadPasses.size())); EXPECT_EQ(0, leafIter->mReadPasses[0]); EXPECT_EQ(1, leafIter->mReadPasses[1]); EXPECT_EQ(2, leafIter->mReadPasses[2]); ++leafIter; EXPECT_EQ(3, int(leafIter->mReadPasses.size())); EXPECT_EQ(0, leafIter->mReadPasses[0]); EXPECT_EQ(1, leafIter->mReadPasses[1]); EXPECT_EQ(2, leafIter->mReadPasses[2]); } } //////////////////////////////////////// TEST_F(TestFile, testHasGrid) { using namespace openvdb; using namespace openvdb::io; using FloatGrid = openvdb::FloatGrid; using IntGrid = openvdb::Int32Grid; using FloatTree = FloatGrid::TreeType; using IntTree = Int32Grid::TreeType; // Create a vdb to write. // Create grids IntGrid::Ptr grid = createGrid<IntGrid>(/*bg=*/1); IntTree& tree = grid->tree(); grid->setName("density"); FloatGrid::Ptr grid2 = createGrid<FloatGrid>(/*bg=*/2.0); FloatTree& tree2 = grid2->tree(); grid2->setName("temperature"); // Create transforms math::Transform::Ptr trans = math::Transform::createLinearTransform(0.1); math::Transform::Ptr trans2 = math::Transform::createLinearTransform(0.1); grid->setTransform(trans); grid2->setTransform(trans2); // Set some values tree.setValue(Coord(0, 0, 0), 5); tree.setValue(Coord(100, 0, 0), 6); tree2.setValue(Coord(0, 0, 0), 10); tree2.setValue(Coord(0, 100, 0), 11); MetaMap meta; meta.insertMeta("author", StringMetadata("Einstein")); meta.insertMeta("year", Int32Metadata(2009)); GridPtrVec grids; grids.push_back(grid); grids.push_back(grid2); // Register grid and transform. 
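    // (Added note.)  Unlike most tests in this file, which simply call
    // openvdb::initialize(), this test clears the registries and then registers the
    // required grid, metadata and map types explicitly, so that the registries hold
    // exactly what the archive written below needs.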
GridBase::clearRegistry(); IntGrid::registerGrid(); FloatGrid::registerGrid(); Metadata::clearRegistry(); StringMetadata::registerType(); Int32Metadata::registerType(); // register maps math::MapRegistry::clear(); math::AffineMap::registerMap(); math::ScaleMap::registerMap(); math::UniformScaleMap::registerMap(); math::TranslationMap::registerMap(); math::ScaleTranslateMap::registerMap(); math::UniformScaleTranslateMap::registerMap(); math::NonlinearFrustumMap::registerMap(); // Write the vdb out to a file. io::File vdbfile("something.vdb2"); vdbfile.write(grids, meta); io::File vdbfile2("something.vdb2"); EXPECT_THROW(vdbfile2.hasGrid("density"), openvdb::IoError); vdbfile2.open(); EXPECT_TRUE(vdbfile2.hasGrid("density")); EXPECT_TRUE(vdbfile2.hasGrid("temperature")); EXPECT_TRUE(!vdbfile2.hasGrid("Temperature")); EXPECT_TRUE(!vdbfile2.hasGrid("densitY")); // Clear registries. GridBase::clearRegistry(); Metadata::clearRegistry(); math::MapRegistry::clear(); vdbfile2.close(); remove("something.vdb2"); } TEST_F(TestFile, testNameIterator) { using namespace openvdb; using namespace openvdb::io; using FloatGrid = openvdb::FloatGrid; using FloatTree = FloatGrid::TreeType; using IntTree = Int32Grid::TreeType; // Create trees. IntTree::Ptr itree(new IntTree(1)); itree->setValue(Coord(0, 0, 0), 5); itree->setValue(Coord(100, 0, 0), 6); FloatTree::Ptr ftree(new FloatTree(2.0)); ftree->setValue(Coord(0, 0, 0), 10.0); ftree->setValue(Coord(0, 100, 0), 11.0); // Create grids. GridPtrVec grids; GridBase::Ptr grid = createGrid(itree); grid->setName("density"); grids.push_back(grid); grid = createGrid(ftree); grid->setName("temperature"); grids.push_back(grid); // Create two unnamed grids. grids.push_back(createGrid(ftree)); grids.push_back(createGrid(ftree)); // Create two grids with the same name. grid = createGrid(ftree); grid->setName("level_set"); grids.push_back(grid); grid = createGrid(ftree); grid->setName("level_set"); grids.push_back(grid); // Register types. openvdb::initialize(); const char* filename = "testNameIterator.vdb2"; SharedPtr<const char> scopedFile(filename, ::remove); // Write the grids out to a file. { io::File vdbfile(filename); vdbfile.write(grids); } io::File vdbfile(filename); // Verify that name iteration fails if the file is not open. EXPECT_THROW(vdbfile.beginName(), openvdb::IoError); vdbfile.open(); // Names should appear in lexicographic order. Name names[6] = { "[0]", "[1]", "density", "level_set[0]", "level_set[1]", "temperature" }; int count = 0; for (io::File::NameIterator iter = vdbfile.beginName(); iter != vdbfile.endName(); ++iter) { EXPECT_EQ(names[count], *iter); EXPECT_EQ(names[count], iter.gridName()); ++count; grid = vdbfile.readGrid(*iter); EXPECT_TRUE(grid); } EXPECT_EQ(6, count); vdbfile.close(); } TEST_F(TestFile, testReadOldFileFormat) { /// @todo Save some old-format (prior to OPENVDB_FILE_VERSION) .vdb2 files /// to /work/rd/fx_tools/vdb_unittest/TestFile::testReadOldFileFormat/ /// Verify that the files can still be read correctly. } TEST_F(TestFile, testCompression) { using namespace openvdb; using namespace openvdb::io; using IntGrid = openvdb::Int32Grid; // Register types. openvdb::initialize(); // Create reference grids. 
IntGrid::Ptr intGrid = IntGrid::create(/*background=*/0); intGrid->fill(CoordBBox(Coord(0), Coord(49)), /*value=*/999, /*active=*/true); intGrid->fill(CoordBBox(Coord(6), Coord(43)), /*value=*/0, /*active=*/false); intGrid->fill(CoordBBox(Coord(21), Coord(22)), /*value=*/1, /*active=*/false); intGrid->fill(CoordBBox(Coord(23), Coord(24)), /*value=*/2, /*active=*/false); EXPECT_EQ(8, int(IntGrid::TreeType::LeafNodeType::DIM)); FloatGrid::Ptr lsGrid = createLevelSet<FloatGrid>(); unittest_util::makeSphere(/*dim=*/Coord(100), /*ctr=*/Vec3f(50, 50, 50), /*r=*/20.0, *lsGrid, unittest_util::SPHERE_SPARSE_NARROW_BAND); EXPECT_EQ(int(GRID_LEVEL_SET), int(lsGrid->getGridClass())); FloatGrid::Ptr fogGrid = lsGrid->deepCopy(); tools::sdfToFogVolume(*fogGrid); EXPECT_EQ(int(GRID_FOG_VOLUME), int(fogGrid->getGridClass())); GridPtrVec grids; grids.push_back(intGrid); grids.push_back(lsGrid); grids.push_back(fogGrid); const char* filename = "testCompression.vdb2"; SharedPtr<const char> scopedFile(filename, ::remove); size_t uncompressedSize = 0; { // Write the grids out to a file with compression disabled. io::File vdbfile(filename); vdbfile.setCompression(io::COMPRESS_NONE); vdbfile.write(grids); vdbfile.close(); // Get the size of the file in bytes. struct stat buf; buf.st_size = 0; EXPECT_EQ(0, ::stat(filename, &buf)); uncompressedSize = buf.st_size; } // Write the grids out with various combinations of compression options // and verify that they can be read back successfully. // See io/Compression.h for the flag values. #ifdef OPENVDB_USE_BLOSC #ifdef OPENVDB_USE_ZLIB std::vector<uint32_t> validFlags{0x0,0x1,0x2,0x3,0x4,0x6}; #else std::vector<uint32_t> validFlags{0x0,0x2,0x4,0x6}; #endif #else #ifdef OPENVDB_USE_ZLIB std::vector<uint32_t> validFlags{0x0,0x1,0x2,0x3}; #else std::vector<uint32_t> validFlags{0x0,0x2}; #endif #endif for (uint32_t flags : validFlags) { if (flags != io::COMPRESS_NONE) { io::File vdbfile(filename); vdbfile.setCompression(flags); vdbfile.write(grids); vdbfile.close(); } if (flags != io::COMPRESS_NONE) { // Verify that the compressed file is significantly smaller than // the uncompressed file. size_t compressedSize = 0; struct stat buf; buf.st_size = 0; EXPECT_EQ(0, ::stat(filename, &buf)); compressedSize = buf.st_size; EXPECT_TRUE(compressedSize < size_t(0.75 * double(uncompressedSize))); } { // Verify that the grids can be read back successfully. io::File vdbfile(filename); vdbfile.open(); GridPtrVecPtr inGrids = vdbfile.getGrids(); EXPECT_EQ(3, int(inGrids->size())); // Verify that the original and input grids are equal. { const IntGrid::Ptr grid = gridPtrCast<IntGrid>((*inGrids)[0]); EXPECT_TRUE(grid.get() != nullptr); EXPECT_EQ(int(intGrid->getGridClass()), int(grid->getGridClass())); EXPECT_TRUE(grid->tree().hasSameTopology(intGrid->tree())); EXPECT_EQ( intGrid->tree().getValue(Coord(0)), grid->tree().getValue(Coord(0))); // Verify that leaf nodes with more than two distinct inactive values // are handled correctly (FX-7085). EXPECT_EQ( intGrid->tree().getValue(Coord(6)), grid->tree().getValue(Coord(6))); EXPECT_EQ( intGrid->tree().getValue(Coord(21)), grid->tree().getValue(Coord(21))); EXPECT_EQ( intGrid->tree().getValue(Coord(23)), grid->tree().getValue(Coord(23))); // Verify that the only active value in this grid is 999. 
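                // (Added note.)  evalMinMax() below considers active values only, so
                // the inactive fill values (0, 1 and 2) present in this grid do not
                // influence the result.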
Int32 minVal = -1, maxVal = -1; grid->evalMinMax(minVal, maxVal); EXPECT_EQ(999, minVal); EXPECT_EQ(999, maxVal); } for (int idx = 1; idx <= 2; ++idx) { const FloatGrid::Ptr grid = gridPtrCast<FloatGrid>((*inGrids)[idx]), refGrid = gridPtrCast<FloatGrid>(grids[idx]); EXPECT_TRUE(grid.get() != nullptr); EXPECT_EQ(int(refGrid->getGridClass()), int(grid->getGridClass())); EXPECT_TRUE(grid->tree().hasSameTopology(refGrid->tree())); FloatGrid::ConstAccessor refAcc = refGrid->getConstAccessor(); for (FloatGrid::ValueAllCIter it = grid->cbeginValueAll(); it; ++it) { EXPECT_EQ(refAcc.getValue(it.getCoord()), *it); } } } } } //////////////////////////////////////// namespace { using namespace openvdb; struct TestAsyncHelper { std::set<io::Queue::Id> ids; std::map<io::Queue::Id, std::string> filenames; size_t refFileSize; bool verbose; TestAsyncHelper(size_t _refFileSize): refFileSize(_refFileSize), verbose(false) {} ~TestAsyncHelper() { // Remove output files. for (std::map<io::Queue::Id, std::string>::iterator it = filenames.begin(); it != filenames.end(); ++it) { ::remove(it->second.c_str()); } filenames.clear(); ids.clear(); } io::Queue::Notifier notifier() { return std::bind(&TestAsyncHelper::validate, this, std::placeholders::_1, std::placeholders::_2); } void insert(io::Queue::Id id, const std::string& filename) { ids.insert(id); filenames[id] = filename; if (verbose) std::cerr << "queued " << filename << " as task " << id << "\n"; } void validate(io::Queue::Id id, io::Queue::Status status) { if (verbose) { std::ostringstream ostr; ostr << "task " << id; switch (status) { case io::Queue::UNKNOWN: ostr << " is unknown"; break; case io::Queue::PENDING: ostr << " is pending"; break; case io::Queue::SUCCEEDED: ostr << " succeeded"; break; case io::Queue::FAILED: ostr << " failed"; break; } std::cerr << ostr.str() << "\n"; } if (status == io::Queue::SUCCEEDED) { // If the task completed successfully, verify that the output file's // size matches the reference file's size. struct stat buf; buf.st_size = 0; EXPECT_EQ(0, ::stat(filenames[id].c_str(), &buf)); EXPECT_EQ(Index64(refFileSize), Index64(buf.st_size)); } if (status == io::Queue::SUCCEEDED || status == io::Queue::FAILED) { ids.erase(id); } } }; // struct TestAsyncHelper } // unnamed namespace TEST_F(TestFile, testAsync) { using namespace openvdb; // Register types. openvdb::initialize(); // Create a grid. FloatGrid::Ptr lsGrid = createLevelSet<FloatGrid>(); unittest_util::makeSphere(/*dim=*/Coord(100), /*ctr=*/Vec3f(50, 50, 50), /*r=*/20.0, *lsGrid, unittest_util::SPHERE_SPARSE_NARROW_BAND); MetaMap fileMetadata; fileMetadata.insertMeta("author", StringMetadata("Einstein")); fileMetadata.insertMeta("year", Int32Metadata(2013)); GridPtrVec grids; grids.push_back(lsGrid); grids.push_back(lsGrid->deepCopy()); grids.push_back(lsGrid->deepCopy()); size_t refFileSize = 0; { // Write a reference file without using asynchronous I/O. const char* filename = "testAsyncref.vdb"; SharedPtr<const char> scopedFile(filename, ::remove); io::File f(filename); f.write(grids, fileMetadata); // Record the size of the reference file. struct stat buf; buf.st_size = 0; EXPECT_EQ(0, ::stat(filename, &buf)); refFileSize = buf.st_size; } { // Output multiple files using asynchronous I/O. // Use polling to get the status of the I/O tasks. TestAsyncHelper helper(refFileSize); io::Queue queue; for (int i = 1; i < 10; ++i) { std::ostringstream ostr; ostr << "testAsync." 
<< i << ".vdb"; const std::string filename = ostr.str(); io::Queue::Id id = queue.write(grids, io::File(filename), fileMetadata); helper.insert(id, filename); } tbb::tick_count start = tbb::tick_count::now(); while (!helper.ids.empty()) { if ((tbb::tick_count::now() - start).seconds() > 60) break; // time out after 1 minute // Wait one second for tasks to complete. tbb::this_tbb_thread::sleep(tbb::tick_count::interval_t(1.0/*sec*/)); // Poll each task in the pending map. std::set<io::Queue::Id> ids = helper.ids; // iterate over a copy for (std::set<io::Queue::Id>::iterator it = ids.begin(); it != ids.end(); ++it) { const io::Queue::Id id = *it; const io::Queue::Status status = queue.status(id); helper.validate(id, status); } } EXPECT_TRUE(helper.ids.empty()); EXPECT_TRUE(queue.empty()); } { // Output multiple files using asynchronous I/O. // Use notifications to get the status of the I/O tasks. TestAsyncHelper helper(refFileSize); io::Queue queue(/*capacity=*/2); queue.addNotifier(helper.notifier()); for (int i = 1; i < 10; ++i) { std::ostringstream ostr; ostr << "testAsync" << i << ".vdb"; const std::string filename = ostr.str(); io::Queue::Id id = queue.write(grids, io::File(filename), fileMetadata); helper.insert(id, filename); } while (!queue.empty()) { tbb::this_tbb_thread::sleep(tbb::tick_count::interval_t(1.0/*sec*/)); } } { // Test queue timeout. io::Queue queue(/*capacity=*/1); queue.setTimeout(0/*sec*/); SharedPtr<const char> scopedFile1("testAsyncIOa.vdb", ::remove), scopedFile2("testAsyncIOb.vdb", ::remove); std::ofstream file1(scopedFile1.get()), file2(scopedFile2.get()); queue.write(grids, io::Stream(file1)); // With the queue length restricted to 1 and the timeout to 0 seconds, // the next write() call should time out immediately with an exception. // (It is possible, though highly unlikely, for the previous task to complete // in time for this write() to actually succeed.) EXPECT_THROW(queue.write(grids, io::Stream(file2)), openvdb::RuntimeError); while (!queue.empty()) { tbb::this_tbb_thread::sleep(tbb::tick_count::interval_t(1.0/*sec*/)); } } } #ifdef OPENVDB_USE_BLOSC // This tests for a data corruption bug that existed in versions of Blosc prior to 1.5.0 // (see https://github.com/Blosc/c-blosc/pull/63). 
TEST_F(TestFile, testBlosc) { openvdb::initialize(); const unsigned char rawdata[] = { 0x93, 0xb0, 0x49, 0xaf, 0x62, 0xad, 0xe3, 0xaa, 0xe4, 0xa5, 0x43, 0x20, 0x24, 0x29, 0xc9, 0xaf, 0xee, 0xad, 0x0b, 0xac, 0x3d, 0xa8, 0x1f, 0x99, 0x53, 0x27, 0xb6, 0x2b, 0x16, 0xb0, 0x5f, 0xae, 0x89, 0xac, 0x51, 0xa9, 0xfc, 0xa1, 0xc9, 0x24, 0x59, 0x2a, 0x2f, 0x2d, 0xb4, 0xae, 0xeb, 0xac, 0x2f, 0xaa, 0xec, 0xa4, 0x53, 0x21, 0x31, 0x29, 0x8f, 0x2c, 0x8e, 0x2e, 0x31, 0xad, 0xd6, 0xaa, 0x6d, 0xa6, 0xad, 0x1b, 0x3e, 0x28, 0x0a, 0x2c, 0xfd, 0x2d, 0xf8, 0x2f, 0x45, 0xab, 0x81, 0xa7, 0x1f, 0x95, 0x02, 0x27, 0x3d, 0x2b, 0x85, 0x2d, 0x75, 0x2f, 0xb6, 0x30, 0x13, 0xa8, 0xb2, 0x9c, 0xf3, 0x25, 0x9c, 0x2a, 0x28, 0x2d, 0x0b, 0x2f, 0x7b, 0x30, 0x68, 0x9e, 0x51, 0x25, 0x31, 0x2a, 0xe6, 0x2c, 0xbc, 0x2e, 0x4e, 0x30, 0x5a, 0xb0, 0xe6, 0xae, 0x0e, 0xad, 0x59, 0xaa, 0x08, 0xa5, 0x89, 0x21, 0x59, 0x29, 0xb0, 0x2c, 0x57, 0xaf, 0x8c, 0xad, 0x6f, 0xab, 0x65, 0xa7, 0xd3, 0x12, 0xf5, 0x27, 0xeb, 0x2b, 0xf6, 0x2d, 0xee, 0xad, 0x27, 0xac, 0xab, 0xa8, 0xb1, 0x9f, 0xa2, 0x25, 0xaa, 0x2a, 0x4a, 0x2d, 0x47, 0x2f, 0x7b, 0xac, 0x6d, 0xa9, 0x45, 0xa3, 0x73, 0x23, 0x9d, 0x29, 0xb7, 0x2c, 0xa8, 0x2e, 0x51, 0x30, 0xf7, 0xa9, 0xec, 0xa4, 0x79, 0x20, 0xc5, 0x28, 0x3f, 0x2c, 0x24, 0x2e, 0x09, 0x30, 0xc8, 0xa5, 0xb1, 0x1c, 0x23, 0x28, 0xc3, 0x2b, 0xba, 0x2d, 0x9c, 0x2f, 0xc3, 0x30, 0x44, 0x18, 0x6e, 0x27, 0x3d, 0x2b, 0x6b, 0x2d, 0x40, 0x2f, 0x8f, 0x30, 0x02, 0x27, 0xed, 0x2a, 0x36, 0x2d, 0xfe, 0x2e, 0x68, 0x30, 0x66, 0xae, 0x9e, 0xac, 0x96, 0xa9, 0x7c, 0xa3, 0xa9, 0x23, 0xc5, 0x29, 0xd8, 0x2c, 0xd7, 0x2e, 0x0e, 0xad, 0x90, 0xaa, 0xe4, 0xa5, 0xf8, 0x1d, 0x82, 0x28, 0x2b, 0x2c, 0x1e, 0x2e, 0x0c, 0x30, 0x53, 0xab, 0x9c, 0xa7, 0xd4, 0x96, 0xe7, 0x26, 0x30, 0x2b, 0x7f, 0x2d, 0x6e, 0x2f, 0xb3, 0x30, 0x74, 0xa8, 0xb1, 0x9f, 0x36, 0x25, 0x3e, 0x2a, 0xfa, 0x2c, 0xdd, 0x2e, 0x65, 0x30, 0xfc, 0xa1, 0xe0, 0x23, 0x82, 0x29, 0x8f, 0x2c, 0x66, 0x2e, 0x23, 0x30, 0x2d, 0x22, 0xfb, 0x28, 0x3f, 0x2c, 0x0a, 0x2e, 0xde, 0x2f, 0xaa, 0x28, 0x0a, 0x2c, 0xc8, 0x2d, 0x8f, 0x2f, 0xb0, 0x30, 0xde, 0x2b, 0xa0, 0x2d, 0x5a, 0x2f, 0x8f, 0x30, 0x12, 0xac, 0x9d, 0xa8, 0x0f, 0xa0, 0x51, 0x25, 0x66, 0x2a, 0x1b, 0x2d, 0x0b, 0x2f, 0x82, 0x30, 0x7b, 0xa9, 0xea, 0xa3, 0x63, 0x22, 0x3f, 0x29, 0x7b, 0x2c, 0x60, 0x2e, 0x26, 0x30, 0x76, 0xa5, 0xf8, 0x1d, 0x4c, 0x28, 0xeb, 0x2b, 0xce, 0x2d, 0xb0, 0x2f, 0xd3, 0x12, 0x1d, 0x27, 0x15, 0x2b, 0x57, 0x2d, 0x2c, 0x2f, 0x85, 0x30, 0x0e, 0x26, 0x74, 0x2a, 0xfa, 0x2c, 0xc3, 0x2e, 0x4a, 0x30, 0x08, 0x2a, 0xb7, 0x2c, 0x74, 0x2e, 0x1d, 0x30, 0x8f, 0x2c, 0x3f, 0x2e, 0xf8, 0x2f, 0x24, 0x2e, 0xd0, 0x2f, 0xc3, 0x30, 0xdb, 0xa6, 0xd3, 0x0e, 0x38, 0x27, 0x3d, 0x2b, 0x78, 0x2d, 0x5a, 0x2f, 0xa3, 0x30, 0x68, 0x9e, 0x51, 0x25, 0x31, 0x2a, 0xe6, 0x2c, 0xbc, 0x2e, 0x4e, 0x30, 0xa9, 0x23, 0x59, 0x29, 0x6e, 0x2c, 0x38, 0x2e, 0x06, 0x30, 0xb8, 0x28, 0x10, 0x2c, 0xce, 0x2d, 0x95, 0x2f, 0xb3, 0x30, 0x9b, 0x2b, 0x7f, 0x2d, 0x39, 0x2f, 0x7f, 0x30, 0x4a, 0x2d, 0xf8, 0x2e, 0x58, 0x30, 0xd0, 0x2e, 0x3d, 0x30, 0x30, 0x30, 0x53, 0x21, 0xc5, 0x28, 0x24, 0x2c, 0xef, 0x2d, 0xc3, 0x2f, 0xda, 0x27, 0x58, 0x2b, 0x6b, 0x2d, 0x33, 0x2f, 0x82, 0x30, 0x9c, 0x2a, 0x00, 0x2d, 0xbc, 0x2e, 0x41, 0x30, 0xb0, 0x2c, 0x60, 0x2e, 0x0c, 0x30, 0x1e, 0x2e, 0xca, 0x2f, 0xc0, 0x30, 0x95, 0x2f, 0x9f, 0x30, 0x8c, 0x30, 0x23, 0x2a, 0xc4, 0x2c, 0x81, 0x2e, 0x23, 0x30, 0x5a, 0x2c, 0x0a, 0x2e, 0xc3, 0x2f, 0xc3, 0x30, 0xad, 0x2d, 0x5a, 0x2f, 0x88, 0x30, 0x0b, 0x2f, 0x5b, 0x30, 0x3a, 0x30, 0x7f, 0x2d, 0x2c, 0x2f, 0x72, 0x30, 0xc3, 0x2e, 0x37, 0x30, 0x09, 0x30, 0xb6, 0x30 }; const char* indata = 
reinterpret_cast<const char*>(rawdata); size_t inbytes = sizeof(rawdata); const int compbufbytes = int(inbytes + BLOSC_MAX_OVERHEAD), decompbufbytes = int(inbytes + BLOSC_MAX_OVERHEAD); std::unique_ptr<char[]> compresseddata(new char[compbufbytes]), outdata(new char[decompbufbytes]); for (int compcode = 0; compcode <= BLOSC_ZLIB; ++compcode) { char* compname = nullptr; #if BLOSC_VERSION_MAJOR > 1 || (BLOSC_VERSION_MAJOR == 1 && BLOSC_VERSION_MINOR >= 15) if (0 > blosc_compcode_to_compname(compcode, const_cast<const char**>(&compname))) #else if (0 > blosc_compcode_to_compname(compcode, &compname)) #endif continue; /// @todo This changes the compressor setting globally. if (blosc_set_compressor(compname) < 0) continue; for (int typesize = 1; typesize <= 4; ++typesize) { // Compress the data. ::memset(compresseddata.get(), 0, compbufbytes); int compressedbytes = blosc_compress( /*clevel=*/9, /*doshuffle=*/true, typesize, /*srcsize=*/inbytes, /*src=*/indata, /*dest=*/compresseddata.get(), /*destsize=*/compbufbytes); EXPECT_TRUE(compressedbytes > 0); // Decompress the data. ::memset(outdata.get(), 0, decompbufbytes); int outbytes = blosc_decompress( compresseddata.get(), outdata.get(), decompbufbytes); EXPECT_TRUE(outbytes > 0); EXPECT_EQ(int(inbytes), outbytes); // Compare original and decompressed data. int diff = 0; for (size_t i = 0; i < inbytes; ++i) { if (outdata[i] != indata[i]) ++diff; } if (diff > 0) { if (diff != 0) { FAIL() << "Your version of the Blosc library is most likely" " out of date; please install the latest version. " "(Earlier versions have a bug that can cause data corruption.)"; } return; } } } } #endif void TestFile::testDelayedLoadMetadata() { openvdb::initialize(); using namespace openvdb; io::File file("something.vdb2"); // Create a level set grid. auto lsGrid = createLevelSet<FloatGrid>(); lsGrid->setName("sphere"); unittest_util::makeSphere(/*dim=*/Coord(100), /*ctr=*/Vec3f(50, 50, 50), /*r=*/20.0, *lsGrid, unittest_util::SPHERE_SPARSE_NARROW_BAND); // Write the VDB to a string stream. std::ostringstream ostr(std::ios_base::binary); // Create the grid descriptor out of this grid. io::GridDescriptor gd(Name("sphere"), lsGrid->type()); // Write out the grid. file.writeGrid(gd, lsGrid, ostr, /*seekable=*/true); // Duplicate VDB string stream. std::ostringstream ostr2(std::ios_base::binary); { // Read back in, clip and write out again to verify metadata is rebuilt. std::istringstream istr(ostr.str(), std::ios_base::binary); io::setVersion(istr, file.libraryVersion(), file.fileVersion()); io::GridDescriptor gd2; GridBase::Ptr grid = gd2.read(istr); gd2.seekToGrid(istr); const BBoxd clipBbox(Vec3d(-10.0,-10.0,-10.0), Vec3d(10.0,10.0,10.0)); io::Archive::readGrid(grid, gd2, istr, clipBbox); // Verify clipping is working as expected. EXPECT_TRUE(grid->baseTreePtr()->leafCount() < lsGrid->tree().leafCount()); file.writeGrid(gd, grid, ostr2, /*seekable=*/true); } // Since the input is only a fragment of a VDB file (in particular, // it doesn't have a header), set the file format version number explicitly. // On read, the delayed load metadata for OpenVDB library versions less than 6.1 // should be removed to ensure correctness as it possible for the metadata to // have been treated as unknown and blindly copied over when read and re-written // using this library version resulting in out-of-sync metadata. // By default, DelayedLoadMetadata is dropped from the grid during read so // as not to be exposed to the user. 
{ // read using current library version std::istringstream istr(ostr2.str(), std::ios_base::binary); io::setVersion(istr, file.libraryVersion(), file.fileVersion()); io::GridDescriptor gd2; GridBase::Ptr grid = gd2.read(istr); gd2.seekToGrid(istr); io::Archive::readGrid(grid, gd2, istr); EXPECT_TRUE(!((*grid)[GridBase::META_FILE_DELAYED_LOAD])); } // To test the version mechanism, a stream metadata object is created with // a non-zero test value and set on the input stream. This disables the // behaviour where the DelayedLoadMetadata is dropped from the grid. io::StreamMetadata::Ptr streamMetadata(new io::StreamMetadata); streamMetadata->__setTest(uint32_t(1)); { // read using current library version std::istringstream istr(ostr2.str(), std::ios_base::binary); io::setVersion(istr, file.libraryVersion(), file.fileVersion()); io::setStreamMetadataPtr(istr, streamMetadata, /*transfer=*/false); io::GridDescriptor gd2; GridBase::Ptr grid = gd2.read(istr); gd2.seekToGrid(istr); io::Archive::readGrid(grid, gd2, istr); EXPECT_TRUE(((*grid)[GridBase::META_FILE_DELAYED_LOAD])); } { // read using library version of 5.0 std::istringstream istr(ostr2.str(), std::ios_base::binary); io::setVersion(istr, VersionId(5,0), file.fileVersion()); io::setStreamMetadataPtr(istr, streamMetadata, /*transfer=*/false); io::GridDescriptor gd2; GridBase::Ptr grid = gd2.read(istr); gd2.seekToGrid(istr); io::Archive::readGrid(grid, gd2, istr); EXPECT_TRUE(!((*grid)[GridBase::META_FILE_DELAYED_LOAD])); } { // read using library version of 4.9 std::istringstream istr(ostr2.str(), std::ios_base::binary); io::setVersion(istr, VersionId(4,9), file.fileVersion()); io::setStreamMetadataPtr(istr, streamMetadata, /*transfer=*/false); io::GridDescriptor gd2; GridBase::Ptr grid = gd2.read(istr); gd2.seekToGrid(istr); io::Archive::readGrid(grid, gd2, istr); EXPECT_TRUE(!((*grid)[GridBase::META_FILE_DELAYED_LOAD])); } { // read using library version of 6.1 std::istringstream istr(ostr2.str(), std::ios_base::binary); io::setVersion(istr, VersionId(6,1), file.fileVersion()); io::setStreamMetadataPtr(istr, streamMetadata, /*transfer=*/false); io::GridDescriptor gd2; GridBase::Ptr grid = gd2.read(istr); gd2.seekToGrid(istr); io::Archive::readGrid(grid, gd2, istr); EXPECT_TRUE(!((*grid)[GridBase::META_FILE_DELAYED_LOAD])); } { // read using library version of 6.2 std::istringstream istr(ostr2.str(), std::ios_base::binary); io::setVersion(istr, VersionId(6,2), file.fileVersion()); io::setStreamMetadataPtr(istr, streamMetadata, /*transfer=*/false); io::GridDescriptor gd2; GridBase::Ptr grid = gd2.read(istr); gd2.seekToGrid(istr); io::Archive::readGrid(grid, gd2, istr); EXPECT_TRUE(((*grid)[GridBase::META_FILE_DELAYED_LOAD])); } remove("something.vdb2"); } TEST_F(TestFile, testDelayedLoadMetadata) { testDelayedLoadMetadata(); }
94,613
C++
34.382947
100
0.62206
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestLevelSetUtil.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <vector> #include "gtest/gtest.h" #include <openvdb/openvdb.h> #include <openvdb/Exceptions.h> #include <openvdb/tools/LevelSetUtil.h> #include <openvdb/tools/MeshToVolume.h> // for createLevelSetBox() #include <openvdb/tools/Composite.h> // for csgDifference() class TestLevelSetUtil: public ::testing::Test { }; //////////////////////////////////////// TEST_F(TestLevelSetUtil, testSDFToFogVolume) { openvdb::FloatGrid::Ptr grid = openvdb::FloatGrid::create(10.0); grid->fill(openvdb::CoordBBox(openvdb::Coord(-100), openvdb::Coord(100)), 9.0); grid->fill(openvdb::CoordBBox(openvdb::Coord(-50), openvdb::Coord(50)), -9.0); openvdb::tools::sdfToFogVolume(*grid); EXPECT_TRUE(grid->background() < 1e-7); openvdb::FloatGrid::ValueOnIter iter = grid->beginValueOn(); for (; iter; ++iter) { EXPECT_TRUE(iter.getValue() > 0.0); EXPECT_TRUE(std::abs(iter.getValue() - 1.0) < 1e-7); } } TEST_F(TestLevelSetUtil, testSDFInteriorMask) { typedef openvdb::FloatGrid FloatGrid; typedef openvdb::BoolGrid BoolGrid; typedef openvdb::Vec3s Vec3s; typedef openvdb::math::BBox<Vec3s> BBoxs; typedef openvdb::math::Transform Transform; BBoxs bbox(Vec3s(0.0, 0.0, 0.0), Vec3s(1.0, 1.0, 1.0)); Transform::Ptr transform = Transform::createLinearTransform(0.1); FloatGrid::Ptr sdfGrid = openvdb::tools::createLevelSetBox<FloatGrid>(bbox, *transform); BoolGrid::Ptr maskGrid = openvdb::tools::sdfInteriorMask(*sdfGrid); // test inside coord value openvdb::Coord ijk = transform->worldToIndexNodeCentered(openvdb::Vec3d(0.5, 0.5, 0.5)); EXPECT_TRUE(maskGrid->tree().getValue(ijk) == true); // test outside coord value ijk = transform->worldToIndexNodeCentered(openvdb::Vec3d(1.5, 1.5, 1.5)); EXPECT_TRUE(maskGrid->tree().getValue(ijk) == false); } TEST_F(TestLevelSetUtil, testExtractEnclosedRegion) { typedef openvdb::FloatGrid FloatGrid; typedef openvdb::BoolGrid BoolGrid; typedef openvdb::Vec3s Vec3s; typedef openvdb::math::BBox<Vec3s> BBoxs; typedef openvdb::math::Transform Transform; BBoxs regionA(Vec3s(0.0f, 0.0f, 0.0f), Vec3s(3.0f, 3.0f, 3.0f)); BBoxs regionB(Vec3s(1.0f, 1.0f, 1.0f), Vec3s(2.0f, 2.0f, 2.0f)); Transform::Ptr transform = Transform::createLinearTransform(0.1); FloatGrid::Ptr sdfGrid = openvdb::tools::createLevelSetBox<FloatGrid>(regionA, *transform); FloatGrid::Ptr sdfGridB = openvdb::tools::createLevelSetBox<FloatGrid>(regionB, *transform); openvdb::tools::csgDifference(*sdfGrid, *sdfGridB); BoolGrid::Ptr maskGrid = openvdb::tools::extractEnclosedRegion(*sdfGrid); // test inside ls region coord value openvdb::Coord ijk = transform->worldToIndexNodeCentered(openvdb::Vec3d(1.5, 1.5, 1.5)); EXPECT_TRUE(maskGrid->tree().getValue(ijk) == true); // test outside coord value ijk = transform->worldToIndexNodeCentered(openvdb::Vec3d(3.5, 3.5, 3.5)); EXPECT_TRUE(maskGrid->tree().getValue(ijk) == false); } TEST_F(TestLevelSetUtil, testSegmentationTools) { typedef openvdb::FloatGrid FloatGrid; typedef openvdb::Vec3s Vec3s; typedef openvdb::math::BBox<Vec3s> BBoxs; typedef openvdb::math::Transform Transform; { // Test SDF segmentation // Create two sdf boxes with overlapping narrow-bands. 
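        // (Added note.)  The two interior regions are disjoint, but their exterior
        // narrow bands overlap, so a tool that segments purely on active-voxel
        // connectivity sees a single component, while the SDF-aware segmentation
        // separates the two interiors; that is exactly what the assertions below check.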
BBoxs regionA(Vec3s(0.0f, 0.0f, 0.0f), Vec3s(2.0f, 2.0f, 2.0f)); BBoxs regionB(Vec3s(2.5f, 0.0f, 0.0f), Vec3s(4.3f, 2.0f, 2.0f)); Transform::Ptr transform = Transform::createLinearTransform(0.1); FloatGrid::Ptr sdfGrid = openvdb::tools::createLevelSetBox<FloatGrid>(regionA, *transform); FloatGrid::Ptr sdfGridB = openvdb::tools::createLevelSetBox<FloatGrid>(regionB, *transform); openvdb::tools::csgUnion(*sdfGrid, *sdfGridB); std::vector<FloatGrid::Ptr> segments; // This tool will not identify two separate segments when the narrow-bands overlap. openvdb::tools::segmentActiveVoxels(*sdfGrid, segments); EXPECT_TRUE(segments.size() == 1); segments.clear(); // This tool should properly identify two separate segments openvdb::tools::segmentSDF(*sdfGrid, segments); EXPECT_TRUE(segments.size() == 2); // test inside ls region coord value openvdb::Coord ijk = transform->worldToIndexNodeCentered(openvdb::Vec3d(1.5, 1.5, 1.5)); EXPECT_TRUE(segments[0]->tree().getValue(ijk) < 0.0f); // test outside coord value ijk = transform->worldToIndexNodeCentered(openvdb::Vec3d(3.5, 3.5, 3.5)); EXPECT_TRUE(segments[0]->tree().getValue(ijk) > 0.0f); } { // Test empty SDF grid FloatGrid::Ptr sdfGrid = openvdb::FloatGrid::create(/*background=*/10.2f); sdfGrid->setGridClass(openvdb::GRID_LEVEL_SET); std::vector<FloatGrid::Ptr> segments; openvdb::tools::segmentSDF(*sdfGrid, segments); EXPECT_EQ(size_t(1), segments.size()); EXPECT_EQ(openvdb::Index32(0), segments[0]->tree().leafCount()); EXPECT_EQ(10.2f, segments[0]->background()); } { // Test SDF grid with inactive leaf nodes BBoxs bbox(Vec3s(0.0, 0.0, 0.0), Vec3s(1.0, 1.0, 1.0)); Transform::Ptr transform = Transform::createLinearTransform(0.1); FloatGrid::Ptr sdfGrid = openvdb::tools::createLevelSetBox<FloatGrid>(bbox, *transform, /*halfwidth=*/5); EXPECT_TRUE(sdfGrid->tree().activeVoxelCount() > openvdb::Index64(0)); // make all active voxels inactive for (auto leaf = sdfGrid->tree().beginLeaf(); leaf; ++leaf) { for (auto iter = leaf->beginValueOn(); iter; ++iter) { leaf->setValueOff(iter.getCoord()); } } EXPECT_EQ(openvdb::Index64(0), sdfGrid->tree().activeVoxelCount()); std::vector<FloatGrid::Ptr> segments; openvdb::tools::segmentSDF(*sdfGrid, segments); EXPECT_EQ(size_t(1), segments.size()); EXPECT_EQ(openvdb::Index32(0), segments[0]->tree().leafCount()); EXPECT_EQ(sdfGrid->background(), segments[0]->background()); } { // Test fog volume with active tiles openvdb::FloatGrid::Ptr grid = openvdb::FloatGrid::create(0.0); grid->fill(openvdb::CoordBBox(openvdb::Coord(0), openvdb::Coord(50)), 1.0); grid->fill(openvdb::CoordBBox(openvdb::Coord(60), openvdb::Coord(100)), 1.0); EXPECT_TRUE(grid->tree().hasActiveTiles() == true); std::vector<FloatGrid::Ptr> segments; openvdb::tools::segmentActiveVoxels(*grid, segments); EXPECT_EQ(size_t(2), segments.size()); } { // Test an empty fog volume openvdb::FloatGrid::Ptr grid = openvdb::FloatGrid::create(/*background=*/3.1f); EXPECT_EQ(openvdb::Index32(0), grid->tree().leafCount()); std::vector<FloatGrid::Ptr> segments; openvdb::tools::segmentActiveVoxels(*grid, segments); // note that an empty volume should segment into an empty volume EXPECT_EQ(size_t(1), segments.size()); EXPECT_EQ(openvdb::Index32(0), segments[0]->tree().leafCount()); EXPECT_EQ(3.1f, segments[0]->background()); } { // Test fog volume with two inactive leaf nodes openvdb::FloatGrid::Ptr grid = openvdb::FloatGrid::create(0.0); grid->tree().touchLeaf(openvdb::Coord(0,0,0)); grid->tree().touchLeaf(openvdb::Coord(100,100,100)); EXPECT_EQ(openvdb::Index32(2), 
grid->tree().leafCount()); EXPECT_EQ(openvdb::Index64(0), grid->tree().activeVoxelCount()); std::vector<FloatGrid::Ptr> segments; openvdb::tools::segmentActiveVoxels(*grid, segments); EXPECT_EQ(size_t(1), segments.size()); EXPECT_EQ(openvdb::Index32(0), segments[0]->tree().leafCount()); } }
8,028
C++
34.684444
100
0.640259
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestPointIndexGrid.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/tools/PointIndexGrid.h> #include <vector> #include <algorithm> #include <cmath> #include "util.h" // for genPoints struct TestPointIndexGrid: public ::testing::Test { }; //////////////////////////////////////// namespace { class PointList { public: typedef openvdb::Vec3R PosType; PointList(const std::vector<PosType>& points) : mPoints(&points) { } size_t size() const { return mPoints->size(); } void getPos(size_t n, PosType& xyz) const { xyz = (*mPoints)[n]; } protected: std::vector<PosType> const * const mPoints; }; // PointList template<typename T> bool hasDuplicates(const std::vector<T>& items) { std::vector<T> vec(items); std::sort(vec.begin(), vec.end()); size_t duplicates = 0; for (size_t n = 1, N = vec.size(); n < N; ++n) { if (vec[n] == vec[n-1]) ++duplicates; } return duplicates != 0; } template<typename T> struct WeightedAverageAccumulator { typedef T ValueType; WeightedAverageAccumulator(T const * const array, const T radius) : mValues(array), mInvRadius(1.0/radius), mWeightSum(0.0), mValueSum(0.0) {} void reset() { mWeightSum = mValueSum = T(0.0); } void operator()(const T distSqr, const size_t pointIndex) { const T weight = T(1.0) - openvdb::math::Sqrt(distSqr) * mInvRadius; mWeightSum += weight; mValueSum += weight * mValues[pointIndex]; } T result() const { return mWeightSum > T(0.0) ? mValueSum / mWeightSum : T(0.0); } private: T const * const mValues; const T mInvRadius; T mWeightSum, mValueSum; }; // struct WeightedAverageAccumulator } // namespace //////////////////////////////////////// TEST_F(TestPointIndexGrid, testPointIndexGrid) { const float voxelSize = 0.01f; const openvdb::math::Transform::Ptr transform = openvdb::math::Transform::createLinearTransform(voxelSize); // generate points std::vector<openvdb::Vec3R> points; unittest_util::genPoints(40000, points); PointList pointList(points); // construct data structure typedef openvdb::tools::PointIndexGrid PointIndexGrid; PointIndexGrid::Ptr pointGridPtr = openvdb::tools::createPointIndexGrid<PointIndexGrid>(pointList, *transform); openvdb::CoordBBox bbox; pointGridPtr->tree().evalActiveVoxelBoundingBox(bbox); // coord bbox search typedef PointIndexGrid::ConstAccessor ConstAccessor; typedef openvdb::tools::PointIndexIterator<> PointIndexIterator; ConstAccessor acc = pointGridPtr->getConstAccessor(); PointIndexIterator it(bbox, acc); EXPECT_TRUE(it.test()); EXPECT_EQ(points.size(), it.size()); // fractional bbox search openvdb::BBoxd region(bbox.min().asVec3d(), bbox.max().asVec3d()); // points are bucketed in a cell-centered fashion, we need to pad the // coordinate range to get the same search region in the fractional bbox. 
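    // (Added note.)  With cell-centered bucketing, a voxel with index ijk owns the
    // points whose index-space position rounds to ijk, i.e. points up to half a
    // voxel away from the voxel centre; the expansion below pads the fractional
    // search region so that it covers the same set of voxels as the CoordBBox
    // search performed above.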
region.expand(voxelSize * 0.5); it.searchAndUpdate(region, acc, pointList, *transform); EXPECT_TRUE(it.test()); EXPECT_EQ(points.size(), it.size()); { std::vector<uint32_t> vec; vec.reserve(it.size()); for (; it; ++it) { vec.push_back(*it); } EXPECT_EQ(vec.size(), it.size()); EXPECT_TRUE(!hasDuplicates(vec)); } // radial search openvdb::Vec3d center = region.getCenter(); double radius = region.extents().x() * 0.5; it.searchAndUpdate(center, radius, acc, pointList, *transform); EXPECT_TRUE(it.test()); EXPECT_EQ(points.size(), it.size()); { std::vector<uint32_t> vec; vec.reserve(it.size()); for (; it; ++it) { vec.push_back(*it); } EXPECT_EQ(vec.size(), it.size()); EXPECT_TRUE(!hasDuplicates(vec)); } center = region.min(); it.searchAndUpdate(center, radius, acc, pointList, *transform); EXPECT_TRUE(it.test()); { std::vector<uint32_t> vec; vec.reserve(it.size()); for (; it; ++it) { vec.push_back(*it); } EXPECT_EQ(vec.size(), it.size()); EXPECT_TRUE(!hasDuplicates(vec)); // check that no points where missed. std::vector<unsigned char> indexMask(points.size(), 0); for (size_t n = 0, N = vec.size(); n < N; ++n) { indexMask[vec[n]] = 1; } const double r2 = radius * radius; openvdb::Vec3R v; for (size_t n = 0, N = indexMask.size(); n < N; ++n) { v = center - transform->worldToIndex(points[n]); if (indexMask[n] == 0) { EXPECT_TRUE(!(v.lengthSqr() < r2)); } else { EXPECT_TRUE(v.lengthSqr() < r2); } } } // Check partitioning EXPECT_TRUE(openvdb::tools::isValidPartition(pointList, *pointGridPtr)); points[10000].x() += 1.5; // manually modify a few points. points[20000].x() += 1.5; points[30000].x() += 1.5; EXPECT_TRUE(!openvdb::tools::isValidPartition(pointList, *pointGridPtr)); PointIndexGrid::Ptr pointGrid2Ptr = openvdb::tools::getValidPointIndexGrid<PointIndexGrid>(pointList, pointGridPtr); EXPECT_TRUE(openvdb::tools::isValidPartition(pointList, *pointGrid2Ptr)); } TEST_F(TestPointIndexGrid, testPointIndexFilter) { // generate points const float voxelSize = 0.01f; const size_t pointCount = 10000; const openvdb::math::Transform::Ptr transform = openvdb::math::Transform::createLinearTransform(voxelSize); std::vector<openvdb::Vec3d> points; unittest_util::genPoints(pointCount, points); PointList pointList(points); // construct data structure typedef openvdb::tools::PointIndexGrid PointIndexGrid; PointIndexGrid::Ptr pointGridPtr = openvdb::tools::createPointIndexGrid<PointIndexGrid>(pointList, *transform); std::vector<double> pointDensity(pointCount, 1.0); openvdb::tools::PointIndexFilter<PointList> filter(pointList, pointGridPtr->tree(), pointGridPtr->transform()); const double radius = 3.0 * voxelSize; WeightedAverageAccumulator<double> accumulator(&pointDensity.front(), radius); double sum = 0.0; for (size_t n = 0, N = points.size(); n < N; ++n) { accumulator.reset(); filter.searchAndApply(points[n], radius, accumulator); sum += accumulator.result(); } EXPECT_NEAR(sum, double(points.size()), 1e-6); } TEST_F(TestPointIndexGrid, testWorldSpaceSearchAndUpdate) { // Create random particles in a cube. openvdb::math::Rand01<> rnd(0); const size_t N = 1000000; std::vector<openvdb::Vec3d> pos; pos.reserve(N); // Create a box to query points. 
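    // (Added note.)  indexListA, gathered from the raw positions as they are
    // generated below, serves as the brute-force ground truth that the result of
    // the world-space query (indexListB) is compared against at the end of the test.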
openvdb::BBoxd wsBBox(openvdb::Vec3d(0.25), openvdb::Vec3d(0.75)); std::set<size_t> indexListA; for (size_t i = 0; i < N; ++i) { openvdb::Vec3d p(rnd(), rnd(), rnd()); pos.push_back(p); if (wsBBox.isInside(p)) { indexListA.insert(i); } } // Create a point index grid const double dx = 0.025; openvdb::math::Transform::Ptr transform = openvdb::math::Transform::createLinearTransform(dx); PointList pointArray(pos); openvdb::tools::PointIndexGrid::Ptr pointIndexGrid = openvdb::tools::createPointIndexGrid<openvdb::tools::PointIndexGrid, PointList>(pointArray, *transform); // Search for points within the box. openvdb::tools::PointIndexGrid::ConstAccessor acc = pointIndexGrid->getConstAccessor(); openvdb::tools::PointIndexIterator<openvdb::tools::PointIndexTree> pointIndexIter; pointIndexIter.worldSpaceSearchAndUpdate<PointList>(wsBBox, acc, pointArray, pointIndexGrid->transform()); std::set<size_t> indexListB; for (; pointIndexIter; ++pointIndexIter) { indexListB.insert(*pointIndexIter); } EXPECT_EQ(indexListA.size(), indexListB.size()); }
8,097
C++
25.638158
114
0.625046
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestTreeCombine.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Types.h> #include <openvdb/openvdb.h> #include <openvdb/tools/Composite.h> #include <openvdb/tools/LevelSetSphere.h> #include <openvdb/util/CpuTimer.h> #include "util.h" // for unittest_util::makeSphere() #include <algorithm> // for std::max() and std::min() #include <cmath> // for std::isnan() and std::isinf() #include <limits> // for std::numeric_limits #include <sstream> #include <string> #include <type_traits> #define TEST_CSG_VERBOSE 0 #if TEST_CSG_VERBOSE #include <openvdb/util/CpuTimer.h> #include <iostream> #endif namespace { using Float433Tree = openvdb::tree::Tree4<float, 4, 3, 3>::Type; using Float433Grid = openvdb::Grid<Float433Tree>; } class TestTreeCombine: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); Float433Grid::registerGrid(); } void TearDown() override { openvdb::uninitialize(); } protected: template<class TreeT, typename TreeComp, typename ValueComp> void testComp(const TreeComp&, const ValueComp&); template<class TreeT> void testCompRepl(); template<typename TreeT, typename VisitorT> typename TreeT::Ptr visitCsg(const TreeT& a, const TreeT& b, const TreeT& ref, const VisitorT&); }; //////////////////////////////////////// namespace { namespace Local { template<typename ValueT> struct OrderDependentCombineOp { OrderDependentCombineOp() {} void operator()(const ValueT& a, const ValueT& b, ValueT& result) const { result = a + ValueT(100) * b; // result is order-dependent on A and B } }; /// Test Tree::combine(), which takes a functor that accepts three arguments /// (the a, b and result values). template<typename TreeT> void combine(TreeT& a, TreeT& b) { a.combine(b, OrderDependentCombineOp<typename TreeT::ValueType>()); } /// Test Tree::combineExtended(), which takes a functor that accepts a single /// CombineArgs argument, in which the functor can return a computed active state /// for the output value. template<typename TreeT> void extendedCombine(TreeT& a, TreeT& b) { using ValueT = typename TreeT::ValueType; struct ArgsOp { static void order(openvdb::CombineArgs<ValueT>& args) { // The result is order-dependent on A and B. args.setResult(args.a() + ValueT(100) * args.b()); args.setResultIsActive(args.aIsActive() || args.bIsActive()); } }; a.combineExtended(b, ArgsOp::order); } template<typename TreeT> void compMax(TreeT& a, TreeT& b) { openvdb::tools::compMax(a, b); } template<typename TreeT> void compMin(TreeT& a, TreeT& b) { openvdb::tools::compMin(a, b); } template<typename TreeT> void compSum(TreeT& a, TreeT& b) { openvdb::tools::compSum(a, b); } template<typename TreeT> void compMul(TreeT& a, TreeT& b) { openvdb::tools::compMul(a, b); }\ template<typename TreeT> void compDiv(TreeT& a, TreeT& b) { openvdb::tools::compDiv(a, b); }\ inline float orderf(float a, float b) { return a + 100.0f * b; } inline float maxf(float a, float b) { return std::max(a, b); } inline float minf(float a, float b) { return std::min(a, b); } inline float sumf(float a, float b) { return a + b; } inline float mulf(float a, float b) { return a * b; } inline float divf(float a, float b) { return a / b; } inline openvdb::Vec3f orderv(const openvdb::Vec3f& a, const openvdb::Vec3f& b) { return a+100.0f*b; } inline openvdb::Vec3f maxv(const openvdb::Vec3f& a, const openvdb::Vec3f& b) { const float aMag = a.lengthSqr(), bMag = b.lengthSqr(); return (aMag > bMag ? a : (bMag > aMag ? 
b : std::max(a, b))); } inline openvdb::Vec3f minv(const openvdb::Vec3f& a, const openvdb::Vec3f& b) { const float aMag = a.lengthSqr(), bMag = b.lengthSqr(); return (aMag < bMag ? a : (bMag < aMag ? b : std::min(a, b))); } inline openvdb::Vec3f sumv(const openvdb::Vec3f& a, const openvdb::Vec3f& b) { return a + b; } inline openvdb::Vec3f mulv(const openvdb::Vec3f& a, const openvdb::Vec3f& b) { return a * b; } inline openvdb::Vec3f divv(const openvdb::Vec3f& a, const openvdb::Vec3f& b) { return a / b; } } // namespace Local } // unnamed namespace TEST_F(TestTreeCombine, testCombine) { testComp<openvdb::FloatTree>(Local::combine<openvdb::FloatTree>, Local::orderf); testComp<openvdb::VectorTree>(Local::combine<openvdb::VectorTree>, Local::orderv); testComp<openvdb::FloatTree>(Local::extendedCombine<openvdb::FloatTree>, Local::orderf); testComp<openvdb::VectorTree>(Local::extendedCombine<openvdb::VectorTree>, Local::orderv); } TEST_F(TestTreeCombine, testCompMax) { testComp<openvdb::FloatTree>(Local::compMax<openvdb::FloatTree>, Local::maxf); testComp<openvdb::VectorTree>(Local::compMax<openvdb::VectorTree>, Local::maxv); } TEST_F(TestTreeCombine, testCompMin) { testComp<openvdb::FloatTree>(Local::compMin<openvdb::FloatTree>, Local::minf); testComp<openvdb::VectorTree>(Local::compMin<openvdb::VectorTree>, Local::minv); } TEST_F(TestTreeCombine, testCompSum) { testComp<openvdb::FloatTree>(Local::compSum<openvdb::FloatTree>, Local::sumf); testComp<openvdb::VectorTree>(Local::compSum<openvdb::VectorTree>, Local::sumv); } TEST_F(TestTreeCombine, testCompProd) { testComp<openvdb::FloatTree>(Local::compMul<openvdb::FloatTree>, Local::mulf); testComp<openvdb::VectorTree>(Local::compMul<openvdb::VectorTree>, Local::mulv); } TEST_F(TestTreeCombine, testCompDiv) { testComp<openvdb::FloatTree>(Local::compDiv<openvdb::FloatTree>, Local::divf); testComp<openvdb::VectorTree>(Local::compDiv<openvdb::VectorTree>, Local::divv); } TEST_F(TestTreeCombine, testCompDivByZero) { const openvdb::Coord c0(0), c1(1), c2(2), c3(3), c4(4); // Verify that integer-valued grids behave well w.r.t. division by zero. { const openvdb::Int32 inf = std::numeric_limits<openvdb::Int32>::max(); openvdb::Int32Tree a(/*background=*/1), b(0); a.setValueOn(c0); a.setValueOn(c1); a.setValueOn(c2, -1); a.setValueOn(c3, -1); a.setValueOn(c4, 0); b.setValueOn(c1); b.setValueOn(c3); openvdb::tools::compDiv(a, b); EXPECT_EQ( inf, a.getValue(c0)); // 1 / 0 EXPECT_EQ( inf, a.getValue(c1)); // 1 / 0 EXPECT_EQ(-inf, a.getValue(c2)); // -1 / 0 EXPECT_EQ(-inf, a.getValue(c3)); // -1 / 0 EXPECT_EQ( 0, a.getValue(c4)); // 0 / 0 } { const openvdb::Index32 zero(0), inf = std::numeric_limits<openvdb::Index32>::max(); openvdb::UInt32Tree a(/*background=*/1), b(0); a.setValueOn(c0); a.setValueOn(c1); a.setValueOn(c2, zero); b.setValueOn(c1); openvdb::tools::compDiv(a, b); EXPECT_EQ( inf, a.getValue(c0)); // 1 / 0 EXPECT_EQ( inf, a.getValue(c1)); // 1 / 0 EXPECT_EQ(zero, a.getValue(c2)); // 0 / 0 } // Verify that non-integer-valued grids don't use integer division semantics. 
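    // (Added note.)  The integer trees above map x/0 to plus or minus the type's
    // maximum value and 0/0 to 0, while floating-point trees follow IEEE-754
    // semantics: signed infinities for 1/0 and -1/0, and NaN for 0/0, as asserted below.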
{ openvdb::FloatTree a(/*background=*/1.0), b(0.0); a.setValueOn(c0); a.setValueOn(c1); a.setValueOn(c2, -1.0); a.setValueOn(c3, -1.0); a.setValueOn(c4, 0.0); b.setValueOn(c1); b.setValueOn(c3); openvdb::tools::compDiv(a, b); EXPECT_TRUE(std::isinf(a.getValue(c0))); // 1 / 0 EXPECT_TRUE(std::isinf(a.getValue(c1))); // 1 / 0 EXPECT_TRUE(std::isinf(a.getValue(c2))); // -1 / 0 EXPECT_TRUE(std::isinf(a.getValue(c3))); // -1 / 0 EXPECT_TRUE(std::isnan(a.getValue(c4))); // 0 / 0 } } TEST_F(TestTreeCombine, testCompReplace) { testCompRepl<openvdb::FloatTree>(); testCompRepl<openvdb::VectorTree>(); } template<typename TreeT, typename TreeComp, typename ValueComp> void TestTreeCombine::testComp(const TreeComp& comp, const ValueComp& op) { using ValueT = typename TreeT::ValueType; const ValueT zero = openvdb::zeroVal<ValueT>(), minusOne = zero + (-1), minusTwo = zero + (-2), one = zero + 1, three = zero + 3, four = zero + 4, five = zero + 5; { TreeT aTree(/*background=*/one); aTree.setValueOn(openvdb::Coord(0, 0, 0), three); aTree.setValueOn(openvdb::Coord(0, 0, 1), three); aTree.setValueOn(openvdb::Coord(0, 0, 2), aTree.background()); aTree.setValueOn(openvdb::Coord(0, 1, 2), aTree.background()); aTree.setValueOff(openvdb::Coord(1, 0, 0), three); aTree.setValueOff(openvdb::Coord(1, 0, 1), three); TreeT bTree(five); bTree.setValueOn(openvdb::Coord(0, 0, 0), minusOne); bTree.setValueOn(openvdb::Coord(0, 1, 0), four); bTree.setValueOn(openvdb::Coord(0, 1, 2), minusTwo); bTree.setValueOff(openvdb::Coord(1, 0, 0), minusOne); bTree.setValueOff(openvdb::Coord(1, 1, 0), four); // Call aTree.compMax(bTree), aTree.compSum(bTree), etc. comp(aTree, bTree); // a = 3 (On), b = -1 (On) EXPECT_EQ(op(three, minusOne), aTree.getValue(openvdb::Coord(0, 0, 0))); // a = 3 (On), b = 5 (bg) EXPECT_EQ(op(three, five), aTree.getValue(openvdb::Coord(0, 0, 1))); EXPECT_TRUE(aTree.isValueOn(openvdb::Coord(0, 0, 1))); // a = 1 (On, = bg), b = 5 (bg) EXPECT_EQ(op(one, five), aTree.getValue(openvdb::Coord(0, 0, 2))); EXPECT_TRUE(aTree.isValueOn(openvdb::Coord(0, 0, 2))); // a = 1 (On, = bg), b = -2 (On) EXPECT_EQ(op(one, minusTwo), aTree.getValue(openvdb::Coord(0, 1, 2))); EXPECT_TRUE(aTree.isValueOn(openvdb::Coord(0, 1, 2))); // a = 1 (bg), b = 4 (On) EXPECT_EQ(op(one, four), aTree.getValue(openvdb::Coord(0, 1, 0))); EXPECT_TRUE(aTree.isValueOn(openvdb::Coord(0, 1, 0))); // a = 3 (Off), b = -1 (Off) EXPECT_EQ(op(three, minusOne), aTree.getValue(openvdb::Coord(1, 0, 0))); EXPECT_TRUE(aTree.isValueOff(openvdb::Coord(1, 0, 0))); // a = 3 (Off), b = 5 (bg) EXPECT_EQ(op(three, five), aTree.getValue(openvdb::Coord(1, 0, 1))); EXPECT_TRUE(aTree.isValueOff(openvdb::Coord(1, 0, 1))); // a = 1 (bg), b = 4 (Off) EXPECT_EQ(op(one, four), aTree.getValue(openvdb::Coord(1, 1, 0))); EXPECT_TRUE(aTree.isValueOff(openvdb::Coord(1, 1, 0))); // a = 1 (bg), b = 5 (bg) EXPECT_EQ(op(one, five), aTree.getValue(openvdb::Coord(1000, 1, 2))); EXPECT_TRUE(aTree.isValueOff(openvdb::Coord(1000, 1, 2))); } // As above, but combining the A grid into the B grid { TreeT aTree(/*bg=*/one); aTree.setValueOn(openvdb::Coord(0, 0, 0), three); aTree.setValueOn(openvdb::Coord(0, 0, 1), three); aTree.setValueOn(openvdb::Coord(0, 0, 2), aTree.background()); aTree.setValueOn(openvdb::Coord(0, 1, 2), aTree.background()); aTree.setValueOff(openvdb::Coord(1, 0, 0), three); aTree.setValueOff(openvdb::Coord(1, 0, 1), three); TreeT bTree(five); bTree.setValueOn(openvdb::Coord(0, 0, 0), minusOne); bTree.setValueOn(openvdb::Coord(0, 1, 0), four); 
bTree.setValueOn(openvdb::Coord(0, 1, 2), minusTwo); bTree.setValueOff(openvdb::Coord(1, 0, 0), minusOne); bTree.setValueOff(openvdb::Coord(1, 1, 0), four); // Call bTree.compMax(aTree), bTree.compSum(aTree), etc. comp(bTree, aTree); // a = 3 (On), b = -1 (On) EXPECT_EQ(op(minusOne, three), bTree.getValue(openvdb::Coord(0, 0, 0))); // a = 3 (On), b = 5 (bg) EXPECT_EQ(op(five, three), bTree.getValue(openvdb::Coord(0, 0, 1))); EXPECT_TRUE(bTree.isValueOn(openvdb::Coord(0, 0, 1))); // a = 1 (On, = bg), b = 5 (bg) EXPECT_EQ(op(five, one), bTree.getValue(openvdb::Coord(0, 0, 2))); EXPECT_TRUE(bTree.isValueOn(openvdb::Coord(0, 0, 2))); // a = 1 (On, = bg), b = -2 (On) EXPECT_EQ(op(minusTwo, one), bTree.getValue(openvdb::Coord(0, 1, 2))); EXPECT_TRUE(bTree.isValueOn(openvdb::Coord(0, 1, 2))); // a = 1 (bg), b = 4 (On) EXPECT_EQ(op(four, one), bTree.getValue(openvdb::Coord(0, 1, 0))); EXPECT_TRUE(bTree.isValueOn(openvdb::Coord(0, 1, 0))); // a = 3 (Off), b = -1 (Off) EXPECT_EQ(op(minusOne, three), bTree.getValue(openvdb::Coord(1, 0, 0))); EXPECT_TRUE(bTree.isValueOff(openvdb::Coord(1, 0, 0))); // a = 3 (Off), b = 5 (bg) EXPECT_EQ(op(five, three), bTree.getValue(openvdb::Coord(1, 0, 1))); EXPECT_TRUE(bTree.isValueOff(openvdb::Coord(1, 0, 1))); // a = 1 (bg), b = 4 (Off) EXPECT_EQ(op(four, one), bTree.getValue(openvdb::Coord(1, 1, 0))); EXPECT_TRUE(bTree.isValueOff(openvdb::Coord(1, 1, 0))); // a = 1 (bg), b = 5 (bg) EXPECT_EQ(op(five, one), bTree.getValue(openvdb::Coord(1000, 1, 2))); EXPECT_TRUE(bTree.isValueOff(openvdb::Coord(1000, 1, 2))); } } //////////////////////////////////////// TEST_F(TestTreeCombine, testCombine2) { using openvdb::Coord; using openvdb::Vec3d; struct Local { static void floatAverage(const float& a, const float& b, float& result) { result = 0.5f * (a + b); } static void vec3dAverage(const Vec3d& a, const Vec3d& b, Vec3d& result) { result = 0.5 * (a + b); } static void vec3dFloatMultiply(const Vec3d& a, const float& b, Vec3d& result) { result = a * b; } static void vec3dBoolMultiply(const Vec3d& a, const bool& b, Vec3d& result) { result = a * b; } }; const Coord c0(0, 0, 0), c1(0, 0, 1), c2(0, 1, 0), c3(1, 0, 0), c4(1000, 1, 2); openvdb::FloatTree aFloatTree(/*bg=*/1.0), bFloatTree(5.0), outFloatTree(1.0); aFloatTree.setValue(c0, 3.0); aFloatTree.setValue(c1, 3.0); bFloatTree.setValue(c0, -1.0); bFloatTree.setValue(c2, 4.0); outFloatTree.combine2(aFloatTree, bFloatTree, Local::floatAverage); const float tolerance = 0.0; // Average of set value 3 and set value -1 EXPECT_NEAR(1.0, outFloatTree.getValue(c0), tolerance); // Average of set value 3 and bg value 5 EXPECT_NEAR(4.0, outFloatTree.getValue(c1), tolerance); // Average of bg value 1 and set value 4 EXPECT_NEAR(2.5, outFloatTree.getValue(c2), tolerance); // Average of bg value 1 and bg value 5 EXPECT_TRUE(outFloatTree.isValueOff(c3)); EXPECT_TRUE(outFloatTree.isValueOff(c4)); EXPECT_NEAR(3.0, outFloatTree.getValue(c3), tolerance); EXPECT_NEAR(3.0, outFloatTree.getValue(c4), tolerance); // As above, but combining vector grids: const Vec3d zero(0), one(1), two(2), three(3), four(4), five(5); openvdb::Vec3DTree aVecTree(/*bg=*/one), bVecTree(five), outVecTree(one); aVecTree.setValue(c0, three); aVecTree.setValue(c1, three); bVecTree.setValue(c0, -1.0 * one); bVecTree.setValue(c2, four); outVecTree.combine2(aVecTree, bVecTree, Local::vec3dAverage); // Average of set value 3 and set value -1 EXPECT_EQ(one, outVecTree.getValue(c0)); // Average of set value 3 and bg value 5 EXPECT_EQ(four, outVecTree.getValue(c1)); // Average of 
bg value 1 and set value 4 EXPECT_EQ(2.5 * one, outVecTree.getValue(c2)); // Average of bg value 1 and bg value 5 EXPECT_TRUE(outVecTree.isValueOff(c3)); EXPECT_TRUE(outVecTree.isValueOff(c4)); EXPECT_EQ(three, outVecTree.getValue(c3)); EXPECT_EQ(three, outVecTree.getValue(c4)); // Multiply the vector tree by the scalar tree. { openvdb::Vec3DTree vecTree(one); vecTree.combine2(outVecTree, outFloatTree, Local::vec3dFloatMultiply); // Product of set value (1, 1, 1) and set value 1 EXPECT_TRUE(vecTree.isValueOn(c0)); EXPECT_EQ(one, vecTree.getValue(c0)); // Product of set value (4, 4, 4) and set value 4 EXPECT_TRUE(vecTree.isValueOn(c1)); EXPECT_EQ(4 * 4 * one, vecTree.getValue(c1)); // Product of set value (2.5, 2.5, 2.5) and set value 2.5 EXPECT_TRUE(vecTree.isValueOn(c2)); EXPECT_EQ(2.5 * 2.5 * one, vecTree.getValue(c2)); // Product of bg value (3, 3, 3) and bg value 3 EXPECT_TRUE(vecTree.isValueOff(c3)); EXPECT_TRUE(vecTree.isValueOff(c4)); EXPECT_EQ(3 * 3 * one, vecTree.getValue(c3)); EXPECT_EQ(3 * 3 * one, vecTree.getValue(c4)); } // Multiply the vector tree by a boolean tree. { openvdb::BoolTree boolTree(0); boolTree.setValue(c0, true); boolTree.setValue(c1, false); boolTree.setValue(c2, true); openvdb::Vec3DTree vecTree(one); vecTree.combine2(outVecTree, boolTree, Local::vec3dBoolMultiply); // Product of set value (1, 1, 1) and set value 1 EXPECT_TRUE(vecTree.isValueOn(c0)); EXPECT_EQ(one, vecTree.getValue(c0)); // Product of set value (4, 4, 4) and set value 0 EXPECT_TRUE(vecTree.isValueOn(c1)); EXPECT_EQ(zero, vecTree.getValue(c1)); // Product of set value (2.5, 2.5, 2.5) and set value 1 EXPECT_TRUE(vecTree.isValueOn(c2)); EXPECT_EQ(2.5 * one, vecTree.getValue(c2)); // Product of bg value (3, 3, 3) and bg value 0 EXPECT_TRUE(vecTree.isValueOff(c3)); EXPECT_TRUE(vecTree.isValueOff(c4)); EXPECT_EQ(zero, vecTree.getValue(c3)); EXPECT_EQ(zero, vecTree.getValue(c4)); } // Verify that a vector tree can't be combined into a scalar tree // (although the reverse is allowed). { struct Local2 { static void f(const float& a, const Vec3d&, float& result) { result = a; } }; openvdb::FloatTree floatTree(5.0), outTree; openvdb::Vec3DTree vecTree(one); EXPECT_THROW(outTree.combine2(floatTree, vecTree, Local2::f), openvdb::TypeError); } } //////////////////////////////////////// TEST_F(TestTreeCombine, testBoolTree) { openvdb::BoolGrid::Ptr sphere = openvdb::BoolGrid::create(); unittest_util::makeSphere<openvdb::BoolGrid>(/*dim=*/openvdb::Coord(32), /*ctr=*/openvdb::Vec3f(0), /*radius=*/20.0, *sphere, unittest_util::SPHERE_SPARSE_NARROW_BAND); openvdb::BoolGrid::Ptr aGrid = sphere->copy(), bGrid = sphere->copy(); // CSG operations work only on level sets with a nonzero inside and outside values. 
EXPECT_THROW(openvdb::tools::csgUnion(aGrid->tree(), bGrid->tree()), openvdb::ValueError); EXPECT_THROW(openvdb::tools::csgIntersection(aGrid->tree(), bGrid->tree()), openvdb::ValueError); EXPECT_THROW(openvdb::tools::csgDifference(aGrid->tree(), bGrid->tree()), openvdb::ValueError); openvdb::tools::compSum(aGrid->tree(), bGrid->tree()); bGrid = sphere->copy(); openvdb::tools::compMax(aGrid->tree(), bGrid->tree()); int mismatches = 0; openvdb::BoolGrid::ConstAccessor acc = sphere->getConstAccessor(); for (openvdb::BoolGrid::ValueAllCIter it = aGrid->cbeginValueAll(); it; ++it) { if (*it != acc.getValue(it.getCoord())) ++mismatches; } EXPECT_EQ(0, mismatches); } //////////////////////////////////////// template<typename TreeT> void TestTreeCombine::testCompRepl() { using ValueT = typename TreeT::ValueType; const ValueT zero = openvdb::zeroVal<ValueT>(), minusOne = zero + (-1), one = zero + 1, three = zero + 3, four = zero + 4, five = zero + 5; { TreeT aTree(/*bg=*/one); aTree.setValueOn(openvdb::Coord(0, 0, 0), three); aTree.setValueOn(openvdb::Coord(0, 0, 1), three); aTree.setValueOn(openvdb::Coord(0, 0, 2), aTree.background()); aTree.setValueOn(openvdb::Coord(0, 1, 2), aTree.background()); aTree.setValueOff(openvdb::Coord(1, 0, 0), three); aTree.setValueOff(openvdb::Coord(1, 0, 1), three); TreeT bTree(five); bTree.setValueOn(openvdb::Coord(0, 0, 0), minusOne); bTree.setValueOn(openvdb::Coord(0, 1, 0), four); bTree.setValueOn(openvdb::Coord(0, 1, 2), minusOne); bTree.setValueOff(openvdb::Coord(1, 0, 0), minusOne); bTree.setValueOff(openvdb::Coord(1, 1, 0), four); // Copy active voxels of bTree into aTree. openvdb::tools::compReplace(aTree, bTree); // a = 3 (On), b = -1 (On) EXPECT_EQ(minusOne, aTree.getValue(openvdb::Coord(0, 0, 0))); // a = 3 (On), b = 5 (bg) EXPECT_EQ(three, aTree.getValue(openvdb::Coord(0, 0, 1))); EXPECT_TRUE(aTree.isValueOn(openvdb::Coord(0, 0, 1))); // a = 1 (On, = bg), b = 5 (bg) EXPECT_EQ(one, aTree.getValue(openvdb::Coord(0, 0, 2))); EXPECT_TRUE(aTree.isValueOn(openvdb::Coord(0, 0, 2))); // a = 1 (On, = bg), b = -1 (On) EXPECT_EQ(minusOne, aTree.getValue(openvdb::Coord(0, 1, 2))); EXPECT_TRUE(aTree.isValueOn(openvdb::Coord(0, 1, 2))); // a = 1 (bg), b = 4 (On) EXPECT_EQ(four, aTree.getValue(openvdb::Coord(0, 1, 0))); EXPECT_TRUE(aTree.isValueOn(openvdb::Coord(0, 1, 0))); // a = 3 (Off), b = -1 (Off) EXPECT_EQ(three, aTree.getValue(openvdb::Coord(1, 0, 0))); EXPECT_TRUE(aTree.isValueOff(openvdb::Coord(1, 0, 0))); // a = 3 (Off), b = 5 (bg) EXPECT_EQ(three, aTree.getValue(openvdb::Coord(1, 0, 1))); EXPECT_TRUE(aTree.isValueOff(openvdb::Coord(1, 0, 1))); // a = 1 (bg), b = 4 (Off) EXPECT_EQ(one, aTree.getValue(openvdb::Coord(1, 1, 0))); EXPECT_TRUE(aTree.isValueOff(openvdb::Coord(1, 1, 0))); // a = 1 (bg), b = 5 (bg) EXPECT_EQ(one, aTree.getValue(openvdb::Coord(1000, 1, 2))); EXPECT_TRUE(aTree.isValueOff(openvdb::Coord(1000, 1, 2))); } // As above, but combining the A grid into the B grid { TreeT aTree(/*background=*/one); aTree.setValueOn(openvdb::Coord(0, 0, 0), three); aTree.setValueOn(openvdb::Coord(0, 0, 1), three); aTree.setValueOn(openvdb::Coord(0, 0, 2), aTree.background()); aTree.setValueOn(openvdb::Coord(0, 1, 2), aTree.background()); aTree.setValueOff(openvdb::Coord(1, 0, 0), three); aTree.setValueOff(openvdb::Coord(1, 0, 1), three); TreeT bTree(five); bTree.setValueOn(openvdb::Coord(0, 0, 0), minusOne); bTree.setValueOn(openvdb::Coord(0, 1, 0), four); bTree.setValueOn(openvdb::Coord(0, 1, 2), minusOne); bTree.setValueOff(openvdb::Coord(1, 0, 0), 
minusOne); bTree.setValueOff(openvdb::Coord(1, 1, 0), four); // Copy active voxels of aTree into bTree. openvdb::tools::compReplace(bTree, aTree); // a = 3 (On), b = -1 (On) EXPECT_EQ(three, bTree.getValue(openvdb::Coord(0, 0, 0))); // a = 3 (On), b = 5 (bg) EXPECT_EQ(three, bTree.getValue(openvdb::Coord(0, 0, 1))); EXPECT_TRUE(bTree.isValueOn(openvdb::Coord(0, 0, 1))); // a = 1 (On, = bg), b = 5 (bg) EXPECT_EQ(one, bTree.getValue(openvdb::Coord(0, 0, 2))); EXPECT_TRUE(bTree.isValueOn(openvdb::Coord(0, 0, 2))); // a = 1 (On, = bg), b = -1 (On) EXPECT_EQ(one, bTree.getValue(openvdb::Coord(0, 1, 2))); EXPECT_TRUE(bTree.isValueOn(openvdb::Coord(0, 1, 2))); // a = 1 (bg), b = 4 (On) EXPECT_EQ(four, bTree.getValue(openvdb::Coord(0, 1, 0))); EXPECT_TRUE(bTree.isValueOn(openvdb::Coord(0, 1, 0))); // a = 3 (Off), b = -1 (Off) EXPECT_EQ(minusOne, bTree.getValue(openvdb::Coord(1, 0, 0))); EXPECT_TRUE(bTree.isValueOff(openvdb::Coord(1, 0, 0))); // a = 3 (Off), b = 5 (bg) EXPECT_EQ(five, bTree.getValue(openvdb::Coord(1, 0, 1))); EXPECT_TRUE(bTree.isValueOff(openvdb::Coord(1, 0, 1))); // a = 1 (bg), b = 4 (Off) EXPECT_EQ(four, bTree.getValue(openvdb::Coord(1, 1, 0))); EXPECT_TRUE(bTree.isValueOff(openvdb::Coord(1, 1, 0))); // a = 1 (bg), b = 5 (bg) EXPECT_EQ(five, bTree.getValue(openvdb::Coord(1000, 1, 2))); EXPECT_TRUE(bTree.isValueOff(openvdb::Coord(1000, 1, 2))); } } //////////////////////////////////////// #ifdef DWA_OPENVDB TEST_F(TestTreeCombine, testCsg) { using TreeT = openvdb::FloatTree; using TreePtr = TreeT::Ptr; using GridT = openvdb::Grid<TreeT>; struct Local { static TreePtr readFile(const std::string& fname) { std::string filename(fname), gridName("LevelSet"); size_t space = filename.find_last_of(' '); if (space != std::string::npos) { gridName = filename.substr(space + 1); filename.erase(space); } TreePtr tree; openvdb::io::File file(filename); file.open(); if (openvdb::GridBase::Ptr basePtr = file.readGrid(gridName)) { if (GridT::Ptr gridPtr = openvdb::gridPtrCast<GridT>(basePtr)) { tree = gridPtr->treePtr(); } } file.close(); return tree; } //static void writeFile(TreePtr tree, const std::string& filename) { // openvdb::io::File file(filename); // openvdb::GridPtrVec grids; // GridT::Ptr grid = openvdb::createGrid(tree); // grid->setName("LevelSet"); // grids.push_back(grid); // file.write(grids); //} static void visitorUnion(TreeT& a, TreeT& b) { openvdb::tools::csgUnion(a, b); } static void visitorIntersect(TreeT& a, TreeT& b) { openvdb::tools::csgIntersection(a, b); } static void visitorDiff(TreeT& a, TreeT& b) { openvdb::tools::csgDifference(a, b); } }; TreePtr smallTree1, smallTree2, largeTree1, largeTree2, refTree, outTree; #if TEST_CSG_VERBOSE openvdb::util::CpuTimer timer; timer.start(); #endif const std::string testDir("/work/rd/fx_tools/vdb_unittest/TestGridCombine::testCsg/"); smallTree1 = Local::readFile(testDir + "small1.vdb2 LevelSet"); EXPECT_TRUE(smallTree1.get() != nullptr); smallTree2 = Local::readFile(testDir + "small2.vdb2 Cylinder"); EXPECT_TRUE(smallTree2.get() != nullptr); largeTree1 = Local::readFile(testDir + "large1.vdb2 LevelSet"); EXPECT_TRUE(largeTree1.get() != nullptr); largeTree2 = Local::readFile(testDir + "large2.vdb2 LevelSet"); EXPECT_TRUE(largeTree2.get() != nullptr); #if TEST_CSG_VERBOSE std::cerr << "file read: " << timer.milliseconds() << " msec\n"; #endif #if TEST_CSG_VERBOSE std::cerr << "\n<union>\n"; #endif refTree = Local::readFile(testDir + "small_union.vdb2"); outTree = visitCsg(*smallTree1, *smallTree2, *refTree, Local::visitorUnion); 
//Local::writeFile(outTree, "small_union_out.vdb2"); refTree = Local::readFile(testDir + "large_union.vdb2"); outTree = visitCsg(*largeTree1, *largeTree2, *refTree, Local::visitorUnion); //Local::writeFile(outTree, "large_union_out.vdb2"); #if TEST_CSG_VERBOSE std::cerr << "\n<intersection>\n"; #endif refTree = Local::readFile(testDir + "small_intersection.vdb2"); outTree = visitCsg(*smallTree1, *smallTree2, *refTree, Local::visitorIntersect); //Local::writeFile(outTree, "small_intersection_out.vdb2"); refTree = Local::readFile(testDir + "large_intersection.vdb2"); outTree = visitCsg(*largeTree1, *largeTree2, *refTree, Local::visitorIntersect); //Local::writeFile(outTree, "large_intersection_out.vdb2"); #if TEST_CSG_VERBOSE std::cerr << "\n<difference>\n"; #endif refTree = Local::readFile(testDir + "small_difference.vdb2"); outTree = visitCsg(*smallTree1, *smallTree2, *refTree, Local::visitorDiff); //Local::writeFile(outTree, "small_difference_out.vdb2"); refTree = Local::readFile(testDir + "large_difference.vdb2"); outTree = visitCsg(*largeTree1, *largeTree2, *refTree, Local::visitorDiff); //Local::writeFile(outTree, "large_difference_out.vdb2"); } #endif template<typename TreeT, typename VisitorT> typename TreeT::Ptr TestTreeCombine::visitCsg(const TreeT& aInputTree, const TreeT& bInputTree, const TreeT& refTree, const VisitorT& visitor) { using TreePtr = typename TreeT::Ptr; #if TEST_CSG_VERBOSE openvdb::util::CpuTimer timer; timer.start(); #endif TreePtr aTree(new TreeT(aInputTree)); TreeT bTree(bInputTree); #if TEST_CSG_VERBOSE std::cerr << "deep copy: " << timer.milliseconds() << " msec\n"; #endif #if (TEST_CSG_VERBOSE > 1) std::cerr << "\nA grid:\n"; aTree->print(std::cerr, /*verbose=*/3); std::cerr << "\nB grid:\n"; bTree.print(std::cerr, /*verbose=*/3); std::cerr << "\nExpected:\n"; refTree.print(std::cerr, /*verbose=*/3); std::cerr << "\n"; #endif // Compute the CSG combination of the two grids. 
#if TEST_CSG_VERBOSE timer.start(); #endif visitor(*aTree, bTree); #if TEST_CSG_VERBOSE std::cerr << "combine: " << timer.milliseconds() << " msec\n"; #endif #if (TEST_CSG_VERBOSE > 1) std::cerr << "\nActual:\n"; aTree->print(std::cerr, /*verbose=*/3); #endif std::ostringstream aInfo, refInfo; aTree->print(aInfo, /*verbose=*/2); refTree.print(refInfo, /*verbose=*/2); EXPECT_EQ(refInfo.str(), aInfo.str()); EXPECT_TRUE(aTree->hasSameTopology(refTree)); return aTree; } //////////////////////////////////////// TEST_F(TestTreeCombine, testCsgCopy) { const float voxelSize = 0.2f; const float radius = 3.0f; openvdb::Vec3f center(0.0f, 0.0f, 0.0f); openvdb::FloatGrid::Ptr gridA = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(radius, center, voxelSize); openvdb::Coord ijkA = gridA->transform().worldToIndexNodeCentered(center); EXPECT_TRUE(gridA->tree().getValue(ijkA) < 0.0f); // center is inside center.x() += 3.5f; openvdb::FloatGrid::Ptr gridB = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(radius, center, voxelSize); openvdb::Coord ijkB = gridA->transform().worldToIndexNodeCentered(center); EXPECT_TRUE(gridB->tree().getValue(ijkB) < 0.0f); // center is inside openvdb::FloatGrid::Ptr unionGrid = openvdb::tools::csgUnionCopy(*gridA, *gridB); openvdb::FloatGrid::Ptr intersectionGrid = openvdb::tools::csgIntersectionCopy(*gridA, *gridB); openvdb::FloatGrid::Ptr differenceGrid = openvdb::tools::csgDifferenceCopy(*gridA, *gridB); EXPECT_TRUE(unionGrid.get() != nullptr); EXPECT_TRUE(intersectionGrid.get() != nullptr); EXPECT_TRUE(differenceGrid.get() != nullptr); EXPECT_TRUE(!unionGrid->empty()); EXPECT_TRUE(!intersectionGrid->empty()); EXPECT_TRUE(!differenceGrid->empty()); // test inside / outside sign EXPECT_TRUE(unionGrid->tree().getValue(ijkA) < 0.0f); EXPECT_TRUE(unionGrid->tree().getValue(ijkB) < 0.0f); EXPECT_TRUE(!(intersectionGrid->tree().getValue(ijkA) < 0.0f)); EXPECT_TRUE(!(intersectionGrid->tree().getValue(ijkB) < 0.0f)); EXPECT_TRUE(differenceGrid->tree().getValue(ijkA) < 0.0f); EXPECT_TRUE(!(differenceGrid->tree().getValue(ijkB) < 0.0f)); } //////////////////////////////////////// TEST_F(TestTreeCombine, testCompActiveLeafVoxels) { {//replace float tree (default argument) openvdb::FloatTree srcTree(0.0f), dstTree(0.0f); dstTree.setValue(openvdb::Coord(1,1,1), 1.0f); srcTree.setValue(openvdb::Coord(1,1,1), 2.0f); srcTree.setValue(openvdb::Coord(8,8,8), 3.0f); EXPECT_EQ(1, int(dstTree.leafCount())); EXPECT_EQ(2, int(srcTree.leafCount())); EXPECT_EQ(1.0f, dstTree.getValue(openvdb::Coord(1, 1, 1))); EXPECT_TRUE(dstTree.isValueOn(openvdb::Coord(1, 1, 1))); EXPECT_EQ(0.0f, dstTree.getValue(openvdb::Coord(8, 8, 8))); EXPECT_TRUE(!dstTree.isValueOn(openvdb::Coord(8, 8, 8))); openvdb::tools::compActiveLeafVoxels(srcTree, dstTree); EXPECT_EQ(2, int(dstTree.leafCount())); EXPECT_EQ(0, int(srcTree.leafCount())); EXPECT_EQ(2.0f, dstTree.getValue(openvdb::Coord(1, 1, 1))); EXPECT_TRUE(dstTree.isValueOn(openvdb::Coord(1, 1, 1))); EXPECT_EQ(3.0f, dstTree.getValue(openvdb::Coord(8, 8, 8))); EXPECT_TRUE(dstTree.isValueOn(openvdb::Coord(8, 8, 8))); } {//replace float tree (lambda expression) openvdb::FloatTree srcTree(0.0f), dstTree(0.0f); dstTree.setValue(openvdb::Coord(1,1,1), 1.0f); srcTree.setValue(openvdb::Coord(1,1,1), 2.0f); srcTree.setValue(openvdb::Coord(8,8,8), 3.0f); EXPECT_EQ(1, int(dstTree.leafCount())); EXPECT_EQ(2, int(srcTree.leafCount())); EXPECT_EQ(1.0f, dstTree.getValue(openvdb::Coord(1, 1, 1))); EXPECT_TRUE(dstTree.isValueOn(openvdb::Coord(1, 1, 1))); 
EXPECT_EQ(0.0f, dstTree.getValue(openvdb::Coord(8, 8, 8))); EXPECT_TRUE(!dstTree.isValueOn(openvdb::Coord(8, 8, 8))); openvdb::tools::compActiveLeafVoxels(srcTree, dstTree, [](float &d, float s){d=s;}); EXPECT_EQ(2, int(dstTree.leafCount())); EXPECT_EQ(0, int(srcTree.leafCount())); EXPECT_EQ(2.0f, dstTree.getValue(openvdb::Coord(1, 1, 1))); EXPECT_TRUE(dstTree.isValueOn(openvdb::Coord(1, 1, 1))); EXPECT_EQ(3.0f, dstTree.getValue(openvdb::Coord(8, 8, 8))); EXPECT_TRUE(dstTree.isValueOn(openvdb::Coord(8, 8, 8))); } {//add float tree openvdb::FloatTree srcTree(0.0f), dstTree(0.0f); dstTree.setValue(openvdb::Coord(1,1,1), 1.0f); srcTree.setValue(openvdb::Coord(1,1,1), 2.0f); srcTree.setValue(openvdb::Coord(8,8,8), 3.0f); EXPECT_EQ(1, int(dstTree.leafCount())); EXPECT_EQ(2, int(srcTree.leafCount())); EXPECT_EQ(1.0f, dstTree.getValue(openvdb::Coord(1, 1, 1))); EXPECT_TRUE(dstTree.isValueOn(openvdb::Coord(1, 1, 1))); EXPECT_EQ(0.0f, dstTree.getValue(openvdb::Coord(8, 8, 8))); EXPECT_TRUE(!dstTree.isValueOn(openvdb::Coord(8, 8, 8))); openvdb::tools::compActiveLeafVoxels(srcTree, dstTree, [](float &d, float s){d+=s;}); EXPECT_EQ(2, int(dstTree.leafCount())); EXPECT_EQ(0, int(srcTree.leafCount())); EXPECT_EQ(3.0f, dstTree.getValue(openvdb::Coord(1, 1, 1))); EXPECT_TRUE(dstTree.isValueOn(openvdb::Coord(1, 1, 1))); EXPECT_EQ(3.0f, dstTree.getValue(openvdb::Coord(8, 8, 8))); EXPECT_TRUE(dstTree.isValueOn(openvdb::Coord(8, 8, 8))); } { using BufferT = openvdb::FloatTree::LeafNodeType::Buffer; EXPECT_TRUE((std::is_same<BufferT::ValueType, BufferT::StorageType>::value)); } { using BufferT = openvdb::Vec3fTree::LeafNodeType::Buffer; EXPECT_TRUE((std::is_same<BufferT::ValueType, BufferT::StorageType>::value)); } { using BufferT = openvdb::BoolTree::LeafNodeType::Buffer; EXPECT_TRUE(!(std::is_same<BufferT::ValueType, BufferT::StorageType>::value)); } { using BufferT = openvdb::MaskTree::LeafNodeType::Buffer; EXPECT_TRUE(!(std::is_same<BufferT::ValueType, BufferT::StorageType>::value)); } {//replace bool tree openvdb::BoolTree srcTree(false), dstTree(false); dstTree.setValue(openvdb::Coord(1,1,1), true); srcTree.setValue(openvdb::Coord(1,1,1), false); srcTree.setValue(openvdb::Coord(8,8,8), true); //(9,8,8) is inactive but true so it should have no effect srcTree.setValueOnly(openvdb::Coord(9,8,8), true); EXPECT_EQ(1, int(dstTree.leafCount())); EXPECT_EQ(2, int(srcTree.leafCount())); EXPECT_EQ(true, dstTree.getValue(openvdb::Coord(1, 1, 1))); EXPECT_TRUE(dstTree.isValueOn(openvdb::Coord(1, 1, 1))); EXPECT_EQ(false, dstTree.getValue(openvdb::Coord(8, 8, 8))); EXPECT_TRUE(!dstTree.isValueOn(openvdb::Coord(8, 8, 8))); EXPECT_EQ(true, srcTree.getValue(openvdb::Coord(9, 8, 8))); EXPECT_TRUE(!srcTree.isValueOn(openvdb::Coord(9, 8, 8))); using Word = openvdb::BoolTree::LeafNodeType::Buffer::WordType; openvdb::tools::compActiveLeafVoxels(srcTree, dstTree, [](Word &d, Word s){d=s;}); EXPECT_EQ(2, int(dstTree.leafCount())); EXPECT_EQ(0, int(srcTree.leafCount())); EXPECT_EQ(false, dstTree.getValue(openvdb::Coord(1, 1, 1))); EXPECT_TRUE(dstTree.isValueOn(openvdb::Coord(1, 1, 1))); EXPECT_EQ(true, dstTree.getValue(openvdb::Coord(8, 8, 8))); EXPECT_TRUE(dstTree.isValueOn(openvdb::Coord(8, 8, 8))); } {// mask tree openvdb::MaskTree srcTree(false), dstTree(false); dstTree.setValueOn(openvdb::Coord(1,1,1)); srcTree.setValueOn(openvdb::Coord(1,1,1)); srcTree.setValueOn(openvdb::Coord(8,8,8)); EXPECT_EQ(1, int(dstTree.leafCount())); EXPECT_EQ(2, int(srcTree.leafCount())); EXPECT_EQ(true, dstTree.getValue(openvdb::Coord(1, 1, 
1))); EXPECT_TRUE(dstTree.isValueOn(openvdb::Coord(1, 1, 1))); EXPECT_EQ(false, dstTree.getValue(openvdb::Coord(8, 8, 8))); EXPECT_TRUE(!dstTree.isValueOn(openvdb::Coord(8, 8, 8))); openvdb::tools::compActiveLeafVoxels(srcTree, dstTree); EXPECT_EQ(2, int(dstTree.leafCount())); EXPECT_EQ(0, int(srcTree.leafCount())); EXPECT_EQ(true, dstTree.getValue(openvdb::Coord(1, 1, 1))); EXPECT_TRUE(dstTree.isValueOn(openvdb::Coord(1, 1, 1))); EXPECT_EQ(true, dstTree.getValue(openvdb::Coord(8, 8, 8))); EXPECT_TRUE(dstTree.isValueOn(openvdb::Coord(8, 8, 8))); } } ////////////////////////////////////////
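// A minimal standalone sketch of the two composite APIs exercised in this file;
// the scalar values and sphere parameters below are illustrative assumptions,
// not fixtures taken from any of the tests above.
inline void compositeUsageSketch()
{
    // In-place composite of simple scalar trees: b's active values are folded into a.
    openvdb::FloatTree a(/*background=*/0.0f), b(0.0f);
    a.setValueOn(openvdb::Coord(0), 1.0f);
    b.setValueOn(openvdb::Coord(0), 2.0f);
    openvdb::tools::compSum(a, b); // a now holds 3 at (0,0,0)

    // CSG on level sets: csgUnionCopy returns the union as a new grid
    // and leaves both inputs intact.
    const float voxelSize = 0.2f, radius = 1.0f;
    openvdb::FloatGrid::Ptr s1 = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        radius, openvdb::Vec3f(0.0f), voxelSize);
    openvdb::FloatGrid::Ptr s2 = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        radius, openvdb::Vec3f(0.5f, 0.0f, 0.0f), voxelSize);
    openvdb::FloatGrid::Ptr unioned = openvdb::tools::csgUnionCopy(*s1, *s2);
    (void)unioned;
}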
37,795
C++
36.833834
101
0.607911
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestPointGroup.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/points/PointGroup.h> #include <openvdb/points/PointCount.h> #include <openvdb/points/PointConversion.h> #include <cstdio> // for std::remove() #include <cstdlib> // for std::getenv() #include <iostream> #include <sstream> #include <string> #include <vector> #ifdef _MSC_VER #include <windows.h> #endif using namespace openvdb; using namespace openvdb::points; class TestPointGroup: public ::testing::Test { public: void SetUp() override { openvdb::initialize(); } void TearDown() override { openvdb::uninitialize(); } }; // class TestPointGroup //////////////////////////////////////// class FirstFilter { public: static bool initialized() { return true; } static index::State state() { return index::PARTIAL; } template <typename LeafT> static index::State state(const LeafT&) { return index::PARTIAL; } template <typename LeafT> void reset(const LeafT&) { } template <typename IterT> bool valid(const IterT& iter) const { return *iter == 0; } }; // class FirstFilter //////////////////////////////////////// namespace { bool testStringVector(std::vector<Name>& input) { return input.size() == 0; } bool testStringVector(std::vector<Name>& input, const Name& name1) { if (input.size() != 1) return false; if (input[0] != name1) return false; return true; } bool testStringVector(std::vector<Name>& input, const Name& name1, const Name& name2) { if (input.size() != 2) return false; if (input[0] != name1) return false; if (input[1] != name2) return false; return true; } } // namespace TEST_F(TestPointGroup, testDescriptor) { // test missing groups deletion { // no groups, empty Descriptor std::vector<std::string> groups; AttributeSet::Descriptor descriptor; deleteMissingPointGroups(groups, descriptor); EXPECT_TRUE(testStringVector(groups)); } { // one group, empty Descriptor std::vector<std::string> groups{"group1"}; AttributeSet::Descriptor descriptor; deleteMissingPointGroups(groups, descriptor); EXPECT_TRUE(testStringVector(groups)); } { // one group, Descriptor with same group std::vector<std::string> groups{"group1"}; AttributeSet::Descriptor descriptor; descriptor.setGroup("group1", 0); deleteMissingPointGroups(groups, descriptor); EXPECT_TRUE(testStringVector(groups, "group1")); } { // one group, Descriptor with different group std::vector<std::string> groups{"group1"}; AttributeSet::Descriptor descriptor; descriptor.setGroup("group2", 0); deleteMissingPointGroups(groups, descriptor); EXPECT_TRUE(testStringVector(groups)); } { // three groups, Descriptor with three groups, one different std::vector<std::string> groups{"group1", "group3", "group4"}; AttributeSet::Descriptor descriptor; descriptor.setGroup("group1", 0); descriptor.setGroup("group2", 0); descriptor.setGroup("group4", 0); deleteMissingPointGroups(groups, descriptor); EXPECT_TRUE(testStringVector(groups, "group1", "group4")); } } //////////////////////////////////////// TEST_F(TestPointGroup, testAppendDrop) { std::vector<Vec3s> positions{{1, 1, 1}, {1, 10, 1}, {10, 1, 1}, {10, 10, 1}}; const float voxelSize(1.0); math::Transform::Ptr transform(math::Transform::createLinearTransform(voxelSize)); PointDataGrid::Ptr grid = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); PointDataTree& tree = grid->tree(); // check one leaf per point EXPECT_EQ(tree.leafCount(), Index32(4)); // retrieve first and last leaf attribute sets PointDataTree::LeafCIter leafIter = tree.cbeginLeaf(); const AttributeSet& 
attributeSet = leafIter->attributeSet(); ++leafIter; ++leafIter; ++leafIter; const AttributeSet& attributeSet4 = leafIter->attributeSet(); { // throw on append or drop an empty group EXPECT_THROW(appendGroup(tree, ""), openvdb::KeyError); EXPECT_THROW(dropGroup(tree, ""), openvdb::KeyError); } { // append a group appendGroup(tree, "test"); EXPECT_EQ(attributeSet.descriptor().groupMap().size(), size_t(1)); EXPECT_TRUE(attributeSet.descriptor().hasGroup("test")); EXPECT_TRUE(attributeSet4.descriptor().hasGroup("test")); } { // append a group with non-unique name (repeat the append) appendGroup(tree, "test"); EXPECT_EQ(attributeSet.descriptor().groupMap().size(), size_t(1)); EXPECT_TRUE(attributeSet.descriptor().hasGroup("test")); EXPECT_TRUE(attributeSet4.descriptor().hasGroup("test")); } { // append multiple groups std::vector<Name> names{"test2", "test3"}; appendGroups(tree, names); EXPECT_EQ(attributeSet.descriptor().groupMap().size(), size_t(3)); EXPECT_TRUE(attributeSet.descriptor().hasGroup("test")); EXPECT_TRUE(attributeSet4.descriptor().hasGroup("test")); EXPECT_TRUE(attributeSet.descriptor().hasGroup("test2")); EXPECT_TRUE(attributeSet4.descriptor().hasGroup("test2")); EXPECT_TRUE(attributeSet.descriptor().hasGroup("test3")); EXPECT_TRUE(attributeSet4.descriptor().hasGroup("test3")); } { // append to a copy PointDataTree tree2(tree); appendGroup(tree2, "copy1"); EXPECT_TRUE(!attributeSet.descriptor().hasGroup("copy1")); EXPECT_TRUE(tree2.beginLeaf()->attributeSet().descriptor().hasGroup("copy1")); } { // drop a group dropGroup(tree, "test2"); EXPECT_EQ(attributeSet.descriptor().groupMap().size(), size_t(2)); EXPECT_TRUE(attributeSet.descriptor().hasGroup("test")); EXPECT_TRUE(attributeSet4.descriptor().hasGroup("test")); EXPECT_TRUE(attributeSet.descriptor().hasGroup("test3")); EXPECT_TRUE(attributeSet4.descriptor().hasGroup("test3")); } { // drop multiple groups std::vector<Name> names{"test", "test3"}; dropGroups(tree, names); EXPECT_EQ(attributeSet.descriptor().groupMap().size(), size_t(0)); } { // drop a copy appendGroup(tree, "copy2"); PointDataTree tree2(tree); dropGroup(tree2, "copy2"); EXPECT_TRUE(attributeSet.descriptor().hasGroup("copy2")); EXPECT_TRUE(!tree2.beginLeaf()->attributeSet().descriptor().hasGroup("copy2")); dropGroup(tree, "copy2"); } { // set group membership appendGroup(tree, "test"); setGroup(tree, "test", true); GroupFilter filter("test", tree.cbeginLeaf()->attributeSet()); EXPECT_EQ(pointCount(tree, filter), Index64(4)); setGroup(tree, "test", false); EXPECT_EQ(pointCount(tree, filter), Index64(0)); dropGroup(tree, "test"); } { // drop all groups appendGroup(tree, "test"); appendGroup(tree, "test2"); EXPECT_EQ(attributeSet.descriptor().groupMap().size(), size_t(2)); EXPECT_EQ(attributeSet.descriptor().count(GroupAttributeArray::attributeType()), size_t(1)); dropGroups(tree); EXPECT_EQ(attributeSet.descriptor().groupMap().size(), size_t(0)); EXPECT_EQ(attributeSet.descriptor().count(GroupAttributeArray::attributeType()), size_t(0)); } { // check that newly added groups have empty group membership // recreate the grid with 3 points in one leaf positions = {{1, 1, 1}, {1, 2, 1}, {2, 1, 1}}; grid = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); PointDataTree& newTree = grid->tree(); appendGroup(newTree, "test"); // test that a completely new group (with a new group attribute) // has empty membership EXPECT_TRUE(newTree.cbeginLeaf()); GroupFilter filter("test", newTree.cbeginLeaf()->attributeSet()); EXPECT_EQ(pointCount(newTree, filter), 
Index64(0)); // check that membership in a group that was not created with a // new attribute array is still empty. // we will append a second group, set its membership, then // drop it and append a new group with the same name again appendGroup(newTree, "test2"); PointDataTree::LeafIter leafIter2 = newTree.beginLeaf(); EXPECT_TRUE(leafIter2); GroupWriteHandle test2Handle = leafIter2->groupWriteHandle("test2"); test2Handle.set(0, true); test2Handle.set(2, true); GroupFilter filter2("test2", newTree.cbeginLeaf()->attributeSet()); EXPECT_EQ(pointCount(newTree, filter2), Index64(2)); // drop and re-add group dropGroup(newTree, "test2"); appendGroup(newTree, "test2"); // check that group is fully cleared and does not have previously existing data EXPECT_EQ(pointCount(newTree, filter2), Index64(0)); } } TEST_F(TestPointGroup, testCompact) { std::vector<Vec3s> positions{{1, 1, 1}}; const float voxelSize(1.0); math::Transform::Ptr transform(math::Transform::createLinearTransform(voxelSize)); PointDataGrid::Ptr grid = createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform); PointDataTree& tree = grid->tree(); // check one leaf EXPECT_EQ(tree.leafCount(), Index32(1)); // retrieve first and last leaf attribute sets PointDataTree::LeafCIter leafIter = tree.cbeginLeaf(); const AttributeSet& attributeSet = leafIter->attributeSet(); std::stringstream ss; { // append nine groups for (int i = 0; i < 8; i++) { ss.str(""); ss << "test" << i; appendGroup(tree, ss.str()); } EXPECT_EQ(attributeSet.descriptor().groupMap().size(), size_t(8)); EXPECT_EQ(attributeSet.descriptor().count(GroupAttributeArray::attributeType()), size_t(1)); appendGroup(tree, "test8"); EXPECT_TRUE(attributeSet.descriptor().hasGroup("test0")); EXPECT_TRUE(attributeSet.descriptor().hasGroup("test7")); EXPECT_TRUE(attributeSet.descriptor().hasGroup("test8")); EXPECT_EQ(attributeSet.descriptor().groupMap().size(), size_t(9)); EXPECT_EQ(attributeSet.descriptor().count(GroupAttributeArray::attributeType()), size_t(2)); } { // drop first attribute then compact dropGroup(tree, "test5", /*compact=*/false); EXPECT_TRUE(!attributeSet.descriptor().hasGroup("test5")); EXPECT_EQ(attributeSet.descriptor().groupMap().size(), size_t(8)); EXPECT_EQ(attributeSet.descriptor().count(GroupAttributeArray::attributeType()), size_t(2)); compactGroups(tree); EXPECT_TRUE(!attributeSet.descriptor().hasGroup("test5")); EXPECT_TRUE(attributeSet.descriptor().hasGroup("test7")); EXPECT_TRUE(attributeSet.descriptor().hasGroup("test8")); EXPECT_EQ(attributeSet.descriptor().groupMap().size(), size_t(8)); EXPECT_EQ(attributeSet.descriptor().count(GroupAttributeArray::attributeType()), size_t(1)); } { // append seventeen groups, drop most of them, then compact for (int i = 0; i < 17; i++) { ss.str(""); ss << "test" << i; appendGroup(tree, ss.str()); } EXPECT_EQ(attributeSet.descriptor().groupMap().size(), size_t(17)); EXPECT_EQ(attributeSet.descriptor().count(GroupAttributeArray::attributeType()), size_t(3)); // delete all but 0, 5, 9, 15 for (int i = 0; i < 17; i++) { if (i == 0 || i == 5 || i == 9 || i == 15) continue; ss.str(""); ss << "test" << i; dropGroup(tree, ss.str(), /*compact=*/false); } EXPECT_EQ(attributeSet.descriptor().groupMap().size(), size_t(4)); EXPECT_EQ(attributeSet.descriptor().count(GroupAttributeArray::attributeType()), size_t(3)); // make a copy PointDataTree tree2(tree); // compact - should now occupy one attribute compactGroups(tree); EXPECT_EQ(attributeSet.descriptor().groupMap().size(), size_t(4)); 
EXPECT_EQ(attributeSet.descriptor().count(GroupAttributeArray::attributeType()), size_t(1)); // check descriptor has been deep copied EXPECT_EQ(tree2.cbeginLeaf()->attributeSet().descriptor().groupMap().size(), size_t(4)); EXPECT_EQ(tree2.cbeginLeaf()->attributeSet().descriptor().count(GroupAttributeArray::attributeType()), size_t(3)); } } TEST_F(TestPointGroup, testSet) { // four points in the same leaf std::vector<Vec3s> positions = { {1, 1, 1}, {1, 2, 1}, {2, 1, 1}, {2, 2, 1}, {100, 100, 100}, {100, 101, 100} }; const float voxelSize(1.0); math::Transform::Ptr transform(math::Transform::createLinearTransform(voxelSize)); const PointAttributeVector<Vec3s> pointList(positions); openvdb::tools::PointIndexGrid::Ptr pointIndexGrid = openvdb::tools::createPointIndexGrid<openvdb::tools::PointIndexGrid>(pointList, *transform); PointDataGrid::Ptr grid = createPointDataGrid<NullCodec, PointDataGrid>(*pointIndexGrid, pointList, *transform); PointDataTree& tree = grid->tree(); appendGroup(tree, "test"); EXPECT_EQ(pointCount(tree), Index64(6)); GroupFilter filter("test", tree.cbeginLeaf()->attributeSet()); EXPECT_EQ(pointCount(tree, filter), Index64(0)); // copy tree for descriptor sharing test PointDataTree tree2(tree); std::vector<short> membership{1, 0, 1, 1, 0, 1}; // test add to group setGroup(tree, "test", true); EXPECT_EQ(pointCount(tree, filter), Index64(6)); // test nothing is done if the index tree contains no valid indices tools::PointIndexGrid::Ptr tmpIndexGrid = tools::PointIndexGrid::create(); setGroup(tree, tmpIndexGrid->tree(), {0,0,0,0,0,0}, "test", /*remove*/true); EXPECT_EQ(Index64(6), pointCount(tree, filter)); // test throw on out of range index auto indexLeaf = tmpIndexGrid->tree().touchLeaf(tree.cbeginLeaf()->origin()); indexLeaf->indices().emplace_back(membership.size()); EXPECT_THROW(setGroup(tree, tmpIndexGrid->tree(), membership, "test"), IndexError); EXPECT_EQ(Index64(6), pointCount(tree, filter)); // test remove flag setGroup(tree, pointIndexGrid->tree(), membership, "test", /*remove*/false); EXPECT_EQ(Index64(6), pointCount(tree, filter)); setGroup(tree, pointIndexGrid->tree(), membership, "test", /*remove*/true); EXPECT_EQ(Index64(4), pointCount(tree, filter)); setGroup(tree, pointIndexGrid->tree(), {0,1,0,0,1,0}, "test", /*remove*/false); EXPECT_EQ(Index64(6), pointCount(tree, filter)); setGroup(tree, pointIndexGrid->tree(), membership, "test", /*remove*/true); // check that descriptor remains shared appendGroup(tree2, "copy1"); EXPECT_TRUE(!tree.cbeginLeaf()->attributeSet().descriptor().hasGroup("copy1")); dropGroup(tree2, "copy1"); EXPECT_EQ(pointCount(tree), Index64(6)); GroupFilter filter2("test", tree.cbeginLeaf()->attributeSet()); EXPECT_EQ(pointCount(tree, filter2), Index64(4)); { // IO // setup temp directory std::string tempDir; if (const char* dir = std::getenv("TMPDIR")) tempDir = dir; #ifdef _MSC_VER if (tempDir.empty()) { char tempDirBuffer[MAX_PATH+1]; int tempDirLen = GetTempPath(MAX_PATH+1, tempDirBuffer); EXPECT_TRUE(tempDirLen > 0 && tempDirLen <= MAX_PATH); tempDir = tempDirBuffer; } #else if (tempDir.empty()) tempDir = P_tmpdir; #endif std::string filename; // write out grid to a temp file { filename = tempDir + "/openvdb_test_point_load"; io::File fileOut(filename); GridCPtrVec grids{grid}; fileOut.write(grids); } // read test groups { io::File fileIn(filename); fileIn.open(); GridPtrVecPtr grids = fileIn.getGrids(); fileIn.close(); EXPECT_EQ(grids->size(), size_t(1)); PointDataGrid::Ptr inputGrid = GridBase::grid<PointDataGrid>((*grids)[0]); 
PointDataTree& treex = inputGrid->tree(); EXPECT_TRUE(treex.cbeginLeaf()); const PointDataGrid::TreeType::LeafNodeType& leaf = *treex.cbeginLeaf(); const AttributeSet::Descriptor& descriptor = leaf.attributeSet().descriptor(); EXPECT_TRUE(descriptor.hasGroup("test")); EXPECT_EQ(descriptor.groupMap().size(), size_t(1)); EXPECT_EQ(pointCount(treex), Index64(6)); GroupFilter filter3("test", leaf.attributeSet()); EXPECT_EQ(pointCount(treex, filter3), Index64(4)); } std::remove(filename.c_str()); } } TEST_F(TestPointGroup, testFilter) { const float voxelSize(1.0); math::Transform::Ptr transform(math::Transform::createLinearTransform(voxelSize)); PointDataGrid::Ptr grid; { // four points in the same leaf std::vector<Vec3s> positions = { {1, 1, 1}, {1, 2, 1}, {2, 1, 1}, {2, 2, 1}, {100, 100, 100}, {100, 101, 100} }; const PointAttributeVector<Vec3s> pointList(positions); openvdb::tools::PointIndexGrid::Ptr pointIndexGrid = openvdb::tools::createPointIndexGrid<openvdb::tools::PointIndexGrid>(pointList, *transform); grid = createPointDataGrid<NullCodec, PointDataGrid>(*pointIndexGrid, pointList, *transform); } PointDataTree& tree = grid->tree(); { // first point filter appendGroup(tree, "first"); EXPECT_EQ(pointCount(tree), Index64(6)); GroupFilter filter("first", tree.cbeginLeaf()->attributeSet()); EXPECT_EQ(pointCount(tree, filter), Index64(0)); FirstFilter filter2; setGroupByFilter<PointDataTree, FirstFilter>(tree, "first", filter2); auto iter = tree.cbeginLeaf(); for ( ; iter; ++iter) { EXPECT_EQ(iter->groupPointCount("first"), Index64(1)); } GroupFilter filter3("first", tree.cbeginLeaf()->attributeSet()); EXPECT_EQ(pointCount(tree, filter3), Index64(2)); } const openvdb::BBoxd bbox(openvdb::Vec3d(0, 1.5, 0), openvdb::Vec3d(101, 100.5, 101)); { // bbox filter appendGroup(tree, "bbox"); EXPECT_EQ(pointCount(tree), Index64(6)); GroupFilter filter("bbox", tree.cbeginLeaf()->attributeSet()); EXPECT_EQ(pointCount(tree, filter), Index64(0)); BBoxFilter filter2(*transform, bbox); setGroupByFilter<PointDataTree, BBoxFilter>(tree, "bbox", filter2); GroupFilter filter3("bbox", tree.cbeginLeaf()->attributeSet()); EXPECT_EQ(pointCount(tree, filter3), Index64(3)); } { // first point filter and bbox filter (intersection of the above two filters) appendGroup(tree, "first_bbox"); EXPECT_EQ(pointCount(tree), Index64(6)); GroupFilter filter("first_bbox", tree.cbeginLeaf()->attributeSet()); EXPECT_EQ(pointCount(tree, filter), Index64(0)); using FirstBBoxFilter = BinaryFilter<FirstFilter, BBoxFilter>; FirstFilter firstFilter; BBoxFilter bboxFilter(*transform, bbox); FirstBBoxFilter filter2(firstFilter, bboxFilter); setGroupByFilter<PointDataTree, FirstBBoxFilter>(tree, "first_bbox", filter2); GroupFilter filter3("first_bbox", tree.cbeginLeaf()->attributeSet()); EXPECT_EQ(pointCount(tree, filter3), Index64(1)); std::vector<Vec3f> positions; for (auto iter = tree.cbeginLeaf(); iter; ++iter) { GroupFilter filterx("first_bbox", iter->attributeSet()); auto filterIndexIter = iter->beginIndexOn(filterx); auto handle = AttributeHandle<Vec3f>::create(iter->attributeArray("P")); for ( ; filterIndexIter; ++filterIndexIter) { const openvdb::Coord ijk = filterIndexIter.getCoord(); positions.push_back(handle->get(*filterIndexIter) + ijk.asVec3d()); } } EXPECT_EQ(positions.size(), size_t(1)); EXPECT_EQ(positions[0], Vec3f(100, 100, 100)); } { // add 1000 points in three leafs (positions aren't important) std::vector<Vec3s> positions(1000, {1, 1, 1}); positions.insert(positions.end(), 1000, {1, 1, 9}); 
positions.insert(positions.end(), 1000, {9, 9, 9}); const PointAttributeVector<Vec3s> pointList(positions); openvdb::tools::PointIndexGrid::Ptr pointIndexGrid = openvdb::tools::createPointIndexGrid<openvdb::tools::PointIndexGrid>(pointList, *transform); grid = createPointDataGrid<NullCodec, PointDataGrid>(*pointIndexGrid, pointList, *transform); PointDataTree& newTree = grid->tree(); EXPECT_EQ(pointCount(newTree), Index64(3000)); // random - maximum appendGroup(newTree, "random_maximum"); const Index64 target = 1001; setGroupByRandomTarget(newTree, "random_maximum", target); GroupFilter filter("random_maximum", newTree.cbeginLeaf()->attributeSet()); EXPECT_EQ(pointCount(newTree, filter), target); // random - percentage appendGroup(newTree, "random_percentage"); setGroupByRandomPercentage(newTree, "random_percentage", 33.333333f); GroupFilter filter2("random_percentage", newTree.cbeginLeaf()->attributeSet()); EXPECT_EQ(pointCount(newTree, filter2), Index64(1000)); } }
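// A minimal standalone sketch of the group lifecycle exercised above: append a
// group, set membership, count members through a GroupFilter, then drop the
// group. The positions and the group name are illustrative assumptions only.
inline void groupUsageSketch()
{
    std::vector<Vec3s> positions{{1, 1, 1}, {1, 2, 1}};
    math::Transform::Ptr transform(math::Transform::createLinearTransform(/*voxelSize=*/1.0));
    PointDataGrid::Ptr grid =
        createPointDataGrid<NullCodec, PointDataGrid>(positions, *transform);
    PointDataTree& tree = grid->tree();

    appendGroup(tree, "example");      // register the group in the attribute descriptors
    setGroup(tree, "example", true);   // mark every point as a member
    GroupFilter filter("example", tree.cbeginLeaf()->attributeSet());
    const Index64 members = pointCount(tree, filter); // 2 for the positions above
    (void)members;
    dropGroup(tree, "example");        // remove the group again
}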
22,362
C++
31.935199
122
0.611752
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestMath.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "gtest/gtest.h" #include <openvdb/Exceptions.h> #include <openvdb/math/Math.h> #include <openvdb/Types.h> #include <type_traits> #include <vector> class TestMath: public ::testing::Test { }; // This suite of tests obviously needs to be expanded! TEST_F(TestMath, testAll) { using namespace openvdb; {// Sign EXPECT_EQ(math::Sign( 3 ), 1); EXPECT_EQ(math::Sign(-1.0 ),-1); EXPECT_EQ(math::Sign( 0.0f), 0); } {// SignChange EXPECT_TRUE( math::SignChange( -1, 1)); EXPECT_TRUE(!math::SignChange( 0.0f, 0.5f)); EXPECT_TRUE( math::SignChange( 0.0f,-0.5f)); EXPECT_TRUE( math::SignChange(-0.1, 0.0001)); } {// isApproxZero EXPECT_TRUE( math::isApproxZero( 0.0f)); EXPECT_TRUE(!math::isApproxZero( 9.0e-6f)); EXPECT_TRUE(!math::isApproxZero(-9.0e-6f)); EXPECT_TRUE( math::isApproxZero( 9.0e-9f)); EXPECT_TRUE( math::isApproxZero(-9.0e-9f)); EXPECT_TRUE( math::isApproxZero( 0.01, 0.1)); } {// Cbrt const double a = math::Cbrt(3.0); EXPECT_TRUE(math::isApproxEqual(a*a*a, 3.0, 1e-6)); } {// isNegative EXPECT_TRUE(!std::is_signed<unsigned int>::value); EXPECT_TRUE(std::is_signed<int>::value); EXPECT_TRUE(!std::is_signed<bool>::value); //EXPECT_TRUE(std::is_signed<double>::value);//fails! //EXPECT_TRUE(std::is_signed<float>::value);//fails! EXPECT_TRUE( math::isNegative(-1.0f)); EXPECT_TRUE(!math::isNegative( 1.0f)); EXPECT_TRUE( math::isNegative(-1.0)); EXPECT_TRUE(!math::isNegative( 1.0)); EXPECT_TRUE(!math::isNegative(true)); EXPECT_TRUE(!math::isNegative(false)); EXPECT_TRUE(!math::isNegative(1u)); EXPECT_TRUE( math::isNegative(-1)); EXPECT_TRUE(!math::isNegative( 1)); } {// zeroVal EXPECT_EQ(zeroVal<bool>(), false); EXPECT_EQ(zeroVal<int>(), int(0)); EXPECT_EQ(zeroVal<float>(), 0.0f); EXPECT_EQ(zeroVal<double>(), 0.0); EXPECT_EQ(zeroVal<Vec3i>(), Vec3i(0,0,0)); EXPECT_EQ(zeroVal<Vec3s>(), Vec3s(0,0,0)); EXPECT_EQ(zeroVal<Vec3d>(), Vec3d(0,0,0)); EXPECT_EQ(zeroVal<Quats>(), Quats::zero()); EXPECT_EQ(zeroVal<Quatd>(), Quatd::zero()); EXPECT_EQ(zeroVal<Mat3s>(), Mat3s::zero()); EXPECT_EQ(zeroVal<Mat3d>(), Mat3d::zero()); EXPECT_EQ(zeroVal<Mat4s>(), Mat4s::zero()); EXPECT_EQ(zeroVal<Mat4d>(), Mat4d::zero()); } } TEST_F(TestMath, testRandomInt) { using openvdb::math::RandomInt; int imin = -3, imax = 11; RandomInt rnd(/*seed=*/42, imin, imax); // Generate a sequence of random integers and verify that they all fall // in the interval [imin, imax]. std::vector<int> seq(100); for (int i = 0; i < 100; ++i) { seq[i] = rnd(); EXPECT_TRUE(seq[i] >= imin); EXPECT_TRUE(seq[i] <= imax); } // Verify that generators with the same seed produce the same sequence. rnd = RandomInt(42, imin, imax); for (int i = 0; i < 100; ++i) { int r = rnd(); EXPECT_EQ(seq[i], r); } // Verify that generators with different seeds produce different sequences. rnd = RandomInt(101, imin, imax); std::vector<int> newSeq(100); for (int i = 0; i < 100; ++i) newSeq[i] = rnd(); EXPECT_TRUE(newSeq != seq); // Temporarily change the range. imin = -5; imax = 6; for (int i = 0; i < 100; ++i) { int r = rnd(imin, imax); EXPECT_TRUE(r >= imin); EXPECT_TRUE(r <= imax); } // Verify that the range change was temporary. imin = -3; imax = 11; for (int i = 0; i < 100; ++i) { int r = rnd(); EXPECT_TRUE(r >= imin); EXPECT_TRUE(r <= imax); } // Permanently change the range. 
imin = -5; imax = 6; rnd.setRange(imin, imax); for (int i = 0; i < 100; ++i) { int r = rnd(); EXPECT_TRUE(r >= imin); EXPECT_TRUE(r <= imax); } // Verify that it is OK to specify imin > imax (they are automatically swapped). imin = 5; imax = -6; rnd.setRange(imin, imax); rnd = RandomInt(42, imin, imax); } TEST_F(TestMath, testRandom01) { using openvdb::math::Random01; using openvdb::math::isApproxEqual; Random01 rnd(/*seed=*/42); // Generate a sequence of random numbers and verify that they all fall // in the interval [0, 1). std::vector<Random01::ValueType> seq(100); for (int i = 0; i < 100; ++i) { seq[i] = rnd(); EXPECT_TRUE(seq[i] >= 0.0); EXPECT_TRUE(seq[i] < 1.0); } // Verify that generators with the same seed produce the same sequence. rnd = Random01(42); for (int i = 0; i < 100; ++i) { EXPECT_NEAR(seq[i], rnd(), /*tolerance=*/1.0e-6); } // Verify that generators with different seeds produce different sequences. rnd = Random01(101); bool allEqual = true; for (int i = 0; allEqual && i < 100; ++i) { if (!isApproxEqual(rnd(), seq[i])) allEqual = false; } EXPECT_TRUE(!allEqual); } TEST_F(TestMath, testMinMaxIndex) { const openvdb::Vec3R a(-1, 2, 0); EXPECT_EQ(size_t(0), openvdb::math::MinIndex(a)); EXPECT_EQ(size_t(1), openvdb::math::MaxIndex(a)); const openvdb::Vec3R b(-1, -2, 0); EXPECT_EQ(size_t(1), openvdb::math::MinIndex(b)); EXPECT_EQ(size_t(2), openvdb::math::MaxIndex(b)); const openvdb::Vec3R c(5, 2, 1); EXPECT_EQ(size_t(2), openvdb::math::MinIndex(c)); EXPECT_EQ(size_t(0), openvdb::math::MaxIndex(c)); const openvdb::Vec3R d(0, 0, 1); EXPECT_EQ(size_t(1), openvdb::math::MinIndex(d)); EXPECT_EQ(size_t(2), openvdb::math::MaxIndex(d)); const openvdb::Vec3R e(1, 0, 0); EXPECT_EQ(size_t(2), openvdb::math::MinIndex(e)); EXPECT_EQ(size_t(0), openvdb::math::MaxIndex(e)); const openvdb::Vec3R f(0, 1, 0); EXPECT_EQ(size_t(2), openvdb::math::MinIndex(f)); EXPECT_EQ(size_t(1), openvdb::math::MaxIndex(f)); const openvdb::Vec3R g(1, 1, 0); EXPECT_EQ(size_t(2), openvdb::math::MinIndex(g)); EXPECT_EQ(size_t(1), openvdb::math::MaxIndex(g)); const openvdb::Vec3R h(1, 0, 1); EXPECT_EQ(size_t(1), openvdb::math::MinIndex(h)); EXPECT_EQ(size_t(2), openvdb::math::MaxIndex(h)); const openvdb::Vec3R i(0, 1, 1); EXPECT_EQ(size_t(0), openvdb::math::MinIndex(i)); EXPECT_EQ(size_t(2), openvdb::math::MaxIndex(i)); const openvdb::Vec3R j(1, 1, 1); EXPECT_EQ(size_t(2), openvdb::math::MinIndex(j)); EXPECT_EQ(size_t(2), openvdb::math::MaxIndex(j)); }
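// A small sketch of the seeded-generator property verified above: two
// generators constructed from the same seed produce identical sequences.
// The seed value here is an arbitrary illustrative choice.
inline bool sameSeedSameSequenceSketch()
{
    openvdb::math::Random01 r1(/*seed=*/7), r2(/*seed=*/7);
    for (int i = 0; i < 10; ++i) {
        if (!openvdb::math::isApproxEqual(r1(), r2())) return false;
    }
    return true; // expected outcome for any fixed seed
}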
6,711
C++
31.582524
84
0.576963
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/unittest/TestParticlesToLevelSet.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include <vector> #include "gtest/gtest.h" #include <openvdb/openvdb.h> #include <openvdb/Exceptions.h> #include <openvdb/Types.h> #include <openvdb/tree/LeafNode.h> #include <openvdb/tools/LevelSetUtil.h> // for sdfInteriorMask() #include <openvdb/tools/ParticlesToLevelSet.h> #define ASSERT_DOUBLES_EXACTLY_EQUAL(expected, actual) \ EXPECT_NEAR((expected), (actual), /*tolerance=*/0.0); class TestParticlesToLevelSet: public ::testing::Test { public: void SetUp() override {openvdb::initialize();} void TearDown() override {openvdb::uninitialize();} void writeGrid(openvdb::GridBase::Ptr grid, std::string fileName) const { std::cout << "\nWriting \""<<fileName<<"\" to file\n"; grid->setName("TestParticlesToLevelSet"); openvdb::GridPtrVec grids; grids.push_back(grid); openvdb::io::File file(fileName + ".vdb"); file.write(grids); file.close(); } }; class MyParticleList { protected: struct MyParticle { openvdb::Vec3R p, v; openvdb::Real r; }; openvdb::Real mRadiusScale; openvdb::Real mVelocityScale; std::vector<MyParticle> mParticleList; public: typedef openvdb::Vec3R PosType; MyParticleList(openvdb::Real rScale=1, openvdb::Real vScale=1) : mRadiusScale(rScale), mVelocityScale(vScale) {} void add(const openvdb::Vec3R &p, const openvdb::Real &r, const openvdb::Vec3R &v=openvdb::Vec3R(0,0,0)) { MyParticle pa; pa.p = p; pa.r = r; pa.v = v; mParticleList.push_back(pa); } /// @return coordinate bbox in the space of the specified transform openvdb::CoordBBox getBBox(const openvdb::GridBase& grid) { openvdb::CoordBBox bbox; openvdb::Coord &min= bbox.min(), &max = bbox.max(); openvdb::Vec3R pos; openvdb::Real rad, invDx = 1/grid.voxelSize()[0]; for (size_t n=0, e=this->size(); n<e; ++n) { this->getPosRad(n, pos, rad); const openvdb::Vec3d xyz = grid.worldToIndex(pos); const openvdb::Real r = rad * invDx; for (int i=0; i<3; ++i) { min[i] = openvdb::math::Min(min[i], openvdb::math::Floor(xyz[i] - r)); max[i] = openvdb::math::Max(max[i], openvdb::math::Ceil( xyz[i] + r)); } } return bbox; } //typedef int AttributeType; // The methods below are only required for the unit-tests openvdb::Vec3R pos(int n) const {return mParticleList[n].p;} openvdb::Vec3R vel(int n) const {return mVelocityScale*mParticleList[n].v;} openvdb::Real radius(int n) const {return mRadiusScale*mParticleList[n].r;} ////////////////////////////////////////////////////////////////////////////// /// The methods below are the only ones required by tools::ParticlesToLevelSet /// @note We return by value since the radius and velocities are modified /// by the scaling factors! Also these methods are all assumed to /// be thread-safe. /// Return the total number of particles in list. /// Always required! size_t size() const { return mParticleList.size(); } /// Get the world space position of n'th particle. /// Required by ParticlesToLevelSet::rasterizeSpheres(*this,radius). 
void getPos(size_t n, openvdb::Vec3R&pos) const { pos = mParticleList[n].p; } void getPosRad(size_t n, openvdb::Vec3R& pos, openvdb::Real& rad) const { pos = mParticleList[n].p; rad = mRadiusScale*mParticleList[n].r; } void getPosRadVel(size_t n, openvdb::Vec3R& pos, openvdb::Real& rad, openvdb::Vec3R& vel) const { pos = mParticleList[n].p; rad = mRadiusScale*mParticleList[n].r; vel = mVelocityScale*mParticleList[n].v; } // The method below is only required for attribute transfer void getAtt(size_t n, openvdb::Index32& att) const { att = openvdb::Index32(n); } }; TEST_F(TestParticlesToLevelSet, testBlindData) { using BlindTypeIF = openvdb::tools::p2ls_internal::BlindData<openvdb::Index, float>; BlindTypeIF value(openvdb::Index(8), 5.2f); EXPECT_EQ(openvdb::Index(8), value.visible()); ASSERT_DOUBLES_EXACTLY_EQUAL(5.2f, value.blind()); BlindTypeIF value2(openvdb::Index(13), 1.6f); { // test equality // only visible portion needs to be equal BlindTypeIF blind(openvdb::Index(13), 6.7f); EXPECT_TRUE(value2 == blind); } { // test addition of two blind types BlindTypeIF blind = value + value2; EXPECT_EQ(openvdb::Index(8+13), blind.visible()); EXPECT_EQ(0.0f, blind.blind()); // blind values are both dropped } { // test addition of blind type with visible type BlindTypeIF blind = value + 3; EXPECT_EQ(openvdb::Index(8+3), blind.visible()); EXPECT_EQ(5.2f, blind.blind()); } { // test addition of blind type with type that requires casting // note that this will generate conversion warnings if not handled properly BlindTypeIF blind = value + 3.7; EXPECT_EQ(openvdb::Index(8+3), blind.visible()); EXPECT_EQ(5.2f, blind.blind()); } } TEST_F(TestParticlesToLevelSet, testMyParticleList) { MyParticleList pa; EXPECT_EQ(0, int(pa.size())); pa.add(openvdb::Vec3R(10,10,10), 2, openvdb::Vec3R(1,0,0)); EXPECT_EQ(1, int(pa.size())); ASSERT_DOUBLES_EXACTLY_EQUAL(10, pa.pos(0)[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(10, pa.pos(0)[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(10, pa.pos(0)[2]); ASSERT_DOUBLES_EXACTLY_EQUAL(1 , pa.vel(0)[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(0 , pa.vel(0)[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0 , pa.vel(0)[2]); ASSERT_DOUBLES_EXACTLY_EQUAL(2 , pa.radius(0)); pa.add(openvdb::Vec3R(20,20,20), 3); EXPECT_EQ(2, int(pa.size())); ASSERT_DOUBLES_EXACTLY_EQUAL(20, pa.pos(1)[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(20, pa.pos(1)[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(20, pa.pos(1)[2]); ASSERT_DOUBLES_EXACTLY_EQUAL(0 , pa.vel(1)[0]); ASSERT_DOUBLES_EXACTLY_EQUAL(0 , pa.vel(1)[1]); ASSERT_DOUBLES_EXACTLY_EQUAL(0 , pa.vel(1)[2]); ASSERT_DOUBLES_EXACTLY_EQUAL(3 , pa.radius(1)); const float voxelSize = 0.5f, halfWidth = 4.0f; openvdb::FloatGrid::Ptr ls = openvdb::createLevelSet<openvdb::FloatGrid>(voxelSize, halfWidth); openvdb::CoordBBox bbox = pa.getBBox(*ls); ASSERT_DOUBLES_EXACTLY_EQUAL((10-2)/voxelSize, bbox.min()[0]); ASSERT_DOUBLES_EXACTLY_EQUAL((10-2)/voxelSize, bbox.min()[1]); ASSERT_DOUBLES_EXACTLY_EQUAL((10-2)/voxelSize, bbox.min()[2]); ASSERT_DOUBLES_EXACTLY_EQUAL((20+3)/voxelSize, bbox.max()[0]); ASSERT_DOUBLES_EXACTLY_EQUAL((20+3)/voxelSize, bbox.max()[1]); ASSERT_DOUBLES_EXACTLY_EQUAL((20+3)/voxelSize, bbox.max()[2]); } TEST_F(TestParticlesToLevelSet, testRasterizeSpheres) { MyParticleList pa; pa.add(openvdb::Vec3R(10,10,10), 2); pa.add(openvdb::Vec3R(20,20,20), 2); // testing CSG pa.add(openvdb::Vec3R(31.0,31,31), 5); pa.add(openvdb::Vec3R(31.5,31,31), 5); pa.add(openvdb::Vec3R(32.0,31,31), 5); pa.add(openvdb::Vec3R(32.5,31,31), 5); pa.add(openvdb::Vec3R(33.0,31,31), 5); pa.add(openvdb::Vec3R(33.5,31,31), 5); 
pa.add(openvdb::Vec3R(34.0,31,31), 5); pa.add(openvdb::Vec3R(34.5,31,31), 5); pa.add(openvdb::Vec3R(35.0,31,31), 5); pa.add(openvdb::Vec3R(35.5,31,31), 5); pa.add(openvdb::Vec3R(36.0,31,31), 5); EXPECT_EQ(13, int(pa.size())); const float voxelSize = 1.0f, halfWidth = 2.0f; openvdb::FloatGrid::Ptr ls = openvdb::createLevelSet<openvdb::FloatGrid>(voxelSize, halfWidth); openvdb::tools::ParticlesToLevelSet<openvdb::FloatGrid> raster(*ls); raster.setGrainSize(1);//a value of zero disables threading raster.rasterizeSpheres(pa); raster.finalize(); //openvdb::FloatGrid::Ptr ls = raster.getSdfGrid(); //ls->tree().print(std::cout,4); //this->writeGrid(ls, "testRasterizeSpheres"); ASSERT_DOUBLES_EXACTLY_EQUAL(halfWidth * voxelSize, ls->tree().getValue(openvdb::Coord( 0, 0, 0))); ASSERT_DOUBLES_EXACTLY_EQUAL( 2, ls->tree().getValue(openvdb::Coord( 6,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL( 1, ls->tree().getValue(openvdb::Coord( 7,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL( 0, ls->tree().getValue(openvdb::Coord( 8,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL(-1, ls->tree().getValue(openvdb::Coord( 9,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL(-2, ls->tree().getValue(openvdb::Coord(10,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL(-1, ls->tree().getValue(openvdb::Coord(11,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL( 0, ls->tree().getValue(openvdb::Coord(12,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL( 1, ls->tree().getValue(openvdb::Coord(13,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL( 2, ls->tree().getValue(openvdb::Coord(14,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL( 2, ls->tree().getValue(openvdb::Coord(20,16,20))); ASSERT_DOUBLES_EXACTLY_EQUAL( 1, ls->tree().getValue(openvdb::Coord(20,17,20))); ASSERT_DOUBLES_EXACTLY_EQUAL( 0, ls->tree().getValue(openvdb::Coord(20,18,20))); ASSERT_DOUBLES_EXACTLY_EQUAL(-1, ls->tree().getValue(openvdb::Coord(20,19,20))); ASSERT_DOUBLES_EXACTLY_EQUAL(-2, ls->tree().getValue(openvdb::Coord(20,20,20))); ASSERT_DOUBLES_EXACTLY_EQUAL(-1, ls->tree().getValue(openvdb::Coord(20,21,20))); ASSERT_DOUBLES_EXACTLY_EQUAL( 0, ls->tree().getValue(openvdb::Coord(20,22,20))); ASSERT_DOUBLES_EXACTLY_EQUAL( 1, ls->tree().getValue(openvdb::Coord(20,23,20))); ASSERT_DOUBLES_EXACTLY_EQUAL( 2, ls->tree().getValue(openvdb::Coord(20,24,20))); {// full but slow test of all voxels openvdb::CoordBBox bbox = pa.getBBox(*ls); bbox.expand(static_cast<int>(halfWidth)+1); openvdb::Index64 count=0; const float outside = ls->background(), inside = -outside; const openvdb::Coord &min=bbox.min(), &max=bbox.max(); for (openvdb::Coord ijk=min; ijk[0]<max[0]; ++ijk[0]) { for (ijk[1]=min[1]; ijk[1]<max[1]; ++ijk[1]) { for (ijk[2]=min[2]; ijk[2]<max[2]; ++ijk[2]) { const openvdb::Vec3d xyz = ls->indexToWorld(ijk.asVec3d()); double dist = (xyz-pa.pos(0)).length()-pa.radius(0); for (int i = 1, s = int(pa.size()); i < s; ++i) { dist=openvdb::math::Min(dist,(xyz-pa.pos(i)).length()-pa.radius(i)); } const float val = ls->tree().getValue(ijk); if (dist >= outside) { EXPECT_NEAR(outside, val, 0.0001); EXPECT_TRUE(ls->tree().isValueOff(ijk)); } else if( dist <= inside ) { EXPECT_NEAR(inside, val, 0.0001); EXPECT_TRUE(ls->tree().isValueOff(ijk)); } else { EXPECT_NEAR( dist, val, 0.0001); EXPECT_TRUE(ls->tree().isValueOn(ijk)); ++count; } } } } //std::cerr << "\nExpected active voxel count = " << count // << ", actual active voxle count = " // << ls->activeVoxelCount() << std::endl; EXPECT_EQ(count, ls->activeVoxelCount()); } } TEST_F(TestParticlesToLevelSet, testRasterizeSpheresAndId) { MyParticleList pa(0.5f); pa.add(openvdb::Vec3R(10,10,10), 4); 
pa.add(openvdb::Vec3R(20,20,20), 4); // testing CSG pa.add(openvdb::Vec3R(31.0,31,31),10); pa.add(openvdb::Vec3R(31.5,31,31),10); pa.add(openvdb::Vec3R(32.0,31,31),10); pa.add(openvdb::Vec3R(32.5,31,31),10); pa.add(openvdb::Vec3R(33.0,31,31),10); pa.add(openvdb::Vec3R(33.5,31,31),10); pa.add(openvdb::Vec3R(34.0,31,31),10); pa.add(openvdb::Vec3R(34.5,31,31),10); pa.add(openvdb::Vec3R(35.0,31,31),10); pa.add(openvdb::Vec3R(35.5,31,31),10); pa.add(openvdb::Vec3R(36.0,31,31),10); EXPECT_EQ(13, int(pa.size())); typedef openvdb::tools::ParticlesToLevelSet<openvdb::FloatGrid, openvdb::Index32> RasterT; const float voxelSize = 1.0f, halfWidth = 2.0f; openvdb::FloatGrid::Ptr ls = openvdb::createLevelSet<openvdb::FloatGrid>(voxelSize, halfWidth); RasterT raster(*ls); raster.setGrainSize(1);//a value of zero disables threading raster.rasterizeSpheres(pa); raster.finalize(); const RasterT::AttGridType::Ptr id = raster.attributeGrid(); int minVal = std::numeric_limits<int>::max(), maxVal = -minVal; for (RasterT::AttGridType::ValueOnCIter i=id->cbeginValueOn(); i; ++i) { minVal = openvdb::math::Min(minVal, int(*i)); maxVal = openvdb::math::Max(maxVal, int(*i)); } EXPECT_EQ(0 , minVal); EXPECT_EQ(12, maxVal); //grid.tree().print(std::cout,4); //id->print(std::cout,4); //this->writeGrid(ls, "testRasterizeSpheres"); ASSERT_DOUBLES_EXACTLY_EQUAL(halfWidth * voxelSize, ls->tree().getValue(openvdb::Coord( 0, 0, 0))); ASSERT_DOUBLES_EXACTLY_EQUAL( 2, ls->tree().getValue(openvdb::Coord( 6,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL( 1, ls->tree().getValue(openvdb::Coord( 7,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL( 0, ls->tree().getValue(openvdb::Coord( 8,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL(-1, ls->tree().getValue(openvdb::Coord( 9,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL(-2, ls->tree().getValue(openvdb::Coord(10,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL(-1, ls->tree().getValue(openvdb::Coord(11,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL( 0, ls->tree().getValue(openvdb::Coord(12,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL( 1, ls->tree().getValue(openvdb::Coord(13,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL( 2, ls->tree().getValue(openvdb::Coord(14,10,10))); ASSERT_DOUBLES_EXACTLY_EQUAL( 2, ls->tree().getValue(openvdb::Coord(20,16,20))); ASSERT_DOUBLES_EXACTLY_EQUAL( 1, ls->tree().getValue(openvdb::Coord(20,17,20))); ASSERT_DOUBLES_EXACTLY_EQUAL( 0, ls->tree().getValue(openvdb::Coord(20,18,20))); ASSERT_DOUBLES_EXACTLY_EQUAL(-1, ls->tree().getValue(openvdb::Coord(20,19,20))); ASSERT_DOUBLES_EXACTLY_EQUAL(-2, ls->tree().getValue(openvdb::Coord(20,20,20))); ASSERT_DOUBLES_EXACTLY_EQUAL(-1, ls->tree().getValue(openvdb::Coord(20,21,20))); ASSERT_DOUBLES_EXACTLY_EQUAL( 0, ls->tree().getValue(openvdb::Coord(20,22,20))); ASSERT_DOUBLES_EXACTLY_EQUAL( 1, ls->tree().getValue(openvdb::Coord(20,23,20))); ASSERT_DOUBLES_EXACTLY_EQUAL( 2, ls->tree().getValue(openvdb::Coord(20,24,20))); {// full but slow test of all voxels openvdb::CoordBBox bbox = pa.getBBox(*ls); bbox.expand(static_cast<int>(halfWidth)+1); openvdb::Index64 count = 0; const float outside = ls->background(), inside = -outside; const openvdb::Coord &min=bbox.min(), &max=bbox.max(); for (openvdb::Coord ijk=min; ijk[0]<max[0]; ++ijk[0]) { for (ijk[1]=min[1]; ijk[1]<max[1]; ++ijk[1]) { for (ijk[2]=min[2]; ijk[2]<max[2]; ++ijk[2]) { const openvdb::Vec3d xyz = ls->indexToWorld(ijk.asVec3d()); double dist = (xyz-pa.pos(0)).length()-pa.radius(0); openvdb::Index32 k =0; for (int i = 1, s = int(pa.size()); i < s; ++i) { double d = (xyz-pa.pos(i)).length()-pa.radius(i); if (d<dist) { k = 
openvdb::Index32(i); dist = d; } }//loop over particles const float val = ls->tree().getValue(ijk); openvdb::Index32 m = id->tree().getValue(ijk); if (dist >= outside) { EXPECT_NEAR(outside, val, 0.0001); EXPECT_TRUE(ls->tree().isValueOff(ijk)); //EXPECT_EQ(openvdb::util::INVALID_IDX, m); EXPECT_TRUE(id->tree().isValueOff(ijk)); } else if( dist <= inside ) { EXPECT_NEAR(inside, val, 0.0001); EXPECT_TRUE(ls->tree().isValueOff(ijk)); //EXPECT_EQ(openvdb::util::INVALID_IDX, m); EXPECT_TRUE(id->tree().isValueOff(ijk)); } else { EXPECT_NEAR( dist, val, 0.0001); EXPECT_TRUE(ls->tree().isValueOn(ijk)); EXPECT_EQ(k, m); EXPECT_TRUE(id->tree().isValueOn(ijk)); ++count; } } } } //std::cerr << "\nExpected active voxel count = " << count // << ", actual active voxel count = " // << ls->activeVoxelCount() << std::endl; EXPECT_EQ(count, ls->activeVoxelCount()); } } /// This is not really a conventional unit-test since the results of /// the tests are written to a file and need to be visually verified! TEST_F(TestParticlesToLevelSet, testRasterizeTrails) { const float voxelSize = 1.0f, halfWidth = 2.0f; openvdb::FloatGrid::Ptr ls = openvdb::createLevelSet<openvdb::FloatGrid>(voxelSize, halfWidth); MyParticleList pa(1,5); // This particle radius = 1 < 1.5 i.e. it's below the Nyquist frequency and hence ignored pa.add(openvdb::Vec3R( 0, 0, 0), 1, openvdb::Vec3R( 0, 1, 0)); pa.add(openvdb::Vec3R(-10,-10,-10), 2, openvdb::Vec3R( 2, 0, 0)); pa.add(openvdb::Vec3R( 10, 10, 10), 3, openvdb::Vec3R( 0, 1, 0)); pa.add(openvdb::Vec3R( 0, 0, 0), 6, openvdb::Vec3R( 0, 0,-5)); pa.add(openvdb::Vec3R( 20, 0, 0), 2, openvdb::Vec3R( 0, 0, 0)); openvdb::tools::ParticlesToLevelSet<openvdb::FloatGrid> raster(*ls); raster.rasterizeTrails(pa, 0.75);//scale offset between two instances //ls->tree().print(std::cout, 4); //this->writeGrid(ls, "testRasterizeTrails"); } TEST_F(TestParticlesToLevelSet, testRasterizeTrailsAndId) { MyParticleList pa(1,5); // This particle radius = 1 < 1.5 i.e. it's below the Nyquist frequency and hence ignored pa.add(openvdb::Vec3R( 0, 0, 0), 1, openvdb::Vec3R( 0, 1, 0)); pa.add(openvdb::Vec3R(-10,-10,-10), 2, openvdb::Vec3R( 2, 0, 0)); pa.add(openvdb::Vec3R( 10, 10, 10), 3, openvdb::Vec3R( 0, 1, 0)); pa.add(openvdb::Vec3R( 0, 0, 0), 6, openvdb::Vec3R( 0, 0,-5)); typedef openvdb::tools::ParticlesToLevelSet<openvdb::FloatGrid, openvdb::Index> RasterT; const float voxelSize = 1.0f, halfWidth = 2.0f; openvdb::FloatGrid::Ptr ls = openvdb::createLevelSet<openvdb::FloatGrid>(voxelSize, halfWidth); RasterT raster(*ls); raster.rasterizeTrails(pa, 0.75);//scale offset between two instances raster.finalize(); const RasterT::AttGridType::Ptr id = raster.attributeGrid(); EXPECT_TRUE(!ls->empty()); EXPECT_TRUE(!id->empty()); EXPECT_EQ(ls->activeVoxelCount(),id->activeVoxelCount()); int min = std::numeric_limits<int>::max(), max = -min; for (RasterT::AttGridType::ValueOnCIter i=id->cbeginValueOn(); i; ++i) { min = openvdb::math::Min(min, int(*i)); max = openvdb::math::Max(max, int(*i)); } EXPECT_EQ(1, min);//first particle is ignored because of its small radius! 
EXPECT_EQ(3, max); //ls->tree().print(std::cout, 4); //this->writeGrid(ls, "testRasterizeTrails"); } TEST_F(TestParticlesToLevelSet, testMaskOutput) { using namespace openvdb; using SdfGridType = FloatGrid; using MaskGridType = MaskGrid; MyParticleList pa; const Vec3R vel(10, 5, 1); pa.add(Vec3R(84.7252, 85.7946, 84.4266), 11.8569, vel); pa.add(Vec3R(47.9977, 81.2169, 47.7665), 5.45313, vel); pa.add(Vec3R(87.0087, 14.0351, 95.7155), 7.36483, vel); pa.add(Vec3R(75.8616, 53.7373, 58.202), 14.4127, vel); pa.add(Vec3R(14.9675, 32.4141, 13.5218), 4.33101, vel); pa.add(Vec3R(96.9809, 9.92804, 90.2349), 12.2613, vel); pa.add(Vec3R(63.4274, 3.84254, 32.5047), 12.1566, vel); pa.add(Vec3R(62.351, 47.4698, 41.4369), 11.637, vel); pa.add(Vec3R(62.2846, 1.35716, 66.2527), 18.9914, vel); pa.add(Vec3R(44.1711, 1.99877, 45.1159), 1.11429, vel); { // Test variable-radius particles. // Rasterize into an SDF. auto sdf = createLevelSet<SdfGridType>(); tools::particlesToSdf(pa, *sdf); // Rasterize into a boolean mask. auto mask = MaskGridType::create(); tools::particlesToMask(pa, *mask); // Verify that the rasterized mask matches the interior of the SDF. mask->tree().voxelizeActiveTiles(); auto interior = tools::sdfInteriorMask(*sdf); EXPECT_TRUE(interior); interior->tree().voxelizeActiveTiles(); EXPECT_EQ(interior->activeVoxelCount(), mask->activeVoxelCount()); interior->topologyDifference(*mask); EXPECT_EQ(0, int(interior->activeVoxelCount())); } { // Test fixed-radius particles. auto sdf = createLevelSet<SdfGridType>(); tools::particlesToSdf(pa, *sdf, /*radius=*/10.0); auto mask = MaskGridType::create(); tools::particlesToMask(pa, *mask, /*radius=*/10.0); mask->tree().voxelizeActiveTiles(); auto interior = tools::sdfInteriorMask(*sdf); EXPECT_TRUE(interior); interior->tree().voxelizeActiveTiles(); EXPECT_EQ(interior->activeVoxelCount(), mask->activeVoxelCount()); interior->topologyDifference(*mask); EXPECT_EQ(0, int(interior->activeVoxelCount())); } { // Test particle trails. auto sdf = createLevelSet<SdfGridType>(); tools::particleTrailsToSdf(pa, *sdf); auto mask = MaskGridType::create(); tools::particleTrailsToMask(pa, *mask); mask->tree().voxelizeActiveTiles(); auto interior = tools::sdfInteriorMask(*sdf); EXPECT_TRUE(interior); interior->tree().voxelizeActiveTiles(); EXPECT_EQ(interior->activeVoxelCount(), mask->activeVoxelCount()); interior->topologyDifference(*mask); EXPECT_EQ(0, int(interior->activeVoxelCount())); } { // Test attribute transfer. auto sdf = createLevelSet<SdfGridType>(); tools::ParticlesToLevelSet<SdfGridType, Index32> p2sdf(*sdf); p2sdf.rasterizeSpheres(pa); p2sdf.finalize(/*prune=*/true); const auto sdfAttr = p2sdf.attributeGrid(); EXPECT_TRUE(sdfAttr); auto mask = MaskGridType::create(); tools::ParticlesToLevelSet<MaskGridType, Index32> p2mask(*mask); p2mask.rasterizeSpheres(pa); p2mask.finalize(/*prune=*/true); const auto maskAttr = p2mask.attributeGrid(); EXPECT_TRUE(maskAttr); mask->tree().voxelizeActiveTiles(); auto interior = tools::sdfInteriorMask(*sdf); EXPECT_TRUE(interior); interior->tree().voxelizeActiveTiles(); EXPECT_EQ(interior->activeVoxelCount(), mask->activeVoxelCount()); interior->topologyDifference(*mask); EXPECT_EQ(0, int(interior->activeVoxelCount())); // Verify that the mask- and SDF-generated attribute grids match. auto sdfAcc = sdfAttr->getConstAccessor(); auto maskAcc = maskAttr->getConstAccessor(); for (auto it = interior->cbeginValueOn(); it; ++it) { const auto& c = it.getCoord(); EXPECT_EQ(sdfAcc.getValue(c), maskAcc.getValue(c)); } } }
23,668
C++
41.723827
102
0.602375
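The test file above exercises openvdb::tools::ParticlesToLevelSet through the MyParticleList wrapper. Below is a minimal stand-alone sketch of the same pattern; the SpherePoints type, its member names, and the single hard-coded particle are illustrative assumptions, while the ParticlesToLevelSet calls mirror the ones used in the test.

#include <iostream>
#include <vector>
#include <openvdb/openvdb.h>
#include <openvdb/tools/ParticlesToLevelSet.h>

// Minimal particle list satisfying the interface used by the test above:
// a PosType typedef, size(), getPos() and getPosRad().
struct SpherePoints {
    using PosType = openvdb::Vec3R;
    std::vector<openvdb::Vec3R> positions;
    std::vector<openvdb::Real> radii;

    size_t size() const { return positions.size(); }
    void getPos(size_t n, openvdb::Vec3R& p) const { p = positions[n]; }
    void getPosRad(size_t n, openvdb::Vec3R& p, openvdb::Real& r) const
    {
        p = positions[n];
        r = radii[n];
    }
};

int main()
{
    openvdb::initialize();

    SpherePoints pa;
    pa.positions.push_back(openvdb::Vec3R(10, 10, 10));
    pa.radii.push_back(2.0);

    // Narrow-band SDF: voxel size 1, half-width 2 voxels, as in the test.
    openvdb::FloatGrid::Ptr ls =
        openvdb::createLevelSet<openvdb::FloatGrid>(/*voxelSize=*/1.0f, /*halfWidth=*/2.0f);

    openvdb::tools::ParticlesToLevelSet<openvdb::FloatGrid> raster(*ls);
    raster.setGrainSize(1);      // a value of zero disables threading
    raster.rasterizeSpheres(pa); // variable-radius spheres via getPosRad()
    raster.finalize();

    std::cout << "Active voxels: " << ls->activeVoxelCount() << std::endl;
    return 0;
}

A fixed, common radius can instead be rasterized with raster.rasterizeSpheres(pa, radius), in which case only getPos() is required of the particle list.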
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/PointCount.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file points/PointCount.h /// /// @author Dan Bailey /// /// @brief Methods for counting points in VDB Point grids. #ifndef OPENVDB_POINTS_POINT_COUNT_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_POINT_COUNT_HAS_BEEN_INCLUDED #include <openvdb/openvdb.h> #include "PointDataGrid.h" #include "PointMask.h" #include "IndexFilter.h" #include <tbb/parallel_reduce.h> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { /// @brief Count the total number of points in a PointDataTree /// @param tree the PointDataTree in which to count the points /// @param filter an optional index filter /// @param inCoreOnly if true, points in out-of-core leaf nodes are not counted /// @param threaded enable or disable threading (threading is enabled by default) template <typename PointDataTreeT, typename FilterT = NullFilter> inline Index64 pointCount( const PointDataTreeT& tree, const FilterT& filter = NullFilter(), const bool inCoreOnly = false, const bool threaded = true); /// @brief Populate an array of cumulative point offsets per leaf node. /// @param pointOffsets array of offsets to be populated /// @param tree the PointDataTree from which to populate the offsets /// @param filter an optional index filter /// @param inCoreOnly if true, points in out-of-core leaf nodes are ignored /// @param threaded enable or disable threading (threading is enabled by default) /// @return The final cumulative point offset. template <typename PointDataTreeT, typename FilterT = NullFilter> inline Index64 pointOffsets(std::vector<Index64>& pointOffsets, const PointDataTreeT& tree, const FilterT& filter = NullFilter(), const bool inCoreOnly = false, const bool threaded = true); /// @brief Generate a new grid with voxel values to store the number of points per voxel /// @param grid the PointDataGrid to use to compute the count grid /// @param filter an optional index filter /// @note The return type of the grid must be an integer or floating-point scalar grid. template <typename PointDataGridT, typename GridT = typename PointDataGridT::template ValueConverter<Int32>::Type, typename FilterT = NullFilter> inline typename GridT::Ptr pointCountGrid( const PointDataGridT& grid, const FilterT& filter = NullFilter()); /// @brief Generate a new grid that uses the supplied transform with voxel values to store the /// number of points per voxel. /// @param grid the PointDataGrid to use to compute the count grid /// @param transform the transform to use to compute the count grid /// @param filter an optional index filter /// @note The return type of the grid must be an integer or floating-point scalar grid. 
template <typename PointDataGridT, typename GridT = typename PointDataGridT::template ValueConverter<Int32>::Type, typename FilterT = NullFilter> inline typename GridT::Ptr pointCountGrid( const PointDataGridT& grid, const openvdb::math::Transform& transform, const FilterT& filter = NullFilter()); //////////////////////////////////////// template <typename PointDataTreeT, typename FilterT> Index64 pointCount(const PointDataTreeT& tree, const FilterT& filter, const bool inCoreOnly, const bool threaded) { using LeafManagerT = tree::LeafManager<const PointDataTreeT>; using LeafRangeT = typename LeafManagerT::LeafRange; auto countLambda = [&filter, &inCoreOnly] (const LeafRangeT& range, Index64 sum) -> Index64 { for (const auto& leaf : range) { if (inCoreOnly && leaf.buffer().isOutOfCore()) continue; auto state = filter.state(leaf); if (state == index::ALL) { sum += leaf.pointCount(); } else if (state != index::NONE) { sum += iterCount(leaf.beginIndexAll(filter)); } } return sum; }; LeafManagerT leafManager(tree); if (threaded) { return tbb::parallel_reduce(leafManager.leafRange(), Index64(0), countLambda, [] (Index64 n, Index64 m) -> Index64 { return n + m; }); } else { return countLambda(leafManager.leafRange(), Index64(0)); } } template <typename PointDataTreeT, typename FilterT> Index64 pointOffsets( std::vector<Index64>& pointOffsets, const PointDataTreeT& tree, const FilterT& filter, const bool inCoreOnly, const bool threaded) { using LeafT = typename PointDataTreeT::LeafNodeType; using LeafManagerT = typename tree::LeafManager<const PointDataTreeT>; // allocate and zero values in point offsets array pointOffsets.assign(tree.leafCount(), Index64(0)); // compute total points per-leaf LeafManagerT leafManager(tree); leafManager.foreach( [&pointOffsets, &filter, &inCoreOnly](const LeafT& leaf, size_t pos) { if (inCoreOnly && leaf.buffer().isOutOfCore()) return; auto state = filter.state(leaf); if (state == index::ALL) { pointOffsets[pos] = leaf.pointCount(); } else if (state != index::NONE) { pointOffsets[pos] = iterCount(leaf.beginIndexAll(filter)); } }, threaded); // turn per-leaf totals into cumulative leaf totals Index64 pointOffset(pointOffsets[0]); for (size_t n = 1; n < pointOffsets.size(); n++) { pointOffset += pointOffsets[n]; pointOffsets[n] = pointOffset; } return pointOffset; } template <typename PointDataGridT, typename GridT, typename FilterT> typename GridT::Ptr pointCountGrid( const PointDataGridT& points, const FilterT& filter) { static_assert( std::is_integral<typename GridT::ValueType>::value || std::is_floating_point<typename GridT::ValueType>::value, "openvdb::points::pointCountGrid must return an integer or floating-point scalar grid"); // This is safe because the PointDataGrid can only be modified by the deformer using AdapterT = TreeAdapter<typename PointDataGridT::TreeType>; auto& nonConstPoints = const_cast<typename AdapterT::NonConstGridType&>(points); return point_mask_internal::convertPointsToScalar<GridT>( nonConstPoints, filter); } template <typename PointDataGridT, typename GridT, typename FilterT> typename GridT::Ptr pointCountGrid( const PointDataGridT& points, const openvdb::math::Transform& transform, const FilterT& filter) { static_assert( std::is_integral<typename GridT::ValueType>::value || std::is_floating_point<typename GridT::ValueType>::value, "openvdb::points::pointCountGrid must return an integer or floating-point scalar grid"); // This is safe because the PointDataGrid can only be modified by the deformer using AdapterT = TreeAdapter<typename 
PointDataGridT::TreeType>; auto& nonConstPoints = const_cast<typename AdapterT::NonConstGridType&>(points); NullDeformer deformer; return point_mask_internal::convertPointsToScalar<GridT>( nonConstPoints, transform, filter, deformer); } //////////////////////////////////////// } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_POINTS_POINT_COUNT_HAS_BEEN_INCLUDED
7,879
C++
36.884615
96
0.650209
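As a usage sketch for the functions declared in PointCount.h above (a sketch only: it assumes the createPointDataGrid() convenience overload from points/PointConversion.h that accepts a std::vector of positions, and the positions themselves are arbitrary illustrative values):

#include <iostream>
#include <vector>
#include <openvdb/openvdb.h>
#include <openvdb/points/PointConversion.h>
#include <openvdb/points/PointCount.h>

int main()
{
    openvdb::initialize();

    // A few world-space point positions (illustrative values).
    std::vector<openvdb::Vec3R> positions;
    positions.push_back(openvdb::Vec3R(0.0, 1.0, 0.0));
    positions.push_back(openvdb::Vec3R(1.5, 3.5, 1.0));
    positions.push_back(openvdb::Vec3R(-1.0, 6.0, -2.0));

    // Index space with a voxel size of 1 for this tiny example.
    openvdb::math::Transform::Ptr transform =
        openvdb::math::Transform::createLinearTransform(/*voxelSize=*/1.0);

    openvdb::points::PointDataGrid::Ptr grid =
        openvdb::points::createPointDataGrid<openvdb::points::NullCodec,
            openvdb::points::PointDataGrid>(positions, *transform);

    // Total number of points stored in the grid.
    const openvdb::Index64 total = openvdb::points::pointCount(grid->tree());

    // Cumulative per-leaf offsets; the last entry equals the total count.
    std::vector<openvdb::Index64> offsets;
    openvdb::points::pointOffsets(offsets, grid->tree());

    // Grid whose active voxels store the number of points they contain
    // (Int32-valued by default).
    auto countGrid = openvdb::points::pointCountGrid(*grid);

    std::cout << "points: " << total
              << ", leaf nodes: " << offsets.size()
              << ", count-grid voxels: " << countGrid->activeVoxelCount() << std::endl;
    return 0;
}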
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/PointMask.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file points/PointMask.h /// /// @author Dan Bailey /// /// @brief Methods for extracting masks from VDB Point grids. #ifndef OPENVDB_POINTS_POINT_MASK_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_POINT_MASK_HAS_BEEN_INCLUDED #include <openvdb/openvdb.h> #include <openvdb/tools/ValueTransformer.h> // valxform::SumOp #include "PointDataGrid.h" #include "IndexFilter.h" #include <tbb/combinable.h> #include <type_traits> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { /// @brief Extract a Mask Grid from a Point Data Grid /// @param grid the PointDataGrid to extract the mask from. /// @param filter an optional index filter /// @param threaded enable or disable threading (threading is enabled by default) /// @note this method is only available for Bool Grids and Mask Grids template <typename PointDataGridT, typename MaskT = typename PointDataGridT::template ValueConverter<bool>::Type, typename FilterT = NullFilter> inline typename std::enable_if<std::is_same<typename MaskT::ValueType, bool>::value, typename MaskT::Ptr>::type convertPointsToMask(const PointDataGridT& grid, const FilterT& filter = NullFilter(), bool threaded = true); /// @brief Extract a Mask Grid from a Point Data Grid using a new transform /// @param grid the PointDataGrid to extract the mask from. /// @param transform target transform for the mask. /// @param filter an optional index filter /// @param threaded enable or disable threading (threading is enabled by default) /// @note this method is only available for Bool Grids and Mask Grids template <typename PointDataGridT, typename MaskT = typename PointDataGridT::template ValueConverter<bool>::Type, typename FilterT = NullFilter> inline typename std::enable_if<std::is_same<typename MaskT::ValueType, bool>::value, typename MaskT::Ptr>::type convertPointsToMask(const PointDataGridT& grid, const openvdb::math::Transform& transform, const FilterT& filter = NullFilter(), bool threaded = true); /// @brief No-op deformer (adheres to the deformer interface documented in PointMove.h) struct NullDeformer { template <typename LeafT> void reset(LeafT&, size_t /*idx*/ = 0) { } template <typename IterT> void apply(Vec3d&, IterT&) const { } }; /// @brief Deformer Traits for optionally configuring deformers to be applied /// in index-space. The default is world-space. 
template <typename DeformerT> struct DeformerTraits { static const bool IndexSpace = false; }; //////////////////////////////////////// namespace point_mask_internal { template <typename LeafT> void voxelSum(LeafT& leaf, const Index offset, const typename LeafT::ValueType& value) { leaf.modifyValue(offset, tools::valxform::SumOp<typename LeafT::ValueType>(value)); } // overload PointDataLeaf access to use setOffsetOn(), as modifyValue() // is intentionally disabled to avoid accidental usage template <typename T, Index Log2Dim> void voxelSum(PointDataLeafNode<T, Log2Dim>& leaf, const Index offset, const typename PointDataLeafNode<T, Log2Dim>::ValueType& value) { leaf.setOffsetOn(offset, leaf.getValue(offset) + value); } /// @brief Combines multiple grids into one by stealing leaf nodes and summing voxel values /// This class is designed to work with thread local storage containers such as tbb::combinable template<typename GridT> struct GridCombinerOp { using CombinableT = typename tbb::combinable<GridT>; using TreeT = typename GridT::TreeType; using LeafT = typename TreeT::LeafNodeType; using ValueType = typename TreeT::ValueType; using SumOp = tools::valxform::SumOp<typename TreeT::ValueType>; GridCombinerOp(GridT& grid) : mTree(grid.tree()) {} void operator()(const GridT& grid) { for (auto leaf = grid.tree().beginLeaf(); leaf; ++leaf) { auto* newLeaf = mTree.probeLeaf(leaf->origin()); if (!newLeaf) { // if the leaf doesn't yet exist in the new tree, steal it auto& tree = const_cast<GridT&>(grid).tree(); mTree.addLeaf(tree.template stealNode<LeafT>(leaf->origin(), zeroVal<ValueType>(), false)); } else { // otherwise increment existing values for (auto iter = leaf->cbeginValueOn(); iter; ++iter) { voxelSum(*newLeaf, iter.offset(), ValueType(*iter)); } } } } private: TreeT& mTree; }; // struct GridCombinerOp /// @brief Compute scalar grid from PointDataGrid while evaluating the point filter template <typename GridT, typename PointDataGridT, typename FilterT> struct PointsToScalarOp { using LeafT = typename GridT::TreeType::LeafNodeType; using ValueT = typename LeafT::ValueType; PointsToScalarOp( const PointDataGridT& grid, const FilterT& filter) : mPointDataAccessor(grid.getConstAccessor()) , mFilter(filter) { } void operator()(LeafT& leaf, size_t /*idx*/) const { const auto* const pointLeaf = mPointDataAccessor.probeConstLeaf(leaf.origin()); // assumes matching topology assert(pointLeaf); for (auto value = leaf.beginValueOn(); value; ++value) { const Index64 count = points::iterCount( pointLeaf->beginIndexVoxel(value.getCoord(), mFilter)); if (count > Index64(0)) { value.setValue(ValueT(count)); } else { // disable any empty voxels value.setValueOn(false); } } } private: const typename PointDataGridT::ConstAccessor mPointDataAccessor; const FilterT& mFilter; }; // struct PointsToScalarOp /// @brief Compute scalar grid from PointDataGrid using a different transform /// and while evaluating the point filter template <typename GridT, typename PointDataGridT, typename FilterT, typename DeformerT> struct PointsToTransformedScalarOp { using PointDataLeafT = typename PointDataGridT::TreeType::LeafNodeType; using ValueT = typename GridT::TreeType::ValueType; using HandleT = AttributeHandle<Vec3f>; using CombinableT = typename GridCombinerOp<GridT>::CombinableT; PointsToTransformedScalarOp(const math::Transform& targetTransform, const math::Transform& sourceTransform, const FilterT& filter, const DeformerT& deformer, CombinableT& combinable) : mTargetTransform(targetTransform) , 
mSourceTransform(sourceTransform) , mFilter(filter) , mDeformer(deformer) , mCombinable(combinable) { } void operator()(const PointDataLeafT& leaf, size_t idx) const { DeformerT deformer(mDeformer); auto& grid = mCombinable.local(); auto& countTree = grid.tree(); tree::ValueAccessor<typename GridT::TreeType> accessor(countTree); deformer.reset(leaf, idx); auto handle = HandleT::create(leaf.constAttributeArray("P")); for (auto iter = leaf.beginIndexOn(mFilter); iter; iter++) { // extract index-space position Vec3d position = handle->get(*iter) + iter.getCoord().asVec3d(); // if deformer is designed to be used in index-space, perform deformation prior // to transforming position to world-space, otherwise perform deformation afterwards if (DeformerTraits<DeformerT>::IndexSpace) { deformer.template apply<decltype(iter)>(position, iter); position = mSourceTransform.indexToWorld(position); } else { position = mSourceTransform.indexToWorld(position); deformer.template apply<decltype(iter)>(position, iter); } // determine coord of target grid const Coord ijk = mTargetTransform.worldToIndexCellCentered(position); // increment count in target voxel auto* newLeaf = accessor.touchLeaf(ijk); assert(newLeaf); voxelSum(*newLeaf, newLeaf->coordToOffset(ijk), ValueT(1)); } } private: const openvdb::math::Transform& mTargetTransform; const openvdb::math::Transform& mSourceTransform; const FilterT& mFilter; const DeformerT& mDeformer; CombinableT& mCombinable; }; // struct PointsToTransformedScalarOp template<typename GridT, typename PointDataGridT, typename FilterT> inline typename GridT::Ptr convertPointsToScalar( const PointDataGridT& points, const FilterT& filter, bool threaded = true) { using point_mask_internal::PointsToScalarOp; using GridTreeT = typename GridT::TreeType; using ValueT = typename GridTreeT::ValueType; // copy the topology from the points grid typename GridTreeT::Ptr tree(new GridTreeT(points.constTree(), false, openvdb::TopologyCopy())); typename GridT::Ptr grid = GridT::create(tree); grid->setTransform(points.transform().copy()); // early exit if no leaves if (points.constTree().leafCount() == 0) return grid; // early exit if mask and no group logic if (std::is_same<ValueT, bool>::value && filter.state() == index::ALL) return grid; // evaluate point group filters to produce a subset of the generated mask tree::LeafManager<GridTreeT> leafManager(*tree); if (filter.state() == index::ALL) { NullFilter nullFilter; PointsToScalarOp<GridT, PointDataGridT, NullFilter> pointsToScalarOp( points, nullFilter); leafManager.foreach(pointsToScalarOp, threaded); } else { // build mask from points in parallel only where filter evaluates to true PointsToScalarOp<GridT, PointDataGridT, FilterT> pointsToScalarOp( points, filter); leafManager.foreach(pointsToScalarOp, threaded); } return grid; } template<typename GridT, typename PointDataGridT, typename FilterT, typename DeformerT> inline typename GridT::Ptr convertPointsToScalar( PointDataGridT& points, const openvdb::math::Transform& transform, const FilterT& filter, const DeformerT& deformer, bool threaded = true) { using point_mask_internal::PointsToTransformedScalarOp; using point_mask_internal::GridCombinerOp; using CombinerOpT = GridCombinerOp<GridT>; using CombinableT = typename GridCombinerOp<GridT>::CombinableT; // use the simpler method if the requested transform matches the existing one const openvdb::math::Transform& pointsTransform = points.constTransform(); if (transform == pointsTransform && std::is_same<NullDeformer, DeformerT>()) { 
return convertPointsToScalar<GridT>(points, filter, threaded); } typename GridT::Ptr grid = GridT::create(); grid->setTransform(transform.copy()); // early exit if no leaves if (points.constTree().leafCount() == 0) return grid; // compute mask grids in parallel using new transform CombinableT combiner; tree::LeafManager<typename PointDataGridT::TreeType> leafManager(points.tree()); if (filter.state() == index::ALL) { NullFilter nullFilter; PointsToTransformedScalarOp<GridT, PointDataGridT, NullFilter, DeformerT> pointsToScalarOp( transform, pointsTransform, nullFilter, deformer, combiner); leafManager.foreach(pointsToScalarOp, threaded); } else { PointsToTransformedScalarOp<GridT, PointDataGridT, FilterT, DeformerT> pointsToScalarOp( transform, pointsTransform, filter, deformer, combiner); leafManager.foreach(pointsToScalarOp, threaded); } // combine the mask grids into one CombinerOpT combineOp(*grid); combiner.combine_each(combineOp); return grid; } } // namespace point_mask_internal //////////////////////////////////////// template<typename PointDataGridT, typename MaskT, typename FilterT> inline typename std::enable_if<std::is_same<typename MaskT::ValueType, bool>::value, typename MaskT::Ptr>::type convertPointsToMask( const PointDataGridT& points, const FilterT& filter, bool threaded) { return point_mask_internal::convertPointsToScalar<MaskT>( points, filter, threaded); } template<typename PointDataGridT, typename MaskT, typename FilterT> inline typename std::enable_if<std::is_same<typename MaskT::ValueType, bool>::value, typename MaskT::Ptr>::type convertPointsToMask( const PointDataGridT& points, const openvdb::math::Transform& transform, const FilterT& filter, bool threaded) { // This is safe because the PointDataGrid can only be modified by the deformer using AdapterT = TreeAdapter<typename PointDataGridT::TreeType>; auto& nonConstPoints = const_cast<typename AdapterT::NonConstGridType&>(points); NullDeformer deformer; return point_mask_internal::convertPointsToScalar<MaskT>( nonConstPoints, transform, filter, deformer, threaded); } //////////////////////////////////////// } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_POINTS_POINT_MASK_HAS_BEEN_INCLUDED
13,566
C++
32.9175
99
0.671384
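A small usage sketch for convertPointsToMask() as declared in PointMask.h above; occupiedVoxelCount is a hypothetical helper name and the point grid is assumed to have been built elsewhere. Using auto avoids committing to the default mask grid type, which is the bool-valued ValueConverter counterpart of the point grid.

#include <openvdb/openvdb.h>
#include <openvdb/points/PointDataGrid.h>
#include <openvdb/points/PointMask.h>

// Report how many voxels of the point grid contain at least one point.
// The mask shares the point grid's transform and topology.
inline openvdb::Index64
occupiedVoxelCount(const openvdb::points::PointDataGrid& points)
{
    auto mask = openvdb::points::convertPointsToMask(points);
    return mask->activeVoxelCount();
}

// Same query, but with the mask resampled onto a caller-supplied
// (e.g. coarser) transform via the second overload declared above.
inline openvdb::Index64
occupiedVoxelCount(const openvdb::points::PointDataGrid& points,
                   const openvdb::math::Transform& target)
{
    auto mask = openvdb::points::convertPointsToMask(points, target);
    return mask->activeVoxelCount();
}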
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/AttributeArray.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file points/AttributeArray.h /// /// @authors Dan Bailey, Mihai Alden, Nick Avramoussis, James Bird, Khang Ngo /// /// @brief Attribute Array storage templated on type and compression codec. #ifndef OPENVDB_POINTS_ATTRIBUTE_ARRAY_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_ATTRIBUTE_ARRAY_HAS_BEEN_INCLUDED #include <openvdb/Types.h> #include <openvdb/math/QuantizedUnitVec.h> #include <openvdb/util/Name.h> #include <openvdb/util/logging.h> #include <openvdb/io/io.h> // MappedFile #include <openvdb/io/Compression.h> // COMPRESS_BLOSC #include "IndexIterator.h" #include "StreamCompression.h" #include <tbb/spin_mutex.h> #include <tbb/atomic.h> #include <memory> #include <mutex> #include <string> #include <type_traits> class TestAttributeArray; namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { using NamePair = std::pair<Name, Name>; namespace points { //////////////////////////////////////// // Utility methods template <typename IntegerT, typename FloatT> inline IntegerT floatingPointToFixedPoint(const FloatT s) { static_assert(std::is_unsigned<IntegerT>::value, "IntegerT must be unsigned"); if (FloatT(0.0) > s) return std::numeric_limits<IntegerT>::min(); else if (FloatT(1.0) <= s) return std::numeric_limits<IntegerT>::max(); return IntegerT(s * FloatT(std::numeric_limits<IntegerT>::max())); } template <typename FloatT, typename IntegerT> inline FloatT fixedPointToFloatingPoint(const IntegerT s) { static_assert(std::is_unsigned<IntegerT>::value, "IntegerT must be unsigned"); return FloatT(s) / FloatT((std::numeric_limits<IntegerT>::max())); } template <typename IntegerVectorT, typename FloatT> inline IntegerVectorT floatingPointToFixedPoint(const math::Vec3<FloatT>& v) { return IntegerVectorT( floatingPointToFixedPoint<typename IntegerVectorT::ValueType>(v.x()), floatingPointToFixedPoint<typename IntegerVectorT::ValueType>(v.y()), floatingPointToFixedPoint<typename IntegerVectorT::ValueType>(v.z())); } template <typename FloatVectorT, typename IntegerT> inline FloatVectorT fixedPointToFloatingPoint(const math::Vec3<IntegerT>& v) { return FloatVectorT( fixedPointToFloatingPoint<typename FloatVectorT::ValueType>(v.x()), fixedPointToFloatingPoint<typename FloatVectorT::ValueType>(v.y()), fixedPointToFloatingPoint<typename FloatVectorT::ValueType>(v.z())); } //////////////////////////////////////// /// Base class for storing attribute data class OPENVDB_API AttributeArray { protected: struct AccessorBase; template <typename T> struct Accessor; using AccessorBasePtr = std::shared_ptr<AccessorBase>; public: enum Flag { TRANSIENT = 0x1, /// by default not written to disk HIDDEN = 0x2, /// hidden from UIs or iterators CONSTANTSTRIDE = 0x8, /// stride size does not vary in the array STREAMING = 0x10, /// streaming mode collapses attributes when first accessed PARTIALREAD = 0x20 /// data has been partially read (compressed bytes is used) }; enum SerializationFlag { WRITESTRIDED = 0x1, /// data is marked as strided when written WRITEUNIFORM = 0x2, /// data is marked as uniform when written WRITEMEMCOMPRESS = 0x4, /// data is marked as compressed in-memory when written /// (deprecated flag as of ABI=6) WRITEPAGED = 0x8 /// data is written out in pages }; // Scoped Lock wrapper class that locks the AttributeArray registry mutex class OPENVDB_API ScopedRegistryLock { tbb::spin_mutex::scoped_lock lock; public: ScopedRegistryLock(); }; // class ScopedRegistryLock using Ptr = 
std::shared_ptr<AttributeArray>; using ConstPtr = std::shared_ptr<const AttributeArray>; using FactoryMethod = Ptr (*)(Index, Index, bool, const Metadata*); template <typename ValueType, typename CodecType> friend class AttributeHandle; AttributeArray(): mPageHandle() { mOutOfCore = 0; } virtual ~AttributeArray() { // if this AttributeArray has been partially read, zero the compressed bytes, // so the page handle won't attempt to clean up invalid memory if (mFlags & PARTIALREAD) mCompressedBytes = 0; } #if OPENVDB_ABI_VERSION_NUMBER >= 6 AttributeArray(const AttributeArray& rhs); AttributeArray& operator=(const AttributeArray& rhs); #else AttributeArray(const AttributeArray&) = default; AttributeArray& operator=(const AttributeArray&) = default; #endif AttributeArray(AttributeArray&&) = delete; AttributeArray& operator=(AttributeArray&&) = delete; /// Return a copy of this attribute. virtual AttributeArray::Ptr copy() const = 0; /// Return a copy of this attribute. #ifndef _MSC_VER [[deprecated("In-memory compression no longer supported, use AttributeArray::copy() instead")]] #endif virtual AttributeArray::Ptr copyUncompressed() const = 0; /// Return the number of elements in this array. /// @note This does not count each data element in a strided array virtual Index size() const = 0; /// Return the stride of this array. /// @note a return value of zero means a non-constant stride virtual Index stride() const = 0; /// Return the total number of data elements in this array. /// @note This counts each data element in a strided array virtual Index dataSize() const = 0; #if OPENVDB_ABI_VERSION_NUMBER >= 6 /// Return the name of the value type of a single element in this array (e.g., "float" or "vec3d"). virtual Name valueType() const = 0; /// Return the name of the codec used by this array (e.g., "trnc" or "fxpt"). virtual Name codecType() const = 0; /// Return the size in bytes of the value type of a single element in this array. /// (e.g. "float" -> 4 bytes, "vec3d" -> 24 bytes"). virtual Index valueTypeSize() const = 0; /// Return the size in bytes of the storage type of a single element of this array. /// @note If the Codec is a NullCodec, valueSize() == storageSize() virtual Index storageTypeSize() const = 0; /// Return @c true if the value type is floating point virtual bool valueTypeIsFloatingPoint() const = 0; /// Return @c true if the value type is a class (ie vector, matrix or quaternion return true) virtual bool valueTypeIsClass() const = 0; /// Return @c true if the value type is a vector virtual bool valueTypeIsVector() const = 0; /// Return @c true if the value type is a quaternion virtual bool valueTypeIsQuaternion() const = 0; /// Return @c true if the value type is a matrix virtual bool valueTypeIsMatrix() const = 0; #endif /// Return the number of bytes of memory used by this attribute. virtual size_t memUsage() const = 0; /// Create a new attribute array of the given (registered) type, length and stride. /// @details If @a lock is non-null, the AttributeArray registry mutex /// has already been locked static Ptr create(const NamePair& type, Index length, Index stride = 1, bool constantStride = true, const Metadata* metadata = nullptr, const ScopedRegistryLock* lock = nullptr); /// Return @c true if the given attribute type name is registered. static bool isRegistered(const NamePair& type, const ScopedRegistryLock* lock = nullptr); /// Clear the attribute type registry. 
static void clearRegistry(const ScopedRegistryLock* lock = nullptr); /// Return the name of this attribute's type. virtual const NamePair& type() const = 0; /// Return @c true if this attribute is of the same type as the template parameter. template<typename AttributeArrayType> bool isType() const { return this->type() == AttributeArrayType::attributeType(); } /// Return @c true if this attribute has a value type the same as the template parameter template<typename ValueType> bool hasValueType() const { return this->type().first == typeNameAsString<ValueType>(); } /// @brief Set value at given index @a n from @a sourceIndex of another @a sourceArray. #if OPENVDB_ABI_VERSION_NUMBER >= 6 // Windows does not allow base classes to be easily deprecated. #ifndef _MSC_VER [[deprecated("From ABI 6 on, use copyValues() with source-target index pairs")]] #endif #endif virtual void set(const Index n, const AttributeArray& sourceArray, const Index sourceIndex) = 0; #if OPENVDB_ABI_VERSION_NUMBER >= 6 /// @brief Copy values into this array from a source array to a target array /// as referenced by an iterator. /// @details Iterators must adhere to the ForwardIterator interface described /// in the example below: /// @code /// struct MyIterator /// { /// // returns true if the iterator is referencing valid copying indices /// operator bool() const; /// // increments the iterator /// MyIterator& operator++(); /// // returns the source index that the iterator is referencing for copying /// Index sourceIndex() const; /// // returns the target index that the iterator is referencing for copying /// Index targetIndex() const; /// }; /// @endcode /// @note It is assumed that the strided storage sizes match, the arrays are both in-core, /// and both value types are floating-point or both integer. /// @note It is possible to use this method to write to a uniform target array /// if the iterator does not have non-zero target indices. /// @note This method is not thread-safe, it must be guaranteed that this array is not /// concurrently modified by another thread and that the source array is also not modified. template<typename IterT> void copyValuesUnsafe(const AttributeArray& sourceArray, const IterT& iter); /// @brief Like copyValuesUnsafe(), but if @a compact is true, attempt to collapse this array. /// @note This method is not thread-safe, it must be guaranteed that this array is not /// concurrently modified by another thread and that the source array is also not modified. template<typename IterT> void copyValues(const AttributeArray& sourceArray, const IterT& iter, bool compact = true); #endif /// Return @c true if this array is stored as a single uniform value. virtual bool isUniform() const = 0; /// @brief If this array is uniform, replace it with an array of length size(). /// @param fill if true, assign the uniform value to each element of the array. virtual void expand(bool fill = true) = 0; /// Replace the existing array with a uniform zero value. 
virtual void collapse() = 0; /// Compact the existing array to become uniform if all values are identical virtual bool compact() = 0; // Windows does not allow base classes to be deprecated #ifndef _MSC_VER [[deprecated("Previously this compressed the attribute array, now it does nothing")]] #endif virtual bool compress() = 0; // Windows does not allow base classes to be deprecated #ifndef _MSC_VER [[deprecated("Previously this uncompressed the attribute array, now it does nothing")]] #endif virtual bool decompress() = 0; /// @brief Specify whether this attribute should be hidden (e.g., from UI or iterators). /// @details This is useful if the attribute is used for blind data or as scratch space /// for a calculation. /// @note Attributes are not hidden by default. void setHidden(bool state); /// Return @c true if this attribute is hidden (e.g., from UI or iterators). bool isHidden() const { return bool(mFlags & HIDDEN); } /// @brief Specify whether this attribute should only exist in memory /// and not be serialized during stream output. /// @note Attributes are not transient by default. void setTransient(bool state); /// Return @c true if this attribute is not serialized during stream output. bool isTransient() const { return bool(mFlags & TRANSIENT); } /// @brief Specify whether this attribute is to be streamed off disk, in which /// case, the attributes are collapsed after being first loaded leaving them /// in a destroyed state. /// @note This operation is not thread-safe. void setStreaming(bool state); /// Return @c true if this attribute is in streaming mode. bool isStreaming() const { return bool(mFlags & STREAMING); } /// Return @c true if this attribute has a constant stride bool hasConstantStride() const { return bool(mFlags & CONSTANTSTRIDE); } /// @brief Retrieve the attribute array flags uint8_t flags() const { return mFlags; } /// Read attribute metadata and buffers from a stream. virtual void read(std::istream&) = 0; /// Write attribute metadata and buffers to a stream. /// @param outputTransient if true, write out transient attributes virtual void write(std::ostream&, bool outputTransient) const = 0; /// Write attribute metadata and buffers to a stream, don't write transient attributes. virtual void write(std::ostream&) const = 0; /// Read attribute metadata from a stream. virtual void readMetadata(std::istream&) = 0; /// Write attribute metadata to a stream. /// @param outputTransient if true, write out transient attributes /// @param paged if true, data is written out in pages virtual void writeMetadata(std::ostream&, bool outputTransient, bool paged) const = 0; /// Read attribute buffers from a stream. virtual void readBuffers(std::istream&) = 0; /// Write attribute buffers to a stream. /// @param outputTransient if true, write out transient attributes virtual void writeBuffers(std::ostream&, bool outputTransient) const = 0; /// Read attribute buffers from a paged stream. virtual void readPagedBuffers(compression::PagedInputStream&) = 0; /// Write attribute buffers to a paged stream. /// @param outputTransient if true, write out transient attributes virtual void writePagedBuffers(compression::PagedOutputStream&, bool outputTransient) const = 0; /// Ensures all data is in-core virtual void loadData() const = 0; #if OPENVDB_ABI_VERSION_NUMBER >= 6 /// Return @c true if all data has been loaded virtual bool isDataLoaded() const = 0; #endif /// Check the compressed bytes and flags. 
If they are equal, perform a deeper /// comparison check necessary on the inherited types (TypedAttributeArray) /// Requires non operator implementation due to inheritance bool operator==(const AttributeArray& other) const; bool operator!=(const AttributeArray& other) const { return !this->operator==(other); } private: friend class ::TestAttributeArray; /// Virtual function used by the comparison operator to perform /// comparisons on inherited types virtual bool isEqual(const AttributeArray& other) const = 0; #if OPENVDB_ABI_VERSION_NUMBER >= 6 /// Virtual function to retrieve the data buffer cast to a char byte array virtual char* dataAsByteArray() = 0; virtual const char* dataAsByteArray() const = 0; /// Private implementation for copyValues/copyValuesUnsafe template <typename IterT> void doCopyValues(const AttributeArray& sourceArray, const IterT& iter, bool rangeChecking = true); #endif protected: #if OPENVDB_ABI_VERSION_NUMBER >= 7 AttributeArray(const AttributeArray& rhs, const tbb::spin_mutex::scoped_lock&); #endif /// @brief Specify whether this attribute has a constant stride or not. void setConstantStride(bool state); /// Obtain an Accessor that stores getter and setter functors. virtual AccessorBasePtr getAccessor() const = 0; /// Register a attribute type along with a factory function. static void registerType(const NamePair& type, FactoryMethod, const ScopedRegistryLock* lock = nullptr); /// Remove a attribute type from the registry. static void unregisterType(const NamePair& type, const ScopedRegistryLock* lock = nullptr); #if OPENVDB_ABI_VERSION_NUMBER < 6 size_t mCompressedBytes = 0; uint8_t mFlags = 0; uint8_t mUsePagedRead = 0; tbb::atomic<Index32> mOutOfCore; // interpreted as bool compression::PageHandle::Ptr mPageHandle; #else // #if OPENVDB_ABI_VERSION_NUMBER < 6 bool mIsUniform = true; mutable tbb::spin_mutex mMutex; uint8_t mFlags = 0; uint8_t mUsePagedRead = 0; tbb::atomic<Index32> mOutOfCore; // interpreted as bool /// used for out-of-core, paged reading union { compression::PageHandle::Ptr mPageHandle; size_t mCompressedBytes; // as of ABI=6, this data is packed together to save memory }; #endif }; // class AttributeArray //////////////////////////////////////// /// Accessor base class for AttributeArray storage where type is not available struct AttributeArray::AccessorBase { virtual ~AccessorBase() = default; }; /// Templated Accessor stores typed function pointers used in binding /// AttributeHandles template <typename T> struct AttributeArray::Accessor : public AttributeArray::AccessorBase { using GetterPtr = T (*)(const AttributeArray* array, const Index n); using SetterPtr = void (*)(AttributeArray* array, const Index n, const T& value); using ValuePtr = void (*)(AttributeArray* array, const T& value); Accessor(GetterPtr getter, SetterPtr setter, ValuePtr collapser, ValuePtr filler) : mGetter(getter), mSetter(setter), mCollapser(collapser), mFiller(filler) { } GetterPtr mGetter; SetterPtr mSetter; ValuePtr mCollapser; ValuePtr mFiller; }; // struct AttributeArray::Accessor //////////////////////////////////////// namespace attribute_traits { template <typename T> struct TruncateTrait { }; template <> struct TruncateTrait<float> { using Type = half; }; template <> struct TruncateTrait<int> { using Type = short; }; template <typename T> struct TruncateTrait<math::Vec3<T>> { using Type = math::Vec3<typename TruncateTrait<T>::Type>; }; template <bool OneByte, typename T> struct UIntTypeTrait { }; template<typename T> struct UIntTypeTrait</*OneByte=*/true, T> { 
using Type = uint8_t; }; template<typename T> struct UIntTypeTrait</*OneByte=*/false, T> { using Type = uint16_t; }; template<typename T> struct UIntTypeTrait</*OneByte=*/true, math::Vec3<T>> { using Type = math::Vec3<uint8_t>; }; template<typename T> struct UIntTypeTrait</*OneByte=*/false, math::Vec3<T>> { using Type = math::Vec3<uint16_t>; }; } //////////////////////////////////////// // Attribute codec schemes struct UnknownCodec { }; struct NullCodec { template <typename T> struct Storage { using Type = T; }; template<typename ValueType> static void decode(const ValueType&, ValueType&); template<typename ValueType> static void encode(const ValueType&, ValueType&); static const char* name() { return "null"; } }; struct TruncateCodec { template <typename T> struct Storage { using Type = typename attribute_traits::TruncateTrait<T>::Type; }; template<typename StorageType, typename ValueType> static void decode(const StorageType&, ValueType&); template<typename StorageType, typename ValueType> static void encode(const ValueType&, StorageType&); static const char* name() { return "trnc"; } }; // Fixed-point codec range for voxel-space positions [-0.5,0.5] struct PositionRange { static const char* name() { return "fxpt"; } template <typename ValueType> static ValueType encode(const ValueType& value) { return value + ValueType(0.5); } template <typename ValueType> static ValueType decode(const ValueType& value) { return value - ValueType(0.5); } }; // Fixed-point codec range for unsigned values in the unit range [0.0,1.0] struct UnitRange { static const char* name() { return "ufxpt"; } template <typename ValueType> static ValueType encode(const ValueType& value) { return value; } template <typename ValueType> static ValueType decode(const ValueType& value) { return value; } }; template <bool OneByte, typename Range=PositionRange> struct FixedPointCodec { template <typename T> struct Storage { using Type = typename attribute_traits::UIntTypeTrait<OneByte, T>::Type; }; template<typename StorageType, typename ValueType> static void decode(const StorageType&, ValueType&); template<typename StorageType, typename ValueType> static void encode(const ValueType&, StorageType&); static const char* name() { static const std::string Name = std::string(Range::name()) + (OneByte ? "8" : "16"); return Name.c_str(); } }; struct UnitVecCodec { using StorageType = uint16_t; template <typename T> struct Storage { using Type = StorageType; }; template<typename T> static void decode(const StorageType&, math::Vec3<T>&); template<typename T> static void encode(const math::Vec3<T>&, StorageType&); static const char* name() { return "uvec"; } }; //////////////////////////////////////// /// Typed class for storing attribute data template<typename ValueType_, typename Codec_ = NullCodec> #if OPENVDB_ABI_VERSION_NUMBER >= 6 // for ABI=6, class is final to allow for de-virtualization class TypedAttributeArray final: public AttributeArray #else class TypedAttributeArray: public AttributeArray #endif { public: using Ptr = std::shared_ptr<TypedAttributeArray>; using ConstPtr = std::shared_ptr<const TypedAttributeArray>; using ValueType = ValueType_; using Codec = Codec_; using StorageType = typename Codec::template Storage<ValueType>::Type; ////////// /// Default constructor, always constructs a uniform attribute. 
explicit TypedAttributeArray(Index n = 1, Index strideOrTotalSize = 1, bool constantStride = true, const ValueType& uniformValue = zeroVal<ValueType>()); #if OPENVDB_ABI_VERSION_NUMBER >= 7 /// Deep copy constructor. /// @note This method is thread-safe (as of ABI=7) for concurrently reading from the /// source attribute array while being deep-copied. Specifically, this means that the /// attribute array being deep-copied can be out-of-core and safely loaded in one thread /// while being copied using this copy-constructor in another thread. /// It is not thread-safe for write. TypedAttributeArray(const TypedAttributeArray&); /// Deep copy constructor. [[deprecated("Use copy-constructor without unused bool parameter")]] TypedAttributeArray(const TypedAttributeArray&, bool /*unused*/); #else /// Deep copy constructor. /// @note This method is not thread-safe for reading or writing, use /// TypedAttributeArray::copy() to ensure thread-safety when reading concurrently. TypedAttributeArray(const TypedAttributeArray&, bool uncompress = false); #endif /// Deep copy assignment operator. /// @note this operator is thread-safe. TypedAttributeArray& operator=(const TypedAttributeArray&); /// Move constructor disabled. TypedAttributeArray(TypedAttributeArray&&) = delete; /// Move assignment operator disabled. TypedAttributeArray& operator=(TypedAttributeArray&&) = delete; ~TypedAttributeArray() override { this->deallocate(); } /// Return a copy of this attribute. /// @note This method is thread-safe. AttributeArray::Ptr copy() const override; /// Return a copy of this attribute. /// @note This method is thread-safe. [[deprecated("In-memory compression no longer supported, use AttributeArray::copy() instead")]] AttributeArray::Ptr copyUncompressed() const override; /// Return a new attribute array of the given length @a n and @a stride with uniform value zero. static Ptr create(Index n, Index strideOrTotalSize = 1, bool constantStride = true, const Metadata* metadata = nullptr); /// Cast an AttributeArray to TypedAttributeArray<T> static TypedAttributeArray& cast(AttributeArray& attributeArray); /// Cast an AttributeArray to TypedAttributeArray<T> static const TypedAttributeArray& cast(const AttributeArray& attributeArray); /// Return the name of this attribute's type (includes codec) static const NamePair& attributeType(); /// Return the name of this attribute's type. const NamePair& type() const override { return attributeType(); } /// Return @c true if this attribute type is registered. static bool isRegistered(); /// Register this attribute type along with a factory function. static void registerType(); /// Remove this attribute type from the registry. static void unregisterType(); /// Return the number of elements in this array. Index size() const override { return mSize; } /// Return the stride of this array. /// @note A return value of zero means a variable stride Index stride() const override { return hasConstantStride() ? mStrideOrTotalSize : 0; } /// Return the size of the data in this array. Index dataSize() const override { return hasConstantStride() ? mSize * mStrideOrTotalSize : mStrideOrTotalSize; } #if OPENVDB_ABI_VERSION_NUMBER >= 6 /// Return the name of the value type of a single element in this array (e.g., "float" or "vec3d"). Name valueType() const override { return typeNameAsString<ValueType>(); } /// Return the name of the codec used by this array (e.g., "trnc" or "fxpt"). 
Name codecType() const override { return Codec::name(); } /// Return the size in bytes of the value type of a single element in this array. Index valueTypeSize() const override { return sizeof(ValueType); } /// Return the size in bytes of the storage type of a single element of this array. /// @note If the Codec is a NullCodec, valueSize() == storageSize() Index storageTypeSize() const override { return sizeof(StorageType); } /// Return @c true if the value type is floating point bool valueTypeIsFloatingPoint() const override; /// Return @c true if the value type is a class (ie vector, matrix or quaternion return true) bool valueTypeIsClass() const override; /// Return @c true if the value type is a vector bool valueTypeIsVector() const override; /// Return @c true if the value type is a quaternion bool valueTypeIsQuaternion() const override; /// Return @c true if the value type is a matrix bool valueTypeIsMatrix() const override; #endif /// Return the number of bytes of memory used by this attribute. size_t memUsage() const override; /// Return the value at index @a n (assumes in-core) ValueType getUnsafe(Index n) const; /// Return the value at index @a n ValueType get(Index n) const; /// Return the @a value at index @a n (assumes in-core) template<typename T> void getUnsafe(Index n, T& value) const; /// Return the @a value at index @a n template<typename T> void get(Index n, T& value) const; /// Non-member equivalent to getUnsafe() that static_casts array to this TypedAttributeArray /// (assumes in-core) static ValueType getUnsafe(const AttributeArray* array, const Index n); /// Set @a value at the given index @a n (assumes in-core) void setUnsafe(Index n, const ValueType& value); /// Set @a value at the given index @a n void set(Index n, const ValueType& value); /// Set @a value at the given index @a n (assumes in-core) template<typename T> void setUnsafe(Index n, const T& value); /// Set @a value at the given index @a n template<typename T> void set(Index n, const T& value); /// Non-member equivalent to setUnsafe() that static_casts array to this TypedAttributeArray /// (assumes in-core) static void setUnsafe(AttributeArray* array, const Index n, const ValueType& value); /// Set value at given index @a n from @a sourceIndex of another @a sourceArray #if OPENVDB_ABI_VERSION_NUMBER >= 6 [[deprecated("From ABI 6 on, use copyValues() with source-target index pairs")]] #endif void set(const Index n, const AttributeArray& sourceArray, const Index sourceIndex) override; /// Return @c true if this array is stored as a single uniform value. bool isUniform() const override { return mIsUniform; } /// @brief Replace the single value storage with an array of length size(). /// @note Non-uniform attributes are unchanged. /// @param fill toggle to initialize the array elements with the pre-expanded value. void expand(bool fill = true) override; /// Replace the existing array with a uniform zero value. void collapse() override; /// Compact the existing array to become uniform if all values are identical bool compact() override; /// Replace the existing array with the given uniform value. void collapse(const ValueType& uniformValue); /// @brief Fill the existing array with the given value. /// @note Identical to collapse() except a non-uniform array will not become uniform. 
    void fill(const ValueType& value);

    /// Non-member equivalent to collapse() that static_casts array to this TypedAttributeArray
    static void collapse(AttributeArray* array, const ValueType& value);
    /// Non-member equivalent to fill() that static_casts array to this TypedAttributeArray
    static void fill(AttributeArray* array, const ValueType& value);

    /// Compress the attribute array.
    [[deprecated("Previously this compressed the attribute array, now it does nothing")]]
    bool compress() override;
    /// Uncompress the attribute array.
    [[deprecated("Previously this uncompressed the attribute array, now it does nothing")]]
    bool decompress() override;

    /// Read attribute data from a stream.
    void read(std::istream&) override;
    /// Write attribute data to a stream.
    /// @param os the output stream
    /// @param outputTransient if true, write out transient attributes
    void write(std::ostream& os, bool outputTransient) const override;
    /// Write attribute data to a stream, don't write transient attributes.
    void write(std::ostream&) const override;

    /// Read attribute metadata from a stream.
    void readMetadata(std::istream&) override;
    /// Write attribute metadata to a stream.
    /// @param os the output stream
    /// @param outputTransient if true, write out transient attributes
    /// @param paged if true, data is written out in pages
    void writeMetadata(std::ostream& os, bool outputTransient, bool paged) const override;

    /// Read attribute buffers from a stream.
    void readBuffers(std::istream&) override;
    /// Write attribute buffers to a stream.
    /// @param os the output stream
    /// @param outputTransient if true, write out transient attributes
    void writeBuffers(std::ostream& os, bool outputTransient) const override;

    /// Read attribute buffers from a paged stream.
    void readPagedBuffers(compression::PagedInputStream&) override;
    /// Write attribute buffers to a paged stream.
    /// @param os the output stream
    /// @param outputTransient if true, write out transient attributes
    void writePagedBuffers(compression::PagedOutputStream& os, bool outputTransient) const override;

    /// Return @c true if this buffer's values have not yet been read from disk.
    inline bool isOutOfCore() const;

    /// Ensures all data is in-core
    void loadData() const override;

#if OPENVDB_ABI_VERSION_NUMBER >= 6
    /// Return @c true if all data has been loaded
    bool isDataLoaded() const override;
#endif

protected:
    AccessorBasePtr getAccessor() const override;

    /// Return the raw data buffer
    inline StorageType* data() { assert(validData()); return mData.get(); }
    inline const StorageType* data() const { assert(validData()); return mData.get(); }

    /// Verify that data is not out-of-core or in a partially-read state
    inline bool validData() const { return !(isOutOfCore() || (flags() & PARTIALREAD)); }

private:
    friend class ::TestAttributeArray;

#if OPENVDB_ABI_VERSION_NUMBER >= 7
    TypedAttributeArray(const TypedAttributeArray&, const tbb::spin_mutex::scoped_lock&);
#endif

    /// Load data from memory-mapped file.
    inline void doLoad() const;

    /// Load data from memory-mapped file (unsafe as this function is not protected by a mutex).
    /// @param compression parameter no longer used
    inline void doLoadUnsafe(const bool compression = true) const;

    /// Compress in-core data assuming mutex is locked
    inline bool compressUnsafe();

    /// Toggle out-of-core state
    inline void setOutOfCore(const bool);

    /// Compare this data to another attribute array.
Used by the base class comparison operator bool isEqual(const AttributeArray& other) const override; #if OPENVDB_ABI_VERSION_NUMBER >= 6 /// Virtual function to retrieve the data buffer from the derived class cast to a char byte array char* dataAsByteArray() override; const char* dataAsByteArray() const override; #endif size_t arrayMemUsage() const; void allocate(); void deallocate(); /// Helper function for use with registerType() static AttributeArray::Ptr factory(Index n, Index strideOrTotalSize, bool constantStride, const Metadata* metadata) { return TypedAttributeArray::create(n, strideOrTotalSize, constantStride, metadata); } static std::unique_ptr<const NamePair> sTypeName; std::unique_ptr<StorageType[]> mData; Index mSize; Index mStrideOrTotalSize; #if OPENVDB_ABI_VERSION_NUMBER < 6 // as of ABI=6, this data lives in the base class to reduce memory bool mIsUniform = true; mutable tbb::spin_mutex mMutex; #endif }; // class TypedAttributeArray //////////////////////////////////////// /// AttributeHandles provide access to specific TypedAttributeArray methods without needing /// to know the compression codec, however these methods also incur the cost of a function pointer template <typename ValueType, typename CodecType = UnknownCodec> class AttributeHandle { public: using Handle = AttributeHandle<ValueType, CodecType>; using Ptr = std::shared_ptr<Handle>; using UniquePtr = std::unique_ptr<Handle>; protected: using GetterPtr = ValueType (*)(const AttributeArray* array, const Index n); using SetterPtr = void (*)(AttributeArray* array, const Index n, const ValueType& value); using ValuePtr = void (*)(AttributeArray* array, const ValueType& value); public: static Ptr create(const AttributeArray& array, const bool collapseOnDestruction = true); AttributeHandle(const AttributeArray& array, const bool collapseOnDestruction = true); AttributeHandle(const AttributeHandle&) = default; AttributeHandle& operator=(const AttributeHandle&) = default; virtual ~AttributeHandle(); Index stride() const { return mStrideOrTotalSize; } Index size() const { return mSize; } bool isUniform() const; bool hasConstantStride() const; ValueType get(Index n, Index m = 0) const; const AttributeArray& array() const; protected: Index index(Index n, Index m) const; const AttributeArray* mArray; GetterPtr mGetter; SetterPtr mSetter; ValuePtr mCollapser; ValuePtr mFiller; private: friend class ::TestAttributeArray; template <bool IsUnknownCodec> typename std::enable_if<IsUnknownCodec, bool>::type compatibleType() const; template <bool IsUnknownCodec> typename std::enable_if<!IsUnknownCodec, bool>::type compatibleType() const; template <bool IsUnknownCodec> typename std::enable_if<IsUnknownCodec, ValueType>::type get(Index index) const; template <bool IsUnknownCodec> typename std::enable_if<!IsUnknownCodec, ValueType>::type get(Index index) const; // local copy of AttributeArray (to preserve compression) AttributeArray::Ptr mLocalArray; Index mStrideOrTotalSize; Index mSize; bool mCollapseOnDestruction; }; // class AttributeHandle //////////////////////////////////////// /// Write-able version of AttributeHandle template <typename ValueType, typename CodecType = UnknownCodec> class AttributeWriteHandle : public AttributeHandle<ValueType, CodecType> { public: using Handle = AttributeWriteHandle<ValueType, CodecType>; using Ptr = std::shared_ptr<Handle>; using ScopedPtr = std::unique_ptr<Handle>; static Ptr create(AttributeArray& array, const bool expand = true); AttributeWriteHandle(AttributeArray& array, const bool 
expand = true); virtual ~AttributeWriteHandle() = default; /// @brief If this array is uniform, replace it with an array of length size(). /// @param fill if true, assign the uniform value to each element of the array. void expand(bool fill = true); /// Replace the existing array with a uniform value (zero if none provided). void collapse(); void collapse(const ValueType& uniformValue); /// Compact the existing array to become uniform if all values are identical bool compact(); /// @brief Fill the existing array with the given value. /// @note Identical to collapse() except a non-uniform array will not become uniform. void fill(const ValueType& value); void set(Index n, const ValueType& value); void set(Index n, Index m, const ValueType& value); AttributeArray& array(); private: friend class ::TestAttributeArray; template <bool IsUnknownCodec> typename std::enable_if<IsUnknownCodec, void>::type set(Index index, const ValueType& value) const; template <bool IsUnknownCodec> typename std::enable_if<!IsUnknownCodec, void>::type set(Index index, const ValueType& value) const; }; // class AttributeWriteHandle //////////////////////////////////////// // Attribute codec implementation template<typename ValueType> inline void NullCodec::decode(const ValueType& data, ValueType& val) { val = data; } template<typename ValueType> inline void NullCodec::encode(const ValueType& val, ValueType& data) { data = val; } template<typename StorageType, typename ValueType> inline void TruncateCodec::decode(const StorageType& data, ValueType& val) { val = static_cast<ValueType>(data); } template<typename StorageType, typename ValueType> inline void TruncateCodec::encode(const ValueType& val, StorageType& data) { data = static_cast<StorageType>(val); } template <bool OneByte, typename Range> template<typename StorageType, typename ValueType> inline void FixedPointCodec<OneByte, Range>::decode(const StorageType& data, ValueType& val) { val = fixedPointToFloatingPoint<ValueType>(data); // shift value range to be -0.5 => 0.5 (as this is most commonly used for position) val = Range::template decode<ValueType>(val); } template <bool OneByte, typename Range> template<typename StorageType, typename ValueType> inline void FixedPointCodec<OneByte, Range>::encode(const ValueType& val, StorageType& data) { // shift value range to be -0.5 => 0.5 (as this is most commonly used for position) const ValueType newVal = Range::template encode<ValueType>(val); data = floatingPointToFixedPoint<StorageType>(newVal); } template<typename T> inline void UnitVecCodec::decode(const StorageType& data, math::Vec3<T>& val) { val = math::QuantizedUnitVec::unpack(data); } template<typename T> inline void UnitVecCodec::encode(const math::Vec3<T>& val, StorageType& data) { data = math::QuantizedUnitVec::pack(val); } //////////////////////////////////////// // AttributeArray implementation #if OPENVDB_ABI_VERSION_NUMBER >= 6 template <typename IterT> void AttributeArray::doCopyValues(const AttributeArray& sourceArray, const IterT& iter, bool rangeChecking/*=true*/) { // ensure both arrays have float-float or integer-integer value types assert(sourceArray.valueTypeIsFloatingPoint() == this->valueTypeIsFloatingPoint()); // ensure both arrays have been loaded from disk (if delay-loaded) assert(sourceArray.isDataLoaded() && this->isDataLoaded()); // ensure storage size * stride matches on both arrays assert(this->storageTypeSize()*this->stride() == sourceArray.storageTypeSize()*sourceArray.stride()); const size_t 
bytes(sourceArray.storageTypeSize()*sourceArray.stride()); const char* const sourceBuffer = sourceArray.dataAsByteArray(); char* const targetBuffer = this->dataAsByteArray(); assert(sourceBuffer && targetBuffer); if (rangeChecking && this->isUniform()) { OPENVDB_THROW(IndexError, "Cannot copy array data as target array is uniform."); } const bool sourceIsUniform = sourceArray.isUniform(); const Index sourceDataSize = rangeChecking ? sourceArray.dataSize() : 0; const Index targetDataSize = rangeChecking ? this->dataSize() : 0; for (IterT it(iter); it; ++it) { const Index sourceIndex = sourceIsUniform ? 0 : it.sourceIndex(); const Index targetIndex = it.targetIndex(); if (rangeChecking) { if (sourceIndex >= sourceDataSize) { OPENVDB_THROW(IndexError, "Cannot copy array data as source index exceeds size of source array."); } if (targetIndex >= targetDataSize) { OPENVDB_THROW(IndexError, "Cannot copy array data as target index exceeds size of target array."); } } else { // range-checking asserts assert(sourceIndex < sourceArray.dataSize()); assert(targetIndex < this->dataSize()); if (this->isUniform()) assert(targetIndex == Index(0)); } const size_t targetOffset(targetIndex * bytes); const size_t sourceOffset(sourceIndex * bytes); std::memcpy(targetBuffer + targetOffset, sourceBuffer + sourceOffset, bytes); } } template <typename IterT> void AttributeArray::copyValuesUnsafe(const AttributeArray& sourceArray, const IterT& iter) { this->doCopyValues(sourceArray, iter, /*range-checking=*/false); } template <typename IterT> void AttributeArray::copyValues(const AttributeArray& sourceArray, const IterT& iter, bool compact/* = true*/) { const Index bytes = sourceArray.storageTypeSize(); if (bytes != this->storageTypeSize()) { OPENVDB_THROW(TypeError, "Cannot copy array data due to mis-match in storage type sizes."); } // ensure both arrays have been loaded from disk sourceArray.loadData(); this->loadData(); // if the target array is uniform, expand it first this->expand(); // TODO: Acquire mutex locks for source and target arrays to ensure that // value copying is always thread-safe. Note that the unsafe method will be // faster, but can only be used if neither the source or target arrays are // modified during copying. Note that this will require a new private // virtual method with ABI=7 to access the mutex from the derived class. 
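    // Illustrative note (not from the original header): the iterator type IterT only needs to
    // provide the interface used by doCopyValues() above, i.e. operator bool, pre-increment,
    // sourceIndex() and targetIndex(). A hypothetical iterator that copies a single value from
    // index 3 of a source array to index 7 of a target array could look like this:
    //
    //     struct SingleCopyIter {
    //         bool done = false;
    //         operator bool() const { return !done; }
    //         SingleCopyIter& operator++() { done = true; return *this; }
    //         Index sourceIndex() const { return 3; }
    //         Index targetIndex() const { return 7; }
    //     };
    //
    //     target.copyValues(source, SingleCopyIter());   // "target" and "source" are assumed names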
this->doCopyValues(sourceArray, iter, true); // attempt to compact target array if (compact) { this->compact(); } } #endif //////////////////////////////////////// // TypedAttributeArray implementation template<typename ValueType_, typename Codec_> std::unique_ptr<const NamePair> TypedAttributeArray<ValueType_, Codec_>::sTypeName; template<typename ValueType_, typename Codec_> TypedAttributeArray<ValueType_, Codec_>::TypedAttributeArray( Index n, Index strideOrTotalSize, bool constantStride, const ValueType& uniformValue) : AttributeArray() , mData(new StorageType[1]) , mSize(n) , mStrideOrTotalSize(strideOrTotalSize) { if (constantStride) { this->setConstantStride(true); if (strideOrTotalSize == 0) { OPENVDB_THROW(ValueError, "Creating a TypedAttributeArray with a constant stride requires that " \ "stride to be at least one.") } } else { this->setConstantStride(false); if (mStrideOrTotalSize < n) { OPENVDB_THROW(ValueError, "Creating a TypedAttributeArray with a non-constant stride must have " \ "a total size of at least the number of elements in the array.") } } mSize = std::max(Index(1), mSize); mStrideOrTotalSize = std::max(Index(1), mStrideOrTotalSize); Codec::encode(uniformValue, this->data()[0]); } #if OPENVDB_ABI_VERSION_NUMBER >= 7 template<typename ValueType_, typename Codec_> TypedAttributeArray<ValueType_, Codec_>::TypedAttributeArray(const TypedAttributeArray& rhs) : TypedAttributeArray(rhs, tbb::spin_mutex::scoped_lock(rhs.mMutex)) { } template<typename ValueType_, typename Codec_> TypedAttributeArray<ValueType_, Codec_>::TypedAttributeArray(const TypedAttributeArray& rhs, const tbb::spin_mutex::scoped_lock& lock) : AttributeArray(rhs, lock) #else template<typename ValueType_, typename Codec_> TypedAttributeArray<ValueType_, Codec_>::TypedAttributeArray(const TypedAttributeArray& rhs, bool) : AttributeArray(rhs) #endif , mSize(rhs.mSize) , mStrideOrTotalSize(rhs.mStrideOrTotalSize) #if OPENVDB_ABI_VERSION_NUMBER < 6 , mIsUniform(rhs.mIsUniform) #endif { if (this->validData()) { this->allocate(); std::memcpy(static_cast<void*>(this->data()), rhs.data(), this->arrayMemUsage()); } } template<typename ValueType_, typename Codec_> TypedAttributeArray<ValueType_, Codec_>& TypedAttributeArray<ValueType_, Codec_>::operator=(const TypedAttributeArray& rhs) { if (&rhs != this) { // lock both the source and target arrays to ensure thread-safety tbb::spin_mutex::scoped_lock lock(mMutex); tbb::spin_mutex::scoped_lock rhsLock(rhs.mMutex); this->deallocate(); mFlags = rhs.mFlags; mUsePagedRead = rhs.mUsePagedRead; mSize = rhs.mSize; mStrideOrTotalSize = rhs.mStrideOrTotalSize; mIsUniform = rhs.mIsUniform; if (this->validData()) { this->allocate(); std::memcpy(static_cast<void*>(this->data()), rhs.data(), this->arrayMemUsage()); } } return *this; } template<typename ValueType_, typename Codec_> inline const NamePair& TypedAttributeArray<ValueType_, Codec_>::attributeType() { static std::once_flag once; std::call_once(once, []() { sTypeName.reset(new NamePair(typeNameAsString<ValueType>(), Codec::name())); }); return *sTypeName; } template<typename ValueType_, typename Codec_> inline bool TypedAttributeArray<ValueType_, Codec_>::isRegistered() { return AttributeArray::isRegistered(TypedAttributeArray::attributeType()); } template<typename ValueType_, typename Codec_> inline void TypedAttributeArray<ValueType_, Codec_>::registerType() { AttributeArray::registerType(TypedAttributeArray::attributeType(), TypedAttributeArray::factory); } template<typename ValueType_, typename Codec_> inline void 
TypedAttributeArray<ValueType_, Codec_>::unregisterType() { AttributeArray::unregisterType(TypedAttributeArray::attributeType()); } template<typename ValueType_, typename Codec_> inline typename TypedAttributeArray<ValueType_, Codec_>::Ptr TypedAttributeArray<ValueType_, Codec_>::create(Index n, Index stride, bool constantStride, const Metadata* metadata) { const TypedMetadata<ValueType>* typedMetadata = metadata ? dynamic_cast<const TypedMetadata<ValueType>*>(metadata) : nullptr; return Ptr(new TypedAttributeArray(n, stride, constantStride, typedMetadata ? typedMetadata->value() : zeroVal<ValueType>())); } template<typename ValueType_, typename Codec_> inline TypedAttributeArray<ValueType_, Codec_>& TypedAttributeArray<ValueType_, Codec_>::cast(AttributeArray& attributeArray) { if (!attributeArray.isType<TypedAttributeArray>()) { OPENVDB_THROW(TypeError, "Invalid Attribute Type"); } return static_cast<TypedAttributeArray&>(attributeArray); } template<typename ValueType_, typename Codec_> inline const TypedAttributeArray<ValueType_, Codec_>& TypedAttributeArray<ValueType_, Codec_>::cast(const AttributeArray& attributeArray) { if (!attributeArray.isType<TypedAttributeArray>()) { OPENVDB_THROW(TypeError, "Invalid Attribute Type"); } return static_cast<const TypedAttributeArray&>(attributeArray); } template<typename ValueType_, typename Codec_> AttributeArray::Ptr TypedAttributeArray<ValueType_, Codec_>::copy() const { #if OPENVDB_ABI_VERSION_NUMBER < 7 tbb::spin_mutex::scoped_lock lock(mMutex); #endif return AttributeArray::Ptr(new TypedAttributeArray<ValueType, Codec>(*this)); } template<typename ValueType_, typename Codec_> AttributeArray::Ptr TypedAttributeArray<ValueType_, Codec_>::copyUncompressed() const { return this->copy(); } template<typename ValueType_, typename Codec_> size_t TypedAttributeArray<ValueType_, Codec_>::arrayMemUsage() const { if (this->isOutOfCore()) return 0; return (mIsUniform ? 1 : this->dataSize()) * sizeof(StorageType); } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::allocate() { assert(!mData); if (mIsUniform) { mData.reset(new StorageType[1]); } else { const size_t size(this->dataSize()); assert(size > 0); mData.reset(new StorageType[size]); } } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::deallocate() { // detach from file if delay-loaded if (this->isOutOfCore()) { this->setOutOfCore(false); this->mPageHandle.reset(); } if (mData) mData.reset(); } #if OPENVDB_ABI_VERSION_NUMBER >= 6 template<typename ValueType_, typename Codec_> bool TypedAttributeArray<ValueType_, Codec_>::valueTypeIsFloatingPoint() const { // TODO: Update to use Traits that correctly handle matrices and quaternions. 
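    // For example (illustrative, based on the checks below): TypedAttributeArray<Vec3f> and
    // TypedAttributeArray<half> report true (float or half element types), TypedAttributeArray<Int32>
    // reports false, and the matrix/quaternion value types are special-cased to return true.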
if (std::is_same<ValueType, Quats>::value || std::is_same<ValueType, Quatd>::value || std::is_same<ValueType, Mat3s>::value || std::is_same<ValueType, Mat3d>::value || std::is_same<ValueType, Mat4s>::value || std::is_same<ValueType, Mat4d>::value) return true; using ElementT = typename VecTraits<ValueType>::ElementType; // half is not defined as float point as expected, so explicitly handle it return std::is_floating_point<ElementT>::value || std::is_same<half, ElementT>::value; } template<typename ValueType_, typename Codec_> bool TypedAttributeArray<ValueType_, Codec_>::valueTypeIsClass() const { // half is not defined as a non-class type as expected, so explicitly exclude it return std::is_class<ValueType>::value && !std::is_same<half, ValueType>::value; } template<typename ValueType_, typename Codec_> bool TypedAttributeArray<ValueType_, Codec_>::valueTypeIsVector() const { return VecTraits<ValueType>::IsVec; } template<typename ValueType_, typename Codec_> bool TypedAttributeArray<ValueType_, Codec_>::valueTypeIsQuaternion() const { // TODO: improve performance by making this a compile-time check using type traits return !this->valueType().compare(0, 4, "quat"); } template<typename ValueType_, typename Codec_> bool TypedAttributeArray<ValueType_, Codec_>::valueTypeIsMatrix() const { // TODO: improve performance by making this a compile-time check using type traits return !this->valueType().compare(0, 3, "mat"); } #endif template<typename ValueType_, typename Codec_> size_t TypedAttributeArray<ValueType_, Codec_>::memUsage() const { return sizeof(*this) + (bool(mData) ? this->arrayMemUsage() : 0); } template<typename ValueType_, typename Codec_> typename TypedAttributeArray<ValueType_, Codec_>::ValueType TypedAttributeArray<ValueType_, Codec_>::getUnsafe(Index n) const { assert(n < this->dataSize()); ValueType val; Codec::decode(/*in=*/this->data()[mIsUniform ? 0 : n], /*out=*/val); return val; } template<typename ValueType_, typename Codec_> typename TypedAttributeArray<ValueType_, Codec_>::ValueType TypedAttributeArray<ValueType_, Codec_>::get(Index n) const { if (n >= this->dataSize()) OPENVDB_THROW(IndexError, "Out-of-range access."); if (this->isOutOfCore()) this->doLoad(); return this->getUnsafe(n); } template<typename ValueType_, typename Codec_> template<typename T> void TypedAttributeArray<ValueType_, Codec_>::getUnsafe(Index n, T& val) const { val = static_cast<T>(this->getUnsafe(n)); } template<typename ValueType_, typename Codec_> template<typename T> void TypedAttributeArray<ValueType_, Codec_>::get(Index n, T& val) const { val = static_cast<T>(this->get(n)); } template<typename ValueType_, typename Codec_> typename TypedAttributeArray<ValueType_, Codec_>::ValueType TypedAttributeArray<ValueType_, Codec_>::getUnsafe(const AttributeArray* array, const Index n) { return static_cast<const TypedAttributeArray<ValueType, Codec>*>(array)->getUnsafe(n); } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::setUnsafe(Index n, const ValueType& val) { assert(n < this->dataSize()); assert(!this->isOutOfCore()); assert(!this->isUniform()); // this unsafe method assumes the data is not uniform, however if it is, this redirects the index // to zero, which is marginally less efficient but ensures not writing to an illegal address Codec::encode(/*in=*/val, /*out=*/this->data()[mIsUniform ? 
0 : n]); } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::set(Index n, const ValueType& val) { if (n >= this->dataSize()) OPENVDB_THROW(IndexError, "Out-of-range access."); if (this->isOutOfCore()) this->doLoad(); if (this->isUniform()) this->expand(); this->setUnsafe(n, val); } template<typename ValueType_, typename Codec_> template<typename T> void TypedAttributeArray<ValueType_, Codec_>::setUnsafe(Index n, const T& val) { this->setUnsafe(n, static_cast<ValueType>(val)); } template<typename ValueType_, typename Codec_> template<typename T> void TypedAttributeArray<ValueType_, Codec_>::set(Index n, const T& val) { this->set(n, static_cast<ValueType>(val)); } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::setUnsafe(AttributeArray* array, const Index n, const ValueType& value) { static_cast<TypedAttributeArray<ValueType, Codec>*>(array)->setUnsafe(n, value); } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::set(Index n, const AttributeArray& sourceArray, const Index sourceIndex) { const TypedAttributeArray& sourceTypedArray = static_cast<const TypedAttributeArray&>(sourceArray); ValueType sourceValue; sourceTypedArray.get(sourceIndex, sourceValue); this->set(n, sourceValue); } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::expand(bool fill) { if (!mIsUniform) return; const StorageType val = this->data()[0]; { tbb::spin_mutex::scoped_lock lock(mMutex); this->deallocate(); mIsUniform = false; this->allocate(); } if (fill) { for (Index i = 0; i < this->dataSize(); ++i) this->data()[i] = val; } } template<typename ValueType_, typename Codec_> bool TypedAttributeArray<ValueType_, Codec_>::compact() { if (mIsUniform) return true; // compaction is not possible if any values are different const ValueType_ val = this->get(0); for (Index i = 1; i < this->dataSize(); i++) { if (!math::isExactlyEqual(this->get(i), val)) return false; } this->collapse(this->get(0)); return true; } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::collapse() { this->collapse(zeroVal<ValueType>()); } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::collapse(const ValueType& uniformValue) { if (!mIsUniform) { tbb::spin_mutex::scoped_lock lock(mMutex); this->deallocate(); mIsUniform = true; this->allocate(); } Codec::encode(uniformValue, this->data()[0]); } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::collapse(AttributeArray* array, const ValueType& value) { static_cast<TypedAttributeArray<ValueType, Codec>*>(array)->collapse(value); } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::fill(const ValueType& value) { if (this->isOutOfCore()) { tbb::spin_mutex::scoped_lock lock(mMutex); this->deallocate(); this->allocate(); } const Index size = mIsUniform ? 
1 : this->dataSize(); for (Index i = 0; i < size; ++i) { Codec::encode(value, this->data()[i]); } } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::fill(AttributeArray* array, const ValueType& value) { static_cast<TypedAttributeArray<ValueType, Codec>*>(array)->fill(value); } template<typename ValueType_, typename Codec_> inline bool TypedAttributeArray<ValueType_, Codec_>::compress() { return false; } template<typename ValueType_, typename Codec_> inline bool TypedAttributeArray<ValueType_, Codec_>::compressUnsafe() { return false; } template<typename ValueType_, typename Codec_> inline bool TypedAttributeArray<ValueType_, Codec_>::decompress() { return false; } template<typename ValueType_, typename Codec_> bool TypedAttributeArray<ValueType_, Codec_>::isOutOfCore() const { return mOutOfCore; } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::setOutOfCore(const bool b) { mOutOfCore = b; } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::doLoad() const { if (!(this->isOutOfCore())) return; TypedAttributeArray<ValueType_, Codec_>* self = const_cast<TypedAttributeArray<ValueType_, Codec_>*>(this); // This lock will be contended at most once, after which this buffer // will no longer be out-of-core. tbb::spin_mutex::scoped_lock lock(self->mMutex); this->doLoadUnsafe(); } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::loadData() const { this->doLoad(); } #if OPENVDB_ABI_VERSION_NUMBER >= 6 template<typename ValueType_, typename Codec_> bool TypedAttributeArray<ValueType_, Codec_>::isDataLoaded() const { return !this->isOutOfCore(); } #endif template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::read(std::istream& is) { this->readMetadata(is); this->readBuffers(is); } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::readMetadata(std::istream& is) { // read data Index64 bytes = Index64(0); is.read(reinterpret_cast<char*>(&bytes), sizeof(Index64)); bytes = bytes - /*flags*/sizeof(Int16) - /*size*/sizeof(Index); uint8_t flags = uint8_t(0); is.read(reinterpret_cast<char*>(&flags), sizeof(uint8_t)); mFlags = flags; uint8_t serializationFlags = uint8_t(0); is.read(reinterpret_cast<char*>(&serializationFlags), sizeof(uint8_t)); Index size = Index(0); is.read(reinterpret_cast<char*>(&size), sizeof(Index)); mSize = size; // warn if an unknown flag has been set if (mFlags >= 0x20) { OPENVDB_LOG_WARN("Unknown attribute flags for VDB file format."); } // error if an unknown serialization flag has been set, // as this will adjust the layout of the data and corrupt the ability to read if (serializationFlags >= 0x10) { OPENVDB_THROW(IoError, "Unknown attribute serialization flags for VDB file format."); } // set uniform, compressed and page read state mIsUniform = serializationFlags & WRITEUNIFORM; mUsePagedRead = serializationFlags & WRITEPAGED; mCompressedBytes = bytes; mFlags |= PARTIALREAD; // mark data as having been partially read // read strided value (set to 1 if array is not strided) if (serializationFlags & WRITESTRIDED) { Index stride = Index(0); is.read(reinterpret_cast<char*>(&stride), sizeof(Index)); mStrideOrTotalSize = stride; } else { mStrideOrTotalSize = 1; } } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::readBuffers(std::istream& is) { if (mUsePagedRead) { // use 
readBuffers(PagedInputStream&) for paged buffers OPENVDB_THROW(IoError, "Cannot read paged AttributeArray buffers."); } tbb::spin_mutex::scoped_lock lock(mMutex); this->deallocate(); uint8_t bloscCompressed(0); if (!mIsUniform) is.read(reinterpret_cast<char*>(&bloscCompressed), sizeof(uint8_t)); assert(mFlags & PARTIALREAD); std::unique_ptr<char[]> buffer(new char[mCompressedBytes]); is.read(buffer.get(), mCompressedBytes); mCompressedBytes = 0; mFlags = static_cast<uint8_t>(mFlags & ~PARTIALREAD); // mark data read as having completed // compressed on-disk if (bloscCompressed == uint8_t(1)) { // decompress buffer const size_t inBytes = this->dataSize() * sizeof(StorageType); std::unique_ptr<char[]> newBuffer = compression::bloscDecompress(buffer.get(), inBytes); if (newBuffer) buffer.reset(newBuffer.release()); } // set data to buffer mData.reset(reinterpret_cast<StorageType*>(buffer.release())); } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::readPagedBuffers(compression::PagedInputStream& is) { if (!mUsePagedRead) { if (!is.sizeOnly()) this->readBuffers(is.getInputStream()); return; } // If this array is being read from a memory-mapped file, delay loading of its data // until the data is actually accessed. io::MappedFile::Ptr mappedFile = io::getMappedFilePtr(is.getInputStream()); const bool delayLoad = (mappedFile.get() != nullptr); if (is.sizeOnly()) { size_t compressedBytes(mCompressedBytes); mCompressedBytes = 0; // if not set to zero, mPageHandle will attempt to destroy invalid memory mFlags = static_cast<uint8_t>(mFlags & ~PARTIALREAD); // mark data read as having completed assert(!mPageHandle); mPageHandle = is.createHandle(compressedBytes); return; } assert(mPageHandle); tbb::spin_mutex::scoped_lock lock(mMutex); this->deallocate(); this->setOutOfCore(delayLoad); is.read(mPageHandle, std::streamsize(mPageHandle->size()), delayLoad); if (!delayLoad) { std::unique_ptr<char[]> buffer = mPageHandle->read(); mData.reset(reinterpret_cast<StorageType*>(buffer.release())); } // clear page state mUsePagedRead = 0; } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::write(std::ostream& os) const { this->write(os, /*outputTransient=*/false); } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::write(std::ostream& os, bool outputTransient) const { this->writeMetadata(os, outputTransient, /*paged=*/false); this->writeBuffers(os, outputTransient); } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::writeMetadata(std::ostream& os, bool outputTransient, bool paged) const { if (!outputTransient && this->isTransient()) return; if (mFlags & PARTIALREAD) { OPENVDB_THROW(IoError, "Cannot write out a partially-read AttributeArray."); } uint8_t flags(mFlags); uint8_t serializationFlags(0); Index size(mSize); Index stride(mStrideOrTotalSize); bool strideOfOne(this->stride() == 1); bool bloscCompression = io::getDataCompression(os) & io::COMPRESS_BLOSC; // any compressed data needs to be loaded if out-of-core if (bloscCompression) this->doLoad(); size_t compressedBytes = 0; if (!strideOfOne) { serializationFlags |= WRITESTRIDED; } if (mIsUniform) { serializationFlags |= WRITEUNIFORM; if (bloscCompression && paged) serializationFlags |= WRITEPAGED; } else if (bloscCompression) { if (paged) serializationFlags |= WRITEPAGED; else { const char* charBuffer = reinterpret_cast<const char*>(this->data()); const size_t inBytes = 
this->arrayMemUsage(); compressedBytes = compression::bloscCompressedSize(charBuffer, inBytes); } } Index64 bytes = /*flags*/ sizeof(Int16) + /*size*/ sizeof(Index); bytes += (compressedBytes > 0) ? compressedBytes : this->arrayMemUsage(); // write data os.write(reinterpret_cast<const char*>(&bytes), sizeof(Index64)); os.write(reinterpret_cast<const char*>(&flags), sizeof(uint8_t)); os.write(reinterpret_cast<const char*>(&serializationFlags), sizeof(uint8_t)); os.write(reinterpret_cast<const char*>(&size), sizeof(Index)); // write strided if (!strideOfOne) os.write(reinterpret_cast<const char*>(&stride), sizeof(Index)); } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::writeBuffers(std::ostream& os, bool outputTransient) const { if (!outputTransient && this->isTransient()) return; if (mFlags & PARTIALREAD) { OPENVDB_THROW(IoError, "Cannot write out a partially-read AttributeArray."); } this->doLoad(); if (this->isUniform()) { os.write(reinterpret_cast<const char*>(this->data()), sizeof(StorageType)); } else if (io::getDataCompression(os) & io::COMPRESS_BLOSC) { std::unique_ptr<char[]> compressedBuffer; size_t compressedBytes = 0; const char* charBuffer = reinterpret_cast<const char*>(this->data()); const size_t inBytes = this->arrayMemUsage(); compressedBuffer = compression::bloscCompress(charBuffer, inBytes, compressedBytes); if (compressedBuffer) { uint8_t bloscCompressed(1); os.write(reinterpret_cast<const char*>(&bloscCompressed), sizeof(uint8_t)); os.write(reinterpret_cast<const char*>(compressedBuffer.get()), compressedBytes); } else { uint8_t bloscCompressed(0); os.write(reinterpret_cast<const char*>(&bloscCompressed), sizeof(uint8_t)); os.write(reinterpret_cast<const char*>(this->data()), inBytes); } } else { uint8_t bloscCompressed(0); os.write(reinterpret_cast<const char*>(&bloscCompressed), sizeof(uint8_t)); os.write(reinterpret_cast<const char*>(this->data()), this->arrayMemUsage()); } } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::writePagedBuffers(compression::PagedOutputStream& os, bool outputTransient) const { if (!outputTransient && this->isTransient()) return; // paged compression only available when Blosc is enabled bool bloscCompression = io::getDataCompression(os.getOutputStream()) & io::COMPRESS_BLOSC; if (!bloscCompression) { if (!os.sizeOnly()) this->writeBuffers(os.getOutputStream(), outputTransient); return; } if (mFlags & PARTIALREAD) { OPENVDB_THROW(IoError, "Cannot write out a partially-read AttributeArray."); } this->doLoad(); os.write(reinterpret_cast<const char*>(this->data()), this->arrayMemUsage()); } template<typename ValueType_, typename Codec_> void TypedAttributeArray<ValueType_, Codec_>::doLoadUnsafe(const bool /*compression*/) const { if (!(this->isOutOfCore())) return; // this function expects the mutex to already be locked auto* self = const_cast<TypedAttributeArray<ValueType_, Codec_>*>(this); assert(self->mPageHandle); assert(!(self->mFlags & PARTIALREAD)); std::unique_ptr<char[]> buffer = self->mPageHandle->read(); self->mData.reset(reinterpret_cast<StorageType*>(buffer.release())); self->mPageHandle.reset(); // clear all write and out-of-core flags self->mOutOfCore = false; } template<typename ValueType_, typename Codec_> AttributeArray::AccessorBasePtr TypedAttributeArray<ValueType_, Codec_>::getAccessor() const { // use the faster 'unsafe' get and set methods as attribute handles // ensure data is in-core when constructed return AccessorBasePtr(new 
        AttributeArray::Accessor<ValueType_>(
            &TypedAttributeArray<ValueType_, Codec_>::getUnsafe,
            &TypedAttributeArray<ValueType_, Codec_>::setUnsafe,
            &TypedAttributeArray<ValueType_, Codec_>::collapse,
            &TypedAttributeArray<ValueType_, Codec_>::fill));
}


template<typename ValueType_, typename Codec_>
bool
TypedAttributeArray<ValueType_, Codec_>::isEqual(const AttributeArray& other) const
{
    const TypedAttributeArray<ValueType_, Codec_>* const otherT =
        dynamic_cast<const TypedAttributeArray<ValueType_, Codec_>* >(&other);
    if(!otherT) return false;
    if(this->mSize != otherT->mSize ||
       this->mStrideOrTotalSize != otherT->mStrideOrTotalSize ||
       this->mIsUniform != otherT->mIsUniform ||
       this->attributeType() != otherT->attributeType()) return false;

    this->doLoad();
    otherT->doLoad();

    const StorageType *target = this->data(), *source = otherT->data();
    if (!target && !source) return true;
    if (!target || !source) return false;
    Index n = this->mIsUniform ? 1 : mSize;
    while (n && math::isExactlyEqual(*target++, *source++)) --n;
    return n == 0;
}

#if OPENVDB_ABI_VERSION_NUMBER >= 6
template<typename ValueType_, typename Codec_>
char*
TypedAttributeArray<ValueType_, Codec_>::dataAsByteArray()
{
    return reinterpret_cast<char*>(this->data());
}


template<typename ValueType_, typename Codec_>
const char*
TypedAttributeArray<ValueType_, Codec_>::dataAsByteArray() const
{
    return reinterpret_cast<const char*>(this->data());
}
#endif


////////////////////////////////////////


/// Accessor to call unsafe get and set methods based on templated Codec and Value
template <typename CodecType, typename ValueType>
struct AccessorEval
{
    using GetterPtr = ValueType (*)(const AttributeArray* array, const Index n);
    using SetterPtr = void (*)(AttributeArray* array, const Index n, const ValueType& value);

    /// Getter that calls to TypedAttributeArray::getUnsafe()
    /// @note Functor argument is provided but not required for the generic case
    static ValueType get(GetterPtr /*functor*/, const AttributeArray* array, const Index n) {
        return TypedAttributeArray<ValueType, CodecType>::getUnsafe(array, n);
    }

    /// Setter that calls to TypedAttributeArray::setUnsafe()
    /// @note Functor argument is provided but not required for the generic case
    static void set(SetterPtr /*functor*/, AttributeArray* array, const Index n, const ValueType& value) {
        TypedAttributeArray<ValueType, CodecType>::setUnsafe(array, n, value);
    }
};


/// Partial specialization when Codec is not known at compile-time to use the supplied functor instead
template <typename ValueType>
struct AccessorEval<UnknownCodec, ValueType>
{
    using GetterPtr = ValueType (*)(const AttributeArray* array, const Index n);
    using SetterPtr = void (*)(AttributeArray* array, const Index n, const ValueType& value);

    /// Getter that calls the supplied functor
    static ValueType get(GetterPtr functor, const AttributeArray* array, const Index n) {
        return (*functor)(array, n);
    }

    /// Setter that calls the supplied functor
    static void set(SetterPtr functor, AttributeArray* array, const Index n, const ValueType& value) {
        (*functor)(array, n, value);
    }
};


////////////////////////////////////////

// AttributeHandle implementation

template <typename ValueType, typename CodecType>
typename AttributeHandle<ValueType, CodecType>::Ptr
AttributeHandle<ValueType, CodecType>::create(const AttributeArray& array, const bool collapseOnDestruction)
{
    return typename AttributeHandle<ValueType, CodecType>::Ptr(
        new AttributeHandle<ValueType, CodecType>(array, collapseOnDestruction));
}

template <typename ValueType, typename CodecType>
AttributeHandle<ValueType, CodecType>::AttributeHandle(const AttributeArray& array, const bool collapseOnDestruction) : mArray(&array) , mStrideOrTotalSize(array.hasConstantStride() ? array.stride() : 1) , mSize(array.hasConstantStride() ? array.size() : array.dataSize()) , mCollapseOnDestruction(collapseOnDestruction && array.isStreaming()) { if (!this->compatibleType<std::is_same<CodecType, UnknownCodec>::value>()) { OPENVDB_THROW(TypeError, "Cannot bind handle due to incompatible type of AttributeArray."); } // load data if delay-loaded mArray->loadData(); // bind getter and setter methods AttributeArray::AccessorBasePtr accessor = mArray->getAccessor(); assert(accessor); AttributeArray::Accessor<ValueType>* typedAccessor = static_cast<AttributeArray::Accessor<ValueType>*>(accessor.get()); mGetter = typedAccessor->mGetter; mSetter = typedAccessor->mSetter; mCollapser = typedAccessor->mCollapser; mFiller = typedAccessor->mFiller; } template <typename ValueType, typename CodecType> AttributeHandle<ValueType, CodecType>::~AttributeHandle() { // if enabled, attribute is collapsed on destruction of the handle to save memory if (mCollapseOnDestruction) const_cast<AttributeArray*>(this->mArray)->collapse(); } template <typename ValueType, typename CodecType> template <bool IsUnknownCodec> typename std::enable_if<IsUnknownCodec, bool>::type AttributeHandle<ValueType, CodecType>::compatibleType() const { // if codec is unknown, just check the value type return mArray->hasValueType<ValueType>(); } template <typename ValueType, typename CodecType> template <bool IsUnknownCodec> typename std::enable_if<!IsUnknownCodec, bool>::type AttributeHandle<ValueType, CodecType>::compatibleType() const { // if the codec is known, check the value type and codec return mArray->isType<TypedAttributeArray<ValueType, CodecType>>(); } template <typename ValueType, typename CodecType> const AttributeArray& AttributeHandle<ValueType, CodecType>::array() const { assert(mArray); return *mArray; } template <typename ValueType, typename CodecType> Index AttributeHandle<ValueType, CodecType>::index(Index n, Index m) const { Index index = n * mStrideOrTotalSize + m; assert(index < (mSize * mStrideOrTotalSize)); return index; } template <typename ValueType, typename CodecType> ValueType AttributeHandle<ValueType, CodecType>::get(Index n, Index m) const { return this->get<std::is_same<CodecType, UnknownCodec>::value>(this->index(n, m)); } template <typename ValueType, typename CodecType> template <bool IsUnknownCodec> typename std::enable_if<IsUnknownCodec, ValueType>::type AttributeHandle<ValueType, CodecType>::get(Index index) const { // if the codec is unknown, use the getter functor return (*mGetter)(mArray, index); } template <typename ValueType, typename CodecType> template <bool IsUnknownCodec> typename std::enable_if<!IsUnknownCodec, ValueType>::type AttributeHandle<ValueType, CodecType>::get(Index index) const { // if the codec is known, call the method on the attribute array directly return TypedAttributeArray<ValueType, CodecType>::getUnsafe(mArray, index); } template <typename ValueType, typename CodecType> bool AttributeHandle<ValueType, CodecType>::isUniform() const { return mArray->isUniform(); } template <typename ValueType, typename CodecType> bool AttributeHandle<ValueType, CodecType>::hasConstantStride() const { return mArray->hasConstantStride(); } //////////////////////////////////////// // AttributeWriteHandle implementation template <typename ValueType, typename CodecType> typename 
AttributeWriteHandle<ValueType, CodecType>::Ptr AttributeWriteHandle<ValueType, CodecType>::create(AttributeArray& array, const bool expand) { return typename AttributeWriteHandle<ValueType, CodecType>::Ptr( new AttributeWriteHandle<ValueType, CodecType>(array, expand)); } template <typename ValueType, typename CodecType> AttributeWriteHandle<ValueType, CodecType>::AttributeWriteHandle(AttributeArray& array, const bool expand) : AttributeHandle<ValueType, CodecType>(array, /*collapseOnDestruction=*/false) { if (expand) array.expand(); } template <typename ValueType, typename CodecType> void AttributeWriteHandle<ValueType, CodecType>::set(Index n, const ValueType& value) { this->set<std::is_same<CodecType, UnknownCodec>::value>(this->index(n, 0), value); } template <typename ValueType, typename CodecType> void AttributeWriteHandle<ValueType, CodecType>::set(Index n, Index m, const ValueType& value) { this->set<std::is_same<CodecType, UnknownCodec>::value>(this->index(n, m), value); } template <typename ValueType, typename CodecType> void AttributeWriteHandle<ValueType, CodecType>::expand(const bool fill) { const_cast<AttributeArray*>(this->mArray)->expand(fill); } template <typename ValueType, typename CodecType> void AttributeWriteHandle<ValueType, CodecType>::collapse() { const_cast<AttributeArray*>(this->mArray)->collapse(); } template <typename ValueType, typename CodecType> bool AttributeWriteHandle<ValueType, CodecType>::compact() { return const_cast<AttributeArray*>(this->mArray)->compact(); } template <typename ValueType, typename CodecType> void AttributeWriteHandle<ValueType, CodecType>::collapse(const ValueType& uniformValue) { this->mCollapser(const_cast<AttributeArray*>(this->mArray), uniformValue); } template <typename ValueType, typename CodecType> void AttributeWriteHandle<ValueType, CodecType>::fill(const ValueType& value) { this->mFiller(const_cast<AttributeArray*>(this->mArray), value); } template <typename ValueType, typename CodecType> template <bool IsUnknownCodec> typename std::enable_if<IsUnknownCodec, void>::type AttributeWriteHandle<ValueType, CodecType>::set(Index index, const ValueType& value) const { // if the codec is unknown, use the setter functor (*this->mSetter)(const_cast<AttributeArray*>(this->mArray), index, value); } template <typename ValueType, typename CodecType> template <bool IsUnknownCodec> typename std::enable_if<!IsUnknownCodec, void>::type AttributeWriteHandle<ValueType, CodecType>::set(Index index, const ValueType& value) const { // if the codec is known, call the method on the attribute array directly TypedAttributeArray<ValueType, CodecType>::setUnsafe(const_cast<AttributeArray*>(this->mArray), index, value); } template <typename ValueType, typename CodecType> AttributeArray& AttributeWriteHandle<ValueType, CodecType>::array() { assert(this->mArray); return *const_cast<AttributeArray*>(this->mArray); } } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_POINTS_ATTRIBUTE_ARRAY_HAS_BEEN_INCLUDED
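

////////////////////////////////////////


// Illustrative usage sketch (not part of the library header; a minimal example of the handle API
// declared above, with assumed variable names):
//
//     using FloatArray = openvdb::points::TypedAttributeArray<float>;
//
//     FloatArray::Ptr array = FloatArray::create(/*n=*/10);
//
//     // write through a typed handle (a uniform array is expanded on construction by default)
//     openvdb::points::AttributeWriteHandle<float>::Ptr write =
//         openvdb::points::AttributeWriteHandle<float>::create(*array);
//     write->set(/*n=*/0, 1.5f);
//
//     // read back through a read-only handle
//     openvdb::points::AttributeHandle<float>::Ptr read =
//         openvdb::points::AttributeHandle<float>::create(*array);
//     const float value = read->get(/*n=*/0);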
79,926
C
33.391997
136
0.695181
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/PointDataGrid.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @author Dan Bailey /// /// @file points/PointDataGrid.h /// /// @brief Attribute-owned data structure for points. Point attributes are /// stored in leaf nodes and ordered by voxel for fast random and /// sequential access. #ifndef OPENVDB_POINTS_POINT_DATA_GRID_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_POINT_DATA_GRID_HAS_BEEN_INCLUDED #include <openvdb/version.h> #include <openvdb/Grid.h> #include <openvdb/tree/Tree.h> #include <openvdb/tree/LeafNode.h> #include <openvdb/tools/PointIndexGrid.h> #include "AttributeArray.h" #include "AttributeArrayString.h" #include "AttributeGroup.h" #include "AttributeSet.h" #include "StreamCompression.h" #include <cstring> // std::memcpy #include <iostream> #include <limits> #include <memory> #include <type_traits> // std::is_same #include <utility> // std::pair, std::make_pair #include <vector> class TestPointDataLeaf; namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace io { /// @brief openvdb::io::readCompressedValues specialized on PointDataIndex32 arrays to /// ignore the value mask, use a larger block size and use 16-bit size instead of 64-bit template<> inline void readCompressedValues( std::istream& is, PointDataIndex32* destBuf, Index destCount, const util::NodeMask<3>& /*valueMask*/, bool /*fromHalf*/) { using compression::bloscDecompress; const bool seek = destBuf == nullptr; const size_t destBytes = destCount*sizeof(PointDataIndex32); const size_t maximumBytes = std::numeric_limits<uint16_t>::max(); if (destBytes >= maximumBytes) { OPENVDB_THROW(openvdb::IoError, "Cannot read more than " << maximumBytes << " bytes in voxel values.") } uint16_t bytes16; const io::StreamMetadata::Ptr meta = io::getStreamMetadataPtr(is); if (seek && meta) { // buffer size temporarily stored in the StreamMetadata pass // to avoid having to perform an expensive disk read for 2-bytes bytes16 = static_cast<uint16_t>(meta->pass()); // seek over size of the compressed buffer is.seekg(sizeof(uint16_t), std::ios_base::cur); } else { // otherwise read from disk is.read(reinterpret_cast<char*>(&bytes16), sizeof(uint16_t)); } if (bytes16 == std::numeric_limits<uint16_t>::max()) { // read or seek uncompressed data if (seek) { is.seekg(destBytes, std::ios_base::cur); } else { is.read(reinterpret_cast<char*>(destBuf), destBytes); } } else { // read or seek uncompressed data if (seek) { is.seekg(int(bytes16), std::ios_base::cur); } else { // decompress into the destination buffer std::unique_ptr<char[]> bloscBuffer(new char[int(bytes16)]); is.read(bloscBuffer.get(), bytes16); std::unique_ptr<char[]> buffer = bloscDecompress( bloscBuffer.get(), destBytes, /*resize=*/false); std::memcpy(destBuf, buffer.get(), destBytes); } } } /// @brief openvdb::io::writeCompressedValues specialized on PointDataIndex32 arrays to /// ignore the value mask, use a larger block size and use 16-bit size instead of 64-bit template<> inline void writeCompressedValues( std::ostream& os, PointDataIndex32* srcBuf, Index srcCount, const util::NodeMask<3>& /*valueMask*/, const util::NodeMask<3>& /*childMask*/, bool /*toHalf*/) { using compression::bloscCompress; const size_t srcBytes = srcCount*sizeof(PointDataIndex32); const size_t maximumBytes = std::numeric_limits<uint16_t>::max(); if (srcBytes >= maximumBytes) { OPENVDB_THROW(openvdb::IoError, "Cannot write more than " << maximumBytes << " bytes in voxel values.") } const char* charBuffer = reinterpret_cast<const 
char*>(srcBuf); size_t compressedBytes; std::unique_ptr<char[]> buffer = bloscCompress( charBuffer, srcBytes, compressedBytes, /*resize=*/false); if (compressedBytes > 0) { auto bytes16 = static_cast<uint16_t>(compressedBytes); // clamp to 16-bit unsigned integer os.write(reinterpret_cast<const char*>(&bytes16), sizeof(uint16_t)); os.write(reinterpret_cast<const char*>(buffer.get()), compressedBytes); } else { auto bytes16 = static_cast<uint16_t>(maximumBytes); // max value indicates uncompressed os.write(reinterpret_cast<const char*>(&bytes16), sizeof(uint16_t)); os.write(reinterpret_cast<const char*>(srcBuf), srcBytes); } } template <typename T> inline void writeCompressedValuesSize(std::ostream& os, const T* srcBuf, Index srcCount) { using compression::bloscCompressedSize; const size_t srcBytes = srcCount*sizeof(T); const size_t maximumBytes = std::numeric_limits<uint16_t>::max(); if (srcBytes >= maximumBytes) { OPENVDB_THROW(openvdb::IoError, "Cannot write more than " << maximumBytes << " bytes in voxel values.") } const char* charBuffer = reinterpret_cast<const char*>(srcBuf); // calculate voxel buffer size after compression size_t compressedBytes = bloscCompressedSize(charBuffer, srcBytes); if (compressedBytes > 0) { auto bytes16 = static_cast<uint16_t>(compressedBytes); // clamp to 16-bit unsigned integer os.write(reinterpret_cast<const char*>(&bytes16), sizeof(uint16_t)); } else { auto bytes16 = static_cast<uint16_t>(maximumBytes); // max value indicates uncompressed os.write(reinterpret_cast<const char*>(&bytes16), sizeof(uint16_t)); } } } // namespace io // forward declaration namespace tree { template<Index, typename> struct SameLeafConfig; } //////////////////////////////////////// namespace points { // forward declaration template<typename T, Index Log2Dim> class PointDataLeafNode; /// @brief Point index tree configured to match the default VDB configurations. using PointDataTree = tree::Tree<tree::RootNode<tree::InternalNode<tree::InternalNode <PointDataLeafNode<PointDataIndex32, 3>, 4>, 5>>>; /// @brief Point data grid. using PointDataGrid = Grid<PointDataTree>; /// @brief Deep copy the descriptor across all leaf nodes. /// /// @param tree the PointDataTree. /// /// @return the new descriptor. /// /// @note This method will fail if the Descriptors in the tree are not all identical. template <typename PointDataTreeT> inline AttributeSet::Descriptor::Ptr makeDescriptorUnique(PointDataTreeT& tree); /// @brief Toggle the streaming mode on all attributes in the tree to collapse the attributes /// after deconstructing a bound AttributeHandle to each array. This results in better /// memory efficiency when the data is streamed into another data structure /// (typically for rendering). /// /// @param tree the PointDataTree. /// @param on @c true to enable streaming /// /// @note Multiple threads cannot safely access the same AttributeArray when using streaming. template <typename PointDataTreeT> inline void setStreamingMode(PointDataTreeT& tree, bool on = true); /// @brief Sequentially pre-fetch all delayed-load voxel and attribute data from disk in order /// to accelerate subsequent random access. /// /// @param tree the PointDataTree. 
/// @param position if enabled, prefetch the position attribute (default is on) /// @param otherAttributes if enabled, prefetch all other attributes (default is on) template <typename PointDataTreeT> inline void prefetch(PointDataTreeT& tree, bool position = true, bool otherAttributes = true); //////////////////////////////////////// template <typename T, Index Log2Dim> class PointDataLeafNode : public tree::LeafNode<T, Log2Dim>, io::MultiPass { public: using LeafNodeType = PointDataLeafNode<T, Log2Dim>; using Ptr = std::shared_ptr<PointDataLeafNode>; using ValueType = T; using ValueTypePair = std::pair<ValueType, ValueType>; using IndexArray = std::vector<ValueType>; using Descriptor = AttributeSet::Descriptor; //////////////////////////////////////// // The following methods had to be copied from the LeafNode class // to make the derived PointDataLeafNode class compatible with the tree structure. using BaseLeaf = tree::LeafNode<T, Log2Dim>; using NodeMaskType = util::NodeMask<Log2Dim>; using BaseLeaf::LOG2DIM; using BaseLeaf::TOTAL; using BaseLeaf::DIM; using BaseLeaf::NUM_VALUES; using BaseLeaf::NUM_VOXELS; using BaseLeaf::SIZE; using BaseLeaf::LEVEL; /// Default constructor PointDataLeafNode() : mAttributeSet(new AttributeSet) { } ~PointDataLeafNode() = default; /// Construct using deep copy of other PointDataLeafNode explicit PointDataLeafNode(const PointDataLeafNode& other) : BaseLeaf(other) , mAttributeSet(new AttributeSet(*other.mAttributeSet)) { } /// Construct using supplied origin, value and active status explicit PointDataLeafNode(const Coord& coords, const T& value = zeroVal<T>(), bool active = false) : BaseLeaf(coords, zeroVal<T>(), active) , mAttributeSet(new AttributeSet) { assertNonModifiableUnlessZero(value); } /// Construct using supplied origin, value and active status /// use attribute map from another PointDataLeafNode PointDataLeafNode(const PointDataLeafNode& other, const Coord& coords, const T& value = zeroVal<T>(), bool active = false) : BaseLeaf(coords, zeroVal<T>(), active) , mAttributeSet(new AttributeSet(*other.mAttributeSet)) { assertNonModifiableUnlessZero(value); } // Copy-construct from a PointIndexLeafNode with the same configuration but a different ValueType. template<typename OtherValueType> PointDataLeafNode(const tools::PointIndexLeafNode<OtherValueType, Log2Dim>& other) : BaseLeaf(other) , mAttributeSet(new AttributeSet) { } // Copy-construct from a LeafNode with the same configuration but a different ValueType. // Used for topology copies - explicitly sets the value (background) to zeroVal template <typename ValueType> PointDataLeafNode(const tree::LeafNode<ValueType, Log2Dim>& other, const T& value, TopologyCopy) : BaseLeaf(other, zeroVal<T>(), TopologyCopy()) , mAttributeSet(new AttributeSet) { assertNonModifiableUnlessZero(value); } // Copy-construct from a LeafNode with the same configuration but a different ValueType. // Used for topology copies - explicitly sets the on and off value (background) to zeroVal template <typename ValueType> PointDataLeafNode(const tree::LeafNode<ValueType, Log2Dim>& other, const T& /*offValue*/, const T& /*onValue*/, TopologyCopy) : BaseLeaf(other, zeroVal<T>(), zeroVal<T>(), TopologyCopy()) , mAttributeSet(new AttributeSet) { } PointDataLeafNode(PartialCreate, const Coord& coords, const T& value = zeroVal<T>(), bool active = false) : BaseLeaf(PartialCreate(), coords, value, active) , mAttributeSet(new AttributeSet) { assertNonModifiableUnlessZero(value); } public: /// Retrieve the attribute set. 
    const AttributeSet& attributeSet() const { return *mAttributeSet; }

    /// @brief Steal the attribute set; a new, empty attribute set is inserted in its place.
    AttributeSet::UniquePtr stealAttributeSet();

    /// @brief Create a new attribute set. Existing attributes will be removed.
    void initializeAttributes(const Descriptor::Ptr& descriptor, const Index arrayLength,
        const AttributeArray::ScopedRegistryLock* lock = nullptr);
    /// @brief Clear the attribute set.
    void clearAttributes(const bool updateValueMask = true,
        const AttributeArray::ScopedRegistryLock* lock = nullptr);

    /// @brief Returns @c true if an attribute with this index exists.
    /// @param pos Index of the attribute
    bool hasAttribute(const size_t pos) const;
    /// @brief Returns @c true if an attribute with this name exists.
    /// @param attributeName Name of the attribute
    bool hasAttribute(const Name& attributeName) const;

    /// @brief Append an attribute to the leaf.
    /// @param expected Existing descriptor is expected to match this parameter.
    /// @param replacement New descriptor to replace the existing one.
    /// @param pos Index of the new attribute in the descriptor replacement.
    /// @param strideOrTotalSize Stride of the attribute array (if constantStride), total size otherwise
    /// @param constantStride if @c false, stride is interpreted as total size of the array
    /// @param metadata optional default value metadata
    /// @param lock an optional scoped registry lock to avoid contention
    AttributeArray::Ptr appendAttribute(const Descriptor& expected, Descriptor::Ptr& replacement,
        const size_t pos, const Index strideOrTotalSize = 1,
        const bool constantStride = true,
        const Metadata* metadata = nullptr,
        const AttributeArray::ScopedRegistryLock* lock = nullptr);

    /// @brief Drop list of attributes.
    /// @param pos vector of attribute indices to drop
    /// @param expected Existing descriptor is expected to match this parameter.
    /// @param replacement New descriptor to replace the existing one.
    void dropAttributes(const std::vector<size_t>& pos,
        const Descriptor& expected, Descriptor::Ptr& replacement);

    /// @brief Reorder attribute set.
    /// @param replacement New descriptor to replace the existing one.
    void reorderAttributes(const Descriptor::Ptr& replacement);

    /// @brief Rename attributes in attribute set (order must remain the same).
    /// @param expected Existing descriptor is expected to match this parameter.
    /// @param replacement New descriptor to replace the existing one.
    void renameAttributes(const Descriptor& expected, Descriptor::Ptr& replacement);

    /// @brief Compact all attributes in attribute set.
    void compactAttributes();

    /// @brief Replace the underlying attribute set with the given @a attributeSet.
    /// @details This leaf will assume ownership of the given attribute set. The descriptors must
    /// match and the voxel offset values will need updating if the point order is different.
    /// @throws ValueError if @a allowMismatchingDescriptors is @c false and the descriptors
    /// do not match
    void replaceAttributeSet(AttributeSet* attributeSet, bool allowMismatchingDescriptors = false);

    /// @brief Replace the descriptor with a new one.
    /// The new Descriptor must exactly match the old one.
    void resetDescriptor(const Descriptor::Ptr& replacement);

    /// @brief Sets all of the voxel offset values on this leaf, from the given vector
    /// of @a offsets. If @a updateValueMask is true, then the active value mask will
    /// be updated so voxels with points are active and empty voxels are inactive.
void setOffsets(const std::vector<ValueType>& offsets, const bool updateValueMask = true); /// @brief Throws an error if the voxel values on this leaf are not monotonically /// increasing or within the bounds of the attribute arrays void validateOffsets() const; /// @brief Read-write attribute array reference from index /// @details Attribute arrays can be shared across leaf nodes, so non-const /// access will deep-copy the array to make it unique. Always prefer /// accessing const arrays where possible to eliminate this copying. /// { AttributeArray& attributeArray(const size_t pos); const AttributeArray& attributeArray(const size_t pos) const; const AttributeArray& constAttributeArray(const size_t pos) const; /// } /// @brief Read-write attribute array reference from name /// @details Attribute arrays can be shared across leaf nodes, so non-const /// access will deep-copy the array to make it unique. Always prefer /// accessing const arrays where possible to eliminate this copying. /// { AttributeArray& attributeArray(const Name& attributeName); const AttributeArray& attributeArray(const Name& attributeName) const; const AttributeArray& constAttributeArray(const Name& attributeName) const; /// } /// @brief Read-only group handle from group index GroupHandle groupHandle(const AttributeSet::Descriptor::GroupIndex& index) const; /// @brief Read-only group handle from group name GroupHandle groupHandle(const Name& group) const; /// @brief Read-write group handle from group index GroupWriteHandle groupWriteHandle(const AttributeSet::Descriptor::GroupIndex& index); /// @brief Read-write group handle from group name GroupWriteHandle groupWriteHandle(const Name& name); /// @brief Compute the total point count for the leaf Index64 pointCount() const; /// @brief Compute the total active (on) point count for the leaf Index64 onPointCount() const; /// @brief Compute the total inactive (off) point count for the leaf Index64 offPointCount() const; /// @brief Compute the point count in a specific group for the leaf Index64 groupPointCount(const Name& groupName) const; /// @brief Activate voxels with non-zero points, deactivate voxels with zero points. void updateValueMask(); //////////////////////////////////////// void setOffsetOn(Index offset, const ValueType& val); void setOffsetOnly(Index offset, const ValueType& val); /// @brief Return @c true if the given node (which may have a different @c ValueType /// than this node) has the same active value topology as this node. template<typename OtherType, Index OtherLog2Dim> bool hasSameTopology(const PointDataLeafNode<OtherType, OtherLog2Dim>* other) const { return BaseLeaf::hasSameTopology(other); } /// Check for buffer, state and origin equivalence first. /// If this returns true, do a deeper comparison on the attribute set to check bool operator==(const PointDataLeafNode& other) const { if(BaseLeaf::operator==(other) != true) return false; return (*this->mAttributeSet == *other.mAttributeSet); } bool operator!=(const PointDataLeafNode& other) const { return !(other == *this); } void addLeaf(PointDataLeafNode*) {} template<typename AccessorT> void addLeafAndCache(PointDataLeafNode*, AccessorT&) {} //@{ /// @brief Return a pointer to this node. 
PointDataLeafNode* touchLeaf(const Coord&) { return this; } template<typename AccessorT> PointDataLeafNode* touchLeafAndCache(const Coord&, AccessorT&) { return this; } template<typename NodeT, typename AccessorT> NodeT* probeNodeAndCache(const Coord&, AccessorT&) { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (!(std::is_same<NodeT,PointDataLeafNode>::value)) return nullptr; return reinterpret_cast<NodeT*>(this); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } PointDataLeafNode* probeLeaf(const Coord&) { return this; } template<typename AccessorT> PointDataLeafNode* probeLeafAndCache(const Coord&, AccessorT&) { return this; } //@} //@{ /// @brief Return a @const pointer to this node. const PointDataLeafNode* probeConstLeaf(const Coord&) const { return this; } template<typename AccessorT> const PointDataLeafNode* probeConstLeafAndCache(const Coord&, AccessorT&) const { return this; } template<typename AccessorT> const PointDataLeafNode* probeLeafAndCache(const Coord&, AccessorT&) const { return this; } const PointDataLeafNode* probeLeaf(const Coord&) const { return this; } template<typename NodeT, typename AccessorT> const NodeT* probeConstNodeAndCache(const Coord&, AccessorT&) const { OPENVDB_NO_UNREACHABLE_CODE_WARNING_BEGIN if (!(std::is_same<NodeT,PointDataLeafNode>::value)) return nullptr; return reinterpret_cast<const NodeT*>(this); OPENVDB_NO_UNREACHABLE_CODE_WARNING_END } //@} // I/O methods void readTopology(std::istream& is, bool fromHalf = false); void writeTopology(std::ostream& os, bool toHalf = false) const; Index buffers() const; void readBuffers(std::istream& is, bool fromHalf = false); void readBuffers(std::istream& is, const CoordBBox&, bool fromHalf = false); void writeBuffers(std::ostream& os, bool toHalf = false) const; Index64 memUsage() const; void evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels = true) const; /// @brief Return the bounding box of this node, i.e., the full index space /// spanned by this leaf node. CoordBBox getNodeBoundingBox() const; //////////////////////////////////////// // Disable all write methods to avoid unintentional changes // to the point-array offsets. 
void assertNonmodifiable() { assert(false && "Cannot modify voxel values in a PointDataTree."); } // some methods silently ignore attempts to modify the // point-array offsets if a zero value is used void assertNonModifiableUnlessZero(const ValueType& value) { if (value != zeroVal<T>()) this->assertNonmodifiable(); } void setActiveState(const Coord& xyz, bool on) { BaseLeaf::setActiveState(xyz, on); } void setActiveState(Index offset, bool on) { BaseLeaf::setActiveState(offset, on); } void setValueOnly(const Coord&, const ValueType&) { assertNonmodifiable(); } void setValueOnly(Index, const ValueType&) { assertNonmodifiable(); } void setValueOff(const Coord& xyz) { BaseLeaf::setValueOff(xyz); } void setValueOff(Index offset) { BaseLeaf::setValueOff(offset); } void setValueOff(const Coord&, const ValueType&) { assertNonmodifiable(); } void setValueOff(Index, const ValueType&) { assertNonmodifiable(); } void setValueOn(const Coord& xyz) { BaseLeaf::setValueOn(xyz); } void setValueOn(Index offset) { BaseLeaf::setValueOn(offset); } void setValueOn(const Coord&, const ValueType&) { assertNonmodifiable(); } void setValueOn(Index, const ValueType&) { assertNonmodifiable(); } void setValue(const Coord&, const ValueType&) { assertNonmodifiable(); } void setValuesOn() { BaseLeaf::setValuesOn(); } void setValuesOff() { BaseLeaf::setValuesOff(); } template<typename ModifyOp> void modifyValue(Index, const ModifyOp&) { assertNonmodifiable(); } template<typename ModifyOp> void modifyValue(const Coord&, const ModifyOp&) { assertNonmodifiable(); } template<typename ModifyOp> void modifyValueAndActiveState(const Coord&, const ModifyOp&) { assertNonmodifiable(); } // clipping is not yet supported void clip(const CoordBBox&, const ValueType& value) { assertNonModifiableUnlessZero(value); } void fill(const CoordBBox&, const ValueType&, bool); void fill(const ValueType& value) { assertNonModifiableUnlessZero(value); } void fill(const ValueType&, bool); template<typename AccessorT> void setValueOnlyAndCache(const Coord&, const ValueType&, AccessorT&) {assertNonmodifiable();} template<typename ModifyOp, typename AccessorT> void modifyValueAndActiveStateAndCache(const Coord&, const ModifyOp&, AccessorT&) { assertNonmodifiable(); } template<typename AccessorT> void setValueOffAndCache(const Coord&, const ValueType&, AccessorT&) { assertNonmodifiable(); } template<typename AccessorT> void setActiveStateAndCache(const Coord& xyz, bool on, AccessorT& parent) { BaseLeaf::setActiveStateAndCache(xyz, on, parent); } void resetBackground(const ValueType&, const ValueType& newBackground) { assertNonModifiableUnlessZero(newBackground); } void signedFloodFill(const ValueType&) { assertNonmodifiable(); } void signedFloodFill(const ValueType&, const ValueType&) { assertNonmodifiable(); } void negate() { assertNonmodifiable(); } friend class ::TestPointDataLeaf; using ValueOn = typename BaseLeaf::ValueOn; using ValueOff = typename BaseLeaf::ValueOff; using ValueAll = typename BaseLeaf::ValueAll; private: AttributeSet::UniquePtr mAttributeSet; uint16_t mVoxelBufferSize = 0; protected: using ChildOn = typename BaseLeaf::ChildOn; using ChildOff = typename BaseLeaf::ChildOff; using ChildAll = typename BaseLeaf::ChildAll; using MaskOnIterator = typename NodeMaskType::OnIterator; using MaskOffIterator = typename NodeMaskType::OffIterator; using MaskDenseIterator = typename NodeMaskType::DenseIterator; // During topology-only construction, access is needed // to protected/private members of other template instances. 
template<typename, Index> friend class PointDataLeafNode; friend class tree::IteratorBase<MaskOnIterator, PointDataLeafNode>; friend class tree::IteratorBase<MaskOffIterator, PointDataLeafNode>; friend class tree::IteratorBase<MaskDenseIterator, PointDataLeafNode>; public: /// @brief Leaf value voxel iterator ValueVoxelCIter beginValueVoxel(const Coord& ijk) const; public: using ValueOnIter = typename BaseLeaf::template ValueIter< MaskOnIterator, PointDataLeafNode, const ValueType, ValueOn>; using ValueOnCIter = typename BaseLeaf::template ValueIter< MaskOnIterator, const PointDataLeafNode, const ValueType, ValueOn>; using ValueOffIter = typename BaseLeaf::template ValueIter< MaskOffIterator, PointDataLeafNode, const ValueType, ValueOff>; using ValueOffCIter = typename BaseLeaf::template ValueIter< MaskOffIterator,const PointDataLeafNode,const ValueType,ValueOff>; using ValueAllIter = typename BaseLeaf::template ValueIter< MaskDenseIterator, PointDataLeafNode, const ValueType, ValueAll>; using ValueAllCIter = typename BaseLeaf::template ValueIter< MaskDenseIterator,const PointDataLeafNode,const ValueType,ValueAll>; using ChildOnIter = typename BaseLeaf::template ChildIter< MaskOnIterator, PointDataLeafNode, ChildOn>; using ChildOnCIter = typename BaseLeaf::template ChildIter< MaskOnIterator, const PointDataLeafNode, ChildOn>; using ChildOffIter = typename BaseLeaf::template ChildIter< MaskOffIterator, PointDataLeafNode, ChildOff>; using ChildOffCIter = typename BaseLeaf::template ChildIter< MaskOffIterator, const PointDataLeafNode, ChildOff>; using ChildAllIter = typename BaseLeaf::template DenseIter< PointDataLeafNode, ValueType, ChildAll>; using ChildAllCIter = typename BaseLeaf::template DenseIter< const PointDataLeafNode, const ValueType, ChildAll>; using IndexVoxelIter = IndexIter<ValueVoxelCIter, NullFilter>; using IndexAllIter = IndexIter<ValueAllCIter, NullFilter>; using IndexOnIter = IndexIter<ValueOnCIter, NullFilter>; using IndexOffIter = IndexIter<ValueOffCIter, NullFilter>; /// @brief Leaf index iterator IndexAllIter beginIndexAll() const { NullFilter filter; return this->beginIndex<ValueAllCIter, NullFilter>(filter); } IndexOnIter beginIndexOn() const { NullFilter filter; return this->beginIndex<ValueOnCIter, NullFilter>(filter); } IndexOffIter beginIndexOff() const { NullFilter filter; return this->beginIndex<ValueOffCIter, NullFilter>(filter); } template<typename IterT, typename FilterT> IndexIter<IterT, FilterT> beginIndex(const FilterT& filter) const; /// @brief Filtered leaf index iterator template<typename FilterT> IndexIter<ValueAllCIter, FilterT> beginIndexAll(const FilterT& filter) const { return this->beginIndex<ValueAllCIter, FilterT>(filter); } template<typename FilterT> IndexIter<ValueOnCIter, FilterT> beginIndexOn(const FilterT& filter) const { return this->beginIndex<ValueOnCIter, FilterT>(filter); } template<typename FilterT> IndexIter<ValueOffCIter, FilterT> beginIndexOff(const FilterT& filter) const { return this->beginIndex<ValueOffCIter, FilterT>(filter); } /// @brief Leaf index iterator from voxel IndexVoxelIter beginIndexVoxel(const Coord& ijk) const; /// @brief Filtered leaf index iterator from voxel template<typename FilterT> IndexIter<ValueVoxelCIter, FilterT> beginIndexVoxel(const Coord& ijk, const FilterT& filter) const; #define VMASK_ this->getValueMask() ValueOnCIter cbeginValueOn() const { return ValueOnCIter(VMASK_.beginOn(), this); } ValueOnCIter beginValueOn() const { return ValueOnCIter(VMASK_.beginOn(), this); } ValueOnIter beginValueOn() { 
return ValueOnIter(VMASK_.beginOn(), this); } ValueOffCIter cbeginValueOff() const { return ValueOffCIter(VMASK_.beginOff(), this); } ValueOffCIter beginValueOff() const { return ValueOffCIter(VMASK_.beginOff(), this); } ValueOffIter beginValueOff() { return ValueOffIter(VMASK_.beginOff(), this); } ValueAllCIter cbeginValueAll() const { return ValueAllCIter(VMASK_.beginDense(), this); } ValueAllCIter beginValueAll() const { return ValueAllCIter(VMASK_.beginDense(), this); } ValueAllIter beginValueAll() { return ValueAllIter(VMASK_.beginDense(), this); } ValueOnCIter cendValueOn() const { return ValueOnCIter(VMASK_.endOn(), this); } ValueOnCIter endValueOn() const { return ValueOnCIter(VMASK_.endOn(), this); } ValueOnIter endValueOn() { return ValueOnIter(VMASK_.endOn(), this); } ValueOffCIter cendValueOff() const { return ValueOffCIter(VMASK_.endOff(), this); } ValueOffCIter endValueOff() const { return ValueOffCIter(VMASK_.endOff(), this); } ValueOffIter endValueOff() { return ValueOffIter(VMASK_.endOff(), this); } ValueAllCIter cendValueAll() const { return ValueAllCIter(VMASK_.endDense(), this); } ValueAllCIter endValueAll() const { return ValueAllCIter(VMASK_.endDense(), this); } ValueAllIter endValueAll() { return ValueAllIter(VMASK_.endDense(), this); } ChildOnCIter cbeginChildOn() const { return ChildOnCIter(VMASK_.endOn(), this); } ChildOnCIter beginChildOn() const { return ChildOnCIter(VMASK_.endOn(), this); } ChildOnIter beginChildOn() { return ChildOnIter(VMASK_.endOn(), this); } ChildOffCIter cbeginChildOff() const { return ChildOffCIter(VMASK_.endOff(), this); } ChildOffCIter beginChildOff() const { return ChildOffCIter(VMASK_.endOff(), this); } ChildOffIter beginChildOff() { return ChildOffIter(VMASK_.endOff(), this); } ChildAllCIter cbeginChildAll() const { return ChildAllCIter(VMASK_.beginDense(), this); } ChildAllCIter beginChildAll() const { return ChildAllCIter(VMASK_.beginDense(), this); } ChildAllIter beginChildAll() { return ChildAllIter(VMASK_.beginDense(), this); } ChildOnCIter cendChildOn() const { return ChildOnCIter(VMASK_.endOn(), this); } ChildOnCIter endChildOn() const { return ChildOnCIter(VMASK_.endOn(), this); } ChildOnIter endChildOn() { return ChildOnIter(VMASK_.endOn(), this); } ChildOffCIter cendChildOff() const { return ChildOffCIter(VMASK_.endOff(), this); } ChildOffCIter endChildOff() const { return ChildOffCIter(VMASK_.endOff(), this); } ChildOffIter endChildOff() { return ChildOffIter(VMASK_.endOff(), this); } ChildAllCIter cendChildAll() const { return ChildAllCIter(VMASK_.endDense(), this); } ChildAllCIter endChildAll() const { return ChildAllCIter(VMASK_.endDense(), this); } ChildAllIter endChildAll() { return ChildAllIter(VMASK_.endDense(), this); } #undef VMASK_ }; // struct PointDataLeafNode //////////////////////////////////////// // PointDataLeafNode implementation template<typename T, Index Log2Dim> inline AttributeSet::UniquePtr PointDataLeafNode<T, Log2Dim>::stealAttributeSet() { AttributeSet::UniquePtr ptr = std::make_unique<AttributeSet>(); std::swap(ptr, mAttributeSet); return ptr; } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::initializeAttributes(const Descriptor::Ptr& descriptor, const Index arrayLength, const AttributeArray::ScopedRegistryLock* lock) { if (descriptor->size() != 1 || descriptor->find("P") == AttributeSet::INVALID_POS || descriptor->valueType(0) != typeNameAsString<Vec3f>()) { OPENVDB_THROW(IndexError, "Initializing attributes only allowed with one Vec3f position attribute."); } 
mAttributeSet.reset(new AttributeSet(descriptor, arrayLength, lock)); } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::clearAttributes(const bool updateValueMask, const AttributeArray::ScopedRegistryLock* lock) { mAttributeSet.reset(new AttributeSet(*mAttributeSet, 0, lock)); // zero voxel values this->buffer().fill(ValueType(0)); // if updateValueMask, also de-activate all voxels if (updateValueMask) this->setValuesOff(); } template<typename T, Index Log2Dim> inline bool PointDataLeafNode<T, Log2Dim>::hasAttribute(const size_t pos) const { return pos < mAttributeSet->size(); } template<typename T, Index Log2Dim> inline bool PointDataLeafNode<T, Log2Dim>::hasAttribute(const Name& attributeName) const { const size_t pos = mAttributeSet->find(attributeName); return pos != AttributeSet::INVALID_POS; } template<typename T, Index Log2Dim> inline AttributeArray::Ptr PointDataLeafNode<T, Log2Dim>::appendAttribute( const Descriptor& expected, Descriptor::Ptr& replacement, const size_t pos, const Index strideOrTotalSize, const bool constantStride, const Metadata* metadata, const AttributeArray::ScopedRegistryLock* lock) { return mAttributeSet->appendAttribute( expected, replacement, pos, strideOrTotalSize, constantStride, metadata, lock); } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::dropAttributes(const std::vector<size_t>& pos, const Descriptor& expected, Descriptor::Ptr& replacement) { mAttributeSet->dropAttributes(pos, expected, replacement); } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::reorderAttributes(const Descriptor::Ptr& replacement) { mAttributeSet->reorderAttributes(replacement); } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::renameAttributes(const Descriptor& expected, Descriptor::Ptr& replacement) { mAttributeSet->renameAttributes(expected, replacement); } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::compactAttributes() { for (size_t i = 0; i < mAttributeSet->size(); i++) { AttributeArray* array = mAttributeSet->get(i); array->compact(); } } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::replaceAttributeSet(AttributeSet* attributeSet, bool allowMismatchingDescriptors) { if (!attributeSet) { OPENVDB_THROW(ValueError, "Cannot replace with a null attribute set"); } if (!allowMismatchingDescriptors && mAttributeSet->descriptor() != attributeSet->descriptor()) { OPENVDB_THROW(ValueError, "Attribute set descriptors are not equal."); } mAttributeSet.reset(attributeSet); } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::resetDescriptor(const Descriptor::Ptr& replacement) { mAttributeSet->resetDescriptor(replacement); } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::setOffsets(const std::vector<ValueType>& offsets, const bool updateValueMask) { if (offsets.size() != LeafNodeType::NUM_VALUES) { OPENVDB_THROW(ValueError, "Offset vector size doesn't match number of voxels.") } for (Index index = 0; index < offsets.size(); ++index) { setOffsetOnly(index, offsets[index]); } if (updateValueMask) this->updateValueMask(); } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::validateOffsets() const { // Ensure all of the offset values are monotonically increasing for (Index index = 1; index < BaseLeaf::SIZE; ++index) { if (this->getValue(index-1) > this->getValue(index)) { OPENVDB_THROW(ValueError, "Voxel offset 
values are not monotonically increasing"); } } // Ensure all attribute arrays are of equal length for (size_t attributeIndex = 1; attributeIndex < mAttributeSet->size(); ++attributeIndex ) { if (mAttributeSet->getConst(attributeIndex-1)->size() != mAttributeSet->getConst(attributeIndex)->size()) { OPENVDB_THROW(ValueError, "Attribute arrays have inconsistent length"); } } // Ensure the last voxel's offset value matches the size of each attribute array if (mAttributeSet->size() > 0 && this->getValue(BaseLeaf::SIZE-1) != mAttributeSet->getConst(0)->size()) { OPENVDB_THROW(ValueError, "Last voxel offset value does not match attribute array length"); } } template<typename T, Index Log2Dim> inline AttributeArray& PointDataLeafNode<T, Log2Dim>::attributeArray(const size_t pos) { if (pos >= mAttributeSet->size()) OPENVDB_THROW(LookupError, "Attribute Out Of Range - " << pos); return *mAttributeSet->get(pos); } template<typename T, Index Log2Dim> inline const AttributeArray& PointDataLeafNode<T, Log2Dim>::attributeArray(const size_t pos) const { if (pos >= mAttributeSet->size()) OPENVDB_THROW(LookupError, "Attribute Out Of Range - " << pos); return *mAttributeSet->getConst(pos); } template<typename T, Index Log2Dim> inline const AttributeArray& PointDataLeafNode<T, Log2Dim>::constAttributeArray(const size_t pos) const { return this->attributeArray(pos); } template<typename T, Index Log2Dim> inline AttributeArray& PointDataLeafNode<T, Log2Dim>::attributeArray(const Name& attributeName) { const size_t pos = mAttributeSet->find(attributeName); if (pos == AttributeSet::INVALID_POS) OPENVDB_THROW(LookupError, "Attribute Not Found - " << attributeName); return *mAttributeSet->get(pos); } template<typename T, Index Log2Dim> inline const AttributeArray& PointDataLeafNode<T, Log2Dim>::attributeArray(const Name& attributeName) const { const size_t pos = mAttributeSet->find(attributeName); if (pos == AttributeSet::INVALID_POS) OPENVDB_THROW(LookupError, "Attribute Not Found - " << attributeName); return *mAttributeSet->getConst(pos); } template<typename T, Index Log2Dim> inline const AttributeArray& PointDataLeafNode<T, Log2Dim>::constAttributeArray(const Name& attributeName) const { return this->attributeArray(attributeName); } template<typename T, Index Log2Dim> inline GroupHandle PointDataLeafNode<T, Log2Dim>::groupHandle(const AttributeSet::Descriptor::GroupIndex& index) const { const AttributeArray& array = this->attributeArray(index.first); assert(isGroup(array)); const GroupAttributeArray& groupArray = GroupAttributeArray::cast(array); return GroupHandle(groupArray, index.second); } template<typename T, Index Log2Dim> inline GroupHandle PointDataLeafNode<T, Log2Dim>::groupHandle(const Name& name) const { const AttributeSet::Descriptor::GroupIndex index = this->attributeSet().groupIndex(name); return this->groupHandle(index); } template<typename T, Index Log2Dim> inline GroupWriteHandle PointDataLeafNode<T, Log2Dim>::groupWriteHandle(const AttributeSet::Descriptor::GroupIndex& index) { AttributeArray& array = this->attributeArray(index.first); assert(isGroup(array)); GroupAttributeArray& groupArray = GroupAttributeArray::cast(array); return GroupWriteHandle(groupArray, index.second); } template<typename T, Index Log2Dim> inline GroupWriteHandle PointDataLeafNode<T, Log2Dim>::groupWriteHandle(const Name& name) { const AttributeSet::Descriptor::GroupIndex index = this->attributeSet().groupIndex(name); return this->groupWriteHandle(index); } template<typename T, Index Log2Dim> template<typename 
ValueIterT, typename FilterT> inline IndexIter<ValueIterT, FilterT> PointDataLeafNode<T, Log2Dim>::beginIndex(const FilterT& filter) const { // generate no-op iterator if filter evaluates no indices if (filter.state() == index::NONE) { return IndexIter<ValueIterT, FilterT>(ValueIterT(), filter); } // copy filter to ensure thread-safety FilterT newFilter(filter); newFilter.reset(*this); using IterTraitsT = tree::IterTraits<LeafNodeType, ValueIterT>; // construct the value iterator and reset the filter to use this leaf ValueIterT valueIter = IterTraitsT::begin(*this); return IndexIter<ValueIterT, FilterT>(valueIter, newFilter); } template<typename T, Index Log2Dim> inline ValueVoxelCIter PointDataLeafNode<T, Log2Dim>::beginValueVoxel(const Coord& ijk) const { const Index index = LeafNodeType::coordToOffset(ijk); assert(index < BaseLeaf::SIZE); const ValueType end = this->getValue(index); const ValueType start = (index == 0) ? ValueType(0) : this->getValue(index - 1); return ValueVoxelCIter(start, end); } template<typename T, Index Log2Dim> inline typename PointDataLeafNode<T, Log2Dim>::IndexVoxelIter PointDataLeafNode<T, Log2Dim>::beginIndexVoxel(const Coord& ijk) const { ValueVoxelCIter iter = this->beginValueVoxel(ijk); return IndexVoxelIter(iter, NullFilter()); } template<typename T, Index Log2Dim> template<typename FilterT> inline IndexIter<ValueVoxelCIter, FilterT> PointDataLeafNode<T, Log2Dim>::beginIndexVoxel(const Coord& ijk, const FilterT& filter) const { ValueVoxelCIter iter = this->beginValueVoxel(ijk); FilterT newFilter(filter); newFilter.reset(*this); return IndexIter<ValueVoxelCIter, FilterT>(iter, newFilter); } template<typename T, Index Log2Dim> inline Index64 PointDataLeafNode<T, Log2Dim>::pointCount() const { return this->getLastValue(); } template<typename T, Index Log2Dim> inline Index64 PointDataLeafNode<T, Log2Dim>::onPointCount() const { if (this->isEmpty()) return 0; else if (this->isDense()) return this->pointCount(); return iterCount(this->beginIndexOn()); } template<typename T, Index Log2Dim> inline Index64 PointDataLeafNode<T, Log2Dim>::offPointCount() const { if (this->isEmpty()) return this->pointCount(); else if (this->isDense()) return 0; return iterCount(this->beginIndexOff()); } template<typename T, Index Log2Dim> inline Index64 PointDataLeafNode<T, Log2Dim>::groupPointCount(const Name& groupName) const { if (!this->attributeSet().descriptor().hasGroup(groupName)) { return Index64(0); } GroupFilter filter(groupName, this->attributeSet()); if (filter.state() == index::ALL) { return this->pointCount(); } else { return iterCount(this->beginIndexAll(filter)); } } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::updateValueMask() { ValueType start = 0, end = 0; for (Index n = 0; n < LeafNodeType::NUM_VALUES; n++) { end = this->getValue(n); this->setValueMask(n, (end - start) > 0); start = end; } } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::setOffsetOn(Index offset, const ValueType& val) { this->buffer().setValue(offset, val); this->setValueMaskOn(offset); } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::setOffsetOnly(Index offset, const ValueType& val) { this->buffer().setValue(offset, val); } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::readTopology(std::istream& is, bool fromHalf) { BaseLeaf::readTopology(is, fromHalf); } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::writeTopology(std::ostream& os, 
bool toHalf) const { BaseLeaf::writeTopology(os, toHalf); } template<typename T, Index Log2Dim> inline Index PointDataLeafNode<T, Log2Dim>::buffers() const { return Index( /*voxel buffer sizes*/ 1 + /*voxel buffers*/ 1 + /*attribute metadata*/ 1 + /*attribute uniform values*/ mAttributeSet->size() + /*attribute buffers*/ mAttributeSet->size() + /*cleanup*/ 1); } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::readBuffers(std::istream& is, bool fromHalf) { this->readBuffers(is, CoordBBox::inf(), fromHalf); } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::readBuffers(std::istream& is, const CoordBBox& /*bbox*/, bool fromHalf) { struct Local { static void destroyPagedStream(const io::StreamMetadata::AuxDataMap& auxData, const Index index) { // if paged stream exists, delete it std::string key("paged:" + std::to_string(index)); auto it = auxData.find(key); if (it != auxData.end()) { (const_cast<io::StreamMetadata::AuxDataMap&>(auxData)).erase(it); } } static compression::PagedInputStream& getOrInsertPagedStream( const io::StreamMetadata::AuxDataMap& auxData, const Index index) { std::string key("paged:" + std::to_string(index)); auto it = auxData.find(key); if (it != auxData.end()) { return *(boost::any_cast<compression::PagedInputStream::Ptr>(it->second)); } else { compression::PagedInputStream::Ptr pagedStream = std::make_shared<compression::PagedInputStream>(); (const_cast<io::StreamMetadata::AuxDataMap&>(auxData))[key] = pagedStream; return *pagedStream; } } static bool hasMatchingDescriptor(const io::StreamMetadata::AuxDataMap& auxData) { std::string matchingKey("hasMatchingDescriptor"); auto itMatching = auxData.find(matchingKey); return itMatching != auxData.end(); } static void clearMatchingDescriptor(const io::StreamMetadata::AuxDataMap& auxData) { std::string matchingKey("hasMatchingDescriptor"); std::string descriptorKey("descriptorPtr"); auto itMatching = auxData.find(matchingKey); auto itDescriptor = auxData.find(descriptorKey); if (itMatching != auxData.end()) (const_cast<io::StreamMetadata::AuxDataMap&>(auxData)).erase(itMatching); if (itDescriptor != auxData.end()) (const_cast<io::StreamMetadata::AuxDataMap&>(auxData)).erase(itDescriptor); } static void insertDescriptor( const io::StreamMetadata::AuxDataMap& auxData, const Descriptor::Ptr descriptor) { std::string descriptorKey("descriptorPtr"); std::string matchingKey("hasMatchingDescriptor"); auto itMatching = auxData.find(matchingKey); if (itMatching == auxData.end()) { // if matching bool is not found, insert "true" and the descriptor (const_cast<io::StreamMetadata::AuxDataMap&>(auxData))[matchingKey] = true; (const_cast<io::StreamMetadata::AuxDataMap&>(auxData))[descriptorKey] = descriptor; } } static AttributeSet::Descriptor::Ptr retrieveMatchingDescriptor(const io::StreamMetadata::AuxDataMap& auxData) { std::string descriptorKey("descriptorPtr"); auto itDescriptor = auxData.find(descriptorKey); assert(itDescriptor != auxData.end()); const Descriptor::Ptr descriptor = boost::any_cast<AttributeSet::Descriptor::Ptr>(itDescriptor->second); return descriptor; } }; const io::StreamMetadata::Ptr meta = io::getStreamMetadataPtr(is); if (!meta) { OPENVDB_THROW(IoError, "Cannot read in a PointDataLeaf without StreamMetadata."); } const Index pass(static_cast<uint16_t>(meta->pass())); const Index maximumPass(static_cast<uint16_t>(meta->pass() >> 16)); const Index attributes = (maximumPass - 4) / 2; if (pass == 0) { // pass 0 - voxel data sizes 
is.read(reinterpret_cast<char*>(&mVoxelBufferSize), sizeof(uint16_t)); Local::clearMatchingDescriptor(meta->auxData()); } else if (pass == 1) { // pass 1 - descriptor and attribute metadata if (Local::hasMatchingDescriptor(meta->auxData())) { AttributeSet::Descriptor::Ptr descriptor = Local::retrieveMatchingDescriptor(meta->auxData()); mAttributeSet->resetDescriptor(descriptor, /*allowMismatchingDescriptors=*/true); } else { uint8_t header; is.read(reinterpret_cast<char*>(&header), sizeof(uint8_t)); mAttributeSet->readDescriptor(is); if (header & uint8_t(1)) { AttributeSet::DescriptorPtr descriptor = mAttributeSet->descriptorPtr(); Local::insertDescriptor(meta->auxData(), descriptor); } // a forwards-compatibility mechanism for future use, // if a 0x2 bit is set, read and skip over a specific number of bytes if (header & uint8_t(2)) { uint64_t bytesToSkip; is.read(reinterpret_cast<char*>(&bytesToSkip), sizeof(uint64_t)); if (bytesToSkip > uint64_t(0)) { auto metadata = io::getStreamMetadataPtr(is); if (metadata && metadata->seekable()) { is.seekg(bytesToSkip, std::ios_base::cur); } else { std::vector<uint8_t> tempData(bytesToSkip); is.read(reinterpret_cast<char*>(&tempData[0]), bytesToSkip); } } } // this reader is only able to read headers with 0x1 and 0x2 bits set if (header > uint8_t(3)) { OPENVDB_THROW(IoError, "Unrecognised header flags in PointDataLeafNode"); } } mAttributeSet->readMetadata(is); } else if (pass < (attributes + 2)) { // pass 2...n+2 - attribute uniform values const size_t attributeIndex = pass - 2; AttributeArray* array = attributeIndex < mAttributeSet->size() ? mAttributeSet->get(attributeIndex) : nullptr; if (array) { compression::PagedInputStream& pagedStream = Local::getOrInsertPagedStream(meta->auxData(), static_cast<Index>(attributeIndex)); pagedStream.setInputStream(is); pagedStream.setSizeOnly(true); array->readPagedBuffers(pagedStream); } } else if (pass == attributes + 2) { // pass n+2 - voxel data const Index passValue(meta->pass()); // StreamMetadata pass variable used to temporarily store voxel buffer size io::StreamMetadata& nonConstMeta = const_cast<io::StreamMetadata&>(*meta); nonConstMeta.setPass(mVoxelBufferSize); // readBuffers() calls readCompressedValues specialization above BaseLeaf::readBuffers(is, fromHalf); // pass now reset to original value nonConstMeta.setPass(passValue); } else if (pass < (attributes*2 + 3)) { // pass n+2..2n+2 - attribute buffers const Index attributeIndex = pass - attributes - 3; AttributeArray* array = attributeIndex < mAttributeSet->size() ? 
mAttributeSet->get(attributeIndex) : nullptr; if (array) { compression::PagedInputStream& pagedStream = Local::getOrInsertPagedStream(meta->auxData(), attributeIndex); pagedStream.setInputStream(is); pagedStream.setSizeOnly(false); array->readPagedBuffers(pagedStream); } // cleanup paged stream reference in auxiliary metadata if (pass > attributes + 3) { Local::destroyPagedStream(meta->auxData(), attributeIndex-1); } } else if (pass < buffers()) { // pass 2n+3 - cleanup last paged stream const Index attributeIndex = pass - attributes - 4; Local::destroyPagedStream(meta->auxData(), attributeIndex); } } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::writeBuffers(std::ostream& os, bool toHalf) const { struct Local { static void destroyPagedStream(const io::StreamMetadata::AuxDataMap& auxData, const Index index) { // if paged stream exists, flush and delete it std::string key("paged:" + std::to_string(index)); auto it = auxData.find(key); if (it != auxData.end()) { compression::PagedOutputStream& stream = *(boost::any_cast<compression::PagedOutputStream::Ptr>(it->second)); stream.flush(); (const_cast<io::StreamMetadata::AuxDataMap&>(auxData)).erase(it); } } static compression::PagedOutputStream& getOrInsertPagedStream( const io::StreamMetadata::AuxDataMap& auxData, const Index index) { std::string key("paged:" + std::to_string(index)); auto it = auxData.find(key); if (it != auxData.end()) { return *(boost::any_cast<compression::PagedOutputStream::Ptr>(it->second)); } else { compression::PagedOutputStream::Ptr pagedStream = std::make_shared<compression::PagedOutputStream>(); (const_cast<io::StreamMetadata::AuxDataMap&>(auxData))[key] = pagedStream; return *pagedStream; } } static void insertDescriptor( const io::StreamMetadata::AuxDataMap& auxData, const Descriptor::Ptr descriptor) { std::string descriptorKey("descriptorPtr"); std::string matchingKey("hasMatchingDescriptor"); auto itMatching = auxData.find(matchingKey); auto itDescriptor = auxData.find(descriptorKey); if (itMatching == auxData.end()) { // if matching bool is not found, insert "true" and the descriptor (const_cast<io::StreamMetadata::AuxDataMap&>(auxData))[matchingKey] = true; assert(itDescriptor == auxData.end()); (const_cast<io::StreamMetadata::AuxDataMap&>(auxData))[descriptorKey] = descriptor; } else { // if matching bool is found and is false, early exit (a previous descriptor did not match) bool matching = boost::any_cast<bool>(itMatching->second); if (!matching) return; assert(itDescriptor != auxData.end()); // if matching bool is true, check whether the existing descriptor matches the current one and set // matching bool to false if not const Descriptor::Ptr existingDescriptor = boost::any_cast<AttributeSet::Descriptor::Ptr>(itDescriptor->second); if (*existingDescriptor != *descriptor) { (const_cast<io::StreamMetadata::AuxDataMap&>(auxData))[matchingKey] = false; } } } static bool hasMatchingDescriptor(const io::StreamMetadata::AuxDataMap& auxData) { std::string matchingKey("hasMatchingDescriptor"); auto itMatching = auxData.find(matchingKey); // if matching key is not found, no matching descriptor if (itMatching == auxData.end()) return false; // if matching key is found and is false, no matching descriptor if (!boost::any_cast<bool>(itMatching->second)) return false; return true; } static AttributeSet::Descriptor::Ptr retrieveMatchingDescriptor(const io::StreamMetadata::AuxDataMap& auxData) { std::string descriptorKey("descriptorPtr"); auto itDescriptor = auxData.find(descriptorKey); // 
if matching key is true, however descriptor is not found, it has already been retrieved if (itDescriptor == auxData.end()) return nullptr; // otherwise remove it and return it const Descriptor::Ptr descriptor = boost::any_cast<AttributeSet::Descriptor::Ptr>(itDescriptor->second); (const_cast<io::StreamMetadata::AuxDataMap&>(auxData)).erase(itDescriptor); return descriptor; } static void clearMatchingDescriptor(const io::StreamMetadata::AuxDataMap& auxData) { std::string matchingKey("hasMatchingDescriptor"); std::string descriptorKey("descriptorPtr"); auto itMatching = auxData.find(matchingKey); auto itDescriptor = auxData.find(descriptorKey); if (itMatching != auxData.end()) (const_cast<io::StreamMetadata::AuxDataMap&>(auxData)).erase(itMatching); if (itDescriptor != auxData.end()) (const_cast<io::StreamMetadata::AuxDataMap&>(auxData)).erase(itDescriptor); } }; const io::StreamMetadata::Ptr meta = io::getStreamMetadataPtr(os); if (!meta) { OPENVDB_THROW(IoError, "Cannot write out a PointDataLeaf without StreamMetadata."); } const Index pass(static_cast<uint16_t>(meta->pass())); // leaf traversal analysis deduces the number of passes to perform for this leaf // then updates the leaf traversal value to ensure all passes will be written if (meta->countingPasses()) { const Index requiredPasses = this->buffers(); if (requiredPasses > pass) { meta->setPass(requiredPasses); } return; } const Index maximumPass(static_cast<uint16_t>(meta->pass() >> 16)); const Index attributes = (maximumPass - 4) / 2; if (pass == 0) { // pass 0 - voxel data sizes io::writeCompressedValuesSize(os, this->buffer().data(), SIZE); // track if descriptor is shared or not Local::insertDescriptor(meta->auxData(), mAttributeSet->descriptorPtr()); } else if (pass == 1) { // pass 1 - descriptor and attribute metadata bool matchingDescriptor = Local::hasMatchingDescriptor(meta->auxData()); if (matchingDescriptor) { AttributeSet::Descriptor::Ptr descriptor = Local::retrieveMatchingDescriptor(meta->auxData()); if (descriptor) { // write a header to indicate a shared descriptor uint8_t header(1); os.write(reinterpret_cast<const char*>(&header), sizeof(uint8_t)); mAttributeSet->writeDescriptor(os, /*transient=*/false); } } else { // write a header to indicate a non-shared descriptor uint8_t header(0); os.write(reinterpret_cast<const char*>(&header), sizeof(uint8_t)); mAttributeSet->writeDescriptor(os, /*transient=*/false); } mAttributeSet->writeMetadata(os, /*transient=*/false, /*paged=*/true); } else if (pass < attributes + 2) { // pass 2...n+2 - attribute buffer sizes const Index attributeIndex = pass - 2; // destroy previous paged stream if (pass > 2) { Local::destroyPagedStream(meta->auxData(), attributeIndex-1); } const AttributeArray* array = attributeIndex < mAttributeSet->size() ? 
mAttributeSet->getConst(attributeIndex) : nullptr; if (array) { compression::PagedOutputStream& pagedStream = Local::getOrInsertPagedStream(meta->auxData(), attributeIndex); pagedStream.setOutputStream(os); pagedStream.setSizeOnly(true); array->writePagedBuffers(pagedStream, /*outputTransient*/false); } } else if (pass == attributes + 2) { const Index attributeIndex = pass - 3; Local::destroyPagedStream(meta->auxData(), attributeIndex); // pass n+2 - voxel data BaseLeaf::writeBuffers(os, toHalf); } else if (pass < (attributes*2 + 3)) { // pass n+3...2n+3 - attribute buffers const Index attributeIndex = pass - attributes - 3; // destroy previous paged stream if (pass > attributes + 2) { Local::destroyPagedStream(meta->auxData(), attributeIndex-1); } const AttributeArray* array = attributeIndex < mAttributeSet->size() ? mAttributeSet->getConst(attributeIndex) : nullptr; if (array) { compression::PagedOutputStream& pagedStream = Local::getOrInsertPagedStream(meta->auxData(), attributeIndex); pagedStream.setOutputStream(os); pagedStream.setSizeOnly(false); array->writePagedBuffers(pagedStream, /*outputTransient*/false); } } else if (pass < buffers()) { Local::clearMatchingDescriptor(meta->auxData()); // pass 2n+3 - cleanup last paged stream const Index attributeIndex = pass - attributes - 4; Local::destroyPagedStream(meta->auxData(), attributeIndex); } } template<typename T, Index Log2Dim> inline Index64 PointDataLeafNode<T, Log2Dim>::memUsage() const { return BaseLeaf::memUsage() + mAttributeSet->memUsage(); } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::evalActiveBoundingBox(CoordBBox& bbox, bool visitVoxels) const { BaseLeaf::evalActiveBoundingBox(bbox, visitVoxels); } template<typename T, Index Log2Dim> inline CoordBBox PointDataLeafNode<T, Log2Dim>::getNodeBoundingBox() const { return BaseLeaf::getNodeBoundingBox(); } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::fill(const CoordBBox& bbox, const ValueType& value, bool active) { if (!this->allocate()) return; this->assertNonModifiableUnlessZero(value); // active state is permitted to be updated for (Int32 x = bbox.min().x(); x <= bbox.max().x(); ++x) { const Index offsetX = (x & (DIM-1u)) << 2*Log2Dim; for (Int32 y = bbox.min().y(); y <= bbox.max().y(); ++y) { const Index offsetXY = offsetX + ((y & (DIM-1u)) << Log2Dim); for (Int32 z = bbox.min().z(); z <= bbox.max().z(); ++z) { const Index offset = offsetXY + (z & (DIM-1u)); this->setValueMask(offset, active); } } } } template<typename T, Index Log2Dim> inline void PointDataLeafNode<T, Log2Dim>::fill(const ValueType& value, bool active) { this->assertNonModifiableUnlessZero(value); // active state is permitted to be updated if (active) this->setValuesOn(); else this->setValuesOff(); } //////////////////////////////////////// template <typename PointDataTreeT> inline AttributeSet::Descriptor::Ptr makeDescriptorUnique(PointDataTreeT& tree) { auto leafIter = tree.beginLeaf(); if (!leafIter) return nullptr; const AttributeSet::Descriptor& descriptor = leafIter->attributeSet().descriptor(); auto newDescriptor = std::make_shared<AttributeSet::Descriptor>(descriptor); for (; leafIter; ++leafIter) { leafIter->resetDescriptor(newDescriptor); } return newDescriptor; } template <typename PointDataTreeT> inline void setStreamingMode(PointDataTreeT& tree, bool on) { auto leafIter = tree.beginLeaf(); for (; leafIter; ++leafIter) { for (size_t i = 0; i < leafIter->attributeSet().size(); i++) { 
leafIter->attributeArray(i).setStreaming(on); } } } template <typename PointDataTreeT> inline void prefetch(PointDataTreeT& tree, bool position, bool otherAttributes) { // NOTE: the following is intentionally not multi-threaded, as the I/O // is faster if done in the order in which it is stored in the file auto leaf = tree.cbeginLeaf(); if (!leaf) return; const auto& attributeSet = leaf->attributeSet(); // pre-fetch leaf data for ( ; leaf; ++leaf) { leaf->buffer().data(); } // pre-fetch position attribute data (position will typically have index 0) size_t positionIndex = attributeSet.find("P"); if (position && positionIndex != AttributeSet::INVALID_POS) { for (leaf = tree.cbeginLeaf(); leaf; ++leaf) { assert(leaf->hasAttribute(positionIndex)); leaf->constAttributeArray(positionIndex).loadData(); } } // pre-fetch other attribute data if (otherAttributes) { const size_t attributes = attributeSet.size(); for (size_t attributeIndex = 0; attributeIndex < attributes; attributeIndex++) { if (attributeIndex == positionIndex) continue; for (leaf = tree.cbeginLeaf(); leaf; ++leaf) { assert(leaf->hasAttribute(attributeIndex)); leaf->constAttributeArray(attributeIndex).loadData(); } } } } namespace internal { /// @brief Global registration of point data-related types /// @note This is called from @c openvdb::initialize, so there is /// no need to call it directly. void initialize(); /// @brief Global deregistration of point data-related types /// @note This is called from @c openvdb::uninitialize, so there is /// no need to call it directly. void uninitialize(); /// @brief Recursive node chain which generates a openvdb::TypeList value /// converted types of nodes to PointDataGrid nodes of the same configuration, /// rooted at RootNodeType in reverse order, from LeafNode to RootNode. /// See also TreeConverter<>. template<typename HeadT, int HeadLevel> struct PointDataNodeChain { using SubtreeT = typename PointDataNodeChain<typename HeadT::ChildNodeType, HeadLevel-1>::Type; using RootNodeT = tree::RootNode<typename SubtreeT::Back>; using Type = typename SubtreeT::template Append<RootNodeT>; }; // Specialization for internal nodes which require their embedded child type to // be switched template <typename ChildT, Index Log2Dim, int HeadLevel> struct PointDataNodeChain<tree::InternalNode<ChildT, Log2Dim>, HeadLevel> { using SubtreeT = typename PointDataNodeChain<ChildT, HeadLevel-1>::Type; using InternalNodeT = tree::InternalNode<typename SubtreeT::Back, Log2Dim>; using Type = typename SubtreeT::template Append<InternalNodeT>; }; // Specialization for the last internal node of a node chain, expected // to be templated on a leaf node template <typename ChildT, Index Log2Dim> struct PointDataNodeChain<tree::InternalNode<ChildT, Log2Dim>, /*HeadLevel=*/1> { using LeafNodeT = PointDataLeafNode<PointDataIndex32, ChildT::LOG2DIM>; using InternalNodeT = tree::InternalNode<LeafNodeT, Log2Dim>; using Type = TypeList<LeafNodeT, InternalNodeT>; }; } // namespace internal /// @brief Similiar to ValueConverter, but allows for tree configuration conversion /// to a PointDataTree. 
/// ValueConverter<PointDataIndex32> cannot be used as a
/// PointDataLeafNode is not a specialization of LeafNode
template <typename TreeType>
struct TreeConverter {
    using RootNodeT = typename TreeType::RootNodeType;
    using NodeChainT = typename internal::PointDataNodeChain<RootNodeT, RootNodeT::LEVEL>::Type;
    using Type = tree::Tree<typename NodeChainT::Back>;
};

} // namespace points

////////////////////////////////////////

namespace tree {

/// Helper metafunction used to implement LeafNode::SameConfiguration
/// (which, as an inner class, can't be independently specialized)
template<Index Dim1, typename T2>
struct SameLeafConfig<Dim1, points::PointDataLeafNode<T2, Dim1>> { static const bool value = true; };

} // namespace tree
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb

#endif // OPENVDB_POINTS_POINT_DATA_GRID_HAS_BEEN_INCLUDED
69,022
C
39.106334
129
0.669453
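The PointDataGrid.h header above declares the leaf-level index iterators (beginIndexOn, beginIndexVoxel, the filtered variants) and the per-leaf attribute accessors (constAttributeArray, attributeSet) that most point workflows are built on. The following is a minimal sketch of how those pieces are usually combined, assuming a PointDataGrid that already carries the mandatory voxel-space Vec3f "P" position attribute; the function name printWorldPositions is illustrative only.

#include <openvdb/openvdb.h>
#include <openvdb/points/PointDataGrid.h>
#include <openvdb/points/AttributeArray.h>
#include <iostream>

// Illustrative sketch: walk every leaf of a PointDataGrid and print the
// world-space position of each active point. Assumes the grid stores the
// usual voxel-space Vec3f "P" attribute.
inline void printWorldPositions(const openvdb::points::PointDataGrid& grid)
{
    using namespace openvdb;
    for (auto leaf = grid.tree().cbeginLeaf(); leaf; ++leaf) {
        // Const access avoids the copy-on-write deep copy that non-const
        // attributeArray() may trigger (see the header notes above).
        points::AttributeHandle<Vec3f> positionHandle(leaf->constAttributeArray("P"));
        // beginIndexOn() visits the point indices of active voxels only.
        for (auto indexIter = leaf->beginIndexOn(); indexIter; ++indexIter) {
            const Vec3f voxelPos = positionHandle.get(*indexIter);   // relative to the voxel centre
            const Vec3d ijk = indexIter.getCoord().asVec3d();        // voxel coordinate
            const Vec3d worldPos = grid.transform().indexToWorld(voxelPos + ijk);
            std::cout << worldPos << std::endl;
        }
    }
}

Constant attribute access is deliberate here: as the header documents, attribute arrays can be shared across leaf nodes, so the const path avoids forcing each array to become unique.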
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/IndexFilter.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file points/IndexFilter.h /// /// @author Dan Bailey /// /// @brief Index filters primarily designed to be used with a FilterIndexIter. /// /// Filters must adhere to the interface described in the example below: /// @code /// struct MyFilter /// { /// // Return true when the filter has been initialized for first use /// bool initialized() { return true; } /// /// // Return index::ALL if all points are valid, index::NONE if no points are valid /// // and index::PARTIAL if some points are valid /// index::State state() { return index::PARTIAL; } /// /// // Return index::ALL if all points in this leaf are valid, index::NONE if no points /// // in this leaf are valid and index::PARTIAL if some points in this leaf are valid /// template <typename LeafT> /// index::State state(const LeafT&) { return index::PARTIAL; } /// /// // Resets the filter to refer to the specified leaf, all subsequent valid() calls /// // will be relative to this leaf until reset() is called with a different leaf. /// // Although a required method, many filters will provide an empty implementation if /// // there is no leaf-specific logic needed. /// template <typename LeafT> void reset(const LeafT&) { } /// /// // Returns true if the filter is valid for the supplied iterator /// template <typename IterT> bool valid(const IterT&) { return true; } /// }; /// @endcode #ifndef OPENVDB_POINTS_INDEX_FILTER_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_INDEX_FILTER_HAS_BEEN_INCLUDED #include <openvdb/version.h> #include <openvdb/Types.h> #include <openvdb/math/Transform.h> #include <openvdb/tools/Interpolation.h> #include "IndexIterator.h" #include "AttributeArray.h" #include "AttributeGroup.h" #include "AttributeSet.h" #include <random> // std::mt19937 #include <numeric> // std::iota #include <unordered_map> class TestIndexFilter; namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { //////////////////////////////////////// namespace index_filter_internal { // generate a random subset of n indices from the range [0:m] template <typename RandGenT, typename IntType> std::vector<IntType> generateRandomSubset(const unsigned int seed, const IntType n, const IntType m) { if (n <= 0) return std::vector<IntType>(); // fill vector with ascending indices std::vector<IntType> values(m); std::iota(values.begin(), values.end(), 0); if (n >= m) return values; // shuffle indices using random generator RandGenT randGen(seed); std::shuffle(values.begin(), values.end(), randGen); // resize the container to n elements values.resize(n); // sort the subset of the indices vector that will be used std::sort(values.begin(), values.end()); return values; } } // namespace index_filter_internal /// Index filtering on active / inactive state of host voxel template <bool On> class ValueMaskFilter { public: static bool initialized() { return true; } static index::State state() { return index::PARTIAL; } template <typename LeafT> static index::State state(const LeafT& leaf) { if (leaf.isDense()) return On ? index::ALL : index::NONE; else if (leaf.isEmpty()) return On ? index::NONE : index::ALL; return index::PARTIAL; } template <typename LeafT> void reset(const LeafT&) { } template <typename IterT> bool valid(const IterT& iter) const { const bool valueOn = iter.isValueOn(); return On ? 
valueOn : !valueOn; } }; using ActiveFilter = ValueMaskFilter<true>; using InactiveFilter = ValueMaskFilter<false>; /// Index filtering on multiple group membership for inclusion and exclusion /// /// @note include filters are applied first, then exclude filters class MultiGroupFilter { public: using NameVector = std::vector<Name>; using IndexVector = std::vector<AttributeSet::Descriptor::GroupIndex>; using HandleVector = std::vector<GroupHandle>; private: static IndexVector namesToIndices(const AttributeSet& attributeSet, const NameVector& names) { IndexVector indices; for (const auto& name : names) { try { indices.emplace_back(attributeSet.groupIndex(name)); } catch (LookupError&) { // silently drop group names that don't exist } } return indices; } public: MultiGroupFilter( const NameVector& include, const NameVector& exclude, const AttributeSet& attributeSet) : mInclude(MultiGroupFilter::namesToIndices(attributeSet, include)) , mExclude(MultiGroupFilter::namesToIndices(attributeSet, exclude)) { } MultiGroupFilter( const IndexVector& include, const IndexVector& exclude) : mInclude(include) , mExclude(exclude) { } MultiGroupFilter( const MultiGroupFilter& filter) : mInclude(filter.mInclude) , mExclude(filter.mExclude) , mIncludeHandles(filter.mIncludeHandles) , mExcludeHandles(filter.mExcludeHandles) , mInitialized(filter.mInitialized) { } inline bool initialized() const { return mInitialized; } inline index::State state() const { return (mInclude.empty() && mExclude.empty()) ? index::ALL : index::PARTIAL; } template <typename LeafT> static index::State state(const LeafT&) { return index::PARTIAL; } template <typename LeafT> void reset(const LeafT& leaf) { mIncludeHandles.clear(); mExcludeHandles.clear(); for (const auto& i : mInclude) { mIncludeHandles.emplace_back(leaf.groupHandle(i)); } for (const auto& i : mExclude) { mExcludeHandles.emplace_back(leaf.groupHandle(i)); } mInitialized = true; } template <typename IterT> bool valid(const IterT& iter) const { assert(mInitialized); // accept no include filters as valid bool includeValid = mIncludeHandles.empty(); for (const GroupHandle& handle : mIncludeHandles) { if (handle.getUnsafe(*iter)) { includeValid = true; break; } } if (!includeValid) return false; for (const GroupHandle& handle : mExcludeHandles) { if (handle.getUnsafe(*iter)) return false; } return true; } private: IndexVector mInclude; IndexVector mExclude; HandleVector mIncludeHandles; HandleVector mExcludeHandles; bool mInitialized = false; }; // class MultiGroupFilter // Random index filtering per leaf template <typename PointDataTreeT, typename RandGenT> class RandomLeafFilter { public: using SeedCountPair = std::pair<Index, Index>; using LeafMap = std::unordered_map<openvdb::Coord, SeedCountPair>; RandomLeafFilter( const PointDataTreeT& tree, const Index64 targetPoints, const unsigned int seed = 0) { Index64 currentPoints = 0; for (auto iter = tree.cbeginLeaf(); iter; ++iter) { currentPoints += iter->pointCount(); } const float factor = targetPoints > currentPoints ? 
1.0f : float(targetPoints) / float(currentPoints); std::mt19937 generator(seed); std::uniform_int_distribution<unsigned int> dist(0, std::numeric_limits<unsigned int>::max() - 1); Index32 leafCounter = 0; float totalPointsFloat = 0.0f; int totalPoints = 0; for (auto iter = tree.cbeginLeaf(); iter; ++iter) { // for the last leaf - use the remaining points to reach the target points if (leafCounter + 1 == tree.leafCount()) { const int leafPoints = static_cast<int>(targetPoints) - totalPoints; mLeafMap[iter->origin()] = SeedCountPair(dist(generator), leafPoints); break; } totalPointsFloat += factor * static_cast<float>(iter->pointCount()); const auto leafPoints = static_cast<int>(math::Floor(totalPointsFloat)); totalPointsFloat -= static_cast<float>(leafPoints); totalPoints += leafPoints; mLeafMap[iter->origin()] = SeedCountPair(dist(generator), leafPoints); leafCounter++; } } inline bool initialized() const { return mNextIndex == -1; } static index::State state() { return index::PARTIAL; } template <typename LeafT> static index::State state(const LeafT&) { return index::PARTIAL; } template <typename LeafT> void reset(const LeafT& leaf) { using index_filter_internal::generateRandomSubset; auto it = mLeafMap.find(leaf.origin()); if (it == mLeafMap.end()) { OPENVDB_THROW(openvdb::KeyError, "Cannot find leaf origin in map for random filter - " << leaf.origin()); } const SeedCountPair& value = it->second; const unsigned int seed = static_cast<unsigned int>(value.first); const auto total = static_cast<Index>(leaf.pointCount()); mCount = std::min(value.second, total); mIndices = generateRandomSubset<RandGenT, int>(seed, mCount, total); mSubsetOffset = -1; mNextIndex = -1; } inline void next() const { mSubsetOffset++; mNextIndex = mSubsetOffset >= mCount ? std::numeric_limits<int>::max() : mIndices[mSubsetOffset]; } template <typename IterT> bool valid(const IterT& iter) const { const int index = *iter; while (mNextIndex < index) this->next(); return mNextIndex == index; } protected: friend class ::TestIndexFilter; private: LeafMap mLeafMap; std::vector<int> mIndices; int mCount = 0; mutable int mSubsetOffset = -1; mutable int mNextIndex = -1; }; // class RandomLeafFilter // Hash attribute value for deterministic, but approximate filtering template <typename RandGenT, typename IntType> class AttributeHashFilter { public: using Handle = AttributeHandle<IntType>; AttributeHashFilter(const size_t index, const double percentage, const unsigned int seed = 0) : mIndex(index) , mFactor(percentage / 100.0) , mSeed(seed) { } AttributeHashFilter(const AttributeHashFilter& filter) : mIndex(filter.mIndex) , mFactor(filter.mFactor) , mSeed(filter.mSeed) { if (filter.mIdHandle) mIdHandle.reset(new Handle(*filter.mIdHandle)); } inline bool initialized() const { return bool(mIdHandle); } static index::State state() { return index::PARTIAL; } template <typename LeafT> static index::State state(const LeafT&) { return index::PARTIAL; } template <typename LeafT> void reset(const LeafT& leaf) { assert(leaf.hasAttribute(mIndex)); mIdHandle.reset(new Handle(leaf.constAttributeArray(mIndex))); } template <typename IterT> bool valid(const IterT& iter) const { assert(mIdHandle); const IntType id = mIdHandle->get(*iter); const unsigned int seed = mSeed + static_cast<unsigned int>(id); RandGenT generator(seed); std::uniform_real_distribution<double> dist(0.0, 1.0); return dist(generator) < mFactor; } private: const size_t mIndex; const double mFactor; const unsigned int mSeed; typename Handle::UniquePtr mIdHandle; }; // class 
AttributeHashFilter template <typename LevelSetGridT> class LevelSetFilter { public: using ValueT = typename LevelSetGridT::ValueType; using Handle = AttributeHandle<openvdb::Vec3f>; LevelSetFilter( const LevelSetGridT& grid, const math::Transform& transform, const ValueT min, const ValueT max) : mAccessor(grid.getConstAccessor()) , mLevelSetTransform(grid.transform()) , mTransform(transform) , mMin(min) , mMax(max) { } LevelSetFilter(const LevelSetFilter& filter) : mAccessor(filter.mAccessor) , mLevelSetTransform(filter.mLevelSetTransform) , mTransform(filter.mTransform) , mMin(filter.mMin) , mMax(filter.mMax) { if (filter.mPositionHandle) mPositionHandle.reset(new Handle(*filter.mPositionHandle)); } inline bool initialized() const { return bool(mPositionHandle); } static index::State state() { return index::PARTIAL; } template <typename LeafT> static index::State state(const LeafT&) { return index::PARTIAL; } template <typename LeafT> void reset(const LeafT& leaf) { mPositionHandle.reset(new Handle(leaf.constAttributeArray("P"))); } template <typename IterT> bool valid(const IterT& iter) const { assert(mPositionHandle); assert(iter); const openvdb::Coord ijk = iter.getCoord(); const openvdb::Vec3f voxelIndexSpace = ijk.asVec3d(); // Retrieve point position in voxel space const openvdb::Vec3f& pointVoxelSpace = mPositionHandle->get(*iter); // Compute point position in index space const openvdb::Vec3f pointWorldSpace = mTransform.indexToWorld(pointVoxelSpace + voxelIndexSpace); const openvdb::Vec3f pointIndexSpace = mLevelSetTransform.worldToIndex(pointWorldSpace); // Perform level-set sampling const typename LevelSetGridT::ValueType value = tools::BoxSampler::sample(mAccessor, pointIndexSpace); // if min is greater than max, we invert so that values are valid outside of the range (not inside) const bool invert = mMin > mMax; return invert ? (value < mMax || value > mMin) : (value < mMax && value > mMin); } private: // not a reference to ensure const-accessor is unique per-thread const typename LevelSetGridT::ConstAccessor mAccessor; const math::Transform& mLevelSetTransform; const math::Transform& mTransform; const ValueT mMin; const ValueT mMax; Handle::UniquePtr mPositionHandle; }; // class LevelSetFilter // BBox index filtering class BBoxFilter { public: using Handle = AttributeHandle<openvdb::Vec3f>; BBoxFilter(const openvdb::math::Transform& transform, const openvdb::BBoxd& bboxWS) : mTransform(transform) , mBbox(transform.worldToIndex(bboxWS)) { } BBoxFilter(const BBoxFilter& filter) : mTransform(filter.mTransform) , mBbox(filter.mBbox) { if (filter.mPositionHandle) mPositionHandle.reset(new Handle(*filter.mPositionHandle)); } inline bool initialized() const { return bool(mPositionHandle); } inline index::State state() const { return mBbox.empty() ? 
index::NONE : index::PARTIAL; } template <typename LeafT> static index::State state(const LeafT&) { return index::PARTIAL; } template <typename LeafT> void reset(const LeafT& leaf) { mPositionHandle.reset(new Handle(leaf.constAttributeArray("P"))); } template <typename IterT> bool valid(const IterT& iter) const { assert(mPositionHandle); const openvdb::Coord ijk = iter.getCoord(); const openvdb::Vec3f voxelIndexSpace = ijk.asVec3d(); // Retrieve point position in voxel space const openvdb::Vec3f& pointVoxelSpace = mPositionHandle->get(*iter); // Compute point position in index space const openvdb::Vec3f pointIndexSpace = pointVoxelSpace + voxelIndexSpace; return mBbox.isInside(pointIndexSpace); } private: const openvdb::math::Transform& mTransform; const openvdb::BBoxd mBbox; Handle::UniquePtr mPositionHandle; }; // class BBoxFilter // Index filtering based on evaluating both sub-filters template <typename T1, typename T2, bool And = true> class BinaryFilter { public: BinaryFilter( const T1& filter1, const T2& filter2) : mFilter1(filter1) , mFilter2(filter2) { } inline bool initialized() const { return mFilter1.initialized() && mFilter2.initialized(); } inline index::State state() const { return this->computeState(mFilter1.state(), mFilter2.state()); } template <typename LeafT> inline index::State state(const LeafT& leaf) const { return this->computeState(mFilter1.state(leaf), mFilter2.state(leaf)); } template <typename LeafT> void reset(const LeafT& leaf) { mFilter1.reset(leaf); mFilter2.reset(leaf); } template <typename IterT> bool valid(const IterT& iter) const { if (And) return mFilter1.valid(iter) && mFilter2.valid(iter); return mFilter1.valid(iter) || mFilter2.valid(iter); } private: inline index::State computeState( index::State state1, index::State state2) const { if (And) { if (state1 == index::NONE || state2 == index::NONE) return index::NONE; else if (state1 == index::ALL && state2 == index::ALL) return index::ALL; } else { if (state1 == index::NONE && state2 == index::NONE) return index::NONE; else if (state1 == index::ALL && state2 == index::ALL) return index::ALL; } return index::PARTIAL; } T1 mFilter1; T2 mFilter2; }; // class BinaryFilter //////////////////////////////////////// template<typename T> struct FilterTraits { static const bool RequiresCoord = false; }; template<> struct FilterTraits<BBoxFilter> { static const bool RequiresCoord = true; }; template <typename T> struct FilterTraits<LevelSetFilter<T>> { static const bool RequiresCoord = true; }; template <typename T0, typename T1, bool And> struct FilterTraits<BinaryFilter<T0, T1, And>> { static const bool RequiresCoord = FilterTraits<T0>::RequiresCoord || FilterTraits<T1>::RequiresCoord; }; //////////////////////////////////////// } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_POINTS_INDEX_FILTER_HAS_BEEN_INCLUDED
18,450
C++
30.702749
110
0.637453
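A minimal sketch, assuming a caller supplies an existing PointDataGrid and a world-space bounding box, of how one of the index filters above (BBoxFilter) might be combined with pointCount() from PointCount.h; the helper name countPointsInBox is hypothetical.

#include <openvdb/openvdb.h>
#include <openvdb/points/PointDataGrid.h>
#include <openvdb/points/PointCount.h>
#include <openvdb/points/IndexFilter.h>

// Count only those points whose positions fall inside a world-space bounding box.
openvdb::Index64
countPointsInBox(const openvdb::points::PointDataGrid& points, const openvdb::BBoxd& boxWS)
{
    // BBoxFilter converts the world-space box into the grid's index space on construction.
    openvdb::points::BBoxFilter filter(points.transform(), boxWS);
    // pointCount() resets the filter per leaf and tests each point index against it.
    return openvdb::points::pointCount(points.tree(), filter);
}

The same filter object could equally be passed to a leaf's beginIndexOn() iterator; combining it with a LevelSetFilter via BinaryFilter would intersect the two predicates.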
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/StreamCompression.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file points/StreamCompression.cc #include "StreamCompression.h" #include <openvdb/util/logging.h> #include <map> #ifdef OPENVDB_USE_BLOSC #include <blosc.h> #endif namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace compression { #ifdef OPENVDB_USE_BLOSC bool bloscCanCompress() { return true; } size_t bloscUncompressedSize(const char* buffer) { size_t bytes, _1, _2; blosc_cbuffer_sizes(buffer, &bytes, &_1, &_2); return bytes; } void bloscCompress(char* compressedBuffer, size_t& compressedBytes, const size_t bufferBytes, const char* uncompressedBuffer, const size_t uncompressedBytes) { if (bufferBytes > BLOSC_MAX_BUFFERSIZE) { OPENVDB_LOG_DEBUG("Blosc compress failed due to exceeding maximum buffer size."); compressedBytes = 0; compressedBuffer = nullptr; return; } if (bufferBytes < uncompressedBytes + BLOSC_MAX_OVERHEAD) { OPENVDB_LOG_DEBUG("Blosc compress failed due to insufficient space in compressed buffer."); compressedBytes = 0; compressedBuffer = nullptr; return; } if (uncompressedBytes <= BLOSC_MINIMUM_BYTES) { // no Blosc compression performed below this limit compressedBytes = 0; compressedBuffer = nullptr; return; } if (uncompressedBytes < BLOSC_PAD_BYTES && bufferBytes < BLOSC_PAD_BYTES + BLOSC_MAX_OVERHEAD) { OPENVDB_LOG_DEBUG( "Blosc compress failed due to insufficient space in compressed buffer for padding."); compressedBytes = 0; compressedBuffer = nullptr; return; } size_t inputBytes = uncompressedBytes; const char* buffer = uncompressedBuffer; std::unique_ptr<char[]> paddedBuffer; if (uncompressedBytes < BLOSC_PAD_BYTES) { // input array padded with zeros below this limit to improve compression paddedBuffer.reset(new char[BLOSC_PAD_BYTES]); std::memcpy(paddedBuffer.get(), buffer, uncompressedBytes); for (int i = static_cast<int>(uncompressedBytes); i < BLOSC_PAD_BYTES; i++) { paddedBuffer.get()[i] = 0; } buffer = paddedBuffer.get(); inputBytes = BLOSC_PAD_BYTES; } int _compressedBytes = blosc_compress_ctx( /*clevel=*/9, // 0 (no compression) to 9 (maximum compression) /*doshuffle=*/true, /*typesize=*/sizeof(float), // hard-coded to 4-bytes for better compression /*srcsize=*/inputBytes, /*src=*/buffer, /*dest=*/compressedBuffer, /*destsize=*/bufferBytes, BLOSC_LZ4_COMPNAME, /*blocksize=*/inputBytes, /*numthreads=*/1); if (_compressedBytes <= 0) { std::ostringstream ostr; ostr << "Blosc failed to compress " << uncompressedBytes << " byte" << (uncompressedBytes == 1 ? "" : "s"); if (_compressedBytes < 0) ostr << " (internal error " << _compressedBytes << ")"; OPENVDB_LOG_DEBUG(ostr.str()); compressedBytes = 0; return; } compressedBytes = _compressedBytes; // fail if compression does not result in a smaller buffer if (compressedBytes >= uncompressedBytes) { compressedBytes = 0; } } std::unique_ptr<char[]> bloscCompress(const char* buffer, const size_t uncompressedBytes, size_t& compressedBytes, const bool resize) { size_t tempBytes = uncompressedBytes; // increase temporary buffer for padding if necessary if (tempBytes >= BLOSC_MINIMUM_BYTES && tempBytes < BLOSC_PAD_BYTES) { tempBytes += BLOSC_PAD_BYTES; } // increase by Blosc max overhead tempBytes += BLOSC_MAX_OVERHEAD; const bool outOfRange = tempBytes > BLOSC_MAX_BUFFERSIZE; std::unique_ptr<char[]> outBuffer(outOfRange ? 
new char[1] : new char[tempBytes]); bloscCompress(outBuffer.get(), compressedBytes, tempBytes, buffer, uncompressedBytes); if (compressedBytes == 0) { return nullptr; } // buffer size is larger due to Blosc overhead so resize // (resize can be skipped if the buffer is only temporary) if (resize) { std::unique_ptr<char[]> newBuffer(new char[compressedBytes]); std::memcpy(newBuffer.get(), outBuffer.get(), compressedBytes); outBuffer.reset(newBuffer.release()); } return outBuffer; } size_t bloscCompressedSize( const char* buffer, const size_t uncompressedBytes) { size_t compressedBytes; bloscCompress(buffer, uncompressedBytes, compressedBytes, /*resize=*/false); return compressedBytes; } void bloscDecompress(char* uncompressedBuffer, const size_t expectedBytes, const size_t bufferBytes, const char* compressedBuffer) { size_t uncompressedBytes = bloscUncompressedSize(compressedBuffer); if (bufferBytes > BLOSC_MAX_BUFFERSIZE) { OPENVDB_THROW(RuntimeError, "Blosc decompress failed due to exceeding maximum buffer size."); } if (bufferBytes < uncompressedBytes + BLOSC_MAX_OVERHEAD) { OPENVDB_THROW(RuntimeError, "Blosc decompress failed due to insufficient space in uncompressed buffer."); } uncompressedBytes = blosc_decompress_ctx( /*src=*/compressedBuffer, /*dest=*/uncompressedBuffer, bufferBytes, /*numthreads=*/1); if (uncompressedBytes < 1) { OPENVDB_THROW(RuntimeError, "Blosc decompress returned error code " << uncompressedBytes); } if (uncompressedBytes == BLOSC_PAD_BYTES && expectedBytes <= BLOSC_PAD_BYTES) { // padded array to improve compression } else if (uncompressedBytes != expectedBytes) { OPENVDB_THROW(RuntimeError, "Expected to decompress " << expectedBytes << " byte" << (expectedBytes == 1 ? "" : "s") << ", got " << uncompressedBytes << " byte" << (uncompressedBytes == 1 ? 
"" : "s")); } } std::unique_ptr<char[]> bloscDecompress(const char* buffer, const size_t expectedBytes, const bool resize) { size_t uncompressedBytes = bloscUncompressedSize(buffer); size_t tempBytes = uncompressedBytes + BLOSC_MAX_OVERHEAD; const bool outOfRange = tempBytes > BLOSC_MAX_BUFFERSIZE; if (outOfRange) tempBytes = 1; std::unique_ptr<char[]> outBuffer(new char[tempBytes]); bloscDecompress(outBuffer.get(), expectedBytes, tempBytes, buffer); // buffer size is larger due to Blosc overhead so resize // (resize can be skipped if the buffer is only temporary) if (resize) { std::unique_ptr<char[]> newBuffer(new char[expectedBytes]); std::memcpy(newBuffer.get(), outBuffer.get(), expectedBytes); outBuffer.reset(newBuffer.release()); } return outBuffer; } #else bool bloscCanCompress() { OPENVDB_LOG_DEBUG("Can't compress array data without the blosc library."); return false; } size_t bloscUncompressedSize(const char*) { OPENVDB_THROW(RuntimeError, "Can't extract compressed data without the blosc library."); } void bloscCompress(char*, size_t& compressedBytes, const size_t, const char*, const size_t) { OPENVDB_LOG_DEBUG("Can't compress array data without the blosc library."); compressedBytes = 0; } std::unique_ptr<char[]> bloscCompress(const char*, const size_t, size_t& compressedBytes, const bool) { OPENVDB_LOG_DEBUG("Can't compress array data without the blosc library."); compressedBytes = 0; return nullptr; } size_t bloscCompressedSize(const char*, const size_t) { OPENVDB_LOG_DEBUG("Can't compress array data without the blosc library."); return 0; } void bloscDecompress(char*, const size_t, const size_t, const char*) { OPENVDB_THROW(RuntimeError, "Can't extract compressed data without the blosc library."); } std::unique_ptr<char[]> bloscDecompress(const char*, const size_t, const bool) { OPENVDB_THROW(RuntimeError, "Can't extract compressed data without the blosc library."); } #endif // OPENVDB_USE_BLOSC //////////////////////////////////////// void Page::load() const { this->doLoad(); } long Page::uncompressedBytes() const { assert(mInfo); return mInfo->uncompressedBytes; } const char* Page::buffer(const int index) const { if (this->isOutOfCore()) this->load(); return mData.get() + index; } void Page::readHeader(std::istream& is) { assert(mInfo); // read the (compressed) size of the page int compressedSize; is.read(reinterpret_cast<char*>(&compressedSize), sizeof(int)); int uncompressedSize; // if uncompressed, read the (compressed) size of the page if (compressedSize > 0) is.read(reinterpret_cast<char*>(&uncompressedSize), sizeof(int)); else uncompressedSize = -compressedSize; assert(compressedSize != 0); assert(uncompressedSize != 0); mInfo->compressedBytes = compressedSize; mInfo->uncompressedBytes = uncompressedSize; } void Page::readBuffers(std::istream&is, bool delayed) { assert(mInfo); bool isCompressed = mInfo->compressedBytes > 0; io::MappedFile::Ptr mappedFile = io::getMappedFilePtr(is); if (delayed && mappedFile) { SharedPtr<io::StreamMetadata> meta = io::getStreamMetadataPtr(is); assert(meta); std::streamoff filepos = is.tellg(); // seek over the page is.seekg((isCompressed ? mInfo->compressedBytes : -mInfo->compressedBytes), std::ios_base::cur); mInfo->mappedFile = mappedFile; mInfo->meta = meta; mInfo->filepos = filepos; assert(mInfo->mappedFile); } else { std::unique_ptr<char[]> buffer(new char[ (isCompressed ? mInfo->compressedBytes : -mInfo->compressedBytes)]); is.read(buffer.get(), (isCompressed ? 
mInfo->compressedBytes : -mInfo->compressedBytes)); if (mInfo->compressedBytes > 0) { this->decompress(buffer); } else { this->copy(buffer, -static_cast<int>(mInfo->compressedBytes)); } mInfo.reset(); } } bool Page::isOutOfCore() const { return bool(mInfo); } void Page::copy(const std::unique_ptr<char[]>& temp, int pageSize) { mData.reset(new char[pageSize]); std::memcpy(mData.get(), temp.get(), pageSize); } void Page::decompress(const std::unique_ptr<char[]>& temp) { size_t uncompressedBytes = bloscUncompressedSize(temp.get()); size_t tempBytes = uncompressedBytes; #ifdef OPENVDB_USE_BLOSC tempBytes += uncompressedBytes; #endif mData.reset(new char[tempBytes]); bloscDecompress(mData.get(), uncompressedBytes, tempBytes, temp.get()); } void Page::doLoad() const { if (!this->isOutOfCore()) return; Page* self = const_cast<Page*>(this); // This lock will be contended at most once, after which this buffer // will no longer be out-of-core. tbb::spin_mutex::scoped_lock lock(self->mMutex); if (!this->isOutOfCore()) return; assert(self->mInfo); int compressedBytes = static_cast<int>(self->mInfo->compressedBytes); bool compressed = compressedBytes > 0; if (!compressed) compressedBytes = -compressedBytes; assert(compressedBytes); std::unique_ptr<char[]> temp(new char[compressedBytes]); assert(self->mInfo->mappedFile); SharedPtr<std::streambuf> buf = self->mInfo->mappedFile->createBuffer(); assert(buf); std::istream is(buf.get()); io::setStreamMetadataPtr(is, self->mInfo->meta, /*transfer=*/true); is.seekg(self->mInfo->filepos); is.read(temp.get(), compressedBytes); if (compressed) self->decompress(temp); else self->copy(temp, compressedBytes); self->mInfo.reset(); } //////////////////////////////////////// PageHandle::PageHandle( const Page::Ptr& page, const int index, const int size) : mPage(page) , mIndex(index) , mSize(size) { } Page& PageHandle::page() { assert(mPage); return *mPage; } std::unique_ptr<char[]> PageHandle::read() { assert(mIndex >= 0); assert(mSize > 0); std::unique_ptr<char[]> buffer(new char[mSize]); std::memcpy(buffer.get(), mPage->buffer(mIndex), mSize); return buffer; } //////////////////////////////////////// PagedInputStream::PagedInputStream(std::istream& is) : mIs(&is) { } PageHandle::Ptr PagedInputStream::createHandle(std::streamsize n) { assert(mByteIndex <= mUncompressedBytes); if (mByteIndex == mUncompressedBytes) { mPage = std::make_shared<Page>(); mPage->readHeader(*mIs); mUncompressedBytes = static_cast<int>(mPage->uncompressedBytes()); mByteIndex = 0; } #if OPENVDB_ABI_VERSION_NUMBER >= 6 // TODO: C++14 introduces std::make_unique PageHandle::Ptr pageHandle(new PageHandle(mPage, mByteIndex, int(n))); #else PageHandle::Ptr pageHandle = std::make_shared<PageHandle>(mPage, mByteIndex, int(n)); #endif mByteIndex += int(n); return pageHandle; } void PagedInputStream::read(PageHandle::Ptr& pageHandle, std::streamsize n, bool delayed) { assert(mByteIndex <= mUncompressedBytes); Page& page = pageHandle->page(); if (mByteIndex == mUncompressedBytes) { mUncompressedBytes = static_cast<int>(page.uncompressedBytes()); page.readBuffers(*mIs, delayed); mByteIndex = 0; } mByteIndex += int(n); } //////////////////////////////////////// PagedOutputStream::PagedOutputStream() { #ifdef OPENVDB_USE_BLOSC mCompressedData.reset(new char[PageSize + BLOSC_MAX_OVERHEAD]); #endif } PagedOutputStream::PagedOutputStream(std::ostream& os) : mOs(&os) { #ifdef OPENVDB_USE_BLOSC mCompressedData.reset(new char[PageSize + BLOSC_MAX_OVERHEAD]); #endif } PagedOutputStream& 
PagedOutputStream::write(const char* str, std::streamsize n) { if (n > PageSize) { this->flush(); // write out the block as if a whole page this->compressAndWrite(str, size_t(n)); } else { // if the size of this block will overflow the page, flush to disk if ((int(n) + mBytes) > PageSize) { this->flush(); } // store and increment the data in the current page std::memcpy(mData.get() + mBytes, str, n); mBytes += int(n); } return *this; } void PagedOutputStream::flush() { this->compressAndWrite(mData.get(), mBytes); mBytes = 0; } void PagedOutputStream::compressAndWrite(const char* buffer, size_t size) { if (size == 0) return; assert(size < std::numeric_limits<int>::max()); this->resize(size); size_t compressedBytes(0); if (mSizeOnly) { #ifdef OPENVDB_USE_BLOSC compressedBytes = bloscCompressedSize(buffer, size); #endif } else { #ifdef OPENVDB_USE_BLOSC bloscCompress(mCompressedData.get(), compressedBytes, mCapacity + BLOSC_MAX_OVERHEAD, buffer, size); #endif } if (compressedBytes == 0) { int uncompressedBytes = -static_cast<int>(size); if (mSizeOnly) { mOs->write(reinterpret_cast<const char*>(&uncompressedBytes), sizeof(int)); } else { mOs->write(buffer, size); } } else { if (mSizeOnly) { mOs->write(reinterpret_cast<const char*>(&compressedBytes), sizeof(int)); mOs->write(reinterpret_cast<const char*>(&size), sizeof(int)); } else { #ifdef OPENVDB_USE_BLOSC mOs->write(mCompressedData.get(), compressedBytes); #else OPENVDB_THROW(RuntimeError, "Cannot write out compressed data without Blosc."); #endif } } } void PagedOutputStream::resize(size_t size) { // grow the capacity if not sufficient space size_t requiredSize = size; if (size < BLOSC_PAD_BYTES && size >= BLOSC_MINIMUM_BYTES) { requiredSize = BLOSC_PAD_BYTES; } if (requiredSize > mCapacity) { mCapacity = requiredSize; mData.reset(new char[mCapacity]); #ifdef OPENVDB_USE_BLOSC mCompressedData.reset(new char[mCapacity + BLOSC_MAX_OVERHEAD]); #endif } } } // namespace compression } // namespace OPENVDB_VERSION_NAME } // namespace openvdb
16,323
C++
24.747634
108
0.640446
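A hedged round-trip sketch for the buffer-level API above, using the bloscCompress/bloscDecompress overloads that allocate their own output buffers; it assumes OpenVDB was built with OPENVDB_USE_BLOSC and uses a std::vector<float> as a stand-in for an attribute buffer.

#include <openvdb/points/StreamCompression.h>
#include <cassert>
#include <cstring>
#include <memory>
#include <vector>

void compressionRoundTrip(const std::vector<float>& values)
{
    namespace c = openvdb::compression;
    if (!c::bloscCanCompress()) return; // built without Blosc: compression is unavailable

    const size_t uncompressedBytes = values.size() * sizeof(float);
    size_t compressedBytes = 0;
    // resize=true copies the result into a buffer trimmed to the compressed size
    std::unique_ptr<char[]> compressed = c::bloscCompress(
        reinterpret_cast<const char*>(values.data()), uncompressedBytes,
        compressedBytes, /*resize=*/true);
    if (!compressed) return; // input too small or incompressible, so the raw buffer would be kept

    // decompress and check that the original bytes are recovered
    std::unique_ptr<char[]> restored = c::bloscDecompress(
        compressed.get(), /*expectedBytes=*/uncompressedBytes, /*resize=*/true);
    assert(std::memcmp(restored.get(), values.data(), uncompressedBytes) == 0);
}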
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/PointSample.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @author Nick Avramoussis, Francisco Gochez, Dan Bailey /// /// @file points/PointSample.h /// /// @brief Sample a VDB Grid onto a VDB Points attribute #ifndef OPENVDB_POINTS_POINT_SAMPLE_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_POINT_SAMPLE_HAS_BEEN_INCLUDED #include <openvdb/util/NullInterrupter.h> #include <openvdb/tools/Interpolation.h> #include "PointDataGrid.h" #include "PointAttribute.h" #include <sstream> #include <type_traits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { /// @brief Performs closest point sampling from a VDB grid onto a VDB Points attribute /// @param points the PointDataGrid whose points will be sampled on to /// @param sourceGrid VDB grid which will be sampled /// @param targetAttribute a target attribute on the points which will hold samples. This /// attribute will be created with the source grid type if it does /// not exist, and with the source grid name if the name is empty /// @param filter an optional index filter /// @param interrupter an optional interrupter /// @note The target attribute may exist provided it can be cast to the SourceGridT ValueType template<typename PointDataGridT, typename SourceGridT, typename FilterT = NullFilter, typename InterrupterT = util::NullInterrupter> inline void pointSample(PointDataGridT& points, const SourceGridT& sourceGrid, const Name& targetAttribute = "", const FilterT& filter = NullFilter(), InterrupterT* const interrupter = nullptr); /// @brief Performs tri-linear sampling from a VDB grid onto a VDB Points attribute /// @param points the PointDataGrid whose points will be sampled on to /// @param sourceGrid VDB grid which will be sampled /// @param targetAttribute a target attribute on the points which will hold samples. This /// attribute will be created with the source grid type if it does /// not exist, and with the source grid name if the name is empty /// @param filter an optional index filter /// @param interrupter an optional interrupter /// @note The target attribute may exist provided it can be cast to the SourceGridT ValueType template<typename PointDataGridT, typename SourceGridT, typename FilterT = NullFilter, typename InterrupterT = util::NullInterrupter> inline void boxSample( PointDataGridT& points, const SourceGridT& sourceGrid, const Name& targetAttribute = "", const FilterT& filter = NullFilter(), InterrupterT* const interrupter = nullptr); /// @brief Performs tri-quadratic sampling from a VDB grid onto a VDB Points attribute /// @param points the PointDataGrid whose points will be sampled on to /// @param sourceGrid VDB grid which will be sampled /// @param targetAttribute a target attribute on the points which will hold samples. 
This /// attribute will be created with the source grid type if it does /// not exist, and with the source grid name if the name is empty /// @param filter an optional index filter /// @param interrupter an optional interrupter /// @note The target attribute may exist provided it can be cast to the SourceGridT ValueType template<typename PointDataGridT, typename SourceGridT, typename FilterT = NullFilter, typename InterrupterT = util::NullInterrupter> inline void quadraticSample(PointDataGridT& points, const SourceGridT& sourceGrid, const Name& targetAttribute = "", const FilterT& filter = NullFilter(), InterrupterT* const interrupter = nullptr); // This struct samples the source grid accessor using the world-space position supplied, // with SamplerT providing the sampling scheme. In the case where ValueT does not match // the value type of the source grid, the sample() method will also convert the sampled // value into a ValueT value, using round-to-nearest for float-to-integer conversion. struct SampleWithRounding { template<typename ValueT, typename SamplerT, typename AccessorT> inline ValueT sample(const AccessorT& accessor, const Vec3d& position) const; }; // A dummy struct that is used to mean that the sampled attribute should either match the type // of the existing attribute or the type of the source grid (if the attribute doesn't exist yet) struct DummySampleType { }; /// @brief Performs sampling and conversion from a VDB grid onto a VDB Points attribute /// @param order the sampling order - 0 = closest-point, 1 = trilinear, 2 = triquadratic /// @param points the PointDataGrid whose points will be sampled on to /// @param sourceGrid VDB grid which will be sampled /// @param targetAttribute a target attribute on the points which will hold samples. This /// attribute will be created with the source grid type if it does /// not exist, and with the source grid name if the name is empty /// @param filter an optional index filter /// @param sampler handles sampling and conversion into the target attribute type, /// which by default this uses the SampleWithRounding struct. 
/// @param interrupter an optional interrupter /// @param threaded enable or disable threading (threading is enabled by default) /// @note The target attribute may exist provided it can be cast to the SourceGridT ValueType template<typename PointDataGridT, typename SourceGridT, typename TargetValueT = DummySampleType, typename SamplerT = SampleWithRounding, typename FilterT = NullFilter, typename InterrupterT = util::NullInterrupter> inline void sampleGrid( size_t order, PointDataGridT& points, const SourceGridT& sourceGrid, const Name& targetAttribute, const FilterT& filter = NullFilter(), const SamplerT& sampler = SampleWithRounding(), InterrupterT* const interrupter = nullptr, const bool threaded = true); /////////////////////////////////////////////////// namespace point_sample_internal { template<typename FromType, typename ToType> struct CompatibleTypes { enum { value = std::is_constructible<ToType, FromType>::value }; }; // Specializations for types that can be converted from source grid to target attribute template<typename T> struct CompatibleTypes< T, T> { enum { value = true }; }; template<typename T> struct CompatibleTypes< T, math::Vec2<T>> { enum { value = true }; }; template<typename T> struct CompatibleTypes< T, math::Vec3<T>> { enum { value = true }; }; template<typename T> struct CompatibleTypes< T, math::Vec4<T>> { enum { value = true }; }; template<typename T> struct CompatibleTypes< math::Vec2<T>, math::Vec2<T>> { enum { value = true }; }; template<typename T> struct CompatibleTypes< math::Vec3<T>, math::Vec3<T>> { enum { value = true }; }; template<typename T> struct CompatibleTypes< math::Vec4<T>, math::Vec4<T>> { enum { value = true }; }; template<typename T0, typename T1> struct CompatibleTypes< math::Vec2<T0>, math::Vec2<T1>> { enum { value = CompatibleTypes<T0, T1>::value }; }; template<typename T0, typename T1> struct CompatibleTypes< math::Vec3<T0>, math::Vec3<T1>> { enum { value = CompatibleTypes<T0, T1>::value }; }; template<typename T0, typename T1> struct CompatibleTypes< math::Vec4<T0>, math::Vec4<T1>> { enum { value = CompatibleTypes<T0, T1>::value }; }; template<typename T> struct CompatibleTypes< ValueMask, T> { enum { value = CompatibleTypes<bool, T>::value }; }; // Ability to access the Order and Staggered template parameter from tools::Sampler<Order, Staggered> template <typename T> struct SamplerTraits { static const size_t Order = 0; static const bool Staggered = false; }; template <size_t T0, bool T1> struct SamplerTraits<tools::Sampler<T0, T1>> { static const size_t Order = T0; static const bool Staggered = T1; }; // default sampling is incompatible, so throw an error template <typename ValueT, typename SamplerT, typename AccessorT, bool Round, bool Compatible = false> struct SampleWithRoundingOp { static inline void sample(ValueT&, const AccessorT&, const Vec3d&) { std::ostringstream ostr; ostr << "Cannot sample a " << typeNameAsString<typename AccessorT::ValueType>() << " grid on to a " << typeNameAsString<ValueT>() << " attribute"; OPENVDB_THROW(TypeError, ostr.str()); } }; // partial specialization to handle sampling and rounding of compatible conversion template <typename ValueT, typename SamplerT, typename AccessorT> struct SampleWithRoundingOp<ValueT, SamplerT, AccessorT, /*Round=*/true, /*Compatible=*/true> { static inline void sample(ValueT& value, const AccessorT& accessor, const Vec3d& position) { value = ValueT(math::Round(SamplerT::sample(accessor, position))); } }; // partial specialization to handle sampling and simple casting of 
compatible conversion template <typename ValueT, typename SamplerT, typename AccessorT> struct SampleWithRoundingOp<ValueT, SamplerT, AccessorT, /*Round=*/false, /*Compatible=*/true> { static inline void sample(ValueT& value, const AccessorT& accessor, const Vec3d& position) { value = ValueT(SamplerT::sample(accessor, position)); } }; template <typename PointDataGridT, typename SamplerT, typename FilterT, typename InterrupterT> class PointDataSampler { public: PointDataSampler(size_t order, PointDataGridT& points, const SamplerT& sampler, const FilterT& filter, InterrupterT* const interrupter, const bool threaded) : mOrder(order) , mPoints(points) , mSampler(sampler) , mFilter(filter) , mInterrupter(interrupter) , mThreaded(threaded) { } private: // No-op transformation struct AlignedTransform { inline Vec3d transform(const Vec3d& position) const { return position; } }; // struct AlignedTransform // Re-sample world-space position from source to target transforms struct NonAlignedTransform { NonAlignedTransform(const math::Transform& source, const math::Transform& target) : mSource(source) , mTarget(target) { } inline Vec3d transform(const Vec3d& position) const { return mSource.worldToIndex(mTarget.indexToWorld(position)); } private: const math::Transform& mSource; const math::Transform& mTarget; }; // struct NonAlignedTransform // A simple convenience wrapper that contains the source grid accessor and the sampler template <typename ValueT, typename SourceGridT, typename GridSamplerT> struct SamplerWrapper { using ValueType = ValueT; using SourceValueType = typename SourceGridT::ValueType; using SourceAccessorT = typename SourceGridT::ConstAccessor; // can only sample from a bool or mask grid using a PointSampler static const bool SourceIsBool = std::is_same<SourceValueType, bool>::value || std::is_same<SourceValueType, ValueMask>::value; static const bool OrderIsZero = SamplerTraits<GridSamplerT>::Order == 0; static const bool IsValid = !SourceIsBool || OrderIsZero; SamplerWrapper(const SourceGridT& sourceGrid, const SamplerT& sampler) : mAccessor(sourceGrid.getConstAccessor()) , mSampler(sampler) { } // note that creating a new accessor from the underlying tree is faster than // copying an existing accessor SamplerWrapper(const SamplerWrapper& other) : mAccessor(other.mAccessor.tree()) , mSampler(other.mSampler) { } template <bool IsValidT = IsValid> inline typename std::enable_if<IsValidT, ValueT>::type sample(const Vec3d& position) const { return mSampler.template sample<ValueT, GridSamplerT, SourceAccessorT>( mAccessor, position); } template <bool IsValidT = IsValid> inline typename std::enable_if<!IsValidT, ValueT>::type sample(const Vec3d& /*position*/) const { OPENVDB_THROW(RuntimeError, "Cannot sample bool grid with BoxSampler or QuadraticSampler."); } private: SourceAccessorT mAccessor; const SamplerT& mSampler; }; // struct SamplerWrapper template <typename SamplerWrapperT, typename TransformerT> inline void doSample(const SamplerWrapperT& sampleWrapper, const Index targetIndex, const TransformerT& transformer) { using PointDataTreeT = typename PointDataGridT::TreeType; using LeafT = typename PointDataTreeT::LeafNodeType; using LeafManagerT = typename tree::LeafManager<PointDataTreeT>; const auto& filter(mFilter); const auto& interrupter(mInterrupter); auto sampleLambda = [targetIndex, &sampleWrapper, &transformer, &filter, &interrupter]( LeafT& leaf, size_t /*idx*/) { using TargetHandleT = AttributeWriteHandle<typename SamplerWrapperT::ValueType>; if 
(util::wasInterrupted(interrupter)) { tbb::task::self().cancel_group_execution(); return; } SamplerWrapperT newSampleWrapper(sampleWrapper); auto positionHandle = AttributeHandle<Vec3f>::create(leaf.constAttributeArray("P")); auto targetHandle = TargetHandleT::create(leaf.attributeArray(targetIndex)); for (auto iter = leaf.beginIndexOn(filter); iter; ++iter) { const Vec3d position = transformer.transform( positionHandle->get(*iter) + iter.getCoord().asVec3d()); targetHandle->set(*iter, newSampleWrapper.sample(position)); } }; LeafManagerT leafManager(mPoints.tree()); if (mInterrupter) mInterrupter->start(); leafManager.foreach(sampleLambda, mThreaded); if (mInterrupter) mInterrupter->end(); } template <typename SourceGridT, typename SamplerWrapperT> inline void resolveTransform(const SourceGridT& sourceGrid, const SamplerWrapperT& sampleWrapper, const Index targetIndex) { const auto& sourceTransform = sourceGrid.constTransform(); const auto& pointsTransform = mPoints.constTransform(); if (sourceTransform == pointsTransform) { AlignedTransform transformer; doSample(sampleWrapper, targetIndex, transformer); } else { NonAlignedTransform transformer(sourceTransform, pointsTransform); doSample(sampleWrapper, targetIndex, transformer); } } template <typename SourceGridT, typename TargetValueT, size_t Order> inline void resolveStaggered(const SourceGridT& sourceGrid, const Index targetIndex) { using SamplerWrapperT = SamplerWrapper<TargetValueT, SourceGridT, tools::Sampler<Order, false>>; using StaggeredSamplerWrapperT = SamplerWrapper<TargetValueT, SourceGridT, tools::Sampler<Order, true>>; using SourceValueType = typename SourceGridT::ValueType; if (VecTraits<SourceValueType>::Size == 3 && sourceGrid.getGridClass() == GRID_STAGGERED) { StaggeredSamplerWrapperT sampleWrapper(sourceGrid, mSampler); resolveTransform(sourceGrid, sampleWrapper, targetIndex); } else { SamplerWrapperT sampleWrapper(sourceGrid, mSampler); resolveTransform(sourceGrid, sampleWrapper, targetIndex); } } public: template <typename SourceGridT, typename TargetValueT = typename SourceGridT::ValueType> inline void sample(const SourceGridT& sourceGrid, Index targetIndex) { using SourceValueType = typename SourceGridT::ValueType; static const bool SourceIsMask = std::is_same<SourceValueType, bool>::value || std::is_same<SourceValueType, ValueMask>::value; if (SourceIsMask || mOrder == 0) { resolveStaggered<SourceGridT, TargetValueT, 0>(sourceGrid, targetIndex); } else if (mOrder == 1) { resolveStaggered<SourceGridT, TargetValueT, 1>(sourceGrid, targetIndex); } else if (mOrder == 2) { resolveStaggered<SourceGridT, TargetValueT, 2>(sourceGrid, targetIndex); } } private: size_t mOrder; PointDataGridT& mPoints; const SamplerT& mSampler; const FilterT& mFilter; InterrupterT* const mInterrupter; const bool mThreaded; }; // class PointDataSampler template <typename PointDataGridT, typename ValueT> struct AppendAttributeOp { static void append(PointDataGridT& points, const Name& attribute) { appendAttribute<ValueT>(points.tree(), attribute); } }; // partial specialization to disable attempts to append attribute type of DummySampleType template <typename PointDataGridT> struct AppendAttributeOp<PointDataGridT, DummySampleType> { static void append(PointDataGridT&, const Name&) { } }; } // namespace point_sample_internal //////////////////////////////////////// template<typename ValueT, typename SamplerT, typename AccessorT> ValueT SampleWithRounding::sample(const AccessorT& accessor, const Vec3d& position) const { using namespace 
point_sample_internal; using SourceValueT = typename AccessorT::ValueType; static const bool staggered = SamplerTraits<SamplerT>::Staggered; static const bool compatible = CompatibleTypes</*from=*/SourceValueT, /*to=*/ValueT>::value && (!staggered || (staggered && VecTraits<SourceValueT>::Size == 3)); static const bool round = std::is_floating_point<SourceValueT>::value && std::is_integral<ValueT>::value; ValueT value; SampleWithRoundingOp<ValueT, SamplerT, AccessorT, round, compatible>::sample( value, accessor, position); return value; } //////////////////////////////////////// template<typename PointDataGridT, typename SourceGridT, typename TargetValueT, typename SamplerT, typename FilterT, typename InterrupterT> inline void sampleGrid( size_t order, PointDataGridT& points, const SourceGridT& sourceGrid, const Name& targetAttribute, const FilterT& filter, const SamplerT& sampler, InterrupterT* const interrupter, const bool threaded) { using point_sample_internal::AppendAttributeOp; using point_sample_internal::PointDataSampler; // use the name of the grid if no target attribute name supplied Name attribute(targetAttribute); if (targetAttribute.empty()) { attribute = sourceGrid.getName(); } // we do not allow sampling onto the "P" attribute if (attribute == "P") { OPENVDB_THROW(RuntimeError, "Cannot sample onto the \"P\" attribute"); } auto leaf = points.tree().cbeginLeaf(); if (!leaf) return; PointDataSampler<PointDataGridT, SamplerT, FilterT, InterrupterT> pointDataSampler( order, points, sampler, filter, interrupter, threaded); const auto& descriptor = leaf->attributeSet().descriptor(); size_t targetIndex = descriptor.find(attribute); const bool attributeExists = targetIndex != AttributeSet::INVALID_POS; if (std::is_same<TargetValueT, DummySampleType>::value) { if (!attributeExists) { // append attribute of source grid value type appendAttribute<typename SourceGridT::ValueType>(points.tree(), attribute); targetIndex = leaf->attributeSet().descriptor().find(attribute); assert(targetIndex != AttributeSet::INVALID_POS); // sample using same type as source grid pointDataSampler.template sample<SourceGridT>(sourceGrid, Index(targetIndex)); } else { auto targetIdx = static_cast<Index>(targetIndex); // attempt to explicitly sample using type of existing attribute const Name& targetType = descriptor.valueType(targetIndex); if (targetType == typeNameAsString<Vec3f>()) { pointDataSampler.template sample<SourceGridT, Vec3f>(sourceGrid, targetIdx); } else if (targetType == typeNameAsString<Vec3d>()) { pointDataSampler.template sample<SourceGridT, Vec3d>(sourceGrid, targetIdx); } else if (targetType == typeNameAsString<Vec3i>()) { pointDataSampler.template sample<SourceGridT, Vec3i>(sourceGrid, targetIdx); } else if (targetType == typeNameAsString<int8_t>()) { pointDataSampler.template sample<SourceGridT, int8_t>(sourceGrid, targetIdx); } else if (targetType == typeNameAsString<int16_t>()) { pointDataSampler.template sample<SourceGridT, int16_t>(sourceGrid, targetIdx); } else if (targetType == typeNameAsString<int32_t>()) { pointDataSampler.template sample<SourceGridT, int32_t>(sourceGrid, targetIdx); } else if (targetType == typeNameAsString<int64_t>()) { pointDataSampler.template sample<SourceGridT, int64_t>(sourceGrid, targetIdx); } else if (targetType == typeNameAsString<float>()) { pointDataSampler.template sample<SourceGridT, float>(sourceGrid, targetIdx); } else if (targetType == typeNameAsString<double>()) { pointDataSampler.template sample<SourceGridT, double>(sourceGrid, targetIdx); } 
else if (targetType == typeNameAsString<bool>()) { pointDataSampler.template sample<SourceGridT, bool>(sourceGrid, targetIdx); } else { std::ostringstream ostr; ostr << "Cannot sample attribute of type - " << targetType; OPENVDB_THROW(TypeError, ostr.str()); } } } else { if (!attributeExists) { // append attribute of target value type // (point_sample_internal wrapper disables the ability to use DummySampleType) AppendAttributeOp<PointDataGridT, TargetValueT>::append(points, attribute); targetIndex = leaf->attributeSet().descriptor().find(attribute); assert(targetIndex != AttributeSet::INVALID_POS); } else { const Name targetType = typeNameAsString<TargetValueT>(); const Name attributeType = descriptor.valueType(targetIndex); if (targetType != attributeType) { std::ostringstream ostr; ostr << "Requested attribute type " << targetType << " for sampling " << " does not match existing attribute type " << attributeType; OPENVDB_THROW(TypeError, ostr.str()); } } // sample using target value type pointDataSampler.template sample<SourceGridT, TargetValueT>( sourceGrid, static_cast<Index>(targetIndex)); } } template<typename PointDataGridT, typename SourceGridT, typename FilterT, typename InterrupterT> inline void pointSample(PointDataGridT& points, const SourceGridT& sourceGrid, const Name& targetAttribute, const FilterT& filter, InterrupterT* const interrupter) { SampleWithRounding sampler; sampleGrid(/*order=*/0, points, sourceGrid, targetAttribute, filter, sampler, interrupter); } template<typename PointDataGridT, typename SourceGridT, typename FilterT, typename InterrupterT> inline void boxSample( PointDataGridT& points, const SourceGridT& sourceGrid, const Name& targetAttribute, const FilterT& filter, InterrupterT* const interrupter) { SampleWithRounding sampler; sampleGrid(/*order=*/1, points, sourceGrid, targetAttribute, filter, sampler, interrupter); } template<typename PointDataGridT, typename SourceGridT, typename FilterT, typename InterrupterT> inline void quadraticSample(PointDataGridT& points, const SourceGridT& sourceGrid, const Name& targetAttribute, const FilterT& filter, InterrupterT* const interrupter) { SampleWithRounding sampler; sampleGrid(/*order=*/2, points, sourceGrid, targetAttribute, filter, sampler, interrupter); } //////////////////////////////////////// } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_POINTS_POINT_SAMPLE_HAS_BEEN_INCLUDED
25,485
C++
44.028268
112
0.655798
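To make the entry points above concrete, here is a hedged sketch that samples a FloatGrid onto a VDB Points grid with trilinear (box) interpolation; the attribute name "density" is an arbitrary choice and both grids are assumed to exist already.

#include <openvdb/openvdb.h>
#include <openvdb/points/PointDataGrid.h>
#include <openvdb/points/PointSample.h>

void sampleDensityOntoPoints(openvdb::points::PointDataGrid& points,
                             const openvdb::FloatGrid& density)
{
    // Creates a float attribute named "density" if it does not already exist,
    // then writes one trilinearly interpolated value per point (order = 1).
    openvdb::points::boxSample(points, density, "density");
}

Calling pointSample() or quadraticSample() with the same arguments would switch to closest-point or triquadratic sampling respectively.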
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/PointScatter.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @author Nick Avramoussis /// /// @file points/PointScatter.h /// /// @brief Various point scattering methods for generating VDB Points. /// /// All random number calls are made to the same generator to produce /// temporarily consistent results in relation to the provided seed. This /// comes with some multi-threaded performance trade-offs. #ifndef OPENVDB_POINTS_POINT_SCATTER_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_POINT_SCATTER_HAS_BEEN_INCLUDED #include <type_traits> #include <algorithm> #include <thread> #include <random> #include <openvdb/openvdb.h> #include <openvdb/Types.h> #include <openvdb/tree/LeafManager.h> #include <openvdb/tools/Prune.h> #include <openvdb/util/NullInterrupter.h> #include "AttributeArray.h" #include "PointCount.h" #include "PointDataGrid.h" #include <tbb/parallel_sort.h> #include <tbb/parallel_for.h> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { /// @brief The free functions depend on the following class: /// /// The @c InterrupterT template argument below refers to any class /// with the following interface: /// @code /// class Interrupter { /// ... /// public: /// void start(const char* name = nullptr) // called when computations begin /// void end() // called when computations end /// bool wasInterrupted(int percent=-1) // return true to break computation ///}; /// @endcode /// /// @note If no template argument is provided for this InterrupterT /// the util::NullInterrupter is used which implies that all /// interrupter calls are no-ops (i.e. incurs no computational overhead). /// @brief Uniformly scatter a total amount of points in active regions /// /// @param grid A source grid. The resulting PointDataGrid will copy this grids /// transform and scatter in its active voxelized topology. /// @param count The total number of points to scatter /// @param seed A seed for the RandGenT /// @param spread The spread of points as a scale from each voxels center. A value of /// 1.0f indicates points can be placed anywhere within the voxel, where /// as a value of 0.0f will force all points to be created exactly at the /// centers of each voxel. /// @param interrupter An optional interrupter /// @note returns the scattered PointDataGrid template< typename GridT, typename RandGenT = std::mt19937, typename PositionArrayT = TypedAttributeArray<Vec3f, NullCodec>, typename PointDataGridT = Grid< typename points::TreeConverter<typename GridT::TreeType>::Type>, typename InterrupterT = util::NullInterrupter> inline typename PointDataGridT::Ptr uniformPointScatter(const GridT& grid, const Index64 count, const unsigned int seed = 0, const float spread = 1.0f, InterrupterT* interrupter = nullptr); /// @brief Uniformly scatter a fixed number of points per active voxel. If the pointsPerVoxel /// value provided is a fractional value, each voxel calculates a delta value of /// how likely it is to contain an extra point. /// /// @param grid A source grid. The resulting PointDataGrid will copy this grids /// transform and scatter in its active voxelized topology. /// @param pointsPerVoxel The number of points to scatter per voxel /// @param seed A seed for the RandGenT /// @param spread The spread of points as a scale from each voxels center. A value of /// 1.0f indicates points can be placed anywhere within the voxel, where /// as a value of 0.0f will force all points to be created exactly at the /// centers of each voxel. 
/// @param interrupter An optional interrupter /// @note returns the scattered PointDataGrid template< typename GridT, typename RandGenT = std::mt19937, typename PositionArrayT = TypedAttributeArray<Vec3f, NullCodec>, typename PointDataGridT = Grid< typename points::TreeConverter<typename GridT::TreeType>::Type>, typename InterrupterT = util::NullInterrupter> inline typename PointDataGridT::Ptr denseUniformPointScatter(const GridT& grid, const float pointsPerVoxel, const unsigned int seed = 0, const float spread = 1.0f, InterrupterT* interrupter = nullptr); /// @brief Non uniformly scatter points per active voxel. The pointsPerVoxel value is used /// to weight each grids cell value to compute a fixed number of points for every /// active voxel. If the computed result is a fractional value, each voxel calculates /// a delta value of how likely it is to contain an extra point. /// /// @param grid A source grid. The resulting PointDataGrid will copy this grids /// transform, voxelized topology and use its values to compute a /// target points per voxel. The grids ValueType must be convertible /// to a scalar value. Only active and larger than zero values will /// contain points. /// @param pointsPerVoxel The number of points to scatter per voxel /// @param seed A seed for the RandGenT /// @param spread The spread of points as a scale from each voxels center. A value of /// 1.0f indicates points can be placed anywhere within the voxel, where /// as a value of 0.0f will force all points to be created exactly at the /// centers of each voxel. /// @param interrupter An optional interrupter /// @note returns the scattered PointDataGrid template< typename GridT, typename RandGenT = std::mt19937, typename PositionArrayT = TypedAttributeArray<Vec3f, NullCodec>, typename PointDataGridT = Grid< typename points::TreeConverter<typename GridT::TreeType>::Type>, typename InterrupterT = util::NullInterrupter> inline typename PointDataGridT::Ptr nonUniformPointScatter(const GridT& grid, const float pointsPerVoxel, const unsigned int seed = 0, const float spread = 1.0f, InterrupterT* interrupter = nullptr); //////////////////////////////////////// namespace point_scatter_internal { /// @brief initialise the topology of a PointDataGrid and ensure /// everything is voxelized /// @param grid The source grid from which to base the topology generation template<typename PointDataGridT, typename GridT> inline typename PointDataGridT::Ptr initialisePointTopology(const GridT& grid) { typename PointDataGridT::Ptr points(new PointDataGridT); points->setTransform(grid.transform().copy()); points->topologyUnion(grid); if (points->tree().hasActiveTiles()) { points->tree().voxelizeActiveTiles(); } return points; } /// @brief Generate random point positions for a leaf node /// @param leaf The leaf node to initialize /// @param descriptor The descriptor containing the position type /// @param count The number of points to generate /// @param spread The spread of points from the voxel center /// @param rand01 The random number generator, expected to produce floating point /// values between 0 and 1. 
template<typename PositionType, typename CodecT, typename RandGenT, typename LeafNodeT> inline void generatePositions(LeafNodeT& leaf, const AttributeSet::Descriptor::Ptr& descriptor, const Index64& count, const float spread, RandGenT& rand01) { using PositionTraits = VecTraits<PositionType>; using ValueType = typename PositionTraits::ElementType; using PositionWriteHandle = AttributeWriteHandle<PositionType, CodecT>; leaf.initializeAttributes(descriptor, static_cast<Index>(count)); // directly expand to avoid needlessly setting uniform values in the // write handle auto& array = leaf.attributeArray(0); array.expand(/*fill*/false); PositionWriteHandle pHandle(array, /*expand*/false); PositionType P; for (Index64 index = 0; index < count; ++index) { P[0] = (spread * (rand01() - ValueType(0.5))); P[1] = (spread * (rand01() - ValueType(0.5))); P[2] = (spread * (rand01() - ValueType(0.5))); pHandle.set(static_cast<Index>(index), P); } } } // namespace point_scatter_internal //////////////////////////////////////// template< typename GridT, typename RandGenT, typename PositionArrayT, typename PointDataGridT, typename InterrupterT> inline typename PointDataGridT::Ptr uniformPointScatter(const GridT& grid, const Index64 count, const unsigned int seed, const float spread, InterrupterT* interrupter) { using PositionType = typename PositionArrayT::ValueType; using PositionTraits = VecTraits<PositionType>; using ValueType = typename PositionTraits::ElementType; using CodecType = typename PositionArrayT::Codec; using RandomGenerator = math::Rand01<ValueType, RandGenT>; using TreeType = typename PointDataGridT::TreeType; using LeafNodeType = typename TreeType::LeafNodeType; using LeafManagerT = tree::LeafManager<TreeType>; struct Local { /// @brief Get the prefixed voxel counts for each leaf node with an /// additional value to represent the end voxel count. 
/// See also LeafManager::getPrefixSum() static void getPrefixSum(LeafManagerT& leafManager, std::vector<Index64>& offsets) { Index64 offset = 0; offsets.reserve(leafManager.leafCount() + 1); offsets.push_back(0); const auto leafRange = leafManager.leafRange(); for (auto leaf = leafRange.begin(); leaf; ++leaf) { offset += leaf->onVoxelCount(); offsets.push_back(offset); } } }; static_assert(PositionTraits::IsVec && PositionTraits::Size == 3, "Invalid Position Array type."); if (spread < 0.0f || spread > 1.0f) { OPENVDB_THROW(ValueError, "Spread must be between 0 and 1."); } if (interrupter) interrupter->start("Uniform scattering with fixed point count"); typename PointDataGridT::Ptr points = point_scatter_internal::initialisePointTopology<PointDataGridT>(grid); TreeType& tree = points->tree(); if (!tree.cbeginLeaf()) return points; LeafManagerT leafManager(tree); const Index64 voxelCount = leafManager.activeLeafVoxelCount(); assert(voxelCount != 0); const double pointsPerVolume = double(count) / double(voxelCount); const Index32 pointsPerVoxel = static_cast<Index32>(math::RoundDown(pointsPerVolume)); const Index64 remainder = count - (pointsPerVoxel * voxelCount); if (remainder == 0) { return denseUniformPointScatter< GridT, RandGenT, PositionArrayT, PointDataGridT, InterrupterT>( grid, float(pointsPerVoxel), seed, spread, interrupter); } std::vector<Index64> voxelOffsets, values; std::thread worker(&Local::getPrefixSum, std::ref(leafManager), std::ref(voxelOffsets)); { math::RandInt<Index64, RandGenT> gen(seed, 0, voxelCount-1); values.reserve(remainder); for (Index64 i = 0; i < remainder; ++i) values.emplace_back(gen()); } worker.join(); if (util::wasInterrupted<InterrupterT>(interrupter)) { tree.clear(); return points; } tbb::parallel_sort(values.begin(), values.end()); const bool fractionalOnly(pointsPerVoxel == 0); leafManager.foreach([&voxelOffsets, &values, fractionalOnly] (LeafNodeType& leaf, const size_t idx) { const Index64 lowerOffset = voxelOffsets[idx]; // inclusive const Index64 upperOffset = voxelOffsets[idx + 1]; // exclusive assert(upperOffset > lowerOffset); const auto valuesEnd = values.end(); auto lower = std::lower_bound(values.begin(), valuesEnd, lowerOffset); auto* const data = leaf.buffer().data(); auto iter = leaf.beginValueOn(); Index32 currentOffset(0); bool addedPoints(!fractionalOnly); while (lower != valuesEnd) { const Index64 vId = *lower; if (vId >= upperOffset) break; const Index32 nextOffset = Index32(vId - lowerOffset); iter.increment(nextOffset - currentOffset); currentOffset = nextOffset; assert(iter); auto& value = data[iter.pos()]; value = value + 1; // no += operator support addedPoints = true; ++lower; } // deactivate this leaf if no points were added. 
This will speed up // the unthreaded rng if (!addedPoints) leaf.setValuesOff(); }); voxelOffsets.clear(); values.clear(); if (fractionalOnly) { tools::pruneInactive(tree); leafManager.rebuild(); } const AttributeSet::Descriptor::Ptr descriptor = AttributeSet::Descriptor::create(PositionArrayT::attributeType()); RandomGenerator rand01(seed); const auto leafRange = leafManager.leafRange(); auto leaf = leafRange.begin(); for (; leaf; ++leaf) { if (util::wasInterrupted<InterrupterT>(interrupter)) break; Index32 offset(0); for (auto iter = leaf->beginValueAll(); iter; ++iter) { if (iter.isValueOn()) { const Index32 value = Index32(pointsPerVolume + Index32(*iter)); if (value == 0) leaf->setValueOff(iter.pos()); else offset += value; } // @note can't use iter.setValue(offset) on point grids leaf->setOffsetOnly(iter.pos(), offset); } // offset should always be non zero assert(offset != 0); point_scatter_internal::generatePositions<PositionType, CodecType> (*leaf, descriptor, offset, spread, rand01); } // if interrupted, remove remaining leaf nodes if (leaf) { for (; leaf; ++leaf) leaf->setValuesOff(); tools::pruneInactive(tree); } if (interrupter) interrupter->end(); return points; } //////////////////////////////////////// template< typename GridT, typename RandGenT, typename PositionArrayT, typename PointDataGridT, typename InterrupterT> inline typename PointDataGridT::Ptr denseUniformPointScatter(const GridT& grid, const float pointsPerVoxel, const unsigned int seed, const float spread, InterrupterT* interrupter) { using PositionType = typename PositionArrayT::ValueType; using PositionTraits = VecTraits<PositionType>; using ValueType = typename PositionTraits::ElementType; using CodecType = typename PositionArrayT::Codec; using RandomGenerator = math::Rand01<ValueType, RandGenT>; using TreeType = typename PointDataGridT::TreeType; static_assert(PositionTraits::IsVec && PositionTraits::Size == 3, "Invalid Position Array type."); if (pointsPerVoxel < 0.0f) { OPENVDB_THROW(ValueError, "Points per voxel must not be less than zero."); } if (spread < 0.0f || spread > 1.0f) { OPENVDB_THROW(ValueError, "Spread must be between 0 and 1."); } if (interrupter) interrupter->start("Dense uniform scattering with fixed point count"); typename PointDataGridT::Ptr points = point_scatter_internal::initialisePointTopology<PointDataGridT>(grid); TreeType& tree = points->tree(); auto leafIter = tree.beginLeaf(); if (!leafIter) return points; const Index32 pointsPerVoxelInt = math::Floor(pointsPerVoxel); const double delta = pointsPerVoxel - float(pointsPerVoxelInt); const bool fractional = !math::isApproxZero(delta, 1.0e-6); const bool fractionalOnly = pointsPerVoxelInt == 0; const AttributeSet::Descriptor::Ptr descriptor = AttributeSet::Descriptor::create(PositionArrayT::attributeType()); RandomGenerator rand01(seed); for (; leafIter; ++leafIter) { if (util::wasInterrupted<InterrupterT>(interrupter)) break; Index32 offset(0); for (auto iter = leafIter->beginValueAll(); iter; ++iter) { if (iter.isValueOn()) { offset += pointsPerVoxelInt; if (fractional && rand01() < delta) ++offset; else if (fractionalOnly) leafIter->setValueOff(iter.pos()); } // @note can't use iter.setValue(offset) on point grids leafIter->setOffsetOnly(iter.pos(), offset); } if (offset != 0) { point_scatter_internal::generatePositions<PositionType, CodecType> (*leafIter, descriptor, offset, spread, rand01); } } // if interrupted, remove remaining leaf nodes const bool prune(leafIter || fractionalOnly); for (; leafIter; ++leafIter) 
leafIter->setValuesOff(); if (prune) tools::pruneInactive(tree); if (interrupter) interrupter->end(); return points; } //////////////////////////////////////// template< typename GridT, typename RandGenT, typename PositionArrayT, typename PointDataGridT, typename InterrupterT> inline typename PointDataGridT::Ptr nonUniformPointScatter(const GridT& grid, const float pointsPerVoxel, const unsigned int seed, const float spread, InterrupterT* interrupter) { using PositionType = typename PositionArrayT::ValueType; using PositionTraits = VecTraits<PositionType>; using ValueType = typename PositionTraits::ElementType; using CodecType = typename PositionArrayT::Codec; using RandomGenerator = math::Rand01<ValueType, RandGenT>; using TreeType = typename PointDataGridT::TreeType; static_assert(PositionTraits::IsVec && PositionTraits::Size == 3, "Invalid Position Array type."); static_assert(std::is_arithmetic<typename GridT::ValueType>::value, "Scalar grid type required for weighted voxel scattering."); if (pointsPerVoxel < 0.0f) { OPENVDB_THROW(ValueError, "Points per voxel must not be less than zero."); } if (spread < 0.0f || spread > 1.0f) { OPENVDB_THROW(ValueError, "Spread must be between 0 and 1."); } if (interrupter) interrupter->start("Non-uniform scattering with local point density"); typename PointDataGridT::Ptr points = point_scatter_internal::initialisePointTopology<PointDataGridT>(grid); TreeType& tree = points->tree(); auto leafIter = tree.beginLeaf(); if (!leafIter) return points; const AttributeSet::Descriptor::Ptr descriptor = AttributeSet::Descriptor::create(PositionArrayT::attributeType()); RandomGenerator rand01(seed); const auto accessor = grid.getConstAccessor(); for (; leafIter; ++leafIter) { if (util::wasInterrupted<InterrupterT>(interrupter)) break; Index32 offset(0); for (auto iter = leafIter->beginValueAll(); iter; ++iter) { if (iter.isValueOn()) { double fractional = double(accessor.getValue(iter.getCoord())) * pointsPerVoxel; fractional = std::max(0.0, fractional); int count = int(fractional); if (rand01() < (fractional - double(count))) ++count; else if (count == 0) leafIter->setValueOff(iter.pos()); offset += count; } // @note can't use iter.setValue(offset) on point grids leafIter->setOffsetOnly(iter.pos(), offset); } if (offset != 0) { point_scatter_internal::generatePositions<PositionType, CodecType> (*leafIter, descriptor, offset, spread, rand01); } } // if interrupted, remove remaining leaf nodes for (; leafIter; ++leafIter) leafIter->setValuesOff(); tools::pruneInactive(points->tree()); if (interrupter) interrupter->end(); return points; } } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_POINTS_POINT_SCATTER_HAS_BEEN_INCLUDED
20,847
C++
36.563964
96
0.63731
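The sketch below is offered as a hedged example rather than canonical usage: it scatters a fixed number of points per active voxel of a level-set sphere, with the radius, voxel size, point count and seed chosen purely for illustration.

#include <openvdb/openvdb.h>
#include <openvdb/points/PointScatter.h>
#include <openvdb/tools/LevelSetSphere.h>

openvdb::points::PointDataGrid::Ptr scatterOnSphere()
{
    // Build a small level-set sphere to provide the active topology.
    openvdb::FloatGrid::Ptr sphere = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/1.0f, /*center=*/openvdb::Vec3f(0.0f), /*voxelSize=*/0.1f);

    // Eight points in every active voxel, spread across the full voxel extent.
    // The returned PointDataGrid copies the sphere's transform.
    return openvdb::points::denseUniformPointScatter(
        *sphere, /*pointsPerVoxel=*/8.0f, /*seed=*/42u, /*spread=*/1.0f);
}

uniformPointScatter() would instead take a total point count for the whole grid, and nonUniformPointScatter() would weight the per-voxel count by the scalar grid values.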
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/AttributeArrayString.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file points/AttributeArrayString.h /// /// @author Dan Bailey /// /// @brief Attribute array storage for string data using Descriptor Metadata. #ifndef OPENVDB_POINTS_ATTRIBUTE_ARRAY_STRING_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_ATTRIBUTE_ARRAY_STRING_HAS_BEEN_INCLUDED #include "AttributeArray.h" #include <memory> #include <deque> #include <unordered_map> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { //////////////////////////////////////// namespace attribute_traits { template <bool Truncate> struct StringTypeTrait { using Type = Index; }; template<> struct StringTypeTrait</*Truncate=*/true> { using Type = uint16_t; }; } template <bool Truncate> struct StringCodec { using ValueType = Index; template <typename T> struct Storage { using Type = typename attribute_traits::StringTypeTrait<Truncate>::Type; }; template<typename StorageType> static void decode(const StorageType&, ValueType&); template<typename StorageType> static void encode(const ValueType&, StorageType&); static const char* name() { return Truncate ? "str_trnc" : "str"; } }; using StringAttributeArray = TypedAttributeArray<Index, StringCodec<false>>; //////////////////////////////////////// /// Class to compute a string->index map from all string:N metadata class OPENVDB_API StringMetaCache { public: using UniquePtr = std::unique_ptr<StringMetaCache>; using ValueMap = std::unordered_map<Name, Index>; StringMetaCache() = default; explicit StringMetaCache(const MetaMap& metadata); /// Return @c true if no string elements in metadata bool empty() const { return mCache.empty(); } /// Returns the number of string elements in metadata size_t size() const { return mCache.size(); } /// Clears and re-populates the cache void reset(const MetaMap& metadata); /// Insert a new element in the cache void insert(const Name& key, Index index); /// Retrieve the value map (string -> index) const ValueMap& map() const { return mCache; } private: ValueMap mCache; }; // StringMetaCache //////////////////////////////////////// /// Class to help with insertion of keyed string values into metadata class OPENVDB_API StringMetaInserter { public: using UniquePtr = std::unique_ptr<StringMetaInserter>; explicit StringMetaInserter(MetaMap& metadata); /// Returns @c true if key exists bool hasKey(const Name& key) const; /// Returns @c true if index exists bool hasIndex(Index index) const; /// @brief Insert the string into the metadata using the hint if non-zero /// @param name the string to insert /// @param hint requested index to use if non-zero and not already in use /// @note the hint can be used to insert non-sequentially so as to avoid an /// expensive re-indexing of string keys /// @return the chosen index which will match hint if the hint was used Index insert(const Name& name, Index hint = Index(0)); /// Reset the cache from the metadata void resetCache(); private: using IndexPairArray = std::deque<std::pair<Index, Index>>; MetaMap& mMetadata; IndexPairArray mIdBlocks; StringMetaCache mCache; }; // StringMetaInserter //////////////////////////////////////// template <bool Truncate> template<typename StorageType> inline void StringCodec<Truncate>::decode(const StorageType& data, ValueType& val) { val = static_cast<ValueType>(data); } template <bool Truncate> template<typename StorageType> inline void StringCodec<Truncate>::encode(const ValueType& val, StorageType& data) { data = 
static_cast<ValueType>(val); } //////////////////////////////////////// inline bool isString(const AttributeArray& array) { return array.isType<StringAttributeArray>(); } //////////////////////////////////////// class OPENVDB_API StringAttributeHandle { public: using Ptr = std::shared_ptr<StringAttributeHandle>;//SharedPtr<StringAttributeHandle>; using UniquePtr = std::unique_ptr<StringAttributeHandle>; static Ptr create(const AttributeArray& array, const MetaMap& metadata, const bool preserveCompression = true); StringAttributeHandle( const AttributeArray& array, const MetaMap& metadata, const bool preserveCompression = true); Index stride() const { return mHandle.stride(); } Index size() const { return mHandle.size(); } bool isUniform() const { return mHandle.isUniform(); } bool hasConstantStride() const { return mHandle.hasConstantStride(); } Name get(Index n, Index m = 0) const; void get(Name& name, Index n, Index m = 0) const; /// @brief Returns a reference to the array held in the Handle. const AttributeArray& array() const; protected: AttributeHandle<Index, StringCodec<false>> mHandle; const MetaMap& mMetadata; }; // class StringAttributeHandle //////////////////////////////////////// class OPENVDB_API StringAttributeWriteHandle : public StringAttributeHandle { public: using Ptr = std::shared_ptr<StringAttributeWriteHandle>;//SharedPtr<StringAttributeWriteHandle>; using UniquePtr = std::unique_ptr<StringAttributeWriteHandle>; static Ptr create(AttributeArray& array, const MetaMap& metadata, const bool expand = true); StringAttributeWriteHandle( AttributeArray& array, const MetaMap& metadata, const bool expand = true); /// @brief If this array is uniform, replace it with an array of length size(). /// @param fill if true, assign the uniform value to each element of the array. void expand(bool fill = true); /// @brief Set membership for the whole array and attempt to collapse void collapse(); /// @brief Set membership for the whole array and attempt to collapse /// @param name Name of the String void collapse(const Name& name); /// Compact the existing array to become uniform if all values are identical bool compact(); /// @brief Fill the existing array with the given value. /// @note Identical to collapse() except a non-uniform array will not become uniform. void fill(const Name& name); /// Set the value of the index to @a name void set(Index n, const Name& name); void set(Index n, Index m, const Name& name); /// Reset the value cache from the metadata void resetCache(); /// @brief Returns a reference to the array held in the Write Handle. AttributeArray& array(); /// @brief Returns whether or not the metadata cache contains a given value. /// @param name Name of the String. bool contains(const Name& name) const; private: /// Retrieve the index of this string value from the cache /// @note throws if name does not exist in cache Index getIndex(const Name& name) const; StringMetaCache mCache; AttributeWriteHandle<Index, StringCodec<false>> mWriteHandle; }; // class StringAttributeWriteHandle //////////////////////////////////////// } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_POINTS_ATTRIBUTE_ARRAY_STRING_HAS_BEEN_INCLUDED
7,418
C
28.915322
115
0.665543
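A minimal usage sketch for the string attribute API declared in AttributeArrayString.h above (not part of the file). It assumes an existing StringAttributeArray ("array") and the descriptor's MetaMap ("metadata") obtained elsewhere, for example from a PointDataGrid leaf; the string values "left" and "right" are illustrative.

#include <openvdb/points/AttributeArrayString.h>

void writeStrings(openvdb::points::AttributeArray& array, openvdb::MetaMap& metadata)
{
    using namespace openvdb::points;

    // register the strings in the metadata so the handle can map them to indices
    StringMetaInserter inserter(metadata);
    inserter.insert("left");
    inserter.insert("right");

    // write values through the handle (the array is assumed to be a StringAttributeArray);
    // index zero is reserved for the empty string
    StringAttributeWriteHandle::Ptr handle =
        StringAttributeWriteHandle::create(array, metadata);
    handle->resetCache();   // redundant here, needed if strings are inserted after creation
    handle->set(0, "left");
    handle->set(1, "right");
    handle->compact();      // collapse to uniform if all values are identical
}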
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/AttributeArrayString.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file points/AttributeArrayString.cc #include "AttributeArrayString.h" #include <openvdb/Metadata.h> #include <openvdb/MetaMap.h> #include <tbb/parallel_sort.h> #include <string> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { namespace { Name getStringKey(const Index index) { return "string:" + std::to_string(index - 1); } } // namespace //////////////////////////////////////// // StringMetaCache implementation StringMetaCache::StringMetaCache(const MetaMap& metadata) { this->reset(metadata); } void StringMetaCache::insert(const Name& key, Index index) { mCache[key] = index; } void StringMetaCache::reset(const MetaMap& metadata) { mCache.clear(); // populate the cache for (auto it = metadata.beginMeta(), itEnd = metadata.endMeta(); it != itEnd; ++it) { const Name& key = it->first; const Metadata::Ptr& meta = it->second; // attempt to cast metadata to StringMetadata const StringMetadata* stringMeta = dynamic_cast<StringMetadata*>(meta.get()); if (!stringMeta) continue; // string attribute metadata must have a key that starts "string:" if (key.compare(0, 7, "string:") != 0) continue; // remove "string:" and cast to Index Index index = 1 + static_cast<Index>( std::stoul(key.substr(7, key.size() - 7))); // add to the cache this->insert(stringMeta->value(), index); } } //////////////////////////////////////// // StringMetaInserter implementation StringMetaInserter::StringMetaInserter(MetaMap& metadata) : mMetadata(metadata) , mIdBlocks() , mCache() { // populate the cache resetCache(); } bool StringMetaInserter::hasKey(const Name& key) const { return mCache.map().find(key) != mCache.map().end(); } bool StringMetaInserter::hasIndex(Index index) const { return bool(mMetadata[getStringKey(index)]); } Index StringMetaInserter::insert(const Name& name, Index hint) { using IterT = IndexPairArray::iterator; // if name already exists, return the index const auto& cacheMap = mCache.map(); auto it = cacheMap.find(name); if (it != cacheMap.end()) { return it->second; } Index index = 1; Name hintKey; bool canUseHint = false; // hint must be non-zero to have been requested if (hint > Index(0)) { hintKey = getStringKey(hint); // check if hint is already in use if (!bool(mMetadata[hintKey])) { canUseHint = true; index = hint; } } // look through the id blocks for hint or index IterT iter = mIdBlocks.begin(); for (; iter != mIdBlocks.end(); ++iter) { const Index start = iter->first; const Index end = start + iter->second; if (index < start || index >= end) break; if (!canUseHint) index = end; } // index now holds the next valid index. 
if it's 1 (the beginning // iterator) no initial block exists - add it IterT prevIter; if (iter == mIdBlocks.begin()) { prevIter = mIdBlocks.emplace(iter, 1, 1); iter = std::next(prevIter); } else { // accumulate the id block size where the next index is going prevIter = std::prev(iter); prevIter->second++; } // see if this block and the next block can be compacted if (iter != mIdBlocks.end() && prevIter->second + 1 == iter->first) { prevIter->second += iter->second; mIdBlocks.erase(iter); } // insert into metadata const Name key = getStringKey(index); mMetadata.insertMeta(key, StringMetadata(name)); // update the cache mCache.insert(name, index); return index; } void StringMetaInserter::resetCache() { mCache.reset(mMetadata); mIdBlocks.clear(); std::vector<Index> stringIndices; stringIndices.reserve(mCache.size()); if (mCache.empty()) return; const auto& cacheMap = mCache.map(); for (auto it = cacheMap.cbegin(); it != cacheMap.cend(); ++it) { const Index index = it->second; stringIndices.emplace_back(index); } tbb::parallel_sort(stringIndices.begin(), stringIndices.end()); // bucket string indices Index key = stringIndices.front(); Index size = 0; // For each id, see if it's adjacent id is sequentially increasing and continue to // track how many are until we find a value that isn't. Store the start and length // of each of these blocks. For example, the following container could be created // consisting of 3 elements: // key -> size // ------------- // 7 -> 1000 (values 7->1007) // 1020 -> 5 (values 1020->1025) // 2013 -> 30 (values 2013->2043) // Note that the end value is exclusive (values 1007, 1025 and 2043 do not exist // given the above example) for (const Index id : stringIndices) { if (key + size != id) { assert(size > 0); mIdBlocks.emplace_back(key, size); size = 0; key = id; } ++size; } // add the last block mIdBlocks.emplace_back(key, size); } //////////////////////////////////////// // StringAttributeHandle implementation StringAttributeHandle::Ptr StringAttributeHandle::create(const AttributeArray& array, const MetaMap& metadata, const bool preserveCompression) { return std::make_shared<StringAttributeHandle>(array, metadata, preserveCompression); } StringAttributeHandle::StringAttributeHandle(const AttributeArray& array, const MetaMap& metadata, const bool preserveCompression) : mHandle(array, preserveCompression) , mMetadata(metadata) { if (!isString(array)) { OPENVDB_THROW(TypeError, "Cannot create a StringAttributeHandle for an attribute array that is not a string."); } } Name StringAttributeHandle::get(Index n, Index m) const { Name name; this->get(name, n, m); return name; } void StringAttributeHandle::get(Name& name, Index n, Index m) const { Index index = mHandle.get(n, m); // index zero is reserved for an empty string if (index == 0) { name = ""; return; } const Name key = getStringKey(index); // key is assumed to exist in metadata openvdb::StringMetadata::ConstPtr meta = mMetadata.getMetadata<StringMetadata>(key); if (!meta) { OPENVDB_THROW(LookupError, "String attribute cannot be found with index - \"" << index << "\"."); } name = meta->value(); } const AttributeArray& StringAttributeHandle::array() const { return mHandle.array(); } //////////////////////////////////////// // StringAttributeWriteHandle implementation StringAttributeWriteHandle::Ptr StringAttributeWriteHandle::create(AttributeArray& array, const MetaMap& metadata, const bool expand) { return std::make_shared<StringAttributeWriteHandle>(array, metadata, expand); } 
StringAttributeWriteHandle::StringAttributeWriteHandle(AttributeArray& array, const MetaMap& metadata, const bool expand) : StringAttributeHandle(array, metadata, /*preserveCompression=*/ false) , mWriteHandle(array, expand) { // populate the cache resetCache(); } void StringAttributeWriteHandle::expand(bool fill) { mWriteHandle.expand(fill); } void StringAttributeWriteHandle::collapse() { // zero is used for an empty string mWriteHandle.collapse(0); } void StringAttributeWriteHandle::collapse(const Name& name) { Index index = getIndex(name); mWriteHandle.collapse(index); } bool StringAttributeWriteHandle::compact() { return mWriteHandle.compact(); } void StringAttributeWriteHandle::fill(const Name& name) { Index index = getIndex(name); mWriteHandle.fill(index); } void StringAttributeWriteHandle::set(Index n, const Name& name) { Index index = getIndex(name); mWriteHandle.set(n, /*stride*/0, index); } void StringAttributeWriteHandle::set(Index n, Index m, const Name& name) { Index index = getIndex(name); mWriteHandle.set(n, m, index); } void StringAttributeWriteHandle::resetCache() { mCache.reset(mMetadata); } AttributeArray& StringAttributeWriteHandle::array() { return mWriteHandle.array(); } bool StringAttributeWriteHandle::contains(const Name& name) const { // empty strings always have an index at index zero if (name.empty()) return true; const auto& cacheMap = mCache.map(); return cacheMap.find(name) != cacheMap.end(); } Index StringAttributeWriteHandle::getIndex(const Name& name) const { // zero used for an empty string if (name.empty()) return Index(0); const auto& cacheMap = mCache.map(); auto it = cacheMap.find(name); if (it == cacheMap.end()) { OPENVDB_THROW(LookupError, "String does not exist in Metadata, insert it and reset the cache - \"" << name << "\"."); } return it->second; } //////////////////////////////////////// } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb
9,454
C++
22.461538
125
0.621007
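A short sketch of the metadata convention implemented in AttributeArrayString.cc above: each string value is stored under a "string:<N>" key and referenced from the attribute by index N+1, with index zero reserved for the empty string. The names used here are purely illustrative.

#include <cassert>
#include <openvdb/openvdb.h>
#include <openvdb/points/AttributeArrayString.h>

void buildCache()
{
    openvdb::MetaMap metadata;
    metadata.insertMeta("string:0", openvdb::StringMetadata("red"));
    metadata.insertMeta("string:1", openvdb::StringMetadata("green"));

    // StringMetaCache maps the stored names back to their 1-based indices
    openvdb::points::StringMetaCache cache(metadata);
    assert(cache.size() == 2);
    assert(cache.map().at("red") == 1);
    assert(cache.map().at("green") == 2);
}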
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/PointGroup.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @author Dan Bailey /// /// @file points/PointGroup.h /// /// @brief Point group manipulation in a VDB Point Grid. #ifndef OPENVDB_POINTS_POINT_GROUP_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_POINT_GROUP_HAS_BEEN_INCLUDED #include <openvdb/openvdb.h> #include "IndexIterator.h" // FilterTraits #include "IndexFilter.h" // FilterTraits #include "AttributeSet.h" #include "PointDataGrid.h" #include "PointAttribute.h" #include "PointCount.h" #include <tbb/parallel_reduce.h> #include <algorithm> #include <random> #include <string> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { /// @brief Delete any group that is not present in the Descriptor. /// /// @param groups the vector of group names. /// @param descriptor the descriptor that holds the group map. inline void deleteMissingPointGroups( std::vector<std::string>& groups, const AttributeSet::Descriptor& descriptor); /// @brief Appends a new empty group to the VDB tree. /// /// @param tree the PointDataTree to be appended to. /// @param group name of the new group. template <typename PointDataTree> inline void appendGroup(PointDataTree& tree, const Name& group); /// @brief Appends new empty groups to the VDB tree. /// /// @param tree the PointDataTree to be appended to. /// @param groups names of the new groups. template <typename PointDataTree> inline void appendGroups(PointDataTree& tree, const std::vector<Name>& groups); /// @brief Drops an existing group from the VDB tree. /// /// @param tree the PointDataTree to be dropped from. /// @param group name of the group. /// @param compact compact attributes if possible to reduce memory - if dropping /// more than one group, compacting once at the end will be faster template <typename PointDataTree> inline void dropGroup( PointDataTree& tree, const Name& group, const bool compact = true); /// @brief Drops existing groups from the VDB tree, the tree is compacted after dropping. /// /// @param tree the PointDataTree to be dropped from. /// @param groups names of the groups. template <typename PointDataTree> inline void dropGroups( PointDataTree& tree, const std::vector<Name>& groups); /// @brief Drops all existing groups from the VDB tree, the tree is compacted after dropping. /// /// @param tree the PointDataTree to be dropped from. template <typename PointDataTree> inline void dropGroups( PointDataTree& tree); /// @brief Compacts existing groups of a VDB Tree to use less memory if possible. /// /// @param tree the PointDataTree to be compacted. template <typename PointDataTree> inline void compactGroups(PointDataTree& tree); /// @brief Sets group membership from a PointIndexTree-ordered vector. /// /// @param tree the PointDataTree. /// @param indexTree the PointIndexTree. /// @param membership @c 1 if the point is in the group, 0 otherwise. /// @param group the name of the group. /// @param remove if @c true also perform removal of points from the group. /// /// @note vector<bool> is not thread-safe on concurrent write, so use vector<short> instead template <typename PointDataTree, typename PointIndexTree> inline void setGroup( PointDataTree& tree, const PointIndexTree& indexTree, const std::vector<short>& membership, const Name& group, const bool remove = false); /// @brief Sets membership for the specified group for all points (on/off). /// /// @param tree the PointDataTree. /// @param group the name of the group. 
/// @param member true / false for membership of the group. template <typename PointDataTree> inline void setGroup( PointDataTree& tree, const Name& group, const bool member = true); /// @brief Sets group membership based on a provided filter. /// /// @param tree the PointDataTree. /// @param group the name of the group. /// @param filter filter data that is used to create a per-leaf filter template <typename PointDataTree, typename FilterT> inline void setGroupByFilter( PointDataTree& tree, const Name& group, const FilterT& filter); //////////////////////////////////////// namespace point_group_internal { /// Copy a group attribute value from one group offset to another template<typename PointDataTreeType> struct CopyGroupOp { using LeafManagerT = typename tree::LeafManager<PointDataTreeType>; using LeafRangeT = typename LeafManagerT::LeafRange; using GroupIndex = AttributeSet::Descriptor::GroupIndex; CopyGroupOp(const GroupIndex& targetIndex, const GroupIndex& sourceIndex) : mTargetIndex(targetIndex) , mSourceIndex(sourceIndex) { } void operator()(const typename LeafManagerT::LeafRange& range) const { for (auto leaf = range.begin(); leaf; ++leaf) { GroupHandle sourceGroup = leaf->groupHandle(mSourceIndex); GroupWriteHandle targetGroup = leaf->groupWriteHandle(mTargetIndex); for (auto iter = leaf->beginIndexAll(); iter; ++iter) { const bool groupOn = sourceGroup.get(*iter); targetGroup.set(*iter, groupOn); } } } ////////// const GroupIndex mTargetIndex; const GroupIndex mSourceIndex; }; /// Set membership on or off for the specified group template <typename PointDataTree, bool Member> struct SetGroupOp { using LeafManagerT = typename tree::LeafManager<PointDataTree>; using GroupIndex = AttributeSet::Descriptor::GroupIndex; SetGroupOp(const AttributeSet::Descriptor::GroupIndex& index) : mIndex(index) { } void operator()(const typename LeafManagerT::LeafRange& range) const { for (auto leaf = range.begin(); leaf; ++leaf) { // obtain the group attribute array GroupWriteHandle group(leaf->groupWriteHandle(mIndex)); // set the group value group.collapse(Member); } } ////////// const GroupIndex& mIndex; }; // struct SetGroupOp template <typename PointDataTree, typename PointIndexTree, bool Remove> struct SetGroupFromIndexOp { using LeafManagerT = typename tree::LeafManager<PointDataTree>; using LeafRangeT = typename LeafManagerT::LeafRange; using PointIndexLeafNode = typename PointIndexTree::LeafNodeType; using IndexArray = typename PointIndexLeafNode::IndexArray; using GroupIndex = AttributeSet::Descriptor::GroupIndex; using MembershipArray = std::vector<short>; SetGroupFromIndexOp(const PointIndexTree& indexTree, const MembershipArray& membership, const GroupIndex& index) : mIndexTree(indexTree) , mMembership(membership) , mIndex(index) { } void operator()(const typename LeafManagerT::LeafRange& range) const { for (auto leaf = range.begin(); leaf; ++leaf) { // obtain the PointIndexLeafNode (using the origin of the current leaf) const PointIndexLeafNode* pointIndexLeaf = mIndexTree.probeConstLeaf(leaf->origin()); if (!pointIndexLeaf) continue; // obtain the group attribute array GroupWriteHandle group(leaf->groupWriteHandle(mIndex)); // initialise the attribute storage Index64 index = 0; const IndexArray& indices = pointIndexLeaf->indices(); for (const Index64 i: indices) { if (Remove) { group.set(static_cast<Index>(index), mMembership[i]); } else if (mMembership[i] == short(1)) { group.set(static_cast<Index>(index), short(1)); } index++; } // attempt to compact the array group.compact(); } } 
////////// const PointIndexTree& mIndexTree; const MembershipArray& mMembership; const GroupIndex& mIndex; }; // struct SetGroupFromIndexOp template <typename PointDataTree, typename FilterT, typename IterT = typename PointDataTree::LeafNodeType::ValueAllCIter> struct SetGroupByFilterOp { using LeafManagerT = typename tree::LeafManager<PointDataTree>; using LeafRangeT = typename LeafManagerT::LeafRange; using LeafNodeT = typename PointDataTree::LeafNodeType; using GroupIndex = AttributeSet::Descriptor::GroupIndex; SetGroupByFilterOp( const GroupIndex& index, const FilterT& filter) : mIndex(index) , mFilter(filter) { } void operator()(const typename LeafManagerT::LeafRange& range) const { for (auto leaf = range.begin(); leaf; ++leaf) { // obtain the group attribute array GroupWriteHandle group(leaf->groupWriteHandle(mIndex)); auto iter = leaf->template beginIndex<IterT, FilterT>(mFilter); for (; iter; ++iter) { group.set(*iter, true); } // attempt to compact the array group.compact(); } } ////////// const GroupIndex& mIndex; const FilterT& mFilter; // beginIndex takes a copy of mFilter }; // struct SetGroupByFilterOp //////////////////////////////////////// } // namespace point_group_internal //////////////////////////////////////// inline void deleteMissingPointGroups( std::vector<std::string>& groups, const AttributeSet::Descriptor& descriptor) { for (auto it = groups.begin(); it != groups.end();) { if (!descriptor.hasGroup(*it)) it = groups.erase(it); else ++it; } } //////////////////////////////////////// template <typename PointDataTreeT> inline void appendGroup(PointDataTreeT& tree, const Name& group) { if (group.empty()) { OPENVDB_THROW(KeyError, "Cannot use an empty group name as a key."); } auto iter = tree.cbeginLeaf(); if (!iter) return; const AttributeSet& attributeSet = iter->attributeSet(); auto descriptor = attributeSet.descriptorPtr(); // don't add if group already exists if (descriptor->hasGroup(group)) return; const bool hasUnusedGroup = descriptor->unusedGroups() > 0; // add a new group attribute if there are no unused groups if (!hasUnusedGroup) { // find a new internal group name const Name groupName = descriptor->uniqueName("__group"); descriptor = descriptor->duplicateAppend(groupName, GroupAttributeArray::attributeType()); const size_t pos = descriptor->find(groupName); // insert new group attribute tree::LeafManager<PointDataTreeT> leafManager(tree); leafManager.foreach( [&](typename PointDataTreeT::LeafNodeType& leaf, size_t /*idx*/) { auto expected = leaf.attributeSet().descriptorPtr(); leaf.appendAttribute(*expected, descriptor, pos); }, /*threaded=*/true ); } else { // make the descriptor unique before we modify the group map makeDescriptorUnique(tree); descriptor = attributeSet.descriptorPtr(); } // ensure that there are now available groups assert(descriptor->unusedGroups() > 0); // find next unused offset const size_t offset = descriptor->unusedGroupOffset(); // add the group mapping to the descriptor descriptor->setGroup(group, offset); // if there was an unused group then we did not need to append a new attribute, so // we must manually clear membership in the new group as its bits may have been // previously set if (hasUnusedGroup) setGroup(tree, group, false); } //////////////////////////////////////// template <typename PointDataTree> inline void appendGroups(PointDataTree& tree, const std::vector<Name>& groups) { // TODO: could be more efficient by appending multiple groups at once // instead of one-by-one, however this is likely not that common a use 
case for (const Name& name : groups) { appendGroup(tree, name); } } //////////////////////////////////////// template <typename PointDataTree> inline void dropGroup(PointDataTree& tree, const Name& group, const bool compact) { using Descriptor = AttributeSet::Descriptor; if (group.empty()) { OPENVDB_THROW(KeyError, "Cannot use an empty group name as a key."); } auto iter = tree.cbeginLeaf(); if (!iter) return; const AttributeSet& attributeSet = iter->attributeSet(); // make the descriptor unique before we modify the group map makeDescriptorUnique(tree); Descriptor::Ptr descriptor = attributeSet.descriptorPtr(); // now drop the group descriptor->dropGroup(group); if (compact) { compactGroups(tree); } } //////////////////////////////////////// template <typename PointDataTree> inline void dropGroups( PointDataTree& tree, const std::vector<Name>& groups) { for (const Name& name : groups) { dropGroup(tree, name, /*compact=*/false); } // compaction done once for efficiency compactGroups(tree); } //////////////////////////////////////// template <typename PointDataTree> inline void dropGroups( PointDataTree& tree) { using Descriptor = AttributeSet::Descriptor; auto iter = tree.cbeginLeaf(); if (!iter) return; const AttributeSet& attributeSet = iter->attributeSet(); // make the descriptor unique before we modify the group map makeDescriptorUnique(tree); Descriptor::Ptr descriptor = attributeSet.descriptorPtr(); descriptor->clearGroups(); // find all indices for group attribute arrays std::vector<size_t> indices = attributeSet.groupAttributeIndices(); // drop these attributes arrays dropAttributes(tree, indices); } //////////////////////////////////////// template <typename PointDataTree> inline void compactGroups(PointDataTree& tree) { using Descriptor = AttributeSet::Descriptor; using GroupIndex = Descriptor::GroupIndex; using LeafManagerT = typename tree::template LeafManager<PointDataTree>; using point_group_internal::CopyGroupOp; auto iter = tree.cbeginLeaf(); if (!iter) return; const AttributeSet& attributeSet = iter->attributeSet(); // early exit if not possible to compact if (!attributeSet.descriptor().canCompactGroups()) return; // make the descriptor unique before we modify the group map makeDescriptorUnique(tree); Descriptor::Ptr descriptor = attributeSet.descriptorPtr(); // generate a list of group offsets and move them (one-by-one) // TODO: improve this algorithm to move multiple groups per array at once // though this is likely not that common a use case Name sourceName; size_t sourceOffset, targetOffset; while (descriptor->requiresGroupMove(sourceName, sourceOffset, targetOffset)) { const GroupIndex sourceIndex = attributeSet.groupIndex(sourceOffset); const GroupIndex targetIndex = attributeSet.groupIndex(targetOffset); CopyGroupOp<PointDataTree> copy(targetIndex, sourceIndex); LeafManagerT leafManager(tree); tbb::parallel_for(leafManager.leafRange(), copy); descriptor->setGroup(sourceName, targetOffset); } // drop unused attribute arrays const std::vector<size_t> indices = attributeSet.groupAttributeIndices(); const size_t totalAttributesToDrop = descriptor->unusedGroups() / descriptor->groupBits(); assert(totalAttributesToDrop <= indices.size()); const std::vector<size_t> indicesToDrop(indices.end() - totalAttributesToDrop, indices.end()); dropAttributes(tree, indicesToDrop); } //////////////////////////////////////// template <typename PointDataTree, typename PointIndexTree> inline void setGroup( PointDataTree& tree, const PointIndexTree& indexTree, const std::vector<short>& 
membership, const Name& group, const bool remove) { using Descriptor = AttributeSet::Descriptor; using LeafManagerT = typename tree::template LeafManager<PointDataTree>; using point_group_internal::SetGroupFromIndexOp; auto iter = tree.cbeginLeaf(); if (!iter) return; const AttributeSet& attributeSet = iter->attributeSet(); const Descriptor& descriptor = attributeSet.descriptor(); if (!descriptor.hasGroup(group)) { OPENVDB_THROW(LookupError, "Group must exist on Tree before defining membership."); } { // Check that that the largest index in the PointIndexTree is smaller than the size // of the membership vector. The index tree will be used to lookup membership // values. If the index tree was constructed with nan positions, this index will // differ from the PointDataTree count using IndexTreeManager = tree::LeafManager<const PointIndexTree>; IndexTreeManager leafManager(indexTree); const int64_t max = tbb::parallel_reduce(leafManager.leafRange(), -1, [](const typename IndexTreeManager::LeafRange& range, int64_t value) -> int64_t { for (auto leaf = range.begin(); leaf; ++leaf) { auto it = std::max_element(leaf->indices().begin(), leaf->indices().end()); value = std::max(value, static_cast<int64_t>(*it)); } return value; }, [](const int64_t a, const int64_t b) { return std::max(a, b); } ); if (max != -1 && membership.size() <= static_cast<size_t>(max)) { OPENVDB_THROW(IndexError, "Group membership vector size must be larger than " " the maximum index within the provided index tree."); } } const Descriptor::GroupIndex index = attributeSet.groupIndex(group); LeafManagerT leafManager(tree); // set membership if (remove) { SetGroupFromIndexOp<PointDataTree, PointIndexTree, true> set(indexTree, membership, index); tbb::parallel_for(leafManager.leafRange(), set); } else { SetGroupFromIndexOp<PointDataTree, PointIndexTree, false> set(indexTree, membership, index); tbb::parallel_for(leafManager.leafRange(), set); } } //////////////////////////////////////// template <typename PointDataTree> inline void setGroup( PointDataTree& tree, const Name& group, const bool member) { using Descriptor = AttributeSet::Descriptor; using LeafManagerT = typename tree::template LeafManager<PointDataTree>; using point_group_internal::SetGroupOp; auto iter = tree.cbeginLeaf(); if (!iter) return; const AttributeSet& attributeSet = iter->attributeSet(); const Descriptor& descriptor = attributeSet.descriptor(); if (!descriptor.hasGroup(group)) { OPENVDB_THROW(LookupError, "Group must exist on Tree before defining membership."); } const Descriptor::GroupIndex index = attributeSet.groupIndex(group); LeafManagerT leafManager(tree); // set membership based on member variable if (member) tbb::parallel_for(leafManager.leafRange(), SetGroupOp<PointDataTree, true>(index)); else tbb::parallel_for(leafManager.leafRange(), SetGroupOp<PointDataTree, false>(index)); } //////////////////////////////////////// template <typename PointDataTree, typename FilterT> inline void setGroupByFilter( PointDataTree& tree, const Name& group, const FilterT& filter) { using Descriptor = AttributeSet::Descriptor; using LeafManagerT = typename tree::template LeafManager<PointDataTree>; using point_group_internal::SetGroupByFilterOp; auto iter = tree.cbeginLeaf(); if (!iter) return; const AttributeSet& attributeSet = iter->attributeSet(); const Descriptor& descriptor = attributeSet.descriptor(); if (!descriptor.hasGroup(group)) { OPENVDB_THROW(LookupError, "Group must exist on Tree before defining membership."); } const Descriptor::GroupIndex index = 
attributeSet.groupIndex(group); // set membership using filter SetGroupByFilterOp<PointDataTree, FilterT> set(index, filter); LeafManagerT leafManager(tree); tbb::parallel_for(leafManager.leafRange(), set); } //////////////////////////////////////// template <typename PointDataTree> inline void setGroupByRandomTarget( PointDataTree& tree, const Name& group, const Index64 targetPoints, const unsigned int seed = 0) { using RandomFilter = RandomLeafFilter<PointDataTree, std::mt19937>; RandomFilter filter(tree, targetPoints, seed); setGroupByFilter<PointDataTree, RandomFilter>(tree, group, filter); } //////////////////////////////////////// template <typename PointDataTree> inline void setGroupByRandomPercentage( PointDataTree& tree, const Name& group, const float percentage = 10.0f, const unsigned int seed = 0) { using RandomFilter = RandomLeafFilter<PointDataTree, std::mt19937>; const int currentPoints = static_cast<int>(pointCount(tree)); const int targetPoints = int(math::Round((percentage * float(currentPoints))/100.0f)); RandomFilter filter(tree, targetPoints, seed); setGroupByFilter<PointDataTree, RandomFilter>(tree, group, filter); } //////////////////////////////////////// } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_POINTS_POINT_GROUP_HAS_BEEN_INCLUDED
22,542
C
30.008253
121
0.630201
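A hedged usage sketch for the group functions declared in PointGroup.h above, assuming a PointDataGrid ("grid") built elsewhere; the group name "selected" is arbitrary.

#include <openvdb/points/PointDataGrid.h>
#include <openvdb/points/PointGroup.h>

void tagAllPoints(openvdb::points::PointDataGrid& grid)
{
    using namespace openvdb::points;

    appendGroup(grid.tree(), "selected");      // add the group (appends an attribute if needed)
    setGroup(grid.tree(), "selected", true);   // every point becomes a member
    // ...
    dropGroup(grid.tree(), "selected");        // remove the group and compact the group arrays
}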
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/IndexIterator.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file points/IndexIterator.h /// /// @author Dan Bailey /// /// @brief Index Iterators. #ifndef OPENVDB_POINTS_INDEX_ITERATOR_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_INDEX_ITERATOR_HAS_BEEN_INCLUDED #include <openvdb/version.h> #include <openvdb/Types.h> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { /// @brief Count up the number of times the iterator can iterate /// /// @param iter the iterator. /// /// @note counting by iteration only performed where a dynamic filter is in use, template <typename IterT> inline Index64 iterCount(const IterT& iter); //////////////////////////////////////// namespace index { // Enum for informing early-exit optimizations // PARTIAL - No optimizations are possible // NONE - No indices to evaluate, can skip computation // ALL - All indices to evaluate, can skip filtering enum State { PARTIAL=0, NONE, ALL }; } /// @brief A no-op filter that can be used when iterating over all indices /// @see points/IndexFilter.h for the documented interface for an index filter class NullFilter { public: static bool initialized() { return true; } static index::State state() { return index::ALL; } template <typename LeafT> static index::State state(const LeafT&) { return index::ALL; } template <typename LeafT> void reset(const LeafT&) { } template <typename IterT> static bool valid(const IterT&) { return true; } }; // class NullFilter /// @brief A forward iterator over array indices in a single voxel class ValueVoxelCIter { public: struct Parent { Parent() = default; explicit Parent(Index32 offset): mOffset(offset) { } Index32 getValue(unsigned /*offset*/) const { return mOffset; } private: Index32 mOffset = 0; }; // struct Parent using NodeType = Parent; ValueVoxelCIter() = default; ValueVoxelCIter(Index32 prevOffset, Index32 offset) : mOffset(offset), mParent(prevOffset) {} ValueVoxelCIter(const ValueVoxelCIter& other) : mOffset(other.mOffset), mParent(other.mParent), mValid(other.mValid) {} /// @brief Return the item to which this iterator is currently pointing. Index32 operator*() { return mOffset; } Index32 operator*() const { return mOffset; } /// @brief Advance to the next (valid) item (prefix). 
ValueVoxelCIter& operator++() { mValid = false; return *this; } operator bool() const { return mValid; } bool test() const { return mValid; } Index32 end() const { return mOffset+1; } void reset(Index32 /*item*/, Index32 /*end*/) {} Parent& parent() { return mParent; } Index32 offset() { return mOffset; } inline bool next() { this->operator++(); return this->test(); } /// @brief For efficiency, Coord and active state assumed to be readily available /// when iterating over indices of a single voxel Coord getCoord [[noreturn]] () const { OPENVDB_THROW(RuntimeError, "ValueVoxelCIter does not provide a valid Coord."); } void getCoord [[noreturn]] (Coord& /*coord*/) const { OPENVDB_THROW(RuntimeError, "ValueVoxelCIter does not provide a valid Coord."); } bool isValueOn [[noreturn]] () const { OPENVDB_THROW(RuntimeError, "ValueVoxelCIter does not test if voxel is active."); } /// @{ /// @brief Equality operators bool operator==(const ValueVoxelCIter& other) const { return mOffset == other.mOffset; } bool operator!=(const ValueVoxelCIter& other) const { return !this->operator==(other); } /// @} private: Index32 mOffset = 0; Parent mParent; mutable bool mValid = true; }; // class ValueVoxelCIter /// @brief A forward iterator over array indices with filtering /// IteratorT can be either IndexIter or ValueIndexIter (or some custom index iterator) /// FilterT should be a struct or class with a valid() method than can be evaluated per index /// Here's a simple filter example that only accepts even indices: /// /// struct EvenIndexFilter /// { /// bool valid(const Index32 offset) const { /// return (offset % 2) == 0; /// } /// }; /// template <typename IteratorT, typename FilterT> class IndexIter { public: /// @brief A forward iterator over array indices from a value iterator (such as ValueOnCIter) class ValueIndexIter { public: ValueIndexIter(const IteratorT& iter) : mIter(iter), mParent(&mIter.parent()) { if (mIter) { assert(mParent); Index32 start = (mIter.offset() > 0 ? Index32(mParent->getValue(mIter.offset() - 1)) : Index32(0)); this->reset(start, *mIter); if (mItem >= mEnd) this->operator++(); } } ValueIndexIter(const ValueIndexIter& other) : mEnd(other.mEnd), mItem(other.mItem), mIter(other.mIter), mParent(other.mParent) { assert(mParent); } ValueIndexIter& operator=(const ValueIndexIter&) = default; inline Index32 end() const { return mEnd; } inline void reset(Index32 item, Index32 end) { mItem = item; mEnd = end; } /// @brief Returns the item to which this iterator is currently pointing. inline Index32 operator*() { assert(mIter); return mItem; } inline Index32 operator*() const { assert(mIter); return mItem; } /// @brief Return @c true if this iterator is not yet exhausted. inline operator bool() const { return mIter; } inline bool test() const { return mIter; } /// @brief Advance to the next (valid) item (prefix). inline ValueIndexIter& operator++() { ++mItem; while (mItem >= mEnd && mIter.next()) { assert(mParent); this->reset(mParent->getValue(mIter.offset() - 1), *mIter); } return *this; } /// @brief Advance to the next (valid) item. inline bool next() { this->operator++(); return this->test(); } inline bool increment() { this->next(); return this->test(); } /// Return the coordinates of the item to which the value iterator is pointing. inline Coord getCoord() const { assert(mIter); return mIter.getCoord(); } /// Return in @a xyz the coordinates of the item to which the value iterator is pointing. 
inline void getCoord(Coord& xyz) const { assert(mIter); xyz = mIter.getCoord(); } /// @brief Return @c true if this iterator is pointing to an active value. inline bool isValueOn() const { assert(mIter); return mIter.isValueOn(); } /// Return the const value iterator inline const IteratorT& valueIter() const { return mIter; } /// @brief Equality operators bool operator==(const ValueIndexIter& other) const { return mItem == other.mItem; } bool operator!=(const ValueIndexIter& other) const { return !this->operator==(other); } private: Index32 mEnd = 0; Index32 mItem = 0; IteratorT mIter; const typename IteratorT::NodeType* mParent; }; // ValueIndexIter IndexIter(const IteratorT& iterator, const FilterT& filter) : mIterator(iterator) , mFilter(filter) { if (!mFilter.initialized()) { OPENVDB_THROW(RuntimeError, "Filter needs to be initialized before constructing the iterator."); } if (mIterator) { this->reset(*mIterator, mIterator.end()); } } IndexIter(const IndexIter& other) : mIterator(other.mIterator) , mFilter(other.mFilter) { if (!mFilter.initialized()) { OPENVDB_THROW(RuntimeError, "Filter needs to be initialized before constructing the iterator."); } } IndexIter& operator=(const IndexIter& other) { if (&other != this) { mIterator = other.mIterator; mFilter = other.mFilter; if (!mFilter.initialized()) { OPENVDB_THROW(RuntimeError, "Filter needs to be initialized before constructing the iterator."); } } return *this; } Index32 end() const { return mIterator.end(); } /// @brief Reset the begining and end of the iterator. void reset(Index32 begin, Index32 end) { mIterator.reset(begin, end); while (mIterator.test() && !mFilter.template valid<ValueIndexIter>(mIterator)) { ++mIterator; } } /// @brief Returns the item to which this iterator is currently pointing. Index32 operator*() { assert(mIterator); return *mIterator; } Index32 operator*() const { assert(mIterator); return *mIterator; } /// @brief Return @c true if this iterator is not yet exhausted. operator bool() const { return mIterator.test(); } bool test() const { return mIterator.test(); } /// @brief Advance to the next (valid) item (prefix). IndexIter& operator++() { while (true) { ++mIterator; if (!mIterator.test() || mFilter.template valid<ValueIndexIter>(mIterator)) { break; } } return *this; } /// @brief Advance to the next (valid) item (postfix). IndexIter operator++(int /*dummy*/) { IndexIter newIterator(*this); this->operator++(); return newIterator; } /// @brief Advance to the next (valid) item. bool next() { this->operator++(); return this->test(); } bool increment() { this->next(); return this->test(); } /// Return the const filter inline const FilterT& filter() const { return mFilter; } /// Return the coordinates of the item to which the value iterator is pointing. inline Coord getCoord() const { assert(mIterator); return mIterator.getCoord(); } /// Return in @a xyz the coordinates of the item to which the value iterator is pointing. inline void getCoord(Coord& xyz) const { assert(mIterator); xyz = mIterator.getCoord(); } /// @brief Return @c true if the value iterator is pointing to an active value. 
inline bool isValueOn() const { assert(mIterator); return mIterator.valueIter().isValueOn(); } /// @brief Equality operators bool operator==(const IndexIter& other) const { return mIterator == other.mIterator; } bool operator!=(const IndexIter& other) const { return !this->operator==(other); } private: ValueIndexIter mIterator; FilterT mFilter; }; // class IndexIter //////////////////////////////////////// template <typename IterT> inline Index64 iterCount(const IterT& iter) { Index64 size = 0; for (IterT newIter(iter); newIter; ++newIter, ++size) { } return size; } //////////////////////////////////////// } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_POINTS_INDEX_ITERATOR_HAS_BEEN_INCLUDED
11,115
C
32.684848
98
0.623032
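The following fleshes out the EvenIndexFilter example from the IndexIter documentation above with the full filter interface modelled by NullFilter (initialized / state / reset / valid). It is an illustration only, not part of the library, and can be passed wherever a FilterT is expected (for example setGroupByFilter or a leaf's filtered index iterators).

#include <openvdb/points/IndexIterator.h>

struct EvenIndexFilter
{
    static bool initialized() { return true; }

    // PARTIAL: the filter must be evaluated per index
    static openvdb::points::index::State state() { return openvdb::points::index::PARTIAL; }
    template <typename LeafT>
    static openvdb::points::index::State state(const LeafT&) { return openvdb::points::index::PARTIAL; }

    template <typename LeafT> void reset(const LeafT&) { }

    // accept only even indices
    template <typename IterT>
    bool valid(const IterT& iter) const { return (*iter % 2) == 0; }
};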
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/StreamCompression.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file points/StreamCompression.h /// /// @author Dan Bailey /// /// @brief Convenience wrappers to using Blosc and reading and writing of Paged data. /// /// Blosc is most effective with large (> ~256KB) blocks of data. Writing the entire /// data block contiguously would provide the most optimal compression, however would /// limit the ability to use delayed-loading as the whole block would be required to /// be loaded from disk at once. To balance these two competing factors, Paging is used /// to write out blocks of data that are a reasonable size for Blosc. These Pages are /// loaded lazily, tracking the input stream pointers and creating Handles that reference /// portions of the buffer. When the Page buffer is accessed, the data will be read from /// the stream. #ifndef OPENVDB_TOOLS_STREAM_COMPRESSION_HAS_BEEN_INCLUDED #define OPENVDB_TOOLS_STREAM_COMPRESSION_HAS_BEEN_INCLUDED #include <openvdb/io/io.h> #include <tbb/spin_mutex.h> #include <memory> #include <string> class TestStreamCompression; namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace compression { // This is the minimum number of bytes below which Blosc compression is not used to // avoid unecessary computation, as Blosc offers minimal compression until this limit static const int BLOSC_MINIMUM_BYTES = 48; // This is the minimum number of bytes below which the array is padded with zeros up // to this number of bytes to allow Blosc to perform compression with small arrays static const int BLOSC_PAD_BYTES = 128; /// @brief Returns true if compression is available OPENVDB_API bool bloscCanCompress(); /// @brief Retrieves the uncompressed size of buffer when uncompressed /// /// @param buffer the compressed buffer OPENVDB_API size_t bloscUncompressedSize(const char* buffer); /// @brief Compress into the supplied buffer. /// /// @param compressedBuffer the buffer to compress /// @param compressedBytes number of compressed bytes /// @param bufferBytes the number of bytes in compressedBuffer available to be filled /// @param uncompressedBuffer the uncompressed buffer to compress /// @param uncompressedBytes number of uncompressed bytes OPENVDB_API void bloscCompress(char* compressedBuffer, size_t& compressedBytes, const size_t bufferBytes, const char* uncompressedBuffer, const size_t uncompressedBytes); /// @brief Compress and return the heap-allocated compressed buffer. /// /// @param buffer the buffer to compress /// @param uncompressedBytes number of uncompressed bytes /// @param compressedBytes number of compressed bytes (written to this variable) /// @param resize the compressed buffer will be exactly resized to remove the /// portion used for Blosc overhead, for efficiency this can be /// skipped if it is known that the resulting buffer is temporary OPENVDB_API std::unique_ptr<char[]> bloscCompress(const char* buffer, const size_t uncompressedBytes, size_t& compressedBytes, const bool resize = true); /// @brief Convenience wrapper to retrieve the compressed size of buffer when compressed /// /// @param buffer the uncompressed buffer /// @param uncompressedBytes number of uncompressed bytes OPENVDB_API size_t bloscCompressedSize(const char* buffer, const size_t uncompressedBytes); /// @brief Decompress into the supplied buffer. Will throw if decompression fails or /// uncompressed buffer has insufficient space in which to decompress. 
/// /// @param uncompressedBuffer the uncompressed buffer to decompress into /// @param expectedBytes the number of bytes expected once the buffer is decompressed /// @param bufferBytes the number of bytes in uncompressedBuffer available to be filled /// @param compressedBuffer the compressed buffer to decompress OPENVDB_API void bloscDecompress(char* uncompressedBuffer, const size_t expectedBytes, const size_t bufferBytes, const char* compressedBuffer); /// @brief Decompress and return the the heap-allocated uncompressed buffer. /// /// @param buffer the buffer to decompress /// @param expectedBytes the number of bytes expected once the buffer is decompressed /// @param resize the compressed buffer will be exactly resized to remove the /// portion used for Blosc overhead, for efficiency this can be /// skipped if it is known that the resulting buffer is temporary OPENVDB_API std::unique_ptr<char[]> bloscDecompress(const char* buffer, const size_t expectedBytes, const bool resize = true); //////////////////////////////////////// // 1MB = 1048576 Bytes static const int PageSize = 1024 * 1024; /// @brief Stores a variable-size, compressed, delayed-load Page of data /// that is loaded into memory when accessed. Access to the Page is /// thread-safe as loading and decompressing the data is protected by a mutex. class OPENVDB_API Page { private: struct Info { io::MappedFile::Ptr mappedFile; SharedPtr<io::StreamMetadata> meta; std::streamoff filepos; long compressedBytes; long uncompressedBytes; }; // Info public: using Ptr = std::shared_ptr<Page>; Page() = default; /// @brief load the Page into memory void load() const; /// @brief Uncompressed bytes of the Paged data, available /// when the header has been read. long uncompressedBytes() const; /// @brief Retrieves a data pointer at the specific @param index /// @note Will force a Page load when called. const char* buffer(const int index) const; /// @brief Read the Page header void readHeader(std::istream&); /// @brief Read the Page buffers. If @a delayed is true, stream /// pointers will be stored to load the data lazily. void readBuffers(std::istream&, bool delayed); /// @brief Test if the data is out-of-core bool isOutOfCore() const; private: /// @brief Convenience method to store a copy of the supplied buffer void copy(const std::unique_ptr<char[]>& temp, int pageSize); /// @brief Decompress and store the supplied data void decompress(const std::unique_ptr<char[]>& temp); /// @brief Thread-safe loading of the data void doLoad() const; std::unique_ptr<Info> mInfo = std::unique_ptr<Info>(new Info); std::unique_ptr<char[]> mData; tbb::spin_mutex mMutex; }; // class Page /// @brief A PageHandle holds a unique ptr to a Page and a specific stream /// pointer to a point within the decompressed Page buffer class OPENVDB_API PageHandle { public: #if OPENVDB_ABI_VERSION_NUMBER >= 6 using Ptr = std::unique_ptr<PageHandle>; #else using Ptr = std::shared_ptr<PageHandle>; #endif /// @brief Create the page handle /// @param page a shared ptr to the page that stores the buffer /// @param index start position of the buffer to be read /// @param size total size of the buffer to be read in bytes PageHandle(const Page::Ptr& page, const int index, const int size); /// @brief Retrieve a reference to the stored page Page& page(); /// @brief Return the size of the buffer int size() const { return mSize; } /// @brief Read and return the buffer, loading and decompressing /// the Page if necessary. 
std::unique_ptr<char[]> read(); /// @brief Return a copy of this PageHandle Ptr copy() { return Ptr(new PageHandle(mPage, mIndex, mSize)); } protected: friend class ::TestStreamCompression; private: Page::Ptr mPage; int mIndex = -1; int mSize = 0; }; // class PageHandle /// @brief A Paging wrapper to std::istream that is responsible for reading /// from a given input stream and creating Page objects and PageHandles that /// reference those pages for delayed reading. class OPENVDB_API PagedInputStream { public: using Ptr = std::shared_ptr<PagedInputStream>; PagedInputStream() = default; explicit PagedInputStream(std::istream& is); /// @brief Size-only mode tags the stream as only reading size data. void setSizeOnly(bool sizeOnly) { mSizeOnly = sizeOnly; } bool sizeOnly() const { return mSizeOnly; } // @brief Set and get the input stream std::istream& getInputStream() { assert(mIs); return *mIs; } void setInputStream(std::istream& is) { mIs = &is; } /// @brief Creates a PageHandle to access the next @param n bytes of the Page. PageHandle::Ptr createHandle(std::streamsize n); /// @brief Takes a @a pageHandle and updates the referenced page with the /// current stream pointer position and if @a delayed is false performs /// an immediate read of the data. void read(PageHandle::Ptr& pageHandle, std::streamsize n, bool delayed = true); private: int mByteIndex = 0; int mUncompressedBytes = 0; std::istream* mIs = nullptr; Page::Ptr mPage; bool mSizeOnly = false; }; // class PagedInputStream /// @brief A Paging wrapper to std::ostream that is responsible for writing /// from a given output stream at intervals set by the PageSize. As Pages are /// variable in size, they are flushed to disk as soon as sufficiently large. class OPENVDB_API PagedOutputStream { public: using Ptr = std::shared_ptr<PagedOutputStream>; PagedOutputStream(); explicit PagedOutputStream(std::ostream& os); /// @brief Size-only mode tags the stream as only writing size data. void setSizeOnly(bool sizeOnly) { mSizeOnly = sizeOnly; } bool sizeOnly() const { return mSizeOnly; } /// @brief Set and get the output stream std::ostream& getOutputStream() { assert(mOs); return *mOs; } void setOutputStream(std::ostream& os) { mOs = &os; } /// @brief Writes the given @param str buffer of size @param n PagedOutputStream& write(const char* str, std::streamsize n); /// @brief Manually flushes the current page to disk if non-zero void flush(); private: /// @brief Compress the @param buffer of @param size bytes and write /// out to the stream. void compressAndWrite(const char* buffer, size_t size); /// @brief Resize the internal page buffer to @param size bytes void resize(size_t size); std::unique_ptr<char[]> mData = std::unique_ptr<char[]>(new char[PageSize]); std::unique_ptr<char[]> mCompressedData = nullptr; size_t mCapacity = PageSize; int mBytes = 0; std::ostream* mOs = nullptr; bool mSizeOnly = false; }; // class PagedOutputStream } // namespace compression } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_TOOLS_STREAM_COMPRESSION_HAS_BEEN_INCLUDED
10,745
C
36.055172
94
0.707213
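A round-trip sketch for the Blosc helpers declared in StreamCompression.h above. Blosc support is a compile-time option, so the example guards on bloscCanCompress(); the buffer size and contents are arbitrary.

#include <openvdb/points/StreamCompression.h>
#include <memory>
#include <vector>

void roundTrip()
{
    using namespace openvdb::compression;

    if (!bloscCanCompress()) return;                // built without Blosc

    std::vector<char> data(1024, 'x');              // trivially compressible payload

    size_t compressedBytes = 0;
    std::unique_ptr<char[]> compressed =
        bloscCompress(data.data(), data.size(), compressedBytes);
    if (!compressed) return;                        // compression was not beneficial

    std::unique_ptr<char[]> uncompressed =
        bloscDecompress(compressed.get(), /*expectedBytes=*/data.size());
}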
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/AttributeGroup.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file points/AttributeGroup.h /// /// @author Dan Bailey /// /// @brief Attribute Group access and filtering for iteration. #ifndef OPENVDB_POINTS_ATTRIBUTE_GROUP_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_ATTRIBUTE_GROUP_HAS_BEEN_INCLUDED #include "AttributeArray.h" #include "AttributeSet.h" #include <memory> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { //////////////////////////////////////// struct GroupCodec { using StorageType = GroupType; using ValueType = GroupType; template <typename T> struct Storage { using Type = StorageType; }; static void decode(const StorageType&, ValueType&); static void encode(const ValueType&, StorageType&); static const char* name() { return "grp"; } }; using GroupAttributeArray = TypedAttributeArray<GroupType, GroupCodec>; //////////////////////////////////////// inline void GroupCodec::decode(const StorageType& data, ValueType& val) { val = data; } inline void GroupCodec::encode(const ValueType& val, StorageType& data) { data = val; } //////////////////////////////////////// inline bool isGroup(const AttributeArray& array) { return array.isType<GroupAttributeArray>(); } //////////////////////////////////////// class OPENVDB_API GroupHandle { public: using Ptr = std::shared_ptr<GroupHandle>; using UniquePtr = std::unique_ptr<GroupHandle>; // Dummy class that distinguishes an offset from a bitmask on construction struct BitMask { }; using GroupIndex = std::pair<Index, uint8_t>; GroupHandle(const GroupAttributeArray& array, const GroupType& offset); GroupHandle(const GroupAttributeArray& array, const GroupType& bitMask, BitMask); Index size() const { return mArray.size(); } bool isUniform() const { return mArray.isUniform(); } bool get(Index n) const; bool getUnsafe(Index n) const; protected: const GroupAttributeArray& mArray; const GroupType mBitMask; }; // class GroupHandle //////////////////////////////////////// class OPENVDB_API GroupWriteHandle : public GroupHandle { public: using Ptr = std::shared_ptr<GroupWriteHandle>; using UniquePtr = std::unique_ptr<GroupWriteHandle>; GroupWriteHandle(GroupAttributeArray& array, const GroupType& offset); /// Set @a on at the given index @a n void set(Index n, bool on); /// Set @a on at the given index @a n (assumes in-core and non-uniform) void setUnsafe(Index n, bool on); /// @brief Set membership for the whole array and attempt to collapse /// /// @param on True or false for inclusion in group /// /// @note This method guarantees that all attributes will have group membership /// changed according to the input bool, however compaction will not be performed /// if other groups that share the same underlying array are non-uniform. /// The return value indicates if the group array ends up being uniform. 
bool collapse(bool on); /// Compact the existing array to become uniform if all values are identical bool compact(); }; // class GroupWriteHandle //////////////////////////////////////// /// Index filtering on group membership class GroupFilter { public: GroupFilter(const Name& name, const AttributeSet& attributeSet) : mIndex(attributeSet.groupIndex(name)) { } explicit GroupFilter(const AttributeSet::Descriptor::GroupIndex& index) : mIndex(index) { } inline bool initialized() const { return bool(mHandle); } static index::State state() { return index::PARTIAL; } template <typename LeafT> static index::State state(const LeafT&) { return index::PARTIAL; } template <typename LeafT> void reset(const LeafT& leaf) { mHandle.reset(new GroupHandle(leaf.groupHandle(mIndex))); } template <typename IterT> bool valid(const IterT& iter) const { assert(mHandle); return mHandle->getUnsafe(*iter); } private: const AttributeSet::Descriptor::GroupIndex mIndex; GroupHandle::Ptr mHandle; }; // class GroupFilter //////////////////////////////////////// } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_POINTS_ATTRIBUTE_GROUP_HAS_BEEN_INCLUDED
4,385
C
23.920454
85
0.659293
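A sketch of filtered iteration using the GroupFilter declared in AttributeGroup.h above. Both the PointDataGrid leaf node and the group name "selected" are assumptions; the leaf's filtered index iterator and iterCount() come from the point-data headers included below.

#include <openvdb/points/AttributeGroup.h>
#include <openvdb/points/PointDataGrid.h>

template <typename LeafT>
openvdb::Index64 countSelected(const LeafT& leaf)
{
    using namespace openvdb::points;

    // the filter resolves the group's array and bit offset through the attribute set
    GroupFilter filter("selected", leaf.attributeSet());

    // iterate over active voxels, visiting only indices that belong to the group
    auto iter = leaf.beginIndexOn(filter);
    return iterCount(iter);
}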
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/AttributeArray.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

/// @file points/AttributeArray.cc

#include "AttributeArray.h"

#include <map>

namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace points {


////////////////////////////////////////


namespace {

using AttributeFactoryMap = std::map<NamePair, AttributeArray::FactoryMethod>;

struct LockedAttributeRegistry
{
    tbb::spin_mutex mMutex;
    AttributeFactoryMap mMap;
};

// Global function for accessing the registry
LockedAttributeRegistry*
getAttributeRegistry()
{
    static LockedAttributeRegistry registry;
    return &registry;
}

} // unnamed namespace


////////////////////////////////////////

// AttributeArray::ScopedRegistryLock implementation

AttributeArray::ScopedRegistryLock::ScopedRegistryLock()
    : lock(getAttributeRegistry()->mMutex)
{
}


////////////////////////////////////////

// AttributeArray implementation

#if OPENVDB_ABI_VERSION_NUMBER >= 6

#if OPENVDB_ABI_VERSION_NUMBER >= 7
AttributeArray::AttributeArray(const AttributeArray& rhs)
    : AttributeArray(rhs, tbb::spin_mutex::scoped_lock(rhs.mMutex))
{
}

AttributeArray::AttributeArray(const AttributeArray& rhs, const tbb::spin_mutex::scoped_lock&)
#else
AttributeArray::AttributeArray(const AttributeArray& rhs)
#endif
    : mIsUniform(rhs.mIsUniform)
    , mFlags(rhs.mFlags)
    , mUsePagedRead(rhs.mUsePagedRead)
    , mOutOfCore(rhs.mOutOfCore)
    , mPageHandle()
{
    if (mFlags & PARTIALREAD)       mCompressedBytes = rhs.mCompressedBytes;
    else if (rhs.mPageHandle)       mPageHandle = rhs.mPageHandle->copy();
}

AttributeArray&
AttributeArray::operator=(const AttributeArray& rhs)
{
    // if this AttributeArray has been partially read, zero the compressed bytes,
    // so the page handle won't attempt to clean up invalid memory
    if (mFlags & PARTIALREAD)       mCompressedBytes = 0;

    mIsUniform = rhs.mIsUniform;
    mFlags = rhs.mFlags;
    mUsePagedRead = rhs.mUsePagedRead;
    mOutOfCore = rhs.mOutOfCore;

    if (mFlags & PARTIALREAD)       mCompressedBytes = rhs.mCompressedBytes;
    else if (rhs.mPageHandle)       mPageHandle = rhs.mPageHandle->copy();
    else                            mPageHandle.reset();

    return *this;
}

#endif

AttributeArray::Ptr
AttributeArray::create(const NamePair& type, Index length, Index stride,
    bool constantStride, const Metadata* metadata, const ScopedRegistryLock* lock)
{
    auto* registry = getAttributeRegistry();

    tbb::spin_mutex::scoped_lock _lock;
    if (!lock)  _lock.acquire(registry->mMutex);

    auto iter = registry->mMap.find(type);

    if (iter == registry->mMap.end()) {
        OPENVDB_THROW(LookupError,
            "Cannot create attribute of unregistered type " << type.first << "_" << type.second);
    }

    return (iter->second)(length, stride, constantStride, metadata);
}

bool
AttributeArray::isRegistered(const NamePair& type, const ScopedRegistryLock* lock)
{
    LockedAttributeRegistry* registry = getAttributeRegistry();

    tbb::spin_mutex::scoped_lock _lock;
    if (!lock)  _lock.acquire(registry->mMutex);

    return (registry->mMap.find(type) != registry->mMap.end());
}

void
AttributeArray::clearRegistry(const ScopedRegistryLock* lock)
{
    LockedAttributeRegistry* registry = getAttributeRegistry();

    tbb::spin_mutex::scoped_lock _lock;
    if (!lock)  _lock.acquire(registry->mMutex);

    registry->mMap.clear();
}

void
AttributeArray::registerType(const NamePair& type, FactoryMethod factory,
    const ScopedRegistryLock* lock)
{
    { // check the type of the AttributeArray generated by the factory method
        auto array = (*factory)(/*length=*/0, /*stride=*/0, /*constantStride=*/false,
            /*metadata=*/nullptr);
        const NamePair& factoryType = array->type();
        if (factoryType != type) {
            OPENVDB_THROW(KeyError, "Attribute type " << type.first << "_" << type.second
                << " does not match the type created by the factory method "
                << factoryType.first << "_" << factoryType.second << ".");
        }
    }

    LockedAttributeRegistry* registry = getAttributeRegistry();

    tbb::spin_mutex::scoped_lock _lock;
    if (!lock)  _lock.acquire(registry->mMutex);

    registry->mMap[type] = factory;
}

void
AttributeArray::unregisterType(const NamePair& type, const ScopedRegistryLock* lock)
{
    LockedAttributeRegistry* registry = getAttributeRegistry();

    tbb::spin_mutex::scoped_lock _lock;
    if (!lock)  _lock.acquire(registry->mMutex);

    registry->mMap.erase(type);
}

void
AttributeArray::setTransient(bool state)
{
    if (state) mFlags = static_cast<uint8_t>(mFlags | Int16(TRANSIENT));
    else       mFlags = static_cast<uint8_t>(mFlags & ~Int16(TRANSIENT));
}

void
AttributeArray::setHidden(bool state)
{
    if (state) mFlags = static_cast<uint8_t>(mFlags | Int16(HIDDEN));
    else       mFlags = static_cast<uint8_t>(mFlags & ~Int16(HIDDEN));
}

void
AttributeArray::setStreaming(bool state)
{
    if (state) mFlags = static_cast<uint8_t>(mFlags | Int16(STREAMING));
    else       mFlags = static_cast<uint8_t>(mFlags & ~Int16(STREAMING));
}

void
AttributeArray::setConstantStride(bool state)
{
    if (state) mFlags = static_cast<uint8_t>(mFlags | Int16(CONSTANTSTRIDE));
    else       mFlags = static_cast<uint8_t>(mFlags & ~Int16(CONSTANTSTRIDE));
}

bool
AttributeArray::operator==(const AttributeArray& other) const
{
    this->loadData();
    other.loadData();

    if (this->mUsePagedRead != other.mUsePagedRead ||
        this->mFlags != other.mFlags) return false;

    return this->isEqual(other);
}

} // namespace points
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
5,735
C++
26.184834
108
0.678989
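The translation unit above implements the locked type registry behind AttributeArray::create(), registerType() and related functions. The sketch below is illustrative only: the function name createArraysExample, the attribute lengths, and the reuse of a single ScopedRegistryLock across two create() calls are assumptions built from the signatures shown, and openvdb::initialize() is assumed to have registered the built-in attribute types beforehand.

// Illustrative sketch (not part of the OpenVDB sources): creating two attribute
// arrays under one registry lock, mirroring the optional ScopedRegistryLock
// parameter implemented in AttributeArray.cc above.
#include <openvdb/openvdb.h>
#include <openvdb/points/AttributeArray.h>

void createArraysExample()
{
    using namespace openvdb::points;

    openvdb::initialize(); // registers the built-in attribute types

    const openvdb::NamePair& floatType = TypedAttributeArray<float>::attributeType();
    const openvdb::NamePair& vec3Type =
        TypedAttributeArray<openvdb::Vec3f>::attributeType();

    // Acquire the registry mutex once and pass it to both creations so that
    // create() does not re-lock internally.
    AttributeArray::ScopedRegistryLock lock;
    AttributeArray::Ptr density = AttributeArray::create(floatType, /*length=*/100,
        /*stride=*/1, /*constantStride=*/true, /*metadata=*/nullptr, &lock);
    AttributeArray::Ptr velocity = AttributeArray::create(vec3Type, /*length=*/100,
        /*stride=*/1, /*constantStride=*/true, /*metadata=*/nullptr, &lock);

    (void)density; (void)velocity; // lengths and names here are arbitrary
}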
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/PointAdvect.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @author Dan Bailey /// /// @file points/PointAdvect.h /// /// @brief Ability to advect VDB Points through a velocity field. #ifndef OPENVDB_POINTS_POINT_ADVECT_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_POINT_ADVECT_HAS_BEEN_INCLUDED #include <openvdb/openvdb.h> #include <openvdb/tools/Prune.h> #include <openvdb/tools/VelocityFields.h> #include <openvdb/points/AttributeGroup.h> #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointGroup.h> #include <openvdb/points/PointMove.h> #include <memory> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { /// @brief Advect points in a PointDataGrid through a velocity grid /// @param points the PointDataGrid containing the points to be advected. /// @param velocity a velocity grid to be sampled. /// @param integrationOrder the integration scheme to use (1 is forward euler, 4 is runge-kutta 4th) /// @param dt delta time. /// @param timeSteps number of advection steps to perform. /// @param advectFilter an optional advection index filter (moves a subset of the points) /// @param filter an optional index filter (deletes a subset of the points) /// @param cached caches velocity interpolation for faster performance, disable to use /// less memory (default is on). template <typename PointDataGridT, typename VelGridT, typename AdvectFilterT = NullFilter, typename FilterT = NullFilter> inline void advectPoints(PointDataGridT& points, const VelGridT& velocity, const Index integrationOrder, const double dt, const Index timeSteps, const AdvectFilterT& advectFilter = NullFilter(), const FilterT& filter = NullFilter(), const bool cached = true); //////////////////////////////////////// namespace point_advect_internal { enum IntegrationOrder { INTEGRATION_ORDER_FWD_EULER = 1, INTEGRATION_ORDER_RK_2ND, INTEGRATION_ORDER_RK_3RD, INTEGRATION_ORDER_RK_4TH }; template <typename VelGridT, Index IntegrationOrder, bool Staggered, typename FilterT> class AdvectionDeformer { public: using IntegratorT = openvdb::tools::VelocityIntegrator<VelGridT, Staggered>; AdvectionDeformer(const VelGridT& velocityGrid, const double timeStep, const int steps, const FilterT& filter) : mIntegrator(velocityGrid) , mTimeStep(timeStep) , mSteps(steps) , mFilter(filter) { } template <typename LeafT> void reset(const LeafT& leaf, size_t /*idx*/) { mFilter.reset(leaf); } template <typename IndexIterT> void apply(Vec3d& position, const IndexIterT& iter) const { if (mFilter.valid(iter)) { for (int n = 0; n < mSteps; ++n) { mIntegrator.template rungeKutta<IntegrationOrder, openvdb::Vec3d>( static_cast<typename IntegratorT::ElementType>(mTimeStep), position); } } } private: IntegratorT mIntegrator; double mTimeStep; const int mSteps; FilterT mFilter; }; // class AdvectionDeformer template <typename PointDataGridT, typename VelGridT, typename AdvectFilterT, typename FilterT> struct AdvectionOp { using CachedDeformerT = CachedDeformer<double>; AdvectionOp(PointDataGridT& points, const VelGridT& velocity, const Index integrationOrder, const double timeStep, const Index steps, const AdvectFilterT& advectFilter, const FilterT& filter) : mPoints(points) , mVelocity(velocity) , mIntegrationOrder(integrationOrder) , mTimeStep(timeStep) , mSteps(steps) , mAdvectFilter(advectFilter) , mFilter(filter) { } void cache() { mCachedDeformer.reset(new CachedDeformerT(mCache)); (*this)(true); } void advect() { (*this)(false); } private: template <int 
IntegrationOrder, bool Staggered> void resolveIntegrationOrder(bool buildCache) { const auto leaf = mPoints.constTree().cbeginLeaf(); if (!leaf) return; // move points according to the pre-computed cache if (!buildCache && mCachedDeformer) { movePoints(mPoints, *mCachedDeformer, mFilter); return; } NullFilter nullFilter; if (buildCache) { // disable group filtering from the advection deformer and perform group filtering // in the cache deformer instead, this restricts the cache to just containing // positions from points which are both deforming *and* are not being deleted AdvectionDeformer<VelGridT, IntegrationOrder, Staggered, NullFilter> deformer( mVelocity, mTimeStep, mSteps, nullFilter); if (mFilter.state() == index::ALL && mAdvectFilter.state() == index::ALL) { mCachedDeformer->evaluate(mPoints, deformer, nullFilter); } else { BinaryFilter<AdvectFilterT, FilterT, /*And=*/true> binaryFilter( mAdvectFilter, mFilter); mCachedDeformer->evaluate(mPoints, deformer, binaryFilter); } } else { // revert to NullFilter if all points are being evaluated if (mAdvectFilter.state() == index::ALL) { AdvectionDeformer<VelGridT, IntegrationOrder, Staggered, NullFilter> deformer( mVelocity, mTimeStep, mSteps, nullFilter); movePoints(mPoints, deformer, mFilter); } else { AdvectionDeformer<VelGridT, IntegrationOrder, Staggered, AdvectFilterT> deformer( mVelocity, mTimeStep, mSteps, mAdvectFilter); movePoints(mPoints, deformer, mFilter); } } } template <bool Staggered> void resolveStaggered(bool buildCache) { if (mIntegrationOrder == INTEGRATION_ORDER_FWD_EULER) { resolveIntegrationOrder<1, Staggered>(buildCache); } else if (mIntegrationOrder == INTEGRATION_ORDER_RK_2ND) { resolveIntegrationOrder<2, Staggered>(buildCache); } else if (mIntegrationOrder == INTEGRATION_ORDER_RK_3RD) { resolveIntegrationOrder<3, Staggered>(buildCache); } else if (mIntegrationOrder == INTEGRATION_ORDER_RK_4TH) { resolveIntegrationOrder<4, Staggered>(buildCache); } } void operator()(bool buildCache) { // early-exit if no leafs if (mPoints.constTree().leafCount() == 0) return; if (mVelocity.getGridClass() == openvdb::GRID_STAGGERED) { resolveStaggered<true>(buildCache); } else { resolveStaggered<false>(buildCache); } } PointDataGridT& mPoints; const VelGridT& mVelocity; const Index mIntegrationOrder; const double mTimeStep; const Index mSteps; const AdvectFilterT& mAdvectFilter; const FilterT& mFilter; CachedDeformerT::Cache mCache; std::unique_ptr<CachedDeformerT> mCachedDeformer; }; // struct AdvectionOp } // namespace point_advect_internal //////////////////////////////////////// template <typename PointDataGridT, typename VelGridT, typename AdvectFilterT, typename FilterT> inline void advectPoints(PointDataGridT& points, const VelGridT& velocity, const Index integrationOrder, const double timeStep, const Index steps, const AdvectFilterT& advectFilter, const FilterT& filter, const bool cached) { using namespace point_advect_internal; if (steps == 0) return; if (integrationOrder > 4) { throw ValueError{"Unknown integration order for advecting points."}; } AdvectionOp<PointDataGridT, VelGridT, AdvectFilterT, FilterT> op( points, velocity, integrationOrder, timeStep, steps, advectFilter, filter); // if caching is enabled, sample the velocity field using a CachedDeformer to store the // intermediate positions before moving the points, this uses more memory but typically // results in faster overall performance if (cached) op.cache(); // advect the points op.advect(); } } // namespace points } // namespace OPENVDB_VERSION_NAME } 
// namespace openvdb #endif // OPENVDB_POINTS_POINT_ADVECT_HAS_BEEN_INCLUDED
8,666
C++
33.947581
104
0.639626
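A small usage sketch for the advectPoints() entry point declared in PointAdvect.h above. The function name advectExample, the choice of a Vec3SGrid (world-space Vec3f) velocity field, and the step values are assumptions made for illustration, not part of the header.

// Illustrative sketch (not part of the OpenVDB sources): advect an existing
// point grid through a velocity field for ten sub-steps of a 24 fps frame
// using fourth-order Runge-Kutta integration.
#include <openvdb/openvdb.h>
#include <openvdb/points/PointAdvect.h>

void advectExample(openvdb::points::PointDataGrid& points,
                   const openvdb::Vec3SGrid& velocity)
{
    openvdb::points::advectPoints(points, velocity,
        /*integrationOrder=*/4,
        /*dt=*/(1.0 / 24.0) / 10.0,
        /*timeSteps=*/10);
}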
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/AttributeSet.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file points/AttributeSet.h /// /// @authors Dan Bailey, Mihai Alden /// /// @brief Set of Attribute Arrays which tracks metadata about each array. #ifndef OPENVDB_POINTS_ATTRIBUTE_SET_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_ATTRIBUTE_SET_HAS_BEEN_INCLUDED #include "AttributeArray.h" #include <openvdb/version.h> #include <openvdb/MetaMap.h> #include <limits> #include <memory> #include <vector> class TestAttributeSet; namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { using GroupType = uint8_t; //////////////////////////////////////// /// Ordered collection of uniquely-named attribute arrays class OPENVDB_API AttributeSet { public: enum { INVALID_POS = std::numeric_limits<size_t>::max() }; using Ptr = std::shared_ptr<AttributeSet>; using ConstPtr = std::shared_ptr<const AttributeSet>; using UniquePtr = std::unique_ptr<AttributeSet>; class Descriptor; using DescriptorPtr = std::shared_ptr<Descriptor>; using DescriptorConstPtr = std::shared_ptr<const Descriptor>; ////////// struct Util { /// Attribute and type name pair. struct NameAndType { NameAndType(const std::string& n, const NamePair& t, const Index s = 1) : name(n), type(t), stride(s) {} Name name; NamePair type; Index stride; }; using NameAndTypeVec = std::vector<NameAndType>; using NameToPosMap = std::map<std::string, size_t>; using GroupIndex = std::pair<size_t, uint8_t>; }; ////////// AttributeSet(); /// Construct a new AttributeSet from the given AttributeSet. /// @param attributeSet the old attribute set /// @param arrayLength the desired length of the arrays in the new AttributeSet /// @param lock an optional scoped registry lock to avoid contention /// @note This constructor is typically used to resize an existing AttributeSet as /// it transfers attribute metadata such as hidden and transient flags AttributeSet(const AttributeSet& attributeSet, Index arrayLength, const AttributeArray::ScopedRegistryLock* lock = nullptr); /// Construct a new AttributeSet from the given Descriptor. /// @param descriptor stored in the new AttributeSet and used in construction /// @param arrayLength the desired length of the arrays in the new AttributeSet /// @param lock an optional scoped registry lock to avoid contention /// @note Descriptors do not store attribute metadata such as hidden and transient flags /// which live on the AttributeArrays, so for constructing from an existing AttributeSet /// use the AttributeSet(const AttributeSet&, Index) constructor instead AttributeSet(const DescriptorPtr& descriptor, Index arrayLength = 1, const AttributeArray::ScopedRegistryLock* lock = nullptr); /// Shallow copy constructor, the descriptor and attribute arrays will be shared. AttributeSet(const AttributeSet&); /// Disallow copy assignment, since it wouldn't be obvious whether the copy is deep or shallow. AttributeSet& operator=(const AttributeSet&) = delete; //@{ /// @brief Return a reference to this attribute set's descriptor, which might /// be shared with other sets. Descriptor& descriptor() { return *mDescr; } const Descriptor& descriptor() const { return *mDescr; } //@} /// @brief Return a pointer to this attribute set's descriptor, which might be /// shared with other sets DescriptorPtr descriptorPtr() const { return mDescr; } /// Return the number of attributes in this set. size_t size() const { return mAttrs.size(); } /// Return the number of bytes of memory used by this attribute set. 
size_t memUsage() const; /// @brief Return the position of the attribute array whose name is @a name, /// or @c INVALID_POS if no match is found. size_t find(const std::string& name) const; /// @brief Replace the attribute array whose name is @a name. /// @return The position of the updated attribute array or @c INVALID_POS /// if the given name does not exist or if the replacement failed because /// the new array type does not comply with the descriptor. size_t replace(const std::string& name, const AttributeArray::Ptr&); /// @brief Replace the attribute array stored at position @a pos in this container. /// @return The position of the updated attribute array or @c INVALID_POS /// if replacement failed because the new array type does not comply with /// the descriptor. size_t replace(size_t pos, const AttributeArray::Ptr&); //@{ /// @brief Return a pointer to the attribute array whose name is @a name or /// a null pointer if no match is found. const AttributeArray* getConst(const std::string& name) const; const AttributeArray* get(const std::string& name) const; AttributeArray* get(const std::string& name); //@} //@{ /// @brief Return a pointer to the attribute array stored at position @a pos /// in this set. const AttributeArray* getConst(size_t pos) const; const AttributeArray* get(size_t pos) const; AttributeArray* get(size_t pos); //@} //@{ /// @brief Return the group offset from the name or index of the group /// A group attribute array is a single byte (8-bit), each bit of which /// can denote a group. The group offset is the position of the bit that /// denotes the requested group if all group attribute arrays in the set /// (and only attribute arrays marked as group) were to be laid out linearly /// according to their order in the set. size_t groupOffset(const Name& groupName) const; size_t groupOffset(const Util::GroupIndex& index) const; //@} /// Return the group index from the name of the group Util::GroupIndex groupIndex(const Name& groupName) const; /// Return the group index from the offset of the group /// @note see offset description for groupOffset() Util::GroupIndex groupIndex(const size_t offset) const; /// Return the indices of the attribute arrays which are group attribute arrays std::vector<size_t> groupAttributeIndices() const; /// Return true if the attribute array stored at position @a pos is shared. bool isShared(size_t pos) const; /// @brief If the attribute array stored at position @a pos is shared, /// replace the array with a deep copy of itself that is not /// shared with anyone else. 
void makeUnique(size_t pos); /// Append attribute @a attribute (simple method) AttributeArray::Ptr appendAttribute(const Name& name, const NamePair& type, const Index strideOrTotalSize = 1, const bool constantStride = true, const Metadata* defaultValue = nullptr); /// Append attribute @a attribute (descriptor-sharing) /// Requires current descriptor to match @a expected /// On append, current descriptor is replaced with @a replacement /// Provide a @a lock object to avoid contention from appending in parallel AttributeArray::Ptr appendAttribute(const Descriptor& expected, DescriptorPtr& replacement, const size_t pos, const Index strideOrTotalSize = 1, const bool constantStride = true, const Metadata* defaultValue = nullptr, const AttributeArray::ScopedRegistryLock* lock = nullptr); /// @brief Remove and return an attribute array by name /// @param name the name of the attribute array to release /// @details Detaches the attribute array from this attribute set and returns it, if /// @a name is invalid, returns an empty shared pointer. This also updates the descriptor /// to remove the reference to the attribute array. /// @note AttributeArrays are stored as shared pointers, so they are not guaranteed /// to be unique. Check the reference count before blindly re-using in a new AttributeSet. AttributeArray::Ptr removeAttribute(const Name& name); /// @brief Remove and return an attribute array by index /// @param pos the position index of the attribute to release /// @details Detaches the attribute array from this attribute set and returns it, if /// @a pos is invalid, returns an empty shared pointer. This also updates the descriptor /// to remove the reference to the attribute array. /// @note AttributeArrays are stored as shared pointers, so they are not guaranteed /// to be unique. Check the reference count before blindly re-using in a new AttributeSet. AttributeArray::Ptr removeAttribute(const size_t pos); /// @brief Remove and return an attribute array by index (unsafe method) /// @param pos the position index of the attribute to release /// @details Detaches the attribute array from this attribute set and returns it, if /// @a pos is invalid, returns an empty shared pointer. /// In cases where the AttributeSet is due to be destroyed, a small performance /// advantage can be gained by leaving the attribute array as a nullptr and not /// updating the descriptor. However, this leaves the AttributeSet in an invalid /// state making it unsafe to call any methods that implicitly derefence the attribute array. /// @note AttributeArrays are stored as shared pointers, so they are not guaranteed /// to be unique. Check the reference count before blindly re-using in a new AttributeSet. /// @warning Only use this method if you're an expert and know the risks of not /// updating the array of attributes or the descriptor. 
AttributeArray::Ptr removeAttributeUnsafe(const size_t pos); /// Drop attributes with @a pos indices (simple method) /// Creates a new descriptor for this attribute set void dropAttributes(const std::vector<size_t>& pos); /// Drop attributes with @a pos indices (descriptor-sharing method) /// Requires current descriptor to match @a expected /// On drop, current descriptor is replaced with @a replacement void dropAttributes(const std::vector<size_t>& pos, const Descriptor& expected, DescriptorPtr& replacement); /// Re-name attributes in set to match a provided descriptor /// Replaces own descriptor with @a replacement void renameAttributes(const Descriptor& expected, const DescriptorPtr& replacement); /// Re order attribute set to match a provided descriptor /// Replaces own descriptor with @a replacement void reorderAttributes(const DescriptorPtr& replacement); /// Replace the current descriptor with a @a replacement /// Note the provided Descriptor must be identical to the replacement /// unless @a allowMismatchingDescriptors is true (default is false) void resetDescriptor(const DescriptorPtr& replacement, const bool allowMismatchingDescriptors = false); /// Read the entire set from a stream. void read(std::istream&); /// Write the entire set to a stream. /// @param outputTransient if true, write out transient attributes void write(std::ostream&, bool outputTransient = false) const; /// This will read the attribute descriptor from a stream. void readDescriptor(std::istream&); /// This will write the attribute descriptor to a stream. /// @param outputTransient if true, write out transient attributes void writeDescriptor(std::ostream&, bool outputTransient = false) const; /// This will read the attribute metadata from a stream. void readMetadata(std::istream&); /// This will write the attribute metadata to a stream. /// @param outputTransient if true, write out transient attributes /// @param paged if true, data is written out in pages void writeMetadata(std::ostream&, bool outputTransient = false, bool paged = false) const; /// This will read the attribute data from a stream. void readAttributes(std::istream&); /// This will write the attribute data to a stream. /// @param outputTransient if true, write out transient attributes void writeAttributes(std::ostream&, bool outputTransient = false) const; /// Compare the descriptors and attribute arrays on the attribute sets /// Exit early if the descriptors do not match bool operator==(const AttributeSet& other) const; bool operator!=(const AttributeSet& other) const { return !this->operator==(other); } private: using AttrArrayVec = std::vector<AttributeArray::Ptr>; DescriptorPtr mDescr; AttrArrayVec mAttrs; }; // class AttributeSet //////////////////////////////////////// /// A container for ABI=5 to help ease introduction of upcoming features namespace future { class Container { class Element { }; std::vector<std::shared_ptr<Element>> mElements; }; } //////////////////////////////////////// /// @brief An immutable object that stores name, type and AttributeSet position /// for a constant collection of attribute arrays. /// @note The attribute name is actually mutable, but the attribute type /// and position can not be changed after creation. 
class OPENVDB_API AttributeSet::Descriptor { public: using Ptr = std::shared_ptr<Descriptor>; using NameAndType = Util::NameAndType; using NameAndTypeVec = Util::NameAndTypeVec; using GroupIndex = Util::GroupIndex; using NameToPosMap = Util::NameToPosMap; using ConstIterator = NameToPosMap::const_iterator; /// Utility method to construct a NameAndType sequence. struct Inserter { NameAndTypeVec vec; Inserter& add(const NameAndType& nameAndType) { vec.push_back(nameAndType); return *this; } Inserter& add(const Name& name, const NamePair& type) { vec.emplace_back(name, type); return *this; } Inserter& add(const NameAndTypeVec& other) { for (NameAndTypeVec::const_iterator it = other.begin(), itEnd = other.end(); it != itEnd; ++it) { vec.emplace_back(it->name, it->type); } return *this; } }; ////////// Descriptor(); /// Copy constructor Descriptor(const Descriptor&); /// Create a new descriptor from a position attribute type and assumes "P" (for convenience). static Ptr create(const NamePair&); /// Create a new descriptor as a duplicate with a new attribute appended Ptr duplicateAppend(const Name& name, const NamePair& type) const; /// Create a new descriptor as a duplicate with existing attributes dropped Ptr duplicateDrop(const std::vector<size_t>& pos) const; /// Return the number of attributes in this descriptor. size_t size() const { return mTypes.size(); } /// Return the number of attributes with this attribute type size_t count(const NamePair& type) const; /// Return the number of bytes of memory used by this attribute set. size_t memUsage() const; /// @brief Return the position of the attribute array whose name is @a name, /// or @c INVALID_POS if no match is found. size_t find(const std::string& name) const; /// Rename an attribute array size_t rename(const std::string& fromName, const std::string& toName); /// Return the name of the attribute array's type. const Name& valueType(size_t pos) const; /// Return the name of the attribute array's type. const NamePair& type(size_t pos) const; /// Retrieve metadata map MetaMap& getMetadata(); const MetaMap& getMetadata() const; /// Return true if the attribute has a default value bool hasDefaultValue(const Name& name) const; /// Get a default value for an existing attribute template<typename ValueType> ValueType getDefaultValue(const Name& name) const { const size_t pos = find(name); if (pos == INVALID_POS) { OPENVDB_THROW(LookupError, "Cannot find attribute name to set default value.") } std::stringstream ss; ss << "default:" << name; auto metadata = mMetadata.getMetadata<TypedMetadata<ValueType>>(ss.str()); if (metadata) return metadata->value(); return zeroVal<ValueType>(); } /// Set a default value for an existing attribute void setDefaultValue(const Name& name, const Metadata& defaultValue); // Remove the default value if it exists void removeDefaultValue(const Name& name); // Prune any default values for which the key is no longer present void pruneUnusedDefaultValues(); /// Return true if this descriptor is equal to the given one. bool operator==(const Descriptor&) const; /// Return true if this descriptor is not equal to the given one. bool operator!=(const Descriptor& rhs) const { return !this->operator==(rhs); } /// Return true if this descriptor contains the same attributes /// as the given descriptor, ignoring attribute order bool hasSameAttributes(const Descriptor& rhs) const; /// Return a reference to the name-to-position map. 
const NameToPosMap& map() const { return mNameMap; } /// Return a reference to the name-to-position group map. const NameToPosMap& groupMap() const { return mGroupMap; } /// Return @c true if group exists bool hasGroup(const Name& group) const; /// @brief Define a group name to offset mapping /// @param group group name /// @param offset group offset /// @param checkValidOffset throws if offset out-of-range or in-use void setGroup(const Name& group, const size_t offset, const bool checkValidOffset = false); /// Drop any mapping keyed by group name void dropGroup(const Name& group); /// Clear all groups void clearGroups(); /// Rename a group size_t renameGroup(const std::string& fromName, const std::string& toName); /// Return a unique name for a group based on given name const Name uniqueGroupName(const Name& name) const; //@{ /// @brief Return the group offset from the name or index of the group /// A group attribute array is a single byte (8-bit), each bit of which /// can denote a group. The group offset is the position of the bit that /// denotes the requested group if all group attribute arrays in the set /// (and only attribute arrays marked as group) were to be laid out linearly /// according to their order in the set. size_t groupOffset(const Name& groupName) const; size_t groupOffset(const GroupIndex& index) const; //@} /// Return the group index from the name of the group GroupIndex groupIndex(const Name& groupName) const; /// Return the group index from the offset of the group /// @note see offset description for groupOffset() GroupIndex groupIndex(const size_t offset) const; /// Return number of bits occupied by a group attribute array static size_t groupBits() { return sizeof(GroupType) * CHAR_BIT; } /// Return the total number of available groups /// (group bits * number of group attributes) size_t availableGroups() const; /// Return the number of empty group slots which correlates to the number of groups /// that can be stored without increasing the number of group attribute arrays size_t unusedGroups() const; /// Return @c true if there are sufficient empty slots to allow compacting bool canCompactGroups() const; /// @brief Return a group offset that is not in use /// @param hint if provided, request a specific offset as a hint /// @return index of an offset or size_t max if no available group offsets size_t unusedGroupOffset(size_t hint = std::numeric_limits<size_t>::max()) const; /// @brief Determine if a move is required to efficiently compact the data and store the /// source name, offset and the target offset in the input parameters /// @param sourceName source name /// @param sourceOffset source offset /// @param targetOffset target offset /// @return @c true if move is required to compact the data bool requiresGroupMove(Name& sourceName, size_t& sourceOffset, size_t& targetOffset) const; /// @brief Test if there are any group names shared by both descriptors which /// have a different index /// @param rhs the descriptor to compare with /// @return @c true if an index collision exists bool groupIndexCollision(const Descriptor& rhs) const; /// Return a unique name for an attribute array based on given name const Name uniqueName(const Name& name) const; /// Return true if the name is valid static bool validName(const Name& name); /// @brief Extract each name from @a nameStr into @a includeNames, or into @a excludeNames /// if the name is prefixed with a caret. 
/// @param nameStr the input string of names /// @param includeNames on exit, the list of names that are not prefixed with a caret /// @param excludeNames on exit, the list of names that are prefixed with a caret /// @param includeAll on exit, @c true if a "*" wildcard is present in the @a includeNames static void parseNames( std::vector<std::string>& includeNames, std::vector<std::string>& excludeNames, bool& includeAll, const std::string& nameStr); /// @brief Extract each name from @a nameStr into @a includeNames, or into @a excludeNames /// if the name is prefixed with a caret. static void parseNames( std::vector<std::string>& includeNames, std::vector<std::string>& excludeNames, const std::string& nameStr); /// Serialize this descriptor to the given stream. void write(std::ostream&) const; /// Unserialize this transform from the given stream. void read(std::istream&); protected: /// Append to a vector of names and types from this Descriptor in position order void appendTo(NameAndTypeVec& attrs) const; /// Create a new descriptor from the given attribute and type name pairs /// and copy the group maps and metamap. static Ptr create(const NameAndTypeVec&, const NameToPosMap&, const MetaMap&); size_t insert(const std::string& name, const NamePair& typeName); private: friend class ::TestAttributeSet; NameToPosMap mNameMap; std::vector<NamePair> mTypes; NameToPosMap mGroupMap; MetaMap mMetadata; // as this change is part of an ABI change, there's no good reason to reduce the reserved // space aside from keeping the memory size of an AttributeSet the same for convenience // (note that this assumes a typical three-pointer implementation for std::vector) future::Container mFutureContainer; // occupies 3 reserved slots int64_t mReserved[5]; // for future use }; // class Descriptor } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_POINTS_ATTRIBUTE_ARRAY_HAS_BEEN_INCLUDED
23,694
C++
43.124767
109
0.672153
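AttributeSet.h above defines the descriptor and array container used by point leaf nodes. The following sketch is illustrative only: it assumes openvdb::initialize() has registered the built-in attribute types, and the attribute name "density" and the array length of 10 are arbitrary choices, not values taken from the header.

// Illustrative sketch (not part of the OpenVDB sources): build a small
// attribute set holding a "P" position attribute plus an appended float
// attribute, then look the new attribute up by name.
#include <openvdb/openvdb.h>
#include <openvdb/points/AttributeSet.h>
#include <cassert>

void attributeSetExample()
{
    using namespace openvdb::points;

    openvdb::initialize(); // the attribute types must be registered first

    // Descriptor containing a single Vec3f position attribute named "P".
    AttributeSet::Descriptor::Ptr descriptor = AttributeSet::Descriptor::create(
        TypedAttributeArray<openvdb::Vec3f>::attributeType());

    // Ten elements per attribute array.
    AttributeSet attrSet(descriptor, /*arrayLength=*/10);

    // Append a float attribute (simple method); the set's descriptor is
    // replaced with an extended copy.
    attrSet.appendAttribute("density", TypedAttributeArray<float>::attributeType());

    const size_t pos = attrSet.find("density");
    assert(pos != AttributeSet::INVALID_POS);
    AttributeArray* density = attrSet.get(pos);
    (void)density; // real code would write values through a typed AttributeWriteHandle
}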
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/points.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

/// @file points/points.cc

#include "PointDataGrid.h"

namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace points {

void
internal::initialize()
{
    // Register attribute arrays with no compression
    TypedAttributeArray<bool>::registerType();
    TypedAttributeArray<int8_t>::registerType();
    TypedAttributeArray<int16_t>::registerType();
    TypedAttributeArray<int32_t>::registerType();
    TypedAttributeArray<int64_t>::registerType();
    TypedAttributeArray<float>::registerType();
    TypedAttributeArray<double>::registerType();
    TypedAttributeArray<math::Vec3<int32_t>>::registerType();
    TypedAttributeArray<math::Vec3<float>>::registerType();
    TypedAttributeArray<math::Vec3<double>>::registerType();

    // Register attribute arrays with group and string attribute
    GroupAttributeArray::registerType();
    StringAttributeArray::registerType();

    // Register attribute arrays with matrix and quaternion attributes
    TypedAttributeArray<math::Mat3<float>>::registerType();
    TypedAttributeArray<math::Mat3<double>>::registerType();
    TypedAttributeArray<math::Mat4<float>>::registerType();
    TypedAttributeArray<math::Mat4<double>>::registerType();
    TypedAttributeArray<math::Quat<float>>::registerType();
    TypedAttributeArray<math::Quat<double>>::registerType();

    // Register attribute arrays with truncate compression
    TypedAttributeArray<float, TruncateCodec>::registerType();
    TypedAttributeArray<math::Vec3<float>, TruncateCodec>::registerType();

    // Register attribute arrays with fixed point compression
    TypedAttributeArray<math::Vec3<float>, FixedPointCodec<true>>::registerType();
    TypedAttributeArray<math::Vec3<float>, FixedPointCodec<false>>::registerType();
    TypedAttributeArray<math::Vec3<float>, FixedPointCodec<true, PositionRange>>::registerType();
    TypedAttributeArray<math::Vec3<float>, FixedPointCodec<false, PositionRange>>::registerType();
    TypedAttributeArray<math::Vec3<float>, FixedPointCodec<true, UnitRange>>::registerType();
    TypedAttributeArray<math::Vec3<float>, FixedPointCodec<false, UnitRange>>::registerType();

    // Register attribute arrays with unit vector compression
    TypedAttributeArray<math::Vec3<float>, UnitVecCodec>::registerType();

    // Register types associated with point data grids.
    Metadata::registerType(typeNameAsString<PointDataIndex32>(), Int32Metadata::createMetadata);
    Metadata::registerType(typeNameAsString<PointDataIndex64>(), Int64Metadata::createMetadata);
    PointDataGrid::registerGrid();
}

void
internal::uninitialize()
{
    AttributeArray::clearRegistry();
}

} // namespace points
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
2,832
C++
38.901408
98
0.754944
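points.cc above registers the built-in typed attribute arrays and the PointDataGrid grid type. A minimal lifecycle sketch follows; it assumes that openvdb::initialize() and openvdb::uninitialize() forward to the internal::initialize() and internal::uninitialize() functions defined here, and the TruncateCodec query is just one example of a registered type/codec combination.

// Illustrative sketch (not part of the OpenVDB sources): exercising the
// registration lifecycle implemented in points.cc above.
#include <openvdb/openvdb.h>
#include <openvdb/points/AttributeArray.h>
#include <iostream>

int main()
{
    // openvdb::initialize() is expected to invoke points::internal::initialize(),
    // which performs the registrations listed above.
    openvdb::initialize();

    using namespace openvdb::points;
    const openvdb::NamePair& truncatedFloat =
        TypedAttributeArray<float, TruncateCodec>::attributeType();
    std::cout << "truncated float registered: "
              << AttributeArray::isRegistered(truncatedFloat) << std::endl;

    // openvdb::uninitialize() is expected to invoke points::internal::uninitialize(),
    // which clears the attribute registry again.
    openvdb::uninitialize();
    return 0;
}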
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/PointMove.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @author Dan Bailey /// /// @file PointMove.h /// /// @brief Ability to move VDB Points using a custom deformer. /// /// Deformers used when moving points are in world space by default and must adhere /// to the interface described in the example below: /// @code /// struct MyDeformer /// { /// // A reset is performed on each leaf in turn before the points in that leaf are /// // deformed. A leaf and leaf index (standard leaf traversal order) are supplied as /// // the arguments, which matches the functor interface for LeafManager::foreach(). /// template <typename LeafNoteType> /// void reset(LeafNoteType& leaf, size_t idx); /// /// // Evaluate the deformer and modify the given position to generate the deformed /// // position. An index iterator is supplied as the argument to allow querying the /// // point offset or containing voxel coordinate. /// template <typename IndexIterT> /// void apply(Vec3d& position, const IndexIterT& iter) const; /// }; /// @endcode /// /// @note The DeformerTraits struct (defined in PointMask.h) can be used to configure /// a deformer to evaluate in index space. #ifndef OPENVDB_POINTS_POINT_MOVE_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_POINT_MOVE_HAS_BEEN_INCLUDED #include <openvdb/openvdb.h> #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointMask.h> #include <tbb/concurrent_vector.h> #include <algorithm> #include <iterator> // for std::begin(), std::end() #include <map> #include <numeric> // for std::iota() #include <tuple> #include <unordered_map> #include <vector> class TestPointMove; namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { // dummy object for future use namespace future { struct Advect { }; } /// @brief Move points in a PointDataGrid using a custom deformer /// @param points the PointDataGrid containing the points to be moved. /// @param deformer a custom deformer that defines how to move the points. /// @param filter an optional index filter /// @param objectNotInUse for future use, this object is currently ignored /// @param threaded enable or disable threading (threading is enabled by default) template <typename PointDataGridT, typename DeformerT, typename FilterT = NullFilter> inline void movePoints(PointDataGridT& points, DeformerT& deformer, const FilterT& filter = NullFilter(), future::Advect* objectNotInUse = nullptr, bool threaded = true); /// @brief Move points in a PointDataGrid using a custom deformer and a new transform /// @param points the PointDataGrid containing the points to be moved. /// @param transform target transform to use for the resulting points. /// @param deformer a custom deformer that defines how to move the points. 
/// @param filter an optional index filter /// @param objectNotInUse for future use, this object is currently ignored /// @param threaded enable or disable threading (threading is enabled by default) template <typename PointDataGridT, typename DeformerT, typename FilterT = NullFilter> inline void movePoints(PointDataGridT& points, const math::Transform& transform, DeformerT& deformer, const FilterT& filter = NullFilter(), future::Advect* objectNotInUse = nullptr, bool threaded = true); // define leaf index in use as 32-bit namespace point_move_internal { using LeafIndex = Index32; } /// @brief A Deformer that caches the resulting positions from evaluating another Deformer template <typename T> class CachedDeformer { public: using LeafIndex = point_move_internal::LeafIndex; using Vec3T = typename math::Vec3<T>; using LeafVecT = std::vector<Vec3T>; using LeafMapT = std::unordered_map<LeafIndex, Vec3T>; // Internal data cache to allow the deformer to offer light-weight copying struct Cache { struct Leaf { /// @brief clear data buffers and reset counter void clear() { vecData.clear(); mapData.clear(); totalSize = 0; } LeafVecT vecData; LeafMapT mapData; Index totalSize = 0; }; // struct Leaf std::vector<Leaf> leafs; }; // struct Cache /// Cache is expected to be persistent for the lifetime of the CachedDeformer explicit CachedDeformer(Cache& cache); /// Caches the result of evaluating the supplied point grid using the deformer and filter /// @param grid the points to be moved /// @param deformer the deformer to apply to the points /// @param filter the point filter to use when evaluating the points /// @param threaded enable or disable threading (threading is enabled by default) template <typename PointDataGridT, typename DeformerT, typename FilterT> void evaluate(PointDataGridT& grid, DeformerT& deformer, const FilterT& filter, bool threaded = true); /// Stores pointers to the vector or map and optionally expands the map into a vector /// @throw IndexError if idx is out-of-range of the leafs in the cache template <typename LeafT> void reset(const LeafT& leaf, size_t idx); /// Retrieve the new position from the cache template <typename IndexIterT> void apply(Vec3d& position, const IndexIterT& iter) const; private: friend class ::TestPointMove; Cache& mCache; const LeafVecT* mLeafVec = nullptr; const LeafMapT* mLeafMap = nullptr; }; // class CachedDeformer //////////////////////////////////////// namespace point_move_internal { using IndexArray = std::vector<Index>; using IndexTriple = std::tuple<LeafIndex, Index, Index>; using IndexTripleArray = tbb::concurrent_vector<IndexTriple>; using GlobalPointIndexMap = std::vector<IndexTripleArray>; using GlobalPointIndexIndices = std::vector<IndexArray>; using IndexPair = std::pair<Index, Index>; using IndexPairArray = std::vector<IndexPair>; using LocalPointIndexMap = std::vector<IndexPairArray>; using LeafIndexArray = std::vector<LeafIndex>; using LeafOffsetArray = std::vector<LeafIndexArray>; using LeafMap = std::unordered_map<Coord, LeafIndex>; template <typename DeformerT, typename TreeT, typename FilterT> struct BuildMoveMapsOp { using LeafT = typename TreeT::LeafNodeType; using LeafArrayT = std::vector<LeafT*>; using LeafManagerT = typename tree::LeafManager<TreeT>; BuildMoveMapsOp(const DeformerT& deformer, GlobalPointIndexMap& globalMoveLeafMap, LocalPointIndexMap& localMoveLeafMap, const LeafMap& targetLeafMap, const math::Transform& targetTransform, const math::Transform& sourceTransform, const FilterT& filter) : mDeformer(deformer) , 
mGlobalMoveLeafMap(globalMoveLeafMap) , mLocalMoveLeafMap(localMoveLeafMap) , mTargetLeafMap(targetLeafMap) , mTargetTransform(targetTransform) , mSourceTransform(sourceTransform) , mFilter(filter) { } void operator()(LeafT& leaf, size_t idx) const { DeformerT deformer(mDeformer); deformer.reset(leaf, idx); // determine source leaf node origin and offset in the source leaf vector Coord sourceLeafOrigin = leaf.origin(); auto sourceHandle = AttributeWriteHandle<Vec3f>::create(leaf.attributeArray("P")); for (auto iter = leaf.beginIndexOn(mFilter); iter; iter++) { const bool useIndexSpace = DeformerTraits<DeformerT>::IndexSpace; // extract index-space position and apply index-space deformation (if applicable) Vec3d positionIS = sourceHandle->get(*iter) + iter.getCoord().asVec3d(); if (useIndexSpace) { deformer.apply(positionIS, iter); } // transform to world-space position and apply world-space deformation (if applicable) Vec3d positionWS = mSourceTransform.indexToWorld(positionIS); if (!useIndexSpace) { deformer.apply(positionWS, iter); } // transform to index-space position of target grid positionIS = mTargetTransform.worldToIndex(positionWS); // determine target voxel and offset Coord targetVoxel = Coord::round(positionIS); Index targetOffset = LeafT::coordToOffset(targetVoxel); // set new local position in source transform space (if point has been deformed) Vec3d voxelPosition(positionIS - targetVoxel.asVec3d()); sourceHandle->set(*iter, voxelPosition); // determine target leaf node origin and offset in the target leaf vector Coord targetLeafOrigin = targetVoxel & ~(LeafT::DIM - 1); assert(mTargetLeafMap.find(targetLeafOrigin) != mTargetLeafMap.end()); const LeafIndex targetLeafOffset(mTargetLeafMap.at(targetLeafOrigin)); // insert into move map based on whether point ends up in a new leaf node or not if (targetLeafOrigin == sourceLeafOrigin) { mLocalMoveLeafMap[targetLeafOffset].emplace_back(targetOffset, *iter); } else { mGlobalMoveLeafMap[targetLeafOffset].push_back(IndexTriple( LeafIndex(static_cast<LeafIndex>(idx)), targetOffset, *iter)); } } } private: const DeformerT& mDeformer; GlobalPointIndexMap& mGlobalMoveLeafMap; LocalPointIndexMap& mLocalMoveLeafMap; const LeafMap& mTargetLeafMap; const math::Transform& mTargetTransform; const math::Transform& mSourceTransform; const FilterT& mFilter; }; // struct BuildMoveMapsOp template <typename LeafT> inline Index indexOffsetFromVoxel(const Index voxelOffset, const LeafT& leaf, IndexArray& offsets) { // compute the target point index by summing the point index of the previous // voxel with the current number of points added to this voxel, tracked by the // offsets array Index targetOffset = offsets[voxelOffset]++; if (voxelOffset > 0) { targetOffset += static_cast<Index>(leaf.getValue(voxelOffset - 1)); } return targetOffset; } #if OPENVDB_ABI_VERSION_NUMBER >= 6 template <typename TreeT> struct GlobalMovePointsOp { using LeafT = typename TreeT::LeafNodeType; using LeafArrayT = std::vector<LeafT*>; using LeafManagerT = typename tree::LeafManager<TreeT>; using AttributeArrays = std::vector<AttributeArray*>; GlobalMovePointsOp(LeafOffsetArray& offsetMap, LeafManagerT& sourceLeafManager, const Index attributeIndex, const GlobalPointIndexMap& moveLeafMap, const GlobalPointIndexIndices& moveLeafIndices) : mOffsetMap(offsetMap) , mSourceLeafManager(sourceLeafManager) , mAttributeIndex(attributeIndex) , mMoveLeafMap(moveLeafMap) , mMoveLeafIndices(moveLeafIndices) { } // A CopyIterator is designed to use the indices in a GlobalPointIndexMap for 
this leaf // and match the interface required for AttributeArray::copyValues() struct CopyIterator { CopyIterator(const LeafT& leaf, const IndexArray& sortedIndices, const IndexTripleArray& moveIndices, IndexArray& offsets) : mLeaf(leaf) , mSortedIndices(sortedIndices) , mMoveIndices(moveIndices) , mOffsets(offsets) { } operator bool() const { return bool(mIt); } void reset(Index startIndex, Index endIndex) { mIndex = startIndex; mEndIndex = endIndex; this->advance(); } CopyIterator& operator++() { this->advance(); return *this; } Index leafIndex(Index i) const { if (i < mSortedIndices.size()) { return std::get<0>(this->leafIndexTriple(i)); } return std::numeric_limits<Index>::max(); } Index sourceIndex() const { assert(mIt); return std::get<2>(*mIt); } Index targetIndex() const { assert(mIt); return indexOffsetFromVoxel(std::get<1>(*mIt), mLeaf, mOffsets); } private: void advance() { if (mIndex >= mEndIndex || mIndex >= mSortedIndices.size()) { mIt = nullptr; } else { mIt = &this->leafIndexTriple(mIndex); } ++mIndex; } const IndexTriple& leafIndexTriple(Index i) const { return mMoveIndices[mSortedIndices[i]]; } private: const LeafT& mLeaf; Index mIndex; Index mEndIndex; const IndexArray& mSortedIndices; const IndexTripleArray& mMoveIndices; IndexArray& mOffsets; const IndexTriple* mIt = nullptr; }; // struct CopyIterator void operator()(LeafT& leaf, size_t idx) const { const IndexTripleArray& moveIndices = mMoveLeafMap[idx]; if (moveIndices.empty()) return; const IndexArray& sortedIndices = mMoveLeafIndices[idx]; // extract per-voxel offsets for this leaf LeafIndexArray& offsets = mOffsetMap[idx]; // extract target array and ensure data is out-of-core and non-uniform auto& targetArray = leaf.attributeArray(mAttributeIndex); targetArray.loadData(); targetArray.expand(); // perform the copy CopyIterator copyIterator(leaf, sortedIndices, moveIndices, offsets); // use the sorted indices to track the index of the source leaf Index sourceLeafIndex = copyIterator.leafIndex(0); Index startIndex = 0; for (size_t i = 1; i <= sortedIndices.size(); i++) { Index endIndex = static_cast<Index>(i); Index newSourceLeafIndex = copyIterator.leafIndex(endIndex); // when it changes, do a batch-copy of all the indices that lie within this range // TODO: this step could use nested parallelization for cases where there are a // large number of points being moved per attribute if (newSourceLeafIndex > sourceLeafIndex) { copyIterator.reset(startIndex, endIndex); const LeafT& sourceLeaf = mSourceLeafManager.leaf(sourceLeafIndex); const auto& sourceArray = sourceLeaf.constAttributeArray(mAttributeIndex); sourceArray.loadData(); targetArray.copyValuesUnsafe(sourceArray, copyIterator); sourceLeafIndex = newSourceLeafIndex; startIndex = endIndex; } } } private: LeafOffsetArray& mOffsetMap; LeafManagerT& mSourceLeafManager; const Index mAttributeIndex; const GlobalPointIndexMap& mMoveLeafMap; const GlobalPointIndexIndices& mMoveLeafIndices; }; // struct GlobalMovePointsOp template <typename TreeT> struct LocalMovePointsOp { using LeafT = typename TreeT::LeafNodeType; using LeafArrayT = std::vector<LeafT*>; using LeafManagerT = typename tree::LeafManager<TreeT>; using AttributeArrays = std::vector<AttributeArray*>; LocalMovePointsOp( LeafOffsetArray& offsetMap, const LeafIndexArray& sourceIndices, LeafManagerT& sourceLeafManager, const Index attributeIndex, const LocalPointIndexMap& moveLeafMap) : mOffsetMap(offsetMap) , mSourceIndices(sourceIndices) , mSourceLeafManager(sourceLeafManager) , 
mAttributeIndex(attributeIndex) , mMoveLeafMap(moveLeafMap) { } // A CopyIterator is designed to use the indices in a LocalPointIndexMap for this leaf // and match the interface required for AttributeArray::copyValues() struct CopyIterator { CopyIterator(const LeafT& leaf, const IndexPairArray& indices, IndexArray& offsets) : mLeaf(leaf) , mIndices(indices) , mOffsets(offsets) { } operator bool() const { return mIndex < static_cast<int>(mIndices.size()); } CopyIterator& operator++() { ++mIndex; return *this; } Index sourceIndex() const { return mIndices[mIndex].second; } Index targetIndex() const { return indexOffsetFromVoxel(mIndices[mIndex].first, mLeaf, mOffsets); } private: const LeafT& mLeaf; const IndexPairArray& mIndices; IndexArray& mOffsets; int mIndex = 0; }; // struct CopyIterator void operator()(LeafT& leaf, size_t idx) const { const IndexPairArray& moveIndices = mMoveLeafMap[idx]; if (moveIndices.empty()) return; // extract per-voxel offsets for this leaf LeafIndexArray& offsets = mOffsetMap[idx]; // extract source array that has the same origin as the target leaf assert(idx < mSourceIndices.size()); const Index sourceLeafOffset(mSourceIndices[idx]); LeafT& sourceLeaf = mSourceLeafManager.leaf(sourceLeafOffset); const auto& sourceArray = sourceLeaf.constAttributeArray(mAttributeIndex); sourceArray.loadData(); // extract target array and ensure data is out-of-core and non-uniform auto& targetArray = leaf.attributeArray(mAttributeIndex); targetArray.loadData(); targetArray.expand(); // perform the copy CopyIterator copyIterator(leaf, moveIndices, offsets); targetArray.copyValuesUnsafe(sourceArray, copyIterator); } private: LeafOffsetArray& mOffsetMap; const LeafIndexArray& mSourceIndices; LeafManagerT& mSourceLeafManager; const Index mAttributeIndex; const LocalPointIndexMap& mMoveLeafMap; }; // struct LocalMovePointsOp #else // The following infrastructure - ArrayProcessor, PerformTypedMoveOp, processTypedArray() // is required to improve AttributeArray copying performance beyond using the virtual function // AttributeArray::set(Index, AttributeArray&, Index). An ABI=6 addition to AttributeArray // improves this by introducing an AttributeArray::copyValues() method to significantly // simplify this logic without incurring the same virtual function cost. 
/// Helper class used internally by processTypedArray() template<typename ValueType, typename OpType> struct ArrayProcessor { static inline void call(OpType& op, const AttributeArray& array) { op.template operator()<ValueType>(array); } }; /// @brief Utility function that, given a generic attribute array, /// calls a functor with the fully-resolved value type of the array template<typename ArrayType, typename OpType> bool processTypedArray(const ArrayType& array, OpType& op) { using namespace openvdb; using namespace openvdb::math; if (array.template hasValueType<bool>()) ArrayProcessor<bool, OpType>::call(op, array); else if (array.template hasValueType<int16_t>()) ArrayProcessor<int16_t, OpType>::call(op, array); else if (array.template hasValueType<int32_t>()) ArrayProcessor<int32_t, OpType>::call(op, array); else if (array.template hasValueType<int64_t>()) ArrayProcessor<int64_t, OpType>::call(op, array); else if (array.template hasValueType<float>()) ArrayProcessor<float, OpType>::call(op, array); else if (array.template hasValueType<double>()) ArrayProcessor<double, OpType>::call(op, array); else if (array.template hasValueType<Vec3<int32_t>>()) ArrayProcessor<Vec3<int32_t>, OpType>::call(op, array); else if (array.template hasValueType<Vec3<float>>()) ArrayProcessor<Vec3<float>, OpType>::call(op, array); else if (array.template hasValueType<Vec3<double>>()) ArrayProcessor<Vec3<double>, OpType>::call(op, array); else if (array.template hasValueType<GroupType>()) ArrayProcessor<GroupType, OpType>::call(op, array); else if (array.template hasValueType<Index>()) ArrayProcessor<Index, OpType>::call(op, array); else if (array.template hasValueType<Mat3<float>>()) ArrayProcessor<Mat3<float>, OpType>::call(op, array); else if (array.template hasValueType<Mat3<double>>()) ArrayProcessor<Mat3<double>, OpType>::call(op, array); else if (array.template hasValueType<Mat4<float>>()) ArrayProcessor<Mat4<float>, OpType>::call(op, array); else if (array.template hasValueType<Mat4<double>>()) ArrayProcessor<Mat4<double>, OpType>::call(op, array); else if (array.template hasValueType<Quat<float>>()) ArrayProcessor<Quat<float>, OpType>::call(op, array); else if (array.template hasValueType<Quat<double>>()) ArrayProcessor<Quat<double>, OpType>::call(op, array); else return false; return true; } /// Cache read and write attribute handles to amortize construction cost struct AttributeHandles { using HandleArray = std::vector<AttributeHandle<int>::Ptr>; AttributeHandles(const size_t size) : mHandles() { mHandles.reserve(size); } AttributeArray& getArray(const Index leafOffset) { auto* handle = reinterpret_cast<AttributeWriteHandle<int>*>(mHandles[leafOffset].get()); assert(handle); return handle->array(); } const AttributeArray& getConstArray(const Index leafOffset) const { const auto* handle = mHandles[leafOffset].get(); assert(handle); return handle->array(); } template <typename ValueT> AttributeHandle<ValueT>& getHandle(const Index leafOffset) { auto* handle = reinterpret_cast<AttributeHandle<ValueT>*>(mHandles[leafOffset].get()); assert(handle); return *handle; } template <typename ValueT> AttributeWriteHandle<ValueT>& getWriteHandle(const Index leafOffset) { auto* handle = reinterpret_cast<AttributeWriteHandle<ValueT>*>(mHandles[leafOffset].get()); assert(handle); return *handle; } /// Create a handle and reinterpret cast as an int handle to store struct CacheHandleOp { CacheHandleOp(HandleArray& handles) : mHandles(handles) { } template<typename ValueT> void operator()(const AttributeArray& array) 
const { auto* handleAsInt = reinterpret_cast<AttributeHandle<int>*>( new AttributeHandle<ValueT>(array)); mHandles.emplace_back(handleAsInt); } private: HandleArray& mHandles; }; // struct CacheHandleOp template <typename LeafRangeT> void cache(const LeafRangeT& range, const Index attributeIndex) { using namespace openvdb::math; mHandles.clear(); CacheHandleOp op(mHandles); for (auto leaf = range.begin(); leaf; ++leaf) { const auto& array = leaf->constAttributeArray(attributeIndex); processTypedArray(array, op); } } private: HandleArray mHandles; }; // struct AttributeHandles template <typename TreeT> struct GlobalMovePointsOp { using LeafT = typename TreeT::LeafNodeType; using LeafArrayT = std::vector<LeafT*>; using LeafManagerT = typename tree::LeafManager<TreeT>; GlobalMovePointsOp(LeafOffsetArray& offsetMap, AttributeHandles& targetHandles, AttributeHandles& sourceHandles, const Index attributeIndex, const GlobalPointIndexMap& moveLeafMap, const GlobalPointIndexIndices& moveLeafIndices) : mOffsetMap(offsetMap) , mTargetHandles(targetHandles) , mSourceHandles(sourceHandles) , mAttributeIndex(attributeIndex) , mMoveLeafMap(moveLeafMap) , mMoveLeafIndices(moveLeafIndices) { } struct PerformTypedMoveOp { PerformTypedMoveOp(AttributeHandles& targetHandles, AttributeHandles& sourceHandles, Index targetOffset, const LeafT& targetLeaf, IndexArray& offsets, const IndexTripleArray& indices, const IndexArray& sortedIndices) : mTargetHandles(targetHandles) , mSourceHandles(sourceHandles) , mTargetOffset(targetOffset) , mTargetLeaf(targetLeaf) , mOffsets(offsets) , mIndices(indices) , mSortedIndices(sortedIndices) { } template<typename ValueT> void operator()(const AttributeArray&) const { auto& targetHandle = mTargetHandles.getWriteHandle<ValueT>(mTargetOffset); targetHandle.expand(); for (const auto& index : mSortedIndices) { const auto& it = mIndices[index]; const auto& sourceHandle = mSourceHandles.getHandle<ValueT>(std::get<0>(it)); const Index targetIndex = indexOffsetFromVoxel(std::get<1>(it), mTargetLeaf, mOffsets); for (Index i = 0; i < sourceHandle.stride(); i++) { ValueT sourceValue = sourceHandle.get(std::get<2>(it), i); targetHandle.set(targetIndex, i, sourceValue); } } } private: AttributeHandles& mTargetHandles; AttributeHandles& mSourceHandles; Index mTargetOffset; const LeafT& mTargetLeaf; IndexArray& mOffsets; const IndexTripleArray& mIndices; const IndexArray& mSortedIndices; }; // struct PerformTypedMoveOp void performMove(Index targetOffset, const LeafT& targetLeaf, IndexArray& offsets, const IndexTripleArray& indices, const IndexArray& sortedIndices) const { auto& targetArray = mTargetHandles.getArray(targetOffset); targetArray.loadData(); targetArray.expand(); for (const auto& index : sortedIndices) { const auto& it = indices[index]; const auto& sourceArray = mSourceHandles.getConstArray(std::get<0>(it)); const Index sourceOffset = std::get<2>(it); const Index targetOffset = indexOffsetFromVoxel(std::get<1>(it), targetLeaf, offsets); targetArray.set(targetOffset, sourceArray, sourceOffset); } } void operator()(LeafT& leaf, size_t aIdx) const { const Index idx(static_cast<Index>(aIdx)); const auto& moveIndices = mMoveLeafMap[aIdx]; if (moveIndices.empty()) return; const auto& sortedIndices = mMoveLeafIndices[aIdx]; // extract per-voxel offsets for this leaf auto& offsets = mOffsetMap[aIdx]; const auto& array = leaf.constAttributeArray(mAttributeIndex); PerformTypedMoveOp op(mTargetHandles, mSourceHandles, idx, leaf, offsets, moveIndices, sortedIndices); if 
(!processTypedArray(array, op)) { this->performMove(idx, leaf, offsets, moveIndices, sortedIndices); } } private: LeafOffsetArray& mOffsetMap; AttributeHandles& mTargetHandles; AttributeHandles& mSourceHandles; const Index mAttributeIndex; const GlobalPointIndexMap& mMoveLeafMap; const GlobalPointIndexIndices& mMoveLeafIndices; }; // struct GlobalMovePointsOp template <typename TreeT> struct LocalMovePointsOp { using LeafT = typename TreeT::LeafNodeType; using LeafArrayT = std::vector<LeafT*>; using LeafManagerT = typename tree::LeafManager<TreeT>; LocalMovePointsOp( LeafOffsetArray& offsetMap, AttributeHandles& targetHandles, const LeafIndexArray& sourceIndices, AttributeHandles& sourceHandles, const Index attributeIndex, const LocalPointIndexMap& moveLeafMap) : mOffsetMap(offsetMap) , mTargetHandles(targetHandles) , mSourceIndices(sourceIndices) , mSourceHandles(sourceHandles) , mAttributeIndex(attributeIndex) , mMoveLeafMap(moveLeafMap) { } struct PerformTypedMoveOp { PerformTypedMoveOp(AttributeHandles& targetHandles, AttributeHandles& sourceHandles, Index targetOffset, Index sourceOffset, const LeafT& targetLeaf, IndexArray& offsets, const IndexPairArray& indices) : mTargetHandles(targetHandles) , mSourceHandles(sourceHandles) , mTargetOffset(targetOffset) , mSourceOffset(sourceOffset) , mTargetLeaf(targetLeaf) , mOffsets(offsets) , mIndices(indices) { } template<typename ValueT> void operator()(const AttributeArray&) const { auto& targetHandle = mTargetHandles.getWriteHandle<ValueT>(mTargetOffset); const auto& sourceHandle = mSourceHandles.getHandle<ValueT>(mSourceOffset); targetHandle.expand(); for (const auto& it : mIndices) { const Index targetIndex = indexOffsetFromVoxel(it.first, mTargetLeaf, mOffsets); for (Index i = 0; i < sourceHandle.stride(); i++) { ValueT sourceValue = sourceHandle.get(it.second, i); targetHandle.set(targetIndex, i, sourceValue); } } } private: AttributeHandles& mTargetHandles; AttributeHandles& mSourceHandles; Index mTargetOffset; Index mSourceOffset; const LeafT& mTargetLeaf; IndexArray& mOffsets; const IndexPairArray& mIndices; }; // struct PerformTypedMoveOp template <typename ValueT> void performTypedMove(Index sourceOffset, Index targetOffset, const LeafT& targetLeaf, IndexArray& offsets, const IndexPairArray& indices) const { auto& targetHandle = mTargetHandles.getWriteHandle<ValueT>(targetOffset); const auto& sourceHandle = mSourceHandles.getHandle<ValueT>(sourceOffset); targetHandle.expand(); for (const auto& it : indices) { const Index tgtOffset = indexOffsetFromVoxel(it.first, targetLeaf, offsets); for (Index i = 0; i < sourceHandle.stride(); i++) { ValueT sourceValue = sourceHandle.get(it.second, i); targetHandle.set(tgtOffset, i, sourceValue); } } } void performMove(Index targetOffset, Index sourceOffset, const LeafT& targetLeaf, IndexArray& offsets, const IndexPairArray& indices) const { auto& targetArray = mTargetHandles.getArray(targetOffset); const auto& sourceArray = mSourceHandles.getConstArray(sourceOffset); for (const auto& it : indices) { const Index sourceOffset = it.second; const Index targetOffset = indexOffsetFromVoxel(it.first, targetLeaf, offsets); targetArray.set(targetOffset, sourceArray, sourceOffset); } } void operator()(const LeafT& leaf, size_t aIdx) const { const Index idx(static_cast<Index>(aIdx)); const auto& moveIndices = mMoveLeafMap.at(aIdx); if (moveIndices.empty()) return; // extract target leaf and per-voxel offsets for this leaf auto& offsets = mOffsetMap[aIdx]; // extract source leaf that has the same origin as 
the target leaf (if any) assert(aIdx < mSourceIndices.size()); const Index sourceOffset(mSourceIndices[aIdx]); const auto& array = leaf.constAttributeArray(mAttributeIndex); PerformTypedMoveOp op(mTargetHandles, mSourceHandles, idx, sourceOffset, leaf, offsets, moveIndices); if (!processTypedArray(array, op)) { this->performMove(idx, sourceOffset, leaf, offsets, moveIndices); } } private: LeafOffsetArray& mOffsetMap; AttributeHandles& mTargetHandles; const LeafIndexArray& mSourceIndices; AttributeHandles& mSourceHandles; const Index mAttributeIndex; const LocalPointIndexMap& mMoveLeafMap; }; // struct LocalMovePointsOp #endif // OPENVDB_ABI_VERSION_NUMBER >= 6 } // namespace point_move_internal //////////////////////////////////////// template <typename PointDataGridT, typename DeformerT, typename FilterT> inline void movePoints( PointDataGridT& points, const math::Transform& transform, DeformerT& deformer, const FilterT& filter, future::Advect* objectNotInUse, bool threaded) { using LeafIndex = point_move_internal::LeafIndex; using PointDataTreeT = typename PointDataGridT::TreeType; using LeafT = typename PointDataTreeT::LeafNodeType; using LeafManagerT = typename tree::LeafManager<PointDataTreeT>; using namespace point_move_internal; // this object is for future use only assert(!objectNotInUse); (void)objectNotInUse; PointDataTreeT& tree = points.tree(); // early exit if no LeafNodes PointDataTree::LeafCIter iter = tree.cbeginLeaf(); if (!iter) return; // build voxel topology taking into account any point group deletion auto newPoints = point_mask_internal::convertPointsToScalar<PointDataGrid>( points, transform, filter, deformer, threaded); auto& newTree = newPoints->tree(); // create leaf managers for both trees LeafManagerT sourceLeafManager(tree); LeafManagerT targetLeafManager(newTree); // extract the existing attribute set const auto& existingAttributeSet = points.tree().cbeginLeaf()->attributeSet(); // build a coord -> index map for looking up target leafs by origin and a faster // unordered map for finding the source index from a target index LeafMap targetLeafMap; LeafIndexArray sourceIndices(targetLeafManager.leafCount(), std::numeric_limits<LeafIndex>::max()); LeafOffsetArray offsetMap(targetLeafManager.leafCount()); { LeafMap sourceLeafMap; auto sourceRange = sourceLeafManager.leafRange(); for (auto leaf = sourceRange.begin(); leaf; ++leaf) { sourceLeafMap.insert({leaf->origin(), LeafIndex(static_cast<LeafIndex>(leaf.pos()))}); } auto targetRange = targetLeafManager.leafRange(); for (auto leaf = targetRange.begin(); leaf; ++leaf) { targetLeafMap.insert({leaf->origin(), LeafIndex(static_cast<LeafIndex>(leaf.pos()))}); } // acquire registry lock to avoid locking when appending attributes in parallel AttributeArray::ScopedRegistryLock lock; // perform four independent per-leaf operations in parallel targetLeafManager.foreach( [&](LeafT& leaf, size_t idx) { // map frequency => cumulative histogram auto* buffer = leaf.buffer().data(); for (Index i = 1; i < leaf.buffer().size(); i++) { buffer[i] = buffer[i-1] + buffer[i]; } // replace attribute set with a copy of the existing one leaf.replaceAttributeSet( new AttributeSet(existingAttributeSet, leaf.getLastValue(), &lock), /*allowMismatchingDescriptors=*/true); // store the index of the source leaf in a corresponding target leaf array const auto it = sourceLeafMap.find(leaf.origin()); if (it != sourceLeafMap.end()) { sourceIndices[idx] = it->second; } // allocate offset maps offsetMap[idx].resize(LeafT::SIZE); }, threaded); } // 
moving leaf GlobalPointIndexMap globalMoveLeafMap(targetLeafManager.leafCount()); LocalPointIndexMap localMoveLeafMap(targetLeafManager.leafCount()); // build global and local move leaf maps and update local positions if (filter.state() == index::ALL) { NullFilter nullFilter; BuildMoveMapsOp<DeformerT, PointDataTreeT, NullFilter> op(deformer, globalMoveLeafMap, localMoveLeafMap, targetLeafMap, transform, points.transform(), nullFilter); sourceLeafManager.foreach(op, threaded); } else { BuildMoveMapsOp<DeformerT, PointDataTreeT, FilterT> op(deformer, globalMoveLeafMap, localMoveLeafMap, targetLeafMap, transform, points.transform(), filter); sourceLeafManager.foreach(op, threaded); } // build a sorted index vector for each leaf that references the global move map // indices in order of their source leafs and voxels to ensure determinism in the // resulting point orders GlobalPointIndexIndices globalMoveLeafIndices(globalMoveLeafMap.size()); targetLeafManager.foreach( [&](LeafT& /*leaf*/, size_t idx) { const IndexTripleArray& moveIndices = globalMoveLeafMap[idx]; if (moveIndices.empty()) return; IndexArray& sortedIndices = globalMoveLeafIndices[idx]; sortedIndices.resize(moveIndices.size()); std::iota(std::begin(sortedIndices), std::end(sortedIndices), 0); std::sort(std::begin(sortedIndices), std::end(sortedIndices), [&](int i, int j) { const Index& indexI0(std::get<0>(moveIndices[i])); const Index& indexJ0(std::get<0>(moveIndices[j])); if (indexI0 < indexJ0) return true; if (indexI0 > indexJ0) return false; return std::get<2>(moveIndices[i]) < std::get<2>(moveIndices[j]); } ); }, threaded); #if OPENVDB_ABI_VERSION_NUMBER < 6 // initialize attribute handles AttributeHandles sourceHandles(sourceLeafManager.leafCount()); AttributeHandles targetHandles(targetLeafManager.leafCount()); #endif for (const auto& it : existingAttributeSet.descriptor().map()) { const Index attributeIndex = static_cast<Index>(it.second); // zero offsets targetLeafManager.foreach( [&offsetMap](const LeafT& /*leaf*/, size_t idx) { std::fill(offsetMap[idx].begin(), offsetMap[idx].end(), 0); }, threaded); #if OPENVDB_ABI_VERSION_NUMBER >= 6 // move points between leaf nodes GlobalMovePointsOp<PointDataTreeT> globalMoveOp(offsetMap, sourceLeafManager, attributeIndex, globalMoveLeafMap, globalMoveLeafIndices); targetLeafManager.foreach(globalMoveOp, threaded); // move points within leaf nodes LocalMovePointsOp<PointDataTreeT> localMoveOp(offsetMap, sourceIndices, sourceLeafManager, attributeIndex, localMoveLeafMap); targetLeafManager.foreach(localMoveOp, threaded); #else // cache attribute handles sourceHandles.cache(sourceLeafManager.leafRange(), attributeIndex); targetHandles.cache(targetLeafManager.leafRange(), attributeIndex); // move points between leaf nodes GlobalMovePointsOp<PointDataTreeT> globalMoveOp(offsetMap, targetHandles, sourceHandles, attributeIndex, globalMoveLeafMap, globalMoveLeafIndices); targetLeafManager.foreach(globalMoveOp, threaded); // move points within leaf nodes LocalMovePointsOp<PointDataTreeT> localMoveOp(offsetMap, targetHandles, sourceIndices, sourceHandles, attributeIndex, localMoveLeafMap); targetLeafManager.foreach(localMoveOp, threaded); #endif // OPENVDB_ABI_VERSION_NUMBER >= 6 } points.setTree(newPoints->treePtr()); } template <typename PointDataGridT, typename DeformerT, typename FilterT> inline void movePoints( PointDataGridT& points, DeformerT& deformer, const FilterT& filter, future::Advect* objectNotInUse, bool threaded) { movePoints(points, points.transform(), deformer, filter, 
objectNotInUse, threaded); } //////////////////////////////////////// template <typename T> CachedDeformer<T>::CachedDeformer(Cache& cache) : mCache(cache) { } template <typename T> template <typename PointDataGridT, typename DeformerT, typename FilterT> void CachedDeformer<T>::evaluate(PointDataGridT& grid, DeformerT& deformer, const FilterT& filter, bool threaded) { using TreeT = typename PointDataGridT::TreeType; using LeafT = typename TreeT::LeafNodeType; using LeafManagerT = typename tree::LeafManager<TreeT>; LeafManagerT leafManager(grid.tree()); // initialize cache auto& leafs = mCache.leafs; leafs.resize(leafManager.leafCount()); const auto& transform = grid.transform(); // insert deformed positions into the cache auto cachePositionsOp = [&](const LeafT& leaf, size_t idx) { const Index64 totalPointCount = leaf.pointCount(); if (totalPointCount == 0) return; // deformer is copied to ensure that it is unique per-thread DeformerT newDeformer(deformer); newDeformer.reset(leaf, idx); auto handle = AttributeHandle<Vec3f>::create(leaf.constAttributeArray("P")); auto& cache = leafs[idx]; cache.clear(); // only insert into a vector directly if the filter evaluates all points // and all points are stored in active voxels const bool useVector = filter.state() == index::ALL && (leaf.isDense() || (leaf.onPointCount() == leaf.pointCount())); if (useVector) { cache.vecData.resize(totalPointCount); } for (auto iter = leaf.beginIndexOn(filter); iter; iter++) { // extract index-space position and apply index-space deformation (if defined) Vec3d position = handle->get(*iter) + iter.getCoord().asVec3d(); // if deformer is designed to be used in index-space, perform deformation prior // to transforming position to world-space, otherwise perform deformation afterwards if (DeformerTraits<DeformerT>::IndexSpace) { newDeformer.apply(position, iter); position = transform.indexToWorld(position); } else { position = transform.indexToWorld(position); newDeformer.apply(position, iter); } // insert new position into the cache if (useVector) { cache.vecData[*iter] = static_cast<Vec3T>(position); } else { cache.mapData.insert({*iter, static_cast<Vec3T>(position)}); } } // store the total number of points to allow use of an expanded vector on access if (!cache.mapData.empty()) { cache.totalSize = static_cast<Index>(totalPointCount); } }; leafManager.foreach(cachePositionsOp, threaded); } template <typename T> template <typename LeafT> void CachedDeformer<T>::reset(const LeafT& /*leaf*/, size_t idx) { if (idx >= mCache.leafs.size()) { if (mCache.leafs.empty()) { throw IndexError("No leafs in cache, perhaps CachedDeformer has not been evaluated?"); } else { throw IndexError("Leaf index is out-of-range of cache leafs."); } } auto& cache = mCache.leafs[idx]; if (!cache.mapData.empty()) { mLeafMap = &cache.mapData; mLeafVec = nullptr; } else { mLeafVec = &cache.vecData; mLeafMap = nullptr; } } template <typename T> template <typename IndexIterT> void CachedDeformer<T>::apply(Vec3d& position, const IndexIterT& iter) const { assert(*iter >= 0); if (mLeafMap) { auto it = mLeafMap->find(*iter); if (it == mLeafMap->end()) return; position = static_cast<openvdb::Vec3d>(it->second); } else { assert(mLeafVec); if (mLeafVec->empty()) return; assert(*iter < mLeafVec->size()); position = static_cast<openvdb::Vec3d>((*mLeafVec)[*iter]); } } } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_POINTS_POINT_MOVE_HAS_BEEN_INCLUDED
44,778
C++
35.287682
119
0.642481
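A short usage sketch may help clarify the point-moving API above: movePoints() rebuilds the voxel topology through the supplied deformer and then transfers every attribute between source and target leaf nodes. The OffsetDeformer type, the translatePoints() wrapper and the reliance on the default filter and threading arguments below are illustrative assumptions, not part of the header itself.

// A minimal sketch, assuming openvdb::initialize() has already been called.
#include <openvdb/openvdb.h>
#include <openvdb/points/PointDataGrid.h>
#include <openvdb/points/PointMove.h>

// Hypothetical deformer: translates every point by a fixed world-space offset.
struct OffsetDeformer
{
    explicit OffsetDeformer(const openvdb::Vec3d& offset): mOffset(offset) {}

    // called once per leaf before apply() is invoked for the points of that leaf
    template <typename LeafT>
    void reset(const LeafT&, size_t /*idx*/) {}

    // world-space deformation applied to each point position
    template <typename IndexIterT>
    void apply(openvdb::Vec3d& position, const IndexIterT&) const { position += mOffset; }

    openvdb::Vec3d mOffset;
};

void translatePoints(openvdb::points::PointDataGrid& grid)
{
    OffsetDeformer deformer(openvdb::Vec3d(0.0, 1.0, 0.0));
    // moves the points (and all of their attributes) into their new voxels
    openvdb::points::movePoints(grid, deformer);
}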
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/AttributeSet.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file points/AttributeSet.cc #include "AttributeSet.h" #include "AttributeGroup.h" #include <algorithm> // std::equal #include <string> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { namespace { // remove the items from the vector corresponding to the indices template <typename T> void eraseIndices(std::vector<T>& vec, const std::vector<size_t>& indices) { // early-exit if no indices to erase if (indices.empty()) return; // build the sorted, unique indices to remove std::vector<size_t> toRemove(indices); std::sort(toRemove.rbegin(), toRemove.rend()); toRemove.erase(unique(toRemove.begin(), toRemove.end()), toRemove.end()); // throw if the largest index is out of range if (*toRemove.begin() >= vec.size()) { OPENVDB_THROW(LookupError, "Cannot erase indices as index is out of range.") } // erase elements from the back for (auto it = toRemove.cbegin(); it != toRemove.cend(); ++it) { vec.erase(vec.begin() + (*it)); } } // return true if a string begins with a particular substring bool startsWith(const std::string& str, const std::string& prefix) { return str.compare(0, prefix.length(), prefix) == 0; } } //////////////////////////////////////// // AttributeSet implementation AttributeSet::AttributeSet() : mDescr(new Descriptor()) { } AttributeSet::AttributeSet(const AttributeSet& attrSet, Index arrayLength, const AttributeArray::ScopedRegistryLock* lock) : mDescr(attrSet.descriptorPtr()) , mAttrs(attrSet.descriptor().size(), AttributeArray::Ptr()) { std::unique_ptr<AttributeArray::ScopedRegistryLock> localLock; if (!lock) { localLock.reset(new AttributeArray::ScopedRegistryLock); lock = localLock.get(); } const MetaMap& meta = mDescr->getMetadata(); bool hasMetadata = meta.metaCount(); for (const auto& namePos : mDescr->map()) { const size_t& pos = namePos.second; Metadata::ConstPtr metadata; if (hasMetadata) metadata = meta["default:" + namePos.first]; const AttributeArray* existingArray = attrSet.getConst(pos); const bool constantStride = existingArray->hasConstantStride(); const Index stride = constantStride ? 
existingArray->stride() : existingArray->dataSize(); AttributeArray::Ptr array = AttributeArray::create(mDescr->type(pos), arrayLength, stride, constantStride, metadata.get(), lock); // transfer hidden and transient flags if (existingArray->isHidden()) array->setHidden(true); if (existingArray->isTransient()) array->setTransient(true); mAttrs[pos] = array; } } AttributeSet::AttributeSet(const DescriptorPtr& descr, Index arrayLength, const AttributeArray::ScopedRegistryLock* lock) : mDescr(descr) , mAttrs(descr->size(), AttributeArray::Ptr()) { std::unique_ptr<AttributeArray::ScopedRegistryLock> localLock; if (!lock) { localLock.reset(new AttributeArray::ScopedRegistryLock); lock = localLock.get(); } const MetaMap& meta = mDescr->getMetadata(); bool hasMetadata = meta.metaCount(); for (const auto& namePos : mDescr->map()) { const size_t& pos = namePos.second; Metadata::ConstPtr metadata; if (hasMetadata) metadata = meta["default:" + namePos.first]; mAttrs[pos] = AttributeArray::create(mDescr->type(pos), arrayLength, /*stride=*/1, /*constantStride=*/true, metadata.get(), lock); } } AttributeSet::AttributeSet(const AttributeSet& rhs) : mDescr(rhs.mDescr) , mAttrs(rhs.mAttrs) { } size_t AttributeSet::memUsage() const { size_t bytes = sizeof(*this) + mDescr->memUsage(); for (const auto& attr : mAttrs) { bytes += attr->memUsage(); } return bytes; } size_t AttributeSet::find(const std::string& name) const { return mDescr->find(name); } size_t AttributeSet::replace(const std::string& name, const AttributeArray::Ptr& attr) { const size_t pos = this->find(name); return pos != INVALID_POS ? this->replace(pos, attr) : INVALID_POS; } size_t AttributeSet::replace(size_t pos, const AttributeArray::Ptr& attr) { assert(pos != INVALID_POS); assert(pos < mAttrs.size()); if (attr->type() != mDescr->type(pos)) { return INVALID_POS; } mAttrs[pos] = attr; return pos; } const AttributeArray* AttributeSet::getConst(const std::string& name) const { const size_t pos = this->find(name); if (pos < mAttrs.size()) return this->getConst(pos); return nullptr; } const AttributeArray* AttributeSet::get(const std::string& name) const { return this->getConst(name); } AttributeArray* AttributeSet::get(const std::string& name) { const size_t pos = this->find(name); if (pos < mAttrs.size()) return this->get(pos); return nullptr; } const AttributeArray* AttributeSet::getConst(size_t pos) const { assert(pos != INVALID_POS); assert(pos < mAttrs.size()); return mAttrs[pos].get(); } const AttributeArray* AttributeSet::get(size_t pos) const { assert(pos != INVALID_POS); assert(pos < mAttrs.size()); return this->getConst(pos); } AttributeArray* AttributeSet::get(size_t pos) { makeUnique(pos); return mAttrs[pos].get(); } size_t AttributeSet::groupOffset(const Name& group) const { return mDescr->groupOffset(group); } size_t AttributeSet::groupOffset(const Util::GroupIndex& index) const { return mDescr->groupOffset(index); } AttributeSet::Descriptor::GroupIndex AttributeSet::groupIndex(const Name& group) const { return mDescr->groupIndex(group); } AttributeSet::Descriptor::GroupIndex AttributeSet::groupIndex(const size_t offset) const { return mDescr->groupIndex(offset); } std::vector<size_t> AttributeSet::groupAttributeIndices() const { std::vector<size_t> indices; for (const auto& namePos : mDescr->map()) { const AttributeArray* array = this->getConst(namePos.first); if (isGroup(*array)) { indices.push_back(namePos.second); } } return indices; } bool AttributeSet::isShared(size_t pos) const { assert(pos != INVALID_POS); assert(pos < 
mAttrs.size()); return !mAttrs[pos].unique(); } void AttributeSet::makeUnique(size_t pos) { assert(pos != INVALID_POS); assert(pos < mAttrs.size()); if (!mAttrs[pos].unique()) { mAttrs[pos] = mAttrs[pos]->copy(); } } AttributeArray::Ptr AttributeSet::appendAttribute( const Name& name, const NamePair& type, const Index strideOrTotalSize, const bool constantStride, const Metadata* defaultValue) { Descriptor::Ptr descriptor = mDescr->duplicateAppend(name, type); // store the attribute default value in the descriptor metadata if (defaultValue) descriptor->setDefaultValue(name, *defaultValue); // extract the index from the descriptor const size_t pos = descriptor->find(name); return this->appendAttribute(*mDescr, descriptor, pos, strideOrTotalSize, constantStride, defaultValue); } AttributeArray::Ptr AttributeSet::appendAttribute( const Descriptor& expected, DescriptorPtr& replacement, const size_t pos, const Index strideOrTotalSize, const bool constantStride, const Metadata* defaultValue, const AttributeArray::ScopedRegistryLock* lock) { // ensure the descriptor is as expected if (*mDescr != expected) { OPENVDB_THROW(LookupError, "Cannot append attributes as descriptors do not match.") } assert(replacement->size() >= mDescr->size()); const size_t offset = mDescr->size(); // extract the array length from the first attribute array if it exists const Index arrayLength = offset > 0 ? this->get(0)->size() : 1; // extract the type from the descriptor const NamePair& type = replacement->type(pos); // append the new array AttributeArray::Ptr array = AttributeArray::create( type, arrayLength, strideOrTotalSize, constantStride, defaultValue, lock); // if successful, update Descriptor and append the created array mDescr = replacement; mAttrs.push_back(array); return array; } AttributeArray::Ptr AttributeSet::removeAttribute(const Name& name) { const size_t pos = this->find(name); if (pos == INVALID_POS) return AttributeArray::Ptr(); return this->removeAttribute(pos); } AttributeArray::Ptr AttributeSet::removeAttribute(const size_t pos) { if (pos >= mAttrs.size()) return AttributeArray::Ptr(); assert(mAttrs[pos]); AttributeArray::Ptr array; std::swap(array, mAttrs[pos]); assert(array); // safely drop the attribute and update the descriptor std::vector<size_t> toDrop{pos}; this->dropAttributes(toDrop); return array; } AttributeArray::Ptr AttributeSet::removeAttributeUnsafe(const size_t pos) { if (pos >= mAttrs.size()) return AttributeArray::Ptr(); assert(mAttrs[pos]); AttributeArray::Ptr array; std::swap(array, mAttrs[pos]); return array; } void AttributeSet::dropAttributes(const std::vector<size_t>& pos) { if (pos.empty()) return; Descriptor::Ptr descriptor = mDescr->duplicateDrop(pos); this->dropAttributes(pos, *mDescr, descriptor); } void AttributeSet::dropAttributes( const std::vector<size_t>& pos, const Descriptor& expected, DescriptorPtr& replacement) { if (pos.empty()) return; // ensure the descriptor is as expected if (*mDescr != expected) { OPENVDB_THROW(LookupError, "Cannot drop attributes as descriptors do not match.") } mDescr = replacement; eraseIndices(mAttrs, pos); // remove any unused default values mDescr->pruneUnusedDefaultValues(); } void AttributeSet::renameAttributes(const Descriptor& expected, const DescriptorPtr& replacement) { // ensure the descriptor is as expected if (*mDescr != expected) { OPENVDB_THROW(LookupError, "Cannot rename attribute as descriptors do not match.") } mDescr = replacement; } void AttributeSet::reorderAttributes(const DescriptorPtr& replacement) { if (*mDescr 
== *replacement) { this->resetDescriptor(replacement); return; } if (!mDescr->hasSameAttributes(*replacement)) { OPENVDB_THROW(LookupError, "Cannot reorder attributes as descriptors do not contain the same attributes.") } AttrArrayVec attrs(replacement->size()); // compute target indices for attributes from the given decriptor for (const auto& namePos : mDescr->map()) { const size_t index = replacement->find(namePos.first); attrs[index] = AttributeArray::Ptr(mAttrs[namePos.second]); } // copy the ordering to the member attributes vector and update descriptor to be target std::copy(attrs.begin(), attrs.end(), mAttrs.begin()); mDescr = replacement; } void AttributeSet::resetDescriptor(const DescriptorPtr& replacement, const bool allowMismatchingDescriptors) { // ensure the descriptors match if (!allowMismatchingDescriptors && *mDescr != *replacement) { OPENVDB_THROW(LookupError, "Cannot swap descriptor as replacement does not match.") } mDescr = replacement; } void AttributeSet::read(std::istream& is) { this->readDescriptor(is); this->readMetadata(is); this->readAttributes(is); } void AttributeSet::write(std::ostream& os, bool outputTransient) const { this->writeDescriptor(os, outputTransient); this->writeMetadata(os, outputTransient); this->writeAttributes(os, outputTransient); } void AttributeSet::readDescriptor(std::istream& is) { mDescr->read(is); } void AttributeSet::writeDescriptor(std::ostream& os, bool outputTransient) const { // build a vector of all attribute arrays that have a transient flag // unless also writing transient attributes std::vector<size_t> transientArrays; if (!outputTransient) { for (size_t i = 0; i < size(); i++) { const AttributeArray* array = this->getConst(i); if (array->isTransient()) { transientArrays.push_back(i); } } } // write out a descriptor without transient attributes if (transientArrays.empty()) { mDescr->write(os); } else { Descriptor::Ptr descr = mDescr->duplicateDrop(transientArrays); descr->write(os); } } void AttributeSet::readMetadata(std::istream& is) { AttrArrayVec(mDescr->size()).swap(mAttrs); // allocate vector for (size_t n = 0, N = mAttrs.size(); n < N; ++n) { mAttrs[n] = AttributeArray::create(mDescr->type(n), 1, 1); mAttrs[n]->readMetadata(is); } } void AttributeSet::writeMetadata(std::ostream& os, bool outputTransient, bool paged) const { // write attribute metadata for (size_t i = 0; i < size(); i++) { const AttributeArray* array = this->getConst(i); array->writeMetadata(os, outputTransient, paged); } } void AttributeSet::readAttributes(std::istream& is) { for (size_t i = 0; i < mAttrs.size(); i++) { mAttrs[i]->readBuffers(is); } } void AttributeSet::writeAttributes(std::ostream& os, bool outputTransient) const { for (auto attr : mAttrs) { attr->writeBuffers(os, outputTransient); } } bool AttributeSet::operator==(const AttributeSet& other) const { if(*this->mDescr != *other.mDescr) return false; if(this->mAttrs.size() != other.mAttrs.size()) return false; for (size_t n = 0; n < this->mAttrs.size(); ++n) { if (*this->mAttrs[n] != *other.mAttrs[n]) return false; } return true; } //////////////////////////////////////// // AttributeSet::Descriptor implementation AttributeSet::Descriptor::Descriptor() { } AttributeSet::Descriptor::Descriptor(const Descriptor& rhs) : mNameMap(rhs.mNameMap) , mTypes(rhs.mTypes) , mGroupMap(rhs.mGroupMap) , mMetadata(rhs.mMetadata) { } bool AttributeSet::Descriptor::operator==(const Descriptor& rhs) const { if (this == &rhs) return true; if (mTypes.size() != rhs.mTypes.size() || mNameMap.size() != 
rhs.mNameMap.size() || mGroupMap.size() != rhs.mGroupMap.size()) { return false; } for (size_t n = 0, N = mTypes.size(); n < N; ++n) { if (mTypes[n] != rhs.mTypes[n]) return false; } if (this->mMetadata != rhs.mMetadata) return false; return std::equal(mGroupMap.begin(), mGroupMap.end(), rhs.mGroupMap.begin()) && std::equal(mNameMap.begin(), mNameMap.end(), rhs.mNameMap.begin()); } bool AttributeSet::Descriptor::hasSameAttributes(const Descriptor& rhs) const { if (this == &rhs) return true; if (mTypes.size() != rhs.mTypes.size() || mNameMap.size() != rhs.mNameMap.size() || mGroupMap.size() != rhs.mGroupMap.size()) { return false; } for (const auto& namePos : mNameMap) { const size_t index = rhs.find(namePos.first); if (index == INVALID_POS) return false; if (mTypes[namePos.second] != rhs.mTypes[index]) return false; } return std::equal(mGroupMap.begin(), mGroupMap.end(), rhs.mGroupMap.begin()); } size_t AttributeSet::Descriptor::count(const NamePair& matchType) const { return std::count(mTypes.begin(), mTypes.end(), matchType); } size_t AttributeSet::Descriptor::memUsage() const { size_t bytes = sizeof(NameToPosMap::mapped_type) * this->size(); for (const auto& namePos : mNameMap) { bytes += namePos.first.capacity(); } for (const NamePair& type : mTypes) { bytes += type.first.capacity(); bytes += type.second.capacity(); } return sizeof(*this) + bytes; } size_t AttributeSet::Descriptor::find(const std::string& name) const { auto it = mNameMap.find(name); if (it != mNameMap.end()) { return it->second; } return INVALID_POS; } size_t AttributeSet::Descriptor::rename(const std::string& fromName, const std::string& toName) { if (!validName(toName)) throw RuntimeError("Attribute name contains invalid characters - " + toName); size_t pos = INVALID_POS; // check if the new name is already used. auto it = mNameMap.find(toName); if (it != mNameMap.end()) return pos; it = mNameMap.find(fromName); if (it != mNameMap.end()) { pos = it->second; mNameMap.erase(it); mNameMap[toName] = pos; // rename default value if it exists std::stringstream ss; ss << "default:" << fromName; Metadata::Ptr defaultValue = mMetadata[ss.str()]; if (defaultValue) { mMetadata.removeMeta(ss.str()); ss.str(""); ss << "default:" << toName; mMetadata.insertMeta(ss.str(), *defaultValue); } } return pos; } size_t AttributeSet::Descriptor::renameGroup(const std::string& fromName, const std::string& toName) { if (!validName(toName)) throw RuntimeError("Group name contains invalid characters - " + toName); size_t pos = INVALID_POS; // check if the new name is already used. 
auto it = mGroupMap.find(toName); if (it != mGroupMap.end()) return pos; it = mGroupMap.find(fromName); if (it != mGroupMap.end()) { pos = it->second; mGroupMap.erase(it); mGroupMap[toName] = pos; } return pos; } const Name& AttributeSet::Descriptor::valueType(size_t pos) const { // pos is assumed to exist return this->type(pos).first; } const NamePair& AttributeSet::Descriptor::type(size_t pos) const { // assert that pos is valid and in-range assert(pos != AttributeSet::INVALID_POS); assert(pos < mTypes.size()); return mTypes[pos]; } MetaMap& AttributeSet::Descriptor::getMetadata() { return mMetadata; } const MetaMap& AttributeSet::Descriptor::getMetadata() const { return mMetadata; } bool AttributeSet::Descriptor::hasDefaultValue(const Name& name) const { std::stringstream ss; ss << "default:" << name; return bool(mMetadata[ss.str()]); } void AttributeSet::Descriptor::setDefaultValue(const Name& name, const Metadata& defaultValue) { const size_t pos = find(name); if (pos == INVALID_POS) { OPENVDB_THROW(LookupError, "Cannot find attribute name to set default value.") } // check type of metadata matches attribute type const Name& valueType = this->valueType(pos); if (valueType != defaultValue.typeName()) { OPENVDB_THROW(TypeError, "Mis-matching Default Value Type"); } std::stringstream ss; ss << "default:" << name; mMetadata.insertMeta(ss.str(), defaultValue); } void AttributeSet::Descriptor::removeDefaultValue(const Name& name) { std::stringstream ss; ss << "default:" << name; mMetadata.removeMeta(ss.str()); } void AttributeSet::Descriptor::pruneUnusedDefaultValues() { // store any default metadata keys for which the attribute name is no longer present std::vector<Name> metaToErase; for (auto it = mMetadata.beginMeta(), itEnd = mMetadata.endMeta(); it != itEnd; ++it) { const Name name = it->first; // ignore non-default metadata if (!startsWith(name, "default:")) continue; const Name defaultName = name.substr(8, it->first.size() - 8); if (mNameMap.find(defaultName) == mNameMap.end()) { metaToErase.push_back(name); } } // remove this metadata for (const Name& name : metaToErase) { mMetadata.removeMeta(name); } } size_t AttributeSet::Descriptor::insert(const std::string& name, const NamePair& typeName) { if (!validName(name)) throw RuntimeError("Attribute name contains invalid characters - " + name); size_t pos = INVALID_POS; auto it = mNameMap.find(name); if (it != mNameMap.end()) { assert(it->second < mTypes.size()); if (mTypes[it->second] != typeName) { OPENVDB_THROW(KeyError, "Cannot insert into a Descriptor with a duplicate name, but different type.") } pos = it->second; } else { if (!AttributeArray::isRegistered(typeName)) { OPENVDB_THROW(KeyError, "Failed to insert '" << name << "' with unregistered attribute type '" << typeName.first << "_" << typeName.second); } pos = mTypes.size(); mTypes.push_back(typeName); mNameMap.insert(it, NameToPosMap::value_type(name, pos)); } return pos; } AttributeSet::Descriptor::Ptr AttributeSet::Descriptor::create(const NameAndTypeVec& attrs, const NameToPosMap& groupMap, const MetaMap& metadata) { auto newDescriptor = std::make_shared<Descriptor>(); for (const NameAndType& attr : attrs) { newDescriptor->insert(attr.name, attr.type); } newDescriptor->mGroupMap = groupMap; newDescriptor->mMetadata = metadata; return newDescriptor; } AttributeSet::Descriptor::Ptr AttributeSet::Descriptor::create(const NamePair& positionType) { auto descr = std::make_shared<Descriptor>(); descr->insert("P", positionType); return descr; } AttributeSet::Descriptor::Ptr 
AttributeSet::Descriptor::duplicateAppend(const Name& name, const NamePair& type) const { Inserter attributes; this->appendTo(attributes.vec); attributes.add(NameAndType(name, type)); return Descriptor::create(attributes.vec, mGroupMap, mMetadata); } AttributeSet::Descriptor::Ptr AttributeSet::Descriptor::duplicateDrop(const std::vector<size_t>& pos) const { NameAndTypeVec vec; this->appendTo(vec); Descriptor::Ptr descriptor; // If groups exist, check to see if those arrays are being dropped if (!mGroupMap.empty()) { // extract all attribute array group indices and specific groups // to drop std::vector<size_t> groups, groupsToDrop; for (size_t i = 0; i < vec.size(); i++) { if (vec[i].type == GroupAttributeArray::attributeType()) { groups.push_back(i); if (std::find(pos.begin(), pos.end(), i) != pos.end()) { groupsToDrop.push_back(i); } } } // drop the indices in indices from vec eraseIndices(vec, pos); if (!groupsToDrop.empty()) { // configure group mapping if group arrays have been dropped NameToPosMap droppedGroupMap = mGroupMap; const size_t GROUP_BITS = sizeof(GroupType) * CHAR_BIT; for (auto iter = droppedGroupMap.begin(); iter != droppedGroupMap.end();) { const size_t groupArrayPos = iter->second / GROUP_BITS; const size_t arrayPos = groups[groupArrayPos]; if (std::find(pos.begin(), pos.end(), arrayPos) != pos.end()) { iter = droppedGroupMap.erase(iter); } else { size_t offset(0); for (const size_t& idx : groupsToDrop) { if (idx >= arrayPos) break; ++offset; } iter->second -= (offset * GROUP_BITS); ++iter; } } descriptor = Descriptor::create(vec, droppedGroupMap, mMetadata); // remove any unused default values descriptor->pruneUnusedDefaultValues(); return descriptor; } } else { // drop the indices in pos from vec eraseIndices(vec, pos); } descriptor = Descriptor::create(vec, mGroupMap, mMetadata); // remove any unused default values descriptor->pruneUnusedDefaultValues(); return descriptor; } void AttributeSet::Descriptor::appendTo(NameAndTypeVec& attrs) const { // build a std::map<pos, name> (ie key and value swapped) using PosToNameMap = std::map<size_t, std::string>; PosToNameMap posToNameMap; for (const auto& namePos : mNameMap) { posToNameMap[namePos.second] = namePos.first; } // std::map is sorted by key, so attributes can now be inserted in position order for (const auto& posName : posToNameMap) { attrs.emplace_back(posName.second, this->type(posName.first)); } } bool AttributeSet::Descriptor::hasGroup(const Name& group) const { return mGroupMap.find(group) != mGroupMap.end(); } void AttributeSet::Descriptor::setGroup(const Name& group, const size_t offset, const bool checkValidOffset) { if (!validName(group)) { throw RuntimeError("Group name contains invalid characters - " + group); } if (checkValidOffset) { // check offset is not out-of-range if (offset >= this->availableGroups()) { throw RuntimeError("Group offset is out-of-range - " + group); } // check offset is not already in use for (const auto& namePos : mGroupMap) { if (namePos.second == offset) { throw RuntimeError("Group offset is already in use - " + group); } } } mGroupMap[group] = offset; } void AttributeSet::Descriptor::dropGroup(const Name& group) { mGroupMap.erase(group); } void AttributeSet::Descriptor::clearGroups() { mGroupMap.clear(); } const Name AttributeSet::Descriptor::uniqueName(const Name& name) const { auto it = mNameMap.find(name); if (it == mNameMap.end()) return name; std::ostringstream ss; size_t i(0); while (it != mNameMap.end()) { ss.str(""); ss << name << i++; it = mNameMap.find(ss.str()); 
} return ss.str(); } const Name AttributeSet::Descriptor::uniqueGroupName(const Name& name) const { auto it = mGroupMap.find(name); if (it == mGroupMap.end()) return name; std::ostringstream ss; size_t i(0); while (it != mGroupMap.end()) { ss.str(""); ss << name << i++; it = mGroupMap.find(ss.str()); } return ss.str(); } size_t AttributeSet::Descriptor::groupOffset(const Name& group) const { const auto it = mGroupMap.find(group); if (it == mGroupMap.end()) { return INVALID_POS; } return it->second; } size_t AttributeSet::Descriptor::groupOffset(const Util::GroupIndex& index) const { if (index.first >= mNameMap.size()) { OPENVDB_THROW(LookupError, "Out of range group index.") } if (mTypes[index.first] != GroupAttributeArray::attributeType()) { OPENVDB_THROW(LookupError, "Group index invalid.") } // find the relative index in the group attribute arrays size_t relativeIndex = 0; for (const auto& namePos : mNameMap) { if (namePos.second < index.first && mTypes[namePos.second] == GroupAttributeArray::attributeType()) { relativeIndex++; } } const size_t GROUP_BITS = sizeof(GroupType) * CHAR_BIT; return (relativeIndex * GROUP_BITS) + index.second; } AttributeSet::Descriptor::GroupIndex AttributeSet::Descriptor::groupIndex(const Name& group) const { const size_t offset = this->groupOffset(group); if (offset == INVALID_POS) { OPENVDB_THROW(LookupError, "Group not found - " << group << ".") } return this->groupIndex(offset); } AttributeSet::Descriptor::GroupIndex AttributeSet::Descriptor::groupIndex(const size_t offset) const { // extract all attribute array group indices std::vector<size_t> groups; for (const auto& namePos : mNameMap) { if (mTypes[namePos.second] == GroupAttributeArray::attributeType()) { groups.push_back(namePos.second); } } if (offset >= groups.size() * this->groupBits()) { OPENVDB_THROW(LookupError, "Out of range group offset - " << offset << ".") } // adjust relative offset to find offset into the array vector std::sort(groups.begin(), groups.end()); return Util::GroupIndex(groups[offset / this->groupBits()], static_cast<uint8_t>(offset % this->groupBits())); } size_t AttributeSet::Descriptor::availableGroups() const { // the number of group attributes * number of bits per group const size_t groupAttributes = this->count(GroupAttributeArray::attributeType()); return groupAttributes * this->groupBits(); } size_t AttributeSet::Descriptor::unusedGroups() const { // compute total slots (one slot per bit of the group attributes) const size_t availableGroups = this->availableGroups(); if (availableGroups == 0) return 0; // compute slots in use const size_t usedGroups = mGroupMap.size(); return availableGroups - usedGroups; } bool AttributeSet::Descriptor::canCompactGroups() const { // can compact if more unused groups than in one group attribute array return this->unusedGroups() >= this->groupBits(); } size_t AttributeSet::Descriptor::unusedGroupOffset(size_t hint) const { // all group offsets are in use if (unusedGroups() == size_t(0)) { return std::numeric_limits<size_t>::max(); } // build a list of group indices std::vector<size_t> indices; indices.reserve(mGroupMap.size()); for (const auto& namePos : mGroupMap) { indices.push_back(namePos.second); } std::sort(indices.begin(), indices.end()); // return hint if not already in use if (hint != std::numeric_limits<Index>::max() && hint < availableGroups() && std::find(indices.begin(), indices.end(), hint) == indices.end()) { return hint; } // otherwise return first index not present size_t offset = 0; for (const size_t& index : 
indices) { if (index != offset) break; offset++; } return offset; } bool AttributeSet::Descriptor::requiresGroupMove(Name& sourceName, size_t& sourceOffset, size_t& targetOffset) const { targetOffset = this->unusedGroupOffset(); for (const auto& namePos : mGroupMap) { // move only required if source comes after the target if (namePos.second >= targetOffset) { sourceName = namePos.first; sourceOffset = namePos.second; return true; } } return false; } bool AttributeSet::Descriptor::groupIndexCollision(const Descriptor& rhs) const { const auto& groupMap = this->groupMap(); const auto& otherGroupMap = rhs.groupMap(); // iterate both group maps at the same time and find any keys that occur // in both maps and test their values for equality auto groupsIt1 = groupMap.cbegin(); auto groupsIt2 = otherGroupMap.cbegin(); while (groupsIt1 != groupMap.cend() && groupsIt2 != otherGroupMap.cend()) { if (groupsIt1->first < groupsIt2->first) { ++groupsIt1; } else if (groupsIt1->first > groupsIt2->first) { ++groupsIt2; } else { if (groupsIt1->second != groupsIt2->second) { return true; } else { ++groupsIt1; ++groupsIt2; } } } return false; } bool AttributeSet::Descriptor::validName(const Name& name) { if (name.empty()) return false; return std::find_if(name.begin(), name.end(), [&](int c) { return !(isalnum(c) || (c == '_') || (c == '|') || (c == ':')); } ) == name.end(); } void AttributeSet::Descriptor::parseNames( std::vector<std::string>& includeNames, std::vector<std::string>& excludeNames, bool& includeAll, const std::string& nameStr) { includeAll = false; std::stringstream tokenStream(nameStr); Name token; while (tokenStream >> token) { bool negate = startsWith(token, "^") || startsWith(token, "!"); if (negate) { if (token.length() < 2) throw RuntimeError("Negate character (^) must prefix a name."); token = token.substr(1, token.length()-1); if (!validName(token)) throw RuntimeError("Name contains invalid characters - " + token); excludeNames.push_back(token); } else if (!includeAll) { if (token == "*") { includeAll = true; includeNames.clear(); continue; } if (!validName(token)) throw RuntimeError("Name contains invalid characters - " + token); includeNames.push_back(token); } } } void AttributeSet::Descriptor::parseNames( std::vector<std::string>& includeNames, std::vector<std::string>& excludeNames, const std::string& nameStr) { bool includeAll = false; Descriptor::parseNames(includeNames, excludeNames, includeAll, nameStr); } void AttributeSet::Descriptor::write(std::ostream& os) const { const Index64 arraylength = Index64(mTypes.size()); os.write(reinterpret_cast<const char*>(&arraylength), sizeof(Index64)); for (const NamePair& np : mTypes) { writeString(os, np.first); writeString(os, np.second); } for (auto it = mNameMap.begin(), endIt = mNameMap.end(); it != endIt; ++it) { writeString(os, it->first); os.write(reinterpret_cast<const char*>(&it->second), sizeof(Index64)); } const Index64 grouplength = Index64(mGroupMap.size()); os.write(reinterpret_cast<const char*>(&grouplength), sizeof(Index64)); for (auto groupIt = mGroupMap.cbegin(), endGroupIt = mGroupMap.cend(); groupIt != endGroupIt; ++groupIt) { writeString(os, groupIt->first); os.write(reinterpret_cast<const char*>(&groupIt->second), sizeof(Index64)); } mMetadata.writeMeta(os); } void AttributeSet::Descriptor::read(std::istream& is) { Index64 arraylength = 0; is.read(reinterpret_cast<char*>(&arraylength), sizeof(Index64)); std::vector<NamePair>(size_t(arraylength)).swap(mTypes); for (NamePair& np : mTypes) { np.first = readString(is); 
np.second = readString(is); } mNameMap.clear(); std::pair<std::string, size_t> nameAndOffset; for (Index64 n = 0; n < arraylength; ++n) { nameAndOffset.first = readString(is); if (!validName(nameAndOffset.first)) throw IoError("Attribute name contains invalid characters - " + nameAndOffset.first); is.read(reinterpret_cast<char*>(&nameAndOffset.second), sizeof(Index64)); mNameMap.insert(nameAndOffset); } Index64 grouplength = 0; is.read(reinterpret_cast<char*>(&grouplength), sizeof(Index64)); for (Index64 n = 0; n < grouplength; ++n) { nameAndOffset.first = readString(is); if (!validName(nameAndOffset.first)) throw IoError("Group name contains invalid characters - " + nameAndOffset.first); is.read(reinterpret_cast<char*>(&nameAndOffset.second), sizeof(Index64)); mGroupMap.insert(nameAndOffset); } mMetadata.readMeta(is); } //////////////////////////////////////// } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb
35,714
C++
24.861694
131
0.624685
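As a rough illustration of the AttributeSet and Descriptor machinery implemented above, the sketch below builds a small set directly; the "density" attribute name, the array length and the assumption that openvdb::initialize() has already registered the attribute types are illustrative, and typical point workflows would normally go through the PointDataGrid helpers instead.

// A minimal sketch of direct AttributeSet usage, under the assumptions noted above.
#include <openvdb/openvdb.h>
#include <openvdb/points/AttributeArray.h>
#include <openvdb/points/AttributeSet.h>

void attributeSetSketch()
{
    using namespace openvdb::points;

    // descriptor with a single world-space position attribute "P"
    auto descriptor = AttributeSet::Descriptor::create(
        TypedAttributeArray<openvdb::Vec3f>::attributeType());

    // an attribute set whose arrays hold 100 elements
    AttributeSet attrSet(descriptor, /*arrayLength=*/100);

    // append a float "density" attribute (internally duplicates the descriptor)
    attrSet.appendAttribute("density", TypedAttributeArray<float>::attributeType());

    // look up its position and write a value through a typed handle
    const size_t pos = attrSet.find("density");
    if (pos != AttributeSet::INVALID_POS) {
        AttributeWriteHandle<float> handle(*attrSet.get(pos));
        handle.set(/*index=*/0, 1.0f);
    }

    // drop the attribute again by name
    attrSet.removeAttribute("density");
}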
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/PointAttribute.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @author Dan Bailey, Khang Ngo /// /// @file points/PointAttribute.h /// /// @brief Point attribute manipulation in a VDB Point Grid. #ifndef OPENVDB_POINTS_POINT_ATTRIBUTE_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_POINT_ATTRIBUTE_HAS_BEEN_INCLUDED #include <openvdb/openvdb.h> #include "AttributeArrayString.h" #include "AttributeSet.h" #include "AttributeGroup.h" #include "PointDataGrid.h" namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { namespace point_attribute_internal { template <typename ValueType> struct Default { static inline ValueType value() { return zeroVal<ValueType>(); } }; } // namespace point_attribute_internal /// @brief Appends a new attribute to the VDB tree /// (this method does not require a templated AttributeType) /// /// @param tree the PointDataTree to be appended to. /// @param name name for the new attribute. /// @param type the type of the attibute. /// @param strideOrTotalSize the stride of the attribute /// @param constantStride if @c false, stride is interpreted as total size of the array /// @param defaultValue metadata default attribute value /// @param hidden mark attribute as hidden /// @param transient mark attribute as transient template <typename PointDataTreeT> inline void appendAttribute(PointDataTreeT& tree, const Name& name, const NamePair& type, const Index strideOrTotalSize = 1, const bool constantStride = true, const Metadata* defaultValue = nullptr, const bool hidden = false, const bool transient = false); /// @brief Appends a new attribute to the VDB tree. /// /// @param tree the PointDataTree to be appended to. /// @param name name for the new attribute /// @param uniformValue the initial value of the attribute /// @param strideOrTotalSize the stride of the attribute /// @param constantStride if @c false, stride is interpreted as total size of the array /// @param defaultValue metadata default attribute value /// @param hidden mark attribute as hidden /// @param transient mark attribute as transient template <typename ValueType, typename CodecType = NullCodec, typename PointDataTreeT = PointDataTree> inline void appendAttribute(PointDataTreeT& tree, const std::string& name, const ValueType& uniformValue = point_attribute_internal::Default<ValueType>::value(), const Index strideOrTotalSize = 1, const bool constantStride = true, const TypedMetadata<ValueType>* defaultValue = nullptr, const bool hidden = false, const bool transient = false); /// @brief Collapse the attribute into a uniform value /// /// @param tree the PointDataTree in which to collapse the attribute. /// @param name name for the attribute. /// @param uniformValue value of the attribute template <typename ValueType, typename PointDataTreeT> inline void collapseAttribute( PointDataTreeT& tree, const Name& name, const ValueType& uniformValue = point_attribute_internal::Default<ValueType>::value()); /// @brief Drops attributes from the VDB tree. /// /// @param tree the PointDataTree to be dropped from. /// @param indices indices of the attributes to drop. template <typename PointDataTreeT> inline void dropAttributes( PointDataTreeT& tree, const std::vector<size_t>& indices); /// @brief Drops attributes from the VDB tree. /// /// @param tree the PointDataTree to be dropped from. /// @param names names of the attributes to drop. 
template <typename PointDataTreeT> inline void dropAttributes( PointDataTreeT& tree, const std::vector<Name>& names); /// @brief Drop one attribute from the VDB tree (convenience method). /// /// @param tree the PointDataTree to be dropped from. /// @param index index of the attribute to drop. template <typename PointDataTreeT> inline void dropAttribute( PointDataTreeT& tree, const size_t& index); /// @brief Drop one attribute from the VDB tree (convenience method). /// /// @param tree the PointDataTree to be dropped from. /// @param name name of the attribute to drop. template <typename PointDataTreeT> inline void dropAttribute( PointDataTreeT& tree, const Name& name); /// @brief Rename attributes in a VDB tree. /// /// @param tree the PointDataTree. /// @param oldNames a list of old attribute names to rename from. /// @param newNames a list of new attribute names to rename to. /// /// @note Number of oldNames must match the number of newNames. /// /// @note Duplicate names and renaming group attributes are not allowed. template <typename PointDataTreeT> inline void renameAttributes(PointDataTreeT& tree, const std::vector<Name>& oldNames, const std::vector<Name>& newNames); /// @brief Rename an attribute in a VDB tree. /// /// @param tree the PointDataTree. /// @param oldName the old attribute name to rename from. /// @param newName the new attribute name to rename to. /// /// @note newName must not already exist and must not be a group attribute. template <typename PointDataTreeT> inline void renameAttribute(PointDataTreeT& tree, const Name& oldName, const Name& newName); /// @brief Compact attributes in a VDB tree (if possible). /// /// @param tree the PointDataTree. template <typename PointDataTreeT> inline void compactAttributes(PointDataTreeT& tree); //////////////////////////////////////// namespace point_attribute_internal { template <typename ValueType> inline void collapseAttribute(AttributeArray& array, const AttributeSet::Descriptor&, const ValueType& uniformValue) { AttributeWriteHandle<ValueType> handle(array); handle.collapse(uniformValue); } inline void collapseAttribute(AttributeArray& array, const AttributeSet::Descriptor& descriptor, const Name& uniformValue) { StringAttributeWriteHandle handle(array, descriptor.getMetadata()); handle.collapse(uniformValue); } //////////////////////////////////////// template <typename ValueType, typename CodecType> struct AttributeTypeConversion { static const NamePair& type() { return TypedAttributeArray<ValueType, CodecType>::attributeType(); } }; template <typename CodecType> struct AttributeTypeConversion<Name, CodecType> { static const NamePair& type() { return StringAttributeArray::attributeType(); } }; //////////////////////////////////////// template <typename PointDataTreeT, typename ValueType> struct MetadataStorage { static void add(PointDataTreeT&, const ValueType&) {} template<typename AttributeListType> static void add(PointDataTreeT&, const AttributeListType&) {} }; template <typename PointDataTreeT> struct MetadataStorage<PointDataTreeT, Name> { static void add(PointDataTreeT& tree, const Name& uniformValue) { MetaMap& metadata = makeDescriptorUnique(tree)->getMetadata(); StringMetaInserter inserter(metadata); inserter.insert(uniformValue); } template<typename AttributeListType> static void add(PointDataTreeT& tree, const AttributeListType& data) { MetaMap& metadata = makeDescriptorUnique(tree)->getMetadata(); StringMetaInserter inserter(metadata); Name value; for (size_t i = 0; i < data.size(); i++) { data.get(value, 
i); inserter.insert(value); } } }; } // namespace point_attribute_internal //////////////////////////////////////// template <typename PointDataTreeT> inline void appendAttribute(PointDataTreeT& tree, const Name& name, const NamePair& type, const Index strideOrTotalSize, const bool constantStride, const Metadata* defaultValue, const bool hidden, const bool transient) { auto iter = tree.cbeginLeaf(); if (!iter) return; // do not append a non-unique attribute const auto& descriptor = iter->attributeSet().descriptor(); const size_t index = descriptor.find(name); if (index != AttributeSet::INVALID_POS) { OPENVDB_THROW(KeyError, "Cannot append an attribute with a non-unique name - " << name << "."); } // create a new attribute descriptor auto newDescriptor = descriptor.duplicateAppend(name, type); // store the attribute default value in the descriptor metadata if (defaultValue) { newDescriptor->setDefaultValue(name, *defaultValue); } // extract new pos const size_t pos = newDescriptor->find(name); // acquire registry lock to avoid locking when appending attributes in parallel AttributeArray::ScopedRegistryLock lock; // insert attributes using the new descriptor tree::LeafManager<PointDataTreeT> leafManager(tree); leafManager.foreach( [&](typename PointDataTree::LeafNodeType& leaf, size_t /*idx*/) { auto expected = leaf.attributeSet().descriptorPtr(); auto attribute = leaf.appendAttribute(*expected, newDescriptor, pos, strideOrTotalSize, constantStride, defaultValue, &lock); if (hidden) attribute->setHidden(true); if (transient) attribute->setTransient(true); }, /*threaded=*/ true ); } //////////////////////////////////////// template <typename ValueType, typename CodecType, typename PointDataTreeT> inline void appendAttribute(PointDataTreeT& tree, const std::string& name, const ValueType& uniformValue, const Index strideOrTotalSize, const bool constantStride, const TypedMetadata<ValueType>* defaultValue, const bool hidden, const bool transient) { static_assert(!std::is_base_of<AttributeArray, ValueType>::value, "ValueType must not be derived from AttributeArray"); using point_attribute_internal::AttributeTypeConversion; using point_attribute_internal::Default; using point_attribute_internal::MetadataStorage; appendAttribute(tree, name, AttributeTypeConversion<ValueType, CodecType>::type(), strideOrTotalSize, constantStride, defaultValue, hidden, transient); // if the uniform value is equal to either the default value provided // through the metadata argument or the default value for this value type, // it is not necessary to perform the collapse const bool uniformIsDefault = math::isExactlyEqual(uniformValue, bool(defaultValue) ? 
defaultValue->value() : Default<ValueType>::value()); if (!uniformIsDefault) { MetadataStorage<PointDataTreeT, ValueType>::add(tree, uniformValue); collapseAttribute<ValueType>(tree, name, uniformValue); } } //////////////////////////////////////// template <typename ValueType, typename PointDataTreeT> inline void collapseAttribute( PointDataTreeT& tree, const Name& name, const ValueType& uniformValue) { static_assert(!std::is_base_of<AttributeArray, ValueType>::value, "ValueType must not be derived from AttributeArray"); auto iter = tree.cbeginLeaf(); if (!iter) return; const auto& descriptor = iter->attributeSet().descriptor(); // throw if attribute name does not exist const size_t index = descriptor.find(name); if (index == AttributeSet::INVALID_POS) { OPENVDB_THROW(KeyError, "Cannot find attribute name in PointDataTree."); } tree::LeafManager<PointDataTreeT> leafManager(tree); leafManager.foreach( [&](typename PointDataTree::LeafNodeType& leaf, size_t /*idx*/) { assert(leaf.hasAttribute(index)); AttributeArray& array = leaf.attributeArray(index); point_attribute_internal::collapseAttribute( array, descriptor, uniformValue); }, /*threaded=*/true ); } //////////////////////////////////////// template <typename PointDataTreeT> inline void dropAttributes( PointDataTreeT& tree, const std::vector<size_t>& indices) { auto iter = tree.cbeginLeaf(); if (!iter) return; const auto& descriptor = iter->attributeSet().descriptor(); // throw if position index present in the indices as this attribute is mandatory const size_t positionIndex = descriptor.find("P"); if (positionIndex!= AttributeSet::INVALID_POS && std::find(indices.begin(), indices.end(), positionIndex) != indices.end()) { OPENVDB_THROW(KeyError, "Cannot drop mandatory position attribute."); } // insert attributes using the new descriptor auto newDescriptor = descriptor.duplicateDrop(indices); tree::LeafManager<PointDataTreeT> leafManager(tree); leafManager.foreach( [&](typename PointDataTree::LeafNodeType& leaf, size_t /*idx*/) { auto expected = leaf.attributeSet().descriptorPtr(); leaf.dropAttributes(indices, *expected, newDescriptor); }, /*threaded=*/true ); } //////////////////////////////////////// template <typename PointDataTreeT> inline void dropAttributes( PointDataTreeT& tree, const std::vector<Name>& names) { auto iter = tree.cbeginLeaf(); if (!iter) return; const AttributeSet& attributeSet = iter->attributeSet(); const AttributeSet::Descriptor& descriptor = attributeSet.descriptor(); std::vector<size_t> indices; for (const Name& name : names) { const size_t index = descriptor.find(name); // do not attempt to drop an attribute that does not exist if (index == AttributeSet::INVALID_POS) { OPENVDB_THROW(KeyError, "Cannot drop an attribute that does not exist - " << name << "."); } indices.push_back(index); } dropAttributes(tree, indices); } //////////////////////////////////////// template <typename PointDataTreeT> inline void dropAttribute( PointDataTreeT& tree, const size_t& index) { std::vector<size_t> indices{index}; dropAttributes(tree, indices); } template <typename PointDataTreeT> inline void dropAttribute( PointDataTreeT& tree, const Name& name) { std::vector<Name> names{name}; dropAttributes(tree, names); } //////////////////////////////////////// template <typename PointDataTreeT> inline void renameAttributes( PointDataTreeT& tree, const std::vector<Name>& oldNames, const std::vector<Name>& newNames) { if (oldNames.size() != newNames.size()) { OPENVDB_THROW(ValueError, "Mis-matching sizes of name vectors, cannot rename 
attributes."); } using Descriptor = AttributeSet::Descriptor; auto iter = tree.beginLeaf(); if (!iter) return; const AttributeSet& attributeSet = iter->attributeSet(); const Descriptor::Ptr descriptor = attributeSet.descriptorPtr(); auto newDescriptor = std::make_shared<Descriptor>(*descriptor); for (size_t i = 0; i < oldNames.size(); i++) { const Name& oldName = oldNames[i]; if (descriptor->find(oldName) == AttributeSet::INVALID_POS) { OPENVDB_THROW(KeyError, "Cannot find requested attribute - " << oldName << "."); } const Name& newName = newNames[i]; if (descriptor->find(newName) != AttributeSet::INVALID_POS) { OPENVDB_THROW(KeyError, "Cannot rename attribute as new name already exists - " << newName << "."); } const AttributeArray* array = attributeSet.getConst(oldName); assert(array); if (isGroup(*array)) { OPENVDB_THROW(KeyError, "Cannot rename group attribute - " << oldName << "."); } newDescriptor->rename(oldName, newName); } for (; iter; ++iter) { iter->renameAttributes(*descriptor, newDescriptor); } } template <typename PointDataTreeT> inline void renameAttribute(PointDataTreeT& tree, const Name& oldName, const Name& newName) { renameAttributes(tree, {oldName}, {newName}); } //////////////////////////////////////// template <typename PointDataTreeT> inline void compactAttributes(PointDataTreeT& tree) { auto iter = tree.beginLeaf(); if (!iter) return; tree::LeafManager<PointDataTreeT> leafManager(tree); leafManager.foreach( [&](typename PointDataTree::LeafNodeType& leaf, size_t /*idx*/) { leaf.compactAttributes(); }, /*threaded=*/ true ); } //////////////////////////////////////// } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_POINTS_POINT_ATTRIBUTE_HAS_BEEN_INCLUDED
17,919
C++
31.820513
99
0.62124
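The free functions declared above are typically used as in the sketch below; the grid parameter and the "pscale"/"Cd" attribute names are placeholders, and this covers only the common calls rather than the whole header.

// A brief sketch of appending, renaming, dropping and collapsing attributes.
#include <openvdb/points/PointAttribute.h>
#include <openvdb/points/PointDataGrid.h>

void editPointAttributes(openvdb::points::PointDataGrid& grid)
{
    using namespace openvdb::points;

    auto& tree = grid.tree();

    // append a float attribute "pscale" collapsed to a uniform initial value
    appendAttribute<float>(tree, "pscale", 0.1f);

    // append a Vec3f attribute "Cd" using the default (zero) value
    appendAttribute<openvdb::Vec3f>(tree, "Cd");

    // rename it, then drop it again
    renameAttribute(tree, "Cd", "color");
    dropAttribute(tree, "color");

    // collapse "pscale" back to a single uniform value across all points
    collapseAttribute<float>(tree, "pscale", 0.2f);
}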
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/PointDelete.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @author Nick Avramoussis, Francisco Gochez, Dan Bailey /// /// @file PointDelete.h /// /// @brief Methods for deleting points based on group membership #ifndef OPENVDB_POINTS_POINT_DELETE_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_POINT_DELETE_HAS_BEEN_INCLUDED #include "PointDataGrid.h" #include "PointGroup.h" #include "IndexIterator.h" #include "IndexFilter.h" #include <openvdb/tools/Prune.h> #include <openvdb/tree/LeafManager.h> #include <memory> #include <string> #include <vector> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { /// @brief Delete points that are members of specific groups /// /// @details This method will delete points which are members of any of the supplied groups and /// will optionally drop the groups from the tree. An invert flag can be used to /// delete points that belong to none of the groups. /// /// @param pointTree the point tree /// @param groups the groups from which to delete points /// @param invert if enabled, points not belonging to any of the groups will be deleted /// @param drop if enabled and invert is disabled, the groups will be dropped from the tree /// /// @note If the invert flag is true, none of the groups will be dropped after deleting points /// regardless of the value of the drop parameter. template <typename PointDataTreeT> inline void deleteFromGroups(PointDataTreeT& pointTree, const std::vector<std::string>& groups, bool invert = false, bool drop = true); /// @brief Delete points that are members of a group /// /// @details This method will delete points which are members of the supplied group and will /// optionally drop the group from the tree. An invert flag can be used to /// delete points that belong to none of the groups. /// /// @param pointTree the point tree with the group to delete /// @param group the name of the group to delete /// @param invert if enabled, points not belonging to any of the groups will be deleted /// @param drop if enabled and invert is disabled, the group will be dropped from the tree /// /// @note If the invert flag is true, the group will not be dropped after deleting points /// regardless of the value of the drop parameter. 
template <typename PointDataTreeT> inline void deleteFromGroup(PointDataTreeT& pointTree, const std::string& group, bool invert = false, bool drop = true); //////////////////////////////////////// namespace point_delete_internal { struct VectorWrapper { using T = std::vector<std::pair<Index, Index>>; VectorWrapper(const T& _data) : data(_data) { } operator bool() const { return index < data.size(); } VectorWrapper& operator++() { index++; return *this; } Index sourceIndex() const { assert(*this); return data[index].first; } Index targetIndex() const { assert(*this); return data[index].second; } private: const T& data; T::size_type index = 0; }; // struct VectorWrapper template <typename PointDataTreeT, typename FilterT> struct DeleteByFilterOp { using LeafManagerT = tree::LeafManager<PointDataTreeT>; using LeafRangeT = typename LeafManagerT::LeafRange; using LeafNodeT = typename PointDataTreeT::LeafNodeType; using ValueType = typename LeafNodeT::ValueType; DeleteByFilterOp(const FilterT& filter, const AttributeArray::ScopedRegistryLock* lock) : mFilter(filter) , mLock(lock) { } void operator()(const LeafRangeT& range) const { for (auto leaf = range.begin(); leaf != range.end(); ++leaf) { const size_t newSize = iterCount(leaf->template beginIndexAll<FilterT>(mFilter)); // if all points are being deleted, clear the leaf attributes if (newSize == 0) { leaf->clearAttributes(/*updateValueMask=*/true, mLock); continue; } // early exit if no points are being deleted const size_t currentSize = leaf->getLastValue(); if (newSize == currentSize) continue; const AttributeSet& existingAttributeSet = leaf->attributeSet(); AttributeSet* newAttributeSet = new AttributeSet( existingAttributeSet, static_cast<Index>(newSize), mLock); const size_t attributeSetSize = existingAttributeSet.size(); // cache the attribute arrays for efficiency std::vector<AttributeArray*> newAttributeArrays; std::vector<const AttributeArray*> existingAttributeArrays; for (size_t i = 0; i < attributeSetSize; i++) { AttributeArray* newArray = newAttributeSet->get(i); const AttributeArray* existingArray = existingAttributeSet.getConst(i); if (!newArray->hasConstantStride() || !existingArray->hasConstantStride()) { OPENVDB_THROW(openvdb::NotImplementedError, "Transfer of attribute values for dynamic arrays not currently supported."); } if (newArray->stride() != existingArray->stride()) { OPENVDB_THROW(openvdb::LookupError, "Cannot transfer attribute values with mis-matching strides."); } newAttributeArrays.push_back(newArray); existingAttributeArrays.push_back(existingArray); } Index attributeIndex = 0; std::vector<ValueType> endOffsets; endOffsets.reserve(LeafNodeT::NUM_VALUES); // now construct new attribute arrays which exclude data from deleted points #if OPENVDB_ABI_VERSION_NUMBER >= 6 std::vector<std::pair<Index, Index>> indexMapping; indexMapping.reserve(newSize); for (auto voxel = leaf->cbeginValueAll(); voxel; ++voxel) { for (auto iter = leaf->beginIndexVoxel(voxel.getCoord(), mFilter); iter; ++iter) { indexMapping.emplace_back(*iter, attributeIndex++); } endOffsets.push_back(static_cast<ValueType>(attributeIndex)); } for (size_t i = 0; i < attributeSetSize; i++) { VectorWrapper indexMappingWrapper(indexMapping); newAttributeArrays[i]->copyValues(*(existingAttributeArrays[i]), indexMappingWrapper); } #else for (auto voxel = leaf->cbeginValueAll(); voxel; ++voxel) { for (auto iter = leaf->beginIndexVoxel(voxel.getCoord(), mFilter); iter; ++iter) { for (size_t i = 0; i < attributeSetSize; i++) { 
newAttributeArrays[i]->set(attributeIndex, *(existingAttributeArrays[i]), *iter); } ++attributeIndex; } endOffsets.push_back(static_cast<ValueType>(attributeIndex)); } #endif leaf->replaceAttributeSet(newAttributeSet); leaf->setOffsets(endOffsets); } } private: const FilterT& mFilter; const AttributeArray::ScopedRegistryLock* mLock; }; // struct DeleteByFilterOp } // namespace point_delete_internal //////////////////////////////////////// template <typename PointDataTreeT> inline void deleteFromGroups(PointDataTreeT& pointTree, const std::vector<std::string>& groups, bool invert, bool drop) { const typename PointDataTreeT::LeafCIter leafIter = pointTree.cbeginLeaf(); if (!leafIter) return; const openvdb::points::AttributeSet& attributeSet = leafIter->attributeSet(); const AttributeSet::Descriptor& descriptor = attributeSet.descriptor(); std::vector<std::string> availableGroups; // determine which of the requested groups exist, and early exit // if none are present in the tree for (const auto& groupName : groups) { if (descriptor.hasGroup(groupName)) { availableGroups.push_back(groupName); } } if (availableGroups.empty()) return; std::vector<std::string> empty; std::unique_ptr<MultiGroupFilter> filter; if (invert) { filter.reset(new MultiGroupFilter(groups, empty, leafIter->attributeSet())); } else { filter.reset(new MultiGroupFilter(empty, groups, leafIter->attributeSet())); } { // acquire registry lock to avoid locking when appending attributes in parallel AttributeArray::ScopedRegistryLock lock; tree::LeafManager<PointDataTreeT> leafManager(pointTree); point_delete_internal::DeleteByFilterOp<PointDataTreeT, MultiGroupFilter> deleteOp( *filter, &lock); tbb::parallel_for(leafManager.leafRange(), deleteOp); } // remove empty leaf nodes tools::pruneInactive(pointTree); // drop the now-empty groups if requested (unless invert = true) if (drop && !invert) { dropGroups(pointTree, availableGroups); } } template <typename PointDataTreeT> inline void deleteFromGroup(PointDataTreeT& pointTree, const std::string& group, bool invert, bool drop) { std::vector<std::string> groups(1, group); deleteFromGroups(pointTree, groups, invert, drop); } } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_POINTS_POINT_DELETE_HAS_BEEN_INCLUDED
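// Usage sketch for deleteFromGroups()/deleteFromGroup() as declared above; the
// grid and the group names "dead", "hidden" and "keep" are hypothetical.
#include <openvdb/points/PointDataGrid.h>
#include <openvdb/points/PointDelete.h>

inline void deleteFromGroupsExample(openvdb::points::PointDataGrid& grid)
{
    // delete every point belonging to either group and drop the groups
    // from the descriptor afterwards (drop defaults to true)
    openvdb::points::deleteFromGroups(grid.tree(), {"dead", "hidden"});

    // keep only the members of one group by inverting the selection;
    // with invert enabled the group itself is always retained
    openvdb::points::deleteFromGroup(grid.tree(), "keep",
        /*invert=*/true, /*drop=*/false);
}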
9,907
C++
34.512545
102
0.619663
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/AttributeGroup.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

/// @file points/AttributeGroup.cc

#include "AttributeGroup.h"

namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace points {


////////////////////////////////////////

// GroupHandle implementation


GroupHandle::GroupHandle(const GroupAttributeArray& array, const GroupType& offset)
    : mArray(array)
    , mBitMask(static_cast<GroupType>(1 << offset))
{
    assert(isGroup(mArray));

    // load data if delay-loaded
    mArray.loadData();
}


GroupHandle::GroupHandle(const GroupAttributeArray& array, const GroupType& bitMask, BitMask)
    : mArray(array)
    , mBitMask(bitMask)
{
    assert(isGroup(mArray));

    // load data if delay-loaded
    mArray.loadData();
}


bool GroupHandle::get(Index n) const
{
    return (mArray.get(n) & mBitMask) == mBitMask;
}


bool GroupHandle::getUnsafe(Index n) const
{
    return (mArray.getUnsafe(n) & mBitMask) == mBitMask;
}


////////////////////////////////////////

// GroupWriteHandle implementation


GroupWriteHandle::GroupWriteHandle(GroupAttributeArray& array, const GroupType& offset)
    : GroupHandle(array, offset)
{
    assert(isGroup(mArray));
}


void GroupWriteHandle::set(Index n, bool on)
{
    const GroupType& value = mArray.get(n);

    GroupAttributeArray& array(const_cast<GroupAttributeArray&>(mArray));

    if (on)     array.set(n, value | mBitMask);
    else        array.set(n, value & ~mBitMask);
}


void GroupWriteHandle::setUnsafe(Index n, bool on)
{
    const GroupType& value = mArray.getUnsafe(n);

    GroupAttributeArray& array(const_cast<GroupAttributeArray&>(mArray));

    if (on)     array.setUnsafe(n, value | mBitMask);
    else        array.setUnsafe(n, value & ~mBitMask);
}


bool GroupWriteHandle::collapse(bool on)
{
    using ValueT = GroupAttributeArray::ValueType;

    GroupAttributeArray& array(const_cast<GroupAttributeArray&>(mArray));

    array.compact();

    if (this->isUniform()) {
        if (on)     array.collapse(static_cast<ValueT>(array.get(0) | mBitMask));
        else        array.collapse(static_cast<ValueT>(array.get(0) & ~mBitMask));
        return true;
    }

    for (Index i = 0; i < array.size(); i++) {
        if (on)     array.set(i, static_cast<ValueT>(array.get(i) | mBitMask));
        else        array.set(i, static_cast<ValueT>(array.get(i) & ~mBitMask));
    }

    return false;
}


bool GroupWriteHandle::compact()
{
    GroupAttributeArray& array(const_cast<GroupAttributeArray&>(mArray));

    return array.compact();
}


////////////////////////////////////////


} // namespace points
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
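// Minimal sketch of the bitmask convention implemented above: each group
// occupies one bit of a per-point group byte, so membership is tested and
// toggled with the mask (1 << offset). Plain integers stand in for the
// attribute storage purely for illustration; the 8-bit width and the offset
// value are assumptions.
#include <cassert>
#include <cstdint>

inline void groupBitmaskExample()
{
    using GroupT = uint8_t;                    // assumed width of a group byte
    GroupT value = 0;                          // one point's packed membership
    const GroupT mask = GroupT(1 << 3);        // hypothetical group at offset 3

    value = GroupT(value | mask);              // GroupWriteHandle::set(n, true)
    assert((value & mask) == mask);            // GroupHandle::get(n) -> true
    value = GroupT(value & ~mask);             // GroupWriteHandle::set(n, false)
    assert((value & mask) != mask);            // GroupHandle::get(n) -> false
}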
2,742
C++
20.429687
87
0.641138
NVIDIA-Omniverse/ext-openvdb/openvdb/openvdb/points/PointConversion.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @author Dan Bailey, Nick Avramoussis /// /// @file points/PointConversion.h /// /// @brief Convert points and attributes to and from VDB Point Data grids. #ifndef OPENVDB_POINTS_POINT_CONVERSION_HAS_BEEN_INCLUDED #define OPENVDB_POINTS_POINT_CONVERSION_HAS_BEEN_INCLUDED #include <openvdb/math/Transform.h> #include <openvdb/tools/PointIndexGrid.h> #include <openvdb/tools/PointsToMask.h> #include <openvdb/util/NullInterrupter.h> #include "AttributeArrayString.h" #include "AttributeSet.h" #include "IndexFilter.h" #include "PointAttribute.h" #include "PointDataGrid.h" #include "PointGroup.h" #include <tbb/parallel_reduce.h> #include <type_traits> namespace openvdb { OPENVDB_USE_VERSION_NAMESPACE namespace OPENVDB_VERSION_NAME { namespace points { /// @brief Localises points with position into a @c PointDataGrid into two stages: /// allocation of the leaf attribute data and population of the positions. /// /// @param pointIndexGrid a PointIndexGrid into the points. /// @param positions list of world space point positions. /// @param xform world to index space transform. /// @param positionDefaultValue metadata default position value /// /// @note The position data must be supplied in a Point-Partitioner compatible /// data structure. A convenience PointAttributeVector class is offered. /// /// @note The position data is populated separately to perform world space to /// voxel space conversion and apply quantisation. /// /// @note A @c PointIndexGrid to the points must be supplied to perform this /// operation. Typically this is built implicitly by the PointDataGrid constructor. template< typename CompressionT, typename PointDataGridT, typename PositionArrayT, typename PointIndexGridT> inline typename PointDataGridT::Ptr createPointDataGrid(const PointIndexGridT& pointIndexGrid, const PositionArrayT& positions, const math::Transform& xform, const Metadata* positionDefaultValue = nullptr); /// @brief Convenience method to create a @c PointDataGrid from a std::vector of /// point positions. /// /// @param positions list of world space point positions. /// @param xform world to index space transform. /// @param positionDefaultValue metadata default position value /// /// @note This method implicitly wraps the std::vector for a Point-Partitioner compatible /// data structure and creates the required @c PointIndexGrid to the points. template <typename CompressionT, typename PointDataGridT, typename ValueT> inline typename PointDataGridT::Ptr createPointDataGrid(const std::vector<ValueT>& positions, const math::Transform& xform, const Metadata* positionDefaultValue = nullptr); /// @brief Stores point attribute data in an existing @c PointDataGrid attribute. /// /// @param tree the PointDataGrid to be populated. /// @param pointIndexTree a PointIndexTree into the points. /// @param attributeName the name of the VDB Points attribute to be populated. /// @param data a wrapper to the attribute data. /// @param stride the stride of the attribute /// @param insertMetadata true if strings are to be automatically inserted as metadata. /// /// @note A @c PointIndexGrid to the points must be supplied to perform this /// operation. This is required to ensure the same point index ordering. 
template <typename PointDataTreeT, typename PointIndexTreeT, typename PointArrayT> inline void populateAttribute( PointDataTreeT& tree, const PointIndexTreeT& pointIndexTree, const openvdb::Name& attributeName, const PointArrayT& data, const Index stride = 1, const bool insertMetadata = true); /// @brief Convert the position attribute from a Point Data Grid /// /// @param positionAttribute the position attribute to be populated. /// @param grid the PointDataGrid to be converted. /// @param pointOffsets a vector of cumulative point offsets for each leaf /// @param startOffset a value to shift all the point offsets by /// @param filter an index filter /// @param inCoreOnly true if out-of-core leaf nodes are to be ignored /// template <typename PositionAttribute, typename PointDataGridT, typename FilterT = NullFilter> inline void convertPointDataGridPosition( PositionAttribute& positionAttribute, const PointDataGridT& grid, const std::vector<Index64>& pointOffsets, const Index64 startOffset, const FilterT& filter = NullFilter(), const bool inCoreOnly = false); /// @brief Convert the attribute from a PointDataGrid /// /// @param attribute the attribute to be populated. /// @param tree the PointDataTree to be converted. /// @param pointOffsets a vector of cumulative point offsets for each leaf. /// @param startOffset a value to shift all the point offsets by /// @param arrayIndex the index in the Descriptor of the array to be converted. /// @param stride the stride of the attribute /// @param filter an index filter /// @param inCoreOnly true if out-of-core leaf nodes are to be ignored template <typename TypedAttribute, typename PointDataTreeT, typename FilterT = NullFilter> inline void convertPointDataGridAttribute( TypedAttribute& attribute, const PointDataTreeT& tree, const std::vector<Index64>& pointOffsets, const Index64 startOffset, const unsigned arrayIndex, const Index stride = 1, const FilterT& filter = NullFilter(), const bool inCoreOnly = false); /// @brief Convert the group from a PointDataGrid /// /// @param group the group to be populated. /// @param tree the PointDataTree to be converted. /// @param pointOffsets a vector of cumulative point offsets for each leaf /// @param startOffset a value to shift all the point offsets by /// @param index the group index to be converted. /// @param filter an index filter /// @param inCoreOnly true if out-of-core leaf nodes are to be ignored /// template <typename Group, typename PointDataTreeT, typename FilterT = NullFilter> inline void convertPointDataGridGroup( Group& group, const PointDataTreeT& tree, const std::vector<Index64>& pointOffsets, const Index64 startOffset, const AttributeSet::Descriptor::GroupIndex index, const FilterT& filter = NullFilter(), const bool inCoreOnly = false); /// @ brief Given a container of world space positions and a target points per voxel, /// compute a uniform voxel size that would best represent the storage of the points in a grid. /// This voxel size is typically used for conversion of the points into a PointDataGrid. 
/// /// @param positions array of world space positions /// @param pointsPerVoxel the target number of points per voxel, must be positive and non-zero /// @param transform voxel size will be computed using this optional transform if provided /// @param decimalPlaces for readability, truncate voxel size to this number of decimals /// @param interrupter an optional interrupter /// /// @note if none or one point provided in positions, the default voxel size of 0.1 will be returned /// template<typename PositionWrapper, typename InterrupterT = openvdb::util::NullInterrupter> inline float computeVoxelSize( const PositionWrapper& positions, const uint32_t pointsPerVoxel, const math::Mat4d transform = math::Mat4d::identity(), const Index decimalPlaces = 5, InterrupterT* const interrupter = nullptr); //////////////////////////////////////// /// @brief Point-partitioner compatible STL vector attribute wrapper for convenience template<typename ValueType> class PointAttributeVector { public: using PosType = ValueType; using value_type= ValueType; PointAttributeVector(const std::vector<value_type>& data, const Index stride = 1) : mData(data) , mStride(stride) { } size_t size() const { return mData.size(); } void getPos(size_t n, ValueType& xyz) const { xyz = mData[n]; } void get(ValueType& value, size_t n) const { value = mData[n]; } void get(ValueType& value, size_t n, openvdb::Index m) const { value = mData[n * mStride + m]; } private: const std::vector<value_type>& mData; const Index mStride; }; // PointAttributeVector //////////////////////////////////////// namespace point_conversion_internal { // ConversionTraits to create the relevant Attribute Handles from a LeafNode template <typename T> struct ConversionTraits { using Handle = AttributeHandle<T, UnknownCodec>; using WriteHandle = AttributeWriteHandle<T, UnknownCodec>; static T zero() { return zeroVal<T>(); } template <typename LeafT> static typename Handle::Ptr handleFromLeaf(LeafT& leaf, Index index) { const AttributeArray& array = leaf.constAttributeArray(index); return Handle::create(array); } template <typename LeafT> static typename WriteHandle::Ptr writeHandleFromLeaf(LeafT& leaf, Index index) { AttributeArray& array = leaf.attributeArray(index); return WriteHandle::create(array); } }; // ConversionTraits template <> struct ConversionTraits<openvdb::Name> { using Handle = StringAttributeHandle; using WriteHandle = StringAttributeWriteHandle; static openvdb::Name zero() { return ""; } template <typename LeafT> static typename Handle::Ptr handleFromLeaf(LeafT& leaf, Index index) { const AttributeArray& array = leaf.constAttributeArray(index); const AttributeSet::Descriptor& descriptor = leaf.attributeSet().descriptor(); return Handle::create(array, descriptor.getMetadata()); } template <typename LeafT> static typename WriteHandle::Ptr writeHandleFromLeaf(LeafT& leaf, Index index) { AttributeArray& array = leaf.attributeArray(index); const AttributeSet::Descriptor& descriptor = leaf.attributeSet().descriptor(); return WriteHandle::create(array, descriptor.getMetadata()); } }; // ConversionTraits<openvdb::Name> template< typename PointDataTreeType, typename PointIndexTreeType, typename AttributeListType> struct PopulateAttributeOp { using LeafManagerT = typename tree::LeafManager<PointDataTreeType>; using LeafRangeT = typename LeafManagerT::LeafRange; using PointIndexLeafNode = typename PointIndexTreeType::LeafNodeType; using IndexArray = typename PointIndexLeafNode::IndexArray; using ValueType = typename AttributeListType::value_type; 
using HandleT = typename ConversionTraits<ValueType>::WriteHandle; PopulateAttributeOp(const PointIndexTreeType& pointIndexTree, const AttributeListType& data, const size_t index, const Index stride = 1) : mPointIndexTree(pointIndexTree) , mData(data) , mIndex(index) , mStride(stride) { } void operator()(const typename LeafManagerT::LeafRange& range) const { for (auto leaf = range.begin(); leaf; ++leaf) { // obtain the PointIndexLeafNode (using the origin of the current leaf) const PointIndexLeafNode* pointIndexLeaf = mPointIndexTree.probeConstLeaf(leaf->origin()); if (!pointIndexLeaf) continue; typename HandleT::Ptr attributeWriteHandle = ConversionTraits<ValueType>::writeHandleFromLeaf(*leaf, static_cast<Index>(mIndex)); Index64 index = 0; const IndexArray& indices = pointIndexLeaf->indices(); for (const Index64 leafIndex: indices) { ValueType value; for (Index i = 0; i < mStride; i++) { mData.get(value, leafIndex, i); attributeWriteHandle->set(static_cast<Index>(index), i, value); } index++; } // attempt to compact the array attributeWriteHandle->compact(); } } ////////// const PointIndexTreeType& mPointIndexTree; const AttributeListType& mData; const size_t mIndex; const Index mStride; }; template<typename PointDataTreeType, typename Attribute, typename FilterT> struct ConvertPointDataGridPositionOp { using LeafNode = typename PointDataTreeType::LeafNodeType; using ValueType = typename Attribute::ValueType; using HandleT = typename Attribute::Handle; using SourceHandleT = AttributeHandle<ValueType>; using LeafManagerT = typename tree::LeafManager<const PointDataTreeType>; using LeafRangeT = typename LeafManagerT::LeafRange; ConvertPointDataGridPositionOp( Attribute& attribute, const std::vector<Index64>& pointOffsets, const Index64 startOffset, const math::Transform& transform, const size_t index, const FilterT& filter, const bool inCoreOnly) : mAttribute(attribute) , mPointOffsets(pointOffsets) , mStartOffset(startOffset) , mTransform(transform) , mIndex(index) , mFilter(filter) , mInCoreOnly(inCoreOnly) { // only accept Vec3f as ValueType static_assert(VecTraits<ValueType>::Size == 3 && std::is_floating_point<typename ValueType::ValueType>::value, "ValueType is not Vec3f"); } template <typename IterT> void convert(IterT& iter, HandleT& targetHandle, SourceHandleT& sourceHandle, Index64& offset) const { for (; iter; ++iter) { const Vec3d xyz = iter.getCoord().asVec3d(); const Vec3d pos = sourceHandle.get(*iter); targetHandle.set(static_cast<Index>(offset++), /*stride=*/0, mTransform.indexToWorld(pos + xyz)); } } void operator()(const LeafRangeT& range) const { HandleT pHandle(mAttribute); for (auto leaf = range.begin(); leaf; ++leaf) { assert(leaf.pos() < mPointOffsets.size()); if (mInCoreOnly && leaf->buffer().isOutOfCore()) continue; Index64 offset = mStartOffset; if (leaf.pos() > 0) offset += mPointOffsets[leaf.pos() - 1]; auto handle = SourceHandleT::create(leaf->constAttributeArray(mIndex)); if (mFilter.state() == index::ALL) { auto iter = leaf->beginIndexOn(); convert(iter, pHandle, *handle, offset); } else { auto iter = leaf->beginIndexOn(mFilter); convert(iter, pHandle, *handle, offset); } } } ////////// Attribute& mAttribute; const std::vector<Index64>& mPointOffsets; const Index64 mStartOffset; const math::Transform& mTransform; const size_t mIndex; const FilterT& mFilter; const bool mInCoreOnly; }; // ConvertPointDataGridPositionOp template<typename PointDataTreeType, typename Attribute, typename FilterT> struct ConvertPointDataGridAttributeOp { using LeafNode = typename 
PointDataTreeType::LeafNodeType; using ValueType = typename Attribute::ValueType; using HandleT = typename Attribute::Handle; using SourceHandleT = typename ConversionTraits<ValueType>::Handle; using LeafManagerT = typename tree::LeafManager<const PointDataTreeType>; using LeafRangeT = typename LeafManagerT::LeafRange; ConvertPointDataGridAttributeOp(Attribute& attribute, const std::vector<Index64>& pointOffsets, const Index64 startOffset, const size_t index, const Index stride, const FilterT& filter, const bool inCoreOnly) : mAttribute(attribute) , mPointOffsets(pointOffsets) , mStartOffset(startOffset) , mIndex(index) , mStride(stride) , mFilter(filter) , mInCoreOnly(inCoreOnly) { } template <typename IterT> void convert(IterT& iter, HandleT& targetHandle, SourceHandleT& sourceHandle, Index64& offset) const { if (sourceHandle.isUniform()) { const ValueType uniformValue(sourceHandle.get(0)); for (; iter; ++iter) { for (Index i = 0; i < mStride; i++) { targetHandle.set(static_cast<Index>(offset), i, uniformValue); } offset++; } } else { for (; iter; ++iter) { for (Index i = 0; i < mStride; i++) { targetHandle.set(static_cast<Index>(offset), i, sourceHandle.get(*iter, /*stride=*/i)); } offset++; } } } void operator()(const LeafRangeT& range) const { HandleT pHandle(mAttribute); for (auto leaf = range.begin(); leaf; ++leaf) { assert(leaf.pos() < mPointOffsets.size()); if (mInCoreOnly && leaf->buffer().isOutOfCore()) continue; Index64 offset = mStartOffset; if (leaf.pos() > 0) offset += mPointOffsets[leaf.pos() - 1]; typename SourceHandleT::Ptr handle = ConversionTraits<ValueType>::handleFromLeaf( *leaf, static_cast<Index>(mIndex)); if (mFilter.state() == index::ALL) { auto iter = leaf->beginIndexOn(); convert(iter, pHandle, *handle, offset); } else { auto iter = leaf->beginIndexOn(mFilter); convert(iter, pHandle, *handle, offset); } } } ////////// Attribute& mAttribute; const std::vector<Index64>& mPointOffsets; const Index64 mStartOffset; const size_t mIndex; const Index mStride; const FilterT& mFilter; const bool mInCoreOnly; }; // ConvertPointDataGridAttributeOp template<typename PointDataTreeType, typename Group, typename FilterT> struct ConvertPointDataGridGroupOp { using LeafNode = typename PointDataTreeType::LeafNodeType; using GroupIndex = AttributeSet::Descriptor::GroupIndex; using LeafManagerT = typename tree::LeafManager<const PointDataTreeType>; using LeafRangeT = typename LeafManagerT::LeafRange; ConvertPointDataGridGroupOp(Group& group, const std::vector<Index64>& pointOffsets, const Index64 startOffset, const AttributeSet::Descriptor::GroupIndex index, const FilterT& filter, const bool inCoreOnly) : mGroup(group) , mPointOffsets(pointOffsets) , mStartOffset(startOffset) , mIndex(index) , mFilter(filter) , mInCoreOnly(inCoreOnly) { } template <typename IterT> void convert(IterT& iter, const GroupAttributeArray& groupArray, Index64& offset) const { const auto bitmask = static_cast<GroupType>(1 << mIndex.second); if (groupArray.isUniform()) { if (groupArray.get(0) & bitmask) { for (; iter; ++iter) { mGroup.setOffsetOn(static_cast<Index>(offset)); offset++; } } } else { for (; iter; ++iter) { if (groupArray.get(*iter) & bitmask) { mGroup.setOffsetOn(static_cast<Index>(offset)); } offset++; } } } void operator()(const LeafRangeT& range) const { for (auto leaf = range.begin(); leaf; ++leaf) { assert(leaf.pos() < mPointOffsets.size()); if (mInCoreOnly && leaf->buffer().isOutOfCore()) continue; Index64 offset = mStartOffset; if (leaf.pos() > 0) offset += mPointOffsets[leaf.pos() - 1]; 
const AttributeArray& array = leaf->constAttributeArray(mIndex.first); assert(isGroup(array)); const GroupAttributeArray& groupArray = GroupAttributeArray::cast(array); if (mFilter.state() == index::ALL) { auto iter = leaf->beginIndexOn(); convert(iter, groupArray, offset); } else { auto iter = leaf->beginIndexOn(mFilter); convert(iter, groupArray, offset); } } } ////////// Group& mGroup; const std::vector<Index64>& mPointOffsets; const Index64 mStartOffset; const GroupIndex mIndex; const FilterT& mFilter; const bool mInCoreOnly; }; // ConvertPointDataGridGroupOp template<typename PositionArrayT> struct CalculatePositionBounds { CalculatePositionBounds(const PositionArrayT& positions, const math::Mat4d& inverse) : mPositions(positions) , mInverseMat(inverse) , mMin(std::numeric_limits<Real>::max()) , mMax(-std::numeric_limits<Real>::max()) {} CalculatePositionBounds(const CalculatePositionBounds& other, tbb::split) : mPositions(other.mPositions) , mInverseMat(other.mInverseMat) , mMin(std::numeric_limits<Real>::max()) , mMax(-std::numeric_limits<Real>::max()) {} void operator()(const tbb::blocked_range<size_t>& range) { Vec3R pos; for (size_t n = range.begin(), N = range.end(); n != N; ++n) { mPositions.getPos(n, pos); pos = mInverseMat.transform(pos); mMin = math::minComponent(mMin, pos); mMax = math::maxComponent(mMax, pos); } } void join(const CalculatePositionBounds& other) { mMin = math::minComponent(mMin, other.mMin); mMax = math::maxComponent(mMax, other.mMax); } BBoxd getBoundingBox() const { return BBoxd(mMin, mMax); } private: const PositionArrayT& mPositions; const math::Mat4d& mInverseMat; Vec3R mMin, mMax; }; } // namespace point_conversion_internal //////////////////////////////////////// template<typename CompressionT, typename PointDataGridT, typename PositionArrayT, typename PointIndexGridT> inline typename PointDataGridT::Ptr createPointDataGrid(const PointIndexGridT& pointIndexGrid, const PositionArrayT& positions, const math::Transform& xform, const Metadata* positionDefaultValue) { using PointDataTreeT = typename PointDataGridT::TreeType; using LeafT = typename PointDataTree::LeafNodeType; using PointIndexLeafT = typename PointIndexGridT::TreeType::LeafNodeType; using PointIndexT = typename PointIndexLeafT::ValueType; using LeafManagerT = typename tree::LeafManager<PointDataTreeT>; using PositionAttributeT = TypedAttributeArray<Vec3f, CompressionT>; const NamePair positionType = PositionAttributeT::attributeType(); // construct the Tree using a topology copy of the PointIndexGrid const auto& pointIndexTree = pointIndexGrid.tree(); typename PointDataTreeT::Ptr treePtr(new PointDataTreeT(pointIndexTree)); // create attribute descriptor from position type auto descriptor = AttributeSet::Descriptor::create(positionType); // add default value for position if provided if (positionDefaultValue) descriptor->setDefaultValue("P", *positionDefaultValue); // retrieve position index const size_t positionIndex = descriptor->find("P"); assert(positionIndex != AttributeSet::INVALID_POS); // acquire registry lock to avoid locking when appending attributes in parallel AttributeArray::ScopedRegistryLock lock; // populate position attribute LeafManagerT leafManager(*treePtr); leafManager.foreach( [&](LeafT& leaf, size_t /*idx*/) { // obtain the PointIndexLeafNode (using the origin of the current leaf) const auto* pointIndexLeaf = pointIndexTree.probeConstLeaf(leaf.origin()); assert(pointIndexLeaf); // initialise the attribute storage Index 
pointCount(static_cast<Index>(pointIndexLeaf->indices().size())); leaf.initializeAttributes(descriptor, pointCount, &lock); // create write handle for position auto attributeWriteHandle = AttributeWriteHandle<Vec3f, CompressionT>::create( leaf.attributeArray(positionIndex)); Index index = 0; const PointIndexT *begin = static_cast<PointIndexT*>(nullptr), *end = static_cast<PointIndexT*>(nullptr); // iterator over every active voxel in the point index leaf for (auto iter = pointIndexLeaf->cbeginValueOn(); iter; ++iter) { // find the voxel center const Coord& ijk = iter.getCoord(); const Vec3d& positionCellCenter(ijk.asVec3d()); // obtain pointers for this voxel from begin to end in the indices array pointIndexLeaf->getIndices(ijk, begin, end); while (begin < end) { typename PositionArrayT::value_type positionWorldSpace; positions.getPos(*begin, positionWorldSpace); // compute the index-space position and then subtract the voxel center const Vec3d positionIndexSpace = xform.worldToIndex(positionWorldSpace); const Vec3f positionVoxelSpace(positionIndexSpace - positionCellCenter); attributeWriteHandle->set(index++, positionVoxelSpace); ++begin; } } }, /*threaded=*/true); auto grid = PointDataGridT::create(treePtr); grid->setTransform(xform.copy()); return grid; } //////////////////////////////////////// template <typename CompressionT, typename PointDataGridT, typename ValueT> inline typename PointDataGridT::Ptr createPointDataGrid(const std::vector<ValueT>& positions, const math::Transform& xform, const Metadata* positionDefaultValue) { const PointAttributeVector<ValueT> pointList(positions); tools::PointIndexGrid::Ptr pointIndexGrid = tools::createPointIndexGrid<tools::PointIndexGrid>(pointList, xform); return createPointDataGrid<CompressionT, PointDataGridT>( *pointIndexGrid, pointList, xform, positionDefaultValue); } //////////////////////////////////////// template <typename PointDataTreeT, typename PointIndexTreeT, typename PointArrayT> inline void populateAttribute(PointDataTreeT& tree, const PointIndexTreeT& pointIndexTree, const openvdb::Name& attributeName, const PointArrayT& data, const Index stride, const bool insertMetadata) { using point_conversion_internal::PopulateAttributeOp; using ValueType = typename PointArrayT::value_type; auto iter = tree.cbeginLeaf(); if (!iter) return; const size_t index = iter->attributeSet().find(attributeName); if (index == AttributeSet::INVALID_POS) { OPENVDB_THROW(KeyError, "Attribute not found to populate - " << attributeName << "."); } if (insertMetadata) { point_attribute_internal::MetadataStorage<PointDataTreeT, ValueType>::add(tree, data); } // populate attribute typename tree::LeafManager<PointDataTreeT> leafManager(tree); PopulateAttributeOp<PointDataTreeT, PointIndexTreeT, PointArrayT> populate(pointIndexTree, data, index, stride); tbb::parallel_for(leafManager.leafRange(), populate); } //////////////////////////////////////// template <typename PositionAttribute, typename PointDataGridT, typename FilterT> inline void convertPointDataGridPosition( PositionAttribute& positionAttribute, const PointDataGridT& grid, const std::vector<Index64>& pointOffsets, const Index64 startOffset, const FilterT& filter, const bool inCoreOnly) { using TreeType = typename PointDataGridT::TreeType; using LeafManagerT = typename tree::LeafManager<const TreeType>; using point_conversion_internal::ConvertPointDataGridPositionOp; const TreeType& tree = grid.tree(); auto iter = tree.cbeginLeaf(); if (!iter) return; const size_t positionIndex = 
iter->attributeSet().find("P"); positionAttribute.expand(); LeafManagerT leafManager(tree); ConvertPointDataGridPositionOp<TreeType, PositionAttribute, FilterT> convert( positionAttribute, pointOffsets, startOffset, grid.transform(), positionIndex, filter, inCoreOnly); tbb::parallel_for(leafManager.leafRange(), convert); positionAttribute.compact(); } //////////////////////////////////////// template <typename TypedAttribute, typename PointDataTreeT, typename FilterT> inline void convertPointDataGridAttribute( TypedAttribute& attribute, const PointDataTreeT& tree, const std::vector<Index64>& pointOffsets, const Index64 startOffset, const unsigned arrayIndex, const Index stride, const FilterT& filter, const bool inCoreOnly) { using LeafManagerT = typename tree::LeafManager<const PointDataTreeT>; using point_conversion_internal::ConvertPointDataGridAttributeOp; auto iter = tree.cbeginLeaf(); if (!iter) return; attribute.expand(); LeafManagerT leafManager(tree); ConvertPointDataGridAttributeOp<PointDataTreeT, TypedAttribute, FilterT> convert( attribute, pointOffsets, startOffset, arrayIndex, stride, filter, inCoreOnly); tbb::parallel_for(leafManager.leafRange(), convert); attribute.compact(); } //////////////////////////////////////// template <typename Group, typename PointDataTreeT, typename FilterT> inline void convertPointDataGridGroup( Group& group, const PointDataTreeT& tree, const std::vector<Index64>& pointOffsets, const Index64 startOffset, const AttributeSet::Descriptor::GroupIndex index, const FilterT& filter, const bool inCoreOnly) { using LeafManagerT= typename tree::LeafManager<const PointDataTreeT>; using point_conversion_internal::ConvertPointDataGridGroupOp; auto iter = tree.cbeginLeaf(); if (!iter) return; LeafManagerT leafManager(tree); ConvertPointDataGridGroupOp<PointDataTree, Group, FilterT> convert( group, pointOffsets, startOffset, index, filter, inCoreOnly); tbb::parallel_for(leafManager.leafRange(), convert); // must call this after modifying point groups in parallel group.finalize(); } template<typename PositionWrapper, typename InterrupterT> inline float computeVoxelSize( const PositionWrapper& positions, const uint32_t pointsPerVoxel, const math::Mat4d transform, const Index decimalPlaces, InterrupterT* const interrupter) { using namespace point_conversion_internal; struct Local { static bool voxelSizeFromVolume(const double volume, const size_t estimatedVoxelCount, float& voxelSize) { // dictated by the math::ScaleMap limit static const double minimumVoxelVolume(3e-15); static const double maximumVoxelVolume(std::numeric_limits<float>::max()); double voxelVolume = volume / static_cast<double>(estimatedVoxelCount); bool valid = true; if (voxelVolume < minimumVoxelVolume) { voxelVolume = minimumVoxelVolume; valid = false; } else if (voxelVolume > maximumVoxelVolume) { voxelVolume = maximumVoxelVolume; valid = false; } voxelSize = static_cast<float>(math::Pow(voxelVolume, 1.0/3.0)); return valid; } static float truncate(const float voxelSize, Index decPlaces) { float truncatedVoxelSize = voxelSize; // attempt to truncate from decPlaces -> 11 for (int i = decPlaces; i < 11; i++) { truncatedVoxelSize = static_cast<float>(math::Truncate(double(voxelSize), i)); if (truncatedVoxelSize != 0.0f) break; } return truncatedVoxelSize; } }; if (pointsPerVoxel == 0) OPENVDB_THROW(ValueError, "Points per voxel cannot be zero."); // constructed with the default voxel size as specified by openvdb interface values float voxelSize(0.1f); const size_t numPoints = positions.size(); // 
return the default voxel size if we have zero or only 1 point if (numPoints <= 1) return voxelSize; size_t targetVoxelCount(numPoints / size_t(pointsPerVoxel)); if (targetVoxelCount == 0) targetVoxelCount++; // calculate the world space, transform-oriented bounding box math::Mat4d inverseTransform = transform.inverse(); inverseTransform = math::unit(inverseTransform); tbb::blocked_range<size_t> range(0, numPoints); CalculatePositionBounds<PositionWrapper> calculateBounds(positions, inverseTransform); tbb::parallel_reduce(range, calculateBounds); BBoxd bbox = calculateBounds.getBoundingBox(); // return default size if points are coincident if (bbox.min() == bbox.max()) return voxelSize; double volume = bbox.volume(); // handle points that are collinear or coplanar by expanding the volume if (math::isApproxZero(volume)) { Vec3d extents = bbox.extents().sorted().reversed(); if (math::isApproxZero(extents[1])) { // colinear (maxExtent^3) volume = extents[0]*extents[0]*extents[0]; } else { // coplanar (maxExtent*nextMaxExtent^2) volume = extents[0]*extents[1]*extents[1]; } } double previousVolume = volume; if (!Local::voxelSizeFromVolume(volume, targetVoxelCount, voxelSize)) { OPENVDB_LOG_DEBUG("Out of range, clamping voxel size."); return voxelSize; } size_t previousVoxelCount(0); size_t voxelCount(1); if (interrupter) interrupter->start("Computing voxel size"); while (voxelCount > previousVoxelCount) { math::Transform::Ptr newTransform; if (!math::isIdentity(transform)) { // if using a custom transform, pre-scale by coefficients // which define the new voxel size math::Mat4d matrix(transform); matrix.preScale(Vec3d(voxelSize) / math::getScale(matrix)); newTransform = math::Transform::createLinearTransform(matrix); } else { newTransform = math::Transform::createLinearTransform(voxelSize); } // create a mask grid of the points from the calculated voxel size // this is the same function call as tools::createPointMask() which has // been duplicated to provide an interrupter MaskGrid::Ptr mask = createGrid<MaskGrid>(false); mask->setTransform(newTransform); tools::PointsToMask<MaskGrid, InterrupterT> pointMaskOp(*mask, interrupter); pointMaskOp.addPoints(positions); if (interrupter && util::wasInterrupted(interrupter)) break; previousVoxelCount = voxelCount; voxelCount = mask->activeVoxelCount(); volume = math::Pow3(voxelSize) * static_cast<float>(voxelCount); // stop if no change in the volume or the volume has increased if (volume >= previousVolume) break; previousVolume = volume; const float previousVoxelSize = voxelSize; // compute the new voxel size and if invalid return the previous value if (!Local::voxelSizeFromVolume(volume, targetVoxelCount, voxelSize)) { voxelSize = previousVoxelSize; break; } // halt convergence if the voxel size has decreased by less // than 10% in this iteration if (voxelSize / previousVoxelSize > 0.9f) break; } if (interrupter) interrupter->end(); // truncate the voxel size for readability and return the value return Local::truncate(voxelSize, decimalPlaces); } //////////////////////////////////////// } // namespace points } // namespace OPENVDB_VERSION_NAME } // namespace openvdb #endif // OPENVDB_POINTS_POINT_CONVERSION_HAS_BEEN_INCLUDED
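// Conversion sketch based on the API declared above: build a PointDataGrid from
// a handful of world-space positions, letting computeVoxelSize() choose a voxel
// size that targets roughly eight points per voxel. The coordinates are
// arbitrary example data and no position compression codec is used.
#include <openvdb/openvdb.h>
#include <openvdb/points/PointConversion.h>
#include <vector>

inline openvdb::points::PointDataGrid::Ptr pointConversionExample()
{
    const std::vector<openvdb::Vec3R> positions = {
        openvdb::Vec3R(0.0, 1.0, 0.0), openvdb::Vec3R(1.5, 3.5, 1.0),
        openvdb::Vec3R(-1.0, 6.0, -2.0), openvdb::Vec3R(1.1, 1.25, 0.06)};

    // wrap the vector in a Point-Partitioner compatible adapter
    const openvdb::points::PointAttributeVector<openvdb::Vec3R> wrapper(positions);

    // derive a uniform voxel size from the target point density
    const float voxelSize =
        openvdb::points::computeVoxelSize(wrapper, /*pointsPerVoxel=*/8);

    const openvdb::math::Transform::Ptr transform =
        openvdb::math::Transform::createLinearTransform(voxelSize);

    return openvdb::points::createPointDataGrid<openvdb::points::NullCodec,
        openvdb::points::PointDataGrid>(positions, *transform);
}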
38,932
C++
36.256459
107
0.610192
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/GT_GEOPrimCollectVDB.h
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

/*
 * Copyright (c) Side Effects Software Inc.
 *
 * Produced by:
 *      Side Effects Software Inc
 *      123 Front Street West, Suite 1401
 *      Toronto, Ontario
 *      Canada   M5J 2M2
 *      416-504-9876
 *
 * NAME:        GT_GEOPrimCollectVDB.h ( GT Library, C++)
 *
 * COMMENTS:
 */

#ifndef __GT_GEOPrimCollectVDB__
#define __GT_GEOPrimCollectVDB__

#include <GT/GT_GEOPrimCollect.h>

#include <openvdb/Platform.h>

namespace openvdb_houdini {

class OPENVDB_HOUDINI_API GT_GEOPrimCollectVDB : public GT_GEOPrimCollect
{
public:
                        GT_GEOPrimCollectVDB(const GA_PrimitiveTypeId &id);
    virtual             ~GT_GEOPrimCollectVDB();

    static void         registerPrimitive(const GA_PrimitiveTypeId &id);

    virtual GT_GEOPrimCollectData *
                        beginCollecting(
                                const GT_GEODetailListHandle &,
                                const GT_RefineParms *) const;

    virtual GT_PrimitiveHandle
                        collect(
                                const GT_GEODetailListHandle &geometry,
                                const GEO_Primitive *const* prim_list,
                                int nsegments,
                                GT_GEOPrimCollectData *data) const;

    virtual GT_PrimitiveHandle
                        endCollecting(
                                const GT_GEODetailListHandle &geometry,
                                GT_GEOPrimCollectData *data) const;

private:
    GA_PrimitiveTypeId  myId;
};

} // namespace openvdb_houdini

#endif // __GT_GEOPrimCollectVDB__
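// Registration sketch: the collector is installed once, typically at plugin
// load time, against the primitive type id of the VDB primitive so that VDBs
// gain a GT (viewport) representation. How that id is obtained is
// host-specific, so it is passed in here rather than assumed.
#include "GT_GEOPrimCollectVDB.h"

static void registerVDBCollector(const GA_PrimitiveTypeId& vdbTypeId)
{
    openvdb_houdini::GT_GEOPrimCollectVDB::registerPrimitive(vdbTypeId);
}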
1,572
C++
25.216666
73
0.603053
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Clip.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file SOP_OpenVDB_Clip.cc /// /// @author FX R&D OpenVDB team /// /// @brief Clip grids #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/GeometryUtil.h> // for drawFrustum(), frustumTransformFromCamera() #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/tools/Clip.h> // for tools::clip() #include <openvdb/tools/LevelSetUtil.h> // for tools::sdfInteriorMask() #include <openvdb/tools/Mask.h> // for tools::interiorMask() #include <openvdb/tools/Morphology.h> // for tools::dilateActiveValues(), tools::erodeVoxels() #include <openvdb/points/PointDataGrid.h> #include <OBJ/OBJ_Camera.h> #include <cmath> // for std::abs(), std::round() #include <exception> #include <string> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; class SOP_OpenVDB_Clip: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Clip(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Clip() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned input) const override { return (input == 1); } class Cache: public SOP_VDBCacheOptions { public: openvdb::math::Transform::Ptr frustum() const { return mFrustum; } protected: OP_ERROR cookVDBSop(OP_Context&) override; private: void getFrustum(OP_Context&); openvdb::math::Transform::Ptr mFrustum; }; // class Cache protected: void resolveObsoleteParms(PRM_ParmList*) override; bool updateParmsFlags() override; OP_ERROR cookMyGuide1(OP_Context&) override; }; //////////////////////////////////////// void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of VDBs from the first input to be clipped.") .setDocumentation( "A subset of VDBs from the first input to be clipped" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "inside", "Keep Inside") .setDefault(PRMoneDefaults) .setTooltip( "If enabled, keep voxels that lie inside the clipping region.\n" "If disabled, keep voxels that lie outside the clipping region.") .setDocumentation( "If enabled, keep voxels that lie inside the clipping region," " otherwise keep voxels that lie outside the clipping region.")); parms.add(hutil::ParmFactory(PRM_STRING, "clipper", "Clip To") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "camera", "Camera", "geometry", "Geometry", "mask", "Mask VDB" }) .setDefault("geometry") .setTooltip("Specify how the clipping region should be defined.") .setDocumentation("\ How to define the clipping region\n\ \n\ Camera:\n\ Use a camera frustum as the clipping region.\n\ Geometry:\n\ Use the bounding box of geometry from the second input as the clipping region.\n\ Mask VDB:\n\ Use the active voxels of a VDB volume from the second input as a clipping mask.\n")); parms.add(hutil::ParmFactory(PRM_STRING, "mask", "Mask VDB") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip("Specify a VDB whose active voxels are to be used as a clipping mask.") .setDocumentation( "A VDB from the second input whose active voxels are to be used as a clipping mask" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_STRING, "camera", "Camera") .setTypeExtended(PRM_TYPE_DYNAMIC_PATH) .setSpareData(&PRM_SpareData::objCameraPath) .setTooltip("Specify the path to a reference 
camera") .setDocumentation( "The path to the camera whose frustum is to be used as a clipping region" " (e.g., `/obj/cam1`)")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "setnear", "") .setDefault(PRMzeroDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("If enabled, override the camera's near clipping plane.")); parms.add(hutil::ParmFactory(PRM_FLT_E, "near", "Near Clipping") .setDefault(0.001) .setTooltip("The position of the near clipping plane") .setDocumentation( "The position of the near clipping plane\n\n" "If enabled, this setting overrides the camera's clipping plane.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "setfar", "") .setDefault(PRMzeroDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("If enabled, override the camera's far clipping plane.")); parms.add(hutil::ParmFactory(PRM_FLT_E, "far", "Far Clipping") .setDefault(10000) .setTooltip("The position of the far clipping plane") .setDocumentation( "The position of the far clipping plane\n\n" "If enabled, this setting overrides the camera's clipping plane.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "setpadding", "") .setDefault(PRMzeroDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("If enabled, expand or shrink the clipping region.")); parms.add(hutil::ParmFactory(PRM_FLT_E, "padding", "Padding") .setVectorSize(3) .setDefault(PRMzeroDefaults) .setTooltip("Padding in world units to be added to the clipping region") .setDocumentation( "Padding in world units to be added to the clipping region\n\n" "Negative values shrink the clipping region.\n\n" "Nonuniform padding is not supported when clipping to a VDB volume.\n" "The mask volume will be dilated or eroded uniformly" " by the _x_-axis padding value.")); // Obsolete parameters hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "usemask", "").setDefault(PRMzeroDefaults)); hvdb::OpenVDBOpFactory("VDB Clip", SOP_OpenVDB_Clip::factory, parms, *table) .addInput("VDBs") .addOptionalInput("Mask VDB or bounding geometry") .setObsoleteParms(obsoleteParms) .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Clip::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Clip VDB volumes using a camera frustum, a bounding box, or another VDB as a mask.\"\"\"\n\ \n\ @overview\n\ \n\ This node clips VDB volumes, that is, it removes voxels that lie outside\n\ (or, optionally, inside) a given region by deactivating them and setting them\n\ to the background value.\n\ The clipping region may be one of the following:\n\ * the frustum of a camera\n\ * the bounding box of reference geometry\n\ * the active voxels of another VDB.\n\ \n\ When the clipping region is defined by a VDB, the operation\n\ is similar to [activity intersection|Node:sop/DW_OpenVDBCombine],\n\ except that clipped voxels are not only deactivated but also set\n\ to the background value.\n\ \n\ @related\n\ \n\ - [OpenVDB Combine|Node:sop/DW_OpenVDBCombine]\n\ - [OpenVDB Occlusion Mask|Node:sop/DW_OpenVDBOcclusionMask]\n\ - [Node:sop/vdbactivate]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } void SOP_OpenVDB_Clip::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; auto* parm = obsoleteParms->getParmPtr("usemask"); if (parm && !parm->isFactoryDefault()) { // factory default was Off setString("clipper", CH_STRING_LITERAL, "mask", 0, 0.0); } // Delegate to the base class. 
hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } bool SOP_OpenVDB_Clip::updateParmsFlags() { bool changed = false; UT_String clipper; evalString(clipper, "clipper", 0, 0.0); const bool clipToCamera = (clipper == "camera"); changed |= enableParm("mask", clipper == "mask"); changed |= enableParm("camera", clipToCamera); changed |= enableParm("setnear", clipToCamera); changed |= enableParm("near", clipToCamera && evalInt("setnear", 0, 0.0)); changed |= enableParm("setfar", clipToCamera); changed |= enableParm("far", clipToCamera && evalInt("setfar", 0, 0.0)); changed |= enableParm("padding", 0 != evalInt("setpadding", 0, 0.0)); changed |= setVisibleState("mask", clipper == "mask"); changed |= setVisibleState("camera", clipToCamera); changed |= setVisibleState("setnear", clipToCamera); changed |= setVisibleState("near", clipToCamera); changed |= setVisibleState("setfar", clipToCamera); changed |= setVisibleState("far", clipToCamera); return changed; } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Clip::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Clip(net, name, op); } SOP_OpenVDB_Clip::SOP_OpenVDB_Clip(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// namespace { // Functor to convert a mask grid of arbitrary type to a BoolGrid // and to dilate or erode it struct DilatedMaskOp { DilatedMaskOp(int dilation_): dilation{dilation_} {} template<typename GridType> void operator()(const GridType& grid) { if (dilation == 0) return; maskGrid = openvdb::BoolGrid::create(); maskGrid->setTransform(grid.transform().copy()); maskGrid->topologyUnion(grid); if (dilation < 0) { // Densify the mask, since tools::erodeVoxels() ignores active tiles. /// @todo Remove this once tools::erodeActiveValues() is implemented. maskGrid->tree().voxelizeActiveTiles(); } UT_AutoInterrupt progress{ ((dilation > 0 ? "Dilating" : "Eroding") + std::string{" VDB mask"}).c_str()}; int numIterations = std::abs(dilation); const int kNumIterationsPerPass = 4; const int numPasses = numIterations / kNumIterationsPerPass; auto morphologyOp = [&](int iterations) { if (dilation > 0) { openvdb::tools::dilateActiveValues(maskGrid->tree(), iterations); } else { /// @todo Replace with tools::erodeActiveValues() once it is implemented. openvdb::tools::erodeVoxels(maskGrid->tree(), iterations); } }; // Since large dilations and erosions can be expensive, apply them // in multiple passes and check for interrupts. 
for (int pass = 0; pass < numPasses; ++pass, numIterations -= kNumIterationsPerPass) { const bool interrupt = progress.wasInterrupted( /*pct=*/int((100.0 * pass * kNumIterationsPerPass) / std::abs(dilation))); if (interrupt) { maskGrid.reset(); throw std::runtime_error{"interrupted"}; } morphologyOp(kNumIterationsPerPass); } if (numIterations > 0) { morphologyOp(numIterations); } } int dilation = 0; // positive = dilation, negative = erosion openvdb::BoolGrid::Ptr maskGrid; }; struct LevelSetMaskOp { template<typename GridType> void operator()(const GridType& grid) { outputGrid = openvdb::tools::sdfInteriorMask(grid); } hvdb::GridPtr outputGrid; }; struct BBoxClipOp { BBoxClipOp(const openvdb::BBoxd& bbox_, bool inside_ = true): bbox(bbox_), inside(inside_) {} template<typename GridType> void operator()(const GridType& grid) { outputGrid = openvdb::tools::clip(grid, bbox, inside); } openvdb::BBoxd bbox; hvdb::GridPtr outputGrid; bool inside = true; }; struct FrustumClipOp { FrustumClipOp(const openvdb::math::Transform::Ptr& frustum_, bool inside_ = true): frustum(frustum_), inside(inside_) {} template<typename GridType> void operator()(const GridType& grid) { openvdb::math::NonlinearFrustumMap::ConstPtr mapPtr; if (frustum) mapPtr = frustum->constMap<openvdb::math::NonlinearFrustumMap>(); if (mapPtr) { outputGrid = openvdb::tools::clip(grid, *mapPtr, inside); } } const openvdb::math::Transform::ConstPtr frustum; const bool inside = true; hvdb::GridPtr outputGrid; }; template<typename GridType> struct MaskClipDispatchOp { MaskClipDispatchOp(const GridType& grid_, bool inside_ = true): grid(&grid_), inside(inside_) {} template<typename MaskGridType> void operator()(const MaskGridType& mask) { outputGrid.reset(); if (grid) outputGrid = openvdb::tools::clip(*grid, mask, inside); } const GridType* grid; hvdb::GridPtr outputGrid; bool inside = true; }; struct MaskClipOp { MaskClipOp(hvdb::GridCPtr mask_, bool inside_ = true): mask(mask_), inside(inside_) {} template<typename GridType> void operator()(const GridType& grid) { outputGrid.reset(); if (mask) { // Dispatch on the mask grid type, now that the source grid type is resolved. MaskClipDispatchOp<GridType> op(grid, inside); if (mask->apply<hvdb::AllGridTypes>(op)) { outputGrid = op.outputGrid; } } } hvdb::GridCPtr mask; hvdb::GridPtr outputGrid; bool inside = true; }; } // unnamed namespace //////////////////////////////////////// /// Get the selected camera's frustum transform. void SOP_OpenVDB_Clip::Cache::getFrustum(OP_Context& context) { mFrustum.reset(); const auto time = context.getTime(); UT_String cameraPath; evalString(cameraPath, "camera", 0, time); if (!cameraPath.isstring()) { throw std::runtime_error{"no camera path was specified"}; } OBJ_Camera* camera = nullptr; if (auto* obj = cookparms()->getCwd()->findOBJNode(cameraPath)) { camera = obj->castToOBJCamera(); } OP_Node* self = cookparms()->getCwd(); if (!camera) { throw std::runtime_error{"camera \"" + cameraPath.toStdString() + "\" was not found"}; } self->addExtraInput(camera, OP_INTEREST_DATA); OBJ_CameraParms cameraParms; camera->getCameraParms(cameraParms, time); if (cameraParms.projection != OBJ_PROJ_PERSPECTIVE) { throw std::runtime_error{cameraPath.toStdString() + " is not a perspective camera"}; /// @todo support ortho and other cameras? } const bool pad = (0 != evalInt("setpadding", 0, time)); const auto padding = pad ? evalVec3f("padding", time) : openvdb::Vec3f{0}; const float nearPlane = (evalInt("setnear", 0, time) ? 
static_cast<float>(evalFloat("near", 0, time)) : static_cast<float>(camera->getNEAR(time))) - padding[2]; const float farPlane = (evalInt("setfar", 0, time) ? static_cast<float>(evalFloat("far", 0, time)) : static_cast<float>(camera->getFAR(time))) + padding[2]; mFrustum = hvdb::frustumTransformFromCamera(*self, context, *camera, /*offset=*/0.f, nearPlane, farPlane, /*voxelDepth=*/1.f, /*voxelCountX=*/100); if (!mFrustum || !mFrustum->constMap<openvdb::math::NonlinearFrustumMap>()) { throw std::runtime_error{ "failed to compute frustum bounds for camera " + cameraPath.toStdString()}; } if (pad) { const auto extents = mFrustum->constMap<openvdb::math::NonlinearFrustumMap>()->getBBox().extents(); mFrustum->preScale(openvdb::Vec3d{ (extents[0] + 2 * padding[0]) / extents[0], (extents[1] + 2 * padding[1]) / extents[1], 1.0}); } } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Clip::cookMyGuide1(OP_Context&) { myGuide1->clearAndDestroy(); openvdb::math::Transform::ConstPtr frustum; // Attempt to extract the frustum from our cache. if (auto* cache = dynamic_cast<SOP_OpenVDB_Clip::Cache*>(myNodeVerbCache)) { frustum = cache->frustum(); } if (frustum) { const UT_Vector3 color{0.9f, 0.0f, 0.0f}; hvdb::drawFrustum(*myGuide1, *frustum, &color, /*tickColor=*/nullptr, /*shaded=*/false, /*ticks=*/false); } return error(); } OP_ERROR SOP_OpenVDB_Clip::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); UT_AutoInterrupt progress{"Clipping VDBs"}; const GU_Detail* maskGeo = inputGeo(1); UT_String clipper; evalString(clipper, "clipper", 0, time); const bool useCamera = (clipper == "camera"), useMask = (clipper == "mask"), inside = evalInt("inside", 0, time), pad = evalInt("setpadding", 0, time); const auto padding = pad ? evalVec3f("padding", time) : openvdb::Vec3f{0}; mFrustum.reset(); openvdb::BBoxd clipBox; hvdb::GridCPtr maskGrid; if (useCamera) { getFrustum(context); } else if (maskGeo) { if (useMask) { const GA_PrimitiveGroup* maskGroup = parsePrimitiveGroups( evalStdString("mask", time).c_str(), GroupCreator{maskGeo}); hvdb::VdbPrimCIterator maskIt{maskGeo, maskGroup}; if (maskIt) { if (maskIt->getConstGrid().getGridClass() == openvdb::GRID_LEVEL_SET) { // If the mask grid is a level set, extract an interior mask from it. LevelSetMaskOp op; hvdb::GEOvdbApply<hvdb::NumericGridTypes>(**maskIt, op); maskGrid = op.outputGrid; } else { maskGrid = maskIt->getConstGridPtr(); } } if (!maskGrid) { addError(SOP_MESSAGE, "mask VDB not found"); return error(); } if (pad) { // If padding is enabled and nonzero, dilate or erode the mask grid. const auto paddingInVoxels = padding / maskGrid->voxelSize(); if (!openvdb::math::isApproxEqual(paddingInVoxels[0], paddingInVoxels[1]) || !openvdb::math::isApproxEqual(paddingInVoxels[1], paddingInVoxels[2])) { addWarning(SOP_MESSAGE, "nonuniform padding is not supported for mask clipping"); } if (const int dilation = int(std::round(paddingInVoxels[0]))) { DilatedMaskOp op{dilation}; maskGrid->apply<hvdb::AllGridTypes>(op); if (op.maskGrid) maskGrid = op.maskGrid; } } } else { UT_BoundingBox box; maskGeo->getBBox(&box); clipBox.min()[0] = box.xmin(); clipBox.min()[1] = box.ymin(); clipBox.min()[2] = box.zmin(); clipBox.max()[0] = box.xmax(); clipBox.max()[1] = box.ymax(); clipBox.max()[2] = box.zmax(); if (pad) { clipBox.min() -= padding; clipBox.max() += padding; } } } else { addError(SOP_MESSAGE, "Not enough sources specified."); return error(); } // Get the group of grids to process. 
const GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", time)); int numLevelSets = 0; for (hvdb::VdbPrimIterator it{gdp, group}; it; ++it) { if (progress.wasInterrupted()) { throw std::runtime_error{"interrupted"}; } const auto& inGrid = it->getConstGrid(); hvdb::GridPtr outGrid; if (inGrid.getGridClass() == openvdb::GRID_LEVEL_SET) { ++numLevelSets; } progress.getInterrupt()->setAppTitle( ("Clipping VDB " + it.getPrimitiveIndexAndName().toStdString()).c_str()); if (maskGrid) { MaskClipOp op{maskGrid, inside}; if (hvdb::GEOvdbApply<hvdb::VolumeGridTypes>(**it, op)) { // all Houdini-supported volume grid types outGrid = op.outputGrid; } else if (inGrid.isType<openvdb::points::PointDataGrid>()) { addWarning(SOP_MESSAGE, "only bounding box clipping is currently supported for point data grids"); } } else if (useCamera) { FrustumClipOp op{mFrustum, inside}; if (hvdb::GEOvdbApply<hvdb::VolumeGridTypes>(**it, op)) { // all Houdini-supported volume grid types outGrid = op.outputGrid; } else if (inGrid.isType<openvdb::points::PointDataGrid>()) { addWarning(SOP_MESSAGE, "only bounding box clipping is currently supported for point data grids"); } } else { BBoxClipOp op{clipBox, inside}; if (hvdb::GEOvdbApply<hvdb::VolumeGridTypes>(**it, op)) { // all Houdini-supported volume grid types outGrid = op.outputGrid; } else if (inGrid.isType<openvdb::points::PointDataGrid>()) { if (inside) { outGrid = inGrid.deepCopyGrid(); outGrid->clipGrid(clipBox); } else { addWarning(SOP_MESSAGE, "only Keep Inside mode is currently supported for point data grids"); } } } // Replace the original VDB primitive with a new primitive that contains // the output grid and has the same attributes and group membership. hvdb::replaceVdbPrimitive(*gdp, outGrid, **it, true); } if (numLevelSets > 0) { if (numLevelSets == 1) { addWarning(SOP_MESSAGE, "a level set grid was clipped;" " the resulting grid might not be a valid level set"); } else { addWarning(SOP_MESSAGE, "some level sets were clipped;" " the resulting grids might not be valid level sets"); } } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
22,848
C++
33.256372
116
0.595895
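
The clip SOP above ultimately dispatches to openvdb::tools::clip() for its box, frustum and mask modes. Below is a minimal standalone sketch of the bounding-box path only, assuming a level-set sphere as input; the sphere parameters, box extents and keepInterior flag are arbitrary example values rather than anything taken from the node.

// Standalone illustration of the box-clipping call used by BBoxClipOp above.
#include <openvdb/openvdb.h>
#include <openvdb/tools/Clip.h>            // openvdb::tools::clip()
#include <openvdb/tools/LevelSetSphere.h>  // example input grid
#include <iostream>

int main()
{
    openvdb::initialize();

    // Example input: a level-set sphere of radius 10 with a 0.5 voxel size.
    openvdb::FloatGrid::Ptr grid = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/10.0f, /*center=*/openvdb::Vec3f(0.0f, 0.0f, 0.0f), /*voxelSize=*/0.5f);

    // World-space clip box, analogous to the SOP's bounding-box input.
    const openvdb::BBoxd clipBox(openvdb::Vec3d(-5.0, -5.0, -5.0),
                                 openvdb::Vec3d( 5.0,  5.0,  5.0));

    // keepInterior = true corresponds to the node's "Keep Inside" mode.
    openvdb::FloatGrid::Ptr clipped =
        openvdb::tools::clip(*grid, clipBox, /*keepInterior=*/true);

    std::cout << "active voxels before: " << grid->activeVoxelCount()
              << ", after: " << clipped->activeVoxelCount() << std::endl;
    return 0;
}

As the SOP warns for its inputs, a clipped level set is generally no longer a valid signed distance field near the cut.
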
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Morph_Level_Set.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Morph_Level_Set.cc /// /// @author Ken Museth /// /// @brief Level set morphing SOP #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/tools/LevelSetMorph.h> #include <hboost/algorithm/string/join.hpp> #include <stdexcept> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; //////////////////////////////////////// // Utilities namespace { struct MorphingParms { MorphingParms() : mLSGroup(nullptr) , mAdvectSpatial(openvdb::math::UNKNOWN_BIAS) , mRenormSpatial(openvdb::math::UNKNOWN_BIAS) , mAdvectTemporal(openvdb::math::UNKNOWN_TIS) , mRenormTemporal(openvdb::math::UNKNOWN_TIS) , mNormCount(1) , mTimeStep(0.0) , mMinMask(0) , mMaxMask(1) , mInvertMask(false) { } const GA_PrimitiveGroup* mLSGroup; openvdb::FloatGrid::ConstPtr mTargetGrid; openvdb::FloatGrid::ConstPtr mMaskGrid; openvdb::math::BiasedGradientScheme mAdvectSpatial, mRenormSpatial; openvdb::math::TemporalIntegrationScheme mAdvectTemporal, mRenormTemporal; int mNormCount; float mTimeStep; float mMinMask, mMaxMask; bool mInvertMask; }; class MorphOp { public: MorphOp(MorphingParms& parms, hvdb::Interrupter& boss) : mParms(&parms) , mBoss(&boss) { } void operator()(openvdb::FloatGrid& grid) { if (mBoss->wasInterrupted()) return; openvdb::tools::LevelSetMorphing<openvdb::FloatGrid, hvdb::Interrupter> morph(grid, *(mParms->mTargetGrid), mBoss); if (mParms->mMaskGrid) { morph.setAlphaMask(*(mParms->mMaskGrid)); morph.setMaskRange(mParms->mMinMask, mParms->mMaxMask); morph.invertMask(mParms->mInvertMask); } morph.setSpatialScheme(mParms->mAdvectSpatial); morph.setTemporalScheme(mParms->mAdvectTemporal); morph.setTrackerSpatialScheme(mParms->mRenormSpatial); morph.setTrackerTemporalScheme(mParms->mRenormTemporal); morph.setNormCount(mParms->mNormCount); morph.advect(0, mParms->mTimeStep); } private: MorphingParms* mParms; hvdb::Interrupter* mBoss; }; } // namespace //////////////////////////////////////// // SOP Declaration class SOP_OpenVDB_Morph_Level_Set: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Morph_Level_Set(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Morph_Level_Set() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned i ) const override { return (i > 0); } class Cache: public SOP_VDBCacheOptions { protected: OP_ERROR cookVDBSop(OP_Context&) override; OP_ERROR evalMorphingParms(OP_Context&, MorphingParms&); bool processGrids(MorphingParms&, hvdb::Interrupter&); }; protected: void resolveObsoleteParms(PRM_ParmList*) override; bool updateParmsFlags() override; }; //////////////////////////////////////// // Build UI and register this operator void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; using namespace openvdb::math; hutil::ParmList parms; // Level set grid parms.add(hutil::ParmFactory(PRM_STRING, "sourcegroup", "Source") .setChoiceList(&hutil::PrimGroupMenuInput1) .setDocumentation( "A subset of the input level set VDBs to be morphed" " (see [specifying volumes|/model/volumes#group])")); // Target grid parms.add(hutil::ParmFactory(PRM_STRING, "targetgroup", "Target") .setChoiceList(&hutil::PrimGroupMenuInput2) .setDocumentation( "The target level set VDB (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "mask", "") .setDefault(PRMoneDefaults) 
.setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("Enable/disable the mask.") .setDocumentation(nullptr)); // Alpha grid parms.add(hutil::ParmFactory(PRM_STRING, "maskname", "Alpha Mask") .setChoiceList(&hutil::PrimGroupMenuInput3) .setTooltip( "An optional scalar VDB to be used for alpha masking" " (see [specifying volumes|/model/volumes#group])\n\n" "Voxel values are assumed to be between 0 and 1.")); parms.add(hutil::ParmFactory(PRM_HEADING, "morphingheading", "Morphing") .setDocumentation( "These parameters control how the SDF moves from the source to the target.")); // Advect: timestep parms.add(hutil::ParmFactory(PRM_FLT, "timestep", "Timestep") .setDefault(1, "1.0/$FPS") .setDocumentation( "The number of seconds of movement to apply to the input points\n\n" "The default is `1/$FPS` (one frame's worth of time).\n\n" "TIP:\n" " This parameter can be animated through time using the `$T`\n" " expression. To control how fast the morphing is done, multiply `$T`\n" " by a scale factor. For example, to animate it twice as fast, use\n" " the expression, `$T*2`.\n")); // Advect: spatial menu { std::vector<std::string> items; items.push_back(biasedGradientSchemeToString(FIRST_BIAS)); items.push_back(biasedGradientSchemeToMenuName(FIRST_BIAS)); items.push_back(biasedGradientSchemeToString(HJWENO5_BIAS)); items.push_back(biasedGradientSchemeToMenuName(HJWENO5_BIAS)); parms.add(hutil::ParmFactory(PRM_STRING, "advectspatial", "Spatial Scheme") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setDefault(biasedGradientSchemeToString(HJWENO5_BIAS)) .setTooltip("Set the spatial finite difference scheme.") .setDocumentation( "How accurately the gradients of the signed distance field\n" "are computed during advection\n\n" "The later choices are more accurate but take more time.")); } // Advect: temporal menu { std::vector<std::string> items; for (int i = 0; i < NUM_TEMPORAL_SCHEMES; ++i) { TemporalIntegrationScheme it = TemporalIntegrationScheme(i); items.push_back(temporalIntegrationSchemeToString(it)); // token items.push_back(temporalIntegrationSchemeToMenuName(it)); // label } parms.add(hutil::ParmFactory(PRM_STRING, "advecttemporal", "Temporal Scheme") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setDefault(temporalIntegrationSchemeToString(TVD_RK2)) .setTooltip("Set the temporal integration scheme.") .setDocumentation( "How accurately time is evolved within each advection step\n\n" "The later choices are more accurate but take more time.")); } parms.add(hutil::ParmFactory(PRM_HEADING, "renormheading", "Renormalization") .setDocumentation( "After morphing the signed distance field, it will often no longer" " contain valid distances. 
A number of renormalization passes can be" " performed to convert it back into a proper signed distance field.")); parms.add(hutil::ParmFactory(PRM_INT_J, "normsteps", "Steps") .setDefault(PRMthreeDefaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 10) .setTooltip("The number of times to renormalize between each substep")); // Renorm: spatial menu { std::vector<std::string> items; items.push_back(biasedGradientSchemeToString(FIRST_BIAS)); items.push_back(biasedGradientSchemeToMenuName(FIRST_BIAS)); items.push_back(biasedGradientSchemeToString(HJWENO5_BIAS)); items.push_back(biasedGradientSchemeToMenuName(HJWENO5_BIAS)); parms.add(hutil::ParmFactory(PRM_STRING, "renormspatial", "Spatial Scheme") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setDefault(biasedGradientSchemeToString(HJWENO5_BIAS)) .setTooltip("Set the spatial finite difference scheme.") .setDocumentation( "How accurately the gradients of the signed distance field\n" "are computed during renormalization\n\n" "The later choices are more accurate but take more time.")); } // Renorm: temporal menu { std::vector<std::string> items; for (int i = 0; i < NUM_TEMPORAL_SCHEMES; ++i) { TemporalIntegrationScheme it = TemporalIntegrationScheme(i); items.push_back(temporalIntegrationSchemeToString(it)); // token items.push_back(temporalIntegrationSchemeToMenuName(it)); // label } parms.add(hutil::ParmFactory(PRM_STRING, "renormtemporal", "Temporal Scheme") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setDefault(temporalIntegrationSchemeToString(TVD_RK2)) .setTooltip("Set the temporal integration scheme.") .setDocumentation( "How accurately time is evolved during renormalization\n\n" "The later choices are more accurate but take more time.")); } parms.add(hutil::ParmFactory(PRM_HEADING, "maskheading", "Alpha Mask")); //Invert mask. 
parms.add(hutil::ParmFactory(PRM_TOGGLE, "invert", "Invert Alpha Mask") .setTooltip("Invert the optional mask so that alpha value 0 maps to 1 and 1 maps to 0.")); // Min mask range parms.add(hutil::ParmFactory(PRM_FLT_J, "minmask", "Min Mask Cutoff") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_UI, 0.0, PRM_RANGE_UI, 1.0) .setTooltip("Threshold below which to clamp mask values to zero")); // Max mask range parms.add(hutil::ParmFactory(PRM_FLT_J, "maxmask", "Max Mask Cutoff") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_UI, 0.0, PRM_RANGE_UI, 1.0) .setTooltip("Threshold above which to clamp mask values to one")); // Obsolete parameters hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_FLT, "beginTime", "Begin time")); obsoleteParms.add(hutil::ParmFactory(PRM_FLT, "endTime", "Time step")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "lsGroup", "Source Level Set")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "targetGroup", "Target")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "maskGroup", "Alpha Mask")); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "morphingHeading", "Morphing")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "advectSpatial", "Spatial Scheme") .setDefault(biasedGradientSchemeToString(HJWENO5_BIAS))); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "advectTemporal", "Temporal Scheme") .setDefault(temporalIntegrationSchemeToString(TVD_RK2))); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "renormHeading", "Renormalization")); obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "normSteps", "Steps") .setDefault(PRMthreeDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "renormSpatial", "Spatial Scheme") .setDefault(biasedGradientSchemeToString(HJWENO5_BIAS))); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "renormTemporal", "Temporal Scheme") .setDefault(temporalIntegrationSchemeToString(TVD_RK2))); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "maskHeading", "Alpha Mask")); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "minMask", "Min Mask Cutoff") .setDefault(PRMzeroDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "maxMask", "Max Mask Cutoff") .setDefault(PRMoneDefaults)); // Register this operator. 
hvdb::OpenVDBOpFactory("VDB Morph SDF", SOP_OpenVDB_Morph_Level_Set::factory, parms, *table) #ifndef SESI_OPENVDB .setInternalName("DW_OpenVDBMorphLevelSet") #endif .setObsoleteParms(obsoleteParms) .addInput("Source SDF VDBs to Morph") .addInput("Target SDF VDB") .addOptionalInput("Optional VDB Alpha Mask") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Morph_Level_Set::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Blend between source and target level set VDBs.\"\"\"\n\ \n\ @overview\n\ \n\ This node advects a source narrow-band signed distance field\n\ towards a target narrow-band signed distance field.\n\ \n\ @related\n\ - [OpenVDB Advect|Node:sop/DW_OpenVDBAdvect]\n\ - [OpenVDB Advect Points|Node:sop/DW_OpenVDBAdvectPoints]\n\ - [Node:sop/vdbmorphsdf]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } void SOP_OpenVDB_Morph_Level_Set::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; resolveRenamedParm(*obsoleteParms, "advectSpatial", "advectspatial"); resolveRenamedParm(*obsoleteParms, "advectTemporal", "advecttemporal"); resolveRenamedParm(*obsoleteParms, "lsGroup", "sourcegroup"); resolveRenamedParm(*obsoleteParms, "maskGroup", "maskname"); resolveRenamedParm(*obsoleteParms, "maxMask", "maxmask"); resolveRenamedParm(*obsoleteParms, "minMask", "minmask"); resolveRenamedParm(*obsoleteParms, "normSteps", "normsteps"); resolveRenamedParm(*obsoleteParms, "renormSpatial", "renormspatial"); resolveRenamedParm(*obsoleteParms, "renormTemporal", "renormtemporal"); resolveRenamedParm(*obsoleteParms, "targetGroup", "targetgroup"); // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } // Enable/disable or show/hide parameters in the UI. 
bool SOP_OpenVDB_Morph_Level_Set::updateParmsFlags() { bool changed = false; const bool hasMask = (this->nInputs() == 3); changed |= enableParm("mask", hasMask); const bool useMask = hasMask && bool(evalInt("mask", 0, 0)); changed |= enableParm("invert", useMask); changed |= enableParm("minmask", useMask); changed |= enableParm("maxmask", useMask); changed |= enableParm("maskname", useMask); return changed; } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Morph_Level_Set::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Morph_Level_Set(net, name, op); } SOP_OpenVDB_Morph_Level_Set::SOP_OpenVDB_Morph_Level_Set(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Morph_Level_Set::Cache::cookVDBSop(OP_Context& context) { try { // Evaluate UI parameters MorphingParms parms; if (evalMorphingParms(context, parms) >= UT_ERROR_ABORT) return error(); hvdb::Interrupter boss("Morphing level set"); processGrids(parms, boss); if (boss.wasInterrupted()) addWarning(SOP_MESSAGE, "Process was interrupted"); boss.end(); } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Morph_Level_Set::Cache::evalMorphingParms( OP_Context& context, MorphingParms& parms) { const fpreal now = context.getTime(); parms.mLSGroup = matchGroup(*gdp, evalStdString("sourcegroup", now)); parms.mTimeStep = static_cast<float>(evalFloat("timestep", 0, now)); parms.mAdvectSpatial = openvdb::math::stringToBiasedGradientScheme(evalStdString("advectspatial", now)); if (parms.mAdvectSpatial == openvdb::math::UNKNOWN_BIAS) { addError(SOP_MESSAGE, "Morph: Unknown biased gradient"); return UT_ERROR_ABORT; } parms.mRenormSpatial = openvdb::math::stringToBiasedGradientScheme(evalStdString("renormspatial", now)); if (parms.mRenormSpatial == openvdb::math::UNKNOWN_BIAS) { addError(SOP_MESSAGE, "Renorm: Unknown biased gradient"); return UT_ERROR_ABORT; } parms.mAdvectTemporal = openvdb::math::stringToTemporalIntegrationScheme(evalStdString("advecttemporal", now)); if (parms.mAdvectTemporal == openvdb::math::UNKNOWN_TIS) { addError(SOP_MESSAGE, "Morph: Unknown temporal integration"); return UT_ERROR_ABORT; } parms.mRenormTemporal = openvdb::math::stringToTemporalIntegrationScheme(evalStdString("renormtemporal", now)); if (parms.mRenormTemporal == openvdb::math::UNKNOWN_TIS) { addError(SOP_MESSAGE, "Renorm: Unknown temporal integration"); return UT_ERROR_ABORT; } parms.mNormCount = static_cast<int>(evalInt("normsteps", 0, now)); const GU_Detail* targetGeo = inputGeo(1); if (!targetGeo) { addError(SOP_MESSAGE, "Missing target grid input"); return UT_ERROR_ABORT; } const GA_PrimitiveGroup* targetGroup = matchGroup(*targetGeo, evalStdString("targetgroup", now)); hvdb::VdbPrimCIterator it(targetGeo, targetGroup); if (it) { if (it->getStorageType() != UT_VDB_FLOAT) { addError(SOP_MESSAGE, "Unrecognized target grid type."); return UT_ERROR_ABORT; } parms.mTargetGrid = hvdb::Grid::constGrid<openvdb::FloatGrid>(it->getConstGridPtr()); } if (!parms.mTargetGrid) { addError(SOP_MESSAGE, "Missing target grid"); return UT_ERROR_ABORT; } const GU_Detail* maskGeo = evalInt("mask", 0, now) ? 
inputGeo(2) : nullptr; if (maskGeo) { const GA_PrimitiveGroup* maskGroup = matchGroup(*maskGeo, evalStdString("maskname", now)); hvdb::VdbPrimCIterator maskIt(maskGeo, maskGroup); if (maskIt) { if (maskIt->getStorageType() != UT_VDB_FLOAT) { addError(SOP_MESSAGE, "Unrecognized alpha mask grid type."); return UT_ERROR_ABORT; } parms.mMaskGrid = hvdb::Grid::constGrid<openvdb::FloatGrid>(maskIt->getConstGridPtr()); } if (!parms.mMaskGrid) { addError(SOP_MESSAGE, "Missing alpha mask grid"); return UT_ERROR_ABORT; } } parms.mMinMask = static_cast<float>(evalFloat("minmask", 0, now)); parms.mMaxMask = static_cast<float>(evalFloat("maxmask", 0, now)); parms.mInvertMask = evalInt("invert", 0, now); return error(); } //////////////////////////////////////// bool SOP_OpenVDB_Morph_Level_Set::Cache::processGrids( MorphingParms& parms, hvdb::Interrupter& boss) { MorphOp op(parms, boss); std::vector<std::string> skippedGrids, nonLevelSetGrids, narrowBands; for (hvdb::VdbPrimIterator it(gdp, parms.mLSGroup); it; ++it) { if (boss.wasInterrupted()) break; GU_PrimVDB* vdbPrim = *it; const openvdb::GridClass gridClass = vdbPrim->getGrid().getGridClass(); if (gridClass != openvdb::GRID_LEVEL_SET) { nonLevelSetGrids.push_back(it.getPrimitiveNameOrIndex().toStdString()); continue; } if (vdbPrim->getStorageType() == UT_VDB_FLOAT) { vdbPrim->makeGridUnique(); openvdb::FloatGrid& grid = UTvdbGridCast<openvdb::FloatGrid>(vdbPrim->getGrid()); if ( grid.background() < float(openvdb::LEVEL_SET_HALF_WIDTH * grid.voxelSize()[0]) ) { narrowBands.push_back(it.getPrimitiveNameOrIndex().toStdString()); } op(grid); } else { skippedGrids.push_back(it.getPrimitiveNameOrIndex().toStdString()); } } if (!skippedGrids.empty()) { std::string s = "The following non-floating-point grids were skipped: " + hboost::algorithm::join(skippedGrids, ", "); addWarning(SOP_MESSAGE, s.c_str()); } if (!nonLevelSetGrids.empty()) { std::string s = "The following non-level-set grids were skipped: " + hboost::algorithm::join(nonLevelSetGrids, ", "); addWarning(SOP_MESSAGE, s.c_str()); } if (!narrowBands.empty()) { std::string s = "The following grids have a narrow band width that is" " less than 3 voxel units: " + hboost::algorithm::join(narrowBands, ", "); addWarning(SOP_MESSAGE, s.c_str()); } return true; }
20,548
C++
34.551903
99
0.647557
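
The morph SOP above is a thin wrapper around openvdb::tools::LevelSetMorphing: MorphOp simply forwards the UI parameters to the tool's setters and calls advect(). A minimal standalone sketch of the same calls, without the alpha mask or interrupter, and with two arbitrary level-set spheres standing in for the source and target SDFs:

// Standalone illustration of the LevelSetMorphing tool wrapped by MorphOp above.
#include <openvdb/openvdb.h>
#include <openvdb/tools/LevelSetMorph.h>
#include <openvdb/tools/LevelSetSphere.h>

int main()
{
    openvdb::initialize();

    using GridT = openvdb::FloatGrid;
    // Source and target signed distance fields (same voxel size so the
    // narrow bands line up).
    GridT::Ptr source = openvdb::tools::createLevelSetSphere<GridT>(
        5.0f, openvdb::Vec3f(0.0f, 0.0f, 0.0f), 0.25f);
    GridT::Ptr target = openvdb::tools::createLevelSetSphere<GridT>(
        8.0f, openvdb::Vec3f(4.0f, 0.0f, 0.0f), 0.25f);

    // The source grid is morphed in place toward the target.
    openvdb::tools::LevelSetMorphing<GridT> morph(*source, *target);
    morph.setSpatialScheme(openvdb::math::HJWENO5_BIAS);        // advection stencil
    morph.setTemporalScheme(openvdb::math::TVD_RK2);            // advection time integration
    morph.setTrackerSpatialScheme(openvdb::math::HJWENO5_BIAS); // renormalization stencil
    morph.setTrackerTemporalScheme(openvdb::math::TVD_RK2);
    morph.setNormCount(3);                                       // renormalization passes per substep

    // Advance the interface for 1/24 s, as the SOP does with its 1/$FPS default.
    morph.advect(/*time0=*/0.0, /*time1=*/1.0 / 24.0);
    return 0;
}
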
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Topology_To_Level_Set.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Topology_To_Level_Set.cc /// /// @author FX R&D OpenVDB team #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb_houdini/GU_VDBPointTools.h> #include <openvdb/tools/TopologyToLevelSet.h> #include <openvdb/tools/LevelSetUtil.h> #include <openvdb/points/PointDataGrid.h> #include <UT/UT_Interrupt.h> #include <GA/GA_Handle.h> #include <GA/GA_Types.h> #include <GA/GA_Iterator.h> #include <GU/GU_Detail.h> #include <PRM/PRM_Parm.h> #include <stdexcept> #include <string> namespace cvdb = openvdb; namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; class SOP_OpenVDB_Topology_To_Level_Set: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Topology_To_Level_Set(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Topology_To_Level_Set() override {} bool updateParmsFlags() override; static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; protected: void resolveObsoleteParms(PRM_ParmList*) override; }; //////////////////////////////////////// namespace { struct Converter { float bandWidthWorld; int bandWidthVoxels, closingWidth, dilation, smoothingSteps; bool worldSpaceUnits; std::string outputName, customName; Converter(GU_Detail& geo, hvdb::Interrupter& boss) : bandWidthWorld(0) , bandWidthVoxels(3) , closingWidth(1) , dilation(0) , smoothingSteps(0) , worldSpaceUnits(false) , outputName("keep") , customName("vdb") , mGeoPt(&geo) , mBossPt(&boss) { } template<typename GridType> void operator()(const GridType& grid) { int bandWidth = bandWidthVoxels; if (worldSpaceUnits) { bandWidth = int(openvdb::math::Round(bandWidthWorld / grid.transform().voxelSize()[0])); } openvdb::FloatGrid::Ptr sdfGrid = openvdb::tools::topologyToLevelSet( grid, bandWidth, closingWidth, dilation, smoothingSteps, mBossPt); std::string name = grid.getName(); if (outputName == "append") name += customName; else if (outputName == "replace") name = customName; hvdb::createVdbPrimitive(*mGeoPt, sdfGrid, name.c_str()); } private: GU_Detail * const mGeoPt; hvdb::Interrupter * const mBossPt; }; // struct Converter } // unnamed namespace void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenu) .setTooltip("Specify a subset of the input VDBs to be processed.") .setDocumentation( "A subset of the input VDB grids to be processed" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_STRING, "outputname", "Output Name") .setDefault("keep") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "keep", "Keep Original Name", "append", "Add Suffix", "replace", "Custom Name", }) .setTooltip("Output VDB naming scheme") .setDocumentation( "Give the output VDB the same name as the input VDB," " or add a suffix to the input name, or use a custom name.")); parms.add(hutil::ParmFactory(PRM_STRING, "customname", "Custom Name") .setTooltip("The suffix or custom name to be used")); /// Narrow-band width { parms.add(hutil::ParmFactory(PRM_TOGGLE, "worldspaceunits", "Use World Space for Band") .setDocumentation( "If enabled, specify the width of the narrow band in world units," " otherwise specify it in voxels. 
Voxel units work with all scales of geometry.")); parms.add(hutil::ParmFactory(PRM_INT_J, "bandwidth", "Half-Band in Voxels") .setDefault(PRMthreeDefaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 10) .setTooltip( "Specify the half width of the narrow band in voxels." " Three voxels is optimal for many level set operations.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "bandwidthws", "Half-Band in World") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 10) .setTooltip("Specify the half width of the narrow band in world units.")); /// } parms.add(hutil::ParmFactory(PRM_INT_J, "dilation", "Voxel Dilation") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 10) .setTooltip("Expand the filled voxel region by the specified number of voxels.")); parms.add(hutil::ParmFactory(PRM_INT_J, "closingwidth", "Closing Width") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 10) .setTooltip( "First expand the filled voxel region, then shrink it by the specified " "number of voxels. This causes holes and valleys to be filled.")); parms.add(hutil::ParmFactory(PRM_INT_J, "smoothingsteps", "Smoothing Steps") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 10) .setTooltip("Number of smoothing iterations")); hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_ORD, "outputName", "Output Name") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "keep", "Keep Original Name", "append", "Add Suffix", "replace", "Custom Name", })); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "customName", "Custom Name")); obsoleteParms.add( hutil::ParmFactory(PRM_TOGGLE, "worldSpaceUnits", "Use World Space for Band")); obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "bandWidth", "Half-Band in Voxels") .setDefault(PRMthreeDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "bandWidthWS", "Half-Band in World") .setDefault(PRMoneDefaults)); // Register this operator. hvdb::OpenVDBOpFactory("VDB Topology to SDF", SOP_OpenVDB_Topology_To_Level_Set::factory, parms, *table) #ifndef SESI_OPENVDB .setInternalName("DW_OpenVDBTopologyToLevelSet") #endif .addInput("VDB Grids") .setObsoleteParms(obsoleteParms) .setVerb(SOP_NodeVerb::COOK_GENERATOR, []() { return new SOP_OpenVDB_Topology_To_Level_Set::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Create a level set VDB based on the active voxels of another VDB.\"\"\"\n\ \n\ @overview\n\ \n\ This node creates a narrow-band level set VDB that conforms to the active voxels\n\ of the input VDB. This forms a shell or wrapper that can be used\n\ to conservatively enclose the input volume.\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Topology_To_Level_Set::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Topology_To_Level_Set(net, name, op); } SOP_OpenVDB_Topology_To_Level_Set::SOP_OpenVDB_Topology_To_Level_Set(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } // Enable or disable parameters in the UI. 
bool SOP_OpenVDB_Topology_To_Level_Set::updateParmsFlags() { bool changed = false; const fpreal time = 0; const bool wsUnits = bool(evalInt("worldspaceunits", 0, time)); changed |= enableParm("bandwidth", !wsUnits); changed |= enableParm("bandwidthws", wsUnits); changed |= setVisibleState("bandwidth", !wsUnits); changed |= setVisibleState("bandwidthws", wsUnits); const auto outputName = evalStdString("outputname", time); changed |= enableParm("customname", (outputName != "keep")); return changed; } void SOP_OpenVDB_Topology_To_Level_Set::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; const fpreal time = 0.0; if (PRM_Parm* parm = obsoleteParms->getParmPtr("outputName")) { if (!parm->isFactoryDefault()) { std::string val{"keep"}; switch (obsoleteParms->evalInt("outputName", 0, time)) { case 0: val = "keep"; break; case 1: val = "append"; break; case 2: val = "replace"; break; } setString(val.c_str(), CH_STRING_LITERAL, "outputname", 0, time); } } resolveRenamedParm(*obsoleteParms, "customName", "customname"); resolveRenamedParm(*obsoleteParms, "worldSpaceUnits", "worldspaceunits"); resolveRenamedParm(*obsoleteParms, "bandWidth", "bandwidth"); resolveRenamedParm(*obsoleteParms, "bandWidthWS", "bandwidthws"); // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Topology_To_Level_Set::Cache::cookVDBSop( OP_Context& context) { try { const fpreal time = context.getTime(); const GU_Detail* inputGeoPt = inputGeo(0); if (inputGeoPt == nullptr) return error(); hvdb::Interrupter boss; // Get UI settings Converter converter(*gdp, boss); converter.worldSpaceUnits = evalInt("worldspaceunits", 0, time) != 0; converter.bandWidthWorld = float(evalFloat("bandwidthws", 0, time)); converter.bandWidthVoxels = static_cast<int>(evalInt("bandwidth", 0, time)); converter.closingWidth = static_cast<int>(evalInt("closingwidth", 0, time)); converter.dilation = static_cast<int>(evalInt("dilation", 0, time)); converter.smoothingSteps = static_cast<int>(evalInt("smoothingsteps", 0, time)); converter.outputName = evalStdString("outputname", time); converter.customName = evalStdString("customname", time); // Process VDB primitives const GA_PrimitiveGroup* group = matchGroup(*inputGeoPt, evalStdString("group", time)); hvdb::VdbPrimCIterator vdbIt(inputGeoPt, group); if (!vdbIt) { addWarning(SOP_MESSAGE, "No VDB grids to process."); return error(); } for (; vdbIt; ++vdbIt) { if (boss.wasInterrupted()) break; hvdb::GEOvdbApply<hvdb::AllGridTypes::Append<cvdb::MaskGrid>>(**vdbIt, converter); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
10,835
C++
31.346269
100
0.643286
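
The Converter functor above is essentially a parameter pass-through to openvdb::tools::topologyToLevelSet(). The same conversion outside of Houdini, using the overload without an interrupter and defaults that match the SOP's, might look like the sketch below; only the input grid's active-voxel topology matters, so any grid type can be passed, and the example sphere is arbitrary.

// Standalone illustration of openvdb::tools::topologyToLevelSet(),
// which the SOP above applies to each selected input VDB.
#include <openvdb/openvdb.h>
#include <openvdb/tools/TopologyToLevelSet.h>
#include <openvdb/tools/LevelSetSphere.h>

int main()
{
    openvdb::initialize();

    // Any grid works as input; only its active-voxel topology is used.
    openvdb::FloatGrid::Ptr input = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        4.0f, openvdb::Vec3f(0.0f, 0.0f, 0.0f), 0.2f);

    // halfWidth / closingSteps / dilation / smoothingSteps correspond to the
    // SOP's bandwidth, closingwidth, dilation and smoothingsteps parameters.
    openvdb::FloatGrid::Ptr sdf = openvdb::tools::topologyToLevelSet(
        *input, /*halfWidth=*/3, /*closingSteps=*/1, /*dilation=*/0, /*smoothingSteps=*/0);

    // Analogous to the node's "Add Suffix" naming mode.
    sdf->setName(input->getName() + "_wrap");
    return 0;
}
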
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_VDBVerbUtils.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /* * Copyright (c) * Side Effects Software Inc. All rights reserved. */ #ifndef OPENVDB_HOUDINI_SOP_VDBVERBUTILS_HAS_BEEN_INCLUDED #define OPENVDB_HOUDINI_SOP_VDBVERBUTILS_HAS_BEEN_INCLUDED #include <GOP/GOP_Manager.h> #include <SOP/SOP_NodeParmsOptions.h> // for SOP_NodeCacheOptions #include <openvdb/Types.h> #include <string> //////////////////////////////////////// /// @brief SOP_NodeCacheOptions subclass that adds methods specific to SOP_NodeVDB class SOP_VDBCacheOptions: public SOP_NodeCacheOptions { public: SOP_VDBCacheOptions() {} ~SOP_VDBCacheOptions() override {} openvdb::Vec3f evalVec3f(const char* name, fpreal time) const { return openvdb::Vec3f(static_cast<float>(evalFloat(name, 0, time)), static_cast<float>(evalFloat(name, 1, time)), static_cast<float>(evalFloat(name, 2, time))); } openvdb::Vec3R evalVec3R(const char* name, fpreal time) const { return openvdb::Vec3R(evalFloat(name, 0, time), evalFloat(name, 1, time), evalFloat(name, 2, time)); } openvdb::Vec3i evalVec3i(const char* name, fpreal time) const { using IntT = openvdb::Vec3i::ValueType; return openvdb::Vec3i(static_cast<IntT>(evalInt(name, 0, time)), static_cast<IntT>(evalInt(name, 1, time)), static_cast<IntT>(evalInt(name, 2, time))); } openvdb::Vec2R evalVec2R(const char* name, fpreal time) const { return openvdb::Vec2R(evalFloat(name, 0, time), evalFloat(name, 1, time)); } openvdb::Vec2i evalVec2i(const char* name, fpreal time) const { using IntT = openvdb::Vec2i::ValueType; return openvdb::Vec2i(static_cast<IntT>(evalInt(name, 0, time)), static_cast<IntT>(evalInt(name, 1, time))); } std::string evalStdString(const char* name, fpreal time, int index = 0) const { UT_String str; evalString(str, name, index, time); return str.toStdString(); } const GA_PrimitiveGroup *matchGroup(const GU_Detail &gdp, const UT_StringRef &groupname) { const GA_PrimitiveGroup *group = 0; if (groupname.isstring()) { bool success = false; group = gop.parseOrderedPrimitiveDetached(groupname, &gdp, false, success); if (!success) { UT_StringHolder error; error = "Invalid group ("; error += groupname; error += ")"; throw std::runtime_error(error.c_str()); } } return group; } const GA_PrimitiveGroup * parsePrimitiveGroups(const UT_StringRef &maskStr, const GroupCreator &maskGeo) { return gop.parsePrimitiveGroups(maskStr, maskGeo); } GA_PrimitiveGroup * parsePrimitiveGroupsCopy(const UT_StringRef &maskStr, const GroupCreator &maskGeo) { return gop.parsePrimitiveGroupsCopy(maskStr, maskGeo); } const GA_PointGroup * parsePointGroups(const UT_StringRef &maskStr, const GroupCreator &maskGeo) { return gop.parsePointGroups(maskStr, maskGeo); } const GA_PointGroup * parsePointGroups(const UT_StringRef &maskStr, const GU_Detail *gdp) { return parsePointGroups(maskStr, GroupCreator(gdp)); } protected: OP_ERROR cook(OP_Context &context) override final { auto result = cookMySop(context); gop.destroyAdhocGroups(); return result; } virtual OP_ERROR cookVDBSop(OP_Context&) = 0; OP_ERROR cookMySop(OP_Context& context) { return cookVDBSop(context); } // Handles ad-hoc group creation. GOP_Manager gop; }; // class SOP_VDBCacheOptions #endif // OPENVDB_HOUDINI_SOP_VDBVERBUTILS_HAS_BEEN_INCLUDED
4,055
C++
31.448
92
0.602219
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Write.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Write.cc /// /// @author FX R&D OpenVDB team #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb_houdini/GEO_PrimVDB.h> #include <openvdb_houdini/GU_PrimVDB.h> #include <PRM/PRM_Parm.h> #include <UT/UT_Interrupt.h> #include <set> #include <sstream> #include <stdexcept> #include <string> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; class SOP_OpenVDB_Write: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Write(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Write() override {} void getDescriptiveParmName(UT_String& s) const override { s = "file_name"; } static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); static int writeNowCallback(void* data, int index, float now, const PRM_Template*); protected: void resolveObsoleteParms(PRM_ParmList*) override; OP_ERROR cookVDBSop(OP_Context&) override; void writeOnNextCook(bool write = true) { mWriteOnNextCook = write; } using StringSet = std::set<std::string>; void reportFloatPrecisionConflicts(const StringSet& conflicts); private: void doCook(const fpreal time = 0); bool mWriteOnNextCook; }; //////////////////////////////////////// // Build UI and register this operator. void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms, obsoleteParms; // File name parms.add(hutil::ParmFactory(PRM_FILE, "file_name", "File Name") .setDefault(0, "./filename.vdb") .setTooltip("Path name for the output VDB file")); // Group parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Write only a subset of the input grids.") .setDocumentation( "Write only a subset of the input VDBs" " (see [specifying volumes|/model/volumes#group]).")); // Compression { char const * const items[] = { "none", "None", "zip", "Zip", "blosc", "Blosc", nullptr }; #ifdef OPENVDB_USE_BLOSC parms.add(hutil::ParmFactory(PRM_ORD, "compression", "Compression") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setDefault("blosc") .setTooltip( "Zip is slow but compresses very well. Blosc is fast and compresses well,\n" "but files written with Blosc cannot be read by older versions of Houdini.\n") .setDocumentation( "[Blosc|http://www.blosc.org] is fast and compresses well." " Zip is slow but compresses very well." 
" For most cases Blosc is the recommended compression type.")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "compress_zip", "Zip Compression")); #else #ifdef OPENVDB_USE_ZLIB parms.add(hutil::ParmFactory(PRM_TOGGLE, "compress_zip", "Zip Compression") .setDefault(true) .setTooltip( "Apply Zip \"deflate\" compression to non-SDF and non-fog grids.\n" "(Zip compression can be slow for large volumes.)")); obsoleteParms.add(hutil::ParmFactory(PRM_ORD, "compression", "Compression") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items)); #else // no compression available obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "compress_zip", "Zip Compression")); obsoleteParms.add(hutil::ParmFactory(PRM_ORD, "compression", "Compression") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items)); #endif #endif } // Write mode (manual/auto) parms.add(hutil::ParmFactory(PRM_ORD, "writeMode", "Write Mode") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "manual", "Manual", "auto", "Automatic" }) .setTooltip( "In Manual mode, click the Write Now button\n" "to write the output file.\n" "In Automatic mode, the file is written\n" "each time this node cooks.")); // "Write Now" button parms.add(hutil::ParmFactory(PRM_CALLBACK, "write", "Write Now") .setCallbackFunc(&SOP_OpenVDB_Write::writeNowCallback) .setTooltip("Click to write the output file.")); { // Float precision parms.add(hutil::ParmFactory(PRM_HEADING, "float_header", "Float Precision")); parms.add(hutil::ParmFactory(PRM_STRING, "float_16_group", "Write 16-Bit") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip( "For grids that belong to the group(s) listed here,\n" "write floating-point scalar or vector voxel values\n" "using 16-bit half floats.\n" "If no groups are listed, all grids will be written\n" "using their existing precision settings.")); parms.add(hutil::ParmFactory(PRM_STRING, "float_full_group", "Write Full-Precision") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip( "For grids that belong to the group(s) listed here,\n" "write floating-point scalar or vector voxel values\n" "using full-precision floats or doubles.\n" "If no groups are listed, all grids will be written\n" "using their existing precision settings.")); } // Register this operator. hvdb::OpenVDBOpFactory("VDB Write", SOP_OpenVDB_Write::factory, parms, *table) .setNativeName("") .setObsoleteParms(obsoleteParms) .addInput("VDBs to be written to disk") .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Write a `.vdb` file to disk.\"\"\"\n\ \n\ @overview\n\ \n\ This node writes VDB volumes to a `.vdb` file.\n\ It is usually preferable to use Houdini's native [File|Node:sop/file] node,\n\ but this node allows one to specify the file compression scheme\n\ and to control floating-point precision for individual volumes,\n\ options that are not available on the native node.\n\ \n\ @related\n\ - [OpenVDB Read|Node:sop/DW_OpenVDBRead]\n\ - [Node:sop/file]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } void SOP_OpenVDB_Write::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; #ifdef OPENVDB_USE_BLOSC PRM_Parm* parm = obsoleteParms->getParmPtr("compress_zip"); if (parm && !parm->isFactoryDefault()) { const bool zip = obsoleteParms->evalInt("compress_zip", 0, /*time=*/0.0); const UT_String compression(zip ? 
"zip" : "none"); setString(compression, CH_STRING_LITERAL, "compression", 0, 0.0); } #else #ifdef OPENVDB_USE_ZLIB if (nullptr != obsoleteParms->getParmPtr("compression") && !obsoleteParms->getParmPtr("compression")->isFactoryDefault()) { UT_String compression; obsoleteParms->evalString(compression, "compression", 0, /*time=*/0.0); setInt("compress_zip", 0, 0.0, (compression == "zip" ? 1 : 0)); } #endif #endif // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } OP_Node* SOP_OpenVDB_Write::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Write(net, name, op); } SOP_OpenVDB_Write::SOP_OpenVDB_Write(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op), mWriteOnNextCook(false) { } //////////////////////////////////////// int SOP_OpenVDB_Write::writeNowCallback( void *data, int /*index*/, float /*now*/, const PRM_Template*) { if (SOP_OpenVDB_Write* self = static_cast<SOP_OpenVDB_Write*>(data)) { self->writeOnNextCook(); self->forceRecook(); return 1; } return 0; } //////////////////////////////////////// /// If any grids belong to both the "Write 16-Bit" and "Write Full-Precision" /// groups, display a warning. void SOP_OpenVDB_Write::reportFloatPrecisionConflicts(const StringSet& conflicts) { if (conflicts.empty()) return; std::ostringstream ostr; if (conflicts.size() == 1) { ostr << "For grid \"" << *conflicts.begin() << "\""; } else { StringSet::const_iterator i = conflicts.begin(), e = conflicts.end(); ostr << "For grids \"" << *i << "\""; // Join grid names into a string of the form "grid1, grid2 and grid3". size_t count = conflicts.size(), n = 1; for (++i; i != e; ++i, ++n) { if (n + 1 < count) ostr << ", "; else ostr << " and "; ostr << "\"" << *i << "\""; } } ostr << ", specify either 16-bit output or full-precision output" << " or neither, but not both."; // Word wrap the message at 60 columns and indent the first line. const std::string prefix(20, '#'); UT_String word_wrapped(prefix + ostr.str()); word_wrapped.format(60/*cols*/); word_wrapped.replacePrefix(prefix.c_str(), ""); addWarning(SOP_MESSAGE, word_wrapped); } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Write::cookVDBSop(OP_Context& context) { try { hutil::ScopedInputLock lock(*this, context); const fpreal t = context.getTime(); if (mWriteOnNextCook || 1 == evalInt("writeMode", 0, t)) { duplicateSource(0, context); doCook(t); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); } void SOP_OpenVDB_Write::doCook(const fpreal time) { // Get the filename of the output file. const std::string filename = evalStdString("file_name", time); if (filename.empty()) { addWarning(SOP_MESSAGE, "no name given for the output file"); return; } // Get grid groups. UT_String groupStr, halfGroupStr, fullGroupStr; evalString(groupStr, "group", 0, time); evalString(halfGroupStr, "float_16_group", 0, time); evalString(fullGroupStr, "float_full_group", 0, time); const GA_PrimitiveGroup *group = matchGroup(*gdp, groupStr.toStdString()), *halfGroup = nullptr, *fullGroup = nullptr; if (halfGroupStr.isstring()) { // Normally, an empty group pattern matches all primitives, but // for the float precision filters, we want it to match nothing. halfGroup = matchGroup(*gdp, halfGroupStr.toStdString()); } if (fullGroupStr.isstring()) { fullGroup = matchGroup(*gdp, fullGroupStr.toStdString()); } // Get compression options. 
#ifdef OPENVDB_USE_BLOSC UT_String compression; evalString(compression, "compression", 0, time); #else #ifdef OPENVDB_USE_ZLIB const bool zip = evalInt("compress_zip", 0, time); #endif #endif UT_AutoInterrupt progress(("Writing " + filename).c_str()); // Set of names of grids that the user selected for both 16-bit // and full-precision conversion StringSet conflicts; // Collect pointers to grids from VDB primitives found in the geometry. openvdb::GridPtrSet outGrids; for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (progress.wasInterrupted()) { throw std::runtime_error("Interrupted"); } const GU_PrimVDB* vdb = *it; // Create a new grid that shares the primitive's tree and transform // and then transfer primitive attributes to the new grid as metadata. hvdb::GridPtr grid = openvdb::ConstPtrCast<hvdb::Grid>(vdb->getGrid().copyGrid()); GU_PrimVDB::createMetadataFromGridAttrs(*grid, *vdb, *gdp); grid->removeMeta("is_vdb"); // Retrieve the grid's name from the primitive attribute. const std::string gridName = it.getPrimitiveName().toStdString(); // Check if the user has overridden this grid's saveFloatAsHalf setting. if (halfGroup && halfGroup->contains(vdb)) { if (fullGroup && fullGroup->contains(vdb)) { // This grid belongs to both the 16-bit and full-precision groups. conflicts.insert(gridName); } else { grid->setSaveFloatAsHalf(true); } } else if (fullGroup && fullGroup->contains(vdb)) { if (halfGroup && halfGroup->contains(vdb)) { // This grid belongs to both the 16-bit and full-precision groups. conflicts.insert(gridName); } else { grid->setSaveFloatAsHalf(false); } } else { // Preserve this grid's existing saveFloatAsHalf setting. } outGrids.insert(grid); } if (outGrids.empty()) { addWarning(SOP_MESSAGE, ("No grids were written to " + filename).c_str()); } reportFloatPrecisionConflicts(conflicts); // Add file-level metadata. openvdb::MetaMap outMeta; outMeta.insertMeta("creator", openvdb::StringMetadata("Houdini/SOP_OpenVDB_Write")); // Create a VDB file object. openvdb::io::File file(filename); #ifdef OPENVDB_USE_BLOSC uint32_t compressionFlags = file.compression(); if (compression == "none") { compressionFlags &= ~(openvdb::io::COMPRESS_ZIP | openvdb::io::COMPRESS_BLOSC); } else if (compression == "blosc") { compressionFlags &= ~openvdb::io::COMPRESS_ZIP; compressionFlags |= openvdb::io::COMPRESS_BLOSC; } else if (compression == "zip") { compressionFlags |= openvdb::io::COMPRESS_ZIP; compressionFlags &= ~openvdb::io::COMPRESS_BLOSC; } #else uint32_t compressionFlags = openvdb::io::COMPRESS_ACTIVE_MASK; #ifdef OPENVDB_USE_ZLIB if (zip) compressionFlags |= openvdb::io::COMPRESS_ZIP; #endif #endif // OPENVDB_USE_BLOSC file.setCompression(compressionFlags); file.write(outGrids, outMeta); file.close(); mWriteOnNextCook = false; }
14,101
C++
32.103286
94
0.62024
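
Stripped of the Houdini plumbing, the write SOP above reduces to three openvdb::io::File steps: adjust the compression flags, optionally mark grids for 16-bit float output, and write. A standalone sketch of that sequence, with an arbitrary example grid and file name, and assuming an OpenVDB build with Blosc support for the COMPRESS_BLOSC flag:

// Standalone illustration of the io::File write path used by the SOP above.
#include <openvdb/openvdb.h>
#include <openvdb/io/File.h>
#include <openvdb/tools/LevelSetSphere.h>

int main()
{
    openvdb::initialize();

    openvdb::FloatGrid::Ptr grid = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        5.0f, openvdb::Vec3f(0.0f, 0.0f, 0.0f), 0.25f);
    grid->setName("sphere_sdf");
    grid->setSaveFloatAsHalf(true); // what the SOP's "Write 16-Bit" group does per grid

    openvdb::GridPtrVec grids;
    grids.push_back(grid);

    // File-level metadata, mirroring the SOP's "creator" entry.
    openvdb::MetaMap meta;
    meta.insertMeta("creator", openvdb::StringMetadata("example/write_sketch"));

    openvdb::io::File file("out.vdb");

    // Equivalent of the node's "Blosc" compression choice: keep the existing
    // flags (active-mask compression), add Blosc, drop Zip.
    uint32_t flags = file.compression();
    flags |= openvdb::io::COMPRESS_BLOSC;   // requires a Blosc-enabled build
    flags &= ~openvdb::io::COMPRESS_ZIP;
    file.setCompression(flags);

    file.write(grids, meta);
    file.close();
    return 0;
}
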
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/GEO_VDBTranslator.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /* * Copyright (c) * Side Effects Software Inc. All rights reserved. */ #include "GU_PrimVDB.h" #include "Utils.h" #include <UT/UT_EnvControl.h> #include <UT/UT_Error.h> #include <UT/UT_ErrorManager.h> #include <UT/UT_IOTable.h> #include <UT/UT_IStream.h> #include <UT/UT_Version.h> #include <FS/FS_IStreamDevice.h> #include <GA/GA_Stat.h> #include <GU/GU_Detail.h> #include <SOP/SOP_Node.h> #include <GEO/GEO_IOTranslator.h> #include <openvdb/io/Stream.h> #include <openvdb/io/File.h> #include <openvdb/Metadata.h> #include <stdio.h> #include <iostream> using namespace openvdb_houdini; using std::cerr; namespace { class GEO_VDBTranslator : public GEO_IOTranslator { public: GEO_VDBTranslator() {} ~GEO_VDBTranslator() override {} GEO_IOTranslator *duplicate() const override; const char *formatName() const override; int checkExtension(const char *name) override; void getFileExtensions( UT_StringArray &extensions) const override; int checkMagicNumber(unsigned magic) override; bool fileStat( const char *filename, GA_Stat &stat, uint level) override; GA_Detail::IOStatus fileLoad( GEO_Detail *gdp, UT_IStream &is, bool ate_magic) override; GA_Detail::IOStatus fileSave( const GEO_Detail *gdp, std::ostream &os) override; GA_Detail::IOStatus fileSaveToFile( const GEO_Detail *gdp, const char *fname) override; }; GEO_IOTranslator * GEO_VDBTranslator::duplicate() const { return new GEO_VDBTranslator(); } const char * GEO_VDBTranslator::formatName() const { return "VDB Format"; } int GEO_VDBTranslator::checkExtension(const char *name) { return UT_String(name).matchFileExtension(".vdb"); } void GEO_VDBTranslator::getFileExtensions(UT_StringArray &extensions) const { extensions.clear(); extensions.append(".vdb"); } int GEO_VDBTranslator::checkMagicNumber(unsigned /*magic*/) { return 0; } bool GEO_VDBTranslator::fileStat(const char *filename, GA_Stat &stat, uint /*level*/) { stat.clear(); try { openvdb::io::File file(filename); file.open(/*delayLoad*/false); int nprim = 0; UT_BoundingBox bbox; bbox.makeInvalid(); // Loop over all grids in the file. for (openvdb::io::File::NameIterator nameIter = file.beginName(); nameIter != file.endName(); ++nameIter) { const std::string& gridName = nameIter.gridName(); // Read the grid metadata. 
auto grid = file.readGridMetadata(gridName); auto stats = grid->getStatsMetadata(); openvdb::Vec3IMetadata::Ptr meta_minbbox, meta_maxbbox; UT_BoundingBox voxelbox; voxelbox.initBounds(); meta_minbbox = stats->getMetadata<openvdb::Vec3IMetadata>("file_bbox_min"); meta_maxbbox = stats->getMetadata<openvdb::Vec3IMetadata>("file_bbox_max"); if (meta_minbbox && meta_maxbbox) { UT_Vector3 minv, maxv; minv = UTvdbConvert(meta_minbbox->value()); maxv = UTvdbConvert(meta_maxbbox->value()); voxelbox.enlargeBounds(minv); voxelbox.enlargeBounds(maxv); // We need to convert from corner-sampled (as in VDB) // to center-sampled (as our BBOX elsewhere reports) voxelbox.expandBounds(0.5, 0.5, 0.5); // Transform UT_Vector3 voxelpts[8]; UT_BoundingBox worldbox; worldbox.initBounds(); voxelbox.getBBoxPoints(voxelpts); for (int i = 0; i < 8; i++) { worldbox.enlargeBounds( UTvdbConvert( grid->indexToWorld(UTvdbConvert(voxelpts[i])) ) ); } bbox.enlargeBounds(worldbox); } if (voxelbox.isValid()) { stat.appendVolume(nprim, gridName.c_str(), static_cast<int>(voxelbox.size().x()), static_cast<int>(voxelbox.size().y()), static_cast<int>(voxelbox.size().z())); } else { stat.appendVolume(nprim, gridName.c_str(), 0, 0, 0); } nprim++; } // Straightforward correspondence: stat.setPointCount(nprim); stat.setVertexCount(nprim); stat.setPrimitiveCount(nprim); stat.setBounds(bbox); file.close(); } catch (std::exception &e) { cerr << "Stat failure: " << e.what() << "\n"; return false; } return true; } GA_Detail::IOStatus GEO_VDBTranslator::fileLoad(GEO_Detail *geogdp, UT_IStream &is, bool /*ate_magic*/) { UT_WorkBuffer buf; GU_Detail *gdp = static_cast<GU_Detail*>(geogdp); bool ok = true; // Create a std::stream proxy. FS_IStreamDevice reader(&is); auto streambuf = new FS_IStreamDeviceBuffer(reader); auto stdstream = new std::istream(streambuf); try { // Create and open a VDB file, but don't read any grids yet. openvdb::io::Stream file(*stdstream, /*delayLoad*/false); // Read the file-level metadata into global attributes. openvdb::MetaMap::Ptr fileMetadata = file.getMetadata(); if (fileMetadata) { GU_PrimVDB::createAttrsFromMetadata( GA_ATTRIB_GLOBAL, GA_Offset(0), *fileMetadata, *geogdp); } // Loop over all grids in the file. auto && allgrids = file.getGrids(); for (auto && grid : *allgrids) { // Add a new VDB primitive for this grid. // Note: this clears the grid's metadata. createVdbPrimitive(*gdp, grid); } } catch (std::exception &e) { // Add a warning here instead of an error or else the File SOP's // Missing Frame parameter won't be able to suppress cook errors. UTaddCommonWarning(UT_ERROR_JUST_STRING, e.what()); ok = false; } delete stdstream; delete streambuf; return ok; } template <typename FileT, typename OutputT> bool fileSaveVDB(const GEO_Detail *geogdp, OutputT os) { const GU_Detail *gdp = static_cast<const GU_Detail*>(geogdp); if (!gdp) return false; try { // Populate an output GridMap with VDB grid primitives found in the // geometry. openvdb::GridPtrVec outGrids; for (VdbPrimCIterator it(gdp); it; ++it) { const GU_PrimVDB* vdb = *it; // Create a new grid that shares the primitive's tree and transform // and then transfer primitive attributes to the new grid as metadata. GridPtr grid = openvdb::ConstPtrCast<Grid>(vdb->getGrid().copyGrid()); GU_PrimVDB::createMetadataFromGridAttrs(*grid, *vdb, *gdp); grid->removeMeta("is_vdb"); // Retrieve the grid's name from the primitive attribute. grid->setName(it.getPrimitiveName().toStdString()); outGrids.push_back(grid); } // Add file-level metadata. 
openvdb::MetaMap fileMetadata; std::string versionStr = "Houdini "; versionStr += UTgetFullVersion(); versionStr += "/GEO_VDBTranslator"; fileMetadata.insertMeta("creator", openvdb::StringMetadata(versionStr)); #if defined(SESI_OPENVDB) GU_PrimVDB::createMetadataFromAttrs( fileMetadata, GA_ATTRIB_GLOBAL, GA_Offset(0), *gdp); #endif // Create a VDB file object. FileT file(os); // Always enable active mask compression, since it is fast // and compresses level sets and fog volumes well. uint32_t compression = openvdb::io::COMPRESS_ACTIVE_MASK; // Enable Blosc unless backwards compatibility is requested. if (openvdb::io::Archive::hasBloscCompression() && !UT_EnvControl::getInt(ENV_HOUDINI13_VOLUME_COMPATIBILITY)) { compression |= openvdb::io::COMPRESS_BLOSC; } file.setCompression(compression); file.write(outGrids, fileMetadata); } catch (std::exception &e) { cerr << "Save failure: " << e.what() << "\n"; return false; } return true; } GA_Detail::IOStatus GEO_VDBTranslator::fileSave(const GEO_Detail *geogdp, std::ostream &os) { // Saving via io::Stream will NOT save grid offsets, disabling partial // reading. return fileSaveVDB<openvdb::io::Stream, std::ostream &>(geogdp, os); } GA_Detail::IOStatus GEO_VDBTranslator::fileSaveToFile(const GEO_Detail *geogdp, const char *fname) { // Saving via io::File will save grid offsets that allow for partial // reading. return fileSaveVDB<openvdb::io::File, const char *>(geogdp, fname); } } // unnamed namespace void new_VDBGeometryIO(void *) { GU_Detail::registerIOTranslator(new GEO_VDBTranslator()); // addExtension() will ignore if vdb is already in the list of extensions UTgetGeoExtensions()->addExtension("vdb"); } #ifndef SESI_OPENVDB void newGeometryIO(void *data) { // Initialize the version of the OpenVDB library that this library is built against // (i.e., not the HDK native OpenVDB library). openvdb::initialize(); // Register a .vdb file translator. new_VDBGeometryIO(data); } #endif
9,883
C++
28.951515
92
0.587069
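
The translator above contrasts the two archive forms: io::File writes grid offsets, so individual grids can later be read without loading the whole file, whereas io::Stream does not. The sketch below shows both read paths side by side; "out.vdb" and "sphere_sdf" are just the example names from the earlier write sketch, and the hasGrid()/readGrid() calls are the standard per-grid io::File API rather than anything the translator itself uses.

// Standalone illustration of per-grid (io::File) vs. all-at-once (io::Stream) reads.
#include <openvdb/openvdb.h>
#include <openvdb/io/File.h>
#include <openvdb/io/Stream.h>
#include <fstream>

int main()
{
    openvdb::initialize();

    // Partial read: open the archive, then pull out one grid by name.
    openvdb::io::File file("out.vdb");
    file.open(); // delayed loading is the default for .vdb archives
    openvdb::GridBase::Ptr oneGrid;
    if (file.hasGrid("sphere_sdf")) {
        oneGrid = file.readGrid("sphere_sdf");
    }
    file.close();

    // Stream read: every grid in the stream is loaded in one pass.
    std::ifstream is("out.vdb", std::ios_base::binary);
    openvdb::io::Stream stream(is);
    auto allGrids = stream.getGrids(); // GridPtrVecPtr with all grids
    return 0;
}
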
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Analysis.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Analysis.cc /// /// @author FX R&D OpenVDB team /// /// @brief Compute gradient fields and other differential properties from VDB volumes #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/tools/GridOperators.h> #include <openvdb/tools/LevelSetUtil.h> #include <openvdb/tools/Mask.h> // for tools::interiorMask() #include <openvdb/tools/GridTransformer.h> #include <UT/UT_Interrupt.h> #include <sstream> #include <stdexcept> #include <string> namespace cvdb = openvdb; namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; namespace { enum OpId { OP_GRADIENT = 0, OP_CURVATURE = 1, OP_LAPLACIAN = 2, OP_CPT = 3, OP_DIVERGENCE = 4, OP_CURL = 5, OP_MAGNITUDE = 6, OP_NORMALIZE = 7 }; } class SOP_OpenVDB_Analysis: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Analysis(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Analysis() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned i) const override { return (i == 1); } static const char* sOpName[]; class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; protected: bool updateParmsFlags() override; void resolveObsoleteParms(PRM_ParmList*) override; }; //////////////////////////////////////// const char* SOP_OpenVDB_Analysis::sOpName[] = { "gradient", "curvature", "laplacian", "closest point transform", "divergence", "curl", "magnitude", "normalize" }; //////////////////////////////////////// void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; // Group pattern parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDB grids to be processed.") .setDocumentation( "A subset of VDBs to analyze (see [specifying volumes|/model/volumes#group])")); // Operator parms.add(hutil::ParmFactory(PRM_ORD, "operator", "Operator") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "gradient", "Gradient (Scalar->Vector)", "curvature", "Curvature (Scalar->Scalar)", "laplacian", "Laplacian (Scalar->Scalar)", "closestpoint", "Closest Point (Scalar->Vector)", "divergence", "Divergence (Vector->Scalar)", "curl", "Curl (Vector->Vector)", "length", "Length (Vector->Scalar)", "normalize", "Normalize (Vector->Vector)" }) .setDocumentation("\ What to compute\n\ \n\ The labels on the items in the menu indicate what datatype\n\ the incoming VDB volume must be and the datatype of the output volume.\n\ \n\ Gradient (scalar -> vector):\n\ The gradient of a scalar field\n\ \n\ Curvature (scalar -> scalar):\n\ The mean curvature of a scalar field\n\ \n\ Laplacian (scalar -> scalar):\n\ The Laplacian of a scalar field\n\ \n\ Closest Point (scalar -> vector):\n\ The location, at each voxel, of the closest point on a surface\n\ defined by the incoming signed distance field\n\ \n\ You can use the resulting field with the\n\ [OpenVDB Advect Points node|Node:sop/DW_OpenVDBAdvectPoints]\n\ to stick points to the surface.\n\ \n\ Divergence (vector -> scalar):\n\ The divergence of a vector field\n\ \n\ Curl (vector -> vector):\n\ The curl of a vector field\n\ \n\ Magnitude (vector -> scalar):\n\ The length of the vectors in a vector field\n\ \n\ Normalize (vector -> vector):\n\ The vectors in a vector field divided by their lengths\n")); 
parms.add(hutil::ParmFactory(PRM_STRING, "maskname", "Mask VDB") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip("VDB (from the second input) used to define the iteration space") .setDocumentation( "A VDB from the second input used to define the iteration space" " (see [specifying volumes|/model/volumes#group])\n\n" "The selected __Operator__ will be applied only where the mask VDB has" " [active|http://www.openvdb.org/documentation/doxygen/overview.html#subsecInactive]" " voxels or, if the mask VDB is a level set, only in the interior of the level set.")); // Output name parms.add(hutil::ParmFactory(PRM_STRING, "outputname", "Output Name") .setDefault("keep") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "keep", "Keep Incoming VDB Names", "append", "Append Operation Name", "custom", "Custom Name" }) .setTooltip("Rename output grid(s)") .setDocumentation( "How to name the generated VDB volumes\n\n" "If you choose __Keep Incoming VDB Names__, the generated fields" " will replace the input fields.")); parms.add(hutil::ParmFactory(PRM_STRING, "customname", "Custom Name") .setTooltip("Rename all output grids with this custom name") .setDocumentation("If this is not blank, the output VDB will use this name.")); // Obsolete parameters hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "threaded", "Multithreaded")); obsoleteParms.add(hutil::ParmFactory(PRM_ORD, "outputName", "Output Name") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "keep", "Keep Incoming VDB Names", "append", "Append Operation Name", "custom", "Custom Name" })); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "customName", "Custom Name")); // Register this operator. hvdb::OpenVDBOpFactory("VDB Analysis", SOP_OpenVDB_Analysis::factory, parms, *table) .setObsoleteParms(obsoleteParms) .addInput("VDBs to Analyze") .addOptionalInput("Optional VDB mask input") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Analysis::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Compute an analytic property of a VDB volume, such as gradient or curvature.\"\"\"\n\ \n\ @overview\n\ \n\ This node computes certain properties from the values of VDB volumes,\n\ and generates new VDB volumes where the voxel values are the computed results.\n\ Using the __Output Name__ parameter you can choose whether the generated\n\ volumes replace the original volumes.\n\ \n\ @related\n\ \n\ - [OpenVDB Advect Points|Node:sop/DW_OpenVDBAdvectPoints]\n\ - [Node:sop/volumeanalysis]\n\ - [Node:sop/vdbanalysis]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Analysis::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Analysis(net, name, op); } SOP_OpenVDB_Analysis::SOP_OpenVDB_Analysis(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// namespace { template<template<typename GridT, typename MaskType, typename InterruptT> class ToolT> struct ToolOp { ToolOp(bool t, hvdb::Interrupter& boss, const cvdb::BoolGrid *mask = nullptr) : mMaskGrid(mask) , mThreaded(t) , mBoss(boss) { } template<typename GridType> void operator()(const GridType& inGrid) { if (mMaskGrid) { // match transform cvdb::BoolGrid regionMask; regionMask.setTransform(inGrid.transform().copy()); 
openvdb::tools::resampleToMatch<openvdb::tools::PointSampler>( *mMaskGrid, regionMask, mBoss); ToolT<GridType, cvdb::BoolGrid, hvdb::Interrupter> tool(inGrid, regionMask, &mBoss); mOutGrid = tool.process(mThreaded); } else { ToolT<GridType, cvdb::BoolGrid/*dummy*/, hvdb::Interrupter> tool(inGrid, &mBoss); mOutGrid = tool.process(mThreaded); } } const cvdb::BoolGrid *mMaskGrid; hvdb::GridPtr mOutGrid; bool mThreaded; hvdb::Interrupter& mBoss; }; struct MaskOp { template<typename GridType> void operator()(const GridType& grid) { mMaskGrid = cvdb::tools::interiorMask(grid); } cvdb::BoolGrid::Ptr mMaskGrid; }; } // unnamed namespace //////////////////////////////////////// // Enable or disable parameters in the UI. bool SOP_OpenVDB_Analysis::updateParmsFlags() { bool changed = false; bool useCustomName = (evalStdString("outputname", 0) == "custom"); changed |= enableParm("customname", useCustomName); #ifndef SESI_OPENVDB changed |= setVisibleState("customname", useCustomName); #endif const bool hasMask = (2 == nInputs()); changed |= enableParm("maskname", hasMask); return changed; } void SOP_OpenVDB_Analysis::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; const fpreal time = 0.0; if (PRM_Parm* parm = obsoleteParms->getParmPtr("outputName")) { if (!parm->isFactoryDefault()) { std::string val{"keep"}; switch (obsoleteParms->evalInt("outputName", 0, time)) { case 0: val = "keep"; break; case 1: val = "append"; break; case 2: val = "custom"; break; } setString(val.c_str(), CH_STRING_LITERAL, "outputname", 0, time); } } resolveRenamedParm(*obsoleteParms, "customName", "customname"); // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Analysis::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); // Get the group of grids to be transformed. const GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", time)); const int whichOp = static_cast<int>(evalInt("operator", 0, time)); if (whichOp < 0 || whichOp > 7) { std::ostringstream ostr; ostr << "expected 0 <= operator <= 7, got " << whichOp; throw std::runtime_error(ostr.str().c_str()); } const bool threaded = true; hvdb::Interrupter boss( (std::string("Computing ") + sOpName[whichOp] + " of VDB grids").c_str()); // Check mask input const GU_Detail* maskGeo = inputGeo(1); cvdb::BoolGrid::Ptr maskGrid; if (maskGeo) { const GA_PrimitiveGroup* maskGroup = parsePrimitiveGroups( evalStdString("maskname", time).c_str(), GroupCreator(maskGeo)); hvdb::VdbPrimCIterator maskIt(maskGeo, maskGroup); if (maskIt) { MaskOp op; if (hvdb::GEOvdbApply<hvdb::AllGridTypes>(**maskIt, op)) { maskGrid = op.mMaskGrid; } } if (!maskGrid) addWarning(SOP_MESSAGE, "Mask VDB not found."); } // For each VDB primitive (with a non-null grid pointer) in the given group... 
std::string operationName; for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (boss.wasInterrupted()) throw std::runtime_error("was interrupted"); GU_PrimVDB* vdb = *it; hvdb::GridPtr outGrid; bool ok = true; switch (whichOp) { case OP_GRADIENT: // gradient of scalar field { ToolOp<cvdb::tools::Gradient> op(threaded, boss, maskGrid.get()); if (hvdb::GEOvdbApply<hvdb::NumericGridTypes>(*vdb, op, /*makeUnique=*/false)) { outGrid = op.mOutGrid; } operationName = "_gradient"; break; } case OP_CURVATURE: // mean curvature of scalar field { ToolOp<cvdb::tools::MeanCurvature> op(threaded, boss, maskGrid.get()); if (hvdb::GEOvdbApply<hvdb::NumericGridTypes>(*vdb, op, /*makeUnique=*/false)) { outGrid = op.mOutGrid; } operationName = "_curvature"; break; } case OP_LAPLACIAN: // Laplacian of scalar field { ToolOp<cvdb::tools::Laplacian> op(threaded, boss, maskGrid.get()); if (hvdb::GEOvdbApply<hvdb::NumericGridTypes>(*vdb, op, /*makeUnique=*/false)) { outGrid = op.mOutGrid; } operationName = "_laplacian"; break; } case OP_CPT: // closest point transform of scalar level set { ToolOp<cvdb::tools::Cpt> op(threaded, boss, maskGrid.get()); if (hvdb::GEOvdbApply<hvdb::NumericGridTypes>(*vdb, op, /*makeUnique=*/false)) { outGrid = op.mOutGrid; } operationName = "_cpt"; break; } case OP_DIVERGENCE: // divergence of vector field { ToolOp<cvdb::tools::Divergence> op(threaded, boss, maskGrid.get()); if (hvdb::GEOvdbApply<hvdb::Vec3GridTypes>(*vdb, op, /*makeUnique=*/false)) { outGrid = op.mOutGrid; } operationName = "_divergence"; break; } case OP_CURL: // curl (rotation) of vector field { ToolOp<cvdb::tools::Curl> op(threaded, boss, maskGrid.get()); if (hvdb::GEOvdbApply<hvdb::Vec3GridTypes>(*vdb, op, /*makeUnique=*/false)) { outGrid = op.mOutGrid; } operationName = "_curl"; break; } case OP_MAGNITUDE: // magnitude of vector field { ToolOp<cvdb::tools::Magnitude> op(threaded, boss, maskGrid.get()); if (hvdb::GEOvdbApply<hvdb::Vec3GridTypes>(*vdb, op, /*makeUnique=*/false)) { outGrid = op.mOutGrid; } operationName = "_magnitude"; break; } case OP_NORMALIZE: // normalize vector field { ToolOp<cvdb::tools::Normalize> op(threaded, boss, maskGrid.get()); if (hvdb::GEOvdbApply<hvdb::Vec3GridTypes>(*vdb, op, /*makeUnique=*/false)) { outGrid = op.mOutGrid; } operationName = "_normalize"; break; } } if (!ok) { UT_String inGridName = it.getPrimitiveNameOrIndex(); std::ostringstream ss; ss << "Can't compute " << sOpName[whichOp] << " from grid"; if (inGridName.isstring()) ss << " " << inGridName; ss << " of type " << UTvdbGetGridTypeString(vdb->getGrid()); addWarning(SOP_MESSAGE, ss.str().c_str()); } // Rename grid std::string gridName = vdb->getGridName(); const auto renaming = evalStdString("outputname", time); if (renaming == "append") { if (operationName.size() > 0) gridName += operationName; } else if (renaming == "custom") { const auto customName = evalStdString("customname", time); if (!customName.empty()) gridName = customName; } // Replace the original VDB primitive with a new primitive that contains // the output grid and has the same attributes and group membership. hvdb::replaceVdbPrimitive(*gdp, outGrid, *vdb, true, gridName.c_str()); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
16611
C++
32.559596
100
0.568118
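For reference, a minimal standalone sketch (not part of the repository) of the OpenVDB operators this SOP dispatches per grid — tools::gradient, tools::meanCurvature and tools::magnitude from GridOperators.h — applied to a procedurally built level-set sphere in place of Houdini input geometry; only the openvdb library is assumed.

// Sketch only: the sphere and printed counts are illustrative placeholders.
#include <openvdb/openvdb.h>
#include <openvdb/tools/GridOperators.h>   // tools::gradient, meanCurvature, magnitude
#include <openvdb/tools/LevelSetSphere.h>
#include <iostream>

int main()
{
    openvdb::initialize();

    // Narrow-band signed distance field of a unit sphere.
    openvdb::FloatGrid::Ptr sdf = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/1.0f, /*center=*/openvdb::Vec3f(0.0f), /*voxelSize=*/0.05f);

    // Scalar -> vector: what OP_GRADIENT computes for each selected grid.
    auto grad = openvdb::tools::gradient(*sdf);
    // Scalar -> scalar: OP_CURVATURE (mean curvature).
    auto curv = openvdb::tools::meanCurvature(*sdf);
    // Vector -> scalar: OP_MAGNITUDE applied here to the gradient field.
    auto mag = openvdb::tools::magnitude(*grad);

    std::cout << "gradient voxels:  " << grad->activeVoxelCount() << "\n"
              << "curvature voxels: " << curv->activeVoxelCount() << "\n"
              << "magnitude voxels: " << mag->activeVoxelCount() << std::endl;
    return 0;
}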
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Points_Convert.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file SOP_OpenVDB_Points_Convert.cc /// /// @authors Dan Bailey, Nick Avramoussis, James Bird /// /// @brief Converts points to OpenVDB points. #include <openvdb/openvdb.h> #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointCount.h> #include <openvdb/points/PointMask.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/PointUtils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <houdini_utils/geometry.h> #include <houdini_utils/ParmFactory.h> #include <CH/CH_Manager.h> // for CHgetEvalTime #include <GU/GU_DetailHandle.h> #include <GU/GU_PackedContext.h> #include <GU/GU_PackedGeometry.h> #include <GU/GU_PackedFragment.h> #include <GU/GU_PrimPacked.h> #include <stdexcept> #include <string> #include <utility> #include <vector> using namespace openvdb; using namespace openvdb::points; using namespace openvdb::math; namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; namespace { enum TRANSFORM_MODE { TRANSFORM_TARGET_POINTS = 0, TRANSFORM_VOXEL_SIZE, TRANSFORM_REF_GRID }; enum CONVERSION_MODE { MODE_CONVERT_TO_VDB = 0, MODE_CONVERT_FROM_VDB, MODE_GENERATE_MASK, MODE_COUNT_POINTS, }; enum OUTPUT_NAME_MODE { NAME_KEEP = 0, NAME_APPEND, NAME_REPLACE }; } // anonymous namespace //////////////////////////////////////// class SOP_OpenVDB_Points_Convert: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Points_Convert(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Points_Convert() override = default; static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned i) const override { return (i == 1); } static OUTPUT_NAME_MODE getOutputNameMode(const std::string& modeName); class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; protected: bool updateParmsFlags() override; }; //////////////////////////////////////// namespace { inline int lookupAttrInput(const PRM_SpareData* spare) { const char *istring; if (!spare) return 0; istring = spare->getValue("sop_input"); return istring ? 
atoi(istring) : 0; } inline void sopBuildAttrMenu(void* data, PRM_Name* menuEntries, int themenusize, const PRM_SpareData* spare, const PRM_Parm*) { if (data == nullptr || menuEntries == nullptr || spare == nullptr) return; SOP_Node* sop = CAST_SOPNODE(static_cast<OP_Node*>(data)); if (sop == nullptr) { // terminate and quit menuEntries[0].setToken(0); menuEntries[0].setLabel(0); return; } int inputIndex = lookupAttrInput(spare); const GU_Detail* gdp = sop->getInputLastGeo(inputIndex, CHgetEvalTime()); size_t menuIdx = 0, menuEnd(themenusize - 2); // null object menuEntries[menuIdx].setToken("0"); menuEntries[menuIdx++].setLabel("- no attribute selected -"); if (gdp) { // point attribute names auto iter = gdp->pointAttribs().begin(GA_SCOPE_PUBLIC); if (!iter.atEnd() && menuIdx != menuEnd) { if (menuIdx > 0) { menuEntries[menuIdx].setToken(PRM_Name::mySeparator); menuEntries[menuIdx++].setLabel(PRM_Name::mySeparator); } for (; !iter.atEnd() && menuIdx != menuEnd; ++iter) { const char* str = (*iter)->getName(); if (str) { Name name = str; if (name != "P") { menuEntries[menuIdx].setToken(name.c_str()); menuEntries[menuIdx++].setLabel(name.c_str()); } } } } } // terminator menuEntries[menuIdx].setToken(0); menuEntries[menuIdx].setLabel(0); } const PRM_ChoiceList PrimAttrMenu( PRM_ChoiceListType(PRM_CHOICELIST_REPLACE), sopBuildAttrMenu); } // unnamed namespace //////////////////////////////////////// // Build UI and register this operator. void newSopOperator(OP_OperatorTable* table) { openvdb::initialize(); // Force the building of the unit vector codec as it isn't threadsafe. const uint16_t data = 0; auto SYS_UNUSED_VAR_ATTRIB ignoredResult = openvdb::math::QuantizedUnitVec::unpack(data); if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_ORD, "conversion", "Conversion") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "vdb", "Pack Points into VDB Points", "hdk", "Extract Points from VDB Points", "mask", "Generate Mask from VDB Points", "count", "Points/Voxel Count from VDB Points" }) .setTooltip("The conversion method for the expected input types.") .setDocumentation( "Whether to pack points into a VDB Points primitive" " or to extract points from such a primitive or to generate" " a mask from the primitive or to count the number of" " points-per-voxel in the primitive")); parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenu) .setTooltip("Specify a subset of the input point data grids to convert.") .setDocumentation( "A subset of the input VDB Points primitives to be processed" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_STRING, "vdbpointsgroup", "VDB Points Group") .setChoiceList(&hvdb::VDBPointsGroupMenuInput1) .setTooltip("Specify VDB Points Groups to use as an input.") .setDocumentation( "The point group inside the VDB Points primitive to extract\n\n" "This may be a normal point group that was collapsed into the" " VDB Points primitive when it was created, or a new group created" " with the [OpenVDB Points Group node|Node:sop/DW_OpenVDBPointsGroup].")); // point grid name parms.add(hutil::ParmFactory(PRM_STRING, "name", "VDB Name") .setDefault("points") .setTooltip("The name of the VDB Points primitive to be created") .setDocumentation(nullptr)); // VDB points grid name parms.add(hutil::ParmFactory(PRM_STRING, "outputname", "Output Name") .setDefault("keep") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "keep", "Keep Original Name", "append", 
"Add Suffix", "replace", "Custom Name", }) .setTooltip("Output VDB naming scheme") .setDocumentation( "Give the output VDB Points the same name as the input VDB," " or add a suffix to the input name, or use a custom name.")); parms.add(hutil::ParmFactory(PRM_STRING, "countname", "VDB Name") .setDefault("count") .setTooltip("The name of the VDB count primitive to be created") .setDocumentation(nullptr)); parms.add(hutil::ParmFactory(PRM_STRING, "maskname", "VDB Name") .setDefault("mask") .setTooltip("The name of the VDB mask primitive to be created") .setDocumentation("The name of the VDB primitive to be created")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "keep", "Keep Original Geometry") .setDefault(PRMzeroDefaults) .setTooltip("The incoming geometry will not be deleted if this is set.") .setDocumentation("The incoming geometry will not be deleted if this is set.")); // Transform parms.add(hutil::ParmFactory(PRM_ORD, "transform", "Define Transform") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "targetpointspervoxel", "Using Target Points Per Voxel", "voxelsizeonly", "Using Voxel Size Only", "userefvdb", "To Match Reference VDB" }) .setTooltip( "Specify how to construct the PointDataGrid transform. If\n" "an optional transform input is provided for the first two\n" "options, the rotate and translate components are preserved.\n" "Using Target Points Per Voxel:\n" " Automatically calculates a voxel size based off the input\n" " point set and a target amount of points per voxel.\n" "Using Voxel Size Only:\n" " Explicitly sets a voxel size.\n" "To Match Reference VDB:\n" " Uses the complete transform provided from the second input.") .setDocumentation("\ How to construct the VDB Points primitive's transform\n\n\ An important consideration is how big to make the grid cells\n\ that contain the points. Too large and there are too many points\n\ per cell and little optimization occurs. Too small and the cost\n\ of the cells outweighs the points.\n\ \n\ Using Target Points Per Voxel:\n\ Automatically calculate a voxel size so that the given number\n\ of points ends up in each voxel. 
This will assume uniform\n\ distribution of points.\n\ \n\ If an optional transform input is provided, use its rotation\n\ and translation.\n\ Using Voxel Size Only:\n\ Provide an explicit voxel size, and if an optional transform input\n\ is provided, use its rotation and translation.\n\ To Match Reference VDB:\n\ Use the complete transform provided from the second input.\n")); parms.add(hutil::ParmFactory(PRM_FLT_J, "voxelsize", "Voxel Size") .setDefault(PRMpointOneDefaults) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 5) .setTooltip("The desired voxel size of the new VDB Points grid")); parms.add(hutil::ParmFactory(PRM_INT_J, "pointspervoxel", "Points per Voxel") .setDefault(8) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 16) .setTooltip( "The number of points per voxel to use as the target for " "automatic voxel size computation")); // Group name (Transform reference) parms.add(hutil::ParmFactory(PRM_STRING, "refvdb", "Reference VDB") .setChoiceList(&hutil::PrimGroupMenu) .setSpareData(&SOP_Node::theSecondInput) .setTooltip("References the first/selected grid's transform.") .setDocumentation( "Which VDB in the second input to use as the reference for the transform\n\n" "If this is not set, use the first VDB found.")); ////////// // Point attribute transfer parms.add(hutil::ParmFactory(PRM_ORD, "poscompression", "Position Compression") .setDefault(PRMoneDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "none", "None", "int16", "16-bit Fixed Point", "int8", "8-bit Fixed Point" }) .setTooltip("The position attribute compression setting.") .setDocumentation( "The position can be stored relative to the center of the voxel.\n" "This means it does not require the full 32-bit float representation,\n" "but can be quantized to a smaller fixed-point value.")); parms.add(hutil::ParmFactory(PRM_HEADING, "transferheading", "Attribute Transfer")); // Mode. 
Either convert all or convert specifc attributes parms.add(hutil::ParmFactory(PRM_ORD, "mode", "Mode") .setDefault(PRMzeroDefaults) .setTooltip("Whether to transfer only specific attributes or all attributes found") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "all", "All Attributes", "spec", "Specific Attributes" })); hutil::ParmList attrParms; // Attribute name attrParms.add(hutil::ParmFactory(PRM_STRING, "attribute#", "Attribute") .setChoiceList(&PrimAttrMenu) .setSpareData(&SOP_Node::theFirstInput) .setTooltip("Select a point attribute to transfer.\n\n" "Supports integer and floating-point attributes of " "arbitrary precisions and tuple sizes.")); { char const * const items[] = { "none", "None", "truncate", "16-bit Truncate", UnitVecCodec::name(), "Unit Vector", FixedPointCodec<true, UnitRange>::name(), "8-bit Unit", FixedPointCodec<false, UnitRange>::name(), "16-bit Unit", nullptr }; attrParms.add(hutil::ParmFactory(PRM_ORD, "valuecompression#", "Value Compression") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setTooltip("Value compression to use for specific attributes.") .setDocumentation("\ How to compress attribute values\n\ \n\ None:\n\ Values are stored with their full precision.\n\ \n\ 16-bit Truncate:\n\ Values are stored at half precision, truncating lower-order bits.\n\ \n\ Unit Vector:\n\ Values are treated as unit vectors, so that if two components\n\ are known, the third is implied and need not be stored.\n\ \n\ 8-bit Unit:\n\ Values are treated as lying in the 0..1 range and are quantized to 8 bits.\n\ \n\ 16-bit Unit:\n\ Values are treated as lying in the 0..1 range and are quantized to 16 bits.\n")); } attrParms.add(hutil::ParmFactory(PRM_TOGGLE, "blosccompression#", "Blosc Compression") .setInvisible() // this parm is now a no-op as in-memory blosc compression is deprecated .setDefault(PRMzeroDefaults)); // Add multi parm parms.add(hutil::ParmFactory(PRM_MULTITYPE_LIST, "attrList", "Point Attributes") .setTooltip("Transfer point attributes to each voxel in the level set's narrow band") .setMultiparms(attrParms) .setDefault(PRMzeroDefaults)); parms.add(hutil::ParmFactory(PRM_LABEL, "attributespacer", "")); { char const * const items[] = { "none", "None", UnitVecCodec::name(), "Unit Vector", "truncate", "16-bit Truncate", nullptr }; parms.add(hutil::ParmFactory(PRM_ORD, "normalcompression", "Normal Compression") .setDefault(PRMzeroDefaults) .setTooltip("All normal attributes will use this compression codec.") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items)); } { char const * const items[] = { "none", "None", FixedPointCodec<false, UnitRange>::name(), "16-bit Unit", FixedPointCodec<true, UnitRange>::name(), "8-bit Unit", "truncate", "16-bit Truncate", nullptr }; parms.add(hutil::ParmFactory(PRM_ORD, "colorcompression", "Color Compression") .setDefault(PRMzeroDefaults) .setTooltip("All color attributes will use this compression codec.") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items)); } hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "transferHeading", "Attribute Transfer")); ////////// // Register this operator. 
hvdb::OpenVDBOpFactory("Convert VDB Points", SOP_OpenVDB_Points_Convert::factory, parms, *table) #ifndef SESI_OPENVDB .setInternalName("DW_OpenVDBPointsConvert") #endif .addInput("Points to Convert") .addOptionalInput("Optional Reference VDB (for transform)") .setObsoleteParms(obsoleteParms) .setVerb(SOP_NodeVerb::COOK_GENERIC, []() { return new SOP_OpenVDB_Points_Convert::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Convert a point cloud into a VDB Points primitive, or vice versa.\"\"\"\n\ \n\ @overview\n\ \n\ This node converts an unstructured cloud of points to and from a single\n\ [VDB Points|http://www.openvdb.org/documentation/doxygen/points.html] primitive.\n\ The resulting primitive will reorder the points to place spatially\n\ close points close together.\n\ It is then able to efficiently unpack regions of interest within that primitive.\n\ The [OpenVDB Points Group node|Node:sop/DW_OpenVDBPointsGroup] can be used\n\ to create regions of interest.\n\ \n\ Because nearby points often have similar data, there is the possibility\n\ of aggressively compressing attribute data to minimize data size.\n\ \n\ @related\n\ - [OpenVDB Points Group|Node:sop/DW_OpenVDBPointsGroup]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Points_Convert::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Points_Convert(net, name, op); } SOP_OpenVDB_Points_Convert::SOP_OpenVDB_Points_Convert(OP_Network* net, const char* name, OP_Operator* op) : hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// // Enable or disable parameters in the UI. bool SOP_OpenVDB_Points_Convert::updateParmsFlags() { bool changed = false; const bool toVdbPoints = evalInt("conversion", 0, 0) == 0; const bool toMask = evalInt("conversion", 0, 0) == 2; const bool toCount = evalInt("conversion", 0, 0) == 3; const bool convertAll = evalInt("mode", 0, 0) == 0; const auto transform = evalInt("transform", 0, 0); changed |= enableParm("group", !toVdbPoints); changed |= setVisibleState("group", !toVdbPoints); changed |= enableParm("vdbpointsgroup", !toVdbPoints); changed |= setVisibleState("vdbpointsgroup", !toVdbPoints); changed |= enableParm("name", toVdbPoints); changed |= setVisibleState("name", toVdbPoints); changed |= enableParm("outputname", toCount || toMask); changed |= setVisibleState("outputname", toCount || toMask); const bool useCustomName = (getOutputNameMode(evalStdString("outputname", 0.0)) != NAME_KEEP); changed |= enableParm("countname", useCustomName && toCount); changed |= setVisibleState("countname", toCount); changed |= enableParm("maskname", useCustomName && toMask); changed |= setVisibleState("maskname", toMask); changed |= enableParm("keep", !toVdbPoints); changed |= setVisibleState("keep", !toVdbPoints); const int refexists = (this->nInputs() == 2); changed |= enableParm("transform", toVdbPoints); changed |= setVisibleState("transform", toVdbPoints); changed |= enableParm("refvdb", refexists); changed |= setVisibleState("refvdb", toVdbPoints); changed |= enableParm("voxelsize", toVdbPoints && transform == TRANSFORM_VOXEL_SIZE); changed |= setVisibleState("voxelsize", toVdbPoints && transform == TRANSFORM_VOXEL_SIZE); changed |= enableParm("pointspervoxel", toVdbPoints && transform == TRANSFORM_TARGET_POINTS); changed |= setVisibleState("pointspervoxel", toVdbPoints && transform == 
TRANSFORM_TARGET_POINTS); changed |= setVisibleState("transferheading", toVdbPoints); changed |= enableParm("poscompression", toVdbPoints); changed |= setVisibleState("poscompression", toVdbPoints); changed |= enableParm("mode", toVdbPoints); changed |= setVisibleState("mode", toVdbPoints); changed |= enableParm("attrList", toVdbPoints && !convertAll); changed |= setVisibleState("attrList", toVdbPoints && !convertAll); changed |= enableParm("normalcompression", toVdbPoints && convertAll); changed |= setVisibleState("normalcompression", toVdbPoints && convertAll); changed |= enableParm("colorcompression", toVdbPoints && convertAll); changed |= setVisibleState("colorcompression", toVdbPoints && convertAll); return changed; } //////////////////////////////////////// OUTPUT_NAME_MODE SOP_OpenVDB_Points_Convert::getOutputNameMode(const std::string& modeName) { if (modeName == "append") return NAME_APPEND; if (modeName == "replace") return NAME_REPLACE; return NAME_KEEP; } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Points_Convert::Cache::cookVDBSop(OP_Context& context) { try { hvdb::Interrupter boss{"Converting points"}; hvdb::WarnFunc warnFunction = [this](const std::string& msg) { this->addWarning(SOP_MESSAGE, msg.c_str()); }; const fpreal time = context.getTime(); const int conversion = static_cast<int>(evalInt("conversion", 0, time)); const bool keepOriginalGeo = evalInt("keep", 0, time) == 1; const GA_PrimitiveGroup* group = (conversion != MODE_CONVERT_TO_VDB) ? matchGroup(*inputGeo(0), evalStdString("group", time)) : nullptr; // Extract VDB Point groups to filter const std::string pointsGroup = evalStdString("vdbpointsgroup", time); std::vector<std::string> includeGroups; std::vector<std::string> excludeGroups; if (conversion != MODE_CONVERT_TO_VDB) { openvdb::points::AttributeSet::Descriptor::parseNames( includeGroups, excludeGroups, pointsGroup); } // Optionally copy transform parameters from reference grid (if not converting from VDB). Transform::Ptr transform; if (conversion != MODE_CONVERT_FROM_VDB) { if (const GU_Detail* refGeo = inputGeo(1, context)) { const GA_PrimitiveGroup* refGroup = matchGroup(*refGeo, evalStdString("refvdb", time)); hvdb::VdbPrimCIterator it(refGeo, refGroup); const hvdb::GU_PrimVDB* refPrim = *it; if (!refPrim) { addError(SOP_MESSAGE, "Second input has no VDB primitives."); return error(); } transform = refPrim->getGrid().transform().copy(); } } // handle to VDB, count and mask conversion options if (conversion != MODE_CONVERT_TO_VDB) { UT_Array<GEO_Primitive*> primsToDelete; primsToDelete.clear(); if (keepOriginalGeo) { // Duplicate primary (left) input geometry if (const auto* input0 = inputGeo(0)) { gdp->replaceWith(*input0); } else { gdp->stashAll(); } // Extract VDB primitives to delete for (hvdb::VdbPrimIterator vdbIt(gdp, group); vdbIt; ++vdbIt) { openvdb::GridBase::ConstPtr gridBase = vdbIt->getConstGridPtr(); PointDataGrid::ConstPtr points = openvdb::GridBase::constGrid<PointDataGrid>(gridBase); if (!points) continue; primsToDelete.append(*vdbIt); } } else { gdp->stashAll(); } // Extract point grids and names for conversion std::vector<PointDataGrid::ConstPtr> pointGrids; std::vector<std::string> pointNames; const GU_Detail* sourceGdp = keepOriginalGeo ? 
gdp : inputGeo(0, context); for (hvdb::VdbPrimCIterator vdbIt(sourceGdp, group); vdbIt; ++vdbIt) { openvdb::GridBase::ConstPtr gridBase = vdbIt->getConstGridPtr(); PointDataGrid::ConstPtr points = openvdb::GridBase::constGrid<PointDataGrid>(gridBase); if (!points) continue; pointGrids.push_back(points); if (conversion != MODE_CONVERT_FROM_VDB) { const std::string gridName = vdbIt.getPrimitiveName().toStdString(); pointNames.push_back(gridName); } } if (keepOriginalGeo) { gdp->deletePrimitives(primsToDelete, true); } if (conversion == MODE_CONVERT_FROM_VDB) { // passing an empty vector of attribute names implies that // all attributes should be converted const std::vector<std::string> emptyNameVector; // if all point data is being converted, sequentially pre-fetch any out-of-core // data for faster performance when using delayed-loading const bool allData = emptyNameVector.empty() && includeGroups.empty() && excludeGroups.empty(); for (const PointDataGrid::ConstPtr &grid : pointGrids) { GU_Detail geo; // if all the data is being loaded, prefetch it for faster load performance if (allData) { prefetch(grid->tree()); } // perform conversion hvdb::convertPointDataGridToHoudini( geo, *grid, emptyNameVector, includeGroups, excludeGroups); const MetaMap& metaMap = *grid; hvdb::convertMetadataToHoudini(geo, metaMap, warnFunction); gdp->merge(geo); } return error(); } else { const auto outputName = getOutputNameMode(evalStdString("outputname", time)); size_t i = 0; for (const PointDataGrid::ConstPtr &grid : pointGrids) { assert(i < pointNames.size()); const std::string gridName = pointNames[i++]; GU_Detail geo; if (conversion == MODE_GENERATE_MASK) { openvdb::BoolGrid::Ptr maskGrid; auto leaf = grid->tree().cbeginLeaf(); if (leaf) { MultiGroupFilter filter(includeGroups, excludeGroups, leaf->attributeSet()); if (transform) { maskGrid = openvdb::points::convertPointsToMask( *grid, *transform, filter); } else { maskGrid = openvdb::points::convertPointsToMask( *grid, filter); } } else { maskGrid = openvdb::BoolGrid::create(); } const std::string customName = evalStdString("maskname", time); std::string vdbName; switch (outputName) { case NAME_KEEP: vdbName = gridName; break; case NAME_APPEND: vdbName = gridName + customName; break; case NAME_REPLACE: vdbName = customName; break; } hvdb::createVdbPrimitive(*gdp, maskGrid, vdbName.c_str()); } else { openvdb::Int32Grid::Ptr countGrid; auto leaf = grid->tree().cbeginLeaf(); if (leaf) { MultiGroupFilter filter(includeGroups, excludeGroups, leaf->attributeSet()); if (transform) { countGrid = openvdb::points::pointCountGrid( *grid, *transform, filter); } else { countGrid = openvdb::points::pointCountGrid( *grid, filter); } } else { countGrid = openvdb::Int32Grid::create(); } const std::string customName = evalStdString("maskname", time); std::string vdbName; switch (outputName) { case NAME_KEEP: vdbName = gridName; break; case NAME_APPEND: vdbName = gridName + customName; break; case NAME_REPLACE: vdbName = customName; break; } hvdb::createVdbPrimitive(*gdp, countGrid, vdbName.c_str()); } } return error(); } } // if we're here, we're converting Houdini points to OpenVDB. 
Clear gdp entirely // before proceeding, then check for particles in the primary (left) input port gdp->clearAndDestroy(); const GU_Detail* ptGeo = inputGeo(0, context); GU_Detail nonConstDetail; // tmp storage of unpacked geo const GU_Detail* detail; // ptr to geo to convert; either ptGeo or nonConstDetail boss.start(); // Unpack any packed primitives std::unique_ptr<GA_PrimitiveGroup> packgroup(ptGeo->newDetachedPrimitiveGroup()); for (GA_Iterator it(ptGeo->getPrimitiveRange()); !it.atEnd(); ++it) { GA_Offset offset = *it; const GA_Primitive* primitive = ptGeo->getPrimitive(offset); if (!primitive || !GU_PrimPacked::isPackedPrimitive(*primitive)) continue; const GU_PrimPacked* packedPrimitive = static_cast<const GU_PrimPacked*>(primitive); packedPrimitive->unpack(nonConstDetail); packgroup->addOffset(offset); } if (packgroup->entries() == 0) { // If no packed geometry was converted, avoid the merge by // simply using the original geometry detail = ptGeo; } else { // Convert the prim group to points - we have to use mergePoints // instead of mergePrims to make sure we merge points that are not // associated with any primitive GA_PointGroup pointsWithPackedPrims(*ptGeo); pointsWithPackedPrims.combine(packgroup.get()); // Merge everything except the geo associated with prims we've just unpacked nonConstDetail.mergePoints(*ptGeo, GA_Range(pointsWithPackedPrims, /*invert*/true)); detail = &nonConstDetail; } packgroup.reset(); // Configure the transform const auto transformMode = evalInt("transform", 0, time); math::Mat4d matrix(math::Mat4d::identity()); if (transform && transformMode != TRANSFORM_REF_GRID) { const math::AffineMap::ConstPtr affineMap = transform->baseMap()->getAffineMap(); matrix = affineMap->getMat4(); } else if (!transform && transformMode == TRANSFORM_REF_GRID) { addError(SOP_MESSAGE, "No target VDB transform found on second input."); return error(); } if (transformMode == TRANSFORM_TARGET_POINTS) { const int pointsPerVoxel = static_cast<int>(evalInt("pointspervoxel", 0, time)); const float voxelSize = hvdb::computeVoxelSizeFromHoudini(*detail, pointsPerVoxel, matrix, /*rounding*/ 5, boss); matrix.preScale(Vec3d(voxelSize) / math::getScale(matrix)); transform = Transform::createLinearTransform(matrix); } else if (transformMode == TRANSFORM_VOXEL_SIZE) { const auto voxelSize = evalFloat("voxelsize", 0, time); matrix.preScale(Vec3d(voxelSize) / math::getScale(matrix)); transform = Transform::createLinearTransform(matrix); } // Convert UT_String attrName; openvdb_houdini::AttributeInfoMap attributes; if (evalInt("mode", 0, time) != 0) { // Transfer point attributes. 
if (evalInt("attrList", 0, time) > 0) { for (int i = 1, N = static_cast<int>(evalInt("attrList", 0, 0)); i <= N; ++i) { evalStringInst("attribute#", &i, attrName, 0, 0); const Name attributeName = Name(attrName); const GA_ROAttributeRef attrRef = detail->findPointAttribute(attributeName.c_str()); if (!attrRef.isValid()) continue; const GA_Attribute* const attribute = attrRef.getAttribute(); if (!attribute) continue; const GA_Storage storage(hvdb::attributeStorageType(attribute)); // only tuple and string tuple attributes are supported if (storage == GA_STORE_INVALID) { throw std::runtime_error{"Invalid attribute type - " + attributeName}; } const int16_t width(hvdb::attributeTupleSize(attribute)); assert(width > 0); const GA_TypeInfo typeInfo(attribute->getOptions().typeInfo()); const bool isVector = width == 3 && (typeInfo == GA_TYPE_VECTOR || typeInfo == GA_TYPE_NORMAL || typeInfo == GA_TYPE_COLOR); const bool isQuaternion = width == 4 && (typeInfo == GA_TYPE_QUATERNION); const bool isMatrix = width == 16 && (typeInfo == GA_TYPE_TRANSFORM); int valueCompression = static_cast<int>( evalIntInst("valuecompression#", &i, 0, 0)); // check value compression compatibility with attribute type if (valueCompression != hvdb::COMPRESSION_NONE) { if (storage == GA_STORE_STRING) { // disable value compression for strings and add a SOP warning valueCompression = hvdb::COMPRESSION_NONE; warnFunction("Value compression not supported on string attributes." " Disabling compression for attribute \"" + attributeName + "\"."); } else { // disable value compression for incompatible types // and add a SOP warning if (valueCompression == hvdb::COMPRESSION_TRUNCATE && (storage != GA_STORE_REAL32 || isQuaternion || isMatrix)) { valueCompression = hvdb::COMPRESSION_NONE; warnFunction("Truncate value compression only supported for 32-bit" " floating-point attributes. Disabling compression for" " attribute \"" + attributeName + "\"."); } if (valueCompression == hvdb::COMPRESSION_UNIT_VECTOR && (storage != GA_STORE_REAL32 || !isVector)) { valueCompression = hvdb::COMPRESSION_NONE; warnFunction("Unit Vector value compression only supported for" " vector 3 x 32-bit floating-point attributes. " "Disabling compression for attribute \"" + attributeName + "\"."); } const bool isUnit = (valueCompression == hvdb::COMPRESSION_UNIT_FIXED_POINT_8 || valueCompression == hvdb::COMPRESSION_UNIT_FIXED_POINT_16); if (isUnit && (storage != GA_STORE_REAL32 || (width != 1 && !isVector))) { valueCompression = hvdb::COMPRESSION_NONE; warnFunction("Unit compression only supported for scalar and vector" " 3 x 32-bit floating-point attributes. 
" "Disabling compression for attribute \"" + attributeName + "\"."); } } } attributes[attributeName] = std::pair<int, bool>(valueCompression, false); } } } else { // point attribute names auto iter = detail->pointAttribs().begin(GA_SCOPE_PUBLIC); const auto normalCompression = evalInt("normalcompression", 0, time); const auto colorCompression = evalInt("colorcompression", 0, time); if (!iter.atEnd()) { for (; !iter.atEnd(); ++iter) { const char* str = (*iter)->getName(); if (!str) continue; const Name attributeName = str; if (attributeName == "P") continue; const GA_ROAttributeRef attrRef = detail->findPointAttribute(attributeName.c_str()); if (!attrRef.isValid()) continue; const GA_Attribute* const attribute = attrRef.getAttribute(); if (!attribute) continue; const GA_Storage storage(hvdb::attributeStorageType(attribute)); // only tuple and string tuple attributes are supported if (storage == GA_STORE_INVALID) { throw std::runtime_error{"Invalid attribute type - " + attributeName}; } const int16_t width(hvdb::attributeTupleSize(attribute)); assert(width > 0); const GA_TypeInfo typeInfo(attribute->getOptions().typeInfo()); const bool isNormal = width == 3 && typeInfo == GA_TYPE_NORMAL; const bool isColor = width == 3 && typeInfo == GA_TYPE_COLOR; int valueCompression = hvdb::COMPRESSION_NONE; if (isNormal) { if (normalCompression == 1) { valueCompression = hvdb::COMPRESSION_UNIT_VECTOR; } else if (normalCompression == 2) { valueCompression = hvdb::COMPRESSION_TRUNCATE; } } else if (isColor) { if (colorCompression == 1) { valueCompression = hvdb::COMPRESSION_UNIT_FIXED_POINT_16; } else if (colorCompression == 2) { valueCompression = hvdb::COMPRESSION_UNIT_FIXED_POINT_8; } else if (colorCompression == 3) { valueCompression = hvdb::COMPRESSION_TRUNCATE; } } attributes[attributeName] = std::pair<int, bool>(valueCompression, false); } } } // Determine position compression const int positionCompression = static_cast<int>(evalInt("poscompression", 0, time)); PointDataGrid::Ptr pointDataGrid = hvdb::convertHoudiniToPointDataGrid( *detail, positionCompression, attributes, *transform, warnFunction); hvdb::populateMetadataFromHoudini(*pointDataGrid, *detail, warnFunction); hvdb::createVdbPrimitive(*gdp, pointDataGrid, evalStdString("name", time).c_str()); boss.end(); } catch (const std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
39596
C++
36.891866
104
0.570613
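For reference, a minimal standalone sketch (not part of the repository) of the library-level conversion this SOP performs in its "Pack Points into VDB Points" mode: derive a voxel size from a target points-per-voxel count, build a linear transform, pack raw positions into a PointDataGrid, and read the point count back. The positions are made up for illustration; only the openvdb library is assumed.

// Sketch only: positions, density and names are illustrative placeholders.
#include <openvdb/openvdb.h>
#include <openvdb/points/PointConversion.h>  // createPointDataGrid, computeVoxelSize
#include <openvdb/points/PointCount.h>       // pointCount
#include <iostream>
#include <vector>

int main()
{
    openvdb::initialize();

    // A handful of world-space positions standing in for the incoming Houdini points.
    std::vector<openvdb::Vec3R> positions;
    positions.push_back(openvdb::Vec3R(0.0, 0.0, 0.0));
    positions.push_back(openvdb::Vec3R(0.5, 0.1, 0.0));
    positions.push_back(openvdb::Vec3R(1.0, 1.0, 1.0));
    positions.push_back(openvdb::Vec3R(-0.3, 0.7, 0.2));

    // "Using Target Points Per Voxel": derive a voxel size from the desired density.
    openvdb::points::PointAttributeVector<openvdb::Vec3R> wrapper(positions);
    const float voxelSize = openvdb::points::computeVoxelSize(wrapper, /*pointsPerVoxel=*/8);
    openvdb::math::Transform::Ptr xform =
        openvdb::math::Transform::createLinearTransform(voxelSize);

    // Pack the positions into a VDB Points grid (uncompressed positions, i.e. "None").
    openvdb::points::PointDataGrid::Ptr grid =
        openvdb::points::createPointDataGrid<openvdb::points::NullCodec,
            openvdb::points::PointDataGrid>(positions, *xform);
    grid->setName("points");

    std::cout << "voxel size: " << voxelSize
              << ", stored points: " << openvdb::points::pointCount(grid->tree()) << std::endl;
    return 0;
}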
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Potential_Flow.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file SOP_OpenVDB_Potential_Flow.cc /// /// @authors Todd Keeler, Dan Bailey #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/Types.h> #include <openvdb/tools/PotentialFlow.h> #include <openvdb/tools/TopologyToLevelSet.h> #include <UT/UT_Interrupt.h> #include <UT/UT_Version.h> #include <GU/GU_Detail.h> #include <PRM/PRM_Parm.h> #include <algorithm> #include <cmath> #include <iomanip> #include <sstream> #include <stdexcept> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; namespace { const int DEFAULT_MAX_ITERATIONS = 10000; const double DEFAULT_MAX_ERROR = 1.0e-20; } // SOP Implementation struct SOP_OpenVDB_Potential_Flow: public hvdb::SOP_NodeVDB { SOP_OpenVDB_Potential_Flow(OP_Network*, const char* name, OP_Operator*); static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned i) const override { return (i == 1); } class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; protected: bool updateParmsFlags() override; }; // SOP_OpenVDB_Potential_Flow //////////////////////////////////////// void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setHelpText("Specify grids to process") .setChoiceList(&hutil::PrimGroupMenuInput1)); parms.add(hutil::ParmFactory(PRM_STRING, "velocity", "Velocity VDB") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip( "Name of the reference VDB volume whose active voxels denote solid obstacles\n\n" "If multiple volumes are selected, only the first one will be used.")); parms.add(hutil::ParmFactory(PRM_STRING, "maskvdbname", "Mask VDB") .setHelpText("VDB (from the second input) used to modify the solution domain.") .setChoiceList(&hutil::PrimGroupMenuInput2) .setDocumentation( "A VDB from the second input used to modify the volume where the potential flow" " will be solved. The domain can either be restricted to the VDB input, or excluded" " from expanding in the VDB input.")); parms.add(hutil::ParmFactory(PRM_ORD, "masktype", "Domain Mask Type") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "intersection", "Intersection", "difference", "Difference" }) .setTooltip("Mode for applying the domain modification mask (second input)") .setDocumentation("Modify the constructed domain using the second input VDB mask for" " calculating the potential flow velocity. __Intersection__ causes the created" " domain to be restricted to the Mask's active topology. __Difference__ removes" " any overlap between the constructed domain and the topology. 
The domain geometry" " will likely change the results")); parms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep1", "")); { std::ostringstream ostr; ostr << "If disabled, limit the potential flow solver to " << DEFAULT_MAX_ITERATIONS << " iterations."; const std::string tooltip = ostr.str(); parms.add(hutil::ParmFactory(PRM_TOGGLE, "useiterations", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip(tooltip.c_str())); parms.add(hutil::ParmFactory(PRM_INT_J, "iterations", "Iterations") .setDefault(1000) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 2000) .setTooltip("Maximum number of iterations of the potential flow solver") .setDocumentation( ("Maximum number of iterations of the potential flow solver\n\n" + tooltip).c_str())); } { std::ostringstream ostr; ostr << "If disabled, limit the potential flow solver error to " << std::setprecision(3) << DEFAULT_MAX_ERROR << "."; const std::string tooltip = ostr.str(); parms.add(hutil::ParmFactory(PRM_TOGGLE, "usetolerance", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip(tooltip.c_str())); ostr.str(""); ostr << "If disabled, limit the potential flow solver error to 10<sup>" << int(std::log10(DEFAULT_MAX_ERROR)) << "</sup>."; parms.add(hutil::ParmFactory(PRM_FLT_J, "tolerance", "Tolerance") .setDefault(openvdb::math::Delta<float>::value()) .setRange(PRM_RANGE_RESTRICTED, DEFAULT_MAX_ERROR, PRM_RANGE_UI, 1) .setTooltip( "The potential flow solver is deemed to have converged when\n" "the magnitude of the absolute error is less than this tolerance.") .setDocumentation( ("The potential flow solver is deemed to have converged when" " the magnitude of the absolute error is less than this tolerance.\n\n" + ostr.str()).c_str())); } // Toggle between world- and index-space units for offset parms.add(hutil::ParmFactory(PRM_TOGGLE, "useworldspace", "Use World Space Units") .setDefault(PRMzeroDefaults) .setTooltip("If enabled, use world-space units, otherwise use voxels.")); // Stencil width parms.add(hutil::ParmFactory(PRM_INT_J, "dilationvoxels", "Dilation Voxels") .setDefault(PRMtenDefaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 100) .setTooltip( " The number of voxels to dilate the incoming geometry to create the domain" " in which the potential flow will be computed") .setDocumentation( " The number of voxels to dilate the incoming geometry to create the domain" " in which the potential flow will be computed")); parms.add(hutil::ParmFactory(PRM_FLT_J, "dilation", "Dilation") .setDefault(1.0) .setRange(PRM_RANGE_RESTRICTED, 1e-5, PRM_RANGE_UI, 100) .setTooltip( " The distance in world space units to dilate the incoming geometry to create" " the domain in which the potential flow will be computed") .setDocumentation( " The distance in world space units to dilate the incoming geometry to create" " the domain in which the potential flow will be computed")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "usebackgroundvelocity", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("If enabled, a background velocity will be applied regardless of any velocity" " VDBs that are also provided. 
This can be used to create a similar effect" " to an object in a wind tunnel.")); std::vector<fpreal> backgrounddefault{1,0,0}; parms.add(hutil::ParmFactory(PRM_XYZ_J, "backgroundvelocity", "Background Velocity") .setVectorSize(3) .setDefault(backgrounddefault) .setTooltip("A constant background fluid velocity")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "applybackgroundvelocity", "Apply Background Velocity to Flow Field") .setDefault(PRMoneDefaults) .setTooltip("If enabled, apply the background velocity to the resulting flow field." " This can be useful for a simulation where particles are simply advected through a" " passive velocity field. In disabling this, a similar result can be achieved by" " sampling the velocity and adding to an existing velocity point attribute")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "outputpotential", "Output Potential") .setDefault(PRMzeroDefaults) .setTooltip("Output the scalar potential")); // Register this operator. hvdb::OpenVDBOpFactory("VDB Potential Flow", SOP_OpenVDB_Potential_Flow::factory, parms, *table) #if UT_VERSION_INT < 0x11050000 // earlier than 17.5.0 .setNativeName("") #endif .addInput("VDB Surface and optional velocity VDB") .addOptionalInput("Optional VDB Mask") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Potential_Flow::Cache; }) .setDocumentation( "#icon: COMMON/openvdb\n" "#tags: vdb\n" "\n" "\"\"\"Generate Potential Flow VDB velocity field. \"\"\"\n" "\n" "@overview\n" "\n" " Potential flow is the non-rotational flow of a fluid around solid moving or deforming" " objects constructed only from velocity values on their surface." " This flow field is not time-dependent and does not require an input fluid flow from the" " previous frame." " With the combination of procedural curl noise, this operator can construct fluid" " flows around obstacles without simulation dependencies, and therefore allows frames to " " be computed in parallel." " The potential flow field is generally used to create a flow field that cancels out movement" " of fluid into or out of a solid object." " A constant surface velocity is given as a parameter to the node, and an additional variable" " surface velocity can also be defined via a velocity VDB added to the first input." " When both are defined, they are added together on the surface boundary." " For objects in three dimensions, the potential flow decays at greater distances to the" " boundary.\n\n" " The node automatically creates the domain of the flow field by dilating the initial solid" " object boundaries." " It is up to the user to determine the dilation extent and therefore velocity decay needed" " for their application." " The primary input is a VDB signed distance field (SDF) on the first input." " The resolution and grid transform for the new velocity field will be taken from the input" " SDF." " If there are multiple SDFs only the first one is used, it is recommended to sample multiple" " SDFs into a single one for multiple obstacles." " This SDF can be accompanied by a VDB velocity field which will be used to impart the SDF" " velocity into the solver." " The potential flow created is divergence free by design and has the same velocity on the" " boundary as the background velocity.\n\n" " The simplest workflow for multiple moving objects is to animate the polygonal geometry and" " then create SDFs and velocity VDBs by using the VDB from Polygons node." " The output can be fed directly into the first input of the Potential Flow SOP." 
" The second input of the SOP allows a Mask VDB input for modifiying the solution domain" " created by the Potential Flow SOP." " The created domain can either be restricted to the active voxels of the Mask VDB, or" " restricted from creating a domain inside the active voxels." " These modes are defined by the respective __Intersection__ or __Difference__ modes on the" " parameter toggle" ); } // Enable or disable parameters in the UI. bool SOP_OpenVDB_Potential_Flow::updateParmsFlags() { bool changed = false; const bool worldUnits = bool(evalInt("useworldspace", 0, 0)); changed |= enableParm("dilationvoxels", !worldUnits); changed |= setVisibleState("dilationvoxels", !worldUnits); changed |= enableParm("dilation", worldUnits); changed |= setVisibleState("dilation", worldUnits); const bool hasMask = (2 == nInputs()); changed |= enableParm("maskvdbname", hasMask); changed |= enableParm("masktype", hasMask); changed |= enableParm("iterations", bool(evalInt("useiterations", 0, 0))); changed |= enableParm("tolerance", bool(evalInt("usetolerance", 0, 0))); changed |= enableParm("backgroundvelocity", bool(evalInt("usebackgroundvelocity", 0, 0))); changed |= enableParm("applybackgroundvelocity", bool(evalInt("usebackgroundvelocity", 0, 0))); return changed; } OP_Node* SOP_OpenVDB_Potential_Flow::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Potential_Flow(net, name, op); } SOP_OpenVDB_Potential_Flow::SOP_OpenVDB_Potential_Flow( OP_Network* net, const char* name, OP_Operator* op) : hvdb::SOP_NodeVDB(net, name, op) { } namespace { struct MaskOp { template<typename GridType> void operator()(const GridType& grid) { mMaskGrid = openvdb::tools::interiorMask(grid); } openvdb::BoolGrid::Ptr mMaskGrid; }; struct MaskToLevelSetOp { template<typename GridType> void operator()(const GridType& grid) { mSdfGrid = openvdb::tools::topologyToLevelSet(grid); } openvdb::FloatGrid::Ptr mSdfGrid; }; template <typename VelGridT, typename MaskGridT> struct PotentialFlowOp { using GridT = typename VelGridT::template ValueConverter<typename VelGridT::ValueType::value_type>::Type; using VecT = typename VelGridT::ValueType; using ScalarT = typename GridT::ValueType; PotentialFlowOp(const openvdb::FloatGrid& solidBoundary, const MaskGridT& domain, const typename VelGridT::ConstPtr& boundaryVelocity, const VecT& backgroundVelocity, const bool applyBackgroundVelocity) : mSolidBoundary(solidBoundary) , mDomain(domain) , mBoundaryVelocity(boundaryVelocity) , mBackgroundVelocity(backgroundVelocity) , mApplyBackgroundVelocity(applyBackgroundVelocity) { } openvdb::math::pcg::State process(int iterations, float absoluteError) { using namespace openvdb; typename VelGridT::Ptr neumann = tools::createPotentialFlowNeumannVelocities( mSolidBoundary, mDomain, mBoundaryVelocity, mBackgroundVelocity); // create solver state math::pcg::State state = math::pcg::terminationDefaults<ScalarT>(); state.iterations = iterations; state.absoluteError = absoluteError; state.relativeError = 0.0; potential = tools::computeScalarPotential(mDomain, *neumann, state); if (mApplyBackgroundVelocity) { flowvel = tools::computePotentialFlow(*potential, *neumann, mBackgroundVelocity); } else { flowvel = tools::computePotentialFlow(*potential, *neumann); } return state; } typename VelGridT::Ptr flowvel; typename GridT::Ptr potential; private: const openvdb::FloatGrid& mSolidBoundary; const MaskGridT& mDomain; const typename VelGridT::ConstPtr mBoundaryVelocity; const VecT mBackgroundVelocity; const bool 
mApplyBackgroundVelocity; }; // struct PotentialFlowOp } // unnamed namespace OP_ERROR SOP_OpenVDB_Potential_Flow::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); hvdb::Interrupter boss("Computing Potential Flow"); const GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", time)); const std::string velocity = evalStdString("velocity", time); const GA_PrimitiveGroup* velocityGroup = matchGroup(*gdp, velocity); // SOP currently only supports float level sets using SdfGridT = openvdb::FloatGrid; using MaskGridT = openvdb::BoolGrid; typename SdfGridT::ConstPtr grid; const GU_PrimVDB * velGridPrim(nullptr); hvdb::VdbPrimCIterator vdbIt(gdp, group); // find the first level set for (; vdbIt; ++vdbIt) { if (boss.wasInterrupted()) break; const openvdb::GridClass gridClass = vdbIt->getGrid().getGridClass(); if (!grid && vdbIt->getStorageType() == UT_VDB_FLOAT && gridClass == openvdb::GRID_LEVEL_SET) { grid = openvdb::gridConstPtrCast<SdfGridT>(vdbIt->getGridPtr()); } } // find a vec3 grid for velocity if (!velocity.empty()) { hvdb::VdbPrimCIterator velocityIt(gdp, velocityGroup); for (; velocityIt; ++velocityIt) { if (!velGridPrim && (velocityIt->getStorageType() == UT_VDB_VEC3F || velocityIt->getStorageType() == UT_VDB_VEC3D)) { velGridPrim = *velocityIt; } } } else { hvdb::VdbPrimCIterator velocityIt(gdp); for (; velocityIt; ++velocityIt) { if (!velGridPrim && (velocityIt->getStorageType() == UT_VDB_VEC3F || velocityIt->getStorageType() == UT_VDB_VEC3D)) { velGridPrim = *velocityIt; } } } // if no level set found, use the topology of the first VDB and turn it into a level set if (!grid) { for (; vdbIt; ++vdbIt) { if (boss.wasInterrupted()) break; MaskToLevelSetOp op; if (hvdb::GEOvdbApply<hvdb::AllGridTypes>(**vdbIt, op)) { grid = op.mSdfGrid; } } } if (grid) { // Check for mask input const GU_Detail* maskGeo = inputGeo(1); MaskGridT::Ptr mask; if (maskGeo) { const GA_PrimitiveGroup* maskGroup = parsePrimitiveGroups( evalStdString("maskvdbname", time).c_str(), GroupCreator(maskGeo)); hvdb::VdbPrimCIterator maskIt(maskGeo, maskGroup); if (maskIt) { MaskOp op; if (hvdb::GEOvdbApply<hvdb::AllGridTypes>(**maskIt, op)) { mask = op.mMaskGrid; } else { addWarning(SOP_MESSAGE, "Cannot convert VDB type to mask."); } if (mask && mask->transform() != grid->transform()) { MaskGridT::Ptr resampledMask = mask->copy(); resampledMask->setTransform(grid->transform().copy()); // resample the mask to match the boundary level set openvdb::tools::resampleToMatch<openvdb::tools::PointSampler>( *mask, *resampledMask); } } } // dilate mask topology by world-space distance const bool useWorldSpace = static_cast<int>(evalInt("useworldspace", 0, time)) == 1; int dilation; if (useWorldSpace) { const double dilationDistance(static_cast<float>(evalFloat("dilation", 0, time))); dilation = std::max(1, static_cast<int>(dilationDistance / grid->voxelSize()[0])); } else { dilation = static_cast<int>(evalInt("dilationvoxels", 0, time)); } auto domain = openvdb::tools::createPotentialFlowMask(*grid, dilation); if (mask) { if (static_cast<int>(evalInt("masktype", 0, time)) == /*intersection*/0) { domain->treePtr()->topologyIntersection(mask->tree()); } else { domain->treePtr()->topologyDifference(mask->tree()); } } const int iterations = (static_cast<int>(evalInt("useiterations", 0, time)) == 1 ? static_cast<int>(evalInt("iterations", 0, time)) : DEFAULT_MAX_ITERATIONS); const float absoluteError = static_cast<float>( static_cast<int>(evalInt("usetolerance", 0, time)) == 1 ? 
evalFloat("tolerance", 0, time) : DEFAULT_MAX_ERROR); openvdb::Vec3f backgroundVelocity(0); bool applyBackground(false); const bool useBackgroundVelocity = static_cast<int>(evalInt("usebackgroundvelocity", 0, time)) == 1; if (useBackgroundVelocity) { backgroundVelocity = openvdb::Vec3f( static_cast<float>(evalFloat("backgroundvelocity", 0, time)), static_cast<float>(evalFloat("backgroundvelocity", 1, time)), static_cast<float>(evalFloat("backgroundvelocity", 2, time))); applyBackground = static_cast<int>( evalInt("applybackgroundvelocity", 0, time)) == 1; } const bool outputPotential = static_cast<int>( evalInt("outputpotential", 0, time)) == 1; openvdb::math::pcg::State solverState; if (velGridPrim && velGridPrim->getStorageType() == UT_VDB_VEC3D) { openvdb::Vec3d backgroundVelocityD( backgroundVelocity[0], backgroundVelocity[1], backgroundVelocity[2]); openvdb::Vec3dGrid::ConstPtr velGrid = openvdb::gridConstPtrCast<openvdb::Vec3dGrid>(velGridPrim->getGridPtr()); PotentialFlowOp<openvdb::Vec3dGrid, openvdb::MaskGrid> potentialFlowOp( *grid, *domain, velGrid, backgroundVelocityD, applyBackground); solverState = potentialFlowOp.process(iterations, absoluteError); hvdb::createVdbPrimitive(*gdp, potentialFlowOp.flowvel, "flowvel"); if (outputPotential) { hvdb::createVdbPrimitive(*gdp, potentialFlowOp.potential, "potential"); } } else { openvdb::Vec3fGrid::ConstPtr velGrid; if (velGridPrim && velGridPrim->getStorageType() == UT_VDB_VEC3F) { velGrid = openvdb::gridConstPtrCast<openvdb::Vec3fGrid>( velGridPrim->getGridPtr()); } PotentialFlowOp<openvdb::Vec3fGrid, openvdb::MaskGrid> potentialFlowOp( *grid, *domain, velGrid, backgroundVelocity, applyBackground); solverState = potentialFlowOp.process(iterations, absoluteError); hvdb::createVdbPrimitive(*gdp, potentialFlowOp.flowvel, "flowvel"); if (outputPotential) { hvdb::createVdbPrimitive(*gdp, potentialFlowOp.potential, "potential"); } } if (!solverState.success) { std::ostringstream errStrm; errStrm << "potential flow failed to converge " << " with error " << solverState.absoluteError; addWarning(SOP_MESSAGE, errStrm.str().c_str()); } else { std::ostringstream infoStrm; infoStrm << "solver converged in " << solverState.iterations << " iteration" << (solverState.iterations == 1 ? "" : "s") << " with error " << solverState.absoluteError; const std::string info = infoStrm.str(); if (!info.empty()) { addMessage(SOP_MESSAGE, info.c_str()); } } } if (!grid && !boss.wasInterrupted()) { addWarning(SOP_MESSAGE, "No valid VDB primitives found."); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
23391
C++
39.611111
99
0.619982
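For reference, a minimal standalone sketch (not part of the repository) of the tools::PotentialFlow sequence this SOP drives: build a dilated domain mask from a solid level set, create Neumann boundary velocities from a constant background velocity, solve for the scalar potential, and derive the divergence-free flow field. The sphere, dilation and solver settings are placeholder values; only the openvdb library is assumed.

// Sketch only: mirrors the calls made by PotentialFlowOp::process above.
#include <openvdb/openvdb.h>
#include <openvdb/tools/LevelSetSphere.h>
#include <openvdb/tools/PotentialFlow.h>
#include <iostream>

int main()
{
    openvdb::initialize();

    // Solid obstacle: a unit-sphere SDF standing in for the SOP's first input.
    openvdb::FloatGrid::Ptr sphere = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/1.0f, /*center=*/openvdb::Vec3f(0.0f), /*voxelSize=*/0.25f);

    // Dilate the obstacle topology to form the solution domain (cf. "Dilation Voxels").
    auto domain = openvdb::tools::createPotentialFlowMask(*sphere, /*dilation=*/10);

    // Constant background velocity only; no per-voxel boundary velocity grid is supplied.
    const openvdb::Vec3f backgroundVelocity(1.0f, 0.0f, 0.0f);
    openvdb::Vec3fGrid::ConstPtr boundaryVelocity;  // null, as when no velocity VDB is found
    auto neumann = openvdb::tools::createPotentialFlowNeumannVelocities(
        *sphere, *domain, boundaryVelocity, backgroundVelocity);

    // Termination criteria, mirroring the SOP's iteration/tolerance toggles.
    openvdb::math::pcg::State state = openvdb::math::pcg::terminationDefaults<float>();
    state.iterations = 1000;
    state.absoluteError = 1e-5;
    state.relativeError = 0.0;

    auto potential = openvdb::tools::computeScalarPotential(*domain, *neumann, state);
    auto flow = openvdb::tools::computePotentialFlow(*potential, *neumann, backgroundVelocity);

    std::cout << (state.success ? "converged" : "did not converge")
              << " in " << state.iterations << " iteration(s), error " << state.absoluteError
              << "; flow field voxels: " << flow->activeVoxelCount() << std::endl;
    return 0;
}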
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/GU_PrimVDB.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /* * Copyright (c) Side Effects Software Inc. * * Produced by: * Side Effects Software Inc * 477 Richmond Street West * Toronto, Ontario * Canada M5V 3E7 * 416-504-9876 * * NAME: GU_PrimVDB.h ( GU Library, C++) * * COMMENTS: Custom VDB primitive. */ #include <UT/UT_Version.h> // Using the native OpenVDB Primitive shipped with Houdini is strongly recommended, // as there is no guarantee that this code will be kept in sync with Houdini. // However, for debugging it can be useful, so supply -DSESI_OPENVDB_PRIM to // the compiler to build this custom primitive. #if !defined(SESI_OPENVDB) && !defined(SESI_OPENVDB_PRIM) #include <GU/GU_PrimVDB.h> namespace openvdb_houdini { using ::GU_PrimVDB; } #else // SESI_OPENVDB || SESI_OPENVDB_PRIM #ifndef __HDK_GU_PrimVDB__ #define __HDK_GU_PrimVDB__ #include <GA/GA_PrimitiveDefinition.h> #include "GEO_PrimVDB.h" #include <GU/GU_Detail.h> #include <UT/UT_Matrix4.h> #include <UT/UT_VoxelArray.h> #include <openvdb/Platform.h> #include <stddef.h> class GA_Attribute; class GEO_PrimVolume; class UT_MemoryCounter; class GEO_ConvertParms; typedef GEO_ConvertParms GU_ConvertParms; class OPENVDB_HOUDINI_API GU_PrimVDB : public GEO_PrimVDB { protected: /// NOTE: Primitives should not be deleted directly. They are managed /// by the GA_PrimitiveList and the stash. ~GU_PrimVDB() override {} public: /// NOTE: This constructor should only be called via GU_PrimitiveFactory. GU_PrimVDB(GU_Detail *gdp, GA_Offset offset=GA_INVALID_OFFSET) : GEO_PrimVDB(gdp, offset) {} /// Report approximate memory usage. int64 getMemoryUsage() const override; /// Count memory usage using a UT_MemoryCounter in order to count /// shared memory correctly. /// NOTE: This should always include sizeof(*this). void countMemory(UT_MemoryCounter &counter) const override; #ifndef SESI_OPENVDB /// Allows you to find out what this primitive type was named. static GA_PrimitiveTypeId theTypeId() { return theDefinition->getId(); } /// Must be invoked during the factory callback to add us to the /// list of primitives static void registerMyself(GA_PrimitiveFactory *factory); #endif const GA_PrimitiveDefinition &getTypeDef() const override { UT_ASSERT(theDefinition); return *theDefinition; } // Conversion Methods GEO_Primitive *convert(GU_ConvertParms &parms, GA_PointGroup *usedpts = 0) override; GEO_Primitive *convertNew(GU_ConvertParms &parms) override; /// Convert all GEO_PrimVolume primitives in geometry to /// GEO_PrimVDB, preserving prim/vertex/point attributes (and prim/point /// groups if requested). static void convertVolumesToVDBs( GU_Detail &dst_geo, const GU_Detail &src_geo, GU_ConvertParms &parms, bool flood_sdf, bool prune, fpreal tolerance, bool keep_original, bool activate_inside = true); /// Convert all GEO_PrimVDB primitives in geometry to parms.toType, /// preserving prim/vertex/point attributes (and prim/point groups if /// requested). /// @{ static void convertVDBs( GU_Detail &dst_geo, const GU_Detail &src_geo, GU_ConvertParms &parms, fpreal adaptivity, bool keep_original); static void convertVDBs( GU_Detail &dst_geo, const GU_Detail &src_geo, GU_ConvertParms &parms, fpreal adaptivity, bool keep_original, bool split_disjoint_volumes); /// @} // NOTE: For static member functions please call in the following // manner. <ptrvalue> = GU_PrimVDB::<functname> // i.e. 
partptr = GU_PrimVDB::build(params...); // Optional Build Method static GU_PrimVDB * build(GU_Detail *gdp, bool append_points = true); /// Store a VDB grid in a new VDB primitive and add the primitive /// to a geometry detail. /// @param gdp the detail to which to add the new primitive /// @param grid a grid to be associated with the new primitive /// @param src if non-null, copy attributes and groups from this primitive /// @param name if non-null, set the new primitive's @c name attribute to /// this string; otherwise, if @a src is non-null, use its name static SYS_FORCE_INLINE GU_PrimVDB* buildFromGrid(GU_Detail& gdp, openvdb::GridBase::Ptr grid, const GEO_PrimVDB* src = NULL, const char* name = NULL) { return GU_PrimVDB::buildFromGridAdapter(gdp, &grid, src, name); } /// Create new VDB primitive from the given native volume primitive static GU_PrimVDB * buildFromPrimVolume( GU_Detail &geo, const GEO_PrimVolume &vol, const char *name, const bool flood_sdf = false, const bool prune = false, const float tolerance = 0.0, const bool activate_inside_sdf = true); /// A fast method for converting a primitive volume to a polysoup via VDB /// into the given gdp. It will _not_ copy attributes because this is a /// special case used for display purposes only. static void convertPrimVolumeToPolySoup( GU_Detail &dst_geo, const GEO_PrimVolume &src_vol); void normal(NormalComp &output) const override; /// @brief Transfer any metadata associated with this primitive's /// VDB grid to primitive attributes. void syncAttrsFromMetadata(); /// @brief Transfer any metadata associated with a VDB grid /// to primitive attributes on a VDB primitive. /// @param prim the primitive to be populated with attributes /// @param grid the grid whose metadata should be transferred /// @param gdp the detail to which to transfer attributes static SYS_FORCE_INLINE void createGridAttrsFromMetadata( const GEO_PrimVDB& prim, const openvdb::GridBase& grid, GEO_Detail& gdp) { GU_PrimVDB::createGridAttrsFromMetadataAdapter(prim, &grid, gdp); } /// @brief Transfer any metadata associated with the given MetaMap /// to attributes on the given element specified by owner. /// @param owner the type of element /// @param element the offset of the element /// @param meta_map the metadata that should be transferred /// @param gdp the detail to which to transfer attributes static SYS_FORCE_INLINE void createAttrsFromMetadata( GA_AttributeOwner owner, GA_Offset element, const openvdb::MetaMap& meta_map, GEO_Detail& gdp) { GU_PrimVDB::createAttrsFromMetadataAdapter(owner, element, &meta_map, gdp); } /// @brief Transfer a VDB primitive's attributes to a VDB grid as metadata. /// @param grid the grid to be populated with metadata /// @param prim the primitive whose attributes should be transferred /// @param gdp the detail from which to retrieve primitive attributes static SYS_FORCE_INLINE void createMetadataFromGridAttrs( openvdb::GridBase& grid, const GEO_PrimVDB& prim, const GEO_Detail& gdp) { GU_PrimVDB::createMetadataFromGridAttrsAdapter(&grid, prim, gdp); } /// @brief Transfer attributes to VDB metadata. 
/// @param meta_map the output metadata /// @param owner the type of element /// @param element the offset of the element /// @param geo the detail from which to retrieve primitive attributes static SYS_FORCE_INLINE void createMetadataFromAttrs( openvdb::MetaMap& meta_map, GA_AttributeOwner owner, GA_Offset element, const GEO_Detail& geo) { GU_PrimVDB::createMetadataFromAttrsAdapter(&meta_map, owner, element, geo); } private: // METHODS /// Add a border of the given radius by evaluating from the given volume. /// It assumes that the VDB is a float grid and that the voxel array has /// the same index space, so this can really only be safely called after /// buildFromPrimVolume(). This is used to ensure that non-constant borders /// can be converted at the expense of some extra memory. void expandBorderFromPrimVolume( const GEO_PrimVolume &vol, int border_radius); GEO_Primitive * convertToNewPrim( GEO_Detail &dst_geo, GU_ConvertParms &parms, fpreal adaptivity, bool split_disjoint_volumes, bool &success) const; GEO_Primitive * convertToPrimVolume( GEO_Detail &dst_geo, GU_ConvertParms &parms, bool split_disjoint_volumes) const; GEO_Primitive * convertToPoly( GEO_Detail &dst_geo, GU_ConvertParms &parms, fpreal adaptivity, bool buildpolysoup, bool &success) const; static GU_PrimVDB* buildFromGridAdapter( GU_Detail& gdp, void* grid, const GEO_PrimVDB*, const char* name); static void createGridAttrsFromMetadataAdapter( const GEO_PrimVDB& prim, const void* grid, GEO_Detail& gdp); static void createMetadataFromGridAttrsAdapter( void* grid, const GEO_PrimVDB&, const GEO_Detail&); static void createAttrsFromMetadataAdapter( GA_AttributeOwner owner, GA_Offset element, const void* meta_map_ptr, GEO_Detail& geo); static void createMetadataFromAttrsAdapter( void* meta_map_ptr, GA_AttributeOwner owner, GA_Offset element, const GEO_Detail& geo); private: // DATA static GA_PrimitiveDefinition *theDefinition; friend class GU_PrimitiveFactory; SYS_DEPRECATED_PUSH_DISABLE() }; SYS_DEPRECATED_POP_DISABLE() #ifndef SESI_OPENVDB namespace openvdb_houdini { using ::GU_PrimVDB; } // namespace openvdb_houdini #endif #endif // __HDK_GU_PrimVDB__ #endif // SESI_OPENVDB || SESI_OPENVDB_PRIM
11,527
C++
36.921053
83
0.578555
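GU_PrimVDB.h above declares buildFromGrid(), which wraps an arbitrary grid in a VDB primitive and attaches it to a detail. Below is a minimal sketch of that call, assuming openvdb::initialize() has already been run and that the openvdb_houdini headers are on the include path; the helper name addDensityPrim, the grid name "density", and the voxel size are illustrative.

#include <openvdb_houdini/GU_PrimVDB.h>
#include <GU/GU_Detail.h>
#include <openvdb/openvdb.h>

// Create a float grid, give it a linear transform, and store it in a new
// VDB primitive on the detail via the buildFromGrid() factory above.
GU_PrimVDB* addDensityPrim(GU_Detail& gdp)
{
    openvdb::FloatGrid::Ptr grid = openvdb::FloatGrid::create(/*background=*/0.0f);
    grid->setTransform(openvdb::math::Transform::createLinearTransform(/*voxelSize=*/0.1));

    // src = nullptr: no primitive to copy attributes from; name the result "density".
    return GU_PrimVDB::buildFromGrid(gdp, grid, /*src=*/nullptr, "density");
}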
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/Utils.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file openvdb_houdini/Utils.h /// @author FX R&D Simulation team /// @brief Utility classes and functions for OpenVDB plugins #ifndef OPENVDB_HOUDINI_UTILS_HAS_BEEN_INCLUDED #define OPENVDB_HOUDINI_UTILS_HAS_BEEN_INCLUDED #include "GU_PrimVDB.h" #include <OP/OP_Node.h> // for OP_OpTypeId #include <UT/UT_SharedPtr.h> #include <UT/UT_Interrupt.h> #include <openvdb/openvdb.h> #include <functional> #include <type_traits> #ifdef SESI_OPENVDB #ifdef OPENVDB_HOUDINI_API #undef OPENVDB_HOUDINI_API #define OPENVDB_HOUDINI_API #endif #endif class GEO_PrimVDB; class GU_Detail; class UT_String; namespace openvdb_houdini { using Grid = openvdb::GridBase; using GridPtr = openvdb::GridBase::Ptr; using GridCPtr = openvdb::GridBase::ConstPtr; using GridRef = openvdb::GridBase&; using GridCRef = const openvdb::GridBase&; /// @brief Iterator over const VDB primitives on a geometry detail /// /// @details At least until @c GEO_PrimVDB becomes a built-in primitive type /// (that can be used as the mask for a @c GA_GBPrimitiveIterator), use this /// iterator to iterate over all VDB grids belonging to a gdp and, optionally, /// belonging to a particular group. class OPENVDB_HOUDINI_API VdbPrimCIterator { public: using FilterFunc = std::function<bool (const GU_PrimVDB&)>; /// @param gdp /// the geometry detail over which to iterate /// @param group /// a group in the detail over which to iterate (if @c nullptr, /// iterate over all VDB primitives) /// @param filter /// an optional function or functor that takes a const reference /// to a GU_PrimVDB and returns a boolean specifying whether /// that primitive should be visited (@c true) or not (@c false) explicit VdbPrimCIterator(const GEO_Detail* gdp, const GA_PrimitiveGroup* group = nullptr, FilterFunc filter = FilterFunc()); VdbPrimCIterator(const VdbPrimCIterator&); VdbPrimCIterator& operator=(const VdbPrimCIterator&); //@{ /// Advance to the next VDB primitive. void advance(); VdbPrimCIterator& operator++() { advance(); return *this; } //@} //@{ /// Return a pointer to the current VDB primitive (@c nullptr if at end). const GU_PrimVDB* getPrimitive() const; const GU_PrimVDB* operator*() const { return getPrimitive(); } const GU_PrimVDB* operator->() const { return getPrimitive(); } //@} //@{ GA_Offset getOffset() const { return getPrimitive()->getMapOffset(); } GA_Index getIndex() const { return getPrimitive()->getMapIndex(); } //@} /// Return @c false if there are no more VDB primitives. operator bool() const { return getPrimitive() != nullptr; } /// @brief Return the value of the current VDB primitive's @c name attribute. /// @param defaultName /// if the current primitive has no @c name attribute /// or its name is empty, return this name instead UT_String getPrimitiveName(const UT_String& defaultName = "") const; /// @brief Return the value of the current VDB primitive's @c name attribute /// or, if the name is empty, the primitive's index (as a UT_String). UT_String getPrimitiveNameOrIndex() const; /// @brief Return a string of the form "N (NAME)", where @e N is /// the current VDB primitive's index and @e NAME is the value /// of the primitive's @c name attribute. 
/// @param keepEmptyName if the current primitive has no @c name attribute /// or its name is empty, then if this flag is @c true, return a string /// "N ()", otherwise return a string "N" omitting the empty name UT_String getPrimitiveIndexAndName(bool keepEmptyName = true) const; protected: /// Allow primitives to be deleted during iteration. VdbPrimCIterator(const GEO_Detail*, GA_Range::safedeletions, const GA_PrimitiveGroup* = nullptr, FilterFunc = FilterFunc()); UT_SharedPtr<GA_GBPrimitiveIterator> mIter; FilterFunc mFilter; }; // class VdbPrimCIterator /// @brief Iterator over non-const VDB primitives on a geometry detail /// /// @details At least until @c GEO_PrimVDB becomes a built-in primitive type /// (that can be used as the mask for a @c GA_GBPrimitiveIterator), use this /// iterator to iterate over all VDB grids belonging to a gdp and, optionally, /// belonging to a particular group. class OPENVDB_HOUDINI_API VdbPrimIterator: public VdbPrimCIterator { public: /// @param gdp /// the geometry detail over which to iterate /// @param group /// a group in the detail over which to iterate (if @c nullptr, /// iterate over all VDB primitives) /// @param filter /// an optional function or functor that takes a @c const reference /// to a GU_PrimVDB and returns a boolean specifying whether /// that primitive should be visited (@c true) or not (@c false) explicit VdbPrimIterator(GEO_Detail* gdp, const GA_PrimitiveGroup* group = nullptr, FilterFunc filter = FilterFunc()): VdbPrimCIterator(gdp, group, filter) {} /// @brief Allow primitives to be deleted during iteration. /// @param gdp /// the geometry detail over which to iterate /// @param group /// a group in the detail over which to iterate (if @c nullptr, /// iterate over all VDB primitives) /// @param filter /// an optional function or functor that takes a @c const reference /// to a GU_PrimVDB and returns a boolean specifying whether /// that primitive should be visited (@c true) or not (@c false) VdbPrimIterator(GEO_Detail* gdp, GA_Range::safedeletions, const GA_PrimitiveGroup* group = nullptr, FilterFunc filter = FilterFunc()): VdbPrimCIterator(gdp, GA_Range::safedeletions(), group, filter) {} VdbPrimIterator(const VdbPrimIterator&); VdbPrimIterator& operator=(const VdbPrimIterator&); /// Advance to the next VDB primitive. VdbPrimIterator& operator++() { advance(); return *this; } //@{ /// Return a pointer to the current VDB primitive (@c nullptr if at end). GU_PrimVDB* getPrimitive() const { return const_cast<GU_PrimVDB*>(VdbPrimCIterator::getPrimitive()); } GU_PrimVDB* operator*() const { return getPrimitive(); } GU_PrimVDB* operator->() const { return getPrimitive(); } //@} }; // class VdbPrimIterator //////////////////////////////////////// /// @brief Wrapper class that adapts a Houdini @c UT_Interrupt object /// for use with OpenVDB library routines /// @sa openvdb/util/NullInterrupter.h class Interrupter { public: explicit Interrupter(const char* title = nullptr): mUTI{UTgetInterrupt()}, mRunning{false}, mTitle{title ? title : ""} {} ~Interrupter() { if (mRunning) this->end(); } Interrupter(const Interrupter&) = default; Interrupter& operator=(const Interrupter&) = default; /// @brief Signal the start of an interruptible operation. /// @param name an optional descriptive name for the operation void start(const char* name = nullptr) { if (!mRunning) { mRunning = true; mUTI->opStart(name ? name : mTitle.c_str()); } } /// Signal the end of an interruptible operation. 
void end() { if (mRunning) { mUTI->opEnd(); mRunning = false; } } /// @brief Check if an interruptible operation should be aborted. /// @param percent an optional (when >= 0) percentage indicating /// the fraction of the operation that has been completed bool wasInterrupted(int percent=-1) { return mUTI->opInterrupt(percent); } private: UT_Interrupt* mUTI; bool mRunning; std::string mTitle; }; //////////////////////////////////////// // Utility methods /// @brief Store a VDB grid in a new VDB primitive and add the primitive /// to a geometry detail. /// @return the newly-created VDB primitive. /// @param gdp the detail to which to add the primitive /// @param grid the VDB grid to be added /// @param name if non-null, set the new primitive's @c name attribute to this string /// @note This operation clears the input grid's metadata. OPENVDB_HOUDINI_API GU_PrimVDB* createVdbPrimitive(GU_Detail& gdp, GridPtr grid, const char* name = nullptr); /// @brief Replace an existing VDB primitive with a new primitive that contains /// the given grid. /// @return the newly-created VDB primitive. /// @param gdp the detail to which to add the primitive /// @param grid the VDB grid to be added /// @param src replace this primitive with the newly-created primitive /// @param copyAttrs if @c true, copy attributes and group membership from the @a src primitive /// @param name if non-null, set the new primitive's @c name attribute to this string; /// otherwise, if @a copyAttrs is @c true, copy the name from @a src /// @note This operation clears the input grid's metadata. OPENVDB_HOUDINI_API GU_PrimVDB* replaceVdbPrimitive(GU_Detail& gdp, GridPtr grid, GEO_PrimVDB& src, const bool copyAttrs = true, const char* name = nullptr); /// @brief Return in @a corners the corners of the given grid's active voxel bounding box. /// @return @c false if the grid has no active voxels. OPENVDB_HOUDINI_API bool evalGridBBox(GridCRef grid, UT_Vector3 corners[8], bool expandHalfVoxel = false); /// Construct an index-space CoordBBox from a UT_BoundingBox. OPENVDB_HOUDINI_API openvdb::CoordBBox makeCoordBBox(const UT_BoundingBox&, const openvdb::math::Transform&); /// @{ /// @brief Start forwarding OpenVDB log messages to the Houdini error manager /// for all operators of the given type. /// @details Typically, log forwarding is enabled for specific operator types /// during initialization of the openvdb_houdini library, and there's no need /// for client code to call this function. /// @details This function has no effect unless OpenVDB was built with /// <A HREF="http://log4cplus.sourceforge.net/">log4cplus</A>. /// @note OpenVDB messages are typically logged to the console as well. /// This function has no effect on console logging. /// @sa stopLogForwarding(), isLogForwarding() OPENVDB_HOUDINI_API void startLogForwarding(OP_OpTypeId); /// @brief Stop forwarding OpenVDB log messages to the Houdini error manager /// for all operators of the given type. /// @details Typically, log forwarding is enabled for specific operator types /// during initialization of the openvdb_houdini library, and there's no need /// for client code to disable it. /// @details This function has no effect unless OpenVDB was built with /// <A HREF="http://log4cplus.sourceforge.net/">log4cplus</A>. /// @note OpenVDB messages are typically logged to the console as well. /// This function has no effect on console logging. 
/// @sa startLogForwarding(), isLogForwarding() OPENVDB_HOUDINI_API void stopLogForwarding(OP_OpTypeId); /// @brief Return @c true if OpenVDB messages logged by operators /// of the given type are forwarded to the Houdini error manager. /// @sa startLogForwarding(), stopLogForwarding() OPENVDB_HOUDINI_API bool isLogForwarding(OP_OpTypeId); /// @} //////////////////////////////////////// // Grid type lists, for use with GEO_PrimVDB::apply(), GEOvdbApply(), // or openvdb::GridBase::apply() using ScalarGridTypes = openvdb::TypeList< openvdb::BoolGrid, openvdb::FloatGrid, openvdb::DoubleGrid, openvdb::Int32Grid, openvdb::Int64Grid>; using NumericGridTypes = openvdb::TypeList< openvdb::FloatGrid, openvdb::DoubleGrid, openvdb::Int32Grid, openvdb::Int64Grid>; using RealGridTypes = openvdb::TypeList< openvdb::FloatGrid, openvdb::DoubleGrid>; using Vec3GridTypes = openvdb::TypeList< openvdb::Vec3SGrid, openvdb::Vec3DGrid, openvdb::Vec3IGrid>; using PointGridTypes = openvdb::TypeList< openvdb::points::PointDataGrid>; using VolumeGridTypes = ScalarGridTypes::Append<Vec3GridTypes>; using AllGridTypes = VolumeGridTypes::Append<PointGridTypes>; /// @brief If the given primitive's grid resolves to one of the listed grid types, /// invoke the functor @a op on the resolved grid. /// @return @c true if the functor was invoked, @c false otherwise template<typename GridTypeListT, typename OpT> inline bool GEOvdbApply(const GEO_PrimVDB& vdb, OpT& op) { if (auto gridPtr = vdb.getConstGridPtr()) { return gridPtr->apply<GridTypeListT>(op); } return false; } /// @brief If the given primitive's grid resolves to one of the listed grid types, /// invoke the functor @a op on the resolved grid. /// @return @c true if the functor was invoked, @c false otherwise /// @details If @a makeUnique is true, deep copy the grid's tree before /// invoking the functor if the tree is shared with other grids. template<typename GridTypeListT, typename OpT> inline bool GEOvdbApply(GEO_PrimVDB& vdb, OpT& op, bool makeUnique = true) { if (vdb.hasGrid()) { auto gridPtr = vdb.getGridPtr(); if (makeUnique) { auto treePtr = gridPtr->baseTreePtr(); if (treePtr.use_count() > 2) { // grid + treePtr = 2 // If the grid resolves to one of the listed types and its tree // is shared with other grids, replace the tree with a deep copy. gridPtr->apply<GridTypeListT>( [](Grid& baseGrid) { baseGrid.setTree(baseGrid.constBaseTree().copy()); }); } } return gridPtr->apply<GridTypeListT>(op); } return false; } } // namespace openvdb_houdini #endif // OPENVDB_HOUDINI_UTILS_HAS_BEEN_INCLUDED
13,654
C++
37.142458
96
0.686466
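Utils.h above provides VdbPrimCIterator, the grid type lists, and GEOvdbApply(). The sketch below combines them to visit every VDB primitive on a detail and resolve each real-valued grid to its concrete type; the ActiveVoxelCounter functor and reportActiveVoxels helper are illustrative names, and the code assumes a Houdini process in which the openvdb_houdini library is initialized.

#include <openvdb_houdini/Utils.h>
#include <GU/GU_Detail.h>
#include <iostream>

namespace hvdb = openvdb_houdini;

// Functor applied to each float- or double-valued grid by GEOvdbApply().
struct ActiveVoxelCounter {
    openvdb::Index64 total = 0;
    template<typename GridT>
    void operator()(const GridT& grid) { total += grid.activeVoxelCount(); }
};

// Accumulate active-voxel counts over all real-valued VDB primitives on a detail.
void reportActiveVoxels(const GU_Detail& gdp)
{
    ActiveVoxelCounter op;
    for (hvdb::VdbPrimCIterator it(&gdp); it; ++it) {
        // GEOvdbApply() returns false for grids that are not in RealGridTypes.
        if (hvdb::GEOvdbApply<hvdb::RealGridTypes>(**it, op)) {
            std::cout << it.getPrimitiveNameOrIndex().toStdString() << " counted\n";
        }
    }
    std::cout << "total active voxels: " << op.total << "\n";
}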
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/UT_VDBTools.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file UT_VDBTools.h /// @author FX R&D Simulation team /// @brief Less commonly-used utility classes and functions for OpenVDB plugins #ifndef OPENVDB_HOUDINI_UT_VDBTOOLS_HAS_BEEN_INCLUDED #define OPENVDB_HOUDINI_UT_VDBTOOLS_HAS_BEEN_INCLUDED #include <openvdb/openvdb.h> #include <openvdb/tools/GridTransformer.h> #include "Utils.h" // for GridPtr namespace openvdb_houdini { /// @brief GridTransformOp is a functor class for use with GridBase::apply() /// that samples an input grid into an output grid of the same type through /// a given affine transform. /// @details The output grid's transform is unchanged by this operation. /// @sa GridResampleOp, GridResampleToMatchOp /// @par Example: /// @code /// const Grid& inGrid = ...; // generic reference to a grid of any type /// /// // Create a new, empty output grid of the same (so far, unknown) type /// // as the input grid and with the same transform and metadata. /// GridPtr outGrid = inGrid.copyGridWithNewTree(); /// /// // Initialize a GridTransformer with the parameters of an affine transform. /// openvdb::tools::GridTransformer xform(pivot, scale, rotate, ...); /// /// // Resolve the input grid's type and resample it into the output grid, /// // using a second-order sampling kernel. /// GridTransformOp<openvdb::tools::QuadraticSampler> op(outGrid, xform); /// inGrid.apply<openvdb_houdini::ScalarGridTypes>(op); /// @endcode template<typename Sampler> class GridTransformOp { public: /// @param outGrid a generic pointer to an output grid of the same type /// as the grid to be resampled /// @param t a @c GridTransformer that defines an affine transform /// @note GridTransformOp makes an internal copy of the @c GridTransformer /// and supplies the copy with a default Interrupter that replaces any /// existing interrupter. GridTransformOp(GridPtr& outGrid, const openvdb::tools::GridTransformer& t): mOutGrid(outGrid), mTransformer(t) {} template<typename GridType> void operator()(const GridType& inGrid) { typename GridType::Ptr outGrid = openvdb::gridPtrCast<GridType>(mOutGrid); Interrupter interrupter; mTransformer.setInterrupter(interrupter); mTransformer.transformGrid<Sampler, GridType>(inGrid, *outGrid); } private: GridPtr mOutGrid; openvdb::tools::GridTransformer mTransformer; }; //////////////////////////////////////// /// @brief GridResampleOp is a functor class for use with UTvdbProcessTypedGrid() /// that samples an input grid into an output grid of the same type through /// a given transform. /// @details The output grid's transform is unchanged by this operation. /// @sa GridTransformOp, GridResampleToMatchOp /// @par Example: /// @code /// namespace { /// // Class that implements GridResampler's Transformer interface /// struct MyXform /// { /// bool isAffine() const { ... } /// openvdb::Vec3d transform(const openvdb::Vec3d&) const { ... } /// openvdb::Vec3d invTransform(const openvdb::Vec3d&) const { ... } /// }; /// } /// /// const Grid& inGrid = ...; // generic reference to a grid of any type /// /// // Create a new, empty output grid of the same (so far, unknown) type /// // as the input grid and with the same transform and metadata. /// GridPtr outGrid = inGrid.copyGridWithNewTree(); /// /// // Resolve the input grid's type and resample it into the output grid, /// // using a trilinear sampling kernel. 
/// GridResampleOp<openvdb::tools::BoxSampler, MyXform> op(outGrid, MyXform()); /// inGrid.apply<openvdb_houdini::ScalarGridTypes>(op); /// @endcode template<typename Sampler, typename TransformerType> class GridResampleOp { public: /// @param outGrid a generic pointer to an output grid of the same type /// as the grid to be resampled /// @param t an object that implements <tt>GridResampler</tt>'s /// Transformer interface /// @note GridResampleOp makes an internal copy of @a t. GridResampleOp(GridPtr& outGrid, const TransformerType& t): mOutGrid(outGrid), mTransformer(t) {} template<typename GridType> void operator()(const GridType& inGrid) { typename GridType::Ptr outGrid = openvdb::gridPtrCast<GridType>(mOutGrid); openvdb::tools::GridResampler resampler; Interrupter interrupter; resampler.setInterrupter(interrupter); resampler.transformGrid<Sampler>(mTransformer, inGrid, *outGrid); } private: GridPtr mOutGrid; const TransformerType mTransformer; }; //////////////////////////////////////// /// @brief GridResampleToMatchOp is a functor class for use with /// GridBase::apply() that samples an input grid into an output grid /// of the same type such that, after resampling, the input and output grids /// coincide, but the output grid's transform is unchanged. /// @sa GridTransformOp, GridResampleOp /// @par Example: /// @code /// const Grid& inGrid = ...; // generic reference to a grid of any type /// /// // Create a new, empty output grid of the same (so far, unknown) type as /// // the input grid and with the same metadata, but with a different transform. /// GridPtr outGrid = inGrid.copyGridWithNewTree(); /// outGrid->setTransform(myTransform); /// /// // Resolve the input grid's type and resample it into the output grid, /// // using a second-order sampling kernel. /// GridResampleToMatchOp<openvdb::tools::QuadraticSampler> op(outGrid); /// inGrid.apply<openvdb_houdini::ScalarGridTypes>(op); /// @endcode template<typename Sampler> class GridResampleToMatchOp { public: GridResampleToMatchOp(GridPtr outGrid): mOutGrid(outGrid) {} template<typename GridType> void operator()(const GridType& inGrid) { typename GridType::Ptr outGrid = openvdb::gridPtrCast<GridType>(mOutGrid); Interrupter interrupter; openvdb::tools::resampleToMatch<Sampler>(inGrid, *outGrid, interrupter); } private: GridPtr mOutGrid; }; } // namespace openvdb_houdini #endif // OPENVDB_HOUDINI_UT_VDBTOOLS_HAS_BEEN_INCLUDED
6,174
C++
34.693641
82
0.693878
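UT_VDBTools.h documents the Transformer interface expected by GridResampleOp through the MyXform skeleton in its comments. One possible way to flesh out that skeleton is the uniform world-space scale below; UniformScaleXform and resampleScaled are illustrative names, and the Interrupter constructed inside GridResampleOp assumes the code runs where UTgetInterrupt() is available, i.e. inside Houdini.

#include <openvdb_houdini/UT_VDBTools.h>
#include <openvdb/tools/Interpolation.h> // BoxSampler

namespace hvdb = openvdb_houdini;

// Implements GridResampler's Transformer interface with a uniform scale.
struct UniformScaleXform {
    double scale;
    explicit UniformScaleXform(double s): scale(s) {}
    bool isAffine() const { return true; }
    openvdb::Vec3d transform(const openvdb::Vec3d& p) const { return p * scale; }
    openvdb::Vec3d invTransform(const openvdb::Vec3d& p) const { return p / scale; }
};

// Resample a scalar grid of unknown concrete type through the transformer above,
// following the usage pattern given in the header's documentation.
hvdb::GridPtr resampleScaled(const hvdb::Grid& inGrid, double scale)
{
    hvdb::GridPtr outGrid = inGrid.copyGridWithNewTree();
    hvdb::GridResampleOp<openvdb::tools::BoxSampler, UniformScaleXform>
        op(outGrid, UniformScaleXform(scale));
    inGrid.apply<hvdb::ScalarGridTypes>(op);
    return outGrid;
}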
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Resample.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// /// @file SOP_OpenVDB_Resample.cc // /// @author FX R&D OpenVDB team /// /// @class SOP_OpenVDB_Resample /// This node resamples voxels from input VDB grids into new grids /// (of the same type) through a sampling transform that is either /// specified by user-supplied translation, rotation, scale and pivot /// parameters or taken from an optional reference grid. #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/UT_VDBTools.h> // for GridTransformOp, et al. #include <openvdb_houdini/UT_VDBUtils.h> // for UTvdbGridCast() #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/openvdb.h> #include <openvdb/tools/GridTransformer.h> #include <openvdb/tools/LevelSetRebuild.h> #include <openvdb/tools/VectorTransformer.h> // for transformVectors() #include <UT/UT_Interrupt.h> #include <functional> #include <stdexcept> #include <string> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; namespace { enum { MODE_PARMS = 0, MODE_REF_GRID, MODE_VOXEL_SIZE, MODE_VOXEL_SCALE }; } class SOP_OpenVDB_Resample: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Resample(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Resample() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned i) const override { return (i == 1); } class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; protected: void resolveObsoleteParms(PRM_ParmList*) override; bool updateParmsFlags() override; }; //////////////////////////////////////// // Build UI and register this operator. void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; // Group pattern parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDBs to be resampled") .setDocumentation( "A subset of the input VDBs to be resampled" " (see [specifying volumes|/model/volumes#group])")); // Reference grid group parms.add(hutil::ParmFactory(PRM_STRING, "reference", "Reference") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip( "Specify a single reference VDB from the\n" "first input whose transform is to be matched.\n" "Alternatively, connect the reference VDB\n" "to the second input.")); parms.add(hutil::ParmFactory(PRM_ORD, "order", "Interpolation") .setDefault(PRMoneDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "point", "Nearest", "linear", "Linear", "quadratic", "Quadratic" }) .setDocumentation("\ How to interpolate values at fractional voxel positions\n\ \n\ Nearest:\n\ Use the value from the nearest voxel.\n\n\ This is fast but can cause aliasing artifacts.\n\ Linear:\n\ Interpolate trilinearly between the values of immediate neighbors.\n\n\ This matches what [Node:sop/volumemix] and [Vex:volumesample] do.\n\ Quadratic:\n\ Interpolate triquadratically between the values of neighbors.\n\n\ This produces smoother results than trilinear interpolation but is slower.\n")); // Transform source parms.add(hutil::ParmFactory(PRM_ORD, "mode", "Define Transform") .setDefault(PRMoneDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "explicit", "Explicitly", "refvdb", "To Match Reference VDB", "voxelsizeonly", "Using Voxel Size Only", "voxelscaleonly", "Using Voxel Scale Only" }) .setTooltip( "Specify how to define the relative transform\n" "between an input and an output VDB.") 
.setDocumentation("\ How to generate the new VDB's transform\n\ \n\ Explicitly:\n\ Use the values of the transform parameters below.\n\ To Match Reference VDB:\n\ Match the transform and voxel size of a reference VDB.\n\n\ The resulting volume is a copy of the input VDB,\n\ aligned to the reference VDB.\n\ Using Voxel Size Only:\n\ Keep the transform of the input VDB but set a new voxel size,\n\ increasing or decreasing the resolution.\n\ Using Voxel Scale Only:\n\ Keep the transform of the input VDB but scale the voxel size,\n\ increasing or decreasing the resolution.\n")); parms.add(hutil::ParmFactory(PRM_ORD, "xOrd", "Transform Order") .setDefault(0, "tsr") .setChoiceList(&PRMtrsMenu) .setTypeExtended(PRM_TYPE_JOIN_PAIR) .setTooltip( "When __Define Transform__ is Explicitly, the order of operations" " for the new transform")); parms.add(hutil::ParmFactory( PRM_ORD | PRM_Type(PRM_Type::PRM_INTERFACE_LABEL_NONE), "rOrd", "") .setDefault(0, "zyx") .setChoiceList(&PRMxyzMenu) .setTooltip( "When __Define Transform__ is Explicitly, the order of rotations" " for the new transform")); // Translation parms.add(hutil::ParmFactory(PRM_XYZ_J, "t", "Translate Voxels") .setDefault(PRMzeroDefaults) .setVectorSize(3) .setTooltip( "When __Define Transform__ is Explicitly, the shift in voxels for the new transform")); // Rotation parms.add(hutil::ParmFactory(PRM_XYZ_J, "r", "Rotate") .setDefault(PRMzeroDefaults) .setVectorSize(3) .setTooltip( "When __Define Transform__ is Explicitly, the rotation for the new transform")); // Scale parms.add(hutil::ParmFactory(PRM_XYZ_J, "s", "Scale") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0.000001f, PRM_RANGE_UI, 10) .setVectorSize(3) .setTooltip( "When __Define Transform__ is Explicitly, the scale for the new transform")); // Pivot parms.add(hutil::ParmFactory(PRM_XYZ_J, "p", "Pivot") .setDefault(PRMzeroDefaults) .setVectorSize(3) .setTooltip( "When __Define Transform__ is Explicitly, the world-space pivot point" " for scaling and rotation in the new transform")); // Voxel size parms.add(hutil::ParmFactory(PRM_FLT_J, "voxelsize", "Voxel Size") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0.000001f, PRM_RANGE_UI, 1) .setTooltip( "The desired absolute voxel size for all output VDBs\n\n" "Larger voxels correspond to lower resolution.\n")); // Voxel scale parms.add(hutil::ParmFactory(PRM_FLT_J, "voxelscale", "Voxel Scale") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0.000001f, PRM_RANGE_UI, 1) .setTooltip( "The amount by which to scale the voxel size for each output VDB\n\n" "Larger voxels correspond to lower resolution.\n")); // Toggle to apply transform to vector values parms.add(hutil::ParmFactory(PRM_TOGGLE, "xformvectors", "Transform Vectors") .setDefault(PRMzeroDefaults) .setTooltip( "Apply the resampling transform to the voxel values of vector-valued VDBs," " in accordance with those VDBs'" " [Vector Type|http://www.openvdb.org/documentation/doxygen/overview.html#secGrid]" " attributes.")); // Level set rebuild toggle parms.add(hutil::ParmFactory(PRM_TOGGLE, "rebuild", "Rebuild SDF") .setDefault(PRMoneDefaults) .setTooltip( "Transforming (especially scaling) a level set might invalidate\n" "signed distances, necessitating reconstruction of the SDF.\n\n" "This option affects only level set volumes, and it should\n" "almost always be enabled.")); // Prune toggle parms.add(hutil::ParmFactory(PRM_TOGGLE, "prune", "Prune Tolerance") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip( "Reduce 
the memory footprint of output VDBs that have" " (sufficiently large) regions of voxels with the same value.\n\n" "Voxel values are considered equal if they differ by less than" " the specified threshold.\n\n" "NOTE:\n" " Pruning affects only the memory usage of a grid.\n" " It does not remove voxels, apart from inactive voxels\n" " whose value is equal to the background.")); // Pruning tolerance slider parms.add(hutil::ParmFactory( PRM_FLT_J, "tolerance", "Prune Tolerance") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 1) .setDocumentation(nullptr)); // Obsolete parameters hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep2", "separator")); obsoleteParms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep3", "separator")); obsoleteParms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep4", "separator")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "reference_grid", "Reference")); obsoleteParms.add(hutil::ParmFactory(PRM_XYZ_J, "translate", "Translate") .setDefault(PRMzeroDefaults) .setVectorSize(3)); obsoleteParms.add(hutil::ParmFactory(PRM_XYZ_J, "rotate", "Rotate") .setDefault(PRMzeroDefaults) .setVectorSize(3)); obsoleteParms.add(hutil::ParmFactory(PRM_XYZ_J, "scale", "Scale") .setDefault(PRMoneDefaults) .setVectorSize(3)); obsoleteParms.add(hutil::ParmFactory(PRM_XYZ_J, "pivot", "Pivot") .setDefault(PRMzeroDefaults) .setVectorSize(3)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "voxel_size", "Voxel Size") .setDefault(PRMoneDefaults)); // Register this operator. hvdb::OpenVDBOpFactory("VDB Resample", SOP_OpenVDB_Resample::factory, parms, *table) .setObsoleteParms(obsoleteParms) .addInput("Source VDB grids to resample") .addOptionalInput("Optional transform reference VDB grid") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Resample::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Resample a VDB volume into a new orientation and/or voxel size.\"\"\"\n\ \n\ @overview\n\ \n\ This node resamples voxels from input VDBs into new VDBs (of the same type)\n\ through a sampling transform that is either specified by user-supplied\n\ translation, rotation, scale and pivot parameters or taken from\n\ an optional reference VDB.\n\ \n\ @related\n\ - [OpenVDB Combine|Node:sop/DW_OpenVDBCombine]\n\ - [Node:sop/vdbresample]\n\ - [Node:sop/xform]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } void SOP_OpenVDB_Resample::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; resolveRenamedParm(*obsoleteParms, "reference_grid", "reference"); resolveRenamedParm(*obsoleteParms, "voxel_size", "voxelsize"); resolveRenamedParm(*obsoleteParms, "translate", "t"); resolveRenamedParm(*obsoleteParms, "rotate", "r"); resolveRenamedParm(*obsoleteParms, "scale", "s"); resolveRenamedParm(*obsoleteParms, "pivot", "p"); // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } // Disable UI Parms. 
bool SOP_OpenVDB_Resample::updateParmsFlags() { bool changed = false; const auto mode = evalInt("mode", 0, 0); changed |= enableParm("t", mode == MODE_PARMS); changed |= enableParm("r", mode == MODE_PARMS); changed |= enableParm("s", mode == MODE_PARMS); changed |= enableParm("p", mode == MODE_PARMS); changed |= enableParm("xOrd", mode == MODE_PARMS); changed |= enableParm("rOrd", mode == MODE_PARMS); changed |= enableParm("xformvectors", mode == MODE_PARMS); changed |= enableParm("reference", mode == MODE_REF_GRID); changed |= enableParm("voxelsize", mode == MODE_VOXEL_SIZE); changed |= enableParm("voxelscale", mode == MODE_VOXEL_SCALE); // Show either the voxel size or the voxel scale parm, but not both. changed |= setVisibleState("voxelsize", mode != MODE_VOXEL_SCALE); changed |= setVisibleState("voxelscale", mode == MODE_VOXEL_SCALE); changed |= enableParm("tolerance", bool(evalInt("prune", 0, 0))); return changed; } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Resample::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Resample(net, name, op); } SOP_OpenVDB_Resample::SOP_OpenVDB_Resample(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// namespace { // Helper class for use with GridBase::apply() struct RebuildOp { std::function<void (const std::string&)> addWarning; openvdb::math::Transform xform; hvdb::GridPtr outGrid; template<typename GridT> void operator()(const GridT& grid) { using ValueT = typename GridT::ValueType; const ValueT halfWidth = ValueT(grid.background() * (1.0 / grid.voxelSize()[0])); hvdb::Interrupter interrupter; try { outGrid = openvdb::tools::doLevelSetRebuild(grid, /*isovalue=*/openvdb::zeroVal<ValueT>(), /*exWidth=*/halfWidth, /*inWidth=*/halfWidth, &xform, &interrupter); } catch (openvdb::TypeError&) { addWarning("skipped rebuild of level set grid " + grid.getName() + " of type " + grid.type()); outGrid = openvdb::ConstPtrCast<GridT>(grid.copy()); } } }; // struct RebuildOp // Functor for use with GridBase::apply() to apply a transform // to the voxel values of vector-valued grids struct VecXformOp { openvdb::Mat4d mat; VecXformOp(const openvdb::Mat4d& _mat): mat(_mat) {} template<typename GridT> void operator()(GridT& grid) const { openvdb::tools::transformVectors(grid, mat); } }; } // unnamed namespace //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Resample::Cache::cookVDBSop(OP_Context& context) { try { auto addWarningCB = [this](const std::string& s) { addWarning(SOP_MESSAGE, s.c_str()); }; const fpreal time = context.getTime(); // Get parameters. 
const int samplingOrder = static_cast<int>(evalInt("order", 0, time)); if (samplingOrder < 0 || samplingOrder > 2) { throw std::runtime_error{"expected interpolation order between 0 and 2, got " + std::to_string(samplingOrder)}; } char const* const xOrdMenu[] = { "srt", "str", "rst", "rts", "tsr", "trs" }; char const* const rOrdMenu[] = { "xyz", "xzy", "yxz", "yzx", "zxy", "zyx" }; const UT_String xformOrder = xOrdMenu[evalInt("xOrd", 0, time)], rotOrder = rOrdMenu[evalInt("rOrd", 0, time)]; const int mode = static_cast<int>(evalInt("mode", 0, time)); if (mode < MODE_PARMS || mode > MODE_VOXEL_SCALE) { throw std::runtime_error{"expected mode between " + std::to_string(int(MODE_PARMS)) + " and " + std::to_string(int(MODE_VOXEL_SCALE)) + ", got " + std::to_string(mode)}; } const openvdb::Vec3R translate = evalVec3R("t", time), rotate = (M_PI / 180.0) * evalVec3R("r", time), scale = evalVec3R("s", time), pivot = evalVec3R("p", time); const float voxelSize = static_cast<float>(evalFloat("voxelsize", 0, time)), voxelScale = static_cast<float>(evalFloat("voxelscale", 0, time)); const bool prune = evalInt("prune", 0, time), rebuild = evalInt("rebuild", 0, time), xformVec = evalInt("xformvectors", 0, time); const float tolerance = static_cast<float>(evalFloat("tolerance", 0, time)); // Get the group of grids to be resampled. const GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", time)); hvdb::GridCPtr refGrid; if (mode == MODE_VOXEL_SIZE) { // Create a dummy reference grid whose (linear) transform specifies // the desired voxel size. hvdb::GridPtr grid = openvdb::FloatGrid::create(); grid->setTransform(openvdb::math::Transform::createLinearTransform(voxelSize)); refGrid = grid; } else if (mode == MODE_VOXEL_SCALE) { // Create a dummy reference grid with a default (linear) transform. refGrid = openvdb::FloatGrid::create(); } else if (mode == MODE_REF_GRID) { // Get the user-specified reference grid from the second input, // if it is connected, or else from the first input. const GU_Detail* refGdp = inputGeo(1, context); if (!refGdp) { refGdp = gdp; } if (auto it = hvdb::VdbPrimCIterator(refGdp, matchGroup(*refGdp, evalStdString("reference", time)))) { refGrid = it->getConstGridPtr(); if (++it) { addWarning(SOP_MESSAGE, "more than one reference grid was found"); } } else { throw std::runtime_error("no reference grid was found"); } } UT_AutoInterrupt progress("Resampling VDB grids"); // Iterate over the input grids. for (hvdb::VdbPrimIterator it(gdp, GA_Range::safedeletions(), group); it; ++it) { if (progress.wasInterrupted()) throw std::runtime_error("Was Interrupted"); GU_PrimVDB* vdb = *it; const UT_VDBType valueType = vdb->getStorageType(); const bool isLevelSet = ((vdb->getGrid().getGridClass() == openvdb::GRID_LEVEL_SET) && (valueType == UT_VDB_FLOAT || valueType == UT_VDB_DOUBLE)); if (isLevelSet && !rebuild) { // If the input grid is a level set but level set rebuild is disabled, // set the grid's class to "unknown", to prevent the resample tool // from triggering a rebuild. vdb->getGrid().setGridClass(openvdb::GRID_UNKNOWN); } const hvdb::Grid& grid = vdb->getGrid(); // Override the sampling order for boolean grids. int curOrder = samplingOrder; if (valueType == UT_VDB_BOOL && (samplingOrder != 0)) { addWarning(SOP_MESSAGE, ("a boolean VDB grid can't be order-" + std::to_string(samplingOrder) + " sampled; using nearest neighbor sampling instead").c_str()); curOrder = 0; } // Create a new, empty output grid of the same type as the input grid // and with the same metadata. 
hvdb::GridPtr outGrid = grid.copyGridWithNewTree(); UT_AutoInterrupt scopedInterrupt( ("Resampling " + it.getPrimitiveName().toStdString()).c_str()); if (refGrid) { // If a reference grid was provided, then after resampling, the // output grid's transform will be the same as the reference grid's. openvdb::math::Transform::Ptr refXform = refGrid->transform().copy(); if (mode == MODE_VOXEL_SCALE) { openvdb::Vec3d scaledVoxelSize = grid.voxelSize() * voxelScale; refXform->preScale(scaledVoxelSize); } if (isLevelSet && rebuild) { // Use the level set rebuild tool to both resample and rebuild. RebuildOp op; op.addWarning = addWarningCB; op.xform = *refXform; grid.apply<hvdb::RealGridTypes>(op); outGrid = op.outGrid; } else { // Use the resample tool to sample the input grid into the output grid. // Set the correct transform on the output grid. outGrid->setTransform(refXform); if (curOrder == 0) { hvdb::GridResampleToMatchOp<openvdb::tools::PointSampler> op(outGrid); grid.apply<hvdb::AllGridTypes>(op); } else if (curOrder == 1) { hvdb::GridResampleToMatchOp<openvdb::tools::BoxSampler> op(outGrid); grid.apply<hvdb::AllGridTypes>(op); } else if (curOrder == 2) { hvdb::GridResampleToMatchOp<openvdb::tools::QuadraticSampler> op(outGrid); grid.apply<hvdb::AllGridTypes>(op); } #ifdef SESI_OPENVDB if (isLevelSet) { auto tempgrid = UTvdbGridCast<openvdb::FloatGrid>(outGrid); openvdb::tools::pruneLevelSet(tempgrid->tree()); openvdb::tools::signedFloodFill(tempgrid->tree()); } #endif } } else { // Resample into the output grid using the user-supplied transform. // The output grid's transform will be the same as the input grid's. openvdb::tools::GridTransformer xform(pivot, scale, rotate, translate, xformOrder.toStdString(), rotOrder.toStdString()); if (isLevelSet && rebuild) { // Use the level set rebuild tool to both resample and rebuild. RebuildOp op; op.addWarning = addWarningCB; // Compose the input grid's transform with the user-supplied transform. // (The latter is retrieved from the GridTransformer, so that the // order of operations and the rotation order are respected.) op.xform = grid.constTransform(); op.xform.preMult(xform.getTransform().inverse()); grid.apply<hvdb::RealGridTypes>(op); outGrid = op.outGrid; outGrid->setTransform(grid.constTransform().copy()); } else { // Use the resample tool to sample the input grid into the output grid. hvdb::Interrupter interrupter; xform.setInterrupter(interrupter); if (curOrder == 0) { hvdb::GridTransformOp<openvdb::tools::PointSampler> op(outGrid, xform); hvdb::GEOvdbApply<hvdb::VolumeGridTypes>(*vdb, op); } else if (curOrder == 1) { hvdb::GridTransformOp<openvdb::tools::BoxSampler> op(outGrid, xform); hvdb::GEOvdbApply<hvdb::VolumeGridTypes>(*vdb, op); } else if (curOrder == 2) { hvdb::GridTransformOp<openvdb::tools::QuadraticSampler> op(outGrid, xform); hvdb::GEOvdbApply<hvdb::VolumeGridTypes>(*vdb, op); } #ifdef SESI_OPENVDB if (isLevelSet) { auto tempgrid = UTvdbGridCast<openvdb::FloatGrid>(outGrid); openvdb::tools::pruneLevelSet(tempgrid->tree()); openvdb::tools::signedFloodFill(tempgrid->tree()); } #endif } if (xformVec && outGrid->isInWorldSpace() && outGrid->getVectorType() != openvdb::VEC_INVARIANT) { // If (and only if) the grid is vector-valued, apply the transform // to each voxel's value. 
VecXformOp op(xform.getTransform()); outGrid->apply<hvdb::Vec3GridTypes>(op); } } if (prune) outGrid->pruneGrid(tolerance); // Replace the original VDB primitive with a new primitive that contains // the output grid and has the same attributes and group membership. hvdb::replaceVdbPrimitive(*gdp, outGrid, *vdb); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
24,505
C++
38.653722
99
0.600816
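When no reference VDB is supplied, the resample SOP above builds an openvdb::tools::GridTransformer from pivot/scale/rotate/translate parameters plus the transform and rotation orders, then applies it with GridTransformOp. The sketch below isolates that path with hard-coded placeholder values (they are not the node's parameter values); resampleExplicit is an illustrative name, and GridTransformOp's internal Interrupter again assumes a Houdini environment.

#include <openvdb_houdini/UT_VDBTools.h>       // GridTransformOp
#include <openvdb/tools/GridTransformer.h>
#include <openvdb/tools/Interpolation.h>       // BoxSampler

namespace hvdb = openvdb_houdini;

// Resample inGrid through an explicit affine transform, leaving the output
// grid's own transform unchanged (as the SOP does in its explicit mode).
hvdb::GridPtr resampleExplicit(const hvdb::Grid& inGrid)
{
    const openvdb::Vec3R pivot(0.0, 0.0, 0.0), scale(2.0, 2.0, 2.0),
        rotate(0.0, 0.0, 0.0), translate(1.0, 0.0, 0.0);

    // "tsr" / "zyx" match the SOP's default transform and rotation orders.
    openvdb::tools::GridTransformer xform(
        pivot, scale, rotate, translate, /*xformOrder=*/"tsr", /*rotOrder=*/"zyx");

    hvdb::GridPtr outGrid = inGrid.copyGridWithNewTree();
    hvdb::GridTransformOp<openvdb::tools::BoxSampler> op(outGrid, xform);
    inGrid.apply<hvdb::VolumeGridTypes>(op);
    return outGrid;
}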
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Advect.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Advect.cc /// /// @author FX R&D OpenVDB team /// /// @brief Level set advection SOP #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/tools/LevelSetAdvect.h> #include <openvdb/tools/Interpolation.h> #include <openvdb/tools/VolumeAdvect.h> #include <UT/UT_Interrupt.h> #include <GA/GA_PageIterator.h> #include <GU/GU_PrimPoly.h> #include <CH/CH_Manager.h> #include <PRM/PRM_Parm.h> #include <hboost/algorithm/string/join.hpp> #include <functional> #include <stdexcept> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; namespace { struct AdvectionParms; } class SOP_OpenVDB_Advect: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Advect(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Advect() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned i) const override { return (i > 0); } class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; protected: bool updateParmsFlags() override; void resolveObsoleteParms(PRM_ParmList*) override; }; //////////////////////////////////////// // Build UI and register this operator void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; using namespace openvdb::math; hutil::ParmList parms; // Level set grid parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("VDB grid(s) to advect.") .setDocumentation( "A subset of VDBs in the first input to move using the velocity field" " (see [specifying volumes|/model/volumes#group])")); // Velocity grid parms.add(hutil::ParmFactory(PRM_STRING, "velgroup", "Velocity") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip("Velocity grid") .setDocumentation( "The name of a VDB primitive in the second input to use as" " the velocity field (see [specifying volumes|/model/volumes#group])\n\n" "This must be a vector-valued VDB primitive." " You can use the [Vector Merge node|Node:sop/DW_OpenVDBVectorMerge]" " to turn a `vel.[xyz]` triple into a single primitive.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "respectclass", "Respect Grid Class") .setDefault(PRMoneDefaults) .setTooltip("If disabled, advect level sets using general advection scheme.") .setDocumentation( "When this option is disabled, all VDBs will use a general numerical" " advection scheme, otherwise level set VDBs will be advected using" " a spatial finite-difference scheme.")); // Advect: timestep parms.add(hutil::ParmFactory(PRM_FLT, "timestep", "Timestep") .setDefault(1, "1.0/$FPS") .setDocumentation( "Number of seconds of movement to apply to the input points\n\n" "The default is `1/$FPS` (one frame's worth of time)." " You can use negative values to move the points backwards through" " the velocity field.")); parms.add(hutil::ParmFactory(PRM_HEADING, "general", "General Advection") .setDocumentation( "These control how VDBs that are not level sets are moved through the velocity field." 
" If the grid class is not being respected, all grids will be advected" " using general advection regardless of whether they are level sets or not.")); // SubSteps parms.add(hutil::ParmFactory(PRM_INT_J, "substeps", "Substeps") .setDefault(1) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 10) .setTooltip( "The number of substeps per integration step.\n" "The only reason to increase it above its default value of one" " is to reduce the memory footprint from dilations--likely at the cost" " of more smoothing!") .setDocumentation( "The number of substeps per integration step\n\n" "The only reason to increase this above its default value of one is to reduce" " the memory footprint from dilations&mdash;likely at the cost of more smoothing.")); // Advection Scheme parms.add(hutil::ParmFactory(PRM_STRING, "advection", "Advection Scheme") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "semi", "Semi-Lagrangian", "mid", "Mid-Point", "rk3", "3rd order Runge-Kutta", "rk4", "4th order Runge-Kutta", "mac", "MacCormack", "bfecc", "BFECC" }) .setDefault("semi") .setTooltip("Set the numerical advection scheme.")); // Limiter Scheme parms.add(hutil::ParmFactory(PRM_STRING, "limiter", "Limiter Scheme") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "none", "No limiter", "clamp", "Clamp to extrema", "revert", "Revert to 1st order" }) .setDefault("revert") .setTooltip( "Set the limiter scheme used to stabilize the second-order" " MacCormack and BFECC schemes.")); parms.add(hutil::ParmFactory(PRM_HEADING, "advectionHeading", "Level Set Advection") .setDocumentation( "These control how level set VDBs are moved through the velocity field." " If the grid class is not being respected, these options are not used.")); // Advect: spatial menu { std::vector<std::string> items; items.push_back(biasedGradientSchemeToString(FIRST_BIAS)); items.push_back(biasedGradientSchemeToMenuName(FIRST_BIAS)); items.push_back(biasedGradientSchemeToString(HJWENO5_BIAS)); items.push_back(biasedGradientSchemeToMenuName(HJWENO5_BIAS)); parms.add(hutil::ParmFactory(PRM_STRING, "advectspatial", "Spatial Scheme") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setDefault(biasedGradientSchemeToString(HJWENO5_BIAS)) .setTooltip("Set the spatial finite difference scheme.") .setDocumentation( "How accurately the gradients of the signed distance field are computed\n\n" "The later choices are more accurate but take more time.")); } // Advect: temporal menu { std::vector<std::string> items; for (int i = 0; i < NUM_TEMPORAL_SCHEMES; ++i) { TemporalIntegrationScheme it = TemporalIntegrationScheme(i); items.push_back(temporalIntegrationSchemeToString(it)); // token items.push_back(temporalIntegrationSchemeToMenuName(it)); // label } parms.add(hutil::ParmFactory(PRM_STRING, "advecttemporal", "Temporal Scheme") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setDefault(temporalIntegrationSchemeToString(TVD_RK2)) .setTooltip("Set the temporal integration scheme.") .setDocumentation( "How accurately time is evolved within the timestep\n\n" "The later choices are more accurate but take more time.")); } parms.add(hutil::ParmFactory(PRM_HEADING, "renormheading", "Renormalization")); parms.add(hutil::ParmFactory(PRM_INT_J, "normsteps", "Steps") .setDefault(PRMthreeDefaults) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 10) .setTooltip("The number of normalizations performed after each CFL iteration.") .setDocumentation( "After advection, a signed distance field will often no longer contain correct" " distances. 
A number of renormalization passes can be performed between" " every substep to convert it back into a proper signed distance field.")); // Renorm: spatial menu { std::vector<std::string> items; items.push_back(biasedGradientSchemeToString(FIRST_BIAS)); items.push_back(biasedGradientSchemeToMenuName(FIRST_BIAS)); items.push_back(biasedGradientSchemeToString(HJWENO5_BIAS)); items.push_back(biasedGradientSchemeToMenuName(HJWENO5_BIAS)); parms.add(hutil::ParmFactory(PRM_STRING, "renormspatial", "Spatial Scheme") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setDefault(biasedGradientSchemeToString(HJWENO5_BIAS)) .setTooltip("Set the spatial finite difference scheme.") .setDocumentation( "How accurately the gradients of the signed distance field are computed\n\n" "The later choices are more accurate but take more time.")); } // Renorm: temporal menu { std::vector<std::string> items; for (int i = 0; i < NUM_TEMPORAL_SCHEMES; ++i) { TemporalIntegrationScheme it = TemporalIntegrationScheme(i); items.push_back(temporalIntegrationSchemeToString(it)); // token items.push_back(temporalIntegrationSchemeToMenuName(it)); // label } parms.add(hutil::ParmFactory(PRM_STRING, "renormtemporal", "Temporal Scheme") .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setDefault(items[0]) .setTooltip("Set the temporal integration scheme.") .setDocumentation( "How accurately time is evolved within the renormalization stage\n\n" "The later choices are more accurate but take more time.")); } // Obsolete parameters hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_FLT, "beginTime", "Begin time")); obsoleteParms.add(hutil::ParmFactory(PRM_FLT, "endTime", "Time step")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "lsGroup", "Group")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "densityGroup", "Group")); obsoleteParms.add(hutil::ParmFactory(PRM_HEADING, "renormHeading", "")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "velGroup", "Velocity")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "advectSpatial", "Spatial Scheme") .setDefault(biasedGradientSchemeToString(HJWENO5_BIAS))); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "advectTemporal", "Temporal Scheme") .setDefault(temporalIntegrationSchemeToString(TVD_RK2))); obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "normSteps", "Renormalization Steps") .setDefault(PRMthreeDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "renormSpatial", "Spatial Renormalization") .setDefault(biasedGradientSchemeToString(HJWENO5_BIAS))); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "renormTemporal", "Temporal Renormalization") .setDefault(temporalIntegrationSchemeToString(TemporalIntegrationScheme(0)))); // Register this operator. 
hvdb::OpenVDBOpFactory("VDB Advect", SOP_OpenVDB_Advect::factory, parms, *table) .setNativeName("vdbadvectsdf") .setObsoleteParms(obsoleteParms) .addInput("VDBs to Advect") .addInput("Velocity VDB") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Advect::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Move VDBs in the input geometry along a VDB velocity field.\"\"\"\n\ \n\ @overview\n\ The OpenVDB Advect operation will advect VDB volumes according to\n\ a velocity field defined in a vector VDB.\n\ \n\ @animation Animating advection\n\ \n\ *This node is not a feedback loop*.\n\ \n\ It moves the fields it finds in the input geometry.\n\ It _cannot_ modify the fields over time.\n\ (That is, if you hook this node up to do advection and press play,\n\ the fields will not animate.)\n\ \n\ To set up a feedback loop, where the advection at each frame affects\n\ the advected field from the previous frame, do one of the following:\n\ * Do the advection inside a [SOP Solver|Node:sop/solver].\n\ * Set the __Time Step__ to `$T`\n\ \n\ This will cause the node to recalculate, _at every frame_, the path\n\ of every particle through _every previous frame_ to get the current one.\n\ This is obviously not very practical.\n\ \n\ @related\n\ - [OpenVDB Advect Points|Node:sop/DW_OpenVDBAdvectPoints]\n\ - [OpenVDB Morph Level Set|Node:sop/DW_OpenVDBMorphLevelSet]\n\ - [Node:sop/vdbadvectsdf]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Advect::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Advect(net, name, op); } SOP_OpenVDB_Advect::SOP_OpenVDB_Advect(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// void SOP_OpenVDB_Advect::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; resolveRenamedParm(*obsoleteParms, "lsGroup", "group"); resolveRenamedParm(*obsoleteParms, "densityGroup", "group"); resolveRenamedParm(*obsoleteParms, "advectSpatial", "advectspatial"); resolveRenamedParm(*obsoleteParms, "advectTemporal", "advecttemporal"); resolveRenamedParm(*obsoleteParms, "normSteps", "normsteps"); resolveRenamedParm(*obsoleteParms, "renormSpatial", "renormspatial"); resolveRenamedParm(*obsoleteParms, "renormTemporal", "renormtemporal"); resolveRenamedParm(*obsoleteParms, "velGroup", "velgroup"); hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } //////////////////////////////////////// bool SOP_OpenVDB_Advect::updateParmsFlags() { bool changed = false; const bool respectClass = bool(evalInt("respectclass", 0, 0)); changed |= enableParm("advectspatial", respectClass); changed |= enableParm("advecttemporal", respectClass); changed |= enableParm("normsteps", respectClass); changed |= enableParm("renormspatial", respectClass); changed |= enableParm("renormtemporal", respectClass); return changed; } //////////////////////////////////////// // Utilities namespace { struct AdvectionParms { AdvectionParms() : mGroup(nullptr) , mAdvectSpatial(openvdb::math::UNKNOWN_BIAS) , mRenormSpatial(openvdb::math::UNKNOWN_BIAS) , mAdvectTemporal(openvdb::math::UNKNOWN_TIS) , mRenormTemporal(openvdb::math::UNKNOWN_TIS) , mIntegrator(openvdb::tools::Scheme::SEMI) , mLimiter(openvdb::tools::Scheme::NO_LIMITER) , mNormCount(1) , mSubSteps(1) , mTimeStep(0.0) , mStaggered(false) , mRespectClass(true) { } 
const GA_PrimitiveGroup * mGroup; hvdb::Grid::ConstPtr mVelocityGrid; openvdb::math::BiasedGradientScheme mAdvectSpatial, mRenormSpatial; openvdb::math::TemporalIntegrationScheme mAdvectTemporal, mRenormTemporal; openvdb::tools::Scheme::SemiLagrangian mIntegrator; openvdb::tools::Scheme::Limiter mLimiter; int mNormCount, mSubSteps; float mTimeStep; bool mStaggered, mRespectClass; }; template<class VelocityGridT> class AdvectOp { public: AdvectOp(AdvectionParms& parms, const VelocityGridT& velGrid, hvdb::Interrupter& boss) : mParms(parms) , mVelGrid(velGrid) , mBoss(boss) { } template<typename GridT, typename SamplerT> void process(GridT& grid) { using FieldT = openvdb::tools::DiscreteField<VelocityGridT, SamplerT>; const FieldT field(mVelGrid); openvdb::tools::LevelSetAdvection<GridT, FieldT, hvdb::Interrupter> advection(grid, field, &mBoss); advection.setSpatialScheme(mParms.mAdvectSpatial); advection.setTemporalScheme(mParms.mAdvectTemporal); advection.setTrackerSpatialScheme(mParms.mRenormSpatial); advection.setTrackerTemporalScheme(mParms.mRenormTemporal); advection.setNormCount(mParms.mNormCount); if (mBoss.wasInterrupted()) return; advection.advect(0, mParms.mTimeStep); } template<typename GridT> void operator()(GridT& grid) { if (mBoss.wasInterrupted()) return; if (mParms.mStaggered) process<GridT, openvdb::tools::StaggeredBoxSampler>(grid); else process<GridT, openvdb::tools::BoxSampler>(grid); } private: AdvectOp(const AdvectOp&);// undefined AdvectOp& operator=(const AdvectOp&);// undefined AdvectionParms& mParms; const VelocityGridT& mVelGrid; hvdb::Interrupter& mBoss; }; template<typename VelocityGridT, bool StaggeredVelocity> inline bool processGrids(GU_Detail* gdp, AdvectionParms& parms, hvdb::Interrupter& boss, const std::function<void (const std::string&)>& warningCallback) { using VolumeAdvection = openvdb::tools::VolumeAdvection<VelocityGridT, StaggeredVelocity, hvdb::Interrupter>; using VelocityGridCPtr = typename VelocityGridT::ConstPtr; VelocityGridCPtr velGrid = hvdb::Grid::constGrid<VelocityGridT>(parms.mVelocityGrid); if (!velGrid) return false; AdvectOp<VelocityGridT> advectLevelSet(parms, *velGrid, boss); VolumeAdvection advectVolume(*velGrid, &boss); advectVolume.setIntegrator(parms.mIntegrator); advectVolume.setLimiter(parms.mLimiter); advectVolume.setSubSteps(parms.mSubSteps); std::vector<std::string> skippedGrids, doubleGrids; for (hvdb::VdbPrimIterator it(gdp, parms.mGroup); it; ++it) { if (boss.wasInterrupted()) break; GU_PrimVDB* vdbPrim = *it; if (parms.mRespectClass && vdbPrim->getGrid().getGridClass() == openvdb::GRID_LEVEL_SET) { if (vdbPrim->getStorageType() == UT_VDB_FLOAT) { vdbPrim->makeGridUnique(); auto& grid = UTvdbGridCast<openvdb::FloatGrid>(vdbPrim->getGrid()); advectLevelSet(grid); } //else if (vdbPrim->getStorageType() == UT_VDB_DOUBLE) { // vdbPrim->makeGridUnique(); // auto& grid = UTvdbGridCast<openvdb::DoubleGrid>(vdbPrim->getGrid()); // advectLevelSet(grid); //} else { skippedGrids.push_back(it.getPrimitiveNameOrIndex().toStdString()); } } else { switch (vdbPrim->getStorageType()) { case UT_VDB_FLOAT: { const auto& inGrid = UTvdbGridCast<openvdb::FloatGrid>(vdbPrim->getConstGrid()); auto outGrid = advectVolume.template advect<openvdb::FloatGrid, openvdb::tools::Sampler<1, false>>(inGrid, parms.mTimeStep); hvdb::replaceVdbPrimitive(*gdp, outGrid, *vdbPrim); break; } case UT_VDB_DOUBLE: { const auto& inGrid = UTvdbGridCast<openvdb::DoubleGrid>(vdbPrim->getConstGrid()); auto outGrid = advectVolume.template advect<openvdb::DoubleGrid, 
openvdb::tools::Sampler<1, false>>(inGrid, parms.mTimeStep); hvdb::replaceVdbPrimitive(*gdp, outGrid, *vdbPrim); break; } case UT_VDB_VEC3F: { const auto& inGrid = UTvdbGridCast<openvdb::Vec3SGrid>(vdbPrim->getConstGrid()); auto outGrid = advectVolume.template advect<openvdb::Vec3SGrid, openvdb::tools::Sampler<1, false>>(inGrid, parms.mTimeStep); hvdb::replaceVdbPrimitive(*gdp, outGrid, *vdbPrim); break; } default: skippedGrids.push_back(it.getPrimitiveNameOrIndex().toStdString()); break; } } } if (!skippedGrids.empty() && warningCallback) { std::string s = "The following non-floating-point grids were skipped: " + hboost::algorithm::join(skippedGrids, ", "); warningCallback(s); } return true; } // processGrids() } // anonymous namespace //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Advect::Cache::cookVDBSop(OP_Context& context) { try { const fpreal now = context.getTime(); // Evaluate UI parameters AdvectionParms parms; { parms.mGroup = matchGroup(*gdp, evalStdString("group", now)); parms.mTimeStep = static_cast<float>(evalFloat("timestep", 0, now)); parms.mAdvectSpatial = openvdb::math::stringToBiasedGradientScheme(evalStdString("advectspatial", now)); if (parms.mAdvectSpatial == openvdb::math::UNKNOWN_BIAS) { throw std::runtime_error{"Advect: Unknown biased gradient"}; } parms.mRenormSpatial = openvdb::math::stringToBiasedGradientScheme( evalStdString("renormspatial", now)); if (parms.mRenormSpatial == openvdb::math::UNKNOWN_BIAS) { throw std::runtime_error{"Renorm: Unknown biased gradient"}; } parms.mAdvectTemporal = openvdb::math::stringToTemporalIntegrationScheme( evalStdString("advecttemporal", now)); if (parms.mAdvectTemporal == openvdb::math::UNKNOWN_TIS) { throw std::runtime_error{"Advect: Unknown temporal integration"}; } parms.mRenormTemporal = openvdb::math::stringToTemporalIntegrationScheme( evalStdString("renormtemporal", now)); if (parms.mRenormTemporal == openvdb::math::UNKNOWN_TIS) { throw std::runtime_error{"Renorm: Unknown temporal integration"}; } parms.mNormCount = static_cast<int>(evalInt("normsteps", 0, now)); const GU_Detail* velGeo = inputGeo(1); if (!velGeo) throw std::runtime_error{"Missing velocity grid input"}; hvdb::VdbPrimCIterator it{velGeo, matchGroup(*velGeo, evalStdString("velgroup", now))}; if (it) { if (it->getStorageType() != UT_VDB_VEC3F) { throw std::runtime_error{"Unrecognized velocity grid type"}; } parms.mVelocityGrid = it->getConstGridPtr(); } if (!parms.mVelocityGrid) { throw std::runtime_error{"Missing velocity grid"}; } parms.mStaggered = parms.mVelocityGrid->getGridClass() == openvdb::GRID_STAGGERED; parms.mRespectClass = bool(evalInt("respectclass", 0, now)); // General advection options parms.mSubSteps = static_cast<int>(evalInt("substeps", 0, now)); { const auto str = evalStdString("advection", now); if (str == "semi") { parms.mIntegrator = openvdb::tools::Scheme::SEMI; } else if (str == "mid") { parms.mIntegrator = openvdb::tools::Scheme::MID; } else if (str == "rk3") { parms.mIntegrator = openvdb::tools::Scheme::RK3; } else if (str == "rk4") { parms.mIntegrator = openvdb::tools::Scheme::RK4; } else if (str == "mac") { parms.mIntegrator = openvdb::tools::Scheme::MAC; } else if (str == "bfecc") { parms.mIntegrator = openvdb::tools::Scheme::BFECC; } else { throw std::runtime_error{"Invalid advection scheme"}; } } { const auto str = evalStdString("limiter", now); if (str == "none") { parms.mLimiter = openvdb::tools::Scheme::NO_LIMITER; if (parms.mIntegrator == openvdb::tools::Scheme::MAC) { addWarning(SOP_MESSAGE, "MacCormack 
is unstable without a limiter"); } } else if (str == "clamp") { parms.mLimiter = openvdb::tools::Scheme::CLAMP; } else if (str == "revert") { parms.mLimiter = openvdb::tools::Scheme::REVERT; } else { throw std::runtime_error{"Invalid limiter scheme"}; } } } hvdb::Interrupter boss("Advecting level set"); auto warningCallback = [this](const std::string& s) { this->addWarning(SOP_MESSAGE, s.c_str()); }; if (parms.mStaggered) { processGrids<openvdb::Vec3SGrid, true>(gdp, parms, boss, warningCallback); } else { processGrids<openvdb::Vec3SGrid, false>(gdp, parms, boss, warningCallback); } if (boss.wasInterrupted()) addWarning(SOP_MESSAGE, "Process was interrupted"); boss.end(); } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
25098
C++
37.319084
99
0.625946
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_LOD.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_LOD.cc /// /// @author FX R&D OpenVDB team /// /// @brief Generate one or more levels of a volume mipmap. #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb_houdini/Utils.h> #include <openvdb/tools/MultiResGrid.h> #include <hboost/algorithm/string/join.hpp> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; class SOP_OpenVDB_LOD: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_LOD(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_LOD() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned input) const override { return (input > 0); } class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; protected: bool updateParmsFlags() override; }; //////////////////////////////////////// void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDB grids to be processed.") .setDocumentation( "A subset of the input VDB grids to be processed" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_ORD, "lod", "LOD Mode") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "single", "Single Level", "range", "Level Range", "mipmaps","LOD Pyramid" }) .setDocumentation( "How to build the LOD pyramid\n\n" "Single Level:\n" " Build a single, filtered VDB.\n\n" "Level Range:\n" " Build a series of VDBs of progressively lower resolution\n" " within a given range of scales.\n\n" "LOD Pyramid:\n" " Build a standard pyramid of VDBs of decreasing resolution.\n" " Each level of the pyramid is half the resolution in each\n" " dimension of the previous level, starting with the input VDB.\n")); parms.add(hutil::ParmFactory(PRM_FLT_J, "level", "Level") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_UI, 10.0) .setTooltip("Specify which single level to produce.\n" "Level 0, the highest-resolution level, is the input VDB.")); { const std::vector<fpreal> defaultRange{ fpreal(0.0), // start fpreal(2.0), // end fpreal(1.0) // step }; parms.add(hutil::ParmFactory(PRM_FLT_J, "range", "Range") .setDefault(defaultRange) .setVectorSize(3) .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_UI, 10.0) .setTooltip( "In Level Range mode, specify the (inclusive) starting and ending levels" " and the level step. Level 0, the highest-resolution level, is the input VDB;" " fractional levels are allowed.")); } parms.add(hutil::ParmFactory(PRM_INT_J, "count", "Count") .setDefault(PRMtwoDefaults) .setRange(PRM_RANGE_RESTRICTED, 2, PRM_RANGE_UI, 10) .setTooltip( "In LOD Pyramid mode, specify the number of pyramid levels to generate." 
" Each level is half the resolution of the previous level.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "reuse", "Preserve Grid Names") .setDefault(PRMzeroDefaults) .setTooltip( "In Single Level mode, give the output VDB the same name as the input VDB.")); hvdb::OpenVDBOpFactory("VDB LOD", SOP_OpenVDB_LOD::factory, parms, *table) .addInput("VDBs") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_LOD::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Build an LOD pyramid from a VDB volume.\"\"\"\n\ \n\ @overview\n\ \n\ This node creates filtered versions of a VDB volume at multiple resolutions,\n\ providing mipmap-like levels of detail.\n\ The low-resolution versions can be used both as thumbnails for fast processing\n\ and for constant-time, filtered lookups over large areas of a volume.\n\ \n\ @related\n\ - [OpenVDB Resample|Node:sop/DW_OpenVDBResample]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } bool SOP_OpenVDB_LOD::updateParmsFlags() { bool changed = false; const auto lodMode = evalInt("lod", 0, 0); changed |= enableParm("level", lodMode == 0); changed |= enableParm("reuse", lodMode == 0); changed |= enableParm("range", lodMode == 1); changed |= enableParm("count", lodMode == 2); return changed; } //////////////////////////////////////// OP_Node* SOP_OpenVDB_LOD::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_LOD(net, name, op); } SOP_OpenVDB_LOD::SOP_OpenVDB_LOD(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// namespace { template<openvdb::Index Order> struct MultiResGridFractionalOp { MultiResGridFractionalOp(float f) : level(f) {} template<typename GridType> void operator()(const GridType& grid) { if ( level <= 0.0f ) { outputGrid = typename GridType::Ptr( new GridType(grid) ); } else { const size_t levels = openvdb::math::Ceil(level) + 1; using TreeT = typename GridType::TreeType; openvdb::tools::MultiResGrid<TreeT> mrg( levels, grid ); outputGrid = mrg.template createGrid<Order>( level ); } } const float level; hvdb::GridPtr outputGrid; }; template<openvdb::Index Order> struct MultiResGridRangeOp { MultiResGridRangeOp(float start_, float end_, float step_, hvdb::Interrupter& boss_) : start(start_), end(end_), step(step_), outputGrids(), boss(&boss_) {} template<typename GridType> void operator()(const GridType& grid) { if ( end > 0.0f ) { const size_t levels = openvdb::math::Ceil(end) + 1; using TreeT = typename GridType::TreeType; openvdb::tools::MultiResGrid<TreeT> mrg( levels, grid ); // inclusive range for (float level = start; !(level > end); level += step) { if (boss->wasInterrupted()) break; outputGrids.push_back( mrg.template createGrid<Order>( level ) ); } } } const float start, end, step; std::vector<hvdb::GridPtr> outputGrids; hvdb::Interrupter * const boss; }; struct MultiResGridIntegerOp { MultiResGridIntegerOp(size_t n) : levels(n) {} template<typename GridType> void operator()(const GridType& grid) { using TreeT = typename GridType::TreeType; openvdb::tools::MultiResGrid<TreeT> mrg( levels, grid ); outputGrids = mrg.grids(); } const size_t levels; openvdb::GridPtrVecPtr outputGrids; }; inline bool isValidRange(float start, float end, float step) { if (start < 0.0f || !(step > 0.0f) || end < 0.0f) { return false; } return !(start > end); } }//unnamed namespace //////////////////////////////////////// OP_ERROR 
SOP_OpenVDB_LOD::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); // Get the group of grids to process. const GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", time)); std::vector<std::string> skipped; hvdb::Interrupter boss("Creating VDB LoD pyramid"); GA_RWHandleS name_h(gdp, GA_ATTRIB_PRIMITIVE, "name"); const auto lodMode = evalInt("lod", 0, 0); if (lodMode == 0) { const bool reuseName = evalInt("reuse", 0, 0) > 0; MultiResGridFractionalOp<1> op( float(evalFloat("level", 0, time)) ); for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (boss.wasInterrupted()) return error(); if (name_h.isValid()) it->getGrid().setName(static_cast<const char *>(name_h.get(it->getMapOffset()))); if (!it->getGrid().transform().isLinear()) { skipped.push_back(it->getGrid().getName()); continue; } hvdb::GEOvdbApply<hvdb::VolumeGridTypes>(**it, op); if (boss.wasInterrupted()) return error(); if (reuseName) op.outputGrid->setName( it->getGrid().getName() ); hvdb::createVdbPrimitive(*gdp, op.outputGrid); gdp->destroyPrimitiveOffset(it->getMapOffset(), /*and_points=*/true); } } else if (lodMode == 1) { const float start = float(evalFloat("range", 0, time)); const float end = float(evalFloat("range", 1, time)); const float step = float(evalFloat("range", 2, time)); if (!isValidRange(start, end, step)) { addError(SOP_MESSAGE, "Invalid range, make sure that " "start <= end and the step size is a positive number."); return error(); } MultiResGridRangeOp<1> op( start, end, step, boss ); for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (boss.wasInterrupted()) return error(); if (name_h.isValid()) it->getGrid().setName(static_cast<const char *>(name_h.get(it->getMapOffset()))); if (!it->getGrid().transform().isLinear()) { skipped.push_back(it->getGrid().getName()); continue; } hvdb::GEOvdbApply<hvdb::VolumeGridTypes>(**it, op); if (boss.wasInterrupted()) return error(); for (size_t i=0; i< op.outputGrids.size(); ++i) { hvdb::createVdbPrimitive(*gdp, op.outputGrids[i]); } gdp->destroyPrimitiveOffset(it->getMapOffset(), /*and_points=*/true); } } else if (lodMode == 2) { MultiResGridIntegerOp op( evalInt("count", 0, time) ); for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (boss.wasInterrupted()) return error(); if (name_h.isValid()) it->getGrid().setName(static_cast<const char *>(name_h.get(it->getMapOffset()))); if (!it->getGrid().transform().isLinear()) { skipped.push_back(it->getGrid().getName()); continue; } hvdb::GEOvdbApply<hvdb::VolumeGridTypes>(**it, op); if (boss.wasInterrupted()) return error(); for (size_t i=0; i< op.outputGrids->size(); ++i) { hvdb::createVdbPrimitive(*gdp, op.outputGrids->at(i)); } gdp->destroyPrimitiveOffset(it->getMapOffset(), /*and_points=*/true); } } else { addError(SOP_MESSAGE, "Invalid LOD option."); } if (!skipped.empty()) { addWarning(SOP_MESSAGE, ("Unable to process grid(s): " + hboost::algorithm::join(skipped, ", ")).c_str()); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
11730
C++
30.199468
101
0.574339
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/GEO_PrimVDB.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /* * Copyright (c) Side Effects Software Inc. * * Produced by: * Side Effects Software Inc * 477 Richmond Street West * Toronto, Ontario * Canada M5V 3E7 * 416-504-9876 * * NAME: GEO_PrimVDB.C ( GEO Library, C++) * * COMMENTS: Base class for vdbs. */ #if defined(SESI_OPENVDB) || defined(SESI_OPENVDB_PRIM) #include "GEO_PrimVDB.h" #include <SYS/SYS_AtomicPtr.h> #include <SYS/SYS_Math.h> #include <SYS/SYS_MathCbrt.h> #include <SYS/SYS_SharedMemory.h> #include <UT/UT_Debug.h> #include <UT/UT_Defines.h> #include <UT/UT_EnvControl.h> #include <UT/UT_FSATable.h> #include <UT/UT_IStream.h> #include <UT/UT_JSONParser.h> #include <UT/UT_JSONValue.h> #include <UT/UT_JSONWriter.h> #include <UT/UT_Matrix.h> #include <UT/UT_MatrixSolver.h> #include <UT/UT_MemoryCounter.h> #include <UT/UT_SharedMemoryManager.h> #include <UT/UT_SharedPtr.h> #include <UT/UT_SparseArray.h> #include <UT/UT_StackTrace.h> #include <UT/UT_SysClone.h> #include <UT/UT_UniquePtr.h> #include "UT_VDBUtils.h" #include <UT/UT_Vector.h> #include <UT/UT_XformOrder.h> #include <GA/GA_AttributeRefMap.h> #include <GA/GA_AttributeRefMapDestHandle.h> #include <GA/GA_Defragment.h> #include <GA/GA_ElementWrangler.h> #include <GA/GA_IntrinsicMacros.h> #include <GA/GA_LoadMap.h> #include <GA/GA_MergeMap.h> #include <GA/GA_PrimitiveJSON.h> #include <GA/GA_RangeMemberQuery.h> #include <GA/GA_SaveMap.h> #include <GA/GA_WeightedSum.h> #include <GA/GA_WorkVertexBuffer.h> #include <GEO/GEO_AttributeHandleList.h> #include <GEO/GEO_Detail.h> #include <GEO/GEO_PrimType.h> #include <GEO/GEO_PrimVolume.h> #include <GEO/GEO_VolumeOptions.h> #include <GEO/GEO_WorkVertexBuffer.h> #include <openvdb/io/Stream.h> #include <openvdb/math/Maps.h> #include <openvdb/tools/Composite.h> #include <openvdb/tools/Interpolation.h> #include <openvdb/tools/LevelSetMeasure.h> #include <openvdb/tools/Statistics.h> #include <openvdb/tools/VectorTransformer.h> #include <iostream> #include <stdexcept> using namespace UT::Literal; static const UT_StringHolder theKWVertex = "vertex"_sh; static const UT_StringHolder theKWVDB = "vdb"_sh; static const UT_StringHolder theKWVDBShm = "vdbshm"_sh; static const UT_StringHolder theKWVDBVis = "vdbvis"_sh; GEO_PrimVDB::UniqueId GEO_PrimVDB::nextUniqueId() { static AtomicUniqueId theUniqueId; return static_cast<UniqueId>(theUniqueId.add(1)); } GEO_PrimVDB::GEO_PrimVDB(GEO_Detail *d, GA_Offset offset) : GEO_Primitive(d, offset) , myGridAccessor() , myVis() , myUniqueId(GEO_PrimVDB::nextUniqueId()) , myTreeUniqueId(GEO_PrimVDB::nextUniqueId()) , myMetadataUniqueId(GEO_PrimVDB::nextUniqueId()) , myTransformUniqueId(GEO_PrimVDB::nextUniqueId()) { } void GEO_PrimVDB::stashed(bool beingstashed, GA_Offset offset) { // NB: Base class must be unstashed before we can call allocateVertex(). GEO_Primitive::stashed(beingstashed, offset); if (!beingstashed) { // Reset to state as if freshly constructed myVis = GEO_VolumeOptions(); myUniqueId.relaxedStore(GEO_PrimVDB::nextUniqueId()); myTreeUniqueId.relaxedStore(GEO_PrimVDB::nextUniqueId()); myMetadataUniqueId.relaxedStore(GEO_PrimVDB::nextUniqueId()); myTransformUniqueId.relaxedStore(GEO_PrimVDB::nextUniqueId()); } else { // Free the grid and transform. // This also makes sure that myGridAccessor will be // as if freshly constructed when unstashing. 
myGridAccessor.clear(); } // Set our internal state to default myVis = GEO_VolumeOptions(GEO_VOLUMEVIS_SMOKE, /*iso*/0.0, /*density*/1.0, GEO_VOLUMEVISLOD_FULL); } bool GEO_PrimVDB::evaluatePointRefMap(GA_Offset result_vtx, GA_AttributeRefMap &map, fpreal /*u*/, fpreal /*v*/, uint /*du*/, uint /*dv*/) const { map.copyValue(GA_ATTRIB_VERTEX, result_vtx, GA_ATTRIB_VERTEX, getVertexOffset(0)); return true; } // Houdini assumes that the depth scaling of the frustum is being done in the // linear part of the NonlinearFrustumMap. This method ensures that if the // grid has a frustum depth not equal to 1, then it returns an equivalent map // which does. static openvdb::math::NonlinearFrustumMap::ConstPtr geoStandardFrustumMapPtr(const GEO_PrimVDB &vdb) { using namespace openvdb::math; using openvdb::Vec3d; const Transform &transform = vdb.getGrid().transform(); UT_ASSERT(transform.baseMap()->isType<NonlinearFrustumMap>()); NonlinearFrustumMap::ConstPtr frustum_map = transform.constMap<NonlinearFrustumMap>(); // If the depth is already 1, then just return the original OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN if (frustum_map->getDepth() == 1.0) return frustum_map; OPENVDB_NO_FP_EQUALITY_WARNING_END AffineMap secondMap = frustum_map->secondMap(); secondMap.accumPreScale(Vec3d(1, 1, frustum_map->getDepth())); return NonlinearFrustumMap::ConstPtr( new NonlinearFrustumMap(frustum_map->getBBox(), frustum_map->getTaper(), /*depth*/1.0, secondMap.copy())); } // The returned space's fromVoxelSpace() method will convert 0-1 // coordinates over the bbox extents to world space (and vice versa for // toVoxelSpace()). GEO_PrimVolumeXform GEO_PrimVDB::getSpaceTransform(const UT_BoundingBoxD &bbox) const { using namespace openvdb; using namespace openvdb::math; using openvdb::Vec3d; using openvdb::Mat4d; MapBase::ConstPtr base_map = getGrid().transform().baseMap(); BBoxd active_bbox(UTvdbConvert(bbox.minvec()), UTvdbConvert(bbox.maxvec())); UT_Matrix4D transform(1.0); // identity fpreal new_taper(1.0); // no taper default // If the active_bbox is empty(), we do not want to produce a singular // matrix. if (active_bbox.empty()) { if (base_map->isType<NonlinearFrustumMap>()) { // Ideally, we use getFrustumBounds() here but it returns the // wrong type. const NonlinearFrustumMap& frustum_map = *getGrid().transform().constMap<NonlinearFrustumMap>(); active_bbox = frustum_map.getBBox(); active_bbox.translate(Vec3d(+0.5)); } else { // Use a single voxel from index origin to act as a pass-through active_bbox = BBoxd(Vec3d(0.0), 1.0); } } // Shift the active_bbox by half a voxel to account for the fact that // UT_VoxelArray's index coordinates are cell-centred while grid indices // are cell edge-aligned. active_bbox.translate(Vec3d(-0.5)); if (base_map->isType<NonlinearFrustumMap>()) { // NOTES: // - VDB's nonlinear frustum goes from index-space to world-space in // two steps: // 1. From index-space to NDC space where we have [-0.5,+0.5] XY // on the Z=0 plane, tapered outwards to to the Z=1 plane // (where depth=1). // 2. Then the base_map->secondMap() is applied to convert it // into world-space. // - Our goal is to come up with an equivalent transform which goes // from [-1,+1] unit-space to world-space, matching GEO_PrimVDB's // node-centred samples with GEO_PrimVolume's cell-centered samples. NonlinearFrustumMap::ConstPtr map_ptr = geoStandardFrustumMapPtr(*this); const NonlinearFrustumMap& frustum_map = *map_ptr; // Math below only handles NonlinearFrustumMap's with a depth of 1. 
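        // Illustrative sketch of the taper conversion performed below: the math
        // re-expresses VDB's frustum taper as Houdini's near/far ratio over the
        // active box. For example, with a frustum taper of 0.5 and an active box
        // spanning z = 0.25 to z = 0.75 of the normalized frustum depth,
        // gamma = 1/0.5 - 1 = 1, mag_min = 1 + 1*0.25 = 1.25,
        // mag_max = 1 + 1*0.75 = 1.75, and the resulting Houdini-style taper is
        // 1.25/1.75 ~= 0.714.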
UT_ASSERT(frustum_map.getDepth() == 1); BBoxd frustum_bbox = frustum_map.getBBox(); UT_Vector3D frustum_size(UTvdbConvert(frustum_bbox.extents())); UT_Vector3D inv_frustum_size = 1.0 / frustum_size; // Find active_bbox in the 1 by 1 -> taper by taper frustum UT_Vector3D active_size(UTvdbConvert(active_bbox.extents())); UT_Vector3D offset_uniform = UTvdbConvert(active_bbox.min() - frustum_bbox.min()) * inv_frustum_size; UT_Vector3 scale = active_size * inv_frustum_size; // Compute the z coordinates of 'active_bbox' in the // 0-1 space (normed to the frustum size) fpreal z_min = offset_uniform.z(); fpreal z_max = offset_uniform.z() + scale.z(); // We need a new taper since active_bbox might have a different // near/far plane ratio. The mag values are calculated using VDB's // taper function but then we divide min by max because Houdini's taper // ratio is inversed. fpreal frustum_taper = frustum_map.getTaper(); fpreal gamma = 1.0/frustum_taper - 1.0; fpreal mag_min = 1 + gamma * z_min; fpreal mag_max = 1 + gamma * z_max; new_taper = mag_min / mag_max; // xform will go from 0-1 voxel space to the tapered, near=1x1 frustum UT_Matrix4D xform(1); xform.scale(mag_min, mag_min, 1.0); xform.scale(0.5, 0.5, 0.5); xform.scale(scale.x(), scale.y(), scale.z()); // Scale our correctly tapered box // Offset the correctly scaled and tapered box into the right xy // position. OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN if (gamma != 0.0) OPENVDB_NO_FP_EQUALITY_WARNING_END { // Scale by the inverse of the taper since NonlinearFrustumMap // creates tapers in the -z direction (a positive taper will // increase the ratio of the near / far) but only scales the far // face (effectively, the +z face is scaled by 1 / taper and // the -z face is kept at 1.0) xform.scale(1.0 / new_taper, 1.0 / new_taper, 1.0); // The distance from the near plane that the tapered frustum sides // meet ie. the position of the z-plane that gets mapped to 0 in // the taper map fpreal z_i = 1.0 / gamma; xform.translate(0, 0, z_i + z_min + 0.5 * scale.z()); // Compute the shear: it is the offset on the near plane of the // current frustum to where we want it to be UT_Vector3D frustum_center(0.5*frustum_size); UT_Vector3D active_center(0.5*active_size); // The current active_bbox position UT_Vector3D bbox_offset = frustum_center - active_center; // Compute the offset to the real position. We add back half-voxel // so that the reference base is at the old min UT_Vector3D shear = UTvdbConvert(active_bbox.min() + Vec3d(0.5)) - bbox_offset; shear *= inv_frustum_size; shear /= z_i; xform.shear(0, shear.x(), shear.y()); // Translate ourselves back so that the tapered plane of // frustum_bbox is at 0 xform.translate(0, 0, -z_i); } else { // Translate bottom corner of box to (0,0,0) xform.translate(-0.5, -0.5, 0.0); xform.translate(0.5*scale.x(), 0.5*scale.y(), 0.5*scale.z()); xform.translate(offset_uniform.x(), offset_uniform.y(), offset_uniform.z()); } // `transform` now brings us from a tapered (1*y/x) box to world space, // We want to go from a tapered, near=1x1 frustum to world space, so // prescale by the aspect fpreal aspect = frustum_bbox.extents().y() / frustum_bbox.extents().x(); xform.scale(1.0, aspect, 1.0); Mat4d mat4 = frustum_map.secondMap().getMat4(); transform = xform * UTvdbConvert(mat4); } else { // NOTES: // - VDB's grid transform goes from index-space to world-space. 
// - Our goal is to come up with an equivalent transform which goes // from [-1,+1] unit-space to world-space, matching GEO_PrimVDB's // node-centred samples with GEO_PrimVolume's cell-centered samples. // (NOTE: fromVoxelSpace() converts from [0,+1] to [-1,+1]) // Create transform which converts [-1,+1] unit-space to [0,+1] transform.translate(1.0, 1.0, 1.0); transform.scale(0.5, 0.5, 0.5); // Go from the [0,1] volume space, to [min,max] where // min and max are in the vdb index space. Note that UT_VoxelArray // doesn't support negative coordinates which is why we need to shift // the index origin to the bbox min. transform.scale(active_bbox.extents().x(), active_bbox.extents().y(), active_bbox.extents().z()); transform.translate(active_bbox.min().x(), active_bbox.min().y(), active_bbox.min().z()); // Post-multiply by the affine map which converts index to world space transform = transform * UTvdbConvert(base_map->getAffineMap()->getMat4()); } UT_Vector3 translate; transform.getTranslates(translate); GEO_PrimVolumeXform result; result.myXform = transform; result.myCenter = translate; OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN result.myHasTaper = (new_taper != 1.0); OPENVDB_NO_FP_EQUALITY_WARNING_END transform.invert(); result.myInverseXform = transform; result.myTaperX = new_taper; result.myTaperY = new_taper; return result; } // Return a GEO_PrimVolumeXform which maps [-0.5,+0.5] Houdini voxel space // coordinates over the VDB's active voxel bbox into world space. GEO_PrimVolumeXform GEO_PrimVDB::getSpaceTransform() const { const openvdb::CoordBBox bbox = getGrid().evalActiveVoxelBoundingBox(); return getSpaceTransform(UTvdbConvert(bbox)); } bool GEO_PrimVDB::conditionMatrix(UT_Matrix4D &mat4) { // This tolerance is just one factor larger than what // AffineMap::updateAcceleration() uses to ensure that we never trigger the // exception. const double tol = 4.0 * openvdb::math::Tolerance<double>::value(); const double min_diag = SYScbrt(tol); if (!SYSequalZero(mat4.determinant3(), tol)) { // openvdb::math::simplify uses openvdb::math::isApproxEqual to detect // uniform scaling, which has a more stringent tolerance than SYSisEqual. // As a result we often have uniform voxel / axis aligned Volumes being // converted to VDBs with Maps that are simplified to ScaleTranslate // rather than UniformScaleTranslate. The latter is more correct, plus // some operations (e.g. LevelSetMorph) don't work with non-Uniform // scales. So we unify the diagonal if the diagonals are SYSisEqual, // but not exactly equal. if (SYSisEqual(mat4(0, 0), mat4(1, 1)) && SYSisEqual(mat4(0, 0), mat4(2, 2)) && !(mat4(0, 0) == mat4(1,1) && mat4(0, 0) == mat4(2,2))) { // Unify to mat(0, 0) like openvdb::math::simplify. mat4(1, 1) = mat4(2, 2) = mat4(0, 0); return true; } if (SYSalmostEqual((float)mat4(0, 0), (float)mat4(1, 1)) && SYSalmostEqual((float)mat4(0, 0), (float)mat4(2, 2)) && !(mat4(0, 0) == mat4(1,1) && mat4(0, 0) == mat4(2,2))) { // Unify to mat(0, 0) like openvdb::math::simplify. 
mat4(1, 1) = mat4(2, 2) = mat4(0, 0); return true; } return false; } UT_MatrixSolverD solver; UT_Matrix3D mat3(mat4); const int N = 3; UT_MatrixD m(1,N, 1,N), v(1,N, 1,N), diag(1,N, 1,N), tmp(1,N, 1,N); UT_VectorD w(1,N); m.setSubmatrix3(1, 1, mat3); if (!solver.SVDDecomp(m, w, v)) { // Give up and return a scale matrix as small as possible mat4.identity(); mat4.scale(min_diag, min_diag, min_diag); } else { v.transpose(tmp); v = tmp; diag.makeIdentity(); for (int i = 1; i <= N; i++) diag(i,i) = SYSmax(min_diag, w(i)); m.postMult(diag, tmp); tmp.postMult(v, m); m.getSubmatrix3(mat3, 1, 1); mat4 = mat3; } return true; } // All AffineMap creation must to through this to avoid crashes when passing // singular matrices into OpenVDB template<typename T> static openvdb::SharedPtr<T> geoCreateAffineMap(const UT_Matrix4D& mat4) { using namespace openvdb::math; openvdb::SharedPtr<T> transform; UT_Matrix4D new_mat4(mat4); (void) GEO_PrimVDB::conditionMatrix(new_mat4); try { transform.reset(new AffineMap(UTvdbConvert(new_mat4))); } catch (openvdb::ArithmeticError &) { // Fall back to trying to clear the last column first, since // VDB seems to not like that, instead of falling back to identity. new_mat4 = mat4; new_mat4[0][3] = 0; new_mat4[1][3] = 0; new_mat4[2][3] = 0; new_mat4[3][3] = 1; (void) GEO_PrimVDB::conditionMatrix(new_mat4); try { transform.reset(new AffineMap(UTvdbConvert(new_mat4))); } catch (openvdb::ArithmeticError &) { UT_ASSERT(!"Failed to create affine map"); transform.reset(new AffineMap()); } } return transform; } // All calls to createLinearTransform with a matrix4 must to through this to // avoid crashes when passing singular matrices into OpenVDB static openvdb::math::Transform::Ptr geoCreateLinearTransform(const UT_Matrix4D& mat4) { using namespace openvdb::math; return Transform::Ptr(new Transform(geoCreateAffineMap<MapBase>(mat4))); } void GEO_PrimVDB::setSpaceTransform( const GEO_PrimVolumeXform &space, const UT_Vector3R &resolution, bool force_taper) { using namespace openvdb; using namespace openvdb::math; using openvdb::Vec3d; // VDB's nonlinear frustum goes from index-space to world-space in // two steps: // 1. From index-space to NDC space where we have [-0.5,+0.5] XY // on the Z=0 plane, tapered outwards to to the Z=1 plane // (where depth=1). // 2. Then the base_map->secondMap() is applied to convert it // into world-space. // On the other hand, 'space' converts from [-1,+1] space over the given // resolution into world-space. // // Our goal is to come up with an equivalent NonlinearTransformMap of // 'space' which converts from index space to world-space, matching // GEO_PrimVDB's node-centred samples with GEO_PrimVolume's cell-centered // samples. Transform::Ptr xform; if (force_taper || space.myHasTaper) { // VDB only supports a single taper value so use average of the two fpreal taper = 0.5*(space.myTaperX + space.myTaperY); // Create a matrix which goes from vdb's post-taper XY(-0.5,+0.5) space // to [-1,1] space so that we can post-multiply by space's transform to // get into world-space. // // NonlinearFrustumMap use's 1/taper as its taper value, going from // Z=0 to Z=1. So we first scale it by the taper to undo this. 
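        // Summary of the steps below, in the order they are applied: undo VDB's
        // 1/taper scaling, correct for the XY aspect ratio, expand
        // XY(-0.5,+0.5)/Z(0,1) to a [-1,+1] cube, apply space's linear transform
        // and centre, then wrap the result in a NonlinearFrustumMap over a bbox
        // shifted by -0.5 voxel so Houdini's cell-centred samples line up with
        // VDB's node-centred ones.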
UT_Matrix4D transform(1.0); transform.scale(taper, taper, 1.0); // Account for aspect ratio transform.scale(1.0, resolution.x() / resolution.y(), 1.0); // We now go from XY(-0.5,+0.5)/Z(0,1) to XY(-1,+1)/Z(2) transform.scale(2.0, 2.0, 2.0); // Translate into [-1,+1] on all axes by centering the Z axis transform.translate(0.0, 0.0, -1.0); // Now apply the space's linear transform UT_Matrix4D linear; linear = space.myXform; // convert UT_Matrix3 to UT_Matrix4 transform *= linear; transform.translate( space.myCenter.x(), space.myCenter.y(), space.myCenter.z()); // In order to get VDB to match Houdini, we create a frustum map using // Houdini's bbox, so we offset by -0.5 voxel in order taper the // Houdini bbox in VDB index space. This allows us to align Houdini's // cell-centered samples with VDB node-centered ones. BBoxd bbox(Vec3d(0.0), UTvdbConvert(resolution)); bbox.translate(Vec3d(-0.5)); // Build a NonlinearFrustumMap from this MapBase::Ptr affine_map(geoCreateAffineMap<MapBase>(transform)); xform.reset(new Transform(MapBase::Ptr( new NonlinearFrustumMap(bbox, taper, /*depth*/1.0, affine_map)))); } else // optimize for a linear transform if we have no taper { // NOTES: // - Houdini's transform goes from [-1,+1] unit-space to world-space. // - VDB's grid transform goes from index-space to world-space. UT_Matrix4D matx(/*identity*/1.0); UT_Matrix4D mult; // UT_VoxelArray's index coordinates are cell-centred while grid // indices are cell edge-aligned. So first offset the VDB indices by // +0.5 voxel to convert them into Houdini indices. matx.translate(0.5, 0.5, 0.5); // Now convert the indices from [0,dim] to [-1,+1] matx.scale(2.0/resolution(0), 2.0/resolution(1), 2.0/resolution(2)); matx.translate(-1.0, -1.0, -1.0); // Post-multiply with Houdini transform to get result that converts // into world-space. mult = space.myXform; matx *= mult; matx.translate(space.myCenter(0), space.myCenter(1), space.myCenter(2)); // Create a linear transform using the matrix xform = geoCreateLinearTransform(matx); } myGridAccessor.setTransform(*xform, *this); } // This will give you the a GEO_PrimVolumeXform. Given an index, it will // compute the world space position of that index. GEO_PrimVolumeXform GEO_PrimVDB::getIndexSpaceTransform() const { using namespace openvdb; using namespace openvdb::math; using openvdb::Vec3d; using openvdb::Mat4d; // This taper function follows from the conversion code in // GEO_PrimVolume::fromVoxelSpace() until until myXform/myCenter is // applied. It has been simplified somewhat, and uses the definition that // gamma = taper - 1. struct Local { static fpreal taper(fpreal x, fpreal z, fpreal gamma) { return 2 * (x - 0.5) * (1 + gamma * (1 - z)); } }; fpreal new_taper = 1.0; UT_Matrix4D transform(1.0); // identity MapBase::ConstPtr base_map = getGrid().transform().baseMap(); if (base_map->isType<NonlinearFrustumMap>()) { // NOTES: // - VDB's nonlinear frustum goes from index-space to world-space in // two steps: // 1. From index-space to NDC space where we have [-0.5,+0.5] XY // on the Z=0 plane, tapered outwards to to the Z=1 plane // (where depth=1). // 2. Then the base_map->secondMap() is applied to convert it // into world-space. // - Our goal is to come up with an equivalent GEO_PrimVolumeXform // which goes from index-space to world-space, matching GEO_PrimVDB's // node-centred samples with GEO_PrimVolume's cell-centered samples. // - The gotcha here is that callers use fromVoxelSpace() which will // first do a mapping of [0,1] to [-1,+1] which we need to undo. 
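        // Illustrative sanity check of the taper inversion used below: with
        // taper t = 0.5, z_min = 0.25 and z_max = 0.75,
        //   new_gamma = (1 - 0.5) / (0.5*(1 - 0.75) - (1 - 0.25)) = -0.8,
        // so new_taper = 0.2; substituting g = -0.8 back into
        // t = (1 + g(1 - a)) / (1 + g(1 - b)) gives (1 - 0.6)/(1 - 0.2) = 0.5,
        // recovering the original taper.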
NonlinearFrustumMap::ConstPtr map_ptr = geoStandardFrustumMapPtr(*this); const NonlinearFrustumMap& frustum_map = *map_ptr; // Math below only handles NonlinearFrustumMap's with a depth of 1. UT_ASSERT(frustum_map.getDepth() == 1); fpreal taper = frustum_map.getTaper(); // We need to create a taper value for fromVoxelSpace()'s incoming // Houdini index space coordinate, so the bbox we want to taper with is // actually the Houdini index bbox. UT_BoundingBox bbox; getFrustumBounds(bbox); fpreal x = bbox.xsize(); fpreal y = bbox.ysize(); fpreal z = bbox.zsize(); Vec3d real_min(bbox.xmin(), bbox.ymin(), bbox.zmin()); Vec3d real_max(bbox.xmax(), bbox.ymax(), bbox.zmax()); // Compute a new taper based on the expected ratio of these two // z positions fpreal z_min = real_min.z(); fpreal z_max = real_max.z(); // // If t = (1+g(1-a))/(1+g(1-b)) then g = (1-t)/(t(1-b) - (1-a)) // where t = taper; g = new_taper - 1, a = z_min, b = z_max; // fpreal new_gamma = (1 - taper) / (taper * (1 - z_max) - (1 - z_min)); new_taper = new_gamma + 1; // Since we are tapering the index space, the taper map adds a // scale and a shear so we find these and invert them fpreal x_max_pos = Local::taper(real_max.x(), z_max, new_gamma); fpreal x_min_pos = Local::taper(real_min.x(), z_max, new_gamma); // Now, move x_max_pos = -x_min_pos with a shear fpreal x_scale = x_max_pos - x_min_pos; fpreal shear_x = 0.5 * x_scale - x_max_pos; // Do the same for y fpreal y_max_pos = Local::taper(real_max.y(), z_max, new_gamma); fpreal y_min_pos = Local::taper(real_min.y(), z_max, new_gamma); fpreal y_scale = y_max_pos - y_min_pos; fpreal shear_y = 0.5 * y_scale - y_max_pos; transform.translate(0, 0, -2*(z_min - 0.5)); // Scale z so that our frustum depth range is 0-1 transform.scale(1, 1, 0.5/z); // Apply the shear OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN if (taper != 1.0) OPENVDB_NO_FP_EQUALITY_WARNING_END { fpreal z_i = 1.0 / (taper - 1); transform.translate(0, 0, -z_i-1.0); transform.shear(0, -shear_x / z_i, -shear_y / z_i); transform.translate(0, 0, z_i+1.0); } else { transform.translate(shear_x, shear_y, 0.0); } transform.scale(1.0/x_scale, 1.0/y_scale, 1.0); // Scale by 1/taper to convert taper definitions transform.scale(1.0 / taper, 1.0 / taper, 1.0); // Account for aspect ratio transform.scale(1, y/x, 1); Mat4d mat4 = frustum_map.secondMap().getMat4(); transform *= UTvdbConvert(mat4); } else { // We only deal with nonlinear maps that are frustum maps UT_ASSERT(base_map->isLinear() && "Found unexpected nonlinear MapBase."); // Since VDB's transform is already from index-space to world-space, we // just need to undo the [0,1] -> [-1,+1] mapping that fromVoxelSpace() // does before transforming by myXform/myCenter. 
The math is thus: // scale(1/2)*translate(0.5) // But we also want to shift VDB's node-centred samples to match // GEO_PrimVolume's cell-centered ones so we want: // scale(1/2)*translate(0.5)*translate(-0.5) // This reduces down to just scale(1/2) // transform.scale(0.5, 0.5, 0.5); transform *= UTvdbConvert(base_map->getAffineMap()->getMat4()); } GEO_PrimVolumeXform result; result.myXform = transform; transform.getTranslates(result.myCenter); OPENVDB_NO_FP_EQUALITY_WARNING_BEGIN result.myHasTaper = (new_taper != 1.0); OPENVDB_NO_FP_EQUALITY_WARNING_END transform.invert(); result.myInverseXform = transform; result.myTaperX = new_taper; result.myTaperY = new_taper; return result; } bool GEO_PrimVDB::isSDF() const { if (getGrid().getGridClass() == openvdb::GRID_LEVEL_SET) return true; return false; } fpreal GEO_PrimVDB::getTaper() const { return getSpaceTransform().myTaperX; } void GEO_PrimVDB::reverse() { } UT_Vector3 GEO_PrimVDB::computeNormal() const { return UT_Vector3(0, 0, 0); } template <typename GridType> static void geo_calcVolume(const GridType &grid, fpreal &volume) { bool calculated = false; if (grid.getGridClass() == openvdb::GRID_LEVEL_SET) { try { volume = openvdb::tools::levelSetVolume(grid); calculated = true; } catch (std::exception& /*e*/) { // do nothing } } // Simply account for the total number of active voxels if (!calculated) { const openvdb::Vec3d size = grid.voxelSize(); volume = size[0] * size[1] * size[2] * grid.activeVoxelCount(); } } fpreal GEO_PrimVDB::calcVolume(const UT_Vector3 &) const { fpreal volume = 0; UTvdbCallAllTopology(getStorageType(), geo_calcVolume, getGrid(), volume); return volume; } template <typename GridType> static void geo_calcArea(const GridType &grid, fpreal &area) { bool calculated = false; if (grid.getGridClass() == openvdb::GRID_LEVEL_SET) { try { area = openvdb::tools::levelSetArea(grid); calculated = true; } catch (std::exception& /*e*/) { // do nothing } } if (!calculated) { using LeafIter = typename GridType::TreeType::LeafCIter; using VoxelIter = typename GridType::TreeType::LeafNodeType::ValueOnCIter; using openvdb::Coord; const Coord normals[] = {Coord(0,0,-1), Coord(0,0,1), Coord(-1,0,0), Coord(1,0,0), Coord(0,-1,0), Coord(0,1,0)}; // NOTE: we assume rectangular prism voxels openvdb::Vec3d voxel_size = grid.voxelSize(); const fpreal areas[] = {fpreal(voxel_size.x() * voxel_size.y()), fpreal(voxel_size.x() * voxel_size.y()), fpreal(voxel_size.y() * voxel_size.z()), fpreal(voxel_size.y() * voxel_size.z()), fpreal(voxel_size.z() * voxel_size.x()), fpreal(voxel_size.z() * voxel_size.x())}; area = 0; for (LeafIter leaf = grid.tree().cbeginLeaf(); leaf; ++leaf) { // Visit all the active voxels in this leaf node. for (VoxelIter iter = leaf->cbeginValueOn(); iter; ++iter) { // Iterate through all the neighboring voxels for (int i=0; i<6; i++) if (!grid.tree().isValueOn(iter.getCoord() + normals[i])) area += areas[i]; } } } } fpreal GEO_PrimVDB::calcArea() const { // Calculate the surface area of all the exterior voxels. 
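    // Descriptive note: for level sets this defers to
    // openvdb::tools::levelSetArea(); otherwise geo_calcArea() above sums the
    // areas of active-voxel faces that have no active neighbour, so e.g. a
    // single isolated active voxel contributes 2*(dx*dy + dy*dz + dz*dx) for a
    // voxel size of (dx, dy, dz).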
fpreal area = 0; UTvdbCallAllTopology(getStorageType(), geo_calcArea, getGrid(), area); return area; } void GEO_PrimVDB::enlargePointBounds(UT_BoundingBox &box) const { UT_BoundingBox qbox; if (getBBox(&qbox)) box.enlargeBounds(qbox); } bool GEO_PrimVDB::enlargeBoundingBox(UT_BoundingRect &box, const GA_Attribute *P) const { const GA_Detail &gdp = getDetail(); if (!P) P = gdp.getP(); else if (P != gdp.getP()) return GEO_Primitive::enlargeBoundingBox(box, P); UT_BoundingBox my_bbox; if (getBBox(&my_bbox)) { box.enlargeBounds(my_bbox.xmin(), my_bbox.ymin()); box.enlargeBounds(my_bbox.xmax(), my_bbox.ymax()); return true; } return true; } bool GEO_PrimVDB::enlargeBoundingBox(UT_BoundingBox &box, const GA_Attribute *P) const { const GA_Detail &gdp = getDetail(); if (!P) P = gdp.getP(); else if (P != gdp.getP()) return GEO_Primitive::enlargeBoundingBox(box, P); UT_BoundingBox my_bbox; if (getBBox(&my_bbox)) { box.enlargeBounds(my_bbox); return true; } return true; } bool GEO_PrimVDB::enlargeBoundingSphere(UT_BoundingSphere &sphere, const GA_Attribute *P) const { const GA_Detail &gdp = getDetail(); if (!P) P = gdp.getP(); else if (P != gdp.getP()) return GEO_Primitive::enlargeBoundingSphere(sphere, P); addToBSphere(&sphere); return true; } int64 GEO_PrimVDB::getBaseMemoryUsage() const { exint mem = 0; if (hasGrid()) mem += getGrid().memUsage(); return mem; } void GEO_PrimVDB::countBaseMemory(UT_MemoryCounter &counter) const { if (hasGrid()) { // NOTE: We don't share the grid object or its transform // We don't know what type of Grid we have, but apart from what's // in GridBase, it just has a shared pointer to the tree extra, // so just add that in separately. counter.countUnshared(sizeof(openvdb::GridBase) + sizeof(openvdb::TreeBase::Ptr)); // We don't know what type of MapBase the Transform uses, // so just guess the largest one counter.countUnshared(sizeof(openvdb::math::Transform) + sizeof(openvdb::math::NonlinearFrustumMap)); // The grid's tree is shared. In order to get the reference count, // we need to get our own shared pointer to it, then use one less // than the ref count (ours counts as one). exint refcount; exint size; const openvdb::TreeBase *ptr; { openvdb::TreeBase::ConstPtr ref = getGrid().constBaseTreePtr(); refcount = ref.use_count() - 1; size = ref->memUsage(); ptr = ref.get(); } counter.countShared(size, refcount, ptr); } } template <typename GridType> static inline typename GridType::ValueType geo_doubleToGridValue(double val) { using ValueT = typename GridType::ValueType; // This ugly construction avoids compiler warnings when, // for example, initializing an openvdb::Vec3i with a double. 
return ValueT(openvdb::zeroVal<ValueT>() + val); } template <typename GridType> static fpreal geo_sampleGrid(const GridType &grid, const UT_Vector3 &pos) { const openvdb::math::Transform & xform = grid.transform(); openvdb::math::Vec3d vpos; typename GridType::ValueType value; vpos = openvdb::math::Vec3d(pos.x(), pos.y(), pos.z()); vpos = xform.worldToIndex(vpos); openvdb::tools::BoxSampler::sample(grid.tree(), vpos, value); fpreal result = value; return result; } template <typename GridType> static fpreal geo_sampleBoolGrid(const GridType &grid, const UT_Vector3 &pos) { const openvdb::math::Transform & xform = grid.transform(); openvdb::math::Vec3d vpos; typename GridType::ValueType value; vpos = openvdb::math::Vec3d(pos.x(), pos.y(), pos.z()); vpos = xform.worldToIndex(vpos); openvdb::tools::PointSampler::sample(grid.tree(), vpos, value); fpreal result = value; return result; } template <typename GridType> static UT_Vector3D geo_sampleGridV3(const GridType &grid, const UT_Vector3 &pos) { const openvdb::math::Transform & xform = grid.transform(); openvdb::math::Vec3d vpos; typename GridType::ValueType value; vpos = openvdb::math::Vec3d(pos.x(), pos.y(), pos.z()); vpos = xform.worldToIndex(vpos); openvdb::tools::BoxSampler::sample(grid.tree(), vpos, value); UT_Vector3D result; result.x() = double(value[0]); result.y() = double(value[1]); result.z() = double(value[2]); return result; } template <typename GridType, typename T, typename IDX> static void geo_sampleGridMany(const GridType &grid, T *f, int stride, const IDX *pos, int num) { typename GridType::ConstAccessor accessor = grid.getAccessor(); const openvdb::math::Transform & xform = grid.transform(); openvdb::math::Vec3d vpos; typename GridType::ValueType value; for (int i = 0; i < num; i++) { vpos = openvdb::math::Vec3d(pos[i].x(), pos[i].y(), pos[i].z()); vpos = xform.worldToIndex(vpos); openvdb::tools::BoxSampler::sample(accessor, vpos, value); *f = T(value); f += stride; } } template <typename GridType, typename T, typename IDX> static void geo_sampleBoolGridMany(const GridType &grid, T *f, int stride, const IDX *pos, int num) { typename GridType::ConstAccessor accessor = grid.getAccessor(); const openvdb::math::Transform & xform = grid.transform(); openvdb::math::Vec3d vpos; typename GridType::ValueType value; for (int i = 0; i < num; i++) { vpos = openvdb::math::Vec3d(pos[i].x(), pos[i].y(), pos[i].z()); vpos = xform.worldToIndex(vpos); openvdb::tools::PointSampler::sample(accessor, vpos, value); *f = T(value); f += stride; } } template <typename GridType, typename T, typename IDX> static void geo_sampleVecGridMany(const GridType &grid, T *f, int stride, const IDX *pos, int num) { typename GridType::ConstAccessor accessor = grid.getAccessor(); const openvdb::math::Transform & xform = grid.transform(); openvdb::math::Vec3d vpos; typename GridType::ValueType value; for (int i = 0; i < num; i++) { vpos = openvdb::math::Vec3d(pos[i].x(), pos[i].y(), pos[i].z()); vpos = xform.worldToIndex(vpos); openvdb::tools::BoxSampler::sample(accessor, vpos, value); f->x() = value[0]; f->y() = value[1]; f->z() = value[2]; f += stride; } } static fpreal geoEvaluateVDB(const GEO_PrimVDB *vdb, const UT_Vector3 &pos) { UTvdbReturnScalarType(vdb->getStorageType(), geo_sampleGrid, vdb->getGrid(), pos); UTvdbReturnBoolType(vdb->getStorageType(), geo_sampleBoolGrid, vdb->getGrid(), pos); return 0; } static UT_Vector3D geoEvaluateVDB_V3(const GEO_PrimVDB *vdb, const UT_Vector3 &pos) { UTvdbReturnVec3Type(vdb->getStorageType(), geo_sampleGridV3, 
vdb->getGrid(), pos); return UT_Vector3D(0, 0, 0); } static void geoEvaluateVDBMany(const GEO_PrimVDB *vdb, float *f, int stride, const UT_Vector3 *pos, int num) { UTvdbReturnScalarType(vdb->getStorageType(), geo_sampleGridMany, vdb->getGrid(), f, stride, pos, num); UTvdbReturnBoolType(vdb->getStorageType(), geo_sampleBoolGridMany, vdb->getGrid(), f, stride, pos, num); for (int i = 0; i < num; i++) { *f = 0; f += stride; } } static void geoEvaluateVDBMany(const GEO_PrimVDB *vdb, int *f, int stride, const UT_Vector3 *pos, int num) { UTvdbReturnScalarType(vdb->getStorageType(), geo_sampleGridMany, vdb->getGrid(), f, stride, pos, num); UTvdbReturnBoolType(vdb->getStorageType(), geo_sampleBoolGridMany, vdb->getGrid(), f, stride, pos, num); for (int i = 0; i < num; i++) { *f = 0; f += stride; } } static void geoEvaluateVDBMany(const GEO_PrimVDB *vdb, UT_Vector3 *f, int stride, const UT_Vector3 *pos, int num) { UTvdbReturnVec3Type(vdb->getStorageType(), geo_sampleVecGridMany, vdb->getGrid(), f, stride, pos, num); for (int i = 0; i < num; i++) { *f = 0; f += stride; } } static void geoEvaluateVDBMany(const GEO_PrimVDB *vdb, double *f, int stride, const UT_Vector3D *pos, int num) { UTvdbReturnScalarType(vdb->getStorageType(), geo_sampleGridMany, vdb->getGrid(), f, stride, pos, num); UTvdbReturnBoolType(vdb->getStorageType(), geo_sampleBoolGridMany, vdb->getGrid(), f, stride, pos, num); for (int i = 0; i < num; i++) { *f = 0; f += stride; } } static void geoEvaluateVDBMany(const GEO_PrimVDB *vdb, exint *f, int stride, const UT_Vector3D *pos, int num) { UTvdbReturnScalarType(vdb->getStorageType(), geo_sampleGridMany, vdb->getGrid(), f, stride, pos, num); UTvdbReturnBoolType(vdb->getStorageType(), geo_sampleBoolGridMany, vdb->getGrid(), f, stride, pos, num); for (int i = 0; i < num; i++) { *f = 0; f += stride; } } static void geoEvaluateVDBMany(const GEO_PrimVDB *vdb, UT_Vector3D *f, int stride, const UT_Vector3D *pos, int num) { UTvdbReturnVec3Type(vdb->getStorageType(), geo_sampleVecGridMany, vdb->getGrid(), f, stride, pos, num); for (int i = 0; i < num; i++) { *f = 0; f += stride; } } fpreal GEO_PrimVDB::getValueF(const UT_Vector3 &pos) const { return geoEvaluateVDB(this, pos); } UT_Vector3D GEO_PrimVDB::getValueV3(const UT_Vector3 &pos) const { return geoEvaluateVDB_V3(this, pos); } void GEO_PrimVDB::getValues(float *f, int stride, const UT_Vector3 *pos, int num) const { return geoEvaluateVDBMany(this, f, stride, pos, num); } void GEO_PrimVDB::getValues(int *f, int stride, const UT_Vector3 *pos, int num) const { return geoEvaluateVDBMany(this, f, stride, pos, num); } void GEO_PrimVDB::getValues(UT_Vector3 *f, int stride, const UT_Vector3 *pos, int num) const { return geoEvaluateVDBMany(this, f, stride, pos, num); } void GEO_PrimVDB::getValues(double *f, int stride, const UT_Vector3D *pos, int num) const { return geoEvaluateVDBMany(this, f, stride, pos, num); } void GEO_PrimVDB::getValues(exint *f, int stride, const UT_Vector3D *pos, int num) const { return geoEvaluateVDBMany(this, f, stride, pos, num); } void GEO_PrimVDB::getValues(UT_Vector3D *f, int stride, const UT_Vector3D *pos, int num) const { return geoEvaluateVDBMany(this, f, stride, pos, num); } namespace // anonymous { template <bool NORMALIZE> class geo_EvalGradients { public: geo_EvalGradients( UT_Vector3 *gradients, int stride, const UT_Vector3 *positions, int num_positions) : myGradients(gradients) , myStride(stride) , myPos(positions) , myNumPos(num_positions) { } template<typename GridT> void operator()(const GridT &grid) { using 
namespace openvdb; using AccessorT = typename GridT::ConstAccessor; using ValueT = typename GridT::ValueType; const math::Transform & xform = grid.transform(); const math::Vec3d dim = grid.voxelSize(); const double vox_size = SYSmin(dim[0], dim[1], dim[2]); const double h = 0.5 * vox_size; const math::Vec3d mask[] = { math::Vec3d(h, 0, 0) , math::Vec3d(0, h, 0) , math::Vec3d(0, 0, h) }; AccessorT accessor = grid.getConstAccessor(); UT_Vector3 * gradient = myGradients; for (int i = 0; i < myNumPos; i++, gradient += myStride) { const math::Vec3d pos(myPos[i].x(), myPos[i].y(), myPos[i].z()); for (int j = 0; j < 3; j++) { const math::Vec3d vpos0 = xform.worldToIndex(pos - mask[j]); const math::Vec3d vpos1 = xform.worldToIndex(pos + mask[j]); ValueT v0, v1; tools::BoxSampler::sample<AccessorT>(accessor, vpos0, v0); tools::BoxSampler::sample<AccessorT>(accessor, vpos1, v1); if (NORMALIZE) (*gradient)(j) = (v1 - v0); else (*gradient)(j) = (v1 - v0) / vox_size; } if (NORMALIZE) gradient->normalize(); } } private: UT_Vector3 * myGradients; int myStride; const UT_Vector3 * myPos; int myNumPos; }; } // namespace anonymous bool GEO_PrimVDB::evalGradients( UT_Vector3 *gradients, int stride, const UT_Vector3 *pos, int num_pos, bool normalize) const { if (normalize) { geo_EvalGradients<true> eval(gradients, stride, pos, num_pos); return UTvdbProcessTypedGridScalar(getStorageType(), getGrid(), eval); } else { geo_EvalGradients<false> eval(gradients, stride, pos, num_pos); return UTvdbProcessTypedGridScalar(getStorageType(), getGrid(), eval); } } bool GEO_PrimVDB::isAligned(const GEO_PrimVDB *vdb) const { if (getGrid().transform() == vdb->getGrid().transform()) return true; return false; } bool GEO_PrimVDB::isWorldAxisAligned() const { // Tapered are trivially not aligned. if (!SYSisEqual(getTaper(), 1)) return false; UT_Matrix4D x; x = getTransform4(); for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { if (i == j) { if (x(i, j) <= 0) return false; } else { if (x(i,j) != 0) return false; } } } return true; } bool GEO_PrimVDB::isActiveRegionMatched(const GEO_PrimVDB *vdb) const { if (!isAligned(vdb)) return false; // Ideally we'd invoke hasSameTopology? 
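    // Note that the comparison below only succeeds when both primitives still
    // reference the very same tree object (e.g. after a shallow copy); grids
    // with identical topology but distinct trees are reported as not matched.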
return vdb->getGrid().baseTreePtr() == getGrid().baseTreePtr(); } void GEO_PrimVDB::indexToPos(int x, int y, int z, UT_Vector3 &pos) const { openvdb::math::Vec3d vpos; vpos = openvdb::math::Vec3d(x, y, z); vpos = getGrid().indexToWorld(vpos); pos = UT_Vector3(vpos[0], vpos[1], vpos[2]); } void GEO_PrimVDB::findexToPos(UT_Vector3 idx, UT_Vector3 &pos) const { openvdb::math::Vec3d vpos; vpos = openvdb::math::Vec3d(idx.x(), idx.y(), idx.z()); vpos = getGrid().indexToWorld(vpos); pos = UT_Vector3(vpos[0], vpos[1], vpos[2]); } void GEO_PrimVDB::posToIndex(UT_Vector3 pos, int &x, int &y, int &z) const { openvdb::math::Vec3d vpos(pos.data()); openvdb::math::Coord coord = getGrid().transform().worldToIndexCellCentered(vpos); x = coord.x(); y = coord.y(); z = coord.z(); } void GEO_PrimVDB::posToIndex(UT_Vector3 pos, UT_Vector3 &index) const { openvdb::math::Vec3d vpos; vpos = openvdb::math::Vec3d(pos.x(), pos.y(), pos.z()); vpos = getGrid().worldToIndex(vpos); index = UTvdbConvert(vpos); } void GEO_PrimVDB::indexToPos(exint x, exint y, exint z, UT_Vector3D &pos) const { openvdb::math::Vec3d vpos; vpos = openvdb::math::Vec3d(x, y, z); vpos = getGrid().indexToWorld(vpos); pos = UT_Vector3D(vpos[0], vpos[1], vpos[2]); } void GEO_PrimVDB::findexToPos(UT_Vector3D idx, UT_Vector3D &pos) const { openvdb::math::Vec3d vpos; vpos = openvdb::math::Vec3d(idx.x(), idx.y(), idx.z()); vpos = getGrid().indexToWorld(vpos); pos = UT_Vector3D(vpos[0], vpos[1], vpos[2]); } void GEO_PrimVDB::posToIndex(UT_Vector3D pos, exint &x, exint &y, exint &z) const { openvdb::math::Vec3d vpos(pos.data()); openvdb::math::Coord coord = getGrid().transform().worldToIndexCellCentered(vpos); x = coord.x(); y = coord.y(); z = coord.z(); } void GEO_PrimVDB::posToIndex(UT_Vector3D pos, UT_Vector3D &index) const { openvdb::math::Vec3d vpos; vpos = openvdb::math::Vec3d(pos.x(), pos.y(), pos.z()); vpos = getGrid().worldToIndex(vpos); index = UTvdbConvert(vpos); } template <typename GridType> static fpreal geo_sampleIndex(const GridType &grid, int ix, int iy, int iz) { openvdb::math::Coord xyz; typename GridType::ValueType value; xyz = openvdb::math::Coord(ix, iy, iz); value = grid.tree().getValue(xyz); fpreal result = value; return result; } template <typename GridType> static UT_Vector3D geo_sampleIndexV3(const GridType &grid, int ix, int iy, int iz) { openvdb::math::Coord xyz; typename GridType::ValueType value; xyz = openvdb::math::Coord(ix, iy, iz); value = grid.tree().getValue(xyz); UT_Vector3D result; result.x() = double(value[0]); result.y() = double(value[1]); result.z() = double(value[2]); return result; } template <typename GridType, typename T, typename IDX> static void geo_sampleIndexMany(const GridType &grid, T *f, int stride, const IDX *ix, const IDX *iy, const IDX *iz, int num) { typename GridType::ConstAccessor accessor = grid.getAccessor(); openvdb::math::Coord xyz; typename GridType::ValueType value; for (int i = 0; i < num; i++) { xyz = openvdb::math::Coord(ix[i], iy[i], iz[i]); value = accessor.getValue(xyz); *f = T(value); f += stride; } } template <typename GridType, typename T, typename IDX> static void geo_sampleVecIndexMany(const GridType &grid, T *f, int stride, const IDX *ix, const IDX *iy, const IDX *iz, int num) { typename GridType::ConstAccessor accessor = grid.getAccessor(); openvdb::math::Coord xyz; typename GridType::ValueType value; for (int i = 0; i < num; i++) { xyz = openvdb::math::Coord(ix[i], iy[i], iz[i]); value = accessor.getValue(xyz); f->x() = value[0]; f->y() = value[1]; f->z() = value[2]; f += 
stride; } } static fpreal geoEvaluateIndexVDB(const GEO_PrimVDB *vdb, int ix, int iy, int iz) { UTvdbReturnScalarType(vdb->getStorageType(), geo_sampleIndex, vdb->getGrid(), ix, iy, iz); return 0.0; } static UT_Vector3D geoEvaluateIndexVDB_V3(const GEO_PrimVDB *vdb, int ix, int iy, int iz) { UTvdbReturnVec3Type(vdb->getStorageType(), geo_sampleIndexV3, vdb->getGrid(), ix, iy, iz); return UT_Vector3D(0.0, 0, 0); } static void geoEvaluateIndexVDBMany(const GEO_PrimVDB *vdb, float *f, int stride, const int *ix, const int *iy, const int *iz, int num) { UTvdbReturnScalarType(vdb->getStorageType(), geo_sampleIndexMany, vdb->getGrid(), f, stride, ix, iy, iz, num); for (int i = 0; i < num; i++) { *f = 0; f += stride; } } static void geoEvaluateIndexVDBMany(const GEO_PrimVDB *vdb, int *f, int stride, const int *ix, const int *iy, const int *iz, int num) { UTvdbReturnScalarType(vdb->getStorageType(), geo_sampleIndexMany, vdb->getGrid(), f, stride, ix, iy, iz, num); for (int i = 0; i < num; i++) { *f = 0; f += stride; } } static void geoEvaluateIndexVDBMany(const GEO_PrimVDB *vdb, UT_Vector3 *f, int stride, const int *ix, const int *iy, const int *iz, int num) { UTvdbReturnVec3Type(vdb->getStorageType(), geo_sampleVecIndexMany, vdb->getGrid(), f, stride, ix, iy, iz, num); for (int i = 0; i < num; i++) { *f = 0; f += stride; } } static void geoEvaluateIndexVDBMany(const GEO_PrimVDB *vdb, double *f, int stride, const exint *ix, const exint *iy, const exint *iz, int num) { UTvdbReturnScalarType(vdb->getStorageType(), geo_sampleIndexMany, vdb->getGrid(), f, stride, ix, iy, iz, num); for (int i = 0; i < num; i++) { *f = 0; f += stride; } } static void geoEvaluateIndexVDBMany(const GEO_PrimVDB *vdb, exint *f, int stride, const exint *ix, const exint *iy, const exint *iz, int num) { UTvdbReturnScalarType(vdb->getStorageType(), geo_sampleIndexMany, vdb->getGrid(), f, stride, ix, iy, iz, num); for (int i = 0; i < num; i++) { *f = 0; f += stride; } } static void geoEvaluateIndexVDBMany(const GEO_PrimVDB *vdb, UT_Vector3D *f, int stride, const exint *ix, const exint *iy, const exint *iz, int num) { UTvdbReturnVec3Type(vdb->getStorageType(), geo_sampleVecIndexMany, vdb->getGrid(), f, stride, ix, iy, iz, num); for (int i = 0; i < num; i++) { *f = 0; f += stride; } } fpreal GEO_PrimVDB::getValueAtIndexF(int ix, int iy, int iz) const { return geoEvaluateIndexVDB(this, ix, iy, iz); } UT_Vector3D GEO_PrimVDB::getValueAtIndexV3(int ix, int iy, int iz) const { return geoEvaluateIndexVDB_V3(this, ix, iy, iz); } void GEO_PrimVDB::getValuesAtIndices(float *f, int stride, const int *ix, const int *iy, const int *iz, int num) const { geoEvaluateIndexVDBMany(this, f, stride, ix, iy, iz, num); } void GEO_PrimVDB::getValuesAtIndices(int *f, int stride, const int *ix, const int *iy, const int *iz, int num) const { geoEvaluateIndexVDBMany(this, f, stride, ix, iy, iz, num); } void GEO_PrimVDB::getValuesAtIndices(UT_Vector3 *f, int stride, const int *ix, const int *iy, const int *iz, int num) const { geoEvaluateIndexVDBMany(this, f, stride, ix, iy, iz, num); } void GEO_PrimVDB::getValuesAtIndices(double *f, int stride, const exint *ix, const exint *iy, const exint *iz, int num) const { geoEvaluateIndexVDBMany(this, f, stride, ix, iy, iz, num); } void GEO_PrimVDB::getValuesAtIndices(exint *f, int stride, const exint *ix, const exint *iy, const exint *iz, int num) const { geoEvaluateIndexVDBMany(this, f, stride, ix, iy, iz, num); } void GEO_PrimVDB::getValuesAtIndices(UT_Vector3D *f, int stride, const exint *ix, const exint *iy, 
const exint *iz, int num) const { geoEvaluateIndexVDBMany(this, f, stride, ix, iy, iz, num); } UT_Vector3 GEO_PrimVDB::getGradient(const UT_Vector3 &pos) const { UT_Vector3 grad; grad = 0; evalGradients(&grad, 1, &pos, 1, false); return grad; } //////////////////////////////////////// namespace { // Functor for use with UTvdbProcessTypedGridVec3() to apply a transform // to the voxel values of vector-valued grids struct gu_VecXformOp { openvdb::Mat4d mat; gu_VecXformOp(const openvdb::Mat4d& _mat): mat(_mat) {} template<typename GridT> void operator()(GridT& grid) const { openvdb::tools::transformVectors(grid, mat); } }; } // unnamed namespace void GEO_PrimVDB::transform(const UT_Matrix4 &mat) { if (!hasGrid()) return; try { using openvdb::GridBase; using namespace openvdb::math; // Get the transform const GridBase& const_grid = getConstGrid(); MapBase::ConstPtr base_map = const_grid.transform().baseMap(); Mat4d base_mat4 = base_map->getAffineMap()->getMat4(); // Get the 3x3 subcomponent of the matrix Vec3d translation = base_mat4.getTranslation(); Mat3d vdbmatrix = base_mat4.getMat3(); // Multiply our mat with the mat3 UT_Matrix3D transformed(mat); transformed = UTvdbConvert(vdbmatrix) * transformed; // Put it into a mat4 and translate it UT_Matrix4D final; final = transformed; final.setTranslates(UTvdbConvert(translation)); // Make an affine matrix out of it AffineMap::Ptr map(geoCreateAffineMap<AffineMap>(final)); // Set the affine matrix from our base_map into this map MapBase::Ptr result = simplify(map); if (base_map->isType<NonlinearFrustumMap>()) { const NonlinearFrustumMap& frustum_map = *const_grid.transform().constMap<NonlinearFrustumMap>(); MapBase::Ptr new_frustum_map (new NonlinearFrustumMap( frustum_map.getBBox(), frustum_map.getTaper(), frustum_map.getDepth(), result)); result = new_frustum_map; } // This sets the vertex position to `translation` as well myGridAccessor.setTransform(Transform(result), *this); // If (and only if) the grid is vector-valued, apply the transform to // each voxel's value. if (const_grid.getVectorType() != openvdb::VEC_INVARIANT) { gu_VecXformOp op(UTvdbConvert(UT_Matrix4D(mat))); GEOvdbProcessTypedGridVec3(*this, op, /*make_unique*/true); } } catch (std::exception& /*e*/) { UT_ASSERT(!"Failed to apply transform"); } } void GEO_PrimVDB::copyGridFrom(const GEO_PrimVDB& src_prim, bool copyPosition) { setGrid(src_prim.getGrid(), copyPosition); // makes a shallow copy // Copy the source primitive's grid serial numbers. myTreeUniqueId.exchange(src_prim.getTreeUniqueId()); myMetadataUniqueId.exchange(src_prim.getMetadataUniqueId()); myTransformUniqueId.exchange(src_prim.getTransformUniqueId()); } // If myGrid's tree is shared, replace the tree with a deep copy of itself. // Note: myGrid's metadata and transform are assumed to never be shared // (see setGrid()). void GEO_PrimVDB::GridAccessor::makeGridUnique() { if (myGrid) { UT_ASSERT(myGrid.unique()); openvdb::TreeBase::Ptr localTreePtr = myGrid->baseTreePtr(); if (localTreePtr.use_count() > 2) { // myGrid + localTreePtr = 2 myGrid->setTree(myGrid->constBaseTree().copy()); } } } bool GEO_PrimVDB::GridAccessor::isGridUnique() const { if (myGrid) { // We require the grid to always be unique, it is the tree // that is allowed to be shared. 
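    // Taking localTreePtr below adds one reference of its own, so a use count
    // above two (the grid's reference plus localTreePtr) means some other grid
    // is sharing the tree.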
UT_ASSERT(myGrid.unique()); openvdb::TreeBase::Ptr localTreePtr = myGrid->baseTreePtr(); if (localTreePtr.use_count() > 2) { // myGrid + localTreePtr = 2 return false; } return true; } // Empty grids are trivially unique return true; } void GEO_PrimVDB::setTransform4(const UT_Matrix4 &xform4) { setTransform4(static_cast<UT_DMatrix4>(xform4)); } void GEO_PrimVDB::setTransform4(const UT_DMatrix4 &xform4) { using namespace openvdb::math; myGridAccessor.setTransform(*geoCreateLinearTransform(xform4), *this); } void GEO_PrimVDB::getRes(int &rx, int &ry, int &rz) const { using namespace openvdb; const GridBase & grid = getGrid(); const math::Vec3d dim = grid.evalActiveVoxelDim().asVec3d(); rx = static_cast<int>(dim[0]); ry = static_cast<int>(dim[1]); rz = static_cast<int>(dim[2]); } fpreal GEO_PrimVDB::getVoxelDiameter() const { UT_Vector3 p1, p2; indexToPos(0, 0, 0, p1); indexToPos(1, 1, 1, p2); p2 -= p1; return p2.length(); } UT_Vector3 GEO_PrimVDB::getVoxelSize() const { UT_Vector3 p1, p2; UT_Vector3 vsize; indexToPos(0, 0, 0, p1); indexToPos(1, 0, 0, p2); p2 -= p1; vsize.x() = p2.length(); indexToPos(0, 1, 0, p2); p2 -= p1; vsize.y() = p2.length(); indexToPos(0, 0, 1, p2); p2 -= p1; vsize.z() = p2.length(); return vsize; } template <typename GridType> static void geo_calcMinVDB( GridType &grid, fpreal &result) { auto val = openvdb::tools::extrema(grid.cbeginValueOn()); result = val.min(); } fpreal GEO_PrimVDB::calcMinimum() const { fpreal value = SYS_FPREAL_MAX; UTvdbCallScalarType( getStorageType(), geo_calcMinVDB, SYSconst_cast(getConstGrid()), value); return value; } template <typename GridType> static void geo_calcMaxVDB( GridType &grid, fpreal &result) { auto val = openvdb::tools::extrema(grid.cbeginValueOn()); result = val.max(); } fpreal GEO_PrimVDB::calcMaximum() const { fpreal value = -SYS_FPREAL_MAX; UTvdbCallScalarType( getStorageType(), geo_calcMaxVDB, SYSconst_cast(getConstGrid()), value); return value; } template <typename GridType> static void geo_calcAvgVDB( GridType &grid, fpreal &result) { auto val = openvdb::tools::statistics(grid.cbeginValueOn()); result = val.avg(); } fpreal GEO_PrimVDB::calcAverage() const { fpreal value = 0; UTvdbCallScalarType( getStorageType(), geo_calcAvgVDB, SYSconst_cast(getConstGrid()), value); return value; } bool GEO_PrimVDB::getFrustumBounds(UT_BoundingBox &idxbox) const { using namespace openvdb; using namespace openvdb::math; using openvdb::CoordBBox; using openvdb::Vec3d; idxbox.makeInvalid(); // See if we have a non-linear map, this is the sign // we want to be bounded. MapBase::ConstPtr base_map = getGrid().transform().baseMap(); if (base_map->isType<NonlinearFrustumMap>()) { const NonlinearFrustumMap& frustum_map = *getGrid().transform().constMap<NonlinearFrustumMap>(); // The returned idxbox is intended to be used with // getIndexSpaceTransform() which will shift it by -0.5 voxel. So we // need to add +0.5 to compensate. 
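        // For example, a frustum bounding box spanning [0, N] in index space is
        // returned here as [0.5, N + 0.5], which maps back to [0, N] once the
        // -0.5 voxel shift is applied.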
BBoxd bbox = frustum_map.getBBox(); bbox.translate(Vec3d(+0.5)); idxbox.initBounds( UTvdbConvert(bbox.min()) ); idxbox.enlargeBounds( UTvdbConvert(bbox.max()) ); return true; } return false; } static bool geoGetFrustumBoundsFromVDB(const GEO_PrimVDB *vdb, openvdb::CoordBBox &box) { using namespace openvdb; UT_BoundingBox clip; bool doclip; doclip = vdb->getFrustumBounds(clip); if (doclip) { box = CoordBBox( Coord( (int)SYSrint(clip.xmin()), (int)SYSrint(clip.ymin()), (int)SYSrint(clip.zmin()) ), Coord( (int)SYSrint(clip.xmax()), (int)SYSrint(clip.ymax()), (int)SYSrint(clip.zmax()) ) ); } return doclip; } // The result of the intersection of active regions goes into grid_a template <typename GridTypeA, typename GridTypeB> static void geoIntersect(GridTypeA& grid_a, const GridTypeB &grid_b) { typename GridTypeA::Accessor access_a = grid_a.getAccessor(); typename GridTypeB::ConstAccessor access_b = grid_b.getAccessor(); // For each on value in a, set it off if b is also off for (typename GridTypeA::ValueOnCIter iter = grid_a.cbeginValueOn(); iter; ++iter) { openvdb::CoordBBox bbox = iter.getBoundingBox(); for (int k=bbox.min().z(); k<=bbox.max().z(); k++) { for (int j=bbox.min().y(); j<=bbox.max().y(); j++) { for (int i=bbox.min().x(); i<=bbox.max().x(); i++) { openvdb::Coord coord(i, j, k); if (!access_b.isValueOn(coord)) { access_a.setValue(coord, grid_a.background()); access_a.setValueOff(coord); } } } } } } /// This class is used as a functor to set inactive voxels to the background /// value. template<typename GridType> class geoInactiveToBackground { public: typedef typename GridType::ValueOffIter Iterator; typedef typename GridType::ValueType ValueType; geoInactiveToBackground(const GridType& grid) { background = grid.background(); } inline void operator()(const Iterator& iter) const { iter.setValue(background); } private: ValueType background; }; template <typename GridType> static void geoActivateBBox(GridType& grid, const openvdb::CoordBBox &bbox, bool setvalue, double value, GEO_PrimVDB::ActivateOperation operation, bool doclip, const openvdb::CoordBBox &clipbox) { typename GridType::Accessor access = grid.getAccessor(); switch (operation) { case GEO_PrimVDB::ACTIVATE_UNION: // Union if (doclip) { openvdb::CoordBBox clipped = bbox; clipped = bbox; clipped.min().maxComponent(clipbox.min()); clipped.max().minComponent(clipbox.max()); geoActivateBBox(grid, clipped, setvalue, value, operation, false, clipped); break; } if (setvalue) { grid.fill(bbox, geo_doubleToGridValue<GridType>(value), /*active*/true); } else { openvdb::MaskGrid mask(false); mask.denseFill(bbox, true, true); grid.topologyUnion(mask); } break; case GEO_PrimVDB::ACTIVATE_INTERSECT: // Intersect { openvdb::MaskGrid mask(false); mask.fill(bbox, true, true); grid.topologyIntersection(mask); geoInactiveToBackground<GridType> bgop(grid); openvdb::tools::foreach(grid.beginValueOff(), bgop); } break; case GEO_PrimVDB::ACTIVATE_SUBTRACT: // Difference // No matter what, we clear the background colour // for inactive. 
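        // fill() with active=false both deactivates every voxel in bbox and
        // resets its value, so reads inside the subtracted region return the
        // background afterwards.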
grid.fill(bbox, grid.background(), /*active*/false); break; case GEO_PrimVDB::ACTIVATE_COPY: // Copy // intersect geoActivateBBox(grid, bbox, setvalue, value, GEO_PrimVDB::ACTIVATE_INTERSECT, doclip, clipbox); // and union geoActivateBBox(grid, bbox, setvalue, value, GEO_PrimVDB::ACTIVATE_UNION, doclip, clipbox); break; } } void GEO_PrimVDB::activateIndexBBoxAdapter(const void* bboxPtr, ActivateOperation operation, bool setvalue, fpreal value) { using namespace openvdb; // bboxPtr is assumed to point to an openvdb::vX_Y_Z::CoordBBox, for some // version X.Y.Z of OpenVDB that may be newer than the one with which // libHoudiniGEO.so was built. This is safe provided that CoordBBox and // its member objects are ABI-compatible between the two OpenVDB versions. const CoordBBox& bbox = *static_cast<const CoordBBox*>(bboxPtr); bool doclip; CoordBBox clipbox; doclip = geoGetFrustumBoundsFromVDB(this, clipbox); // Activate based on the parameters and inputs UTvdbCallAllTopology(this->getStorageType(), geoActivateBBox, this->getGrid(), bbox, setvalue, value, operation, doclip, clipbox); } // Gets a conservative bounding box that maps to a coordinate // in index space. openvdb::CoordBBox geoMapCoord(const openvdb::CoordBBox& bbox_b, GEO_PrimVolumeXform xform_a, GEO_PrimVolumeXform xform_b) { using openvdb::Coord; using openvdb::CoordBBox; // Get the eight corners of the voxel Coord x = Coord(bbox_b.extents().x(), 0, 0); Coord y = Coord(0, bbox_b.extents().y(), 0); Coord z = Coord(0, 0, bbox_b.extents().z()); Coord m = bbox_b.min(); const Coord corners[] = { m, m+z, m+y, m+y+z, m+x, m+x+z, m+x+y, m+x+y+z, }; CoordBBox index_bbox; for (int i=0; i<8; i++) { UT_Vector3 corner = UT_Vector3(corners[i].x(), corners[i].y(), corners[i].z()); UT_Vector3 index = xform_a.toVoxelSpace(xform_b.fromVoxelSpace(corner)); Coord coord(int32(index.x()), int32(index.y()), int32(index.z())); if (i == 0) index_bbox = CoordBBox(coord, coord); else index_bbox.expand(coord); } return index_bbox; } openvdb::CoordBBox geoMapCoord(const openvdb::Coord& coord_b, GEO_PrimVolumeXform xform_a, GEO_PrimVolumeXform xform_b) { const openvdb::CoordBBox bbox_b(coord_b, coord_b + openvdb::Coord(1,1,1)); return geoMapCoord(bbox_b, xform_a, xform_b); } /// This class is used as a functor to create a mask for a grid's active /// region. template<typename GridType> class geoMaskTopology { public: typedef typename GridType::ValueOnCIter Iterator; typedef typename openvdb::MaskGrid::Accessor Accessor; geoMaskTopology(const GEO_PrimVolumeXform& a, const GEO_PrimVolumeXform& b) : xform_a(a), xform_b(b) { } inline void operator()(const Iterator& iter, Accessor& accessor) const { openvdb::CoordBBox bbox = geoMapCoord(iter.getBoundingBox(), xform_a, xform_b); accessor.getTree()->fill(bbox, true, true); } private: const GEO_PrimVolumeXform& xform_a; const GEO_PrimVolumeXform& xform_b; }; /// This class is used as a functor to create a mask for the intersection /// of two grids. 
template<typename GridTypeA, typename GridTypeB> class geoMaskIntersect { public: typedef typename GridTypeA::ValueOnCIter IteratorA; typedef typename GridTypeB::ConstAccessor AccessorB; typedef typename openvdb::MaskGrid::Accessor Accessor; geoMaskIntersect(const GridTypeB& source, const GEO_PrimVolumeXform& a, const GEO_PrimVolumeXform& b) : myAccessor(source.getAccessor()), myXformA(a), myXformB(b) { } inline void operator()(const IteratorA& iter, Accessor& accessor) const { openvdb::CoordBBox bbox = iter.getBoundingBox(); for(int k = bbox.min().z(); k <= bbox.max().z(); k++) { for (int j = bbox.min().y(); j <= bbox.max().y(); j++) { for (int i = bbox.min().x(); i <= bbox.max().x(); i++) { openvdb::Coord coord(i, j, k); accessor.setActiveState(coord, containsActiveVoxels(geoMapCoord(coord, myXformB, myXformA))); } } } } private: AccessorB myAccessor; const GEO_PrimVolumeXform& myXformA; const GEO_PrimVolumeXform& myXformB; // Returns true if there is at least one voxel in the source grid that is active // within the specified bounding box. inline bool containsActiveVoxels(const openvdb::CoordBBox& bbox) const { for(int k = bbox.min().z(); k <= bbox.max().z(); k++) { for(int j = bbox.min().y(); j <= bbox.max().y(); j++) { for(int i = bbox.min().x(); i <= bbox.max().x(); i++) { if(myAccessor.isValueOn(openvdb::Coord(i, j, k))) return true; } } } return false; } }; template <typename GridTypeA, typename GridTypeB> void geoUnalignedUnion(GridTypeA &grid_a, const GridTypeB &grid_b, GEO_PrimVolumeXform xform_a, GEO_PrimVolumeXform xform_b, bool setvalue, double value, bool doclip, const openvdb::CoordBBox &clipbox) { openvdb::MaskGrid mask(false); geoMaskTopology<GridTypeB> maskop(xform_a, xform_b); openvdb::tools::transformValues(grid_b.cbeginValueOn(), mask, maskop); if(doclip) mask.clip(clipbox); if(setvalue) { typename GridTypeA::TreeType newTree(mask.tree(), geo_doubleToGridValue<GridTypeA>(value), openvdb::TopologyCopy()); openvdb::tools::compReplace(grid_a.tree(), newTree); } else grid_a.tree().topologyUnion(mask.tree()); } template <typename GridTypeA, typename GridTypeB> void geoUnalignedDifference(GridTypeA &grid_a, const GridTypeB &grid_b, GEO_PrimVolumeXform xform_a, GEO_PrimVolumeXform xform_b) { openvdb::MaskGrid mask(false); geoMaskIntersect<GridTypeA, GridTypeB> maskop(grid_b, xform_a, xform_b); openvdb::tools::transformValues(grid_a.cbeginValueOn(), mask, maskop, true, // DO NOT SHARE THE OPERATOR, since grid_b's accessor does caching... false); grid_a.tree().topologyDifference(mask.tree()); geoInactiveToBackground<GridTypeA> bgop(grid_a); openvdb::tools::foreach(grid_a.beginValueOff(), bgop); } template <typename GridTypeA, typename GridTypeB> static void geoUnalignedIntersect(GridTypeA &grid_a, const GridTypeB &grid_b, GEO_PrimVolumeXform xform_a, GEO_PrimVolumeXform xform_b) { openvdb::MaskGrid mask(false); geoMaskIntersect<GridTypeA, GridTypeB> maskop(grid_b, xform_a, xform_b); openvdb::tools::transformValues(grid_a.cbeginValueOn(), mask, maskop, true, // DO NOT SHARE THE OPERATOR, since grid_b's accessor does caching... 
false); grid_a.tree().topologyIntersection(mask.tree()); geoInactiveToBackground<GridTypeA> bgop(grid_a); openvdb::tools::foreach(grid_a.beginValueOff(), bgop); } // The result of the union of active regions goes into grid_a template <typename GridTypeA, typename GridTypeB> static void geoUnion(GridTypeA& grid_a, const GridTypeB &grid_b, bool setvalue, double value, bool doclip, const openvdb::CoordBBox &clipbox) { typename GridTypeA::Accessor access_a = grid_a.getAccessor(); typename GridTypeB::ConstAccessor access_b = grid_b.getAccessor(); if (!doclip && !setvalue) { grid_a.tree().topologyUnion(grid_b.tree()); return; } // For each on value in b, set a on for (typename GridTypeB::ValueOnCIter iter = grid_b.cbeginValueOn(); iter; ++iter) { openvdb::CoordBBox bbox = iter.getBoundingBox(); // Intersect with our destination if (doclip) { bbox.min().maxComponent(clipbox.min()); bbox.max().minComponent(clipbox.max()); } for (int k=bbox.min().z(); k<=bbox.max().z(); k++) { for (int j=bbox.min().y(); j<=bbox.max().y(); j++) { for (int i=bbox.min().x(); i<=bbox.max().x(); i++) { openvdb::Coord coord(i, j, k); if (setvalue) { access_a.setValue(coord, geo_doubleToGridValue<GridTypeA>(value)); } else { access_a.setValueOn(coord); } } } } } } // The result of the union of active regions goes into grid_a template <typename GridTypeA, typename GridTypeB> static void geoDifference(GridTypeA& grid_a, const GridTypeB &grid_b) { typename GridTypeA::Accessor access_a = grid_a.getAccessor(); typename GridTypeB::ConstAccessor access_b = grid_b.getAccessor(); // For each on value in a, set it off if b is on for (typename GridTypeA::ValueOnCIter iter = grid_a.cbeginValueOn(); iter; ++iter) { openvdb::CoordBBox bbox = iter.getBoundingBox(); for (int k=bbox.min().z(); k<=bbox.max().z(); k++) { for (int j=bbox.min().y(); j<=bbox.max().y(); j++) { for (int i=bbox.min().x(); i<=bbox.max().x(); i++) { openvdb::Coord coord(i, j, k); // TODO: conditional needed? Profile please. 
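                    // Only voxels that are active in both grids are deactivated
                    // and reset to a's background; active voxels of a that b
                    // does not cover are left untouched.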
if (access_b.isValueOn(coord)) { access_a.setValue(coord, grid_a.background()); access_a.setValueOff(coord); } } } } } } template <typename GridTypeB> static void geoDoUnion(const GridTypeB &grid_b, GEO_PrimVolumeXform xform_b, GEO_PrimVDB &vdb_a, bool setvalue, double value, bool doclip, const openvdb::CoordBBox &clipbox, bool ignore_transform) { // If the transforms are equal, we can do an aligned union if (ignore_transform || grid_b.transform() == vdb_a.getGrid().transform()) { UTvdbCallAllTopology(vdb_a.getStorageType(), geoUnion, vdb_a.getGrid(), grid_b, setvalue, value, doclip, clipbox); } else { UTvdbCallAllTopology(vdb_a.getStorageType(), geoUnalignedUnion, vdb_a.getGrid(), grid_b, vdb_a.getIndexSpaceTransform(), xform_b, setvalue, value, doclip, clipbox); } } template <typename GridTypeB> static void geoDoIntersect( const GridTypeB &grid_b, GEO_PrimVolumeXform xform_b, GEO_PrimVDB &vdb_a, bool ignore_transform) { if (ignore_transform || grid_b.transform() == vdb_a.getGrid().transform()) { UTvdbCallAllTopology(vdb_a.getStorageType(), geoIntersect, vdb_a.getGrid(), grid_b); } else { UTvdbCallAllTopology(vdb_a.getStorageType(), geoUnalignedIntersect, vdb_a.getGrid(), grid_b, vdb_a.getIndexSpaceTransform(), xform_b); } } template <typename GridTypeB> static void geoDoDifference( const GridTypeB &grid_b, GEO_PrimVolumeXform xform_b, GEO_PrimVDB &vdb_a, bool ignore_transform) { if (ignore_transform || grid_b.transform() == vdb_a.getGrid().transform()) { UTvdbCallAllTopology(vdb_a.getStorageType(), geoDifference, vdb_a.getGrid(), grid_b); } else { UTvdbCallAllTopology(vdb_a.getStorageType(), geoUnalignedDifference, vdb_a.getGrid(), grid_b, vdb_a.getIndexSpaceTransform(), xform_b); } } void GEO_PrimVDB::activateByVDB( const GEO_PrimVDB *input_vdb, ActivateOperation operation, bool setvalue, fpreal value, bool ignore_transform) { const openvdb::GridBase& input_grid = input_vdb->getGrid(); bool doclip; openvdb::CoordBBox clipbox; doclip = geoGetFrustumBoundsFromVDB(this, clipbox); switch (operation) { case GEO_PrimVDB::ACTIVATE_UNION: // Union UTvdbCallAllTopology(input_vdb->getStorageType(), geoDoUnion, input_grid, input_vdb->getIndexSpaceTransform(), *this, setvalue, value, doclip, clipbox, ignore_transform); break; case GEO_PrimVDB::ACTIVATE_INTERSECT: // Intersect UTvdbCallAllTopology(input_vdb->getStorageType(), geoDoIntersect, input_grid, input_vdb->getIndexSpaceTransform(), *this, ignore_transform); break; case GEO_PrimVDB::ACTIVATE_SUBTRACT: // Difference UTvdbCallAllTopology(input_vdb->getStorageType(), geoDoDifference, input_grid, input_vdb->getIndexSpaceTransform(), *this, ignore_transform); break; case GEO_PrimVDB::ACTIVATE_COPY: // Copy UTvdbCallAllTopology(input_vdb->getStorageType(), geoDoIntersect, input_grid, input_vdb->getIndexSpaceTransform(), *this, ignore_transform); UTvdbCallAllTopology(input_vdb->getStorageType(), geoDoUnion, input_grid, input_vdb->getIndexSpaceTransform(), *this, setvalue, value, doclip, clipbox, ignore_transform); break; } } UT_Matrix4D GEO_PrimVDB::getTransform4() const { using namespace openvdb; using namespace openvdb::math; UT_Matrix4D mat4; const Transform &gxform = getGrid().transform(); NonlinearFrustumMap::ConstPtr fmap = gxform.map<NonlinearFrustumMap>(); if (fmap) { const openvdb::BBoxd &bbox = fmap->getBBox(); const openvdb::Vec3d center = bbox.getCenter(); const openvdb::Vec3d size = bbox.extents(); // TODO: Use fmap->linearMap() once that actually works mat4.identity(); mat4.translate(-center.x(), -center.y(), -bbox.min().z()); // NOTE: 
We scale both XY axes by size.x() because the secondMap() // has the aspect ratio baked in mat4.scale(1.0/size.x(), 1.0/size.x(), 1.0/size.z()); mat4 *= UTvdbConvert(fmap->secondMap().getMat4()); } else { mat4 = UTvdbConvert(gxform.baseMap()->getAffineMap()->getMat4()); } return mat4; } void GEO_PrimVDB::getLocalTransform(UT_Matrix3D &result) const { result = getTransform4(); } void GEO_PrimVDB::setLocalTransform(const UT_Matrix3D &new_mat3) { using namespace openvdb; using namespace openvdb::math; Transform::Ptr xform; UT_Matrix4D new_mat4; new_mat4 = new_mat3; new_mat4.setTranslates(getDetail().getPos3(vertexPoint(0))); const Transform & gxform = getGrid().transform(); NonlinearFrustumMap::ConstPtr fmap = gxform.map<NonlinearFrustumMap>(); if (fmap) { fmap = geoStandardFrustumMapPtr(*this); const openvdb::BBoxd &bbox = fmap->getBBox(); const openvdb::Vec3d center = bbox.getCenter(); const openvdb::Vec3d size = bbox.extents(); // TODO: Use fmap->linearMap() once that actually works UT_Matrix4D second; second.identity(); second.translate(-0.5, -0.5, 0.0); // adjust for frustum map center // NOTE: We scale both XY axes by size.x() because the secondMap() // has the aspect ratio baked in second.scale(size.x(), size.x(), size.z()); second.translate(center.x(), center.y(), bbox.min().z()); second *= new_mat4; xform.reset(new Transform(MapBase::Ptr( new NonlinearFrustumMap(fmap->getBBox(), fmap->getTaper(), /*depth*/1.0, geoCreateAffineMap<MapBase>(second))))); } else { xform = geoCreateLinearTransform(new_mat4); } myGridAccessor.setTransform(*xform, *this); } int GEO_PrimVDB::detachPoints(GA_PointGroup &grp) { int count = 0; if (grp.containsOffset(vertexPoint(0))) count++; if (count == 0) return 0; if (count == 1) return -2; return -1; } GA_Primitive::GA_DereferenceStatus GEO_PrimVDB::dereferencePoint(GA_Offset point, bool) { return vertexPoint(0) == point ? GA_DEREFERENCE_DESTROY : GA_DEREFERENCE_OK; } GA_Primitive::GA_DereferenceStatus GEO_PrimVDB::dereferencePoints(const GA_RangeMemberQuery &point_query, bool) { return point_query.contains(vertexPoint(0)) ? 
GA_DEREFERENCE_DESTROY : GA_DEREFERENCE_OK; } /// /// JSON methods /// namespace { // unnamed class geo_PrimVDBJSON : public GA_PrimitiveJSON { public: static const char *theSharedMemKey; public: geo_PrimVDBJSON() {} ~geo_PrimVDBJSON() override {} enum { geo_TBJ_VERTEX, geo_TBJ_VDB, geo_TBJ_VDB_SHMEM, geo_TBJ_VDB_VISUALIZATION, geo_TBJ_ENTRIES }; const GEO_PrimVDB *vdb(const GA_Primitive *p) const { return static_cast<const GEO_PrimVDB *>(p); } GEO_PrimVDB *vdb(GA_Primitive *p) const { return static_cast<GEO_PrimVDB *>(p); } int getEntries() const override { return geo_TBJ_ENTRIES; } const UT_StringHolder & getKeyword(int i) const override { switch (i) { case geo_TBJ_VERTEX: return theKWVertex; case geo_TBJ_VDB: return theKWVDB; case geo_TBJ_VDB_SHMEM: return theKWVDBShm; case geo_TBJ_VDB_VISUALIZATION: return theKWVDBVis; case geo_TBJ_ENTRIES: break; } UT_ASSERT(0); return UT_StringHolder::theEmptyString; } bool shouldSaveField(const GA_Primitive *prim, int i, const GA_SaveMap &sm) const override { bool is_shmem = sm.getOptions().hasOption("geo:sharedmemowner"); switch (i) { case geo_TBJ_VERTEX: return true; case geo_TBJ_VDB: return !is_shmem; case geo_TBJ_VDB_SHMEM: return is_shmem; case geo_TBJ_VDB_VISUALIZATION: return true; case geo_TBJ_ENTRIES: break; } UT_ASSERT(0); return false; } bool saveField(const GA_Primitive *pr, int i, UT_JSONWriter &w, const GA_SaveMap &map) const override { switch (i) { case geo_TBJ_VERTEX: { GA_Offset vtx = vdb(pr)->getVertexOffset(0); return w.jsonInt(int64(map.getVertexIndex(vtx))); } case geo_TBJ_VDB: return vdb(pr)->saveVDB(w, map); case geo_TBJ_VDB_SHMEM: return vdb(pr)->saveVDB(w, map, true); case geo_TBJ_VDB_VISUALIZATION: return vdb(pr)->saveVisualization(w, map); case geo_TBJ_ENTRIES: break; } return false; } bool loadField(GA_Primitive *pr, int i, UT_JSONParser &p, const GA_LoadMap &map) const override { switch (i) { case geo_TBJ_VERTEX: { int64 vidx; if (!p.parseInt(vidx)) return false; GA_Offset voff = map.getVertexOffset(GA_Index(vidx)); // Assign the preallocated vertex, but // do not bother updating the topology, // which will be done at the end of the // load anyway. vdb(pr)->assignVertex(voff, false); return true; } case geo_TBJ_VDB: return vdb(pr)->loadVDB(p); case geo_TBJ_VDB_SHMEM: return vdb(pr)->loadVDB(p, true); case geo_TBJ_VDB_VISUALIZATION: return vdb(pr)->loadVisualization(p, map); case geo_TBJ_ENTRIES: break; } UT_ASSERT(0); return false; } bool saveField(const GA_Primitive *pr, int i, UT_JSONValue &val, const GA_SaveMap &map) const override { UT_AutoJSONWriter w(val); return saveField(pr, i, *w, map); } // Re-implement the H12.5 base class version, note that this was pure // virtual in H12.1. 
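    // This overload wraps the in-memory UT_JSONValue in a temporary parser,
    // forwards to the stream-based loadField() above, and then transfers any
    // parse errors back onto the caller's parser.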
bool loadField(GA_Primitive *pr, int i, UT_JSONParser &p, const UT_JSONValue &jval, const GA_LoadMap &map) const override { UT_AutoJSONParser parser(jval); bool ok = loadField(pr, i, *parser, map); p.stealErrors(*parser); return ok; } bool isEqual(int i, const GA_Primitive *p0, const GA_Primitive *p1) const override { switch (i) { case geo_TBJ_VERTEX: return (p0->getVertexOffset(0) == p1->getVertexOffset(0)); case geo_TBJ_VDB_SHMEM: case geo_TBJ_VDB: return false; // never save these tags as uniform case geo_TBJ_VDB_VISUALIZATION: return (vdb(p0)->getVisOptions() == vdb(p1)->getVisOptions()); case geo_TBJ_ENTRIES: break; } UT_ASSERT(0); return false; } private: }; const char *geo_PrimVDBJSON::theSharedMemKey = "sharedkey"; } // namespace unnamed static const GA_PrimitiveJSON * vdbJSON() { static SYS_AtomicPtr<GA_PrimitiveJSON> theJSON; if (!theJSON) { GA_PrimitiveJSON* json = new geo_PrimVDBJSON; if (nullptr != theJSON.compare_swap(nullptr, json)) { delete json; json = nullptr; } } return theJSON; } const GA_PrimitiveJSON * GEO_PrimVDB::getJSON() const { return vdbJSON(); } // This method is called by multiple places internally in Houdini. static void geoSetVDBStreamCompression(openvdb::io::Stream& vos, bool backwards_compatible) { // Always enable full compression, since it is fast and compresses level // sets and fog volumes well. uint32_t compression = openvdb::io::COMPRESS_ACTIVE_MASK; // Enable blosc compression unless we want it to be backwards compatible. if (vos.hasBloscCompression() && !backwards_compatible) { compression |= openvdb::io::COMPRESS_BLOSC; } vos.setCompression(compression); } bool GEO_PrimVDB::saveVDB(UT_JSONWriter &w, const GA_SaveMap &sm, bool as_shmem) const { bool ok = true; try { openvdb::GridCPtrVec grids; grids.push_back(getConstGridPtr()); if (as_shmem) { openvdb::MetaMap meta; UT_String shmem_owner; sm.getOptions().importOption("geo:sharedmemowner", shmem_owner); if (!shmem_owner.isstring()) return false; // First do a pass to collect the final size SYS_SharedMemoryOutputStream os_count(NULL); { openvdb::io::Stream vos(os_count); geoSetVDBStreamCompression(vos, /*backwards_compatible*/false); vos.write(grids, meta); } // Create the shmem segment UT_WorkBuffer shmem_key; shmem_key.sprintf("%s:%p", shmem_owner.buffer(), this); UT_SharedMemoryManager &shmgr = UT_SharedMemoryManager::get(); SYS_SharedMemory *shmem = shmgr.get(shmem_key.buffer()); if (shmem->size() != os_count.size()) shmem->reset(os_count.size()); // Save the vdb stream to the shmem segment SYS_SharedMemoryOutputStream os_shm(shmem); { openvdb::io::Stream vos(os_shm); geoSetVDBStreamCompression(vos, /*backwards_compatible*/false); vos.write(grids, meta); } // In the main json stream, just tag it with the shmem key ok = ok && w.jsonBeginArray(); ok = ok && w.jsonKeyToken(geo_PrimVDBJSON::theSharedMemKey); ok = ok && w.jsonString(shmem->id()); ok = ok && w.jsonEndArray(); } else { UT_JSONWriter::TiledStream os(w); openvdb::io::Stream vos(os); openvdb::MetaMap meta; geoSetVDBStreamCompression( vos, UT_EnvControl::getInt(ENV_HOUDINI13_VOLUME_COMPATIBILITY)); // Visual C++ requires a default meta object declared on the stack vos.write(grids, meta); } } catch (std::exception &e) { std::cerr << "Save failure: " << e.what() << "\n"; ok = false; } return ok; } bool GEO_PrimVDB::loadVDB(UT_JSONParser &p, bool as_shmem) { if (as_shmem) { bool array_error = false; UT_WorkBuffer key; if (!p.parseBeginArray(array_error) || array_error) return false; if (!p.parseString(key)) return false; if (key != 
geo_PrimVDBJSON::theSharedMemKey) return false; UT_WorkBuffer shmem_key; if (!p.parseString(shmem_key)) return false; SYS_SharedMemory *shmem = new SYS_SharedMemory(shmem_key.buffer(), /*read_only*/true); if (shmem->size()) { try { SYS_SharedMemoryInputStream is_shm(*shmem); openvdb::io::Stream vis(is_shm, /*delayLoad*/false); openvdb::GridPtrVecPtr grids = vis.getGrids(); int count = (grids ? grids->size() : 0); if (count != 1) { UT_String mesg; mesg.sprintf("expected to read 1 grid, got %d grid%s", count, count == 1 ? "" : "s"); throw std::runtime_error(mesg.nonNullBuffer()); } openvdb::GridBase::Ptr grid = (*grids)[0]; UT_ASSERT(grid); if (grid) setGrid(*grid); } catch (std::exception &e) { std::cerr << "Shared memory load failure: " << e.what() << "\n"; return false; } } else { // If the shared memory was set to zero, it probably died while // the IFD stream was in transit. Create a dummy grid so that // mantra doesn't flip out like a ninja. openvdb::GridBase::Ptr grid = openvdb::FloatGrid::create(0); setGrid(*grid); } if (!p.parseEndArray(array_error) || array_error) return false; } else { try { UT_JSONParser::TiledStream is(p); openvdb::io::Stream vis(is, /*delayLoad*/false); openvdb::GridPtrVecPtr grids = vis.getGrids(); int count = (grids ? grids->size() : 0); if (count != 1) { UT_String mesg; mesg.sprintf("expected to read 1 grid, got %d grid%s", count, count == 1 ? "" : "s"); throw std::runtime_error(mesg.nonNullBuffer()); } openvdb::GridBase::Ptr grid = (*grids)[0]; UT_ASSERT(grid); if (grid) { // When we saved the grid, we auto-added metadata // which isn't reflected by our primitive attributes. // if any later node tries to sync the metadata from // the vdb primitive, we'll gain extra data such as // file_bbox const char *file_metadata[] = { "file_bbox_min", "file_bbox_max", "file_compression", "file_mem_bytes", "file_voxel_count", "file_delayed_load", 0 }; for (int i = 0; file_metadata[i]; i++) { grid->removeMeta(file_metadata[i]); } setGrid(*grid); } } catch (std::exception &e) { std::cerr << "Load failure: " << e.what() << "\n"; return false; } } return true; } namespace // anonymous { enum { geo_JVOL_VISMODE, geo_JVOL_VISISO, geo_JVOL_VISDENSITY, geo_JVOL_VISLOD, }; UT_FSATable theJVolumeViz( geo_JVOL_VISMODE, "mode", geo_JVOL_VISISO, "iso", geo_JVOL_VISDENSITY, "density", geo_JVOL_VISLOD, "lod", -1, nullptr ); } // namespace anonymous bool GEO_PrimVDB::saveVisualization(UT_JSONWriter &w, const GA_SaveMap &) const { bool ok = true; ok = ok && w.jsonBeginMap(); ok = ok && w.jsonKeyToken(theJVolumeViz.getToken(geo_JVOL_VISMODE)); ok = ok && w.jsonString(GEOgetVolumeVisToken(myVis.myMode)); ok = ok && w.jsonKeyToken(theJVolumeViz.getToken(geo_JVOL_VISISO)); ok = ok && w.jsonReal(myVis.myIso); ok = ok && w.jsonKeyToken(theJVolumeViz.getToken(geo_JVOL_VISDENSITY)); ok = ok && w.jsonReal(myVis.myDensity); // Only save myLod when non-default so that it loads in older builds if (myVis.myLod != GEO_VOLUMEVISLOD_FULL) { ok = ok && w.jsonKeyToken(theJVolumeViz.getToken(geo_JVOL_VISLOD)); ok = ok && w.jsonString(GEOgetVolumeVisLodToken(myVis.myLod)); } return ok && w.jsonEndMap(); } bool GEO_PrimVDB::loadVisualization(UT_JSONParser &p, const GA_LoadMap &) { UT_JSONParser::traverser it; GEO_VolumeVis mode = myVis.myMode; fpreal iso = myVis.myIso; fpreal density = myVis.myDensity; GEO_VolumeVisLod lod = myVis.myLod; UT_WorkBuffer key; fpreal64 fval; bool foundmap=false, ok = true; for (it = p.beginMap(); ok && !it.atEnd(); ++it) { foundmap = true; if (!it.getLowerKey(key)) { ok = false; 
break; } switch (theJVolumeViz.findSymbol(key.buffer())) { case geo_JVOL_VISMODE: if ((ok = p.parseString(key))) mode = GEOgetVolumeVisEnum( key.buffer(), GEO_VOLUMEVIS_SMOKE); break; case geo_JVOL_VISISO: if ((ok = p.parseReal(fval))) iso = fval; break; case geo_JVOL_VISDENSITY: if ((ok = p.parseReal(fval))) density = fval; break; case geo_JVOL_VISLOD: if ((ok = p.parseString(key))) lod = GEOgetVolumeVisLodEnum( key.buffer(), GEO_VOLUMEVISLOD_FULL); break; default: p.addWarning("Unexpected key for volume visualization: %s", key.buffer()); ok = p.skipNextObject(); break; } } if (!foundmap) { p.addFatal("Expected a JSON map for volume visualization data"); ok = false; } if (ok) setVisualization(mode, iso, density, lod); return ok; } template <typename GridType> static void geo_sumPosDensity(const GridType &grid, fpreal64 &sum) { sum = 0; for (typename GridType::ValueOnCIter iter = grid.cbeginValueOn(); iter; ++iter) { fpreal value = *iter; if (value > 0) { if (iter.isTileValue()) sum += value * iter.getVoxelCount(); else sum += value; } } } fpreal GEO_PrimVDB::calcPositiveDensity() const { fpreal64 density = 0; UT_IF_ASSERT(UT_VDBType type = getStorageType();) UT_ASSERT(type == UT_VDB_FLOAT || type == UT_VDB_DOUBLE); UTvdbCallRealType(getStorageType(), geo_sumPosDensity, getGrid(), density); UTvdbCallBoolType(getStorageType(), geo_sumPosDensity, getGrid(), density); int numvoxel = getGrid().activeVoxelCount(); if (numvoxel) density /= numvoxel; UT_Vector3 zero(0, 0, 0); density *= calcVolume(zero); return density; } int GEO_PrimVDB::getBBox(UT_BoundingBox *bbox) const { if (hasGrid()) { using namespace openvdb; CoordBBox vbox; const openvdb::GridBase &grid = getGrid(); // NOTE: We use evalActiveVoxelBoundingBox() so that it matches // getRes() which calls evalActiveVoxelDim(). if (!grid.baseTree().evalActiveVoxelBoundingBox(vbox)) { bbox->makeInvalid(); return false; } // Currently VDB may return true even if the final bounds // were zero, so to avoid generating a massive bound, detect // invalid bounding boxes and return false. if (vbox.min()[0] > vbox.max()[0]) { bbox->makeInvalid(); return false; } const math::Transform &xform = grid.transform(); for (int i = 0; i < 8; i++) { math::Vec3d vpos( (i&1) ? vbox.min()[0] - 0.5 : vbox.max()[0] + 0.5, (i&2) ? vbox.min()[1] - 0.5 : vbox.max()[1] + 0.5, (i&4) ? vbox.min()[2] - 0.5 : vbox.max()[2] + 0.5); vpos = xform.indexToWorld(vpos); UT_Vector3 worldpos(vpos.x(), vpos.y(), vpos.z()); if (i == 0) bbox->initBounds(worldpos); else bbox->enlargeBounds(worldpos); } return true; } bbox->initBounds(getDetail().getPos3(vertexPoint(0))); return true; } UT_Vector3 GEO_PrimVDB::baryCenter() const { // Return the center of the index space if (!hasGrid()) return UT_Vector3(0, 0, 0); const openvdb::GridBase &grid = getGrid(); openvdb::CoordBBox bbox = grid.evalActiveVoxelBoundingBox(); UT_Vector3 pos; findexToPos(UTvdbConvert(bbox.getCenter()), pos); return pos; } bool GEO_PrimVDB::isDegenerate() const { return false; } // // Methods to handle vertex attributes for the attribute dictionary // void GEO_PrimVDB::copyPrimitive(const GEO_Primitive *psrc) { if (psrc == this) return; const GEO_PrimVDB *src = (const GEO_PrimVDB *)psrc; copyGridFrom(*src); // makes a shallow copy // TODO: Well and good to reuse the attribute handle for all our // vertices, but we should do so across primitives as well. 
GA_VertexWrangler vertex_wrangler(*getParent(), *src->getParent()); GEO_Primitive::copyPrimitive(psrc); myVis = src->myVis; } static inline openvdb::math::Vec3d vdbTranslation(const openvdb::math::Transform &xform) { return xform.baseMap()->getAffineMap()->getMat4().getTranslation(); } // Replace the grid's translation with the prim's vertex position void GEO_PrimVDB::GridAccessor::updateGridTranslates(const GEO_PrimVDB &prim) const { using namespace openvdb::math; const GA_Detail & geo = prim.getDetail(); // It is possible our vertex offset is invalid, such as us // being a stashed primitive. if (!GAisValid(prim.getVertexOffset(0))) return; GA_Offset ptoff = prim.vertexPoint(0); Vec3d newpos = UTvdbConvert(geo.getPos3(ptoff)); Vec3d oldpos = vdbTranslation(myGrid->transform()); MapBase::ConstPtr map = myGrid->transform().baseMap(); if (isApproxEqual(oldpos, newpos)) return; const_cast<GEO_PrimVDB&>(prim).incrTransformUniqueId(); Vec3d delta = newpos - oldpos; const_cast<GEO_PrimVDB::GridAccessor *>(this)->makeGridUnique(); myGrid->setTransform( std::make_shared<Transform>(map->postTranslate(delta))); } // Copy the translation from xform and set into our vertex position void GEO_PrimVDB::GridAccessor::setVertexPositionAdapter( const void* xformPtr, GEO_PrimVDB &prim) { // xformPtr is assumed to point to an openvdb::vX_Y_Z::math::Transform, // for some version X.Y.Z of OpenVDB that may be newer than the one // with which libHoudiniGEO.so was built. This is safe provided that // math::Transform and its member objects are ABI-compatible between // the two OpenVDB versions. const openvdb::math::Transform& xform = *static_cast<const openvdb::math::Transform*>(xformPtr); if (myGrid && &myGrid->transform() == &xform) return; prim.incrTransformUniqueId(); prim.getDetail().setPos3( prim.vertexPoint(0), UTvdbConvert(vdbTranslation(xform))); } void GEO_PrimVDB::GridAccessor::setTransformAdapter( const void* xformPtr, GEO_PrimVDB &prim) { if (!myGrid) return; // xformPtr is assumed to point to an openvdb::vX_Y_Z::math::Transform, // for some version X.Y.Z of OpenVDB that may be newer than the one // with which libHoudiniGEO.so was built. This is safe provided that // math::Transform and its member objects are ABI-compatible between // the two OpenVDB versions. const openvdb::math::Transform& xform = *static_cast<const openvdb::math::Transform*>(xformPtr); setVertexPosition(xform, prim); myGrid->setTransform(xform.copy()); } void GEO_PrimVDB::GridAccessor::setGridAdapter( const void* gridPtr, GEO_PrimVDB &prim, bool copyPosition) { // gridPtr is assumed to point to an openvdb::vX_Y_Z::GridBase, for some // version X.Y.Z of OpenVDB that may be newer than the one with which // libHoudiniGEO.so was built. This is safe provided that GridBase and // its member objects are ABI-compatible between the two OpenVDB versions. const openvdb::GridBase& grid = *static_cast<const openvdb::GridBase*>(gridPtr); if (myGrid.get() == &grid) return; if (copyPosition) setVertexPosition(grid.transform(), prim); myGrid = openvdb::ConstPtrCast<openvdb::GridBase>( grid.copyGrid()); // always shallow-copy the source grid myStorageType = UTvdbGetGridType(*myGrid); } GEO_Primitive * GEO_PrimVDB::copy(int preserve_shared_pts) const { GEO_Primitive *clone = GEO_Primitive::copy(preserve_shared_pts); if (!clone) return nullptr; GEO_PrimVDB* vdb = static_cast<GEO_PrimVDB*>(clone); // Give the clone the same serial number as this primitive. 
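    // (The grid itself is only shallow-copied below; makeGridUnique() defers
    // the deep tree copy until the clone is actually written to.)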
vdb->myUniqueId.exchange(this->getUniqueId()); // Give the clone a shallow copy of this primitive's grid. vdb->copyGridFrom(*this); vdb->myVis = myVis; return clone; } void GEO_PrimVDB::copySubclassData(const GA_Primitive *source) { UT_ASSERT(source != this); const GEO_PrimVDB* src = static_cast<const GEO_PrimVDB*>(source); GEO_Primitive::copySubclassData(source); // DO NOT copy P from the source, since copySubclassData // should be independent of any attributes! copyGridFrom(*src, false); // makes a shallow copy myVis = src->myVis; } void GEO_PrimVDB::assignVertex(GA_Offset new_vtx, bool update_topology) { if (getVertexCount() == 1) { GA_Offset orig_vtx = getVertexOffset(); if (orig_vtx == new_vtx) return; UT_ASSERT_P(GAisValid(orig_vtx)); destroyVertex(orig_vtx); myVertexList.set(0, new_vtx); } else { myVertexList.setTrivial(new_vtx, 1); } if (update_topology) registerVertex(new_vtx); } const char * GEO_PrimVDB::getGridName() const { GA_ROHandleS nameAttr(getParent(), GA_ATTRIB_PRIMITIVE, "name"); return nameAttr.isValid() ? nameAttr.get(getMapOffset()) : ""; } namespace // anonymous { using geo_Size = GA_Size; // Intrinsic attributes enum geo_Intrinsic { geo_INTRINSIC_BACKGROUND, geo_INTRINSIC_VOXELSIZE, geo_INTRINSIC_ACTIVEVOXELDIM, geo_INTRINSIC_ACTIVEVOXELCOUNT, geo_INTRINSIC_TRANSFORM, geo_INTRINSIC_VOLUMEVISUALMODE, geo_INTRINSIC_VOLUMEVISUALDENSITY, geo_INTRINSIC_VOLUMEVISUALISO, geo_INTRINSIC_VOLUMEVISUALLOD, geo_INTRINSIC_META_GRID_CLASS, geo_INTRINSIC_META_GRID_CREATOR, geo_INTRINSIC_META_IS_LOCAL_SPACE, geo_INTRINSIC_META_SAVE_HALF_FLOAT, geo_INTRINSIC_META_VALUE_TYPE, geo_INTRINSIC_META_VECTOR_TYPE, geo_NUM_INTRINSICS }; const UT_FSATable theMetaNames( geo_INTRINSIC_META_GRID_CLASS, "vdb_class", geo_INTRINSIC_META_GRID_CREATOR, "vdb_creator", geo_INTRINSIC_META_IS_LOCAL_SPACE, "vdb_is_local_space", geo_INTRINSIC_META_SAVE_HALF_FLOAT, "vdb_is_saved_as_half_float", geo_INTRINSIC_META_VALUE_TYPE, "vdb_value_type", geo_INTRINSIC_META_VECTOR_TYPE, "vdb_vector_type", -1, nullptr ); geo_Size intrinsicBackgroundTupleSize(const GEO_PrimVDB *p) { return UTvdbGetGridTupleSize(p->getStorageType()); } template <typename GridT> void intrinsicBackgroundV(const GridT &grid, fpreal64 *v, GA_Size n) { typename GridT::ValueType background = grid.background(); for (GA_Size i = 0; i < n; i++) v[i] = background[i]; } template <typename GridT> void intrinsicBackgroundS(const GridT &grid, fpreal64 *v) { v[0] = (fpreal64)grid.background(); } geo_Size intrinsicBackground(const GEO_PrimVDB *p, fpreal64 *v, GA_Size size) { UT_VDBType grid_type = p->getStorageType(); GA_Size n = SYSmin(UTvdbGetGridTupleSize(grid_type), size); UT_ASSERT(n > 0); UTvdbCallScalarType(grid_type, intrinsicBackgroundS, p->getGrid(), v) else UTvdbCallVec3Type(grid_type, intrinsicBackgroundV, p->getGrid(), v, n) else if (grid_type == UT_VDB_BOOL) { intrinsicBackgroundS<openvdb::BoolGrid>( UTvdbGridCast<openvdb::BoolGrid>(p->getGrid()), v); } else n = 0; return n; } geo_Size intrinsicVoxelSize(const GEO_PrimVDB *prim, fpreal64 *v, GA_Size size) { openvdb::Vec3d voxel_size = prim->getGrid().voxelSize(); GA_Size n = SYSmin(3, size); for (GA_Size i = 0; i < n; i++) v[i] = voxel_size[i]; return n; } geo_Size intrinsicActiveVoxelDim(const GEO_PrimVDB *prim, int64 *v, GA_Size size) { using namespace openvdb; Coord dim = prim->getGrid().evalActiveVoxelDim(); GA_Size n = SYSmin(3, size); for (GA_Size i = 0; i < n; i++) v[i] = dim[i]; return n; } int64 intrinsicActiveVoxelCount(const GEO_PrimVDB *prim) { return 
prim->getGrid().activeVoxelCount(); } geo_Size intrinsicTransform(const GEO_PrimVDB *prim, fpreal64 *v, GA_Size size) { using namespace openvdb; const GridBase & grid = prim->getGrid(); const math::Transform & xform = grid.transform(); math::MapBase::ConstPtr bmap = xform.baseMap(); math::AffineMap::Ptr amap = bmap->getAffineMap(); math::Mat4d m4 = amap->getMat4(); const double * data = m4.asPointer(); size = SYSmin(size, 16); for (int i = 0; i < size; ++i) v[i] = data[i]; return geo_Size(size); } geo_Size intrinsicSetTransform(GEO_PrimVDB *q, const fpreal64 *v, GA_Size size) { if (size < 16) return 0; UT_DMatrix4 m(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7], v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15]); q->setTransform4(m); return 16; } const char * intrinsicVisualMode(const GEO_PrimVDB *p) { return GEOgetVolumeVisToken(p->getVisualization()); } const char * intrinsicVisualLod(const GEO_PrimVDB *p) { return GEOgetVolumeVisLodToken(p->getVisLod()); } openvdb::Metadata::ConstPtr intrinsicGetMeta(const GEO_PrimVDB *p, geo_Intrinsic id) { using namespace openvdb; return p->getGrid()[theMetaNames.getToken(id) + 4]; } void intrinsicSetMeta( GEO_PrimVDB *p, geo_Intrinsic id, const openvdb::Metadata &meta) { using namespace openvdb; MetaMap &meta_map = p->getMetadata(); const char *name = theMetaNames.getToken(id) + 4; meta_map.removeMeta(name); meta_map.insertMeta(name, meta); } void intrinsicGetMetaString( const GEO_PrimVDB *p, geo_Intrinsic id, UT_String &v) { using namespace openvdb; Metadata::ConstPtr meta = intrinsicGetMeta(p, id); if (meta) v = meta->str(); else v = ""; } void intrinsicSetMetaString( GEO_PrimVDB *p, geo_Intrinsic id, const char *v) { intrinsicSetMeta(p, id, openvdb::StringMetadata(v)); } bool intrinsicGetMetaBool(const GEO_PrimVDB *p, geo_Intrinsic id) { using namespace openvdb; Metadata::ConstPtr meta = intrinsicGetMeta(p, id); if (meta) return meta->asBool(); else return false; } void intrinsicSetMetaBool(GEO_PrimVDB *p, geo_Intrinsic id, int64 v) { intrinsicSetMeta(p, id, openvdb::BoolMetadata(v != 0)); } } // namespace anonymous #define VDB_INTRINSIC_META_STR(CLASS, ID) { \ struct callbacks { \ static geo_Size evalS(const CLASS *o, UT_String &v) \ { intrinsicGetMetaString(o, ID, v); return 1; } \ static geo_Size evalSA(const CLASS *o, UT_StringArray &v) \ { \ UT_String str; \ intrinsicGetMetaString(o, ID, str); \ v.append(str); \ return 1; \ } \ static geo_Size setSS(CLASS *o, const char **v, GA_Size) \ { intrinsicSetMetaString(o, ID, v[0]); return 1; } \ static geo_Size setSA(CLASS *o, const UT_StringArray &a) \ { intrinsicSetMetaString(o, ID, a(0)); return 1; } \ }; \ GA_INTRINSIC_DEF_S(ID, theMetaNames.getToken(ID), 1) \ myEval[ID].myS = callbacks::evalS; \ myEval[ID].mySA = callbacks::evalSA; \ myEval[ID].mySetSS = callbacks::setSS; \ myEval[ID].mySetSA = callbacks::setSA; \ myEval[ID].myReadOnly = false; \ } #define VDB_INTRINSIC_META_BOOL(CLASS, ID) { \ struct callbacks { \ static geo_Size eval(const CLASS *o, int64 *v, GA_Size) \ { v[0] = intrinsicGetMetaBool(o, ID); return 1; } \ static geo_Size setFunc(CLASS *o, const int64 *v, GA_Size) \ { intrinsicSetMetaBool(o, ID, v[0]); return 1; } \ }; \ GA_INTRINSIC_DEF_I(ID, theMetaNames.getToken(ID), 1) \ myEval[ID].myI = callbacks::eval; \ myEval[ID].mySetI = callbacks::setFunc; \ myEval[ID].myReadOnly = false; \ } GA_START_INTRINSIC_DEF(GEO_PrimVDB, geo_NUM_INTRINSICS) GA_INTRINSIC_VARYING_F(GEO_PrimVDB, geo_INTRINSIC_BACKGROUND, "background", intrinsicBackgroundTupleSize, intrinsicBackground); 
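// The entries below expose the voxel size, active-voxel dimensions and count,
// the 4x4 transform, the visualization options and the vdb_* metadata strings
// through the same intrinsic-attribute mechanism.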
GA_INTRINSIC_TUPLE_F(GEO_PrimVDB, geo_INTRINSIC_VOXELSIZE, "voxelsize", 3, intrinsicVoxelSize); GA_INTRINSIC_TUPLE_I(GEO_PrimVDB, geo_INTRINSIC_ACTIVEVOXELDIM, "activevoxeldimensions", 3, intrinsicActiveVoxelDim); GA_INTRINSIC_I(GEO_PrimVDB, geo_INTRINSIC_ACTIVEVOXELCOUNT, "activevoxelcount", intrinsicActiveVoxelCount); GA_INTRINSIC_TUPLE_F(GEO_PrimVDB, geo_INTRINSIC_TRANSFORM, "transform", 16, intrinsicTransform); GA_INTRINSIC_SET_TUPLE_F(GEO_PrimVDB, geo_INTRINSIC_TRANSFORM, intrinsicSetTransform); GA_INTRINSIC_S(GEO_PrimVDB, geo_INTRINSIC_VOLUMEVISUALMODE, "volumevisualmode", intrinsicVisualMode) GA_INTRINSIC_METHOD_F(GEO_PrimVDB, geo_INTRINSIC_VOLUMEVISUALDENSITY, "volumevisualdensity", getVisDensity) GA_INTRINSIC_METHOD_F(GEO_PrimVDB, geo_INTRINSIC_VOLUMEVISUALISO, "volumevisualiso", getVisIso) GA_INTRINSIC_S(GEO_PrimVDB, geo_INTRINSIC_VOLUMEVISUALLOD, "volumevisuallod", intrinsicVisualLod) VDB_INTRINSIC_META_STR(GEO_PrimVDB, geo_INTRINSIC_META_GRID_CLASS) VDB_INTRINSIC_META_STR(GEO_PrimVDB, geo_INTRINSIC_META_GRID_CREATOR) VDB_INTRINSIC_META_BOOL(GEO_PrimVDB, geo_INTRINSIC_META_IS_LOCAL_SPACE) VDB_INTRINSIC_META_BOOL(GEO_PrimVDB, geo_INTRINSIC_META_SAVE_HALF_FLOAT) VDB_INTRINSIC_META_STR(GEO_PrimVDB, geo_INTRINSIC_META_VALUE_TYPE) VDB_INTRINSIC_META_STR(GEO_PrimVDB, geo_INTRINSIC_META_VECTOR_TYPE) GA_END_INTRINSIC_DEF(GEO_PrimVDB, GEO_Primitive) /*static*/ bool GEO_PrimVDB::isIntrinsicMetadata(const char *name) { return theMetaNames.contains(name); } #endif // SESI_OPENVDB || SESI_OPENVDB_PRIM
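////////////////////////////////////////

// Example usage (not part of the original source): a minimal sketch, assuming
// a valid scalar-valued GEO_PrimVDB named `vdb`. The helper name below is
// hypothetical and only illustrates the query API defined above; it is kept
// inside #if 0 so it never compiles into the library.
#if 0
static void
examplePrimVDBQuery(const GEO_PrimVDB &vdb)
{
    // World-space centre of the active voxel region.
    const UT_Vector3 pos = vdb.baryCenter();

    // Scalar value at that position (non-scalar grids silently return zero).
    const fpreal value = vdb.getValueF(pos);

    // World-space gradient via central differences (see geo_EvalGradients).
    UT_Vector3 grad(0, 0, 0);
    vdb.evalGradients(&grad, /*stride*/1, &pos, /*num_positions*/1, /*normalize*/false);

    // Per-axis voxel size in world units.
    const UT_Vector3 vsize = vdb.getVoxelSize();

    UT_ASSERT(vsize.x() > 0);
    (void)value; (void)grad;
}
#endif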
116179
C++
30.247983
129
0.581069
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/GU_VDBPointTools.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 #include "GU_VDBPointTools.h"
118
C++
15.999998
48
0.754237
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/GEO_PrimVDB.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /* * Copyright (c) Side Effects Software Inc. * * Produced by: * Side Effects Software Inc * 477 Richmond Street West * Toronto, Ontario * Canada M5V 3E7 * 416-504-9876 * * NAME: GEO_PrimVDB.h ( GEO Library, C++) * * COMMENTS: Custom VDB primitive. */ // Using the native OpenVDB Primitive shipped with Houdini is strongly recommended, // as there is no guarantee that this code will be kept in sync with Houdini. // However, for debugging it can be useful, so supply -DSESI_OPENVDB_PRIM to // the compiler to build this custom primitive. #if !defined(SESI_OPENVDB) && !defined(SESI_OPENVDB_PRIM) #include <GEO/GEO_PrimVDB.h> namespace openvdb_houdini { using ::GEO_VolumeOptions; using ::GEO_PrimVDB; } #else // SESI_OPENVDB || SESI_OPENVDB_PRIM #ifndef __HDK_GEO_PrimVDB__ #define __HDK_GEO_PrimVDB__ #include <GEO/GEO_Primitive.h> #include <GEO/GEO_VolumeOptions.h> #include <GA/GA_Defines.h> #include <SYS/SYS_AtomicInt.h> // for SYS_AtomicCounter #include <UT/UT_BoundingBox.h> #include "UT_VDBUtils.h" #include <openvdb/Platform.h> #include <openvdb/openvdb.h> class GEO_Detail; class GEO_PrimVolume; class GEO_PrimVolumeXform; class UT_MemoryCounter; class OPENVDB_HOUDINI_API GEO_PrimVDB : public GEO_Primitive { public: typedef uint64 UniqueId; protected: /// NOTE: The constructor should only be called from subclass /// constructors. GEO_PrimVDB(GEO_Detail *d, GA_Offset offset = GA_INVALID_OFFSET); ~GEO_PrimVDB() override; public: static GA_PrimitiveFamilyMask buildFamilyMask() { return GA_FAMILY_NONE; } /// @{ /// Required interface methods bool isDegenerate() const override; int getBBox(UT_BoundingBox *bbox) const override; void reverse() override; UT_Vector3 computeNormal() const override; void copyPrimitive(const GEO_Primitive *src) override; void copySubclassData(const GA_Primitive *source) override; using GEO_Primitive::getVertexOffset; using GEO_Primitive::getPointOffset; using GEO_Primitive::setPointOffset; using GEO_Primitive::getPos3; using GEO_Primitive::setPos3; SYS_FORCE_INLINE GA_Offset getVertexOffset() const { return getVertexOffset(0); } SYS_FORCE_INLINE GA_Offset getPointOffset() const { return getPointOffset(0); } SYS_FORCE_INLINE void setPointOffset(GA_Offset pt) { setPointOffset(0, pt); } SYS_FORCE_INLINE UT_Vector3 getPos3() const { return getPos3(0); } SYS_FORCE_INLINE void setPos3(const UT_Vector3 &pos) { setPos3(0, pos); } /// Convert an index in the voxel array into the corresponding worldspace /// location void indexToPos(int x, int y, int z, UT_Vector3 &pos) const; void findexToPos(UT_Vector3 index, UT_Vector3 &pos) const; void indexToPos(exint x, exint y, exint z, UT_Vector3D &pos) const; void findexToPos(UT_Vector3D index, UT_Vector3D &pos) const; /// Convert a 3d position into the closest index value. void posToIndex(UT_Vector3 pos, int &x, int &y, int &z) const; void posToIndex(UT_Vector3 pos, UT_Vector3 &index) const; void posToIndex(UT_Vector3D pos, exint &x, exint &y, exint &z) const; void posToIndex(UT_Vector3D pos, UT_Vector3D &index) const; /// Evaluate the voxel value at the given world space position. 
/// Note that depending on the underlying VDB type, this may not /// be sensible, in which case a zero will silently be returned fpreal getValueF(const UT_Vector3 &pos) const; fpreal getValueAtIndexF(int ix, int iy, int iz) const; UT_Vector3D getValueV3(const UT_Vector3 &pos) const; UT_Vector3D getValueAtIndexV3(int ix, int iy, int iz) const; void getValues(float *f, int stride, const UT_Vector3 *pos, int num) const; void getValues(int *f, int stride, const UT_Vector3 *pos, int num) const; void getValuesAtIndices(float *f, int stride, const int *ix, const int *iy, const int *iz, int num) const; void getValuesAtIndices(int *f, int stride, const int *ix, const int *iy, const int *iz, int num) const; /// Vector grid variants. void getValues(UT_Vector3 *f, int stride, const UT_Vector3 *pos, int num) const; void getValuesAtIndices(UT_Vector3 *f, int stride, const int *ix, const int *iy, const int *iz, int num) const; void getValues(double *f, int stride, const UT_Vector3D *pos, int num) const; void getValues(exint *f, int stride, const UT_Vector3D *pos, int num) const; void getValuesAtIndices(double *f, int stride, const exint *ix, const exint *iy, const exint *iz, int num) const; void getValuesAtIndices(exint *f, int stride, const exint *ix, const exint *iy, const exint *iz, int num) const; /// Vector grid variants. void getValues(UT_Vector3D *f, int stride, const UT_Vector3D *pos, int num) const; void getValuesAtIndices(UT_Vector3D *f, int stride, const exint *ix, const exint *iy, const exint *iz, int num) const; // Worldspace gradient at the given position UT_Vector3 getGradient(const UT_Vector3 &pos) const; /// Evaluate this grid's gradients at the given world space positions. /// Does nothing and returns false if the grid is non-scalar. /// If normalize is true, then the gradients will be normalized to be unit /// length. bool evalGradients( UT_Vector3 *gradients, int gradients_stride, const UT_Vector3 *positions, int num_positions, bool normalize = false) const; /// Get the storage type of the grid SYS_FORCE_INLINE UT_VDBType getStorageType() const { return myGridAccessor.getStorageType(); } /// Get the tuple size, usually 1 or 3 SYS_FORCE_INLINE int getTupleSize() const { return UTvdbGetGridTupleSize(getStorageType()); } bool isSDF() const; /// True if the two volumes map the same indices to the same positions. bool isAligned(const GEO_PrimVDB *vdb) const; /// True if the two volumes have the same active regions bool isActiveRegionMatched(const GEO_PrimVDB *vdb) const; /// True if we are aligned with the world axes. I.e., all our /// off-diagonals are zero and our diagonal is positive. bool isWorldAxisAligned() const; // Transform the matrix associated with this primitive. Translate is // ignored. void transform(const UT_Matrix4 &mat) override; /// Accessors for the 4x4 matrix representing the affine transform that /// converts from index space voxel coordinates to world space. For frustum /// maps, this will be the transform as if the taper value is set to 1. /// @{ void setTransform4(const UT_DMatrix4 &xform4); void setTransform4(const UT_Matrix4 &xform4); UT_Matrix4D getTransform4() const; /// @} // Take the whole set of points into consideration when applying the // point removal operation to this primitive. The method returns 0 if // successful, -1 if it failed because it would have become degenerate, // and -2 if it failed because it would have had to remove the primitive // altogether. 
int detachPoints(GA_PointGroup &grp) override; /// Before a point is deleted, all primitives using the point will be /// notified. The method should return "false" if it's impossible to /// delete the point. Otherwise, the vertices should be removed. GA_DereferenceStatus dereferencePoint(GA_Offset point, bool dry_run=false) override; GA_DereferenceStatus dereferencePoints(const GA_RangeMemberQuery &pt_q, bool dry_run=false) override; const GA_PrimitiveJSON *getJSON() const override; /// This method assigns a preallocated vertex to the primitive, optionally /// creating the topological link between the primitive and new vertex. void assignVertex(GA_Offset new_vtx, bool update_topology); /// Evaluate a point given a u,v coordinate (with derivatives) bool evaluatePointRefMap( GA_Offset result_vtx, GA_AttributeRefMap &hlist, fpreal u, fpreal v, uint du, uint dv) const override; /// Evaluate position given a u,v coordinate (with derivatives) int evaluatePointV4( UT_Vector4 &pos, float u, float v = 0, unsigned du=0, unsigned dv=0) const override { return GEO_Primitive::evaluatePointV4(pos, u, v, du, dv); } /// @} /// Convert transforms between native volumes and VDBs /// @{ /// Get a GEO_PrimVolumeXform which represents the grid's full transform. /// The returned space's fromVoxelSpace() method will convert index space /// voxel coordinates to world space positions (and vice versa for /// toVoxelSpace()). GEO_PrimVolumeXform getIndexSpaceTransform() const; /// Equivalent to getSpaceTransform(getGrid().evalActiveVoxelBoundingBox()). /// The returned space's fromVoxelSpace() method will convert 0-1 /// coordinates over the active voxel bounding box to world space (and vice /// versa for toVoxelSpace()). GEO_PrimVolumeXform getSpaceTransform() const; /// Gives the equivalent to GEO_PrimVolume's getSpaceTransform() by using /// the given bounding box to determine the bounds of the transform. /// The resulting world space sample points will be offset by half a voxel /// so that they match GEO_PrimVolume. /// The returned space's fromVoxelSpace() method will convert 0-1 /// coordinates over the bbox extents to world space (and vice versa for /// toVoxelSpace()). GEO_PrimVolumeXform getSpaceTransform(const UT_BoundingBoxD &bbox) const; /// Sets the transform from a GEO_PrimVolume's getSpaceTransform() by using /// the index space [(0,0,0), resolution] bbox. If force_taper is true, /// then the resulting transform will always be a NonlinearFrustumMap even /// if there is no tapering. void setSpaceTransform(const GEO_PrimVolumeXform &space, const UT_Vector3R &resolution, bool force_taper = false); /// @} fpreal getTaper() const; /// Returns the resolution of the active voxel array. /// Does *not* mean the indices go from 0..rx, however! void getRes(int &rx, int &ry, int &rz) const; /// Computes the voxel diameter by taking a step in x, y, and z, /// converting to world space and taking the length of that vector. fpreal getVoxelDiameter() const; /// Returns the length of the voxel when you take an x, y, and z step UT_Vector3 getVoxelSize() const; /// Compute useful aggregate properties of the volume. fpreal calcMinimum() const; fpreal calcMaximum() const; fpreal calcAverage() const; /// VDBs may either be unbounded, or created with a specific frustum /// range. The latter is important for tapered VDBs that otherwise /// have a singularity at the camera location. Tools can use the /// presence of an idxbox as a clipping box in index space. 
/// This does *NOT* relate to getRes - it may be much larger or /// even in some cases smaller. bool getFrustumBounds(UT_BoundingBox &idxbox) const; enum ActivateOperation { ACTIVATE_UNION, // Activate anything in source ACTIVATE_INTERSECT, // Deactivate anything not in source ACTIVATE_SUBTRACT, // Deactivate anything in source ACTIVATE_COPY // Set our activation to match source }; /// Activates voxels given an *index* space bounding box. This /// is an inclusive box. /// If this is a Frustum VDB, the activation will be clipped by that. /// Setting the value only takes effect if the voxels are activated; /// deactivated voxels are set to the background. void activateIndexBBox( const openvdb::CoordBBox& bbox, ActivateOperation operation, bool setvalue, fpreal value) { activateIndexBBoxAdapter( &bbox, operation, setvalue, value); } /// Activates all of the voxels in this VDB that are touched /// by active voxels in the source. /// If ignore_transform is true, voxels will be activated /// by grid index instead of world space position. void activateByVDB(const GEO_PrimVDB *vdb, ActivateOperation operation, bool setvalue, fpreal value, bool ignore_transform=false); /// @{ /// Though not strictly required (i.e. not pure virtual), these methods /// should be implemented for proper behaviour. GEO_Primitive *copy(int preserve_shared_pts = 0) const override; // Have we been deactivated and stashed? void stashed(bool beingstashed, GA_Offset offset=GA_INVALID_OFFSET) override; /// @} /// @{ /// Optional interface methods. Though not required, implementing these /// will give better behaviour for the new primitive. UT_Vector3 baryCenter() const override; fpreal calcVolume(const UT_Vector3 &refpt) const override; /// Calculate the surface area of the active voxels where /// a voxel face contributes if it borders an inactive voxel. fpreal calcArea() const override; /// @} /// @{ /// Enlarge a bounding box by the bounding box of the primitive. A /// return value of false indicates an error in the operation, most /// likely an invalid P. For any attribute other than the position /// these methods simply enlarge the bounding box based on the vertex. bool enlargeBoundingBox( UT_BoundingRect &b, const GA_Attribute *P) const override; bool enlargeBoundingBox( UT_BoundingBox &b, const GA_Attribute *P) const override; void enlargePointBounds(UT_BoundingBox &e) const override; /// @} /// Enlarge a bounding sphere to encompass the primitive. A return value /// of false indicates an error in the operation, most likely an invalid /// P. For any attribute other than the position this method simply /// enlarges the sphere based on the vertex. bool enlargeBoundingSphere( UT_BoundingSphere &b, const GA_Attribute *P) const override; /// Accessor for the local 3x3 affine transform matrix for the primitive. /// For frustum maps, this will be the transform as if the taper value is set /// to 1. /// @{ void getLocalTransform(UT_Matrix3D &result) const override; void setLocalTransform(const UT_Matrix3D &new_mat3) override; /// @} /// @internal Hack to condition 4x4 matrices so that we avoid creating what /// OpenVDB erroneously thinks are singular matrices. Returns true if mat4 /// was modified. 
static bool conditionMatrix(UT_Matrix4D &mat4); /// Visualization accessors /// @{ const GEO_VolumeOptions &getVisOptions() const { return myVis; } void setVisOptions(const GEO_VolumeOptions &vis) { setVisualization(vis.myMode, vis.myIso, vis.myDensity, vis.myLod); } void setVisualization( GEO_VolumeVis vismode, fpreal iso, fpreal density, GEO_VolumeVisLod lod = GEO_VOLUMEVISLOD_FULL) { myVis.myMode = vismode; myVis.myIso = iso; myVis.myDensity = density; myVis.myLod = lod; } GEO_VolumeVis getVisualization() const { return myVis.myMode; } fpreal getVisIso() const { return myVis.myIso; } fpreal getVisDensity() const { return myVis.myDensity; } GEO_VolumeVisLod getVisLod() const { return myVis.myLod; } /// @} /// Load the order from a JSON value bool loadOrder(const UT_JSONValue &p); /// @{ /// Save/Load vdb to a JSON stream bool saveVDB(UT_JSONWriter &w, const GA_SaveMap &sm, bool as_shmem = false) const; bool loadVDB(UT_JSONParser &p, bool as_shmem = false); /// @} bool saveVisualization( UT_JSONWriter &w, const GA_SaveMap &map) const; bool loadVisualization( UT_JSONParser &p, const GA_LoadMap &map); /// Method to perform quick lookup of vertex without the virtual call GA_Offset fastVertexOffset(GA_Size UT_IF_ASSERT_P(index)) const { UT_ASSERT_P(index < 1); return getVertexOffset(); } void setVertexPoint(int i, GA_Offset pt) { if (i == 0) setPointOffset(pt); } /// @brief Computes the total density of the volume, scaled by /// the volume's size. Negative values will be ignored. fpreal calcPositiveDensity() const; SYS_FORCE_INLINE bool hasGrid() const { return myGridAccessor.hasGrid(); } /// @brief If this primitive's grid's voxel data (i.e., its tree) /// is shared, replace the tree with a deep copy of itself that is /// not shared with anyone else. SYS_FORCE_INLINE void makeGridUnique() { myGridAccessor.makeGridUnique(); } /// @brief Returns true if the tree is not shared. If it is not shared, /// one can make destructive edits without makeGridUnique. bool isGridUnique() const { return myGridAccessor.isGridUnique(); } /// @brief Return a reference to this primitive's grid. /// @note Calling setGrid() invalidates all references previously returned. SYS_FORCE_INLINE const openvdb::GridBase & getConstGrid() const { return myGridAccessor.getConstGrid(*this); } /// @brief Return a reference to this primitive's grid. /// @note Calling setGrid() invalidates all references previously returned. SYS_FORCE_INLINE const openvdb::GridBase & getGrid() const { return getConstGrid(); } /// @brief Return a reference to this primitive's grid. /// @note Calling setGrid() invalidates all references previously returned. /// @warning Call makeGridUnique() before modifying the grid's voxel data. SYS_FORCE_INLINE openvdb::GridBase & getGrid() { incrGridUniqueIds(); return myGridAccessor.getGrid(*this); } /// @brief Return a shared pointer to this primitive's grid. /// @note Calling setGrid() causes the grid to which the shared pointer /// refers to be disassociated with this primitive. SYS_FORCE_INLINE openvdb::GridBase::ConstPtr getConstGridPtr() const { return myGridAccessor.getConstGridPtr(*this); } /// @brief Return a shared pointer to this primitive's grid. /// @note Calling setGrid() causes the grid to which the shared pointer /// refers to be disassociated with this primitive. SYS_FORCE_INLINE openvdb::GridBase::ConstPtr getGridPtr() const { return getConstGridPtr(); } /// @brief Return a shared pointer to this primitive's grid. 
/// @note Calling setGrid() causes the grid to which the shared pointer /// refers to be disassociated with this primitive. /// @warning Call makeGridUnique() before modifying the grid's voxel data. SYS_FORCE_INLINE openvdb::GridBase::Ptr getGridPtr() { incrGridUniqueIds(); return myGridAccessor.getGridPtr(*this); } /// @brief Set this primitive's grid to a shallow copy of the given grid. /// @note Invalidates all previous getGrid() and getConstGrid() references SYS_FORCE_INLINE void setGrid(const openvdb::GridBase &grid, bool copyPosition=true) { incrGridUniqueIds(); myGridAccessor.setGrid(grid, *this, copyPosition); } /// @brief Return a reference to this primitive's grid metadata. /// @note Calling setGrid() invalidates all references previously returned. const openvdb::MetaMap& getConstMetadata() const { return getConstGrid(); } /// @brief Return a reference to this primitive's grid metadata. /// @note Calling setGrid() invalidates all references previously returned. const openvdb::MetaMap& getMetadata() const { return getConstGrid(); } /// @brief Return a reference to this primitive's grid metadata. /// @note Calling setGrid() invalidates all references previously returned. SYS_FORCE_INLINE openvdb::MetaMap& getMetadata() { incrMetadataUniqueId(); return myGridAccessor.getGrid(*this); } /// @brief Return the value of this primitive's "name" attribute /// in the given detail. const char * getGridName() const; /// @brief Return this primitive's serial number. /// @details A primitive's serial number never changes. UniqueId getUniqueId() const { return static_cast<UniqueId>(myUniqueId.relaxedLoad()); } /// @brief Return the serial number of this primitive's voxel data. /// @details The serial number is incremented whenever a non-const /// reference or pointer to this primitive's grid is requested /// (whether or not the voxel data is ultimately modified). UniqueId getTreeUniqueId() const { return static_cast<UniqueId>(myTreeUniqueId.relaxedLoad()); } /// @brief Return the serial number of this primitive's grid metadata. /// @details The serial number is incremented whenever a non-const /// reference to the metadata or non-const access to the grid is requested /// (whether or not the metadata is ultimately modified). UniqueId getMetadataUniqueId() const { return static_cast<UniqueId>(myMetadataUniqueId.relaxedLoad()); } /// @brief Return the serial number of this primitive's transform. /// @details The serial number is incremented whenever the transform /// is modified or non-const access to this primitive's grid is requested /// (whether or not the transform is ultimately modified). UniqueId getTransformUniqueId() const { return static_cast<UniqueId>(myTransformUniqueId.relaxedLoad()); } /// @brief If this primitive's grid resolves to one of the listed grid types, /// invoke the functor @a op on the resolved grid. /// @return @c true if the functor was invoked, @c false otherwise /// /// @par Example: /// @code /// auto printOp = [](const openvdb::GridBase& grid) { grid.print(); }; /// const GEO_PrimVDB* prim = ...; /// using RealGridTypes = openvdb::TypeList<openvdb::FloatGrid, openvdb::DoubleGrid>; /// // Print info about the primitive's grid if it is a floating-point grid. /// prim->apply<RealGridTypes>(printOp); /// @endcode template<typename GridTypeListT, typename OpT> bool apply(OpT& op) const { return hasGrid() ? 
getConstGrid().apply<GridTypeListT>(op) : false; } /// @brief If this primitive's grid resolves to one of the listed grid types, /// invoke the functor @a op on the resolved grid. /// @return @c true if the functor was invoked, @c false otherwise /// @details If @a makeUnique is true, deep copy the grid's tree before /// invoking the functor if the tree is shared with other grids. /// /// @par Example: /// @code /// auto fillOp = [](const auto& grid) { // C++14 /// // Convert voxels in the given bounding box into background voxels. /// grid.fill(openvdb::CoordBBox(openvdb::Coord(0), openvdb::Coord(99)), /// grid.background(), /*active=*/false); /// }; /// GEO_PrimVDB* prim = ...; /// // Set background voxels in the primitive's grid if it is a floating-point grid. /// using RealGridTypes = openvdb::TypeList<openvdb::FloatGrid, openvdb::DoubleGrid>; /// prim->apply<RealGridTypes>(fillOp); /// @endcode template<typename GridTypeListT, typename OpT> bool apply(OpT& op, bool makeUnique = true) { if (hasGrid()) { auto& grid = myGridAccessor.getGrid(*this); if (makeUnique) { auto treePtr = grid.baseTreePtr(); if (treePtr.use_count() > 2) { // grid + treePtr = 2 // If the grid resolves to one of the listed types and its tree // is shared with other grids, replace the tree with a deep copy. grid.apply<GridTypeListT>([this](openvdb::GridBase& baseGrid) { baseGrid.setTree(baseGrid.constBaseTree().copy()); this->incrTreeUniqueId(); }); } } if (grid.apply<GridTypeListT>(op)) { incrGridUniqueIds(); return true; } } return false; } protected: typedef SYS_AtomicCounter AtomicUniqueId; // 64-bit /// Register intrinsic attributes GA_DECLARE_INTRINSICS(override) /// Return true if the given metadata token is an intrinsic static bool isIntrinsicMetadata(const char *name); /// @warning vertexPoint() doesn't check the bounds. Use with caution. GA_Offset vertexPoint(GA_Size) const { return getPointOffset(); } /// Report approximate memory usage, excluding sizeof(*this), /// because the subclass doesn't have access to myGridAccessor. int64 getBaseMemoryUsage() const; // This is called by the subclasses to count the // memory used by this, excluding sizeof(*this). void countBaseMemory(UT_MemoryCounter &counter) const; /// @brief Return an ID number that is guaranteed to be unique across /// all VDB primitives. static UniqueId nextUniqueId(); void incrTreeUniqueId() { myTreeUniqueId.maximum(nextUniqueId()); } void incrMetadataUniqueId() { myMetadataUniqueId.maximum(nextUniqueId()); } void incrTransformUniqueId() { myTransformUniqueId.maximum(nextUniqueId()); } void incrGridUniqueIds() { incrTreeUniqueId(); incrMetadataUniqueId(); incrTransformUniqueId(); } /// @brief Replace this primitive's grid with a shallow copy /// of another primitive's grid. void copyGridFrom(const GEO_PrimVDB&, bool copyPosition=true); /// @brief GridAccessor manages access to a GEO_PrimVDB's grid. /// @details In keeping with OpenVDB library conventions, the grid /// is stored internally by shared pointer. However, grid objects /// are never shared among primitives, though their voxel data /// (i.e., their trees) may be shared. 
/// <p>Among other things, GridAccessor /// - ensures that each primitive's transform and metadata are unique /// (i.e., not shared with anyone else) /// - allows primitives to share voxel data but, via makeGridUnique(), /// provides a way to break the connection /// - ensures that the primitive's transform and the grid's transform /// are in sync (specifically, the translation component, which is /// stored independently as a vertex offset). class OPENVDB_HOUDINI_API GridAccessor { public: SYS_FORCE_INLINE GridAccessor() : myStorageType(UT_VDB_INVALID) { } SYS_FORCE_INLINE void clear() { myGrid.reset(); myStorageType = UT_VDB_INVALID; } SYS_FORCE_INLINE openvdb::GridBase & getGrid(const GEO_PrimVDB &prim) { updateGridTranslates(prim); return *myGrid; } SYS_FORCE_INLINE const openvdb::GridBase & getConstGrid(const GEO_PrimVDB &prim) const { updateGridTranslates(prim); return *myGrid; } SYS_FORCE_INLINE openvdb::GridBase::Ptr getGridPtr(const GEO_PrimVDB &prim) { updateGridTranslates(prim); return myGrid; } SYS_FORCE_INLINE openvdb::GridBase::ConstPtr getConstGridPtr(const GEO_PrimVDB &prim) const { updateGridTranslates(prim); return myGrid; } // These accessors will ensure the transform's translate is set into // the vertex position. SYS_FORCE_INLINE void setGrid(const openvdb::GridBase& grid, GEO_PrimVDB& prim, bool copyPosition=true) { setGridAdapter(&grid, prim, copyPosition); } SYS_FORCE_INLINE void setTransform( const openvdb::math::Transform &xform, GEO_PrimVDB &prim) { setTransformAdapter(&xform, prim); } void makeGridUnique(); bool isGridUnique() const; SYS_FORCE_INLINE UT_VDBType getStorageType() const { return myStorageType; } SYS_FORCE_INLINE bool hasGrid() const { return myGrid != 0; } private: void updateGridTranslates(const GEO_PrimVDB &prim) const; SYS_FORCE_INLINE void setVertexPosition( const openvdb::math::Transform &xform, GEO_PrimVDB &prim) { setVertexPositionAdapter(&xform, prim); } void setGridAdapter(const void* grid, GEO_PrimVDB&, bool copyPosition); void setTransformAdapter(const void* xform, GEO_PrimVDB&); void setVertexPositionAdapter(const void* xform, GEO_PrimVDB&); private: openvdb::GridBase::Ptr myGrid; UT_VDBType myStorageType; }; private: void activateIndexBBoxAdapter( const void* bbox, ActivateOperation, bool setvalue, fpreal value); GridAccessor myGridAccessor; GEO_VolumeOptions myVis; AtomicUniqueId myUniqueId; AtomicUniqueId myTreeUniqueId; AtomicUniqueId myMetadataUniqueId; AtomicUniqueId myTransformUniqueId; }; // class GEO_PrimVDB #ifndef SESI_OPENVDB namespace openvdb_houdini { using ::GEO_VolumeOptions; using ::GEO_PrimVDB; } #endif //////////////////////////////////////// namespace UT_VDBUtils { // This overload of UT_VDBUtils::callTypedGrid(), for GridBaseType = GEO_PrimVDB, // calls makeGridUnique() on the primitive just before instantiating and // invoking the functor on the primitive's grid. This delays the call // to makeGridUnique() until it is known to be necessary and thus avoids // making deep copies of grids of types that won't be processed. 
template<typename GridType, typename OpType> inline void callTypedGrid(GEO_PrimVDB& prim, OpType& op) { prim.makeGridUnique(); op.template operator()<GridType>(*(UTverify_cast<GridType*>(&prim.getGrid()))); } // Overload of callTypedGrid() for GridBaseType = const GEO_PrimVDB template<typename GridType, typename OpType> inline void callTypedGrid(const GEO_PrimVDB& prim, OpType& op) { op.template operator()<GridType>(*(UTverify_cast<const GridType*>(&prim.getConstGrid()))); } } // namespace UT_VDBUtils // Define UTvdbProcessTypedGrid*() (see UT_VDBUtils.h) for grids // belonging to primitives, for various subsets of grid types. UT_VDB_DECL_PROCESS_TYPED_GRID(GEO_PrimVDB&) UT_VDB_DECL_PROCESS_TYPED_GRID(const GEO_PrimVDB&) //////////////////////////////////////// /// @brief Utility function to process the grid of a const primitive using functor @a op. /// @details It will invoke @code op.operator()<GridT>(const GridT &grid) @endcode /// @{ template <typename OpT> inline bool GEOvdbProcessTypedGrid(const GEO_PrimVDB &vdb, OpT &op) { return UTvdbProcessTypedGrid(vdb.getStorageType(), vdb.getGrid(), op); } template <typename OpT> inline bool GEOvdbProcessTypedGridReal(const GEO_PrimVDB &vdb, OpT &op) { return UTvdbProcessTypedGridReal(vdb.getStorageType(), vdb.getGrid(), op); } template <typename OpT> inline bool GEOvdbProcessTypedGridScalar(const GEO_PrimVDB &vdb, OpT &op) { return UTvdbProcessTypedGridScalar(vdb.getStorageType(), vdb.getGrid(), op); } template <typename OpT> inline bool GEOvdbProcessTypedGridTopology(const GEO_PrimVDB &vdb, OpT &op) { return UTvdbProcessTypedGridTopology(vdb.getStorageType(), vdb.getGrid(), op); } template <typename OpT> inline bool GEOvdbProcessTypedGridVec3(const GEO_PrimVDB &vdb, OpT &op) { return UTvdbProcessTypedGridVec3(vdb.getStorageType(), vdb.getGrid(), op); } template <typename OpT> inline bool GEOvdbProcessTypedGridPoint(const GEO_PrimVDB &vdb, OpT &op) { return UTvdbProcessTypedGridPoint(vdb.getStorageType(), vdb.getGrid(), op); } /// @} /// @brief Utility function to process the grid of a primitive using functor @a op. 
/// @param vdb the primitive whose grid is to be processed /// @param op a functor with a call operator of the form /// @code op.operator()<GridT>(GridT &grid) @endcode /// @param makeUnique if @c true, call <tt>vdb.makeGridUnique()</tt> before /// invoking the functor /// @{ template <typename OpT> inline bool GEOvdbProcessTypedGrid(GEO_PrimVDB &vdb, OpT &op, bool makeUnique = true) { if (makeUnique) return UTvdbProcessTypedGrid(vdb.getStorageType(), vdb, op); return UTvdbProcessTypedGrid(vdb.getStorageType(), vdb.getGrid(), op); } template <typename OpT> inline bool GEOvdbProcessTypedGridReal(GEO_PrimVDB &vdb, OpT &op, bool makeUnique = true) { if (makeUnique) return UTvdbProcessTypedGridReal(vdb.getStorageType(), vdb, op); return UTvdbProcessTypedGridReal(vdb.getStorageType(), vdb.getGrid(), op); } template <typename OpT> inline bool GEOvdbProcessTypedGridScalar(GEO_PrimVDB &vdb, OpT &op, bool makeUnique = true) { if (makeUnique) return UTvdbProcessTypedGridScalar(vdb.getStorageType(), vdb, op); return UTvdbProcessTypedGridScalar(vdb.getStorageType(), vdb.getGrid(), op); } template <typename OpT> inline bool GEOvdbProcessTypedGridTopology(GEO_PrimVDB &vdb, OpT &op, bool makeUnique = true) { if (makeUnique) return UTvdbProcessTypedGridTopology(vdb.getStorageType(), vdb, op); return UTvdbProcessTypedGridTopology(vdb.getStorageType(), vdb.getGrid(), op); } template <typename OpT> inline bool GEOvdbProcessTypedGridVec3(GEO_PrimVDB &vdb, OpT &op, bool makeUnique = true) { if (makeUnique) return UTvdbProcessTypedGridVec3(vdb.getStorageType(), vdb, op); return UTvdbProcessTypedGridVec3(vdb.getStorageType(), vdb.getGrid(), op); } template <typename OpT> inline bool GEOvdbProcessTypedGridPoint(GEO_PrimVDB &vdb, OpT &op, bool makeUnique = true) { if (makeUnique) return UTvdbProcessTypedGridPoint(vdb.getStorageType(), vdb, op); return UTvdbProcessTypedGridPoint(vdb.getStorageType(), vdb.getGrid(), op); } /// @} #endif // __HDK_GEO_PrimVDB__ #endif // SESI_OPENVDB || SESI_OPENVDB_PRIM
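As a usage sketch (not part of the header): the dispatch helpers above expect a functor whose templated call operator is instantiated for whichever grid type the primitive actually stores. The functor and free function below use hypothetical names, purely for illustration.

struct CountActiveVoxelsOp
{
    openvdb::Index64 count = 0;
    // Instantiated by the dispatcher for the primitive's concrete scalar grid type.
    template<typename GridT>
    void operator()(const GridT& grid) { count = grid.activeVoxelCount(); }
};

inline openvdb::Index64
countActiveVoxels(const GEO_PrimVDB& vdb)
{
    CountActiveVoxelsOp op;
    // Dispatches on vdb.getStorageType(); returns false (leaving count at zero)
    // if the primitive does not hold one of the scalar grid types.
    GEOvdbProcessTypedGridScalar(vdb, op);
    return op.count;
}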
38,266
C++
42.288461
137
0.602728
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Vector_Merge.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Vector_Merge.cc /// /// @author FX R&D OpenVDB team /// /// @brief Merge groups of up to three scalar grids into vector grids. #ifdef _WIN32 #define BOOST_REGEX_NO_LIB #define HBOOST_REGEX_NO_LIB #endif #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/tools/ValueTransformer.h> // for tools::foreach() #include <openvdb/tools/Prune.h> #include <UT/UT_Interrupt.h> #include <UT/UT_SharedPtr.h> #include <UT/UT_String.h> #include <hboost/regex.hpp> #include <functional> #include <memory> #include <set> #include <sstream> #include <stdexcept> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; // HAVE_MERGE_GROUP is disabled in Houdini #ifdef SESI_OPENVDB #define HAVE_MERGE_GROUP 0 #else #define HAVE_MERGE_GROUP 1 #endif class SOP_OpenVDB_Vector_Merge: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Vector_Merge(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Vector_Merge() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; protected: bool updateParmsFlags() override; void resolveObsoleteParms(PRM_ParmList*) override; static void addWarningMessage(SOP_OpenVDB_Vector_Merge* self, const char* msg) { if (self && msg) self->addWarning(SOP_MESSAGE, msg); } }; void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; // Group of X grids parms.add(hutil::ParmFactory(PRM_STRING, "xgroup", "X Group") .setDefault("@name=*.x") .setTooltip( "Specify a group of scalar input VDB grids to be used\n" "as the x components of the merged vector grids.\n" "Each x grid will be paired with a y and a z grid\n" "(if provided) to produce an output vector grid.") .setChoiceList(&hutil::PrimGroupMenuInput1)); // Group of Y grids parms.add(hutil::ParmFactory(PRM_STRING, "ygroup", "Y Group") .setDefault("@name=*.y") .setTooltip( "Specify a group of scalar input VDB grids to be used\n" "as the y components of the merged vector grids.\n" "Each y grid will be paired with an x and a z grid\n" "(if provided) to produce an output vector grid.") .setChoiceList(&hutil::PrimGroupMenuInput1)); // Group of Z grids parms.add(hutil::ParmFactory(PRM_STRING, "zgroup", "Z Group") .setDefault("@name=*.z") .setTooltip( "Specify a group of scalar input VDB grids to be used\n" "as the z components of the merged vector grids.\n" "Each z grid will be paired with an x and a y grid\n" "(if provided) to produce an output vector grid.") .setChoiceList(&hutil::PrimGroupMenuInput1)); // Use X name parms.add(hutil::ParmFactory(PRM_TOGGLE, "usexname", "Use Basename of X VDB") #ifdef SESI_OPENVDB .setDefault(PRMoneDefaults) #else .setDefault(PRMzeroDefaults) #endif .setDocumentation( "Use the base name of the __X Group__ as the name for the output VDB." 
" For example, if __X Group__ is `Cd.x`, the generated vector VDB" " will be named `Cd`.\n\n" "If this option is disabled or if the __X__ primitive has no `name` attribute," " the output VDB will be given the __Merged VDB Name__.")); // Output vector grid name parms.add(hutil::ParmFactory(PRM_STRING, "merge_name", "Merged VDB Name") .setDefault("merged#") .setTooltip( "Specify a name for the merged vector grids.\n" "Include a '#' character in the name to number the output grids\n" "(starting from 1) in the order that they are processed.")); { // Output grid's vector type (invariant, covariant, etc.) std::vector<std::string> items; for (int i = 0; i < openvdb::NUM_VEC_TYPES ; ++i) { items.push_back(openvdb::GridBase::vecTypeToString(openvdb::VecType(i))); items.push_back(openvdb::GridBase::vecTypeExamples(openvdb::VecType(i))); } parms.add(hutil::ParmFactory(PRM_ORD, "vectype", "Vector Type") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setDocumentation("\ Specify how the output VDB's vector values should be affected by transforms:\n\ \n\ Tuple / Color / UVW:\n\ No transformation\n\ \n\ Gradient / Normal:\n\ Inverse-transpose transformation, ignoring translation\n\ \n\ Unit Normal:\n\ Inverse-transpose transformation, ignoring translation,\n\ followed by renormalization\n\ \n\ Displacement / Velocity / Acceleration:\n\ \"Regular\" transformation, ignoring translation\n\ \n\ Position:\n\ \"Regular\" transformation with translation\n")); } #if HAVE_MERGE_GROUP // Toggle to enable/disable grouping parms.add(hutil::ParmFactory(PRM_TOGGLE, "enable_grouping", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("If enabled, create a group for all merged vector grids.")); // Output vector grid group name parms.add(hutil::ParmFactory(PRM_STRING, "group", "Merge Group") .setTooltip("Specify a name for the output group of merged vector grids.")); #endif // Toggle to keep/remove source grids parms.add(hutil::ParmFactory(PRM_TOGGLE, "remove_sources", "Remove Source VDBs") .setDefault(PRMoneDefaults) .setTooltip("Remove scalar grids that have been merged.")); // Toggle to copy inactive values in addition to active values parms.add( hutil::ParmFactory(PRM_TOGGLE, "copyinactive", "Copy Inactive Values") .setDefault(PRMzeroDefaults) .setTooltip( "If enabled, merge the values of both active and inactive voxels.\n" "If disabled, merge the values of active voxels only, treating\n" "inactive voxels as active background voxels wherever\n" "corresponding input voxels have different active states.")); #ifndef SESI_OPENVDB // Verbosity toggle parms.add(hutil::ParmFactory(PRM_TOGGLE, "verbose", "Verbose") .setDocumentation("If enabled, print debugging information to the terminal.")); #endif hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "scalar_x_group", "X Group") .setDefault("@name=*.x")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "scalar_y_group", "Y Group") .setDefault("@name=*.y")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "scalar_z_group", "Z Group") .setDefault("@name=*.z")); // Register this operator. 
hvdb::OpenVDBOpFactory("VDB Vector Merge", SOP_OpenVDB_Vector_Merge::factory, parms, *table) .addInput("Scalar VDBs to merge into vector") .setObsoleteParms(obsoleteParms) .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Vector_Merge::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Merge three scalar VDB primitives into one vector VDB primitive.\"\"\"\n\ \n\ @overview\n\ \n\ This node will create a vector-valued VDB volume using the values of\n\ corresponding voxels from up to three scalar VDBs as the vector components.\n\ The scalar VDBs must have the same voxel size and transform; if they do not,\n\ use the [OpenVDB Resample node|Node:sop/DW_OpenVDBResample] to resample\n\ two of the VDBs to match the third.\n\ \n\ TIP:\n\ To reverse the merge (i.e., to split a vector VDB into three scalar VDBs),\n\ use the [OpenVDB Vector Split node|Node:sop/DW_OpenVDBVectorSplit].\n\ \n\ @related\n\ - [OpenVDB Vector Split|Node:sop/DW_OpenVDBVectorSplit]\n\ - [Node:sop/vdbvectormerge]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } void SOP_OpenVDB_Vector_Merge::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; for (auto axis: { "x", "y", "z" }) { const std::string oldName = "scalar_" + (axis + std::string{"_group"}), newName = axis + std::string{"group"}; resolveRenamedParm(*obsoleteParms, oldName.c_str(), newName.c_str()); } // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } bool SOP_OpenVDB_Vector_Merge::updateParmsFlags() { bool changed = false; #if HAVE_MERGE_GROUP changed |= enableParm("group", evalInt("enable_grouping", 0, 0) != 0); #endif return changed; } OP_Node* SOP_OpenVDB_Vector_Merge::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Vector_Merge(net, name, op); } SOP_OpenVDB_Vector_Merge::SOP_OpenVDB_Vector_Merge(OP_Network* net, const char* name, OP_Operator* op): SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// namespace { // Mapping from scalar ValueTypes to Vec3::value_types of registered vector-valued Grid types template<typename T> struct VecValueTypeMap { using Type = T; static const bool Changed = false; }; //template<> struct VecValueTypeMap<bool> { // using Type = int32_t; static const bool Changed = true; //}; //template<> struct VecValueTypeMap<uint32_t> { // using Type = int32_t; static const bool Changed = true; //}; //template<> struct VecValueTypeMap<int64_t> { // using Type = int32_t; static const bool Changed = true; //}; //template<> struct VecValueTypeMap<uint64_t> { // using Type = int32_t; static const bool Changed = true; //}; struct InterruptException {}; // Functor to merge inactive tiles and voxels from up to three // scalar-valued trees into one vector-valued tree template<typename VectorTreeT, typename ScalarTreeT> class MergeActiveOp { public: using VectorValueT = typename VectorTreeT::ValueType; using ScalarValueT = typename ScalarTreeT::ValueType; using ScalarAccessor = typename openvdb::tree::ValueAccessor<const ScalarTreeT>; MergeActiveOp(const ScalarTreeT* xTree, const ScalarTreeT* yTree, const ScalarTreeT* zTree, UT_Interrupt* interrupt) : mDummyTree(new ScalarTreeT) , mXAcc(xTree ? *xTree : *mDummyTree) , mYAcc(yTree ? *yTree : *mDummyTree) , mZAcc(zTree ? 
*zTree : *mDummyTree) , mInterrupt(interrupt) { /// @todo Consider initializing x, y and z dummy trees with background values /// taken from the vector tree. Currently, though, missing components are always /// initialized to zero. // Note that copies of this op share the original dummy tree. // That's OK, since this op doesn't modify the dummy tree. } MergeActiveOp(const MergeActiveOp& other) : mDummyTree(other.mDummyTree) , mXAcc(other.mXAcc) , mYAcc(other.mYAcc) , mZAcc(other.mZAcc) , mInterrupt(other.mInterrupt) { if (mInterrupt && mInterrupt->opInterrupt()) { throw InterruptException(); } } // Given an active tile or voxel in the vector tree, set its value based on // the values of corresponding tiles or voxels in the scalar trees. void operator()(const typename VectorTreeT::ValueOnIter& it) const { const openvdb::Coord xyz = it.getCoord(); ScalarValueT xval = mXAcc.getValue(xyz), yval = mYAcc.getValue(xyz), zval = mZAcc.getValue(xyz); it.setValue(VectorValueT(xval, yval, zval)); } private: UT_SharedPtr<const ScalarTreeT> mDummyTree; ScalarAccessor mXAcc, mYAcc, mZAcc; UT_Interrupt* mInterrupt; }; // MergeActiveOp // Functor to merge inactive tiles and voxels from up to three // scalar-valued trees into one vector-valued tree template<typename VectorTreeT, typename ScalarTreeT> struct MergeInactiveOp { using VectorValueT = typename VectorTreeT::ValueType; using VectorElemT = typename VectorValueT::value_type; using ScalarValueT = typename ScalarTreeT::ValueType; using VectorAccessor = typename openvdb::tree::ValueAccessor<VectorTreeT>; using ScalarAccessor = typename openvdb::tree::ValueAccessor<const ScalarTreeT>; MergeInactiveOp(const ScalarTreeT* xTree, const ScalarTreeT* yTree, const ScalarTreeT* zTree, VectorTreeT& vecTree, UT_Interrupt* interrupt) : mDummyTree(new ScalarTreeT) , mXAcc(xTree ? *xTree : *mDummyTree) , mYAcc(yTree ? *yTree : *mDummyTree) , mZAcc(zTree ? *zTree : *mDummyTree) , mVecAcc(vecTree) , mInterrupt(interrupt) { /// @todo Consider initializing x, y and z dummy trees with background values /// taken from the vector tree. Currently, though, missing components are always /// initialized to zero. // Note that copies of this op share the original dummy tree. // That's OK, since this op doesn't modify the dummy tree. } MergeInactiveOp(const MergeInactiveOp& other) : mDummyTree(other.mDummyTree) , mXAcc(other.mXAcc) , mYAcc(other.mYAcc) , mZAcc(other.mZAcc) , mVecAcc(other.mVecAcc) , mInterrupt(other.mInterrupt) { if (mInterrupt && mInterrupt->opInterrupt()) { throw InterruptException(); } } // Given an inactive tile or voxel in a scalar tree, activate the corresponding // tile or voxel in the vector tree. void operator()(const typename ScalarTreeT::ValueOffCIter& it) const { // Skip background tiles and voxels, since the output vector tree // is assumed to be initialized with the correct background. if (openvdb::math::isExactlyEqual(it.getValue(), it.getTree()->background())) return; const openvdb::Coord& xyz = it.getCoord(); if (it.isVoxelValue()) { mVecAcc.setActiveState(xyz, true); } else { // Because the vector tree was constructed with the same node configuration // as the scalar trees, tiles can be transferred directly between the two. mVecAcc.addTile(it.getLevel(), xyz, openvdb::zeroVal<VectorValueT>(), /*active=*/true); } } // Given an active tile or voxel in the vector tree, set its value based on // the values of corresponding tiles or voxels in the scalar trees. 
void operator()(const typename VectorTreeT::ValueOnIter& it) const { const openvdb::Coord xyz = it.getCoord(); ScalarValueT xval = mXAcc.getValue(xyz), yval = mYAcc.getValue(xyz), zval = mZAcc.getValue(xyz); it.setValue(VectorValueT(xval, yval, zval)); } // Deactivate all voxels in a leaf node of the vector tree. void operator()(const typename VectorTreeT::LeafIter& it) const { it->setValuesOff(); } private: UT_SharedPtr<const ScalarTreeT> mDummyTree; ScalarAccessor mXAcc, mYAcc, mZAcc; mutable VectorAccessor mVecAcc; UT_Interrupt* mInterrupt; }; // MergeInactiveOp //////////////////////////////////////// class ScalarGridMerger { public: using WarnFunc = std::function<void (const char*)>; ScalarGridMerger( const hvdb::Grid* x, const hvdb::Grid* y, const hvdb::Grid* z, const std::string& outGridName, bool copyInactiveValues, WarnFunc warn, UT_Interrupt* interrupt = nullptr): mOutGridName(outGridName), mCopyInactiveValues(copyInactiveValues), mWarn(warn), mInterrupt(interrupt) { mInGrid[0] = x; mInGrid[1] = y; mInGrid[2] = z; } const hvdb::GridPtr& getGrid() { return mOutGrid; } template<typename ScalarGridT> void operator()(const ScalarGridT& /*ignored*/) { if (!mInGrid[0] && !mInGrid[1] && !mInGrid[2]) return; using ScalarTreeT = typename ScalarGridT::TreeType; // Retrieve a scalar tree from each input grid. const ScalarTreeT* inTree[3] = { nullptr, nullptr, nullptr }; if (mInGrid[0]) inTree[0] = &UTvdbGridCast<ScalarGridT>(mInGrid[0])->tree(); if (mInGrid[1]) inTree[1] = &UTvdbGridCast<ScalarGridT>(mInGrid[1])->tree(); if (mInGrid[2]) inTree[2] = &UTvdbGridCast<ScalarGridT>(mInGrid[2])->tree(); if (!inTree[0] && !inTree[1] && !inTree[2]) return; // Get the type of the output vector tree. // 1. ScalarT is the input scalar tree's value type. using ScalarT = typename ScalarTreeT::ValueType; // 2. VecT is Vec3<ScalarT>, provided that there is a registered Tree with that // value type. If not, use the closest match (e.g., vec3i when ScalarT = bool). using MappedVecT = VecValueTypeMap<ScalarT>; using VecT = openvdb::math::Vec3<typename MappedVecT::Type>; // 3. VecTreeT is the type of a tree with the same height and node dimensions // as the input scalar tree, but with value type VecT instead of ScalarT. using VecTreeT = typename ScalarTreeT::template ValueConverter<VecT>::Type; using VecGridT = typename openvdb::Grid<VecTreeT>; if (MappedVecT::Changed && mWarn) { std::ostringstream ostr; ostr << "grids of type vec3<" << openvdb::typeNameAsString<ScalarT>() << "> are not supported; using " << openvdb::typeNameAsString<VecT>() << " instead"; if (!mOutGridName.empty()) ostr << " for " << mOutGridName; mWarn(ostr.str().c_str()); } // Determine the background value and the transform. VecT bkgd(0, 0, 0); const openvdb::math::Transform* xform = nullptr; for (int i = 0; i < 3; ++i) { if (inTree[i]) bkgd[i] = inTree[i]->background(); if (mInGrid[i] && !xform) xform = &(mInGrid[i]->transform()); } openvdb::math::Transform::Ptr outXform; if (xform) outXform = xform->copy(); // Construct the output vector grid, with a background value whose // components are the background values of the input scalar grids. typename VecGridT::Ptr vecGrid = VecGridT::create(bkgd); mOutGrid = vecGrid; if (outXform) { mOutGrid->setTransform(outXform); // Check that all three input grids have the same transform. 
bool xformMismatch = false; for (int i = 0; i < 3 && !xformMismatch; ++i) { if (mInGrid[i]) { const openvdb::math::Transform* inXform = &(mInGrid[i]->transform()); if (*outXform != *inXform) xformMismatch = true; } } if (xformMismatch && mWarn) { mWarn("component grids have different transforms"); } } if (mCopyInactiveValues) { try { MergeInactiveOp<VecTreeT, ScalarTreeT> op(inTree[0], inTree[1], inTree[2], vecGrid->tree(), mInterrupt); // 1. For each non-background inactive value in each scalar tree, // activate the corresponding region in the vector tree. for (int i = 0; i < 3; ++i) { if (mInterrupt && mInterrupt->opInterrupt()) { mOutGrid.reset(); return; } if (!inTree[i]) continue; // Because this is a topology-modifying operation, it must be done serially. openvdb::tools::foreach(inTree[i]->cbeginValueOff(), op, /*threaded=*/false, /*shareOp=*/false); } // 2. For each active value in the vector tree, set v = (x, y, z). openvdb::tools::foreach(vecGrid->beginValueOn(), op, /*threaded=*/true, /*shareOp=*/false); // 3. Deactivate all values in the vector tree, by processing // leaf nodes in parallel (which is safe) and tiles serially. openvdb::tools::foreach(vecGrid->tree().beginLeaf(), op, /*threaded=*/true); typename VecTreeT::ValueOnIter tileIt = vecGrid->beginValueOn(); tileIt.setMaxDepth(tileIt.getLeafDepth() - 1); // skip leaf nodes for (int count = 0; tileIt; ++tileIt, ++count) { if (count % 100 == 0 && mInterrupt && mInterrupt->opInterrupt()) { mOutGrid.reset(); return; } tileIt.setValueOff(); } } catch (InterruptException&) { mOutGrid.reset(); return; } } // Activate voxels (without setting their values) in the output vector grid so that // its tree topology is the union of the topologies of the input scalar grids. // Transferring voxel values from the scalar grids to the vector grid can then // be done safely from multiple threads. for (int i = 0; i < 3; ++i) { if (mInterrupt && mInterrupt->opInterrupt()) { mOutGrid.reset(); return; } if (!inTree[i]) continue; vecGrid->tree().topologyUnion(*inTree[i]); } // Set a new value for each active tile or voxel in the output vector grid. try { MergeActiveOp<VecTreeT, ScalarTreeT> op(inTree[0], inTree[1], inTree[2], mInterrupt); openvdb::tools::foreach(vecGrid->beginValueOn(), op, /*threaded=*/true, /*shareOp=*/false); } catch (InterruptException&) { mOutGrid.reset(); return; } openvdb::tools::prune(vecGrid->tree()); } private: const hvdb::Grid* mInGrid[3]; hvdb::GridPtr mOutGrid; std::string mOutGridName; bool mCopyInactiveValues; WarnFunc mWarn; UT_Interrupt* mInterrupt; }; // class ScalarGridMerger } // unnamed namespace //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Vector_Merge::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); const bool copyInactiveValues = evalInt("copyinactive", 0, time); const bool removeSourceGrids = evalInt("remove_sources", 0, time); #ifndef SESI_OPENVDB const bool verbose = evalInt("verbose", 0, time); #else const bool verbose = false; #endif openvdb::VecType vecType = openvdb::VEC_INVARIANT; { const int vtype = static_cast<int>(evalInt("vectype", 0, time)); if (vtype >= 0 && vtype < openvdb::NUM_VEC_TYPES) { vecType = static_cast<openvdb::VecType>(vtype); } } // Get the name (or naming pattern) for merged grids. UT_String mergeName; evalString(mergeName, "merge_name", 0, time); const bool useXName = evalInt("usexname", 0, time); #if HAVE_MERGE_GROUP // Get the group name for merged grids. 
UT_String mergeGroupStr; if (evalInt("enable_grouping", 0, time)) { evalString(mergeGroupStr, "group", 0, time); } #endif UT_AutoInterrupt progress("Merging VDB grids"); using PrimVDBSet = std::set<GEO_PrimVDB*>; PrimVDBSet primsToRemove; // Get the groups of x, y and z scalar grids to merge. const GA_PrimitiveGroup *xGroup = nullptr, *yGroup = nullptr, *zGroup = nullptr; { UT_String groupStr; evalString(groupStr, "xgroup", 0, time); xGroup = matchGroup(*gdp, groupStr.toStdString()); evalString(groupStr, "ygroup", 0, time); yGroup = matchGroup(*gdp, groupStr.toStdString()); evalString(groupStr, "zgroup", 0, time); zGroup = matchGroup(*gdp, groupStr.toStdString()); } using PrimVDBVec = std::vector<GEO_PrimVDB*>; PrimVDBVec primsToGroup; // Iterate over VDB primitives in the selected groups. hvdb::VdbPrimIterator xIt(xGroup ? gdp : nullptr, xGroup), yIt(yGroup ? gdp : nullptr, yGroup), zIt(zGroup ? gdp : nullptr, zGroup); for (int i = 1; xIt || yIt || zIt; ++xIt, ++yIt, ++zIt, ++i) { if (progress.wasInterrupted()) return error(); GU_PrimVDB *xVdb = *xIt, *yVdb = *yIt, *zVdb = *zIt, *nonNullVdb = nullptr; // Extract grids from the VDB primitives and find one that is non-null. // Process the primitives in ZYX order to ensure the X grid is preferred. /// @todo nonNullGrid's ValueType determines the ValueType of the /// output grid's vectors, so ideally nonNullGrid should be the /// grid with the highest-precision ValueType. const hvdb::Grid *xGrid = nullptr, *yGrid = nullptr, *zGrid = nullptr, *nonNullGrid = nullptr; if (zVdb) { zGrid = nonNullGrid = &zVdb->getGrid(); nonNullVdb = zVdb; } if (yVdb) { yGrid = nonNullGrid = &yVdb->getGrid(); nonNullVdb = yVdb; } if (xVdb) { xGrid = nonNullGrid = &xVdb->getGrid(); nonNullVdb = xVdb; } if (!nonNullGrid) continue; std::string outGridName; if (mergeName.isstring()) { UT_String s; s.itoa(i); outGridName = hboost::regex_replace( mergeName.toStdString(), hboost::regex("#+"), s.toStdString()); } if (useXName && nonNullVdb) { UT_String gridName(nonNullVdb->getGridName()); UT_String basename = gridName.pathUpToExtension(); if (basename.isstring()) { outGridName = basename.toStdString(); } } // Merge the input grids into an output grid. // This does not support a partial set so we quit early in that case. ScalarGridMerger op(xGrid, yGrid, zGrid, outGridName, copyInactiveValues, [this](const char* msg) { addWarning(SOP_MESSAGE, msg); }); nonNullGrid->apply<hvdb::NumericGridTypes>(op); if (hvdb::GridPtr outGrid = op.getGrid()) { outGrid->setName(outGridName); outGrid->setVectorType(vecType); if (verbose) { std::ostringstream ostr; ostr << "Merged (" << (xVdb ? xVdb->getGridName() : "0") << ", " << (yVdb ? yVdb->getGridName() : "0") << ", " << (zVdb ? zVdb->getGridName() : "0") << ")"; if (!outGridName.empty()) ostr << " into " << outGridName; addMessage(SOP_MESSAGE, ostr.str().c_str()); } if (GEO_PrimVDB* outVdb = GU_PrimVDB::buildFromGrid(*gdp, outGrid, nonNullVdb, outGridName.c_str())) { primsToGroup.push_back(outVdb); } // Flag the input grids for removal. primsToRemove.insert(xVdb); primsToRemove.insert(yVdb); primsToRemove.insert(zVdb); } } #if HAVE_MERGE_GROUP // Optionally, add the newly-created vector grids to a group. 
if (!primsToGroup.empty() && mergeGroupStr.isstring()) { GA_PrimitiveGroup* mergeGroup = gdp->findPrimitiveGroup(mergeGroupStr.buffer()); if (mergeGroup == nullptr) { mergeGroup = gdp->newPrimitiveGroup(mergeGroupStr.buffer()); } if (mergeGroup != nullptr) { for (PrimVDBVec::iterator i = primsToGroup.begin(), e = primsToGroup.end(); i != e; ++i) { mergeGroup->add(*i); } } } #endif if (removeSourceGrids) { // Remove scalar grids that were merged. primsToRemove.erase(nullptr); for (PrimVDBSet::iterator i = primsToRemove.begin(), e = primsToRemove.end(); i != e; ++i) { gdp->destroyPrimitive(*(*i), /*andPoints=*/true); } } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
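The heart of the merge above is a topology-union-then-transfer pattern. The following standalone sketch shows the same idea using only the OpenVDB library, for float inputs assumed to be index-aligned; the function name is illustrative, and the SOP additionally handles inactive values, arbitrary scalar types, interrupts, and threaded transfer via tools::foreach().

#include <openvdb/openvdb.h>
#include <openvdb/tools/Prune.h>

openvdb::Vec3SGrid::Ptr
mergeScalarsToVector(const openvdb::FloatGrid& x,
                     const openvdb::FloatGrid& y,
                     const openvdb::FloatGrid& z)
{
    // Background vector assembled from the scalar backgrounds;
    // the transform is taken from the first component grid.
    openvdb::Vec3SGrid::Ptr out = openvdb::Vec3SGrid::create(
        openvdb::Vec3s(x.background(), y.background(), z.background()));
    out->setTransform(x.transform().copy());

    // 1. Make the output's active topology the union of the inputs,
    //    so that value transfer only needs to visit active voxels.
    out->tree().topologyUnion(x.tree());
    out->tree().topologyUnion(y.tree());
    out->tree().topologyUnion(z.tree());

    // 2. Fill each active tile or voxel from the three component grids.
    openvdb::FloatGrid::ConstAccessor
        xAcc = x.getConstAccessor(),
        yAcc = y.getConstAccessor(),
        zAcc = z.getConstAccessor();
    for (openvdb::Vec3SGrid::ValueOnIter it = out->beginValueOn(); it; ++it) {
        const openvdb::Coord ijk = it.getCoord();
        it.setValue(openvdb::Vec3s(
            xAcc.getValue(ijk), yAcc.getValue(ijk), zAcc.getValue(ijk)));
    }

    openvdb::tools::prune(out->tree());
    return out;
}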
28,555
C++
36.772487
118
0.608195
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Transform.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Transform.cc /// /// @author FX R&D OpenVDB team #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/tools/VectorTransformer.h> // for transformVectors() #include <UT/UT_Interrupt.h> #include <hboost/math/constants/constants.hpp> #include <set> #include <sstream> #include <stdexcept> #include <string> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; class SOP_OpenVDB_Transform: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Transform(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Transform() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; }; void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDBs to be transformed.") .setDocumentation( "A subset of the input VDBs to be transformed" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_STRING, "xOrd", "Transform Order") .setDefault("tsr") ///< @todo Houdini default is "srt" .setChoiceList(&PRMtrsMenu) .setTypeExtended(PRM_TYPE_JOIN_PAIR) .setTooltip("The order in which transformations and rotations occur")); parms.add(hutil::ParmFactory( PRM_STRING | PRM_Type(PRM_Type::PRM_INTERFACE_LABEL_NONE), "rOrd", "") .setDefault("zyx") ///< @todo Houdini default is "xyz" .setChoiceList(&PRMxyzMenu)); parms.add(hutil::ParmFactory(PRM_XYZ_J, "t", "Translate") .setVectorSize(3) .setDefault(PRMzeroDefaults) .setDocumentation("The amount of translation along the _x_, _y_ and _z_ axes")); parms.add(hutil::ParmFactory(PRM_XYZ_J, "r", "Rotate") .setVectorSize(3) .setDefault(PRMzeroDefaults) .setDocumentation("The amount of rotation about the _x_, _y_ and _z_ axes")); parms.add(hutil::ParmFactory(PRM_XYZ_J, "s", "Scale") .setVectorSize(3) .setDefault(PRMoneDefaults) .setDocumentation("Nonuniform scaling along the _x_, _y_ and _z_ axes")); parms.add(hutil::ParmFactory(PRM_XYZ_J, "p", "Pivot") .setVectorSize(3) .setDefault(PRMzeroDefaults) .setDocumentation("The pivot point for scaling and rotation")); parms.add(hutil::ParmFactory(PRM_FLT_J, "uniformScale", "Uniform Scale") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_FREE, 10) .setDocumentation("Uniform scaling along all three axes")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "invert", "Invert Transformation") .setDefault(PRMzeroDefaults) .setDocumentation("Perform the inverse transformation.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "xformvectors", "Transform Vectors") .setDefault(PRMzeroDefaults) .setTooltip( "Apply the transform to the voxel values of vector-valued VDBs,\n" "in accordance with those VDBs' Vector Type attributes.\n") .setDocumentation( "Apply the transform to the voxel values of vector-valued VDBs," " in accordance with those VDBs' __Vector Type__ attributes (as set," " for example, with the [OpenVDB Create|Node:sop/DW_OpenVDBCreate] node).")); hvdb::OpenVDBOpFactory("VDB Transform", SOP_OpenVDB_Transform::factory, parms, *table) .setNativeName("") .addInput("VDBs to transform") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Transform::Cache; }) .setDocumentation("\ #icon: 
COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Modify the transforms of VDB volumes.\"\"\"\n\ \n\ @overview\n\ \n\ This node modifies the transform associated with each input VDB volume.\n\ It is usually preferable to use Houdini's native [Transform|Node:sop/xform] node,\n\ except if you want to also transform the _values_ of a vector-valued VDB.\n\ \n\ @related\n\ - [Node:sop/xform]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } OP_Node* SOP_OpenVDB_Transform::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Transform(net, name, op); } SOP_OpenVDB_Transform::SOP_OpenVDB_Transform(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } namespace { // Functor for use with GEOvdbApply() to apply a transform // to the voxel values of vector-valued grids struct VecXformOp { openvdb::Mat4d mat; VecXformOp(const openvdb::Mat4d& _mat): mat(_mat) {} template<typename GridT> void operator()(GridT& grid) const { openvdb::tools::transformVectors(grid, mat); } }; } // unnamed namespace OP_ERROR SOP_OpenVDB_Transform::Cache::cookVDBSop(OP_Context& context) { try { using MapBase = openvdb::math::MapBase; using AffineMap = openvdb::math::AffineMap; using NonlinearFrustumMap = openvdb::math::NonlinearFrustumMap; using Transform = openvdb::math::Transform; const fpreal time = context.getTime(); // Get UI parameters openvdb::Vec3R t(evalVec3R("t", time)), r(evalVec3R("r", time)), s(evalVec3R("s", time)), p(evalVec3R("p", time)); s *= evalFloat("uniformScale", 0, time); const auto xformOrder = evalStdString("xOrd", time); const auto rotOrder = evalStdString("rOrd", time); const bool flagInverse = evalInt("invert", 0, time); const bool xformVec = evalInt("xformvectors", 0, time); const auto isValidOrder = [](const std::string& expected, const std::string& actual) { if (actual.size() != expected.size()) return false; using CharSet = std::set<std::string::value_type>; return (CharSet(actual.begin(), actual.end()) == CharSet(expected.begin(), expected.end())); }; if (!isValidOrder("rst", xformOrder)) { std::ostringstream mesg; mesg << "Invalid transform order \"" << xformOrder << "\"; expected \"tsr\", \"rst\", etc."; throw std::runtime_error(mesg.str()); } if (!isValidOrder("xyz", rotOrder)) { std::ostringstream mesg; mesg << "Invalid rotation order \"" << rotOrder << "\"; expected \"xyz\", \"zyx\", etc."; throw std::runtime_error(mesg.str()); } // Get the group of grids to be transformed. 
const GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", time)); UT_AutoInterrupt progress("Transform"); // Build up the transform matrix from the UI parameters const double deg2rad = hboost::math::constants::pi<double>() / 180.0; openvdb::Mat4R mat(openvdb::Mat4R::identity()); const auto rotate = [&]() { for (auto axis = rotOrder.rbegin(); axis != rotOrder.rend(); ++axis) { switch (*axis) { case 'x': mat.preRotate(openvdb::math::X_AXIS, deg2rad*r[0]); break; case 'y': mat.preRotate(openvdb::math::Y_AXIS, deg2rad*r[1]); break; case 'z': mat.preRotate(openvdb::math::Z_AXIS, deg2rad*r[2]); break; } } }; if (xformOrder == "trs") { mat.preTranslate(p); mat.preScale(s); rotate(); mat.preTranslate(-p); mat.preTranslate(t); } else if (xformOrder == "tsr") { mat.preTranslate(p); rotate(); mat.preScale(s); mat.preTranslate(-p); mat.preTranslate(t); } else if (xformOrder == "rts") { mat.preTranslate(p); mat.preScale(s); mat.preTranslate(-p); mat.preTranslate(t); mat.preTranslate(p); rotate(); mat.preTranslate(-p); } else if (xformOrder == "rst") { mat.preTranslate(t); mat.preTranslate(p); mat.preScale(s); rotate(); mat.preTranslate(-p); } else if (xformOrder == "str") { mat.preTranslate(p); rotate(); mat.preTranslate(-p); mat.preTranslate(t); mat.preTranslate(p); mat.preScale(s); mat.preTranslate(-p); } else /*if (xformOrder == "srt")*/ { mat.preTranslate(t); mat.preTranslate(p); rotate(); mat.preScale(s); mat.preTranslate(-p); } if (flagInverse) mat = mat.inverse(); const VecXformOp xformOp(mat); // Construct an affine map. AffineMap map(mat); // For each VDB primitive in the given group... for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (progress.wasInterrupted()) throw std::runtime_error("Interrupted"); GU_PrimVDB* vdb = *it; // No need to make the grid unique at this point, since we might not need // to modify its voxel data. hvdb::Grid& grid = vdb->getGrid(); const auto& transform = grid.constTransform(); // Merge the transform's current affine representation with the new affine map. AffineMap::Ptr compound( new AffineMap(*transform.baseMap()->getAffineMap(), map)); // Simplify the affine compound map auto affineMap = openvdb::math::simplify(compound); Transform::Ptr newTransform; if (transform.isLinear()) { newTransform.reset(new Transform(affineMap)); } else { auto frustumMap = transform.constMap<NonlinearFrustumMap>(); if (!frustumMap) { throw std::runtime_error{"Unsupported non-linear map - " + transform.mapType()}; } // Create a new NonlinearFrustumMap that replaces the affine map with the transformed one. MapBase::Ptr newFrustumMap(new NonlinearFrustumMap( frustumMap->getBBox(), frustumMap->getTaper(), frustumMap->getDepth(), affineMap)); newTransform.reset(new Transform(newFrustumMap)); } // Replace the transform. grid.setTransform(newTransform); // Update the primitive's vertex position. /// @todo Need a simpler way to do this. hvdb::GridPtr copyOfGrid = grid.copyGrid(); copyOfGrid->setTransform(grid.constTransform().copy()); vdb->setGrid(*copyOfGrid); if (xformVec && vdb->getConstGrid().isInWorldSpace() && vdb->getConstGrid().getVectorType() != openvdb::VEC_INVARIANT) { // If (and only if) the grid is vector-valued, deep copy it, // then apply the transform to each voxel's value. hvdb::GEOvdbApply<hvdb::Vec3GridTypes>(*vdb, xformOp); } } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
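For reference, the transform composition at the core of the cook method above can be reduced to the following sketch using only the OpenVDB library. The helper name appendAffine is hypothetical, and the sketch assumes a linear (non-frustum) grid transform; the SOP additionally rebuilds NonlinearFrustumMaps, refreshes the primitive's vertex position, and optionally transforms vector voxel values.

#include <openvdb/openvdb.h>

inline void
appendAffine(openvdb::GridBase& grid, const openvdb::math::Mat4d& mat)
{
    using namespace openvdb::math;
    // Compose the grid's current affine representation with the new matrix...
    AffineMap::Ptr compound(new AffineMap(
        *grid.constTransform().baseMap()->getAffineMap(), AffineMap(mat)));
    // ...then simplify the compound map (e.g., down to a scale map where possible)
    // and rebuild the grid's Transform around it.
    grid.setTransform(Transform::Ptr(new Transform(simplify(compound))));
}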
11,597
C++
34.907121
106
0.596189
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SHOP_OpenVDB_Points.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file SHOP_OpenVDB_Points.cc /// /// @authors Dan Bailey, Richard Kwok /// /// @brief The Delayed Load Procedural SHOP for OpenVDB Points. #include <UT/UT_DSOVersion.h> #include <UT/UT_Version.h> #include <UT/UT_Ramp.h> #include <OP/OP_OperatorTable.h> #include <SHOP/SHOP_Node.h> #include <SHOP/SHOP_Operator.h> #include <PRM/PRM_Include.h> #include <houdini_utils/ParmFactory.h> #include <sstream> namespace hutil = houdini_utils; class SHOP_OpenVDB_Points : public SHOP_Node { public: static const char* nodeName() { return "openvdb_points"; } SHOP_OpenVDB_Points(OP_Network *parent, const char *name, OP_Operator *entry); ~SHOP_OpenVDB_Points() override = default; static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); bool buildShaderString(UT_String &result, fpreal now, const UT_Options *options, OP_Node *obj=0, OP_Node *sop=0, SHOP_TYPE interpretType = SHOP_INVALID) override; protected: OP_ERROR cookMe(OP_Context&) override; bool updateParmsFlags() override; }; // class SHOP_OpenVDB_Points //////////////////////////////////////// OP_Node* SHOP_OpenVDB_Points::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SHOP_OpenVDB_Points(net, name, op); } SHOP_OpenVDB_Points::SHOP_OpenVDB_Points(OP_Network *parent, const char *name, OP_Operator *entry) : SHOP_Node(parent, name, entry, SHOP_GEOMETRY) { } bool SHOP_OpenVDB_Points::buildShaderString(UT_String &result, fpreal now, const UT_Options*, OP_Node*, OP_Node*, SHOP_TYPE) { UT_String fileStr = ""; evalString(fileStr, "file", 0, now); UT_String groupMaskStr = ""; evalString(groupMaskStr, "groupmask", 0, now); UT_String attrMaskStr = ""; evalString(attrMaskStr, "attrmask", 0, now); std::stringstream ss; ss << SHOP_OpenVDB_Points::nodeName(); ss << " file \"" << fileStr.toStdString() << "\""; ss << " streamdata " << evalInt("streamdata", 0, now); ss << " groupmask \"" << groupMaskStr.toStdString() << "\""; ss << " attrmask \"" << attrMaskStr.toStdString() << "\""; ss << " speedtocolor " << evalInt("speedtocolor", 0, now); ss << " maxspeed " << evalFloat("maxspeed", 0, now); // write the speed/color ramp into the ifd UT_Ramp ramp; updateRampFromMultiParm(now, getParm("function"), ramp); ss << " ramp \""; for(int n = 0, N = ramp.getNodeCount(); n < N; n++){ const UT_ColorNode* rampNode = ramp.getNode(n); ss << rampNode->t << " "; ss << rampNode->rgba.r << " " << rampNode->rgba.g << " " << rampNode->rgba.b << " "; ss << static_cast<int>(rampNode->basis) << " "; } ss << "\""; result = ss.str(); return true; } OP_ERROR SHOP_OpenVDB_Points::cookMe(OP_Context& context) { return SHOP_Node::cookMe(context); } bool SHOP_OpenVDB_Points::updateParmsFlags() { bool changed = false; const bool speedToColor = evalInt("speedtocolor", 0, 0); changed |= enableParm("sep1", speedToColor); changed |= setVisibleState("sep1", speedToColor); changed |= enableParm("maxspeed", speedToColor); changed |= setVisibleState("maxspeed", speedToColor); changed |= enableParm("function", speedToColor); changed |= setVisibleState("function", speedToColor); return changed; } //////////////////////////////////////// // Build UI and register this operator. 
void newShopOperator(OP_OperatorTable *table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_FILE, "file", "File") .setDefault("./filename.vdb") .setHelpText("File path to the VDB to load.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "streamdata", "Stream Data for Maximum Memory Efficiency") .setDefault(PRMoneDefaults) .setHelpText( "Stream the data from disk to keep the memory footprint as small as possible." " This will make the initial conversion marginally slower because the data" " will be loaded twice, once for pre-computation to evaluate the bounding box" " and once for the actual conversion.")); parms.add(hutil::ParmFactory(PRM_STRING, "groupmask", "Group Mask") .setDefault("") .setHelpText("Specify VDB Points Groups to use. (Default is all groups)")); parms.add(hutil::ParmFactory(PRM_STRING, "attrmask", "Attribute Mask") .setDefault("") .setHelpText("Specify VDB Points Attributes to use. (Default is all attributes)")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "speedtocolor", "Map Speed To Color") .setDefault(PRMzeroDefaults) .setHelpText( "Replaces the 'Cd' point attribute with colors mapped from the" " 'v' point attribute using a ramp.")); parms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep1", "")); parms.add(hutil::ParmFactory(PRM_FLT_J, "maxspeed", "Max Speed") .setDefault(1.0f) .setHelpText("Reference for 1.0 on the color gradient.")); parms.add(hutil::ParmFactory(PRM_MULTITYPE_RAMP_RGB, "function", "Speed to Color Function") .setDefault(PRMtwoDefaults) .setHelpText("Function mapping speeds between 0 and 1 to a color.")); ////////// // Register this operator. SHOP_Operator* shop = new SHOP_Operator(SHOP_OpenVDB_Points::nodeName(), "OpenVDB Points", SHOP_OpenVDB_Points::factory, parms.get(), /*child_table_name=*/nullptr, /*min_sources=*/0, /*max_sources=*/0, SHOP_Node::myVariableList, OP_FLAG_GENERATOR, SHOP_AUTOADD_NONE); shop->setIconName("SHOP_geometry"); table->addOperator(shop); ////////// // Set the SHOP-specific data SHOP_OperatorInfo* info = UTverify_cast<SHOP_OperatorInfo*>(shop->getOpSpecificData()); info->setShaderType(SHOP_GEOMETRY); // Set the rendermask to "*" and try to support *all* renderers. info->setRenderMask("*"); }
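// Illustrative note only: with the default parameter values registered above,
// buildShaderString() emits a single space-separated argument string for the
// delayed-load procedural, along the lines of
//
//   openvdb_points file "./filename.vdb" streamdata 1 groupmask "" attrmask ""
//       speedtocolor 0 maxspeed 1 ramp "..."
//
// where the quoted ramp payload is a flattened list of (position, r, g, b,
// basis) values, one group per node of the "function" ramp parameter.  The
// exact numeric formatting depends on the stream insertions used above.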
6,057
C++
30.226804
98
0.638105
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/PointUtils.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file PointUtils.cc /// @authors Dan Bailey, Nick Avramoussis, Richard Kwok #include "PointUtils.h" #include "AttributeTransferUtil.h" #include "Utils.h" #include <openvdb/openvdb.h> #include <openvdb/points/AttributeArrayString.h> #include <openvdb/points/PointAttribute.h> #include <openvdb/points/PointConversion.h> #include <openvdb/points/PointDataGrid.h> #include <GA/GA_AIFTuple.h> #include <GA/GA_ElementGroup.h> #include <GA/GA_Iterator.h> #include <CH/CH_Manager.h> // for CHgetEvalTime #include <PRM/PRM_SpareData.h> #include <SOP/SOP_Node.h> #include <UT/UT_UniquePtr.h> #include <algorithm> #include <map> #include <memory> #include <sstream> #include <stdexcept> #include <string> #include <type_traits> #include <vector> using namespace openvdb; using namespace openvdb::points; namespace hvdb = openvdb_houdini; namespace { inline GA_Storage gaStorageFromAttrString(const openvdb::Name& type) { if (type == "string") return GA_STORE_STRING; else if (type == "bool") return GA_STORE_BOOL; else if (type == "int8") return GA_STORE_INT8; else if (type == "int16") return GA_STORE_INT16; else if (type == "int32") return GA_STORE_INT32; else if (type == "int64") return GA_STORE_INT64; else if (type == "float") return GA_STORE_REAL32; else if (type == "double") return GA_STORE_REAL64; else if (type == "vec3i") return GA_STORE_INT32; else if (type == "vec3s") return GA_STORE_REAL32; else if (type == "vec3d") return GA_STORE_REAL64; else if (type == "quats") return GA_STORE_REAL32; else if (type == "quatd") return GA_STORE_REAL64; else if (type == "mat3s") return GA_STORE_REAL32; else if (type == "mat3d") return GA_STORE_REAL64; else if (type == "mat4s") return GA_STORE_REAL32; else if (type == "mat4d") return GA_STORE_REAL64; return GA_STORE_INVALID; } // @{ // Houdini GA Handle Traits template<typename T> struct GAHandleTraits { using RW = GA_RWHandleF; using RO = GA_ROHandleF; }; template<> struct GAHandleTraits<bool> { using RW = GA_RWHandleI; using RO = GA_ROHandleI; }; template<> struct GAHandleTraits<int8_t> { using RW = GA_RWHandleI; using RO = GA_ROHandleI; }; template<> struct GAHandleTraits<int16_t> { using RW = GA_RWHandleI; using RO = GA_ROHandleI; }; template<> struct GAHandleTraits<int32_t> { using RW = GA_RWHandleI; using RO = GA_ROHandleI; }; template<> struct GAHandleTraits<int64_t> { using RW = GA_RWHandleID; using RO = GA_ROHandleID; }; template<> struct GAHandleTraits<half> { using RW = GA_RWHandleH; using RO = GA_ROHandleH; }; template<> struct GAHandleTraits<float> { using RW = GA_RWHandleF; using RO = GA_ROHandleF; }; template<> struct GAHandleTraits<double> { using RW = GA_RWHandleD; using RO = GA_ROHandleD; }; template<> struct GAHandleTraits<std::string> { using RW = GA_RWHandleS; using RO = GA_ROHandleS; }; template<> struct GAHandleTraits<openvdb::math::Vec3<int>> { using RW=GA_RWHandleV3; using RO=GA_ROHandleV3; }; template<> struct GAHandleTraits<openvdb::Vec3s> { using RW = GA_RWHandleV3; using RO = GA_ROHandleV3; }; template<> struct GAHandleTraits<openvdb::Vec3d> { using RW = GA_RWHandleV3D; using RO = GA_ROHandleV3D; }; template<> struct GAHandleTraits<openvdb::math::Mat3s> { using RW = GA_RWHandleM3; using RO = GA_ROHandleM3; }; template<> struct GAHandleTraits<openvdb::math::Mat3d> { using RW = GA_RWHandleM3D; using RO = GA_ROHandleM3D; }; template<> struct GAHandleTraits<openvdb::Mat4s> { using RW = GA_RWHandleM4; using RO = GA_ROHandleM4; }; template<> struct 
GAHandleTraits<openvdb::Mat4d> { using RW = GA_RWHandleM4D; using RO = GA_ROHandleM4D; }; template<> struct GAHandleTraits<openvdb::math::Quats> { using RW = GA_RWHandleQ; using RO = GA_ROHandleQ; }; template<> struct GAHandleTraits<openvdb::math::Quatd> { using RW = GA_RWHandleQD; using RO = GA_ROHandleQD; }; // @} template<typename HandleType, typename ValueType> inline ValueType readAttributeValue(const HandleType& handle, const GA_Offset offset, const openvdb::Index component = 0) { return ValueType(handle.get(offset, component)); } template<> inline openvdb::math::Vec3<float> readAttributeValue(const GA_ROHandleV3& handle, const GA_Offset offset, const openvdb::Index component) { openvdb::math::Vec3<float> dstValue; const UT_Vector3F value(handle.get(offset, component)); dstValue[0] = value[0]; dstValue[1] = value[1]; dstValue[2] = value[2]; return dstValue; } template<> inline openvdb::math::Vec3<int> readAttributeValue(const GA_ROHandleV3& handle, const GA_Offset offset, const openvdb::Index component) { openvdb::math::Vec3<int> dstValue; const UT_Vector3 value(handle.get(offset, component)); dstValue[0] = static_cast<int>(value[0]); dstValue[1] = static_cast<int>(value[1]); dstValue[2] = static_cast<int>(value[2]); return dstValue; } template<> inline openvdb::math::Vec3<double> readAttributeValue(const GA_ROHandleV3D& handle, const GA_Offset offset, const openvdb::Index component) { openvdb::math::Vec3<double> dstValue; const UT_Vector3D value(handle.get(offset, component)); dstValue[0] = value[0]; dstValue[1] = value[1]; dstValue[2] = value[2]; return dstValue; } template<> inline openvdb::math::Quat<float> readAttributeValue(const GA_ROHandleQ& handle, const GA_Offset offset, const openvdb::Index component) { openvdb::math::Quat<float> dstValue; const UT_QuaternionF value(handle.get(offset, component)); dstValue[0] = value[0]; dstValue[1] = value[1]; dstValue[2] = value[2]; dstValue[3] = value[3]; return dstValue; } template<> inline openvdb::math::Quat<double> readAttributeValue(const GA_ROHandleQD& handle, const GA_Offset offset, const openvdb::Index component) { openvdb::math::Quat<double> dstValue; const UT_QuaternionD value(handle.get(offset, component)); dstValue[0] = value[0]; dstValue[1] = value[1]; dstValue[2] = value[2]; dstValue[3] = value[3]; return dstValue; } template<> inline openvdb::math::Mat3<float> readAttributeValue(const GA_ROHandleM3& handle, const GA_Offset offset, const openvdb::Index component) { // read transposed matrix because Houdini uses column-major order so as // to be compatible with OpenGL const UT_Matrix3F value(handle.get(offset, component)); openvdb::math::Mat3<float> dstValue(value.data()); return dstValue.transpose(); } template<> inline openvdb::math::Mat3<double> readAttributeValue(const GA_ROHandleM3D& handle, const GA_Offset offset, const openvdb::Index component) { // read transposed matrix because Houdini uses column-major order so as // to be compatible with OpenGL const UT_Matrix3D value(handle.get(offset, component)); openvdb::math::Mat3<double> dstValue(value.data()); return dstValue.transpose(); } template<> inline openvdb::math::Mat4<float> readAttributeValue(const GA_ROHandleM4& handle, const GA_Offset offset, const openvdb::Index component) { // read transposed matrix because Houdini uses column-major order so as // to be compatible with OpenGL const UT_Matrix4F value(handle.get(offset, component)); openvdb::math::Mat4<float> dstValue(value.data()); return dstValue.transpose(); } template<> inline openvdb::math::Mat4<double> 
readAttributeValue(const GA_ROHandleM4D& handle, const GA_Offset offset, const openvdb::Index component) { // read transposed matrix because Houdini uses column-major order so as // to be compatible with OpenGL const UT_Matrix4D value(handle.get(offset, component)); openvdb::math::Mat4<double> dstValue(value.data()); return dstValue.transpose(); } template<> inline openvdb::Name readAttributeValue(const GA_ROHandleS& handle, const GA_Offset offset, const openvdb::Index component) { return openvdb::Name(UT_String(handle.get(offset, component)).toStdString()); } template<typename HandleType, typename ValueType> inline void writeAttributeValue(const HandleType& handle, const GA_Offset offset, const openvdb::Index component, const ValueType& value) { handle.set(offset, component, static_cast<typename HandleType::BASETYPE>(value)); } template<> inline void writeAttributeValue(const GA_RWHandleV3& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Vec3<int>& value) { handle.set(offset, component, UT_Vector3F( static_cast<float>(value.x()), static_cast<float>(value.y()), static_cast<float>(value.z()))); } template<> inline void writeAttributeValue(const GA_RWHandleV3& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Vec3<float>& value) { handle.set(offset, component, UT_Vector3(value.x(), value.y(), value.z())); } template<> inline void writeAttributeValue(const GA_RWHandleV3D& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Vec3<double>& value) { handle.set(offset, component, UT_Vector3D(value.x(), value.y(), value.z())); } template<> inline void writeAttributeValue(const GA_RWHandleQ& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Quat<float>& value) { handle.set(offset, component, UT_QuaternionF(value.x(), value.y(), value.z(), value.w())); } template<> inline void writeAttributeValue(const GA_RWHandleQD& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Quat<double>& value) { handle.set(offset, component, UT_QuaternionD(value.x(), value.y(), value.z(), value.w())); } template<> inline void writeAttributeValue(const GA_RWHandleM3& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Mat3<float>& value) { // write transposed matrix because Houdini uses column-major order so as // to be compatible with OpenGL const float* data(value.asPointer()); handle.set(offset, component, UT_Matrix3F(data[0], data[3], data[6], data[1], data[4], data[7], data[2], data[5], data[8])); } template<> inline void writeAttributeValue(const GA_RWHandleM3D& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Mat3<double>& value) { // write transposed matrix because Houdini uses column-major order so as // to be compatible with OpenGL const double* data(value.asPointer()); handle.set(offset, component, UT_Matrix3D(data[0], data[3], data[6], data[1], data[4], data[7], data[2], data[5], data[8])); } template<> inline void writeAttributeValue(const GA_RWHandleM4& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Mat4<float>& value) { // write transposed matrix because Houdini uses column-major order so as // to be compatible with OpenGL const float* data(value.asPointer()); handle.set(offset, component, UT_Matrix4F(data[0], data[4], data[8], data[12], data[1], data[5], data[9], data[13], data[2], data[6], data[10], data[14], data[3], data[7], 
data[11], data[15])); } template<> inline void writeAttributeValue(const GA_RWHandleM4D& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::math::Mat4<double>& value) { // write transposed matrix because Houdini uses column-major order so as // to be compatible with OpenGL const double* data(value.asPointer()); handle.set(offset, component, UT_Matrix4D(data[0], data[4], data[8], data[12], data[1], data[5], data[9], data[13], data[2], data[6], data[10], data[14], data[3], data[7], data[11], data[15])); } template<> inline void writeAttributeValue(const GA_RWHandleS& handle, const GA_Offset offset, const openvdb::Index component, const openvdb::Name& value) { handle.set(offset, component, value.c_str()); } /// @brief Writeable wrapper class around Houdini point attributes which hold /// a reference to the GA Attribute to write template <typename T> struct HoudiniWriteAttribute { using ValueType = T; struct Handle { explicit Handle(HoudiniWriteAttribute<T>& attribute) : mHandle(&attribute.mAttribute) { } template <typename ValueType> void set(openvdb::Index offset, openvdb::Index stride, const ValueType& value) { writeAttributeValue(mHandle, GA_Offset(offset), stride, T(value)); } private: typename GAHandleTraits<T>::RW mHandle; }; // struct Handle explicit HoudiniWriteAttribute(GA_Attribute& attribute) : mAttribute(attribute) { } void expand() { mAttribute.hardenAllPages(); } void compact() { mAttribute.tryCompressAllPages(); } private: GA_Attribute& mAttribute; }; // struct HoudiniWriteAttribute /// @brief Readable wrapper class around Houdini point attributes which hold /// a reference to the GA Attribute to access and optionally a list of offsets template <typename T> struct HoudiniReadAttribute { using value_type = T; using PosType = T; using ReadHandleType = typename GAHandleTraits<T>::RO; explicit HoudiniReadAttribute(const GA_Attribute& attribute, hvdb::OffsetListPtr offsets = hvdb::OffsetListPtr()) : mHandle(&attribute) , mAttribute(attribute) , mOffsets(offsets) { } static void get(const GA_Attribute& attribute, T& value, const GA_Offset offset, const openvdb::Index component) { const ReadHandleType handle(&attribute); value = readAttributeValue<ReadHandleType, T>(handle, offset, component); } // Return the value of the nth point in the array (scalar type only) void get(T& value, const size_t n, const openvdb::Index component = 0) const { value = readAttributeValue<ReadHandleType, T>(mHandle, getOffset(n), component); } // Only provided to match the required interface for the PointPartitioner void getPos(size_t n, T& xyz) const { return this->get(xyz, n); } size_t size() const { return mOffsets ? mOffsets->size() : size_t(mAttribute.getIndexMap().indexSize()); } private: GA_Offset getOffset(size_t n) const { return mOffsets ? 
(*mOffsets)[n] : mAttribute.getIndexMap().offsetFromIndex(GA_Index(n)); } const ReadHandleType mHandle; const GA_Attribute& mAttribute; hvdb::OffsetListPtr mOffsets; }; // HoudiniReadAttribute struct HoudiniGroup { explicit HoudiniGroup(GA_PointGroup& group, openvdb::Index64 startOffset, openvdb::Index64 total) : mGroup(group) , mStartOffset(startOffset) , mTotal(total) { mBackingArray.resize(total, 0); } HoudiniGroup(const HoudiniGroup &) = delete; HoudiniGroup& operator=(const HoudiniGroup &) = delete; void setOffsetOn(openvdb::Index index) { mBackingArray[index - mStartOffset] = 1; } void finalize() { for (openvdb::Index64 i = 0, n = mTotal; i < n; i++) { if (mBackingArray[i]) { mGroup.addOffset(GA_Offset(i + mStartOffset)); } } } private: GA_PointGroup& mGroup; openvdb::Index64 mStartOffset; openvdb::Index64 mTotal; // This is not a bit field as we need to allow threadsafe updates: std::vector<unsigned char> mBackingArray; }; // HoudiniGroup template <typename ValueType, typename CodecType = NullCodec> inline void convertAttributeFromHoudini(PointDataTree& tree, const tools::PointIndexTree& indexTree, const Name& name, const GA_Attribute* const attribute, const GA_Defaults& defaults, const Index stride = 1) { static_assert(!std::is_base_of<AttributeArray, ValueType>::value, "ValueType must not be derived from AttributeArray"); static_assert(!std::is_same<ValueType, Name>::value, "ValueType must not be Name/std::string"); using HoudiniAttribute = HoudiniReadAttribute<ValueType>; ValueType value = hvdb::evalAttrDefault<ValueType>(defaults, 0); // empty metadata if default is zero if (!math::isZero<ValueType>(value)) { TypedMetadata<ValueType> defaultValue(value); appendAttribute<ValueType, CodecType>(tree, name, zeroVal<ValueType>(), stride, /*constantstride=*/true, &defaultValue); } else { appendAttribute<ValueType, CodecType>(tree, name, zeroVal<ValueType>(), stride, /*constantstride=*/true); } HoudiniAttribute houdiniAttribute(*attribute); populateAttribute<PointDataTree, tools::PointIndexTree, HoudiniAttribute>( tree, indexTree, name, houdiniAttribute, stride); } inline void convertAttributeFromHoudini(PointDataTree& tree, const tools::PointIndexTree& indexTree, const Name& name, const GA_Attribute* const attribute, const int compression = 0) { using namespace openvdb::math; using HoudiniStringAttribute = HoudiniReadAttribute<Name>; if (!attribute) { std::stringstream ss; ss << "Invalid attribute - " << attribute->getName(); throw std::runtime_error(ss.str()); } const GA_Storage storage(hvdb::attributeStorageType(attribute)); if (storage == GA_STORE_INVALID) { std::stringstream ss; ss << "Invalid attribute type - " << attribute->getName(); throw std::runtime_error(ss.str()); } const int16_t width(hvdb::attributeTupleSize(attribute)); UT_ASSERT(width > 0); // explicitly handle string attributes if (storage == GA_STORE_STRING) { appendAttribute<Name>(tree, name); HoudiniStringAttribute houdiniAttribute(*attribute); populateAttribute<PointDataTree, tools::PointIndexTree, HoudiniStringAttribute>( tree, indexTree, name, houdiniAttribute); return; } const GA_AIFTuple* tupleAIF = attribute->getAIFTuple(); if (!tupleAIF) { std::stringstream ss; ss << "Invalid attribute type - " << attribute->getName(); throw std::runtime_error(ss.str()); } GA_Defaults defaults = tupleAIF->getDefaults(attribute); const GA_TypeInfo typeInfo(attribute->getOptions().typeInfo()); const bool isVector = width == 3 && (typeInfo == GA_TYPE_VECTOR || typeInfo == GA_TYPE_NORMAL || typeInfo == GA_TYPE_COLOR); const bool 
isQuaternion = width == 4 && (typeInfo == GA_TYPE_QUATERNION); const bool isMatrix3 = width == 9 && (typeInfo == GA_TYPE_TRANSFORM); const bool isMatrix4 = width == 16 && (typeInfo == GA_TYPE_TRANSFORM); if (isVector) { if (storage == GA_STORE_INT32) { convertAttributeFromHoudini<Vec3<int>>(tree, indexTree, name, attribute, defaults); } else if (storage == GA_STORE_REAL16) { // implicitly convert 16-bit float into truncated 32-bit float convertAttributeFromHoudini<Vec3<float>, TruncateCodec>( tree, indexTree, name, attribute, defaults); } else if (storage == GA_STORE_REAL32) { if (compression == hvdb::COMPRESSION_NONE) { convertAttributeFromHoudini<Vec3<float>>( tree, indexTree, name, attribute, defaults); } else if (compression == hvdb::COMPRESSION_TRUNCATE) { convertAttributeFromHoudini<Vec3<float>, TruncateCodec>( tree, indexTree, name, attribute, defaults); } else if (compression == hvdb::COMPRESSION_UNIT_VECTOR) { convertAttributeFromHoudini<Vec3<float>, UnitVecCodec>( tree, indexTree, name, attribute, defaults); } else if (compression == hvdb::COMPRESSION_UNIT_FIXED_POINT_8) { convertAttributeFromHoudini<Vec3<float>, FixedPointCodec<true, UnitRange>>( tree, indexTree, name, attribute, defaults); } else if (compression == hvdb::COMPRESSION_UNIT_FIXED_POINT_16) { convertAttributeFromHoudini<Vec3<float>, FixedPointCodec<false, UnitRange>>( tree, indexTree, name, attribute, defaults); } } else if (storage == GA_STORE_REAL64) { convertAttributeFromHoudini<Vec3<double>>(tree, indexTree, name, attribute, defaults); } else { std::stringstream ss; ss << "Unknown vector attribute type - " << name; throw std::runtime_error(ss.str()); } } else if (isQuaternion) { if (storage == GA_STORE_REAL16) { // implicitly convert 16-bit float into 32-bit float convertAttributeFromHoudini<Quat<float>>(tree, indexTree, name, attribute, defaults); } else if (storage == GA_STORE_REAL32) { convertAttributeFromHoudini<Quat<float>>(tree, indexTree, name, attribute, defaults); } else if (storage == GA_STORE_REAL64) { convertAttributeFromHoudini<Quat<double>>(tree, indexTree, name, attribute, defaults); } else { std::stringstream ss; ss << "Unknown quaternion attribute type - " << name; throw std::runtime_error(ss.str()); } } else if (isMatrix3) { if (storage == GA_STORE_REAL16) { // implicitly convert 16-bit float into 32-bit float convertAttributeFromHoudini<Mat3<float>>(tree, indexTree, name, attribute, defaults); } else if (storage == GA_STORE_REAL32) { convertAttributeFromHoudini<Mat3<float>>(tree, indexTree, name, attribute, defaults); } else if (storage == GA_STORE_REAL64) { convertAttributeFromHoudini<Mat3<double>>(tree, indexTree, name, attribute, defaults); } else { std::stringstream ss; ss << "Unknown matrix3 attribute type - " << name; throw std::runtime_error(ss.str()); } } else if (isMatrix4) { if (storage == GA_STORE_REAL16) { // implicitly convert 16-bit float into 32-bit float convertAttributeFromHoudini<Mat4<float>>(tree, indexTree, name, attribute, defaults); } else if (storage == GA_STORE_REAL32) { convertAttributeFromHoudini<Mat4<float>>(tree, indexTree, name, attribute, defaults); } else if (storage == GA_STORE_REAL64) { convertAttributeFromHoudini<Mat4<double>>(tree, indexTree, name, attribute, defaults); } else { std::stringstream ss; ss << "Unknown matrix4 attribute type - " << name; throw std::runtime_error(ss.str()); } } else { if (storage == GA_STORE_BOOL) { convertAttributeFromHoudini<bool>(tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_INT8) { 
convertAttributeFromHoudini<int8_t>(tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_INT16) { convertAttributeFromHoudini<int16_t>(tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_INT32) { convertAttributeFromHoudini<int32_t>(tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_INT64) { convertAttributeFromHoudini<int64_t>(tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_REAL16) { convertAttributeFromHoudini<float, TruncateCodec>( tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_REAL32 && compression == hvdb::COMPRESSION_NONE) { convertAttributeFromHoudini<float>(tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_REAL32 && compression == hvdb::COMPRESSION_TRUNCATE) { convertAttributeFromHoudini<float, TruncateCodec>( tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_REAL32 && compression == hvdb::COMPRESSION_UNIT_FIXED_POINT_8) { convertAttributeFromHoudini<float, FixedPointCodec<true, UnitRange>>( tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_REAL32 && compression == hvdb::COMPRESSION_UNIT_FIXED_POINT_16) { convertAttributeFromHoudini<float, FixedPointCodec<false, UnitRange>>( tree, indexTree, name, attribute, defaults, width); } else if (storage == GA_STORE_REAL64) { convertAttributeFromHoudini<double>(tree, indexTree, name, attribute, defaults, width); } else { std::stringstream ss; ss << "Unknown attribute type - " << name; throw std::runtime_error(ss.str()); } } } template <typename ValueType> void populateHoudiniDetailAttribute(GA_RWAttributeRef& attrib, const openvdb::MetaMap& metaMap, const Name& key, const int index) { using WriteHandleType = typename GAHandleTraits<ValueType>::RW; using TypedMetadataT = TypedMetadata<ValueType>; typename TypedMetadataT::ConstPtr typedMetadata = metaMap.getMetadata<TypedMetadataT>(key); if (!typedMetadata) return; const ValueType& value = typedMetadata->value(); WriteHandleType handle(attrib.getAttribute()); writeAttributeValue<WriteHandleType, ValueType>(handle, GA_Offset(0), index, value); } template<typename ValueType> Metadata::Ptr createTypedMetadataFromAttribute(const GA_Attribute* const attribute, const uint32_t component = 0) { using HoudiniAttribute = HoudiniReadAttribute<ValueType>; ValueType value; HoudiniAttribute::get(*attribute, value, GA_Offset(0), component); return openvdb::TypedMetadata<ValueType>(value).copy(); } template<typename HoudiniType, typename ValueType> GA_Defaults buildDefaults(const ValueType& value) { HoudiniType values[1]; values[0] = value; return GA_Defaults(values, 1); } template<> GA_Defaults buildDefaults<int32>(const openvdb::math::Vec3<int>& value) { int32 values[3]; for (unsigned i = 0; i < 3; ++i) { values[i] = value(i); } return GA_Defaults(values, 3); } template<> GA_Defaults buildDefaults<fpreal32>(const openvdb::math::Vec3<float>& value) { fpreal32 values[3]; for (unsigned i = 0; i < 3; ++i) { values[i] = value(i); } return GA_Defaults(values, 3); } template<> GA_Defaults buildDefaults<fpreal64>(const openvdb::math::Vec3<double>& value) { fpreal64 values[3]; for (unsigned i = 0; i < 3; ++i) { values[i] = value(i); } return GA_Defaults(values, 3); } template<> GA_Defaults buildDefaults<fpreal32>(const openvdb::math::Quat<float>& value) { fpreal32 values[4]; for (unsigned i = 0; i < 4; ++i) { values[i] = value(i); } return 
GA_Defaults(values, 4); } template<> GA_Defaults buildDefaults<fpreal64>(const openvdb::math::Quat<double>& value) { fpreal64 values[4]; for (unsigned i = 0; i < 4; ++i) { values[i] = value(i); } return GA_Defaults(values, 4); } template<> GA_Defaults buildDefaults<fpreal32>(const openvdb::math::Mat3<float>& value) { fpreal32 values[9]; const float* data = value.asPointer(); for (unsigned i = 0; i < 9; ++i) { values[i] = data[i]; } return GA_Defaults(values, 9); } template<> GA_Defaults buildDefaults<fpreal64>(const openvdb::math::Mat3<double>& value) { fpreal64 values[9]; const double* data = value.asPointer(); for (unsigned i = 0; i < 9; ++i) { values[i] = data[i]; } return GA_Defaults(values, 9); } template<> GA_Defaults buildDefaults<fpreal32>(const openvdb::math::Mat4<float>& value) { fpreal32 values[16]; const float* data = value.asPointer(); for (unsigned i = 0; i < 16; ++i) { values[i] = data[i]; } return GA_Defaults(values, 16); } template<> GA_Defaults buildDefaults<fpreal64>(const openvdb::math::Mat4<double>& value) { fpreal64 values[16]; const double* data = value.asPointer(); for (unsigned i = 0; i < 16; ++i) { values[i] = data[i]; } return GA_Defaults(values, 16); } template <typename ValueType, typename HoudiniType> GA_Defaults gaDefaultsFromDescriptorTyped(const openvdb::points::AttributeSet::Descriptor& descriptor, const openvdb::Name& name) { ValueType defaultValue = descriptor.getDefaultValue<ValueType>(name); return buildDefaults<HoudiniType, ValueType>(defaultValue); } inline GA_Defaults gaDefaultsFromDescriptor(const openvdb::points::AttributeSet::Descriptor& descriptor, const openvdb::Name& name) { const size_t pos = descriptor.find(name); if (pos == openvdb::points::AttributeSet::INVALID_POS) return GA_Defaults(0); const openvdb::Name type = descriptor.type(pos).first; if (type == "bool") { return gaDefaultsFromDescriptorTyped<bool, int32>(descriptor, name); } else if (type == "int8") { return gaDefaultsFromDescriptorTyped<int8_t, int32>(descriptor, name); } else if (type == "int16") { return gaDefaultsFromDescriptorTyped<int16_t, int32>(descriptor, name); } else if (type == "int32") { return gaDefaultsFromDescriptorTyped<int32_t, int32>(descriptor, name); } else if (type == "int64") { return gaDefaultsFromDescriptorTyped<int64_t, int64>(descriptor, name); } else if (type == "float") { return gaDefaultsFromDescriptorTyped<float, fpreal32>(descriptor, name); } else if (type == "double") { return gaDefaultsFromDescriptorTyped<double, fpreal64>(descriptor, name); } else if (type == "vec3i") { return gaDefaultsFromDescriptorTyped<openvdb::math::Vec3<int>, int32>(descriptor, name); } else if (type == "vec3s") { return gaDefaultsFromDescriptorTyped<openvdb::math::Vec3s, fpreal32>(descriptor, name); } else if (type == "vec3d") { return gaDefaultsFromDescriptorTyped<openvdb::math::Vec3d, fpreal64>(descriptor, name); } else if (type == "quats") { return gaDefaultsFromDescriptorTyped<openvdb::math::Quats, fpreal32>(descriptor, name); } else if (type == "quatd") { return gaDefaultsFromDescriptorTyped<openvdb::math::Quatd, fpreal64>(descriptor, name); } else if (type == "mat3s") { return gaDefaultsFromDescriptorTyped<openvdb::math::Mat3s, fpreal32>(descriptor, name); } else if (type == "mat3d") { return gaDefaultsFromDescriptorTyped<openvdb::math::Mat3d, fpreal64>(descriptor, name); } else if (type == "mat4s") { return gaDefaultsFromDescriptorTyped<openvdb::math::Mat4s, fpreal32>(descriptor, name); } else if (type == "mat4d") { return 
gaDefaultsFromDescriptorTyped<openvdb::math::Mat4d, fpreal64>(descriptor, name); } return GA_Defaults(0); } } // unnamed namespace //////////////////////////////////////// namespace openvdb_houdini { float computeVoxelSizeFromHoudini(const GU_Detail& detail, const uint32_t pointsPerVoxel, const openvdb::math::Mat4d& matrix, const Index decimalPlaces, hvdb::Interrupter& interrupter) { HoudiniReadAttribute<openvdb::Vec3R> positions(*(detail.getP())); return openvdb::points::computeVoxelSize( positions, pointsPerVoxel, matrix, decimalPlaces, &interrupter); } PointDataGrid::Ptr convertHoudiniToPointDataGrid(const GU_Detail& ptGeo, const int compression, const AttributeInfoMap& attributes, const math::Transform& transform, const WarnFunc& warnings) { using HoudiniPositionAttribute = HoudiniReadAttribute<Vec3d>; // initialize primitive offsets hvdb::OffsetListPtr offsets; for (GA_Iterator primitiveIt(ptGeo.getPrimitiveRange()); !primitiveIt.atEnd(); ++primitiveIt) { const GA_Primitive* primitive = ptGeo.getPrimitiveList().get(*primitiveIt); if (primitive->getTypeId() != GA_PRIMNURBCURVE) continue; const size_t vertexCount = primitive->getVertexCount(); if (vertexCount == 0) continue; if (!offsets) offsets.reset(new hvdb::OffsetList); const GA_Offset firstOffset = primitive->getPointOffset(0); offsets->push_back(firstOffset); } // Create PointPartitioner compatible P attribute wrapper (for now no offset filtering) const GA_Attribute& positionAttribute = *ptGeo.getP(); HoudiniPositionAttribute points(positionAttribute, offsets); // Create PointIndexGrid used for consistent index ordering in all attribute conversion const tools::PointIndexGrid::Ptr pointIndexGrid = tools::createPointIndexGrid<tools::PointIndexGrid>(points, transform); // Create PointDataGrid using position attribute PointDataGrid::Ptr pointDataGrid; if (compression == 1 /*FIXED_POSITION_16*/) { pointDataGrid = createPointDataGrid<FixedPointCodec<false>, PointDataGrid>( *pointIndexGrid, points, transform); } else if (compression == 2 /*FIXED_POSITION_8*/) { pointDataGrid = createPointDataGrid<FixedPointCodec<true>, PointDataGrid>( *pointIndexGrid, points, transform); } else /*NONE*/ { pointDataGrid = createPointDataGrid<NullCodec, PointDataGrid>( *pointIndexGrid, points, transform); } const tools::PointIndexTree& indexTree = pointIndexGrid->tree(); PointDataTree& tree = pointDataGrid->tree(); const GA_Size numHoudiniPoints = ptGeo.getNumPoints(); UT_ASSERT(numHoudiniPoints >= 0); const Index64 numVDBPoints = pointCount(tree); UT_ASSERT(numVDBPoints <= static_cast<Index64>(numHoudiniPoints)); if (numVDBPoints < static_cast<Index64>(numHoudiniPoints)) { warnings("Points contain NAN positional values. 
These points will not be converted."); } if (!tree.cbeginLeaf()) return pointDataGrid; // store point group information const GA_ElementGroupTable& elementGroups = ptGeo.getElementGroupTable(GA_ATTRIB_POINT); const int64_t numGroups = elementGroups.entries(); // including internal groups if (numGroups > 0) { // Append (empty) groups to tree std::vector<Name> groupNames; groupNames.reserve(numGroups); for (auto it = elementGroups.beginTraverse(), itEnd = elementGroups.endTraverse(); it != itEnd; ++it) { groupNames.emplace_back((*it)->getName().toStdString()); } appendGroups(tree, groupNames); // create the group membership vector at a multiple of 1024 for fast parallel resetting const size_t groupVectorSize = numHoudiniPoints + (1024 - (numHoudiniPoints % 1024)); std::vector<short> inGroup(groupVectorSize, short(0)); // Set group membership in tree for (auto it = elementGroups.beginTraverse(), itEnd = elementGroups.endTraverse(); it != itEnd; ++it) { const GA_Range range(**it); tbb::parallel_for(GA_SplittableRange(range), [&ptGeo, &inGroup](const GA_SplittableRange& r) { for (GA_PageIterator pit = r.beginPages(); !pit.atEnd(); ++pit) { GA_Offset start, end; for (GA_Iterator iter = pit.begin(); iter.blockAdvance(start, end);) { for (GA_Offset off = start; off < end; ++off) { const GA_Index idx = ptGeo.pointIndex(off); UT_ASSERT(idx < GA_Index(inGroup.size())); inGroup[idx] = short(1); } } } }); const Name groupName = (*it)->getName().toStdString(); setGroup(tree, indexTree, inGroup, groupName); // reset groups to 0 tbb::parallel_for(tbb::blocked_range<size_t>(0, groupVectorSize / 1024), [&inGroup](const tbb::blocked_range<size_t>& range) { for (size_t n = range.begin(), N = range.end(); n != N; ++n) { std::fill_n(inGroup.begin() + n*1024, 1024, 0); } }); } } // Add other attributes to PointDataGrid for (const auto& attrInfo : attributes) { const Name& name = attrInfo.first; // skip position as this has already been added if (name == "P") continue; GA_ROAttributeRef attrRef = ptGeo.findPointAttribute(name.c_str()); if (!attrRef.isValid()) continue; GA_Attribute const * gaAttribute = attrRef.getAttribute(); if (!gaAttribute) continue; const GA_AIFSharedStringTuple* sharedStringTupleAIF = gaAttribute->getAIFSharedStringTuple(); const bool isString = bool(sharedStringTupleAIF); // Extract all the string values from the string table and insert them // into the Descriptor Metadata if (isString) { // Iterate over the strings in the table and insert them into the Metadata MetaMap& metadata = makeDescriptorUnique(tree)->getMetadata(); StringMetaInserter inserter(metadata); for (auto it = sharedStringTupleAIF->begin(gaAttribute), itEnd = sharedStringTupleAIF->end(); !(it == itEnd); ++it) { Name str(it.getString()); if (!str.empty()) inserter.insert(str); } } convertAttributeFromHoudini(tree, indexTree, name, gaAttribute, /*compression=*/attrInfo.second.first); } // Attempt to compact attributes compactAttributes(tree); return pointDataGrid; } void convertPointDataGridToHoudini( GU_Detail& detail, const PointDataGrid& grid, const std::vector<std::string>& attributes, const std::vector<std::string>& includeGroups, const std::vector<std::string>& excludeGroups, const bool inCoreOnly) { using namespace openvdb::math; const PointDataTree& tree = grid.tree(); auto leafIter = tree.cbeginLeaf(); if (!leafIter) return; // position attribute is mandatory const AttributeSet& attributeSet = leafIter->attributeSet(); const AttributeSet::Descriptor& descriptor = attributeSet.descriptor(); const bool hasPosition = 
descriptor.find("P") != AttributeSet::INVALID_POS; if (!hasPosition) return; // sort for binary search std::vector<std::string> sortedAttributes(attributes); std::sort(sortedAttributes.begin(), sortedAttributes.end()); // obtain cumulative point offsets and total points std::vector<Index64> offsets; MultiGroupFilter filter(includeGroups, excludeGroups, leafIter->attributeSet()); const Index64 total = pointOffsets(offsets, tree, filter, inCoreOnly); // a block's global offset is needed to transform its point offsets to global offsets const Index64 startOffset = detail.appendPointBlock(total); HoudiniWriteAttribute<Vec3f> positionAttribute(*detail.getP()); convertPointDataGridPosition(positionAttribute, grid, offsets, startOffset, filter, inCoreOnly); // add other point attributes to the hdk detail const AttributeSet::Descriptor::NameToPosMap& nameToPosMap = descriptor.map(); for (const auto& namePos : nameToPosMap) { const Name& name = namePos.first; // position handled explicitly if (name == "P") continue; // filter attributes if (!sortedAttributes.empty() && !std::binary_search(sortedAttributes.begin(), sortedAttributes.end(), name)) { continue; } const auto index = static_cast<unsigned>(namePos.second); const AttributeArray& array = leafIter->constAttributeArray(index); // don't convert group attributes if (isGroup(array)) continue; const unsigned stride = array.stride(); GA_RWAttributeRef attributeRef = detail.findPointAttribute(name.c_str()); const NamePair& type = descriptor.type(index); const Name valueType(isString(array) ? "string" : type.first); // create the attribute if it doesn't already exist in the detail if (attributeRef.isInvalid()) { const bool truncate(type.second == TruncateCodec::name()); GA_Storage storage(gaStorageFromAttrString(valueType)); if (storage == GA_STORE_INVALID) continue; if (storage == GA_STORE_REAL32 && truncate) { storage = GA_STORE_REAL16; } unsigned width = stride; const bool isVector = valueType.compare(0, 4, "vec3") == 0; const bool isQuaternion = valueType.compare(0, 4, "quat") == 0; const bool isMatrix3 = valueType.compare(0, 4, "mat3") == 0; const bool isMatrix4 = valueType.compare(0, 4, "mat4") == 0; if (isVector) width = 3; else if (isQuaternion) width = 4; else if (isMatrix3) width = 9; else if (isMatrix4) width = 16; const GA_Defaults defaults = gaDefaultsFromDescriptor(descriptor, name); attributeRef = detail.addTuple(storage, GA_ATTRIB_POINT, name.c_str(), width, defaults); // apply type info to some recognised types if (isVector) { if (name == "Cd") attributeRef->getOptions().setTypeInfo(GA_TYPE_COLOR); else if (name == "N") attributeRef->getOptions().setTypeInfo(GA_TYPE_NORMAL); else attributeRef->getOptions().setTypeInfo(GA_TYPE_VECTOR); } if (isQuaternion) { attributeRef->getOptions().setTypeInfo(GA_TYPE_QUATERNION); } if (isMatrix4 || isMatrix3) { attributeRef->getOptions().setTypeInfo(GA_TYPE_TRANSFORM); } // '|' and ':' characters are valid in OpenVDB Points names but // will make Houdini Attribute names invalid if (attributeRef.isInvalid()) { OPENVDB_THROW( RuntimeError, "Unable to create Houdini Points Attribute with name '" + name + "'. 
'|' and ':' characters are not supported by Houdini."); } } if (valueType == "string") { HoudiniWriteAttribute<Name> attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else if (valueType == "bool") { HoudiniWriteAttribute<bool> attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else if (valueType == "int8") { HoudiniWriteAttribute<int8_t> attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else if (valueType == "int16") { HoudiniWriteAttribute<int16_t> attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else if (valueType == "int32") { HoudiniWriteAttribute<int32_t> attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else if (valueType == "int64") { HoudiniWriteAttribute<int64_t> attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else if (valueType == "float") { HoudiniWriteAttribute<float> attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else if (valueType == "double") { HoudiniWriteAttribute<double> attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else if (valueType == "vec3i") { HoudiniWriteAttribute<Vec3<int> > attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else if (valueType == "vec3s") { HoudiniWriteAttribute<Vec3<float> > attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else if (valueType == "vec3d") { HoudiniWriteAttribute<Vec3<double> > attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else if (valueType == "quats") { HoudiniWriteAttribute<Quat<float> > attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else if (valueType == "quatd") { HoudiniWriteAttribute<Quat<double> > attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else if (valueType == "mat3s") { HoudiniWriteAttribute<Mat3<float> > attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else if (valueType == "mat3d") { HoudiniWriteAttribute<Mat3<double> > attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else if (valueType == "mat4s") { HoudiniWriteAttribute<Mat4<float> > attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else if (valueType == "mat4d") { HoudiniWriteAttribute<Mat4<double> > 
attribute(*attributeRef.getAttribute()); convertPointDataGridAttribute(attribute, tree, offsets, startOffset, index, stride, filter, inCoreOnly); } else { throw std::runtime_error("Unknown Attribute Type for Conversion: " + valueType); } } // add point groups to the hdk detail const AttributeSet::Descriptor::NameToPosMap& groupMap = descriptor.groupMap(); for (const auto& namePos : groupMap) { const Name& name = namePos.first; UT_ASSERT(!name.empty()); GA_PointGroup* pointGroup = detail.findPointGroup(name.c_str()); if (!pointGroup) pointGroup = detail.newPointGroup(name.c_str()); const AttributeSet::Descriptor::GroupIndex index = attributeSet.groupIndex(name); HoudiniGroup group(*pointGroup, startOffset, total); convertPointDataGridGroup(group, tree, offsets, startOffset, index, filter, inCoreOnly); } } void populateMetadataFromHoudini(openvdb::points::PointDataGrid& grid, const GU_Detail& detail, const WarnFunc& warnings) { using namespace openvdb::math; for (GA_AttributeDict::iterator iter = detail.attribs().begin(GA_SCOPE_PUBLIC); !iter.atEnd(); ++iter) { const GA_Attribute* const attribute = *iter; if (!attribute) continue; const Name name("global:" + Name(attribute->getName())); Metadata::Ptr metadata = grid[name]; if (metadata) continue; const GA_Storage storage(attributeStorageType(attribute)); const int16_t width(attributeTupleSize(attribute)); const GA_TypeInfo typeInfo(attribute->getOptions().typeInfo()); const bool isVector = width == 3 && (typeInfo == GA_TYPE_VECTOR || typeInfo == GA_TYPE_NORMAL || typeInfo == GA_TYPE_COLOR); const bool isQuaternion = width == 4 && (typeInfo == GA_TYPE_QUATERNION); const bool isMatrix3 = width == 9 && (typeInfo == GA_TYPE_TRANSFORM); const bool isMatrix4 = width == 16 && (typeInfo == GA_TYPE_TRANSFORM); if (isVector) { if (storage == GA_STORE_REAL16) { metadata = createTypedMetadataFromAttribute<Vec3<float> >(attribute); } else if (storage == GA_STORE_REAL32) { metadata = createTypedMetadataFromAttribute<Vec3<float> >(attribute); } else if (storage == GA_STORE_REAL64) { metadata = createTypedMetadataFromAttribute<Vec3<double> >(attribute); } else { std::stringstream ss; ss << "Detail attribute \"" << attribute->getName() << "\" " << "unsupported vector type for metadata conversion."; warnings(ss.str()); continue; } UT_ASSERT(metadata); grid.insertMeta(name, *metadata); } else if (isQuaternion) { if (storage == GA_STORE_REAL16) { metadata = createTypedMetadataFromAttribute<Quat<float>>(attribute); } else if (storage == GA_STORE_REAL32) { metadata = createTypedMetadataFromAttribute<Quat<float>>(attribute); } else if (storage == GA_STORE_REAL64) { metadata = createTypedMetadataFromAttribute<Quat<double>>(attribute); } else { std::stringstream ss; ss << "Detail attribute \"" << attribute->getName() << "\" " << "unsupported quaternion type for metadata conversion."; warnings(ss.str()); continue; } } else if (isMatrix3) { if (storage == GA_STORE_REAL16) { metadata = createTypedMetadataFromAttribute<Mat3<float>>(attribute); } else if (storage == GA_STORE_REAL32) { metadata = createTypedMetadataFromAttribute<Mat3<float>>(attribute); } else if (storage == GA_STORE_REAL64) { metadata = createTypedMetadataFromAttribute<Mat3<double>>(attribute); } else { std::stringstream ss; ss << "Detail attribute \"" << attribute->getName() << "\" " << "unsupported matrix3 type for metadata conversion."; warnings(ss.str()); continue; } } else if (isMatrix4) { if (storage == GA_STORE_REAL16) { metadata = createTypedMetadataFromAttribute<Mat4<float>>(attribute); } else 
if (storage == GA_STORE_REAL32) { metadata = createTypedMetadataFromAttribute<Mat4<float>>(attribute); } else if (storage == GA_STORE_REAL64) { metadata = createTypedMetadataFromAttribute<Mat4<double>>(attribute); } else { std::stringstream ss; ss << "Detail attribute \"" << attribute->getName() << "\" " << "unsupported matrix4 type for metadata conversion."; warnings(ss.str()); continue; } } else { for (int i = 0; i < width; i++) { if (storage == GA_STORE_BOOL) { metadata = createTypedMetadataFromAttribute<bool>(attribute, i); } else if (storage == GA_STORE_INT8) { metadata = createTypedMetadataFromAttribute<int8_t>(attribute, i); } else if (storage == GA_STORE_INT16) { metadata = createTypedMetadataFromAttribute<int16_t>(attribute, i); } else if (storage == GA_STORE_INT32) { metadata = createTypedMetadataFromAttribute<int32_t>(attribute, i); } else if (storage == GA_STORE_INT64) { metadata = createTypedMetadataFromAttribute<int64_t>(attribute, i); } else if (storage == GA_STORE_REAL16) { metadata = createTypedMetadataFromAttribute<float>(attribute, i); } else if (storage == GA_STORE_REAL32) { metadata = createTypedMetadataFromAttribute<float>(attribute, i); } else if (storage == GA_STORE_REAL64) { metadata = createTypedMetadataFromAttribute<double>(attribute, i); } else if (storage == GA_STORE_STRING) { metadata = createTypedMetadataFromAttribute<openvdb::Name>(attribute, i); } else { std::stringstream ss; ss << "Detail attribute \"" << attribute->getName() << "\" " << "unsupported type for metadata conversion."; warnings(ss.str()); continue; } UT_ASSERT(metadata); if (width > 1) { const Name arrayName(name + Name("[") + std::to_string(i) + Name("]")); grid.insertMeta(arrayName, *metadata); } else { grid.insertMeta(name, *metadata); } } } } } void convertMetadataToHoudini(GU_Detail& detail, const openvdb::MetaMap& metaMap, const WarnFunc& warnings) { struct Local { static bool isGlobalMetadata(const Name& name) { return name.compare(0, 7, "global:") == 0; } static Name toDetailName(const Name& name) { Name detailName(name); detailName.erase(0, 7); const size_t open = detailName.find('['); if (open != std::string::npos) { detailName = detailName.substr(0, open); } return detailName; } static int toDetailIndex(const Name& name) { const size_t open = name.find('['); const size_t close = name.find(']'); int index = 0; if (open != std::string::npos && close != std::string::npos && close == name.length()-1 && open > 0 && open+1 < close) { try { // parse array index index = std::stoi(name.substr(open+1, close-open-1)); } catch (const std::exception&) {} } return index; } }; using namespace openvdb::math; using DetailInfo = std::pair<Name, int>; using DetailMap = std::map<Name, DetailInfo>; DetailMap detailCreate; DetailMap detailPopulate; for(MetaMap::ConstMetaIterator iter = metaMap.beginMeta(); iter != metaMap.endMeta(); ++iter) { const Metadata::Ptr metadata = iter->second; if (!metadata) continue; const Name& key = iter->first; if (!Local::isGlobalMetadata(key)) continue; Name name = Local::toDetailName(key); int index = Local::toDetailIndex(key); // add to creation map if (detailCreate.find(name) == detailCreate.end()) { detailCreate[name] = DetailInfo(metadata->typeName(), index); } else { if (index > detailCreate[name].second) detailCreate[name].second = index; } // add to populate map detailPopulate[key] = DetailInfo(name, index); } // add all detail attributes for (const auto& item : detailCreate) { const Name& name = item.first; const DetailInfo& info = item.second; const Name& type = 
info.first; const int size = info.second; GA_RWAttributeRef attribute = detail.findGlobalAttribute(name); if (attribute.isInvalid()) { const GA_Storage storage = gaStorageFromAttrString(type); if (storage == GA_STORE_INVALID) { throw std::runtime_error("Invalid attribute storage type \"" + name + "\"."); } if (type == "vec3s" || type == "vec3d") { attribute = detail.addTuple(storage, GA_ATTRIB_GLOBAL, name.c_str(), 3); attribute.setTypeInfo(GA_TYPE_VECTOR); } else { attribute = detail.addTuple(storage, GA_ATTRIB_GLOBAL, name.c_str(), size+1); } if (!attribute.isValid()) { throw std::runtime_error("Error creating attribute with name \"" + name + "\"."); } } } // populate the values for (const auto& item : detailPopulate) { const Name& key = item.first; const DetailInfo& info = item.second; const Name& name = info.first; const int index = info.second; const Name& type = metaMap[key]->typeName(); GA_RWAttributeRef attrib = detail.findGlobalAttribute(name); UT_ASSERT(!attrib.isInvalid()); if (type == openvdb::typeNameAsString<bool>()) populateHoudiniDetailAttribute<bool>(attrib, metaMap, key, index); else if (type == openvdb::typeNameAsString<int8_t>()) populateHoudiniDetailAttribute<int8_t>(attrib, metaMap, key, index); else if (type == openvdb::typeNameAsString<int16_t>()) populateHoudiniDetailAttribute<int16_t>(attrib, metaMap, key, index); else if (type == openvdb::typeNameAsString<int32_t>()) populateHoudiniDetailAttribute<int32_t>(attrib, metaMap, key, index); else if (type == openvdb::typeNameAsString<int64_t>()) populateHoudiniDetailAttribute<int64_t>(attrib, metaMap, key, index); else if (type == openvdb::typeNameAsString<float>()) populateHoudiniDetailAttribute<float>(attrib, metaMap, key, index); else if (type == openvdb::typeNameAsString<double>()) populateHoudiniDetailAttribute<double>(attrib, metaMap, key, index); else if (type == openvdb::typeNameAsString<Vec3<int32_t> >()) populateHoudiniDetailAttribute<Vec3<int32_t> >(attrib, metaMap, key, index); else if (type == openvdb::typeNameAsString<Vec3<float> >()) populateHoudiniDetailAttribute<Vec3<float> >(attrib, metaMap, key, index); else if (type == openvdb::typeNameAsString<Vec3<double> >()) populateHoudiniDetailAttribute<Vec3<double> >(attrib, metaMap, key, index); else if (type == openvdb::typeNameAsString<Name>()) populateHoudiniDetailAttribute<Name>(attrib, metaMap, key, index); else { std::stringstream ss; ss << "Metadata value \"" << key << "\" unsupported type for detail attribute conversion."; warnings(ss.str()); } } } //////////////////////////////////////// int16_t attributeTupleSize(const GA_Attribute* const attribute) { if (!attribute) return int16_t(0); const GA_AIFTuple* tupleAIF = attribute->getAIFTuple(); if (!tupleAIF) { const GA_AIFStringTuple* tupleAIFString = attribute->getAIFStringTuple(); if (tupleAIFString) { return static_cast<int16_t>(tupleAIFString->getTupleSize(attribute)); } } else { return static_cast<int16_t>(tupleAIF->getTupleSize(attribute)); } return int16_t(0); } GA_Storage attributeStorageType(const GA_Attribute* const attribute) { if (!attribute) return GA_STORE_INVALID; const GA_AIFTuple* tupleAIF = attribute->getAIFTuple(); if (!tupleAIF) { if (attribute->getAIFStringTuple()) { return GA_STORE_STRING; } } else { return tupleAIF->getStorage(attribute); } return GA_STORE_INVALID; } //////////////////////////////////////// void collectPointInfo(const PointDataGrid& grid, std::string& countStr, std::string& groupStr, std::string& attributeStr) { using AttributeSet = 
openvdb::points::AttributeSet; using Descriptor = openvdb::points::AttributeSet::Descriptor; const PointDataTree& tree = grid.constTree(); // iterate through all leaf nodes to find out if all are out-of-core bool allOutOfCore = true; for (auto iter = tree.cbeginLeaf(); iter; ++iter) { if (!iter->buffer().isOutOfCore()) { allOutOfCore = false; break; } } openvdb::Index64 totalPointCount = 0; // it is more technically correct to rely on the voxel count as this may be // out of sync with the attribute size, however for faster node preview when // the voxel buffers are all out-of-core, count up the sizes of the first // attribute array instead if (allOutOfCore) { for (auto iter = tree.cbeginLeaf(); iter; ++iter) { if (iter->attributeSet().size() > 0) { totalPointCount += iter->constAttributeArray(0).size(); } } } else { totalPointCount = openvdb::points::pointCount(tree); } std::ostringstream os; os << openvdb::util::formattedInt(totalPointCount); countStr = os.str(); os.clear(); os.str(""); const auto iter = tree.cbeginLeaf(); if (!iter) return; const AttributeSet& attributeSet = iter->attributeSet(); const Descriptor& descriptor = attributeSet.descriptor(); std::string viewportGroupName = ""; if (StringMetadata::ConstPtr stringMeta = grid.getMetadata<StringMetadata>(META_GROUP_VIEWPORT)) { viewportGroupName = stringMeta->value(); } const Descriptor::NameToPosMap& groupMap = descriptor.groupMap(); bool first = true; for (const auto& it : groupMap) { if (first) first = false; else os << ", "; // add an asterisk as a viewport group indicator if (it.first == viewportGroupName) os << "*"; os << it.first << "("; // for faster node preview when all the voxel buffers are out-of-core, // don't load the group arrays to display the group sizes, just print // "out-of-core" instead @todo - put the group sizes into the grid // metadata on write for this use case if (allOutOfCore) os << "out-of-core"; else { const openvdb::points::GroupFilter filter(it.first, attributeSet); os << openvdb::util::formattedInt(pointCount(tree, filter)); } os << ")"; } groupStr = (os.str().empty() ? "none" : os.str()); os.clear(); os.str(""); const Descriptor::NameToPosMap& nameToPosMap = descriptor.map(); first = true; for (const auto& it : nameToPosMap) { const openvdb::points::AttributeArray& array = *(attributeSet.getConst(it.second)); if (isGroup(array)) continue; if (first) first = false; else os << ", "; const openvdb::NamePair& type = descriptor.type(it.second); const openvdb::Name& codecType = type.second; if (isString(array)) { os << it.first << "[str]"; } else { os << it.first << "[" << type.first; // if no value compression, hide the codec os << (codecType != "null" ? "_" + codecType : ""); os << "]"; } if (!array.hasConstantStride()) os << " [dynamic]"; else if (array.stride() > 1) os << " [" << array.stride() << "]"; } attributeStr = (os.str().empty() ? 
"none" : os.str()); } void pointDataGridSpecificInfoText(std::ostream& infoStr, const GridBase& grid) { const PointDataGrid* pointDataGrid = dynamic_cast<const PointDataGrid*>(&grid); if (!pointDataGrid) return; // match native OpenVDB convention as much as possible infoStr << " voxel size: " << pointDataGrid->transform().voxelSize()[0] << ","; infoStr << " type: points,"; if (pointDataGrid->activeVoxelCount() != 0) { const Coord dim = grid.evalActiveVoxelDim(); infoStr << " dim: " << dim[0] << "x" << dim[1] << "x" << dim[2] << ","; } else { infoStr <<" <empty>,"; } std::string countStr, groupStr, attributeStr; collectPointInfo(*pointDataGrid, countStr, groupStr, attributeStr); infoStr << " count: " << countStr << ","; infoStr << " groups: " << groupStr << ","; infoStr << " attributes: " << attributeStr; } namespace { inline int lookupGroupInput(const PRM_SpareData* spare) { if (!spare) return 0; const char* istring = spare->getValue("sop_input"); return istring ? atoi(istring) : 0; } void sopBuildVDBPointsGroupMenu(void* data, PRM_Name* menuEntries, int /*themenusize*/, const PRM_SpareData* spare, const PRM_Parm* /*parm*/) { SOP_Node* sop = CAST_SOPNODE(static_cast<OP_Node*>(data)); int inputIndex = lookupGroupInput(spare); const GU_Detail* gdp = sop->getInputLastGeo(inputIndex, CHgetEvalTime()); // const cast as iterator requires non-const access, however data is not modified VdbPrimIterator vdbIt(const_cast<GU_Detail*>(gdp)); int n_entries = 0; for (; vdbIt; ++vdbIt) { GU_PrimVDB* vdbPrim = *vdbIt; PointDataGrid::ConstPtr grid = gridConstPtrCast<PointDataGrid>(vdbPrim->getConstGridPtr()); // ignore all but point data grids if (!grid) continue; auto leafIter = grid->tree().cbeginLeaf(); if (!leafIter) continue; const AttributeSet::Descriptor& descriptor = leafIter->attributeSet().descriptor(); for (const auto& it : descriptor.groupMap()) { // add each VDB Points group to the menu menuEntries[n_entries].setToken(it.first.c_str()); menuEntries[n_entries].setLabel(it.first.c_str()); n_entries++; } } // zero value ends the menu menuEntries[n_entries].setToken(0); menuEntries[n_entries].setLabel(0); } } // unnamed namespace #ifdef _MSC_VER OPENVDB_HOUDINI_API const PRM_ChoiceList VDBPointsGroupMenuInput1(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); OPENVDB_HOUDINI_API const PRM_ChoiceList VDBPointsGroupMenuInput2(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); OPENVDB_HOUDINI_API const PRM_ChoiceList VDBPointsGroupMenuInput3(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); OPENVDB_HOUDINI_API const PRM_ChoiceList VDBPointsGroupMenuInput4(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); OPENVDB_HOUDINI_API const PRM_ChoiceList VDBPointsGroupMenu(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); #else const PRM_ChoiceList VDBPointsGroupMenuInput1(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); const PRM_ChoiceList VDBPointsGroupMenuInput2(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); const PRM_ChoiceList VDBPointsGroupMenuInput3(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); const PRM_ChoiceList VDBPointsGroupMenuInput4(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); const PRM_ChoiceList VDBPointsGroupMenu(PRM_CHOICELIST_TOGGLE, sopBuildVDBPointsGroupMenu); #endif } // namespace openvdb_houdini
68,670
C++
36.361806
147
0.622892
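// Editor's usage sketch (illustrative only, not part of the original source file above):
// a minimal example of how the collectPointInfo() helper defined in that file could be
// driven from a diagnostic routine. The function name printPointDataGridSummary and the
// stream target are hypothetical, and it assumes PointDataGrid is an alias for
// openvdb::points::PointDataGrid, as the surrounding code suggests. Wrapped in "#if 0"
// so it would have no effect if compiled.
#if 0
#include <iostream>
inline void printPointDataGridSummary(const openvdb::points::PointDataGrid& grid)
{
    std::string countStr, groupStr, attributeStr;
    // fills the three summary strings: formatted point count, group names with
    // per-group point counts, and attribute names with their types/codecs
    openvdb_houdini::collectPointInfo(grid, countStr, groupStr, attributeStr);
    std::cout << "count: " << countStr
              << ", groups: " << groupStr
              << ", attributes: " << attributeStr << std::endl;
}
#endif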
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Points_Delete.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file SOP_OpenVDB_Points_Delete.cc /// /// @author Francisco Gochez, Dan Bailey /// /// @brief Delete points that are members of specific groups #include <openvdb/openvdb.h> #include <openvdb/points/PointDataGrid.h> #include <openvdb/points/PointDelete.h> #include <UT/UT_Version.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb_houdini/PointUtils.h> #include <openvdb_houdini/Utils.h> #include <houdini_utils/geometry.h> #include <houdini_utils/ParmFactory.h> #include <algorithm> #include <stdexcept> #include <string> #include <vector> using namespace openvdb; using namespace openvdb::points; using namespace openvdb::math; namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; //////////////////////////////////////// class SOP_OpenVDB_Points_Delete: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Points_Delete(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Points_Delete() override = default; static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; protected: bool updateParmsFlags() override; }; // class SOP_OpenVDB_Points_Delete //////////////////////////////////////// // Build UI and register this operator. void newSopOperator(OP_OperatorTable* table) { openvdb::initialize(); if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setHelpText("Specify a subset of the input point data grids to delete from.") .setChoiceList(&hutil::PrimGroupMenu)); parms.add(hutil::ParmFactory(PRM_STRING, "vdbpointsgroups", "VDB Points Groups") .setHelpText("Specify VDB points groups to delete.") .setChoiceList(&hvdb::VDBPointsGroupMenuInput1)); parms.add(hutil::ParmFactory(PRM_TOGGLE, "invert", "Invert") .setDefault(PRMzeroDefaults) .setHelpText("Invert point deletion so that points not belonging to any of the " "groups will be deleted.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "dropgroups", "Drop Points Groups") .setDefault(PRMoneDefaults) .setHelpText("Drop the VDB points groups that were used for deletion. This option is " "ignored if \"invert\" is enabled.")); ////////// // Register this operator. 
hvdb::OpenVDBOpFactory("VDB Points Delete", SOP_OpenVDB_Points_Delete::factory, parms, *table) #if UT_VERSION_INT < 0x11050000 // earlier than 17.5.0 .setNativeName("") #endif .addInput("VDB Points") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Points_Delete::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Delete points that are members of specific groups.\"\"\"\n\ \n\ @overview\n\ \n\ The OpenVDB Points Delete SOP allows deletion of points that are members\n\ of a supplied group(s).\n\ An invert toggle may be enabled to allow deleting points that are not\n\ members of the supplied group(s).\n\ \n\ @related\n\ - [OpenVDB Points Convert|Node:sop/DW_OpenVDBPointsConvert]\n\ - [OpenVDB Points Group|Node:sop/DW_OpenVDBPointsGroup]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } bool SOP_OpenVDB_Points_Delete::updateParmsFlags() { const bool invert = evalInt("invert", 0, 0) != 0; return enableParm("dropgroups", !invert); } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Points_Delete::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Points_Delete(net, name, op); } SOP_OpenVDB_Points_Delete::SOP_OpenVDB_Points_Delete(OP_Network* net, const char* name, OP_Operator* op) : hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Points_Delete::Cache::cookVDBSop(OP_Context& context) { try { const std::string groups = evalStdString("vdbpointsgroups", context.getTime()); // early exit if the VDB points group field is empty if (groups.empty()) return error(); UT_AutoInterrupt progress("Processing points group deletion"); const bool invert = evalInt("invert", 0, context.getTime()); const bool drop = evalInt("dropgroups", 0, context.getTime()); // select Houdini primitive groups we wish to use hvdb::VdbPrimIterator vdbIt(gdp, matchGroup(*gdp, evalStdString("group", context.getTime()))); for (; vdbIt; ++vdbIt) { if (progress.wasInterrupted()) { throw std::runtime_error("processing was interrupted"); } GU_PrimVDB* vdbPrim = *vdbIt; // Limit the lifetime of our const shared copies so // we don't have false-sharing when we go to make the // grid unique. 
std::vector<std::string> pointGroups; { PointDataGrid::ConstPtr inputGrid = openvdb::gridConstPtrCast<PointDataGrid>(vdbPrim->getConstGridPtr()); // early exit if the grid is of the wrong type if (!inputGrid) continue; // early exit if the tree is empty auto leafIter = inputGrid->tree().cbeginLeaf(); if (!leafIter) continue; // extract names of all selected VDB groups // the "exclude groups" parameter to parseNames is not used in this context, // so we disregard it by storing it in a temporary variable std::vector<std::string> tmp; AttributeSet::Descriptor::parseNames(pointGroups, tmp, groups); // determine if any of the requested groups are actually present in the tree const AttributeSet::Descriptor& descriptor = leafIter->attributeSet().descriptor(); const bool hasPointsToDrop = std::any_of(pointGroups.begin(), pointGroups.end(), [&descriptor](const std::string& group) { return descriptor.hasGroup(group); }); if (!hasPointsToDrop) continue; } // deep copy the VDB tree if it is not already unique vdbPrim->makeGridUnique(); PointDataGrid& outputGrid = UTvdbGridCast<PointDataGrid>(vdbPrim->getGrid()); deleteFromGroups(outputGrid.tree(), pointGroups, invert, drop); } } catch (const std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
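// Editor's note (illustrative sketch, not part of the original source file): the core
// OpenVDB call wrapped by this SOP is openvdb::points::deleteFromGroups(), shown here
// outside of the Houdini cook path. The grid argument and the group name "dead" are
// hypothetical; invert/drop mirror the SOP's "invert" and "dropgroups" toggles above.
// Wrapped in "#if 0" so it would have no effect if compiled.
#if 0
inline void deleteGroupSketch(openvdb::points::PointDataGrid& grid)
{
    // names of the point groups to delete from (the SOP parses these from the
    // "vdbpointsgroups" parameter via AttributeSet::Descriptor::parseNames)
    const std::vector<std::string> groupsToDelete{"dead"};
    const bool invert = false;  // false: delete group members; true: delete non-members
    const bool drop = true;     // also drop the now-unused group attributes
    openvdb::points::deleteFromGroups(grid.tree(), groupsToDelete, invert, drop);
}
#endif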
6,709
C++
29.639269
100
0.636011
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_To_Polygons.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_To_Polygons.cc /// /// @author FX R&D OpenVDB team /// /// @brief OpenVDB level set to polygon conversion #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/AttributeTransferUtil.h> #include <openvdb_houdini/GeometryUtil.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/GeometryUtil.h> #include <openvdb/tools/VolumeToMesh.h> #include <openvdb/tools/Mask.h> // for tools::interiorMask() #include <openvdb/tools/MeshToVolume.h> #include <openvdb/tools/Morphology.h> #include <openvdb/tools/LevelSetUtil.h> #include <openvdb/tools/Prune.h> #include <openvdb/math/Operators.h> #include <openvdb/math/Mat3.h> #include <CH/CH_Manager.h> #include <GA/GA_PageIterator.h> #include <GEO/GEO_PolyCounts.h> #include <GU/GU_ConvertParms.h> #include <GU/GU_Detail.h> #include <GU/GU_PolyReduce.h> #include <GU/GU_PrimPoly.h> #include <GU/GU_PrimPolySoup.h> #include <GU/GU_Surfacer.h> #include <PRM/PRM_Parm.h> #include <UT/UT_Interrupt.h> #include <UT/UT_UniquePtr.h> #include <hboost/algorithm/string/join.hpp> #include <list> #include <memory> #include <stdexcept> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; //////////////////////////////////////// class SOP_OpenVDB_To_Polygons: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_To_Polygons(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_To_Polygons() override {} static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned i) const override { return (i > 0); } class Cache: public SOP_VDBCacheOptions { protected: OP_ERROR cookVDBSop(OP_Context&) override; template<class GridType> void referenceMeshing( std::list<openvdb::GridBase::ConstPtr>&, openvdb::tools::VolumeToMesh&, const GU_Detail* refGeo, hvdb::Interrupter&, const fpreal time); }; protected: bool updateParmsFlags() override; void resolveObsoleteParms(PRM_ParmList*) override; }; void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Specify a subset of the input VDB grids to surface.") .setDocumentation( "A subset of the input VDB grids to be surfaced" " (see [specifying volumes|/model/volumes#group])")); // Geometry Type parms.add(hutil::ParmFactory(PRM_ORD, "geometrytype", "Geometry Type") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "polysoup", "Polygon Soup", "poly", "Polygons" }) .setTooltip( "Specify the type of geometry to output. A polygon soup is a primitive" " that stores polygons using a compact memory representation." 
" Not all geometry nodes can operate directly on this primitive.") .setDocumentation( "The type of geometry to output, either polygons or a polygon soup\n\n" "A [polygon soup|/model/primitives#polysoup] is a primitive" " that stores polygons using a compact memory representation.\n\n" "WARNING:\n" " Not all geometry nodes can operate directly on polygon soups.\n")); parms.add(hutil::ParmFactory(PRM_FLT_J, "isovalue", "Isovalue") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_UI, -1.0, PRM_RANGE_UI, 1.0) .setTooltip( "The voxel value that determines the surface\n\n" "Zero works for signed distance fields, while fog volumes require" " a larger positive value (0.5 is a good initial guess).")); parms.add(hutil::ParmFactory(PRM_FLT_J, "adaptivity", "Adaptivity") .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_RESTRICTED, 1.0) .setTooltip( "The adaptivity threshold determines how closely the output mesh follows" " the isosurface. A higher threshold enables more variation in polygon size," " allowing the surface to be represented with fewer polygons.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "computenormals", "Compute Vertex Normals") .setTooltip("Compute edge-preserving vertex normals.") .setDocumentation( "Compute edge-preserving vertex normals." " This uses the optional second input to help eliminate seams.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "keepvdbname", "Preserve VDB Name") .setTooltip("Mark each primitive with the corresponding VDB name.")); ////////// parms.add(hutil::ParmFactory(PRM_HEADING,"sep1", "Reference Options")); parms.add(hutil::ParmFactory(PRM_FLT_J, "internaladaptivity", "Internal Adaptivity") .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_RESTRICTED, 1.0) .setTooltip("Overrides the adaptivity threshold for all internal surfaces.") .setDocumentation( "When a reference surface is provided, this is the adaptivity threshold" " for regions that are inside the surface.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "transferattributes", "Transfer Surface Attributes") .setTooltip( "Transfer all attributes (primitive, vertex and point) from the reference surface.") .setDocumentation( "When a reference surface is provided, this option transfers all attributes\n" "(primitive, vertex and point) from the reference surface to the output geometry.\n" "\n" "NOTE:\n" " Primitive attribute values can't meaningfully be transferred to a\n" " polygon soup, because the entire polygon soup is a single primitive.\n" "\n" "NOTE:\n" " Computed vertex normals for primitives in the surface group\n" " will be overridden.\n")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "sharpenfeatures", "Sharpen Features") .setDefault(PRMoneDefaults) .setTooltip("Sharpen edges and corners.")); parms.add(hutil::ParmFactory(PRM_FLT_J, "edgetolerance", "Edge Tolerance") .setDefault(0.5) .setRange(PRM_RANGE_RESTRICTED, 0.0, PRM_RANGE_RESTRICTED, 1.0) .setTooltip("Controls the edge adaptivity mask.")); parms.add(hutil::ParmFactory(PRM_STRING, "surfacegroup", "Surface Group") .setDefault("surface_polygons") .setTooltip( "Specify a group for all polygons that are coincident with the reference surface.\n\n" "The group is useful for transferring attributes such as UV coordinates," " normals, etc. 
from the reference surface.")); parms.add(hutil::ParmFactory(PRM_STRING, "interiorgroup", "Interior Group") .setDefault("interior_polygons") .setTooltip( "Specify a group for all polygons that are interior to the reference surface.\n\n" "The group can be used to identify surface regions that might require" " projected UV coordinates or new materials.")); parms.add(hutil::ParmFactory(PRM_STRING, "seamlinegroup", "Seam Line Group") .setDefault("seam_polygons") .setTooltip( "Specify a group for all polygons that are in proximity to the seam lines.\n\n" "This group can be used to drive secondary elements such as debris and dust.")); parms.add(hutil::ParmFactory(PRM_STRING, "seampoints", "Seam Points") .setDefault("seam_points") .setTooltip( "Specify a group of the fracture seam points.\n\n" "This can be used to drive local pre-fracture dynamics," " e.g., local surface buckling.")); ////////// parms.add(hutil::ParmFactory(PRM_HEADING,"sep2", "Masking Options")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "surfacemask", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("Enable / disable the surface mask.")); parms.add(hutil::ParmFactory(PRM_STRING, "surfacemaskname", "Surface Mask") .setChoiceList(&hutil::PrimGroupMenuInput3) .setTooltip( "A single VDB whose active voxels or (if the VDB is a level set or SDF)\n" "interior voxels define the region to be meshed")); parms.add(hutil::ParmFactory(PRM_FLT_J, "surfacemaskoffset", "Offset") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_UI, -1.0, PRM_RANGE_UI, 1.0) .setTooltip( "Isovalue that determines the interior of the surface mask\n" "when the mask is a level set or SDF")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "invertsurfacemask", "Invert Surface Mask") .setTooltip("If enabled, mesh the complement of the mask.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "adaptivityfield", "") .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip("Enable / disable the the adaptivity field.")); parms.add(hutil::ParmFactory(PRM_STRING, "adaptivityfieldname", "Adaptivity Field") .setChoiceList(&hutil::PrimGroupMenuInput3) .setTooltip( "A single scalar VDB to be used as a spatial multiplier" " for the adaptivity threshold")); ////////// hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "smoothseams", "Smooth Seams")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "invertmask", "").setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "automaticpartitions", "")); obsoleteParms.add(hutil::ParmFactory(PRM_INT_J, "activepart", "")); hvdb::OpenVDBOpFactory("VDB to Polygons", SOP_OpenVDB_To_Polygons::factory, parms, *table) .setNativeName("") #ifndef SESI_OPENVDB .setInternalName("DW_OpenVDBToPolygons") #endif .setObsoleteParms(obsoleteParms) .addInput("OpenVDB grids to surface") .addOptionalInput("Optional reference surface. 
Can be used " "to transfer attributes, sharpen features and to " "eliminate seams from fractured pieces.") .addOptionalInput("Optional VDB masks") .setVerb(SOP_NodeVerb::COOK_GENERATOR, []() { return new SOP_OpenVDB_To_Polygons::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Convert VDB volumes into polygonal meshes.\"\"\"\n\ \n\ @overview\n\ \n\ This node converts the surfaces of VDB volumes, including level sets,\n\ into polygonal meshes.\n\ \n\ The second and third inputs are optional.\n\ The second input provides a reference polygon surface, which is useful\n\ for converting fractured VDBs back to polygons.\n\ The third input provides additional VDBs that can be used for masking\n\ (specifying which voxels to convert to polygons) and/or to specify\n\ an adaptivity multiplier.\n\ \n\ @related\n\ - [OpenVDB Convert|Node:sop/DW_OpenVDBConvert]\n\ - [OpenVDB Create|Node:sop/DW_OpenVDBCreate]\n\ - [OpenVDB From Particles|Node:sop/DW_OpenVDBFromParticles]\n\ - [Node:sop/convert]\n\ - [Node:sop/convertvolume]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } OP_Node* SOP_OpenVDB_To_Polygons::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_To_Polygons(net, name, op); } SOP_OpenVDB_To_Polygons::SOP_OpenVDB_To_Polygons(OP_Network* net, const char* name, OP_Operator* op): hvdb::SOP_NodeVDB(net, name, op) { } void SOP_OpenVDB_To_Polygons::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; // using the invertmask attribute to detect old houdini files that // had the regular polygon representation. PRM_Parm* parm = obsoleteParms->getParmPtr("invertmask"); if (parm && !parm->isFactoryDefault()) { setInt("geometrytype", 0, 0, 1); } hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } bool SOP_OpenVDB_To_Polygons::updateParmsFlags() { bool changed = false; const fpreal time = CHgetEvalTime(); const bool refexists = (nInputs() == 2); bool usePolygonSoup = evalInt("geometrytype", 0, time) == 0; changed |= enableParm("computenormals", !usePolygonSoup); changed |= enableParm("internaladaptivity", refexists); changed |= enableParm("surfacegroup", refexists); changed |= enableParm("interiorgroup", refexists); changed |= enableParm("seamlinegroup", refexists); changed |= enableParm("seampoints", refexists); changed |= enableParm("transferattributes", refexists); changed |= enableParm("sharpenfeatures", refexists); changed |= enableParm("edgetolerance", refexists); const bool maskexists = (nInputs() == 3); changed |= enableParm("surfacemask", maskexists); changed |= enableParm("adaptivitymask", maskexists); const bool surfacemask = bool(evalInt("surfacemask", 0, 0)); changed |= enableParm("surfacemaskname", maskexists && surfacemask); changed |= enableParm("surfacemaskoffset", maskexists && surfacemask); changed |= enableParm("invertsurfacemask", maskexists && surfacemask); const bool adaptivitymask = bool(evalInt("adaptivityfield", 0, 0)); changed |= enableParm("adaptivityfieldname", maskexists && adaptivitymask); return changed; } //////////////////////////////////////// void copyMesh(GU_Detail&, openvdb::tools::VolumeToMesh&, hvdb::Interrupter&, const bool usePolygonSoup = true, const char* gridName = nullptr, GA_PrimitiveGroup* surfaceGroup = nullptr, GA_PrimitiveGroup* interiorGroup = nullptr, GA_PrimitiveGroup* seamGroup = nullptr, GA_PointGroup* seamPointGroup = nullptr); void copyMesh( GU_Detail& detail, 
openvdb::tools::VolumeToMesh& mesher, hvdb::Interrupter&, const bool usePolygonSoup, const char* gridName, GA_PrimitiveGroup* surfaceGroup, GA_PrimitiveGroup* interiorGroup, GA_PrimitiveGroup* seamGroup, GA_PointGroup* seamPointGroup) { const openvdb::tools::PointList& points = mesher.pointList(); openvdb::tools::PolygonPoolList& polygonPoolList = mesher.polygonPoolList(); const char exteriorFlag = char(openvdb::tools::POLYFLAG_EXTERIOR); const char seamLineFlag = char(openvdb::tools::POLYFLAG_FRACTURE_SEAM); const GA_Index firstPrim = detail.getNumPrimitives(); GA_Size npoints = mesher.pointListSize(); const GA_Offset startpt = detail.appendPointBlock(npoints); UT_ASSERT_COMPILETIME(sizeof(openvdb::tools::PointList::element_type) == sizeof(UT_Vector3)); GA_RWHandleV3 pthandle(detail.getP()); pthandle.setBlock(startpt, npoints, reinterpret_cast<UT_Vector3*>(points.get())); // group fracture seam points if (seamPointGroup && GA_Size(mesher.pointFlags().size()) == npoints) { GA_Offset ptoff = startpt; for (GA_Size i = 0; i < npoints; ++i) { if (mesher.pointFlags()[i]) { seamPointGroup->addOffset(ptoff); } ++ptoff; } } // index 0 --> interior, not on seam // index 1 --> interior, on seam // index 2 --> surface, not on seam // index 3 --> surface, on seam GA_Size nquads[4] = {0, 0, 0, 0}; GA_Size ntris[4] = {0, 0, 0, 0}; for (size_t n = 0, N = mesher.polygonPoolListSize(); n < N; ++n) { const openvdb::tools::PolygonPool& polygons = polygonPoolList[n]; for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) { int flags = (((polygons.quadFlags(i) & exteriorFlag)!=0) << 1) | ((polygons.quadFlags(i) & seamLineFlag)!=0); ++nquads[flags]; } for (size_t i = 0, I = polygons.numTriangles(); i < I; ++i) { int flags = (((polygons.triangleFlags(i) & exteriorFlag)!=0) << 1) | ((polygons.triangleFlags(i) & seamLineFlag)!=0); ++ntris[flags]; } } GA_Size nverts[4] = { nquads[0]*4 + ntris[0]*3, nquads[1]*4 + ntris[1]*3, nquads[2]*4 + ntris[2]*3, nquads[3]*4 + ntris[3]*3 }; UT_IntArray verts[4]; for (int flags = 0; flags < 4; ++flags) { verts[flags].setCapacity(nverts[flags]); verts[flags].entries(nverts[flags]); } GA_Size iquad[4] = {0, 0, 0, 0}; GA_Size itri[4] = {nquads[0]*4, nquads[1]*4, nquads[2]*4, nquads[3]*4}; for (size_t n = 0, N = mesher.polygonPoolListSize(); n < N; ++n) { const openvdb::tools::PolygonPool& polygons = polygonPoolList[n]; // Copy quads for (size_t i = 0, I = polygons.numQuads(); i < I; ++i) { const openvdb::Vec4I& quad = polygons.quad(i); int flags = (((polygons.quadFlags(i) & exteriorFlag)!=0) << 1) | ((polygons.quadFlags(i) & seamLineFlag)!=0); verts[flags](iquad[flags]++) = quad[0]; verts[flags](iquad[flags]++) = quad[1]; verts[flags](iquad[flags]++) = quad[2]; verts[flags](iquad[flags]++) = quad[3]; } // Copy triangles (adaptive mesh) for (size_t i = 0, I = polygons.numTriangles(); i < I; ++i) { const openvdb::Vec3I& triangle = polygons.triangle(i); int flags = (((polygons.triangleFlags(i) & exteriorFlag)!=0) << 1) | ((polygons.triangleFlags(i) & seamLineFlag)!=0); verts[flags](itri[flags]++) = triangle[0]; verts[flags](itri[flags]++) = triangle[1]; verts[flags](itri[flags]++) = triangle[2]; } } bool shared_vertices = true; if (usePolygonSoup) { // NOTE: Since we could be using the same points for multiple // polysoups, and the shared vertices option assumes that // the points are only used by this polysoup, we have to // use the unique vertices option. 
int num_prims = 0; for (int flags = 0; flags < 4; ++flags) { if (!nquads[flags] && !ntris[flags]) continue; num_prims++; } shared_vertices = (num_prims <= 1); } for (int flags = 0; flags < 4; ++flags) { if (!nquads[flags] && !ntris[flags]) continue; GEO_PolyCounts sizelist; if (nquads[flags]) sizelist.append(4, nquads[flags]); if (ntris[flags]) sizelist.append(3, ntris[flags]); GA_Detail::OffsetMarker marker(detail); if (usePolygonSoup) { GU_PrimPolySoup::build( &detail, startpt, npoints, sizelist, verts[flags].array(), shared_vertices); } else { GU_PrimPoly::buildBlock(&detail, startpt, npoints, sizelist, verts[flags].array()); } GA_Range range(marker.primitiveRange()); //GA_Range pntRange(marker.pointRange()); /*GU_ConvertParms parms; parms.preserveGroups = true; GUconvertCopySingleVertexPrimAttribsAndGroups(parms, *srcvdb->getParent(), srcvdb->getMapOffset(), detail, range, pntRange);*/ //if (delgroup) delgroup->removeRange(range); if (seamGroup && (flags & 1)) seamGroup->addRange(range); if (surfaceGroup && (flags & 2)) surfaceGroup->addRange(range); if (interiorGroup && !(flags & 2)) interiorGroup->addRange(range); } // Keep VDB grid name const GA_Index lastPrim = detail.getNumPrimitives(); if (gridName != nullptr && firstPrim != lastPrim) { GA_RWAttributeRef aRef = detail.findPrimitiveAttribute("name"); if (!aRef.isValid()) aRef = detail.addStringTuple(GA_ATTRIB_PRIMITIVE, "name", 1); GA_Attribute * nameAttr = aRef.getAttribute(); if (nameAttr) { const GA_AIFSharedStringTuple * stringAIF = nameAttr->getAIFSharedStringTuple(); if (stringAIF) { GA_Range range(detail.getPrimitiveMap(), detail.primitiveOffset(firstPrim), detail.primitiveOffset(lastPrim)); stringAIF->setString(nameAttr, range, gridName, 0); } } } } //////////////////////////////////////// namespace { struct InteriorMaskOp { InteriorMaskOp(double iso = 0.0): inIsovalue(iso) {} template<typename GridType> void operator()(const GridType& grid) { outGridPtr = openvdb::tools::interiorMask(grid, inIsovalue); } const double inIsovalue; openvdb::BoolGrid::Ptr outGridPtr; }; // Extract a boolean mask from a grid of any type. inline hvdb::GridCPtr getMaskFromGrid(const hvdb::GridCPtr& gridPtr, double isovalue = 0.0) { hvdb::GridCPtr maskGridPtr; if (gridPtr) { if (gridPtr->isType<openvdb::BoolGrid>()) { // If the input grid is already boolean, return it. maskGridPtr = gridPtr; } else { InteriorMaskOp op{isovalue}; gridPtr->apply<hvdb::AllGridTypes>(op); maskGridPtr = op.outGridPtr; } } return maskGridPtr; } } // unnamed namespace //////////////////////////////////////// OP_ERROR SOP_OpenVDB_To_Polygons::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); hvdb::Interrupter boss("Surfacing VDB primitives"); const GU_Detail* vdbGeo = inputGeo(0); if (vdbGeo == nullptr) return error(); // Get the group of grids to surface. 
const GA_PrimitiveGroup* group = matchGroup(*vdbGeo, evalStdString("group", time)); hvdb::VdbPrimCIterator vdbIt(vdbGeo, group); if (!vdbIt) { addWarning(SOP_MESSAGE, "No VDB primitives found."); return error(); } // Eval attributes const bool usePolygonSoup = evalInt("geometrytype", 0, time) == 0; const double adaptivity = double(evalFloat("adaptivity", 0, time)); const double iso = double(evalFloat("isovalue", 0, time)); const bool computeNormals = !usePolygonSoup && evalInt("computenormals", 0, time); const bool keepVdbName = evalInt("keepvdbname", 0, time); const float maskoffset = static_cast<float>(evalFloat("surfacemaskoffset", 0, time)); const bool invertmask = evalInt("invertsurfacemask", 0, time); // Setup level set mesher openvdb::tools::VolumeToMesh mesher(iso, adaptivity); // Check mask input const GU_Detail* maskGeo = inputGeo(2); if (maskGeo) { if (evalInt("surfacemask", 0, time)) { const auto maskStr = evalStdString("surfacemaskname", time); const GA_PrimitiveGroup* maskGroup = parsePrimitiveGroups(maskStr.c_str(), GroupCreator(maskGeo)); if (!maskGroup && !maskStr.empty()) { addWarning(SOP_MESSAGE, "Surface mask not found."); } else { hvdb::VdbPrimCIterator maskIt(maskGeo, maskGroup); if (maskIt) { if (auto maskGridPtr = getMaskFromGrid(maskIt->getGridPtr(), maskoffset)) { mesher.setSurfaceMask(maskGridPtr, invertmask); } else { std::string mesg = "Surface mask " + maskIt.getPrimitiveNameOrIndex().toStdString() + " of type " + maskIt->getGrid().type() + " is not supported."; addWarning(SOP_MESSAGE, mesg.c_str()); } } } } if (evalInt("adaptivityfield", 0, time)) { const auto maskStr = evalStdString("adaptivityfieldname", time); const GA_PrimitiveGroup* maskGroup = matchGroup(*maskGeo, maskStr); if (!maskGroup && !maskStr.empty()) { addWarning(SOP_MESSAGE, "Adaptivity field not found."); } else { hvdb::VdbPrimCIterator maskIt(maskGeo, maskGroup); if (maskIt) { openvdb::FloatGrid::ConstPtr grid = openvdb::gridConstPtrCast<openvdb::FloatGrid>(maskIt->getGridPtr()); mesher.setSpatialAdaptivity(grid); } } } } // Check reference input const GU_Detail* refGeo = inputGeo(1); if (refGeo) { // Collect all level set grids. std::list<openvdb::GridBase::ConstPtr> grids; std::vector<std::string> nonLevelSetList, nonLinearList; for (; vdbIt; ++vdbIt) { if (boss.wasInterrupted()) break; const openvdb::GridClass gridClass = vdbIt->getGrid().getGridClass(); if (gridClass != openvdb::GRID_LEVEL_SET) { nonLevelSetList.push_back(vdbIt.getPrimitiveNameOrIndex().toStdString()); continue; } if (!vdbIt->getGrid().transform().isLinear()) { nonLinearList.push_back(vdbIt.getPrimitiveNameOrIndex().toStdString()); continue; } // (We need a shallow copy to sync primitive & grid names). 
grids.push_back(vdbIt->getGrid().copyGrid()); openvdb::ConstPtrCast<openvdb::GridBase>(grids.back())->setName( vdbIt->getGridName()); } if (!nonLevelSetList.empty()) { std::string s = "Reference meshing is only supported for " "Level Set grids, the following grids were skipped: '" + hboost::algorithm::join(nonLevelSetList, ", ") + "'."; addWarning(SOP_MESSAGE, s.c_str()); } if (!nonLinearList.empty()) { std::string s = "The following grids were skipped: '" + hboost::algorithm::join(nonLinearList, ", ") + "' because they don't have a linear/affine transform."; addWarning(SOP_MESSAGE, s.c_str()); } // Mesh using a reference surface if (!grids.empty() && !boss.wasInterrupted()) { if (grids.front()->isType<openvdb::FloatGrid>()) { referenceMeshing<openvdb::FloatGrid>(grids, mesher, refGeo, boss, time); } else if (grids.front()->isType<openvdb::DoubleGrid>()) { referenceMeshing<openvdb::DoubleGrid>(grids, mesher, refGeo, boss, time); } else { addError(SOP_MESSAGE, "Unsupported grid type."); } } } else { // Mesh each VDB primitive independently for (; vdbIt; ++vdbIt) { if (boss.wasInterrupted()) break; hvdb::GEOvdbApply<hvdb::ScalarGridTypes>(**vdbIt, mesher); copyMesh(*gdp, mesher, boss, usePolygonSoup, keepVdbName ? vdbIt.getPrimitive()->getGridName() : nullptr); } if (!boss.wasInterrupted() && computeNormals) { UTparallelFor(GA_SplittableRange(gdp->getPrimitiveRange()), hvdb::VertexNormalOp(*gdp)); } } if (boss.wasInterrupted()) { addWarning(SOP_MESSAGE, "Process was interrupted"); } boss.end(); } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); } template<class GridType> void SOP_OpenVDB_To_Polygons::Cache::referenceMeshing( std::list<openvdb::GridBase::ConstPtr>& grids, openvdb::tools::VolumeToMesh& mesher, const GU_Detail* refGeo, hvdb::Interrupter& boss, const fpreal time) { if (refGeo == nullptr) return; const bool usePolygonSoup = evalInt("geometrytype", 0, time) == 0; const bool computeNormals = !usePolygonSoup && evalInt("computenormals", 0, time); const bool transferAttributes = evalInt("transferattributes", 0, time); const bool keepVdbName = evalInt("keepvdbname", 0, time); const bool sharpenFeatures = evalInt("sharpenfeatures", 0, time); const float edgetolerance = static_cast<float>(evalFloat("edgetolerance", 0, time)); using TreeType = typename GridType::TreeType; using ValueType = typename GridType::ValueType; // Get the first grid's transform and background value. 
openvdb::math::Transform::Ptr transform = grids.front()->transform().copy(); typename GridType::ConstPtr firstGrid = openvdb::gridConstPtrCast<GridType>(grids.front()); if (!firstGrid) { addError(SOP_MESSAGE, "Unsupported grid type."); return; } const ValueType backgroundValue = firstGrid->background(); const openvdb::GridClass gridClass = firstGrid->getGridClass(); typename GridType::ConstPtr refGrid; using IntGridT = typename GridType::template ValueConverter<openvdb::Int32>::Type; typename IntGridT::Ptr indexGrid; // replace openvdb::tools::MeshToVoxelEdgeData edgeData; # if 0 // Check for reference VDB { const GA_PrimitiveGroup *refGroup = matchGroup(*refGeo, ""); hvdb::VdbPrimCIterator refIt(refGeo, refGroup); if (refIt) { const openvdb::GridClass refClass = refIt->getGrid().getGridClass(); if (refIt && refClass == openvdb::GRID_LEVEL_SET) { refGrid = openvdb::gridConstPtrCast<GridType>(refIt->getGridPtr()); } } } #endif // Check for reference mesh UT_UniquePtr<GU_Detail> geoPtr; if (!refGrid) { std::string warningStr; geoPtr = hvdb::convertGeometry(*refGeo, warningStr, &boss); if (geoPtr) { refGeo = geoPtr.get(); if (!warningStr.empty()) addWarning(SOP_MESSAGE, warningStr.c_str()); } std::vector<openvdb::Vec3s> pointList; std::vector<openvdb::Vec4I> primList; pointList.resize(refGeo->getNumPoints()); primList.resize(refGeo->getNumPrimitives()); UTparallelFor(GA_SplittableRange(refGeo->getPointRange()), hvdb::TransformOp(refGeo, *transform, pointList)); UTparallelFor(GA_SplittableRange(refGeo->getPrimitiveRange()), hvdb::PrimCpyOp(refGeo, primList)); if (boss.wasInterrupted()) return; openvdb::tools::QuadAndTriangleDataAdapter<openvdb::Vec3s, openvdb::Vec4I> mesh(pointList, primList); float bandWidth = 3.0; if (gridClass != openvdb::GRID_LEVEL_SET) { bandWidth = float(backgroundValue) / float(transform->voxelSize()[0]); } indexGrid.reset(new IntGridT(0)); refGrid = openvdb::tools::meshToVolume<GridType>(boss, mesh, *transform, bandWidth, bandWidth, 0, indexGrid.get()); if (sharpenFeatures) edgeData.convert(pointList, primList); } if (boss.wasInterrupted()) return; using BoolTreeType = typename TreeType::template ValueConverter<bool>::Type; typename BoolTreeType::Ptr maskTree; if (sharpenFeatures) { maskTree = typename BoolTreeType::Ptr(new BoolTreeType(false)); maskTree->topologyUnion(indexGrid->tree()); openvdb::tree::LeafManager<BoolTreeType> maskLeafs(*maskTree); hvdb::GenAdaptivityMaskOp<typename IntGridT::TreeType, BoolTreeType> op(*refGeo, indexGrid->tree(), maskLeafs, edgetolerance); op.run(); openvdb::tools::pruneInactive(*maskTree); openvdb::tools::dilateVoxels(*maskTree, 2); mesher.setAdaptivityMask(maskTree); } if (boss.wasInterrupted()) return; const double iadaptivity = double(evalFloat("internaladaptivity", 0, time)); mesher.setRefGrid(refGrid, iadaptivity); std::vector<std::string> badTransformList, badBackgroundList, badTypeList; GA_PrimitiveGroup *surfaceGroup = nullptr, *interiorGroup = nullptr, *seamGroup = nullptr; GA_PointGroup* seamPointGroup = nullptr; { UT_String newGroupStr; evalString(newGroupStr, "surfacegroup", 0, time); if(newGroupStr.length() > 0) { surfaceGroup = gdp->findPrimitiveGroup(newGroupStr); if (!surfaceGroup) surfaceGroup = gdp->newPrimitiveGroup(newGroupStr); } evalString(newGroupStr, "interiorgroup", 0, time); if(newGroupStr.length() > 0) { interiorGroup = gdp->findPrimitiveGroup(newGroupStr); if (!interiorGroup) interiorGroup = gdp->newPrimitiveGroup(newGroupStr); } evalString(newGroupStr, "seamlinegroup", 0, time); if(newGroupStr.length() > 
0) { seamGroup = gdp->findPrimitiveGroup(newGroupStr); if (!seamGroup) seamGroup = gdp->newPrimitiveGroup(newGroupStr); } evalString(newGroupStr, "seampoints", 0, time); if(newGroupStr.length() > 0) { seamPointGroup = gdp->findPointGroup(newGroupStr); if (!seamPointGroup) seamPointGroup = gdp->newPointGroup(newGroupStr); } } for (auto it = grids.begin(); it != grids.end(); ++it) { if (boss.wasInterrupted()) break; typename GridType::ConstPtr grid = openvdb::gridConstPtrCast<GridType>(*it); if (!grid) { badTypeList.push_back(grid->getName()); continue; } if (grid->transform() != *transform) { badTransformList.push_back(grid->getName()); continue; } if (!openvdb::math::isApproxEqual(grid->background(), backgroundValue)) { badBackgroundList.push_back(grid->getName()); continue; } mesher(*grid); copyMesh(*gdp, mesher, boss, usePolygonSoup, keepVdbName ? grid->getName().c_str() : nullptr, surfaceGroup, interiorGroup, seamGroup, seamPointGroup); } grids.clear(); // Sharpen Features if (!boss.wasInterrupted() && sharpenFeatures) { UTparallelFor(GA_SplittableRange(gdp->getPointRange()), hvdb::SharpenFeaturesOp( *gdp, *refGeo, edgeData, *transform, surfaceGroup, maskTree.get())); } // Compute vertex normals if (!boss.wasInterrupted() && computeNormals) { UTparallelFor(GA_SplittableRange(gdp->getPrimitiveRange()), hvdb::VertexNormalOp(*gdp, interiorGroup, (transferAttributes ? -1.0f : 0.7f) )); if (!interiorGroup) { addWarning(SOP_MESSAGE, "More accurate vertex normals can be generated " "if the interior polygon group is enabled."); } } // Transfer primitive attributes if (!boss.wasInterrupted() && transferAttributes && refGeo && indexGrid) { hvdb::transferPrimitiveAttributes(*refGeo, *gdp, *indexGrid, boss, surfaceGroup); } if (!badTransformList.empty()) { std::string s = "The following grids were skipped: '" + hboost::algorithm::join(badTransformList, ", ") + "' because they don't match the transform of the first grid."; addWarning(SOP_MESSAGE, s.c_str()); } if (!badBackgroundList.empty()) { std::string s = "The following grids were skipped: '" + hboost::algorithm::join(badBackgroundList, ", ") + "' because they don't match the background value of the first grid."; addWarning(SOP_MESSAGE, s.c_str()); } if (!badTypeList.empty()) { std::string s = "The following grids were skipped: '" + hboost::algorithm::join(badTypeList, ", ") + "' because they don't have the same data type as the first grid."; addWarning(SOP_MESSAGE, s.c_str()); } }
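// Editor's usage sketch (illustrative only, not part of the original source file): the
// core meshing call this SOP wraps, stripped of the Houdini plumbing. The grid "sdf"
// and the adaptivity value 0.25 are hypothetical; pointList() and polygonPoolList()
// are the same accessors consumed by copyMesh() above. Wrapped in "#if 0" so it would
// have no effect if compiled.
#if 0
inline void meshLevelSetSketch(const openvdb::FloatGrid& sdf)
{
    // isovalue 0 surfaces a signed distance field; adaptivity in [0,1] permits larger
    // polygons in flat regions, matching the SOP's "isovalue" and "adaptivity" parms
    openvdb::tools::VolumeToMesh mesher(/*isovalue=*/0.0, /*adaptivity=*/0.25);
    mesher(sdf);

    // the resulting vertex list and quad/triangle pools would then be copied into the
    // host application's mesh representation, as copyMesh() does for a GU_Detail
    const openvdb::tools::PointList& points = mesher.pointList();
    const openvdb::tools::PolygonPoolList& polys = mesher.polygonPoolList();
    (void)points; (void)polys;
}
#endif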
35,996
C++
36.263975
99
0.613679
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Combine.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Combine.cc /// /// @author FX R&D OpenVDB team #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/math/Math.h> // for isFinite() #include <openvdb/tools/ChangeBackground.h> #include <openvdb/tools/Composite.h> #include <openvdb/tools/GridTransformer.h> // for resampleToMatch() #include <openvdb/tools/LevelSetRebuild.h> // for levelSetRebuild() #include <openvdb/tools/Morphology.h> // for deactivate() #include <openvdb/tools/Prune.h> #include <openvdb/tools/SignedFloodFill.h> #include <openvdb/util/NullInterrupter.h> #include <PRM/PRM_Parm.h> #include <UT/UT_Interrupt.h> #include <algorithm> // for std::min() #include <cctype> // for isspace() #include <iomanip> #include <set> #include <sstream> #include <stdexcept> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; namespace { // // Operations // enum Operation { OP_COPY_A, // A OP_COPY_B, // B OP_INVERT, // 1 - A OP_ADD, // A + B OP_SUBTRACT, // A - B OP_MULTIPLY, // A * B OP_DIVIDE, // A / B OP_MAXIMUM, // max(A, B) OP_MINIMUM, // min(A, B) OP_BLEND1, // (1 - A) * B OP_BLEND2, // A + (1 - A) * B OP_UNION, // CSG A u B OP_INTERSECTION, // CSG A n B OP_DIFFERENCE, // CSG A / B OP_REPLACE, // replace A with B OP_TOPO_UNION, // A u active(B) OP_TOPO_INTERSECTION, // A n active(B) OP_TOPO_DIFFERENCE // A / active(B) }; enum { OP_FIRST = OP_COPY_A, OP_LAST = OP_TOPO_DIFFERENCE }; //#define TIMES " \xd7 " // ISO-8859 multiplication symbol #define TIMES " * " const char* const sOpMenuItems[] = { "copya", "Copy A", "copyb", "Copy B", "inverta", "Invert A", "add", "Add", "subtract", "Subtract", "multiply", "Multiply", "divide", "Divide", "maximum", "Maximum", "minimum", "Minimum", "compatimesb", "(1 - A)" TIMES "B", "apluscompatimesb", "A + (1 - A)" TIMES "B", "sdfunion", "SDF Union", "sdfintersect", "SDF Intersection", "sdfdifference", "SDF Difference", "replacewithactive", "Replace A with Active B", "topounion", "Activity Union", "topointersect", "Activity Intersection", "topodifference", "Activity Difference", nullptr }; #undef TIMES inline Operation asOp(int i, Operation defaultOp = OP_COPY_A) { return (i >= OP_FIRST && i <= OP_LAST) ? static_cast<Operation>(i) : defaultOp; } inline bool needAGrid(Operation op) { return (op != OP_COPY_B); } inline bool needBGrid(Operation op) { return (op != OP_COPY_A && op != OP_INVERT); } inline bool needLevelSets(Operation op) { return (op == OP_UNION || op == OP_INTERSECTION || op == OP_DIFFERENCE); } // // Resampling options // enum ResampleMode { RESAMPLE_OFF, // don't auto-resample grids RESAMPLE_B, // resample B to match A RESAMPLE_A, // resample A to match B RESAMPLE_HI_RES, // resample higher-res grid to match lower-res RESAMPLE_LO_RES // resample lower-res grid to match higher-res }; enum { RESAMPLE_MODE_FIRST = RESAMPLE_OFF, RESAMPLE_MODE_LAST = RESAMPLE_LO_RES }; const char* const sResampleModeMenuItems[] = { "off", "Off", "btoa", "B to Match A", "atob", "A to Match B", "hitolo", "Higher-res to Match Lower-res", "lotohi", "Lower-res to Match Higher-res", nullptr }; inline ResampleMode asResampleMode(exint i, ResampleMode defaultMode = RESAMPLE_B) { return (i >= RESAMPLE_MODE_FIRST && i <= RESAMPLE_MODE_LAST) ? 
static_cast<ResampleMode>(i) : defaultMode; } // // Collation options // enum CollationMode { COLL_PAIRS = 0, COLL_A_WITH_1ST_B, COLL_FLATTEN_A, COLL_FLATTEN_B_TO_A, COLL_FLATTEN_A_GROUPS }; inline CollationMode asCollation(const std::string& str) { if (str == "pairs") return COLL_PAIRS; if (str == "awithfirstb") return COLL_A_WITH_1ST_B; if (str == "flattena") return COLL_FLATTEN_A; if (str == "flattenbtoa") return COLL_FLATTEN_B_TO_A; if (str == "flattenagroups") return COLL_FLATTEN_A_GROUPS; throw std::runtime_error{"invalid collation mode \"" + str + "\""}; } } // anonymous namespace /// @brief SOP to combine two VDB grids via various arithmetic operations class SOP_OpenVDB_Combine: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Combine(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Combine() override {} static OP_Node* factory(OP_Network*, const char*, OP_Operator*); class Cache: public SOP_VDBCacheOptions { public: fpreal getTime() const { return mTime; } protected: OP_ERROR cookVDBSop(OP_Context&) override; private: hvdb::GridPtr combineGrids(Operation, hvdb::GridCPtr aGrid, hvdb::GridCPtr bGrid, const UT_String& aGridName, const UT_String& bGridName, ResampleMode resample); fpreal mTime = 0.0; }; // class Cache protected: bool updateParmsFlags() override; void resolveObsoleteParms(PRM_ParmList*) override; private: template<typename> struct DispatchOp; struct CombineOp; }; //////////////////////////////////////// void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; // Group A parms.add(hutil::ParmFactory(PRM_STRING, "agroup", "Group A") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Use a subset of the first input as the A VDB(s).") .setDocumentation( "The VDBs to be used from the first input" " (see [specifying volumes|/model/volumes#group])")); // Group B parms.add(hutil::ParmFactory(PRM_STRING, "bgroup", "Group B") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip("Use a subset of the second input as the B VDB(s).") .setDocumentation( "The VDBs to be used from the second input" " (see [specifying volumes|/model/volumes#group])")); parms.add(hutil::ParmFactory(PRM_STRING, "collation", "Collation") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "pairs", "Combine A/B Pairs", "awithfirstb", "Combine Each A With First B", "flattena", "Flatten All A", "flattenbtoa", "Flatten All B Into First A", "flattenagroups", "Flatten A Groups" }) .setDefault("pairs") .setTooltip("Specify the order in which to combine VDBs from the A and/or B groups.") .setDocumentation("\ The order in which to combine VDBs from the _A_ and/or _B_ groups\n\ \n\ Combine _A_/_B_ Pairs:\n\ Combine pairs of _A_ and _B_ VDBs, in the order in which they appear\n\ in their respective groups.\n\ Combine Each _A_ With First _B_:\n\ Combine each _A_ VDB with the first _B_ VDB.\n\ Flatten All _A_:\n\ Collapse all of the _A_ VDBs into a single output VDB.\n\ Flatten All _B_ Into First _A_:\n\ Accumulate each _B_ VDB into the first _A_ VDB, producing a single output VDB.\n\ Flatten _A_ Groups:\n\ Collapse VDBs within each _A_ group, producing one output VDB for each group.\n\ \n\ Space-separated group patterns are treated as distinct groups in this mode.\n\ For example, \"`@name=x* @name=y*`\" results in two output VDBs\n\ (provided that there is at least one _A_ VDB whose name starts with `x`\n\ and at least one whose name starts with `y`).\n\ ")); // Menu of available operations parms.add(hutil::ParmFactory(PRM_ORD, "operation", "Operation") 
.setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, sOpMenuItems) .setDocumentation("\ Each voxel that is active in either of the input VDBs\n\ will be processed with this operation.\n\ \n\ Copy _A_:\n\ Use _A_, ignore _B_.\n\ \n\ Copy _B_:\n\ Use _B_, ignore _A_.\n\ \n\ Invert _A_:\n\ Use 0 &minus; _A_.\n\ \n\ Add:\n\ Add the values of _A_ and _B_.\n\ \n\ NOTE:\n\ Using this for fog volumes, which have density values between 0 and 1,\n\ will push densities over 1 and cause a bright interface between the\n\ input volumes when rendered. To avoid this problem, try using the\n\ _A_&nbsp;+&nbsp;(1&nbsp;&minus;&nbsp;_A_)&nbsp;&times;&nbsp;_B_\n\ operation.\n\ \n\ Subtract:\n\ Subtract the values of _B_ from the values of _A_.\n\ \n\ Multiply:\n\ Multiply the values of _A_ and _B_.\n\ \n\ Divide:\n\ Divide the values of _A_ by _B_.\n\ \n\ Maximum:\n\ Use the maximum of each corresponding value from _A_ and _B_.\n\ \n\ NOTE:\n\ Using this for fog volumes, which have density values between 0 and 1,\n\ can produce a dark interface between the inputs when rendered, due to\n\ the binary nature of choosing a value from either from _A_ or _B_.\n\ To avoid this problem, try using the\n\ (1&nbsp;&minus;&nbsp;_A_)&nbsp;&times;&nbsp;_B_ operation.\n\ \n\ Minimum:\n\ Use the minimum of each corresponding value from _A_ and _B_.\n\ \n\ (1&nbsp;&minus;&nbsp;_A_)&nbsp;&times;&nbsp;_B_:\n\ This is similar to SDF Difference, except for fog volumes,\n\ and can also be viewed as \"soft cutout\" operation.\n\ It is typically used to clear out an area around characters\n\ in a dust simulation or some other environmental volume.\n\ \n\ _A_&nbsp;+&nbsp;(1&nbsp;&minus;&nbsp;_A_)&nbsp;&times;&nbsp;_B_:\n\ This is similar to SDF Union, except for fog volumes, and\n\ can also be viewed as a \"soft union\" or \"merge\" operation.\n\ Consider using this over the Maximum or Add operations\n\ for fog volumes.\n\ \n\ SDF Union:\n\ Generate the union of signed distance fields _A_ and _B_.\n\ \n\ SDF Intersection:\n\ Generate the intersection of signed distance fields _A_ and _B_.\n\ \n\ SDF Difference:\n\ Remove signed distance field _B_ from signed distance field _A_.\n\ \n\ Replace _A_ with Active _B_:\n\ Copy the active voxels of _B_ into _A_.\n\ \n\ Activity Union:\n\ Make voxels active if they are active in either _A_ or _B_.\n\ \n\ Activity Intersection:\n\ Make voxels active if they are active in both _A_ and _B_.\n\ \n\ It is recommended to enable pruning when using this operation.\n\ \n\ Activity Difference:\n\ Make voxels active if they are active in _A_ but not in _B_.\n\ \n\ It is recommended to enable pruning when using this operation.\n")); // Scalar multiplier on the A grid parms.add(hutil::ParmFactory(PRM_FLT_J, "amult", "A Multiplier") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_UI, -10, PRM_RANGE_UI, 10) .setTooltip( "Multiply voxel values in the A VDB by a scalar\n" "before combining the A VDB with the B VDB.")); // Scalar multiplier on the B grid parms.add(hutil::ParmFactory(PRM_FLT_J, "bmult", "B Multiplier") .setDefault(PRMoneDefaults) .setRange(PRM_RANGE_UI, -10, PRM_RANGE_UI, 10) .setTooltip( "Multiply voxel values in the B VDB by a scalar\n" "before combining the A VDB with the B VDB.")); // Menu of resampling options parms.add(hutil::ParmFactory(PRM_ORD, "resample", "Resample") .setDefault(PRMoneDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, sResampleModeMenuItems) .setTooltip( "If the A and B VDBs have different transforms, one VDB should\n" "be resampled to match the 
other before the two are combined.\n" "Also, level set VDBs should have matching background values\n" "(i.e., matching narrow band widths).")); // Menu of resampling interpolation order options parms.add(hutil::ParmFactory(PRM_ORD, "resampleinterp", "Interpolation") .setDefault(PRMoneDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "point", "Nearest", "linear", "Linear", "quadratic", "Quadratic" }) .setTooltip( "Specify the type of interpolation to be used when\n" "resampling one VDB to match the other's transform.") .setDocumentation( "The type of interpolation to be used when resampling one VDB" " to match the other's transform\n\n" "Nearest neighbor interpolation is fast but can introduce noticeable" " sampling artifacts. Quadratic interpolation is slow but high-quality." " Linear interpolation is intermediate in speed and quality.")); // Deactivate background value toggle parms.add(hutil::ParmFactory(PRM_TOGGLE, "deactivate", "Deactivate Background Voxels") .setDefault(PRMzeroDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setDocumentation( "Deactivate active output voxels whose values equal" " the output VDB's background value.")); // Deactivation tolerance slider parms.add(hutil::ParmFactory(PRM_FLT_J, "bgtolerance", "Deactivate Tolerance") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 1) .setTooltip( "Deactivate active output voxels whose values\n" "equal the output VDB's background value.\n" "Voxel values are considered equal if they differ\n" "by less than the specified tolerance.") .setDocumentation( "When deactivation of background voxels is enabled," " voxel values are considered equal to the background" " if they differ by less than this tolerance.")); // Prune toggle parms.add(hutil::ParmFactory(PRM_TOGGLE, "prune", "Prune") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setDocumentation( "Reduce the memory footprint of output VDBs that have" " (sufficiently large) regions of voxels with the same value.\n\n" "NOTE:\n" " Pruning affects only the memory usage of a VDB.\n" " It does not remove voxels, apart from inactive voxels\n" " whose value is equal to the background.")); // Pruning tolerance slider parms.add(hutil::ParmFactory(PRM_FLT_J, "tolerance", "Prune Tolerance") .setDefault(PRMzeroDefaults) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 1) .setTooltip( "Collapse regions of constant value in output VDBs.\n" "Voxel values are considered equal if they differ\n" "by less than the specified tolerance.") .setDocumentation( "When pruning is enabled, voxel values are considered equal" " if they differ by less than the specified tolerance.")); // Flood fill toggle parms.add(hutil::ParmFactory(PRM_TOGGLE, "flood", "Signed-Flood-Fill Output SDFs") .setDefault(PRMzeroDefaults) .setTooltip( "Reclassify inactive voxels of level set VDBs as either inside or outside.") .setDocumentation( "Test inactive voxels to determine if they are inside or outside of an SDF" " and hence whether they should have negative or positive sign.")); // Obsolete parameters hutil::ParmList obsoleteParms; obsoleteParms.add(hutil::ParmFactory(PRM_ORD, "combination", "Operation") .setDefault(-2)); obsoleteParms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep1", "")); obsoleteParms.add(hutil::ParmFactory(PRM_SEPARATOR, "sep2", "")); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "flatten", "Flatten All B into A") .setDefault(PRMzeroDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_TOGGLE, "pairs", "Combine A/B Pairs") .setDefault(PRMoneDefaults)); 
obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "groupA", "Group A")); obsoleteParms.add(hutil::ParmFactory(PRM_STRING, "groupB", "Group B")); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "mult_a", "A Multiplier") .setDefault(PRMoneDefaults)); obsoleteParms.add(hutil::ParmFactory(PRM_FLT_J, "mult_b", "B Multiplier") .setDefault(PRMoneDefaults)); // Register SOP hvdb::OpenVDBOpFactory("VDB Combine", SOP_OpenVDB_Combine::factory, parms, *table) .addInput("A VDBs") .addOptionalInput("B VDBs") .setObsoleteParms(obsoleteParms) .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Combine::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Combine the values of VDB volumes in various ways.\"\"\"\n\ \n\ @related\n\ \n\ - [Node:sop/vdbcombine]\n\ - [Node:sop/volumevop]\n\ - [Node:sop/volumemix]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } //////////////////////////////////////// OP_Node* SOP_OpenVDB_Combine::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Combine(net, name, op); } SOP_OpenVDB_Combine::SOP_OpenVDB_Combine(OP_Network* net, const char* name, OP_Operator* op) : SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// void SOP_OpenVDB_Combine::resolveObsoleteParms(PRM_ParmList* obsoleteParms) { if (!obsoleteParms) return; const fpreal time = 0.0; if (PRM_Parm* parm = obsoleteParms->getParmPtr("combination")) { if (!parm->isFactoryDefault()) { // The "combination" choices (union, intersection, difference) from // the old CSG SOP were appended to this SOP's "operation" list. switch (obsoleteParms->evalInt("combination", 0, time)) { case 0: setInt("operation", 0, 0.0, OP_UNION); break; case 1: setInt("operation", 0, 0.0, OP_INTERSECTION); break; case 2: setInt("operation", 0, 0.0, OP_DIFFERENCE); break; } } } { PRM_Parm *flatten = obsoleteParms->getParmPtr("flatten"), *pairs = obsoleteParms->getParmPtr("pairs"); if (flatten && !flatten->isFactoryDefault()) { // factory default was Off setString("flattenbtoa", CH_STRING_LITERAL, "collation", 0, time); } else if (pairs && !pairs->isFactoryDefault()) { // factory default was On setString("awithfirstb", CH_STRING_LITERAL, "collation", 0, time); } } resolveRenamedParm(*obsoleteParms, "groupA", "agroup"); resolveRenamedParm(*obsoleteParms, "groupB", "bgroup"); resolveRenamedParm(*obsoleteParms, "mult_a", "amult"); resolveRenamedParm(*obsoleteParms, "mult_b", "bmult"); // Delegate to the base class. hvdb::SOP_NodeVDB::resolveObsoleteParms(obsoleteParms); } // Enable or disable parameters in the UI. bool SOP_OpenVDB_Combine::updateParmsFlags() { bool changed = false; changed |= enableParm("resampleinterp", evalInt("resample", 0, 0) != 0); changed |= enableParm("bgtolerance", evalInt("deactivate", 0, 0) != 0); changed |= enableParm("tolerance", evalInt("prune", 0, 0) != 0); return changed; } //////////////////////////////////////// namespace { using StringVec = std::vector<std::string>; // Split a string into group patterns separated by whitespace. // For example, given '@name=d* @id="1 2" {grp1 grp2}', return // ['@name=d*', '@id="1 2"', '{grp1 grp2}']. // (This is nonstandard. Normally, multiple patterns are unioned // to define a single group.) // Nesting of quotes and braces is not supported. 
inline StringVec splitPatterns(const std::string& str) { StringVec patterns; bool quoted = false, braced = false; std::string pattern; for (const auto c: str) { if (isspace(c)) { if (pattern.empty()) continue; // skip whitespace between patterns if (quoted || braced) { pattern.push_back(c); // keep whitespace within quotes or braces } else { // At the end of a pattern. Start a new pattern. patterns.push_back(pattern); pattern.clear(); quoted = braced = false; } } else { switch (c) { case '"': quoted = !quoted; break; case '{': braced = true; break; case '}': braced = false; break; default: break; } pattern.push_back(c); } } if (!pattern.empty()) { patterns.push_back(pattern); } // add the final pattern // If no patterns were found, add an empty pattern, which matches everything. if (patterns.empty()) { patterns.push_back(""); } return patterns; } inline UT_String getGridName(const GU_PrimVDB* vdb, const UT_String& defaultName = "") { UT_String name{UT_String::ALWAYS_DEEP}; if (vdb != nullptr) { name = vdb->getGridName(); if (!name.isstring()) name = defaultName; } return name; } } // anonymous namespace OP_ERROR SOP_OpenVDB_Combine::Cache::cookVDBSop(OP_Context& context) { try { UT_AutoInterrupt progress{"Combining VDBs"}; mTime = context.getTime(); const Operation op = asOp(static_cast<int>(evalInt("operation", 0, getTime()))); const ResampleMode resample = asResampleMode(evalInt("resample", 0, getTime())); const CollationMode collation = asCollation(evalStdString("collation", getTime())); const bool flattenA = ((collation == COLL_FLATTEN_A) || (collation == COLL_FLATTEN_A_GROUPS)), flatten = (flattenA || (collation == COLL_FLATTEN_B_TO_A)), needA = needAGrid(op), needB = (needBGrid(op) && !flattenA); GU_Detail* aGdp = gdp; const GU_Detail* bGdp = inputGeo(1, context); const auto aGroupStr = evalStdString("agroup", getTime()); const auto bGroupStr = evalStdString("bgroup", getTime()); const auto* bGroup = (!bGdp ? nullptr : matchGroup(*bGdp, bGroupStr)); // In Flatten A Groups mode, treat space-separated subpatterns // as specifying distinct groups to be processed independently. // (In all other modes, subpatterns are unioned into a single group.) std::vector<const GA_PrimitiveGroup*> aGroupVec; if (collation != COLL_FLATTEN_A_GROUPS) { aGroupVec.push_back(matchGroup(*aGdp, aGroupStr)); } else { for (const auto& pattern: splitPatterns(aGroupStr)) { aGroupVec.push_back(matchGroup(*aGdp, pattern)); } } // For diagnostic purposes, keep track of whether any input grids are left unused. bool unusedA = false, unusedB = false; // Iterate over one or more A groups. for (const auto* aGroup: aGroupVec) { hvdb::VdbPrimIterator aIt{aGdp, GA_Range::safedeletions{}, aGroup}; hvdb::VdbPrimCIterator bIt{bGdp, bGroup}; // Populate two vectors of primitives, one comprising the A grids // and the other the B grids. (In the case of flattening operations, // these grids might be taken from the same input.) // Note: the following relies on exhausted iterators returning nullptr // and on incrementing an exhausted iterator being a no-op. 
std::vector<GU_PrimVDB*> aVdbVec; std::vector<const GU_PrimVDB*> bVdbVec; switch (collation) { case COLL_PAIRS: for ( ; (!needA || aIt) && (!needB || bIt); ++aIt, ++bIt) { aVdbVec.push_back(*aIt); bVdbVec.push_back(*bIt); } unusedA = unusedA || (needA && bool(aIt)); unusedB = unusedB || (needB && bool(bIt)); break; case COLL_A_WITH_1ST_B: for ( ; aIt && (!needB || bIt); ++aIt) { aVdbVec.push_back(*aIt); bVdbVec.push_back(*bIt); } break; case COLL_FLATTEN_B_TO_A: if (*bIt) { aVdbVec.push_back(*aIt); bVdbVec.push_back(*bIt); } for (++bIt; bIt; ++bIt) { aVdbVec.push_back(nullptr); bVdbVec.push_back(*bIt); } break; case COLL_FLATTEN_A: case COLL_FLATTEN_A_GROUPS: aVdbVec.push_back(*aIt); for (++aIt; aIt; ++aIt) { bVdbVec.push_back(*aIt); } break; } if ((needA && aVdbVec.empty()) || (needB && bVdbVec.empty())) continue; std::set<GU_PrimVDB*> vdbsToRemove; // Combine grids. if (!flatten) { // Iterate over A and, optionally, B grids. for (size_t i = 0, N = std::min(aVdbVec.size(), bVdbVec.size()); i < N; ++i) { if (progress.wasInterrupted()) { throw std::runtime_error{"interrupted"}; } // Note: even if needA is false, we still need to delete A grids. GU_PrimVDB* aVdb = aVdbVec[i]; const GU_PrimVDB* bVdb = bVdbVec[i]; hvdb::GridPtr aGrid; hvdb::GridCPtr bGrid; if (aVdb) aGrid = aVdb->getGridPtr(); if (bVdb) bGrid = bVdb->getConstGridPtr(); // For error reporting, get the names of the A and B grids. const UT_String aGridName = getGridName(aVdb, /*default=*/"A"), bGridName = getGridName(bVdb, /*default=*/"B"); if (hvdb::GridPtr outGrid = combineGrids(op, aGrid, bGrid, aGridName, bGridName, resample)) { // Name the output grid after the A grid if the A grid is used, // or after the B grid otherwise. UT_String outGridName = needA ? getGridName(aVdb) : getGridName(bVdb); // Add a new VDB primitive for the output grid to the output gdp. GU_PrimVDB::buildFromGrid(*gdp, outGrid, /*copyAttrsFrom=*/needA ? aVdb : bVdb, outGridName); vdbsToRemove.insert(aVdb); } } // Flatten grids (i.e., combine all B grids into the first A grid). } else { GU_PrimVDB* aVdb = aVdbVec[0]; hvdb::GridPtr aGrid; if (aVdb) aGrid = aVdb->getGridPtr(); hvdb::GridPtr outGrid; UT_String outGridName; // Iterate over B grids. const GU_PrimVDB* bVdb = nullptr; for (const GU_PrimVDB* theBVdb: bVdbVec) { if (progress.wasInterrupted()) { throw std::runtime_error{"interrupted"}; } bVdb = theBVdb; hvdb::GridCPtr bGrid; if (bVdb) { bGrid = bVdb->getConstGridPtr(); if (flattenA) { // When flattening within the A group, remove B grids, // since they're actually copies of grids from input 0. vdbsToRemove.insert(const_cast<GU_PrimVDB*>(bVdb)); } } const UT_String aGridName = getGridName(aVdb, /*default=*/"A"), bGridName = getGridName(bVdb, /*default=*/"B"); // Name the output grid after the A grid if the A grid is used, // or after the B grid otherwise. outGridName = (needA ? getGridName(aVdb) : getGridName(bVdb)); outGrid = combineGrids(op, aGrid, bGrid, aGridName, bGridName, resample); aGrid = outGrid; } if (outGrid) { // Add a new VDB primitive for the output grid to the output gdp. GU_PrimVDB::buildFromGrid(*gdp, outGrid, /*copyAttrsFrom=*/needA ? aVdb : bVdb, outGridName); vdbsToRemove.insert(aVdb); } } // Remove primitives that were copied from input 0. for (GU_PrimVDB* vdb: vdbsToRemove) { if (vdb) gdp->destroyPrimitive(*vdb, /*andPoints=*/true); } } // for each A group if (unusedA || unusedB) { std::ostringstream ostr; ostr << "some grids were not processed because there were more " << (unusedA ? "A" : "B") << " grids than " << (unusedA ? 
"B" : "A") << " grids"; addWarning(SOP_MESSAGE, ostr.str().c_str()); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); } //////////////////////////////////////// namespace { /// Functor to compute scale * grid + offset, for scalars scale and offset template<typename GridT> struct MulAdd { using ValueT = typename GridT::ValueType; using GridPtrT = typename GridT::Ptr; float scale, offset; explicit MulAdd(float s, float t = 0.0): scale(s), offset(t) {} void operator()(const ValueT& a, const ValueT&, ValueT& out) const { OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN out = ValueT(a * scale + offset); OPENVDB_NO_TYPE_CONVERSION_WARNING_END } /// @return true if the scale is 1 and the offset is 0 bool isIdentity() const { return (openvdb::math::isApproxEqual(scale, 1.f, 1.0e-6f) && openvdb::math::isApproxEqual(offset, 0.f, 1.0e-6f)); } /// Compute dest = src * scale + offset void process(const GridT& src, GridPtrT& dest) const { if (isIdentity()) { dest = src.deepCopy(); } else { if (!dest) dest = GridT::create(src); // same transform, new tree ValueT bg; (*this)(src.background(), ValueT(), bg); openvdb::tools::changeBackground(dest->tree(), bg); dest->tree().combine2(src.tree(), src.tree(), *this, /*prune=*/false); } } }; //////////////////////////////////////// /// Functor to compute (1 - A) * B for grids A and B template<typename ValueT> struct Blend1 { float aMult, bMult; const ValueT ONE; explicit Blend1(float a = 1.0, float b = 1.0): aMult(a), bMult(b), ONE(openvdb::zeroVal<ValueT>() + 1) {} void operator()(const ValueT& a, const ValueT& b, ValueT& out) const { OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN out = ValueT((ONE - aMult * a) * bMult * b); OPENVDB_NO_TYPE_CONVERSION_WARNING_END } }; //////////////////////////////////////// /// Functor to compute A + (1 - A) * B for grids A and B template<typename ValueT> struct Blend2 { float aMult, bMult; const ValueT ONE; explicit Blend2(float a = 1.0, float b = 1.0): aMult(a), bMult(b), ONE(openvdb::zeroVal<ValueT>() + 1) {} void operator()(const ValueT& a, const ValueT& b, ValueT& out) const { OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN out = ValueT(a*aMult); out = out + ValueT((ONE - out) * bMult*b); OPENVDB_NO_TYPE_CONVERSION_WARNING_END } }; //////////////////////////////////////// // Helper class to compare both scalar and vector values template<typename ValueT> struct ApproxEq { const ValueT &a, &b; ApproxEq(const ValueT& _a, const ValueT& _b): a(_a), b(_b) {} operator bool() const { return openvdb::math::isRelOrApproxEqual( a, b, /*rel*/ValueT(1e-6f), /*abs*/ValueT(1e-8f)); } }; // Specialization for Vec2 template<typename T> struct ApproxEq<openvdb::math::Vec2<T> > { using VecT = openvdb::math::Vec2<T>; using ValueT = typename VecT::value_type; const VecT &a, &b; ApproxEq(const VecT& _a, const VecT& _b): a(_a), b(_b) {} operator bool() const { return a.eq(b, /*abs=*/ValueT(1e-8f)); } }; // Specialization for Vec3 template<typename T> struct ApproxEq<openvdb::math::Vec3<T> > { using VecT = openvdb::math::Vec3<T>; using ValueT = typename VecT::value_type; const VecT &a, &b; ApproxEq(const VecT& _a, const VecT& _b): a(_a), b(_b) {} operator bool() const { return a.eq(b, /*abs=*/ValueT(1e-8f)); } }; // Specialization for Vec4 template<typename T> struct ApproxEq<openvdb::math::Vec4<T> > { using VecT = openvdb::math::Vec4<T>; using ValueT = typename VecT::value_type; const VecT &a, &b; ApproxEq(const VecT& _a, const VecT& _b): a(_a), b(_b) {} operator bool() const { return a.eq(b, /*abs=*/ValueT(1e-8f)); } }; } // 
unnamed namespace //////////////////////////////////////// template<typename AGridT> struct SOP_OpenVDB_Combine::DispatchOp { SOP_OpenVDB_Combine::CombineOp* combineOp; DispatchOp(SOP_OpenVDB_Combine::CombineOp& op): combineOp(&op) {} template<typename BGridT> void operator()(const BGridT&); }; // struct DispatchOp // Helper class for use with GridBase::apply() struct SOP_OpenVDB_Combine::CombineOp { SOP_OpenVDB_Combine::Cache* self; Operation op; ResampleMode resample; UT_String aGridName, bGridName; hvdb::GridCPtr aBaseGrid, bBaseGrid; hvdb::GridPtr outGrid; hvdb::Interrupter interrupt; CombineOp(): self(nullptr) {} // Functor for use with GridBase::apply() to return // a scalar grid's background value as a floating-point quantity struct BackgroundOp { double value; BackgroundOp(): value(0.0) {} template<typename GridT> void operator()(const GridT& grid) { value = static_cast<double>(grid.background()); } }; static double getScalarBackgroundValue(const hvdb::Grid& baseGrid) { BackgroundOp bgOp; baseGrid.apply<hvdb::NumericGridTypes>(bgOp); return bgOp.value; } template<typename GridT> typename GridT::Ptr resampleToMatch(const GridT& src, const hvdb::Grid& ref, int order) { using ValueT = typename GridT::ValueType; const ValueT ZERO = openvdb::zeroVal<ValueT>(); const openvdb::math::Transform& refXform = ref.constTransform(); typename GridT::Ptr dest; if (src.getGridClass() == openvdb::GRID_LEVEL_SET) { // For level set grids, use the level set rebuild tool to both resample the // source grid to match the reference grid and to rebuild the resulting level set. const bool refIsLevelSet = ref.getGridClass() == openvdb::GRID_LEVEL_SET; OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN const ValueT halfWidth = refIsLevelSet ? ValueT(ZERO + this->getScalarBackgroundValue(ref) * (1.0 / ref.voxelSize()[0])) : ValueT(src.background() * (1.0 / src.voxelSize()[0])); OPENVDB_NO_TYPE_CONVERSION_WARNING_END if (!openvdb::math::isFinite(halfWidth)) { std::stringstream msg; msg << "Resample to match: Illegal narrow band width = " << halfWidth << ", caused by grid '" << src.getName() << "' with background " << this->getScalarBackgroundValue(ref); throw std::invalid_argument(msg.str()); } try { dest = openvdb::tools::doLevelSetRebuild(src, /*iso=*/ZERO, /*exWidth=*/halfWidth, /*inWidth=*/halfWidth, &refXform, &interrupt); } catch (openvdb::TypeError&) { self->addWarning(SOP_MESSAGE, ("skipped rebuild of level set grid " + src.getName() + " of type " + src.type()).c_str()); dest.reset(); } } if (!dest && src.constTransform() != refXform) { // For non-level set grids or if level set rebuild failed due to an unsupported // grid type, use the grid transformer tool to resample the source grid to match // the reference grid. dest = src.copyWithNewTree(); dest->setTransform(refXform.copy()); using namespace openvdb; switch (order) { case 0: tools::resampleToMatch<tools::PointSampler>(src, *dest, interrupt); break; case 1: tools::resampleToMatch<tools::BoxSampler>(src, *dest, interrupt); break; case 2: tools::resampleToMatch<tools::QuadraticSampler>(src, *dest, interrupt); break; } } return dest; } // If necessary, resample one grid so that its index space registers // with the other grid's. // Note that one of the grid pointers might change as a result. 
template<typename AGridT, typename BGridT> void resampleGrids(const AGridT*& aGrid, const BGridT*& bGrid) { if (!aGrid || !bGrid) return; const bool needA = needAGrid(op), needB = needBGrid(op), needBoth = needA && needB; const int samplingOrder = static_cast<int>( self->evalInt("resampleinterp", 0, self->getTime())); // One of RESAMPLE_A, RESAMPLE_B or RESAMPLE_OFF, specifying whether // grid A, grid B or neither grid was resampled int resampleWhich = RESAMPLE_OFF; // Determine which of the two grids should be resampled. if (resample == RESAMPLE_HI_RES || resample == RESAMPLE_LO_RES) { const openvdb::Vec3d aVoxSize = aGrid->voxelSize(), bVoxSize = bGrid->voxelSize(); const double aVoxVol = aVoxSize[0] * aVoxSize[1] * aVoxSize[2], bVoxVol = bVoxSize[0] * bVoxSize[1] * bVoxSize[2]; resampleWhich = ((aVoxVol > bVoxVol && resample == RESAMPLE_LO_RES) || (aVoxVol < bVoxVol && resample == RESAMPLE_HI_RES)) ? RESAMPLE_A : RESAMPLE_B; } else { resampleWhich = resample; } if (aGrid->constTransform() != bGrid->constTransform()) { // If the A and B grid transforms don't match, one of the grids // should be resampled into the other's index space. if (resample == RESAMPLE_OFF) { if (needBoth) { // Resampling is disabled. Just log a warning. std::ostringstream ostr; ostr << aGridName << " and " << bGridName << " transforms don't match"; self->addWarning(SOP_MESSAGE, ostr.str().c_str()); } } else { if (needA && resampleWhich == RESAMPLE_A) { // Resample grid A into grid B's index space. aBaseGrid = this->resampleToMatch(*aGrid, *bGrid, samplingOrder); aGrid = static_cast<const AGridT*>(aBaseGrid.get()); } else if (needB && resampleWhich == RESAMPLE_B) { // Resample grid B into grid A's index space. bBaseGrid = this->resampleToMatch(*bGrid, *aGrid, samplingOrder); bGrid = static_cast<const BGridT*>(bBaseGrid.get()); } } } if (aGrid->getGridClass() == openvdb::GRID_LEVEL_SET && bGrid->getGridClass() == openvdb::GRID_LEVEL_SET) { // If both grids are level sets, ensure that their background values match. // (If one of the grids was resampled, then the background values should // already match.) const double a = this->getScalarBackgroundValue(*aGrid), b = this->getScalarBackgroundValue(*bGrid); if (!ApproxEq<double>(a, b)) { if (resample == RESAMPLE_OFF) { if (needBoth) { // Resampling/rebuilding is disabled. Just log a warning. std::ostringstream ostr; ostr << aGridName << " and " << bGridName << " background values don't match (" << std::setprecision(3) << a << " vs. " << b << ");\n" << " the output grid will not be a valid level set"; self->addWarning(SOP_MESSAGE, ostr.str().c_str()); } } else { // One of the two grids needs a level set rebuild. if (needA && resampleWhich == RESAMPLE_A) { // Rebuild A to match B's background value. aBaseGrid = this->resampleToMatch(*aGrid, *bGrid, samplingOrder); aGrid = static_cast<const AGridT*>(aBaseGrid.get()); } else if (needB && resampleWhich == RESAMPLE_B) { // Rebuild B to match A's background value. bBaseGrid = this->resampleToMatch(*bGrid, *aGrid, samplingOrder); bGrid = static_cast<const BGridT*>(bBaseGrid.get()); } } } } } void checkVectorTypes(const hvdb::Grid* aGrid, const hvdb::Grid* bGrid) { if (!aGrid || !bGrid || !needAGrid(op) || !needBGrid(op)) return; switch (op) { case OP_TOPO_UNION: case OP_TOPO_INTERSECTION: case OP_TOPO_DIFFERENCE: // No need to warn about different vector types for topology-only operations. 
break; default: { const openvdb::VecType aVecType = aGrid->getVectorType(), bVecType = bGrid->getVectorType(); if (aVecType != bVecType) { std::ostringstream ostr; ostr << aGridName << " and " << bGridName << " have different vector types\n" << " (" << hvdb::Grid::vecTypeToString(aVecType) << " vs. " << hvdb::Grid::vecTypeToString(bVecType) << ")"; self->addWarning(SOP_MESSAGE, ostr.str().c_str()); } break; } } } template <typename GridT> void doUnion(GridT &result, GridT &temp) { openvdb::tools::csgUnion(result, temp); } template <typename GridT> void doIntersection(GridT &result, GridT &temp) { openvdb::tools::csgIntersection(result, temp); } template <typename GridT> void doDifference(GridT &result, GridT &temp) { openvdb::tools::csgDifference(result, temp); } // Combine two grids of the same type. template<typename GridT> void combineSameType() { using ValueT = typename GridT::ValueType; const bool needA = needAGrid(op), needB = needBGrid(op); const float aMult = float(self->evalFloat("amult", 0, self->getTime())), bMult = float(self->evalFloat("bmult", 0, self->getTime())); const GridT *aGrid = nullptr, *bGrid = nullptr; if (aBaseGrid) aGrid = UTvdbGridCast<GridT>(aBaseGrid).get(); if (bBaseGrid) bGrid = UTvdbGridCast<GridT>(bBaseGrid).get(); if (needA && !aGrid) throw std::runtime_error("missing A grid"); if (needB && !bGrid) throw std::runtime_error("missing B grid"); // Warn if combining vector grids with different vector types. if (needA && needB && openvdb::VecTraits<ValueT>::IsVec) { this->checkVectorTypes(aGrid, bGrid); } // If necessary, resample one grid so that its index space // registers with the other grid's. if (aGrid && bGrid) this->resampleGrids(aGrid, bGrid); const ValueT ZERO = openvdb::zeroVal<ValueT>(); // A temporary grid is needed for binary operations, because they // cannibalize the B grid. 
typename GridT::Ptr resultGrid, tempGrid; switch (op) { case OP_COPY_A: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); break; case OP_COPY_B: MulAdd<GridT>(bMult).process(*bGrid, resultGrid); break; case OP_INVERT: MulAdd<GridT>(-aMult, 1.0).process(*aGrid, resultGrid); break; case OP_ADD: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); openvdb::tools::compSum(*resultGrid, *tempGrid); break; case OP_SUBTRACT: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(-bMult).process(*bGrid, tempGrid); openvdb::tools::compSum(*resultGrid, *tempGrid); break; case OP_MULTIPLY: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); openvdb::tools::compMul(*resultGrid, *tempGrid); break; case OP_DIVIDE: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); openvdb::tools::compDiv(*resultGrid, *tempGrid); break; case OP_MAXIMUM: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); openvdb::tools::compMax(*resultGrid, *tempGrid); break; case OP_MINIMUM: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); openvdb::tools::compMin(*resultGrid, *tempGrid); break; case OP_BLEND1: // (1 - A) * B { const Blend1<ValueT> comp(aMult, bMult); ValueT bg; comp(aGrid->background(), ZERO, bg); resultGrid = aGrid->copyWithNewTree(); openvdb::tools::changeBackground(resultGrid->tree(), bg); resultGrid->tree().combine2(aGrid->tree(), bGrid->tree(), comp, /*prune=*/false); break; } case OP_BLEND2: // A + (1 - A) * B { const Blend2<ValueT> comp(aMult, bMult); ValueT bg; comp(aGrid->background(), ZERO, bg); resultGrid = aGrid->copyWithNewTree(); openvdb::tools::changeBackground(resultGrid->tree(), bg); resultGrid->tree().combine2(aGrid->tree(), bGrid->tree(), comp, /*prune=*/false); break; } case OP_UNION: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); doUnion(*resultGrid, *tempGrid); break; case OP_INTERSECTION: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); doIntersection(*resultGrid, *tempGrid); break; case OP_DIFFERENCE: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); doDifference(*resultGrid, *tempGrid); break; case OP_REPLACE: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); MulAdd<GridT>(bMult).process(*bGrid, tempGrid); openvdb::tools::compReplace(*resultGrid, *tempGrid); break; case OP_TOPO_UNION: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); // Note: no need to scale the B grid for topology-only operations. resultGrid->topologyUnion(*bGrid); break; case OP_TOPO_INTERSECTION: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); resultGrid->topologyIntersection(*bGrid); break; case OP_TOPO_DIFFERENCE: MulAdd<GridT>(aMult).process(*aGrid, resultGrid); resultGrid->topologyDifference(*bGrid); break; } outGrid = this->postprocess<GridT>(resultGrid); } // Combine two grids of different types. /// @todo Currently, only topology operations can be performed on grids of different types. 
template<typename AGridT, typename BGridT> void combineDifferentTypes() { const bool needA = needAGrid(op), needB = needBGrid(op); const AGridT* aGrid = nullptr; const BGridT* bGrid = nullptr; if (aBaseGrid) aGrid = UTvdbGridCast<AGridT>(aBaseGrid).get(); if (bBaseGrid) bGrid = UTvdbGridCast<BGridT>(bBaseGrid).get(); if (needA && !aGrid) throw std::runtime_error("missing A grid"); if (needB && !bGrid) throw std::runtime_error("missing B grid"); // Warn if combining vector grids with different vector types. if (needA && needB && openvdb::VecTraits<typename AGridT::ValueType>::IsVec && openvdb::VecTraits<typename BGridT::ValueType>::IsVec) { this->checkVectorTypes(aGrid, bGrid); } // If necessary, resample one grid so that its index space // registers with the other grid's. if (aGrid && bGrid) this->resampleGrids(aGrid, bGrid); const float aMult = float(self->evalFloat("amult", 0, self->getTime())); typename AGridT::Ptr resultGrid; switch (op) { case OP_TOPO_UNION: MulAdd<AGridT>(aMult).process(*aGrid, resultGrid); // Note: no need to scale the B grid for topology-only operations. resultGrid->topologyUnion(*bGrid); break; case OP_TOPO_INTERSECTION: MulAdd<AGridT>(aMult).process(*aGrid, resultGrid); resultGrid->topologyIntersection(*bGrid); break; case OP_TOPO_DIFFERENCE: MulAdd<AGridT>(aMult).process(*aGrid, resultGrid); resultGrid->topologyDifference(*bGrid); break; default: { std::ostringstream ostr; ostr << "can't combine grid " << aGridName << " of type " << aGrid->type() << "\n with grid " << bGridName << " of type " << bGrid->type(); throw std::runtime_error(ostr.str()); break; } } outGrid = this->postprocess<AGridT>(resultGrid); } template<typename GridT> typename GridT::Ptr postprocess(typename GridT::Ptr resultGrid) { using ValueT = typename GridT::ValueType; const ValueT ZERO = openvdb::zeroVal<ValueT>(); const bool prune = self->evalInt("prune", 0, self->getTime()), flood = self->evalInt("flood", 0, self->getTime()), deactivate = self->evalInt("deactivate", 0, self->getTime()); if (deactivate) { const float deactivationTolerance = float(self->evalFloat("bgtolerance", 0, self->getTime())); OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN const ValueT tolerance(ZERO + deactivationTolerance); OPENVDB_NO_TYPE_CONVERSION_WARNING_END // Mark active output tiles and voxels as inactive if their // values match the output grid's background value. // Do this first to facilitate pruning. openvdb::tools::deactivate(*resultGrid, resultGrid->background(), tolerance); } if (flood && resultGrid->getGridClass() == openvdb::GRID_LEVEL_SET) { openvdb::tools::signedFloodFill(resultGrid->tree()); } if (prune) { const float pruneTolerance = float(self->evalFloat("tolerance", 0, self->getTime())); OPENVDB_NO_TYPE_CONVERSION_WARNING_BEGIN const ValueT tolerance(ZERO + pruneTolerance); OPENVDB_NO_TYPE_CONVERSION_WARNING_END openvdb::tools::prune(resultGrid->tree(), tolerance); } return resultGrid; } template<typename AGridT> void operator()(const AGridT&) { const bool needA = needAGrid(op), needB = needBGrid(op), needBoth = needA && needB; if (!needBoth || !aBaseGrid || !bBaseGrid || aBaseGrid->type() == bBaseGrid->type()) { this->combineSameType<AGridT>(); } else { DispatchOp<AGridT> dispatcher(*this); // Dispatch on the B grid's type. 
int success = bBaseGrid->apply<hvdb::VolumeGridTypes>(dispatcher); if (!success) { std::ostringstream ostr; ostr << "grid " << bGridName << " has unsupported type " << bBaseGrid->type(); self->addWarning(SOP_MESSAGE, ostr.str().c_str()); } } } }; // struct CombineOp template <> void SOP_OpenVDB_Combine::CombineOp::doUnion(openvdb::BoolGrid &result, openvdb::BoolGrid &temp) { } template <> void SOP_OpenVDB_Combine::CombineOp::doIntersection(openvdb::BoolGrid &result, openvdb::BoolGrid &temp) { } template <> void SOP_OpenVDB_Combine::CombineOp::doDifference(openvdb::BoolGrid &result, openvdb::BoolGrid &temp) { } template<typename AGridT> template<typename BGridT> void SOP_OpenVDB_Combine::DispatchOp<AGridT>::operator()(const BGridT&) { combineOp->combineDifferentTypes<AGridT, BGridT>(); } //////////////////////////////////////// hvdb::GridPtr SOP_OpenVDB_Combine::Cache::combineGrids( Operation op, hvdb::GridCPtr aGrid, hvdb::GridCPtr bGrid, const UT_String& aGridName, const UT_String& bGridName, ResampleMode resample) { hvdb::GridPtr outGrid; const bool needA = needAGrid(op), needB = needBGrid(op), needLS = needLevelSets(op); if (!needA && !needB) throw std::runtime_error("nothing to do"); if (needA && !aGrid) throw std::runtime_error("missing A grid"); if (needB && !bGrid) throw std::runtime_error("missing B grid"); if (needLS && ((aGrid && aGrid->getGridClass() != openvdb::GRID_LEVEL_SET) || (bGrid && bGrid->getGridClass() != openvdb::GRID_LEVEL_SET))) { std::ostringstream ostr; ostr << "expected level set grids for the " << sOpMenuItems[op*2+1] << " operation,\n found " << hvdb::Grid::gridClassToString(aGrid->getGridClass()) << " (" << aGridName << ") and " << hvdb::Grid::gridClassToString(bGrid->getGridClass()) << " (" << bGridName << ");\n the output grid will not be a valid level set"; addWarning(SOP_MESSAGE, ostr.str().c_str()); } if (needA && needB && aGrid->type() != bGrid->type() && op != OP_TOPO_UNION && op != OP_TOPO_INTERSECTION && op != OP_TOPO_DIFFERENCE) { std::ostringstream ostr; ostr << "can't combine grid " << aGridName << " of type " << aGrid->type() << "\n with grid " << bGridName << " of type " << bGrid->type(); addWarning(SOP_MESSAGE, ostr.str().c_str()); return outGrid; } CombineOp compOp; compOp.self = this; compOp.op = op; compOp.resample = resample; compOp.aBaseGrid = aGrid; compOp.bBaseGrid = bGrid; compOp.aGridName = aGridName; compOp.bGridName = bGridName; compOp.interrupt = hvdb::Interrupter(); int success = false; if (needA || UTvdbGetGridType(*aGrid) == UTvdbGetGridType(*bGrid)) { success = aGrid->apply<hvdb::VolumeGridTypes>(compOp); } if (!success || !compOp.outGrid) { std::ostringstream ostr; if (aGrid->type() == bGrid->type()) { ostr << "grids " << aGridName << " and " << bGridName << " have unsupported type " << aGrid->type(); } else { ostr << "grid " << (needA ? aGridName : bGridName) << " has unsupported type " << (needA ? aGrid->type() : bGrid->type()); } addWarning(SOP_MESSAGE, ostr.str().c_str()); } return compOp.outGrid; }
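

////////////////////////////////////////


#if 0 // Reference-only sketch; not compiled as part of this SOP.
// The combine modes above are thin wrappers around OpenVDB's composite tools
// (openvdb/tools/Composite.h). The snippet below is a minimal standalone
// sketch of two of them outside Houdini; the spheres, voxel size and main()
// are illustrative assumptions and are not part of this node.
#include <openvdb/openvdb.h>
#include <openvdb/tools/Composite.h>      // csgUnion(), compSum()
#include <openvdb/tools/LevelSetSphere.h> // createLevelSetSphere()

int main()
{
    openvdb::initialize();

    // Two level set spheres with matching transforms and narrow-band widths
    // (the same preconditions this SOP checks or enforces by resampling).
    openvdb::FloatGrid::Ptr a = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/1.0f, /*center=*/openvdb::Vec3f(0.0f, 0.0f, 0.0f), /*voxelSize=*/0.1f);
    openvdb::FloatGrid::Ptr b = openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
        /*radius=*/1.0f, /*center=*/openvdb::Vec3f(1.0f, 0.0f, 0.0f), /*voxelSize=*/0.1f);

    // "Add": compSum() writes the sum into the first grid and empties the second,
    // which is why combineSameType() above always operates on scaled temporary copies.
    openvdb::FloatGrid::Ptr aCopy = a->deepCopy(), bCopy = b->deepCopy();
    openvdb::tools::compSum(*aCopy, *bCopy);

    // "SDF Union": likewise modifies A in place and cannibalizes B.
    openvdb::tools::csgUnion(*a, *b);
    return 0;
}
#endif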
57222
C++
36.085548
103
0.566758
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Remove_Divergence.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Remove_Divergence.cc /// /// @author FX R&D OpenVDB team #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/math/ConjGradient.h> // for JacobiPreconditioner #include <openvdb/tools/GridOperators.h> #include <openvdb/tools/LevelSetUtil.h> // for tools::sdfInteriorMask() #include <openvdb/tools/PoissonSolver.h> #include <openvdb/tools/Prune.h> #include <UT/UT_Interrupt.h> #include <UT/UT_StringArray.h> #include <GU/GU_Detail.h> #include <PRM/PRM_Parm.h> #include <GA/GA_Handle.h> #include <GA/GA_PageIterator.h> #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <sstream> #include <string> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; namespace { using ColliderMaskGrid = openvdb::BoolGrid; ///< @todo really should derive from velocity grid using ColliderBBox = openvdb::BBoxd; using Coord = openvdb::Coord; enum ColliderType { CT_NONE, CT_BBOX, CT_STATIC, CT_DYNAMIC }; const int DEFAULT_MAX_ITERATIONS = 10000; const double DEFAULT_MAX_ERROR = 1.0e-20; } //////////////////////////////////////// struct SOP_OpenVDB_Remove_Divergence: public hvdb::SOP_NodeVDB { SOP_OpenVDB_Remove_Divergence(OP_Network*, const char* name, OP_Operator*); static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int isRefInput(unsigned input) const override { return (input > 0); } class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; protected: bool updateParmsFlags() override; }; void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip("Names of vector-valued VDBs to be processed") .setDocumentation( "A subset of vector-valued input VDBs to be processed" " (see [specifying volumes|/model/volumes#group])\n\n" "VDBs with nonuniform voxels, including frustum grids, are not supported.\n" "They should be [resampled|Node:sop/DW_OpenVDBResample]" " to have a linear transform with uniform scale.")); { std::ostringstream ostr; ostr << "If disabled, limit the pressure solver to " << DEFAULT_MAX_ITERATIONS << " iterations."; const std::string tooltip = ostr.str(); parms.add(hutil::ParmFactory(PRM_TOGGLE, "useiterations", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip(tooltip.c_str())); parms.add(hutil::ParmFactory(PRM_INT_J, "iterations", "Iterations") .setDefault(1000) .setRange(PRM_RANGE_RESTRICTED, 1, PRM_RANGE_UI, 2000) .setTooltip("Maximum number of iterations of the pressure solver") .setDocumentation( ("Maximum number of iterations of the pressure solver\n\n" + tooltip).c_str())); } { std::ostringstream ostr; ostr << "If disabled, limit the pressure solver error to " << std::setprecision(3) << DEFAULT_MAX_ERROR << "."; const std::string tooltip = ostr.str(); parms.add(hutil::ParmFactory(PRM_TOGGLE, "usetolerance", "") .setDefault(PRMoneDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN) .setTooltip(tooltip.c_str())); ostr.str(""); ostr << "If disabled, limit the pressure solver error to 10<sup>" << int(std::log10(DEFAULT_MAX_ERROR)) << "</sup>."; parms.add(hutil::ParmFactory(PRM_FLT_J, "tolerance", "Tolerance") .setDefault(openvdb::math::Delta<float>::value()) .setRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_UI, 0.01) .setTooltip( "The pressure 
solver is deemed to have converged when\n" "the magnitude of the error is less than this tolerance.") .setDocumentation( ("The pressure solver is deemed to have converged when" " the magnitude of the error is less than this tolerance.\n\n" + ostr.str()).c_str())); } parms.add(hutil::ParmFactory(PRM_TOGGLE, "usecollider", "") .setDefault(PRMzeroDefaults) .setTypeExtended(PRM_TYPE_TOGGLE_JOIN)); parms.add(hutil::ParmFactory(PRM_STRING, "collidertype", "Collider Type") .setChoiceListItems(PRM_CHOICELIST_SINGLE, { "bbox", "Bounding Box", "static", "Static VDB", "dynamic", "Dynamic VDB" }) .setDefault("bbox") .setTooltip( "Bounding Box:\n" " Use the bounding box of any reference geometry as the collider.\n" "Static VDB:\n" " Treat the active voxels of the named VDB volume as solid, stationary obstacles." "\nDynamic VDB:\n" " If the named VDB volume is vector-valued, treat the values of active voxels\n" " as velocities of moving obstacles; otherwise, treat the active voxels as\n" " stationary obstacles." )); parms.add(hutil::ParmFactory(PRM_STRING, "collider", "Collider") .setChoiceList(&hutil::PrimGroupMenuInput2) .setTooltip( "Name of the reference VDB volume whose active voxels denote solid obstacles\n\n" "If multiple volumes are selected, only the first one will be used.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "invertcollider", "Invert Collider") .setDefault(PRMzeroDefaults) .setTooltip( "Invert the collider so that active voxels denote empty space\n" "and inactive voxels denote solid obstacles.")); parms.add(hutil::ParmFactory(PRM_TOGGLE, "pressure", "Output Pressure") .setDefault(PRMzeroDefaults) .setTooltip( "Output the computed pressure for each input VDB \"v\"\n" "as a scalar VDB named \"v_pressure\".")); // Register this operator. hvdb::OpenVDBOpFactory("VDB Project Non-Divergent", SOP_OpenVDB_Remove_Divergence::factory, parms, *table) #ifndef SESI_OPENVDB .setInternalName("DW_OpenVDBRemoveDivergence") #endif .addInput("Velocity field VDBs") .addOptionalInput("Optional collider VDB or geometry") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Remove_Divergence::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Remove divergence from VDB velocity fields.\"\"\"\n\ \n\ @overview\n\ \n\ A vector-valued VDB volume can represent a velocity field.\n\ When particles flow through the field, they might either expand\n\ from a voxel or collapse into a voxel.\n\ These source/sink behaviors indicate divergence in the field.\n\ \n\ This node computes a new vector field that is close to the input\n\ but has no divergence.\n\ This can be used to condition velocity fields to limit particle creation,\n\ creating more realistic flows.\n\ \n\ If the optional collider volume is provided, the output velocity field\n\ will direct flow around obstacles (i.e., active voxels) in that volume.\n\ The collider itself may be a velocity field, in which case the obstacles\n\ are considered to be moving with the given velocities.\n\ \n\ Combined with the [OpenVDB Advect Points|Node:sop/DW_OpenVDBAdvectPoints]\n\ node and a [Solver|Node:sop/solver] node for feedback, this node\n\ can be used to build a simple FLIP solver.\n\ \n\ @related\n\ - [OpenVDB Advect Points|Node:sop/DW_OpenVDBAdvectPoints]\n\ - [Node:sop/solver]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } bool SOP_OpenVDB_Remove_Divergence::updateParmsFlags() { bool changed = false; const bool useCollider = evalInt("usecollider", 0, 
0); changed |= enableParm("collidertype", useCollider); changed |= enableParm("invertcollider", useCollider); changed |= enableParm("collider", useCollider && (evalStdString("collidertype", 0) != "bbox")); changed |= enableParm("iterations", bool(evalInt("useiterations", 0, 0))); changed |= enableParm("tolerance", bool(evalInt("usetolerance", 0, 0))); return changed; } OP_Node* SOP_OpenVDB_Remove_Divergence::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Remove_Divergence(net, name, op); } SOP_OpenVDB_Remove_Divergence::SOP_OpenVDB_Remove_Divergence( OP_Network* net, const char* name, OP_Operator* op) : hvdb::SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// namespace { struct SolverParms { SolverParms() : invertCollider(false) , colliderType(CT_NONE) , iterations(1) , absoluteError(-1.0) , outputState(openvdb::math::pcg::terminationDefaults<double>()) , interrupter(nullptr) {} hvdb::GridPtr velocityGrid; hvdb::GridCPtr colliderGrid; hvdb::GridPtr pressureGrid; hvdb::GridCPtr domainMaskGrid; ColliderBBox colliderBBox; bool invertCollider; ColliderType colliderType; int iterations; double absoluteError; openvdb::math::pcg::State outputState; hvdb::Interrupter* interrupter; }; //////////////////////////////////////// /// @brief Functor to extract an interior mask from a level set grid /// of arbitrary floating-point type struct LevelSetMaskOp { template<typename GridType> void operator()(const GridType& grid) { outputGrid = openvdb::tools::sdfInteriorMask(grid); } hvdb::GridPtr outputGrid; }; /// @brief Functor to extract a topology mask from a grid of arbitrary type struct ColliderMaskOp { template<typename GridType> void operator()(const GridType& grid) { if (mask) { mask->topologyUnion(grid); mask->setTransform(grid.transform().copy()); } } ColliderMaskGrid::Ptr mask; }; //////////////////////////////////////// /// @brief Generic grid accessor /// @details This just wraps a const accessor to a collider grid, but /// it changes the behavior of the copy constructor for thread safety. template<typename GridType> class GridConstAccessor { public: using ValueType = typename GridType::ValueType; explicit GridConstAccessor(const SolverParms& parms): mAcc(static_cast<const GridType&>(*parms.colliderGrid).getConstAccessor()) {} explicit GridConstAccessor(const GridType& grid): mAcc(grid.getConstAccessor()) {} // When copying, create a new, empty accessor, to avoid a data race // with the existing accessor, which might be updating on another thread. GridConstAccessor(const GridConstAccessor& other): mAcc(other.mAcc.tree()) {} bool isValueOn(const Coord& ijk) const { return mAcc.isValueOn(ijk); } const ValueType& getValue(const Coord& ijk) const { return mAcc.getValue(ijk); } bool probeValue(const Coord& ijk, ValueType& val) const { return mAcc.probeValue(ijk, val); } private: GridConstAccessor& operator=(const GridConstAccessor&); typename GridType::ConstAccessor mAcc; }; // class GridConstAccessor using ColliderMaskAccessor = GridConstAccessor<ColliderMaskGrid>; /// @brief Bounding box accessor class BBoxConstAccessor { public: using ValueType = double; explicit BBoxConstAccessor(const SolverParms& parms): mBBox(parms.velocityGrid->transform().worldToIndexNodeCentered(parms.colliderBBox)) {} BBoxConstAccessor(const BBoxConstAccessor& other): mBBox(other.mBBox) {} // Voxels outside the bounding box are solid, i.e., active. 
bool isValueOn(const Coord& ijk) const { return !mBBox.isInside(ijk); } ValueType getValue(const Coord&) const { return ValueType(0); } bool probeValue(const Coord& ijk, ValueType& v) const { v=ValueType(0); return isValueOn(ijk); } private: BBoxConstAccessor& operator=(const BBoxConstAccessor&); const openvdb::CoordBBox mBBox; }; // class BBoxConstAccessor //////////////////////////////////////// /// @brief Functor to compute pressure projection in parallel over leaf nodes template<typename TreeType> struct PressureProjectionOp { using LeafNodeType = typename TreeType::LeafNodeType; using ValueType = typename TreeType::ValueType; PressureProjectionOp(SolverParms& parms, LeafNodeType** velNodes, const LeafNodeType** gradPressureNodes, bool staggered) : mVelocityNodes(velNodes) , mGradientOfPressureNodes(gradPressureNodes) , mVoxelSize(parms.velocityGrid->transform().voxelSize()[0]) , mStaggered(staggered) { } void operator()(const tbb::blocked_range<size_t>& range) const { using ElementType = typename ValueType::value_type; // Account for voxel size here, instead of in the Poisson solve. const ElementType scale = ElementType((mStaggered ? 1.0 : 4.0) * mVoxelSize * mVoxelSize); for (size_t n = range.begin(), N = range.end(); n < N; ++n) { LeafNodeType& velocityNode = *mVelocityNodes[n]; ValueType* velocityData = velocityNode.buffer().data(); const ValueType* gradientOfPressureData = mGradientOfPressureNodes[n]->buffer().data(); for (typename LeafNodeType::ValueOnIter it = velocityNode.beginValueOn(); it; ++it) { const openvdb::Index pos = it.pos(); velocityData[pos] -= scale * gradientOfPressureData[pos]; } } } LeafNodeType* const * const mVelocityNodes; LeafNodeType const * const * const mGradientOfPressureNodes; const double mVoxelSize; const bool mStaggered; }; // class PressureProjectionOp //////////////////////////////////////// /// @brief Functor for use with Tree::modifyValue() to set a single element /// of a vector-valued voxel template<typename VectorType> struct SetVecElemOp { using ValueType = typename VectorType::ValueType; SetVecElemOp(int axis_, ValueType value_): axis(axis_), value(value_) {} void operator()(VectorType& v) const { v[axis] = value; } const int axis; const ValueType value; }; /// @brief Functor to correct the velocities of voxels adjacent to solid obstacles template<typename VelocityGridType> class CorrectCollisionVelocityOp { public: using VectorType = typename VelocityGridType::ValueType; using VectorElementType = typename VectorType::ValueType; using MaskGridType = typename VelocityGridType::template ValueConverter<bool>::Type; using MaskTreeType = typename MaskGridType::TreeType; explicit CorrectCollisionVelocityOp(SolverParms& parms): mParms(&parms) { const MaskGridType& domainMaskGrid = static_cast<const MaskGridType&>(*mParms->domainMaskGrid); typename MaskTreeType::Ptr interiorMask( new MaskTreeType(domainMaskGrid.tree(), /*background=*/false, openvdb::TopologyCopy())); mBorderMask.reset(new MaskTreeType(*interiorMask)); openvdb::tools::erodeVoxels(*interiorMask, /*iterations=*/1, openvdb::tools::NN_FACE); mBorderMask->topologyDifference(*interiorMask); } template<typename ColliderGridType> void operator()(const ColliderGridType&) { GridConstAccessor<ColliderGridType> collider( static_cast<const ColliderGridType&>(*mParms->colliderGrid)); correctVelocity(collider); } template<typename ColliderAccessorType> void correctVelocity(const ColliderAccessorType& collider) { using ColliderValueType = typename ColliderAccessorType::ValueType; VelocityGridType& 
velocityGrid = static_cast<VelocityGridType&>(*mParms->velocityGrid); typename VelocityGridType::Accessor velocity = velocityGrid.getAccessor(); const bool invert = mParms->invertCollider; switch (mParms->colliderType) { case CT_NONE: break; case CT_BBOX: case CT_STATIC: // For each border voxel of the velocity grid... /// @todo parallelize for (typename MaskTreeType::ValueOnCIter it = mBorderMask->cbeginValueOn(); it; ++it) { const Coord ijk = it.getCoord(); // If the neighbor in a certain direction is a stationary obstacle, // set the border voxel's velocity in that direction to zero. // x direction if ((collider.isValueOn(ijk.offsetBy(-1, 0, 0)) != invert) || (collider.isValueOn(ijk.offsetBy(1, 0, 0)) != invert)) { velocity.modifyValue(ijk, SetVecElemOp<VectorType>(0, 0)); } // y direction if ((collider.isValueOn(ijk.offsetBy(0, -1, 0)) != invert) || (collider.isValueOn(ijk.offsetBy(0, 1, 0)) != invert)) { velocity.modifyValue(ijk, SetVecElemOp<VectorType>(1, 0)); } // z direction if ((collider.isValueOn(ijk.offsetBy(0, 0, -1)) != invert) || (collider.isValueOn(ijk.offsetBy(0, 0, 1)) != invert)) { velocity.modifyValue(ijk, SetVecElemOp<VectorType>(2, 0)); } } break; case CT_DYNAMIC: // For each border voxel of the velocity grid... /// @todo parallelize for (typename MaskTreeType::ValueOnCIter it = mBorderMask->cbeginValueOn(); it; ++it) { const Coord ijk = it.getCoord(); ColliderValueType colliderVal; // If the neighbor in a certain direction is a moving obstacle, // set the border voxel's velocity in that direction to the // obstacle's velocity in that direction. for (int axis = 0; axis <= 2; ++axis) { // 0:x, 1:y, 2:z Coord neighbor = ijk; neighbor[axis] -= 1; if (collider.probeValue(neighbor, colliderVal) != invert) { // Copy or create a Vec3 from the collider value and extract one of // its components. // (Since the collider is dynamic, ColliderValueType must be a Vec3 type, // but this code has to compile for all ColliderGridTypes.) VectorElementType colliderVelocity = VectorType(colliderVal)[axis]; velocity.modifyValue(ijk, SetVecElemOp<VectorType>(axis, colliderVelocity)); } else { neighbor = ijk; neighbor[axis] += 1; if (collider.probeValue(neighbor, colliderVal) != invert) { VectorElementType colliderVelocity = VectorType(colliderVal)[axis]; velocity.modifyValue(ijk, SetVecElemOp<VectorType>(axis, colliderVelocity)); } } } } break; } // switch (mParms->colliderType) } private: SolverParms* mParms; typename MaskTreeType::Ptr mBorderMask; }; // class CorrectCollisionVelocityOp //////////////////////////////////////// //{ // Boundary condition functors /// @brief Functor specifying boundary conditions for the Poisson solver /// when exterior voxels may be either solid (and possibly in motion) or empty template<typename VelocityGridType, typename ColliderAccessorType> class ColliderBoundaryOp { public: using VectorType = typename VelocityGridType::ValueType; explicit ColliderBoundaryOp(const SolverParms& parms) : mVelocity(static_cast<VelocityGridType&>(*parms.velocityGrid).getConstAccessor()) , mCollider(parms) , mInvert(parms.invertCollider) , mDynamic(parms.colliderType == CT_DYNAMIC) , mInvVoxelSize(0.5 / (parms.velocityGrid->voxelSize()[0])) // assumes uniform voxels {} ColliderBoundaryOp(const ColliderBoundaryOp& other) // Give this op new, empty accessors, to avoid data races with // the other op's accessors, which might be updating on another thread. 
: mVelocity(other.mVelocity.tree()) , mCollider(other.mCollider) , mInvert(other.mInvert) , mDynamic(other.mDynamic) , mInvVoxelSize(other.mInvVoxelSize) {} void operator()(const Coord& ijk, const Coord& ijkNeighbor, double& rhs, double& diag) const { // Voxels outside both the velocity field and the collider // are considered to be empty (unless the collider is inverted). // Voxels outside the velocity field and inside the collider // are considered to be solid. if (mCollider.isValueOn(ijkNeighbor) == mInvert) { // The exterior neighbor is empty (i.e., zero), so just adjust the center weight. diag -= 1; } else { const VectorType& velocity = mVelocity.getValue(ijkNeighbor); double delta = 0.0; if (mDynamic) { // exterior neighbor is a solid obstacle with nonzero velocity const openvdb::Vec3d colliderVelocity(mCollider.getValue(ijkNeighbor)); if (ijkNeighbor[0] < ijk[0]) { delta += velocity[0] - colliderVelocity[0]; } if (ijkNeighbor[0] > ijk[0]) { delta -= (velocity[0] - colliderVelocity[0]); } if (ijkNeighbor[1] < ijk[1]) { delta += velocity[1] - colliderVelocity[1]; } if (ijkNeighbor[1] > ijk[1]) { delta -= (velocity[1] - colliderVelocity[1]); } if (ijkNeighbor[2] < ijk[2]) { delta += velocity[2] - colliderVelocity[2]; } if (ijkNeighbor[2] > ijk[2]) { delta -= (velocity[2] - colliderVelocity[2]); } } else { // exterior neighbor is a stationary solid obstacle if (ijkNeighbor[0] < ijk[0]) { delta += velocity[0]; } if (ijkNeighbor[0] > ijk[0]) { delta -= velocity[0]; } if (ijkNeighbor[1] < ijk[1]) { delta += velocity[1]; } if (ijkNeighbor[1] > ijk[1]) { delta -= velocity[1]; } if (ijkNeighbor[2] < ijk[2]) { delta += velocity[2]; } if (ijkNeighbor[2] > ijk[2]) { delta -= velocity[2]; } } rhs += delta * mInvVoxelSize; // Note: no adjustment to the center weight (diag). } } private: // Disable assignment (due to const members). ColliderBoundaryOp& operator=(const ColliderBoundaryOp&); typename VelocityGridType::ConstAccessor mVelocity; // accessor to the velocity grid ColliderAccessorType mCollider; // accessor to the collider const bool mInvert; // invert the collider? const bool mDynamic; // is the collider moving? const double mInvVoxelSize; }; // class ColliderBoundaryOp //} //////////////////////////////////////// /// @brief Main solver routine template<typename VectorGridType, typename ColliderGridType, typename BoundaryOpType> inline bool removeDivergenceWithColliderGrid(SolverParms& parms, const BoundaryOpType& boundaryOp) { using VectorTreeType = typename VectorGridType::TreeType; using VectorLeafNodeType = typename VectorTreeType::LeafNodeType; using VectorType = typename VectorGridType::ValueType; using VectorElementType = typename VectorType::ValueType; using ScalarGrid = typename VectorGridType::template ValueConverter<VectorElementType>::Type; using ScalarTree = typename ScalarGrid::TreeType; using MaskGridType = typename VectorGridType::template ValueConverter<bool>::Type; VectorGridType& velocityGrid = static_cast<VectorGridType&>(*parms.velocityGrid); const bool staggered = ((velocityGrid.getGridClass() == openvdb::GRID_STAGGERED) && (openvdb::VecTraits<VectorType>::Size == 3)); // Compute the divergence of the incoming velocity field. /// @todo Consider neighboring collider velocities at border voxels? 
openvdb::tools::Divergence<VectorGridType> divergenceOp(velocityGrid); typename ScalarGrid::ConstPtr divGrid = divergenceOp.process(); parms.outputState = openvdb::math::pcg::terminationDefaults<VectorElementType>(); parms.outputState.iterations = parms.iterations; parms.outputState.absoluteError = (parms.absoluteError >= 0.0 ? parms.absoluteError : DEFAULT_MAX_ERROR); parms.outputState.relativeError = 0.0; using PCT = openvdb::math::pcg::JacobiPreconditioner<openvdb::tools::poisson::LaplacianMatrix>; // Solve for pressure using Poisson's equation. typename ScalarTree::Ptr pressure; if (parms.colliderType == CT_NONE) { pressure = openvdb::tools::poisson::solveWithBoundaryConditionsAndPreconditioner<PCT>( divGrid->tree(), boundaryOp, parms.outputState, *parms.interrupter, staggered); } else { // Create a domain mask by clipping the velocity grid's topology against the collider's. // Pressure will be computed only where the domain mask is active. MaskGridType* domainMaskGrid = new MaskGridType(*divGrid); // match input grid's topology parms.domainMaskGrid.reset(domainMaskGrid); if (parms.colliderType == CT_BBOX) { if (parms.invertCollider) { // Solve for pressure only outside the bounding box. const openvdb::CoordBBox colliderISBBox = velocityGrid.transform().worldToIndexNodeCentered(parms.colliderBBox); domainMaskGrid->fill(colliderISBBox, false, false); } else { // Solve for pressure only inside the bounding box. domainMaskGrid->clipGrid(parms.colliderBBox); } } else { const ColliderGridType& colliderGrid = static_cast<const ColliderGridType&>(*parms.colliderGrid); if (parms.invertCollider) { // Solve for pressure only inside the collider. domainMaskGrid->topologyIntersection(colliderGrid); } else { // Solve for pressure only outside the collider. domainMaskGrid->topologyDifference(colliderGrid); } } pressure = openvdb::tools::poisson::solveWithBoundaryConditionsAndPreconditioner<PCT>( divGrid->tree(), domainMaskGrid->tree(), boundaryOp, parms.outputState, *parms.interrupter, staggered); } // Store the computed pressure grid. parms.pressureGrid = ScalarGrid::create(pressure); parms.pressureGrid->setTransform(velocityGrid.transform().copy()); { std::string name = parms.velocityGrid->getName(); if (!name.empty()) name += "_"; name += "pressure"; parms.pressureGrid->setName(name); } // Compute the gradient of the pressure. openvdb::tools::Gradient<ScalarGrid> gradientOp(static_cast<ScalarGrid&>(*parms.pressureGrid)); typename VectorGridType::Ptr gradientOfPressure = gradientOp.process(); // Compute pressure projection in parallel over leaf nodes. { // Pressure (and therefore the gradient of the pressure) is computed only where // the domain mask is active, but the gradient and velocity grid topologies must match // so that pressure projection can be computed in parallel over leaf nodes (see below). 
        velocityGrid.tree().voxelizeActiveTiles();

        gradientOfPressure->topologyUnion(velocityGrid);
        gradientOfPressure->topologyIntersection(velocityGrid);
        openvdb::tools::pruneInactive(gradientOfPressure->tree());

        std::vector<VectorLeafNodeType*> velNodes;
        velocityGrid.tree().getNodes(velNodes);

        std::vector<const VectorLeafNodeType*> gradNodes;
        gradNodes.reserve(velNodes.size());
        gradientOfPressure->tree().getNodes(gradNodes);

        tbb::parallel_for(tbb::blocked_range<size_t>(0, velNodes.size()),
            PressureProjectionOp<VectorTreeType>(parms, &velNodes[0], &gradNodes[0], staggered));
    }

    if (parms.colliderType != CT_NONE) {
        // When obstacles are present, the Poisson solve returns a divergence-free
        // velocity field in the interior of the input grid, but border voxels
        // need to be adjusted manually to match neighboring collider velocities.
        CorrectCollisionVelocityOp<VectorGridType> op(parms);
        if (parms.colliderType == CT_BBOX) {
            op.correctVelocity(BBoxConstAccessor(parms));
        } else {
            parms.colliderGrid->apply<hvdb::VolumeGridTypes>(op);
        }
    }

    return parms.outputState.success;
}


/// @brief Main solver routine in the case of no collider or a bounding box collider
template<typename VectorGridType, typename BoundaryOpType>
inline bool
removeDivergence(SolverParms& parms, const BoundaryOpType& boundaryOp)
{
    return removeDivergenceWithColliderGrid<VectorGridType, VectorGridType>(parms, boundaryOp);
}


/// @brief Functor to invoke the solver with a collider velocity grid of arbitrary vector type
template<typename VelocityGridType>
struct ColliderDispatchOp
{
    SolverParms* parms;
    bool success;

    explicit ColliderDispatchOp(SolverParms& parms_)
        : parms(&parms_)
        , success(false)
    {}

    template<typename ColliderGridType>
    void operator()(const ColliderGridType&)
    {
        using ColliderAccessorType = GridConstAccessor<ColliderGridType>;
        ColliderBoundaryOp<VelocityGridType, ColliderAccessorType> boundaryOp(*parms);
        success = removeDivergenceWithColliderGrid<VelocityGridType, ColliderGridType>(
            *parms, boundaryOp);
    }
}; // struct ColliderDispatchOp


/// @brief Invoke the solver for collider inputs of various types (or no collider).
template<typename VelocityGridType>
inline bool
processGrid(SolverParms& parms)
{
    bool success = false;
    switch (parms.colliderType) {
        case CT_NONE: // No collider
            success = removeDivergence<VelocityGridType>(
                parms, openvdb::tools::poisson::DirichletBoundaryOp<double>());
            break;
        case CT_BBOX:
            // If collider geometry was supplied, the faces of its bounding box
            // define solid obstacles.
            success = removeDivergence<VelocityGridType>(parms,
                ColliderBoundaryOp<VelocityGridType, BBoxConstAccessor>(parms));
            break;
        case CT_STATIC:
            // If a static collider grid was supplied, its active voxels define solid obstacles.
            success = removeDivergenceWithColliderGrid<VelocityGridType, ColliderMaskGrid>(
                parms, ColliderBoundaryOp<VelocityGridType, ColliderMaskAccessor>(parms));
            break;
        case CT_DYNAMIC:
        {
            // If a dynamic collider grid was supplied, its active values define
            // the velocities of solid obstacles.
            ColliderDispatchOp<VelocityGridType> op(parms);
            success = parms.colliderGrid->apply<hvdb::Vec3GridTypes>(op);
            if (success) success = op.success;
            break;
        }
    }
    return success;
}


/// @brief Return the given VDB primitive's name in the form "N (NAME)",
/// where N is the primitive's index and NAME is the grid name.
/// @todo Use the VdbPrimCIterator method once it is adopted into the HDK.
inline UT_String
getPrimitiveIndexAndName(const GU_PrimVDB* prim)
{
    UT_String result(UT_String::ALWAYS_DEEP);
    if (prim != nullptr) {
        result.itoa(prim->getMapIndex());
        UT_String name = prim->getGridName();
        result += (" (" + name.toStdString() + ")").c_str();
    }
    return result;
}


inline std::string
joinNames(UT_StringArray& names, const char* lastSep = " and ", const char* sep = ", ")
{
    names.sort();
    UT_String joined;
    names.join(sep, lastSep, joined);
    return "VDB" + (((names.size() == 1) ? " " : "s ") + joined.toStdString());
}

} // unnamed namespace


////////////////////////////////////////


OP_ERROR
SOP_OpenVDB_Remove_Divergence::Cache::cookVDBSop(OP_Context& context)
{
    try {
        const GU_Detail* colliderGeo = inputGeo(1);

        const fpreal time = context.getTime();

        hvdb::Interrupter interrupter("Removing divergence");

        SolverParms parms;
        parms.interrupter = &interrupter;
        parms.iterations = (!evalInt("useiterations", 0, time) ? DEFAULT_MAX_ITERATIONS
            : static_cast<int>(evalInt("iterations", 0, time)));
        parms.absoluteError = (!evalInt("usetolerance", 0, time) ? -1.0
            : evalFloat("tolerance", 0, time));
        parms.invertCollider = evalInt("invertcollider", 0, time);

        UT_String groupStr;
        evalString(groupStr, "group", 0, time);
        const GA_PrimitiveGroup* group = matchGroup(*gdp, groupStr.toStdString());

        const bool outputPressure = evalInt("pressure", 0, time);
        const bool useCollider = evalInt("usecollider", 0, time);
        const auto colliderTypeStr = evalStdString("collidertype", time);

        UT_StringArray xformMismatchGridNames, nonuniformGridNames;

        // Retrieve either a collider grid or a collider bounding box
        // (or neither) from the reference input.
        if (useCollider && colliderGeo) {
            if (colliderTypeStr == "bbox") {
                // Use the bounding box of the reference geometry as a collider.
                UT_BoundingBox box;
                colliderGeo->getBBox(&box);
                parms.colliderBBox.min() = openvdb::Vec3d(box.xmin(), box.ymin(), box.zmin());
                parms.colliderBBox.max() = openvdb::Vec3d(box.xmax(), box.ymax(), box.zmax());
                parms.colliderType = CT_BBOX;
            } else {
                // Retrieve the collider grid.
                UT_String colliderStr;
                evalString(colliderStr, "collider", 0, time);
                const GA_PrimitiveGroup* colliderGroup = parsePrimitiveGroups(
                    colliderStr.buffer(), GroupCreator(colliderGeo));
                if (hvdb::VdbPrimCIterator colliderIt =
                    hvdb::VdbPrimCIterator(colliderGeo, colliderGroup))
                {
                    if (colliderIt->getConstGrid().getGridClass() == openvdb::GRID_LEVEL_SET) {
                        // If the collider grid is a level set, extract an interior mask from it.
                        LevelSetMaskOp op;
                        if (hvdb::GEOvdbApply<hvdb::NumericGridTypes>(**colliderIt, op)) {
                            parms.colliderGrid = op.outputGrid;
                        }
                    }
                    if (!parms.colliderGrid) {
                        parms.colliderGrid = colliderIt->getConstGridPtr();
                    }
                    if (parms.colliderGrid
                        && !parms.colliderGrid->constTransform().hasUniformScale())
                    {
                        nonuniformGridNames.append(getPrimitiveIndexAndName(*colliderIt));
                    }
                    if (++colliderIt) {
                        addWarning(SOP_MESSAGE, ("found multiple collider VDBs; using VDB "
                            + getPrimitiveIndexAndName(*colliderIt).toStdString()).c_str());
                    }
                }
                if (!parms.colliderGrid) {
                    if (colliderStr.isstring()) {
                        addError(SOP_MESSAGE,
                            ("collider \"" + colliderStr.toStdString() + "\" not found").c_str());
                    } else {
                        addError(SOP_MESSAGE, "collider VDB not found");
                    }
                    return error();
                }
                if (parms.colliderGrid->empty()) {
                    // An empty collider grid was found; ignore it.
                    parms.colliderGrid.reset();
                }
                if (parms.colliderGrid) {
                    const bool isVec3Grid =
                        (3 == UTvdbGetGridTupleSize(UTvdbGetGridType(*parms.colliderGrid)));
                    if (isVec3Grid && (colliderTypeStr == "dynamic")) {
                        // The collider grid is vector-valued. Its active values
                        // are the velocities of moving obstacles.
                        parms.colliderType = CT_DYNAMIC;
                    } else {
                        // The active voxels of the collider grid define stationary,
                        // solid obstacles. Extract a topology mask of those voxels.
                        parms.colliderType = CT_STATIC;
                        ColliderMaskOp op;
                        op.mask = ColliderMaskGrid::create();
                        parms.colliderGrid->apply<hvdb::AllGridTypes>(op);
                        parms.colliderGrid = op.mask;
                    }
                }
            }
        }

        int numGridsProcessed = 0;
        std::ostringstream infoStrm;

        // Main loop
        for (hvdb::VdbPrimIterator vdbIt(gdp, group); vdbIt; ++vdbIt) {
            if (interrupter.wasInterrupted()) break;

            const UT_VDBType velocityType = vdbIt->getStorageType();
            if (velocityType == UT_VDB_VEC3F || velocityType == UT_VDB_VEC3D) {
                // Found a vector-valued input grid.
                ++numGridsProcessed;

                vdbIt->makeGridUnique(); // ensure that the grid's tree is not shared
                parms.velocityGrid = vdbIt->getGridPtr();

                const openvdb::math::Transform& xform = parms.velocityGrid->constTransform();
                if (!xform.hasUniformScale()) {
                    nonuniformGridNames.append(getPrimitiveIndexAndName(*vdbIt));
                }
                if (parms.colliderGrid && (parms.colliderGrid->constTransform() != xform)) {
                    // The velocity and collider grid transforms need to match.
                    xformMismatchGridNames.append(getPrimitiveIndexAndName(*vdbIt));
                }

                // Remove divergence.
                bool success = false;
                if (velocityType == UT_VDB_VEC3F) {
                    success = processGrid<openvdb::Vec3SGrid>(parms);
                } else if (velocityType == UT_VDB_VEC3D) {
                    success = processGrid<openvdb::Vec3DGrid>(parms);
                }

                if (!success) {
                    std::ostringstream errStrm;
                    errStrm << "solver failed to converge for VDB "
                        << getPrimitiveIndexAndName(*vdbIt).c_str()
                        << " with error " << parms.outputState.absoluteError;
                    addWarning(SOP_MESSAGE, errStrm.str().c_str());
                } else {
                    if (outputPressure && parms.pressureGrid) {
                        hvdb::createVdbPrimitive(*gdp, parms.pressureGrid);
                    }

                    if (numGridsProcessed > 1) infoStrm << "\n";
                    infoStrm << "solver converged for VDB "
                        << getPrimitiveIndexAndName(*vdbIt).c_str()
                        << " in " << parms.outputState.iterations << " iteration"
                        << (parms.outputState.iterations == 1 ? "" : "s")
                        << " with error " << parms.outputState.absoluteError;
                }
            }
            parms.velocityGrid.reset();
        }

        if (!interrupter.wasInterrupted()) {
            // Report various issues.
            if (numGridsProcessed == 0) {
                addWarning(SOP_MESSAGE, "found no floating-point vector VDBs");
            } else {
                if (nonuniformGridNames.size() > 0) {
                    const std::string names = joinNames(nonuniformGridNames);
                    addWarning(SOP_MESSAGE, ((names
                        + ((nonuniformGridNames.size() == 1) ? " has" : " have"))
                        + " nonuniform voxels and should be resampled").c_str());
                }
                if (xformMismatchGridNames.size() > 0) {
                    const std::string names = joinNames(xformMismatchGridNames, " or ");
                    addWarning(SOP_MESSAGE,
                        ("vector field and collider transforms don't match for " + names).c_str());
                }
                const std::string info = infoStrm.str();
                if (!info.empty()) {
                    addMessage(SOP_MESSAGE, info.c_str());
                }
            }
        }
    } catch (std::exception& e) {
        addError(SOP_MESSAGE, e.what());
    }
    return error();
}
40,862
C++
38.905273
100
0.616661
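The SOP above drives OpenVDB's Poisson solver through Houdini-specific plumbing (SolverParms, collider dispatch, primitive iteration). The sketch below shows the underlying projection step on its own for the no-collider case, using only stock OpenVDB tools. It is an illustration, not the node's actual code path: the helper name projectVelocity is mine, the exact poisson::solve() overload and value types are assumed, and the voxel-size scaling that the SOP handles is glossed over.

// Hedged sketch: pressure projection of a collocated Vec3s velocity grid with
// the default (zero Dirichlet) exterior boundary, roughly what the CT_NONE case does.
#include <openvdb/openvdb.h>
#include <openvdb/tools/GridOperators.h>  // tools::divergence(), tools::gradient()
#include <openvdb/tools/PoissonSolver.h>  // tools::poisson::solve()

openvdb::Vec3SGrid::Ptr
projectVelocity(const openvdb::Vec3SGrid& velocity)  // hypothetical helper name
{
    // Right-hand side: b = div(v).
    openvdb::FloatGrid::Ptr div = openvdb::tools::divergence(velocity);

    // Solve lap(p) = div(v) for the pressure p.
    openvdb::math::pcg::State state = openvdb::math::pcg::terminationDefaults<float>();
    state.iterations = 100;
    openvdb::FloatTree::Ptr pressure = openvdb::tools::poisson::solve(div->tree(), state);

    // Wrap the pressure tree in a grid so that gradient() sees the right transform.
    openvdb::FloatGrid::Ptr pressureGrid = openvdb::FloatGrid::create(pressure);
    pressureGrid->setTransform(velocity.transform().copy());
    openvdb::Vec3SGrid::Ptr gradP = openvdb::tools::gradient(*pressureGrid);

    // v' = v - grad(p), evaluated on the velocity grid's active voxels.
    openvdb::Vec3SGrid::Ptr result = velocity.deepCopy();
    openvdb::Vec3SGrid::ConstAccessor gradAcc = gradP->getConstAccessor();
    for (openvdb::Vec3SGrid::ValueOnIter it = result->beginValueOn(); it; ++it) {
        it.setValue(it.getValue() - gradAcc.getValue(it.getCoord()));
    }
    return result;
}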
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/UT_VDBUtils.cc
// Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0

/*
 * Copyright (c) Side Effects Software Inc.
 *
 * Produced by:
 *      Side Effects Software Inc
 *      477 Richmond Street West
 *      Toronto, Ontario
 *      Canada M5V 3E7
 *      416-504-9876
 *
 * NAME: UT_VDBUtils.h (UT Library, C++)
 *
 * COMMENTS:
 */

#include "UT_VDBUtils.h"

namespace openvdb_houdini {
// empty
}
427
C++
16.833333
48
0.622951
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Remap.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Remap.cc /// /// @author FX R&D OpenVDB team #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <openvdb/math/Math.h> // Tolerance and isApproxEqual #include <openvdb/tools/ValueTransformer.h> #include <UT/UT_Ramp.h> #include <GU/GU_Detail.h> #include <PRM/PRM_Parm.h> #include <tbb/blocked_range.h> #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <algorithm> #include <cmath> #include <map> #include <string> #include <sstream> #include <vector> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; //////////////////////////////////////// // Local Utility Methods namespace { template<typename T> inline T minValue(const T a, const T b) { return std::min(a, b); } template<typename T> inline T maxValue(const T a, const T b) { return std::max(a, b); } template<typename T> inline openvdb::math::Vec3<T> minValue(const openvdb::math::Vec3<T>& a, const openvdb::math::Vec3<T>& b) { return openvdb::math::minComponent(a, b); } template<typename T> inline openvdb::math::Vec3<T> maxValue(const openvdb::math::Vec3<T>& a, const openvdb::math::Vec3<T>& b) { return openvdb::math::maxComponent(a, b); } template<typename T> inline T minComponent(const T s) { return s; } template<typename T> inline T maxComponent(const T s) { return s; } template<typename T> inline T minComponent(const openvdb::math::Vec3<T>& v) { return minValue(v[0], minValue(v[1], v[2])); } template<typename T> inline T maxComponent(const openvdb::math::Vec3<T>& v) { return maxValue(v[0], maxValue(v[1], v[2])); } //////////////////////////////////////// template<typename NodeType> struct NodeMinMax { using ValueType = typename NodeType::ValueType; NodeMinMax(const std::vector<const NodeType*>& nodes, ValueType background) : mNodes(&nodes[0]), mBackground(background), mMin(background), mMax(background) {} NodeMinMax(NodeMinMax& other, tbb::split) : mNodes(other.mNodes), mBackground(other.mBackground), mMin(mBackground), mMax(mBackground) {} void join(NodeMinMax& other) { mMin = minValue(other.mMin, mMin); mMax = maxValue(other.mMax, mMax); } void operator()(const tbb::blocked_range<size_t>& range) { ValueType minTmp(mMin), maxTmp(mMax); for (size_t n = range.begin(), N = range.end(); n < N; ++n) { const NodeType& node = *mNodes[n]; for (typename NodeType::ValueAllCIter it = node.cbeginValueAll(); it; ++it) { if (node.isChildMaskOff(it.pos())) { const ValueType val = *it; minTmp = minValue(minTmp, val); maxTmp = maxValue(maxTmp, val); } } } mMin = minValue(minTmp, mMin); mMax = maxValue(maxTmp, mMax); } NodeType const * const * const mNodes; ValueType mBackground, mMin, mMax; }; template<typename NodeType> struct Deactivate { using ValueType = typename NodeType::ValueType; Deactivate(std::vector<NodeType*>& nodes, ValueType background) : mNodes(&nodes[0]), mBackground(background) {} void operator()(const tbb::blocked_range<size_t>& range) const { const ValueType background(mBackground), delta = openvdb::math::Tolerance<ValueType>::value(); for (size_t n = range.begin(), N = range.end(); n < N; ++n) { for (typename NodeType::ValueOnIter it = mNodes[n]->beginValueOn(); it; ++it) { if (openvdb::math::isApproxEqual(background, *it, delta)) { it.setValueOff(); } } } } NodeType * const * const mNodes; ValueType mBackground; }; template<typename TreeType> void evalMinMax(const TreeType& tree, typename TreeType::ValueType& minVal, 
typename TreeType::ValueType& maxVal) { minVal = tree.background(); maxVal = tree.background(); { // eval voxels using LeafNodeType = typename TreeType::LeafNodeType; std::vector<const LeafNodeType*> nodes; tree.getNodes(nodes); NodeMinMax<LeafNodeType> op(nodes, tree.background()); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), op); minVal = minValue(minVal, op.mMin); maxVal = maxValue(maxVal, op.mMax); } { // eval first tiles using RootNodeType = typename TreeType::RootNodeType; using NodeChainType = typename RootNodeType::NodeChainType; using InternalNodeType = typename NodeChainType::template Get<1>; std::vector<const InternalNodeType*> nodes; tree.getNodes(nodes); NodeMinMax<InternalNodeType> op(nodes, tree.background()); tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), op); minVal = minValue(minVal, op.mMin); maxVal = maxValue(maxVal, op.mMax); } { // eval remaining tiles typename TreeType::ValueType minTmp(minVal), maxTmp(maxVal); typename TreeType::ValueAllCIter it(tree); it.setMaxDepth(TreeType::ValueAllCIter::LEAF_DEPTH - 2); for ( ; it; ++it) { const typename TreeType::ValueType val = *it; minTmp = minValue(minTmp, val); maxTmp = maxValue(maxTmp, val); } minVal = minValue(minVal, minTmp); maxVal = maxValue(maxVal, maxTmp); } } template<typename TreeType> void deactivateBackgroundValues(TreeType& tree) { { // eval voxels using LeafNodeType = typename TreeType::LeafNodeType; std::vector<LeafNodeType*> nodes; tree.getNodes(nodes); Deactivate<LeafNodeType> op(nodes, tree.background()); tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()), op); } { // eval first tiles using RootNodeType = typename TreeType::RootNodeType; using NodeChainType = typename RootNodeType::NodeChainType; using InternalNodeType = typename NodeChainType::template Get<1>; std::vector<InternalNodeType*> nodes; tree.getNodes(nodes); Deactivate<InternalNodeType> op(nodes, tree.background()); tbb::parallel_for(tbb::blocked_range<size_t>(0, nodes.size()), op); } { // eval remaining tiles using ValueType = typename TreeType::ValueType; const ValueType background(tree.background()), delta = openvdb::math::Tolerance<ValueType>::value(); typename TreeType::ValueOnIter it(tree); it.setMaxDepth(TreeType::ValueAllCIter::LEAF_DEPTH - 2); for ( ; it; ++it) { if (openvdb::math::isApproxEqual(background, *it, delta)) { it.setValueOff(); } } } } //////////////////////////////////////// struct RemapGridValues { enum Extrapolation { CLAMP, PRESERVE, EXTRAPOLATE }; RemapGridValues(Extrapolation belowExt, Extrapolation aboveExt, UT_Ramp& ramp, const fpreal inMin, const fpreal inMax, const fpreal outMin, const fpreal outMax, bool deactivate, UT_ErrorManager* errorManager = nullptr) : mBelowExtrapolation(belowExt) , mAboveExtrapolation(aboveExt) , mRamp(&ramp) , mErrorManager(errorManager) , mPrimitiveIndex(0) , mPrimitiveName() , mInfo("Remapped grids: (first range shows actual min/max values)\n") , mInMin(inMin) , mInMax(inMax) , mOutMin(outMin) , mOutMax(outMax) , mDeactivate(deactivate) { mRamp->ensureRampIsBuilt(); } ~RemapGridValues() { if (mErrorManager) { mErrorManager->addMessage(SOP_OPTYPE_NAME, SOP_MESSAGE, mInfo.c_str()); } } void setPrimitiveIndex(int i) { mPrimitiveIndex = i; } void setPrimitiveName(const std::string& name) { mPrimitiveName = name; } template<typename GridType> void operator()(GridType& grid) { using ValueType = typename GridType::ValueType; using LeafNodeType = typename GridType::TreeType::LeafNodeType; std::vector<LeafNodeType*> leafnodes; 
grid.tree().getNodes(leafnodes); ValueType inputMin, inputMax; evalMinMax(grid.tree(), inputMin, inputMax); ValueTransform<GridType> op(*mRamp, leafnodes, mBelowExtrapolation, mAboveExtrapolation, mInMin, mInMax, mOutMin, mOutMax); // update voxels tbb::parallel_for(tbb::blocked_range<size_t>(0, leafnodes.size()), op); // update tiles typename GridType::ValueAllIter it = grid.beginValueAll(); it.setMaxDepth(GridType::ValueAllIter::LEAF_DEPTH - 1); openvdb::tools::foreach(it, op, true); // update background value grid.tree().root().setBackground(op.map(grid.background()), /*updateChildNodes=*/false); grid.setGridClass(openvdb::GRID_UNKNOWN); ValueType outputMin, outputMax; evalMinMax(grid.tree(), outputMin, outputMax); size_t activeVoxelDelta = size_t(grid.tree().activeVoxelCount()); if (mDeactivate) { deactivateBackgroundValues(grid.tree()); activeVoxelDelta -= size_t(grid.tree().activeVoxelCount()); } { // log std::stringstream msg; msg << " (" << mPrimitiveIndex << ") '" << mPrimitiveName << "'" << " [" << minComponent(inputMin) << ", " << maxComponent(inputMax) << "]" << " -> [" << minComponent(outputMin) << ", " << maxComponent(outputMax) << "]"; if (mDeactivate && activeVoxelDelta > 0) { msg << ", deactivated " << activeVoxelDelta << " voxels."; } msg << "\n"; mInfo += msg.str(); } } private: template<typename GridType> struct ValueTransform { using LeafNodeType = typename GridType::TreeType::LeafNodeType; ValueTransform(const UT_Ramp& utramp, std::vector<LeafNodeType*>& leafnodes, Extrapolation belowExt, Extrapolation aboveExt, const fpreal inMin, const fpreal inMax, const fpreal outMin, const fpreal outMax) : ramp(&utramp) , nodes(&leafnodes[0]) , belowExtrapolation(belowExt) , aboveExtrapolation(aboveExt) , xMin(inMin) , xScale((inMax - inMin)) , yMin(outMin) , yScale((outMax - outMin)) { xScale = std::abs(xScale) > fpreal(0.0) ? 
fpreal(1.0) / xScale : fpreal(0.0); } inline void operator()(const tbb::blocked_range<size_t>& range) const { for (size_t n = range.begin(), N = range.end(); n < N; ++n) { typename GridType::ValueType * data = nodes[n]->buffer().data(); for (size_t i = 0, I = LeafNodeType::SIZE; i < I; ++i) { data[i] = map(data[i]); } } } inline void operator()(const typename GridType::ValueAllIter& it) const { it.setValue(map(*it)); } template<typename T> inline T map(const T s) const { fpreal pos = (fpreal(s) - xMin) * xScale; if (pos < fpreal(0.0)) { // below (normalized) minimum if (belowExtrapolation == PRESERVE) return s; if (belowExtrapolation == EXTRAPOLATE) return T((pos * xScale) * yScale); pos = std::max(pos, fpreal(0.0)); // clamp } if (pos > fpreal(1.0)) { // above (normalized) maximum if (aboveExtrapolation == PRESERVE) return s; if (aboveExtrapolation == EXTRAPOLATE) return T((pos * xScale) * yScale); pos = std::min(pos, fpreal(1.0)); //clamp } float values[4] = { 0.0f }; ramp->rampLookup(pos, values); return T(yMin + (values[0] * yScale)); } template<typename T> inline openvdb::math::Vec3<T> map(const openvdb::math::Vec3<T>& v) const { openvdb::math::Vec3<T> out; out[0] = map(v[0]); out[1] = map(v[1]); out[2] = map(v[2]); return out; } UT_Ramp const * const ramp; LeafNodeType * const * const nodes; const Extrapolation belowExtrapolation, aboveExtrapolation; fpreal xMin, xScale, yMin, yScale; }; // struct ValueTransform ////////// Extrapolation mBelowExtrapolation, mAboveExtrapolation; UT_Ramp * const mRamp; UT_ErrorManager * const mErrorManager; int mPrimitiveIndex; std::string mPrimitiveName, mInfo; const fpreal mInMin, mInMax, mOutMin, mOutMax; const bool mDeactivate; }; // struct RemapGridValues } // unnamed namespace //////////////////////////////////////// // SOP Implementation struct SOP_OpenVDB_Remap: public hvdb::SOP_NodeVDB { SOP_OpenVDB_Remap(OP_Network*, const char* name, OP_Operator*); static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); int sortInputRange(); int sortOutputRange(); class Cache: public SOP_VDBCacheOptions { public: void evalRamp(UT_Ramp&, fpreal time); protected: OP_ERROR cookVDBSop(OP_Context&) override; }; // class Cache }; int inputRangeCB(void*, int, float, const PRM_Template*); int outputRangeCB(void*, int, float, const PRM_Template*); int inputRangeCB(void* data, int /*idx*/, float /*time*/, const PRM_Template*) { SOP_OpenVDB_Remap* sop = static_cast<SOP_OpenVDB_Remap*>(data); if (sop == nullptr) return 0; return sop->sortInputRange(); } int outputRangeCB(void* data, int /*idx*/, float /*time*/, const PRM_Template*) { SOP_OpenVDB_Remap* sop = static_cast<SOP_OpenVDB_Remap*>(data); if (sop == nullptr) return 0; return sop->sortOutputRange(); } int SOP_OpenVDB_Remap::sortInputRange() { const fpreal inMin = evalFloat("inrange", 0, 0); const fpreal inMax = evalFloat("inrange", 1, 0); if (inMin > inMax) { setFloat("inrange", 0, 0, inMax); setFloat("inrange", 1, 0, inMin); } return 1; } int SOP_OpenVDB_Remap::sortOutputRange() { const fpreal outMin = evalFloat("outrange", 0, 0); const fpreal outMax = evalFloat("outrange", 1, 0); if (outMin > outMax) { setFloat("outrange", 0, 0, outMax); setFloat("outrange", 1, 0, outMin); } return 1; } void SOP_OpenVDB_Remap::Cache::evalRamp(UT_Ramp& ramp, fpreal time) { const auto rampStr = evalStdString("function", time); UT_IStream strm(rampStr.c_str(), rampStr.size(), UT_ISTREAM_ASCII); ramp.load(strm); } void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; 
parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenu) .setTooltip("Specify a subset of the input grids.") .setDocumentation( "A subset of the input VDBs to be processed" " (see [specifying volumes|/model/volumes#group])")); { // Extrapolation char const * const items[] = { "clamp", "Clamp", "preserve", "Preserve", "extrapolate", "Extrapolate", nullptr }; parms.add(hutil::ParmFactory(PRM_ORD, "below", "Below Minimum") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setTooltip( "Specify how to handle input values below the input range minimum:" " either by clamping to the output minimum (Clamp)," " leaving out-of-range values intact (Preserve)," " or extrapolating linearly from the output minimum (Extrapolate).") .setDocumentation( "How to handle input values below the input range minimum\n\n" "Clamp:\n" " Clamp values to the output minimum.\n" "Preserve:\n" " Leave out-of-range values intact.\n" "Extrapolate:\n" " Extrapolate values linearly from the output minimum.\n")); parms.add(hutil::ParmFactory(PRM_ORD, "above", "Above Maximum") .setDefault(PRMzeroDefaults) .setChoiceListItems(PRM_CHOICELIST_SINGLE, items) .setTooltip( "Specify how to handle input values above the input range maximum:" " either by clamping to the input maximum (Clamp)," " leaving out-of-range values intact (Preserve)," " or extrapolating linearly from the input maximum (Extrapolate).") .setDocumentation( "How to handle output values above the input range maximum\n\n" "Clamp:\n" " Clamp values to the input maximum.\n" "Preserve:\n" " Leave out-of-range values intact.\n" "Extrapolate:\n" " Extrapolate values linearly from the input maximum.\n")); } std::vector<fpreal> defaultRange; defaultRange.push_back(fpreal(0.0)); defaultRange.push_back(fpreal(1.0)); parms.add(hutil::ParmFactory(PRM_FLT_J, "inrange", "Input Range") .setDefault(defaultRange) .setVectorSize(2) .setTooltip("Input min/max value range") .setCallbackFunc(&inputRangeCB)); parms.add(hutil::ParmFactory(PRM_FLT_J, "outrange", "Output Range") .setDefault(defaultRange) .setVectorSize(2) .setTooltip("Output min/max value range") .setCallbackFunc(&outputRangeCB)); { std::map<std::string, std::string> rampSpare; rampSpare[PRM_SpareData::getFloatRampDefaultToken()] = "1pos ( 0.0 ) 1value ( 0.0 ) 1interp ( linear ) " "2pos ( 1.0 ) 2value ( 1.0 ) 2interp ( linear )"; rampSpare[PRM_SpareData::getRampShowControlsDefaultToken()] = "0"; parms.add(hutil::ParmFactory(PRM_MULTITYPE_RAMP_FLT, "function", "Transfer Function") .setDefault(PRMtwoDefaults) .setSpareData(rampSpare) .setTooltip("X Axis: 0 = input range minimum, 1 = input range maximum.\n" "Y Axis: 0 = output range minimum, 1 = output range maximum.\n") .setDocumentation( "Map values through a transfer function where _x_ = 0 corresponds to" " the input range minimum, _x_ = 1 to the input range maximum," " _y_ = 0 to the output range minimum, and _y_ = 1 to the" " output range maximum.")); } parms.add(hutil::ParmFactory(PRM_TOGGLE, "deactivate", "Deactivate Background Voxels") .setTooltip("Deactivate voxels with values equal to the remapped background value.")); hvdb::OpenVDBOpFactory("VDB Remap", SOP_OpenVDB_Remap::factory, parms, *table) .setNativeName("") .addInput("VDB Grids") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Remap::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Perform a remapping of the voxel values in a VDB volume.\"\"\"\n\ \n\ @overview\n\ \n\ This node remaps voxel 
values to a new range, optionally through\n\ a user-specified transfer function.\n\ \n\ @related\n\ - [Node:sop/volumevop]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } OP_Node* SOP_OpenVDB_Remap::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Remap(net, name, op); } SOP_OpenVDB_Remap::SOP_OpenVDB_Remap(OP_Network* net, const char* name, OP_Operator* op) : hvdb::SOP_NodeVDB(net, name, op) { } OP_ERROR SOP_OpenVDB_Remap::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); hvdb::Interrupter boss("Remapping values"); const fpreal inMin = evalFloat("inrange", 0, time); const fpreal inMax = evalFloat("inrange", 1, time); const fpreal outMin = evalFloat("outrange", 0, time); const fpreal outMax = evalFloat("outrange", 1, time); const bool deactivate = bool(evalInt("deactivate", 0, time)); RemapGridValues::Extrapolation belowExtrapolation = RemapGridValues::CLAMP; RemapGridValues::Extrapolation aboveExtrapolation = RemapGridValues::CLAMP; auto extrapolation = evalInt("below", 0, time); if (extrapolation == 1) belowExtrapolation = RemapGridValues::PRESERVE; else if (extrapolation == 2) belowExtrapolation = RemapGridValues::EXTRAPOLATE; extrapolation = evalInt("above", 0, time); if (extrapolation == 1) aboveExtrapolation = RemapGridValues::PRESERVE; else if (extrapolation == 2) aboveExtrapolation = RemapGridValues::EXTRAPOLATE; const GA_PrimitiveGroup* group = matchGroup(*gdp, evalStdString("group", time)); size_t vdbPrimCount = 0; UT_Ramp ramp; evalRamp(ramp, time); RemapGridValues remap(belowExtrapolation, aboveExtrapolation, ramp, inMin, inMax, outMin, outMax, deactivate, UTgetErrorManager()); for (hvdb::VdbPrimIterator it(gdp, group); it; ++it) { if (boss.wasInterrupted()) break; remap.setPrimitiveName(it.getPrimitiveName().toStdString()); remap.setPrimitiveIndex(int(it.getIndex())); hvdb::GEOvdbApply<hvdb::NumericGridTypes::Append<hvdb::Vec3GridTypes>>(**it, remap); GU_PrimVDB* vdbPrim = *it; const GEO_VolumeOptions& visOps = vdbPrim->getVisOptions(); vdbPrim->setVisualization(GEO_VOLUMEVIS_SMOKE , visOps.myIso, visOps.myDensity); ++vdbPrimCount; } if (vdbPrimCount == 0) { addWarning(SOP_MESSAGE, "Did not find any VDBs."); } } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
22,179
C++
31.285298
100
0.603048
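Stripped of the UT_Ramp lookup and the grid plumbing, the value mapping above boils down to normalizing into the input range, applying the below/above extrapolation choice, and rescaling into the output range. The standalone function below is a simplified restatement of that rule with an identity ramp; the name remapValue is mine, and note that the SOP's own EXTRAPOLATE branch applies an extra scale factor rather than the plain linear extension shown here.

// Simplified sketch of the remap rule (identity transfer function assumed).
enum class Extrapolation { Clamp, Preserve, Extrapolate };

inline float
remapValue(float s, float inMin, float inMax, float outMin, float outMax,
    Extrapolation below = Extrapolation::Clamp, Extrapolation above = Extrapolation::Clamp)
{
    const float xScale = (inMax != inMin) ? 1.0f / (inMax - inMin) : 0.0f;
    float pos = (s - inMin) * xScale; // normalized position, in [0,1] when s is in range

    if (pos < 0.0f) { // below the input range minimum
        if (below == Extrapolation::Preserve) return s;
        if (below == Extrapolation::Extrapolate) return outMin + pos * (outMax - outMin);
        pos = 0.0f; // clamp
    } else if (pos > 1.0f) { // above the input range maximum
        if (above == Extrapolation::Preserve) return s;
        if (above == Extrapolation::Extrapolate) return outMin + pos * (outMax - outMin);
        pos = 1.0f; // clamp
    }
    // The SOP evaluates the user's ramp at 'pos' here; with an identity ramp
    // the result is a straight linear rescale into the output range.
    return outMin + pos * (outMax - outMin);
}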
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/GeometryUtil.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file GeometryUtil.h /// @author FX R&D Simulation team /// @brief Utility methods and tools for geometry processing #ifndef OPENVDB_HOUDINI_GEOMETRY_UTIL_HAS_BEEN_INCLUDED #define OPENVDB_HOUDINI_GEOMETRY_UTIL_HAS_BEEN_INCLUDED #include <openvdb/openvdb.h> #include <openvdb/tools/MeshToVolume.h> // for openvdb::tools::MeshToVoxelEdgeData #include <openvdb/tree/LeafManager.h> #include <openvdb/util/Util.h> // for openvdb::util::COORD_OFFSETS #include <GU/GU_Detail.h> #include <algorithm> // for std::max/min() #include <memory> #include <string> #include <vector> class GA_SplittableRange; class OBJ_Camera; class OP_Context; class OP_Node; #ifdef SESI_OPENVDB #ifdef OPENVDB_HOUDINI_API #undef OPENVDB_HOUDINI_API #define OPENVDB_HOUDINI_API #endif #endif namespace openvdb_houdini { class Interrupter; /// Add geometry to the given detail to indicate the extents of a frustum transform. OPENVDB_HOUDINI_API void drawFrustum(GU_Detail&, const openvdb::math::Transform&, const UT_Vector3* boxColor, const UT_Vector3* tickColor, bool shaded, bool drawTicks = true); /// Construct a frustum transform from a Houdini camera. OPENVDB_HOUDINI_API openvdb::math::Transform::Ptr frustumTransformFromCamera( OP_Node&, OP_Context&, OBJ_Camera&, float offset, float nearPlaneDist, float farPlaneDist, float voxelDepthSize = 1.0, int voxelCountX = 100); //////////////////////////////////////// /// @brief Return @c true if the point at the given offset is referenced /// by primitives from a certain primitive group. OPENVDB_HOUDINI_API bool pointInPrimGroup(GA_Offset ptnOffset, GU_Detail&, const GA_PrimitiveGroup&); //////////////////////////////////////// /// @brief Convert geometry to quads and triangles. /// @return a pointer to a new GU_Detail object if the geometry was /// converted or subdivided, otherwise a null pointer OPENVDB_HOUDINI_API std::unique_ptr<GU_Detail> convertGeometry(const GU_Detail&, std::string& warning, Interrupter*); //////////////////////////////////////// /// TBB body object for threaded world to voxel space transformation and copy of points class OPENVDB_HOUDINI_API TransformOp { public: TransformOp(GU_Detail const * const gdp, const openvdb::math::Transform& transform, std::vector<openvdb::Vec3s>& pointList); void operator()(const GA_SplittableRange&) const; private: GU_Detail const * const mGdp; const openvdb::math::Transform& mTransform; std::vector<openvdb::Vec3s>* const mPointList; }; //////////////////////////////////////// /// @brief TBB body object for threaded primitive copy /// @details Produces a primitive-vertex index list. class OPENVDB_HOUDINI_API PrimCpyOp { public: PrimCpyOp(GU_Detail const * const gdp, std::vector<openvdb::Vec4I>& primList); void operator()(const GA_SplittableRange&) const; private: GU_Detail const * const mGdp; std::vector<openvdb::Vec4I>* const mPrimList; }; //////////////////////////////////////// /// @brief TBB body object for threaded vertex normal generation /// @details Averages face normals from all similarly oriented primitives, /// that share the same vertex-point, to maintain sharp features. 
class OPENVDB_HOUDINI_API VertexNormalOp { public: VertexNormalOp(GU_Detail&, const GA_PrimitiveGroup* interiorPrims=nullptr, float angle=0.7f); void operator()(const GA_SplittableRange&) const; private: bool isInteriorPrim(GA_Offset primOffset) const { return mInteriorPrims && mInteriorPrims->containsIndex( mDetail.primitiveIndex(primOffset)); } const GU_Detail& mDetail; const GA_PrimitiveGroup* mInteriorPrims; GA_RWHandleV3 mNormalHandle; const float mAngle; }; //////////////////////////////////////// /// TBB body object for threaded sharp feature construction class OPENVDB_HOUDINI_API SharpenFeaturesOp { public: using EdgeData = openvdb::tools::MeshToVoxelEdgeData; SharpenFeaturesOp(GU_Detail& meshGeo, const GU_Detail& refGeo, EdgeData& edgeData, const openvdb::math::Transform& xform, const GA_PrimitiveGroup* surfacePrims = nullptr, const openvdb::BoolTree* mask = nullptr); void operator()(const GA_SplittableRange&) const; private: GU_Detail& mMeshGeo; const GU_Detail& mRefGeo; EdgeData& mEdgeData; const openvdb::math::Transform& mXForm; const GA_PrimitiveGroup* mSurfacePrims; const openvdb::BoolTree* mMaskTree; }; //////////////////////////////////////// /// TBB body object for threaded sharp feature construction template<typename IndexTreeType, typename BoolTreeType> class GenAdaptivityMaskOp { public: using BoolLeafManager = openvdb::tree::LeafManager<BoolTreeType>; GenAdaptivityMaskOp(const GU_Detail& refGeo, const IndexTreeType& indexTree, BoolLeafManager&, float edgetolerance = 0.0); void run(bool threaded = true); void operator()(const tbb::blocked_range<size_t>&) const; private: const GU_Detail& mRefGeo; const IndexTreeType& mIndexTree; BoolLeafManager& mLeafs; float mEdgeTolerance; }; template<typename IndexTreeType, typename BoolTreeType> GenAdaptivityMaskOp<IndexTreeType, BoolTreeType>::GenAdaptivityMaskOp(const GU_Detail& refGeo, const IndexTreeType& indexTree, BoolLeafManager& leafMgr, float edgetolerance) : mRefGeo(refGeo) , mIndexTree(indexTree) , mLeafs(leafMgr) , mEdgeTolerance(edgetolerance) { mEdgeTolerance = std::max(0.0f, mEdgeTolerance); mEdgeTolerance = std::min(1.0f, mEdgeTolerance); } template<typename IndexTreeType, typename BoolTreeType> void GenAdaptivityMaskOp<IndexTreeType, BoolTreeType>::run(bool threaded) { if (threaded) { tbb::parallel_for(mLeafs.getRange(), *this); } else { (*this)(mLeafs.getRange()); } } template<typename IndexTreeType, typename BoolTreeType> void GenAdaptivityMaskOp<IndexTreeType, BoolTreeType>::operator()( const tbb::blocked_range<size_t>& range) const { using IndexAccessorType = typename openvdb::tree::ValueAccessor<const IndexTreeType>; IndexAccessorType idxAcc(mIndexTree); UT_Vector3 tmpN, normal; GA_Offset primOffset; int tmpIdx; openvdb::Coord ijk, nijk; typename BoolTreeType::LeafNodeType::ValueOnIter iter; for (size_t n = range.begin(); n < range.end(); ++n) { iter = mLeafs.leaf(n).beginValueOn(); for (; iter; ++iter) { ijk = iter.getCoord(); bool edgeVoxel = false; int idx = idxAcc.getValue(ijk); primOffset = mRefGeo.primitiveOffset(idx); normal = mRefGeo.getGEOPrimitive(primOffset)->computeNormal(); for (size_t i = 0; i < 18; ++i) { nijk = ijk + openvdb::util::COORD_OFFSETS[i]; if (idxAcc.probeValue(nijk, tmpIdx) && tmpIdx != idx) { primOffset = mRefGeo.primitiveOffset(tmpIdx); tmpN = mRefGeo.getGEOPrimitive(primOffset)->computeNormal(); if (normal.dot(tmpN) < mEdgeTolerance) { edgeVoxel = true; break; } } } if (!edgeVoxel) iter.setValueOff(); } } } } // namespace openvdb_houdini //////////////////////////////////////// 
#endif // OPENVDB_HOUDINI_GEOMETRY_UTIL_HAS_BEEN_INCLUDED
7,516
C
26.636029
97
0.66711
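GenAdaptivityMaskOp above keeps a mask voxel active only where neighboring voxels index reference primitives whose normals disagree. The fragment below isolates the neighbor walk it relies on, using the same first 18 entries of openvdb::util::COORD_OFFSETS (the face and edge neighbors); hasDifferentNeighbor is an illustrative name, and the real operator additionally compares primitive normals against the edge tolerance.

// Sketch: detect whether any of a voxel's 18 face/edge neighbors maps to a
// different primitive index, as the adaptivity-mask operator does.
#include <openvdb/openvdb.h>
#include <openvdb/util/Util.h>  // openvdb::util::COORD_OFFSETS

inline bool
hasDifferentNeighbor(const openvdb::Int32Tree& indexTree, const openvdb::Coord& ijk)
{
    openvdb::tree::ValueAccessor<const openvdb::Int32Tree> acc(indexTree);
    const openvdb::Int32 idx = acc.getValue(ijk);
    openvdb::Int32 neighborIdx = 0;
    for (size_t i = 0; i < 18; ++i) {
        const openvdb::Coord nijk = ijk + openvdb::util::COORD_OFFSETS[i];
        // probeValue() returns true only for active values, matching the operator's test.
        if (acc.probeValue(nijk, neighborIdx) && neighborIdx != idx) {
            return true; // this voxel straddles two different reference primitives
        }
    }
    return false;
}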
NVIDIA-Omniverse/ext-openvdb/openvdb_houdini/openvdb_houdini/SOP_OpenVDB_Vector_Split.cc
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 // /// @file SOP_OpenVDB_Vector_Split.cc /// /// @author FX R&D OpenVDB team /// /// @brief Split vector grids into component scalar grids. #include <houdini_utils/ParmFactory.h> #include <openvdb_houdini/Utils.h> #include <openvdb_houdini/SOP_NodeVDB.h> #include <UT/UT_Interrupt.h> #include <set> #include <sstream> #include <stdexcept> #include <string> namespace hvdb = openvdb_houdini; namespace hutil = houdini_utils; class SOP_OpenVDB_Vector_Split: public hvdb::SOP_NodeVDB { public: SOP_OpenVDB_Vector_Split(OP_Network*, const char* name, OP_Operator*); ~SOP_OpenVDB_Vector_Split() override = default; static OP_Node* factory(OP_Network*, const char* name, OP_Operator*); class Cache: public SOP_VDBCacheOptions { OP_ERROR cookVDBSop(OP_Context&) override; }; }; void newSopOperator(OP_OperatorTable* table) { if (table == nullptr) return; hutil::ParmList parms; // Input vector grid group name parms.add(hutil::ParmFactory(PRM_STRING, "group", "Group") .setChoiceList(&hutil::PrimGroupMenuInput1) .setTooltip( "Specify a subset of the input VDB grids to be split.\n" "Vector-valued grids will be split into component scalar grids;\n" "all other grids will be unchanged.") .setDocumentation( "A subset of the input VDBs to be split" " (see [specifying volumes|/model/volumes#group])\n\n" "Vector-valued VDBs are split into component scalar VDBs;" " VDBs of other types are passed through unchanged.")); // Toggle to keep/remove source grids parms.add( hutil::ParmFactory(PRM_TOGGLE, "remove_sources", "Remove Source VDBs") .setDefault(PRMoneDefaults) .setTooltip("Remove vector grids that have been split.") .setDocumentation("If enabled, delete vector grids that have been split.")); // Toggle to copy inactive values in addition to active values parms.add( hutil::ParmFactory(PRM_TOGGLE, "copyinactive", "Copy Inactive Values") .setDefault(PRMzeroDefaults) .setTooltip( "If enabled, split the values of both active and inactive voxels.\n" "If disabled, split the values of active voxels only.")); #ifndef SESI_OPENVDB // Verbosity toggle parms.add(hutil::ParmFactory(PRM_TOGGLE, "verbose", "Verbose") .setDocumentation("If enabled, print debugging information to the terminal.")); #endif // Register this operator. 
hvdb::OpenVDBOpFactory("VDB Vector Split", SOP_OpenVDB_Vector_Split::factory, parms, *table) .addInput("Vector VDBs to split into scalar VDBs") .setVerb(SOP_NodeVerb::COOK_INPLACE, []() { return new SOP_OpenVDB_Vector_Split::Cache; }) .setDocumentation("\ #icon: COMMON/openvdb\n\ #tags: vdb\n\ \n\ \"\"\"Split a vector VDB primitive into three scalar VDB primitives.\"\"\"\n\ \n\ @overview\n\ \n\ This node will create three new scalar primitives named `<<input>>.x`,\n\ `<<input>>.y`, and `<<input>>.z`.\n\ \n\ TIP:\n\ To reverse the split (i.e., to merge three scalar VDBs into a vector VDB),\n\ use the [OpenVDB Vector Merge node|Node:sop/DW_OpenVDBVectorMerge]\n\ and set the groups to `@name=*.x`, `@name=*.y`, and `@name=*.z`.\n\ \n\ @related\n\ - [OpenVDB Vector Merge|Node:sop/DW_OpenVDBVectorMerge]\n\ - [Node:sop/vdbvectorsplit]\n\ \n\ @examples\n\ \n\ See [openvdb.org|http://www.openvdb.org/download/] for source code\n\ and usage examples.\n"); } OP_Node* SOP_OpenVDB_Vector_Split::factory(OP_Network* net, const char* name, OP_Operator* op) { return new SOP_OpenVDB_Vector_Split(net, name, op); } SOP_OpenVDB_Vector_Split::SOP_OpenVDB_Vector_Split(OP_Network* net, const char* name, OP_Operator* op): SOP_NodeVDB(net, name, op) { } //////////////////////////////////////// namespace { class VectorGridSplitter { private: const GEO_PrimVDB& mInVdb; hvdb::GridPtr mXGrid, mYGrid, mZGrid; bool mCopyInactiveValues; public: VectorGridSplitter(const GEO_PrimVDB& _vdb, bool _inactive): mInVdb(_vdb), mCopyInactiveValues(_inactive) {} const hvdb::GridPtr& getXGrid() { return mXGrid; } const hvdb::GridPtr& getYGrid() { return mYGrid; } const hvdb::GridPtr& getZGrid() { return mZGrid; } template<typename VecGridT> void operator()(const VecGridT& vecGrid) { const std::string gridName = mInVdb.getGridName(); using VecT = typename VecGridT::ValueType; using ScalarTreeT = typename VecGridT::TreeType::template ValueConverter<typename VecT::value_type>::Type; using ScalarGridT = typename openvdb::Grid<ScalarTreeT>; using ScalarGridPtr = typename ScalarGridT::Ptr; const VecT bkgd = vecGrid.background(); // Construct the output scalar grids, with background values taken from // the components of the input vector grid's background value. ScalarGridPtr xGrid = ScalarGridT::create(bkgd.x()), yGrid = ScalarGridT::create(bkgd.y()), zGrid = ScalarGridT::create(bkgd.z()); mXGrid = xGrid; mYGrid = yGrid; mZGrid = zGrid; // The output scalar grids share the input vector grid's transform. if (openvdb::math::Transform::Ptr xform = vecGrid.transform().copy()) { xGrid->setTransform(xform); yGrid->setTransform(xform); zGrid->setTransform(xform); } // Use accessors for fast sequential voxel access. typename ScalarGridT::Accessor xAccessor = xGrid->getAccessor(), yAccessor = yGrid->getAccessor(), zAccessor = zGrid->getAccessor(); // For each tile or voxel value in the input vector tree, // set a corresponding value in each of the output scalar trees. 
openvdb::CoordBBox bbox; if (mCopyInactiveValues) { for (typename VecGridT::ValueAllCIter it = vecGrid.cbeginValueAll(); it; ++it) { if (!it.getBoundingBox(bbox)) continue; const VecT& val = it.getValue(); const bool active = it.isValueOn(); if (it.isTileValue()) { xGrid->fill(bbox, val.x(), active); yGrid->fill(bbox, val.y(), active); zGrid->fill(bbox, val.z(), active); } else { // it.isVoxelValue() xAccessor.setValue(bbox.min(), val.x()); yAccessor.setValue(bbox.min(), val.y()); zAccessor.setValue(bbox.min(), val.z()); if (!active) { xAccessor.setValueOff(bbox.min()); yAccessor.setValueOff(bbox.min()); zAccessor.setValueOff(bbox.min()); } } } } else { for (typename VecGridT::ValueOnCIter it = vecGrid.cbeginValueOn(); it; ++it) { if (!it.getBoundingBox(bbox)) continue; const VecT& val = it.getValue(); if (it.isTileValue()) { xGrid->fill(bbox, val.x()); yGrid->fill(bbox, val.y()); zGrid->fill(bbox, val.z()); } else { // it.isVoxelValue() xAccessor.setValueOn(bbox.min(), val.x()); yAccessor.setValueOn(bbox.min(), val.y()); zAccessor.setValueOn(bbox.min(), val.z()); } } } } }; // class VectorGridSplitter } // unnamed namespace //////////////////////////////////////// OP_ERROR SOP_OpenVDB_Vector_Split::Cache::cookVDBSop(OP_Context& context) { try { const fpreal time = context.getTime(); const bool copyInactiveValues = evalInt("copyinactive", 0, time); const bool removeSourceGrids = evalInt("remove_sources", 0, time); #ifndef SESI_OPENVDB const bool verbose = evalInt("verbose", 0, time); #else const bool verbose = false; #endif UT_AutoInterrupt progress("Splitting VDB grids"); using PrimVDBSet = std::set<GEO_PrimVDB*>; PrimVDBSet primsToRemove; // Get the group of grids to split. const GA_PrimitiveGroup* splitGroup = nullptr; { UT_String groupStr; evalString(groupStr, "group", 0, time); splitGroup = matchGroup(*gdp, groupStr.toStdString()); } // Iterate over VDB primitives in the selected group. for (hvdb::VdbPrimIterator it(gdp, splitGroup); it; ++it) { if (progress.wasInterrupted()) return error(); GU_PrimVDB* vdb = *it; const std::string gridName = vdb->getGridName(); VectorGridSplitter op(*vdb, copyInactiveValues); if (!hvdb::GEOvdbApply<hvdb::Vec3GridTypes>(*vdb, op)) { if (verbose && !gridName.empty()) { addWarning(SOP_MESSAGE, (gridName + " is not a vector grid").c_str()); } continue; } // Add the new scalar grids to the detail, copying attributes and // group membership from the input vector grid. const std::string xGridName = gridName.empty() ? "x" : gridName + ".x", yGridName = gridName.empty() ? "y" : gridName + ".y", zGridName = gridName.empty() ? "z" : gridName + ".z"; GU_PrimVDB::buildFromGrid(*gdp, op.getXGrid(), vdb, xGridName.c_str()); GU_PrimVDB::buildFromGrid(*gdp, op.getYGrid(), vdb, yGridName.c_str()); GU_PrimVDB::buildFromGrid(*gdp, op.getZGrid(), vdb, zGridName.c_str()); if (verbose) { std::ostringstream ostr; ostr << "Split "; if (!gridName.empty()) ostr << gridName << " "; ostr << "into " << xGridName << ", " << yGridName << " and " << zGridName; addMessage(SOP_MESSAGE, ostr.str().c_str()); } primsToRemove.insert(vdb); } if (removeSourceGrids) { // Remove vector grids that were split. for (PrimVDBSet::iterator i = primsToRemove.begin(), e = primsToRemove.end(); i != e; ++i) { gdp->destroyPrimitive(*(*i), /*andPoints=*/true); } } primsToRemove.clear(); } catch (std::exception& e) { addError(SOP_MESSAGE, e.what()); } return error(); }
10,638
C++
33.654723
98
0.586764
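For reference, the core of VectorGridSplitter can be expressed without any Houdini types. The sketch below splits a Vec3s grid into three float grids for active values only (the copyinactive path adds the ValueAllCIter loop shown above); splitVec3Grid is an illustrative name, not part of the SOP.

// Sketch: split a Vec3s grid into x/y/z float grids (active values only).
#include <openvdb/openvdb.h>
#include <array>

inline std::array<openvdb::FloatGrid::Ptr, 3>
splitVec3Grid(const openvdb::Vec3SGrid& vecGrid)
{
    // Component background values come from the vector grid's background.
    const openvdb::Vec3s bg = vecGrid.background();
    std::array<openvdb::FloatGrid::Ptr, 3> out = {
        openvdb::FloatGrid::create(bg.x()),
        openvdb::FloatGrid::create(bg.y()),
        openvdb::FloatGrid::create(bg.z())};

    const char* suffix[3] = {".x", ".y", ".z"};
    for (int i = 0; i < 3; ++i) {
        out[i]->setTransform(vecGrid.transform().copy()); // share the source transform
        out[i]->setName(vecGrid.getName() + suffix[i]);   // e.g. "vel.x"
    }

    openvdb::FloatGrid::Accessor xAcc = out[0]->getAccessor(),
        yAcc = out[1]->getAccessor(), zAcc = out[2]->getAccessor();

    openvdb::CoordBBox bbox;
    for (openvdb::Vec3SGrid::ValueOnCIter it = vecGrid.cbeginValueOn(); it; ++it) {
        const openvdb::Vec3s v = it.getValue();
        if (it.isTileValue() && it.getBoundingBox(bbox)) {
            // Tiles can be written as filled boxes rather than voxel by voxel.
            out[0]->fill(bbox, v.x());
            out[1]->fill(bbox, v.y());
            out[2]->fill(bbox, v.z());
        } else {
            const openvdb::Coord ijk = it.getCoord();
            xAcc.setValueOn(ijk, v.x());
            yAcc.setValueOn(ijk, v.y());
            zAcc.setValueOn(ijk, v.z());
        }
    }
    return out;
}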